diff --git a/WORKSPACE b/WORKSPACE index 1b63bb2a..c5064e77 100644 --- a/WORKSPACE +++ b/WORKSPACE @@ -163,3 +163,15 @@ npm_install( package_lock_json = "@com_github_buildbarn_bb_storage//:package-lock.json", symlink_node_modules = False, ) + +http_archive( + name = "rules_antlr", + patches = ["@com_github_buildbarn_go_xdr//:patches/rules_antlr/antlr-4.10.diff"], + sha256 = "26e6a83c665cf6c1093b628b3a749071322f0f70305d12ede30909695ed85591", + strip_prefix = "rules_antlr-0.5.0", + urls = ["https://github.com/marcohu/rules_antlr/archive/0.5.0.tar.gz"], +) + +load("@rules_antlr//antlr:repositories.bzl", "rules_antlr_dependencies") + +rules_antlr_dependencies("4.10") diff --git a/go.mod b/go.mod index 4ecf0346..927f1b78 100644 --- a/go.mod +++ b/go.mod @@ -9,88 +9,74 @@ replace github.com/gordonklaus/ineffassign => github.com/gordonklaus/ineffassign replace mvdan.cc/gofumpt => mvdan.cc/gofumpt v0.3.0 require ( - github.com/aws/aws-sdk-go-v2 v1.16.2 - github.com/aws/aws-sdk-go-v2/service/autoscaling v1.22.4 - github.com/aws/aws-sdk-go-v2/service/sqs v1.18.3 + github.com/aws/aws-sdk-go-v2 v1.16.4 + github.com/aws/aws-sdk-go-v2/service/autoscaling v1.23.2 + github.com/aws/aws-sdk-go-v2/service/sqs v1.18.5 github.com/aws/smithy-go v1.11.2 - github.com/bazelbuild/remote-apis v0.0.0-20220223171137-04784f4a830c + github.com/bazelbuild/remote-apis v0.0.0-20220510175640-3b4b64021035 github.com/buildbarn/bb-storage v0.0.0-20220409201227-01fc04b652f6 - github.com/golang/mock v1.6.0 + github.com/buildbarn/go-xdr v0.0.0-20220519081609-da07cfd21fa0 github.com/golang/protobuf v1.5.2 github.com/google/uuid v1.3.0 - github.com/gordonklaus/ineffassign v0.0.0-20210914165742-4cc7213b9bc8 // indirect; GitHub Workflow github.com/gorilla/mux v1.8.0 github.com/hanwen/go-fuse/v2 v2.1.0 github.com/kballard/go-shellquote 
v0.0.0-20180428030007-95032a82bc51 - github.com/prometheus/client_golang v1.12.1 + github.com/prometheus/client_golang v1.12.2 github.com/spf13/pflag v1.0.5 - github.com/stretchr/testify v1.7.1 - go.opentelemetry.io/otel v1.6.3 - go.opentelemetry.io/otel/trace v1.6.3 - golang.org/x/sync v0.0.0-20210220032951-036812b2e83c - golang.org/x/sys v0.0.0-20220408201424-a24fb2fb8a0f - google.golang.org/genproto v0.0.0-20220407144326-9054f6ed7bac - google.golang.org/grpc v1.45.0 + go.opentelemetry.io/otel v1.7.0 + go.opentelemetry.io/otel/trace v1.7.0 + golang.org/x/sync v0.0.0-20220513210516-0976fa681c29 + golang.org/x/sys v0.0.0-20220517195934-5e4e11fc645e + google.golang.org/genproto v0.0.0-20220518221133-4f43b3371335 + google.golang.org/grpc v1.46.2 google.golang.org/protobuf v1.28.0 - mvdan.cc/gofumpt v0.3.0 // indirect; GitHub Workflow ) require ( - cloud.google.com/go v0.100.2 // indirect - cloud.google.com/go/compute v1.5.0 // indirect - dmitri.shuralyov.com/go/generated v0.0.0-20211227232225-c5b6cf572ec5 // indirect - github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.1 // indirect - github.com/aws/aws-sdk-go-v2/config v1.15.3 // indirect - github.com/aws/aws-sdk-go-v2/credentials v1.11.2 // indirect - github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.3 // indirect - github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.9 // indirect - github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.3 // indirect - github.com/aws/aws-sdk-go-v2/internal/ini v1.3.10 // indirect - github.com/aws/aws-sdk-go-v2/internal/v4a v1.0.0 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.9.1 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.1.3 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.3 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.13.3 
// indirect - github.com/aws/aws-sdk-go-v2/service/s3 v1.26.4 // indirect - github.com/aws/aws-sdk-go-v2/service/sso v1.11.3 // indirect - github.com/aws/aws-sdk-go-v2/service/sts v1.16.3 // indirect + cloud.google.com/go v0.100.1 // indirect + github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.0 // indirect + github.com/aws/aws-sdk-go-v2/config v1.15.0 // indirect + github.com/aws/aws-sdk-go-v2/credentials v1.10.0 // indirect + github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.0 // indirect + github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.11 // indirect + github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.5 // indirect + github.com/aws/aws-sdk-go-v2/internal/ini v1.3.7 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.9.0 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.1.0 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.0 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.13.0 // indirect + github.com/aws/aws-sdk-go-v2/service/s3 v1.26.0 // indirect + github.com/aws/aws-sdk-go-v2/service/sso v1.11.0 // indirect + github.com/aws/aws-sdk-go-v2/service/sts v1.16.0 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/cespare/xxhash/v2 v2.1.2 // indirect - github.com/davecgh/go-spew v1.1.1 // indirect github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect github.com/go-logr/logr v1.2.3 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/go-redis/redis/extra/rediscmd v0.2.0 // indirect github.com/go-redis/redis/extra/redisotel v0.3.0 // indirect - github.com/go-redis/redis/v8 v8.11.5 // indirect - github.com/google/go-cmp v0.5.7 // indirect + github.com/go-redis/redis/v8 
v8.11.4 // indirect github.com/google/go-jsonnet v0.18.0 // indirect github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 // indirect github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 // indirect github.com/grpc-ecosystem/grpc-gateway v1.16.0 // indirect - github.com/grpc-ecosystem/grpc-gateway/v2 v2.10.0 // indirect github.com/jmespath/go-jmespath v0.4.0 // indirect github.com/klauspost/compress v1.15.1 // indirect github.com/lazybeaver/xorshift v0.0.0-20170702203709-ce511d4823dd // indirect github.com/matttproud/golang_protobuf_extensions v1.0.1 // indirect - github.com/pmezard/go-difflib v1.0.0 // indirect github.com/prometheus/client_model v0.2.0 // indirect - github.com/prometheus/common v0.33.0 // indirect + github.com/prometheus/common v0.32.1 // indirect github.com/prometheus/procfs v0.7.3 // indirect - github.com/stretchr/objx v0.3.0 // indirect - go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.31.0 // indirect - go.opentelemetry.io/contrib/propagators/b3 v1.6.0 // indirect - go.opentelemetry.io/otel/exporters/jaeger v1.6.3 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.6.3 // indirect - go.opentelemetry.io/otel/sdk v1.6.3 // indirect - go.opentelemetry.io/proto/otlp v0.15.0 // indirect - golang.org/x/mod v0.5.1 // indirect - golang.org/x/net v0.0.0-20220407224826-aac1ed45d8e3 // indirect + go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.29.0 // indirect + go.opentelemetry.io/contrib/propagators/b3 v1.4.0 // indirect + go.opentelemetry.io/otel/exporters/jaeger v1.4.1 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.4.1 // indirect + go.opentelemetry.io/otel/sdk v1.4.1 // indirect + go.opentelemetry.io/proto/otlp v0.12.0 // indirect + golang.org/x/net v0.0.0-20220225172249-27dd8689420f // indirect golang.org/x/oauth2 
v0.0.0-20220309155454-6242fa91716a // indirect golang.org/x/text v0.3.7 // indirect - golang.org/x/tools v0.1.9 // indirect google.golang.org/appengine v1.6.7 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect - gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect sigs.k8s.io/yaml v1.3.0 // indirect ) diff --git a/go.sum b/go.sum index cb0e842e..200d90ba 100644 --- a/go.sum +++ b/go.sum @@ -28,18 +28,12 @@ cloud.google.com/go v0.97.0/go.mod h1:GF7l59pYBVlXQIBLx3a761cZ41F9bBH3JUlihCt2Ud cloud.google.com/go v0.99.0/go.mod h1:w0Xx2nLzqWJPuozYQX+hFfCSI8WioryfRDzkoI/Y2ZA= cloud.google.com/go v0.100.1 h1:i2ukt/HTgcBhgL1J0Dx9w7gb5oCe7zWEcumzQSh+9I4= cloud.google.com/go v0.100.1/go.mod h1:fs4QogzfH5n2pBXBP9vRiU+eCny7lD2vmFZy79Iuw1U= -cloud.google.com/go v0.100.2 h1:t9Iw5QH5v4XtlEQaCtUY7x6sCABps8sW0acw7e2WQ6Y= -cloud.google.com/go v0.100.2/go.mod h1:4Xra9TjzAeYHrl5+oeLlzbM2k3mjVhZh4UqTZ//w99A= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= -cloud.google.com/go/compute v0.1.0/go.mod h1:GAesmwr110a34z04OlxYkATPBEfVhkymfTBXtfbBFow= -cloud.google.com/go/compute v1.3.0/go.mod h1:cCZiE1NHEtai4wiufUhW8I8S1JKkAnhnQJWM7YD99wM= -cloud.google.com/go/compute v1.5.0 h1:b1zWmYuuHz7gO9kDcM/EpHGr06UgsYNRpNJzI2kFiLM= -cloud.google.com/go/compute v1.5.0/go.mod h1:9SMHyhJlzhlkJqrPAc839t2BZFTSk6Jdj6mkzQJeu0M= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= 
cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= @@ -51,7 +45,6 @@ cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0Zeo cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= -dmitri.shuralyov.com/go/generated v0.0.0-20211227232225-c5b6cf572ec5/go.mod h1:WG7q7swWsS2f9PYpt5DoEP/EBYWx8We5UoRltn9vJl8= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= @@ -62,93 +55,62 @@ github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRF github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= -github.com/aws/aws-sdk-go-v2 v1.15.0 h1:f9kWLNfyCzCB43eupDAk3/XgJ2EpgktiySD6leqs0js= github.com/aws/aws-sdk-go-v2 v1.15.0/go.mod h1:lJYcuZZEHWNIb6ugJjbQY1fykdoobWbOS7kJYb4APoI= -github.com/aws/aws-sdk-go-v2 v1.16.2 h1:fqlCk6Iy3bnCumtrLz9r3mJ/2gUT0pJ0wLFVIdWh+JA= -github.com/aws/aws-sdk-go-v2 v1.16.2/go.mod h1:ytwTPBG6fXTZLxxeeCCWj2/EMYp/xDUgX+OET6TLNNU= +github.com/aws/aws-sdk-go-v2 v1.16.4 h1:swQTEQUyJF/UkEA94/Ga55miiKFoXmm/Zd67XHgmjSg= +github.com/aws/aws-sdk-go-v2 v1.16.4/go.mod h1:ytwTPBG6fXTZLxxeeCCWj2/EMYp/xDUgX+OET6TLNNU= 
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.0 h1:J/tiyHbl07LL4/1i0rFrW5pbLMvo7M6JrekBUNpLeT4= github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.0/go.mod h1:ohZjRmiToJ4NybwWTGOCbzlUQU8dxSHxYKzuX7k5l6Y= -github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.1 h1:SdK4Ppk5IzLs64ZMvr6MrSficMtjY2oS0WOORXTlxwU= -github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.1/go.mod h1:n8Bs1ElDD2wJ9kCRTczA83gYbBmjSwZp3umc6zF4EeM= github.com/aws/aws-sdk-go-v2/config v1.15.0 h1:cibCYF2c2uq0lsbu0Ggbg8RuGeiHCmXwUlTMS77CiK4= github.com/aws/aws-sdk-go-v2/config v1.15.0/go.mod h1:NccaLq2Z9doMmeQXHQRrt2rm+2FbkrcPvfdbCaQn5hY= -github.com/aws/aws-sdk-go-v2/config v1.15.3 h1:5AlQD0jhVXlGzwo+VORKiUuogkG7pQcLJNzIzK7eodw= -github.com/aws/aws-sdk-go-v2/config v1.15.3/go.mod h1:9YL3v07Xc/ohTsxFXzan9ZpFpdTOFl4X65BAKYaz8jg= github.com/aws/aws-sdk-go-v2/credentials v1.10.0 h1:M/FFpf2w31F7xqJqJLgiM0mFpLOtBvwZggORr6QCpo8= github.com/aws/aws-sdk-go-v2/credentials v1.10.0/go.mod h1:HWJMr4ut5X+Lt/7epc7I6Llg5QIcoFHKAeIzw32t6EE= -github.com/aws/aws-sdk-go-v2/credentials v1.11.2 h1:RQQ5fzclAKJyY5TvF+fkjJEwzK4hnxQCLOu5JXzDmQo= -github.com/aws/aws-sdk-go-v2/credentials v1.11.2/go.mod h1:j8YsY9TXTm31k4eFhspiQicfXPLZ0gYXA50i4gxPE8g= github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.0 h1:gUlb+I7NwDtqJUIRcFYDiheYa97PdVHG/5Iz+SwdoHE= github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.0/go.mod h1:prX26x9rmLwkEE1VVCelQOQgRN9sOVIssgowIJ270SE= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.3 h1:LWPg5zjHV9oz/myQr4wMs0gi4CjnDN/ILmyZUFYXZsU= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.3/go.mod h1:uk1vhHHERfSVCUnqSqz8O48LBYDSC+k6brng09jcMOk= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.6 h1:xiGjGVQsem2cxoIX61uRGy+Jux2s9C/kKbTrWLdrU54= github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.6/go.mod 
h1:SSPEdf9spsFgJyhjrXvawfpyzrXHBCUe+2eQ1CjC1Ak= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.9 h1:onz/VaaxZ7Z4V+WIN9Txly9XLTmoOh1oJ8XcAC3pako= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.9/go.mod h1:AnVH5pvai0pAF4lXRq0bmhbes1u9R8wTE+g+183bZNM= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.0 h1:bt3zw79tm209glISdMRCIVRCwvSDXxgAxh5KWe2qHkY= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.11 h1:gsqHplNh1DaQunEKZISK56wlpbCg0yKxNVvGWCFuF1k= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.11/go.mod h1:tmUB6jakq5DFNcXsXOA/ZQ7/C8VnSKYkx58OI7Fh79g= github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.0/go.mod h1:viTrxhAuejD+LszDahzAE2x40YjYWhMqzHxv2ZiWaME= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.3 h1:9stUQR/u2KXU6HkFJYlqnZEjBnbgrVbG6I5HN09xZh0= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.3/go.mod h1:ssOhaLpRlh88H3UmEcsBoVKq309quMvm3Ds8e9d4eJM= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.5 h1:PLFj+M2PgIDHG//hw3T0O0KLI4itVtAjtxrZx4AHPLg= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.5/go.mod h1:fV1AaS2gFc1tM0RCb015FJ0pvWVUfJZANzjwoO4YakM= github.com/aws/aws-sdk-go-v2/internal/ini v1.3.7 h1:QOMEP8jnO8sm0SX/4G7dbaIq2eEP2wcWEsF0jzrXLJc= github.com/aws/aws-sdk-go-v2/internal/ini v1.3.7/go.mod h1:P5sjYYf2nc5dE6cZIzEMsVtq6XeLD7c4rM+kQJPrByA= -github.com/aws/aws-sdk-go-v2/internal/ini v1.3.10 h1:by9P+oy3P/CwggN4ClnW2D4oL91QV7pBzBICi1chZvQ= -github.com/aws/aws-sdk-go-v2/internal/ini v1.3.10/go.mod h1:8DcYQcz0+ZJaSxANlHIsbbi6S+zMwjwdDqwW3r9AzaE= -github.com/aws/aws-sdk-go-v2/internal/v4a v1.0.0 h1:cq+47u1zpHyH+PSkbBx1N9whx4TiM9m9ibimOPaNlBg= -github.com/aws/aws-sdk-go-v2/internal/v4a v1.0.0/go.mod h1:Nf3QiqrNy2sj3Rku+9z4nN/bThI97gQmR7YxG3s+ez8= -github.com/aws/aws-sdk-go-v2/service/autoscaling v1.22.1 
h1:9kvgpjvAJynrMyNKE71rf4MFOl9XjMGSh0QLs0hgd4w= -github.com/aws/aws-sdk-go-v2/service/autoscaling v1.22.1/go.mod h1:Rj8xktogEjzRs4G7uGkm/y62b0FGuE8m2sFk5sl8gHU= -github.com/aws/aws-sdk-go-v2/service/autoscaling v1.22.4 h1:5idZ9wTGvXKApIm7MtcJoMRM8/016tLWLmuXz2chYqc= -github.com/aws/aws-sdk-go-v2/service/autoscaling v1.22.4/go.mod h1:mXzRCMCqLSHkUbw6vW4xHFSbSPFvD28OpeRQsNohImo= +github.com/aws/aws-sdk-go-v2/service/autoscaling v1.23.2 h1:RQhRsMv7qcIQXI6KO5MytJYXVo3cSl4EJQmGI9FTdcU= +github.com/aws/aws-sdk-go-v2/service/autoscaling v1.23.2/go.mod h1:M2gcYyhXfaxkXahv2lQAff/RpGWE+7g0Ni+bTAAffXw= github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.9.0 h1:uhb7moM7VjqIEpWzTpCvceLDSwrWpaleXm39OnVjuLE= github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.9.0/go.mod h1:pA2St3Pu2Ldy6fBPY45Azoh1WBG4oS7eIKOd4XN7Meg= -github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.9.1 h1:T4pFel53bkHjL2mMo+4DKE6r6AuoZnM0fg7k1/ratr4= -github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.9.1/go.mod h1:GeUru+8VzrTXV/83XyMJ80KpH8xO89VPoUileyNQ+tc= github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.1.0 h1:IhiVUezzcKlszx6wXSDQYDjEn/bIO6Mc73uNQ1YfTmA= github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.1.0/go.mod h1:kLKc4lo+XKlMhENIpKbp7dCePpyUqUG1PqGIAXoxwNE= -github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.1.3 h1:I0dcwWitE752hVSMrsLCxqNQ+UdEp3nACx2bYNMQq+k= -github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.1.3/go.mod h1:Seb8KNmD6kVTjwRjVEgOT5hPin6sq+v4C2ycJQDwuH8= github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.0 h1:YQ3fTXACo7xeAqg0NiqcCmBOXJruUfh+4+O2qxF2EjQ= github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.0/go.mod h1:R31ot6BgESRCIoxwfKtIHzZMo/vsZn2un81g9BJ4nmo= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.3 
h1:Gh1Gpyh01Yvn7ilO/b/hr01WgNpaszfbKMUgqM186xQ= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.3/go.mod h1:wlY6SVjuwvh3TVRpTqdy4I1JpBFLX4UGeKZdWntaocw= github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.13.0 h1:i+7ve93k5G0S2xWBu60CKtmzU5RjBj9g7fcSypQNLR0= github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.13.0/go.mod h1:L8EoTDLnnN2zL7MQPhyfCbmiZqEs8Cw7+1d9RlLXT5s= -github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.13.3 h1:BKjwCJPnANbkwQ8vzSbaZDKawwagDubrH/z/c0X+kbQ= -github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.13.3/go.mod h1:Bm/v2IaN6rZ+Op7zX+bOUMdL4fsrYZiD0dsjLhNKwZc= github.com/aws/aws-sdk-go-v2/service/s3 v1.26.0 h1:6IdBZVY8zod9umkwWrtbH2opcM00eKEmIfZKGUg5ywI= github.com/aws/aws-sdk-go-v2/service/s3 v1.26.0/go.mod h1:WJzrjAFxq82Hl42oh8HuvwpugTgxmoiJBBX8SLwVs74= -github.com/aws/aws-sdk-go-v2/service/s3 v1.26.4 h1:frOI/v6KWuKGlKUA5gheRw01EDpxcCxTalFQkCOZXAo= -github.com/aws/aws-sdk-go-v2/service/s3 v1.26.4/go.mod h1:qFKU5d+PAv+23bi9ZhtWeA+TmLUz7B/R59ZGXQ1Mmu4= -github.com/aws/aws-sdk-go-v2/service/sqs v1.18.0 h1:nKaxCMASO9YbaLROWQqwpUiv82oWks6hHHbTmWiRx00= -github.com/aws/aws-sdk-go-v2/service/sqs v1.18.0/go.mod h1:sXyfsQ0VN6V8HxkMIvH+eFuy9tVEgCSp+ZkT3trHRTQ= -github.com/aws/aws-sdk-go-v2/service/sqs v1.18.3 h1:uHjK81fESbGy2Y9lspub1+C6VN5W2UXTDo2A/Pm4G0U= -github.com/aws/aws-sdk-go-v2/service/sqs v1.18.3/go.mod h1:skmQo0UPvsjsuYYSYMVmrPc1HWCbHUJyrCEp+ZaLzqM= +github.com/aws/aws-sdk-go-v2/service/sqs v1.18.5 h1:Nt1QV0zSgC9WNbcRIgHeYIgFtuuEzijKGYEeB8Xa/zY= +github.com/aws/aws-sdk-go-v2/service/sqs v1.18.5/go.mod h1:UCrTk+1stZ/o3VdJVUhtRIMiU99MY+bKNK8lNtySonQ= github.com/aws/aws-sdk-go-v2/service/sso v1.11.0 h1:gZLEXLH6NiU8Y52nRhK1jA+9oz7LZzBK242fi/ziXa4= github.com/aws/aws-sdk-go-v2/service/sso v1.11.0/go.mod h1:d1WcT0OjggjQCAdOkph8ijkr5sUwk1IH/VenOn7W1PU= 
-github.com/aws/aws-sdk-go-v2/service/sso v1.11.3 h1:frW4ikGcxfAEDfmQqWgMLp+F1n4nRo9sF39OcIb5BkQ= -github.com/aws/aws-sdk-go-v2/service/sso v1.11.3/go.mod h1:7UQ/e69kU7LDPtY40OyoHYgRmgfGM4mgsLYtcObdveU= github.com/aws/aws-sdk-go-v2/service/sts v1.16.0 h1:0+X/rJ2+DTBKWbUsn7WtF0JvNk/fRf928vkFsXkbbZs= github.com/aws/aws-sdk-go-v2/service/sts v1.16.0/go.mod h1:+8k4H2ASUZZXmjx/s3DFLo9tGBb44lkz3XcgfypJY7s= -github.com/aws/aws-sdk-go-v2/service/sts v1.16.3 h1:cJGRyzCSVwZC7zZZ1xbx9m32UnrKydRYhOvcD1NYP9Q= -github.com/aws/aws-sdk-go-v2/service/sts v1.16.3/go.mod h1:bfBj0iVmsUyUg4weDB4NxktD9rDGeKSVWnjTnwbx9b8= -github.com/aws/smithy-go v1.11.1 h1:IQ+lPZVkSM3FRtyaDox41R8YS6iwPMYIreejOgPW49g= github.com/aws/smithy-go v1.11.1/go.mod h1:3xHYmszWVx2c0kIwQeEVf9uSm4fYZt67FBJnwub1bgM= github.com/aws/smithy-go v1.11.2 h1:eG/N+CcUMAvsdffgMvjMKwfyDzIkjM6pfxMJ8Mzc6mE= github.com/aws/smithy-go v1.11.2/go.mod h1:3xHYmszWVx2c0kIwQeEVf9uSm4fYZt67FBJnwub1bgM= -github.com/bazelbuild/remote-apis v0.0.0-20220223171137-04784f4a830c h1:vhW2gGrc9vvUQqtcAjgKDuMoqzU7mDC15Gf/KVjTt7o= -github.com/bazelbuild/remote-apis v0.0.0-20220223171137-04784f4a830c/go.mod h1:ry8Y6CkQqCVcYsjPOlLXDX2iRVjOnjogdNwhvHmRcz8= +github.com/bazelbuild/remote-apis v0.0.0-20220510175640-3b4b64021035 h1:Zf6l8jLGtvSgdpqqiCRtLvF99VsoDD++uz7eSxvIafE= +github.com/bazelbuild/remote-apis v0.0.0-20220510175640-3b4b64021035/go.mod h1:ry8Y6CkQqCVcYsjPOlLXDX2iRVjOnjogdNwhvHmRcz8= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= -github.com/buildbarn/bb-storage 
v0.0.0-20220316195748-251ae686ce20 h1:Z/L1vAx2b8fdDlP7459uvf84zoNgttF7cMVZuR9LdwY= -github.com/buildbarn/bb-storage v0.0.0-20220316195748-251ae686ce20/go.mod h1:prAd3v0rqcVFP2fzVrvzh5N5x3OitwcEiyiVoBTleN4= github.com/buildbarn/bb-storage v0.0.0-20220409201227-01fc04b652f6 h1:GvhBeVPp8mnSKzVjMJXKjMgUNr2xnO7V2DD4xubgC/Y= github.com/buildbarn/bb-storage v0.0.0-20220409201227-01fc04b652f6/go.mod h1:prAd3v0rqcVFP2fzVrvzh5N5x3OitwcEiyiVoBTleN4= +github.com/buildbarn/go-xdr v0.0.0-20220513202525-0bde1f7f5dd4 h1:krYErvKbBvSV/XTHt8Taq2x1I09yVjxuIV8m09jrZjc= +github.com/buildbarn/go-xdr v0.0.0-20220513202525-0bde1f7f5dd4/go.mod h1:z/IoLaQHBTk5YL1OFY4WSjW2NMoATNltPHJDD+xXgmE= +github.com/buildbarn/go-xdr v0.0.0-20220519071414-5ac7dc151220 h1:jporlcQKb4KBxLVfSYRUzHznNGECOPcYBBZxBiz1kPo= +github.com/buildbarn/go-xdr v0.0.0-20220519071414-5ac7dc151220/go.mod h1:z/IoLaQHBTk5YL1OFY4WSjW2NMoATNltPHJDD+xXgmE= +github.com/buildbarn/go-xdr v0.0.0-20220519081609-da07cfd21fa0 h1:n4WcJBxZ7nKYGRreNmXGMhsLaGRAQsCeH9UfD6u2NuY= +github.com/buildbarn/go-xdr v0.0.0-20220519081609-da07cfd21fa0/go.mod h1:z/IoLaQHBTk5YL1OFY4WSjW2NMoATNltPHJDD+xXgmE= github.com/cenkalti/backoff/v4 v4.1.2/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE= @@ -164,10 +126,9 @@ github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XP github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod 
h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78= github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc= @@ -179,11 +140,10 @@ github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.m github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= +github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/fatih/color v1.10.0/go.mod 
h1:ELkj/draVOlAH/xkhN6mQ50Qd0MPOk5AAr3maGEBuJM= -github.com/frankban/quicktest v1.14.2/go.mod h1:mgiwOwqx65TmIk1wJ6Q7wvnVMocbUorkibMOrVTHZps= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= -github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= @@ -192,12 +152,9 @@ github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2 github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= -github.com/go-kit/log v0.2.0/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= -github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= -github.com/go-logr/logr v1.2.2 h1:ahHml/yUpnlb96Rp8HCvtYVPY8ZYpxq3g7UYchIYwbs= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0= github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= @@ -211,14 +168,11 @@ github.com/go-redis/redis/v8 v8.3.2/go.mod h1:jszGxBCez8QA1HWSmQxJO9Y82kNibbUmeY 
github.com/go-redis/redis/v8 v8.5.0/go.mod h1:YmEcgBDttjnkbMzDAhDtQxY9yVA7jMN6PCR5HeMvqFE= github.com/go-redis/redis/v8 v8.11.4 h1:kHoYkfZP6+pe04aFTnhDH6GDROa5yJdHJVNxV3F46Tg= github.com/go-redis/redis/v8 v8.11.4/go.mod h1:2Z2wHZXdQpCDXEGzqMockDpNyYvi2l4Pxt6RJr792+w= -github.com/go-redis/redis/v8 v8.11.5 h1:AcZZR7igkdvfVmQTPnu9WE37LRrO/YrBH5zWyjDC0oI= -github.com/go-redis/redis/v8 v8.11.5/go.mod h1:gREzHqY1hg6oD9ngVRbLStwAWKhA0FEgq8Jd4h5lpwo= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= @@ -230,7 +184,6 @@ github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8= -github.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc= 
github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= @@ -265,7 +218,6 @@ github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.7 h1:81/ik6ipDQS2aGcBfIN5dHDB36BwrStyeAQquSYCV4o= github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= github.com/google/go-jsonnet v0.18.0 h1:/6pTy6g+Jh1a1I2UMoAODkqELFiVIdOxbNwv0DDzoOg= github.com/google/go-jsonnet v0.18.0/go.mod h1:C3fTzyVJDslXdiTqw/bTFk7vSGyCtH3MGRbDfvEwGd0= @@ -296,7 +248,6 @@ github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+ github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0= github.com/googleapis/gax-go/v2 v2.1.1/go.mod h1:hddJymUZASv3XPyGkUpKj8pPO47Rmb0eJc8R6ouapiM= -github.com/gordonklaus/ineffassign v0.0.0-20201223204552-cba2d2a1d5d9/go.mod h1:cuNKsD1zp2v6XfE/orVX2QE1LC+i254ceGcVeDT3pTU= github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 h1:+9834+KizmvFV7pXQGSXQTsaWhq2GjuNUt0aUU0YBYw= @@ -305,9 +256,6 @@ github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 
h1:Ovs26xHkKqVztRpIrF/92Bcuy github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0/go.mod h1:hgWBS7lorOAVIJEQMi4ZsPv9hVvWI6+ch50m39Pf2Ks= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.10.0 h1:ESEyqQqXXFIcImj/BE8oKEX37Zsuceb2cZI+EL/zNCY= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.10.0/go.mod h1:XnLCLFp3tjoZJszVKjfpyAK6J8sYIcQXWQxmqLWF21I= github.com/hanwen/go-fuse v1.0.0 h1:GxS9Zrn6c35/BnfiVsZVWmsG803xwE7eVRDvcf/BEVc= github.com/hanwen/go-fuse v1.0.0/go.mod h1:unqXarDXqzAk0rt98O2tVndEPIpUgLD9+rwFisZH3Ok= github.com/hanwen/go-fuse/v2 v2.1.0 h1:+32ffteETaLYClUj0a3aHjZ1hOPxxaNEHiZiujuDaek= @@ -319,7 +267,6 @@ github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1: github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= -github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= @@ -340,12 +287,8 @@ github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxv 
github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= -github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= -github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= -github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/kylelemons/godebug v0.0.0-20170820004349-d65d576e9348/go.mod h1:B69LEHPfb2qLo0BaaOLcbitczOKLWTsrBG9LczfCD4k= github.com/lazybeaver/xorshift v0.0.0-20170702203709-ce511d4823dd h1:TfmftEfB1zJiDTFi3Qw1xlbEbfJPKUhEDC19clfBMb8= github.com/lazybeaver/xorshift v0.0.0-20170702203709-ce511d4823dd/go.mod h1:qXyNSomGEqu0M7ewNl3CLgle09PFHk8++5NrBWCz7+Q= @@ -361,33 +304,28 @@ github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjY github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= -github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= 
github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= github.com/onsi/ginkgo v1.14.2/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= github.com/onsi/ginkgo v1.15.0/go.mod h1:hF8qUzuuC8DJGygJH3726JnCZX4MYbRB8yFfISqnKUg= -github.com/onsi/ginkgo v1.16.4 h1:29JGrr5oVBm5ulCWet69zQkzWipVXIol6ygQUe/EzNc= github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0= github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= github.com/onsi/gomega v1.10.3/go.mod h1:V9xEwhxec5O8UDM77eCW8vLymOMltsqPVYWrpDsH8xc= github.com/onsi/gomega v1.10.5/go.mod h1:gza4q3jKQJijlu05nKWRCW/GavJumGt8aNRxWg7mt48= -github.com/onsi/gomega v1.16.0 h1:6gjqkI8iiRHMvdccRJM8rVKjCWk6ZIm6FTm3ddIe4/c= github.com/onsi/gomega v1.16.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY= github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= -github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= github.com/prometheus/client_golang v1.7.1/go.mod 
h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= -github.com/prometheus/client_golang v1.12.1 h1:ZiaPsmm9uiBeaSMRznKsCDNtPCS0T3JVDGF+06gjBzk= -github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY= +github.com/prometheus/client_golang v1.12.2 h1:51L9cDoUHVrXx4zWYlcLQIZ+d+VXHgqnYKkIuq4g/34= +github.com/prometheus/client_golang v1.12.2/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= @@ -398,8 +336,6 @@ github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB8 github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= github.com/prometheus/common v0.32.1 h1:hWIdL3N2HoUx3B8j3YN9mWor0qhY/NlEKZEaXxuIRh4= github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= -github.com/prometheus/common v0.33.0 h1:rHgav/0a6+uYgGdNt3jwz8FNSesO/Hsang3O0T9A5SE= -github.com/prometheus/common v0.33.0/go.mod h1:gB3sOl7P0TvJabZpLY5uQMpUqRCPPCyRLCZYc7JZTNE= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= @@ -408,10 +344,6 @@ github.com/prometheus/procfs v0.7.3 
h1:4jVXhlkAyzOScmCkXBTOLRLTz8EeU+eyjrwB/EPq0 github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= -github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= -github.com/rogpeppe/go-internal v1.8.1 h1:geMPLpDpQOgVyCg5z5GoRwLHepNdb71NXb67XFkP+Eg= -github.com/rogpeppe/go-internal v1.8.1/go.mod h1:JeRgkft04UBgHMgCIwADu4Pn6Mtm5d4nPKWu0nJ5d+o= -github.com/sergi/go-diff v1.1.0 h1:we8PVUC3FE2uYfodKH/nBHMSetSfHDR6scGdBi+erh0= github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= @@ -421,22 +353,18 @@ github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.3.0 h1:NGXK3lHquSN08v5vWalVI/L8XU9hdzE/G6xsrze47As= -github.com/stretchr/objx v0.3.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= 
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.7.1 h1:5TQK59W5E3v0r2duFAb7P95B6hEeOyEnHRa8MjYSMTY= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= -github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= @@ -446,49 +374,32 @@ go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.29.0 h1:n9b7AAdbQtQ0k9dm0Dm2/KUcUqtG8i2O15KzNaDze8c= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.29.0/go.mod h1:LsankqVDx4W+RhZNA5uWarULII/MBhF5qwCYxTuyXjs= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.31.0 h1:li8u9OSMvLau7rMs8bmiL82OazG6MAkwPz2i6eS8TBQ= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.31.0/go.mod h1:SY9qHHUES6W3oZnO1H2W8NvsSovIoXRg/A1AH9px8+I= go.opentelemetry.io/contrib/propagators/b3 v1.4.0 
h1:wDb2ct7xMzossYpx44w81skxkEyeT2IRnBgYKqyEork= go.opentelemetry.io/contrib/propagators/b3 v1.4.0/go.mod h1:K399DN23drp0RQGXCbSPOt9075HopQigMgUL99oR8hc= -go.opentelemetry.io/contrib/propagators/b3 v1.6.0 h1:rHeNbko1wNe1Sazpw5IJD83x43lfzMnDb8vckdKxRu8= -go.opentelemetry.io/contrib/propagators/b3 v1.6.0/go.mod h1:6kJAkL2/nNqP9AYhm/8j4dzVU8BfpcvYr2cy25RGBak= go.opentelemetry.io/otel v0.13.0/go.mod h1:dlSNewoRYikTkotEnxdmuBHgzT+k/idJSfDv/FxEnOY= go.opentelemetry.io/otel v0.16.0/go.mod h1:e4GKElweB8W2gWUqbghw0B8t5MCTccc9212eNHnOHwA= go.opentelemetry.io/otel v0.17.0/go.mod h1:Oqtdxmf7UtEvL037ohlgnaYa1h7GtMh0NcSd9eqkC9s= go.opentelemetry.io/otel v1.4.0/go.mod h1:jeAqMFKy2uLIxCtKxoFj0FAL5zAPKQagc3+GtBWakzk= -go.opentelemetry.io/otel v1.4.1 h1:QbINgGDDcoQUoMJa2mMaWno49lja9sHwp6aoa2n3a4g= go.opentelemetry.io/otel v1.4.1/go.mod h1:StM6F/0fSwpd8dKWDCdRr7uRvEPYdW0hBSlbdTiUde4= -go.opentelemetry.io/otel v1.6.1/go.mod h1:blzUabWHkX6LJewxvadmzafgh/wnvBSDBdOuwkAtrWQ= -go.opentelemetry.io/otel v1.6.3 h1:FLOfo8f9JzFVFVyU+MSRJc2HdEAXQgm7pIv2uFKRSZE= -go.opentelemetry.io/otel v1.6.3/go.mod h1:7BgNga5fNlF/iZjG06hM3yofffp0ofKCDwSXx1GC4dI= +go.opentelemetry.io/otel v1.7.0 h1:Z2lA3Tdch0iDcrhJXDIlC94XE+bxok1F9B+4Lz/lGsM= +go.opentelemetry.io/otel v1.7.0/go.mod h1:5BdUoMIz5WEs0vt0CUEMtSSaTSHBBVwrhnz7+nrD5xk= go.opentelemetry.io/otel/exporters/jaeger v1.4.1 h1:VHCK+2yTZDqDaVXj7JH2Z/khptuydo6C0ttBh2bxAbc= go.opentelemetry.io/otel/exporters/jaeger v1.4.1/go.mod h1:ZW7vkOu9nC1CxsD8bHNHCia5JUbwP39vxgd1q4Z5rCI= -go.opentelemetry.io/otel/exporters/jaeger v1.6.3 h1:7tvBU1Ydbzq080efuepYYqC1Pv3/vOFBgCSrxLb24d0= -go.opentelemetry.io/otel/exporters/jaeger v1.6.3/go.mod h1:YgX3eZWbJzgrNyNHCK0otGreAMBTIAcObtZS2VRi6sU= go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.4.1/go.mod h1:VpP4/RMn8bv8gNo9uK7/IMY4mtWLELsS+JIP0inH0h4= -go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.6.3/go.mod h1:NEu79Xo32iVb+0gVNV8PMd7GoWqnyDXRlj04yFjqz40= go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.4.1 
h1:WPpPsAAs8I2rA47v5u0558meKmmwm1Dj99ZbqCV8sZ8= go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.4.1/go.mod h1:o5RW5o2pKpJLD5dNTCmjF1DorYwMeFJmb/rKr5sLaa8= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.6.3 h1:4/UjHWMVVc5VwX/KAtqJOHErKigMCH8NexChMuanb/o= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.6.3/go.mod h1:UJmXdiVVBaZ63umRUTwJuCMAV//GCMvDiQwn703/GoY= go.opentelemetry.io/otel/metric v0.17.0/go.mod h1:hUz9lH1rNXyEwWAhIWCMFWKhYtpASgSnObJFnU26dJ0= go.opentelemetry.io/otel/oteltest v0.17.0/go.mod h1:JT/LGFxPwpN+nlsTiinSYjdIx3hZIGqHCpChcIZmdoE= go.opentelemetry.io/otel/sdk v1.4.1 h1:J7EaW71E0v87qflB4cDolaqq3AcujGrtyIPGQoZOB0Y= go.opentelemetry.io/otel/sdk v1.4.1/go.mod h1:NBwHDgDIBYjwK2WNu1OPgsIc2IJzmBXNnvIJxJc8BpE= -go.opentelemetry.io/otel/sdk v1.6.3 h1:prSHYdwCQOX5DrsEzxowH3nLhoAzEBdZhvrR79scfLs= -go.opentelemetry.io/otel/sdk v1.6.3/go.mod h1:A4iWF7HTXa+GWL/AaqESz28VuSBIcZ+0CV+IzJ5NMiQ= go.opentelemetry.io/otel/trace v0.17.0/go.mod h1:bIujpqg6ZL6xUTubIUgziI1jSaUPthmabA/ygf/6Cfg= go.opentelemetry.io/otel/trace v1.4.0/go.mod h1:uc3eRsqDfWs9R7b92xbQbU42/eTNz4N+gLP8qJCi4aE= -go.opentelemetry.io/otel/trace v1.4.1 h1:O+16qcdTrT7zxv2J6GejTPFinSwA++cYerC5iSiF8EQ= go.opentelemetry.io/otel/trace v1.4.1/go.mod h1:iYEVbroFCNut9QkwEczV9vMRPHNKSSwYZjulEtsmhFc= -go.opentelemetry.io/otel/trace v1.6.1/go.mod h1:RkFRM1m0puWIq10oxImnGEduNBzxiN7TXluRBtE+5j0= -go.opentelemetry.io/otel/trace v1.6.3 h1:IqN4L+5b0mPNjdXIiZ90Ni4Bl5BRkDQywePLWemd9bc= -go.opentelemetry.io/otel/trace v1.6.3/go.mod h1:GNJQusJlUgZl9/TQBPKU/Y/ty+0iVB5fjhKeJGZPGFs= +go.opentelemetry.io/otel/trace v1.7.0 h1:O37Iogk1lEkMRXewVtZ1BBTVn5JEp8GrJvP92bJqC6o= +go.opentelemetry.io/otel/trace v1.7.0/go.mod h1:fzLSB9nqR2eXzxPXb2JW9IKE+ScyXA48yyE4TNvoHqU= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= go.opentelemetry.io/proto/otlp v0.12.0 h1:CMJ/3Wp7iOWES+CYLfnBv+DVmPbB+kmy9PJ92XvlR6c= go.opentelemetry.io/proto/otlp v0.12.0/go.mod 
h1:TsIjwGWIx5VFYv9KGVlOpxoBl5Dy+63SUguV7GGvlSQ= -go.opentelemetry.io/proto/otlp v0.15.0 h1:h0bKrvdrT/9sBwEJ6iWUqT/N/xPcS66bL4u3isneJ6w= -go.opentelemetry.io/proto/otlp v0.15.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= @@ -533,7 +444,6 @@ golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.5.1/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -578,12 +488,9 @@ golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210505214959-0714010a04ed/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220225172249-27dd8689420f h1:oA4XRj0qtSt8Yo1Zms0CUlsT3KG69V2UGQWPBxujDmc= golang.org/x/net 
v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.0.0-20220407224826-aac1ed45d8e3 h1:EN5+DfgmRMvRUrMGERW2gQl3Vc+Z7ZMnI/xdEpPSf0c= -golang.org/x/net v0.0.0-20220407224826-aac1ed45d8e3/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -600,7 +507,6 @@ golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= golang.org/x/oauth2 v0.0.0-20220309155454-6242fa91716a h1:qfl7ob3DIEs3Ml9oLuPwY2N04gymzAW04WsUQHIClgM= golang.org/x/oauth2 v0.0.0-20220309155454-6242fa91716a/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -613,8 +519,9 @@ golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20210220032951-036812b2e83c 
h1:5KslGYwFpkhGh+Q16bwMP3cOontH8FOep7tGV86Y7SQ= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220513210516-0976fa681c29 h1:w8s32wxx3sY+OjLlv9qltkLU5yvJzxjjgiHWLjdIcw4= +golang.org/x/sync v0.0.0-20220513210516-0976fa681c29/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -678,17 +585,12 @@ golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211210111614-af8b64212486/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220315194320-039c03cc5b86 h1:A9i04dxx7Cribqbs8jf3FQLogkL/CV2YN7hj9KWJCkc= -golang.org/x/sys 
v0.0.0-20220315194320-039c03cc5b86/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220408201424-a24fb2fb8a0f h1:8w7RhxzTVgUzw/AH/9mUV5q0vMgy40SQRursCcfmkCw= -golang.org/x/sys v0.0.0-20220408201424-a24fb2fb8a0f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220517195934-5e4e11fc645e h1:w36l2Uw3dRan1K3TyXriXvY+6T56GNmlKGcqiQUJDfM= +golang.org/x/sys v0.0.0-20220517195934-5e4e11fc645e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -758,11 +660,9 @@ golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.9/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= @@ -795,8 +695,6 @@ 
google.golang.org/api v0.56.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqiv google.golang.org/api v0.57.0/go.mod h1:dVPlbZyBo2/OjBpmvNdpn2GRm6rPy75jyU7bmhdrMgI= google.golang.org/api v0.61.0/go.mod h1:xQRti5UdCmoCEqFxcz93fTl338AVqDgyaDRuOZ3hg9I= google.golang.org/api v0.63.0/go.mod h1:gs4ij2ffTRXwuzzgJl/56BdwJaA194ijkfn++9tDuPo= -google.golang.org/api v0.67.0/go.mod h1:ShHKP8E60yPsKNw/w8w+VYaj9H6buA5UqDp8dhbQZ6g= -google.golang.org/api v0.70.0/go.mod h1:Bs4ZM2HGifEvXwd50TtW70ovgJffJYw2oRCOFU/SkfA= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= @@ -867,14 +765,8 @@ google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ6 google.golang.org/genproto v0.0.0-20211206160659-862468c7d6e0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/genproto v0.0.0-20211221195035-429b39de9b1c/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20220126215142-9970aeb2e350/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20220207164111-0872dc986b00/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20220218161850-94dd64e39d7c/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= -google.golang.org/genproto v0.0.0-20220222213610-43724f9ea8cf/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= -google.golang.org/genproto v0.0.0-20220314164441-57ef72a4c106 h1:ErU+UA6wxadoU8nWrsy5MZUVBs75K17zUCsUCIfrXCE= -google.golang.org/genproto v0.0.0-20220314164441-57ef72a4c106/go.mod h1:hAL49I2IFola2sVEjAn7MEwsja0xp51I0tlGAf9hz4E= -google.golang.org/genproto v0.0.0-20220407144326-9054f6ed7bac 
h1:qSNTkEN+L2mvWcLgJOR+8bdHX9rN/IdU3A1Ghpfb1Rg= -google.golang.org/genproto v0.0.0-20220407144326-9054f6ed7bac/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220518221133-4f43b3371335 h1:2D0OT6tPVdrQTOnVe1VQjfJPTED6EZ7fdJ/f6Db6OsY= +google.golang.org/genproto v0.0.0-20220518221133-4f43b3371335/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= @@ -901,11 +793,11 @@ google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnD google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= google.golang.org/grpc v1.40.1/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= -google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= google.golang.org/grpc v1.43.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= google.golang.org/grpc v1.44.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= -google.golang.org/grpc v1.45.0 h1:NEpgUqV3Z+ZjkqMsxMg11IaDrXY4RY6CQukSGK0uI1M= -google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ= +google.golang.org/grpc v1.46.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= +google.golang.org/grpc v1.46.2 h1:u+MLGgVf7vRdjEYZ8wDFhAVNmhkbJ5hmrA1LMWK1CAQ= +google.golang.org/grpc v1.46.2/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod 
h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= @@ -919,18 +811,15 @@ google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGj google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.27.1 h1:SnqbnDw1V7RiZcXPx5MEeqPv2s79L9i7BJUlG/+RurQ= google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.28.0 h1:w43yiav+6bVFTBQFZX0r7ipe9JQ1QsbMgHwbBziscLw= google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= -gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= @@ -943,8 +832,6 @@ gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= 
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo= -gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= @@ -952,7 +839,6 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -mvdan.cc/gofumpt v0.3.0/go.mod h1:0+VyGZWleeIj5oostkOex+nDBA0eyavuDnDusAJ8ylo= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= diff --git a/go_dependencies.bzl b/go_dependencies.bzl index 304dd104..d5ca6f7b 100644 --- a/go_dependencies.bzl +++ b/go_dependencies.bzl @@ -32,113 +32,113 @@ def go_dependencies(): sum = "h1:xK2lYat7ZLaVVcIuj82J8kIro4V6kDe0AUDFboUCwcg=", version = "v1.0.0", ) + go_repository( + name = "com_github_antlr_antlr4_runtime_go_antlr", + importpath = "github.com/antlr/antlr4/runtime/Go/antlr", + sum = "h1:ue9pVfIcP+QMEjfgo/Ez4ZjNZfonGgR6NgjMaJMu1Cg=", + version = "v0.0.0-20220418222510-f25a4f6275ed", + ) go_repository( name = "com_github_aws_aws_sdk_go_v2", importpath = 
"github.com/aws/aws-sdk-go-v2", - sum = "h1:fqlCk6Iy3bnCumtrLz9r3mJ/2gUT0pJ0wLFVIdWh+JA=", - version = "v1.16.2", + sum = "h1:swQTEQUyJF/UkEA94/Ga55miiKFoXmm/Zd67XHgmjSg=", + version = "v1.16.4", ) go_repository( name = "com_github_aws_aws_sdk_go_v2_aws_protocol_eventstream", importpath = "github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream", - sum = "h1:SdK4Ppk5IzLs64ZMvr6MrSficMtjY2oS0WOORXTlxwU=", - version = "v1.4.1", + sum = "h1:J/tiyHbl07LL4/1i0rFrW5pbLMvo7M6JrekBUNpLeT4=", + version = "v1.4.0", ) go_repository( name = "com_github_aws_aws_sdk_go_v2_config", importpath = "github.com/aws/aws-sdk-go-v2/config", - sum = "h1:5AlQD0jhVXlGzwo+VORKiUuogkG7pQcLJNzIzK7eodw=", - version = "v1.15.3", + sum = "h1:cibCYF2c2uq0lsbu0Ggbg8RuGeiHCmXwUlTMS77CiK4=", + version = "v1.15.0", ) go_repository( name = "com_github_aws_aws_sdk_go_v2_credentials", importpath = "github.com/aws/aws-sdk-go-v2/credentials", - sum = "h1:RQQ5fzclAKJyY5TvF+fkjJEwzK4hnxQCLOu5JXzDmQo=", - version = "v1.11.2", + sum = "h1:M/FFpf2w31F7xqJqJLgiM0mFpLOtBvwZggORr6QCpo8=", + version = "v1.10.0", ) go_repository( name = "com_github_aws_aws_sdk_go_v2_feature_ec2_imds", importpath = "github.com/aws/aws-sdk-go-v2/feature/ec2/imds", - sum = "h1:LWPg5zjHV9oz/myQr4wMs0gi4CjnDN/ILmyZUFYXZsU=", - version = "v1.12.3", + sum = "h1:gUlb+I7NwDtqJUIRcFYDiheYa97PdVHG/5Iz+SwdoHE=", + version = "v1.12.0", ) go_repository( name = "com_github_aws_aws_sdk_go_v2_internal_configsources", importpath = "github.com/aws/aws-sdk-go-v2/internal/configsources", - sum = "h1:onz/VaaxZ7Z4V+WIN9Txly9XLTmoOh1oJ8XcAC3pako=", - version = "v1.1.9", + sum = "h1:gsqHplNh1DaQunEKZISK56wlpbCg0yKxNVvGWCFuF1k=", + version = "v1.1.11", ) go_repository( name = "com_github_aws_aws_sdk_go_v2_internal_endpoints_v2", importpath = "github.com/aws/aws-sdk-go-v2/internal/endpoints/v2", - sum = "h1:9stUQR/u2KXU6HkFJYlqnZEjBnbgrVbG6I5HN09xZh0=", - version = "v2.4.3", + sum = 
"h1:PLFj+M2PgIDHG//hw3T0O0KLI4itVtAjtxrZx4AHPLg=", + version = "v2.4.5", ) go_repository( name = "com_github_aws_aws_sdk_go_v2_internal_ini", importpath = "github.com/aws/aws-sdk-go-v2/internal/ini", - sum = "h1:by9P+oy3P/CwggN4ClnW2D4oL91QV7pBzBICi1chZvQ=", - version = "v1.3.10", - ) - go_repository( - name = "com_github_aws_aws_sdk_go_v2_internal_v4a", - importpath = "github.com/aws/aws-sdk-go-v2/internal/v4a", - sum = "h1:cq+47u1zpHyH+PSkbBx1N9whx4TiM9m9ibimOPaNlBg=", - version = "v1.0.0", + sum = "h1:QOMEP8jnO8sm0SX/4G7dbaIq2eEP2wcWEsF0jzrXLJc=", + version = "v1.3.7", ) go_repository( name = "com_github_aws_aws_sdk_go_v2_service_autoscaling", importpath = "github.com/aws/aws-sdk-go-v2/service/autoscaling", - sum = "h1:5idZ9wTGvXKApIm7MtcJoMRM8/016tLWLmuXz2chYqc=", - version = "v1.22.4", + sum = "h1:RQhRsMv7qcIQXI6KO5MytJYXVo3cSl4EJQmGI9FTdcU=", + version = "v1.23.2", ) go_repository( name = "com_github_aws_aws_sdk_go_v2_service_internal_accept_encoding", importpath = "github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding", - sum = "h1:T4pFel53bkHjL2mMo+4DKE6r6AuoZnM0fg7k1/ratr4=", - version = "v1.9.1", + sum = "h1:uhb7moM7VjqIEpWzTpCvceLDSwrWpaleXm39OnVjuLE=", + version = "v1.9.0", ) go_repository( name = "com_github_aws_aws_sdk_go_v2_service_internal_checksum", importpath = "github.com/aws/aws-sdk-go-v2/service/internal/checksum", - sum = "h1:I0dcwWitE752hVSMrsLCxqNQ+UdEp3nACx2bYNMQq+k=", - version = "v1.1.3", + sum = "h1:IhiVUezzcKlszx6wXSDQYDjEn/bIO6Mc73uNQ1YfTmA=", + version = "v1.1.0", ) go_repository( name = "com_github_aws_aws_sdk_go_v2_service_internal_presigned_url", importpath = "github.com/aws/aws-sdk-go-v2/service/internal/presigned-url", - sum = "h1:Gh1Gpyh01Yvn7ilO/b/hr01WgNpaszfbKMUgqM186xQ=", - version = "v1.9.3", + sum = "h1:YQ3fTXACo7xeAqg0NiqcCmBOXJruUfh+4+O2qxF2EjQ=", + version = "v1.9.0", ) go_repository( name = "com_github_aws_aws_sdk_go_v2_service_internal_s3shared", importpath = 
"github.com/aws/aws-sdk-go-v2/service/internal/s3shared", - sum = "h1:BKjwCJPnANbkwQ8vzSbaZDKawwagDubrH/z/c0X+kbQ=", - version = "v1.13.3", + sum = "h1:i+7ve93k5G0S2xWBu60CKtmzU5RjBj9g7fcSypQNLR0=", + version = "v1.13.0", ) go_repository( name = "com_github_aws_aws_sdk_go_v2_service_s3", importpath = "github.com/aws/aws-sdk-go-v2/service/s3", - sum = "h1:frOI/v6KWuKGlKUA5gheRw01EDpxcCxTalFQkCOZXAo=", - version = "v1.26.4", + sum = "h1:6IdBZVY8zod9umkwWrtbH2opcM00eKEmIfZKGUg5ywI=", + version = "v1.26.0", ) go_repository( name = "com_github_aws_aws_sdk_go_v2_service_sqs", importpath = "github.com/aws/aws-sdk-go-v2/service/sqs", - sum = "h1:uHjK81fESbGy2Y9lspub1+C6VN5W2UXTDo2A/Pm4G0U=", - version = "v1.18.3", + sum = "h1:Nt1QV0zSgC9WNbcRIgHeYIgFtuuEzijKGYEeB8Xa/zY=", + version = "v1.18.5", ) go_repository( name = "com_github_aws_aws_sdk_go_v2_service_sso", importpath = "github.com/aws/aws-sdk-go-v2/service/sso", - sum = "h1:frW4ikGcxfAEDfmQqWgMLp+F1n4nRo9sF39OcIb5BkQ=", - version = "v1.11.3", + sum = "h1:gZLEXLH6NiU8Y52nRhK1jA+9oz7LZzBK242fi/ziXa4=", + version = "v1.11.0", ) go_repository( name = "com_github_aws_aws_sdk_go_v2_service_sts", importpath = "github.com/aws/aws-sdk-go-v2/service/sts", - sum = "h1:cJGRyzCSVwZC7zZZ1xbx9m32UnrKydRYhOvcD1NYP9Q=", - version = "v1.16.3", + sum = "h1:0+X/rJ2+DTBKWbUsn7WtF0JvNk/fRf928vkFsXkbbZs=", + version = "v1.16.0", ) go_repository( name = "com_github_aws_smithy_go", @@ -150,8 +150,8 @@ def go_dependencies(): name = "com_github_bazelbuild_remote_apis", importpath = "github.com/bazelbuild/remote-apis", patches = ["@com_github_buildbarn_bb_storage//:patches/com_github_bazelbuild_remote_apis/golang.diff"], - sum = "h1:vhW2gGrc9vvUQqtcAjgKDuMoqzU7mDC15Gf/KVjTt7o=", - version = "v0.0.0-20220223171137-04784f4a830c", + sum = "h1:Zf6l8jLGtvSgdpqqiCRtLvF99VsoDD++uz7eSxvIafE=", + version = "v0.0.0-20220510175640-3b4b64021035", ) go_repository( name = "com_github_beorn7_perks", @@ -165,6 
+165,12 @@ def go_dependencies(): sum = "h1:GvhBeVPp8mnSKzVjMJXKjMgUNr2xnO7V2DD4xubgC/Y=", version = "v0.0.0-20220409201227-01fc04b652f6", ) + go_repository( + name = "com_github_buildbarn_go_xdr", + importpath = "github.com/buildbarn/go-xdr", + sum = "h1:n4WcJBxZ7nKYGRreNmXGMhsLaGRAQsCeH9UfD6u2NuY=", + version = "v0.0.0-20220519081609-da07cfd21fa0", + ) go_repository( name = "com_github_burntsushi_toml", importpath = "github.com/BurntSushi/toml", @@ -238,12 +244,6 @@ def go_dependencies(): sum = "h1:zH8ljVhhq7yC0MIeUL/IviMtY8hx2mK8cN9wEYb8ggw=", version = "v0.0.0-20211011173535-cb28da3451f1", ) - go_repository( - name = "com_github_creack_pty", - importpath = "github.com/creack/pty", - sum = "h1:uDmaGzcdjhF4i/plgjmEsriH11Y0o7RKapEf/LDaM3w=", - version = "v1.1.9", - ) go_repository( name = "com_github_davecgh_go_spew", importpath = "github.com/davecgh/go-spew", @@ -259,8 +259,8 @@ def go_dependencies(): go_repository( name = "com_github_envoyproxy_go_control_plane", importpath = "github.com/envoyproxy/go-control-plane", - sum = "h1:fP+fF0up6oPY49OrjPrhIJ8yQfdIM85NXMLkMg1EXVs=", - version = "v0.9.10-0.20210907150352-cf90f659a021", + sum = "h1:xvqufLtNVwAhN8NMyWklVgxnWohi+wtMGQMhtxexlm0=", + version = "v0.10.2-0.20220325020618-49ff273808a1", ) go_repository( name = "com_github_envoyproxy_protoc_gen_validate", @@ -274,12 +274,6 @@ def go_dependencies(): sum = "h1:s36xzo75JdqLaaWoiEHk767eHiwo0598uUxyfiPkDsg=", version = "v1.10.0", ) - go_repository( - name = "com_github_frankban_quicktest", - importpath = "github.com/frankban/quicktest", - sum = "h1:SPb1KFFmM+ybpEjPUhCCkZOM5xlovT5UbrMvWnXyBns=", - version = "v1.14.2", - ) go_repository( name = "com_github_fsnotify_fsnotify", importpath = "github.com/fsnotify/fsnotify", @@ -313,14 +307,14 @@ def go_dependencies(): go_repository( name = "com_github_go_kit_log", importpath = "github.com/go-kit/log", - sum = "h1:7i2K3eKTos3Vc0enKCfnVcgHh2olr/MyfboYq7cAcFw=", - 
version = "v0.2.0", + sum = "h1:DGJh0Sm43HbOeYDNnVZFl8BvcYVvjD5bqYJvp0REbwQ=", + version = "v0.1.0", ) go_repository( name = "com_github_go_logfmt_logfmt", importpath = "github.com/go-logfmt/logfmt", - sum = "h1:otpy5pqBCBZ1ng9RQ0dPu4PN7ba75Y/aA+UpowDyNVA=", - version = "v0.5.1", + sum = "h1:TrB8swr/68K7m9CcGut2g3UOihhbcbiMAYiuTXdEih4=", + version = "v0.5.0", ) go_repository( name = "com_github_go_logr_logr", @@ -349,8 +343,8 @@ def go_dependencies(): go_repository( name = "com_github_go_redis_redis_v8", importpath = "github.com/go-redis/redis/v8", - sum = "h1:AcZZR7igkdvfVmQTPnu9WE37LRrO/YrBH5zWyjDC0oI=", - version = "v8.11.5", + sum = "h1:kHoYkfZP6+pe04aFTnhDH6GDROa5yJdHJVNxV3F46Tg=", + version = "v8.11.4", ) go_repository( name = "com_github_go_stack_stack", @@ -358,6 +352,12 @@ def go_dependencies(): sum = "h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk=", version = "v1.8.0", ) + go_repository( + name = "com_github_go_task_slim_sprig", + importpath = "github.com/go-task/slim-sprig", + sum = "h1:p104kn46Q8WdvHunIJ9dAyjPVtrBPhSr3KT2yUst43I=", + version = "v0.0.0-20210107165309-348f09dbbbc0", + ) go_repository( name = "com_github_gogo_protobuf", importpath = "github.com/gogo/protobuf", @@ -367,8 +367,8 @@ def go_dependencies(): go_repository( name = "com_github_golang_glog", importpath = "github.com/golang/glog", - sum = "h1:nfP3RFugxnNRyKgeWd4oI1nYvXpxrx8ck8ZrcizshdQ=", - version = "v1.0.0", + sum = "h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58=", + version = "v0.0.0-20160126235308-23def4e6c14b", ) go_repository( name = "com_github_golang_groupcache", @@ -491,12 +491,6 @@ def go_dependencies(): sum = "h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo=", version = "v1.16.0", ) - go_repository( - name = "com_github_grpc_ecosystem_grpc_gateway_v2", - importpath = "github.com/grpc-ecosystem/grpc-gateway/v2", - sum = "h1:ESEyqQqXXFIcImj/BE8oKEX37Zsuceb2cZI+EL/zNCY=", - version = "v2.10.0", - ) go_repository( name = 
"com_github_hanwen_go_fuse", importpath = "github.com/hanwen/go-fuse", @@ -608,8 +602,8 @@ def go_dependencies(): go_repository( name = "com_github_kr_pretty", importpath = "github.com/kr/pretty", - sum = "h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0=", - version = "v0.3.0", + sum = "h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=", + version = "v0.1.0", ) go_repository( name = "com_github_kr_pty", @@ -620,8 +614,8 @@ def go_dependencies(): go_repository( name = "com_github_kr_text", importpath = "github.com/kr/text", - sum = "h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=", - version = "v0.2.0", + sum = "h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=", + version = "v0.1.0", ) go_repository( name = "com_github_kylelemons_godebug", @@ -686,14 +680,14 @@ def go_dependencies(): go_repository( name = "com_github_onsi_ginkgo", importpath = "github.com/onsi/ginkgo", - sum = "h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE=", - version = "v1.16.5", + sum = "h1:29JGrr5oVBm5ulCWet69zQkzWipVXIol6ygQUe/EzNc=", + version = "v1.16.4", ) go_repository( name = "com_github_onsi_gomega", importpath = "github.com/onsi/gomega", - sum = "h1:M1GfJqGRrBrrGGsbxzV5dqM2U2ApXefZCQpkukxYRLE=", - version = "v1.18.1", + sum = "h1:6gjqkI8iiRHMvdccRJM8rVKjCWk6ZIm6FTm3ddIe4/c=", + version = "v1.16.0", ) go_repository( name = "com_github_opentracing_opentracing_go", @@ -701,12 +695,6 @@ def go_dependencies(): sum = "h1:pWlfV3Bxv7k65HYwkikxat0+s3pV4bsqf19k25Ur8rU=", version = "v1.1.0", ) - go_repository( - name = "com_github_pkg_diff", - importpath = "github.com/pkg/diff", - sum = "h1:aoZm08cpOy4WuID//EZDgcC4zIxODThtZNPirFr42+A=", - version = "v0.0.0-20210226163009-20ebb0f2a09e", - ) go_repository( name = "com_github_pkg_errors", importpath = "github.com/pkg/errors", @@ -722,8 +710,8 @@ def go_dependencies(): go_repository( name = "com_github_prometheus_client_golang", importpath = "github.com/prometheus/client_golang", - sum = 
"h1:ZiaPsmm9uiBeaSMRznKsCDNtPCS0T3JVDGF+06gjBzk=", - version = "v1.12.1", + sum = "h1:51L9cDoUHVrXx4zWYlcLQIZ+d+VXHgqnYKkIuq4g/34=", + version = "v1.12.2", ) go_repository( name = "com_github_prometheus_client_model", @@ -734,8 +722,8 @@ def go_dependencies(): go_repository( name = "com_github_prometheus_common", importpath = "github.com/prometheus/common", - sum = "h1:rHgav/0a6+uYgGdNt3jwz8FNSesO/Hsang3O0T9A5SE=", - version = "v0.33.0", + sum = "h1:hWIdL3N2HoUx3B8j3YN9mWor0qhY/NlEKZEaXxuIRh4=", + version = "v0.32.1", ) go_repository( name = "com_github_prometheus_procfs", @@ -752,8 +740,8 @@ def go_dependencies(): go_repository( name = "com_github_rogpeppe_go_internal", importpath = "github.com/rogpeppe/go-internal", - sum = "h1:geMPLpDpQOgVyCg5z5GoRwLHepNdb71NXb67XFkP+Eg=", - version = "v1.8.1", + sum = "h1:RR9dF3JtopPvtkroDZuVD7qquD0bnHlKSqaQhgwt8yk=", + version = "v1.3.0", ) go_repository( name = "com_github_sergi_go_diff", @@ -794,14 +782,14 @@ def go_dependencies(): go_repository( name = "com_github_yuin_goldmark", importpath = "github.com/yuin/goldmark", - sum = "h1:/vn0k+RBvwlxEmP5E7SZMqNxPhfMVFEJiykr15/0XKM=", - version = "v1.4.1", + sum = "h1:dPmz1Snjq0kmkz159iL7S6WzdahUTHnHB5M56WFVifs=", + version = "v1.3.5", ) go_repository( name = "com_google_cloud_go", importpath = "cloud.google.com/go", - sum = "h1:t9Iw5QH5v4XtlEQaCtUY7x6sCABps8sW0acw7e2WQ6Y=", - version = "v0.100.2", + sum = "h1:i2ukt/HTgcBhgL1J0Dx9w7gb5oCe7zWEcumzQSh+9I4=", + version = "v0.100.1", ) go_repository( name = "com_google_cloud_go_bigquery", @@ -809,12 +797,6 @@ def go_dependencies(): sum = "h1:PQcPefKFdaIzjQFbiyOgAqyx8q5djaE7x9Sqe712DPA=", version = "v1.8.0", ) - go_repository( - name = "com_google_cloud_go_compute", - importpath = "cloud.google.com/go/compute", - sum = "h1:b1zWmYuuHz7gO9kDcM/EpHGr06UgsYNRpNJzI2kFiLM=", - version = "v1.5.0", - ) go_repository( name = "com_google_cloud_go_datastore", importpath = "cloud.google.com/go/datastore", @@ -902,39 +884,39 
@@ def go_dependencies(): go_repository( name = "io_opentelemetry_go_contrib_instrumentation_google_golang_org_grpc_otelgrpc", importpath = "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc", - sum = "h1:li8u9OSMvLau7rMs8bmiL82OazG6MAkwPz2i6eS8TBQ=", - version = "v0.31.0", + sum = "h1:n9b7AAdbQtQ0k9dm0Dm2/KUcUqtG8i2O15KzNaDze8c=", + version = "v0.29.0", ) go_repository( name = "io_opentelemetry_go_contrib_propagators_b3", importpath = "go.opentelemetry.io/contrib/propagators/b3", - sum = "h1:rHeNbko1wNe1Sazpw5IJD83x43lfzMnDb8vckdKxRu8=", - version = "v1.6.0", + sum = "h1:wDb2ct7xMzossYpx44w81skxkEyeT2IRnBgYKqyEork=", + version = "v1.4.0", ) go_repository( name = "io_opentelemetry_go_otel", build_file_proto_mode = "disable", importpath = "go.opentelemetry.io/otel", - sum = "h1:FLOfo8f9JzFVFVyU+MSRJc2HdEAXQgm7pIv2uFKRSZE=", - version = "v1.6.3", + sum = "h1:Z2lA3Tdch0iDcrhJXDIlC94XE+bxok1F9B+4Lz/lGsM=", + version = "v1.7.0", ) go_repository( name = "io_opentelemetry_go_otel_exporters_jaeger", importpath = "go.opentelemetry.io/otel/exporters/jaeger", - sum = "h1:7tvBU1Ydbzq080efuepYYqC1Pv3/vOFBgCSrxLb24d0=", - version = "v1.6.3", + sum = "h1:VHCK+2yTZDqDaVXj7JH2Z/khptuydo6C0ttBh2bxAbc=", + version = "v1.4.1", ) go_repository( name = "io_opentelemetry_go_otel_exporters_otlp_internal_retry", importpath = "go.opentelemetry.io/otel/exporters/otlp/internal/retry", - sum = "h1:nAmg1WgsUXoXf46dJG9eS/AzOcvkCTK4xJSUYpWyHYg=", - version = "v1.6.3", + sum = "h1:imIM3vRDMyZK1ypQlQlO+brE22I9lRhJsBDXpDWjlz8=", + version = "v1.4.1", ) go_repository( name = "io_opentelemetry_go_otel_exporters_otlp_otlptrace", importpath = "go.opentelemetry.io/otel/exporters/otlp/otlptrace", - sum = "h1:4/UjHWMVVc5VwX/KAtqJOHErKigMCH8NexChMuanb/o=", - version = "v1.6.3", + sum = "h1:WPpPsAAs8I2rA47v5u0558meKmmwm1Dj99ZbqCV8sZ8=", + version = "v1.4.1", ) go_repository( name = "io_opentelemetry_go_otel_metric", @@ -951,20 +933,20 @@ def go_dependencies(): go_repository( name = 
"io_opentelemetry_go_otel_sdk", importpath = "go.opentelemetry.io/otel/sdk", - sum = "h1:prSHYdwCQOX5DrsEzxowH3nLhoAzEBdZhvrR79scfLs=", - version = "v1.6.3", + sum = "h1:J7EaW71E0v87qflB4cDolaqq3AcujGrtyIPGQoZOB0Y=", + version = "v1.4.1", ) go_repository( name = "io_opentelemetry_go_otel_trace", importpath = "go.opentelemetry.io/otel/trace", - sum = "h1:IqN4L+5b0mPNjdXIiZ90Ni4Bl5BRkDQywePLWemd9bc=", - version = "v1.6.3", + sum = "h1:O37Iogk1lEkMRXewVtZ1BBTVn5JEp8GrJvP92bJqC6o=", + version = "v1.7.0", ) go_repository( name = "io_opentelemetry_go_proto_otlp", importpath = "go.opentelemetry.io/proto/otlp", - sum = "h1:h0bKrvdrT/9sBwEJ6iWUqT/N/xPcS66bL4u3isneJ6w=", - version = "v0.15.0", + sum = "h1:CMJ/3Wp7iOWES+CYLfnBv+DVmPbB+kmy9PJ92XvlR6c=", + version = "v0.12.0", ) go_repository( name = "io_rsc_binaryregexp", @@ -987,8 +969,8 @@ def go_dependencies(): go_repository( name = "org_golang_google_api", importpath = "google.golang.org/api", - sum = "h1:67zQnAE0T2rB0A3CwLSas0K+SbVzSxP+zTLkQLexeiw=", - version = "v0.70.0", + sum = "h1:n2bqqK895ygnBpdPDYetfy23K7fJ22wsrZKCyfuRkkA=", + version = "v0.63.0", ) go_repository( name = "org_golang_google_appengine", @@ -999,15 +981,15 @@ def go_dependencies(): go_repository( name = "org_golang_google_genproto", importpath = "google.golang.org/genproto", - sum = "h1:qSNTkEN+L2mvWcLgJOR+8bdHX9rN/IdU3A1Ghpfb1Rg=", - version = "v0.0.0-20220407144326-9054f6ed7bac", + sum = "h1:2D0OT6tPVdrQTOnVe1VQjfJPTED6EZ7fdJ/f6Db6OsY=", + version = "v0.0.0-20220518221133-4f43b3371335", ) go_repository( name = "org_golang_google_grpc", build_file_proto_mode = "disable", importpath = "google.golang.org/grpc", - sum = "h1:NEpgUqV3Z+ZjkqMsxMg11IaDrXY4RY6CQukSGK0uI1M=", - version = "v1.45.0", + sum = "h1:u+MLGgVf7vRdjEYZ8wDFhAVNmhkbJ5hmrA1LMWK1CAQ=", + version = "v1.46.2", ) go_repository( name = "org_golang_google_grpc_cmd_protoc_gen_go_grpc", @@ -1063,8 +1045,8 @@ def go_dependencies(): go_repository( name = "org_golang_x_net", importpath = 
"golang.org/x/net", - sum = "h1:EN5+DfgmRMvRUrMGERW2gQl3Vc+Z7ZMnI/xdEpPSf0c=", - version = "v0.0.0-20220407224826-aac1ed45d8e3", + sum = "h1:oA4XRj0qtSt8Yo1Zms0CUlsT3KG69V2UGQWPBxujDmc=", + version = "v0.0.0-20220225172249-27dd8689420f", ) go_repository( name = "org_golang_x_oauth2", @@ -1075,14 +1057,14 @@ def go_dependencies(): go_repository( name = "org_golang_x_sync", importpath = "golang.org/x/sync", - sum = "h1:5KslGYwFpkhGh+Q16bwMP3cOontH8FOep7tGV86Y7SQ=", - version = "v0.0.0-20210220032951-036812b2e83c", + sum = "h1:w8s32wxx3sY+OjLlv9qltkLU5yvJzxjjgiHWLjdIcw4=", + version = "v0.0.0-20220513210516-0976fa681c29", ) go_repository( name = "org_golang_x_sys", importpath = "golang.org/x/sys", - sum = "h1:8w7RhxzTVgUzw/AH/9mUV5q0vMgy40SQRursCcfmkCw=", - version = "v0.0.0-20220408201424-a24fb2fb8a0f", + sum = "h1:w36l2Uw3dRan1K3TyXriXvY+6T56GNmlKGcqiQUJDfM=", + version = "v0.0.0-20220517195934-5e4e11fc645e", ) go_repository( name = "org_golang_x_term", diff --git a/internal/mock/BUILD.bazel b/internal/mock/BUILD.bazel index 74118885..4b72d18a 100644 --- a/internal/mock/BUILD.bazel +++ b/internal/mock/BUILD.bazel @@ -166,6 +166,7 @@ gomock( "FileAllocator", "FUSERemovalNotifier", "FUSERemovalNotifierRegistrar", + "HandleResolver", "InitialContentsFetcher", "Leaf", "NativeLeaf", diff --git a/pkg/filesystem/virtual/configuration/BUILD.bazel b/pkg/filesystem/virtual/configuration/BUILD.bazel index fe3e8f3e..84f16ef2 100644 --- a/pkg/filesystem/virtual/configuration/BUILD.bazel +++ b/pkg/filesystem/virtual/configuration/BUILD.bazel @@ -6,42 +6,47 @@ go_library( "configuration.go", "fuse_mount_disabled.go", "fuse_mount_enabled.go", + "nfsv4_mount_darwin.go", + "nfsv4_mount_disabled.go", ], importpath = "github.com/buildbarn/bb-remote-execution/pkg/filesystem/virtual/configuration", visibility = ["//visibility:public"], deps = [ "//pkg/filesystem/virtual", + "//pkg/filesystem/virtual/nfsv4", "//pkg/proto/configuration/filesystem/virtual", + 
"@com_github_buildbarn_bb_storage//pkg/clock", "@com_github_buildbarn_bb_storage//pkg/random", + "@com_github_buildbarn_bb_storage//pkg/util", + "@com_github_buildbarn_go_xdr//pkg/protocols/nfsv4", + "@com_github_buildbarn_go_xdr//pkg/rpcserver", "@org_golang_google_grpc//codes", "@org_golang_google_grpc//status", ] + select({ "@io_bazel_rules_go//go/platform:android": [ "//pkg/filesystem/virtual/fuse", - "@com_github_buildbarn_bb_storage//pkg/clock", "@com_github_buildbarn_bb_storage//pkg/filesystem", - "@com_github_buildbarn_bb_storage//pkg/util", "@com_github_hanwen_go_fuse_v2//fuse", ], "@io_bazel_rules_go//go/platform:darwin": [ "//pkg/filesystem/virtual/fuse", - "@com_github_buildbarn_bb_storage//pkg/clock", "@com_github_buildbarn_bb_storage//pkg/filesystem", - "@com_github_buildbarn_bb_storage//pkg/util", + "@com_github_buildbarn_go_xdr//pkg/protocols/darwin_nfs_sys_prot", + "@com_github_buildbarn_go_xdr//pkg/runtime", "@com_github_hanwen_go_fuse_v2//fuse", + "@org_golang_x_sys//unix", ], "@io_bazel_rules_go//go/platform:ios": [ "//pkg/filesystem/virtual/fuse", - "@com_github_buildbarn_bb_storage//pkg/clock", "@com_github_buildbarn_bb_storage//pkg/filesystem", - "@com_github_buildbarn_bb_storage//pkg/util", + "@com_github_buildbarn_go_xdr//pkg/protocols/darwin_nfs_sys_prot", + "@com_github_buildbarn_go_xdr//pkg/runtime", "@com_github_hanwen_go_fuse_v2//fuse", + "@org_golang_x_sys//unix", ], "@io_bazel_rules_go//go/platform:linux": [ "//pkg/filesystem/virtual/fuse", - "@com_github_buildbarn_bb_storage//pkg/clock", "@com_github_buildbarn_bb_storage//pkg/filesystem", - "@com_github_buildbarn_bb_storage//pkg/util", "@com_github_hanwen_go_fuse_v2//fuse", ], "//conditions:default": [], diff --git a/pkg/filesystem/virtual/configuration/configuration.go b/pkg/filesystem/virtual/configuration/configuration.go index ca0e16c4..bac92cbd 100644 --- a/pkg/filesystem/virtual/configuration/configuration.go +++ b/pkg/filesystem/virtual/configuration/configuration.go @@ -2,8 
+2,13 @@ package configuration import ( "github.com/buildbarn/bb-remote-execution/pkg/filesystem/virtual" + "github.com/buildbarn/bb-remote-execution/pkg/filesystem/virtual/nfsv4" pb "github.com/buildbarn/bb-remote-execution/pkg/proto/configuration/filesystem/virtual" + "github.com/buildbarn/bb-storage/pkg/clock" "github.com/buildbarn/bb-storage/pkg/random" + "github.com/buildbarn/bb-storage/pkg/util" + nfsv4_xdr "github.com/buildbarn/go-xdr/pkg/protocols/nfsv4" + "github.com/buildbarn/go-xdr/pkg/rpcserver" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" @@ -24,6 +29,47 @@ type fuseMount struct { fsName string } +type nfsv4Mount struct { + mountPath string + configuration *pb.NFSv4MountConfiguration + handleAllocator *virtual.NFSStatefulHandleAllocator +} + +func (m *nfsv4Mount) Expose(rootDirectory virtual.Directory) error { + // Random values that the client can use to detect that the + // server has been restarted and lost all state. + var verifier nfsv4_xdr.Verifier4 + random.FastThreadSafeGenerator.Read(verifier[:]) + var stateIDOtherPrefix [4]byte + random.FastThreadSafeGenerator.Read(stateIDOtherPrefix[:]) + + enforcedLeaseTime := m.configuration.EnforcedLeaseTime + if err := enforcedLeaseTime.CheckValid(); err != nil { + return util.StatusWrap(err, "Invalid enforced lease time") + } + announcedLeaseTime := m.configuration.AnnouncedLeaseTime + if err := announcedLeaseTime.CheckValid(); err != nil { + return util.StatusWrap(err, "Invalid announced lease time") + } + + // Create an RPC server that offers the NFSv4 program. 
+ rpcServer := rpcserver.NewServer(map[uint32]rpcserver.Service{ + nfsv4_xdr.NFS4_PROGRAM_PROGRAM_NUMBER: nfsv4_xdr.NewNfs4ProgramService( + nfsv4.NewMetricsProgram( + nfsv4.NewBaseProgram( + rootDirectory, + m.handleAllocator.ResolveHandle, + random.NewFastSingleThreadedGenerator(), + verifier, + stateIDOtherPrefix, + clock.SystemClock, + enforcedLeaseTime.AsDuration(), + announcedLeaseTime.AsDuration()))), + }) + + return m.mount(rpcServer) +} + // NewMountFromConfiguration creates a new FUSE mount based on options // specified in a configuration message and starts processing of // incoming requests. @@ -37,6 +83,13 @@ func NewMountFromConfiguration(configuration *pb.MountConfiguration, fsName stri handleAllocator: handleAllocator, fsName: fsName, }, handleAllocator, nil + case *pb.MountConfiguration_Nfsv4: + handleAllocator := virtual.NewNFSHandleAllocator(random.NewFastSingleThreadedGenerator()) + return &nfsv4Mount{ + mountPath: configuration.MountPath, + configuration: backend.Nfsv4, + handleAllocator: handleAllocator, + }, handleAllocator, nil default: return nil, nil, status.Error(codes.InvalidArgument, "No virtual file system backend configuration provided") } diff --git a/pkg/filesystem/virtual/configuration/nfsv4_mount_darwin.go b/pkg/filesystem/virtual/configuration/nfsv4_mount_darwin.go new file mode 100644 index 00000000..cf5a170c --- /dev/null +++ b/pkg/filesystem/virtual/configuration/nfsv4_mount_darwin.go @@ -0,0 +1,170 @@ +//go:build darwin +// +build darwin + +package configuration + +import ( + "bytes" + "log" + "math" + "net" + "os" + "time" + "unsafe" + + pb "github.com/buildbarn/bb-remote-execution/pkg/proto/configuration/filesystem/virtual" + "github.com/buildbarn/bb-storage/pkg/util" + nfs_sys_prot "github.com/buildbarn/go-xdr/pkg/protocols/darwin_nfs_sys_prot" + "github.com/buildbarn/go-xdr/pkg/rpcserver" + "github.com/buildbarn/go-xdr/pkg/runtime" + + "golang.org/x/sys/unix" + 
"google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +func toNfstime32(d time.Duration) *nfs_sys_prot.Nfstime32 { + nanos := d.Nanoseconds() + return &nfs_sys_prot.Nfstime32{ + Seconds: int32(nanos / 1e9), + Nseconds: uint32(nanos % 1e9), + } +} + +func (m *nfsv4Mount) mount(rpcServer *rpcserver.Server) error { + darwinConfiguration, ok := m.configuration.OperatingSystem.(*pb.NFSv4MountConfiguration_Darwin) + if !ok { + return status.Error(codes.InvalidArgument, "Darwin specific NFSv4 server configuration options not provided") + } + + // Expose the NFSv4 server on the network. + osConfiguration := darwinConfiguration.Darwin + socketPath := osConfiguration.SocketPath + var sock net.Listener + var err error + if socketPath == "" { + // Launch NFSv4 server on a TCP socket. + // TODO: Remove this once UNIX socket support is stable. + sock, err = net.Listen("tcp", "localhost:") + } else { + // Launch NFSv4 server on a UNIX socket. + if err := os.Remove(socketPath); err != nil && !os.IsNotExist(err) { + return util.StatusWrapf(err, "Could not remove stale socket for NFSv4 server %#v", socketPath) + } + sock, err = net.Listen("unix", socketPath) + } + if err != nil { + return util.StatusWrapf(err, "Failed to create listening socket for NFSv4 server %#v", socketPath) + } + go func() { + for { + c, err := sock.Accept() + if err != nil { + log.Print("Got accept error: ", err) + } + go func() { + err := rpcServer.HandleConnection(c, c) + c.Close() + if err != nil { + log.Print("Failure handling NFSv4 connection: ", err) + } + }() + } + }() + + // Construct attributes that are provided to mount(2). For NFS, + // these attributes are stored in an XDR message. Similar to how + // NFSv4's fattr4 works, the attributes need to be emitted in + // increasing order by bitmask field. + var attrMask uint32 + attrVals := bytes.NewBuffer(nil) + + // Don't bother setting up a callback service, as we don't issue + // CB_NOTIFY operations. 
Using this option is also a requirement + // for making NFSv4 over UNIX sockets work. + attrMask |= 1 << nfs_sys_prot.NFS_MATTR_FLAGS + flags := nfs_sys_prot.NfsMattrFlags{ + Mask: []uint32{ + 1 << nfs_sys_prot.NFS_MFLAG_NOCALLBACK, + }, + Value: []uint32{ + 1 << nfs_sys_prot.NFS_MFLAG_NOCALLBACK, + }, + } + flags.WriteTo(attrVals) + + // Explicitly request the use of NFSv4.0. + attrMask |= 1 << nfs_sys_prot.NFS_MATTR_NFS_VERSION + nfs_sys_prot.WriteNfsMattrNfsVersion(attrVals, 4) + attrMask |= 1 << nfs_sys_prot.NFS_MATTR_NFS_MINOR_VERSION + nfs_sys_prot.WriteNfsMattrNfsMinorVersion(attrVals, 0) + + if d := osConfiguration.MinimumDirectoriesAttributeCacheTimeout; d != nil { + if err := d.CheckValid(); err != nil { + return util.StatusWrapWithCode(err, codes.InvalidArgument, "Invalid minimum directories attribute cache timeout") + } + attrMask |= 1 << nfs_sys_prot.NFS_MATTR_ATTRCACHE_DIR_MIN + toNfstime32(d.AsDuration()).WriteTo(attrVals) + } + if d := osConfiguration.MaximumDirectoriesAttributeCacheTimeout; d != nil { + if err := d.CheckValid(); err != nil { + return util.StatusWrapWithCode(err, codes.InvalidArgument, "Invalid maximum directories attribute cache timeout") + } + attrMask |= 1 << nfs_sys_prot.NFS_MATTR_ATTRCACHE_DIR_MAX + toNfstime32(d.AsDuration()).WriteTo(attrVals) + } + + if socketPath != "" { + // "ticotsord" is the X/Open Transport Interface (XTI) + // equivalent of AF_LOCAL with SOCK_STREAM. 
+ attrMask |= 1 << nfs_sys_prot.NFS_MATTR_SOCKET_TYPE + nfs_sys_prot.WriteNfsMattrSocketType(attrVals, "ticotsord") + } + + if socketPath == "" { + attrMask |= 1 << nfs_sys_prot.NFS_MATTR_NFS_PORT + nfs_sys_prot.WriteNfsMattrNfsPort(attrVals, nfs_sys_prot.NfsMattrNfsPort(sock.Addr().(*net.TCPAddr).Port)) + } + + attrMask |= 1 << nfs_sys_prot.NFS_MATTR_FS_LOCATIONS + serverAddress := socketPath + if socketPath == "" { + serverAddress = sock.Addr().(*net.TCPAddr).IP.String() + } + fsLocations := nfs_sys_prot.NfsFsLocations{ + NfslLocation: []nfs_sys_prot.NfsFsLocation{{ + NfslServer: []nfs_sys_prot.NfsFsServer{{ + NfssAddress: []string{serverAddress}, + }}, + }}, + } + fsLocations.WriteTo(attrVals) + + if socketPath != "" { + attrMask |= 1 << nfs_sys_prot.NFS_MATTR_LOCAL_NFS_PORT + runtime.WriteUTF8String(attrVals, math.MaxUint32, socketPath) + } + + // Construct the nfs_mount_args message and serialize it. + mountArgs := nfs_sys_prot.NfsMountArgs{ + ArgsVersion: 88, // NFS_ARGSVERSION_XDR. + XdrArgsVersion: nfs_sys_prot.NFS_XDRARGS_VERSION_0, + NfsMountAttrs: nfs_sys_prot.NfsMattr{ + Attrmask: nfs_sys_prot.Bitmap{attrMask}, + AttrVals: attrVals.Bytes(), + }, + } + mountArgs.ArgsLength = uint32(mountArgs.GetEncodedSizeBytes()) + + mountArgsBuf := bytes.NewBuffer(make([]byte, 0, mountArgs.ArgsLength)) + if _, err := mountArgs.WriteTo(mountArgsBuf); err != nil { + return util.StatusWrap(err, "Failed to marshal NFS mount arguments") + } + + // Call mount(2) with the serialized nfs_mount_args message. 
+ unix.Unmount(m.mountPath, 0) + if err := unix.Mount("nfs", m.mountPath, 0, unsafe.Pointer(&mountArgsBuf.Bytes()[0])); err != nil { + return util.StatusWrap(err, "Mounting NFS volume failed") + } + return nil +} diff --git a/pkg/filesystem/virtual/configuration/nfsv4_mount_disabled.go b/pkg/filesystem/virtual/configuration/nfsv4_mount_disabled.go new file mode 100644 index 00000000..fbe03bb0 --- /dev/null +++ b/pkg/filesystem/virtual/configuration/nfsv4_mount_disabled.go @@ -0,0 +1,15 @@ +//go:build freebsd || linux || windows +// +build freebsd linux windows + +package configuration + +import ( + "github.com/buildbarn/go-xdr/pkg/rpcserver" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +func (m *nfsv4Mount) mount(rpcServer *rpcserver.Server) error { + return status.Error(codes.Unimplemented, "NFSv4 is not supported on this platform") +} diff --git a/pkg/filesystem/virtual/nfsv4/BUILD.bazel b/pkg/filesystem/virtual/nfsv4/BUILD.bazel new file mode 100644 index 00000000..7c70b683 --- /dev/null +++ b/pkg/filesystem/virtual/nfsv4/BUILD.bazel @@ -0,0 +1,38 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "nfsv4", + srcs = [ + "base_program.go", + "metrics_program.go", + ], + importpath = "github.com/buildbarn/bb-remote-execution/pkg/filesystem/virtual/nfsv4", + visibility = ["//visibility:public"], + deps = [ + "//pkg/filesystem/virtual", + "@com_github_buildbarn_bb_storage//pkg/clock", + "@com_github_buildbarn_bb_storage//pkg/filesystem", + "@com_github_buildbarn_bb_storage//pkg/filesystem/path", + "@com_github_buildbarn_bb_storage//pkg/random", + "@com_github_buildbarn_go_xdr//pkg/protocols/nfsv4", + "@com_github_buildbarn_go_xdr//pkg/protocols/rpcv2", + "@com_github_buildbarn_go_xdr//pkg/runtime", + "@com_github_prometheus_client_golang//prometheus", + ], +) + +go_test( + name = "nfsv4_test", + srcs = ["base_program_test.go"], + deps = [ + ":nfsv4", + "//internal/mock", + 
"//pkg/filesystem/virtual", + "@com_github_buildbarn_bb_storage//pkg/filesystem", + "@com_github_buildbarn_bb_storage//pkg/filesystem/path", + "@com_github_buildbarn_go_xdr//pkg/protocols/nfsv4", + "@com_github_buildbarn_go_xdr//pkg/protocols/rpcv2", + "@com_github_golang_mock//gomock", + "@com_github_stretchr_testify//require", + ], +) diff --git a/pkg/filesystem/virtual/nfsv4/base_program.go b/pkg/filesystem/virtual/nfsv4/base_program.go new file mode 100644 index 00000000..b90ea386 --- /dev/null +++ b/pkg/filesystem/virtual/nfsv4/base_program.go @@ -0,0 +1,3243 @@ +package nfsv4 + +import ( + "bytes" + "context" + "io" + "math" + "sync" + "time" + + "github.com/buildbarn/bb-remote-execution/pkg/filesystem/virtual" + "github.com/buildbarn/bb-storage/pkg/clock" + "github.com/buildbarn/bb-storage/pkg/filesystem" + "github.com/buildbarn/bb-storage/pkg/filesystem/path" + "github.com/buildbarn/bb-storage/pkg/random" + "github.com/buildbarn/go-xdr/pkg/protocols/nfsv4" + "github.com/buildbarn/go-xdr/pkg/protocols/rpcv2" + "github.com/buildbarn/go-xdr/pkg/runtime" + "github.com/prometheus/client_golang/prometheus" +) + +// stateIDOtherPrefixLength is the number of bytes of a state ID's +// 'other' field that are set to a constant value. This permits the +// server to detect whether state IDs belong to a previous incarnation +// of the server. 
+const stateIDOtherPrefixLength = 4 + +var ( + baseProgramPrometheusMetrics sync.Once + + baseProgramOpenOwnersCreated = prometheus.NewCounter( + prometheus.CounterOpts{ + Namespace: "buildbarn", + Subsystem: "nfsv4", + Name: "base_program_open_owners_created_total", + Help: "Number of open-owners created through NFSv4 OPEN operations.", + }) + baseProgramOpenOwnersRemoved = prometheus.NewCounter( + prometheus.CounterOpts{ + Namespace: "buildbarn", + Subsystem: "nfsv4", + Name: "base_program_open_owners_removed_total", + Help: "Number of open-owners removed due to inactivity.", + }) +) + +type baseProgram struct { + rootFileHandle fileHandle + handleResolver virtual.HandleResolver + rebootVerifier nfsv4.Verifier4 + stateIDOtherPrefix [stateIDOtherPrefixLength]byte + clock clock.Clock + enforcedLeaseTime time.Duration + announcedLeaseTime nfsv4.NfsLease4 + + lock sync.Mutex + now time.Time + randomNumberGenerator random.SingleThreadedGenerator + clientsByLongID map[string]*clientState + clientConfirmationsByKey map[clientConfirmationKey]*clientConfirmationState + clientConfirmationsByShortID map[nfsv4.Clientid4]*clientConfirmationState + openOwnerFilesByOther map[regularStateIDOther]*openOwnerFileState + openedFilesByHandle map[string]*openedFileState + lockOwnerFilesByOther map[regularStateIDOther]*lockOwnerFileState + idleClientConfirmations clientConfirmationState + unusedOpenOwners openOwnerState +} + +// NewBaseProgram creates an nfsv4.Nfs4Program that forwards all +// operations to a virtual file system. It implements most of the +// features of NFSv4.0. 
func NewBaseProgram(rootDirectory virtual.Directory, handleResolver virtual.HandleResolver, randomNumberGenerator random.SingleThreadedGenerator, rebootVerifier nfsv4.Verifier4, stateIDOtherPrefix [stateIDOtherPrefixLength]byte, clock clock.Clock, enforcedLeaseTime, announcedLeaseTime time.Duration) nfsv4.Nfs4Program {
	baseProgramPrometheusMetrics.Do(func() {
		prometheus.MustRegister(baseProgramOpenOwnersCreated)
		prometheus.MustRegister(baseProgramOpenOwnersRemoved)
	})

	// Resolve the root directory's file handle once, so that
	// PUTROOTFH can be served without calling into the file system.
	var attributes virtual.Attributes
	rootDirectory.VirtualGetAttributes(virtual.AttributesMaskFileHandle, &attributes)
	p := &baseProgram{
		rootFileHandle: fileHandle{
			handle:    attributes.GetFileHandle(),
			directory: rootDirectory,
		},
		handleResolver:     handleResolver,
		rebootVerifier:     rebootVerifier,
		stateIDOtherPrefix: stateIDOtherPrefix,
		clock:              clock,
		enforcedLeaseTime:  enforcedLeaseTime,
		announcedLeaseTime: nfsv4.NfsLease4(announcedLeaseTime.Seconds()),

		randomNumberGenerator:        randomNumberGenerator,
		clientsByLongID:              map[string]*clientState{},
		clientConfirmationsByKey:     map[clientConfirmationKey]*clientConfirmationState{},
		clientConfirmationsByShortID: map[nfsv4.Clientid4]*clientConfirmationState{},
		openOwnerFilesByOther:        map[regularStateIDOther]*openOwnerFileState{},
		openedFilesByHandle:          map[string]*openedFileState{},
		lockOwnerFilesByOther:        map[regularStateIDOther]*lockOwnerFileState{},
	}
	// Initialize the circular list sentinels to point at themselves
	// (empty lists).
	p.idleClientConfirmations.previousIdle = &p.idleClientConfirmations
	p.idleClientConfirmations.nextIdle = &p.idleClientConfirmations
	p.unusedOpenOwners.previousUnused = &p.unusedOpenOwners
	p.unusedOpenOwners.nextUnused = &p.unusedOpenOwners
	return p
}

// NfsV4Nfsproc4Null implements the NULL procedure, which does nothing.
func (*baseProgram) NfsV4Nfsproc4Null(ctx context.Context) error {
	return nil
}

// NfsV4Nfsproc4Compound implements the COMPOUND procedure: it
// dispatches every operation in the request to the corresponding op*()
// method on a per-request compoundState, stopping at the first
// operation that does not return NFS4_OK.
//
// Some result types are XDR unions (status via GetStatus()), others
// are plain structs (status via the Status field) — hence the mix
// below.
func (p *baseProgram) NfsV4Nfsproc4Compound(ctx context.Context, arguments *nfsv4.Compound4args) (*nfsv4.Compound4res, error) {
	// Create compound state and process all operations sequentially
	// against it.
	state := compoundState{program: p}
	resarray := make([]nfsv4.NfsResop4, 0, len(arguments.Argarray))
	status := nfsv4.NFS4_OK
	for _, operation := range arguments.Argarray {
		switch op := operation.(type) {
		case *nfsv4.NfsArgop4_OP_ACCESS:
			res := state.opAccess(&op.Opaccess)
			resarray = append(resarray, &nfsv4.NfsResop4_OP_ACCESS{
				Opaccess: res,
			})
			status = res.GetStatus()
		case *nfsv4.NfsArgop4_OP_CLOSE:
			res := state.opClose(&op.Opclose)
			resarray = append(resarray, &nfsv4.NfsResop4_OP_CLOSE{
				Opclose: res,
			})
			status = res.GetStatus()
		case *nfsv4.NfsArgop4_OP_COMMIT:
			res := state.opCommit(&op.Opcommit)
			resarray = append(resarray, &nfsv4.NfsResop4_OP_COMMIT{
				Opcommit: res,
			})
			status = res.GetStatus()
		case *nfsv4.NfsArgop4_OP_CREATE:
			res := state.opCreate(&op.Opcreate)
			resarray = append(resarray, &nfsv4.NfsResop4_OP_CREATE{
				Opcreate: res,
			})
			status = res.GetStatus()
		case *nfsv4.NfsArgop4_OP_DELEGPURGE:
			res := state.opDelegpurge(&op.Opdelegpurge)
			resarray = append(resarray, &nfsv4.NfsResop4_OP_DELEGPURGE{
				Opdelegpurge: res,
			})
			status = res.Status
		case *nfsv4.NfsArgop4_OP_DELEGRETURN:
			res := state.opDelegreturn(&op.Opdelegreturn)
			resarray = append(resarray, &nfsv4.NfsResop4_OP_DELEGRETURN{
				Opdelegreturn: res,
			})
			status = res.Status
		case *nfsv4.NfsArgop4_OP_GETATTR:
			res := state.opGetattr(&op.Opgetattr)
			resarray = append(resarray, &nfsv4.NfsResop4_OP_GETATTR{
				Opgetattr: res,
			})
			status = res.GetStatus()
		case *nfsv4.NfsArgop4_OP_GETFH:
			res := state.opGetfh()
			resarray = append(resarray, &nfsv4.NfsResop4_OP_GETFH{
				Opgetfh: res,
			})
			status = res.GetStatus()
		case *nfsv4.NfsArgop4_OP_LINK:
			res := state.opLink(&op.Oplink)
			resarray = append(resarray, &nfsv4.NfsResop4_OP_LINK{
				Oplink: res,
			})
			status = res.GetStatus()
		case *nfsv4.NfsArgop4_OP_LOCK:
			res := state.opLock(&op.Oplock)
			resarray = append(resarray, &nfsv4.NfsResop4_OP_LOCK{
				Oplock: res,
			})
			status = res.GetStatus()
		case *nfsv4.NfsArgop4_OP_LOCKT:
			res := state.opLockt(&op.Oplockt)
			resarray = append(resarray, &nfsv4.NfsResop4_OP_LOCKT{
				Oplockt: res,
			})
			status = res.GetStatus()
		case *nfsv4.NfsArgop4_OP_LOCKU:
			res := state.opLocku(&op.Oplocku)
			resarray = append(resarray, &nfsv4.NfsResop4_OP_LOCKU{
				Oplocku: res,
			})
			status = res.GetStatus()
		case *nfsv4.NfsArgop4_OP_LOOKUP:
			res := state.opLookup(&op.Oplookup)
			resarray = append(resarray, &nfsv4.NfsResop4_OP_LOOKUP{
				Oplookup: res,
			})
			status = res.Status
		case *nfsv4.NfsArgop4_OP_LOOKUPP:
			res := state.opLookupp()
			resarray = append(resarray, &nfsv4.NfsResop4_OP_LOOKUPP{
				Oplookupp: res,
			})
			status = res.Status
		case *nfsv4.NfsArgop4_OP_NVERIFY:
			res := state.opNverify(&op.Opnverify)
			resarray = append(resarray, &nfsv4.NfsResop4_OP_NVERIFY{
				Opnverify: res,
			})
			status = res.Status
		case *nfsv4.NfsArgop4_OP_OPEN:
			res := state.opOpen(&op.Opopen)
			resarray = append(resarray, &nfsv4.NfsResop4_OP_OPEN{
				Opopen: res,
			})
			status = res.GetStatus()
		case *nfsv4.NfsArgop4_OP_OPENATTR:
			res := state.opOpenattr(&op.Opopenattr)
			resarray = append(resarray, &nfsv4.NfsResop4_OP_OPENATTR{
				Opopenattr: res,
			})
			status = res.Status
		case *nfsv4.NfsArgop4_OP_OPEN_CONFIRM:
			res := state.opOpenConfirm(&op.OpopenConfirm)
			resarray = append(resarray, &nfsv4.NfsResop4_OP_OPEN_CONFIRM{
				OpopenConfirm: res,
			})
			status = res.GetStatus()
		case *nfsv4.NfsArgop4_OP_OPEN_DOWNGRADE:
			res := state.opOpenDowngrade(&op.OpopenDowngrade)
			resarray = append(resarray, &nfsv4.NfsResop4_OP_OPEN_DOWNGRADE{
				OpopenDowngrade: res,
			})
			status = res.GetStatus()
		case *nfsv4.NfsArgop4_OP_PUTFH:
			res := state.opPutfh(&op.Opputfh)
			resarray = append(resarray, &nfsv4.NfsResop4_OP_PUTFH{
				Opputfh: res,
			})
			status = res.Status
		case *nfsv4.NfsArgop4_OP_PUTPUBFH:
			res := state.opPutpubfh()
			resarray = append(resarray, &nfsv4.NfsResop4_OP_PUTPUBFH{
				Opputpubfh: res,
			})
			status = res.Status
		case *nfsv4.NfsArgop4_OP_PUTROOTFH:
			res := state.opPutrootfh()
			resarray = append(resarray, &nfsv4.NfsResop4_OP_PUTROOTFH{
				Opputrootfh: res,
			})
			status = res.Status
		case *nfsv4.NfsArgop4_OP_READ:
			res := state.opRead(&op.Opread)
			resarray = append(resarray, &nfsv4.NfsResop4_OP_READ{
				Opread: res,
			})
			status = res.GetStatus()
		case *nfsv4.NfsArgop4_OP_READDIR:
			res := state.opReaddir(&op.Opreaddir)
			resarray = append(resarray, &nfsv4.NfsResop4_OP_READDIR{
				Opreaddir: res,
			})
			status = res.GetStatus()
		case *nfsv4.NfsArgop4_OP_READLINK:
			res := state.opReadlink()
			resarray = append(resarray, &nfsv4.NfsResop4_OP_READLINK{
				Opreadlink: res,
			})
			status = res.GetStatus()
		case *nfsv4.NfsArgop4_OP_RELEASE_LOCKOWNER:
			res := state.opReleaseLockowner(&op.OpreleaseLockowner)
			resarray = append(resarray, &nfsv4.NfsResop4_OP_RELEASE_LOCKOWNER{
				OpreleaseLockowner: res,
			})
			status = res.Status
		case *nfsv4.NfsArgop4_OP_REMOVE:
			res := state.opRemove(&op.Opremove)
			resarray = append(resarray, &nfsv4.NfsResop4_OP_REMOVE{
				Opremove: res,
			})
			status = res.GetStatus()
		case *nfsv4.NfsArgop4_OP_RENAME:
			res := state.opRename(&op.Oprename)
			resarray = append(resarray, &nfsv4.NfsResop4_OP_RENAME{
				Oprename: res,
			})
			status = res.GetStatus()
		case *nfsv4.NfsArgop4_OP_RENEW:
			res := state.opRenew(&op.Oprenew)
			resarray = append(resarray, &nfsv4.NfsResop4_OP_RENEW{
				Oprenew: res,
			})
			status = res.Status
		case *nfsv4.NfsArgop4_OP_RESTOREFH:
			res := state.opRestorefh()
			resarray = append(resarray, &nfsv4.NfsResop4_OP_RESTOREFH{
				Oprestorefh: res,
			})
			status = res.Status
		case *nfsv4.NfsArgop4_OP_SAVEFH:
			res := state.opSavefh()
			resarray = append(resarray, &nfsv4.NfsResop4_OP_SAVEFH{
				Opsavefh: res,
			})
			status = res.Status
		case *nfsv4.NfsArgop4_OP_SECINFO:
			res := state.opSecinfo(&op.Opsecinfo)
			resarray = append(resarray, &nfsv4.NfsResop4_OP_SECINFO{
				Opsecinfo: res,
			})
			status = res.GetStatus()
		case *nfsv4.NfsArgop4_OP_SETATTR:
			res := state.opSetattr(&op.Opsetattr)
			resarray = append(resarray, &nfsv4.NfsResop4_OP_SETATTR{
				Opsetattr: res,
			})
			status = res.Status
		case *nfsv4.NfsArgop4_OP_SETCLIENTID:
			res := state.opSetclientid(&op.Opsetclientid)
			resarray = append(resarray, &nfsv4.NfsResop4_OP_SETCLIENTID{
				Opsetclientid: res,
			})
			status = res.GetStatus()
		case *nfsv4.NfsArgop4_OP_SETCLIENTID_CONFIRM:
			res := state.opSetclientidConfirm(&op.OpsetclientidConfirm)
			resarray = append(resarray, &nfsv4.NfsResop4_OP_SETCLIENTID_CONFIRM{
				OpsetclientidConfirm: res,
			})
			status = res.Status
		case *nfsv4.NfsArgop4_OP_VERIFY:
			res := state.opVerify(&op.Opverify)
			resarray = append(resarray, &nfsv4.NfsResop4_OP_VERIFY{
				Opverify: res,
			})
			status = res.Status
		case *nfsv4.NfsArgop4_OP_WRITE:
			res := state.opWrite(&op.Opwrite)
			resarray = append(resarray, &nfsv4.NfsResop4_OP_WRITE{
				Opwrite: res,
			})
			status = res.GetStatus()
		default:
			// Unknown operation code.
			res := nfsv4.Illegal4res{Status: nfsv4.NFS4ERR_OP_ILLEGAL}
			resarray = append(resarray, &nfsv4.NfsResop4_OP_ILLEGAL{
				Opillegal: res,
			})
			status = res.Status
		}
		if status != nfsv4.NFS4_OK {
			// Terminate evaluation of further operations
			// upon failure.
			break
		}
	}
	return &nfsv4.Compound4res{
		Status:   status,
		Tag:      arguments.Tag,
		Resarray: resarray,
	}, nil
}

// enter acquires the lock on the NFSv4 server. After acquiring the
// lock, it cleans up state belonging to clients and open-owners that
// have stopped contacting the server.
func (p *baseProgram) enter() {
	for {
		now := p.clock.Now()
		p.lock.Lock()
		// p.now is monotonically non-decreasing, even if the
		// clock jumps backwards.
		if p.now.Before(now) {
			p.now = now
		}

		// Remove clients that have not renewed their state in
		// some time. Close all of the files and release all
		// locks owned by these clients.
		var ll leavesToClose
		minimumLastSeen := p.now.Add(-p.enforcedLeaseTime)
		for p.idleClientConfirmations.nextIdle != &p.idleClientConfirmations && p.idleClientConfirmations.nextIdle.lastSeen.Before(minimumLastSeen) {
			p.idleClientConfirmations.nextIdle.remove(p, &ll)
		}

		// Remove open-owners that no longer have any open files
		// associated with them, or are unconfirmed, and have
		// not been used for some time. If the client decides to
		// use the same open-owner once again, the next OPEN
		// operation will need to be confirmed using
		// OPEN_CONFIRM.
		for p.unusedOpenOwners.nextUnused != &p.unusedOpenOwners && p.unusedOpenOwners.nextUnused.lastUsed.Before(minimumLastSeen) {
			p.unusedOpenOwners.nextUnused.remove(p, &ll)
		}

		// If the code above ended up yielding files that need
		// to be closed, we close the files and retry. Closing
		// must happen without holding the lock.
		if ll.empty() {
			return
		}
		p.lock.Unlock()
		ll.closeAll()
	}
}

// leave releases the lock acquired by enter().
func (p *baseProgram) leave() {
	p.lock.Unlock()
}

// getConfirmedClientByShortID looks up a confirmed client by short
// client ID.
func (p *baseProgram) getConfirmedClientByShortID(shortID nfsv4.Clientid4) (*confirmedClientState, nfsv4.Nfsstat4) {
	clientConfirmation, ok := p.clientConfirmationsByShortID[shortID]
	if !ok {
		return nil, nfsv4.NFS4ERR_STALE_CLIENTID
	}
	// The short ID must belong to the confirmation that is currently
	// active for its client; otherwise it is stale.
	confirmedClient := clientConfirmation.client.confirmed
	if confirmedClient == nil || confirmedClient.confirmation != clientConfirmation {
		return nil, nfsv4.NFS4ERR_STALE_CLIENTID
	}
	return confirmedClient, nfsv4.NFS4_OK
}

// getOpenOwnerByOtherForTransaction looks up an open-owner by
// open-owner state ID. It waits for any existing transactions to
// complete. This makes it possible to start a new transaction.
+func (p *baseProgram) getOpenOwnerByOtherForTransaction(other regularStateIDOther) (*openOwnerState, nfsv4.Nfsstat4) { + for { + oofs, ok := p.openOwnerFilesByOther[other] + if !ok { + return nil, nfsv4.NFS4ERR_BAD_STATEID + } + if oos := oofs.openOwner; oos.waitForCurrentTransactionCompletion(p) { + return oos, nfsv4.NFS4_OK + } + } +} + +// getLockOwnerByOtherForTransaction looks up a lock-owner by lock-owner +// state ID, for the purpose of starting a new transaction. +// +// Unlike getOpenOwnerByOtherForTransaction() it does not need to wait +// for other transactions to complete, as we don't need to support any +// blocking operations against locks. +func (p *baseProgram) getLockOwnerByOtherForTransaction(other regularStateIDOther) (*lockOwnerState, nfsv4.Nfsstat4) { + lofs, ok := p.lockOwnerFilesByOther[other] + if !ok { + return nil, nfsv4.NFS4ERR_BAD_STATEID + } + return lofs.lockOwner, nfsv4.NFS4_OK +} + +// newRegularStateID allocates a new open-owner or lock-owner state ID. +func (p *baseProgram) newRegularStateID(seqID nfsv4.Seqid4) (stateID regularStateID) { + stateID.seqID = seqID + p.randomNumberGenerator.Read(stateID.other[:]) + return +} + +// internalizeStateID converts a state ID that's provided as part of a +// request to the format that's used internally. +// +// This method returns a nil state ID when the provided state ID is +// special (i.e., an anonymous state ID or READ bypass state ID). +func (p *baseProgram) internalizeStateID(stateID *nfsv4.Stateid4) (*regularStateID, nfsv4.Nfsstat4) { + switch stateID.Other { + case [nfsv4.NFS4_OTHER_SIZE]byte{}: + // Anonymous state ID. + if stateID.Seqid != 0 { + return nil, nfsv4.NFS4ERR_BAD_STATEID + } + return nil, nfsv4.NFS4_OK + case [...]byte{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}: + // READ bypass state ID. + if stateID.Seqid != 0xffffffff { + return nil, nfsv4.NFS4ERR_BAD_STATEID + } + return nil, nfsv4.NFS4_OK + default: + // Regular state ID. 
Only permit state IDs with a given + // prefix, so that we can accurately distinguish between + // NFS4ERR_STATE_STATEID and NFS4ERR_BAD_STATEID. + var prefix [stateIDOtherPrefixLength]byte + copy(prefix[:], stateID.Other[:]) + if prefix != p.stateIDOtherPrefix { + // State ID is from before a reboot/restart. + return nil, nfsv4.NFS4ERR_STALE_STATEID + } + internalStateID := ®ularStateID{seqID: stateID.Seqid} + copy(internalStateID.other[:], stateID.Other[stateIDOtherPrefixLength:]) + return internalStateID, nfsv4.NFS4_OK + } +} + +// internalizeRegularStateID is identical to internalizeStateID, except +// that it denies the use of special state IDs. +func (p *baseProgram) internalizeRegularStateID(stateID *nfsv4.Stateid4) (regularStateID, nfsv4.Nfsstat4) { + internalStateID, st := p.internalizeStateID(stateID) + if st != nfsv4.NFS4_OK { + return regularStateID{}, st + } + if internalStateID == nil { + return regularStateID{}, nfsv4.NFS4ERR_BAD_STATEID + } + return *internalStateID, nfsv4.NFS4_OK +} + +// externalizeStateID converts a regular state ID that's encoded in the +// internal format to the format used by the NFSv4 protocol. +func (p *baseProgram) externalizeStateID(stateID regularStateID) nfsv4.Stateid4 { + externalStateID := nfsv4.Stateid4{Seqid: stateID.seqID} + copy(externalStateID.Other[:], p.stateIDOtherPrefix[:]) + copy(externalStateID.Other[stateIDOtherPrefixLength:], stateID.other[:]) + return externalStateID +} + +// writeAttributes converts file attributes returned by the virtual file +// system into the NFSv4 wire format. It also returns a bitmask +// indicating which attributes were actually emitted. +func (p *baseProgram) writeAttributes(attributes *virtual.Attributes, attrRequest nfsv4.Bitmap4, w io.Writer) nfsv4.Bitmap4 { + attrMask := make(nfsv4.Bitmap4, len(attrRequest)) + if len(attrRequest) > 0 { + // Attributes 0 to 31. 
+ f := attrRequest[0] + var s uint32 + if b := uint32(1 << nfsv4.FATTR4_SUPPORTED_ATTRS); f&b != 0 { + s |= b + nfsv4.WriteBitmap4(w, nfsv4.Bitmap4{ + (1 << nfsv4.FATTR4_SUPPORTED_ATTRS) | + (1 << nfsv4.FATTR4_TYPE) | + (1 << nfsv4.FATTR4_FH_EXPIRE_TYPE) | + (1 << nfsv4.FATTR4_CHANGE) | + (1 << nfsv4.FATTR4_SIZE) | + (1 << nfsv4.FATTR4_LINK_SUPPORT) | + (1 << nfsv4.FATTR4_SYMLINK_SUPPORT) | + (1 << nfsv4.FATTR4_NAMED_ATTR) | + (1 << nfsv4.FATTR4_FSID) | + (1 << nfsv4.FATTR4_UNIQUE_HANDLES) | + (1 << nfsv4.FATTR4_LEASE_TIME) | + (1 << nfsv4.FATTR4_RDATTR_ERROR) | + (1 << nfsv4.FATTR4_FILEHANDLE) | + (1 << nfsv4.FATTR4_FILEID), + (1 << (nfsv4.FATTR4_MODE - 32)) | + (1 << (nfsv4.FATTR4_NUMLINKS - 32)) | + (1 << (nfsv4.FATTR4_TIME_ACCESS - 32)) | + (1 << (nfsv4.FATTR4_TIME_METADATA - 32)) | + (1 << (nfsv4.FATTR4_TIME_MODIFY - 32)), + }) + } + if b := uint32(1 << nfsv4.FATTR4_TYPE); f&b != 0 { + s |= b + switch attributes.GetFileType() { + case filesystem.FileTypeRegularFile: + nfsv4.NF4REG.WriteTo(w) + case filesystem.FileTypeDirectory: + nfsv4.NF4DIR.WriteTo(w) + case filesystem.FileTypeSymlink: + nfsv4.NF4LNK.WriteTo(w) + case filesystem.FileTypeBlockDevice: + nfsv4.NF4BLK.WriteTo(w) + case filesystem.FileTypeCharacterDevice: + nfsv4.NF4CHR.WriteTo(w) + case filesystem.FileTypeFIFO: + nfsv4.NF4FIFO.WriteTo(w) + case filesystem.FileTypeSocket: + nfsv4.NF4SOCK.WriteTo(w) + default: + panic("Unknown file type") + } + } + if b := uint32(1 << nfsv4.FATTR4_FH_EXPIRE_TYPE); f&b != 0 { + s |= b + // Using HandleResolver, we can resolve any + // object in the file system until it is removed + // from the file system. 
+ nfsv4.WriteUint32T(w, nfsv4.FH4_PERSISTENT) + } + if b := uint32(1 << nfsv4.FATTR4_CHANGE); f&b != 0 { + s |= b + nfsv4.WriteChangeid4(w, attributes.GetChangeID()) + } + if b := uint32(1 << nfsv4.FATTR4_SIZE); f&b != 0 { + sizeBytes, ok := attributes.GetSizeBytes() + if !ok { + panic("FATTR4_SIZE is a required attribute") + } + s |= b + nfsv4.WriteUint64T(w, sizeBytes) + } + if b := uint32(1 << nfsv4.FATTR4_LINK_SUPPORT); f&b != 0 { + s |= b + runtime.WriteBool(w, true) + } + if b := uint32(1 << nfsv4.FATTR4_SYMLINK_SUPPORT); f&b != 0 { + s |= b + runtime.WriteBool(w, true) + } + if b := uint32(1 << nfsv4.FATTR4_NAMED_ATTR); f&b != 0 { + s |= b + runtime.WriteBool(w, false) + } + if b := uint32(1 << nfsv4.FATTR4_FSID); f&b != 0 { + s |= b + fsid := nfsv4.Fsid4{ + Major: 1, + Minor: 1, + } + fsid.WriteTo(w) + } + if b := uint32(1 << nfsv4.FATTR4_UNIQUE_HANDLES); f&b != 0 { + s |= b + runtime.WriteBool(w, true) + } + if b := uint32(1 << nfsv4.FATTR4_LEASE_TIME); f&b != 0 { + s |= b + nfsv4.WriteNfsLease4(w, p.announcedLeaseTime) + } + if b := uint32(1 << nfsv4.FATTR4_FILEHANDLE); f&b != 0 { + s |= b + nfsv4.WriteNfsFh4(w, attributes.GetFileHandle()) + } + if b := uint32(1 << nfsv4.FATTR4_FILEID); f&b != 0 { + s |= b + nfsv4.WriteUint64T(w, attributes.GetInodeNumber()) + } + attrMask[0] = s + } + if len(attrRequest) > 1 { + // Attributes 32 to 63. 
+ f := attrRequest[1] + var s uint32 + if b := uint32(1 << (nfsv4.FATTR4_MODE - 32)); f&b != 0 { + if permissions, ok := attributes.GetPermissions(); ok { + s |= b + nfsv4.WriteMode4(w, permissions.ToMode()) + } + } + if b := uint32(1 << (nfsv4.FATTR4_NUMLINKS - 32)); f&b != 0 { + s |= b + nfsv4.WriteUint32T(w, attributes.GetLinkCount()) + } + if b := uint32(1 << (nfsv4.FATTR4_TIME_ACCESS - 32)); f&b != 0 { + s |= b + deterministicNfstime4.WriteTo(w) + } + if b := uint32(1 << (nfsv4.FATTR4_TIME_METADATA - 32)); f&b != 0 { + s |= b + deterministicNfstime4.WriteTo(w) + } + if b := uint32(1 << (nfsv4.FATTR4_TIME_MODIFY - 32)); f&b != 0 { + s |= b + deterministicNfstime4.WriteTo(w) + } + attrMask[1] = s + } + return attrMask +} + +// attributesToFattr4 converts attributes returned by the virtual file +// system layer to an NFSv4 fattr4 structure. As required by the +// protocol, attributes are stored in the order of the FATTR4_* +// constants. +func (p *baseProgram) attributesToFattr4(attributes *virtual.Attributes, attrRequest nfsv4.Bitmap4) nfsv4.Fattr4 { + w := bytes.NewBuffer(nil) + attrMask := p.writeAttributes(attributes, attrRequest, w) + return nfsv4.Fattr4{ + Attrmask: attrMask, + AttrVals: w.Bytes(), + } +} + +// regularStateID is an internal representation of non-special +// open-owner or lock-owner state IDs. +type regularStateID struct { + seqID nfsv4.Seqid4 + other regularStateIDOther +} + +// regularStateIDOther is an internal representation of the 'other' +// field of non-special open-owner or lock-owner state IDs. +type regularStateIDOther [nfsv4.NFS4_OTHER_SIZE - stateIDOtherPrefixLength]byte + +// compoundState contains the state that needs to be tracked during the +// lifetime of a single NFSv4 COMPOUND procedure. It provides +// implementations of each of the operations contained in the COMPOUND +// procedure. 
type compoundState struct {
	program *baseProgram

	// File handles manipulated by PUTFH/SAVEFH/RESTOREFH and the
	// lookup/create operations.
	currentFileHandle fileHandle
	savedFileHandle   fileHandle
}

// getOpenOwnerFileByStateID obtains an open-owner file by open state
// ID. It also checks whether the open state ID corresponds to the
// current file handle, and that the client provided sequence ID matches
// the server's value.
func (s *compoundState) getOpenOwnerFileByStateID(stateID regularStateID, allowUnconfirmed bool) (*openOwnerFileState, nfsv4.Nfsstat4) {
	p := s.program
	oofs, ok := p.openOwnerFilesByOther[stateID.other]
	if !ok {
		return nil, nfsv4.NFS4ERR_BAD_STATEID
	}
	if s.currentFileHandle.leaf == nil && s.currentFileHandle.directory == nil {
		return nil, nfsv4.NFS4ERR_NOFILEHANDLE
	}
	if oofs.useCount == 0 {
		// File has already been closed.
		return nil, nfsv4.NFS4ERR_BAD_STATEID
	}
	if !bytes.Equal(s.currentFileHandle.handle, oofs.openedFile.handle) {
		// State ID belongs to a different file.
		return nil, nfsv4.NFS4ERR_BAD_STATEID
	}
	if !oofs.openOwner.confirmed && !allowUnconfirmed {
		// The state ID was returned by a previous OPEN call
		// that still requires a call to OPEN_CONFIRM. We should
		// treat the state ID as non-existent until OPEN_CONFIRM
		// has been called.
		//
		// More details: RFC 7530, section 16.18.5, paragraph 6.
		return nil, nfsv4.NFS4ERR_BAD_STATEID
	}
	if st := compareStateSeqID(stateID.seqID, oofs.stateID.seqID); st != nfsv4.NFS4_OK {
		return nil, st
	}
	return oofs, nfsv4.NFS4_OK
}

// getLockOwnerFileByStateID obtains a lock-owner file by state ID. It
// also checks whether the lock state ID corresponds to the current file
// handle, and that the client provided sequence ID matches the server's
// value.
func (s *compoundState) getLockOwnerFileByStateID(stateID regularStateID) (*lockOwnerFileState, nfsv4.Nfsstat4) {
	p := s.program
	lofs, ok := p.lockOwnerFilesByOther[stateID.other]
	if !ok {
		return nil, nfsv4.NFS4ERR_BAD_STATEID
	}
	if s.currentFileHandle.leaf == nil && s.currentFileHandle.directory == nil {
		return nil, nfsv4.NFS4ERR_NOFILEHANDLE
	}
	if !bytes.Equal(s.currentFileHandle.handle, lofs.openOwnerFile.openedFile.handle) {
		return nil, nfsv4.NFS4ERR_BAD_STATEID
	}
	if st := compareStateSeqID(stateID.seqID, lofs.stateID.seqID); st != nfsv4.NFS4_OK {
		return nil, st
	}
	return lofs, nfsv4.NFS4_OK
}

// getOpenedLeaf is used by READ and WRITE operations to obtain an
// opened leaf corresponding to a file handle and open-owner state ID.
//
// When a special state ID is provided, it ensures the file is
// temporarily opened for the duration of the operation. When a
// non-special state ID is provided, it ensures that the file was
// originally opened with the correct share access mask.
func (s *compoundState) getOpenedLeaf(stateID *nfsv4.Stateid4, shareAccess virtual.ShareMask) (virtual.Leaf, func(), nfsv4.Nfsstat4) {
	p := s.program
	internalStateID, st := p.internalizeStateID(stateID)
	if st != nfsv4.NFS4_OK {
		return nil, nil, st
	}

	if internalStateID == nil {
		// Client provided the anonymous state ID or READ bypass
		// state ID. Temporarily open the file to perform the
		// operation.
		currentLeaf, st := s.currentFileHandle.getLeaf()
		if st != nfsv4.NFS4_OK {
			return nil, nil, st
		}
		if vs := currentLeaf.VirtualOpenSelf(
			shareAccess,
			&virtual.OpenExistingOptions{},
			0,
			&virtual.Attributes{},
		); vs != virtual.StatusOK {
			return nil, nil, toNFSv4Status(vs)
		}
		// The returned closer undoes the temporary open.
		return currentLeaf, func() { currentLeaf.VirtualClose(1) }, nfsv4.NFS4_OK
	}

	p.enter()
	defer p.leave()

	oofs, st := s.getOpenOwnerFileByStateID(*internalStateID, false)
	switch st {
	case nfsv4.NFS4_OK:
		if shareAccess&^oofs.shareAccess != 0 {
			// Attempting to write to a file opened for
			// reading, or vice versa.
			return nil, nil, nfsv4.NFS4ERR_OPENMODE
		}
	case nfsv4.NFS4ERR_BAD_STATEID:
		// Client may have provided a lock state ID.
		lofs, st := s.getLockOwnerFileByStateID(*internalStateID)
		if st != nfsv4.NFS4_OK {
			return nil, nil, st
		}
		oofs = lofs.openOwnerFile
		if shareAccess&^lofs.shareAccess != 0 {
			// Attempted to write to a file that was opened
			// for reading at the time the lock-owner state
			// was established, or vice versa.
			//
			// More details: RFC 7530, section 9.1.6,
			// paragraph 7.
			return nil, nil, nfsv4.NFS4ERR_OPENMODE
		}
	default:
		return nil, nil, st
	}

	// Ensure that both the client and file are not released while
	// the I/O operation is taking place.
	clientConfirmation := oofs.openOwner.confirmedClient.confirmation
	clientConfirmation.hold(p)
	oofs.useCount.increase()
	return oofs.openedFile.leaf, func() {
		// Release the references taken above, closing the file
		// if this was the last one.
		var ll leavesToClose
		p.enter()
		oofs.maybeClose(&ll)
		clientConfirmation.release(p)
		p.leave()
		ll.closeAll()
	}, nfsv4.NFS4_OK
}

// verifyAttributes is the common implementation of the VERIFY and
// NVERIFY operations.
func (s *compoundState) verifyAttributes(fattr *nfsv4.Fattr4) nfsv4.Nfsstat4 {
	currentNode, _, st := s.currentFileHandle.getNode()
	if st != nfsv4.NFS4_OK {
		return st
	}

	// Request attributes of the file. Don't actually store them in
	// a fattr4 structure. Use comparingWriter to check whether the
	// generated attributes are equal to the ones provided.
	attrRequest := fattr.Attrmask
	var attributes virtual.Attributes
	currentNode.VirtualGetAttributes(attrRequestToAttributesMask(attrRequest), &attributes)
	w := comparingWriter{
		reference: fattr.AttrVals,
		status:    nfsv4.NFS4ERR_SAME,
	}
	p := s.program
	attrMask := p.writeAttributes(&attributes, attrRequest, &w)

	for i := 0; i < len(attrRequest); i++ {
		if attrMask[i] != attrRequest[i] {
			// One or more of the provided attributes were
			// not generated. This either means that the
			// client provided unsupported attributes or
			// ones that are write-only.
			//
			// NOTE(review): the following line is garbled in
			// this excerpt — text between '<' and '>' (likely
			// "(1<<nfsv4.FATTR4_RDATTR_ERROR) != 0 { ... }"
			// plus the loop's closing braces and a trailing-
			// data check) has been lost. Restore from the
			// upstream source; do not trust the brace
			// structure below.
			if attrRequest[0]&(1< 0 {
				// Provided attributes contain trailing data.
				return nfsv4.NFS4ERR_BADXDR
			}
			return w.status
}

func (s *compoundState) opAccess(args *nfsv4.Access4args) nfsv4.Access4res {
	currentNode, isDirectory, st := s.currentFileHandle.getNode()
	if st != nfsv4.NFS4_OK {
		return &nfsv4.Access4res_default{Status: st}
	}

	// Depending on whether the node is a directory or a leaf, we
	// need to report different NFSv4 acccess permissions.
	readMask := uint32(nfsv4.ACCESS4_READ)
	writeMask := uint32(nfsv4.ACCESS4_EXTEND | nfsv4.ACCESS4_MODIFY)
	executeMask := uint32(0)
	if isDirectory {
		writeMask |= nfsv4.ACCESS4_DELETE
		executeMask |= nfsv4.ACCESS4_LOOKUP
	} else {
		executeMask |= nfsv4.ACCESS4_EXECUTE
	}

	// Request node permissions and convert them to NFSv4 values.
	var attributes virtual.Attributes
	currentNode.VirtualGetAttributes(virtual.AttributesMaskPermissions, &attributes)
	permissions, ok := attributes.GetPermissions()
	if !ok {
		panic("Permissions attribute requested, but not returned")
	}
	var access nfsv4.Uint32T
	if permissions&virtual.PermissionsRead != 0 {
		access |= readMask
	}
	if permissions&virtual.PermissionsWrite != 0 {
		access |= writeMask
	}
	if permissions&virtual.PermissionsExecute != 0 {
		access |= executeMask
	}

	return &nfsv4.Access4res_NFS4_OK{
		Resok4: nfsv4.Access4resok{
			Supported: (readMask | writeMask | executeMask) & args.Access,
			Access:    access & args.Access,
		},
	}
}

func (s *compoundState) opClose(args *nfsv4.Close4args) nfsv4.Close4res {
	p := s.program
	openStateID, st := p.internalizeRegularStateID(&args.OpenStateid)
	if st != nfsv4.NFS4_OK {
		return &nfsv4.Close4res_default{Status: st}
	}

	var ll leavesToClose
	defer ll.closeAll()

	p.enter()
	defer p.leave()

	oos, st := p.getOpenOwnerByOtherForTransaction(openStateID.other)
	if st != nfsv4.NFS4_OK {
		return &nfsv4.Close4res_default{Status: st}
	}
	transaction, lastResponse, st := oos.startTransaction(p, args.Seqid, &ll, unconfirmedOpenOwnerPolicyDeny)
	if st != nfsv4.NFS4_OK {
		// Replay of a previous request, or a sequencing error.
		if r, ok := lastResponse.(nfsv4.Close4res); ok {
			return r
		}
		return &nfsv4.Close4res_default{Status: st}
	}
	response, closedFile := s.txClose(openStateID, &ll)
	transaction.complete(&openOwnerLastResponse{
		response:   response,
		closedFile: closedFile,
	})
	return response
}

// txClose is the part of CLOSE that runs within an open-owner
// transaction.
func (s *compoundState) txClose(openStateID regularStateID, ll *leavesToClose) (nfsv4.Close4res, *openOwnerFileState) {
	oofs, st := s.getOpenOwnerFileByStateID(openStateID, false)
	if st != nfsv4.NFS4_OK {
		return &nfsv4.Close4res_default{Status: st}, nil
	}

	// Only half-close the file, so that the state ID remains valid
	// for doing replays of the CLOSE request.
	//
	// More details: RFC 7530, section 9.10.1.
	p := s.program
	oofs.removeStart(p, ll)
	oofs.stateID.seqID = nextSeqID(oofs.stateID.seqID)

	return &nfsv4.Close4res_NFS4_OK{
		OpenStateid: p.externalizeStateID(oofs.stateID),
	}, oofs
}

func (s *compoundState) opCommit(args *nfsv4.Commit4args) nfsv4.Commit4res {
	// As this implementation is purely built for the purpose of
	// doing builds, there is no need to actually commit to storage.
	if _, st := s.currentFileHandle.getLeaf(); st != nfsv4.NFS4_OK {
		return &nfsv4.Commit4res_default{Status: st}
	}
	return &nfsv4.Commit4res_NFS4_OK{
		Resok4: nfsv4.Commit4resok{
			Writeverf: s.program.rebootVerifier,
		},
	}
}

func (s *compoundState) opCreate(args *nfsv4.Create4args) nfsv4.Create4res {
	currentDirectory, st := s.currentFileHandle.getDirectory()
	if st != nfsv4.NFS4_OK {
		return &nfsv4.Create4res_default{Status: st}
	}
	name, st := nfsv4NewComponent(args.Objname)
	if st != nfsv4.NFS4_OK {
		return &nfsv4.Create4res_default{Status: st}
	}

	var attributes virtual.Attributes
	var changeInfo virtual.ChangeInfo
	var fileHandle fileHandle
	var vs virtual.Status
	switch objectType := args.Objtype.(type) {
	case *nfsv4.Createtype4_NF4BLK, *nfsv4.Createtype4_NF4CHR:
		// Character and block devices can only be provided as
		// part of input roots, if workers are set up to provide
		// them. They can't be created through the virtual file
		// system.
		return &nfsv4.Create4res_default{Status: nfsv4.NFS4ERR_PERM}
	case *nfsv4.Createtype4_NF4DIR:
		var directory virtual.Directory
		directory, changeInfo, vs = currentDirectory.VirtualMkdir(name, virtual.AttributesMaskFileHandle, &attributes)
		fileHandle.directory = directory
	case *nfsv4.Createtype4_NF4FIFO:
		var leaf virtual.Leaf
		leaf, changeInfo, vs = currentDirectory.VirtualMknod(name, filesystem.FileTypeFIFO, virtual.AttributesMaskFileHandle, &attributes)
		fileHandle.leaf = leaf
	case *nfsv4.Createtype4_NF4LNK:
		var leaf virtual.Leaf
		leaf, changeInfo, vs = currentDirectory.VirtualSymlink(objectType.Linkdata, name, virtual.AttributesMaskFileHandle, &attributes)
		fileHandle.leaf = leaf
	case *nfsv4.Createtype4_NF4SOCK:
		var leaf virtual.Leaf
		leaf, changeInfo, vs = currentDirectory.VirtualMknod(name, filesystem.FileTypeSocket, virtual.AttributesMaskFileHandle, &attributes)
		fileHandle.leaf = leaf
	default:
		return &nfsv4.Create4res_default{Status: nfsv4.NFS4ERR_BADTYPE}
	}
	if vs != virtual.StatusOK {
		return &nfsv4.Create4res_default{Status: toNFSv4Status(vs)}
	}
	fileHandle.handle = attributes.GetFileHandle()

	// CREATE makes the newly created node the current file handle.
	s.currentFileHandle = fileHandle
	return &nfsv4.Create4res_NFS4_OK{
		Resok4: nfsv4.Create4resok{
			Cinfo: toNFSv4ChangeInfo(&changeInfo),
		},
	}
}

func (s *compoundState) opDelegpurge(args *nfsv4.Delegpurge4args) nfsv4.Delegpurge4res {
	// This implementation does not support CLAIM_DELEGATE_PREV, so
	// there is no need to implement DELEGPURGE.
	return nfsv4.Delegpurge4res{Status: nfsv4.NFS4ERR_NOTSUPP}
}

func (s *compoundState) opDelegreturn(args *nfsv4.Delegreturn4args) nfsv4.Delegreturn4res {
	// This implementation never hands out any delegations to the
	// client, meaning that any state ID provided to this operation
	// is invalid.
	return nfsv4.Delegreturn4res{Status: nfsv4.NFS4ERR_BAD_STATEID}
}

func (s *compoundState) opGetattr(args *nfsv4.Getattr4args) nfsv4.Getattr4res {
	currentNode, _, st := s.currentFileHandle.getNode()
	if st != nfsv4.NFS4_OK {
		return &nfsv4.Getattr4res_default{Status: st}
	}
	var attributes virtual.Attributes
	currentNode.VirtualGetAttributes(attrRequestToAttributesMask(args.AttrRequest), &attributes)
	p := s.program
	return &nfsv4.Getattr4res_NFS4_OK{
		Resok4: nfsv4.Getattr4resok{
			ObjAttributes: p.attributesToFattr4(&attributes, args.AttrRequest),
		},
	}
}

func (s *compoundState) opGetfh() nfsv4.Getfh4res {
	_, _, st := s.currentFileHandle.getNode()
	if st != nfsv4.NFS4_OK {
		return &nfsv4.Getfh4res_default{Status: st}
	}
	return &nfsv4.Getfh4res_NFS4_OK{
		Resok4: nfsv4.Getfh4resok{
			Object: s.currentFileHandle.handle,
		},
	}
}

func (s *compoundState) opLink(args *nfsv4.Link4args) nfsv4.Link4res {
	// LINK links the saved file handle (a leaf) into the current
	// file handle (a directory).
	sourceLeaf, st := s.savedFileHandle.getLeaf()
	if st != nfsv4.NFS4_OK {
		return &nfsv4.Link4res_default{Status: st}
	}
	targetDirectory, st := s.currentFileHandle.getDirectory()
	if st != nfsv4.NFS4_OK {
		return &nfsv4.Link4res_default{Status: st}
	}
	name, st := nfsv4NewComponent(args.Newname)
	if st != nfsv4.NFS4_OK {
		return &nfsv4.Link4res_default{Status: st}
	}
	changeInfo, vs := targetDirectory.VirtualLink(name, sourceLeaf, 0, &virtual.Attributes{})
	if vs != virtual.StatusOK {
		return &nfsv4.Link4res_default{Status: toNFSv4Status(vs)}
	}
	return &nfsv4.Link4res_NFS4_OK{
		Resok4: nfsv4.Link4resok{
			Cinfo: toNFSv4ChangeInfo(&changeInfo),
		},
	}
}

func (s *compoundState) opLock(args *nfsv4.Lock4args) nfsv4.Lock4res {
	var ll leavesToClose
	defer ll.closeAll()

	p := s.program
	p.enter()
	defer p.leave()

	switch locker := args.Locker.(type) {
	case *nfsv4.Locker4_TRUE:
		// Create a new lock-owner file.
		owner := &locker.OpenOwner
		openStateID, st := p.internalizeRegularStateID(&owner.OpenStateid)
		if st != nfsv4.NFS4_OK {
			return &nfsv4.Lock4res_default{Status: st}
		}

		oos, st := p.getOpenOwnerByOtherForTransaction(openStateID.other)
		if st != nfsv4.NFS4_OK {
			return &nfsv4.Lock4res_default{Status: st}
		}

		transaction, lastResponse, st := oos.startTransaction(p, owner.OpenSeqid, &ll, unconfirmedOpenOwnerPolicyDeny)
		if st != nfsv4.NFS4_OK {
			if r, ok := lastResponse.(nfsv4.Lock4res); ok {
				return r
			}
			return &nfsv4.Lock4res_default{Status: st}
		}
		response := s.txLockInitial(args, openStateID, owner)
		transaction.complete(&openOwnerLastResponse{
			response: response,
		})
		return response
	case *nfsv4.Locker4_FALSE:
		// Add additional lock to existing lock-owner file.
		owner := &locker.LockOwner
		lockStateID, st := p.internalizeRegularStateID(&owner.LockStateid)
		if st != nfsv4.NFS4_OK {
			return &nfsv4.Lock4res_default{Status: st}
		}

		los, st := p.getLockOwnerByOtherForTransaction(lockStateID.other)
		if st != nfsv4.NFS4_OK {
			return &nfsv4.Lock4res_default{Status: st}
		}

		transaction, lastResponse, st := los.startTransaction(p, owner.LockSeqid, false)
		if st != nfsv4.NFS4_OK {
			if r, ok := lastResponse.(nfsv4.Lock4res); ok {
				return r
			}
			return &nfsv4.Lock4res_default{Status: st}
		}
		response := s.txLockSuccessive(args, lockStateID, owner)
		transaction.complete(response)
		return response
	default:
		// Incorrectly encoded boolean value.
		return &nfsv4.Lock4res_default{Status: nfsv4.NFS4ERR_BADXDR}
	}
}

// txLockInitial is the part of LOCK (new lock-owner case) that runs
// within an open-owner transaction.
//
// NOTE(review): this function continues beyond the end of this excerpt.
func (s *compoundState) txLockInitial(args *nfsv4.Lock4args, openStateID regularStateID, owner *nfsv4.OpenToLockOwner4) nfsv4.Lock4res {
	oofs, st := s.getOpenOwnerFileByStateID(openStateID, false)
	if st != nfsv4.NFS4_OK {
		return &nfsv4.Lock4res_default{Status: st}
	}

	oos := oofs.openOwner
	if owner.LockOwner.Clientid != oos.confirmedClient.confirmation.key.shortClientID {
		// Provided lock-owner's client ID does not match with
		// that of the open-owner.
		return &nfsv4.Lock4res_default{Status: nfsv4.NFS4ERR_INVAL}
	}

	confirmedClient := oos.confirmedClient
	lockOwnerKey := string(owner.LockOwner.Owner)
	los, ok := confirmedClient.lockOwners[lockOwnerKey]
	initialTransaction := false
	if !ok {
		// Lock-owner does not yet exist. Create a new one.
		los = &lockOwnerState{
			confirmedClient: confirmedClient,
			owner:           owner.LockOwner.Owner,
		}
		confirmedClient.lockOwners[lockOwnerKey] = los
		initialTransaction = true
	} else {
		if _, ok := oofs.lockOwnerFiles[los]; ok {
			// Lock-owner has already been associated with
			// this file. We should have gone through
			// txLockSuccessive() instead.
			//
			// More details: RFC 7530, section 16.10.5,
			// bullet point 2.
			return &nfsv4.Lock4res_default{Status: nfsv4.NFS4ERR_BAD_SEQID}
		}
	}

	// Start a nested transaction on the lock-owner.
	p := s.program
	transaction, lastResponse, st := los.startTransaction(p, owner.LockSeqid, initialTransaction)
	if st != nfsv4.NFS4_OK {
		if initialTransaction {
			panic("Failed to start transaction on a new lock-owner, which is impossible. This would cause the lock-owner to leak.")
		}
		if r, ok := lastResponse.(nfsv4.Lock4res); ok {
			return r
		}
		return &nfsv4.Lock4res_default{Status: st}
	}

	// Create a new lock-owner file. Set the sequence ID to zero, as
	// txLockCommon() will already bump it to one.
+ lofs := &lockOwnerFileState{ + lockOwner: los, + openOwnerFile: oofs, + shareAccess: oofs.shareAccess, + lockOwnerIndex: len(los.files), + stateID: p.newRegularStateID(0), + } + p.lockOwnerFilesByOther[lofs.stateID.other] = lofs + oofs.lockOwnerFiles[los] = lofs + los.files = append(los.files, lofs) + + response := s.txLockCommon(args, lofs) + transaction.complete(response) + + // Upon failure, undo the creation of the newly created + // lock-owner file. This may also remove the lock-owner if it + // references no other files. + if response.GetStatus() != nfsv4.NFS4_OK { + if lofs.lockCount > 0 { + panic("Failed to acquire lock on a newly created lock-owner file, yet its lock count is non-zero") + } + lofs.remove(p) + } + return response +} + +func (s *compoundState) txLockSuccessive(args *nfsv4.Lock4args, lockStateID regularStateID, owner *nfsv4.ExistLockOwner4) nfsv4.Lock4res { + lofs, st := s.getLockOwnerFileByStateID(lockStateID) + if st != nfsv4.NFS4_OK { + return &nfsv4.Lock4res_default{Status: st} + } + return s.txLockCommon(args, lofs) +} + +func (s *compoundState) txLockCommon(args *nfsv4.Lock4args, lofs *lockOwnerFileState) nfsv4.Lock4res { + start, end, st := offsetLengthToStartEnd(args.Offset, args.Length) + if st != nfsv4.NFS4_OK { + return &nfsv4.Lock4res_default{Status: st} + } + lockType, st := nfsLockType4ToByteRangeLockType(args.Locktype) + if st != nfsv4.NFS4_OK { + return &nfsv4.Lock4res_default{Status: st} + } + + lock := &virtual.ByteRangeLock[*lockOwnerState]{ + Owner: lofs.lockOwner, + Start: start, + End: end, + Type: lockType, + } + + // Test whether the new lock conflicts with an existing one. 
+ openedFile := lofs.openOwnerFile.openedFile + if conflictingLock := openedFile.locks.Test(lock); conflictingLock != nil { + return &nfsv4.Lock4res_NFS4ERR_DENIED{ + Denied: byteRangeLockToLock4Denied(conflictingLock), + } + } + + lofs.lockCount += openedFile.locks.Set(lock) + if lofs.lockCount < 0 { + panic("Negative lock count") + } + lofs.stateID.seqID = nextSeqID(lofs.stateID.seqID) + p := s.program + return &nfsv4.Lock4res_NFS4_OK{ + Resok4: nfsv4.Lock4resok{ + LockStateid: p.externalizeStateID(lofs.stateID), + }, + } +} + +func (s *compoundState) opLockt(args *nfsv4.Lockt4args) nfsv4.Lockt4res { + _, st := s.currentFileHandle.getLeaf() + if st != nfsv4.NFS4_OK { + return &nfsv4.Lockt4res_default{Status: st} + } + handleKey := string(s.currentFileHandle.handle) + + p := s.program + p.enter() + defer p.leave() + + start, end, st := offsetLengthToStartEnd(args.Offset, args.Length) + if st != nfsv4.NFS4_OK { + return &nfsv4.Lockt4res_default{Status: st} + } + lockType, st := nfsLockType4ToByteRangeLockType(args.Locktype) + if st != nfsv4.NFS4_OK { + return &nfsv4.Lockt4res_default{Status: st} + } + + openedFile, ok := p.openedFilesByHandle[handleKey] + if !ok { + // File isn't opened by anyone, meaning no locks may + // cause a conflict. Just return success. + return &nfsv4.Lockt4res_NFS4_OK{} + } + + confirmedClient, st := p.getConfirmedClientByShortID(args.Owner.Clientid) + if st != nfsv4.NFS4_OK { + return &nfsv4.Lockt4res_default{Status: st} + } + confirmedClient.confirmation.hold(p) + defer confirmedClient.confirmation.release(p) + + // Attempt to obtain the lock owner that is provided in the + // arguments. It may be the case that none exists, in which case + // we just pass a nil value to ByteRangeLockSet.Test(), + // indicating a lock-owner that differs from any existing one. 
+ los := confirmedClient.lockOwners[string(args.Owner.Owner)] + lock := &virtual.ByteRangeLock[*lockOwnerState]{ + Owner: los, + Start: start, + End: end, + Type: lockType, + } + + if conflictingLock := openedFile.locks.Test(lock); conflictingLock != nil { + return &nfsv4.Lockt4res_NFS4ERR_DENIED{ + Denied: byteRangeLockToLock4Denied(conflictingLock), + } + } + return &nfsv4.Lockt4res_NFS4_OK{} +} + +func (s *compoundState) opLocku(args *nfsv4.Locku4args) nfsv4.Locku4res { + p := s.program + p.enter() + defer p.leave() + + lockStateID, st := p.internalizeRegularStateID(&args.LockStateid) + if st != nfsv4.NFS4_OK { + return &nfsv4.Locku4res_default{Status: st} + } + + los, st := p.getLockOwnerByOtherForTransaction(lockStateID.other) + if st != nfsv4.NFS4_OK { + return &nfsv4.Locku4res_default{Status: st} + } + + transaction, lastResponse, st := los.startTransaction(p, args.Seqid, false) + if st != nfsv4.NFS4_OK { + if r, ok := lastResponse.(nfsv4.Locku4res); ok { + return r + } + return &nfsv4.Locku4res_default{Status: st} + } + response := s.txLocku(args, lockStateID) + transaction.complete(response) + return response +} + +func (s *compoundState) txLocku(args *nfsv4.Locku4args, lockStateID regularStateID) nfsv4.Locku4res { + lofs, st := s.getLockOwnerFileByStateID(lockStateID) + if st != nfsv4.NFS4_OK { + return &nfsv4.Locku4res_default{Status: st} + } + start, end, st := offsetLengthToStartEnd(args.Offset, args.Length) + if st != nfsv4.NFS4_OK { + return &nfsv4.Locku4res_default{Status: st} + } + + lock := &virtual.ByteRangeLock[*lockOwnerState]{ + Owner: lofs.lockOwner, + Start: start, + End: end, + Type: virtual.ByteRangeLockTypeUnlocked, + } + + lofs.lockCount += lofs.openOwnerFile.openedFile.locks.Set(lock) + if lofs.lockCount < 0 { + panic("Negative lock count") + } + lofs.stateID.seqID = nextSeqID(lofs.stateID.seqID) + p := s.program + return &nfsv4.Locku4res_NFS4_OK{ + LockStateid: p.externalizeStateID(lofs.stateID), + } +} + +func (s *compoundState) 
opLookup(args *nfsv4.Lookup4args) nfsv4.Lookup4res { + currentDirectory, st := s.currentFileHandle.getDirectoryOrSymlink() + if st != nfsv4.NFS4_OK { + return nfsv4.Lookup4res{Status: st} + } + name, st := nfsv4NewComponent(args.Objname) + if st != nfsv4.NFS4_OK { + return nfsv4.Lookup4res{Status: st} + } + var attributes virtual.Attributes + directory, leaf, vs := currentDirectory.VirtualLookup(name, virtual.AttributesMaskFileHandle, &attributes) + if vs != virtual.StatusOK { + return nfsv4.Lookup4res{Status: toNFSv4Status(vs)} + } + s.currentFileHandle = fileHandle{ + handle: attributes.GetFileHandle(), + directory: directory, + leaf: leaf, + } + return nfsv4.Lookup4res{Status: nfsv4.NFS4_OK} +} + +func (s *compoundState) opLookupp() nfsv4.Lookupp4res { + if _, st := s.currentFileHandle.getDirectoryOrSymlink(); st != nfsv4.NFS4_OK { + return nfsv4.Lookupp4res{Status: st} + } + + // TODO: Do we want to implement this method as well? For most + // directory types (e.g., CAS backed directories) this method is + // hard to implement, as they don't necessarily have a single + // parent. + return nfsv4.Lookupp4res{Status: nfsv4.NFS4ERR_NOENT} +} + +func (s *compoundState) opNverify(args *nfsv4.Nverify4args) nfsv4.Nverify4res { + if st := s.verifyAttributes(&args.ObjAttributes); st != nfsv4.NFS4ERR_NOT_SAME { + return nfsv4.Nverify4res{Status: st} + } + return nfsv4.Nverify4res{Status: nfsv4.NFS4_OK} +} + +func (s *compoundState) opOpen(args *nfsv4.Open4args) nfsv4.Open4res { + var ll leavesToClose + defer ll.closeAll() + + p := s.program + p.enter() + defer p.leave() + + openOwnerKey := string(args.Owner.Owner) + var oos *openOwnerState + for { + // Obtain confirmed client state. + confirmedClient, st := p.getConfirmedClientByShortID(args.Owner.Clientid) + if st != nfsv4.NFS4_OK { + return &nfsv4.Open4res_default{Status: st} + } + + var ok bool + oos, ok = confirmedClient.openOwners[openOwnerKey] + if !ok { + // Open-owner has never been seen before. 
Create
+			// a new one that is in the unconfirmed state.
+			oos = &openOwnerState{
+				confirmedClient: confirmedClient,
+				key:             openOwnerKey,
+				filesByHandle:   map[string]*openOwnerFileState{},
+			}
+			confirmedClient.openOwners[openOwnerKey] = oos
+			baseProgramOpenOwnersCreated.Inc()
+		}
+
+		if oos.waitForCurrentTransactionCompletion(p) {
+			break
+		}
+	}
+
+	transaction, lastResponse, st := oos.startTransaction(p, args.Seqid, &ll, unconfirmedOpenOwnerPolicyReinitialize)
+	if st != nfsv4.NFS4_OK {
+		if r, ok := lastResponse.(nfsv4.Open4res); ok {
+			// Last call was also an OPEN. Return cached response.
+			return r
+		}
+		return &nfsv4.Open4res_default{Status: st}
+	}
+	response := s.txOpen(args, oos)
+	transaction.complete(&openOwnerLastResponse{
+		response: response,
+	})
+	return response
+}
+
+func (s *compoundState) txOpen(args *nfsv4.Open4args, oos *openOwnerState) nfsv4.Open4res {
+	// Drop the lock, as VirtualOpenChild may block. This is safe to
+	// do within open-owner transactions.
+	p := s.program
+	p.leave()
+	isLocked := false
+	defer func() {
+		if !isLocked {
+			p.enter()
+		}
+	}()
+
+	currentDirectory, st := s.currentFileHandle.getDirectory()
+	if st != nfsv4.NFS4_OK {
+		return &nfsv4.Open4res_default{Status: st}
+	}
+
+	// Convert share_* fields.
+	shareAccess, st := shareAccessToShareMask(args.ShareAccess)
+	if st != nfsv4.NFS4_OK {
+		return &nfsv4.Open4res_default{Status: st}
+	}
+
+	// As with most UNIX-like systems, we don't support share_deny.
+	// Only permit this field to be set to OPEN4_SHARE_DENY_NONE,
+	// behaving as if there's an implicit OPEN4_SHARE_ACCESS_BOTH on
+	// all files.
+	//
+	// More details: RFC 7530, section 16.16.5, paragraph 6.
+ switch args.ShareDeny { + case nfsv4.OPEN4_SHARE_DENY_NONE: + case nfsv4.OPEN4_SHARE_DENY_READ, nfsv4.OPEN4_SHARE_DENY_WRITE, nfsv4.OPEN4_SHARE_DENY_BOTH: + return &nfsv4.Open4res_default{Status: nfsv4.NFS4ERR_SHARE_DENIED} + default: + return &nfsv4.Open4res_default{Status: nfsv4.NFS4ERR_INVAL} + } + + // Convert openhow. + var createAttributes *virtual.Attributes + var existingOptions *virtual.OpenExistingOptions + if openHow, ok := args.Openhow.(*nfsv4.Openflag4_OPEN4_CREATE); ok { + createAttributes = &virtual.Attributes{} + switch how := openHow.How.(type) { + case *nfsv4.Createhow4_UNCHECKED4: + // Create a file, allowing the file to already exist. + if st := fattr4ToAttributes(&how.Createattrs, createAttributes); st != nfsv4.NFS4_OK { + return &nfsv4.Open4res_default{Status: st} + } + existingOptions = &virtual.OpenExistingOptions{} + if sizeBytes, ok := createAttributes.GetSizeBytes(); ok && sizeBytes == 0 { + existingOptions.Truncate = true + } + case *nfsv4.Createhow4_GUARDED4: + // Create a file, disallowing the file to already exist. + if st := fattr4ToAttributes(&how.Createattrs, createAttributes); st != nfsv4.NFS4_OK { + return &nfsv4.Open4res_default{Status: st} + } + case *nfsv4.Createhow4_EXCLUSIVE4: + // Create a file, allowing the file to exist if + // it was created by a previous call that + // provided the same verifier. + // TODO: Implement this! + default: + return &nfsv4.Open4res_default{Status: nfsv4.NFS4ERR_INVAL} + } + } else { + // Don't create a new file. Only open an existing file. + existingOptions = &virtual.OpenExistingOptions{} + } + + // Convert claim. As we don't support delegations, we can only + // meaningfully support CLAIM_NULL. 
+ var name path.Component + switch claim := args.Claim.(type) { + case *nfsv4.OpenClaim4_CLAIM_NULL: + var st nfsv4.Nfsstat4 + name, st = nfsv4NewComponent(claim.File) + if st != nfsv4.NFS4_OK { + return &nfsv4.Open4res_default{Status: st} + } + case *nfsv4.OpenClaim4_CLAIM_PREVIOUS, *nfsv4.OpenClaim4_CLAIM_DELEGATE_CUR: + return &nfsv4.Open4res_default{Status: nfsv4.NFS4ERR_RECLAIM_BAD} + case *nfsv4.OpenClaim4_CLAIM_DELEGATE_PREV: + return &nfsv4.Open4res_default{Status: nfsv4.NFS4ERR_NOTSUPP} + default: + return &nfsv4.Open4res_default{Status: nfsv4.NFS4ERR_INVAL} + } + + // Open the file. + var attributes virtual.Attributes + leaf, respected, changeInfo, vs := currentDirectory.VirtualOpenChild( + name, + shareAccess, + createAttributes, + existingOptions, + virtual.AttributesMaskFileHandle, + &attributes) + if vs != virtual.StatusOK { + return &nfsv4.Open4res_default{Status: toNFSv4Status(vs)} + } + + handle := attributes.GetFileHandle() + handleKey := string(handle) + + s.currentFileHandle = fileHandle{ + handle: handle, + leaf: leaf, + } + + response := &nfsv4.Open4res_NFS4_OK{ + Resok4: nfsv4.Open4resok{ + Cinfo: toNFSv4ChangeInfo(&changeInfo), + Rflags: nfsv4.OPEN4_RESULT_LOCKTYPE_POSIX, + Attrset: attributesMaskToBitmap4(respected), + Delegation: &nfsv4.OpenDelegation4_OPEN_DELEGATE_NONE{}, + }, + } + + p.enter() + isLocked = true + + oofs, ok := oos.filesByHandle[handleKey] + if ok { + // This file has already been opened by this open-owner, + // meaning we should upgrade the open file. Increase + // closeCount, so that the file is released a sufficient + // number of times upon last close. + // + // More details: RFC 7530, section 9.11. + oofs.shareAccess |= shareAccess + oofs.stateID.seqID = nextSeqID(oofs.stateID.seqID) + if oofs.closeCount == 0 { + panic("Attempted to use file that has already been closed. 
It should have been removed before this transaction started.") + } + oofs.closeCount++ + } else { + openedFile, ok := p.openedFilesByHandle[handleKey] + if ok { + openedFile.openOwnersCount.increase() + } else { + // This file has not been opened by any + // open-owner. Keep track of it, so that we + // don't need to call into HandleResolver. This + // ensures that the file remains accessible + // while opened, even when unlinked. + openedFile = &openedFileState{ + handle: handle, + handleKey: handleKey, + leaf: leaf, + openOwnersCount: 1, + } + openedFile.locks.Initialize() + p.openedFilesByHandle[handleKey] = openedFile + } + + // This file has not been opened by this open-owner. + // Create a new state ID. + oofs = &openOwnerFileState{ + openOwner: oos, + openedFile: openedFile, + shareAccess: shareAccess, + stateID: p.newRegularStateID(1), + closeCount: 1, + useCount: 1, + lockOwnerFiles: map[*lockOwnerState]*lockOwnerFileState{}, + } + oos.filesByHandle[handleKey] = oofs + p.openOwnerFilesByOther[oofs.stateID.other] = oofs + } + + response.Resok4.Stateid = p.externalizeStateID(oofs.stateID) + if !oos.confirmed { + // The first time that this open-owner is used. Request + // that the caller issues an OPEN_CONFIRM operation. + response.Resok4.Rflags |= nfsv4.OPEN4_RESULT_CONFIRM + } + return response +} + +func (s *compoundState) opOpenattr(args *nfsv4.Openattr4args) nfsv4.Openattr4res { + // This implementation does not support named attributes. 
+ if _, _, st := s.currentFileHandle.getNode(); st != nfsv4.NFS4_OK { + return nfsv4.Openattr4res{Status: st} + } + return nfsv4.Openattr4res{Status: nfsv4.NFS4ERR_NOTSUPP} +} + +func (s *compoundState) opOpenConfirm(args *nfsv4.OpenConfirm4args) nfsv4.OpenConfirm4res { + p := s.program + openStateID, st := p.internalizeRegularStateID(&args.OpenStateid) + if st != nfsv4.NFS4_OK { + return &nfsv4.OpenConfirm4res_default{Status: st} + } + + var ll leavesToClose + defer ll.closeAll() + + p.enter() + defer p.leave() + + oos, st := p.getOpenOwnerByOtherForTransaction(openStateID.other) + if st != nfsv4.NFS4_OK { + return &nfsv4.OpenConfirm4res_default{Status: st} + } + transaction, lastResponse, st := oos.startTransaction(p, args.Seqid, &ll, unconfirmedOpenOwnerPolicyAllow) + if st != nfsv4.NFS4_OK { + if r, ok := lastResponse.(nfsv4.OpenConfirm4res); ok { + return r + } + return &nfsv4.OpenConfirm4res_default{Status: st} + } + response := s.txOpenConfirm(openStateID, &ll) + transaction.complete(&openOwnerLastResponse{ + response: response, + }) + return response +} + +func (s *compoundState) txOpenConfirm(openStateID regularStateID, ll *leavesToClose) nfsv4.OpenConfirm4res { + oofs, st := s.getOpenOwnerFileByStateID(openStateID, true) + if st != nfsv4.NFS4_OK { + return &nfsv4.OpenConfirm4res_default{Status: st} + } + oofs.openOwner.confirmed = true + oofs.stateID.seqID = nextSeqID(oofs.stateID.seqID) + + p := s.program + return &nfsv4.OpenConfirm4res_NFS4_OK{ + Resok4: nfsv4.OpenConfirm4resok{ + OpenStateid: p.externalizeStateID(oofs.stateID), + }, + } +} + +func (s *compoundState) opOpenDowngrade(args *nfsv4.OpenDowngrade4args) nfsv4.OpenDowngrade4res { + p := s.program + openStateID, st := p.internalizeRegularStateID(&args.OpenStateid) + if st != nfsv4.NFS4_OK { + return &nfsv4.OpenDowngrade4res_default{Status: st} + } + + var ll leavesToClose + defer ll.closeAll() + + p.enter() + defer p.leave() + + oos, st := p.getOpenOwnerByOtherForTransaction(openStateID.other) 
+ if st != nfsv4.NFS4_OK { + return &nfsv4.OpenDowngrade4res_default{Status: st} + } + transaction, lastResponse, st := oos.startTransaction(p, args.Seqid, &ll, unconfirmedOpenOwnerPolicyDeny) + if st != nfsv4.NFS4_OK { + if r, ok := lastResponse.(nfsv4.OpenDowngrade4res); ok { + return r + } + return &nfsv4.OpenDowngrade4res_default{Status: st} + } + response := s.txOpenDowngrade(args, openStateID) + transaction.complete(&openOwnerLastResponse{ + response: response, + }) + return response +} + +func (s *compoundState) txOpenDowngrade(args *nfsv4.OpenDowngrade4args, openStateID regularStateID) nfsv4.OpenDowngrade4res { + oofs, st := s.getOpenOwnerFileByStateID(openStateID, false) + if st != nfsv4.NFS4_OK { + return &nfsv4.OpenDowngrade4res_default{Status: st} + } + + shareAccess, st := shareAccessToShareMask(args.ShareAccess) + if st != nfsv4.NFS4_OK { + return &nfsv4.OpenDowngrade4res_default{Status: st} + } + if shareAccess&^oofs.shareAccess != 0 || args.ShareDeny != nfsv4.OPEN4_SHARE_DENY_NONE { + // Attempted to upgrade. The client should have called OPEN. + // + // More details: RFC 7530, section 16.19.4, paragraph 2. + return &nfsv4.OpenDowngrade4res_default{Status: nfsv4.NFS4ERR_INVAL} + } + + // We don't actually reopen/downgrade the underlying virtual + // file system object. The original access mode may have been + // duplicated into lock state IDs, meaning we may still see + // READ, WRITE and SETATTR operations that assume the original + // access mode. + // + // More details: RFC 7530, section 9.1.6, paragraph 7. 
+ oofs.shareAccess = shareAccess + oofs.stateID.seqID = nextSeqID(oofs.stateID.seqID) + + p := s.program + return &nfsv4.OpenDowngrade4res_NFS4_OK{ + Resok4: nfsv4.OpenDowngrade4resok{ + OpenStateid: p.externalizeStateID(oofs.stateID), + }, + } +} + +func (s *compoundState) opPutfh(args *nfsv4.Putfh4args) nfsv4.Putfh4res { + p := s.program + p.enter() + if openedFile, ok := p.openedFilesByHandle[string(args.Object)]; ok { + // File is opened at least once. Return this copy, so + // that we're guaranteed to work, even if the file has + // been removed from the file system. + s.currentFileHandle = fileHandle{ + handle: openedFile.handle, + leaf: openedFile.leaf, + } + p.leave() + } else { + // File is currently not open. Call into the handle + // resolver to do a lookup. + p.leave() + directory, leaf, vs := p.handleResolver(bytes.NewBuffer(args.Object)) + if vs != virtual.StatusOK { + return nfsv4.Putfh4res{Status: toNFSv4Status(vs)} + } + s.currentFileHandle = fileHandle{ + handle: args.Object, + directory: directory, + leaf: leaf, + } + } + return nfsv4.Putfh4res{Status: nfsv4.NFS4_OK} +} + +func (s *compoundState) opPutpubfh() nfsv4.Putpubfh4res { + p := s.program + s.currentFileHandle = p.rootFileHandle + return nfsv4.Putpubfh4res{Status: nfsv4.NFS4_OK} +} + +func (s *compoundState) opPutrootfh() nfsv4.Putrootfh4res { + p := s.program + s.currentFileHandle = p.rootFileHandle + return nfsv4.Putrootfh4res{Status: nfsv4.NFS4_OK} +} + +func (s *compoundState) opRead(args *nfsv4.Read4args) nfsv4.Read4res { + currentLeaf, cleanup, st := s.getOpenedLeaf(&args.Stateid, virtual.ShareMaskRead) + if st != nfsv4.NFS4_OK { + return &nfsv4.Read4res_default{Status: st} + } + defer cleanup() + + buf := make([]byte, args.Count) + n, eof, vs := currentLeaf.VirtualRead(buf, args.Offset) + if vs != virtual.StatusOK { + return &nfsv4.Read4res_default{Status: toNFSv4Status(vs)} + } + return &nfsv4.Read4res_NFS4_OK{ + Resok4: nfsv4.Read4resok{ + Eof: eof, + Data: buf[:n], + }, + } +} + 
+func (s *compoundState) opReaddir(args *nfsv4.Readdir4args) nfsv4.Readdir4res { + currentDirectory, st := s.currentFileHandle.getDirectory() + if st != nfsv4.NFS4_OK { + return &nfsv4.Readdir4res_default{Status: st} + } + + // Validate the cookie verifier. + p := s.program + if args.Cookie != 0 && args.Cookieverf != p.rebootVerifier { + return &nfsv4.Readdir4res_default{Status: nfsv4.NFS4ERR_NOT_SAME} + } + + // Restore read offset. + firstCookie := uint64(0) + if args.Cookie > lastReservedCookie { + firstCookie = args.Cookie - lastReservedCookie + } + + // Empty response. + res := nfsv4.Readdir4res_NFS4_OK{ + Resok4: nfsv4.Readdir4resok{ + Cookieverf: p.rebootVerifier, + Reply: nfsv4.Dirlist4{ + Eof: true, + }, + }, + } + + // Attach entries. + reporter := readdirReporter{ + program: p, + attrRequest: args.AttrRequest, + maxCount: args.Maxcount, + dirCount: args.Dircount, + + currentMaxCount: nfsv4.Count4(res.Resok4.GetEncodedSizeBytes()), + nextEntry: &res.Resok4.Reply.Entries, + endOfFile: &res.Resok4.Reply.Eof, + } + if vs := currentDirectory.VirtualReadDir( + firstCookie, + attrRequestToAttributesMask(args.AttrRequest), + &reporter, + ); vs != virtual.StatusOK { + return &nfsv4.Readdir4res_default{Status: toNFSv4Status(vs)} + } + if res.Resok4.Reply.Entries == nil && !res.Resok4.Reply.Eof { + // Not enough space to store a single entry. 
+ return &nfsv4.Readdir4res_default{Status: nfsv4.NFS4ERR_TOOSMALL} + } + return &res +} + +func (s *compoundState) opReadlink() nfsv4.Readlink4res { + currentLeaf, st := s.currentFileHandle.getLeaf() + if st != nfsv4.NFS4_OK { + if st == nfsv4.NFS4ERR_ISDIR { + return &nfsv4.Readlink4res_default{Status: nfsv4.NFS4ERR_INVAL} + } + return &nfsv4.Readlink4res_default{Status: st} + } + target, vs := currentLeaf.VirtualReadlink() + if vs != virtual.StatusOK { + return &nfsv4.Readlink4res_default{Status: toNFSv4Status(vs)} + } + return &nfsv4.Readlink4res_NFS4_OK{ + Resok4: nfsv4.Readlink4resok{ + Link: target, + }, + } +} + +func (s *compoundState) opReleaseLockowner(args *nfsv4.ReleaseLockowner4args) nfsv4.ReleaseLockowner4res { + p := s.program + p.enter() + defer p.leave() + + confirmedClient, st := p.getConfirmedClientByShortID(args.LockOwner.Clientid) + if st != nfsv4.NFS4_OK { + return nfsv4.ReleaseLockowner4res{Status: st} + } + confirmedClient.confirmation.hold(p) + defer confirmedClient.confirmation.release(p) + + lockOwnerKey := string(args.LockOwner.Owner) + if los, ok := confirmedClient.lockOwners[lockOwnerKey]; ok { + // Check whether any of the files associated with this + // lock-owner still have locks held. In that case the + // client should call LOCKU first. + // + // More details: RFC 7530, section 16.37.4, last sentence. + for _, lofs := range los.files { + if lofs.lockCount > 0 { + return nfsv4.ReleaseLockowner4res{Status: nfsv4.NFS4ERR_LOCKS_HELD} + } + } + + // None of the files have locks held. Remove the state + // associated with all files. The final call to remove() + // will also remove the lock-owner state. 
+		for len(los.files) > 0 {
+			los.files[len(los.files)-1].remove(p)
+		}
+		if _, ok := confirmedClient.lockOwners[lockOwnerKey]; ok {
+			panic("Removing all lock-owner files did not remove lock-owner")
+		}
+	}
+
+	return nfsv4.ReleaseLockowner4res{Status: nfsv4.NFS4_OK}
+}
+
+// opRename renames a file from the saved file handle's directory
+// (Oldname) into the current file handle's directory (Newname).
+func (s *compoundState) opRename(args *nfsv4.Rename4args) nfsv4.Rename4res {
+	oldDirectory, st := s.savedFileHandle.getDirectory()
+	if st != nfsv4.NFS4_OK {
+		return &nfsv4.Rename4res_default{Status: st}
+	}
+	oldName, st := nfsv4NewComponent(args.Oldname)
+	if st != nfsv4.NFS4_OK {
+		return &nfsv4.Rename4res_default{Status: st}
+	}
+	newDirectory, st := s.currentFileHandle.getDirectory()
+	if st != nfsv4.NFS4_OK {
+		return &nfsv4.Rename4res_default{Status: st}
+	}
+	newName, st := nfsv4NewComponent(args.Newname)
+	if st != nfsv4.NFS4_OK {
+		// Propagate the status reported by nfsv4NewComponent,
+		// just like for Oldname above. Hardcoding
+		// NFS4ERR_BADNAME here would mask other name
+		// validation errors.
+		return &nfsv4.Rename4res_default{Status: st}
+	}
+
+	oldChangeInfo, newChangeInfo, vs := oldDirectory.VirtualRename(oldName, newDirectory, newName)
+	if vs != virtual.StatusOK {
+		return &nfsv4.Rename4res_default{Status: toNFSv4Status(vs)}
+	}
+	return &nfsv4.Rename4res_NFS4_OK{
+		Resok4: nfsv4.Rename4resok{
+			SourceCinfo: toNFSv4ChangeInfo(&oldChangeInfo),
+			TargetCinfo: toNFSv4ChangeInfo(&newChangeInfo),
+		},
+	}
+}
+
+// opRemove unlinks a file or directory from the current directory.
+func (s *compoundState) opRemove(args *nfsv4.Remove4args) nfsv4.Remove4res {
+	currentDirectory, st := s.currentFileHandle.getDirectory()
+	if st != nfsv4.NFS4_OK {
+		return &nfsv4.Remove4res_default{Status: st}
+	}
+	name, st := nfsv4NewComponent(args.Target)
+	if st != nfsv4.NFS4_OK {
+		return &nfsv4.Remove4res_default{Status: st}
+	}
+
+	changeInfo, vs := currentDirectory.VirtualRemove(name, true, true)
+	if vs != virtual.StatusOK {
+		return &nfsv4.Remove4res_default{Status: toNFSv4Status(vs)}
+	}
+	return &nfsv4.Remove4res_NFS4_OK{
+		Resok4: nfsv4.Remove4resok{
+			Cinfo: toNFSv4ChangeInfo(&changeInfo),
+		},
+	}
+}
+
+func (s *compoundState) opRenew(args *nfsv4.Renew4args) nfsv4.Renew4res {
+	p := s.program
+	p.enter()
+ defer p.leave() + + confirmedClient, st := p.getConfirmedClientByShortID(args.Clientid) + if st != nfsv4.NFS4_OK { + return nfsv4.Renew4res{Status: st} + } + + // Hold and release the client, so that the time at which the + // client gets garbage collected is extended. + confirmedClient.confirmation.hold(p) + defer confirmedClient.confirmation.release(p) + + return nfsv4.Renew4res{Status: nfsv4.NFS4_OK} +} + +func (s *compoundState) opRestorefh() nfsv4.Restorefh4res { + if s.savedFileHandle.directory == nil && s.savedFileHandle.leaf == nil { + return nfsv4.Restorefh4res{Status: nfsv4.NFS4ERR_RESTOREFH} + } + s.currentFileHandle = s.savedFileHandle + return nfsv4.Restorefh4res{Status: nfsv4.NFS4_OK} +} + +func (s *compoundState) opSavefh() nfsv4.Savefh4res { + _, _, st := s.currentFileHandle.getNode() + if st == nfsv4.NFS4_OK { + s.savedFileHandle = s.currentFileHandle + } + return nfsv4.Savefh4res{Status: st} +} + +func (s *compoundState) opSecinfo(args *nfsv4.Secinfo4args) nfsv4.Secinfo4res { + // The standard states that the SECINFO operation is expected to + // be used by the NFS client when the error value of + // NFS4ERR_WRONGSEC is returned from another NFS operation. In + // practice, we even see it being called if no such error was + // returned. + // + // Because this NFS server is intended to be used for loopback + // purposes only, simply announce the use of AUTH_NONE. 
+ currentDirectory, st := s.currentFileHandle.getDirectory() + if st != nfsv4.NFS4_OK { + return &nfsv4.Secinfo4res_default{Status: st} + } + name, st := nfsv4NewComponent(args.Name) + if st != nfsv4.NFS4_OK { + return &nfsv4.Secinfo4res_default{Status: st} + } + if _, _, vs := currentDirectory.VirtualLookup(name, 0, &virtual.Attributes{}); vs != virtual.StatusOK { + return &nfsv4.Secinfo4res_default{Status: toNFSv4Status(vs)} + } + return &nfsv4.Secinfo4res_NFS4_OK{ + Resok4: []nfsv4.Secinfo4{ + &nfsv4.Secinfo4_default{ + Flavor: rpcv2.AUTH_NONE, + }, + }, + } +} + +func (s *compoundState) opSetattr(args *nfsv4.Setattr4args) nfsv4.Setattr4res { + // TODO: Respect the state ID, if provided! + currentNode, _, st := s.currentFileHandle.getNode() + if st != nfsv4.NFS4_OK { + return nfsv4.Setattr4res{Status: st} + } + var attributes virtual.Attributes + if st := fattr4ToAttributes(&args.ObjAttributes, &attributes); st != nfsv4.NFS4_OK { + return nfsv4.Setattr4res{Status: st} + } + if vs := currentNode.VirtualSetAttributes(&attributes, 0, &virtual.Attributes{}); vs != virtual.StatusOK { + return nfsv4.Setattr4res{Status: toNFSv4Status(vs)} + } + return nfsv4.Setattr4res{ + Status: st, + Attrsset: args.ObjAttributes.Attrmask, + } +} + +func (s *compoundState) opSetclientid(args *nfsv4.Setclientid4args) nfsv4.Setclientid4res { + p := s.program + p.enter() + defer p.leave() + + // As we don't care about using the client callback, our + // implementation of SETCLIENTID can be a lot simpler than + // what's described by the spec. SETCLIENTID can normally be + // used to update the client callback as well, which is + // something we don't need to care about. + + longID := string(args.Client.Id) + clientVerifier := args.Client.Verifier + client, ok := p.clientsByLongID[longID] + if !ok { + // Client has not been observed before. Create it. 
+ client = &clientState{ + longID: longID, + confirmationsByClientVerifier: map[nfsv4.Verifier4]*clientConfirmationState{}, + } + p.clientsByLongID[longID] = client + } + + confirmation, ok := client.confirmationsByClientVerifier[clientVerifier] + if !ok { + // Create a new confirmation record for SETCLIENTID_CONFIRM. + confirmation = &clientConfirmationState{ + client: client, + clientVerifier: clientVerifier, + key: clientConfirmationKey{ + shortClientID: p.randomNumberGenerator.Uint64(), + }, + } + p.randomNumberGenerator.Read(confirmation.key.serverVerifier[:]) + client.confirmationsByClientVerifier[clientVerifier] = confirmation + p.clientConfirmationsByKey[confirmation.key] = confirmation + p.clientConfirmationsByShortID[confirmation.key.shortClientID] = confirmation + confirmation.insertIntoIdleList(p) + } + + return &nfsv4.Setclientid4res_NFS4_OK{ + Resok4: nfsv4.Setclientid4resok{ + Clientid: confirmation.key.shortClientID, + SetclientidConfirm: confirmation.key.serverVerifier, + }, + } +} + +func (s *compoundState) opSetclientidConfirm(args *nfsv4.SetclientidConfirm4args) nfsv4.SetclientidConfirm4res { + var ll leavesToClose + defer ll.closeAll() + + p := s.program + p.enter() + defer p.leave() + + key := clientConfirmationKey{ + shortClientID: args.Clientid, + serverVerifier: args.SetclientidConfirm, + } + confirmation, ok := p.clientConfirmationsByKey[key] + if !ok { + return nfsv4.SetclientidConfirm4res{Status: nfsv4.NFS4ERR_STALE_CLIENTID} + } + + client := confirmation.client + if confirmedClient := client.confirmed; confirmedClient == nil || confirmedClient.confirmation != confirmation { + // Client record has not been confirmed yet. + confirmation.hold(p) + defer confirmation.release(p) + + if confirmedClient != nil { + // The client has another confirmed entry. + // Remove all state, such as open files and locks. 
+ oldConfirmation := confirmedClient.confirmation + if oldConfirmation.holdCount > 0 { + // The client is currently running one + // or more blocking operations. This + // prevents us from closing files and + // releasing locks. + return nfsv4.SetclientidConfirm4res{Status: nfsv4.NFS4ERR_DELAY} + } + oldConfirmation.remove(p, &ll) + } + + if client.confirmed != nil { + panic("Attempted to replace confirmed client record") + } + client.confirmed = &confirmedClientState{ + confirmation: confirmation, + openOwners: map[string]*openOwnerState{}, + lockOwners: map[string]*lockOwnerState{}, + } + } + + return nfsv4.SetclientidConfirm4res{Status: nfsv4.NFS4_OK} +} + +func (s *compoundState) opWrite(args *nfsv4.Write4args) nfsv4.Write4res { + currentLeaf, cleanup, st := s.getOpenedLeaf(&args.Stateid, virtual.ShareMaskWrite) + if st != nfsv4.NFS4_OK { + return &nfsv4.Write4res_default{Status: st} + } + defer cleanup() + + n, vs := currentLeaf.VirtualWrite(args.Data, args.Offset) + if vs != virtual.StatusOK { + return &nfsv4.Write4res_default{Status: toNFSv4Status(vs)} + } + return &nfsv4.Write4res_NFS4_OK{ + Resok4: nfsv4.Write4resok{ + Count: nfsv4.Count4(n), + Committed: nfsv4.FILE_SYNC4, + Writeverf: s.program.rebootVerifier, + }, + } +} + +func (s *compoundState) opVerify(args *nfsv4.Verify4args) nfsv4.Verify4res { + if st := s.verifyAttributes(&args.ObjAttributes); st != nfsv4.NFS4ERR_SAME { + return nfsv4.Verify4res{Status: st} + } + return nfsv4.Verify4res{Status: nfsv4.NFS4_OK} +} + +// comparingWriter is an io.Writer that merely compares data that is +// written to a reference value. +type comparingWriter struct { + reference []byte + status nfsv4.Nfsstat4 +} + +func (w *comparingWriter) Write(p []byte) (int, error) { + if w.status == nfsv4.NFS4ERR_SAME { + if len(p) > len(w.reference) { + if bytes.Equal(p[:len(w.reference)], w.reference) { + // Reference value is a prefix of the provided + // data. With XDR this is never possible. 
+ *w = comparingWriter{status: nfsv4.NFS4ERR_BADXDR} + } else { + *w = comparingWriter{status: nfsv4.NFS4ERR_NOT_SAME} + } + } else { + if bytes.Equal(p, w.reference[:len(p)]) { + w.reference = w.reference[len(p):] + } else { + *w = comparingWriter{status: nfsv4.NFS4ERR_NOT_SAME} + } + } + } + return len(p), nil +} + +type referenceCount int + +func (rc *referenceCount) increase() { + if *rc <= 0 { + panic("Attempted to increase zero reference count") + } + (*rc)++ +} + +func (rc *referenceCount) decrease() bool { + if *rc <= 0 { + panic("Attempted to decrease zero reference count") + } + (*rc)-- + return *rc == 0 +} + +// fileHandle contains information on the current or saved file handle +// that is tracked in a COMPOUND procedure. +type fileHandle struct { + handle nfsv4.NfsFh4 + directory virtual.Directory + leaf virtual.Leaf +} + +func (fh *fileHandle) getNode() (virtual.Node, bool, nfsv4.Nfsstat4) { + if fh.directory != nil { + return fh.directory, true, nfsv4.NFS4_OK + } + if fh.leaf != nil { + return fh.leaf, false, nfsv4.NFS4_OK + } + return nil, false, nfsv4.NFS4ERR_NOFILEHANDLE +} + +func (fh *fileHandle) getDirectory() (virtual.Directory, nfsv4.Nfsstat4) { + if fh.directory != nil { + return fh.directory, nfsv4.NFS4_OK + } + if fh.leaf != nil { + return nil, nfsv4.NFS4ERR_NOTDIR + } + return nil, nfsv4.NFS4ERR_NOFILEHANDLE +} + +func (fh *fileHandle) getDirectoryOrSymlink() (virtual.Directory, nfsv4.Nfsstat4) { + if fh.directory != nil { + return fh.directory, nfsv4.NFS4_OK + } + if fh.leaf != nil { + // This call requires that we return NFS4ERR_SYMLINK if + // we stumble upon a symlink. That way the client knows + // that symlink expansion needs to be performed. 
+ var attributes virtual.Attributes + fh.leaf.VirtualGetAttributes(virtual.AttributesMaskFileType, &attributes) + if attributes.GetFileType() == filesystem.FileTypeSymlink { + return nil, nfsv4.NFS4ERR_SYMLINK + } + return nil, nfsv4.NFS4ERR_NOTDIR + } + return nil, nfsv4.NFS4ERR_NOFILEHANDLE +} + +func (fh *fileHandle) getLeaf() (virtual.Leaf, nfsv4.Nfsstat4) { + if fh.leaf != nil { + return fh.leaf, nfsv4.NFS4_OK + } + if fh.directory != nil { + return nil, nfsv4.NFS4ERR_ISDIR + } + return nil, nfsv4.NFS4ERR_NOFILEHANDLE +} + +// toNFSv4Status converts a status code returned by the virtual file +// system to its NFSv4 equivalent. +func toNFSv4Status(s virtual.Status) nfsv4.Nfsstat4 { + switch s { + case virtual.StatusErrAccess: + return nfsv4.NFS4ERR_ACCESS + case virtual.StatusErrBadHandle: + return nfsv4.NFS4ERR_BADHANDLE + case virtual.StatusErrExist: + return nfsv4.NFS4ERR_EXIST + case virtual.StatusErrInval: + return nfsv4.NFS4ERR_INVAL + case virtual.StatusErrIO: + return nfsv4.NFS4ERR_IO + case virtual.StatusErrIsDir: + return nfsv4.NFS4ERR_ISDIR + case virtual.StatusErrNoEnt: + return nfsv4.NFS4ERR_NOENT + case virtual.StatusErrNotDir: + return nfsv4.NFS4ERR_NOTDIR + case virtual.StatusErrNotEmpty: + return nfsv4.NFS4ERR_NOTEMPTY + case virtual.StatusErrNXIO: + return nfsv4.NFS4ERR_NXIO + case virtual.StatusErrPerm: + return nfsv4.NFS4ERR_PERM + case virtual.StatusErrROFS: + return nfsv4.NFS4ERR_ROFS + case virtual.StatusErrStale: + return nfsv4.NFS4ERR_STALE + case virtual.StatusErrSymlink: + return nfsv4.NFS4ERR_SYMLINK + case virtual.StatusErrXDev: + return nfsv4.NFS4ERR_XDEV + default: + panic("Unknown status") + } +} + +// toNFSv4ChangeInfo converts directory change information returned by +// the virtual file system to its NFSv4 equivalent. 
+func toNFSv4ChangeInfo(changeInfo *virtual.ChangeInfo) nfsv4.ChangeInfo4 { + // Implementations of virtual.Directory should make sure that + // mutations are implemented atomically, so it's safe to report + // the operation as being atomic. + return nfsv4.ChangeInfo4{ + Atomic: true, + Before: changeInfo.Before, + After: changeInfo.After, + } +} + +// clientState keeps track of all state corresponding to a single +// client. For every client we track one or more confirmations that can +// be completed using SETCLIENTID_CONFIRM. If SETCLIENTID_CONFIRM is +// called at least once, we track a confirmed client state. +type clientState struct { + longID string + + confirmationsByClientVerifier map[nfsv4.Verifier4]*clientConfirmationState + confirmed *confirmedClientState +} + +// clientConfirmationState keeps track of all state corresponding to a +// single client confirmation record created through SETCLIENTID. +type clientConfirmationState struct { + client *clientState + clientVerifier nfsv4.Verifier4 + key clientConfirmationKey + + nextIdle *clientConfirmationState + previousIdle *clientConfirmationState + lastSeen time.Time + holdCount int +} + +// removeFromIdleList removes the client confirmation from the list of +// clients that are currently not performing any operations against the +// server. +func (ccs *clientConfirmationState) removeFromIdleList() { + ccs.previousIdle.nextIdle = ccs.nextIdle + ccs.nextIdle.previousIdle = ccs.previousIdle + ccs.previousIdle = nil + ccs.nextIdle = nil +} + +// insertIntoIdleList inserts the client confirmation into the list of +// clients that are currently not performing any operations against the +// server. 
+func (ccs *clientConfirmationState) insertIntoIdleList(p *baseProgram) { + ccs.previousIdle = p.idleClientConfirmations.previousIdle + ccs.nextIdle = &p.idleClientConfirmations + ccs.previousIdle.nextIdle = ccs + ccs.nextIdle.previousIdle = ccs + ccs.lastSeen = p.now +} + +// hold the client confirmation in such a way that it's not garbage +// collected. This needs to be called prior to performing a blocking +// operation. +func (ccs *clientConfirmationState) hold(p *baseProgram) { + if ccs.holdCount == 0 { + ccs.removeFromIdleList() + } + ccs.holdCount++ +} + +// release the client confirmation in such a way that it may be garbage +// collected. +func (ccs *clientConfirmationState) release(p *baseProgram) { + if ccs.holdCount == 0 { + panic("Attempted to decrease zero hold count") + } + ccs.holdCount-- + if ccs.holdCount == 0 { + ccs.insertIntoIdleList(p) + } +} + +// remove the client confirmation. If the client was confirmed through +// SETCLIENTID_CONFIRM, all open files and acquired locks will be +// released. +func (ccs *clientConfirmationState) remove(p *baseProgram, ll *leavesToClose) { + if ccs.holdCount != 0 { + panic("Attempted to remove a client confirmation that was running one or more blocking operations") + } + + client := ccs.client + confirmedClient := client.confirmed + if confirmedClient != nil && confirmedClient.confirmation == ccs { + // This client confirmation record was confirmed, + // meaning that removing it should also close all opened + // files and release all locks. + for _, oos := range confirmedClient.openOwners { + oos.remove(p, ll) + } + if len(confirmedClient.lockOwners) != 0 { + panic("Removing open-owners should have removed lock-owners as well") + } + client.confirmed = nil + } + + // Remove the client confirmation. 
+ delete(client.confirmationsByClientVerifier, ccs.clientVerifier) + delete(p.clientConfirmationsByKey, ccs.key) + delete(p.clientConfirmationsByShortID, ccs.key.shortClientID) + ccs.removeFromIdleList() + + // Remove the client if it no longer contains any confirmations. + if len(client.confirmationsByClientVerifier) == 0 { + delete(p.clientsByLongID, client.longID) + } +} + +// confirmedClientState stores all state for a client that has been +// confirmed through SETCLIENTID_CONFIRM. +type confirmedClientState struct { + confirmation *clientConfirmationState + openOwners map[string]*openOwnerState + lockOwners map[string]*lockOwnerState +} + +// clientConfirmationKey contains the information that a client must +// provide through SETCLIENTID_CONFIRM to confirm the client's +// registration. +type clientConfirmationKey struct { + shortClientID uint64 + serverVerifier nfsv4.Verifier4 +} + +// openOwnerState stores information on a single open-owner, which is a +// single process running on the client that opens files through the +// mount. +type openOwnerState struct { + confirmedClient *confirmedClientState + key string + + // When not nil, an OPEN or OPEN_CONFIRM operation is in progress. + currentTransactionWait <-chan struct{} + + confirmed bool + lastSeqID nfsv4.Seqid4 + lastResponse *openOwnerLastResponse + filesByHandle map[string]*openOwnerFileState + + // Double linked list for open-owners that are unused. These + // need to be garbage collected after some time, as the client + // does not do that explicitly. + nextUnused *openOwnerState + previousUnused *openOwnerState + lastUsed time.Time +} + +// waitForCurrentTransactionCompletion blocks until any transaction that +// is running right now completes. Because it needs to drop the lock +// while waiting, this method returns a boolean value indicating whether +// it's safe to progress. If not, the caller should retry the lookup of +// the open-owner state. 
+func (oos *openOwnerState) waitForCurrentTransactionCompletion(p *baseProgram) bool { + if wait := oos.currentTransactionWait; wait != nil { + p.leave() + <-wait + p.enter() + return false + } + return true +} + +// forgetLastResponse can be called when the cached response of the last +// transaction needs to be removed. This is at the start of any +// subsequent transaction, or when reinitializing/removing the +// open-owner state. +// +// This method MUST be called before making any mutations to the +// open-owner state, as it also removes resources that were released +// during the previous transaction. +func (oos *openOwnerState) forgetLastResponse(p *baseProgram) { + if oolr := oos.lastResponse; oolr != nil { + oos.lastResponse = nil + if oofs := oolr.closedFile; oofs != nil { + oofs.removeFinalize(p) + } + } +} + +// reinitialize the open-owner state in such a way that no files are +// opened. This method can be called when an unconfirmed open-owner is +// repurposed, or prior to forcefully removing an open-owner. +func (oos *openOwnerState) reinitialize(p *baseProgram, ll *leavesToClose) { + if oos.currentTransactionWait != nil { + panic("Attempted to reinitialize an open-owner while a transaction is in progress") + } + + oos.forgetLastResponse(p) + for _, oofs := range oos.filesByHandle { + oofs.removeStart(p, ll) + oofs.removeFinalize(p) + } +} + +// isUnused returns whether the open-owner state is unused, meaning that +// it should be garbage collected if a sufficient amount of time passes. +func (oos *openOwnerState) isUnused() bool { + return len(oos.filesByHandle) == 0 || + (len(oos.filesByHandle) == 1 && oos.lastResponse != nil && oos.lastResponse.closedFile != nil) || + !oos.confirmed +} + +// removeFromUnusedList removes the open-owner state from the list of +// open-owner states that have no open files or are not confirmed. These +// are garbage collected if a sufficient amount of time passes. 
+func (oos *openOwnerState) removeFromUnusedList() { + oos.previousUnused.nextUnused = oos.nextUnused + oos.nextUnused.previousUnused = oos.previousUnused + oos.previousUnused = nil + oos.nextUnused = nil +} + +// remove the open-owner state. All opened files will be closed. +func (oos *openOwnerState) remove(p *baseProgram, ll *leavesToClose) { + oos.reinitialize(p, ll) + if oos.nextUnused != nil { + oos.removeFromUnusedList() + } + delete(oos.confirmedClient.openOwners, oos.key) + baseProgramOpenOwnersRemoved.Inc() +} + +// unconfirmedOpenOwnerPolicy is an enumeration that describes how +// startTransaction() should behave when called against an open-owner +// that has not been confirmed. +type unconfirmedOpenOwnerPolicy int + +const ( + // Allow the transaction to take place against unconfirmed + // open-owners. This should be used by OPEN_CONFIRM. + unconfirmedOpenOwnerPolicyAllow = iota + // Don't allow the transaction to take place against unconfirmed + // open-owners. This should be used by CLOSE, OPEN_DOWNGRADE, + // etc.. + unconfirmedOpenOwnerPolicyDeny + // Allow the transaction to take place against unconfirmed + // open-owners, but do reinitialize them before progressing. + // This should be used by OPEN, as it should assume that the + // previously sent operation was a replay. + // + // More details: RFC 7530, section 16.18.5, paragraph 5. + unconfirmedOpenOwnerPolicyReinitialize +) + +func (oos *openOwnerState) startTransaction(p *baseProgram, seqID nfsv4.Seqid4, ll *leavesToClose, policy unconfirmedOpenOwnerPolicy) (*openOwnerTransaction, interface{}, nfsv4.Nfsstat4) { + if oos.currentTransactionWait != nil { + panic("Attempted to start a new transaction while another one is in progress") + } + + if lastResponse := oos.lastResponse; lastResponse != nil && seqID == oos.lastSeqID { + // Replay of the last request, meaning we should return + // a cached response. 
This can only be done when the + // type of operation is the same, which is determined by + // the caller. + // + // More details: RFC 7530, section 9.1.9, bullet point 3. + return nil, lastResponse.response, nfsv4.NFS4ERR_BAD_SEQID + } + + if oos.confirmed { + // For confirmed open-owners, only permit operations + // that start the next transaction. + if seqID != nextSeqID(oos.lastSeqID) { + return nil, nil, nfsv4.NFS4ERR_BAD_SEQID + } + } else { + switch policy { + case unconfirmedOpenOwnerPolicyAllow: + if seqID != nextSeqID(oos.lastSeqID) { + return nil, nil, nfsv4.NFS4ERR_BAD_SEQID + } + case unconfirmedOpenOwnerPolicyDeny: + return nil, nil, nfsv4.NFS4ERR_BAD_SEQID + case unconfirmedOpenOwnerPolicyReinitialize: + oos.reinitialize(p, ll) + } + } + + // Start a new transaction. Because the client has sent a + // request with a new sequence ID, we know it will no longer + // attempt to retry the previous operation. Release the last + // response and any state IDs that were closed during the + // previous operation. + oos.forgetLastResponse(p) + wait := make(chan struct{}, 1) + oos.currentTransactionWait = wait + + if oos.nextUnused != nil { + // Prevent garbage collection of the open-owner while + // operation takes place. It will be reinserted upon + // completion of the transaction, if needed. + oos.removeFromUnusedList() + } + + // Prevent garbage collection of the client. + oos.confirmedClient.confirmation.hold(p) + + return &openOwnerTransaction{ + program: p, + state: oos, + seqID: seqID, + wait: wait, + }, nil, nfsv4.NFS4_OK +} + +// openOwnerTransaction is a helper object for completing a transaction +// that was created using startTransaction(). 
+type openOwnerTransaction struct { + program *baseProgram + state *openOwnerState + seqID nfsv4.Seqid4 + wait chan<- struct{} +} + +func (oot *openOwnerTransaction) complete(lastResponse *openOwnerLastResponse) { + close(oot.wait) + oos := oot.state + oos.currentTransactionWait = nil + + if transactionShouldComplete(lastResponse.response.GetStatus()) { + oos.lastSeqID = oot.seqID + oos.lastResponse = lastResponse + } + + p := oot.program + if oos.isUnused() { + // Open-owner should be garbage collected. Insert it + // into the list of open-owners to be removed. + oos.previousUnused = p.unusedOpenOwners.previousUnused + oos.nextUnused = &p.unusedOpenOwners + oos.previousUnused.nextUnused = oos + oos.nextUnused.previousUnused = oos + oos.lastUsed = p.now + } + + // Re-enable garbage collection of the client. + oos.confirmedClient.confirmation.release(p) +} + +// responseMessage is an interface for response messages of an +// open-owner or lock-owner transaction. +type responseMessage interface{ GetStatus() nfsv4.Nfsstat4 } + +// openOwnerLastResponse contains information on the outcome of the last +// transaction of a given open-owner. This information is needed both to +// respond to retries, but also to definitively remove state IDs closed +// by the last transaction. +type openOwnerLastResponse struct { + response responseMessage + closedFile *openOwnerFileState +} + +// openOwnerFileState stores information on a file that is currently +// opened within the context of a single open-owner. +type openOwnerFileState struct { + // Constant fields. + openedFile *openedFileState + + // Variable fields. + openOwner *openOwnerState + shareAccess virtual.ShareMask + stateID regularStateID + closeCount uint + useCount referenceCount + lockOwnerFiles map[*lockOwnerState]*lockOwnerFileState +} + +// maybeClose decreases the use count on an opened file. If zero, it +// schedules closure of the underlying virtual.Leaf object. 
This method +// is called at the end of CLOSE, but also at the end of READ or WRITE. +// A call to CLOSE may not immediately close a file if one or more +// READ/WRITE operations are still in progress. +func (oofs *openOwnerFileState) maybeClose(ll *leavesToClose) { + if oofs.useCount.decrease() { + ll.leaves = append(ll.leaves, leafToClose{ + leaf: oofs.openedFile.leaf, + closeCount: oofs.closeCount, + }) + if oofs.closeCount == 0 { + panic("Attempted to close file multiple times") + } + oofs.closeCount = 0 + } +} + +func (oofs *openOwnerFileState) removeStart(p *baseProgram, ll *leavesToClose) { + // Release lock state IDs associated with the file. This should + // be done as part of CLOSE; not LOCKU. If these have one or + // more byte ranges locked, we unlock them. It would also be + // permitted to return NFS4ERR_LOCKS_HELD, requiring that the + // client issues LOCKU operations before retrying, but that is + // less efficient. + // + // More details: + // - RFC 7530, section 9.1.4.4, paragraph 1. + // - RFC 7530, section 9.10, paragraph 3. + // - RFC 7530, section 16.2.4, paragraph 2. + for _, lofs := range oofs.lockOwnerFiles { + lofs.remove(p) + } + + oofs.maybeClose(ll) +} + +// removeFinalize removes an opened file from the open-owner state. This +// method is not called during CLOSE, but during the next transaction on +// an open-owner. This ensures that its state ID remains resolvable, +// allowing the CLOSE operation to be retried. +func (oofs *openOwnerFileState) removeFinalize(p *baseProgram) { + // Disconnect the openOwnerFileState. + handleKey := oofs.openedFile.handleKey + delete(oofs.openOwner.filesByHandle, handleKey) + delete(p.openOwnerFilesByOther, oofs.stateID.other) + oofs.openOwner = nil + + // Disconnect the openedFileState. Do leave it attached to the + // openOwnerFileState, so that in-flight READ and WRITE + // operations can still safely call close(). 
+ if oofs.openedFile.openOwnersCount.decrease() { + delete(p.openedFilesByHandle, handleKey) + } +} + +// openedFileState stores information on a file that is currently opened +// at least once. It is stored in the openedFilesByHandle map. This +// allows these files to be resolvable through PUTFH, even if they are +// no longer linked in the file system. +type openedFileState struct { + // Constant fields. + handle nfsv4.NfsFh4 + handleKey string + leaf virtual.Leaf + + // Variable fields. + openOwnersCount referenceCount + locks virtual.ByteRangeLockSet[*lockOwnerState] +} + +// lockOwnerState represents byte-range locking state associated with a +// given opened file and given lock-owner. Because lock-owners are bound +// to a single file (i.e., they can't contain locks belonging to +// different files), it is contained in the openedFileState. +// +// More details: RFC 7530, section 16.10.5, paragraph 6. +type lockOwnerState struct { + confirmedClient *confirmedClientState + owner []byte + + lastSeqID nfsv4.Seqid4 + lastResponse responseMessage + files []*lockOwnerFileState +} + +func (los *lockOwnerState) forgetLastResponse(p *baseProgram) { + los.lastResponse = nil +} + +func (los *lockOwnerState) startTransaction(p *baseProgram, seqID nfsv4.Seqid4, initialTransaction bool) (*lockOwnerTransaction, interface{}, nfsv4.Nfsstat4) { + if lastResponse := los.lastResponse; lastResponse != nil && seqID == los.lastSeqID { + // Replay of the last request, meaning we should return + // a cached response. This can only be done when the + // type of operation is the same, which is determined by + // the caller. + // + // More details: RFC 7530, section 9.1.9, bullet point 3. + return nil, lastResponse, nfsv4.NFS4ERR_BAD_SEQID + } + + if !initialTransaction && seqID != nextSeqID(los.lastSeqID) { + return nil, nil, nfsv4.NFS4ERR_BAD_SEQID + } + + // Start a new transaction. 
Because the client has sent a + // request with a new sequence ID, we know it will no longer + // attempt to retry the previous operation. Release the last + // response and any state IDs that were closed during the + // previous operation. + los.forgetLastResponse(p) + + // Prevent garbage collection of the client. + los.confirmedClient.confirmation.hold(p) + + return &lockOwnerTransaction{ + program: p, + state: los, + seqID: seqID, + }, nil, nfsv4.NFS4_OK +} + +type lockOwnerTransaction struct { + program *baseProgram + state *lockOwnerState + seqID nfsv4.Seqid4 +} + +func (lot *lockOwnerTransaction) complete(lastResponse responseMessage) { + los := lot.state + if transactionShouldComplete(lastResponse.GetStatus()) { + los.lastSeqID = lot.seqID + los.lastResponse = lastResponse + } + + // Re-enable garbage collection of the client. + p := lot.program + los.confirmedClient.confirmation.release(p) +} + +type lockOwnerFileState struct { + // Constant fields. + lockOwner *lockOwnerState + openOwnerFile *openOwnerFileState + shareAccess virtual.ShareMask + + // Variable fields. + lockOwnerIndex int + stateID regularStateID + lockCount int +} + +func (lofs *lockOwnerFileState) remove(p *baseProgram) { + if lofs.lockCount > 0 { + // Lock-owner still has one or more locks held on this + // file. Issue an unlock operation that spans the full + // range of the file to release all locks at once. + lock := &virtual.ByteRangeLock[*lockOwnerState]{ + Owner: lofs.lockOwner, + Start: 0, + End: math.MaxUint64, + Type: virtual.ByteRangeLockTypeUnlocked, + } + lofs.lockCount += lofs.openOwnerFile.openedFile.locks.Set(lock) + if lofs.lockCount != 0 { + panic("Failed to release locks") + } + } + + // Remove the lock-owner file from maps. + delete(p.lockOwnerFilesByOther, lofs.stateID.other) + los := lofs.lockOwner + delete(lofs.openOwnerFile.lockOwnerFiles, los) + + // Remove the lock-owner file from the list in the lock-owner. 
+ // We do need to make sure the list remains contiguous. + lastIndex := len(los.files) - 1 + lastLOFS := los.files[lastIndex] + lastLOFS.lockOwnerIndex = lofs.lockOwnerIndex + los.files[lastLOFS.lockOwnerIndex] = lastLOFS + los.files[lastIndex] = nil + los.files = los.files[:lastIndex] + lofs.lockOwnerIndex = -1 + + // Remove the lock-owner if there are no longer any files + // associated with it. + if len(los.files) == 0 { + delete(los.confirmedClient.lockOwners, string(los.owner)) + } +} + +// leafToClose contains information on a virtual file system leaf node +// that needs to be closed at the end of the current operation, after +// locks have been released. +type leafToClose struct { + leaf virtual.Leaf + closeCount uint +} + +// leavesToClose is a list of virtual file system leaf nodes that need +// to be closed at the end of the current operation, after locks have +// been released. +type leavesToClose struct { + leaves []leafToClose +} + +func (ll *leavesToClose) empty() bool { + return len(ll.leaves) == 0 +} + +func (ll *leavesToClose) closeAll() { + for _, l := range ll.leaves { + l.leaf.VirtualClose(l.closeCount) + } +} + +// attrRequestToAttributesMask converts a bitmap of NFSv4 attributes to +// their virtual file system counterparts. This method is used by +// GETATTR and READDIR to determine which attributes need to be +// requested. +func attrRequestToAttributesMask(attrRequest nfsv4.Bitmap4) virtual.AttributesMask { + var attributesMask virtual.AttributesMask + if len(attrRequest) > 0 { + // Attributes 0 to 31. + f := attrRequest[0] + if f&uint32(1< 1 { + // Attributes 32 to 63. 
+		f := attrRequest[1]
+		if f&uint32(1<<(nfsv4.FATTR4_MODE-32)) != 0 {
+			attributesMask |= virtual.AttributesMaskPermissions
+		}
+		if f&uint32(1<<(nfsv4.FATTR4_NUMLINKS-32)) != 0 {
+			attributesMask |= virtual.AttributesMaskLinkCount
+		}
+	}
+	return attributesMask
+}
+
+// deterministicNfstime4 is a timestamp that is reported as the access,
+// metadata and modify time of all files. If these timestamps were not
+// returned, clients would use 1970-01-01T00:00:00Z. As this tends to
+// confuse many tools, a deterministic timestamp is used instead.
+var deterministicNfstime4 = nfsv4.Nfstime4{
+	Seconds: filesystem.DeterministicFileModificationTimestamp.Unix(),
+}
+
+// attributesMaskToBitmap4 converts a virtual file system attributes
+// mask to the NFSv4 bitmap4 representation, trimming trailing zero
+// words as permitted by the XDR encoding.
+func attributesMaskToBitmap4(in virtual.AttributesMask) []uint32 {
+	out := make([]uint32, 2)
+	if in&virtual.AttributesMaskPermissions != 0 {
+		out[1] |= (1 << (nfsv4.FATTR4_MODE - 32))
+	}
+	if in&virtual.AttributesMaskSizeBytes != 0 {
+		out[0] |= (1 << nfsv4.FATTR4_SIZE)
+	}
+	for len(out) > 0 && out[len(out)-1] == 0 {
+		out = out[:len(out)-1]
+	}
+	return out
+}
+
+// nextSeqID increments a sequence ID according to the rules described in
+// RFC 7530, section 9.1.3. Upon overflow the value wraps to 1, not 0.
+func nextSeqID(seqID nfsv4.Seqid4) nfsv4.Seqid4 {
+	if seqID == math.MaxUint32 {
+		return 1
+	}
+	return seqID + 1
+}
+
+// shareAccessToShareMask converts NFSv4 share_access values that are
+// part of OPEN and OPEN_DOWNGRADE requests to our equivalent ShareMask
+// values.
+func shareAccessToShareMask(in uint32) (virtual.ShareMask, nfsv4.Nfsstat4) {
+	switch in {
+	case nfsv4.OPEN4_SHARE_ACCESS_READ:
+		return virtual.ShareMaskRead, nfsv4.NFS4_OK
+	case nfsv4.OPEN4_SHARE_ACCESS_WRITE:
+		return virtual.ShareMaskWrite, nfsv4.NFS4_OK
+	case nfsv4.OPEN4_SHARE_ACCESS_BOTH:
+		return virtual.ShareMaskRead | virtual.ShareMaskWrite, nfsv4.NFS4_OK
+	default:
+		return 0, nfsv4.NFS4ERR_INVAL
+	}
+}
+
+// Even though no "." and ".." entries should be returned, the NFSv4
+// spec requires that cookie values 0, 1 and 2 are never returned.
+// Offset all responses by this value. +const lastReservedCookie = 2 + +// readdirReporter is an implementation of DirectoryEntryReporter that +// reports the contents of a directory in the NFSv4 directory entry +// format. +type readdirReporter struct { + program *baseProgram + attrRequest nfsv4.Bitmap4 + maxCount nfsv4.Count4 + dirCount nfsv4.Count4 + + currentMaxCount nfsv4.Count4 + currentDirCount nfsv4.Count4 + nextEntry **nfsv4.Entry4 + endOfFile *bool +} + +func (r *readdirReporter) report(nextCookie uint64, name path.Component, attributes *virtual.Attributes) bool { + // The dircount field is a hint of the maximum number of bytes + // of directory information that should be returned. Only the + // size of the XDR encoded filename and cookie should contribute + // to its value. + filename := name.String() + if r.dirCount != 0 { + r.currentDirCount += nfsv4.Count4(nfsv4.GetComponent4EncodedSizeBytes(filename) + nfsv4.NfsCookie4EncodedSizeBytes) + if r.currentDirCount > r.dirCount { + *r.endOfFile = false + return false + } + } + + p := r.program + entry := nfsv4.Entry4{ + Cookie: lastReservedCookie + nextCookie, + Name: filename, + Attrs: p.attributesToFattr4(attributes, r.attrRequest), + } + + // The maxcount field is the maximum number of bytes for the + // READDIR4resok structure. 
+ r.currentMaxCount += nfsv4.Count4(entry.GetEncodedSizeBytes()) + if r.currentMaxCount > r.maxCount { + *r.endOfFile = false + return false + } + + *r.nextEntry = &entry + r.nextEntry = &entry.Nextentry + return true +} + +func (r *readdirReporter) ReportDirectory(nextCookie uint64, name path.Component, directory virtual.Directory, attributes *virtual.Attributes) bool { + return r.report(nextCookie, name, attributes) +} + +func (r *readdirReporter) ReportLeaf(nextCookie uint64, name path.Component, leaf virtual.Leaf, attributes *virtual.Attributes) bool { + return r.report(nextCookie, name, attributes) +} + +// fattr4ToAttributes converts a client-provided NFSv4 fattr4 to a set +// of virtual file system attributes. Only attributes that are both +// writable and supported by this implementation are accepted. +func fattr4ToAttributes(in *nfsv4.Fattr4, out *virtual.Attributes) nfsv4.Nfsstat4 { + r := bytes.NewBuffer(in.AttrVals) + if len(in.Attrmask) > 0 { + // Attributes 0 to 31. + f := in.Attrmask[0] + if f&^(1< 1 { + // Attributes 32 to 63. + f := in.Attrmask[1] + if f&^(1<<(nfsv4.FATTR4_MODE-32)) != 0 { + return nfsv4.NFS4ERR_ATTRNOTSUPP + } + if f&(1<<(nfsv4.FATTR4_MODE-32)) != 0 { + mode, _, err := nfsv4.ReadMode4(r) + if err != nil { + return nfsv4.NFS4ERR_BADXDR + } + out.SetPermissions(virtual.NewPermissionsFromMode(mode)) + } + } + for i := 2; i < len(in.Attrmask); i++ { + // Attributes 64 or higher. + if in.Attrmask[i] != 0 { + return nfsv4.NFS4ERR_ATTRNOTSUPP + } + } + if r.Len() != 0 { + // Provided attributes contain trailing data. + return nfsv4.NFS4ERR_BADXDR + } + return nfsv4.NFS4_OK +} + +// transactionShouldComplete returns whether a transaction should be +// completed, based on the resulting status code of the transaction. +// Even in the case where an errors occurs, should the sequence number +// of the client be advanced. The only exception is if the operation +// fails with any of the errors listed below. 
+//
+// More details: RFC 7530, section 9.1.7, last paragraph.
+func transactionShouldComplete(st nfsv4.Nfsstat4) bool {
+	return st != nfsv4.NFS4ERR_STALE_CLIENTID &&
+		st != nfsv4.NFS4ERR_STALE_STATEID &&
+		st != nfsv4.NFS4ERR_BAD_STATEID &&
+		st != nfsv4.NFS4ERR_BAD_SEQID &&
+		st != nfsv4.NFS4ERR_BADXDR &&
+		st != nfsv4.NFS4ERR_RESOURCE &&
+		st != nfsv4.NFS4ERR_NOFILEHANDLE &&
+		st != nfsv4.NFS4ERR_MOVED
+}
+
+// compareStateSeqID compares a client-provided sequence ID value with
+// one present on the server. The error that needs to be returned in
+// case of non-matching sequence IDs depends on whether the value lies
+// in the past or future.
+//
+// More details: RFC 7530, section 9.1.3, last paragraph.
+func compareStateSeqID(clientValue, serverValue nfsv4.Seqid4) nfsv4.Nfsstat4 {
+	if clientValue == serverValue {
+		return nfsv4.NFS4_OK
+	}
+	// The signed interpretation of the unsigned difference makes the
+	// comparison robust against sequence ID wraparound: a positive
+	// delta means the client's value lies in the future.
+	if int32(clientValue-serverValue) > 0 {
+		return nfsv4.NFS4ERR_BAD_STATEID
+	}
+	return nfsv4.NFS4ERR_OLD_STATEID
+}
+
+// nfsLockType4ToByteRangeLockType converts an NFSv4 lock type to a
+// virtual file system byte range lock type. As this implementation does
+// not attempt to provide any fairness, no distinction is made between
+// waiting and non-waiting lock type variants.
+func nfsLockType4ToByteRangeLockType(in nfsv4.NfsLockType4) (virtual.ByteRangeLockType, nfsv4.Nfsstat4) {
+	switch in {
+	case nfsv4.READ_LT, nfsv4.READW_LT:
+		return virtual.ByteRangeLockTypeLockedShared, nfsv4.NFS4_OK
+	case nfsv4.WRITE_LT, nfsv4.WRITEW_LT:
+		return virtual.ByteRangeLockTypeLockedExclusive, nfsv4.NFS4_OK
+	default:
+		return 0, nfsv4.NFS4ERR_INVAL
+	}
+}
+
+// offsetLengthToStartEnd converts an (offset, length) pair to a
+// (start, end) pair. The former is used by NFSv4, while the latter is
+// used by ByteRangeLock.
+//
+// More details: RFC 7530, section 16.10.4, paragraph 2.
+func offsetLengthToStartEnd(offset, length uint64) (uint64, uint64, nfsv4.Nfsstat4) { + switch length { + case 0: + return 0, 0, nfsv4.NFS4ERR_INVAL + case math.MaxUint64: + // A length of all ones indicates end-of-file. + return offset, math.MaxUint64, nfsv4.NFS4_OK + default: + if length > math.MaxUint64-offset { + // The end exceeds the maximum 64-bit unsigned + // integer value. + return 0, 0, nfsv4.NFS4ERR_INVAL + } + return offset, offset + length, nfsv4.NFS4_OK + } +} + +// byteRangeLockToLock4Denied converts information on a conflicting byte +// range lock into a LOCK4denied response. +func byteRangeLockToLock4Denied(lock *virtual.ByteRangeLock[*lockOwnerState]) nfsv4.Lock4denied { + length := uint64(math.MaxUint64) + if lock.End != math.MaxUint64 { + length = lock.End - lock.Start + } + var lockType nfsv4.NfsLockType4 + switch lock.Type { + case virtual.ByteRangeLockTypeLockedShared: + lockType = nfsv4.READ_LT + case virtual.ByteRangeLockTypeLockedExclusive: + lockType = nfsv4.WRITE_LT + default: + panic("Unexpected lock type") + } + los := lock.Owner + return nfsv4.Lock4denied{ + Offset: lock.Start, + Length: length, + Locktype: lockType, + Owner: nfsv4.LockOwner4{ + Clientid: los.confirmedClient.confirmation.key.shortClientID, + Owner: los.owner, + }, + } +} + +// nfsv4NewComponent converts a filename string that's provided as part +// of an incoming request to a pathname component that can be provided +// to the virtual file system layer. +func nfsv4NewComponent(name string) (path.Component, nfsv4.Nfsstat4) { + if name == "" { + // Inherently invalid name. + return path.Component{}, nfsv4.NFS4ERR_INVAL + } + component, ok := path.NewComponent(name) + if !ok { + // Name that is invalid for this implementation. 
+ return path.Component{}, nfsv4.NFS4ERR_BADNAME + } + return component, nfsv4.NFS4_OK +} diff --git a/pkg/filesystem/virtual/nfsv4/base_program_test.go b/pkg/filesystem/virtual/nfsv4/base_program_test.go new file mode 100644 index 00000000..b3271500 --- /dev/null +++ b/pkg/filesystem/virtual/nfsv4/base_program_test.go @@ -0,0 +1,5637 @@ +package nfsv4_test + +import ( + "bytes" + "context" + "io" + "testing" + "time" + + "github.com/buildbarn/bb-remote-execution/internal/mock" + "github.com/buildbarn/bb-remote-execution/pkg/filesystem/virtual" + "github.com/buildbarn/bb-remote-execution/pkg/filesystem/virtual/nfsv4" + "github.com/buildbarn/bb-storage/pkg/filesystem" + "github.com/buildbarn/bb-storage/pkg/filesystem/path" + nfsv4_xdr "github.com/buildbarn/go-xdr/pkg/protocols/nfsv4" + "github.com/buildbarn/go-xdr/pkg/protocols/rpcv2" + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/require" +) + +func handleResolverExpectCall(t *testing.T, handleResolver *mock.MockHandleResolver, expectedID []byte, directory virtual.Directory, leaf virtual.Leaf, status virtual.Status) { + handleResolver.EXPECT().Call(gomock.Any()). + DoAndReturn(func(id io.WriterTo) (virtual.Directory, virtual.Leaf, virtual.Status) { + idBuf := bytes.NewBuffer(nil) + n, err := id.WriteTo(idBuf) + require.NoError(t, err) + require.Equal(t, int64(len(expectedID)), n) + require.Equal(t, expectedID, idBuf.Bytes()) + return directory, leaf, status + }) +} + +func randomNumberGeneratorExpectRead(randomNumberGenerator *mock.MockSingleThreadedGenerator, data []byte) { + randomNumberGenerator.EXPECT().Read(gomock.Len(len(data))). 
+ DoAndReturn(func(p []byte) (int, error) { + return copy(p, data), nil + }) +} + +func setClientIDForTesting(ctx context.Context, t *testing.T, randomNumberGenerator *mock.MockSingleThreadedGenerator, program nfsv4_xdr.Nfs4Program, shortClientID nfsv4_xdr.Clientid4) { + randomNumberGenerator.EXPECT().Uint64().Return(uint64(shortClientID)) + randomNumberGeneratorExpectRead(randomNumberGenerator, []byte{0xf8, 0x6e, 0x57, 0x12, 0x9c, 0x7a, 0x62, 0x8a}) + + res, err := program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "setclientid", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_SETCLIENTID{ + Opsetclientid: nfsv4_xdr.Setclientid4args{ + Client: nfsv4_xdr.NfsClientId4{ + Verifier: nfsv4_xdr.Verifier4{0x95, 0x38, 0xc4, 0xfc, 0x81, 0x3e, 0x92, 0x2a}, + Id: []byte{0xa6, 0x9d, 0x64, 0x34, 0xdb, 0xcb, 0x09, 0x53}, + }, + Callback: nfsv4_xdr.CbClient4{ + CbProgram: 0x8554a7c7, + CbLocation: nfsv4_xdr.Clientaddr4{ + RNetid: "tcp", + RAddr: "127.0.0.1.196.95", + }, + }, + CallbackIdent: 0xa2bef9ca, + }, + }, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "setclientid", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_SETCLIENTID{ + Opsetclientid: &nfsv4_xdr.Setclientid4res_NFS4_OK{ + Resok4: nfsv4_xdr.Setclientid4resok{ + Clientid: shortClientID, + SetclientidConfirm: nfsv4_xdr.Verifier4{0xf8, 0x6e, 0x57, 0x12, 0x9c, 0x7a, 0x62, 0x8a}, + }, + }, + }, + }, + Status: nfsv4_xdr.NFS4_OK, + }, res) + + res, err = program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "setclientid", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_SETCLIENTID_CONFIRM{ + OpsetclientidConfirm: nfsv4_xdr.SetclientidConfirm4args{ + Clientid: shortClientID, + SetclientidConfirm: nfsv4_xdr.Verifier4{0xf8, 0x6e, 0x57, 0x12, 0x9c, 0x7a, 0x62, 0x8a}, + }, + }, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "setclientid", + Resarray: []nfsv4_xdr.NfsResop4{ + 
&nfsv4_xdr.NfsResop4_OP_SETCLIENTID_CONFIRM{ + OpsetclientidConfirm: nfsv4_xdr.SetclientidConfirm4res{ + Status: nfsv4_xdr.NFS4_OK, + }, + }, + }, + Status: nfsv4_xdr.NFS4_OK, + }, res) +} + +func openUnconfirmedFileForTesting(ctx context.Context, t *testing.T, randomNumberGenerator *mock.MockSingleThreadedGenerator, program nfsv4_xdr.Nfs4Program, rootDirectory *mock.MockVirtualDirectory, leaf *mock.MockVirtualLeaf, fileHandle nfsv4_xdr.NfsFh4, shortClientID nfsv4_xdr.Clientid4, seqID nfsv4_xdr.Seqid4, stateIDOther [nfsv4_xdr.NFS4_OTHER_SIZE]byte) { + rootDirectory.EXPECT().VirtualOpenChild( + path.MustNewComponent("Hello"), + virtual.ShareMaskRead, + nil, + &virtual.OpenExistingOptions{}, + virtual.AttributesMaskFileHandle, + gomock.Any(), + ).DoAndReturn(func(name path.Component, shareAccess virtual.ShareMask, createAttributes *virtual.Attributes, existingOptions *virtual.OpenExistingOptions, requested virtual.AttributesMask, openedFileAttributes *virtual.Attributes) (virtual.Leaf, virtual.AttributesMask, virtual.ChangeInfo, virtual.Status) { + openedFileAttributes.SetFileHandle(fileHandle) + return leaf, 0, virtual.ChangeInfo{ + Before: 0x29291f1b07caf9ea, + After: 0x360e671892329978, + }, virtual.StatusOK + }) + randomNumberGeneratorExpectRead(randomNumberGenerator, stateIDOther[4:]) + + res, err := program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "open", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_PUTROOTFH{}, + &nfsv4_xdr.NfsArgop4_OP_OPEN{ + Opopen: nfsv4_xdr.Open4args{ + Seqid: seqID, + ShareAccess: nfsv4_xdr.OPEN4_SHARE_ACCESS_READ, + ShareDeny: nfsv4_xdr.OPEN4_SHARE_DENY_NONE, + Owner: nfsv4_xdr.OpenOwner4{ + Clientid: shortClientID, + Owner: []byte{0xc4, 0x85, 0x50, 0x6b, 0xa5, 0xec, 0x8e, 0x2c}, + }, + Openhow: &nfsv4_xdr.Openflag4_default{}, + Claim: &nfsv4_xdr.OpenClaim4_CLAIM_NULL{ + File: "Hello", + }, + }, + }, + &nfsv4_xdr.NfsArgop4_OP_GETFH{}, + }, + }) + require.NoError(t, err) + require.Equal(t, 
&nfsv4_xdr.Compound4res{ + Tag: "open", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_PUTROOTFH{ + Opputrootfh: nfsv4_xdr.Putrootfh4res{ + Status: nfsv4_xdr.NFS4_OK, + }, + }, + &nfsv4_xdr.NfsResop4_OP_OPEN{ + Opopen: &nfsv4_xdr.Open4res_NFS4_OK{ + Resok4: nfsv4_xdr.Open4resok{ + Stateid: nfsv4_xdr.Stateid4{ + Seqid: 1, + Other: stateIDOther, + }, + Cinfo: nfsv4_xdr.ChangeInfo4{ + Atomic: true, + Before: 0x29291f1b07caf9ea, + After: 0x360e671892329978, + }, + Rflags: nfsv4_xdr.OPEN4_RESULT_CONFIRM | nfsv4_xdr.OPEN4_RESULT_LOCKTYPE_POSIX, + Attrset: nfsv4_xdr.Bitmap4{}, + Delegation: &nfsv4_xdr.OpenDelegation4_OPEN_DELEGATE_NONE{}, + }, + }, + }, + &nfsv4_xdr.NfsResop4_OP_GETFH{ + Opgetfh: &nfsv4_xdr.Getfh4res_NFS4_OK{ + Resok4: nfsv4_xdr.Getfh4resok{ + Object: fileHandle, + }, + }, + }, + }, + Status: nfsv4_xdr.NFS4_OK, + }, res) +} + +func openConfirmForTesting(ctx context.Context, t *testing.T, randomNumberGenerator *mock.MockSingleThreadedGenerator, program nfsv4_xdr.Nfs4Program, fileHandle nfsv4_xdr.NfsFh4, seqID nfsv4_xdr.Seqid4, stateIDOther [nfsv4_xdr.NFS4_OTHER_SIZE]byte) { + res, err := program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "open_confirm", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_PUTFH{ + Opputfh: nfsv4_xdr.Putfh4args{ + Object: fileHandle, + }, + }, + &nfsv4_xdr.NfsArgop4_OP_OPEN_CONFIRM{ + OpopenConfirm: nfsv4_xdr.OpenConfirm4args{ + OpenStateid: nfsv4_xdr.Stateid4{ + Seqid: 1, + Other: stateIDOther, + }, + Seqid: seqID, + }, + }, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "open_confirm", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_PUTFH{ + Opputfh: nfsv4_xdr.Putfh4res{ + Status: nfsv4_xdr.NFS4_OK, + }, + }, + &nfsv4_xdr.NfsResop4_OP_OPEN_CONFIRM{ + OpopenConfirm: &nfsv4_xdr.OpenConfirm4res_NFS4_OK{ + Resok4: nfsv4_xdr.OpenConfirm4resok{ + OpenStateid: nfsv4_xdr.Stateid4{ + Seqid: 2, + Other: stateIDOther, + }, + }, + }, + }, + }, 
+ Status: nfsv4_xdr.NFS4_OK, + }, res) +} + +func TestBaseProgramCompound_OP_ACCESS(t *testing.T) { + ctrl, ctx := gomock.WithContext(context.Background(), t) + + rootDirectory := mock.NewMockVirtualDirectory(ctrl) + rootDirectory.EXPECT().VirtualGetAttributes(virtual.AttributesMaskFileHandle, gomock.Any()). + Do(func(requested virtual.AttributesMask, attributes *virtual.Attributes) { + attributes.SetFileHandle([]byte{0x63, 0x40, 0xb6, 0x51, 0x6d, 0xa1, 0x7f, 0xcb}) + }) + handleResolver := mock.NewMockHandleResolver(ctrl) + randomNumberGenerator := mock.NewMockSingleThreadedGenerator(ctrl) + rebootVerifier := nfsv4_xdr.Verifier4{0x96, 0x63, 0x54, 0xf1, 0xa2, 0x6b, 0x8c, 0x61} + stateIDOtherPrefix := [...]byte{0x68, 0x78, 0x20, 0xb7} + clock := mock.NewMockClock(ctrl) + program := nfsv4.NewBaseProgram(rootDirectory, handleResolver.Call, randomNumberGenerator, rebootVerifier, stateIDOtherPrefix, clock, 2*time.Minute, time.Minute) + + t.Run("NoFileHandle", func(t *testing.T) { + // Calling ACCESS without a file handle should fail. + res, err := program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "access", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_ACCESS{ + Opaccess: nfsv4_xdr.Access4args{ + Access: nfsv4_xdr.ACCESS4_READ, + }, + }, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "access", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_ACCESS{ + Opaccess: &nfsv4_xdr.Access4res_default{ + Status: nfsv4_xdr.NFS4ERR_NOFILEHANDLE, + }, + }, + }, + Status: nfsv4_xdr.NFS4ERR_NOFILEHANDLE, + }, res) + }) + + t.Run("Directory", func(t *testing.T) { + // Access checks against a directory. + rootDirectory.EXPECT().VirtualGetAttributes(virtual.AttributesMaskPermissions, gomock.Any()). 
+ Do(func(requested virtual.AttributesMask, attributes *virtual.Attributes) { + attributes.SetPermissions(virtual.PermissionsExecute) + }) + + res, err := program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "access", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_PUTROOTFH{}, + &nfsv4_xdr.NfsArgop4_OP_ACCESS{ + Opaccess: nfsv4_xdr.Access4args{ + Access: nfsv4_xdr.ACCESS4_EXECUTE | nfsv4_xdr.ACCESS4_LOOKUP | nfsv4_xdr.ACCESS4_READ, + }, + }, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "access", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_PUTROOTFH{ + Opputrootfh: nfsv4_xdr.Putrootfh4res{ + Status: nfsv4_xdr.NFS4_OK, + }, + }, + &nfsv4_xdr.NfsResop4_OP_ACCESS{ + Opaccess: &nfsv4_xdr.Access4res_NFS4_OK{ + Resok4: nfsv4_xdr.Access4resok{ + Supported: nfsv4_xdr.ACCESS4_LOOKUP | nfsv4_xdr.ACCESS4_READ, + Access: nfsv4_xdr.ACCESS4_LOOKUP, + }, + }, + }, + }, + Status: nfsv4_xdr.NFS4_OK, + }, res) + }) + + t.Run("File", func(t *testing.T) { + // Access checks against a file. + leaf := mock.NewMockVirtualLeaf(ctrl) + clock.EXPECT().Now().Return(time.Unix(1000, 0)) + handleResolverExpectCall(t, handleResolver, []byte{1, 2, 3}, nil, leaf, virtual.StatusOK) + leaf.EXPECT().VirtualGetAttributes(virtual.AttributesMaskPermissions, gomock.Any()). 
+ Do(func(requested virtual.AttributesMask, attributes *virtual.Attributes) { + attributes.SetPermissions(virtual.PermissionsRead | virtual.PermissionsWrite) + }) + + res, err := program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "access", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_PUTFH{ + Opputfh: nfsv4_xdr.Putfh4args{ + Object: nfsv4_xdr.NfsFh4{1, 2, 3}, + }, + }, + &nfsv4_xdr.NfsArgop4_OP_ACCESS{ + Opaccess: nfsv4_xdr.Access4args{ + Access: nfsv4_xdr.ACCESS4_EXECUTE | nfsv4_xdr.ACCESS4_LOOKUP | nfsv4_xdr.ACCESS4_READ, + }, + }, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "access", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_PUTFH{ + Opputfh: nfsv4_xdr.Putfh4res{ + Status: nfsv4_xdr.NFS4_OK, + }, + }, + &nfsv4_xdr.NfsResop4_OP_ACCESS{ + Opaccess: &nfsv4_xdr.Access4res_NFS4_OK{ + Resok4: nfsv4_xdr.Access4resok{ + Supported: nfsv4_xdr.ACCESS4_EXECUTE | nfsv4_xdr.ACCESS4_READ, + Access: nfsv4_xdr.ACCESS4_READ, + }, + }, + }, + }, + Status: nfsv4_xdr.NFS4_OK, + }, res) + }) +} + +func TestBaseProgramCompound_OP_CLOSE(t *testing.T) { + ctrl, ctx := gomock.WithContext(context.Background(), t) + + rootDirectory := mock.NewMockVirtualDirectory(ctrl) + rootDirectory.EXPECT().VirtualGetAttributes(virtual.AttributesMaskFileHandle, gomock.Any()). 
+ Do(func(requested virtual.AttributesMask, attributes *virtual.Attributes) { + attributes.SetFileHandle([]byte{0x14, 0x55, 0xb5, 0x51, 0x02, 0x31, 0xd6, 0x75}) + }) + handleResolver := mock.NewMockHandleResolver(ctrl) + randomNumberGenerator := mock.NewMockSingleThreadedGenerator(ctrl) + rebootVerifier := nfsv4_xdr.Verifier4{0x9f, 0xa8, 0x23, 0x40, 0x68, 0x9f, 0x3e, 0xac} + stateIDOtherPrefix := [...]byte{0xf5, 0x47, 0xa8, 0x88} + clock := mock.NewMockClock(ctrl) + program := nfsv4.NewBaseProgram(rootDirectory, handleResolver.Call, randomNumberGenerator, rebootVerifier, stateIDOtherPrefix, clock, 2*time.Minute, time.Minute) + + t.Run("AnonymousStateID", func(t *testing.T) { + // Calling CLOSE against the anonymous state ID is of + // course not permitted. This operation only works when + // called against regular state IDs. + leaf := mock.NewMockVirtualLeaf(ctrl) + clock.EXPECT().Now().Return(time.Unix(1000, 0)) + handleResolverExpectCall(t, handleResolver, []byte{1, 2, 3}, nil, leaf, virtual.StatusOK) + + res, err := program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "close", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_PUTFH{ + Opputfh: nfsv4_xdr.Putfh4args{ + Object: nfsv4_xdr.NfsFh4{1, 2, 3}, + }, + }, + &nfsv4_xdr.NfsArgop4_OP_CLOSE{ + Opclose: nfsv4_xdr.Close4args{ + Seqid: 0x33cfa3a9, + }, + }, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "close", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_PUTFH{ + Opputfh: nfsv4_xdr.Putfh4res{ + Status: nfsv4_xdr.NFS4_OK, + }, + }, + &nfsv4_xdr.NfsResop4_OP_CLOSE{ + Opclose: &nfsv4_xdr.Close4res_default{ + Status: nfsv4_xdr.NFS4ERR_BAD_STATEID, + }, + }, + }, + Status: nfsv4_xdr.NFS4ERR_BAD_STATEID, + }, res) + }) + + t.Run("StaleStateID", func(t *testing.T) { + // Providing an arbitrary state ID that does not start + // with a known prefix should return + // NFS4ERR_STALE_STATEID, as it's likely from before a + // restart. 
+ leaf := mock.NewMockVirtualLeaf(ctrl) + clock.EXPECT().Now().Return(time.Unix(1001, 0)) + handleResolverExpectCall(t, handleResolver, []byte{1, 2, 3}, nil, leaf, virtual.StatusOK) + + res, err := program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "close", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_PUTFH{ + Opputfh: nfsv4_xdr.Putfh4args{ + Object: nfsv4_xdr.NfsFh4{1, 2, 3}, + }, + }, + &nfsv4_xdr.NfsArgop4_OP_CLOSE{ + Opclose: nfsv4_xdr.Close4args{ + Seqid: 0x299f061e, + OpenStateid: nfsv4_xdr.Stateid4{ + Seqid: 0x7746b4d2, + Other: [...]byte{ + 0x36, 0xeb, 0x77, 0x13, + 0x42, 0xfa, 0x7f, 0xbc, + 0xe2, 0x36, 0x20, 0x1b, + }, + }, + }, + }, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "close", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_PUTFH{ + Opputfh: nfsv4_xdr.Putfh4res{ + Status: nfsv4_xdr.NFS4_OK, + }, + }, + &nfsv4_xdr.NfsResop4_OP_CLOSE{ + Opclose: &nfsv4_xdr.Close4res_default{ + Status: nfsv4_xdr.NFS4ERR_STALE_STATEID, + }, + }, + }, + Status: nfsv4_xdr.NFS4ERR_STALE_STATEID, + }, res) + }) + + t.Run("BadStateID", func(t *testing.T) { + // Providing an arbitrary state ID that does not start with + // the known prefix should return NFS4ERR_BAD_STATEID. 
+ leaf := mock.NewMockVirtualLeaf(ctrl) + clock.EXPECT().Now().Return(time.Unix(1002, 0)) + clock.EXPECT().Now().Return(time.Unix(1003, 0)) + handleResolverExpectCall(t, handleResolver, []byte{1, 2, 3}, nil, leaf, virtual.StatusOK) + + res, err := program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "close", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_PUTFH{ + Opputfh: nfsv4_xdr.Putfh4args{ + Object: nfsv4_xdr.NfsFh4{1, 2, 3}, + }, + }, + &nfsv4_xdr.NfsArgop4_OP_CLOSE{ + Opclose: nfsv4_xdr.Close4args{ + Seqid: 0xf4cf976e, + OpenStateid: nfsv4_xdr.Stateid4{ + Seqid: 0x444b408c, + Other: [...]byte{ + 0xf5, 0x47, 0xa8, 0x88, + 0x91, 0x2a, 0x94, 0x35, + 0x7f, 0xc9, 0x06, 0x70, + }, + }, + }, + }, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "close", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_PUTFH{ + Opputfh: nfsv4_xdr.Putfh4res{ + Status: nfsv4_xdr.NFS4_OK, + }, + }, + &nfsv4_xdr.NfsResop4_OP_CLOSE{ + Opclose: &nfsv4_xdr.Close4res_default{ + Status: nfsv4_xdr.NFS4ERR_BAD_STATEID, + }, + }, + }, + Status: nfsv4_xdr.NFS4ERR_BAD_STATEID, + }, res) + }) + + // The remainder of the test assumes the availability of a client ID. + clock.EXPECT().Now().Return(time.Unix(1004, 0)) + clock.EXPECT().Now().Return(time.Unix(1005, 0)) + setClientIDForTesting(ctx, t, randomNumberGenerator, program, 0xc4cf32ab1168aabc) + + // Open a file for reading, but don't confirm it yet. 
+ leaf := mock.NewMockVirtualLeaf(ctrl) + clock.EXPECT().Now().Return(time.Unix(1006, 0)) + clock.EXPECT().Now().Return(time.Unix(1007, 0)) + openUnconfirmedFileForTesting( + ctx, + t, + randomNumberGenerator, + program, + rootDirectory, + leaf, + nfsv4_xdr.NfsFh4{0x1f, 0x5b, 0x1f, 0x0e, 0x8c, 0xf4, 0xf5, 0x40}, + /* shortClientID = */ 0xc4cf32ab1168aabc, + /* seqID = */ 241, + /* stateIDOther = */ [...]byte{ + 0xf5, 0x47, 0xa8, 0x88, + 0x74, 0x62, 0xab, 0x46, + 0x26, 0x1d, 0x14, 0x7f, + }) + + t.Run("UnconfirmedStateID", func(t *testing.T) { + // CLOSE can't be called against an open-owner that + // hasn't been confirmed yet. + clock.EXPECT().Now().Return(time.Unix(1008, 0)) + clock.EXPECT().Now().Return(time.Unix(1009, 0)) + + res, err := program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "close", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_PUTFH{ + Opputfh: nfsv4_xdr.Putfh4args{ + Object: nfsv4_xdr.NfsFh4{0x1f, 0x5b, 0x1f, 0x0e, 0x8c, 0xf4, 0xf5, 0x40}, + }, + }, + &nfsv4_xdr.NfsArgop4_OP_CLOSE{ + Opclose: nfsv4_xdr.Close4args{ + Seqid: 242, + OpenStateid: nfsv4_xdr.Stateid4{ + Seqid: 1, + Other: [...]byte{ + 0xf5, 0x47, 0xa8, 0x88, + 0x74, 0x62, 0xab, 0x46, + 0x26, 0x1d, 0x14, 0x7f, + }, + }, + }, + }, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "close", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_PUTFH{ + Opputfh: nfsv4_xdr.Putfh4res{ + Status: nfsv4_xdr.NFS4_OK, + }, + }, + &nfsv4_xdr.NfsResop4_OP_CLOSE{ + Opclose: &nfsv4_xdr.Close4res_default{ + Status: nfsv4_xdr.NFS4ERR_BAD_SEQID, + }, + }, + }, + Status: nfsv4_xdr.NFS4ERR_BAD_SEQID, + }, res) + }) + + // Confirm the open-owner for the remainder of the test. 
+ clock.EXPECT().Now().Return(time.Unix(1010, 0)) + clock.EXPECT().Now().Return(time.Unix(1011, 0)) + openConfirmForTesting( + ctx, + t, + randomNumberGenerator, + program, + nfsv4_xdr.NfsFh4{0x1f, 0x5b, 0x1f, 0x0e, 0x8c, 0xf4, 0xf5, 0x40}, + /* seqID = */ 242, + /* stateIDOther = */ [...]byte{ + 0xf5, 0x47, 0xa8, 0x88, + 0x74, 0x62, 0xab, 0x46, + 0x26, 0x1d, 0x14, 0x7f, + }) + + t.Run("OldStateID", func(t *testing.T) { + // Can't call CLOSE on a state ID from the past. + clock.EXPECT().Now().Return(time.Unix(1012, 0)) + clock.EXPECT().Now().Return(time.Unix(1013, 0)) + + res, err := program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "close", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_PUTFH{ + Opputfh: nfsv4_xdr.Putfh4args{ + Object: nfsv4_xdr.NfsFh4{0x1f, 0x5b, 0x1f, 0x0e, 0x8c, 0xf4, 0xf5, 0x40}, + }, + }, + &nfsv4_xdr.NfsArgop4_OP_CLOSE{ + Opclose: nfsv4_xdr.Close4args{ + Seqid: 243, + OpenStateid: nfsv4_xdr.Stateid4{ + Seqid: 1, + Other: [...]byte{ + 0xf5, 0x47, 0xa8, 0x88, + 0x74, 0x62, 0xab, 0x46, + 0x26, 0x1d, 0x14, 0x7f, + }, + }, + }, + }, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "close", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_PUTFH{ + Opputfh: nfsv4_xdr.Putfh4res{ + Status: nfsv4_xdr.NFS4_OK, + }, + }, + &nfsv4_xdr.NfsResop4_OP_CLOSE{ + Opclose: &nfsv4_xdr.Close4res_default{ + Status: nfsv4_xdr.NFS4ERR_OLD_STATEID, + }, + }, + }, + Status: nfsv4_xdr.NFS4ERR_OLD_STATEID, + }, res) + }) + + t.Run("FuturisticStateID", func(t *testing.T) { + // Can't call CLOSE on a state ID from the future. 
+ clock.EXPECT().Now().Return(time.Unix(1014, 0)) + clock.EXPECT().Now().Return(time.Unix(1015, 0)) + + res, err := program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "close", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_PUTFH{ + Opputfh: nfsv4_xdr.Putfh4args{ + Object: nfsv4_xdr.NfsFh4{0x1f, 0x5b, 0x1f, 0x0e, 0x8c, 0xf4, 0xf5, 0x40}, + }, + }, + &nfsv4_xdr.NfsArgop4_OP_CLOSE{ + Opclose: nfsv4_xdr.Close4args{ + Seqid: 244, + OpenStateid: nfsv4_xdr.Stateid4{ + Seqid: 3, + Other: [...]byte{ + 0xf5, 0x47, 0xa8, 0x88, + 0x74, 0x62, 0xab, 0x46, + 0x26, 0x1d, 0x14, 0x7f, + }, + }, + }, + }, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "close", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_PUTFH{ + Opputfh: nfsv4_xdr.Putfh4res{ + Status: nfsv4_xdr.NFS4_OK, + }, + }, + &nfsv4_xdr.NfsResop4_OP_CLOSE{ + Opclose: &nfsv4_xdr.Close4res_default{ + Status: nfsv4_xdr.NFS4ERR_BAD_STATEID, + }, + }, + }, + Status: nfsv4_xdr.NFS4ERR_BAD_STATEID, + }, res) + }) + + t.Run("NoFileHandle", func(t *testing.T) { + // Calling CLOSE without a file handle should fail. + clock.EXPECT().Now().Return(time.Unix(1016, 0)) + + res, err := program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "close", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_CLOSE{ + Opclose: nfsv4_xdr.Close4args{ + Seqid: 244, + OpenStateid: nfsv4_xdr.Stateid4{ + Seqid: 2, + Other: [...]byte{ + 0xf5, 0x47, 0xa8, 0x88, + 0x74, 0x62, 0xab, 0x46, + 0x26, 0x1d, 0x14, 0x7f, + }, + }, + }, + }, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "close", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_CLOSE{ + Opclose: &nfsv4_xdr.Close4res_default{ + Status: nfsv4_xdr.NFS4ERR_NOFILEHANDLE, + }, + }, + }, + Status: nfsv4_xdr.NFS4ERR_NOFILEHANDLE, + }, res) + }) + + t.Run("Success", func(t *testing.T) { + // Actually close the file. 
It should be safe to call + // this multiple times, as it should just return a + // cached response. + for i := int64(0); i < 2*10; i++ { + clock.EXPECT().Now().Return(time.Unix(1017+i, 0)) + } + leaf.EXPECT().VirtualClose(uint(1)) + + for i := uint32(0); i < 10; i++ { + res, err := program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "close", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_PUTFH{ + Opputfh: nfsv4_xdr.Putfh4args{ + Object: nfsv4_xdr.NfsFh4{0x1f, 0x5b, 0x1f, 0x0e, 0x8c, 0xf4, 0xf5, 0x40}, + }, + }, + &nfsv4_xdr.NfsArgop4_OP_CLOSE{ + Opclose: nfsv4_xdr.Close4args{ + Seqid: 244, + OpenStateid: nfsv4_xdr.Stateid4{ + Seqid: 2, + Other: [...]byte{ + 0xf5, 0x47, 0xa8, 0x88, + 0x74, 0x62, 0xab, 0x46, + 0x26, 0x1d, 0x14, 0x7f, + }, + }, + }, + }, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "close", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_PUTFH{ + Opputfh: nfsv4_xdr.Putfh4res{ + Status: nfsv4_xdr.NFS4_OK, + }, + }, + &nfsv4_xdr.NfsResop4_OP_CLOSE{ + Opclose: &nfsv4_xdr.Close4res_NFS4_OK{ + OpenStateid: nfsv4_xdr.Stateid4{ + Seqid: 3, + Other: [...]byte{ + 0xf5, 0x47, 0xa8, 0x88, + 0x74, 0x62, 0xab, 0x46, + 0x26, 0x1d, 0x14, 0x7f, + }, + }, + }, + }, + }, + Status: nfsv4_xdr.NFS4_OK, + }, res) + } + }) + + t.Run("CloseAfterClosed", func(t *testing.T) { + // We should no longer be able to interact with the + // state ID after closing it. Attempting to close a file + // that has already been closed should just return + // NFS4ERR_BAD_STATEID. 
+ clock.EXPECT().Now().Return(time.Unix(1037, 0)) + clock.EXPECT().Now().Return(time.Unix(1038, 0)) + + res, err := program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "close", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_PUTFH{ + Opputfh: nfsv4_xdr.Putfh4args{ + Object: nfsv4_xdr.NfsFh4{0x1f, 0x5b, 0x1f, 0x0e, 0x8c, 0xf4, 0xf5, 0x40}, + }, + }, + &nfsv4_xdr.NfsArgop4_OP_CLOSE{ + Opclose: nfsv4_xdr.Close4args{ + Seqid: 245, + OpenStateid: nfsv4_xdr.Stateid4{ + Seqid: 3, + Other: [...]byte{ + 0xf5, 0x47, 0xa8, 0x88, + 0x74, 0x62, 0xab, 0x46, + 0x26, 0x1d, 0x14, 0x7f, + }, + }, + }, + }, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "close", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_PUTFH{ + Opputfh: nfsv4_xdr.Putfh4res{ + Status: nfsv4_xdr.NFS4_OK, + }, + }, + &nfsv4_xdr.NfsResop4_OP_CLOSE{ + Opclose: &nfsv4_xdr.Close4res_default{ + Status: nfsv4_xdr.NFS4ERR_BAD_STATEID, + }, + }, + }, + Status: nfsv4_xdr.NFS4ERR_BAD_STATEID, + }, res) + }) +} + +func TestBaseProgramCompound_OP_COMMIT(t *testing.T) { + ctrl, ctx := gomock.WithContext(context.Background(), t) + + rootDirectory := mock.NewMockVirtualDirectory(ctrl) + rootDirectory.EXPECT().VirtualGetAttributes(virtual.AttributesMaskFileHandle, gomock.Any()). 
+ Do(func(requested virtual.AttributesMask, attributes *virtual.Attributes) { + attributes.SetFileHandle([]byte{0x5e, 0x1e, 0xca, 0x70, 0xcc, 0x9d, 0x5e, 0xd5}) + }) + handleResolver := mock.NewMockHandleResolver(ctrl) + randomNumberGenerator := mock.NewMockSingleThreadedGenerator(ctrl) + rebootVerifier := nfsv4_xdr.Verifier4{0x1a, 0xa6, 0x7e, 0x3b, 0xf7, 0x29, 0xa4, 0x7b} + stateIDOtherPrefix := [...]byte{0x24, 0xa7, 0x48, 0xbc} + clock := mock.NewMockClock(ctrl) + program := nfsv4.NewBaseProgram(rootDirectory, handleResolver.Call, randomNumberGenerator, rebootVerifier, stateIDOtherPrefix, clock, 2*time.Minute, time.Minute) + + t.Run("NoFileHandle", func(t *testing.T) { + // Calling COMMIT without a file handle should fail. + res, err := program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "fsync", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_COMMIT{ + Opcommit: nfsv4_xdr.Commit4args{ + Offset: 10, + Count: 20, + }, + }, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "fsync", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_COMMIT{ + Opcommit: &nfsv4_xdr.Commit4res_default{ + Status: nfsv4_xdr.NFS4ERR_NOFILEHANDLE, + }, + }, + }, + Status: nfsv4_xdr.NFS4ERR_NOFILEHANDLE, + }, res) + }) + + t.Run("NotFile", func(t *testing.T) { + res, err := program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "fsync", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_PUTROOTFH{}, + &nfsv4_xdr.NfsArgop4_OP_COMMIT{ + Opcommit: nfsv4_xdr.Commit4args{ + Offset: 10, + Count: 20, + }, + }, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "fsync", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_PUTROOTFH{ + Opputrootfh: nfsv4_xdr.Putrootfh4res{ + Status: nfsv4_xdr.NFS4_OK, + }, + }, + &nfsv4_xdr.NfsResop4_OP_COMMIT{ + Opcommit: &nfsv4_xdr.Commit4res_default{ + Status: nfsv4_xdr.NFS4ERR_ISDIR, + }, + }, + }, + Status: 
nfsv4_xdr.NFS4ERR_ISDIR, + }, res) + }) + + t.Run("Success", func(t *testing.T) { + leaf := mock.NewMockVirtualLeaf(ctrl) + clock.EXPECT().Now().Return(time.Unix(1000, 0)) + handleResolverExpectCall(t, handleResolver, []byte{1, 2, 3}, nil, leaf, virtual.StatusOK) + + res, err := program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "fsync", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_PUTFH{ + Opputfh: nfsv4_xdr.Putfh4args{ + Object: nfsv4_xdr.NfsFh4{1, 2, 3}, + }, + }, + &nfsv4_xdr.NfsArgop4_OP_COMMIT{ + Opcommit: nfsv4_xdr.Commit4args{ + Offset: 10, + Count: 20, + }, + }, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "fsync", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_PUTFH{ + Opputfh: nfsv4_xdr.Putfh4res{ + Status: nfsv4_xdr.NFS4_OK, + }, + }, + &nfsv4_xdr.NfsResop4_OP_COMMIT{ + Opcommit: &nfsv4_xdr.Commit4res_NFS4_OK{ + Resok4: nfsv4_xdr.Commit4resok{ + Writeverf: rebootVerifier, + }, + }, + }, + }, + Status: nfsv4_xdr.NFS4_OK, + }, res) + }) +} + +func TestBaseProgramCompound_OP_CREATE(t *testing.T) { + ctrl, ctx := gomock.WithContext(context.Background(), t) + + rootDirectory := mock.NewMockVirtualDirectory(ctrl) + rootDirectory.EXPECT().VirtualGetAttributes(virtual.AttributesMaskFileHandle, gomock.Any()). 
+ Do(func(requested virtual.AttributesMask, attributes *virtual.Attributes) { + attributes.SetFileHandle([]byte{0x9b, 0xe9, 0x83, 0x67, 0x8d, 0x92, 0x5e, 0x62}) + }) + handleResolver := mock.NewMockHandleResolver(ctrl) + randomNumberGenerator := mock.NewMockSingleThreadedGenerator(ctrl) + rebootVerifier := nfsv4_xdr.Verifier4{0x8d, 0x3d, 0xe8, 0x2e, 0xee, 0x3b, 0xca, 0x60} + stateIDOtherPrefix := [...]byte{0x60, 0xf5, 0x56, 0x97} + clock := mock.NewMockClock(ctrl) + program := nfsv4.NewBaseProgram(rootDirectory, handleResolver.Call, randomNumberGenerator, rebootVerifier, stateIDOtherPrefix, clock, 2*time.Minute, time.Minute) + + t.Run("NoFileHandle", func(t *testing.T) { + // Calling CREATE without a file handle should fail. + res, err := program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "create", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_CREATE{ + Opcreate: nfsv4_xdr.Create4args{ + Objtype: &nfsv4_xdr.Createtype4_NF4DIR{}, + Objname: "Hello", + }, + }, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "create", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_CREATE{ + Opcreate: &nfsv4_xdr.Create4res_default{ + Status: nfsv4_xdr.NFS4ERR_NOFILEHANDLE, + }, + }, + }, + Status: nfsv4_xdr.NFS4ERR_NOFILEHANDLE, + }, res) + }) + + t.Run("NotDirectory", func(t *testing.T) { + leaf := mock.NewMockVirtualLeaf(ctrl) + clock.EXPECT().Now().Return(time.Unix(1000, 0)) + handleResolverExpectCall(t, handleResolver, []byte{1, 2, 3}, nil, leaf, virtual.StatusOK) + + res, err := program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "lookup", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_PUTFH{ + Opputfh: nfsv4_xdr.Putfh4args{ + Object: nfsv4_xdr.NfsFh4{1, 2, 3}, + }, + }, + &nfsv4_xdr.NfsArgop4_OP_CREATE{ + Opcreate: nfsv4_xdr.Create4args{ + Objtype: &nfsv4_xdr.Createtype4_NF4DIR{}, + Objname: "Hello", + }, + }, + }, + }) + require.NoError(t, err) + require.Equal(t, 
&nfsv4_xdr.Compound4res{ + Tag: "lookup", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_PUTFH{ + Opputfh: nfsv4_xdr.Putfh4res{ + Status: nfsv4_xdr.NFS4_OK, + }, + }, + &nfsv4_xdr.NfsResop4_OP_CREATE{ + Opcreate: &nfsv4_xdr.Create4res_default{ + Status: nfsv4_xdr.NFS4ERR_NOTDIR, + }, + }, + }, + Status: nfsv4_xdr.NFS4ERR_NOTDIR, + }, res) + }) + + t.Run("BadName", func(t *testing.T) { + res, err := program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "create", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_PUTROOTFH{}, + &nfsv4_xdr.NfsArgop4_OP_CREATE{ + Opcreate: nfsv4_xdr.Create4args{ + Objtype: &nfsv4_xdr.Createtype4_NF4DIR{}, + Objname: "..", + }, + }, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "create", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_PUTROOTFH{ + Opputrootfh: nfsv4_xdr.Putrootfh4res{ + Status: nfsv4_xdr.NFS4_OK, + }, + }, + &nfsv4_xdr.NfsResop4_OP_CREATE{ + Opcreate: &nfsv4_xdr.Create4res_default{ + Status: nfsv4_xdr.NFS4ERR_BADNAME, + }, + }, + }, + Status: nfsv4_xdr.NFS4ERR_BADNAME, + }, res) + }) + + t.Run("MissingName", func(t *testing.T) { + res, err := program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "create", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_PUTROOTFH{}, + &nfsv4_xdr.NfsArgop4_OP_CREATE{ + Opcreate: nfsv4_xdr.Create4args{ + Objtype: &nfsv4_xdr.Createtype4_NF4DIR{}, + }, + }, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "create", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_PUTROOTFH{ + Opputrootfh: nfsv4_xdr.Putrootfh4res{ + Status: nfsv4_xdr.NFS4_OK, + }, + }, + &nfsv4_xdr.NfsResop4_OP_CREATE{ + Opcreate: &nfsv4_xdr.Create4res_default{ + Status: nfsv4_xdr.NFS4ERR_INVAL, + }, + }, + }, + Status: nfsv4_xdr.NFS4ERR_INVAL, + }, res) + }) + + t.Run("BadType", func(t *testing.T) { + res, err := program.NfsV4Nfsproc4Compound(ctx, 
&nfsv4_xdr.Compound4args{ + Tag: "create", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_PUTROOTFH{}, + &nfsv4_xdr.NfsArgop4_OP_CREATE{ + Opcreate: nfsv4_xdr.Create4args{ + Objtype: &nfsv4_xdr.Createtype4_default{ + Type: nfsv4_xdr.NF4REG, + }, + Objname: "file", + }, + }, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "create", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_PUTROOTFH{ + Opputrootfh: nfsv4_xdr.Putrootfh4res{ + Status: nfsv4_xdr.NFS4_OK, + }, + }, + &nfsv4_xdr.NfsResop4_OP_CREATE{ + Opcreate: &nfsv4_xdr.Create4res_default{ + Status: nfsv4_xdr.NFS4ERR_BADTYPE, + }, + }, + }, + Status: nfsv4_xdr.NFS4ERR_BADTYPE, + }, res) + }) + + t.Run("SymlinkFailure", func(t *testing.T) { + rootDirectory.EXPECT().VirtualSymlink( + []byte("target"), + path.MustNewComponent("symlink"), + virtual.AttributesMaskFileHandle, + gomock.Any(), + ).Return(nil, virtual.ChangeInfo{}, virtual.StatusErrAccess) + + res, err := program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "create", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_PUTROOTFH{}, + &nfsv4_xdr.NfsArgop4_OP_CREATE{ + Opcreate: nfsv4_xdr.Create4args{ + Objtype: &nfsv4_xdr.Createtype4_NF4LNK{ + Linkdata: nfsv4_xdr.Linktext4("target"), + }, + Objname: "symlink", + }, + }, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "create", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_PUTROOTFH{ + Opputrootfh: nfsv4_xdr.Putrootfh4res{ + Status: nfsv4_xdr.NFS4_OK, + }, + }, + &nfsv4_xdr.NfsResop4_OP_CREATE{ + Opcreate: &nfsv4_xdr.Create4res_default{ + Status: nfsv4_xdr.NFS4ERR_ACCESS, + }, + }, + }, + Status: nfsv4_xdr.NFS4ERR_ACCESS, + }, res) + }) + + t.Run("SymlinkSuccess", func(t *testing.T) { + leaf := mock.NewMockVirtualLeaf(ctrl) + rootDirectory.EXPECT().VirtualSymlink( + []byte("target"), + path.MustNewComponent("symlink"), + virtual.AttributesMaskFileHandle, + 
gomock.Any(), + ).DoAndReturn(func(target []byte, name path.Component, requested virtual.AttributesMask, attributes *virtual.Attributes) (virtual.Leaf, virtual.ChangeInfo, virtual.Status) { + attributes.SetFileHandle([]byte{0xbe, 0xb7, 0xe9, 0xb1, 0xbb, 0x21, 0x9a, 0xa8}) + return leaf, virtual.ChangeInfo{ + Before: 0x803325cc21deffd8, + After: 0xa1b8abe75e185bb5, + }, virtual.StatusOK + }) + + res, err := program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "create", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_PUTROOTFH{}, + &nfsv4_xdr.NfsArgop4_OP_CREATE{ + Opcreate: nfsv4_xdr.Create4args{ + Objtype: &nfsv4_xdr.Createtype4_NF4LNK{ + Linkdata: nfsv4_xdr.Linktext4("target"), + }, + Objname: "symlink", + }, + }, + &nfsv4_xdr.NfsArgop4_OP_GETFH{}, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "create", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_PUTROOTFH{ + Opputrootfh: nfsv4_xdr.Putrootfh4res{ + Status: nfsv4_xdr.NFS4_OK, + }, + }, + &nfsv4_xdr.NfsResop4_OP_CREATE{ + Opcreate: &nfsv4_xdr.Create4res_NFS4_OK{ + Resok4: nfsv4_xdr.Create4resok{ + Cinfo: nfsv4_xdr.ChangeInfo4{ + Atomic: true, + Before: 0x803325cc21deffd8, + After: 0xa1b8abe75e185bb5, + }, + }, + }, + }, + &nfsv4_xdr.NfsResop4_OP_GETFH{ + Opgetfh: &nfsv4_xdr.Getfh4res_NFS4_OK{ + Resok4: nfsv4_xdr.Getfh4resok{ + Object: nfsv4_xdr.NfsFh4{ + 0xbe, 0xb7, 0xe9, 0xb1, 0xbb, 0x21, 0x9a, 0xa8, + }, + }, + }, + }, + }, + Status: nfsv4_xdr.NFS4_OK, + }, res) + }) + + t.Run("BlockDeviceFailure", func(t *testing.T) { + // Disallow the creation of block devices. There is no + // need for build actions to do that. 
+ res, err := program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "create", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_PUTROOTFH{}, + &nfsv4_xdr.NfsArgop4_OP_CREATE{ + Opcreate: nfsv4_xdr.Create4args{ + Objtype: &nfsv4_xdr.Createtype4_NF4BLK{ + Devdata: nfsv4_xdr.Specdata4{ + Specdata1: 8, + Specdata2: 0, + }, + }, + Objname: "sda", + }, + }, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "create", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_PUTROOTFH{ + Opputrootfh: nfsv4_xdr.Putrootfh4res{ + Status: nfsv4_xdr.NFS4_OK, + }, + }, + &nfsv4_xdr.NfsResop4_OP_CREATE{ + Opcreate: &nfsv4_xdr.Create4res_default{ + Status: nfsv4_xdr.NFS4ERR_PERM, + }, + }, + }, + Status: nfsv4_xdr.NFS4ERR_PERM, + }, res) + }) + + t.Run("CharacterDeviceFailure", func(t *testing.T) { + // Disallow the creation of character devices. There is no + // need for build actions to do that. + res, err := program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "create", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_PUTROOTFH{}, + &nfsv4_xdr.NfsArgop4_OP_CREATE{ + Opcreate: nfsv4_xdr.Create4args{ + Objtype: &nfsv4_xdr.Createtype4_NF4CHR{ + Devdata: nfsv4_xdr.Specdata4{ + Specdata1: 1, + Specdata2: 3, + }, + }, + Objname: "null", + }, + }, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "create", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_PUTROOTFH{ + Opputrootfh: nfsv4_xdr.Putrootfh4res{ + Status: nfsv4_xdr.NFS4_OK, + }, + }, + &nfsv4_xdr.NfsResop4_OP_CREATE{ + Opcreate: &nfsv4_xdr.Create4res_default{ + Status: nfsv4_xdr.NFS4ERR_PERM, + }, + }, + }, + Status: nfsv4_xdr.NFS4ERR_PERM, + }, res) + }) + + t.Run("SocketSuccess", func(t *testing.T) { + leaf := mock.NewMockVirtualLeaf(ctrl) + rootDirectory.EXPECT().VirtualMknod( + path.MustNewComponent("socket"), + filesystem.FileTypeSocket, + virtual.AttributesMaskFileHandle, + 
gomock.Any(), + ).DoAndReturn(func(name path.Component, fileType filesystem.FileType, requested virtual.AttributesMask, attributes *virtual.Attributes) (virtual.Leaf, virtual.ChangeInfo, virtual.Status) { + attributes.SetFileHandle([]byte{0xe0, 0x45, 0x9a, 0xca, 0x4f, 0x67, 0x7c, 0xaa}) + return leaf, virtual.ChangeInfo{ + Before: 0xf46dd045aaf43210, + After: 0xc687134057752dbb, + }, virtual.StatusOK + }) + + res, err := program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "create", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_PUTROOTFH{}, + &nfsv4_xdr.NfsArgop4_OP_CREATE{ + Opcreate: nfsv4_xdr.Create4args{ + Objtype: &nfsv4_xdr.Createtype4_NF4SOCK{}, + Objname: "socket", + }, + }, + &nfsv4_xdr.NfsArgop4_OP_GETFH{}, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "create", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_PUTROOTFH{ + Opputrootfh: nfsv4_xdr.Putrootfh4res{ + Status: nfsv4_xdr.NFS4_OK, + }, + }, + &nfsv4_xdr.NfsResop4_OP_CREATE{ + Opcreate: &nfsv4_xdr.Create4res_NFS4_OK{ + Resok4: nfsv4_xdr.Create4resok{ + Cinfo: nfsv4_xdr.ChangeInfo4{ + Atomic: true, + Before: 0xf46dd045aaf43210, + After: 0xc687134057752dbb, + }, + }, + }, + }, + &nfsv4_xdr.NfsResop4_OP_GETFH{ + Opgetfh: &nfsv4_xdr.Getfh4res_NFS4_OK{ + Resok4: nfsv4_xdr.Getfh4resok{ + Object: nfsv4_xdr.NfsFh4{ + 0xe0, 0x45, 0x9a, 0xca, 0x4f, 0x67, 0x7c, 0xaa, + }, + }, + }, + }, + }, + Status: nfsv4_xdr.NFS4_OK, + }, res) + }) + + t.Run("FIFOSuccess", func(t *testing.T) { + leaf := mock.NewMockVirtualLeaf(ctrl) + rootDirectory.EXPECT().VirtualMknod( + path.MustNewComponent("fifo"), + filesystem.FileTypeFIFO, + virtual.AttributesMaskFileHandle, + gomock.Any(), + ).DoAndReturn(func(name path.Component, fileType filesystem.FileType, requested virtual.AttributesMask, attributes *virtual.Attributes) (virtual.Leaf, virtual.ChangeInfo, virtual.Status) { + attributes.SetFileHandle([]byte{0x73, 0x9c, 0x31, 0x40, 0x63, 0x49, 
0xbb, 0x09}) + return leaf, virtual.ChangeInfo{ + Before: 0x1e80315f7745fc50, + After: 0xe280a823543ce5ac, + }, virtual.StatusOK + }) + + res, err := program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "create", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_PUTROOTFH{}, + &nfsv4_xdr.NfsArgop4_OP_CREATE{ + Opcreate: nfsv4_xdr.Create4args{ + Objtype: &nfsv4_xdr.Createtype4_NF4FIFO{}, + Objname: "fifo", + }, + }, + &nfsv4_xdr.NfsArgop4_OP_GETFH{}, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "create", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_PUTROOTFH{ + Opputrootfh: nfsv4_xdr.Putrootfh4res{ + Status: nfsv4_xdr.NFS4_OK, + }, + }, + &nfsv4_xdr.NfsResop4_OP_CREATE{ + Opcreate: &nfsv4_xdr.Create4res_NFS4_OK{ + Resok4: nfsv4_xdr.Create4resok{ + Cinfo: nfsv4_xdr.ChangeInfo4{ + Atomic: true, + Before: 0x1e80315f7745fc50, + After: 0xe280a823543ce5ac, + }, + }, + }, + }, + &nfsv4_xdr.NfsResop4_OP_GETFH{ + Opgetfh: &nfsv4_xdr.Getfh4res_NFS4_OK{ + Resok4: nfsv4_xdr.Getfh4resok{ + Object: nfsv4_xdr.NfsFh4{ + 0x73, 0x9c, 0x31, 0x40, 0x63, 0x49, 0xbb, 0x09, + }, + }, + }, + }, + }, + Status: nfsv4_xdr.NFS4_OK, + }, res) + }) + + t.Run("DirectorySuccess", func(t *testing.T) { + directory := mock.NewMockVirtualDirectory(ctrl) + rootDirectory.EXPECT().VirtualMkdir( + path.MustNewComponent("dir"), + virtual.AttributesMaskFileHandle, + gomock.Any(), + ).DoAndReturn(func(name path.Component, requested virtual.AttributesMask, attributes *virtual.Attributes) (virtual.Directory, virtual.ChangeInfo, virtual.Status) { + attributes.SetFileHandle([]byte{0x19, 0xe5, 0x26, 0x1b, 0xee, 0x25, 0x4a, 0x76}) + return directory, virtual.ChangeInfo{ + Before: 0x60a4a64a5af2116f, + After: 0x58e160960c2d0339, + }, virtual.StatusOK + }) + + res, err := program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "create", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_PUTROOTFH{}, + 
&nfsv4_xdr.NfsArgop4_OP_CREATE{ + Opcreate: nfsv4_xdr.Create4args{ + Objtype: &nfsv4_xdr.Createtype4_NF4DIR{}, + Objname: "dir", + }, + }, + &nfsv4_xdr.NfsArgop4_OP_GETFH{}, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "create", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_PUTROOTFH{ + Opputrootfh: nfsv4_xdr.Putrootfh4res{ + Status: nfsv4_xdr.NFS4_OK, + }, + }, + &nfsv4_xdr.NfsResop4_OP_CREATE{ + Opcreate: &nfsv4_xdr.Create4res_NFS4_OK{ + Resok4: nfsv4_xdr.Create4resok{ + Cinfo: nfsv4_xdr.ChangeInfo4{ + Atomic: true, + Before: 0x60a4a64a5af2116f, + After: 0x58e160960c2d0339, + }, + }, + }, + }, + &nfsv4_xdr.NfsResop4_OP_GETFH{ + Opgetfh: &nfsv4_xdr.Getfh4res_NFS4_OK{ + Resok4: nfsv4_xdr.Getfh4resok{ + Object: nfsv4_xdr.NfsFh4{ + 0x19, 0xe5, 0x26, 0x1b, 0xee, 0x25, 0x4a, 0x76, + }, + }, + }, + }, + }, + Status: nfsv4_xdr.NFS4_OK, + }, res) + }) +} + +func TestBaseProgramCompound_OP_DELEGPURGE(t *testing.T) { + ctrl, ctx := gomock.WithContext(context.Background(), t) + + rootDirectory := mock.NewMockVirtualDirectory(ctrl) + rootDirectory.EXPECT().VirtualGetAttributes(virtual.AttributesMaskFileHandle, gomock.Any()). + Do(func(requested virtual.AttributesMask, attributes *virtual.Attributes) { + attributes.SetFileHandle([]byte{0x45, 0x22, 0xbb, 0xf6, 0xf0, 0x61, 0x71, 0x6d}) + }) + handleResolver := mock.NewMockHandleResolver(ctrl) + randomNumberGenerator := mock.NewMockSingleThreadedGenerator(ctrl) + rebootVerifier := nfsv4_xdr.Verifier4{0x0b, 0xb3, 0x0d, 0xa3, 0x50, 0x11, 0x6b, 0x38} + stateIDOtherPrefix := [...]byte{0x17, 0x18, 0x71, 0xc6} + clock := mock.NewMockClock(ctrl) + program := nfsv4.NewBaseProgram(rootDirectory, handleResolver.Call, randomNumberGenerator, rebootVerifier, stateIDOtherPrefix, clock, 2*time.Minute, time.Minute) + + t.Run("NotSupported", func(t *testing.T) { + // As we don't support CLAIM_DELEGATE_PREV, this method + // is required to return NFS4ERR_NOTSUPP. 
+ res, err := program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "stat", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_DELEGPURGE{ + Opdelegpurge: nfsv4_xdr.Delegpurge4args{ + Clientid: 0xc08f7e033702ee2c, + }, + }, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "stat", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_DELEGPURGE{ + Opdelegpurge: nfsv4_xdr.Delegpurge4res{ + Status: nfsv4_xdr.NFS4ERR_NOTSUPP, + }, + }, + }, + Status: nfsv4_xdr.NFS4ERR_NOTSUPP, + }, res) + }) +} + +// TODO: DELEGRETURN + +func TestBaseProgramCompound_OP_GETATTR(t *testing.T) { + ctrl, ctx := gomock.WithContext(context.Background(), t) + + rootDirectory := mock.NewMockVirtualDirectory(ctrl) + rootDirectory.EXPECT().VirtualGetAttributes(virtual.AttributesMaskFileHandle, gomock.Any()). + Do(func(requested virtual.AttributesMask, attributes *virtual.Attributes) { + attributes.SetFileHandle([]byte{0x9b, 0x51, 0x40, 0x9b, 0x8c, 0x7a, 0x54, 0x47}) + }) + handleResolver := mock.NewMockHandleResolver(ctrl) + randomNumberGenerator := mock.NewMockSingleThreadedGenerator(ctrl) + rebootVerifier := nfsv4_xdr.Verifier4{0x5e, 0x5f, 0xfe, 0x34, 0x05, 0x98, 0x9d, 0xf1} + stateIDOtherPrefix := [...]byte{0x3d, 0xc0, 0x5d, 0xd2} + clock := mock.NewMockClock(ctrl) + program := nfsv4.NewBaseProgram(rootDirectory, handleResolver.Call, randomNumberGenerator, rebootVerifier, stateIDOtherPrefix, clock, 2*time.Minute, time.Minute) + + t.Run("NoFileHandle", func(t *testing.T) { + // Calling GETATTR without a file handle should fail. 
+ res, err := program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "stat", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_GETATTR{ + Opgetattr: nfsv4_xdr.Getattr4args{ + AttrRequest: nfsv4_xdr.Bitmap4{ + (1 << nfsv4_xdr.FATTR4_TYPE) | + (1 << nfsv4_xdr.FATTR4_FILEID), + }, + }, + }, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "stat", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_GETATTR{ + Opgetattr: &nfsv4_xdr.Getattr4res_default{ + Status: nfsv4_xdr.NFS4ERR_NOFILEHANDLE, + }, + }, + }, + Status: nfsv4_xdr.NFS4ERR_NOFILEHANDLE, + }, res) + }) + + t.Run("NoAttributes", func(t *testing.T) { + // Request absolutely no attributes. + rootDirectory.EXPECT().VirtualGetAttributes(virtual.AttributesMask(0), gomock.Any()) + + res, err := program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "stat", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_PUTROOTFH{}, + &nfsv4_xdr.NfsArgop4_OP_GETATTR{}, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "stat", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_PUTROOTFH{ + Opputrootfh: nfsv4_xdr.Putrootfh4res{ + Status: nfsv4_xdr.NFS4_OK, + }, + }, + &nfsv4_xdr.NfsResop4_OP_GETATTR{ + Opgetattr: &nfsv4_xdr.Getattr4res_NFS4_OK{ + Resok4: nfsv4_xdr.Getattr4resok{ + ObjAttributes: nfsv4_xdr.Fattr4{ + Attrmask: nfsv4_xdr.Bitmap4{}, + }, + }, + }, + }, + }, + Status: nfsv4_xdr.NFS4_OK, + }, res) + }) + + t.Run("AllAttributes", func(t *testing.T) { + // Request all supported attributes. 
+ rootDirectory.EXPECT().VirtualGetAttributes( + virtual.AttributesMaskChangeID|virtual.AttributesMaskFileHandle|virtual.AttributesMaskFileType|virtual.AttributesMaskInodeNumber|virtual.AttributesMaskLinkCount|virtual.AttributesMaskPermissions|virtual.AttributesMaskSizeBytes, + gomock.Any(), + ).Do(func(requested virtual.AttributesMask, attributes *virtual.Attributes) { + attributes.SetChangeID(0xeaab7253dad16ee5) + attributes.SetFileHandle([]byte{0xcd, 0xe9, 0xc7, 0x4c, 0x8b, 0x8d, 0x58, 0xef, 0xd9, 0x9f}) + attributes.SetFileType(filesystem.FileTypeDirectory) + attributes.SetInodeNumber(0xfcadd45521cb1db2) + attributes.SetLinkCount(12) + attributes.SetPermissions(virtual.PermissionsRead | virtual.PermissionsExecute) + attributes.SetSizeBytes(8192) + }) + + res, err := program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "stat", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_PUTROOTFH{}, + &nfsv4_xdr.NfsArgop4_OP_GETATTR{ + Opgetattr: nfsv4_xdr.Getattr4args{ + AttrRequest: nfsv4_xdr.Bitmap4{ + (1 << nfsv4_xdr.FATTR4_SUPPORTED_ATTRS) | + (1 << nfsv4_xdr.FATTR4_TYPE) | + (1 << nfsv4_xdr.FATTR4_FH_EXPIRE_TYPE) | + (1 << nfsv4_xdr.FATTR4_CHANGE) | + (1 << nfsv4_xdr.FATTR4_SIZE) | + (1 << nfsv4_xdr.FATTR4_LINK_SUPPORT) | + (1 << nfsv4_xdr.FATTR4_SYMLINK_SUPPORT) | + (1 << nfsv4_xdr.FATTR4_NAMED_ATTR) | + (1 << nfsv4_xdr.FATTR4_FSID) | + (1 << nfsv4_xdr.FATTR4_UNIQUE_HANDLES) | + (1 << nfsv4_xdr.FATTR4_LEASE_TIME) | + (1 << nfsv4_xdr.FATTR4_FILEHANDLE) | + (1 << nfsv4_xdr.FATTR4_FILEID), + (1 << (nfsv4_xdr.FATTR4_MODE - 32)) | + (1 << (nfsv4_xdr.FATTR4_NUMLINKS - 32)) | + (1 << (nfsv4_xdr.FATTR4_TIME_ACCESS - 32)) | + (1 << (nfsv4_xdr.FATTR4_TIME_METADATA - 32)) | + (1 << (nfsv4_xdr.FATTR4_TIME_MODIFY - 32)), + }, + }, + }, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "stat", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_PUTROOTFH{ + Opputrootfh: nfsv4_xdr.Putrootfh4res{ + Status: 
nfsv4_xdr.NFS4_OK, + }, + }, + &nfsv4_xdr.NfsResop4_OP_GETATTR{ + Opgetattr: &nfsv4_xdr.Getattr4res_NFS4_OK{ + Resok4: nfsv4_xdr.Getattr4resok{ + ObjAttributes: nfsv4_xdr.Fattr4{ + Attrmask: nfsv4_xdr.Bitmap4{ + (1 << nfsv4_xdr.FATTR4_SUPPORTED_ATTRS) | + (1 << nfsv4_xdr.FATTR4_TYPE) | + (1 << nfsv4_xdr.FATTR4_FH_EXPIRE_TYPE) | + (1 << nfsv4_xdr.FATTR4_CHANGE) | + (1 << nfsv4_xdr.FATTR4_SIZE) | + (1 << nfsv4_xdr.FATTR4_LINK_SUPPORT) | + (1 << nfsv4_xdr.FATTR4_SYMLINK_SUPPORT) | + (1 << nfsv4_xdr.FATTR4_NAMED_ATTR) | + (1 << nfsv4_xdr.FATTR4_FSID) | + (1 << nfsv4_xdr.FATTR4_UNIQUE_HANDLES) | + (1 << nfsv4_xdr.FATTR4_LEASE_TIME) | + (1 << nfsv4_xdr.FATTR4_FILEHANDLE) | + (1 << nfsv4_xdr.FATTR4_FILEID), + (1 << (nfsv4_xdr.FATTR4_MODE - 32)) | + (1 << (nfsv4_xdr.FATTR4_NUMLINKS - 32)) | + (1 << (nfsv4_xdr.FATTR4_TIME_ACCESS - 32)) | + (1 << (nfsv4_xdr.FATTR4_TIME_METADATA - 32)) | + (1 << (nfsv4_xdr.FATTR4_TIME_MODIFY - 32)), + }, + AttrVals: nfsv4_xdr.Attrlist4{ + // FATTR4_SUPPORTED_ATTRS. + 0x00, 0x00, 0x00, 0x02, + 0x00, 0x18, 0x0f, 0xff, + 0x00, 0x30, 0x80, 0x0a, + // FATTR4_TYPE == NF4DIR. + 0x00, 0x00, 0x00, 0x02, + // FATTR4_FH_EXPIRE_TYPE == FH4_PERSISTENT. + 0x00, 0x00, 0x00, 0x00, + // FATTR4_CHANGE. + 0xea, 0xab, 0x72, 0x53, 0xda, 0xd1, 0x6e, 0xe5, + // FATTR4_SIZE. + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0x00, + // FATTR4_LINK_SUPPORT == TRUE. + 0x00, 0x00, 0x00, 0x01, + // FATTR4_SYMLINK_SUPPORT == TRUE. + 0x00, 0x00, 0x00, 0x01, + // FATTR4_NAMED_ATTR == FALSE. + 0x00, 0x00, 0x00, 0x00, + // FATTR4_FSID. + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, + // FATTR4_UNIQUE_HANDLES == TRUE. + 0x00, 0x00, 0x00, 0x01, + // FATTR4_LEASE_TIME == 60 seconds. + 0x00, 0x00, 0x00, 0x3c, + // FATTR4_FILEHANDLE. + 0x00, 0x00, 0x00, 0x0a, + 0xcd, 0xe9, 0xc7, 0x4c, 0x8b, 0x8d, 0x58, 0xef, 0xd9, 0x9f, 0x00, 0x00, + // FATTR4_FILEID. + 0xfc, 0xad, 0xd4, 0x55, 0x21, 0xcb, 0x1d, 0xb2, + // FATTR4_MODE. 
+ 0x00, 0x00, 0x01, 0x6d, + // FATTR4_NUMLINKS. + 0x00, 0x00, 0x00, 0x0c, + // FATTR4_TIME_ACCESS == 2000-01-01T00:00:00Z. + 0x00, 0x00, 0x00, 0x00, 0x38, 0x6d, 0x43, 0x80, + 0x00, 0x00, 0x00, 0x00, + // FATTR4_TIME_METADATA == 2000-01-01T00:00:00Z. + 0x00, 0x00, 0x00, 0x00, 0x38, 0x6d, 0x43, 0x80, + 0x00, 0x00, 0x00, 0x00, + // FATTR4_TIME_MODIFY == 2000-01-01T00:00:00Z. + 0x00, 0x00, 0x00, 0x00, 0x38, 0x6d, 0x43, 0x80, + 0x00, 0x00, 0x00, 0x00, + }, + }, + }, + }, + }, + }, + Status: nfsv4_xdr.NFS4_OK, + }, res) + }) +} + +func TestBaseProgramCompound_OP_GETFH(t *testing.T) { + ctrl, ctx := gomock.WithContext(context.Background(), t) + + rootDirectory := mock.NewMockVirtualDirectory(ctrl) + rootDirectory.EXPECT().VirtualGetAttributes(virtual.AttributesMaskFileHandle, gomock.Any()). + Do(func(requested virtual.AttributesMask, attributes *virtual.Attributes) { + attributes.SetFileHandle([]byte{0x85, 0xc5, 0x54, 0x77, 0x90, 0x7c, 0xf1, 0xf9}) + }) + handleResolver := mock.NewMockHandleResolver(ctrl) + randomNumberGenerator := mock.NewMockSingleThreadedGenerator(ctrl) + rebootVerifier := nfsv4_xdr.Verifier4{0x3c, 0x79, 0xba, 0xfe, 0xd6, 0x87, 0x1e, 0x32} + stateIDOtherPrefix := [...]byte{0x95, 0xce, 0xb4, 0x96} + clock := mock.NewMockClock(ctrl) + program := nfsv4.NewBaseProgram(rootDirectory, handleResolver.Call, randomNumberGenerator, rebootVerifier, stateIDOtherPrefix, clock, 2*time.Minute, time.Minute) + + t.Run("NoFileHandle", func(t *testing.T) { + // Calling GETFH without a file handle should fail. 
+ res, err := program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "getfh", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_GETFH{}, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "getfh", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_GETFH{ + Opgetfh: &nfsv4_xdr.Getfh4res_default{ + Status: nfsv4_xdr.NFS4ERR_NOFILEHANDLE, + }, + }, + }, + Status: nfsv4_xdr.NFS4ERR_NOFILEHANDLE, + }, res) + }) + + t.Run("Success", func(t *testing.T) { + res, err := program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "getfh", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_PUTROOTFH{}, + &nfsv4_xdr.NfsArgop4_OP_GETFH{}, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "getfh", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_PUTROOTFH{ + Opputrootfh: nfsv4_xdr.Putrootfh4res{ + Status: nfsv4_xdr.NFS4_OK, + }, + }, + &nfsv4_xdr.NfsResop4_OP_GETFH{ + Opgetfh: &nfsv4_xdr.Getfh4res_NFS4_OK{ + Resok4: nfsv4_xdr.Getfh4resok{ + Object: nfsv4_xdr.NfsFh4{ + 0x85, 0xc5, 0x54, 0x77, 0x90, 0x7c, 0xf1, 0xf9, + }, + }, + }, + }, + }, + Status: nfsv4_xdr.NFS4_OK, + }, res) + }) +} + +func TestBaseProgramCompound_OP_ILLEGAL(t *testing.T) { + ctrl, ctx := gomock.WithContext(context.Background(), t) + + rootDirectory := mock.NewMockVirtualDirectory(ctrl) + rootDirectory.EXPECT().VirtualGetAttributes(virtual.AttributesMaskFileHandle, gomock.Any()). 
+ Do(func(requested virtual.AttributesMask, attributes *virtual.Attributes) { + attributes.SetFileHandle([]byte{0x0e, 0xad, 0xf1, 0x83, 0xb1, 0xc0, 0xfc, 0x6f}) + }) + handleResolver := mock.NewMockHandleResolver(ctrl) + randomNumberGenerator := mock.NewMockSingleThreadedGenerator(ctrl) + rebootVerifier := nfsv4_xdr.Verifier4{0x42, 0x51, 0x65, 0x8b, 0xd2, 0x27, 0xc4, 0x13} + stateIDOtherPrefix := [...]byte{0x01, 0x22, 0xe2, 0xaa} + clock := mock.NewMockClock(ctrl) + program := nfsv4.NewBaseProgram(rootDirectory, handleResolver.Call, randomNumberGenerator, rebootVerifier, stateIDOtherPrefix, clock, 2*time.Minute, time.Minute) + + t.Run("Failure", func(t *testing.T) { + res, err := program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "illegal", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_ILLEGAL{}, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "illegal", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_ILLEGAL{ + Opillegal: nfsv4_xdr.Illegal4res{ + Status: nfsv4_xdr.NFS4ERR_OP_ILLEGAL, + }, + }, + }, + Status: nfsv4_xdr.NFS4ERR_OP_ILLEGAL, + }, res) + }) +} + +func TestBaseProgramCompound_OP_LINK(t *testing.T) { + ctrl, ctx := gomock.WithContext(context.Background(), t) + + rootDirectory := mock.NewMockVirtualDirectory(ctrl) + rootDirectory.EXPECT().VirtualGetAttributes(virtual.AttributesMaskFileHandle, gomock.Any()). 
+ Do(func(requested virtual.AttributesMask, attributes *virtual.Attributes) { + attributes.SetFileHandle([]byte{0x27, 0xec, 0x12, 0x85, 0xcb, 0x2d, 0x57, 0xe2}) + }) + handleResolver := mock.NewMockHandleResolver(ctrl) + randomNumberGenerator := mock.NewMockSingleThreadedGenerator(ctrl) + rebootVerifier := nfsv4_xdr.Verifier4{0x8d, 0x94, 0x96, 0x9c, 0xe9, 0x4b, 0xcf, 0xf5} + stateIDOtherPrefix := [...]byte{0xdf, 0xdb, 0x0d, 0x38} + clock := mock.NewMockClock(ctrl) + program := nfsv4.NewBaseProgram(rootDirectory, handleResolver.Call, randomNumberGenerator, rebootVerifier, stateIDOtherPrefix, clock, 2*time.Minute, time.Minute) + + t.Run("NoFileHandle1", func(t *testing.T) { + // Calling LINK without any file handles should fail. + res, err := program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "link", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_LINK{ + Oplink: nfsv4_xdr.Link4args{ + Newname: "Hello", + }, + }, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "link", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_LINK{ + Oplink: &nfsv4_xdr.Link4res_default{ + Status: nfsv4_xdr.NFS4ERR_NOFILEHANDLE, + }, + }, + }, + Status: nfsv4_xdr.NFS4ERR_NOFILEHANDLE, + }, res) + }) + + t.Run("NoFileHandle2", func(t *testing.T) { + // Calling LINK without a saved file handle should fail. 
+ res, err := program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "link", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_PUTROOTFH{}, + &nfsv4_xdr.NfsArgop4_OP_LINK{ + Oplink: nfsv4_xdr.Link4args{ + Newname: "Hello", + }, + }, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "link", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_PUTROOTFH{ + Opputrootfh: nfsv4_xdr.Putrootfh4res{ + Status: nfsv4_xdr.NFS4_OK, + }, + }, + &nfsv4_xdr.NfsResop4_OP_LINK{ + Oplink: &nfsv4_xdr.Link4res_default{ + Status: nfsv4_xdr.NFS4ERR_NOFILEHANDLE, + }, + }, + }, + Status: nfsv4_xdr.NFS4ERR_NOFILEHANDLE, + }, res) + }) + + t.Run("BadName", func(t *testing.T) { + // Calling LINK with a bad filename should fail. + leaf := mock.NewMockVirtualLeaf(ctrl) + clock.EXPECT().Now().Return(time.Unix(1000, 0)) + handleResolverExpectCall(t, handleResolver, []byte{0x62, 0xfc, 0x0c, 0x8c, 0x94, 0x86, 0x8d, 0xc7}, nil, leaf, virtual.StatusOK) + + res, err := program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "link", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_PUTFH{ + Opputfh: nfsv4_xdr.Putfh4args{ + Object: nfsv4_xdr.NfsFh4{ + 0x62, 0xfc, 0x0c, 0x8c, 0x94, 0x86, 0x8d, 0xc7, + }, + }, + }, + &nfsv4_xdr.NfsArgop4_OP_SAVEFH{}, + &nfsv4_xdr.NfsArgop4_OP_PUTROOTFH{}, + &nfsv4_xdr.NfsArgop4_OP_LINK{ + Oplink: nfsv4_xdr.Link4args{ + Newname: "..", + }, + }, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "link", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_PUTFH{ + Opputfh: nfsv4_xdr.Putfh4res{ + Status: nfsv4_xdr.NFS4_OK, + }, + }, + &nfsv4_xdr.NfsResop4_OP_SAVEFH{ + Opsavefh: nfsv4_xdr.Savefh4res{ + Status: nfsv4_xdr.NFS4_OK, + }, + }, + &nfsv4_xdr.NfsResop4_OP_PUTROOTFH{ + Opputrootfh: nfsv4_xdr.Putrootfh4res{ + Status: nfsv4_xdr.NFS4_OK, + }, + }, + &nfsv4_xdr.NfsResop4_OP_LINK{ + Oplink: &nfsv4_xdr.Link4res_default{ + Status: 
nfsv4_xdr.NFS4ERR_BADNAME, + }, + }, + }, + Status: nfsv4_xdr.NFS4ERR_BADNAME, + }, res) + }) + + t.Run("MissingName", func(t *testing.T) { + // Calling LINK with a name of length zero should fail + // with NFS4ERR_INVAL. + leaf := mock.NewMockVirtualLeaf(ctrl) + clock.EXPECT().Now().Return(time.Unix(1001, 0)) + handleResolverExpectCall(t, handleResolver, []byte{0x62, 0xfc, 0x0c, 0x8c, 0x94, 0x86, 0x8d, 0xc7}, nil, leaf, virtual.StatusOK) + + res, err := program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "link", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_PUTFH{ + Opputfh: nfsv4_xdr.Putfh4args{ + Object: nfsv4_xdr.NfsFh4{ + 0x62, 0xfc, 0x0c, 0x8c, 0x94, 0x86, 0x8d, 0xc7, + }, + }, + }, + &nfsv4_xdr.NfsArgop4_OP_SAVEFH{}, + &nfsv4_xdr.NfsArgop4_OP_PUTROOTFH{}, + &nfsv4_xdr.NfsArgop4_OP_LINK{}, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "link", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_PUTFH{ + Opputfh: nfsv4_xdr.Putfh4res{ + Status: nfsv4_xdr.NFS4_OK, + }, + }, + &nfsv4_xdr.NfsResop4_OP_SAVEFH{ + Opsavefh: nfsv4_xdr.Savefh4res{ + Status: nfsv4_xdr.NFS4_OK, + }, + }, + &nfsv4_xdr.NfsResop4_OP_PUTROOTFH{ + Opputrootfh: nfsv4_xdr.Putrootfh4res{ + Status: nfsv4_xdr.NFS4_OK, + }, + }, + &nfsv4_xdr.NfsResop4_OP_LINK{ + Oplink: &nfsv4_xdr.Link4res_default{ + Status: nfsv4_xdr.NFS4ERR_INVAL, + }, + }, + }, + Status: nfsv4_xdr.NFS4ERR_INVAL, + }, res) + }) + + t.Run("SourceIsDirectory", func(t *testing.T) { + // Calling LINK with a directory as a source object should fail. 
+ directory := mock.NewMockVirtualDirectory(ctrl) + clock.EXPECT().Now().Return(time.Unix(1002, 0)) + handleResolverExpectCall(t, handleResolver, []byte{0x92, 0xcc, 0xd9, 0x59, 0xef, 0xf3, 0xef, 0x0a}, directory, nil, virtual.StatusOK) + + res, err := program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "link", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_PUTFH{ + Opputfh: nfsv4_xdr.Putfh4args{ + Object: nfsv4_xdr.NfsFh4{ + 0x92, 0xcc, 0xd9, 0x59, 0xef, 0xf3, 0xef, 0x0a, + }, + }, + }, + &nfsv4_xdr.NfsArgop4_OP_SAVEFH{}, + &nfsv4_xdr.NfsArgop4_OP_PUTROOTFH{}, + &nfsv4_xdr.NfsArgop4_OP_LINK{ + Oplink: nfsv4_xdr.Link4args{ + Newname: "Hello", + }, + }, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "link", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_PUTFH{ + Opputfh: nfsv4_xdr.Putfh4res{ + Status: nfsv4_xdr.NFS4_OK, + }, + }, + &nfsv4_xdr.NfsResop4_OP_SAVEFH{ + Opsavefh: nfsv4_xdr.Savefh4res{ + Status: nfsv4_xdr.NFS4_OK, + }, + }, + &nfsv4_xdr.NfsResop4_OP_PUTROOTFH{ + Opputrootfh: nfsv4_xdr.Putrootfh4res{ + Status: nfsv4_xdr.NFS4_OK, + }, + }, + &nfsv4_xdr.NfsResop4_OP_LINK{ + Oplink: &nfsv4_xdr.Link4res_default{ + Status: nfsv4_xdr.NFS4ERR_ISDIR, + }, + }, + }, + Status: nfsv4_xdr.NFS4ERR_ISDIR, + }, res) + }) + + t.Run("LinkCreationFailure", func(t *testing.T) { + // All arguments are correct, but the underlying + // directory does not allow the link to be created. 
+ leaf := mock.NewMockVirtualLeaf(ctrl) + clock.EXPECT().Now().Return(time.Unix(1003, 0)) + handleResolverExpectCall(t, handleResolver, []byte{0x98, 0x55, 0x2f, 0xf4, 0x06, 0xa1, 0xea, 0xbd}, nil, leaf, virtual.StatusOK) + rootDirectory.EXPECT().VirtualLink( + path.MustNewComponent("Hello"), + leaf, + virtual.AttributesMask(0), + gomock.Any(), + ).Return(virtual.ChangeInfo{}, virtual.StatusErrXDev) + + res, err := program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "link", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_PUTFH{ + Opputfh: nfsv4_xdr.Putfh4args{ + Object: nfsv4_xdr.NfsFh4{ + 0x98, 0x55, 0x2f, 0xf4, 0x06, 0xa1, 0xea, 0xbd, + }, + }, + }, + &nfsv4_xdr.NfsArgop4_OP_SAVEFH{}, + &nfsv4_xdr.NfsArgop4_OP_PUTROOTFH{}, + &nfsv4_xdr.NfsArgop4_OP_LINK{ + Oplink: nfsv4_xdr.Link4args{ + Newname: "Hello", + }, + }, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "link", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_PUTFH{ + Opputfh: nfsv4_xdr.Putfh4res{ + Status: nfsv4_xdr.NFS4_OK, + }, + }, + &nfsv4_xdr.NfsResop4_OP_SAVEFH{ + Opsavefh: nfsv4_xdr.Savefh4res{ + Status: nfsv4_xdr.NFS4_OK, + }, + }, + &nfsv4_xdr.NfsResop4_OP_PUTROOTFH{ + Opputrootfh: nfsv4_xdr.Putrootfh4res{ + Status: nfsv4_xdr.NFS4_OK, + }, + }, + &nfsv4_xdr.NfsResop4_OP_LINK{ + Oplink: &nfsv4_xdr.Link4res_default{ + Status: nfsv4_xdr.NFS4ERR_XDEV, + }, + }, + }, + Status: nfsv4_xdr.NFS4ERR_XDEV, + }, res) + }) + + t.Run("Success", func(t *testing.T) { + leaf := mock.NewMockVirtualLeaf(ctrl) + clock.EXPECT().Now().Return(time.Unix(1004, 0)) + handleResolverExpectCall(t, handleResolver, []byte{0x98, 0x55, 0x2f, 0xf4, 0x06, 0xa1, 0xea, 0xbd}, nil, leaf, virtual.StatusOK) + rootDirectory.EXPECT().VirtualLink( + path.MustNewComponent("Hello"), + leaf, + virtual.AttributesMask(0), + gomock.Any(), + ).Return(virtual.ChangeInfo{ + Before: 0x6eee6c2bf6db7101, + After: 0x5d2447d9e6bec4b8, + }, virtual.StatusOK) + + res, err := 
program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "link", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_PUTFH{ + Opputfh: nfsv4_xdr.Putfh4args{ + Object: nfsv4_xdr.NfsFh4{ + 0x98, 0x55, 0x2f, 0xf4, 0x06, 0xa1, 0xea, 0xbd, + }, + }, + }, + &nfsv4_xdr.NfsArgop4_OP_SAVEFH{}, + &nfsv4_xdr.NfsArgop4_OP_PUTROOTFH{}, + &nfsv4_xdr.NfsArgop4_OP_LINK{ + Oplink: nfsv4_xdr.Link4args{ + Newname: "Hello", + }, + }, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "link", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_PUTFH{ + Opputfh: nfsv4_xdr.Putfh4res{ + Status: nfsv4_xdr.NFS4_OK, + }, + }, + &nfsv4_xdr.NfsResop4_OP_SAVEFH{ + Opsavefh: nfsv4_xdr.Savefh4res{ + Status: nfsv4_xdr.NFS4_OK, + }, + }, + &nfsv4_xdr.NfsResop4_OP_PUTROOTFH{ + Opputrootfh: nfsv4_xdr.Putrootfh4res{ + Status: nfsv4_xdr.NFS4_OK, + }, + }, + &nfsv4_xdr.NfsResop4_OP_LINK{ + Oplink: &nfsv4_xdr.Link4res_NFS4_OK{ + Resok4: nfsv4_xdr.Link4resok{ + Cinfo: nfsv4_xdr.ChangeInfo4{ + Atomic: true, + Before: 0x6eee6c2bf6db7101, + After: 0x5d2447d9e6bec4b8, + }, + }, + }, + }, + }, + Status: nfsv4_xdr.NFS4_OK, + }, res) + }) +} + +func TestBaseProgramCompound_OP_LOOKUP(t *testing.T) { + ctrl, ctx := gomock.WithContext(context.Background(), t) + + rootDirectory := mock.NewMockVirtualDirectory(ctrl) + rootDirectory.EXPECT().VirtualGetAttributes(virtual.AttributesMaskFileHandle, gomock.Any()). 
+ Do(func(requested virtual.AttributesMask, attributes *virtual.Attributes) { + attributes.SetFileHandle([]byte{0x5a, 0x8a, 0xf7, 0x7b, 0x6f, 0x5e, 0xbc, 0xff}) + }) + handleResolver := mock.NewMockHandleResolver(ctrl) + randomNumberGenerator := mock.NewMockSingleThreadedGenerator(ctrl) + rebootVerifier := nfsv4_xdr.Verifier4{0xf5, 0x66, 0xea, 0xae, 0x76, 0x70, 0xd1, 0x5b} + stateIDOtherPrefix := [...]byte{0x2d, 0x48, 0xd3, 0x9b} + clock := mock.NewMockClock(ctrl) + program := nfsv4.NewBaseProgram(rootDirectory, handleResolver.Call, randomNumberGenerator, rebootVerifier, stateIDOtherPrefix, clock, 2*time.Minute, time.Minute) + + t.Run("NoFileHandle", func(t *testing.T) { + // Calling LOOKUP without a file handle should fail. + res, err := program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "lookup", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_LOOKUP{ + Oplookup: nfsv4_xdr.Lookup4args{ + Objname: "Hello", + }, + }, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "lookup", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_LOOKUP{ + Oplookup: nfsv4_xdr.Lookup4res{ + Status: nfsv4_xdr.NFS4ERR_NOFILEHANDLE, + }, + }, + }, + Status: nfsv4_xdr.NFS4ERR_NOFILEHANDLE, + }, res) + }) + + t.Run("BadName", func(t *testing.T) { + res, err := program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "lookup", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_PUTROOTFH{}, + &nfsv4_xdr.NfsArgop4_OP_LOOKUP{ + Oplookup: nfsv4_xdr.Lookup4args{ + Objname: "..", + }, + }, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "lookup", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_PUTROOTFH{ + Opputrootfh: nfsv4_xdr.Putrootfh4res{ + Status: nfsv4_xdr.NFS4_OK, + }, + }, + &nfsv4_xdr.NfsResop4_OP_LOOKUP{ + Oplookup: nfsv4_xdr.Lookup4res{ + Status: nfsv4_xdr.NFS4ERR_BADNAME, + }, + }, + }, + Status: nfsv4_xdr.NFS4ERR_BADNAME, + }, res) + }) 
+ + t.Run("MissingName", func(t *testing.T) { + res, err := program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "lookup", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_PUTROOTFH{}, + &nfsv4_xdr.NfsArgop4_OP_LOOKUP{}, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "lookup", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_PUTROOTFH{ + Opputrootfh: nfsv4_xdr.Putrootfh4res{ + Status: nfsv4_xdr.NFS4_OK, + }, + }, + &nfsv4_xdr.NfsResop4_OP_LOOKUP{ + Oplookup: nfsv4_xdr.Lookup4res{ + Status: nfsv4_xdr.NFS4ERR_INVAL, + }, + }, + }, + Status: nfsv4_xdr.NFS4ERR_INVAL, + }, res) + }) + + t.Run("NotDirectory", func(t *testing.T) { + // When called against files other than symbolic links, + // LOOKUP should return NFS4ERR_NOTDIR. + leaf := mock.NewMockVirtualLeaf(ctrl) + clock.EXPECT().Now().Return(time.Unix(1000, 0)) + handleResolverExpectCall(t, handleResolver, []byte{1, 2, 3}, nil, leaf, virtual.StatusOK) + leaf.EXPECT().VirtualGetAttributes(virtual.AttributesMaskFileType, gomock.Any()). 
+ Do(func(requested virtual.AttributesMask, attributes *virtual.Attributes) { + attributes.SetFileType(filesystem.FileTypeRegularFile) + }) + + res, err := program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "lookup", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_PUTFH{ + Opputfh: nfsv4_xdr.Putfh4args{ + Object: nfsv4_xdr.NfsFh4{1, 2, 3}, + }, + }, + &nfsv4_xdr.NfsArgop4_OP_LOOKUP{ + Oplookup: nfsv4_xdr.Lookup4args{ + Objname: "Hello", + }, + }, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "lookup", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_PUTFH{ + Opputfh: nfsv4_xdr.Putfh4res{ + Status: nfsv4_xdr.NFS4_OK, + }, + }, + &nfsv4_xdr.NfsResop4_OP_LOOKUP{ + Oplookup: nfsv4_xdr.Lookup4res{ + Status: nfsv4_xdr.NFS4ERR_NOTDIR, + }, + }, + }, + Status: nfsv4_xdr.NFS4ERR_NOTDIR, + }, res) + }) + + t.Run("Symlink", func(t *testing.T) { + // When called against symbolic links, LOOKUP should + // return NFS4ERR_SYMLINK. That way the client knows it + // may need to do symlink expansion. + leaf := mock.NewMockVirtualLeaf(ctrl) + clock.EXPECT().Now().Return(time.Unix(1001, 0)) + handleResolverExpectCall(t, handleResolver, []byte{4, 5, 6}, nil, leaf, virtual.StatusOK) + leaf.EXPECT().VirtualGetAttributes(virtual.AttributesMaskFileType, gomock.Any()). 
+ Do(func(requested virtual.AttributesMask, attributes *virtual.Attributes) { + attributes.SetFileType(filesystem.FileTypeSymlink) + }) + + res, err := program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "lookup", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_PUTFH{ + Opputfh: nfsv4_xdr.Putfh4args{ + Object: nfsv4_xdr.NfsFh4{4, 5, 6}, + }, + }, + &nfsv4_xdr.NfsArgop4_OP_LOOKUP{ + Oplookup: nfsv4_xdr.Lookup4args{ + Objname: "Hello", + }, + }, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "lookup", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_PUTFH{ + Opputfh: nfsv4_xdr.Putfh4res{ + Status: nfsv4_xdr.NFS4_OK, + }, + }, + &nfsv4_xdr.NfsResop4_OP_LOOKUP{ + Oplookup: nfsv4_xdr.Lookup4res{ + Status: nfsv4_xdr.NFS4ERR_SYMLINK, + }, + }, + }, + Status: nfsv4_xdr.NFS4ERR_SYMLINK, + }, res) + }) + + t.Run("NotFound", func(t *testing.T) { + rootDirectory.EXPECT().VirtualLookup( + path.MustNewComponent("Hello"), + virtual.AttributesMaskFileHandle, + gomock.Any(), + ).Return(nil, nil, virtual.StatusErrNoEnt) + + res, err := program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "lookup", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_PUTROOTFH{}, + &nfsv4_xdr.NfsArgop4_OP_LOOKUP{ + Oplookup: nfsv4_xdr.Lookup4args{ + Objname: "Hello", + }, + }, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "lookup", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_PUTROOTFH{ + Opputrootfh: nfsv4_xdr.Putrootfh4res{ + Status: nfsv4_xdr.NFS4_OK, + }, + }, + &nfsv4_xdr.NfsResop4_OP_LOOKUP{ + Oplookup: nfsv4_xdr.Lookup4res{ + Status: nfsv4_xdr.NFS4ERR_NOENT, + }, + }, + }, + Status: nfsv4_xdr.NFS4ERR_NOENT, + }, res) + }) + + t.Run("Success", func(t *testing.T) { + leaf := mock.NewMockVirtualLeaf(ctrl) + rootDirectory.EXPECT().VirtualLookup( + path.MustNewComponent("Hello"), + virtual.AttributesMaskFileHandle, + gomock.Any(), + 
).DoAndReturn(func(name path.Component, requested virtual.AttributesMask, attributes *virtual.Attributes) (virtual.Directory, virtual.Leaf, virtual.Status) { + attributes.SetFileHandle([]byte{0x98, 0xb2, 0xdc, 0x6e, 0x34, 0xa2, 0xcf, 0xa5}) + return nil, leaf, virtual.StatusOK + }) + + res, err := program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "lookup", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_PUTROOTFH{}, + &nfsv4_xdr.NfsArgop4_OP_LOOKUP{ + Oplookup: nfsv4_xdr.Lookup4args{ + Objname: "Hello", + }, + }, + &nfsv4_xdr.NfsArgop4_OP_GETFH{}, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "lookup", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_PUTROOTFH{ + Opputrootfh: nfsv4_xdr.Putrootfh4res{ + Status: nfsv4_xdr.NFS4_OK, + }, + }, + &nfsv4_xdr.NfsResop4_OP_LOOKUP{ + Oplookup: nfsv4_xdr.Lookup4res{ + Status: nfsv4_xdr.NFS4_OK, + }, + }, + &nfsv4_xdr.NfsResop4_OP_GETFH{ + Opgetfh: &nfsv4_xdr.Getfh4res_NFS4_OK{ + Resok4: nfsv4_xdr.Getfh4resok{ + Object: nfsv4_xdr.NfsFh4{ + 0x98, 0xb2, 0xdc, 0x6e, 0x34, 0xa2, 0xcf, 0xa5, + }, + }, + }, + }, + }, + Status: nfsv4_xdr.NFS4_OK, + }, res) + }) +} + +// TODO: LOOKUPP + +func TestBaseProgramCompound_OP_NVERIFY(t *testing.T) { + ctrl, ctx := gomock.WithContext(context.Background(), t) + + rootDirectory := mock.NewMockVirtualDirectory(ctrl) + rootDirectory.EXPECT().VirtualGetAttributes(virtual.AttributesMaskFileHandle, gomock.Any()). 
+ Do(func(requested virtual.AttributesMask, attributes *virtual.Attributes) { + attributes.SetFileHandle([]byte{0xe0, 0x7a, 0x5b, 0x53, 0x03, 0x7a, 0x0a, 0x6f}) + }) + handleResolver := mock.NewMockHandleResolver(ctrl) + randomNumberGenerator := mock.NewMockSingleThreadedGenerator(ctrl) + rebootVerifier := nfsv4_xdr.Verifier4{0xab, 0x23, 0xe8, 0x04, 0x79, 0x23, 0x0a, 0x27} + stateIDOtherPrefix := [...]byte{0x41, 0x40, 0x91, 0x69} + clock := mock.NewMockClock(ctrl) + program := nfsv4.NewBaseProgram(rootDirectory, handleResolver.Call, randomNumberGenerator, rebootVerifier, stateIDOtherPrefix, clock, 2*time.Minute, time.Minute) + + // Only basic testing coverage for NVERIFY is provided, as it is + // assumed most of the logic is shared with VERIFY. + + t.Run("Match", func(t *testing.T) { + rootDirectory.EXPECT().VirtualGetAttributes(virtual.AttributesMaskFileType|virtual.AttributesMaskInodeNumber, gomock.Any()). + Do(func(requested virtual.AttributesMask, attributes *virtual.Attributes) { + attributes.SetFileType(filesystem.FileTypeDirectory) + attributes.SetInodeNumber(0x676b7bcb66d92ed6) + }) + + res, err := program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "nverify", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_PUTROOTFH{}, + &nfsv4_xdr.NfsArgop4_OP_NVERIFY{ + Opnverify: nfsv4_xdr.Nverify4args{ + ObjAttributes: nfsv4_xdr.Fattr4{ + Attrmask: nfsv4_xdr.Bitmap4{ + (1 << nfsv4_xdr.FATTR4_TYPE) | + (1 << nfsv4_xdr.FATTR4_FILEID), + }, + AttrVals: nfsv4_xdr.Attrlist4{ + // FATTR4_TYPE == NF4DIR. + 0x00, 0x00, 0x00, 0x02, + // FATTR4_FILEID. 
+ 0x67, 0x6b, 0x7b, 0xcb, 0x66, 0xd9, 0x2e, 0xd6, + }, + }, + }, + }, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "nverify", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_PUTROOTFH{ + Opputrootfh: nfsv4_xdr.Putrootfh4res{ + Status: nfsv4_xdr.NFS4_OK, + }, + }, + &nfsv4_xdr.NfsResop4_OP_NVERIFY{ + Opnverify: nfsv4_xdr.Nverify4res{ + Status: nfsv4_xdr.NFS4ERR_SAME, + }, + }, + }, + Status: nfsv4_xdr.NFS4ERR_SAME, + }, res) + }) + + t.Run("Mismatch", func(t *testing.T) { + rootDirectory.EXPECT().VirtualGetAttributes(virtual.AttributesMaskFileType, gomock.Any()). + Do(func(requested virtual.AttributesMask, attributes *virtual.Attributes) { + attributes.SetFileType(filesystem.FileTypeDirectory) + }) + + res, err := program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "nverify", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_PUTROOTFH{}, + &nfsv4_xdr.NfsArgop4_OP_NVERIFY{ + Opnverify: nfsv4_xdr.Nverify4args{ + ObjAttributes: nfsv4_xdr.Fattr4{ + Attrmask: nfsv4_xdr.Bitmap4{ + 1 << nfsv4_xdr.FATTR4_TYPE, + }, + AttrVals: nfsv4_xdr.Attrlist4{ + // FATTR4_TYPE == NF4BLK. + 0x00, 0x00, 0x00, 0x03, + }, + }, + }, + }, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "nverify", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_PUTROOTFH{ + Opputrootfh: nfsv4_xdr.Putrootfh4res{ + Status: nfsv4_xdr.NFS4_OK, + }, + }, + &nfsv4_xdr.NfsResop4_OP_NVERIFY{ + Opnverify: nfsv4_xdr.Nverify4res{ + Status: nfsv4_xdr.NFS4_OK, + }, + }, + }, + Status: nfsv4_xdr.NFS4_OK, + }, res) + }) +} + +// TODO: OPEN + +func TestBaseProgramCompound_OP_OPENATTR(t *testing.T) { + ctrl, ctx := gomock.WithContext(context.Background(), t) + + rootDirectory := mock.NewMockVirtualDirectory(ctrl) + rootDirectory.EXPECT().VirtualGetAttributes(virtual.AttributesMaskFileHandle, gomock.Any()). 
+ Do(func(requested virtual.AttributesMask, attributes *virtual.Attributes) { + attributes.SetFileHandle([]byte{0x03, 0x86, 0xd4, 0xcb, 0x44, 0x7c, 0x7e, 0x77}) + }) + handleResolver := mock.NewMockHandleResolver(ctrl) + randomNumberGenerator := mock.NewMockSingleThreadedGenerator(ctrl) + rebootVerifier := nfsv4_xdr.Verifier4{0xe6, 0x7e, 0xb7, 0xdb, 0x52, 0x9c, 0x7c, 0x86} + stateIDOtherPrefix := [...]byte{0x06, 0x00, 0x7c, 0x9d} + clock := mock.NewMockClock(ctrl) + program := nfsv4.NewBaseProgram(rootDirectory, handleResolver.Call, randomNumberGenerator, rebootVerifier, stateIDOtherPrefix, clock, 2*time.Minute, time.Minute) + + t.Run("NoFileHandle", func(t *testing.T) { + // Calling OPENATTR without a file handle should fail. + res, err := program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "openattr", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_OPENATTR{ + Opopenattr: nfsv4_xdr.Openattr4args{ + Createdir: true, + }, + }, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "openattr", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_OPENATTR{ + Opopenattr: nfsv4_xdr.Openattr4res{ + Status: nfsv4_xdr.NFS4ERR_NOFILEHANDLE, + }, + }, + }, + Status: nfsv4_xdr.NFS4ERR_NOFILEHANDLE, + }, res) + }) + + t.Run("NotSupported", func(t *testing.T) { + // This implementation does not support named attributes. 
+ res, err := program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "openattr", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_PUTROOTFH{}, + &nfsv4_xdr.NfsArgop4_OP_OPENATTR{ + Opopenattr: nfsv4_xdr.Openattr4args{ + Createdir: true, + }, + }, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "openattr", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_PUTROOTFH{ + Opputrootfh: nfsv4_xdr.Putrootfh4res{ + Status: nfsv4_xdr.NFS4_OK, + }, + }, + &nfsv4_xdr.NfsResop4_OP_OPENATTR{ + Opopenattr: nfsv4_xdr.Openattr4res{ + Status: nfsv4_xdr.NFS4ERR_NOTSUPP, + }, + }, + }, + Status: nfsv4_xdr.NFS4ERR_NOTSUPP, + }, res) + }) +} + +// TODO: OPEN_CONFIRM +// TODO: OPEN_DOWNGRADE +// TODO: PUTFH +// TODO: PUTPUBFH +// TODO: PUTROOTFH + +func TestBaseProgramCompound_OP_READ(t *testing.T) { + ctrl, ctx := gomock.WithContext(context.Background(), t) + + rootDirectory := mock.NewMockVirtualDirectory(ctrl) + rootDirectory.EXPECT().VirtualGetAttributes(virtual.AttributesMaskFileHandle, gomock.Any()). + Do(func(requested virtual.AttributesMask, attributes *virtual.Attributes) { + attributes.SetFileHandle([]byte{0x37, 0xfd, 0xd0, 0xfc, 0x45, 0x2b, 0x79, 0x32}) + }) + handleResolver := mock.NewMockHandleResolver(ctrl) + randomNumberGenerator := mock.NewMockSingleThreadedGenerator(ctrl) + rebootVerifier := nfsv4_xdr.Verifier4{0x58, 0x61, 0xb4, 0xff, 0x82, 0x40, 0x8f, 0x1a} + stateIDOtherPrefix := [...]byte{0x55, 0xc7, 0xc6, 0xa0} + clock := mock.NewMockClock(ctrl) + program := nfsv4.NewBaseProgram(rootDirectory, handleResolver.Call, randomNumberGenerator, rebootVerifier, stateIDOtherPrefix, clock, 2*time.Minute, time.Minute) + + t.Run("StaleStateID", func(t *testing.T) { + // Providing a state ID that uses an unknown prefix + // should cause READ to fail with NFS4ERR_STALE_STATEID, + // as it likely refers to a state ID from before a + // restart. 
+ leaf := mock.NewMockVirtualLeaf(ctrl) + clock.EXPECT().Now().Return(time.Unix(1000, 0)) + handleResolverExpectCall(t, handleResolver, []byte{1, 2, 3}, nil, leaf, virtual.StatusOK) + + res, err := program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "read", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_PUTFH{ + Opputfh: nfsv4_xdr.Putfh4args{ + Object: nfsv4_xdr.NfsFh4{1, 2, 3}, + }, + }, + &nfsv4_xdr.NfsArgop4_OP_READ{ + Opread: nfsv4_xdr.Read4args{ + Stateid: nfsv4_xdr.Stateid4{ + Seqid: 0xce56be4e, + Other: [...]byte{ + 0x88, 0xa8, 0x5a, 0x60, + 0x01, 0xa8, 0x3e, 0xff, + 0x36, 0xe4, 0xcf, 0xd8, + }, + }, + Offset: 1000, + Count: 100, + }, + }, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "read", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_PUTFH{ + Opputfh: nfsv4_xdr.Putfh4res{ + Status: nfsv4_xdr.NFS4_OK, + }, + }, + &nfsv4_xdr.NfsResop4_OP_READ{ + Opread: &nfsv4_xdr.Read4res_default{ + Status: nfsv4_xdr.NFS4ERR_STALE_STATEID, + }, + }, + }, + Status: nfsv4_xdr.NFS4ERR_STALE_STATEID, + }, res) + }) + + t.Run("BadStateID", func(t *testing.T) { + // The prefix of the state ID matches, but it does not + // correspond to a known value. 
+ leaf := mock.NewMockVirtualLeaf(ctrl) + clock.EXPECT().Now().Return(time.Unix(1001, 0)) + clock.EXPECT().Now().Return(time.Unix(1002, 0)) + handleResolverExpectCall(t, handleResolver, []byte{1, 2, 3}, nil, leaf, virtual.StatusOK) + + res, err := program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "read", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_PUTFH{ + Opputfh: nfsv4_xdr.Putfh4args{ + Object: nfsv4_xdr.NfsFh4{1, 2, 3}, + }, + }, + &nfsv4_xdr.NfsArgop4_OP_READ{ + Opread: nfsv4_xdr.Read4args{ + Stateid: nfsv4_xdr.Stateid4{ + Seqid: 0xce56be4e, + Other: [...]byte{ + 0x55, 0xc7, 0xc6, 0xa0, + 0xdf, 0xa1, 0xb4, 0x3b, + 0xb2, 0x4c, 0x2b, 0x5f, + }, + }, + Offset: 1000, + Count: 100, + }, + }, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "read", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_PUTFH{ + Opputfh: nfsv4_xdr.Putfh4res{ + Status: nfsv4_xdr.NFS4_OK, + }, + }, + &nfsv4_xdr.NfsResop4_OP_READ{ + Opread: &nfsv4_xdr.Read4res_default{ + Status: nfsv4_xdr.NFS4ERR_BAD_STATEID, + }, + }, + }, + Status: nfsv4_xdr.NFS4ERR_BAD_STATEID, + }, res) + }) + + t.Run("BadReadBypassStateID", func(t *testing.T) { + // The standard requires that if the "other" field in + // the state ID is all zeroes or all ones, the "seqid" + // field must match. 
+ leaf := mock.NewMockVirtualLeaf(ctrl) + clock.EXPECT().Now().Return(time.Unix(1003, 0)) + handleResolverExpectCall(t, handleResolver, []byte{1, 2, 3}, nil, leaf, virtual.StatusOK) + + res, err := program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "read", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_PUTFH{ + Opputfh: nfsv4_xdr.Putfh4args{ + Object: nfsv4_xdr.NfsFh4{1, 2, 3}, + }, + }, + &nfsv4_xdr.NfsArgop4_OP_READ{ + Opread: nfsv4_xdr.Read4args{ + Stateid: nfsv4_xdr.Stateid4{ + Seqid: 123, + Other: [...]byte{ + 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, + }, + }, + Offset: 1000, + Count: 100, + }, + }, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "read", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_PUTFH{ + Opputfh: nfsv4_xdr.Putfh4res{ + Status: nfsv4_xdr.NFS4_OK, + }, + }, + &nfsv4_xdr.NfsResop4_OP_READ{ + Opread: &nfsv4_xdr.Read4res_default{ + Status: nfsv4_xdr.NFS4ERR_BAD_STATEID, + }, + }, + }, + Status: nfsv4_xdr.NFS4ERR_BAD_STATEID, + }, res) + }) + + t.Run("AnonymousStateIDNoFileHandle", func(t *testing.T) { + res, err := program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "read", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_READ{ + Opread: nfsv4_xdr.Read4args{ + Offset: 1000, + Count: 100, + }, + }, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "read", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_READ{ + Opread: &nfsv4_xdr.Read4res_default{ + Status: nfsv4_xdr.NFS4ERR_NOFILEHANDLE, + }, + }, + }, + Status: nfsv4_xdr.NFS4ERR_NOFILEHANDLE, + }, res) + }) + + t.Run("AnonymousStateIDOpenFailure", func(t *testing.T) { + // A state ID consisting exclusively of zero bits is + // referred to as the anonymous state ID. It should + // cause the underlying file to be opened temporarily. + // Failures when doing so should propagate. 
+ leaf := mock.NewMockVirtualLeaf(ctrl) + clock.EXPECT().Now().Return(time.Unix(1004, 0)) + handleResolverExpectCall(t, handleResolver, []byte{4, 5, 6}, nil, leaf, virtual.StatusOK) + leaf.EXPECT().VirtualOpenSelf(virtual.ShareMaskRead, &virtual.OpenExistingOptions{}, virtual.AttributesMask(0), gomock.Any()).Return(virtual.StatusErrIO) + + res, err := program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "read", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_PUTFH{ + Opputfh: nfsv4_xdr.Putfh4args{ + Object: nfsv4_xdr.NfsFh4{4, 5, 6}, + }, + }, + &nfsv4_xdr.NfsArgop4_OP_READ{ + Opread: nfsv4_xdr.Read4args{ + Offset: 1000, + Count: 100, + }, + }, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "read", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_PUTFH{ + Opputfh: nfsv4_xdr.Putfh4res{ + Status: nfsv4_xdr.NFS4_OK, + }, + }, + &nfsv4_xdr.NfsResop4_OP_READ{ + Opread: &nfsv4_xdr.Read4res_default{ + Status: nfsv4_xdr.NFS4ERR_IO, + }, + }, + }, + Status: nfsv4_xdr.NFS4ERR_IO, + }, res) + }) + + t.Run("AnonymousStateIDReadFailure", func(t *testing.T) { + leaf := mock.NewMockVirtualLeaf(ctrl) + clock.EXPECT().Now().Return(time.Unix(1005, 0)) + handleResolverExpectCall(t, handleResolver, []byte{4, 5, 6}, nil, leaf, virtual.StatusOK) + gomock.InOrder( + leaf.EXPECT().VirtualOpenSelf(virtual.ShareMaskRead, &virtual.OpenExistingOptions{}, virtual.AttributesMask(0), gomock.Any()), + leaf.EXPECT().VirtualRead(gomock.Len(100), uint64(1000)).Return(0, false, virtual.StatusErrIO), + leaf.EXPECT().VirtualClose(uint(1))) + + res, err := program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "read", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_PUTFH{ + Opputfh: nfsv4_xdr.Putfh4args{ + Object: nfsv4_xdr.NfsFh4{4, 5, 6}, + }, + }, + &nfsv4_xdr.NfsArgop4_OP_READ{ + Opread: nfsv4_xdr.Read4args{ + Offset: 1000, + Count: 100, + }, + }, + }, + }) + require.NoError(t, err) + 
require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "read", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_PUTFH{ + Opputfh: nfsv4_xdr.Putfh4res{ + Status: nfsv4_xdr.NFS4_OK, + }, + }, + &nfsv4_xdr.NfsResop4_OP_READ{ + Opread: &nfsv4_xdr.Read4res_default{ + Status: nfsv4_xdr.NFS4ERR_IO, + }, + }, + }, + Status: nfsv4_xdr.NFS4ERR_IO, + }, res) + }) + + t.Run("AnonymousStateIDSuccess", func(t *testing.T) { + leaf := mock.NewMockVirtualLeaf(ctrl) + clock.EXPECT().Now().Return(time.Unix(1006, 0)) + handleResolverExpectCall(t, handleResolver, []byte{4, 5, 6}, nil, leaf, virtual.StatusOK) + gomock.InOrder( + leaf.EXPECT().VirtualOpenSelf(virtual.ShareMaskRead, &virtual.OpenExistingOptions{}, virtual.AttributesMask(0), gomock.Any()), + leaf.EXPECT().VirtualRead(gomock.Len(100), uint64(1000)). + DoAndReturn(func(buf []byte, offset uint64) (int, bool, virtual.Status) { + return copy(buf, "Hello"), true, virtual.StatusOK + }), + leaf.EXPECT().VirtualClose(uint(1))) + + res, err := program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "read", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_PUTFH{ + Opputfh: nfsv4_xdr.Putfh4args{ + Object: nfsv4_xdr.NfsFh4{4, 5, 6}, + }, + }, + &nfsv4_xdr.NfsArgop4_OP_READ{ + Opread: nfsv4_xdr.Read4args{ + Offset: 1000, + Count: 100, + }, + }, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "read", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_PUTFH{ + Opputfh: nfsv4_xdr.Putfh4res{ + Status: nfsv4_xdr.NFS4_OK, + }, + }, + &nfsv4_xdr.NfsResop4_OP_READ{ + Opread: &nfsv4_xdr.Read4res_NFS4_OK{ + Resok4: nfsv4_xdr.Read4resok{ + Eof: true, + Data: []byte("Hello"), + }, + }, + }, + }, + Status: nfsv4_xdr.NFS4_OK, + }, res) + }) + + // The remainder of the test assumes the availability of a client ID. 
+ clock.EXPECT().Now().Return(time.Unix(1007, 0)) + clock.EXPECT().Now().Return(time.Unix(1008, 0)) + setClientIDForTesting(ctx, t, randomNumberGenerator, program, 0xf86e57129c7a628a) + + // Open a file for reading, but don't confirm it yet. + leaf := mock.NewMockVirtualLeaf(ctrl) + clock.EXPECT().Now().Return(time.Unix(1009, 0)) + clock.EXPECT().Now().Return(time.Unix(1010, 0)) + openUnconfirmedFileForTesting( + ctx, + t, + randomNumberGenerator, + program, + rootDirectory, + leaf, + nfsv4_xdr.NfsFh4{0xd8, 0x47, 0x07, 0x55, 0x44, 0x96, 0x88, 0x8d}, + /* shortClientID = */ 0xf86e57129c7a628a, + /* seqID = */ 7010, + /* stateIDOther = */ [...]byte{ + 0x55, 0xc7, 0xc6, 0xa0, + 0xe0, 0x17, 0x83, 0x9c, + 0x17, 0x7d, 0xa2, 0x16, + }) + + t.Run("UnconfirmedStateID", func(t *testing.T) { + // The state ID belongs to an open-owner that has not + // been confirmed using OPEN_CONFIRM yet. The READ + // operation should not be permitted. + clock.EXPECT().Now().Return(time.Unix(1011, 0)) + clock.EXPECT().Now().Return(time.Unix(1012, 0)) + + res, err := program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "read", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_PUTFH{ + Opputfh: nfsv4_xdr.Putfh4args{ + Object: nfsv4_xdr.NfsFh4{0xd8, 0x47, 0x07, 0x55, 0x44, 0x96, 0x88, 0x8d}, + }, + }, + &nfsv4_xdr.NfsArgop4_OP_READ{ + Opread: nfsv4_xdr.Read4args{ + Stateid: nfsv4_xdr.Stateid4{ + Seqid: 1, + Other: [...]byte{ + 0x55, 0xc7, 0xc6, 0xa0, + 0xe0, 0x17, 0x83, 0x9c, + 0x17, 0x7d, 0xa2, 0x16, + }, + }, + Offset: 1000, + Count: 100, + }, + }, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "read", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_PUTFH{ + Opputfh: nfsv4_xdr.Putfh4res{ + Status: nfsv4_xdr.NFS4_OK, + }, + }, + &nfsv4_xdr.NfsResop4_OP_READ{ + Opread: &nfsv4_xdr.Read4res_default{ + Status: nfsv4_xdr.NFS4ERR_BAD_STATEID, + }, + }, + }, + Status: nfsv4_xdr.NFS4ERR_BAD_STATEID, + }, res) + }) + + // 
Confirm the open-owner for the remainder of the test. + clock.EXPECT().Now().Return(time.Unix(1013, 0)) + clock.EXPECT().Now().Return(time.Unix(1014, 0)) + openConfirmForTesting( + ctx, + t, + randomNumberGenerator, + program, + nfsv4_xdr.NfsFh4{0xd8, 0x47, 0x07, 0x55, 0x44, 0x96, 0x88, 0x8d}, + /* seqID = */ 7011, + /* stateIDOther = */ [...]byte{ + 0x55, 0xc7, 0xc6, 0xa0, + 0xe0, 0x17, 0x83, 0x9c, + 0x17, 0x7d, 0xa2, 0x16, + }) + + t.Run("OldStateID", func(t *testing.T) { + // The OPEN_CONFIRM call above increased the sequence ID + // of the state ID to 2. Calling READ with a lower value + // should cause us to return NFS4ERR_OLD_STATEID. + clock.EXPECT().Now().Return(time.Unix(1015, 0)) + clock.EXPECT().Now().Return(time.Unix(1016, 0)) + + res, err := program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "read", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_PUTFH{ + Opputfh: nfsv4_xdr.Putfh4args{ + Object: nfsv4_xdr.NfsFh4{0xd8, 0x47, 0x07, 0x55, 0x44, 0x96, 0x88, 0x8d}, + }, + }, + &nfsv4_xdr.NfsArgop4_OP_READ{ + Opread: nfsv4_xdr.Read4args{ + Stateid: nfsv4_xdr.Stateid4{ + Seqid: 1, + Other: [...]byte{ + 0x55, 0xc7, 0xc6, 0xa0, + 0xe0, 0x17, 0x83, 0x9c, + 0x17, 0x7d, 0xa2, 0x16, + }, + }, + Offset: 1000, + Count: 100, + }, + }, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "read", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_PUTFH{ + Opputfh: nfsv4_xdr.Putfh4res{ + Status: nfsv4_xdr.NFS4_OK, + }, + }, + &nfsv4_xdr.NfsResop4_OP_READ{ + Opread: &nfsv4_xdr.Read4res_default{ + Status: nfsv4_xdr.NFS4ERR_OLD_STATEID, + }, + }, + }, + Status: nfsv4_xdr.NFS4ERR_OLD_STATEID, + }, res) + }) + + t.Run("FuturisticStateID", func(t *testing.T) { + // Similarly, using sequence ID 3 is too new, as it's + // never been handed out by the server. 
+ clock.EXPECT().Now().Return(time.Unix(1017, 0)) + clock.EXPECT().Now().Return(time.Unix(1018, 0)) + + res, err := program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "read", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_PUTFH{ + Opputfh: nfsv4_xdr.Putfh4args{ + Object: nfsv4_xdr.NfsFh4{0xd8, 0x47, 0x07, 0x55, 0x44, 0x96, 0x88, 0x8d}, + }, + }, + &nfsv4_xdr.NfsArgop4_OP_READ{ + Opread: nfsv4_xdr.Read4args{ + Stateid: nfsv4_xdr.Stateid4{ + Seqid: 3, + Other: [...]byte{ + 0x55, 0xc7, 0xc6, 0xa0, + 0xe0, 0x17, 0x83, 0x9c, + 0x17, 0x7d, 0xa2, 0x16, + }, + }, + Offset: 1000, + Count: 100, + }, + }, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "read", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_PUTFH{ + Opputfh: nfsv4_xdr.Putfh4res{ + Status: nfsv4_xdr.NFS4_OK, + }, + }, + &nfsv4_xdr.NfsResop4_OP_READ{ + Opread: &nfsv4_xdr.Read4res_default{ + Status: nfsv4_xdr.NFS4ERR_BAD_STATEID, + }, + }, + }, + Status: nfsv4_xdr.NFS4ERR_BAD_STATEID, + }, res) + }) + + t.Run("OpenStateIDSuccess", func(t *testing.T) { + clock.EXPECT().Now().Return(time.Unix(1019, 0)) + clock.EXPECT().Now().Return(time.Unix(1020, 0)) + clock.EXPECT().Now().Return(time.Unix(1021, 0)) + leaf.EXPECT().VirtualRead(gomock.Len(100), uint64(1000)). 
+ DoAndReturn(func(buf []byte, offset uint64) (int, bool, virtual.Status) { + return copy(buf, "Hello"), true, virtual.StatusOK + }) + + res, err := program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "read", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_PUTFH{ + Opputfh: nfsv4_xdr.Putfh4args{ + Object: nfsv4_xdr.NfsFh4{0xd8, 0x47, 0x07, 0x55, 0x44, 0x96, 0x88, 0x8d}, + }, + }, + &nfsv4_xdr.NfsArgop4_OP_READ{ + Opread: nfsv4_xdr.Read4args{ + Stateid: nfsv4_xdr.Stateid4{ + Seqid: 2, + Other: [...]byte{ + 0x55, 0xc7, 0xc6, 0xa0, + 0xe0, 0x17, 0x83, 0x9c, + 0x17, 0x7d, 0xa2, 0x16, + }, + }, + Offset: 1000, + Count: 100, + }, + }, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "read", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_PUTFH{ + Opputfh: nfsv4_xdr.Putfh4res{ + Status: nfsv4_xdr.NFS4_OK, + }, + }, + &nfsv4_xdr.NfsResop4_OP_READ{ + Opread: &nfsv4_xdr.Read4res_NFS4_OK{ + Resok4: nfsv4_xdr.Read4resok{ + Eof: true, + Data: []byte("Hello"), + }, + }, + }, + }, + Status: nfsv4_xdr.NFS4_OK, + }, res) + }) + + // Acquire a lock on the file to test the behaviour of READ when + // called with a lock state ID. 
+ clock.EXPECT().Now().Return(time.Unix(1022, 0)) + clock.EXPECT().Now().Return(time.Unix(1023, 0)) + randomNumberGeneratorExpectRead(randomNumberGenerator, []byte{0xe8, 0xf2, 0xf2, 0x43, 0xc1, 0x91, 0x76, 0x91}) + + res, err := program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "lock", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_PUTFH{ + Opputfh: nfsv4_xdr.Putfh4args{ + Object: nfsv4_xdr.NfsFh4{0xd8, 0x47, 0x07, 0x55, 0x44, 0x96, 0x88, 0x8d}, + }, + }, + &nfsv4_xdr.NfsArgop4_OP_LOCK{ + Oplock: nfsv4_xdr.Lock4args{ + Locktype: nfsv4_xdr.WRITE_LT, + Reclaim: false, + Offset: 100, + Length: 100, + Locker: &nfsv4_xdr.Locker4_TRUE{ + OpenOwner: nfsv4_xdr.OpenToLockOwner4{ + OpenSeqid: 7012, + OpenStateid: nfsv4_xdr.Stateid4{ + Seqid: 2, + Other: [...]byte{ + 0x55, 0xc7, 0xc6, 0xa0, + 0xe0, 0x17, 0x83, 0x9c, + 0x17, 0x7d, 0xa2, 0x16, + }, + }, + LockSeqid: 9640, + LockOwner: nfsv4_xdr.LockOwner4{ + Clientid: 0xf86e57129c7a628a, + Owner: []byte{0x58, 0xa8, 0x53, 0x4c, 0xf8, 0xe8, 0xaa, 0xf3}, + }, + }, + }, + }, + }, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "lock", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_PUTFH{ + Opputfh: nfsv4_xdr.Putfh4res{ + Status: nfsv4_xdr.NFS4_OK, + }, + }, + &nfsv4_xdr.NfsResop4_OP_LOCK{ + Oplock: &nfsv4_xdr.Lock4res_NFS4_OK{ + Resok4: nfsv4_xdr.Lock4resok{ + LockStateid: nfsv4_xdr.Stateid4{ + Seqid: 1, + Other: [...]byte{ + 0x55, 0xc7, 0xc6, 0xa0, + 0xe8, 0xf2, 0xf2, 0x43, + 0xc1, 0x91, 0x76, 0x91, + }, + }, + }, + }, + }, + }, + Status: nfsv4_xdr.NFS4_OK, + }, res) + + t.Run("LockStateIDSuccess", func(t *testing.T) { + // It's also permitted to call READ using a lock state ID. + clock.EXPECT().Now().Return(time.Unix(1024, 0)) + clock.EXPECT().Now().Return(time.Unix(1025, 0)) + clock.EXPECT().Now().Return(time.Unix(1026, 0)) + leaf.EXPECT().VirtualRead(gomock.Len(100), uint64(1000)). 
+ DoAndReturn(func(buf []byte, offset uint64) (int, bool, virtual.Status) { + return copy(buf, "Hello"), true, virtual.StatusOK + }) + + res, err := program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "read", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_PUTFH{ + Opputfh: nfsv4_xdr.Putfh4args{ + Object: nfsv4_xdr.NfsFh4{0xd8, 0x47, 0x07, 0x55, 0x44, 0x96, 0x88, 0x8d}, + }, + }, + &nfsv4_xdr.NfsArgop4_OP_READ{ + Opread: nfsv4_xdr.Read4args{ + Stateid: nfsv4_xdr.Stateid4{ + Seqid: 1, + Other: [...]byte{ + 0x55, 0xc7, 0xc6, 0xa0, + 0xe8, 0xf2, 0xf2, 0x43, + 0xc1, 0x91, 0x76, 0x91, + }, + }, + Offset: 1000, + Count: 100, + }, + }, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "read", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_PUTFH{ + Opputfh: nfsv4_xdr.Putfh4res{ + Status: nfsv4_xdr.NFS4_OK, + }, + }, + &nfsv4_xdr.NfsResop4_OP_READ{ + Opread: &nfsv4_xdr.Read4res_NFS4_OK{ + Resok4: nfsv4_xdr.Read4resok{ + Eof: true, + Data: []byte("Hello"), + }, + }, + }, + }, + Status: nfsv4_xdr.NFS4_OK, + }, res) + }) + + // Close the file for the remainder of the test. 
+ clock.EXPECT().Now().Return(time.Unix(1027, 0)) + clock.EXPECT().Now().Return(time.Unix(1028, 0)) + leaf.EXPECT().VirtualClose(uint(1)) + + res, err = program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "close", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_PUTFH{ + Opputfh: nfsv4_xdr.Putfh4args{ + Object: nfsv4_xdr.NfsFh4{0xd8, 0x47, 0x07, 0x55, 0x44, 0x96, 0x88, 0x8d}, + }, + }, + &nfsv4_xdr.NfsArgop4_OP_CLOSE{ + Opclose: nfsv4_xdr.Close4args{ + Seqid: 7013, + OpenStateid: nfsv4_xdr.Stateid4{ + Seqid: 2, + Other: [...]byte{ + 0x55, 0xc7, 0xc6, 0xa0, + 0xe0, 0x17, 0x83, 0x9c, + 0x17, 0x7d, 0xa2, 0x16, + }, + }, + }, + }, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "close", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_PUTFH{ + Opputfh: nfsv4_xdr.Putfh4res{ + Status: nfsv4_xdr.NFS4_OK, + }, + }, + &nfsv4_xdr.NfsResop4_OP_CLOSE{ + Opclose: &nfsv4_xdr.Close4res_NFS4_OK{ + OpenStateid: nfsv4_xdr.Stateid4{ + Seqid: 3, + Other: [...]byte{ + 0x55, 0xc7, 0xc6, 0xa0, + 0xe0, 0x17, 0x83, 0x9c, + 0x17, 0x7d, 0xa2, 0x16, + }, + }, + }, + }, + }, + Status: nfsv4_xdr.NFS4_OK, + }, res) + + t.Run("ClosedStateIDBefore", func(t *testing.T) { + // Normally a subsequent operation on a state ID with a + // sequence ID that's too low should return + // NFS4ERR_OLD_STATEID. Because the state ID has been + // closed altogether, we should see NFS4ERR_BAD_STATEID + // instead. 
+ clock.EXPECT().Now().Return(time.Unix(1029, 0)) + clock.EXPECT().Now().Return(time.Unix(1030, 0)) + + res, err := program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "read", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_PUTFH{ + Opputfh: nfsv4_xdr.Putfh4args{ + Object: nfsv4_xdr.NfsFh4{0xd8, 0x47, 0x07, 0x55, 0x44, 0x96, 0x88, 0x8d}, + }, + }, + &nfsv4_xdr.NfsArgop4_OP_READ{ + Opread: nfsv4_xdr.Read4args{ + Stateid: nfsv4_xdr.Stateid4{ + Seqid: 2, + Other: [...]byte{ + 0x55, 0xc7, 0xc6, 0xa0, + 0xe0, 0x17, 0x83, 0x9c, + 0x17, 0x7d, 0xa2, 0x16, + }, + }, + Offset: 1000, + Count: 100, + }, + }, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "read", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_PUTFH{ + Opputfh: nfsv4_xdr.Putfh4res{ + Status: nfsv4_xdr.NFS4_OK, + }, + }, + &nfsv4_xdr.NfsResop4_OP_READ{ + Opread: &nfsv4_xdr.Read4res_default{ + Status: nfsv4_xdr.NFS4ERR_BAD_STATEID, + }, + }, + }, + Status: nfsv4_xdr.NFS4ERR_BAD_STATEID, + }, res) + }) + + t.Run("ClosedStateIDAfter", func(t *testing.T) { + // Similar to the above, using the file with the exact + // state ID should also return NFS4ERR_BAD_STATEID. 
+ clock.EXPECT().Now().Return(time.Unix(1030, 0)) + clock.EXPECT().Now().Return(time.Unix(1031, 0)) + + res, err := program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "read", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_PUTFH{ + Opputfh: nfsv4_xdr.Putfh4args{ + Object: nfsv4_xdr.NfsFh4{0xd8, 0x47, 0x07, 0x55, 0x44, 0x96, 0x88, 0x8d}, + }, + }, + &nfsv4_xdr.NfsArgop4_OP_READ{ + Opread: nfsv4_xdr.Read4args{ + Stateid: nfsv4_xdr.Stateid4{ + Seqid: 3, + Other: [...]byte{ + 0x55, 0xc7, 0xc6, 0xa0, + 0xe0, 0x17, 0x83, 0x9c, + 0x17, 0x7d, 0xa2, 0x16, + }, + }, + Offset: 1000, + Count: 100, + }, + }, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "read", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_PUTFH{ + Opputfh: nfsv4_xdr.Putfh4res{ + Status: nfsv4_xdr.NFS4_OK, + }, + }, + &nfsv4_xdr.NfsResop4_OP_READ{ + Opread: &nfsv4_xdr.Read4res_default{ + Status: nfsv4_xdr.NFS4ERR_BAD_STATEID, + }, + }, + }, + Status: nfsv4_xdr.NFS4ERR_BAD_STATEID, + }, res) + }) +} + +func TestBaseProgramCompound_OP_READDIR(t *testing.T) { + ctrl, ctx := gomock.WithContext(context.Background(), t) + + rootDirectory := mock.NewMockVirtualDirectory(ctrl) + rootDirectory.EXPECT().VirtualGetAttributes(virtual.AttributesMaskFileHandle, gomock.Any()). 
+ Do(func(requested virtual.AttributesMask, attributes *virtual.Attributes) { + attributes.SetFileHandle([]byte{0x52, 0x5e, 0x17, 0x6e, 0xad, 0x2f, 0xc3, 0xf9}) + }) + handleResolver := mock.NewMockHandleResolver(ctrl) + randomNumberGenerator := mock.NewMockSingleThreadedGenerator(ctrl) + rebootVerifier := nfsv4_xdr.Verifier4{0x80, 0x29, 0x6e, 0xe3, 0x1a, 0xf1, 0xec, 0x41} + stateIDOtherPrefix := [...]byte{0xce, 0x11, 0x76, 0xe8} + clock := mock.NewMockClock(ctrl) + program := nfsv4.NewBaseProgram(rootDirectory, handleResolver.Call, randomNumberGenerator, rebootVerifier, stateIDOtherPrefix, clock, 2*time.Minute, time.Minute) + + t.Run("NoFileHandle", func(t *testing.T) { + // Calling READDIR without a file handle should fail. + res, err := program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "readdir", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_READDIR{ + Opreaddir: nfsv4_xdr.Readdir4args{ + Maxcount: 1000, + AttrRequest: nfsv4_xdr.Bitmap4{ + (1 << nfsv4_xdr.FATTR4_TYPE) | + (1 << nfsv4_xdr.FATTR4_FILEID), + }, + }, + }, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "readdir", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_READDIR{ + Opreaddir: &nfsv4_xdr.Readdir4res_default{ + Status: nfsv4_xdr.NFS4ERR_NOFILEHANDLE, + }, + }, + }, + Status: nfsv4_xdr.NFS4ERR_NOFILEHANDLE, + }, res) + }) + + t.Run("NotDir", func(t *testing.T) { + // Calling READDIR with a non-directory file handle + // should fail. 
+ leaf := mock.NewMockVirtualLeaf(ctrl) + rootDirectory.EXPECT().VirtualLookup( + path.MustNewComponent("file"), + virtual.AttributesMaskFileHandle, + gomock.Any(), + ).DoAndReturn(func(name path.Component, requested virtual.AttributesMask, attributes *virtual.Attributes) (virtual.Directory, virtual.Leaf, virtual.Status) { + attributes.SetFileHandle([]byte{0x1c, 0xae, 0xab, 0x22, 0xdf, 0xf4, 0x9e, 0x93}) + return nil, leaf, virtual.StatusOK + }) + + res, err := program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "readdir", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_PUTROOTFH{}, + &nfsv4_xdr.NfsArgop4_OP_LOOKUP{ + Oplookup: nfsv4_xdr.Lookup4args{ + Objname: "file", + }, + }, + &nfsv4_xdr.NfsArgop4_OP_READDIR{ + Opreaddir: nfsv4_xdr.Readdir4args{ + Maxcount: 1000, + AttrRequest: nfsv4_xdr.Bitmap4{ + (1 << nfsv4_xdr.FATTR4_TYPE) | + (1 << nfsv4_xdr.FATTR4_FILEID), + }, + }, + }, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "readdir", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_PUTROOTFH{ + Opputrootfh: nfsv4_xdr.Putrootfh4res{ + Status: nfsv4_xdr.NFS4_OK, + }, + }, + &nfsv4_xdr.NfsResop4_OP_LOOKUP{ + Oplookup: nfsv4_xdr.Lookup4res{ + Status: nfsv4_xdr.NFS4_OK, + }, + }, + &nfsv4_xdr.NfsResop4_OP_READDIR{ + Opreaddir: &nfsv4_xdr.Readdir4res_default{ + Status: nfsv4_xdr.NFS4ERR_NOTDIR, + }, + }, + }, + Status: nfsv4_xdr.NFS4ERR_NOTDIR, + }, res) + }) + + t.Run("EmptyDirectory", func(t *testing.T) { + // Returning no results should cause EOF to be set. 
+ rootDirectory.EXPECT().VirtualReadDir( + uint64(0), + virtual.AttributesMaskFileType|virtual.AttributesMaskInodeNumber, + gomock.Any(), + ).Return(virtual.StatusOK) + + res, err := program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "readdir", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_PUTROOTFH{}, + &nfsv4_xdr.NfsArgop4_OP_READDIR{ + Opreaddir: nfsv4_xdr.Readdir4args{ + Maxcount: 1000, + AttrRequest: nfsv4_xdr.Bitmap4{ + (1 << nfsv4_xdr.FATTR4_TYPE) | + (1 << nfsv4_xdr.FATTR4_FILEID), + }, + }, + }, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "readdir", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_PUTROOTFH{ + Opputrootfh: nfsv4_xdr.Putrootfh4res{ + Status: nfsv4_xdr.NFS4_OK, + }, + }, + &nfsv4_xdr.NfsResop4_OP_READDIR{ + Opreaddir: &nfsv4_xdr.Readdir4res_NFS4_OK{ + Resok4: nfsv4_xdr.Readdir4resok{ + Cookieverf: rebootVerifier, + Reply: nfsv4_xdr.Dirlist4{ + Eof: true, + }, + }, + }, + }, + }, + Status: nfsv4_xdr.NFS4_OK, + }, res) + }) + + t.Run("TooSmall", func(t *testing.T) { + // The requested entry is going to be 56 bytes in size. + // If READDIR is called with maxcount set to 59, the + // request should fail with NFS4ERR_TOOSMALL. + rootDirectory.EXPECT().VirtualReadDir( + uint64(0), + virtual.AttributesMaskFileType|virtual.AttributesMaskInodeNumber, + gomock.Any(), + ).DoAndReturn(func(firstCookie uint64, attributesMask virtual.AttributesMask, reporter virtual.DirectoryEntryReporter) virtual.Status { + leaf := mock.NewMockVirtualLeaf(ctrl) + require.False(t, reporter.ReportLeaf( + uint64(1), + path.MustNewComponent("file"), + leaf, + (&virtual.Attributes{}). + SetFileType(filesystem.FileTypeRegularFile). 
+ SetInodeNumber(123))) + return virtual.StatusOK + }) + + res, err := program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "readdir", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_PUTROOTFH{}, + &nfsv4_xdr.NfsArgop4_OP_READDIR{ + Opreaddir: nfsv4_xdr.Readdir4args{ + Maxcount: 59, + AttrRequest: nfsv4_xdr.Bitmap4{ + (1 << nfsv4_xdr.FATTR4_TYPE) | + (1 << nfsv4_xdr.FATTR4_FILEID), + }, + }, + }, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "readdir", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_PUTROOTFH{ + Opputrootfh: nfsv4_xdr.Putrootfh4res{ + Status: nfsv4_xdr.NFS4_OK, + }, + }, + &nfsv4_xdr.NfsResop4_OP_READDIR{ + Opreaddir: &nfsv4_xdr.Readdir4res_default{ + Status: nfsv4_xdr.NFS4ERR_TOOSMALL, + }, + }, + }, + Status: nfsv4_xdr.NFS4ERR_TOOSMALL, + }, res) + }) + + t.Run("JustBigEnough", func(t *testing.T) { + // The same test as the one above, but with a maxcount + // of 60 bytes. This should make the call succeed. + rootDirectory.EXPECT().VirtualReadDir( + uint64(0), + virtual.AttributesMaskFileType|virtual.AttributesMaskInodeNumber, + gomock.Any(), + ).DoAndReturn(func(firstCookie uint64, attributesMask virtual.AttributesMask, reporter virtual.DirectoryEntryReporter) virtual.Status { + leaf := mock.NewMockVirtualLeaf(ctrl) + require.True(t, reporter.ReportLeaf( + uint64(1), + path.MustNewComponent("file"), + leaf, + (&virtual.Attributes{}). + SetFileType(filesystem.FileTypeRegularFile). 
+ SetInodeNumber(123))) + return virtual.StatusOK + }) + + res, err := program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "readdir", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_PUTROOTFH{}, + &nfsv4_xdr.NfsArgop4_OP_READDIR{ + Opreaddir: nfsv4_xdr.Readdir4args{ + Maxcount: 60, + AttrRequest: nfsv4_xdr.Bitmap4{ + (1 << nfsv4_xdr.FATTR4_TYPE) | + (1 << nfsv4_xdr.FATTR4_FILEID), + }, + }, + }, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "readdir", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_PUTROOTFH{ + Opputrootfh: nfsv4_xdr.Putrootfh4res{ + Status: nfsv4_xdr.NFS4_OK, + }, + }, + &nfsv4_xdr.NfsResop4_OP_READDIR{ + Opreaddir: &nfsv4_xdr.Readdir4res_NFS4_OK{ + Resok4: nfsv4_xdr.Readdir4resok{ + Cookieverf: rebootVerifier, + Reply: nfsv4_xdr.Dirlist4{ + Eof: true, + Entries: &nfsv4_xdr.Entry4{ + Cookie: 3, + Name: "file", + Attrs: nfsv4_xdr.Fattr4{ + Attrmask: nfsv4_xdr.Bitmap4{ + (1 << nfsv4_xdr.FATTR4_TYPE) | + (1 << nfsv4_xdr.FATTR4_FILEID), + }, + AttrVals: nfsv4_xdr.Attrlist4{ + // FATTR4_TYPE == NF4REG. + 0x00, 0x00, 0x00, 0x01, + // FATTR4_FILEID == 123. + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7b, + }, + }, + }, + }, + }, + }, + }, + }, + Status: nfsv4_xdr.NFS4_OK, + }, res) + }) + + t.Run("IncorrectVerifier", func(t *testing.T) { + // Passing in a cookie rebootVerifier that doesn't match with + // what was handed out previously should cause an + // NFS4ERR_NOT_SAME error. 
+ res, err := program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "readdir", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_PUTROOTFH{}, + &nfsv4_xdr.NfsArgop4_OP_READDIR{ + Opreaddir: nfsv4_xdr.Readdir4args{ + Cookie: 72, + Cookieverf: nfsv4_xdr.Verifier4{ + 0xb, 0xa, 0xd, 0xc, 0x00, 0xc, 0x1, 0xe, + }, + Maxcount: 1000, + AttrRequest: nfsv4_xdr.Bitmap4{ + (1 << nfsv4_xdr.FATTR4_TYPE) | + (1 << nfsv4_xdr.FATTR4_FILEID), + }, + }, + }, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "readdir", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_PUTROOTFH{ + Opputrootfh: nfsv4_xdr.Putrootfh4res{ + Status: nfsv4_xdr.NFS4_OK, + }, + }, + &nfsv4_xdr.NfsResop4_OP_READDIR{ + Opreaddir: &nfsv4_xdr.Readdir4res_default{ + Status: nfsv4_xdr.NFS4ERR_NOT_SAME, + }, + }, + }, + Status: nfsv4_xdr.NFS4ERR_NOT_SAME, + }, res) + }) +} + +func TestBaseProgramCompound_OP_READLINK(t *testing.T) { + ctrl, ctx := gomock.WithContext(context.Background(), t) + + rootDirectory := mock.NewMockVirtualDirectory(ctrl) + rootDirectory.EXPECT().VirtualGetAttributes(virtual.AttributesMaskFileHandle, gomock.Any()). + Do(func(requested virtual.AttributesMask, attributes *virtual.Attributes) { + attributes.SetFileHandle([]byte{0xe7, 0x09, 0xea, 0x64, 0xd4, 0x5a, 0xf2, 0x87}) + }) + handleResolver := mock.NewMockHandleResolver(ctrl) + randomNumberGenerator := mock.NewMockSingleThreadedGenerator(ctrl) + rebootVerifier := nfsv4_xdr.Verifier4{0xa8, 0x90, 0x8c, 0x43, 0xb7, 0xd6, 0x0f, 0x74} + stateIDOtherPrefix := [...]byte{0x46, 0x64, 0x44, 0x31} + clock := mock.NewMockClock(ctrl) + program := nfsv4.NewBaseProgram(rootDirectory, handleResolver.Call, randomNumberGenerator, rebootVerifier, stateIDOtherPrefix, clock, 2*time.Minute, time.Minute) + + t.Run("NoFileHandle", func(t *testing.T) { + // Calling READLINK without a file handle should fail. 
+ res, err := program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "readlink", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_READLINK{}, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "readlink", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_READLINK{ + Opreadlink: &nfsv4_xdr.Readlink4res_default{ + Status: nfsv4_xdr.NFS4ERR_NOFILEHANDLE, + }, + }, + }, + Status: nfsv4_xdr.NFS4ERR_NOFILEHANDLE, + }, res) + }) + + t.Run("Directory", func(t *testing.T) { + // Even though most file operations will return + // NFS4ERR_ISDIR when called against a directory, + // READLINK is required to return NFS4ERR_INVAL. + res, err := program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "readlink", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_PUTROOTFH{}, + &nfsv4_xdr.NfsArgop4_OP_READLINK{}, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "readlink", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_PUTROOTFH{ + Opputrootfh: nfsv4_xdr.Putrootfh4res{ + Status: nfsv4_xdr.NFS4_OK, + }, + }, + &nfsv4_xdr.NfsResop4_OP_READLINK{ + Opreadlink: &nfsv4_xdr.Readlink4res_default{ + Status: nfsv4_xdr.NFS4ERR_INVAL, + }, + }, + }, + Status: nfsv4_xdr.NFS4ERR_INVAL, + }, res) + }) + + t.Run("Failure", func(t *testing.T) { + leaf := mock.NewMockVirtualLeaf(ctrl) + clock.EXPECT().Now().Return(time.Unix(1000, 0)) + handleResolverExpectCall(t, handleResolver, []byte{1, 2, 3}, nil, leaf, virtual.StatusOK) + leaf.EXPECT().VirtualReadlink().Return(nil, virtual.StatusErrIO) + + res, err := program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "readlink", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_PUTFH{ + Opputfh: nfsv4_xdr.Putfh4args{ + Object: nfsv4_xdr.NfsFh4{1, 2, 3}, + }, + }, + &nfsv4_xdr.NfsArgop4_OP_READLINK{}, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: 
"readlink", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_PUTFH{ + Opputfh: nfsv4_xdr.Putfh4res{ + Status: nfsv4_xdr.NFS4_OK, + }, + }, + &nfsv4_xdr.NfsResop4_OP_READLINK{ + Opreadlink: &nfsv4_xdr.Readlink4res_default{ + Status: nfsv4_xdr.NFS4ERR_IO, + }, + }, + }, + Status: nfsv4_xdr.NFS4ERR_IO, + }, res) + }) + + t.Run("Success", func(t *testing.T) { + leaf := mock.NewMockVirtualLeaf(ctrl) + clock.EXPECT().Now().Return(time.Unix(1001, 0)) + handleResolverExpectCall(t, handleResolver, []byte{4, 5, 6}, nil, leaf, virtual.StatusOK) + leaf.EXPECT().VirtualReadlink().Return([]byte("target"), virtual.StatusOK) + + res, err := program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "readlink", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_PUTFH{ + Opputfh: nfsv4_xdr.Putfh4args{ + Object: nfsv4_xdr.NfsFh4{4, 5, 6}, + }, + }, + &nfsv4_xdr.NfsArgop4_OP_READLINK{}, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "readlink", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_PUTFH{ + Opputfh: nfsv4_xdr.Putfh4res{ + Status: nfsv4_xdr.NFS4_OK, + }, + }, + &nfsv4_xdr.NfsResop4_OP_READLINK{ + Opreadlink: &nfsv4_xdr.Readlink4res_NFS4_OK{ + Resok4: nfsv4_xdr.Readlink4resok{ + Link: nfsv4_xdr.Linktext4("target"), + }, + }, + }, + }, + Status: nfsv4_xdr.NFS4_OK, + }, res) + }) +} + +func TestBaseProgramCompound_OP_RELEASE_LOCKOWNER(t *testing.T) { + ctrl, ctx := gomock.WithContext(context.Background(), t) + + rootDirectory := mock.NewMockVirtualDirectory(ctrl) + rootDirectory.EXPECT().VirtualGetAttributes(virtual.AttributesMaskFileHandle, gomock.Any()). 
+ Do(func(requested virtual.AttributesMask, attributes *virtual.Attributes) { + attributes.SetFileHandle([]byte{0x8e, 0x16, 0xec, 0x1a, 0x60, 0x6a, 0x9d, 0x3d}) + }) + handleResolver := mock.NewMockHandleResolver(ctrl) + randomNumberGenerator := mock.NewMockSingleThreadedGenerator(ctrl) + rebootVerifier := nfsv4_xdr.Verifier4{0x27, 0xe1, 0xcd, 0x6a, 0x3f, 0xf8, 0xb7, 0xb2} + stateIDOtherPrefix := [...]byte{0xab, 0x4f, 0xf6, 0x1c} + clock := mock.NewMockClock(ctrl) + program := nfsv4.NewBaseProgram(rootDirectory, handleResolver.Call, randomNumberGenerator, rebootVerifier, stateIDOtherPrefix, clock, 2*time.Minute, time.Minute) + + t.Run("StaleClientID", func(t *testing.T) { + // Calling RELEASE_LOCKOWNER against a non-existent + // short client ID should result in failure. + clock.EXPECT().Now().Return(time.Unix(1000, 0)) + + res, err := program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "release_lockowner", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_RELEASE_LOCKOWNER{ + OpreleaseLockowner: nfsv4_xdr.ReleaseLockowner4args{ + LockOwner: nfsv4_xdr.LockOwner4{ + Clientid: 0xf7fdfdc38f805b08, + Owner: []byte{0xac, 0xce, 0x68, 0x2f, 0x60, 0x36, 0x4f, 0xbf}, + }, + }, + }, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "release_lockowner", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_RELEASE_LOCKOWNER{ + OpreleaseLockowner: nfsv4_xdr.ReleaseLockowner4res{ + Status: nfsv4_xdr.NFS4ERR_STALE_CLIENTID, + }, + }, + }, + Status: nfsv4_xdr.NFS4ERR_STALE_CLIENTID, + }, res) + }) + + // The remainder of the test assumes the availability of a client ID. + clock.EXPECT().Now().Return(time.Unix(1001, 0)) + clock.EXPECT().Now().Return(time.Unix(1002, 0)) + setClientIDForTesting(ctx, t, randomNumberGenerator, program, 0xf7fdfdc38f805b08) + + t.Run("SuccessNoOp", func(t *testing.T) { + // Now that a client ID has been allocated, the + // RELEASE_LOCKOWNER call should succeed. 
Because we + // haven't acquired any locks yet, it should still be a + // no-op. + clock.EXPECT().Now().Return(time.Unix(1003, 0)) + + res, err := program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "release_lockowner", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_RELEASE_LOCKOWNER{ + OpreleaseLockowner: nfsv4_xdr.ReleaseLockowner4args{ + LockOwner: nfsv4_xdr.LockOwner4{ + Clientid: 0xf7fdfdc38f805b08, + Owner: []byte{0xad, 0x75, 0x31, 0x9f, 0xe7, 0xef, 0x5a, 0x00}, + }, + }, + }, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "release_lockowner", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_RELEASE_LOCKOWNER{ + OpreleaseLockowner: nfsv4_xdr.ReleaseLockowner4res{ + Status: nfsv4_xdr.NFS4_OK, + }, + }, + }, + Status: nfsv4_xdr.NFS4_OK, + }, res) + }) + + // Open a file and acquire a lock on it for the remainder of + // this test. + leaf := mock.NewMockVirtualLeaf(ctrl) + clock.EXPECT().Now().Return(time.Unix(1004, 0)) + clock.EXPECT().Now().Return(time.Unix(1005, 0)) + openUnconfirmedFileForTesting( + ctx, + t, + randomNumberGenerator, + program, + rootDirectory, + leaf, + nfsv4_xdr.NfsFh4{0xa7, 0x7b, 0xdf, 0xee, 0x60, 0xed, 0x37, 0x9d}, + /* shortClientID = */ 0xf7fdfdc38f805b08, + /* seqID = */ 30430, + /* stateIDOther = */ [...]byte{ + 0xab, 0x4f, 0xf6, 0x1c, + 0x1e, 0x72, 0x2a, 0xe1, + 0x85, 0x8e, 0x31, 0x01, + }) + clock.EXPECT().Now().Return(time.Unix(1006, 0)) + clock.EXPECT().Now().Return(time.Unix(1007, 0)) + openConfirmForTesting( + ctx, + t, + randomNumberGenerator, + program, + nfsv4_xdr.NfsFh4{0xa7, 0x7b, 0xdf, 0xee, 0x60, 0xed, 0x37, 0x9d}, + /* seqID = */ 30431, + /* stateIDOther = */ [...]byte{ + 0xab, 0x4f, 0xf6, 0x1c, + 0x1e, 0x72, 0x2a, 0xe1, + 0x85, 0x8e, 0x31, 0x01, + }) + + clock.EXPECT().Now().Return(time.Unix(1008, 0)) + clock.EXPECT().Now().Return(time.Unix(1009, 0)) + randomNumberGeneratorExpectRead(randomNumberGenerator, []byte{0xe8, 0xef, 0xf4, 0x3d, 
0x9b, 0x99, 0x0e, 0xf1}) + + res, err := program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "lock", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_PUTFH{ + Opputfh: nfsv4_xdr.Putfh4args{ + Object: nfsv4_xdr.NfsFh4{0xa7, 0x7b, 0xdf, 0xee, 0x60, 0xed, 0x37, 0x9d}, + }, + }, + &nfsv4_xdr.NfsArgop4_OP_LOCK{ + Oplock: nfsv4_xdr.Lock4args{ + Locktype: nfsv4_xdr.WRITE_LT, + Reclaim: false, + Offset: 100, + Length: 100, + Locker: &nfsv4_xdr.Locker4_TRUE{ + OpenOwner: nfsv4_xdr.OpenToLockOwner4{ + OpenSeqid: 30432, + OpenStateid: nfsv4_xdr.Stateid4{ + Seqid: 2, + Other: [...]byte{ + 0xab, 0x4f, 0xf6, 0x1c, + 0x1e, 0x72, 0x2a, 0xe1, + 0x85, 0x8e, 0x31, 0x01, + }, + }, + LockSeqid: 16946, + LockOwner: nfsv4_xdr.LockOwner4{ + Clientid: 0xf7fdfdc38f805b08, + Owner: []byte{0xad, 0x75, 0x31, 0x9f, 0xe7, 0xef, 0x5a, 0x00}, + }, + }, + }, + }, + }, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "lock", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_PUTFH{ + Opputfh: nfsv4_xdr.Putfh4res{ + Status: nfsv4_xdr.NFS4_OK, + }, + }, + &nfsv4_xdr.NfsResop4_OP_LOCK{ + Oplock: &nfsv4_xdr.Lock4res_NFS4_OK{ + Resok4: nfsv4_xdr.Lock4resok{ + LockStateid: nfsv4_xdr.Stateid4{ + Seqid: 1, + Other: [...]byte{ + 0xab, 0x4f, 0xf6, 0x1c, + 0xe8, 0xef, 0xf4, 0x3d, + 0x9b, 0x99, 0x0e, 0xf1, + }, + }, + }, + }, + }, + }, + Status: nfsv4_xdr.NFS4_OK, + }, res) + + t.Run("LocksHeld", func(t *testing.T) { + // Now that this lock-owner holds one or more locks, + // RELEASE_LOCKOWNER can no longer be called. 
+ clock.EXPECT().Now().Return(time.Unix(1010, 0)) + + res, err := program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "release_lockowner", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_RELEASE_LOCKOWNER{ + OpreleaseLockowner: nfsv4_xdr.ReleaseLockowner4args{ + LockOwner: nfsv4_xdr.LockOwner4{ + Clientid: 0xf7fdfdc38f805b08, + Owner: []byte{0xad, 0x75, 0x31, 0x9f, 0xe7, 0xef, 0x5a, 0x00}, + }, + }, + }, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "release_lockowner", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_RELEASE_LOCKOWNER{ + OpreleaseLockowner: nfsv4_xdr.ReleaseLockowner4res{ + Status: nfsv4_xdr.NFS4ERR_LOCKS_HELD, + }, + }, + }, + Status: nfsv4_xdr.NFS4ERR_LOCKS_HELD, + }, res) + }) + + // Drop the lock. + clock.EXPECT().Now().Return(time.Unix(1011, 0)) + clock.EXPECT().Now().Return(time.Unix(1012, 0)) + + res, err = program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "unlock", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_PUTFH{ + Opputfh: nfsv4_xdr.Putfh4args{ + Object: nfsv4_xdr.NfsFh4{0xa7, 0x7b, 0xdf, 0xee, 0x60, 0xed, 0x37, 0x9d}, + }, + }, + &nfsv4_xdr.NfsArgop4_OP_LOCKU{ + Oplocku: nfsv4_xdr.Locku4args{ + Locktype: nfsv4_xdr.WRITE_LT, + Seqid: 16947, + LockStateid: nfsv4_xdr.Stateid4{ + Seqid: 1, + Other: [...]byte{ + 0xab, 0x4f, 0xf6, 0x1c, + 0xe8, 0xef, 0xf4, 0x3d, + 0x9b, 0x99, 0x0e, 0xf1, + }, + }, + Offset: 50, + Length: 200, + }, + }, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "unlock", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_PUTFH{ + Opputfh: nfsv4_xdr.Putfh4res{ + Status: nfsv4_xdr.NFS4_OK, + }, + }, + &nfsv4_xdr.NfsResop4_OP_LOCKU{ + Oplocku: &nfsv4_xdr.Locku4res_NFS4_OK{ + LockStateid: nfsv4_xdr.Stateid4{ + Seqid: 2, + Other: [...]byte{ + 0xab, 0x4f, 0xf6, 0x1c, + 0xe8, 0xef, 0xf4, 0x3d, + 0x9b, 0x99, 0x0e, 0xf1, + }, + }, + }, + }, + }, + Status: 
nfsv4_xdr.NFS4_OK, + }, res) + + t.Run("SuccessAfterUnlock", func(t *testing.T) { + // Now that the file has been unlocked, it should be + // possible to call RELEASE_LOCKOWNER once again. + clock.EXPECT().Now().Return(time.Unix(1013, 0)) + + res, err := program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "release_lockowner", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_RELEASE_LOCKOWNER{ + OpreleaseLockowner: nfsv4_xdr.ReleaseLockowner4args{ + LockOwner: nfsv4_xdr.LockOwner4{ + Clientid: 0xf7fdfdc38f805b08, + Owner: []byte{0xad, 0x75, 0x31, 0x9f, 0xe7, 0xef, 0x5a, 0x00}, + }, + }, + }, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "release_lockowner", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_RELEASE_LOCKOWNER{ + OpreleaseLockowner: nfsv4_xdr.ReleaseLockowner4res{ + Status: nfsv4_xdr.NFS4_OK, + }, + }, + }, + Status: nfsv4_xdr.NFS4_OK, + }, res) + + // As a consequence, future LOCK operations for the + // lock-owner should fail, as long as no + // open_to_lock_owner4 is provided. 
+ clock.EXPECT().Now().Return(time.Unix(1014, 0)) + clock.EXPECT().Now().Return(time.Unix(1015, 0)) + + res, err = program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "lock", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_PUTFH{ + Opputfh: nfsv4_xdr.Putfh4args{ + Object: nfsv4_xdr.NfsFh4{0xa7, 0x7b, 0xdf, 0xee, 0x60, 0xed, 0x37, 0x9d}, + }, + }, + &nfsv4_xdr.NfsArgop4_OP_LOCK{ + Oplock: nfsv4_xdr.Lock4args{ + Locktype: nfsv4_xdr.WRITE_LT, + Reclaim: false, + Offset: 100, + Length: 100, + Locker: &nfsv4_xdr.Locker4_FALSE{ + LockOwner: nfsv4_xdr.ExistLockOwner4{ + LockSeqid: 16947, + LockStateid: nfsv4_xdr.Stateid4{ + Seqid: 2, + Other: [...]byte{ + 0xab, 0x4f, 0xf6, 0x1c, + 0xe8, 0xef, 0xf4, 0x3d, + 0x9b, 0x99, 0x0e, 0xf1, + }, + }, + }, + }, + }, + }, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "lock", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_PUTFH{ + Opputfh: nfsv4_xdr.Putfh4res{ + Status: nfsv4_xdr.NFS4_OK, + }, + }, + &nfsv4_xdr.NfsResop4_OP_LOCK{ + Oplock: &nfsv4_xdr.Lock4res_default{ + Status: nfsv4_xdr.NFS4ERR_BAD_STATEID, + }, + }, + }, + Status: nfsv4_xdr.NFS4ERR_BAD_STATEID, + }, res) + }) +} + +func TestBaseProgramCompound_OP_REMOVE(t *testing.T) { + ctrl, ctx := gomock.WithContext(context.Background(), t) + + rootDirectory := mock.NewMockVirtualDirectory(ctrl) + rootDirectory.EXPECT().VirtualGetAttributes(virtual.AttributesMaskFileHandle, gomock.Any()). 
+ Do(func(requested virtual.AttributesMask, attributes *virtual.Attributes) { + attributes.SetFileHandle([]byte{0xe3, 0x85, 0x4a, 0x60, 0x0d, 0xaf, 0x14, 0x20}) + }) + handleResolver := mock.NewMockHandleResolver(ctrl) + randomNumberGenerator := mock.NewMockSingleThreadedGenerator(ctrl) + rebootVerifier := nfsv4_xdr.Verifier4{0xe7, 0x77, 0x33, 0xf4, 0x21, 0xad, 0x7a, 0x1b} + stateIDOtherPrefix := [...]byte{0x4b, 0x46, 0x62, 0x3c} + clock := mock.NewMockClock(ctrl) + program := nfsv4.NewBaseProgram(rootDirectory, handleResolver.Call, randomNumberGenerator, rebootVerifier, stateIDOtherPrefix, clock, 2*time.Minute, time.Minute) + + t.Run("NoFileHandle", func(t *testing.T) { + // Calling REMOVE without a file handle should fail. + res, err := program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "unlink", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_REMOVE{ + Opremove: nfsv4_xdr.Remove4args{ + Target: "file", + }, + }, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "unlink", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_REMOVE{ + Opremove: &nfsv4_xdr.Remove4res_default{ + Status: nfsv4_xdr.NFS4ERR_NOFILEHANDLE, + }, + }, + }, + Status: nfsv4_xdr.NFS4ERR_NOFILEHANDLE, + }, res) + }) + + t.Run("NotDirectory", func(t *testing.T) { + leaf := mock.NewMockVirtualLeaf(ctrl) + clock.EXPECT().Now().Return(time.Unix(1000, 0)) + handleResolverExpectCall(t, handleResolver, []byte{1, 2, 3}, nil, leaf, virtual.StatusOK) + + res, err := program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "unlink", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_PUTFH{ + Opputfh: nfsv4_xdr.Putfh4args{ + Object: nfsv4_xdr.NfsFh4{1, 2, 3}, + }, + }, + &nfsv4_xdr.NfsArgop4_OP_REMOVE{ + Opremove: nfsv4_xdr.Remove4args{ + Target: "file", + }, + }, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "unlink", + Resarray: []nfsv4_xdr.NfsResop4{ + 
&nfsv4_xdr.NfsResop4_OP_PUTFH{ + Opputfh: nfsv4_xdr.Putfh4res{ + Status: nfsv4_xdr.NFS4_OK, + }, + }, + &nfsv4_xdr.NfsResop4_OP_REMOVE{ + Opremove: &nfsv4_xdr.Remove4res_default{ + Status: nfsv4_xdr.NFS4ERR_NOTDIR, + }, + }, + }, + Status: nfsv4_xdr.NFS4ERR_NOTDIR, + }, res) + }) + + t.Run("BadName", func(t *testing.T) { + res, err := program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "unlink", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_PUTROOTFH{}, + &nfsv4_xdr.NfsArgop4_OP_REMOVE{ + Opremove: nfsv4_xdr.Remove4args{ + Target: "..", + }, + }, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "unlink", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_PUTROOTFH{ + Opputrootfh: nfsv4_xdr.Putrootfh4res{ + Status: nfsv4_xdr.NFS4_OK, + }, + }, + &nfsv4_xdr.NfsResop4_OP_REMOVE{ + Opremove: &nfsv4_xdr.Remove4res_default{ + Status: nfsv4_xdr.NFS4ERR_BADNAME, + }, + }, + }, + Status: nfsv4_xdr.NFS4ERR_BADNAME, + }, res) + }) + + t.Run("MissingName", func(t *testing.T) { + res, err := program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "unlink", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_PUTROOTFH{}, + &nfsv4_xdr.NfsArgop4_OP_REMOVE{}, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "unlink", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_PUTROOTFH{ + Opputrootfh: nfsv4_xdr.Putrootfh4res{ + Status: nfsv4_xdr.NFS4_OK, + }, + }, + &nfsv4_xdr.NfsResop4_OP_REMOVE{ + Opremove: &nfsv4_xdr.Remove4res_default{ + Status: nfsv4_xdr.NFS4ERR_INVAL, + }, + }, + }, + Status: nfsv4_xdr.NFS4ERR_INVAL, + }, res) + }) + + t.Run("Failure", func(t *testing.T) { + rootDirectory.EXPECT().VirtualRemove( + path.MustNewComponent("file"), + true, + true, + ).Return(virtual.ChangeInfo{}, virtual.StatusErrAccess) + + res, err := program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "unlink", + Argarray: 
[]nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_PUTROOTFH{}, + &nfsv4_xdr.NfsArgop4_OP_REMOVE{ + Opremove: nfsv4_xdr.Remove4args{ + Target: "file", + }, + }, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "unlink", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_PUTROOTFH{ + Opputrootfh: nfsv4_xdr.Putrootfh4res{ + Status: nfsv4_xdr.NFS4_OK, + }, + }, + &nfsv4_xdr.NfsResop4_OP_REMOVE{ + Opremove: &nfsv4_xdr.Remove4res_default{ + Status: nfsv4_xdr.NFS4ERR_ACCESS, + }, + }, + }, + Status: nfsv4_xdr.NFS4ERR_ACCESS, + }, res) + }) + + t.Run("Success", func(t *testing.T) { + rootDirectory.EXPECT().VirtualRemove( + path.MustNewComponent("file"), + true, + true, + ).Return(virtual.ChangeInfo{ + Before: 0x65821b4665becdc0, + After: 0x9c6360fa70cc3aea, + }, virtual.StatusOK) + + res, err := program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "unlink", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_PUTROOTFH{}, + &nfsv4_xdr.NfsArgop4_OP_REMOVE{ + Opremove: nfsv4_xdr.Remove4args{ + Target: "file", + }, + }, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "unlink", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_PUTROOTFH{ + Opputrootfh: nfsv4_xdr.Putrootfh4res{ + Status: nfsv4_xdr.NFS4_OK, + }, + }, + &nfsv4_xdr.NfsResop4_OP_REMOVE{ + Opremove: &nfsv4_xdr.Remove4res_NFS4_OK{ + Resok4: nfsv4_xdr.Remove4resok{ + Cinfo: nfsv4_xdr.ChangeInfo4{ + Atomic: true, + Before: 0x65821b4665becdc0, + After: 0x9c6360fa70cc3aea, + }, + }, + }, + }, + }, + Status: nfsv4_xdr.NFS4_OK, + }, res) + }) +} + +// TODO: RENAME +// TODO: RENEW + +func TestBaseProgramCompound_OP_RESTOREFH(t *testing.T) { + ctrl, ctx := gomock.WithContext(context.Background(), t) + + rootDirectory := mock.NewMockVirtualDirectory(ctrl) + rootDirectory.EXPECT().VirtualGetAttributes(virtual.AttributesMaskFileHandle, gomock.Any()). 
+ Do(func(requested virtual.AttributesMask, attributes *virtual.Attributes) { + attributes.SetFileHandle([]byte{0x16, 0xb9, 0x45, 0x1d, 0x06, 0x85, 0xc4, 0xbb}) + }) + handleResolver := mock.NewMockHandleResolver(ctrl) + randomNumberGenerator := mock.NewMockSingleThreadedGenerator(ctrl) + rebootVerifier := nfsv4_xdr.Verifier4{0x5f, 0x98, 0x5c, 0xdf, 0x8a, 0xac, 0x4d, 0x97} + stateIDOtherPrefix := [...]byte{0xd4, 0x7c, 0xd1, 0x8f} + clock := mock.NewMockClock(ctrl) + program := nfsv4.NewBaseProgram(rootDirectory, handleResolver.Call, randomNumberGenerator, rebootVerifier, stateIDOtherPrefix, clock, 2*time.Minute, time.Minute) + + t.Run("NoSavedFileHandle", func(t *testing.T) { + // Calling RESTOREFH without a saved file handle should fail. + res, err := program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "restorefh", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_RESTOREFH{}, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "restorefh", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_RESTOREFH{ + Oprestorefh: nfsv4_xdr.Restorefh4res{ + Status: nfsv4_xdr.NFS4ERR_RESTOREFH, + }, + }, + }, + Status: nfsv4_xdr.NFS4ERR_RESTOREFH, + }, res) + }) + + t.Run("Success", func(t *testing.T) { + // RESTOREFH should restore the file that was saved + // previously. The current file handle for successive + // operations should apply to that file instead. 
+ leaf := mock.NewMockVirtualLeaf(ctrl) + clock.EXPECT().Now().Return(time.Unix(1000, 0)) + handleResolverExpectCall(t, handleResolver, []byte{1, 2, 3}, nil, leaf, virtual.StatusOK) + + res, err := program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "restorefh", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_PUTFH{ + Opputfh: nfsv4_xdr.Putfh4args{ + Object: nfsv4_xdr.NfsFh4{1, 2, 3}, + }, + }, + &nfsv4_xdr.NfsArgop4_OP_SAVEFH{}, + &nfsv4_xdr.NfsArgop4_OP_GETFH{}, + &nfsv4_xdr.NfsArgop4_OP_PUTROOTFH{}, + &nfsv4_xdr.NfsArgop4_OP_RESTOREFH{}, + &nfsv4_xdr.NfsArgop4_OP_GETFH{}, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "restorefh", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_PUTFH{ + Opputfh: nfsv4_xdr.Putfh4res{ + Status: nfsv4_xdr.NFS4_OK, + }, + }, + &nfsv4_xdr.NfsResop4_OP_SAVEFH{ + Opsavefh: nfsv4_xdr.Savefh4res{ + Status: nfsv4_xdr.NFS4_OK, + }, + }, + &nfsv4_xdr.NfsResop4_OP_GETFH{ + Opgetfh: &nfsv4_xdr.Getfh4res_NFS4_OK{ + Resok4: nfsv4_xdr.Getfh4resok{ + Object: nfsv4_xdr.NfsFh4{1, 2, 3}, + }, + }, + }, + &nfsv4_xdr.NfsResop4_OP_PUTROOTFH{ + Opputrootfh: nfsv4_xdr.Putrootfh4res{ + Status: nfsv4_xdr.NFS4_OK, + }, + }, + &nfsv4_xdr.NfsResop4_OP_RESTOREFH{ + Oprestorefh: nfsv4_xdr.Restorefh4res{ + Status: nfsv4_xdr.NFS4_OK, + }, + }, + &nfsv4_xdr.NfsResop4_OP_GETFH{ + Opgetfh: &nfsv4_xdr.Getfh4res_NFS4_OK{ + Resok4: nfsv4_xdr.Getfh4resok{ + Object: nfsv4_xdr.NfsFh4{1, 2, 3}, + }, + }, + }, + }, + Status: nfsv4_xdr.NFS4_OK, + }, res) + }) +} + +func TestBaseProgramCompound_OP_SAVEFH(t *testing.T) { + ctrl, ctx := gomock.WithContext(context.Background(), t) + + rootDirectory := mock.NewMockVirtualDirectory(ctrl) + rootDirectory.EXPECT().VirtualGetAttributes(virtual.AttributesMaskFileHandle, gomock.Any()). 
+ Do(func(requested virtual.AttributesMask, attributes *virtual.Attributes) { + attributes.SetFileHandle([]byte{0xc4, 0x2b, 0x0e, 0x04, 0xde, 0x15, 0x66, 0x77}) + }) + handleResolver := mock.NewMockHandleResolver(ctrl) + randomNumberGenerator := mock.NewMockSingleThreadedGenerator(ctrl) + rebootVerifier := nfsv4_xdr.Verifier4{0xe9, 0xf5, 0x40, 0xa0, 0x20, 0xd9, 0x2c, 0x52} + stateIDOtherPrefix := [...]byte{0xf1, 0xd0, 0x0e, 0xa0} + clock := mock.NewMockClock(ctrl) + program := nfsv4.NewBaseProgram(rootDirectory, handleResolver.Call, randomNumberGenerator, rebootVerifier, stateIDOtherPrefix, clock, 2*time.Minute, time.Minute) + + t.Run("NoFileHandle", func(t *testing.T) { + // Calling SAVEFH without a file handle should fail. + res, err := program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "savefh", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_SAVEFH{}, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "savefh", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_SAVEFH{ + Opsavefh: nfsv4_xdr.Savefh4res{ + Status: nfsv4_xdr.NFS4ERR_NOFILEHANDLE, + }, + }, + }, + Status: nfsv4_xdr.NFS4ERR_NOFILEHANDLE, + }, res) + }) + + // The success case is tested as part of OP_RESTOREFH. +} + +func TestBaseProgramCompound_OP_SECINFO(t *testing.T) { + ctrl, ctx := gomock.WithContext(context.Background(), t) + + rootDirectory := mock.NewMockVirtualDirectory(ctrl) + rootDirectory.EXPECT().VirtualGetAttributes(virtual.AttributesMaskFileHandle, gomock.Any()). 
+ Do(func(requested virtual.AttributesMask, attributes *virtual.Attributes) { + attributes.SetFileHandle([]byte{0x0a, 0xa2, 0x92, 0x2f, 0x06, 0x66, 0xd8, 0x80}) + }) + handleResolver := mock.NewMockHandleResolver(ctrl) + randomNumberGenerator := mock.NewMockSingleThreadedGenerator(ctrl) + rebootVerifier := nfsv4_xdr.Verifier4{0x70, 0x34, 0xc6, 0x7a, 0x25, 0x6e, 0x08, 0xc0} + stateIDOtherPrefix := [...]byte{0xf9, 0x44, 0xa6, 0x25} + clock := mock.NewMockClock(ctrl) + program := nfsv4.NewBaseProgram(rootDirectory, handleResolver.Call, randomNumberGenerator, rebootVerifier, stateIDOtherPrefix, clock, 2*time.Minute, time.Minute) + + t.Run("NoFileHandle", func(t *testing.T) { + // Calling SECINFO without a file handle should fail. + res, err := program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "secinfo", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_SECINFO{ + Opsecinfo: nfsv4_xdr.Secinfo4args{ + Name: "Hello", + }, + }, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "secinfo", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_SECINFO{ + Opsecinfo: &nfsv4_xdr.Secinfo4res_default{ + Status: nfsv4_xdr.NFS4ERR_NOFILEHANDLE, + }, + }, + }, + Status: nfsv4_xdr.NFS4ERR_NOFILEHANDLE, + }, res) + }) + + t.Run("NotDirectory", func(t *testing.T) { + // Even though LOOKUP may return NFS4ERR_SYMLINK when + // called against a symbolic link, SECINFO has no such + // requirement. It should always return NFS4ERR_NOTDIR. 
+ leaf := mock.NewMockVirtualLeaf(ctrl) + clock.EXPECT().Now().Return(time.Unix(1000, 0)) + handleResolverExpectCall(t, handleResolver, []byte{1, 2, 3}, nil, leaf, virtual.StatusOK) + + res, err := program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "secinfo", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_PUTFH{ + Opputfh: nfsv4_xdr.Putfh4args{ + Object: nfsv4_xdr.NfsFh4{1, 2, 3}, + }, + }, + &nfsv4_xdr.NfsArgop4_OP_SECINFO{ + Opsecinfo: nfsv4_xdr.Secinfo4args{ + Name: "Hello", + }, + }, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "secinfo", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_PUTFH{ + Opputfh: nfsv4_xdr.Putfh4res{ + Status: nfsv4_xdr.NFS4_OK, + }, + }, + &nfsv4_xdr.NfsResop4_OP_SECINFO{ + Opsecinfo: &nfsv4_xdr.Secinfo4res_default{ + Status: nfsv4_xdr.NFS4ERR_NOTDIR, + }, + }, + }, + Status: nfsv4_xdr.NFS4ERR_NOTDIR, + }, res) + }) + + t.Run("BadName", func(t *testing.T) { + res, err := program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "secinfo", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_PUTROOTFH{}, + &nfsv4_xdr.NfsArgop4_OP_SECINFO{ + Opsecinfo: nfsv4_xdr.Secinfo4args{ + Name: "..", + }, + }, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "secinfo", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_PUTROOTFH{ + Opputrootfh: nfsv4_xdr.Putrootfh4res{ + Status: nfsv4_xdr.NFS4_OK, + }, + }, + &nfsv4_xdr.NfsResop4_OP_SECINFO{ + Opsecinfo: &nfsv4_xdr.Secinfo4res_default{ + Status: nfsv4_xdr.NFS4ERR_BADNAME, + }, + }, + }, + Status: nfsv4_xdr.NFS4ERR_BADNAME, + }, res) + }) + + t.Run("MissingName", func(t *testing.T) { + res, err := program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "secinfo", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_PUTROOTFH{}, + &nfsv4_xdr.NfsArgop4_OP_SECINFO{}, + }, + }) + require.NoError(t, err) + require.Equal(t, 
&nfsv4_xdr.Compound4res{ + Tag: "secinfo", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_PUTROOTFH{ + Opputrootfh: nfsv4_xdr.Putrootfh4res{ + Status: nfsv4_xdr.NFS4_OK, + }, + }, + &nfsv4_xdr.NfsResop4_OP_SECINFO{ + Opsecinfo: &nfsv4_xdr.Secinfo4res_default{ + Status: nfsv4_xdr.NFS4ERR_INVAL, + }, + }, + }, + Status: nfsv4_xdr.NFS4ERR_INVAL, + }, res) + }) + + t.Run("NotFound", func(t *testing.T) { + rootDirectory.EXPECT().VirtualLookup( + path.MustNewComponent("Hello"), + virtual.AttributesMask(0), + gomock.Any(), + ).Return(nil, nil, virtual.StatusErrNoEnt) + + res, err := program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "secinfo", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_PUTROOTFH{}, + &nfsv4_xdr.NfsArgop4_OP_SECINFO{ + Opsecinfo: nfsv4_xdr.Secinfo4args{ + Name: "Hello", + }, + }, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "secinfo", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_PUTROOTFH{ + Opputrootfh: nfsv4_xdr.Putrootfh4res{ + Status: nfsv4_xdr.NFS4_OK, + }, + }, + &nfsv4_xdr.NfsResop4_OP_SECINFO{ + Opsecinfo: &nfsv4_xdr.Secinfo4res_default{ + Status: nfsv4_xdr.NFS4ERR_NOENT, + }, + }, + }, + Status: nfsv4_xdr.NFS4ERR_NOENT, + }, res) + }) + + t.Run("Success", func(t *testing.T) { + leaf := mock.NewMockNativeLeaf(ctrl) + rootDirectory.EXPECT().VirtualLookup( + path.MustNewComponent("Hello"), + virtual.AttributesMask(0), + gomock.Any(), + ).Return(nil, leaf, virtual.StatusOK) + + res, err := program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "secinfo", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_PUTROOTFH{}, + &nfsv4_xdr.NfsArgop4_OP_SECINFO{ + Opsecinfo: nfsv4_xdr.Secinfo4args{ + Name: "Hello", + }, + }, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "secinfo", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_PUTROOTFH{ + Opputrootfh: nfsv4_xdr.Putrootfh4res{ + 
Status: nfsv4_xdr.NFS4_OK, + }, + }, + &nfsv4_xdr.NfsResop4_OP_SECINFO{ + Opsecinfo: &nfsv4_xdr.Secinfo4res_NFS4_OK{ + Resok4: []nfsv4_xdr.Secinfo4{ + &nfsv4_xdr.Secinfo4_default{ + Flavor: rpcv2.AUTH_NONE, + }, + }, + }, + }, + }, + Status: nfsv4_xdr.NFS4_OK, + }, res) + }) +} + +// TODO: SETATTR +// TODO: SETCLIENTID + +func TestBaseProgramCompound_OP_SETCLIENTID_CONFIRM(t *testing.T) { + ctrl, ctx := gomock.WithContext(context.Background(), t) + + rootDirectory := mock.NewMockVirtualDirectory(ctrl) + rootDirectory.EXPECT().VirtualGetAttributes(virtual.AttributesMaskFileHandle, gomock.Any()). + Do(func(requested virtual.AttributesMask, attributes *virtual.Attributes) { + attributes.SetFileHandle([]byte{0x3d, 0x01, 0x56, 0xaf, 0xab, 0x16, 0xe9, 0x23}) + }) + handleResolver := mock.NewMockHandleResolver(ctrl) + randomNumberGenerator := mock.NewMockSingleThreadedGenerator(ctrl) + rebootVerifier := nfsv4_xdr.Verifier4{0x73, 0xaf, 0xeb, 0xd6, 0x5b, 0x96, 0x74, 0xde} + stateIDOtherPrefix := [...]byte{0xdb, 0xd3, 0xb5, 0x41} + clock := mock.NewMockClock(ctrl) + program := nfsv4.NewBaseProgram(rootDirectory, handleResolver.Call, randomNumberGenerator, rebootVerifier, stateIDOtherPrefix, clock, 2*time.Minute, time.Minute) + + t.Run("NoKnownClientID", func(t *testing.T) { + // Calling SETCLIENTID_CONFIRM without calling + // SETCLIENTID first doesn't work. 
+ clock.EXPECT().Now().Return(time.Unix(1000, 0)) + + res, err := program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "setclientid_confirm", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_SETCLIENTID_CONFIRM{ + OpsetclientidConfirm: nfsv4_xdr.SetclientidConfirm4args{ + Clientid: 0x90fee2857d7b5f5b, + SetclientidConfirm: nfsv4_xdr.Verifier4{0xa1, 0x30, 0xf6, 0x1a, 0xc0, 0xac, 0x1f, 0x36}, + }, + }, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "setclientid_confirm", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_SETCLIENTID_CONFIRM{ + OpsetclientidConfirm: nfsv4_xdr.SetclientidConfirm4res{ + Status: nfsv4_xdr.NFS4ERR_STALE_CLIENTID, + }, + }, + }, + Status: nfsv4_xdr.NFS4ERR_STALE_CLIENTID, + }, res) + }) + + t.Run("TooSlow", func(t *testing.T) { + // As the server was created with a maximum lease time + // of 120 seconds, we should see SETCLIENTID_CONFIRM + // fail if there are 121 seconds in between. + clock.EXPECT().Now().Return(time.Unix(1100, 0)) + randomNumberGenerator.EXPECT().Uint64().Return(uint64(0xabd34c548970a69b)) + randomNumberGeneratorExpectRead(randomNumberGenerator, []byte{0xbd, 0x89, 0xa7, 0x95, 0xc4, 0x18, 0xd0, 0xd0}) + + res, err := program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "setclientid", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_SETCLIENTID{ + Opsetclientid: nfsv4_xdr.Setclientid4args{ + Client: nfsv4_xdr.NfsClientId4{ + Verifier: nfsv4_xdr.Verifier4{0xcd, 0xef, 0x89, 0x02, 0x4c, 0x39, 0x2d, 0xeb}, + Id: []byte{0x06, 0x3f, 0xfe, 0x38, 0x30, 0xc5, 0xa8, 0xbc}, + }, + Callback: nfsv4_xdr.CbClient4{ + CbProgram: 0x7b3f75b9, + CbLocation: nfsv4_xdr.Clientaddr4{ + RNetid: "tcp", + RAddr: "127.0.0.1.200.123", + }, + }, + CallbackIdent: 0x1d004919, + }, + }, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "setclientid", + Resarray: []nfsv4_xdr.NfsResop4{ + 
&nfsv4_xdr.NfsResop4_OP_SETCLIENTID{ + Opsetclientid: &nfsv4_xdr.Setclientid4res_NFS4_OK{ + Resok4: nfsv4_xdr.Setclientid4resok{ + Clientid: 0xabd34c548970a69b, + SetclientidConfirm: nfsv4_xdr.Verifier4{0xbd, 0x89, 0xa7, 0x95, 0xc4, 0x18, 0xd0, 0xd0}, + }, + }, + }, + }, + Status: nfsv4_xdr.NFS4_OK, + }, res) + + clock.EXPECT().Now().Return(time.Unix(1221, 0)) + + res, err = program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "setclientid_confirm", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_SETCLIENTID_CONFIRM{ + OpsetclientidConfirm: nfsv4_xdr.SetclientidConfirm4args{ + Clientid: 0xabd34c548970a69b, + SetclientidConfirm: nfsv4_xdr.Verifier4{0xbd, 0x89, 0xa7, 0x95, 0xc4, 0x18, 0xd0, 0xd0}, + }, + }, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "setclientid_confirm", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_SETCLIENTID_CONFIRM{ + OpsetclientidConfirm: nfsv4_xdr.SetclientidConfirm4res{ + Status: nfsv4_xdr.NFS4ERR_STALE_CLIENTID, + }, + }, + }, + Status: nfsv4_xdr.NFS4ERR_STALE_CLIENTID, + }, res) + }) + + t.Run("Success", func(t *testing.T) { + // Successfully confirm a client. 
+ clock.EXPECT().Now().Return(time.Unix(1300, 0)) + randomNumberGenerator.EXPECT().Uint64().Return(uint64(0x23078b2a3f2e1856)) + randomNumberGeneratorExpectRead(randomNumberGenerator, []byte{0xd8, 0x4d, 0xc4, 0x51, 0xcb, 0xe9, 0xec, 0xb9}) + + res, err := program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "setclientid", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_SETCLIENTID{ + Opsetclientid: nfsv4_xdr.Setclientid4args{ + Client: nfsv4_xdr.NfsClientId4{ + Verifier: nfsv4_xdr.Verifier4{0xd4, 0x3e, 0x5a, 0x75, 0x93, 0x4f, 0x01, 0x7c}, + Id: []byte{0x75, 0x89, 0x89, 0xbf, 0x89, 0x10, 0x20, 0xd3}, + }, + Callback: nfsv4_xdr.CbClient4{ + CbProgram: 0xc32f5c62, + CbLocation: nfsv4_xdr.Clientaddr4{ + RNetid: "tcp", + RAddr: "127.0.0.1.200.472", + }, + }, + CallbackIdent: 0xf5dc603e, + }, + }, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "setclientid", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_SETCLIENTID{ + Opsetclientid: &nfsv4_xdr.Setclientid4res_NFS4_OK{ + Resok4: nfsv4_xdr.Setclientid4resok{ + Clientid: 0x23078b2a3f2e1856, + SetclientidConfirm: nfsv4_xdr.Verifier4{0xd8, 0x4d, 0xc4, 0x51, 0xcb, 0xe9, 0xec, 0xb9}, + }, + }, + }, + }, + Status: nfsv4_xdr.NFS4_OK, + }, res) + + clock.EXPECT().Now().Return(time.Unix(1301, 0)) + + res, err = program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "setclientid_confirm", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_SETCLIENTID_CONFIRM{ + OpsetclientidConfirm: nfsv4_xdr.SetclientidConfirm4args{ + Clientid: 0x23078b2a3f2e1856, + SetclientidConfirm: nfsv4_xdr.Verifier4{0xd8, 0x4d, 0xc4, 0x51, 0xcb, 0xe9, 0xec, 0xb9}, + }, + }, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "setclientid_confirm", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_SETCLIENTID_CONFIRM{ + OpsetclientidConfirm: nfsv4_xdr.SetclientidConfirm4res{ + Status: nfsv4_xdr.NFS4_OK, + }, + 
}, + }, + Status: nfsv4_xdr.NFS4_OK, + }, res) + }) + + // Open a file for the remainder of this test. + leaf := mock.NewMockVirtualLeaf(ctrl) + clock.EXPECT().Now().Return(time.Unix(1302, 0)) + clock.EXPECT().Now().Return(time.Unix(1303, 0)) + openUnconfirmedFileForTesting( + ctx, + t, + randomNumberGenerator, + program, + rootDirectory, + leaf, + nfsv4_xdr.NfsFh4{0xc0, 0xa3, 0xb8, 0x99, 0x08, 0x03, 0xe8, 0x45}, + /* shortClientID = */ 0x23078b2a3f2e1856, + /* seqID = */ 3726, + /* stateIDOther = */ [...]byte{ + 0xdb, 0xd3, 0xb5, 0x41, + 0xc3, 0x2f, 0x5c, 0x62, + 0xf5, 0xdc, 0x60, 0x3e, + }) + + t.Run("Idempotence", func(t *testing.T) { + // Sending the same confirmation as before should cause + // no meaningful change. We should see the same response + // as before. + clock.EXPECT().Now().Return(time.Unix(1304, 0)) + + res, err := program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "setclientid_confirm", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_SETCLIENTID_CONFIRM{ + OpsetclientidConfirm: nfsv4_xdr.SetclientidConfirm4args{ + Clientid: 0x23078b2a3f2e1856, + SetclientidConfirm: nfsv4_xdr.Verifier4{0xd8, 0x4d, 0xc4, 0x51, 0xcb, 0xe9, 0xec, 0xb9}, + }, + }, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "setclientid_confirm", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_SETCLIENTID_CONFIRM{ + OpsetclientidConfirm: nfsv4_xdr.SetclientidConfirm4res{ + Status: nfsv4_xdr.NFS4_OK, + }, + }, + }, + Status: nfsv4_xdr.NFS4_OK, + }, res) + }) + + t.Run("DifferentVerifier", func(t *testing.T) { + // Sending a new SETCLIENTID request with the same ID, + // but a different verifier should cause the server to + // return a new verifier as well. 
+ clock.EXPECT().Now().Return(time.Unix(1305, 0)) + randomNumberGenerator.EXPECT().Uint64().Return(uint64(0x23078b2a3f2e1856)) + randomNumberGeneratorExpectRead(randomNumberGenerator, []byte{0x76, 0x23, 0x20, 0xcb, 0xb5, 0x5d, 0xed, 0x61}) + + res, err := program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "setclientid", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_SETCLIENTID{ + Opsetclientid: nfsv4_xdr.Setclientid4args{ + Client: nfsv4_xdr.NfsClientId4{ + Verifier: nfsv4_xdr.Verifier4{0x2c, 0xf9, 0x38, 0xc4, 0xc6, 0xea, 0x03, 0x72}, + Id: []byte{0x75, 0x89, 0x89, 0xbf, 0x89, 0x10, 0x20, 0xd3}, + }, + Callback: nfsv4_xdr.CbClient4{ + CbProgram: 0xc32f5c62, + CbLocation: nfsv4_xdr.Clientaddr4{ + RNetid: "tcp", + RAddr: "127.0.0.1.200.472", + }, + }, + CallbackIdent: 0xf5dc603e, + }, + }, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "setclientid", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_SETCLIENTID{ + Opsetclientid: &nfsv4_xdr.Setclientid4res_NFS4_OK{ + Resok4: nfsv4_xdr.Setclientid4resok{ + Clientid: 0x23078b2a3f2e1856, + SetclientidConfirm: nfsv4_xdr.Verifier4{0x76, 0x23, 0x20, 0xcb, 0xb5, 0x5d, 0xed, 0x61}, + }, + }, + }, + }, + Status: nfsv4_xdr.NFS4_OK, + }, res) + + // Confirming the new verifier should cause the server + // to discard all state associated with the previous + // one, as it indicated the client rebooted. 
+ clock.EXPECT().Now().Return(time.Unix(1306, 0)) + leaf.EXPECT().VirtualClose(uint(1)) + + res, err = program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "setclientid_confirm", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_SETCLIENTID_CONFIRM{ + OpsetclientidConfirm: nfsv4_xdr.SetclientidConfirm4args{ + Clientid: 0x23078b2a3f2e1856, + SetclientidConfirm: nfsv4_xdr.Verifier4{0x76, 0x23, 0x20, 0xcb, 0xb5, 0x5d, 0xed, 0x61}, + }, + }, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "setclientid_confirm", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_SETCLIENTID_CONFIRM{ + OpsetclientidConfirm: nfsv4_xdr.SetclientidConfirm4res{ + Status: nfsv4_xdr.NFS4_OK, + }, + }, + }, + Status: nfsv4_xdr.NFS4_OK, + }, res) + }) +} + +func TestBaseProgramCompound_OP_VERIFY(t *testing.T) { + ctrl, ctx := gomock.WithContext(context.Background(), t) + + rootDirectory := mock.NewMockVirtualDirectory(ctrl) + rootDirectory.EXPECT().VirtualGetAttributes(virtual.AttributesMaskFileHandle, gomock.Any()). + Do(func(requested virtual.AttributesMask, attributes *virtual.Attributes) { + attributes.SetFileHandle([]byte{0xe1, 0x79, 0xc1, 0x39, 0x2a, 0xef, 0xbb, 0xde}) + }) + handleResolver := mock.NewMockHandleResolver(ctrl) + randomNumberGenerator := mock.NewMockSingleThreadedGenerator(ctrl) + rebootVerifier := nfsv4_xdr.Verifier4{0x71, 0x69, 0x6c, 0x7c, 0x90, 0x79, 0x3b, 0x13} + stateIDOtherPrefix := [...]byte{0x19, 0xed, 0x93, 0x5f} + clock := mock.NewMockClock(ctrl) + program := nfsv4.NewBaseProgram(rootDirectory, handleResolver.Call, randomNumberGenerator, rebootVerifier, stateIDOtherPrefix, clock, 2*time.Minute, time.Minute) + + t.Run("NoFileHandle", func(t *testing.T) { + // Calling VERIFY without a file handle should fail. 
+ res, err := program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "verify", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_VERIFY{ + Opverify: nfsv4_xdr.Verify4args{ + ObjAttributes: nfsv4_xdr.Fattr4{ + Attrmask: nfsv4_xdr.Bitmap4{ + 1 << nfsv4_xdr.FATTR4_TYPE, + }, + AttrVals: nfsv4_xdr.Attrlist4{ + // FATTR4_TYPE == NF4DIR. + 0x00, 0x00, 0x00, 0x02, + }, + }, + }, + }, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "verify", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_VERIFY{ + Opverify: nfsv4_xdr.Verify4res{ + Status: nfsv4_xdr.NFS4ERR_NOFILEHANDLE, + }, + }, + }, + Status: nfsv4_xdr.NFS4ERR_NOFILEHANDLE, + }, res) + }) + + t.Run("BadXDR1", func(t *testing.T) { + // If the client provides attributes that are an exact + // prefix of what we compute ourselves, then the data + // provided by the client must be corrupted. XDR would + // never allow that. + rootDirectory.EXPECT().VirtualGetAttributes(virtual.AttributesMaskFileType, gomock.Any()). + Do(func(requested virtual.AttributesMask, attributes *virtual.Attributes) { + attributes.SetFileType(filesystem.FileTypeDirectory) + }) + + res, err := program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "verify", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_PUTROOTFH{}, + &nfsv4_xdr.NfsArgop4_OP_VERIFY{ + Opverify: nfsv4_xdr.Verify4args{ + ObjAttributes: nfsv4_xdr.Fattr4{ + Attrmask: nfsv4_xdr.Bitmap4{ + 1 << nfsv4_xdr.FATTR4_TYPE, + }, + AttrVals: nfsv4_xdr.Attrlist4{ + // FATTR4_TYPE, truncated. 
+ 0x00, 0x00, 0x00, + }, + }, + }, + }, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "verify", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_PUTROOTFH{ + Opputrootfh: nfsv4_xdr.Putrootfh4res{ + Status: nfsv4_xdr.NFS4_OK, + }, + }, + &nfsv4_xdr.NfsResop4_OP_VERIFY{ + Opverify: nfsv4_xdr.Verify4res{ + Status: nfsv4_xdr.NFS4ERR_BADXDR, + }, + }, + }, + Status: nfsv4_xdr.NFS4ERR_BADXDR, + }, res) + }) + + t.Run("BadXDR2", func(t *testing.T) { + // The same holds for when the client provides more data + // than we generate. + rootDirectory.EXPECT().VirtualGetAttributes(virtual.AttributesMaskFileType, gomock.Any()). + Do(func(requested virtual.AttributesMask, attributes *virtual.Attributes) { + attributes.SetFileType(filesystem.FileTypeDirectory) + }) + + res, err := program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "verify", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_PUTROOTFH{}, + &nfsv4_xdr.NfsArgop4_OP_VERIFY{ + Opverify: nfsv4_xdr.Verify4args{ + ObjAttributes: nfsv4_xdr.Fattr4{ + Attrmask: nfsv4_xdr.Bitmap4{ + 1 << nfsv4_xdr.FATTR4_TYPE, + }, + AttrVals: nfsv4_xdr.Attrlist4{ + // FATTR4_TYPE == NF4DIR. + 0x00, 0x00, 0x00, 0x02, + // Trailing garbage. + 0xde, 0xad, 0xc0, 0xde, + }, + }, + }, + }, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "verify", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_PUTROOTFH{ + Opputrootfh: nfsv4_xdr.Putrootfh4res{ + Status: nfsv4_xdr.NFS4_OK, + }, + }, + &nfsv4_xdr.NfsResop4_OP_VERIFY{ + Opverify: nfsv4_xdr.Verify4res{ + Status: nfsv4_xdr.NFS4ERR_BADXDR, + }, + }, + }, + Status: nfsv4_xdr.NFS4ERR_BADXDR, + }, res) + }) + + t.Run("UnsupportedAttribute", func(t *testing.T) { + // We don't support the 'system' attribute. Providing it + // as part of VERIFY should cause us to return + // NFS4ERR_ATTRNOTSUPP. 
+ rootDirectory.EXPECT().VirtualGetAttributes(virtual.AttributesMaskFileType, gomock.Any()). + Do(func(requested virtual.AttributesMask, attributes *virtual.Attributes) { + attributes.SetFileType(filesystem.FileTypeDirectory) + }) + + res, err := program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "verify", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_PUTROOTFH{}, + &nfsv4_xdr.NfsArgop4_OP_VERIFY{ + Opverify: nfsv4_xdr.Verify4args{ + ObjAttributes: nfsv4_xdr.Fattr4{ + Attrmask: nfsv4_xdr.Bitmap4{ + 1 << nfsv4_xdr.FATTR4_TYPE, + 1 << (nfsv4_xdr.FATTR4_SYSTEM - 32), + }, + AttrVals: nfsv4_xdr.Attrlist4{ + // FATTR4_TYPE == NF4DIR. + 0x00, 0x00, 0x00, 0x02, + // FATTR4_SYSTEM == TRUE. + 0x00, 0x00, 0x00, 0x01, + }, + }, + }, + }, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "verify", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_PUTROOTFH{ + Opputrootfh: nfsv4_xdr.Putrootfh4res{ + Status: nfsv4_xdr.NFS4_OK, + }, + }, + &nfsv4_xdr.NfsResop4_OP_VERIFY{ + Opverify: nfsv4_xdr.Verify4res{ + Status: nfsv4_xdr.NFS4ERR_ATTRNOTSUPP, + }, + }, + }, + Status: nfsv4_xdr.NFS4ERR_ATTRNOTSUPP, + }, res) + }) + + t.Run("InvalidAttribute", func(t *testing.T) { + // The 'rdattr_error' attribute is only returned as part + // of READDIR. It cannot be provided to VERIFY. + rootDirectory.EXPECT().VirtualGetAttributes(virtual.AttributesMaskFileType, gomock.Any()). 
+ Do(func(requested virtual.AttributesMask, attributes *virtual.Attributes) { + attributes.SetFileType(filesystem.FileTypeDirectory) + }) + + res, err := program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "verify", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_PUTROOTFH{}, + &nfsv4_xdr.NfsArgop4_OP_VERIFY{ + Opverify: nfsv4_xdr.Verify4args{ + ObjAttributes: nfsv4_xdr.Fattr4{ + Attrmask: nfsv4_xdr.Bitmap4{ + (1 << nfsv4_xdr.FATTR4_TYPE) | + (1 << nfsv4_xdr.FATTR4_RDATTR_ERROR), + }, + AttrVals: nfsv4_xdr.Attrlist4{ + // FATTR4_TYPE == NF4DIR. + 0x00, 0x00, 0x00, 0x02, + // FATTR4_RDATTR_ERROR == NFS4ERR_IO. + 0x00, 0x00, 0x00, 0x05, + }, + }, + }, + }, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "verify", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_PUTROOTFH{ + Opputrootfh: nfsv4_xdr.Putrootfh4res{ + Status: nfsv4_xdr.NFS4_OK, + }, + }, + &nfsv4_xdr.NfsResop4_OP_VERIFY{ + Opverify: nfsv4_xdr.Verify4res{ + Status: nfsv4_xdr.NFS4ERR_INVAL, + }, + }, + }, + Status: nfsv4_xdr.NFS4ERR_INVAL, + }, res) + }) + + t.Run("Mismatch", func(t *testing.T) { + rootDirectory.EXPECT().VirtualGetAttributes(virtual.AttributesMaskFileType, gomock.Any()). + Do(func(requested virtual.AttributesMask, attributes *virtual.Attributes) { + attributes.SetFileType(filesystem.FileTypeDirectory) + }) + + res, err := program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "verify", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_PUTROOTFH{}, + &nfsv4_xdr.NfsArgop4_OP_VERIFY{ + Opverify: nfsv4_xdr.Verify4args{ + ObjAttributes: nfsv4_xdr.Fattr4{ + Attrmask: nfsv4_xdr.Bitmap4{ + 1 << nfsv4_xdr.FATTR4_TYPE, + }, + AttrVals: nfsv4_xdr.Attrlist4{ + // FATTR4_TYPE == NF4BLK. 
+ 0x00, 0x00, 0x00, 0x03, + }, + }, + }, + }, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "verify", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_PUTROOTFH{ + Opputrootfh: nfsv4_xdr.Putrootfh4res{ + Status: nfsv4_xdr.NFS4_OK, + }, + }, + &nfsv4_xdr.NfsResop4_OP_VERIFY{ + Opverify: nfsv4_xdr.Verify4res{ + Status: nfsv4_xdr.NFS4ERR_NOT_SAME, + }, + }, + }, + Status: nfsv4_xdr.NFS4ERR_NOT_SAME, + }, res) + }) + + t.Run("Match", func(t *testing.T) { + rootDirectory.EXPECT().VirtualGetAttributes(virtual.AttributesMaskFileType|virtual.AttributesMaskInodeNumber, gomock.Any()). + Do(func(requested virtual.AttributesMask, attributes *virtual.Attributes) { + attributes.SetFileType(filesystem.FileTypeDirectory) + attributes.SetInodeNumber(0x676b7bcb66d92ed6) + }) + + res, err := program.NfsV4Nfsproc4Compound(ctx, &nfsv4_xdr.Compound4args{ + Tag: "verify", + Argarray: []nfsv4_xdr.NfsArgop4{ + &nfsv4_xdr.NfsArgop4_OP_PUTROOTFH{}, + &nfsv4_xdr.NfsArgop4_OP_VERIFY{ + Opverify: nfsv4_xdr.Verify4args{ + ObjAttributes: nfsv4_xdr.Fattr4{ + Attrmask: nfsv4_xdr.Bitmap4{ + (1 << nfsv4_xdr.FATTR4_TYPE) | + (1 << nfsv4_xdr.FATTR4_FILEID), + }, + AttrVals: nfsv4_xdr.Attrlist4{ + // FATTR4_TYPE == NF4DIR. + 0x00, 0x00, 0x00, 0x02, + // FATTR4_FILEID. 
+ 0x67, 0x6b, 0x7b, 0xcb, 0x66, 0xd9, 0x2e, 0xd6, + }, + }, + }, + }, + }, + }) + require.NoError(t, err) + require.Equal(t, &nfsv4_xdr.Compound4res{ + Tag: "verify", + Resarray: []nfsv4_xdr.NfsResop4{ + &nfsv4_xdr.NfsResop4_OP_PUTROOTFH{ + Opputrootfh: nfsv4_xdr.Putrootfh4res{ + Status: nfsv4_xdr.NFS4_OK, + }, + }, + &nfsv4_xdr.NfsResop4_OP_VERIFY{ + Opverify: nfsv4_xdr.Verify4res{ + Status: nfsv4_xdr.NFS4_OK, + }, + }, + }, + Status: nfsv4_xdr.NFS4_OK, + }, res) + }) +} + +// TODO: WRITE diff --git a/pkg/filesystem/virtual/nfsv4/metrics_program.go b/pkg/filesystem/virtual/nfsv4/metrics_program.go new file mode 100644 index 00000000..623be55c --- /dev/null +++ b/pkg/filesystem/virtual/nfsv4/metrics_program.go @@ -0,0 +1,86 @@ +package nfsv4 + +import ( + "context" + "sync" + + "github.com/buildbarn/go-xdr/pkg/protocols/nfsv4" + "github.com/prometheus/client_golang/prometheus" +) + +var ( + programPrometheusMetrics sync.Once + + programCompoundOperations = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Namespace: "buildbarn", + Subsystem: "nfsv4", + Name: "program_compound_operations_total", + Help: "Number of operations provided as part of calls to NFSv4 COMPOUND.", + }, + []string{"operation", "status"}) + programCompoundOperationsOK map[nfsv4.NfsOpnum4]prometheus.Counter +) + +type metricsProgram struct { + nfsv4.Nfs4Program +} + +// NewMetricsProgram creates a decorator for nfsv4.Nfs4Program that +// exposes Prometheus metrics for all compound operations called. +// +// Right now it only provides counters for the number of operations +// called. Timing of operation is not exposed, as it can only be +// computed at the procedure level, which isn't meaningful in practice. +func NewMetricsProgram(base nfsv4.Nfs4Program) nfsv4.Nfs4Program { + programPrometheusMetrics.Do(func() { + prometheus.MustRegister(programCompoundOperations) + + // Already create counters for all of the operations + // where status is NFS4_OK. 
This allows us to skip calls + // to WithLabelValues() in the common case. + programCompoundOperationsOK = map[nfsv4.NfsOpnum4]prometheus.Counter{} + for code, name := range nfsv4.NfsOpnum4_name { + programCompoundOperationsOK[code] = programCompoundOperations.WithLabelValues(name, nfsv4.Nfsstat4_name[nfsv4.NFS4_OK]) + } + }) + + return &metricsProgram{ + Nfs4Program: base, + } +} + +func (p *metricsProgram) NfsV4Nfsproc4Compound(ctx context.Context, arguments *nfsv4.Compound4args) (*nfsv4.Compound4res, error) { + compoundRes, err := p.Nfs4Program.NfsV4Nfsproc4Compound(ctx, arguments) + if err != nil { + return nil, err + } + + for i, res := range compoundRes.Resarray { + operation := res.GetResop() + status := nfsv4.NFS4_OK + if i == len(compoundRes.Resarray)-1 { + status = compoundRes.Status + } + if status == nfsv4.NFS4_OK { + if counter, ok := programCompoundOperationsOK[operation]; ok { + // Fast path: a known operation has succeeded. + counter.Inc() + continue + } + } + + // Slow path: either the operation is not known, or it failed. 
+ operationStr, ok := nfsv4.NfsOpnum4_name[operation] + if !ok { + operationStr = "UNKNOWN" + } + statusStr, ok := nfsv4.Nfsstat4_name[status] + if !ok { + statusStr = "UNKNOWN" + } + programCompoundOperations.WithLabelValues(operationStr, statusStr).Inc() + } + + return compoundRes, nil +} diff --git a/pkg/proto/configuration/filesystem/virtual/virtual.pb.go b/pkg/proto/configuration/filesystem/virtual/virtual.pb.go index 55c1d916..951825d6 100644 --- a/pkg/proto/configuration/filesystem/virtual/virtual.pb.go +++ b/pkg/proto/configuration/filesystem/virtual/virtual.pb.go @@ -29,6 +29,7 @@ type MountConfiguration struct { MountPath string `protobuf:"bytes,1,opt,name=mount_path,json=mountPath,proto3" json:"mount_path,omitempty"` // Types that are assignable to Backend: // *MountConfiguration_Fuse + // *MountConfiguration_Nfsv4 Backend isMountConfiguration_Backend `protobuf_oneof:"backend"` } @@ -85,6 +86,13 @@ func (x *MountConfiguration) GetFuse() *FUSEMountConfiguration { return nil } +func (x *MountConfiguration) GetNfsv4() *NFSv4MountConfiguration { + if x, ok := x.GetBackend().(*MountConfiguration_Nfsv4); ok { + return x.Nfsv4 + } + return nil +} + type isMountConfiguration_Backend interface { isMountConfiguration_Backend() } @@ -93,8 +101,14 @@ type MountConfiguration_Fuse struct { Fuse *FUSEMountConfiguration `protobuf:"bytes,2,opt,name=fuse,proto3,oneof"` } +type MountConfiguration_Nfsv4 struct { + Nfsv4 *NFSv4MountConfiguration `protobuf:"bytes,3,opt,name=nfsv4,proto3,oneof"` +} + func (*MountConfiguration_Fuse) isMountConfiguration_Backend() {} +func (*MountConfiguration_Nfsv4) isMountConfiguration_Backend() {} + type FUSEMountConfiguration struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -174,6 +188,151 @@ func (x *FUSEMountConfiguration) GetDirectMount() bool { return false } +type NFSv4MountConfiguration struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that 
are assignable to OperatingSystem: + // *NFSv4MountConfiguration_Darwin + OperatingSystem isNFSv4MountConfiguration_OperatingSystem `protobuf_oneof:"operating_system"` + EnforcedLeaseTime *durationpb.Duration `protobuf:"bytes,2,opt,name=enforced_lease_time,json=enforcedLeaseTime,proto3" json:"enforced_lease_time,omitempty"` + AnnouncedLeaseTime *durationpb.Duration `protobuf:"bytes,3,opt,name=announced_lease_time,json=announcedLeaseTime,proto3" json:"announced_lease_time,omitempty"` +} + +func (x *NFSv4MountConfiguration) Reset() { + *x = NFSv4MountConfiguration{} + if protoimpl.UnsafeEnabled { + mi := &file_pkg_proto_configuration_filesystem_virtual_virtual_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *NFSv4MountConfiguration) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*NFSv4MountConfiguration) ProtoMessage() {} + +func (x *NFSv4MountConfiguration) ProtoReflect() protoreflect.Message { + mi := &file_pkg_proto_configuration_filesystem_virtual_virtual_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use NFSv4MountConfiguration.ProtoReflect.Descriptor instead. 
+func (*NFSv4MountConfiguration) Descriptor() ([]byte, []int) { + return file_pkg_proto_configuration_filesystem_virtual_virtual_proto_rawDescGZIP(), []int{2} +} + +func (m *NFSv4MountConfiguration) GetOperatingSystem() isNFSv4MountConfiguration_OperatingSystem { + if m != nil { + return m.OperatingSystem + } + return nil +} + +func (x *NFSv4MountConfiguration) GetDarwin() *NFSv4DarwinMountConfiguration { + if x, ok := x.GetOperatingSystem().(*NFSv4MountConfiguration_Darwin); ok { + return x.Darwin + } + return nil +} + +func (x *NFSv4MountConfiguration) GetEnforcedLeaseTime() *durationpb.Duration { + if x != nil { + return x.EnforcedLeaseTime + } + return nil +} + +func (x *NFSv4MountConfiguration) GetAnnouncedLeaseTime() *durationpb.Duration { + if x != nil { + return x.AnnouncedLeaseTime + } + return nil +} + +type isNFSv4MountConfiguration_OperatingSystem interface { + isNFSv4MountConfiguration_OperatingSystem() +} + +type NFSv4MountConfiguration_Darwin struct { + Darwin *NFSv4DarwinMountConfiguration `protobuf:"bytes,1,opt,name=darwin,proto3,oneof"` +} + +func (*NFSv4MountConfiguration_Darwin) isNFSv4MountConfiguration_OperatingSystem() {} + +type NFSv4DarwinMountConfiguration struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + SocketPath string `protobuf:"bytes,1,opt,name=socket_path,json=socketPath,proto3" json:"socket_path,omitempty"` + MinimumDirectoriesAttributeCacheTimeout *durationpb.Duration `protobuf:"bytes,2,opt,name=minimum_directories_attribute_cache_timeout,json=minimumDirectoriesAttributeCacheTimeout,proto3" json:"minimum_directories_attribute_cache_timeout,omitempty"` + MaximumDirectoriesAttributeCacheTimeout *durationpb.Duration `protobuf:"bytes,3,opt,name=maximum_directories_attribute_cache_timeout,json=maximumDirectoriesAttributeCacheTimeout,proto3" json:"maximum_directories_attribute_cache_timeout,omitempty"` +} + +func (x *NFSv4DarwinMountConfiguration) Reset() { + *x = 
NFSv4DarwinMountConfiguration{} + if protoimpl.UnsafeEnabled { + mi := &file_pkg_proto_configuration_filesystem_virtual_virtual_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *NFSv4DarwinMountConfiguration) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*NFSv4DarwinMountConfiguration) ProtoMessage() {} + +func (x *NFSv4DarwinMountConfiguration) ProtoReflect() protoreflect.Message { + mi := &file_pkg_proto_configuration_filesystem_virtual_virtual_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use NFSv4DarwinMountConfiguration.ProtoReflect.Descriptor instead. +func (*NFSv4DarwinMountConfiguration) Descriptor() ([]byte, []int) { + return file_pkg_proto_configuration_filesystem_virtual_virtual_proto_rawDescGZIP(), []int{3} +} + +func (x *NFSv4DarwinMountConfiguration) GetSocketPath() string { + if x != nil { + return x.SocketPath + } + return "" +} + +func (x *NFSv4DarwinMountConfiguration) GetMinimumDirectoriesAttributeCacheTimeout() *durationpb.Duration { + if x != nil { + return x.MinimumDirectoriesAttributeCacheTimeout + } + return nil +} + +func (x *NFSv4DarwinMountConfiguration) GetMaximumDirectoriesAttributeCacheTimeout() *durationpb.Duration { + if x != nil { + return x.MaximumDirectoriesAttributeCacheTimeout + } + return nil +} + var File_pkg_proto_configuration_filesystem_virtual_virtual_proto protoreflect.FileDescriptor var file_pkg_proto_configuration_filesystem_virtual_virtual_proto_rawDesc = []byte{ @@ -185,7 +344,7 @@ var file_pkg_proto_configuration_filesystem_virtual_virtual_proto_rawDesc = []by 0x69, 0x6f, 0x6e, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x2e, 0x76, 0x69, 0x72, 0x74, 0x75, 0x61, 0x6c, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 
0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x98, 0x01, 0x0a, 0x12, 0x4d, 0x6f, 0x75, 0x6e, 0x74, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xf5, 0x01, 0x0a, 0x12, 0x4d, 0x6f, 0x75, 0x6e, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1d, 0x0a, 0x0a, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x70, 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x50, 0x61, 0x74, 0x68, 0x12, 0x58, 0x0a, 0x04, @@ -194,35 +353,79 @@ var file_pkg_proto_configuration_filesystem_virtual_virtual_proto_rawDesc = []by 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x2e, 0x76, 0x69, 0x72, 0x74, 0x75, 0x61, 0x6c, 0x2e, 0x46, 0x55, 0x53, 0x45, 0x4d, 0x6f, 0x75, 0x6e, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x48, 0x00, - 0x52, 0x04, 0x66, 0x75, 0x73, 0x65, 0x42, 0x09, 0x0a, 0x07, 0x62, 0x61, 0x63, 0x6b, 0x65, 0x6e, - 0x64, 0x22, 0xd1, 0x02, 0x0a, 0x16, 0x46, 0x55, 0x53, 0x45, 0x4d, 0x6f, 0x75, 0x6e, 0x74, 0x43, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x53, 0x0a, 0x18, - 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x5f, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x5f, - 0x76, 0x61, 0x6c, 0x69, 0x64, 0x69, 0x74, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x16, 0x64, 0x69, 0x72, 0x65, 0x63, - 0x74, 0x6f, 0x72, 0x79, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x69, 0x74, - 0x79, 0x12, 0x53, 0x0a, 0x18, 0x69, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x61, 0x74, 0x74, 0x72, 0x69, - 0x62, 0x75, 0x74, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x69, 0x74, 0x79, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x0b, 
0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x16, - 0x69, 0x6e, 0x6f, 0x64, 0x65, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x56, 0x61, - 0x6c, 0x69, 0x64, 0x69, 0x74, 0x79, 0x12, 0x43, 0x0a, 0x1e, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, - 0x6d, 0x5f, 0x64, 0x69, 0x72, 0x74, 0x79, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x73, 0x5f, 0x70, 0x65, - 0x72, 0x63, 0x65, 0x6e, 0x74, 0x61, 0x67, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x1b, - 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x44, 0x69, 0x72, 0x74, 0x79, 0x50, 0x61, 0x67, 0x65, - 0x73, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x61, 0x67, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x61, - 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x6f, 0x74, 0x68, 0x65, 0x72, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, - 0x52, 0x0a, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x4f, 0x74, 0x68, 0x65, 0x72, 0x12, 0x21, 0x0a, 0x0c, - 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x5f, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x07, 0x20, 0x01, - 0x28, 0x08, 0x52, 0x0b, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x4d, 0x6f, 0x75, 0x6e, 0x74, 0x4a, - 0x04, 0x08, 0x05, 0x10, 0x06, 0x42, 0x55, 0x5a, 0x53, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, - 0x63, 0x6f, 0x6d, 0x2f, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x62, 0x61, 0x72, 0x6e, 0x2f, 0x62, 0x62, - 0x2d, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2d, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, - 0x6e, 0x2f, 0x70, 0x6b, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x63, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x79, - 0x73, 0x74, 0x65, 0x6d, 0x2f, 0x76, 0x69, 0x72, 0x74, 0x75, 0x61, 0x6c, 0x62, 0x06, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x33, + 0x52, 0x04, 0x66, 0x75, 0x73, 0x65, 0x12, 0x5b, 0x0a, 0x05, 0x6e, 0x66, 0x73, 0x76, 0x34, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x43, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x62, 0x61, 0x72, + 0x6e, 0x2e, 0x63, 0x6f, 0x6e, 
0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, + 0x66, 0x69, 0x6c, 0x65, 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x2e, 0x76, 0x69, 0x72, 0x74, 0x75, + 0x61, 0x6c, 0x2e, 0x4e, 0x46, 0x53, 0x76, 0x34, 0x4d, 0x6f, 0x75, 0x6e, 0x74, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x48, 0x00, 0x52, 0x05, 0x6e, 0x66, + 0x73, 0x76, 0x34, 0x42, 0x09, 0x0a, 0x07, 0x62, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x22, 0xd1, + 0x02, 0x0a, 0x16, 0x46, 0x55, 0x53, 0x45, 0x4d, 0x6f, 0x75, 0x6e, 0x74, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x53, 0x0a, 0x18, 0x64, 0x69, 0x72, + 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x5f, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x5f, 0x76, 0x61, 0x6c, + 0x69, 0x64, 0x69, 0x74, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x16, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, + 0x79, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x69, 0x74, 0x79, 0x12, 0x53, + 0x0a, 0x18, 0x69, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, + 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x69, 0x74, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x16, 0x69, 0x6e, 0x6f, + 0x64, 0x65, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x56, 0x61, 0x6c, 0x69, 0x64, + 0x69, 0x74, 0x79, 0x12, 0x43, 0x0a, 0x1e, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x5f, 0x64, + 0x69, 0x72, 0x74, 0x79, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x73, 0x5f, 0x70, 0x65, 0x72, 0x63, 0x65, + 0x6e, 0x74, 0x61, 0x67, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x1b, 0x6d, 0x61, 0x78, + 0x69, 0x6d, 0x75, 0x6d, 0x44, 0x69, 0x72, 0x74, 0x79, 0x50, 0x61, 
0x67, 0x65, 0x73, 0x50, 0x65, + 0x72, 0x63, 0x65, 0x6e, 0x74, 0x61, 0x67, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x61, 0x6c, 0x6c, 0x6f, + 0x77, 0x5f, 0x6f, 0x74, 0x68, 0x65, 0x72, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x61, + 0x6c, 0x6c, 0x6f, 0x77, 0x4f, 0x74, 0x68, 0x65, 0x72, 0x12, 0x21, 0x0a, 0x0c, 0x64, 0x69, 0x72, + 0x65, 0x63, 0x74, 0x5f, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, + 0x0b, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x4d, 0x6f, 0x75, 0x6e, 0x74, 0x4a, 0x04, 0x08, 0x05, + 0x10, 0x06, 0x22, 0xaa, 0x02, 0x0a, 0x17, 0x4e, 0x46, 0x53, 0x76, 0x34, 0x4d, 0x6f, 0x75, 0x6e, + 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x63, + 0x0a, 0x06, 0x64, 0x61, 0x72, 0x77, 0x69, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x49, + 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x62, 0x61, 0x72, 0x6e, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x79, 0x73, + 0x74, 0x65, 0x6d, 0x2e, 0x76, 0x69, 0x72, 0x74, 0x75, 0x61, 0x6c, 0x2e, 0x4e, 0x46, 0x53, 0x76, + 0x34, 0x44, 0x61, 0x72, 0x77, 0x69, 0x6e, 0x4d, 0x6f, 0x75, 0x6e, 0x74, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x48, 0x00, 0x52, 0x06, 0x64, 0x61, 0x72, + 0x77, 0x69, 0x6e, 0x12, 0x49, 0x0a, 0x13, 0x65, 0x6e, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x64, 0x5f, + 0x6c, 0x65, 0x61, 0x73, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x11, 0x65, 0x6e, 0x66, + 0x6f, 0x72, 0x63, 0x65, 0x64, 0x4c, 0x65, 0x61, 0x73, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x4b, + 0x0a, 0x14, 0x61, 0x6e, 0x6e, 0x6f, 0x75, 0x6e, 0x63, 0x65, 0x64, 0x5f, 0x6c, 0x65, 0x61, 0x73, + 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, + 0x6f, 
0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, + 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x12, 0x61, 0x6e, 0x6e, 0x6f, 0x75, 0x6e, 0x63, + 0x65, 0x64, 0x4c, 0x65, 0x61, 0x73, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x42, 0x12, 0x0a, 0x10, 0x6f, + 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6e, 0x67, 0x5f, 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x22, + 0xb2, 0x02, 0x0a, 0x1d, 0x4e, 0x46, 0x53, 0x76, 0x34, 0x44, 0x61, 0x72, 0x77, 0x69, 0x6e, 0x4d, + 0x6f, 0x75, 0x6e, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x5f, 0x70, 0x61, 0x74, 0x68, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x50, 0x61, + 0x74, 0x68, 0x12, 0x77, 0x0a, 0x2b, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x5f, 0x64, 0x69, + 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x69, 0x65, 0x73, 0x5f, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, + 0x75, 0x74, 0x65, 0x5f, 0x63, 0x61, 0x63, 0x68, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, + 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x52, 0x27, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x44, 0x69, 0x72, 0x65, 0x63, + 0x74, 0x6f, 0x72, 0x69, 0x65, 0x73, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x43, + 0x61, 0x63, 0x68, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x12, 0x77, 0x0a, 0x2b, 0x6d, + 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x5f, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x69, + 0x65, 0x73, 0x5f, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x5f, 0x63, 0x61, 0x63, + 0x68, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 
0x74, 0x69, 0x6f, 0x6e, 0x52, 0x27, 0x6d, 0x61, 0x78, + 0x69, 0x6d, 0x75, 0x6d, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x69, 0x65, 0x73, 0x41, + 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x43, 0x61, 0x63, 0x68, 0x65, 0x54, 0x69, 0x6d, + 0x65, 0x6f, 0x75, 0x74, 0x42, 0x55, 0x5a, 0x53, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, + 0x6f, 0x6d, 0x2f, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x62, 0x61, 0x72, 0x6e, 0x2f, 0x62, 0x62, 0x2d, + 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2d, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, + 0x2f, 0x70, 0x6b, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x79, 0x73, + 0x74, 0x65, 0x6d, 0x2f, 0x76, 0x69, 0x72, 0x74, 0x75, 0x61, 0x6c, 0x62, 0x06, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x33, } var ( @@ -237,21 +440,29 @@ func file_pkg_proto_configuration_filesystem_virtual_virtual_proto_rawDescGZIP() return file_pkg_proto_configuration_filesystem_virtual_virtual_proto_rawDescData } -var file_pkg_proto_configuration_filesystem_virtual_virtual_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_pkg_proto_configuration_filesystem_virtual_virtual_proto_msgTypes = make([]protoimpl.MessageInfo, 4) var file_pkg_proto_configuration_filesystem_virtual_virtual_proto_goTypes = []interface{}{ - (*MountConfiguration)(nil), // 0: buildbarn.configuration.filesystem.virtual.MountConfiguration - (*FUSEMountConfiguration)(nil), // 1: buildbarn.configuration.filesystem.virtual.FUSEMountConfiguration - (*durationpb.Duration)(nil), // 2: google.protobuf.Duration + (*MountConfiguration)(nil), // 0: buildbarn.configuration.filesystem.virtual.MountConfiguration + (*FUSEMountConfiguration)(nil), // 1: buildbarn.configuration.filesystem.virtual.FUSEMountConfiguration + (*NFSv4MountConfiguration)(nil), // 2: buildbarn.configuration.filesystem.virtual.NFSv4MountConfiguration + (*NFSv4DarwinMountConfiguration)(nil), // 3: 
buildbarn.configuration.filesystem.virtual.NFSv4DarwinMountConfiguration + (*durationpb.Duration)(nil), // 4: google.protobuf.Duration } var file_pkg_proto_configuration_filesystem_virtual_virtual_proto_depIdxs = []int32{ 1, // 0: buildbarn.configuration.filesystem.virtual.MountConfiguration.fuse:type_name -> buildbarn.configuration.filesystem.virtual.FUSEMountConfiguration - 2, // 1: buildbarn.configuration.filesystem.virtual.FUSEMountConfiguration.directory_entry_validity:type_name -> google.protobuf.Duration - 2, // 2: buildbarn.configuration.filesystem.virtual.FUSEMountConfiguration.inode_attribute_validity:type_name -> google.protobuf.Duration - 3, // [3:3] is the sub-list for method output_type - 3, // [3:3] is the sub-list for method input_type - 3, // [3:3] is the sub-list for extension type_name - 3, // [3:3] is the sub-list for extension extendee - 0, // [0:3] is the sub-list for field type_name + 2, // 1: buildbarn.configuration.filesystem.virtual.MountConfiguration.nfsv4:type_name -> buildbarn.configuration.filesystem.virtual.NFSv4MountConfiguration + 4, // 2: buildbarn.configuration.filesystem.virtual.FUSEMountConfiguration.directory_entry_validity:type_name -> google.protobuf.Duration + 4, // 3: buildbarn.configuration.filesystem.virtual.FUSEMountConfiguration.inode_attribute_validity:type_name -> google.protobuf.Duration + 3, // 4: buildbarn.configuration.filesystem.virtual.NFSv4MountConfiguration.darwin:type_name -> buildbarn.configuration.filesystem.virtual.NFSv4DarwinMountConfiguration + 4, // 5: buildbarn.configuration.filesystem.virtual.NFSv4MountConfiguration.enforced_lease_time:type_name -> google.protobuf.Duration + 4, // 6: buildbarn.configuration.filesystem.virtual.NFSv4MountConfiguration.announced_lease_time:type_name -> google.protobuf.Duration + 4, // 7: buildbarn.configuration.filesystem.virtual.NFSv4DarwinMountConfiguration.minimum_directories_attribute_cache_timeout:type_name -> google.protobuf.Duration + 4, // 8: 
buildbarn.configuration.filesystem.virtual.NFSv4DarwinMountConfiguration.maximum_directories_attribute_cache_timeout:type_name -> google.protobuf.Duration + 9, // [9:9] is the sub-list for method output_type + 9, // [9:9] is the sub-list for method input_type + 9, // [9:9] is the sub-list for extension type_name + 9, // [9:9] is the sub-list for extension extendee + 0, // [0:9] is the sub-list for field type_name } func init() { file_pkg_proto_configuration_filesystem_virtual_virtual_proto_init() } @@ -284,9 +495,37 @@ func file_pkg_proto_configuration_filesystem_virtual_virtual_proto_init() { return nil } } + file_pkg_proto_configuration_filesystem_virtual_virtual_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*NFSv4MountConfiguration); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_pkg_proto_configuration_filesystem_virtual_virtual_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*NFSv4DarwinMountConfiguration); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } } file_pkg_proto_configuration_filesystem_virtual_virtual_proto_msgTypes[0].OneofWrappers = []interface{}{ (*MountConfiguration_Fuse)(nil), + (*MountConfiguration_Nfsv4)(nil), + } + file_pkg_proto_configuration_filesystem_virtual_virtual_proto_msgTypes[2].OneofWrappers = []interface{}{ + (*NFSv4MountConfiguration_Darwin)(nil), } type x struct{} out := protoimpl.TypeBuilder{ @@ -294,7 +533,7 @@ func file_pkg_proto_configuration_filesystem_virtual_virtual_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_pkg_proto_configuration_filesystem_virtual_virtual_proto_rawDesc, NumEnums: 0, - NumMessages: 2, + NumMessages: 4, NumExtensions: 0, NumServices: 0, }, diff --git a/pkg/proto/configuration/filesystem/virtual/virtual.proto 
b/pkg/proto/configuration/filesystem/virtual/virtual.proto index ad28cf70..d02acf7a 100644 --- a/pkg/proto/configuration/filesystem/virtual/virtual.proto +++ b/pkg/proto/configuration/filesystem/virtual/virtual.proto @@ -11,9 +11,23 @@ message MountConfiguration { string mount_path = 1; oneof backend { - // Use the kernel's FUSE driver to expose the mount. This option is - // only supported on Linux and macOS. + // Use the kernel's FUSE driver to expose the mount. + // + // This option is supported on Linux and macOS, though its use is + // only recommended on Linux. On macOS, this option requires the + // OSXFUSE or macFUSE kernel extension to be installed. This kernel + // extension tends to cause system lockups under high load. FUSEMountConfiguration fuse = 2; + + // Run an in-process NFSv4 server and use the kernel's NFSv4 client + // to expose the mount. This option is currently only supported on + // macOS. + // + // The NFS server is expected to conform to NFSv4.0 (RFC 7530). + // Features provided by newer versions of the protocol, such as + // NFSv4.1 (RFC 8881) and NFSv4.2 (RFC 7862), are not supported at + // this time. macOS also does not support the latter. + NFSv4MountConfiguration nfsv4 = 3; } } @@ -88,3 +102,77 @@ message FUSEMountConfiguration { // available, such as the bb_worker container images. bool direct_mount = 7; } + +message NFSv4MountConfiguration { + oneof operating_system { + // Configuration options specific to mounting the NFSv4 file system + // on macOS. + NFSv4DarwinMountConfiguration darwin = 1; + } + + // The amount of time that needs to pass for the server to close files + // and remove state belonging to a client that has not shown any + // activity. + // + // This option also controls how long state associated with a single + // process on a client (an 'open-owner') is allowed to continue to + // exist on the server if no files are opened, or if left unconfirmed. 
+ // + // Recommended value: 120s + google.protobuf.Duration enforced_lease_time = 2; + + // The lease time to announce to clients through the FATTR4_LEASE_TIME + // attribute. This option should be set lower than + // 'enforced_lease_time', as it needs to account for network delays + // and instability. + // + // Recommended value: 60s + google.protobuf.Duration announced_lease_time = 3; +} + +message NFSv4DarwinMountConfiguration { + // Path on which to bind the UNIX socket of the NFSv4 server. The + // kernel will connect to this socket when mounting. + // + // NOTE: No facilities are provided to set the ownership or + // permissions on the socket file. On most operating systems, the + // socket file will have mode 0777. How the mode is interpreted when + // changed is inconsistent between operating systems. Some require the + // socket to be writable in order to connect, while others ignore the + // permissions altogether. + // + // It is therefore strongly advised that socket files are placed + // inside directories that have access controls set up properly. + // + // TODO: Using UNIX sockets currently causes EIO errors under high + // load, caused by EMSGSIZE errors returned to the kernel's NFS client + // while writing data into the UNIX socket. As a workaround, this + // option can be left empty, causing it to use TCP instead. This should + // be removed once rdar://82122890 is addressed. + string socket_path = 1; + + // When set, override the minimum amount of time attributes of + // directories may be cached. This option is equivalent to + // mount_nfs(8)'s 'acdirmin'. + // + // Because the macOS NFS client doesn't support NFSv4.1's CB_NOTIFY + // callback operation, there is no way this backend can inform the + // kernel of files being removed from directories. Because of this, it + // is recommended that attribute caching timeouts for directories are + // either reduced or disabled, so that removals are detected more + // quickly.
+ // + // Recommended value: + // - For bb_worker: 1s + // - For bb_clientd: 0s + google.protobuf.Duration minimum_directories_attribute_cache_timeout = 2; + + // When set, override the maximum amount of time attributes of + // directories may be cached. This option is equivalent to + // mount_nfs(8)'s 'acdirmax'. + // + // Recommended value: + // - For bb_worker: 1s + // - For bb_clientd: 0s + google.protobuf.Duration maximum_directories_attribute_cache_timeout = 3; +}