From 508aa8da8de1a3670f444564b296d699cf9de144 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?J=C3=B6rn=20Friedrich=20Dreyer?=
Date: Mon, 19 Aug 2024 15:42:09 +0200
Subject: [PATCH] bump reva to 2.23.0
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Signed-off-by: Jörn Friedrich Dreyer
---
 changelog/unreleased/bump-reva.md             |  19 ++-
 go.mod                                        |   2 +-
 go.sum                                        |   4 +-
 .../sharesstorageprovider.go                  | 149 +++++++++++++-----
 .../pkg/storage/fs/owncloudsql/owncloudsql.go |  29 +++-
 .../v2/pkg/storage/fs/owncloudsql/spaces.go   |  11 +-
 .../storage/utils/decomposedfs/node/xattrs.go |  14 +-
 .../user/manager/owncloudsql/owncloudsql.go   |   5 +
 vendor/modules.txt                            |   2 +-
 9 files changed, 181 insertions(+), 54 deletions(-)

diff --git a/changelog/unreleased/bump-reva.md b/changelog/unreleased/bump-reva.md
index d83dee9af24..52ec9c48ca9 100644
--- a/changelog/unreleased/bump-reva.md
+++ b/changelog/unreleased/bump-reva.md
@@ -1,5 +1,22 @@
-Enhancement: Bump reva
+Enhancement: Bump reva to 2.23.0
 
+* Fix [cs3org/reva#4741](https://github.com/cs3org/reva/pull/4741): Always find unique providers
+* Fix [cs3org/reva#4762](https://github.com/cs3org/reva/pull/4762): Blanks in dav Content-Disposition header
+* Fix [cs3org/reva#4775](https://github.com/cs3org/reva/pull/4775): Fixed the response code when copying a shared folder to a personal space
+* Fix [cs3org/reva#4633](https://github.com/cs3org/reva/pull/4633): Allow all users to create internal links
+* Fix [cs3org/reva#4771](https://github.com/cs3org/reva/pull/4771): Deleting resources via their id
+* Fix [cs3org/reva#4768](https://github.com/cs3org/reva/pull/4768): Fixed the file name validation if nodeid is used
+* Fix [cs3org/reva#4758](https://github.com/cs3org/reva/pull/4758): Fix moving locked files, enable handling locked files via ocdav
+* Fix [cs3org/reva#4774](https://github.com/cs3org/reva/pull/4774): Fix micro ocdav service init and registration
+* Fix [cs3org/reva#4776](https://github.com/cs3org/reva/pull/4776): Fix response code for DELETE of a file that is in postprocessing
+* Fix [cs3org/reva#4746](https://github.com/cs3org/reva/pull/4746): Uploading the same file multiple times leads to orphaned blobs
+* Fix [cs3org/reva#4778](https://github.com/cs3org/reva/pull/4778): Zero byte uploads
+* Chg [cs3org/reva#4759](https://github.com/cs3org/reva/pull/4759): Updated to the latest version of the go-cs3apis
+* Chg [cs3org/reva#4773](https://github.com/cs3org/reva/pull/4773): Ocis bumped
+* Enh [cs3org/reva#4766](https://github.com/cs3org/reva/pull/4766): Set archiver output format via query parameter
+* Enh [cs3org/reva#4763](https://github.com/cs3org/reva/pull/4763): Improve posixfs storage driver
+
+https://github.com/owncloud/ocis/pull/9852
 https://github.com/owncloud/ocis/pull/9763
 https://github.com/owncloud/ocis/pull/9714
 https://github.com/owncloud/ocis/pull/9715
diff --git a/go.mod b/go.mod
index 9e037d7a6df..dc043e2a9af 100644
--- a/go.mod
+++ b/go.mod
@@ -15,7 +15,7 @@ require (
 	github.com/cenkalti/backoff v2.2.1+incompatible
 	github.com/coreos/go-oidc/v3 v3.11.0
 	github.com/cs3org/go-cs3apis v0.0.0-20240724121416-062c4e3046cb
-	github.com/cs3org/reva/v2 v2.22.1-0.20240809114512-56b26ddd82cc
+	github.com/cs3org/reva/v2 v2.23.0
 	github.com/dhowden/tag v0.0.0-20230630033851-978a0926ee25
 	github.com/dutchcoders/go-clamd v0.0.0-20170520113014-b970184f4d9e
 	github.com/egirna/icap-client v0.1.1
diff --git a/go.sum b/go.sum
index b48e94f022b..a36d9aabe2d 100644
--- a/go.sum
+++ b/go.sum
@@ -255,8 +255,8 @@ github.com/crewjam/saml v0.4.14 h1:g9FBNx62osKusnFzs3QTN5L9CVA/Egfgm+stJShzw/c=
 github.com/crewjam/saml v0.4.14/go.mod h1:UVSZCf18jJkk6GpWNVqcyQJMD5HsRugBPf4I1nl2mME=
 github.com/cs3org/go-cs3apis v0.0.0-20240724121416-062c4e3046cb h1:KmYZDReplv/yfwc1LNYpDcVhVujC3Pasv6WjXx1haSU=
 github.com/cs3org/go-cs3apis v0.0.0-20240724121416-062c4e3046cb/go.mod h1:yyP8PRo0EZou3nSH7H4qjlzQwaydPeIRNgX50npQHpE=
-github.com/cs3org/reva/v2 v2.22.1-0.20240809114512-56b26ddd82cc h1:ctPVsRj/QeWhYpNDAkUFXsBgtcR/PPsehdk8AIMLHok=
-github.com/cs3org/reva/v2 v2.22.1-0.20240809114512-56b26ddd82cc/go.mod h1:p7CHBXcg6sSqB+0JMNDfC1S7TSh9FghXkw1kTV3KcJI=
+github.com/cs3org/reva/v2 v2.23.0 h1:tRa+q6usndTQ6LbaxtfEub3UsKVruJ1l7HY6K+ZKS9s=
+github.com/cs3org/reva/v2 v2.23.0/go.mod h1:p7CHBXcg6sSqB+0JMNDfC1S7TSh9FghXkw1kTV3KcJI=
 github.com/cyberdelia/templates v0.0.0-20141128023046-ca7fffd4298c/go.mod h1:GyV+0YP4qX0UQ7r2MoYZ+AvYDp12OF5yg4q8rGnyNh4=
 github.com/cyphar/filepath-securejoin v0.2.4 h1:Ugdm7cg7i6ZK6x3xDF1oEu1nfkyfH53EtKeQYTC3kyg=
 github.com/cyphar/filepath-securejoin v0.2.4/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4=
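Reviewer note: the go.mod, go.sum and vendor/ updates are mechanical. Assuming the standard Go module workflow (the exact commands used are not recorded in this patch), a bump like this is reproduced with:

```sh
go get github.com/cs3org/reva/v2@v2.23.0
go mod tidy
go mod vendor   # refreshes vendor/ and vendor/modules.txt
```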
diff --git a/vendor/github.com/cs3org/reva/v2/internal/grpc/services/sharesstorageprovider/sharesstorageprovider.go b/vendor/github.com/cs3org/reva/v2/internal/grpc/services/sharesstorageprovider/sharesstorageprovider.go
index 97de593b6d3..61efbacefa1 100644
--- a/vendor/github.com/cs3org/reva/v2/internal/grpc/services/sharesstorageprovider/sharesstorageprovider.go
+++ b/vendor/github.com/cs3org/reva/v2/internal/grpc/services/sharesstorageprovider/sharesstorageprovider.go
@@ -25,6 +25,7 @@ import (
 	"strings"
 
 	"github.com/cs3org/reva/v2/pkg/storagespace"
+	"golang.org/x/sync/errgroup"
 	"google.golang.org/genproto/protobuf/field_mask"
 	"google.golang.org/grpc"
 	codes "google.golang.org/grpc/codes"
@@ -60,11 +61,13 @@ func init() {
 type config struct {
 	GatewayAddr               string `mapstructure:"gateway_addr"`
 	UserShareProviderEndpoint string `mapstructure:"usershareprovidersvc"`
+	MaxConcurrency            int    `mapstructure:"max_concurrency"`
 }
 
 type service struct {
 	gatewaySelector              pool.Selectable[gateway.GatewayAPIClient]
 	sharingCollaborationSelector pool.Selectable[collaboration.CollaborationAPIClient]
+	maxConcurrency               int
 }
 
 func (s *service) Close() error {
@@ -98,14 +101,19 @@ func NewDefault(m map[string]interface{}, _ *grpc.Server) (rgrpc.Service, error)
 		return nil, errors.Wrap(err, "sharesstorageprovider: error getting UserShareProvider client")
 	}
 
-	return New(gatewaySelector, sharingCollaborationSelector)
+	if c.MaxConcurrency <= 0 {
+		c.MaxConcurrency = 5
+	}
+
+	return New(gatewaySelector, sharingCollaborationSelector, c.MaxConcurrency)
 }
 
 // New returns a new instance of the SharesStorageProvider service
-func New(gatewaySelector pool.Selectable[gateway.GatewayAPIClient], sharingCollaborationSelector pool.Selectable[collaboration.CollaborationAPIClient]) (rgrpc.Service, error) {
+func New(gatewaySelector pool.Selectable[gateway.GatewayAPIClient], sharingCollaborationSelector pool.Selectable[collaboration.CollaborationAPIClient], maxConcurrency int) (rgrpc.Service, error) {
 	s := &service{
 		gatewaySelector:              gatewaySelector,
 		sharingCollaborationSelector: sharingCollaborationSelector,
+		maxConcurrency:               maxConcurrency,
 	}
 	return s, nil
 }
@@ -399,7 +407,7 @@ func (s *service) ListStorageSpaces(ctx context.Context, req *provider.ListStora
 	var shareInfo map[string]*provider.ResourceInfo
 	var err error
 	if fetchShares {
-		receivedShares, shareInfo, err = s.fetchShares(ctx, req.Opaque, []string{}, &fieldmaskpb.FieldMask{ /*TODO mtime and etag only?*/ })
+		receivedShares, shareInfo, err = s.fetchAcceptedShares(ctx, req.Opaque, []string{}, &fieldmaskpb.FieldMask{ /*TODO mtime and etag only?*/ })
 		if err != nil {
 			return nil, errors.Wrap(err, "sharesstorageprovider: error calling ListReceivedSharesRequest")
 		}
@@ -710,7 +718,7 @@ func (s *service) Stat(ctx context.Context, req *provider.StatRequest) (*provide
 	if !ok {
 		return nil, fmt.Errorf("missing user in context")
 	}
-	receivedShares, shareMd, err := s.fetchShares(ctx, req.Opaque, req.ArbitraryMetadataKeys, req.FieldMask)
+	receivedShares, shareMd, err := s.fetchAcceptedShares(ctx, req.Opaque, req.ArbitraryMetadataKeys, req.FieldMask)
 	if err != nil {
 		return nil, err
 	}
@@ -806,7 +814,7 @@ func (s *service) ListContainer(ctx context.Context, req *provider.ListContainer
 
 	// The root is empty, it is filled by mountpoints
 	// so, when accessing the root via /dav/spaces, we need to list the accepted shares with their mountpoint
-	receivedShares, shareMd, err := s.fetchShares(ctx, req.Opaque, req.ArbitraryMetadataKeys, req.FieldMask)
+	receivedShares, shareMd, err := s.fetchAcceptedShares(ctx, req.Opaque, req.ArbitraryMetadataKeys, req.FieldMask)
 	if err != nil {
 		return nil, errors.Wrap(err, "sharesstorageprovider: error calling ListReceivedSharesRequest")
 	}
@@ -1143,14 +1151,21 @@ func (s *service) rejectReceivedShare(ctx context.Context, receivedShare *collab
 	return errtypes.NewErrtypeFromStatus(res.Status)
 }
 
-func (s *service) fetchShares(ctx context.Context, opaque *typesv1beta1.Opaque, arbitraryMetadataKeys []string, fieldMask *field_mask.FieldMask) ([]*collaboration.ReceivedShare, map[string]*provider.ResourceInfo, error) {
+func (s *service) fetchAcceptedShares(ctx context.Context, opaque *typesv1beta1.Opaque, arbitraryMetadataKeys []string, fieldMask *field_mask.FieldMask) ([]*collaboration.ReceivedShare, map[string]*provider.ResourceInfo, error) {
 	sharingCollaborationClient, err := s.sharingCollaborationSelector.Next()
 	if err != nil {
 		return nil, nil, err
 	}
 	lsRes, err := sharingCollaborationClient.ListReceivedShares(ctx, &collaboration.ListReceivedSharesRequest{
-		// FIXME filter by received shares for resource id - listing all shares is tooo expensive!
+		Filters: []*collaboration.Filter{
+			{
+				Type: collaboration.Filter_TYPE_STATE,
+				Term: &collaboration.Filter_State{
+					State: collaboration.ShareState_SHARE_STATE_ACCEPTED,
+				},
+			},
+		},
 	})
 	if err != nil {
 		return nil, nil, errors.Wrap(err, "sharesstorageprovider: error calling ListReceivedSharesRequest")
@@ -1159,42 +1174,98 @@ func (s *service) fetchShares(ctx context.Context, opaque *typesv1beta1.Opaque,
 		return nil, nil, fmt.Errorf("sharesstorageprovider: error calling ListReceivedSharesRequest")
 	}
 
-	gatewayClient, err := s.gatewaySelector.Next()
-	if err != nil {
-		return nil, nil, err
+	numWorkers := s.maxConcurrency
+	if len(lsRes.Shares) < numWorkers {
+		numWorkers = len(lsRes.Shares)
+	}
+	type res struct {
+		shareid string
+		info    *provider.ResourceInfo
 	}
+	work := make(chan *collaboration.ReceivedShare, len(lsRes.Shares))
+	results := make(chan res, len(lsRes.Shares))
 
-	shareMetaData := make(map[string]*provider.ResourceInfo, len(lsRes.Shares))
-	for _, rs := range lsRes.Shares {
-		// only stat accepted shares
-		if rs.State != collaboration.ShareState_SHARE_STATE_ACCEPTED {
-			continue
-		}
-		if rs.Share.ResourceId.SpaceId == "" {
-			// convert backwards compatible share id
-			rs.Share.ResourceId.StorageId, rs.Share.ResourceId.SpaceId = storagespace.SplitStorageID(rs.Share.ResourceId.StorageId)
+	g, ctx := errgroup.WithContext(ctx)
+
+	// Distribute work
+	g.Go(func() error {
+		defer close(work)
+		for _, share := range lsRes.Shares {
+			select {
+			case work <- share:
+			case <-ctx.Done():
+				return ctx.Err()
+			}
 		}
-		sRes, err := gatewayClient.Stat(ctx, &provider.StatRequest{
-			Opaque:                opaque,
-			Ref:                   &provider.Reference{ResourceId: rs.Share.ResourceId},
-			ArbitraryMetadataKeys: arbitraryMetadataKeys,
-			FieldMask:             fieldMask,
+		return nil
+	})
+
+	// Spawn workers that'll concurrently work the queue
+	for i := 0; i < numWorkers; i++ {
+		g.Go(func() error {
+			for rs := range work {
+
+				// only stat accepted shares
+				if rs.State != collaboration.ShareState_SHARE_STATE_ACCEPTED {
+					continue
+				}
+				if rs.Share.ResourceId.SpaceId == "" {
+					// convert backwards compatible share id
+					rs.Share.ResourceId.StorageId, rs.Share.ResourceId.SpaceId = storagespace.SplitStorageID(rs.Share.ResourceId.StorageId)
+				}
+
+				gatewayClient, err := s.gatewaySelector.Next()
+				if err != nil {
+					appctx.GetLogger(ctx).Error().
+						Err(err).
+						Interface("resourceID", rs.Share.ResourceId).
+						Msg("ListReceivedShares: failed to select next gateway client")
+					return err
+				}
+				sRes, err := gatewayClient.Stat(ctx, &provider.StatRequest{
+					Opaque:                opaque,
+					Ref:                   &provider.Reference{ResourceId: rs.Share.ResourceId},
+					ArbitraryMetadataKeys: arbitraryMetadataKeys,
+					FieldMask:             fieldMask,
+				})
+				if err != nil {
+					appctx.GetLogger(ctx).Error().
+						Err(err).
+						Interface("resourceID", rs.Share.ResourceId).
+						Msg("ListReceivedShares: failed to make stat call")
+					return err
+				}
+				if sRes.Status.Code != rpc.Code_CODE_OK {
+					appctx.GetLogger(ctx).Debug().
+						Interface("resourceID", rs.Share.ResourceId).
+						Interface("status", sRes.Status).
+						Msg("ListReceivedShares: failed to stat the resource")
+					continue
+				}
+				select {
+				case results <- res{shareid: rs.Share.Id.OpaqueId, info: sRes.Info}:
+				case <-ctx.Done():
+					return ctx.Err()
+				}
+			}
+			return nil
 		})
-		if err != nil {
-			appctx.GetLogger(ctx).Error().
-				Err(err).
-				Interface("resourceID", rs.Share.ResourceId).
-				Msg("ListRecievedShares: failed to make stat call")
-			continue
-		}
-		if sRes.Status.Code != rpc.Code_CODE_OK {
-			appctx.GetLogger(ctx).Debug().
-				Interface("resourceID", rs.Share.ResourceId).
-				Interface("status", sRes.Status).
-				Msg("ListRecievedShares: failed to stat the resource")
-			continue
-		}
-		shareMetaData[rs.Share.Id.OpaqueId] = sRes.Info
+	}
+
+	// Wait for things to settle down, then close results chan
+	go func() {
+		_ = g.Wait() // error is checked later
+		close(results)
+	}()
+
+	// some results might have been skipped, so we cannot preallocate the map
+	shareMetaData := make(map[string]*provider.ResourceInfo)
+	for r := range results {
+		shareMetaData[r.shareid] = r.info
+	}
+
+	if err := g.Wait(); err != nil {
+		return nil, nil, err
 	}
 
 	return lsRes.Shares, shareMetaData, nil
diff --git a/vendor/github.com/cs3org/reva/v2/pkg/storage/fs/owncloudsql/owncloudsql.go b/vendor/github.com/cs3org/reva/v2/pkg/storage/fs/owncloudsql/owncloudsql.go
index 67ba8123938..0c51f1c5523 100644
--- a/vendor/github.com/cs3org/reva/v2/pkg/storage/fs/owncloudsql/owncloudsql.go
+++ b/vendor/github.com/cs3org/reva/v2/pkg/storage/fs/owncloudsql/owncloudsql.go
@@ -309,6 +309,17 @@ func (fs *owncloudsqlfs) toDatabasePath(ip string) string {
 	return p
 }
 
+func (fs *owncloudsqlfs) toResourcePath(ip, owner string) string {
+	trim := filepath.Join(fs.c.DataDirectory, owner, "files")
+	p := strings.TrimPrefix(ip, trim)
+	p = strings.TrimPrefix(p, "/")
+	// root directory
+	if p == "" {
+		p = "."
+	}
+	return p
+}
+
 func (fs *owncloudsqlfs) toStoragePath(ctx context.Context, ip string) (sp string) {
 	if fs.c.EnableHome {
 		u := ctxpkg.ContextMustGetUser(ctx)
@@ -523,14 +534,15 @@ func (fs *owncloudsqlfs) convertToResourceInfo(ctx context.Context, entry *filec
 	if _, ok := mdKeysMap["*"]; len(mdKeys) == 0 || ok {
 		returnAllKeys = true
 	}
-
+	owner := fs.getOwner(ip)
+	path := fs.toResourcePath(ip, owner)
 	isDir := entry.MimeTypeString == "httpd/unix-directory"
 	ri := &provider.ResourceInfo{
 		Id: &provider.ResourceId{ // return ownclouds numeric storage id as the space id!
 			SpaceId:  strconv.Itoa(entry.Storage),
 			OpaqueId: strconv.Itoa(entry.ID),
 		},
-		Path:     filepath.Base(ip),
+		Path:     filepath.Base(path), // we currently only return the name, decomposedfs returns the path if the request was path based. is that even still possible?
 		Type:     getResourceType(isDir),
 		Etag:     entry.Etag,
 		MimeType: entry.MimeTypeString,
@@ -542,8 +554,16 @@ func (fs *owncloudsqlfs) convertToResourceInfo(ctx context.Context, entry *filec
 			Metadata: map[string]string{}, // TODO aduffeck: which metadata needs to go in here?
 		},
 	}
+	// omit parentid for root
+	if path != "." {
+		ri.Name = entry.Name
+		ri.ParentId = &provider.ResourceId{
+			// return ownclouds numeric storage id as the space id!
+			SpaceId:  strconv.Itoa(entry.Storage),
+			OpaqueId: strconv.Itoa(entry.Parent),
+		}
+	}
 
-	if owner, err := fs.getUser(ctx, fs.getOwner(ip)); err == nil {
+	if owner, err := fs.getUser(ctx, owner); err == nil {
 		ri.Owner = owner.Id
 	} else {
 		appctx.GetLogger(ctx).Error().Err(err).Msg("error getting owner")
@@ -1419,9 +1439,6 @@ func (fs *owncloudsqlfs) listWithNominalHome(ctx context.Context, ip string, mdK
 	finfos := []*provider.ResourceInfo{}
 	for _, entry := range entries {
 		cp := filepath.Join(fs.c.DataDirectory, owner, entry.Path)
-		if err != nil {
-			return nil, err
-		}
 		m, err := fs.convertToResourceInfo(ctx, entry, cp, mdKeys)
 		if err != nil {
 			appctx.GetLogger(ctx).Error().Err(err).Str("path", cp).Msg("error converting to a resource info")
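Reviewer note on the hunk above: the new toResourcePath derives a space-relative path from the internal filesystem path, which convertToResourceInfo then uses for Path, Name and ParentId. Its behavior is easiest to see with concrete inputs; a standalone copy of the helper with an assumed directory layout (the example values are illustrative, not taken from the patch):

```go
package main

import (
	"fmt"
	"path/filepath"
	"strings"
)

// toResourcePath, copied standalone: strips "<datadir>/<owner>/files" from an
// internal path and returns "." for the space root.
func toResourcePath(dataDirectory, ip, owner string) string {
	trim := filepath.Join(dataDirectory, owner, "files")
	p := strings.TrimPrefix(ip, trim)
	p = strings.TrimPrefix(p, "/")
	if p == "" {
		p = "." // root directory
	}
	return p
}

func main() {
	// Assumed example layout, not from the patch.
	fmt.Println(toResourcePath("/var/ocdata", "/var/ocdata/alice/files/docs/a.txt", "alice")) // docs/a.txt
	fmt.Println(toResourcePath("/var/ocdata", "/var/ocdata/alice/files", "alice"))            // .
}
```

The "." sentinel is what lets convertToResourceInfo skip Name and ParentId for the space root while still returning them for every other entry.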
Mtime: &types.Timestamp{Seconds: uint64(root.MTime)},
 		Owner: owner,
 	}
+	space.Opaque = utils.AppendPlainToOpaque(space.Opaque, "spaceAlias", "personal/"+owner.Username)
+	space.Opaque = utils.AppendPlainToOpaque(space.Opaque, "etag", fmt.Sprintf(`"%s"`, root.Etag))
 	return space, nil
 }
diff --git a/vendor/github.com/cs3org/reva/v2/pkg/storage/utils/decomposedfs/node/xattrs.go b/vendor/github.com/cs3org/reva/v2/pkg/storage/utils/decomposedfs/node/xattrs.go
index 0e5d186b345..621be8843d6 100644
--- a/vendor/github.com/cs3org/reva/v2/pkg/storage/utils/decomposedfs/node/xattrs.go
+++ b/vendor/github.com/cs3org/reva/v2/pkg/storage/utils/decomposedfs/node/xattrs.go
@@ -21,6 +21,7 @@ package node
 import (
 	"context"
 	"io"
+	"io/fs"
 	"strconv"
 	"time"
 
@@ -144,14 +145,21 @@ func (n *Node) Xattrs(ctx context.Context) (Attributes, error) {
 // Xattr returns an extended attribute of the node. If the attributes have already
 // been cached it is not read from disk again.
 func (n *Node) Xattr(ctx context.Context, key string) ([]byte, error) {
+	path := n.InternalPath()
+
+	if path == "" {
+		// Do not try to read the attribute of a non-existing node
+		return []byte{}, fs.ErrNotExist
+	}
+
 	if n.ID == "" {
 		// Do not try to read the attribute of an empty node. The InternalPath points to the
 		// base nodes directory in this case.
-		return []byte{}, &xattr.Error{Op: "node.Xattr", Path: n.InternalPath(), Name: key, Err: xattr.ENOATTR}
+		return []byte{}, &xattr.Error{Op: "node.Xattr", Path: path, Name: key, Err: xattr.ENOATTR}
 	}
 
 	if n.xattrsCache == nil {
-		attrs, err := n.lu.MetadataBackend().All(ctx, n.InternalPath())
+		attrs, err := n.lu.MetadataBackend().All(ctx, path)
 		if err != nil {
 			return []byte{}, err
 		}
@@ -162,7 +170,7 @@ func (n *Node) Xattr(ctx context.Context, key string) ([]byte, error) {
 		return val, nil
 	}
 	// wrap the error as xattr does
-	return []byte{}, &xattr.Error{Op: "node.Xattr", Path: n.InternalPath(), Name: key, Err: xattr.ENOATTR}
+	return []byte{}, &xattr.Error{Op: "node.Xattr", Path: path, Name: key, Err: xattr.ENOATTR}
 }
 
 // XattrString returns the string representation of an attribute
diff --git a/vendor/github.com/cs3org/reva/v2/pkg/user/manager/owncloudsql/owncloudsql.go b/vendor/github.com/cs3org/reva/v2/pkg/user/manager/owncloudsql/owncloudsql.go
index 166bdc8c7ad..c61d485f6d1 100644
--- a/vendor/github.com/cs3org/reva/v2/pkg/user/manager/owncloudsql/owncloudsql.go
+++ b/vendor/github.com/cs3org/reva/v2/pkg/user/manager/owncloudsql/owncloudsql.go
@@ -167,6 +167,11 @@ func (m *manager) convertToCS3User(ctx context.Context, a *accounts.Account, ski
 		GidNumber: m.c.Nobody,
 		UidNumber: m.c.Nobody,
 	}
+	// https://github.com/cs3org/reva/pull/4135
+	// fall back to userid
+	if u.Id.OpaqueId == "" {
+		u.Id.OpaqueId = a.UserID
+	}
 	if u.Username == "" {
 		u.Username = u.Id.OpaqueId
 	}
diff --git a/vendor/modules.txt b/vendor/modules.txt
index e00865de0ef..b2fef3fdd1d 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -367,7 +367,7 @@ github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1
 github.com/cs3org/go-cs3apis/cs3/storage/registry/v1beta1
 github.com/cs3org/go-cs3apis/cs3/tx/v1beta1
 github.com/cs3org/go-cs3apis/cs3/types/v1beta1
-# github.com/cs3org/reva/v2 v2.22.1-0.20240809114512-56b26ddd82cc
+# github.com/cs3org/reva/v2 v2.23.0
 ## explicit; go 1.21
 github.com/cs3org/reva/v2/cmd/revad/internal/grace
 github.com/cs3org/reva/v2/cmd/revad/runtime
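One behavioral note on the xattrs.go change: a node with an empty InternalPath now yields fs.ErrNotExist instead of an xattr.Error wrapping ENOATTR, so callers can branch with errors.Is. A hedged sketch of that calling pattern, where readAttr is a hypothetical stand-in for Node.Xattr rather than reva's actual API:

```go
package main

import (
	"errors"
	"fmt"
	"io/fs"
)

// readAttr stands in for Node.Xattr: an empty internal path now yields
// fs.ErrNotExist instead of an xattr.Error with ENOATTR.
func readAttr(internalPath, key string) ([]byte, error) {
	if internalPath == "" {
		// Do not try to read the attribute of a non-existing node.
		return []byte{}, fs.ErrNotExist
	}
	return []byte("value-of-" + key), nil // stand-in for the metadata backend read
}

func main() {
	if _, err := readAttr("", "user.ocis.name"); errors.Is(err, fs.ErrNotExist) {
		fmt.Println("node does not exist") // callers can branch on fs.ErrNotExist
	}
}
```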