diff --git a/.drone.star b/.drone.star index cbbe9330d3..5cd04df9f7 100644 --- a/.drone.star +++ b/.drone.star @@ -572,7 +572,7 @@ def virtualViews(): "PATH_TO_CORE": "/drone/src/tmp/testrunner", "TEST_SERVER_URL": "http://revad-services:20180", "OCIS_REVA_DATA_ROOT": "/drone/src/tmp/reva/data/", - "DELETE_USER_DATA_CMD": "rm -rf /drone/src/tmp/reva/data/nodes/root/* /drone/src/tmp/reva/data/nodes/*-*-*-* /drone/src/tmp/reva/data/blobs/*", + "DELETE_USER_DATA_CMD": "rm -rf /drone/src/tmp/reva/data/spaces/* /drone/src/tmp/reva/data/blobs/* /drone/src/tmp/reva/data/spacetypes/*", "STORAGE_DRIVER": "OCIS", "SKELETON_DIR": "/drone/src/tmp/testing/data/apiSkeleton", "TEST_REVA": "true", @@ -751,7 +751,7 @@ def litmusOcisSpacesDav(): "commands": [ # The spaceid is randomly generated during the first login so we need this hack to construct the correct url. "curl -s -k -u einstein:relativity -I http://revad-services:20080/remote.php/dav/files/einstein", - "export LITMUS_URL=http://revad-services:20080/remote.php/dav/spaces/$(ls /drone/src/tmp/reva/data/spaces/personal/)", + "export LITMUS_URL=http://revad-services:20080/remote.php/dav/spaces/$(ls /drone/src/tmp/reva/data/spacetypes/personal/)", "/usr/local/bin/litmus-wrapper", ], }, @@ -813,7 +813,7 @@ def ocisIntegrationTests(parallelRuns, skipExceptParts = []): "environment": { "TEST_SERVER_URL": "http://revad-services:20080", "OCIS_REVA_DATA_ROOT": "/drone/src/tmp/reva/data/", - "DELETE_USER_DATA_CMD": "rm -rf /drone/src/tmp/reva/data/nodes/root/* /drone/src/tmp/reva/data/nodes/*-*-*-* /drone/src/tmp/reva/data/blobs/* /drone/src/tmp/reva/data/spaces/*/*", + "DELETE_USER_DATA_CMD": "rm -rf /drone/src/tmp/reva/data/spaces/* /drone/src/tmp/reva/data/blobs/* /drone/src/tmp/reva/data/spacetypes/*", "STORAGE_DRIVER": "OCIS", "SKELETON_DIR": "/drone/src/tmp/testing/data/apiSkeleton", "TEST_WITH_LDAP": "true", @@ -890,7 +890,7 @@ def s3ngIntegrationTests(parallelRuns, skipExceptParts = []): "environment": { "TEST_SERVER_URL": 
"http://revad-services:20080", "OCIS_REVA_DATA_ROOT": "/drone/src/tmp/reva/data/", - "DELETE_USER_DATA_CMD": "rm -rf /drone/src/tmp/reva/data/nodes/root/* /drone/src/tmp/reva/data/nodes/*-*-*-* /drone/src/tmp/reva/data/blobs/* /drone/src/tmp/reva/data/spaces/*/*", + "DELETE_USER_DATA_CMD": "rm -rf /drone/src/tmp/reva/data/spaces/* /drone/src/tmp/reva/data/blobs/* /drone/src/tmp/reva/data/spacetypes/*", "STORAGE_DRIVER": "S3NG", "SKELETON_DIR": "/drone/src/tmp/testing/data/apiSkeleton", "TEST_WITH_LDAP": "true", diff --git a/changelog/unreleased/decomposedfs-shard-nodes-per-space.md b/changelog/unreleased/decomposedfs-shard-nodes-per-space.md new file mode 100644 index 0000000000..be6f6ae86c --- /dev/null +++ b/changelog/unreleased/decomposedfs-shard-nodes-per-space.md @@ -0,0 +1,5 @@ +Change: shard nodes per space in decomposedfs + +The decomposedfs changes the on-disk layout to shard nodes per space. + +https://github.com/cs3org/reva/pull/2554 diff --git a/pkg/permission/manager/demo/demo.go b/pkg/permission/manager/demo/demo.go index eb9da7544d..06a23ea6a0 100644 --- a/pkg/permission/manager/demo/demo.go +++ b/pkg/permission/manager/demo/demo.go @@ -36,8 +36,18 @@ func New(c map[string]interface{}) (permission.Manager, error) { type manager struct { } -func (m manager) CheckPermission(permission string, subject string, ref *provider.Reference) bool { - // We can currently return true all the time. - // Once we beginn testing roles we need to somehow check the roles of the users here - return false +func (m manager) CheckPermission(perm string, subject string, ref *provider.Reference) bool { + switch perm { + case permission.CreateSpace: + // TODO Users can only create their own personal space + // TODO guest accounts cannot create spaces + return true + case permission.ListAllSpaces: + // TODO introduce an admin role to allow listing all spaces + return false + default: + // We can currently return false all the time. 
+ // Once we begin testing roles we need to somehow check the roles of the users here + return false + } } diff --git a/pkg/permission/permission.go index e5e5c76a52..6aaa808845 100644 --- a/pkg/permission/permission.go +++ b/pkg/permission/permission.go @@ -22,6 +22,13 @@ import ( provider "github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1" ) +const ( + // ListAllSpaces is the hardcoded name for the list all spaces permission + ListAllSpaces string = "list-all-spaces" + // CreateSpace is the hardcoded name for the create space permission + CreateSpace string = "create-space" +) + // Manager defines the interface for the permission service driver type Manager interface { CheckPermission(permission string, subject string, ref *provider.Reference) bool } diff --git a/pkg/storage/fs/nextcloud/nextcloud_server_mock.go b/pkg/storage/fs/nextcloud/nextcloud_server_mock.go index db514cbb5c..74cb43d2a2 100644 --- a/pkg/storage/fs/nextcloud/nextcloud_server_mock.go +++ b/pkg/storage/fs/nextcloud/nextcloud_server_mock.go @@ -66,6 +66,7 @@ var responses = map[string]Response{ `POST /apps/sciencemesh/~f7fbf8c8-139b-4376-b307-cf0a8c2d0d9c/api/storage/CreateHome `: {200, ``, serverStateHome}, `POST /apps/sciencemesh/~f7fbf8c8-139b-4376-b307-cf0a8c2d0d9c/api/storage/CreateHome {}`: {200, ``, serverStateHome}, `POST /apps/sciencemesh/~f7fbf8c8-139b-4376-b307-cf0a8c2d0d9c/api/storage/CreateStorageSpace {"owner":{"id":{"idp":"0.0.0.0:19000","opaque_id":"f7fbf8c8-139b-4376-b307-cf0a8c2d0d9c","type":1},"username":"einstein"},"type":"personal","name":"f7fbf8c8-139b-4376-b307-cf0a8c2d0d9c"}`: {200, `{"status":{"code":1}}`, serverStateHome}, + `POST /apps/sciencemesh/~f7fbf8c8-139b-4376-b307-cf0a8c2d0d9c/api/storage/CreateStorageSpace {"owner":{"id":{"idp":"0.0.0.0:19000","opaque_id":"f7fbf8c8-139b-4376-b307-cf0a8c2d0d9c","type":1},"username":"einstein"},"type":"personal"}`: {200, `{"status":{"code":1}}`, serverStateHome}, `POST 
/apps/sciencemesh/~f7fbf8c8-139b-4376-b307-cf0a8c2d0d9c/api/storage/CreateReference {"path":"/Shares/reference","url":"scheme://target"}`: {200, `[]`, serverStateReference}, @@ -103,7 +104,8 @@ var responses = map[string]Response{ `POST /apps/sciencemesh/~f7fbf8c8-139b-4376-b307-cf0a8c2d0d9c/api/storage/GetPathByID {"storage_id":"00000000-0000-0000-0000-000000000000","opaque_id":"fileid-/some/path"} EMPTY`: {200, "/subdir", serverStateEmpty}, - `POST /apps/sciencemesh/~f7fbf8c8-139b-4376-b307-cf0a8c2d0d9c/api/storage/InitiateUpload {"ref":{"path":"/file"},"uploadLength":0,"metadata":{}}`: {200, `{"simple": "yes","tus": "yes"}`, serverStateEmpty}, + `POST /apps/sciencemesh/~f7fbf8c8-139b-4376-b307-cf0a8c2d0d9c/api/storage/InitiateUpload {"ref":{"path":"/file"},"uploadLength":0,"metadata":{}}`: {200, `{"simple": "yes","tus": "yes"}`, serverStateEmpty}, + `POST /apps/sciencemesh/~f7fbf8c8-139b-4376-b307-cf0a8c2d0d9c/api/storage/InitiateUpload {"ref":{"resource_id":{"storage_id":"f7fbf8c8-139b-4376-b307-cf0a8c2d0d9c"},"path":"/versionedFile"},"uploadLength":0,"metadata":{}}`: {200, `{"simple": "yes","tus": "yes"}`, serverStateEmpty}, `POST /apps/sciencemesh/~f7fbf8c8-139b-4376-b307-cf0a8c2d0d9c/api/storage/ListFolder {"ref":{"path":"/"},"mdKeys":null}`: {200, `[{"opaque":{},"type":2,"id":{"opaque_id":"fileid-/subdir"},"checksum":{},"etag":"deadbeef","mime_type":"text/plain","mtime":{"seconds":1234567890},"path":"/subdir","permission_set":{},"size":12345,"canonical_metadata":{},"owner":{"opaque_id":"f7fbf8c8-139b-4376-b307-cf0a8c2d0d9c"},"arbitrary_metadata":{"metadata":{"da":"ta","some":"arbi","trary":"meta"}}}]`, serverStateEmpty}, diff --git a/pkg/storage/utils/decomposedfs/decomposedfs.go b/pkg/storage/utils/decomposedfs/decomposedfs.go index 7c7c0dce3a..f474e1429e 100644 --- a/pkg/storage/utils/decomposedfs/decomposedfs.go +++ b/pkg/storage/utils/decomposedfs/decomposedfs.go @@ -19,6 +19,7 @@ package decomposedfs // go:generate mockery -name PermissionsChecker +// 
go:generate mockery -name CS3PermissionsClient // go:generate mockery -name Tree import ( @@ -33,15 +34,17 @@ import ( "strings" "syscall" - userpb "github.com/cs3org/go-cs3apis/cs3/identity/user/v1beta1" + cs3permissions "github.com/cs3org/go-cs3apis/cs3/permissions/v1beta1" + rpcv1beta1 "github.com/cs3org/go-cs3apis/cs3/rpc/v1beta1" provider "github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1" "github.com/cs3org/reva/pkg/appctx" ctxpkg "github.com/cs3org/reva/pkg/ctx" "github.com/cs3org/reva/pkg/errtypes" "github.com/cs3org/reva/pkg/logger" - "github.com/cs3org/reva/pkg/sharedconf" + "github.com/cs3org/reva/pkg/rgrpc/todo/pool" "github.com/cs3org/reva/pkg/storage" "github.com/cs3org/reva/pkg/storage/utils/chunking" + "github.com/cs3org/reva/pkg/storage/utils/decomposedfs/lookup" "github.com/cs3org/reva/pkg/storage/utils/decomposedfs/node" "github.com/cs3org/reva/pkg/storage/utils/decomposedfs/options" "github.com/cs3org/reva/pkg/storage/utils/decomposedfs/tree" @@ -51,6 +54,7 @@ import ( "github.com/cs3org/reva/pkg/utils" "github.com/pkg/errors" "go.opentelemetry.io/otel/codes" + "google.golang.org/grpc" ) // PermissionsChecker defines an interface for checking permissions on a Node @@ -59,9 +63,14 @@ type PermissionsChecker interface { HasPermission(ctx context.Context, n *node.Node, check func(*provider.ResourcePermissions) bool) (can bool, err error) } +// CS3PermissionsClient defines an interface for checking permissions against the CS3 permissions service +type CS3PermissionsClient interface { + CheckPermission(ctx context.Context, in *cs3permissions.CheckPermissionRequest, opts ...grpc.CallOption) (*cs3permissions.CheckPermissionResponse, error) +} + // Tree is used to manage a tree hierarchy type Tree interface { - Setup(owner *userpb.UserId, propagateToRoot bool) error + Setup() error GetMD(ctx 
context.Context, node *node.Node) (os.FileInfo, error) ListFolder(ctx context.Context, node *node.Node) ([]*node.Node, error) @@ -82,11 +91,12 @@ type Tree interface { // Decomposedfs provides the base for decomposed filesystem implementations type Decomposedfs struct { - lu *Lookup - tp Tree - o *options.Options - p PermissionsChecker - chunkHandler *chunking.ChunkHandler + lu *lookup.Lookup + tp Tree + o *options.Options + p PermissionsChecker + chunkHandler *chunking.ChunkHandler + permissionsClient CS3PermissionsClient } // NewDefault returns an instance with default components @@ -96,30 +106,25 @@ func NewDefault(m map[string]interface{}, bs tree.Blobstore) (storage.FS, error) return nil, err } - lu := &Lookup{} + lu := &lookup.Lookup{} p := node.NewPermissions(lu) lu.Options = o tp := tree.New(o.Root, o.TreeTimeAccounting, o.TreeSizeAccounting, lu, bs) - o.GatewayAddr = sharedconf.GetGatewaySVC(o.GatewayAddr) - return New(o, lu, p, tp) -} + permissionsClient, err := pool.GetPermissionsClient(o.PermissionsSVC) + if err != nil { + return nil, err + } -// when enable home is false we want propagation to root if tree size or mtime accounting is enabled -func enablePropagationForRoot(o *options.Options) bool { - return (o.TreeSizeAccounting || o.TreeTimeAccounting) + return New(o, lu, p, tp, permissionsClient) } // New returns an implementation of the storage.FS interface that talks to // a local filesystem. -func New(o *options.Options, lu *Lookup, p PermissionsChecker, tp Tree) (storage.FS, error) { - err := tp.Setup(&userpb.UserId{ - OpaqueId: o.Owner, - Idp: o.OwnerIDP, - Type: userpb.UserType(userpb.UserType_value[o.OwnerType]), - }, enablePropagationForRoot(o)) +func New(o *options.Options, lu *lookup.Lookup, p PermissionsChecker, tp Tree, permissionsClient CS3PermissionsClient) (storage.FS, error) { + err := tp.Setup() if err != nil { logger.New().Error().Err(err). 
Msg("could not setup tree") @@ -127,11 +132,12 @@ func New(o *options.Options, lu *Lookup, p PermissionsChecker, tp Tree) (storage } return &Decomposedfs{ - tp: tp, - lu: lu, - o: o, - p: p, - chunkHandler: chunking.NewChunkHandler(filepath.Join(o.Root, "uploads")), + tp: tp, + lu: lu, + o: o, + p: p, + chunkHandler: chunking.NewChunkHandler(filepath.Join(o.Root, "uploads")), + permissionsClient: permissionsClient, }, nil } @@ -144,14 +150,12 @@ func (fs *Decomposedfs) Shutdown(ctx context.Context) error { // TODO Document in the cs3 should we return quota or free space? func (fs *Decomposedfs) GetQuota(ctx context.Context, ref *provider.Reference) (total uint64, inUse uint64, err error) { var n *node.Node - if ref != nil { - if n, err = fs.lu.NodeFromResource(ctx, ref); err != nil { - return 0, 0, err - } - } else { - if n, err = fs.lu.RootNode(ctx); err != nil { - return 0, 0, err - } + if ref == nil { + err = errtypes.BadRequest("no space given") + return 0, 0, err + } + if n, err = fs.lu.NodeFromResource(ctx, ref); err != nil { + return 0, 0, err } if !n.Exists { @@ -204,57 +208,18 @@ func (fs *Decomposedfs) CreateHome(ctx context.Context) (err error) { return errtypes.NotSupported("Decomposedfs: CreateHome() home supported disabled") } - var n, h *node.Node - if n, err = fs.lu.RootNode(ctx); err != nil { - return - } - h, err = fs.lu.WalkPath(ctx, n, fs.lu.mustGetUserLayout(ctx), false, func(ctx context.Context, n *node.Node) error { - if !n.Exists { - if err := fs.tp.CreateDir(ctx, n); err != nil { - return err - } - } - return nil + u := ctxpkg.ContextMustGetUser(ctx) + res, err := fs.CreateStorageSpace(ctx, &provider.CreateStorageSpaceRequest{ + Type: spaceTypePersonal, + Owner: u, }) - - // make sure to delete the created directory if things go wrong - defer func() { - if err != nil { - // do not catch the error to not shadow the original error - if tmpErr := fs.tp.Delete(ctx, n); tmpErr != nil { - appctx.GetLogger(ctx).Error().Err(tmpErr).Msg("Can not 
revert file system change after error") - } - } - }() - if err != nil { - return - } - - // update the owner - u := ctxpkg.ContextMustGetUser(ctx) - if err = h.WriteAllNodeMetadata(u.Id); err != nil { - return - } - - if fs.o.TreeTimeAccounting || fs.o.TreeSizeAccounting { - // mark the home node as the end of propagation - if err = h.SetMetadata(xattrs.PropagationAttr, "1"); err != nil { - appctx.GetLogger(ctx).Error().Err(err).Interface("node", h).Msg("could not mark home as propagation root") - return - } - } - - if err := h.SetMetadata(xattrs.SpaceNameAttr, u.DisplayName); err != nil { return err } - - // add storage space - if err := fs.createStorageSpace(ctx, spaceTypePersonal, h.ID); err != nil { - return err + if res.Status.Code != rpcv1beta1.Code_CODE_OK { + return errtypes.NewErrtypeFromStatus(res.Status) } - - return + return nil } // The os not exists error is buried inside the xattr error, diff --git a/pkg/storage/utils/decomposedfs/grants.go b/pkg/storage/utils/decomposedfs/grants.go index 923a4478a1..f80a0d0344 100644 --- a/pkg/storage/utils/decomposedfs/grants.go +++ b/pkg/storage/utils/decomposedfs/grants.go @@ -56,16 +56,12 @@ func (fs *Decomposedfs) AddGrant(ctx context.Context, ref *provider.Reference, g return err } - owner, err := node.Owner() - if err != nil { - return err - } - + owner := node.Owner() // If the owner is empty and there are no grantees then we are dealing with a just created project space. // In this case we don't need to check for permissions and just add the grant since this will be the project // manager. // When the owner is empty but grants are set then we do want to check the grants. - if !(len(grantees) == 0 && owner.OpaqueId == "") { + if !(len(grantees) == 0 && (owner == nil || owner.OpaqueId == "")) { ok, err := fs.p.HasPermission(ctx, node, func(rp *provider.ResourcePermissions) bool { // TODO remove AddGrant or UpdateGrant grant from CS3 api, redundant? 
tracked in https://github.com/cs3org/cs3apis/issues/92 return rp.AddGrant || rp.UpdateGrant @@ -91,7 +87,7 @@ func (fs *Decomposedfs) AddGrant(ctx context.Context, ref *provider.Reference, g // when a grant is added to a space, do not add a new space under "shares" if spaceGrant := ctx.Value(utils.SpaceGrant); spaceGrant == nil { - err := fs.createStorageSpace(ctx, spaceTypeShare, node.ID) + err := fs.linkStorageSpaceType(ctx, spaceTypeShare, node.ID) if err != nil { return err } @@ -122,7 +118,7 @@ func (fs *Decomposedfs) ListGrants(ctx context.Context, ref *provider.Reference) } log := appctx.GetLogger(ctx) - np := fs.lu.InternalPath(node.ID) + np := node.InternalPath() var attrs []string if attrs, err = xattr.List(np); err != nil { log.Error().Err(err).Msg("error listing attributes") @@ -174,8 +170,7 @@ func (fs *Decomposedfs) RemoveGrant(ctx context.Context, ref *provider.Reference attr = xattrs.GrantUserAcePrefix + g.Grantee.GetUserId().OpaqueId } - np := fs.lu.InternalPath(node.ID) - if err = xattr.Remove(np, attr); err != nil { + if err = xattrs.Remove(node.InternalPath(), attr); err != nil { return } diff --git a/pkg/storage/utils/decomposedfs/grants_test.go b/pkg/storage/utils/decomposedfs/grants_test.go index 4b28c740e0..b77bb49985 100644 --- a/pkg/storage/utils/decomposedfs/grants_test.go +++ b/pkg/storage/utils/decomposedfs/grants_test.go @@ -21,7 +21,6 @@ package decomposedfs_test import ( "io/fs" "os" - "path" "path/filepath" userpb "github.com/cs3org/go-cs3apis/cs3/identity/user/v1beta1" @@ -115,7 +114,7 @@ var _ = Describe("Grants", func() { err = env.Fs.AddGrant(env.Ctx, ref, grant) Expect(err).ToNot(HaveOccurred()) - localPath := path.Join(env.Root, "nodes", n.ID) + localPath := n.InternalPath() attr, err := xattr.Get(localPath, xattrs.GrantUserAcePrefix+grant.Grantee.GetUserId().OpaqueId) Expect(err).ToNot(HaveOccurred()) Expect(string(attr)).To(Equal("\x00t=A:f=:p=rw")) @@ -125,8 +124,8 @@ var _ = Describe("Grants", func() { err 
:= env.Fs.AddGrant(env.Ctx, ref, grant) Expect(err).ToNot(HaveOccurred()) - spacesPath := filepath.Join(env.Root, "spaces") - tfs.root = spacesPath + spaceTypesPath := filepath.Join(env.Root, "spacetypes") + tfs.root = spaceTypesPath entries, err := fs.ReadDir(tfs, "share") Expect(err).ToNot(HaveOccurred()) Expect(len(entries)).To(BeNumerically(">=", 1)) diff --git a/pkg/storage/utils/decomposedfs/lookup.go b/pkg/storage/utils/decomposedfs/lookup/lookup.go similarity index 79% rename from pkg/storage/utils/decomposedfs/lookup.go rename to pkg/storage/utils/decomposedfs/lookup/lookup.go index 06017e96a2..38032a1acd 100644 --- a/pkg/storage/utils/decomposedfs/lookup.go +++ b/pkg/storage/utils/decomposedfs/lookup/lookup.go @@ -16,23 +16,20 @@ // granted to it by virtue of its status as an Intergovernmental Organization // or submit itself to any jurisdiction. -package decomposedfs +package lookup import ( "context" "fmt" - "os" "path/filepath" "strings" provider "github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1" "github.com/cs3org/reva/pkg/appctx" - ctxpkg "github.com/cs3org/reva/pkg/ctx" "github.com/cs3org/reva/pkg/errtypes" "github.com/cs3org/reva/pkg/storage/utils/decomposedfs/node" "github.com/cs3org/reva/pkg/storage/utils/decomposedfs/options" "github.com/cs3org/reva/pkg/storage/utils/decomposedfs/xattrs" - "github.com/cs3org/reva/pkg/storage/utils/templates" ) // Lookup implements transformations from filepath to node and back @@ -58,6 +55,7 @@ func (lu *Lookup) NodeFromResource(ctx context.Context, ref *provider.Reference) if err != nil { return nil, err } + n.SpaceID = ref.ResourceId.StorageId } } return n, nil @@ -76,32 +74,28 @@ func (lu *Lookup) NodeFromID(ctx context.Context, id *provider.ResourceId) (n *n // The Resource references the root of a space return lu.NodeFromSpaceID(ctx, id) } - n, err = node.ReadNode(ctx, lu, id.OpaqueId) - if err != nil { - return nil, err - } + return 
node.ReadNode(ctx, lu, id.StorageId, id.OpaqueId) +} - return n, n.FindStorageSpaceRoot() +// Pathify segments the beginning of a string into depth segments of width length +// Pathify("aabbccdd", 3, 1) will return "a/a/b/bccdd" +func Pathify(id string, depth, width int) string { + b := strings.Builder{} + i := 0 + for ; i < depth; i++ { + if len(id) <= i*width+width { + break + } + b.WriteString(id[i*width : i*width+width]) + b.WriteRune(filepath.Separator) + } + b.WriteString(id[i*width:]) + return b.String() } // NodeFromSpaceID converts a resource id without an opaque id into a Node func (lu *Lookup) NodeFromSpaceID(ctx context.Context, id *provider.ResourceId) (n *node.Node, err error) { - d := filepath.Join(lu.Options.Root, "spaces", spaceTypeAny, id.StorageId) - matches, err := filepath.Glob(d) - if err != nil { - return nil, err - } - - if len(matches) != 1 { - return nil, fmt.Errorf("can't determine node from spaceID: found %d matching spaces. Path: %s", len(matches), d) - } - - target, err := os.Readlink(matches[0]) - if err != nil { - appctx.GetLogger(ctx).Error().Err(err).Str("match", matches[0]).Msg("could not read link, skipping") - } - - node, err := node.ReadNode(ctx, lu, filepath.Base(target)) + node, err := node.ReadNode(ctx, lu, id.StorageId, id.StorageId) if err != nil { return nil, err } @@ -128,13 +122,6 @@ func (lu *Lookup) Path(ctx context.Context, n *node.Node) (p string, err error) return } -// RootNode returns the root node of the storage -func (lu *Lookup) RootNode(ctx context.Context) (*node.Node, error) { - n := node.New(node.RootID, "", "", 0, "", nil, lu) - n.Exists = true - return n, nil -} - // WalkPath calls n.Child(segment) on every path segment in p starting at the node r. // If a function f is given it will be executed for every segment node, but not the root node r. // If followReferences is given the current visited reference node is replaced by the referenced node. 
@@ -182,13 +169,8 @@ func (lu *Lookup) InternalRoot() string { } // InternalPath returns the internal path for a given ID -func (lu *Lookup) InternalPath(id string) string { - return filepath.Join(lu.Options.Root, "nodes", id) -} - -func (lu *Lookup) mustGetUserLayout(ctx context.Context) string { - u := ctxpkg.ContextMustGetUser(ctx) - return templates.WithUser(u, lu.Options.UserLayout) +func (lu *Lookup) InternalPath(spaceID, nodeID string) string { + return filepath.Join(lu.Options.Root, "spaces", Pathify(spaceID, 1, 2), "nodes", Pathify(nodeID, 4, 2)) } // ShareFolder returns the internal storage root directory diff --git a/pkg/storage/utils/decomposedfs/lookup_test.go b/pkg/storage/utils/decomposedfs/lookup/lookup_test.go similarity index 99% rename from pkg/storage/utils/decomposedfs/lookup_test.go rename to pkg/storage/utils/decomposedfs/lookup/lookup_test.go index 2895445383..4faf6f58ac 100644 --- a/pkg/storage/utils/decomposedfs/lookup_test.go +++ b/pkg/storage/utils/decomposedfs/lookup/lookup_test.go @@ -16,7 +16,7 @@ // granted to it by virtue of its status as an Intergovernmental Organization // or submit itself to any jurisdiction. 
-package decomposedfs_test +package lookup_test import ( provider "github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1" diff --git a/pkg/storage/utils/decomposedfs/metadata.go b/pkg/storage/utils/decomposedfs/metadata.go index f45c9ea216..c7b7edaf0c 100644 --- a/pkg/storage/utils/decomposedfs/metadata.go +++ b/pkg/storage/utils/decomposedfs/metadata.go @@ -22,7 +22,6 @@ import ( "context" "fmt" "path/filepath" - "syscall" userpb "github.com/cs3org/go-cs3apis/cs3/identity/user/v1beta1" provider "github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1" @@ -181,10 +180,8 @@ func (fs *Decomposedfs) UnsetArbitraryMetadata(ctx context.Context, ref *provide continue } fa := fmt.Sprintf("%s:%s:%s@%s", xattrs.FavPrefix, utils.UserTypeToString(uid.GetType()), uid.GetOpaqueId(), uid.GetIdp()) - if err := xattr.Remove(nodePath, fa); err != nil { - if isNoData(err) { - // TODO align with default case: is there a difference between darwin and linux? - // refactor this properly into a function in the "github.com/cs3org/reva/pkg/storage/utils/decomposedfs/xattrs" package + if err := xattrs.Remove(nodePath, fa); err != nil { + if xattrs.IsAttrUnset(err) { continue // already gone, ignore } sublog.Error().Err(err). @@ -194,17 +191,14 @@ func (fs *Decomposedfs) UnsetArbitraryMetadata(ctx context.Context, ref *provide errs = append(errs, errors.Wrap(err, "could not unset favorite flag")) } default: - if err = xattr.Remove(nodePath, xattrs.MetadataPrefix+k); err != nil { - // a non-existing attribute will return an error, which we can ignore - // (using string compare because the error type is syscall.Errno and not wrapped/recognizable) - if e, ok := err.(*xattr.Error); !ok || !(e.Err.Error() == "no data available" || - // darwin - e.Err.Error() == "attribute not found") { - sublog.Error().Err(err). - Str("key", k). 
- Msg("could not unset metadata") - errs = append(errs, errors.Wrap(err, "could not unset metadata")) + if err = xattrs.Remove(nodePath, xattrs.MetadataPrefix+k); err != nil { + if xattrs.IsAttrUnset(err) { + continue // already gone, ignore } + sublog.Error().Err(err). + Str("key", k). + Msg("could not unset metadata") + errs = append(errs, errors.Wrap(err, "could not unset metadata")) } } } @@ -220,14 +214,3 @@ func (fs *Decomposedfs) UnsetArbitraryMetadata(ctx context.Context, ref *provide return errors.New("multiple errors occurred, see log for details") } } - -// The os ENODATA error is buried inside the xattr error, -// so we cannot just use os.IsNotExists(). -func isNoData(err error) bool { - if xerr, ok := err.(*xattr.Error); ok { - if serr, ok2 := xerr.Err.(syscall.Errno); ok2 { - return serr == syscall.ENODATA - } - } - return false -} diff --git a/pkg/storage/utils/decomposedfs/mocks/CS3PermissionsClient.go b/pkg/storage/utils/decomposedfs/mocks/CS3PermissionsClient.go new file mode 100644 index 0000000000..c63573fce8 --- /dev/null +++ b/pkg/storage/utils/decomposedfs/mocks/CS3PermissionsClient.go @@ -0,0 +1,66 @@ +// Copyright 2018-2021 CERN +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// In applying this license, CERN does not waive the privileges and immunities +// granted to it by virtue of its status as an Intergovernmental Organization +// or submit itself to any jurisdiction. + +// Code generated by mockery v1.1.2. DO NOT EDIT. 
+ +package mocks + +import ( + context "context" + + grpc "google.golang.org/grpc" + + mock "github.com/stretchr/testify/mock" + + permissionsv1beta1 "github.com/cs3org/go-cs3apis/cs3/permissions/v1beta1" +) + +// CS3PermissionsClient is an autogenerated mock type for the CS3PermissionsClient type +type CS3PermissionsClient struct { + mock.Mock +} + +// CheckPermission provides a mock function with given fields: ctx, in, opts +func (_m *CS3PermissionsClient) CheckPermission(ctx context.Context, in *permissionsv1beta1.CheckPermissionRequest, opts ...grpc.CallOption) (*permissionsv1beta1.CheckPermissionResponse, error) { + _va := make([]interface{}, len(opts)) + for _i := range opts { + _va[_i] = opts[_i] + } + var _ca []interface{} + _ca = append(_ca, ctx, in) + _ca = append(_ca, _va...) + ret := _m.Called(_ca...) + + var r0 *permissionsv1beta1.CheckPermissionResponse + if rf, ok := ret.Get(0).(func(context.Context, *permissionsv1beta1.CheckPermissionRequest, ...grpc.CallOption) *permissionsv1beta1.CheckPermissionResponse); ok { + r0 = rf(ctx, in, opts...) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*permissionsv1beta1.CheckPermissionResponse) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, *permissionsv1beta1.CheckPermissionRequest, ...grpc.CallOption) error); ok { + r1 = rf(ctx, in, opts...) 
+ } else { + r1 = ret.Error(1) + } + + return r0, r1 +} diff --git a/pkg/storage/utils/decomposedfs/node/locks.go b/pkg/storage/utils/decomposedfs/node/locks.go index e0392364dd..afb0443e72 100644 --- a/pkg/storage/utils/decomposedfs/node/locks.go +++ b/pkg/storage/utils/decomposedfs/node/locks.go @@ -21,7 +21,9 @@ package node import ( "context" "encoding/json" + "io/fs" "os" + "path/filepath" provider "github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1" types "github.com/cs3org/go-cs3apis/cs3/types/v1beta1" @@ -35,6 +37,7 @@ import ( // SetLock sets a lock on the node func (n *Node) SetLock(ctx context.Context, lock *provider.Lock) error { + lockFilePath := n.LockFilePath() // check existing lock if l, _ := n.ReadLock(ctx); l != nil { @@ -43,13 +46,17 @@ func (n *Node) SetLock(ctx context.Context, lock *provider.Lock) error { return errtypes.Locked(l.LockId) } - err := os.Remove(n.LockFilePath()) + err := os.Remove(lockFilePath) if err != nil { return err } } - fileLock, err := filelocks.AcquireWriteLock(n.LockFilePath()) + // ensure parent path exists + if err := os.MkdirAll(filepath.Dir(lockFilePath), 0700); err != nil { + return errors.Wrap(err, "Decomposedfs: error creating parent folder for lock") + } + fileLock, err := filelocks.AcquireWriteLock(n.InternalPath()) if err != nil { return err @@ -65,7 +72,7 @@ func (n *Node) SetLock(ctx context.Context, lock *provider.Lock) error { }() // O_EXCL to make open fail when the file already exists - f, err := os.OpenFile(n.LockFilePath(), os.O_EXCL|os.O_CREATE|os.O_WRONLY, 0600) + f, err := os.OpenFile(lockFilePath, os.O_EXCL|os.O_CREATE|os.O_WRONLY, 0600) if err != nil { return errors.Wrap(err, "Decomposedfs: could not create lock file") } @@ -81,7 +88,11 @@ func (n *Node) SetLock(ctx context.Context, lock *provider.Lock) error { // ReadLock reads the lock id for a node func (n Node) ReadLock(ctx context.Context) (*provider.Lock, error) { - fileLock, err := 
filelocks.AcquireReadLock(n.LockFilePath()) + // ensure parent path exists + if err := os.MkdirAll(filepath.Dir(n.InternalPath()), 0700); err != nil { + return nil, errors.Wrap(err, "Decomposedfs: error creating parent folder for lock") + } + fileLock, err := filelocks.AcquireReadLock(n.InternalPath()) if err != nil { return nil, err @@ -98,7 +109,7 @@ func (n Node) ReadLock(ctx context.Context) (*provider.Lock, error) { f, err := os.Open(n.LockFilePath()) if err != nil { - if os.IsNotExist(err) { + if errors.Is(err, fs.ErrNotExist) { return nil, errtypes.NotFound("no lock found") } return nil, errors.Wrap(err, "Decomposedfs: could not open lock file") @@ -116,7 +127,11 @@ func (n Node) ReadLock(ctx context.Context) (*provider.Lock, error) { // RefreshLock refreshes the node's lock func (n *Node) RefreshLock(ctx context.Context, lock *provider.Lock) error { - fileLock, err := filelocks.AcquireWriteLock(n.LockFilePath()) + // ensure parent path exists + if err := os.MkdirAll(filepath.Dir(n.InternalPath()), 0700); err != nil { + return errors.Wrap(err, "Decomposedfs: error creating parent folder for lock") + } + fileLock, err := filelocks.AcquireWriteLock(n.InternalPath()) if err != nil { return err @@ -133,7 +148,7 @@ func (n *Node) RefreshLock(ctx context.Context, lock *provider.Lock) error { f, err := os.OpenFile(n.LockFilePath(), os.O_RDWR, os.ModeExclusive) switch { - case os.IsNotExist(err): + case errors.Is(err, fs.ErrNotExist): return errtypes.PreconditionFailed("lock does not exist") case err != nil: return errors.Wrap(err, "Decomposedfs: could not open lock file") @@ -169,7 +184,11 @@ func (n *Node) RefreshLock(ctx context.Context, lock *provider.Lock) error { // Unlock unlocks the node func (n *Node) Unlock(ctx context.Context, lock *provider.Lock) error { - fileLock, err := filelocks.AcquireWriteLock(n.LockFilePath()) + // ensure parent path exists + if err := os.MkdirAll(filepath.Dir(n.InternalPath()), 0700); err != nil { + return errors.Wrap(err, 
"Decomposedfs: error creating parent folder for lock") + } + fileLock, err := filelocks.AcquireWriteLock(n.InternalPath()) if err != nil { return err @@ -186,7 +205,7 @@ func (n *Node) Unlock(ctx context.Context, lock *provider.Lock) error { f, err := os.OpenFile(n.LockFilePath(), os.O_RDONLY, os.ModeExclusive) switch { - case os.IsNotExist(err): + case errors.Is(err, fs.ErrNotExist): return errtypes.PreconditionFailed("lock does not exist") case err != nil: return errors.Wrap(err, "Decomposedfs: could not open lock file") @@ -234,8 +253,13 @@ func (n *Node) CheckLock(ctx context.Context) error { return nil // ok } -func readLocksIntoOpaque(ctx context.Context, lockPath string, ri *provider.ResourceInfo) error { - fileLock, err := filelocks.AcquireReadLock(lockPath) +func readLocksIntoOpaque(ctx context.Context, n *Node, ri *provider.ResourceInfo) error { + + // ensure parent path exists + if err := os.MkdirAll(filepath.Dir(n.InternalPath()), 0700); err != nil { + return errors.Wrap(err, "Decomposedfs: error creating parent folder for lock") + } + fileLock, err := filelocks.AcquireReadLock(n.InternalPath()) if err != nil { return err @@ -250,7 +274,7 @@ func readLocksIntoOpaque(ctx context.Context, lockPath string, ri *provider.Reso } }() - f, err := os.Open(lockPath) + f, err := os.Open(n.LockFilePath()) if err != nil { appctx.GetLogger(ctx).Error().Err(err).Msg("Decomposedfs: could not open lock file") return err diff --git a/pkg/storage/utils/decomposedfs/node/locks_test.go b/pkg/storage/utils/decomposedfs/node/locks_test.go index d86c677ad2..5cd4dd0ad5 100644 --- a/pkg/storage/utils/decomposedfs/node/locks_test.go +++ b/pkg/storage/utils/decomposedfs/node/locks_test.go @@ -45,7 +45,7 @@ var _ = Describe("Node locks", func() { otherUser = &userpb.User{ Id: &userpb.UserId{ Idp: "idp", - OpaqueId: "foo", + OpaqueId: "otheruserid", Type: userpb.UserType_USER_TYPE_PRIMARY, }, Username: "foo", @@ -68,8 +68,8 @@ var _ = Describe("Node locks", func() { User: 
env.Owner.Id, LockId: uuid.New().String(), } - n = node.New("tobelockedid", "", "tobelocked", 10, "", env.Owner.Id, env.Lookup) - n2 = node.New("neverlockedid", "", "neverlocked", 10, "", env.Owner.Id, env.Lookup) + n = node.New("u-s-e-r-id", "tobelockedid", "", "tobelocked", 10, "", env.Owner.Id, env.Lookup) + n2 = node.New("u-s-e-r-id", "neverlockedid", "", "neverlocked", 10, "", env.Owner.Id, env.Lookup) }) AfterEach(func() { diff --git a/pkg/storage/utils/decomposedfs/node/node.go b/pkg/storage/utils/decomposedfs/node/node.go index 519c8e31d9..f5febee408 100644 --- a/pkg/storage/utils/decomposedfs/node/node.go +++ b/pkg/storage/utils/decomposedfs/node/node.go @@ -63,7 +63,8 @@ const ( QuotaUnlimited = "-3" // TrashIDDelimiter represents the characters used to separate the nodeid and the deletion time. - TrashIDDelimiter = ".T." + TrashIDDelimiter = ".T." + RevisionIDDelimiter = ".REV." // RootID defines the root node's ID RootID = "root" @@ -71,6 +72,7 @@ const ( // Node represents a node in the tree and provides methods to get a Parent or Child instance type Node struct { + SpaceID string ParentID string ID string Name string @@ -85,20 +87,19 @@ type Node struct { // PathLookup defines the interface for the lookup component type PathLookup interface { - RootNode(ctx context.Context) (node *Node, err error) - InternalRoot() string - InternalPath(ID string) string + InternalPath(spaceID, nodeID string) string Path(ctx context.Context, n *Node) (path string, err error) ShareFolder() string } // New returns a new instance of Node -func New(id, parentID, name string, blobsize int64, blobID string, owner *userpb.UserId, lu PathLookup) *Node { +func New(spaceID, id, parentID, name string, blobsize int64, blobID string, owner *userpb.UserId, lu PathLookup) *Node { if blobID == "" { blobID = uuid.New().String() } return &Node{ + SpaceID: spaceID, ID: id, ParentID: parentID, Name: name, @@ -111,14 +112,14 @@ func New(id, parentID, name string, blobsize int64, blobID 
string, owner *userpb // ChangeOwner sets the owner of n to newOwner func (n *Node) ChangeOwner(new *userpb.UserId) (err error) { - nodePath := n.InternalPath() - n.owner = new + rootNodePath := n.SpaceRoot.InternalPath() + n.SpaceRoot.owner = new var attribs = map[string]string{xattrs.OwnerIDAttr: new.OpaqueId, xattrs.OwnerIDPAttr: new.Idp, xattrs.OwnerTypeAttr: utils.UserTypeToString(new.Type)} - if err := xattrs.SetMultiple(nodePath, attribs); err != nil { + if err := xattrs.SetMultiple(rootNodePath, attribs); err != nil { return err } @@ -135,6 +136,14 @@ func (n *Node) SetMetadata(key string, val string) (err error) { return nil } +// RemoveMetadata removes a given key +func (n *Node) RemoveMetadata(key string) (err error) { + if err = xattrs.Remove(n.InternalPath(), key); err == nil || xattrs.IsAttrUnset(err) { + return nil + } + return err +} + // GetMetadata reads the metadata for the given key func (n *Node) GetMetadata(key string) (val string, err error) { nodePath := n.InternalPath() @@ -145,7 +154,7 @@ func (n *Node) GetMetadata(key string) (val string, err error) { } // WriteAllNodeMetadata writes the Node metadata to disk -func (n *Node) WriteAllNodeMetadata(owner *userpb.UserId) (err error) { +func (n *Node) WriteAllNodeMetadata() (err error) { attribs := make(map[string]string) attribs[xattrs.ParentidAttr] = n.ParentID @@ -154,77 +163,130 @@ func (n *Node) WriteAllNodeMetadata(owner *userpb.UserId) (err error) { attribs[xattrs.BlobsizeAttr] = strconv.FormatInt(n.Blobsize, 10) nodePath := n.InternalPath() - attribs[xattrs.OwnerIDAttr] = "" - attribs[xattrs.OwnerIDPAttr] = "" - attribs[xattrs.OwnerTypeAttr] = "" + return xattrs.SetMultiple(nodePath, attribs) +} - if owner != nil { - attribs[xattrs.OwnerIDAttr] = owner.OpaqueId - attribs[xattrs.OwnerIDPAttr] = owner.Idp - attribs[xattrs.OwnerTypeAttr] = utils.UserTypeToString(owner.Type) +// WriteOwner writes the space owner +func (n *Node) WriteOwner(owner *userpb.UserId) error { + n.SpaceRoot.owner = 
owner + attribs := map[string]string{ + xattrs.OwnerIDAttr: owner.OpaqueId, + xattrs.OwnerIDPAttr: owner.Idp, + xattrs.OwnerTypeAttr: utils.UserTypeToString(owner.Type), } - if err := xattrs.SetMultiple(nodePath, attribs); err != nil { + nodeRootPath := n.SpaceRoot.InternalPath() + if err := xattrs.SetMultiple(nodeRootPath, attribs); err != nil { return err } - return + n.SpaceRoot.owner = owner + return nil } // ReadNode creates a new instance from an id and checks if it exists -func ReadNode(ctx context.Context, lu PathLookup, id string) (n *Node, err error) { +// FIXME check if user is allowed to access disabled spaces +func ReadNode(ctx context.Context, lu PathLookup, spaceID, nodeID string) (n *Node, err error) { + + // read space root + r := &Node{ + SpaceID: spaceID, + lu: lu, + ID: spaceID, + } + r.SpaceRoot = r + r.owner, err = r.readOwner() + switch { + case xattrs.IsNotExist(err): + return r, nil // swallow not found, the node defaults to exists = false + case err != nil: + return nil, err + } + r.Exists = true + + // check if this is a space root + if spaceID == nodeID { + return r, nil + } + + // read node n = &Node{ - lu: lu, - ID: id, + SpaceID: spaceID, + lu: lu, + ID: nodeID, + SpaceRoot: r, } nodePath := n.InternalPath() - // lookup parent id in extended attributes - var attr string - attr, err = xattrs.Get(nodePath, xattrs.ParentidAttr) + // lookup name in extended attributes + n.Name, err = xattrs.Get(nodePath, xattrs.NameAttr) switch { - case err == nil: - n.ParentID = attr - case xattrs.IsAttrUnset(err): - return nil, errtypes.InternalError(err.Error()) case xattrs.IsNotExist(err): return n, nil // swallow not found, the node defaults to exists = false - default: - return nil, errtypes.InternalError(err.Error()) + case err != nil: + return nil, err } - // check if this is a space root - if _, err = xattrs.Get(nodePath, xattrs.SpaceNameAttr); err == nil { - n.SpaceRoot = n - } - // lookup name in extended attributes - if attr, err = 
xattrs.Get(nodePath, xattrs.NameAttr); err == nil { - n.Name = attr - } else { - return - } + n.Exists = true + // lookup blobID in extended attributes - if attr, err = xattrs.Get(nodePath, xattrs.BlobIDAttr); err == nil { - n.BlobID = attr - } else { - return + n.BlobID, err = xattrs.Get(nodePath, xattrs.BlobIDAttr) + switch { + case xattrs.IsNotExist(err): + return n, nil // swallow not found, the node defaults to exists = false + case err != nil: + return nil, err } + // Lookup blobsize - var blobSize int64 - if blobSize, err = ReadBlobSizeAttr(nodePath); err == nil { - n.Blobsize = blobSize - } else { - return + n.Blobsize, err = ReadBlobSizeAttr(nodePath) + switch { + case xattrs.IsNotExist(err): + return n, nil // swallow not found, the node defaults to exists = false + case err != nil: + return nil, err } - // Check if parent exists. Otherwise this node is part of a deleted subtree - _, err = os.Stat(lu.InternalPath(n.ParentID)) + // lookup parent id in extended attributes + n.ParentID, err = xattrs.Get(nodePath, xattrs.ParentidAttr) + switch { + case xattrs.IsAttrUnset(err): + return nil, errtypes.InternalError(err.Error()) + case xattrs.IsNotExist(err): + return n, nil // swallow not found, the node defaults to exists = false + case err != nil: + return nil, errtypes.InternalError(err.Error()) + } + + // TODO why do we stat the parent? to determine if the current node is in the trash we would need to traverse all parents... + // we need to traverse all parents for permissions anyway ... 
+ // - we can compare to space root owner with the current user + // - we can compare the share permissions on the root for spaces, which would work for managers + // - for non managers / owners we need to traverse all path segments because an intermediate node might have been shared + // - if we want to support negative acls we need to traverse the path for all users (but the owner) + // for trashed items we need to check all parents + // - one of them might have the trash suffix ... + // - options: + // - move deleted nodes in a trash folder that is still part of the tree (aka freedesktop org trash spec) + // - shares should still be removed, which requires traversing all trashed children ... and it should be undoable ... + // - what if a trashed file is restored? will child items be accessible by a share? + // - compare paths of trash root items and the trashed file? + // - to determine the relative path of a file we would need to traverse all intermediate nodes anyway + // - recursively mark all children as trashed ... async ... it is ok when that is not synchronous + // - how do we pick up if an error occurs? write a journal somewhere? activity log / delta? + // - stat requests will not pick up trashed items at all + // - recursively move all children into the trash folder? + // - no need to write an additional trash entry + // - can be made more robust with a journal + // - same recursion mechanism can be used to purge items? 
sth we still need to do + // - flag the two above options with dtime + _, err = os.Stat(n.ParentInternalPath()) if err != nil { - if os.IsNotExist(err) { + if errors.Is(err, fs.ErrNotExist) { return nil, errtypes.NotFound(err.Error()) } return nil, err } - n.Exists = true + return } @@ -238,12 +300,30 @@ func isNotDir(err error) bool { return false } +func readChildNodeFromLink(path string) (string, error) { + link, err := os.Readlink(path) + if err != nil { + return "", err + } + nodeID := strings.TrimLeft(link, "/.") + nodeID = strings.ReplaceAll(nodeID, "/", "") + return nodeID, nil +} + // Child returns the child node with the given name func (n *Node) Child(ctx context.Context, name string) (*Node, error) { - link, err := os.Readlink(filepath.Join(n.InternalPath(), filepath.Join("/", name))) + spaceID := n.SpaceID + if spaceID == "" && n.ParentID == "root" { + spaceID = n.ID + } else if n.SpaceRoot != nil { + spaceID = n.SpaceRoot.ID + } + nodeID, err := readChildNodeFromLink(filepath.Join(n.InternalPath(), name)) if err != nil { - if os.IsNotExist(err) || isNotDir(err) { + if errors.Is(err, fs.ErrNotExist) || isNotDir(err) { + c := &Node{ + SpaceID: spaceID, lu: n.lu, ParentID: n.ID, Name: name, @@ -256,15 +336,11 @@ func (n *Node) Child(ctx context.Context, name string) (*Node, error) { } var c *Node - if strings.HasPrefix(link, "../") { - c, err = ReadNode(ctx, n.lu, filepath.Base(link)) - if err != nil { - return nil, errors.Wrap(err, "could not read child node") - } - c.SpaceRoot = n.SpaceRoot - } else { - return nil, fmt.Errorf("decomposedfs: expected '../ prefix, got' %+v", link) + c, err = ReadNode(ctx, n.lu, spaceID, nodeID) + if err != nil { + return nil, errors.Wrap(err, "could not read child node") } + c.SpaceRoot = n.SpaceRoot return c, nil } @@ -275,12 +351,14 @@ func (n *Node) Parent() (p *Node, err error) { return nil, fmt.Errorf("decomposedfs: root has no parent") } p = &Node{ + SpaceID: n.SpaceID, lu: n.lu, ID: n.ParentID, SpaceRoot: 
n.SpaceRoot, } - parentPath := n.lu.InternalPath(n.ParentID) + // parentPath := n.lu.InternalPath(spaceID, n.ParentID) + parentPath := p.InternalPath() // lookup parent id in extended attributes if p.ParentID, err = xattrs.Get(parentPath, xattrs.ParentidAttr); err != nil { @@ -301,59 +379,59 @@ return } -// Owner returns the cached owner id or reads it from the extended attributes -// TODO can be private as only the AsResourceInfo uses it -func (n *Node) Owner() (*userpb.UserId, error) { - if n.owner != nil { - return n.owner, nil - } +// Owner returns the space owner +func (n *Node) Owner() *userpb.UserId { + return n.SpaceRoot.owner +} + +// readOwner reads the owner from the extended attributes of the space root +// in case both owner id and owner idp are unset we return a nil owner without an error +func (n *Node) readOwner() (*userpb.UserId, error) { owner := &userpb.UserId{} - // FIXME ... do we return the owner of the reference or the owner of the target? - // we don't really know the owner of the target ... and as the reference may point anywhere we cannot really find out - // but what are the permissions? all? none? the gateway has to fill in? - // TODO what if this is a reference?
- nodePath := n.InternalPath() + rootNodePath := n.SpaceRoot.InternalPath() // lookup parent id in extended attributes var attr string var err error // lookup ID in extended attributes - attr, err = xattrs.Get(nodePath, xattrs.OwnerIDAttr) + attr, err = xattrs.Get(rootNodePath, xattrs.OwnerIDAttr) switch { case err == nil: owner.OpaqueId = attr - case xattrs.IsAttrUnset(err), xattrs.IsNotExist(err): - fallthrough + case xattrs.IsAttrUnset(err): + // ignore default: return nil, err } // lookup IDP in extended attributes - attr, err = xattrs.Get(nodePath, xattrs.OwnerIDPAttr) + attr, err = xattrs.Get(rootNodePath, xattrs.OwnerIDPAttr) switch { case err == nil: owner.Idp = attr - case xattrs.IsAttrUnset(err), xattrs.IsNotExist(err): - fallthrough + case xattrs.IsAttrUnset(err): + // ignore default: return nil, err } // lookup type in extended attributes - attr, err = xattrs.Get(nodePath, xattrs.OwnerTypeAttr) + attr, err = xattrs.Get(rootNodePath, xattrs.OwnerTypeAttr) switch { case err == nil: owner.Type = utils.UserTypeMap(attr) - case xattrs.IsAttrUnset(err), xattrs.IsNotExist(err): - fallthrough + case xattrs.IsAttrUnset(err): + // ignore default: - // TODO the user type defaults to invalid, which is the case - err = nil + return nil, err } - n.owner = owner - return n.owner, err + // owner is an optional property + if owner.Idp == "" && owner.OpaqueId == "" { + return nil, nil + } + return owner, nil } // PermissionSet returns the permission set for the current user @@ -364,7 +442,7 @@ func (n *Node) PermissionSet(ctx context.Context) provider.ResourcePermissions { appctx.GetLogger(ctx).Debug().Interface("node", n).Msg("no user in context, returning default permissions") return NoPermissions() } - if o, _ := n.Owner(); utils.UserEqual(u.Id, o) { + if utils.UserEqual(u.Id, n.SpaceRoot.Owner()) { return OwnerPermissions() } // read the permissions for the current user from the acls of the current node @@ -376,12 +454,17 @@ func (n *Node) PermissionSet(ctx 
context.Context) provider.ResourcePermissions { // InternalPath returns the internal path of the Node func (n *Node) InternalPath() string { - return n.lu.InternalPath(n.ID) + return n.lu.InternalPath(n.SpaceID, n.ID) +} + +// ParentInternalPath returns the internal path of the parent of the current node +func (n *Node) ParentInternalPath() string { + return n.lu.InternalPath(n.SpaceID, n.ParentID) } // LockFilePath returns the internal path of the lock file of the node func (n *Node) LockFilePath() string { - return n.lu.InternalPath(n.ID) + ".lock" + return n.InternalPath() + ".lock" } // CalculateEtag returns a hash of fileid + tmtime (or mtime) @@ -409,7 +492,7 @@ func calculateEtag(nodeID string, tmTime time.Time) (string, error) { func (n *Node) SetMtime(ctx context.Context, mtime string) error { sublog := appctx.GetLogger(ctx).With().Interface("node", n).Logger() if mt, err := parseMTime(mtime); err == nil { - nodePath := n.lu.InternalPath(n.ID) + nodePath := n.InternalPath() // updating mtime also updates atime if err := os.Chtimes(nodePath, mt, mt); err != nil { sublog.Error().Err(err). 
@@ -429,7 +512,7 @@ func (n *Node) SetMtime(ctx context.Context, mtime string) error { // SetEtag sets the temporary etag of a node if it differs from the current etag func (n *Node) SetEtag(ctx context.Context, val string) (err error) { sublog := appctx.GetLogger(ctx).With().Interface("node", n).Logger() - nodePath := n.lu.InternalPath(n.ID) + nodePath := n.InternalPath() var tmTime time.Time if tmTime, err = n.GetTMTime(); err != nil { // no tmtime, use mtime @@ -474,7 +557,7 @@ func (n *Node) SetEtag(ctx context.Context, val string) (err error) { // obviously this only is secure when the u/s/g/a namespaces are not accessible by users in the filesystem // public tags can be mapped to extended attributes func (n *Node) SetFavorite(uid *userpb.UserId, val string) error { - nodePath := n.lu.InternalPath(n.ID) + nodePath := n.InternalPath() // the favorite flag is specific to the user, so we need to incorporate the userid fa := fmt.Sprintf("%s:%s:%s@%s", xattrs.FavPrefix, utils.UserTypeToString(uid.GetType()), uid.GetOpaqueId(), uid.GetIdp()) return xattrs.Set(nodePath, fa, val) @@ -485,7 +568,7 @@ func (n *Node) AsResourceInfo(ctx context.Context, rp *provider.ResourcePermissi sublog := appctx.GetLogger(ctx).With().Interface("node", n.ID).Logger() var fn string - nodePath := n.lu.InternalPath(n.ID) + nodePath := n.InternalPath() var fi os.FileInfo @@ -510,8 +593,7 @@ func (n *Node) AsResourceInfo(ctx context.Context, rp *provider.ResourcePermissi // nodeType = provider.ResourceType_RESOURCE_TYPE_REFERENCE } - // TODO ensure we always have a space root - id := &provider.ResourceId{StorageId: n.SpaceRoot.Name, OpaqueId: n.ID} + id := &provider.ResourceId{StorageId: n.SpaceID, OpaqueId: n.ID} if returnBasename { fn = n.Name @@ -530,6 +612,7 @@ func (n *Node) AsResourceInfo(ctx context.Context, rp *provider.ResourcePermissi Size: uint64(n.Blobsize), Target: target, PermissionSet: rp, + Owner: n.Owner(), } if nodeType == provider.ResourceType_RESOURCE_TYPE_CONTAINER { @@ 
-542,10 +625,6 @@ func (n *Node) AsResourceInfo(ctx context.Context, rp *provider.ResourcePermissi } } - if ri.Owner, err = n.Owner(); err != nil { - sublog.Debug().Err(err).Msg("could not determine owner") - } - // TODO make etag of files use fileid and checksum var tmTime time.Time @@ -605,7 +684,7 @@ func (n *Node) AsResourceInfo(ctx context.Context, rp *provider.ResourcePermissi // read locks if _, ok := mdKeysMap[LockdiscoveryKey]; returnAllKeys || ok { if n.hasLocks(ctx) { - err = readLocksIntoOpaque(ctx, n.LockFilePath(), ri) + err = readLocksIntoOpaque(ctx, n, ri) if err != nil { sublog.Debug().Err(errtypes.InternalError("lockfail")) } @@ -628,19 +707,8 @@ func (n *Node) AsResourceInfo(ctx context.Context, rp *provider.ResourcePermissi } // quota if _, ok := mdKeysMap[QuotaKey]; (nodeType == provider.ResourceType_RESOURCE_TYPE_CONTAINER) && returnAllKeys || ok { - var quotaPath string - if n.SpaceRoot == nil { - root, err := n.lu.RootNode(ctx) - if err == nil { - quotaPath = root.InternalPath() - } else { - sublog.Debug().Err(err).Msg("error determining the space root node for quota") - } - } else { - quotaPath = n.SpaceRoot.InternalPath() - } - if quotaPath != "" { - readQuotaIntoOpaque(ctx, quotaPath, ri) + if n.SpaceRoot != nil && n.SpaceRoot.InternalPath() != "" { + readQuotaIntoOpaque(ctx, n.SpaceRoot.InternalPath(), ri) } } @@ -752,7 +820,7 @@ func readQuotaIntoOpaque(ctx context.Context, nodePath string, ri *provider.Reso // HasPropagation checks if the propagation attribute exists and is set to "1" func (n *Node) HasPropagation() (propagation bool) { - if b, err := xattrs.Get(n.lu.InternalPath(n.ID), xattrs.PropagationAttr); err == nil { + if b, err := xattrs.Get(n.InternalPath(), xattrs.PropagationAttr); err == nil { return b == "1" } return false @@ -761,15 +829,53 @@ func (n *Node) HasPropagation() (propagation bool) { // GetTMTime reads the tmtime from the extended attributes func (n *Node) GetTMTime() (tmTime time.Time, err error) { var b 
string - if b, err = xattrs.Get(n.lu.InternalPath(n.ID), xattrs.TreeMTimeAttr); err != nil { + if b, err = xattrs.Get(n.InternalPath(), xattrs.TreeMTimeAttr); err != nil { return } return time.Parse(time.RFC3339Nano, b) } -// SetTMTime writes the tmtime to the extended attributes -func (n *Node) SetTMTime(t time.Time) (err error) { - return xattrs.Set(n.lu.InternalPath(n.ID), xattrs.TreeMTimeAttr, t.UTC().Format(time.RFC3339Nano)) +// SetTMTime writes the UTC tmtime to the extended attributes or removes the attribute if nil is passed +func (n *Node) SetTMTime(t *time.Time) (err error) { + if t == nil { + err = xattrs.Remove(n.InternalPath(), xattrs.TreeMTimeAttr) + if xattrs.IsAttrUnset(err) { + return nil + } + return err + } + return xattrs.Set(n.InternalPath(), xattrs.TreeMTimeAttr, t.UTC().Format(time.RFC3339Nano)) +} + +// GetDTime reads the dtime from the extended attributes +func (n *Node) GetDTime() (tmTime time.Time, err error) { + var b string + if b, err = xattrs.Get(n.InternalPath(), xattrs.DTimeAttr); err != nil { + return + } + return time.Parse(time.RFC3339Nano, b) +} + +// SetDTime writes the UTC dtime to the extended attributes or removes the attribute if nil is passed +func (n *Node) SetDTime(t *time.Time) (err error) { + if t == nil { + err = xattrs.Remove(n.InternalPath(), xattrs.DTimeAttr) + if xattrs.IsAttrUnset(err) { + return nil + } + return err + } + return xattrs.Set(n.InternalPath(), xattrs.DTimeAttr, t.UTC().Format(time.RFC3339Nano)) +} + +// IsDisabled returns true when the node has a dtime attribute set +// only used to check if a space is disabled +// FIXME confusing with the trash logic +func (n *Node) IsDisabled() bool { + if _, err := n.GetDTime(); err == nil { + return true + } + return false } // GetTreeSize reads the treesize from the extended attributes @@ -793,12 +899,9 @@ func (n *Node) SetChecksum(csType string, h hash.Hash) (err error) { // UnsetTempEtag removes the temporary etag attribute func (n *Node) UnsetTempEtag()
(err error) { - if err = xattr.Remove(n.lu.InternalPath(n.ID), xattrs.TmpEtagAttr); err != nil { - if e, ok := err.(*xattr.Error); ok && (e.Err.Error() == "no data available" || - // darwin - e.Err.Error() == "attribute not found") { - return nil - } + err = xattrs.Remove(n.InternalPath(), xattrs.TmpEtagAttr) + if xattrs.IsAttrUnset(err) { + return nil } return err } @@ -806,20 +909,7 @@ func (n *Node) UnsetTempEtag() (err error) { // ReadUserPermissions will assemble the permissions for the current user on the given node without parent nodes func (n *Node) ReadUserPermissions(ctx context.Context, u *userpb.User) (ap provider.ResourcePermissions, err error) { // check if the current user is the owner - o, err := n.Owner() - if err != nil { - // TODO check if a parent folder has the owner set? - appctx.GetLogger(ctx).Error().Err(err).Str("node", n.ID).Msg("could not determine owner, returning default permissions") - return NoPermissions(), err - } - if o.OpaqueId == "" { - // this happens for root nodes and project spaces in the storage. 
the extended attributes are set to emptystring to indicate: no owner - // for project spaces we need to go over the grants and check the granted permissions - if n.ID == RootID { - return NoOwnerPermissions(), nil - } - } - if utils.UserEqual(u.Id, o) { + if utils.UserEqual(u.Id, n.Owner()) { appctx.GetLogger(ctx).Debug().Str("node", n.ID).Msg("user is owner, returning owner permissions") return OwnerPermissions(), nil } diff --git a/pkg/storage/utils/decomposedfs/node/node_test.go b/pkg/storage/utils/decomposedfs/node/node_test.go index d1db620247..5b1582f5be 100644 --- a/pkg/storage/utils/decomposedfs/node/node_test.go +++ b/pkg/storage/utils/decomposedfs/node/node_test.go @@ -22,7 +22,6 @@ import ( "encoding/json" "time" - userpb "github.com/cs3org/go-cs3apis/cs3/identity/user/v1beta1" provider "github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1" "github.com/cs3org/reva/pkg/storage/utils/decomposedfs/node" helpers "github.com/cs3org/reva/pkg/storage/utils/decomposedfs/testhelpers" @@ -56,8 +55,8 @@ var _ = Describe("Node", func() { Describe("New", func() { It("generates unique blob ids if none are given", func() { - n1 := node.New(id, "", name, 10, "", env.Owner.Id, env.Lookup) - n2 := node.New(id, "", name, 10, "", env.Owner.Id, env.Lookup) + n1 := node.New(env.SpaceRootRes.StorageId, id, "", name, 10, "", env.Owner.Id, env.Lookup) + n2 := node.New(env.SpaceRootRes.StorageId, id, "", name, 10, "", env.Owner.Id, env.Lookup) Expect(len(n1.BlobID)).To(Equal(36)) Expect(n1.BlobID).ToNot(Equal(n2.BlobID)) @@ -72,7 +71,7 @@ var _ = Describe("Node", func() { }) Expect(err).ToNot(HaveOccurred()) - n, err := node.ReadNode(env.Ctx, env.Lookup, lookupNode.ID) + n, err := node.ReadNode(env.Ctx, env.Lookup, lookupNode.SpaceID, lookupNode.ID) Expect(err).ToNot(HaveOccurred()) Expect(n.BlobID).To(Equal("file1-blobid")) }) @@ -91,13 +90,8 @@ var _ = Describe("Node", func() { n.Name = "TestName" n.BlobID = "TestBlobID" n.Blobsize = 
int64(blobsize) - owner := &userpb.UserId{ - Idp: "testidp", - OpaqueId: "testuserid", - Type: userpb.UserType_USER_TYPE_PRIMARY, - } - err = n.WriteAllNodeMetadata(owner) + err = n.WriteAllNodeMetadata() Expect(err).ToNot(HaveOccurred()) n2, err := env.Lookup.NodeFromResource(env.Ctx, ref) Expect(err).ToNot(HaveOccurred()) @@ -202,7 +196,8 @@ var _ = Describe("Node", func() { Expect(len(ri.Etag)).To(Equal(34)) before := ri.Etag - Expect(n.SetTMTime(time.Now().UTC())).To(Succeed()) + tmtime := time.Now() + Expect(n.SetTMTime(&tmtime)).To(Succeed()) ri, err = n.AsResourceInfo(env.Ctx, &perms, []string{}, false) Expect(err).ToNot(HaveOccurred()) diff --git a/pkg/storage/utils/decomposedfs/node/permissions.go b/pkg/storage/utils/decomposedfs/node/permissions.go index ec0f8c8789..fdc31e8054 100644 --- a/pkg/storage/utils/decomposedfs/node/permissions.go +++ b/pkg/storage/utils/decomposedfs/node/permissions.go @@ -100,21 +100,9 @@ func (p *Permissions) AssemblePermissions(ctx context.Context, n *Node) (ap prov appctx.GetLogger(ctx).Debug().Interface("node", n.ID).Msg("no user in context, returning default permissions") return NoPermissions(), nil } + // check if the current user is the owner - o, err := n.Owner() - if err != nil { - // TODO check if a parent folder has the owner set? - appctx.GetLogger(ctx).Error().Err(err).Interface("node", n.ID).Msg("could not determine owner, returning default permissions") - return NoPermissions(), err - } - if o.OpaqueId == "" { - // this happens for root nodes and project spaces in the storage. 
the extended attributes are set to emptystring to indicate: no owner - // for project spaces we need to go over the grants and check the granted permissions - if n.ID == RootID { - return NoOwnerPermissions(), nil - } - } - if utils.UserEqual(u.Id, o) { + if utils.UserEqual(u.Id, n.Owner()) { lp, err := n.lu.Path(ctx, n) if err == nil && lp == n.lu.ShareFolder() { return ShareFolderPermissions(), nil @@ -123,10 +111,8 @@ func (p *Permissions) AssemblePermissions(ctx context.Context, n *Node) (ap prov return OwnerPermissions(), nil } // determine root - var rn *Node - if rn, err = p.lu.RootNode(ctx); err != nil { - return NoPermissions(), err - } + + rn := n.SpaceRoot cn := n @@ -152,6 +138,13 @@ func (p *Permissions) AssemblePermissions(ctx context.Context, n *Node) (ap prov } } + // for the root node + if np, err := cn.ReadUserPermissions(ctx, u); err == nil { + AddPermissions(&ap, &np) + } else { + appctx.GetLogger(ctx).Error().Err(err).Interface("node", cn.ID).Msg("error reading root node permissions") + } + appctx.GetLogger(ctx).Debug().Interface("permissions", ap).Interface("node", n.ID).Interface("user", u).Msg("returning agregated permissions") return ap, nil } @@ -189,9 +182,11 @@ func (p *Permissions) HasPermission(ctx context.Context, n *Node, check func(*pr } // determine root - if err = n.FindStorageSpaceRoot(); err != nil { - return false, err - } + /* + if err = n.FindStorageSpaceRoot(); err != nil { + return false, err + } + */ // for an efficient group lookup convert the list of groups to a map // groups are just strings ... groupnames ... or group ids ??? AAARGH !!! 
@@ -271,22 +266,7 @@ func (p *Permissions) getUserAndPermissions(ctx context.Context, n *Node) (*user return nil, &perms } // check if the current user is the owner - o, err := n.Owner() - if err != nil { - appctx.GetLogger(ctx).Error().Err(err).Interface("node", n.ID).Msg("could not determine owner, returning default permissions") - perms := NoPermissions() - return nil, &perms - } - if o.OpaqueId == "" { - // this happens for root nodes and project spaces in the storage. the extended attributes are set to emptystring to indicate: no owner - // for project spaces we need to go over the grants and check the granted permissions - if n.ID != RootID { - return u, nil - } - perms := NoOwnerPermissions() - return nil, &perms - } - if utils.UserEqual(u.Id, o) { + if utils.UserEqual(u.Id, n.Owner()) { appctx.GetLogger(ctx).Debug().Str("node", n.ID).Msg("user is owner, returning owner permissions") perms := OwnerPermissions() return u, &perms diff --git a/pkg/storage/utils/decomposedfs/options/options.go b/pkg/storage/utils/decomposedfs/options/options.go index 846cc03913..765c0c0c53 100644 --- a/pkg/storage/utils/decomposedfs/options/options.go +++ b/pkg/storage/utils/decomposedfs/options/options.go @@ -46,12 +46,8 @@ type Options struct { // propagate size changes as treesize TreeSizeAccounting bool `mapstructure:"treesize_accounting"` - // set an owner for the root node - Owner string `mapstructure:"owner"` - OwnerIDP string `mapstructure:"owner_idp"` - OwnerType string `mapstructure:"owner_type"` - - GatewayAddr string `mapstructure:"gateway_addr"` + // permissions service to use when checking permissions + PermissionsSVC string `mapstructure:"permissionssvc"` } // New returns a new Options instance for the given configuration diff --git a/pkg/storage/utils/decomposedfs/recycle.go b/pkg/storage/utils/decomposedfs/recycle.go index 4670e8281b..fbf5b093b7 100644 --- a/pkg/storage/utils/decomposedfs/recycle.go +++ b/pkg/storage/utils/decomposedfs/recycle.go @@ -20,8 +20,8 
@@ package decomposedfs import ( "context" + iofs "io/fs" "os" - "path" "path/filepath" "strings" "time" @@ -30,6 +30,7 @@ import ( types "github.com/cs3org/go-cs3apis/cs3/types/v1beta1" "github.com/cs3org/reva/pkg/appctx" "github.com/cs3org/reva/pkg/errtypes" + "github.com/cs3org/reva/pkg/storage/utils/decomposedfs/lookup" "github.com/cs3org/reva/pkg/storage/utils/decomposedfs/node" "github.com/cs3org/reva/pkg/storage/utils/decomposedfs/xattrs" "github.com/pkg/errors" @@ -47,13 +48,13 @@ import ( // ListRecycle returns the list of available recycle items // ref -> the space (= resourceid), key -> deleted node id, relativePath = relative to key func (fs *Decomposedfs) ListRecycle(ctx context.Context, ref *provider.Reference, key, relativePath string) ([]*provider.RecycleItem, error) { - log := appctx.GetLogger(ctx) - - items := make([]*provider.RecycleItem, 0) if ref == nil || ref.ResourceId == nil || ref.ResourceId.OpaqueId == "" { - return items, errtypes.BadRequest("spaceid required") + return nil, errtypes.BadRequest("spaceid required") } + spaceID := ref.ResourceId.OpaqueId + + sublog := appctx.GetLogger(ctx).With().Str("space", spaceID).Str("key", key).Str("relative_path", relativePath).Logger() // check permissions trashnode, err := fs.lu.NodeFromSpaceID(ctx, ref.ResourceId) @@ -70,92 +71,94 @@ func (fs *Decomposedfs) ListRecycle(ctx context.Context, ref *provider.Reference return nil, errtypes.PermissionDenied(key) } - spaceID := ref.ResourceId.OpaqueId if key == "" && relativePath == "/" { return fs.listTrashRoot(ctx, spaceID) } - trashRoot := fs.getRecycleRoot(ctx, spaceID) - f, err := os.Open(filepath.Join(trashRoot, key, relativePath)) + // build a list of trash items relative to the given trash root and path + items := make([]*provider.RecycleItem, 0) + + trashRootPath := filepath.Join(fs.getRecycleRoot(ctx, spaceID), lookup.Pathify(key, 4, 2)) + _, timeSuffix, err := readTrashLink(trashRootPath) 
if err != nil { - if os.IsNotExist(err) { - return items, nil + sublog.Error().Err(err).Str("trashRoot", trashRootPath).Msg("error reading trash link") + return nil, err + } + + origin := "" + // lookup origin path in extended attributes + if attrBytes, err := xattr.Get(trashRootPath, xattrs.TrashOriginAttr); err == nil { + origin = string(attrBytes) + } else { + sublog.Error().Err(err).Str("space", spaceID).Msg("could not read origin path, skipping") + return nil, err + } + + // all deleted items have the same deletion time + var deletionTime *types.Timestamp + if parsed, err := time.Parse(time.RFC3339Nano, timeSuffix); err == nil { + deletionTime = &types.Timestamp{ + Seconds: uint64(parsed.Unix()), + // TODO nanos } - return nil, errors.Wrapf(err, "tree: error listing %s", trashRoot) + } else { + sublog.Error().Err(err).Msg("could not parse time format, ignoring") } - defer f.Close() - parentNode, err := os.Readlink(filepath.Join(trashRoot, key)) + trashItemPath := filepath.Join(trashRootPath, relativePath) + + f, err := os.Open(trashItemPath) if err != nil { - log.Error().Err(err).Str("trashRoot", trashRoot).Msg("error reading trash link, skipping") - return nil, err + if errors.Is(err, iofs.ErrNotExist) { + return items, nil + } + return nil, errors.Wrapf(err, "recycle: error opening trashItemPath %s", trashItemPath) } + defer f.Close() if md, err := f.Stat(); err != nil { return nil, err } else if !md.IsDir() { // this is the case when we want to directly list a file in the trashbin - item, err := fs.createTrashItem(ctx, parentNode, filepath.Dir(relativePath), filepath.Join(trashRoot, key, relativePath)) + item, err := fs.createTrashItem(ctx, md, filepath.Join(key, relativePath), deletionTime) if err != nil { return items, err } + item.Ref = &provider.Reference{ + Path: filepath.Join(origin, relativePath), + } items = append(items, item) return items, err } + // we have to read the names and stat the path to follow the symlinks names, err := f.Readdirnames(0) 
if err != nil { return nil, err } - for i := range names { - if item, err := fs.createTrashItem(ctx, parentNode, relativePath, filepath.Join(trashRoot, key, relativePath, names[i])); err == nil { + for _, name := range names { + md, err := os.Stat(filepath.Join(trashItemPath, name)) + if err != nil { + sublog.Error().Err(err).Str("name", name).Msg("could not stat, skipping") + continue + } + if item, err := fs.createTrashItem(ctx, md, filepath.Join(key, relativePath, name), deletionTime); err == nil { + item.Ref = &provider.Reference{ + Path: filepath.Join(origin, relativePath, name), + } items = append(items, item) } } return items, nil } -func (fs *Decomposedfs) createTrashItem(ctx context.Context, parentNode, intermediatePath, itemPath string) (*provider.RecycleItem, error) { - log := appctx.GetLogger(ctx) - trashnode, err := os.Readlink(itemPath) - if err != nil { - log.Error().Err(err).Msg("error reading trash link, skipping") - return nil, err - } - parts := strings.SplitN(filepath.Base(parentNode), node.TrashIDDelimiter, 2) - if len(parts) != 2 { - log.Error().Str("trashnode", trashnode).Interface("parts", parts).Msg("malformed trash link, skipping") - return nil, errors.New("malformed trash link") - } - - nodePath := fs.lu.InternalPath(filepath.Base(trashnode)) - md, err := os.Stat(nodePath) - if err != nil { - log.Error().Err(err).Str("trashnode", trashnode).Msg("could not stat trash item, skipping") - return nil, err - } +func (fs *Decomposedfs) createTrashItem(ctx context.Context, md iofs.FileInfo, key string, deletionTime *types.Timestamp) (*provider.RecycleItem, error) { item := &provider.RecycleItem{ - Type: getResourceType(md.IsDir()), - Size: uint64(md.Size()), - Key: path.Join(parts[0], intermediatePath, filepath.Base(itemPath)), - } - if deletionTime, err := time.Parse(time.RFC3339Nano, parts[1]); err == nil { - item.DeletionTime = &types.Timestamp{ - Seconds: uint64(deletionTime.Unix()), - // TODO nanos - } - } else { - 
log.Error().Err(err).Str("link", trashnode).Interface("parts", parts).Msg("could parse time format, ignoring") - } - - // lookup origin path in extended attributes - parentPath := fs.lu.InternalPath(filepath.Base(parentNode)) - if attrBytes, err := xattr.Get(parentPath, xattrs.TrashOriginAttr); err == nil { - item.Ref = &provider.Reference{Path: filepath.Join(string(attrBytes), intermediatePath, filepath.Base(itemPath))} - } else { - log.Error().Err(err).Str("link", trashnode).Msg("could not read origin path, skipping") - return nil, err + Type: getResourceType(md.IsDir()), + Size: uint64(md.Size()), + Key: key, + DeletionTime: deletionTime, } // TODO filter results by permission ... on the original parent? or the trashed node? @@ -166,56 +169,58 @@ func (fs *Decomposedfs) createTrashItem(ctx context.Context, parentNode, interme return item, nil } +// readTrashLink returns nodeID and timestamp +func readTrashLink(path string) (string, string, error) { + link, err := os.Readlink(path) + if err != nil { + return "", "", err + } + // ../../../../../nodes/e5/6c/75/a8/-d235-4cbb-8b4e-48b6fd0f2094.T.2022-02-16T14:38:11.769917408Z + // TODO use filepath.Separator to support windows + link = strings.ReplaceAll(link, "/", "") + // ..........nodese56c75a8-d235-4cbb-8b4e-48b6fd0f2094.T.2022-02-16T14:38:11.769917408Z + if link[0:15] != "..........nodes" || link[51:54] != node.TrashIDDelimiter { + return "", "", errtypes.InternalError("malformed trash link") + } + return link[15:51], link[54:], nil +} + func (fs *Decomposedfs) listTrashRoot(ctx context.Context, spaceID string) ([]*provider.RecycleItem, error) { log := appctx.GetLogger(ctx) items := make([]*provider.RecycleItem, 0) trashRoot := fs.getRecycleRoot(ctx, spaceID) - f, err := os.Open(trashRoot) - if err != nil { - if os.IsNotExist(err) { - return items, nil - } - return nil, errors.Wrap(err, "tree: error listing "+trashRoot) - } - defer f.Close() - - names, err := f.Readdirnames(0) + matches, err := 
filepath.Glob(trashRoot + "/*/*/*/*/*") if err != nil { return nil, err } - for i := range names { - trashnode, err := os.Readlink(filepath.Join(trashRoot, names[i])) + for _, itemPath := range matches { + nodeID, timeSuffix, err := readTrashLink(itemPath) if err != nil { - log.Error().Err(err).Str("trashRoot", trashRoot).Str("name", names[i]).Msg("error reading trash link, skipping") - continue - } - parts := strings.SplitN(filepath.Base(trashnode), node.TrashIDDelimiter, 2) - if len(parts) != 2 { - log.Error().Err(err).Str("trashRoot", trashRoot).Str("name", names[i]).Str("trashnode", trashnode).Interface("parts", parts).Msg("malformed trash link, skipping") + log.Error().Err(err).Str("trashRoot", trashRoot).Str("item", itemPath).Msg("error reading trash link, skipping") continue } - nodePath := fs.lu.InternalPath(filepath.Base(trashnode)) + nodePath := fs.lu.InternalPath(spaceID, nodeID) + node.TrashIDDelimiter + timeSuffix md, err := os.Stat(nodePath) if err != nil { - log.Error().Err(err).Str("trashRoot", trashRoot).Str("name", names[i]).Str("trashnode", trashnode). 
/*.Interface("parts", parts)*/ Msg("could not stat trash item, skipping") + log.Error().Err(err).Str("trashRoot", trashRoot).Str("item", itemPath).Str("node_path", nodePath).Msg("could not stat trash item, skipping") continue } item := &provider.RecycleItem{ Type: getResourceType(md.IsDir()), Size: uint64(md.Size()), - Key: parts[0], + Key: nodeID, } - if deletionTime, err := time.Parse(time.RFC3339Nano, parts[1]); err == nil { + if deletionTime, err := time.Parse(time.RFC3339Nano, timeSuffix); err == nil { item.DeletionTime = &types.Timestamp{ Seconds: uint64(deletionTime.Unix()), // TODO nanos } } else { - log.Error().Err(err).Str("trashRoot", trashRoot).Str("name", names[i]).Str("link", trashnode).Interface("parts", parts).Msg("could parse time format, ignoring") + log.Error().Err(err).Str("trashRoot", trashRoot).Str("item", itemPath).Str("node", nodeID).Str("dtime", timeSuffix).Msg("could not parse time format, ignoring") } // lookup origin path in extended attributes @@ -223,7 +228,7 @@ func (fs *Decomposedfs) listTrashRoot(ctx context.Context, spaceID string) ([]*p if attrBytes, err = xattr.Get(nodePath, xattrs.TrashOriginAttr); err == nil { item.Ref = &provider.Reference{Path: string(attrBytes)} } else { - log.Error().Err(err).Str("trashRoot", trashRoot).Str("name", names[i]).Str("link", trashnode).Msg("could not read origin path, skipping") + log.Error().Err(err).Str("trashRoot", trashRoot).Str("item", itemPath).Str("node", nodeID).Str("dtime", timeSuffix).Msg("could not read origin path, skipping") continue } // TODO filter results by permission ... on the original parent? or the trashed node? 
@@ -326,5 +331,5 @@ func getResourceType(isDir bool) provider.ResourceType { } func (fs *Decomposedfs) getRecycleRoot(ctx context.Context, spaceID string) string { - return filepath.Join(fs.o.Root, "trash", spaceID) + return filepath.Join(fs.o.Root, "spaces", lookup.Pathify(spaceID, 1, 2), "trash") } diff --git a/pkg/storage/utils/decomposedfs/recycle_test.go b/pkg/storage/utils/decomposedfs/recycle_test.go index eb71af14a2..e341ee89ee 100644 --- a/pkg/storage/utils/decomposedfs/recycle_test.go +++ b/pkg/storage/utils/decomposedfs/recycle_test.go @@ -47,8 +47,8 @@ var _ = Describe("Recycle", func() { When("a user deletes files from the same space", func() { BeforeEach(func() { - // in this scenario user "userid" has this permissions: - registerPermissions(env.Permissions, "userid", &provider.ResourcePermissions{ + // in this scenario user "u-s-e-r-id" has these permissions: + registerPermissions(env.Permissions, "u-s-e-r-id", &provider.ResourcePermissions{ InitiateFileUpload: true, Delete: true, ListRecycle: true, @@ -132,8 +132,8 @@ var _ = Describe("Recycle", func() { Username: "anotherusername", }) - // in this scenario user "userid" has this permissions: - registerPermissions(env.Permissions, "userid", &provider.ResourcePermissions{ + // in this scenario user "u-s-e-r-id" has these permissions: + registerPermissions(env.Permissions, "u-s-e-r-id", &provider.ResourcePermissions{ InitiateFileUpload: true, Delete: true, ListRecycle: true, @@ -240,12 +240,13 @@ var _ = Describe("Recycle", func() { When("a user deletes files from different spaces", func() { BeforeEach(func() { var err error + env.Permissions.On("HasPermission", mock.Anything, mock.Anything, mock.Anything).Return(true, nil).Times(1) // Permissions required for setup below (AddGrant) projectID, err = env.CreateTestStorageSpace("project", &provider.Quota{QuotaMaxBytes: 2000}) Expect(err).ToNot(HaveOccurred()) Expect(projectID).ToNot(BeNil()) - // in this scenario user "userid" has this permissions: - 
registerPermissions(env.Permissions, "userid", &provider.ResourcePermissions{ + // in this scenario user "u-s-e-r-id" has these permissions: + registerPermissions(env.Permissions, "u-s-e-r-id", &provider.ResourcePermissions{ InitiateFileUpload: true, Delete: true, ListRecycle: true, @@ -289,7 +290,7 @@ var _ = Describe("Recycle", func() { Expect(len(items)).To(Equal(1)) // use up 2000 byte quota - _, err = env.CreateTestFile("largefile", "largefile-blobid", 2000, projectID.OpaqueId) + _, err = env.CreateTestFile("largefile", "largefile-blobid", projectID.OpaqueId, projectID.StorageId, 2000) Expect(err).ToNot(HaveOccurred()) err = env.Fs.RestoreRecycleItem(env.Ctx, &provider.Reference{ResourceId: projectID}, items[0].Key, "/", nil) @@ -316,8 +317,8 @@ var _ = Describe("Recycle", func() { Username: "readusername", }) - // in this scenario user "userid" has this permissions: - registerPermissions(env.Permissions, "userid", &provider.ResourcePermissions{ + // in this scenario user "u-s-e-r-id" has these permissions: + registerPermissions(env.Permissions, "u-s-e-r-id", &provider.ResourcePermissions{ Delete: true, ListRecycle: true, PurgeRecycle: true, @@ -395,14 +396,14 @@ var _ = Describe("Recycle", func() { ctx = ctxpkg.ContextSetUser(context.Background(), &userpb.User{ Id: &userpb.UserId{ Idp: "maliciousidp", - OpaqueId: "hacker", + OpaqueId: "h-a-c-k-er", Type: userpb.UserType_USER_TYPE_PRIMARY, }, Username: "mrhacker", }) // in this scenario user "userid" has this permissions: - registerPermissions(env.Permissions, "userid", &provider.ResourcePermissions{ + registerPermissions(env.Permissions, "u-s-e-r-id", &provider.ResourcePermissions{ Delete: true, ListRecycle: true, PurgeRecycle: true, @@ -410,7 +411,7 @@ var _ = Describe("Recycle", func() { }) // and user "hacker" has no permissions: - registerPermissions(env.Permissions, "h-a-c-k-er", &provider.ResourcePermissions{}) + registerPermissions(env.Permissions, "h-a-c-k-er", &provider.ResourcePermissions{}) }) It("cannot 
delete, list, purge or restore", func() { diff --git a/pkg/storage/utils/decomposedfs/revisions.go b/pkg/storage/utils/decomposedfs/revisions.go index 8e50b8fcc2..91cf568e06 100644 --- a/pkg/storage/utils/decomposedfs/revisions.go +++ b/pkg/storage/utils/decomposedfs/revisions.go @@ -21,6 +21,7 @@ package decomposedfs import ( "context" "io" + iofs "io/fs" "os" "path/filepath" "strings" @@ -64,12 +65,17 @@ func (fs *Decomposedfs) ListRevisions(ctx context.Context, ref *provider.Referen revisions = []*provider.FileVersion{} np := n.InternalPath() - if items, err := filepath.Glob(np + ".REV.*"); err == nil { + if items, err := filepath.Glob(np + node.RevisionIDDelimiter + "*"); err == nil { for i := range items { if fi, err := os.Stat(items[i]); err == nil { + parts := strings.SplitN(fi.Name(), node.RevisionIDDelimiter, 2) + if len(parts) != 2 { + appctx.GetLogger(ctx).Error().Err(err).Str("name", fi.Name()).Msg("invalid revision name, skipping") + continue + } mtime := fi.ModTime() rev := &provider.FileVersion{ - Key: filepath.Base(items[i]), + Key: n.ID + node.RevisionIDDelimiter + parts[1], Mtime: uint64(mtime.Unix()), } blobSize, err := node.ReadBlobSizeAttr(items[i]) @@ -94,15 +100,16 @@ func (fs *Decomposedfs) DownloadRevision(ctx context.Context, ref *provider.Refe log := appctx.GetLogger(ctx) // verify revision key format - kp := strings.SplitN(revisionKey, ".REV.", 2) + kp := strings.SplitN(revisionKey, node.RevisionIDDelimiter, 2) if len(kp) != 2 { log.Error().Str("revisionKey", revisionKey).Msg("malformed revisionKey") return nil, errtypes.NotFound(revisionKey) } log.Debug().Str("revisionKey", revisionKey).Msg("DownloadRevision") + spaceID := ref.ResourceId.OpaqueId // check if the node is available and has not been deleted - n, err := node.ReadNode(ctx, fs.lu, kp[0]) + n, err := node.ReadNode(ctx, fs.lu, spaceID, kp[0]) if err != nil { return nil, err } @@ -122,11 +129,11 @@ func (fs *Decomposedfs) DownloadRevision(ctx context.Context, ref *provider.Refe 
return nil, errtypes.PermissionDenied(filepath.Join(n.ParentID, n.Name)) } - contentPath := fs.lu.InternalPath(revisionKey) + contentPath := fs.lu.InternalPath(spaceID, revisionKey) r, err := os.Open(contentPath) if err != nil { - if os.IsNotExist(err) { + if errors.Is(err, iofs.ErrNotExist) { return nil, errtypes.NotFound(contentPath) } return nil, errors.Wrap(err, "Decomposedfs: error opening revision "+revisionKey) @@ -139,14 +146,15 @@ func (fs *Decomposedfs) RestoreRevision(ctx context.Context, ref *provider.Refer log := appctx.GetLogger(ctx) // verify revision key format - kp := strings.SplitN(revisionKey, ".REV.", 2) + kp := strings.SplitN(revisionKey, node.RevisionIDDelimiter, 2) if len(kp) != 2 { log.Error().Str("revisionKey", revisionKey).Msg("malformed revisionKey") return errtypes.NotFound(revisionKey) } + spaceID := ref.ResourceId.StorageId // check if the node is available and has not been deleted - n, err := node.ReadNode(ctx, fs.lu, kp[0]) + n, err := node.ReadNode(ctx, fs.lu, spaceID, kp[0]) if err != nil { return err } @@ -171,11 +179,11 @@ func (fs *Decomposedfs) RestoreRevision(ctx context.Context, ref *provider.Refer } // move current version to new revision - nodePath := fs.lu.InternalPath(kp[0]) + nodePath := fs.lu.InternalPath(spaceID, kp[0]) var fi os.FileInfo if fi, err = os.Stat(nodePath); err == nil { // versions are stored alongside the actual file, so a rename can be efficient and does not cross storage / partition boundaries - versionsPath := fs.lu.InternalPath(kp[0] + ".REV." 
+ fi.ModTime().UTC().Format(time.RFC3339Nano)) + versionsPath := fs.lu.InternalPath(spaceID, kp[0]+node.RevisionIDDelimiter+fi.ModTime().UTC().Format(time.RFC3339Nano)) err = os.Rename(nodePath, versionsPath) if err != nil { @@ -184,7 +192,7 @@ func (fs *Decomposedfs) RestoreRevision(ctx context.Context, ref *provider.Refer // copy old revision to current location - revisionPath := fs.lu.InternalPath(revisionKey) + revisionPath := fs.lu.InternalPath(spaceID, revisionKey) if err = os.Rename(revisionPath, nodePath); err != nil { return diff --git a/pkg/storage/utils/decomposedfs/spaces.go b/pkg/storage/utils/decomposedfs/spaces.go index 4fa9aab127..6dcc3cc0bb 100644 --- a/pkg/storage/utils/decomposedfs/spaces.go +++ b/pkg/storage/utils/decomposedfs/spaces.go @@ -30,7 +30,7 @@ import ( "time" userv1beta1 "github.com/cs3org/go-cs3apis/cs3/identity/user/v1beta1" - permissionsv1beta1 "github.com/cs3org/go-cs3apis/cs3/permissions/v1beta1" + cs3permissions "github.com/cs3org/go-cs3apis/cs3/permissions/v1beta1" v1beta11 "github.com/cs3org/go-cs3apis/cs3/rpc/v1beta1" provider "github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1" types "github.com/cs3org/go-cs3apis/cs3/types/v1beta1" @@ -39,29 +39,25 @@ import ( ctxpkg "github.com/cs3org/reva/pkg/ctx" "github.com/cs3org/reva/pkg/errtypes" "github.com/cs3org/reva/pkg/rgrpc/status" - "github.com/cs3org/reva/pkg/rgrpc/todo/pool" + "github.com/cs3org/reva/pkg/storage/utils/decomposedfs/lookup" "github.com/cs3org/reva/pkg/storage/utils/decomposedfs/node" "github.com/cs3org/reva/pkg/storage/utils/decomposedfs/xattrs" "github.com/cs3org/reva/pkg/utils" "github.com/cs3org/reva/pkg/utils/resourceid" "github.com/google/uuid" + "github.com/pkg/errors" ) const ( spaceTypePersonal = "personal" - spaceTypeProject = "project" - spaceTypeShare = "share" - spaceTypeAny = "*" - spaceIDAny = "*" + // 
spaceTypeProject = "project" + spaceTypeShare = "share" + spaceTypeAny = "*" + spaceIDAny = "*" ) // CreateStorageSpace creates a storage space func (fs *Decomposedfs) CreateStorageSpace(ctx context.Context, req *provider.CreateStorageSpaceRequest) (*provider.CreateStorageSpaceResponse, error) { - // spaces will be located by default in the root of the storage. - r, err := fs.lu.RootNode(ctx) - if err != nil { - return nil, err - } // "everything is a resource" this is the unique ID for the Space resource. spaceID := uuid.New().String() @@ -81,86 +77,85 @@ func (fs *Decomposedfs) CreateStorageSpace(ctx context.Context, req *provider.Cr // TODO enforce a uuid? // TODO clarify if we want to enforce a single personal storage space or if we want to allow sending the spaceid if req.Type == spaceTypePersonal { - spaceID = req.Owner.Id.OpaqueId - } - - n, err := r.Child(ctx, spaceID) - if err != nil { - return nil, err + spaceID = req.GetOwner().GetId().GetOpaqueId() } - if n.Exists { + root, err := node.ReadNode(ctx, fs.lu, spaceID, spaceID) + if err == nil && root.Exists { return nil, errtypes.AlreadyExists("decomposedfs: spaces: space already exists") } - // spaceid and nodeid must be the same - // TODO enforce a uuid? 
- n.ID = spaceID - - if err := fs.tp.CreateDir(ctx, n); err != nil { - return nil, err + if !fs.canCreateSpace(ctx, spaceID) { + return nil, errtypes.PermissionDenied(spaceID) } - // always enable propagation on the storage space root - // mark the space root node as the end of propagation - if err = n.SetMetadata(xattrs.PropagationAttr, "1"); err != nil { - appctx.GetLogger(ctx).Error().Err(err).Interface("node", n).Msg("could not mark node to propagate") - return nil, err - } - - u, ok := ctxpkg.ContextGetUser(ctx) - if !ok { - return nil, fmt.Errorf("decomposedfs: spaces: contextual user not found") + // create a directory node + rootPath := root.InternalPath() + if err = os.MkdirAll(rootPath, 0700); err != nil { + return nil, errors.Wrap(err, "decomposedfs: error creating node") } - ownerID := u.Id - if req.Type == spaceTypeProject { - ownerID = &userv1beta1.UserId{} - } - - if err := n.ChangeOwner(ownerID); err != nil { + if err := root.WriteAllNodeMetadata(); err != nil { return nil, err } + if req.GetOwner() != nil && req.GetOwner().GetId() != nil { + if err := root.WriteOwner(req.GetOwner().GetId()); err != nil { + return nil, err + } + } - err = fs.createStorageSpace(ctx, req.Type, n.ID) + err = fs.linkStorageSpaceType(ctx, req.Type, root.ID) if err != nil { return nil, err } metadata := make(map[string]string, 3) + + // always enable propagation on the storage space root + // mark the space root node as the end of propagation + metadata[xattrs.PropagationAttr] = "1" + metadata[xattrs.SpaceNameAttr] = req.Name + + if req.Type != "" { + metadata[xattrs.SpaceTypeAttr] = req.Type + } + if q := req.GetQuota(); q != nil { // set default space quota metadata[xattrs.QuotaAttr] = strconv.FormatUint(q.QuotaMaxBytes, 10) } - metadata[xattrs.SpaceNameAttr] = req.Name if description != "" { metadata[xattrs.SpaceDescriptionAttr] = description } - if err := xattrs.SetMultiple(n.InternalPath(), metadata); err != nil { + + if err := xattrs.SetMultiple(root.InternalPath(), 
metadata); err != nil { return nil, err } ctx = context.WithValue(ctx, utils.SpaceGrant, struct{}{}) - if err := fs.AddGrant(ctx, &provider.Reference{ - ResourceId: &provider.ResourceId{ - StorageId: spaceID, - OpaqueId: spaceID, - }, - }, &provider.Grant{ - Grantee: &provider.Grantee{ - Type: provider.GranteeType_GRANTEE_TYPE_USER, - Id: &provider.Grantee_UserId{ - UserId: u.Id, + if req.Type != spaceTypePersonal { + u := ctxpkg.ContextMustGetUser(ctx) + if err := fs.AddGrant(ctx, &provider.Reference{ + ResourceId: &provider.ResourceId{ + StorageId: spaceID, + OpaqueId: spaceID, }, - }, - Permissions: ocsconv.NewManagerRole().CS3ResourcePermissions(), - }); err != nil { - return nil, err + }, &provider.Grant{ + Grantee: &provider.Grantee{ + Type: provider.GranteeType_GRANTEE_TYPE_USER, + Id: &provider.Grantee_UserId{ + UserId: u.Id, + }, + }, + Permissions: ocsconv.NewManagerRole().CS3ResourcePermissions(), + }); err != nil { + return nil, err + } } - space, err := fs.storageSpaceFromNode(ctx, n, "*", n.InternalPath(), false) + space, err := fs.storageSpaceFromNode(ctx, root, root.InternalPath(), false) if err != nil { return nil, err } @@ -174,6 +169,63 @@ func (fs *Decomposedfs) CreateStorageSpace(ctx context.Context, req *provider.Cr return resp, nil } +func (fs *Decomposedfs) canListAllSpaces(ctx context.Context) bool { + user := ctxpkg.ContextMustGetUser(ctx) + checkRes, err := fs.permissionsClient.CheckPermission(ctx, &cs3permissions.CheckPermissionRequest{ + Permission: "list-all-spaces", + SubjectRef: &cs3permissions.SubjectReference{ + Spec: &cs3permissions.SubjectReference_UserId{ + UserId: user.Id, + }, + }, + }) + if err != nil { + return false + } + + return checkRes.Status.Code == v1beta11.Code_CODE_OK +} + +// returns true when the user in the context can create a space / resource with storageID and nodeID set to his user opaqueID +func (fs *Decomposedfs) canCreateSpace(ctx context.Context, spaceID string) bool { + user := 
ctxpkg.ContextMustGetUser(ctx) + checkRes, err := fs.permissionsClient.CheckPermission(ctx, &cs3permissions.CheckPermissionRequest{ + Permission: "create-space", + SubjectRef: &cs3permissions.SubjectReference{ + Spec: &cs3permissions.SubjectReference_UserId{ + UserId: user.Id, + }, + }, + Ref: &provider.Reference{ + ResourceId: &provider.ResourceId{ + StorageId: spaceID, + // OpaqueId is the same, no need to transfer it + }, + }, + }) + if err != nil { + return false + } + + return checkRes.Status.Code == v1beta11.Code_CODE_OK +} + +func readSpaceAndNodeFromSpaceTypeLink(path string) (string, string, error) { + link, err := os.Readlink(path) + if err != nil { + return "", "", err + } + // ../../spaces/4c/510ada-c86b-4815-8820-42cdf82c3d51/nodes/4c/51/0a/da/-c86b-4815-8820-42cdf82c3d51 + // ../../spaces/4c/510ada-c86b-4815-8820-42cdf82c3d51/nodes/4c/51/0a/da/-c86b-4815-8820-42cdf82c3d51.T.2022-02-24T12:35:18.196484592Z + // TODO use filepath.Separator to support windows + link = strings.ReplaceAll(link, "/", "") + // ....spaces4c510ada-c86b-4815-8820-42cdf82c3d51nodes4c510ada-c86b-4815-8820-42cdf82c3d51 + if link[0:10] != "....spaces" || link[46:51] != "nodes" { + return "", "", errtypes.InternalError("malformed link") + } + return link[10:46], link[51:], nil +} + // ListStorageSpaces returns a list of StorageSpaces. // The list can be filtered by space type or space id. // Spaces are persisted with symlinks in /spaces// pointing to ../../nodes/, the root node of the space @@ -215,18 +267,45 @@ func (fs *Decomposedfs) ListStorageSpaces(ctx context.Context, filter []*provide } } if len(spaceTypes) == 0 { - spaceTypes = []string{"*"} + spaceTypes = []string{spaceTypeAny} } + canListAllSpaces := fs.canListAllSpaces(ctx) + spaces := []*provider.StorageSpace{} // build the glob path, eg. 
// /path/to/root/spaces/{spaceType}/{spaceId} // /path/to/root/spaces/personal/nodeid // /path/to/root/spaces/shared/nodeid + if spaceID != spaceIDAny && nodeID != spaceIDAny { + // try directly reading the node + n, err := node.ReadNode(ctx, fs.lu, spaceID, nodeID) + if err != nil { + appctx.GetLogger(ctx).Error().Err(err).Str("id", nodeID).Msg("could not read node") + return nil, err + } + if !n.Exists { + // return empty list + return spaces, nil + } + space, err := fs.storageSpaceFromNode(ctx, n, n.InternalPath(), canListAllSpaces) + if err != nil { + return nil, err + } + // filter space types + for _, spaceType := range spaceTypes { + if spaceType == spaceTypeAny || spaceType == space.SpaceType { + spaces = append(spaces, space) + } + } + + return spaces, nil + } + matches := []string{} for _, spaceType := range spaceTypes { - path := filepath.Join(fs.o.Root, "spaces", spaceType, nodeID) + path := filepath.Join(fs.o.Root, "spacetypes", spaceType, nodeID) m, err := filepath.Glob(path) if err != nil { return nil, err @@ -246,41 +325,23 @@ func (fs *Decomposedfs) ListStorageSpaces(ctx context.Context, filter []*provide // the personal spaces must also use the nodeid and not the name numShares := 0 - client, err := pool.GetGatewayServiceClient(fs.o.GatewayAddr) - if err != nil { - return nil, err - } - - user := ctxpkg.ContextMustGetUser(ctx) - checkRes, err := client.CheckPermission(ctx, &permissionsv1beta1.CheckPermissionRequest{ - Permission: "list-all-spaces", - SubjectRef: &permissionsv1beta1.SubjectReference{ - Spec: &permissionsv1beta1.SubjectReference_UserId{ - UserId: user.Id, - }, - }, - }) - if err != nil { - return nil, err - } - - canListAllSpaces := false - if checkRes.Status.Code == v1beta11.Code_CODE_OK { - canListAllSpaces = true - } for i := range matches { - var target string var err error // always read link in case storage space id != node id - if target, err = os.Readlink(matches[i]); err != nil { + spaceID, nodeID, err = 
readSpaceAndNodeFromSpaceTypeLink(matches[i]) + if err != nil { appctx.GetLogger(ctx).Error().Err(err).Str("match", matches[i]).Msg("could not read link, skipping") continue } - n, err := node.ReadNode(ctx, fs.lu, filepath.Base(target)) + n, err := node.ReadNode(ctx, fs.lu, spaceID, nodeID) if err != nil { - appctx.GetLogger(ctx).Error().Err(err).Str("id", filepath.Base(target)).Msg("could not read node, skipping") + appctx.GetLogger(ctx).Error().Err(err).Str("id", nodeID).Msg("could not read node, skipping") + continue + } + + if !n.Exists { continue } @@ -294,7 +355,7 @@ func (fs *Decomposedfs) ListStorageSpaces(ctx context.Context, filter []*provide } // TODO apply more filters - space, err := fs.storageSpaceFromNode(ctx, n, spaceType, matches[i], canListAllSpaces) + space, err := fs.storageSpaceFromNode(ctx, n, matches[i], canListAllSpaces) if err != nil { if _, ok := err.(errtypes.IsPermissionDenied); !ok { appctx.GetLogger(ctx).Error().Err(err).Interface("node", n).Msg("could not convert to storage space") @@ -307,13 +368,12 @@ func (fs *Decomposedfs) ListStorageSpaces(ctx context.Context, filter []*provide // if there are no matches (or they happened to be spaces for the owner) and the node is a child return a space if len(matches) <= numShares && nodeID != spaceID { // try node id - target := filepath.Join(fs.o.Root, "nodes", nodeID) - n, err := node.ReadNode(ctx, fs.lu, filepath.Base(target)) + n, err := node.ReadNode(ctx, fs.lu, spaceID, nodeID) if err != nil { return nil, err } if n.Exists { - space, err := fs.storageSpaceFromNode(ctx, n, "*", n.InternalPath(), canListAllSpaces) + space, err := fs.storageSpaceFromNode(ctx, n, n.InternalPath(), canListAllSpaces) if err != nil { return nil, err } @@ -333,66 +393,17 @@ func (fs *Decomposedfs) UpdateStorageSpace(ctx context.Context, req *provider.Up } space := req.StorageSpace - _, spaceID, _ := utils.SplitStorageSpaceID(space.Id.OpaqueId) - - if restore { - matches, err := 
filepath.Glob(filepath.Join(fs.o.Root, "spaces", spaceTypeAny, spaceID)) - if err != nil { - return nil, err - } - - if len(matches) != 1 { - return &provider.UpdateStorageSpaceResponse{ - Status: &v1beta11.Status{ - Code: v1beta11.Code_CODE_NOT_FOUND, - Message: fmt.Sprintf("restoring space failed: found %d matching spaces", len(matches)), - }, - }, nil - - } - - target, err := os.Readlink(matches[0]) - if err != nil { - appctx.GetLogger(ctx).Error().Err(err).Str("match", matches[0]).Msg("could not read link, skipping") - } + spaceID, _, _ := utils.SplitStorageSpaceID(space.Id.OpaqueId) - n, err := node.ReadNode(ctx, fs.lu, filepath.Base(target)) - if err != nil { - return nil, err - } - - newnode := *n - newnode.Name = strings.Split(n.Name, node.TrashIDDelimiter)[0] - newnode.Exists = false - - err = fs.tp.Move(ctx, n, &newnode) - if err != nil { - return nil, err - } - } - - matches, err := filepath.Glob(filepath.Join(fs.o.Root, "spaces", spaceTypeAny, spaceID)) + node, err := node.ReadNode(ctx, fs.lu, spaceID, spaceID) if err != nil { return nil, err } - if len(matches) != 1 { - return &provider.UpdateStorageSpaceResponse{ - Status: &v1beta11.Status{ - Code: v1beta11.Code_CODE_NOT_FOUND, - Message: fmt.Sprintf("update space failed: found %d matching spaces", len(matches)), - }, - }, nil - } - - target, err := os.Readlink(matches[0]) - if err != nil { - appctx.GetLogger(ctx).Error().Err(err).Str("match", matches[0]).Msg("could not read link, skipping") - } - - node, err := node.ReadNode(ctx, fs.lu, filepath.Base(target)) - if err != nil { - return nil, err + if restore { + if err := node.SetDTime(nil); err != nil { + return nil, err + } } u, ok := ctxpkg.ContextGetUser(ctx) @@ -461,7 +472,7 @@ func (fs *Decomposedfs) UpdateStorageSpace(ctx context.Context, req *provider.Up } // send back the updated data from the storage - updatedSpace, err := fs.storageSpaceFromNode(ctx, node, "*", node.InternalPath(), false) + updatedSpace, err := fs.storageSpaceFromNode(ctx, 
node, node.InternalPath(), false) if err != nil { return nil, err } @@ -482,91 +493,49 @@ func (fs *Decomposedfs) DeleteStorageSpace(ctx context.Context, req *provider.De spaceID := req.Id.OpaqueId - matches, err := filepath.Glob(filepath.Join(fs.o.Root, "spaces", spaceTypeAny, spaceID)) - if err != nil { - return err - } - - if len(matches) != 1 { - return fmt.Errorf("delete space failed: found %d matching spaces", len(matches)) - } - - target, err := os.Readlink(matches[0]) - if err != nil { - appctx.GetLogger(ctx).Error().Err(err).Str("match", matches[0]).Msg("could not read link, skipping") - } - - n, err := node.ReadNode(ctx, fs.lu, filepath.Base(target)) + n, err := node.ReadNode(ctx, fs.lu, spaceID, spaceID) if err != nil { return err } if purge { - if !strings.Contains(n.Name, node.TrashIDDelimiter) { + if !n.IsDisabled() { return errtypes.NewErrtypeFromStatus(status.NewInvalidArg(ctx, "can't purge enabled space")) } - ip := fs.lu.InternalPath(req.Id.OpaqueId) - matches, err := filepath.Glob(ip) - if err != nil { - return err - } - - // TODO: remove blobs - if err := os.RemoveAll(matches[0]); err != nil { - return err - } - matches, err = filepath.Glob(filepath.Join(fs.o.Root, "spaces", spaceTypeAny, req.Id.OpaqueId)) + spaceType, err := n.GetMetadata(xattrs.SpaceTypeAttr) if err != nil { return err } - if len(matches) != 1 { - return fmt.Errorf("delete space failed: found %d matching spaces", len(matches)) - } - - if err := os.RemoveAll(matches[0]); err != nil { + // remove type index + spaceTypePath := filepath.Join(fs.o.Root, "spacetypes", spaceType, spaceID) + if err := os.Remove(spaceTypePath); err != nil { return err } - matches, err = filepath.Glob(filepath.Join(fs.o.Root, "nodes", node.RootID, req.Id.OpaqueId+node.TrashIDDelimiter+"*")) - if err != nil { + // remove space metadata + if err := os.RemoveAll(filepath.Join(fs.o.Root, "spaces", lookup.Pathify(spaceID, 1, 2))); err != nil { return err } - if len(matches) != 1 { - return fmt.Errorf("delete 
root node failed: found %d matching root nodes", len(matches)) - } - - return os.RemoveAll(matches[0]) - } - // don't delete - just rename - dn := *n - deletionTime := time.Now().UTC().Format(time.RFC3339Nano) - dn.Name = n.Name + node.TrashIDDelimiter + deletionTime - dn.Exists = false - err = fs.tp.Move(ctx, n, &dn) - if err != nil { - return err - } + // FIXME remove space blobs - err = os.RemoveAll(matches[0]) - if err != nil { - return err + return nil } - trashPath := dn.InternalPath() - np := filepath.Join(filepath.Dir(matches[0]), filepath.Base(trashPath)) - return os.Symlink(trashPath, np) + // mark as disabled by writing a dtime attribute + dtime := time.Now() + return n.SetDTime(&dtime) } -func (fs *Decomposedfs) createStorageSpace(ctx context.Context, spaceType, spaceID string) error { +func (fs *Decomposedfs) linkStorageSpaceType(ctx context.Context, spaceType string, spaceID string) error { // create space type dir - if err := os.MkdirAll(filepath.Join(fs.o.Root, "spaces", spaceType), 0700); err != nil { + if err := os.MkdirAll(filepath.Join(fs.o.Root, "spacetypes", spaceType), 0700); err != nil { return err } - // we can reuse the node id as the space id - err := os.Symlink("../../nodes/"+spaceID, filepath.Join(fs.o.Root, "spaces", spaceType, spaceID)) + // link space in spacetypes + err := os.Symlink("../../spaces/"+lookup.Pathify(spaceID, 1, 2)+"/nodes/"+lookup.Pathify(spaceID, 4, 2), filepath.Join(fs.o.Root, "spacetypes", spaceType, spaceID)) if err != nil { if isAlreadyExists(err) { appctx.GetLogger(ctx).Debug().Err(err).Str("space", spaceID).Str("spacetype", spaceType).Msg("symlink already exists") @@ -581,7 +550,7 @@ func (fs *Decomposedfs) createStorageSpace(ctx context.Context, spaceType, space return err } -func (fs *Decomposedfs) storageSpaceFromNode(ctx context.Context, n *node.Node, spaceType, nodePath string, canListAllSpaces bool) (*provider.StorageSpace, error) { +func (fs *Decomposedfs) storageSpaceFromNode(ctx context.Context, n 
*node.Node, nodePath string, canListAllSpaces bool) (*provider.StorageSpace, error) { user := ctxpkg.ContextMustGetUser(ctx) if !canListAllSpaces { ok, err := node.NewPermissions(fs.lu).HasPermission(ctx, n, func(p *provider.ResourcePermissions) bool { @@ -591,7 +560,7 @@ func (fs *Decomposedfs) storageSpaceFromNode(ctx context.Context, n *node.Node, return nil, errtypes.PermissionDenied(fmt.Sprintf("user %s is not allowed to Stat the space %s", user.Username, n.ID)) } - if strings.Contains(n.Name, node.TrashIDDelimiter) { + if n.SpaceRoot.IsDisabled() { ok, err := node.NewPermissions(fs.lu).HasPermission(ctx, n, func(p *provider.ResourcePermissions) bool { // TODO: Which permission do I need to see the space? return p.AddGrant @@ -602,34 +571,21 @@ func (fs *Decomposedfs) storageSpaceFromNode(ctx context.Context, n *node.Node, } } - owner, err := n.Owner() - if err != nil { - return nil, err - } - + var err error // TODO apply more filters var sname string - if sname, err = n.GetMetadata(xattrs.SpaceNameAttr); err != nil { + if sname, err = n.SpaceRoot.GetMetadata(xattrs.SpaceNameAttr); err != nil { // FIXME: Is that a severe problem? 
appctx.GetLogger(ctx).Debug().Err(err).Msg("space does not have a name attribute") } - if err := n.FindStorageSpaceRoot(); err != nil { - return nil, err - } - - glob := filepath.Join(fs.o.Root, "spaces", spaceType, n.SpaceRoot.ID) - matches, err := filepath.Glob(glob) - if err != nil { - return nil, err - } - - if len(matches) != 1 { - return nil, errtypes.InternalError("expected only one match for " + glob) - } - - spaceType = filepath.Base(filepath.Dir(matches[0])) + /* + if err := n.FindStorageSpaceRoot(); err != nil { + return nil, err + } + */ + // read the grants from the current node, not the root grants, err := n.ListGrants(ctx) if err != nil { return nil, err @@ -663,26 +619,30 @@ func (fs *Decomposedfs) storageSpaceFromNode(ctx context.Context, n *node.Node, }, }, }, - Id: &provider.StorageSpaceId{OpaqueId: n.SpaceRoot.ID}, + Id: &provider.StorageSpaceId{OpaqueId: n.SpaceRoot.SpaceID}, Root: &provider.ResourceId{ - StorageId: n.SpaceRoot.ID, + StorageId: n.SpaceRoot.SpaceID, OpaqueId: n.SpaceRoot.ID, }, - Name: sname, - SpaceType: spaceType, + Name: sname, + // SpaceType is read from xattr below // Mtime is set either as node.tmtime or as fi.mtime below } - if strings.Contains(n.Name, node.TrashIDDelimiter) { + if space.SpaceType, err = n.SpaceRoot.GetMetadata(xattrs.SpaceTypeAttr); err != nil { + appctx.GetLogger(ctx).Debug().Err(err).Msg("space does not have a type attribute") + } + + if n.SpaceRoot.IsDisabled() { space.Opaque.Map["trashed"] = &types.OpaqueEntry{ Decoder: "plain", Value: []byte("trashed"), } } - if spaceType != spaceTypeProject && owner.OpaqueId != "" { + if n.Owner() != nil && n.Owner().OpaqueId != "" { space.Owner = &userv1beta1.User{ // FIXME only return a UserID, not a full blown user object - Id: owner, + Id: n.Owner(), } } diff --git a/pkg/storage/utils/decomposedfs/testhelpers/helpers.go b/pkg/storage/utils/decomposedfs/testhelpers/helpers.go index 102647811a..25ce44889c 100644 --- 
a/pkg/storage/utils/decomposedfs/testhelpers/helpers.go +++ b/pkg/storage/utils/decomposedfs/testhelpers/helpers.go @@ -23,12 +23,15 @@ import ( "os" "path/filepath" + "github.com/cs3org/reva/pkg/storage/utils/decomposedfs/lookup" "github.com/cs3org/reva/pkg/storage/utils/decomposedfs/xattrs" "github.com/google/uuid" "github.com/pkg/xattr" "github.com/stretchr/testify/mock" userpb "github.com/cs3org/go-cs3apis/cs3/identity/user/v1beta1" + cs3permissions "github.com/cs3org/go-cs3apis/cs3/permissions/v1beta1" + v1beta11 "github.com/cs3org/go-cs3apis/cs3/rpc/v1beta1" providerv1beta1 "github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1" ruser "github.com/cs3org/reva/pkg/ctx" "github.com/cs3org/reva/pkg/storage" @@ -43,15 +46,16 @@ import ( // TestEnv represents a test environment for unit tests type TestEnv struct { - Root string - Fs storage.FS - Tree *tree.Tree - Permissions *mocks.PermissionsChecker - Blobstore *treemocks.Blobstore - Owner *userpb.User - Lookup *decomposedfs.Lookup - Ctx context.Context - SpaceRootRes *providerv1beta1.ResourceId + Root string + Fs storage.FS + Tree *tree.Tree + Permissions *mocks.PermissionsChecker + Blobstore *treemocks.Blobstore + Owner *userpb.User + Lookup *lookup.Lookup + Ctx context.Context + SpaceRootRes *providerv1beta1.ResourceId + PermissionsClient *mocks.CS3PermissionsClient } // NewTestEnv prepares a test environment on disk @@ -81,30 +85,32 @@ func NewTestEnv() (*TestEnv, error) { owner := &userpb.User{ Id: &userpb.UserId{ Idp: "idp", - OpaqueId: "userid", + OpaqueId: "u-s-e-r-id", Type: userpb.UserType_USER_TYPE_PRIMARY, }, Username: "username", } - lookup := &decomposedfs.Lookup{Options: o} + lookup := &lookup.Lookup{Options: o} permissions := &mocks.PermissionsChecker{} + cs3permissionsclient := &mocks.CS3PermissionsClient{} bs := &treemocks.Blobstore{} tree := tree.New(o.Root, true, true, lookup, bs) - fs, err := 
decomposedfs.New(o, lookup, permissions, tree) + fs, err := decomposedfs.New(o, lookup, permissions, tree, cs3permissionsclient) if err != nil { return nil, err } ctx := ruser.ContextSetUser(context.Background(), owner) env := &TestEnv{ - Root: tmpRoot, - Fs: fs, - Tree: tree, - Lookup: lookup, - Permissions: permissions, - Blobstore: bs, - Owner: owner, - Ctx: ctx, + Root: tmpRoot, + Fs: fs, + Tree: tree, + Lookup: lookup, + Permissions: permissions, + Blobstore: bs, + Owner: owner, + Ctx: ctx, + PermissionsClient: cs3permissionsclient, } env.SpaceRootRes, err = env.CreateTestStorageSpace("personal", nil) @@ -136,9 +142,10 @@ func (t *TestEnv) CreateTestDir(name string, parentRef *providerv1beta1.Referenc } // CreateTestFile creates a new file and its metadata and returns a corresponding Node -func (t *TestEnv) CreateTestFile(name, blobID string, blobSize int64, parentID string) (*node.Node, error) { - // Create file in dir1 - file := node.New( +func (t *TestEnv) CreateTestFile(name, blobID, parentID, spaceID string, blobSize int64) (*node.Node, error) { + // Create n in dir1 + n := node.New( + spaceID, uuid.New().String(), parentID, name, @@ -147,22 +154,26 @@ func (t *TestEnv) CreateTestFile(name, blobID string, blobSize int64, parentID s nil, t.Lookup, ) - _, err := os.OpenFile(file.InternalPath(), os.O_CREATE, 0700) + nodePath := n.InternalPath() + if err := os.MkdirAll(filepath.Dir(nodePath), 0700); err != nil { + return nil, err + } + _, err := os.OpenFile(nodePath, os.O_CREATE, 0700) if err != nil { return nil, err } - err = file.WriteAllNodeMetadata(t.Owner.Id) + err = n.WriteAllNodeMetadata() if err != nil { return nil, err } // Link in parent - childNameLink := filepath.Join(t.Lookup.InternalPath(file.ParentID), file.Name) - err = os.Symlink("../"+file.ID, childNameLink) + childNameLink := filepath.Join(n.ParentInternalPath(), n.Name) + err = os.Symlink("../../../../../"+lookup.Pathify(n.ID, 4, 2), childNameLink) if err != nil { return nil, err } - 
return file, err + return n, n.FindStorageSpaceRoot() } // CreateTestStorageSpace will create a storage space with some directories and files @@ -172,7 +183,9 @@ func (t *TestEnv) CreateTestFile(name, blobID string, blobSize int64, parentID s // /dir1/file1 // /dir1/subdir1 func (t *TestEnv) CreateTestStorageSpace(typ string, quota *providerv1beta1.Quota) (*providerv1beta1.ResourceId, error) { - t.Permissions.On("HasPermission", mock.Anything, mock.Anything, mock.Anything).Return(true, nil).Times(1) // Permissions required for setup below + t.PermissionsClient.On("CheckPermission", mock.Anything, mock.Anything, mock.Anything).Times(1).Return(&cs3permissions.CheckPermissionResponse{ + Status: &v1beta11.Status{Code: v1beta11.Code_CODE_OK}, + }, nil) space, err := t.Fs.CreateStorageSpace(t.Ctx, &providerv1beta1.CreateStorageSpaceRequest{ Owner: t.Owner, Type: typ, @@ -185,7 +198,7 @@ func (t *TestEnv) CreateTestStorageSpace(typ string, quota *providerv1beta1.Quot ref := buildRef(space.StorageSpace.Id.OpaqueId, "") // the space name attribute is the stop condition in the lookup - h, err := node.ReadNode(t.Ctx, t.Lookup, space.StorageSpace.Id.OpaqueId) + h, err := node.ReadNode(t.Ctx, t.Lookup, space.StorageSpace.Id.OpaqueId, space.StorageSpace.Id.OpaqueId) if err != nil { return nil, err } @@ -201,7 +214,7 @@ func (t *TestEnv) CreateTestStorageSpace(typ string, quota *providerv1beta1.Quot } // Create file1 in dir1 - _, err = t.CreateTestFile("file1", "file1-blobid", 1234, dir1.ID) + _, err = t.CreateTestFile("file1", "file1-blobid", dir1.ID, dir1.SpaceID, 1234) if err != nil { return nil, err } diff --git a/pkg/storage/utils/decomposedfs/tree/tree.go b/pkg/storage/utils/decomposedfs/tree/tree.go index 3a65789614..a7f57c9266 100644 --- a/pkg/storage/utils/decomposedfs/tree/tree.go +++ b/pkg/storage/utils/decomposedfs/tree/tree.go @@ -22,33 +22,28 @@ import ( "context" "fmt" "io" + "io/fs" "os" "path/filepath" "strconv" "strings" "time" - userpb 
"github.com/cs3org/go-cs3apis/cs3/identity/user/v1beta1" provider "github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1" "github.com/cs3org/reva/pkg/appctx" "github.com/cs3org/reva/pkg/errtypes" "github.com/cs3org/reva/pkg/logger" + "github.com/cs3org/reva/pkg/storage/utils/decomposedfs/lookup" "github.com/cs3org/reva/pkg/storage/utils/decomposedfs/node" "github.com/cs3org/reva/pkg/storage/utils/decomposedfs/xattrs" "github.com/cs3org/reva/pkg/utils" "github.com/google/uuid" "github.com/pkg/errors" - "github.com/pkg/xattr" "github.com/rs/zerolog/log" ) // go:generate mockery -name Blobstore -const ( - spaceTypePersonal = "personal" - spaceTypeShare = "share" -) - // Blobstore defines an interface for storing blobs in a blobstore type Blobstore interface { Upload(key string, reader io.Reader) error @@ -60,10 +55,9 @@ type Blobstore interface { type PathLookup interface { NodeFromResource(ctx context.Context, ref *provider.Reference) (*node.Node, error) NodeFromID(ctx context.Context, id *provider.ResourceId) (n *node.Node, err error) - RootNode(ctx context.Context) (node *node.Node, err error) InternalRoot() string - InternalPath(ID string) string + InternalPath(spaceID, nodeID string) string Path(ctx context.Context, n *node.Node) (path string, err error) ShareFolder() string } @@ -93,14 +87,13 @@ func New(root string, tta bool, tsa bool, lu PathLookup, bs Blobstore) *Tree { } // Setup prepares the tree structure -func (t *Tree) Setup(owner *userpb.UserId, propagateToRoot bool) error { +func (t *Tree) Setup() error { // create data paths for internal layout dataPaths := []string{ - filepath.Join(t.root, "nodes"), + filepath.Join(t.root, "spaces"), // notes contain symlinks from nodes//uploads/ to ../../uploads/ // better to keep uploads on a fast / volatile storage before a workflow finally moves them to the nodes dir filepath.Join(t.root, "uploads"), - 
filepath.Join(t.root, "trash"), } for _, v := range dataPaths { err := os.MkdirAll(v, 0700) @@ -109,37 +102,12 @@ func (t *Tree) Setup(owner *userpb.UserId, propagateToRoot bool) error { } } - // the root node has an empty name - // the root node has no parent - n := node.New(node.RootID, "", "", 0, "", nil, t.lookup) - err := t.createNode(n, owner) - if err != nil { - return err - } - - // set propagation flag - v := "0" - if propagateToRoot { - v = "1" - } - if err = n.SetMetadata(xattrs.PropagationAttr, v); err != nil { - return err - } - // create spaces folder and iterate over existing nodes to populate it - spacesPath := filepath.Join(t.root, "spaces") - fi, err := os.Stat(spacesPath) - if os.IsNotExist(err) { - // create personal spaces dir - if err := os.MkdirAll(filepath.Join(spacesPath, spaceTypePersonal), 0700); err != nil { - return err - } - // create share spaces dir - if err := os.MkdirAll(filepath.Join(spacesPath, spaceTypeShare), 0700); err != nil { - return err - } + nodesPath := filepath.Join(t.root, "nodes") + fi, err := os.Stat(nodesPath) + if err == nil && fi.IsDir() { - f, err := os.Open(filepath.Join(t.root, "nodes")) + f, err := os.Open(nodesPath) if err != nil { return err } @@ -148,41 +116,66 @@ func (t *Tree) Setup(owner *userpb.UserId, propagateToRoot bool) error { return err } - for i := range nodes { - nodePath := filepath.Join(t.root, "nodes", nodes[i].Name()) + for _, node := range nodes { + nodePath := filepath.Join(nodesPath, node.Name()) - // is it a user root? -> create personal space if isRootNode(nodePath) { - // we can reuse the node id as the space id - t.linkSpace(spaceTypePersonal, nodes[i].Name(), nodes[i].Name()) + if err := t.moveNode(node.Name(), node.Name()); err != nil { + logger.New().Error().Err(err). + Str("space", node.Name()). + Msg("could not move space") + continue + } + t.linkSpace("personal", node.Name()) } + } + // TODO delete nodesPath if empty + + } - // is it a shared node? 
-> create share space - if isSharedNode(nodePath) { - // we can reuse the node id as the space id - t.linkSpace(spaceTypeShare, nodes[i].Name(), nodes[i].Name()) + return nil +} +func (t *Tree) moveNode(spaceID, nodeID string) error { + dirPath := filepath.Join(t.root, "nodes", nodeID) + f, err := os.Open(dirPath) + if err != nil { + return err + } + children, err := f.Readdir(0) + if err != nil { + return err + } + for _, child := range children { + old := filepath.Join(t.root, "nodes", child.Name()) + new := filepath.Join(t.root, "spaces", lookup.Pathify(spaceID, 1, 2), "nodes", lookup.Pathify(child.Name(), 4, 2)) + if err := os.Rename(old, new); err != nil { + logger.New().Error().Err(err). + Str("space", spaceID). + Str("nodes", child.Name()). + Str("oldpath", old). + Str("newpath", new). + Msg("could not rename node") + } + if child.IsDir() { + if err := t.moveNode(spaceID, child.Name()); err != nil { + return err } } - } else if !fi.IsDir() { - // check if it is a directory - return fmt.Errorf("%s is not a directory", spacesPath) } - return nil } // linkSpace creates a new symbolic link for a space with the given type st, and node id -func (t *Tree) linkSpace(spaceType, spaceID, nodeID string) { - spacesPath := filepath.Join(t.root, "spaces", spaceType, spaceID) - expectedTarget := "../../nodes/" + nodeID - linkTarget, err := os.Readlink(spacesPath) +func (t *Tree) linkSpace(spaceType, spaceID string) { + spaceTypesPath := filepath.Join(t.root, "spacetypes", spaceType, spaceID) + expectedTarget := "../../spaces/" + lookup.Pathify(spaceID, 1, 2) + "/nodes/" + lookup.Pathify(spaceID, 4, 2) + linkTarget, err := os.Readlink(spaceTypesPath) if errors.Is(err, os.ErrNotExist) { - err = os.Symlink(expectedTarget, spacesPath) + err = os.Symlink(expectedTarget, spaceTypesPath) if err != nil { logger.New().Error().Err(err). Str("space_type", spaceType). Str("space", spaceID). - Str("node", nodeID). 
Msg("could not create symlink") } } else { @@ -190,14 +183,12 @@ func (t *Tree) linkSpace(spaceType, spaceID, nodeID string) { logger.New().Error().Err(err). Str("space_type", spaceType). Str("space", spaceID). - Str("node", nodeID). Msg("could not read symlink") } if linkTarget != expectedTarget { logger.New().Warn(). Str("space_type", spaceType). Str("space", spaceID). - Str("node", nodeID). Str("expected", expectedTarget). Str("actual", linkTarget). Msg("expected a different link target") @@ -205,10 +196,13 @@ func (t *Tree) linkSpace(spaceType, spaceID, nodeID string) { } } +// isRootNode checks if a node is a space root func isRootNode(nodePath string) bool { attr, err := xattrs.Get(nodePath, xattrs.ParentidAttr) return err == nil && attr == node.RootID } + +/* func isSharedNode(nodePath string) bool { if attrs, err := xattr.List(nodePath); err == nil { for i := range attrs { @@ -219,12 +213,13 @@ func isSharedNode(nodePath string) bool { } return false } +*/ // GetMD returns the metadata of a node in the tree func (t *Tree) GetMD(ctx context.Context, n *node.Node) (os.FileInfo, error) { md, err := os.Stat(n.InternalPath()) if err != nil { - if os.IsNotExist(err) { + if errors.Is(err, fs.ErrNotExist) { return nil, errtypes.NotFound(n.ID) } return nil, errors.Wrap(err, "tree: error stating "+n.ID) @@ -245,25 +240,14 @@ func (t *Tree) CreateDir(ctx context.Context, n *node.Node) (err error) { n.ID = uuid.New().String() } - // who will become the owner? 
the owner of the parent node, not the current user - var p *node.Node - p, err = n.Parent() - if err != nil { - return - } - var owner *userpb.UserId - owner, err = p.Owner() - if err != nil { - return - } - - err = t.createNode(n, owner) + err = t.createNode(n) if err != nil { return } // make child appear in listings - err = os.Symlink("../"+n.ID, filepath.Join(t.lookup.InternalPath(n.ParentID), n.Name)) + relativeNodePath := filepath.Join("../../../../../", lookup.Pathify(n.ID, 4, 2)) + err = os.Symlink(relativeNodePath, filepath.Join(n.ParentInternalPath(), n.Name)) if err != nil { // no better way to check unfortunately if !strings.Contains(err.Error(), "file exists") { @@ -298,7 +282,8 @@ func (t *Tree) Move(ctx context.Context, oldNode *node.Node, newNode *node.Node) // are we just renaming (parent stays the same)? if oldNode.ParentID == newNode.ParentID { - parentPath := t.lookup.InternalPath(oldNode.ParentID) + // parentPath := t.lookup.InternalPath(oldNode.SpaceID, oldNode.ParentID) + parentPath := oldNode.ParentInternalPath() // rename child err = os.Rename( @@ -322,8 +307,8 @@ func (t *Tree) Move(ctx context.Context, oldNode *node.Node, newNode *node.Node) // rename child err = os.Rename( - filepath.Join(t.lookup.InternalPath(oldNode.ParentID), oldNode.Name), - filepath.Join(t.lookup.InternalPath(newNode.ParentID), newNode.Name), + filepath.Join(oldNode.ParentInternalPath(), oldNode.Name), + filepath.Join(newNode.ParentInternalPath(), newNode.Name), ) if err != nil { return errors.Wrap(err, "Decomposedfs: could not move child") @@ -352,12 +337,22 @@ func (t *Tree) Move(ctx context.Context, oldNode *node.Node, newNode *node.Node) return nil } +func readChildNodeFromLink(path string) (string, error) { + link, err := os.Readlink(path) + if err != nil { + return "", err + } + nodeID := strings.TrimLeft(link, "/.") + nodeID = strings.ReplaceAll(nodeID, "/", "") + return nodeID, nil +} + // ListFolder lists the content of a folder node func (t *Tree) 
ListFolder(ctx context.Context, n *node.Node) ([]*node.Node, error) { dir := n.InternalPath() f, err := os.Open(dir) if err != nil { - if os.IsNotExist(err) { + if errors.Is(err, fs.ErrNotExist) { return nil, errtypes.NotFound(dir) } return nil, errors.Wrap(err, "tree: error listing "+dir) @@ -370,13 +365,13 @@ func (t *Tree) ListFolder(ctx context.Context, n *node.Node) ([]*node.Node, erro } nodes := []*node.Node{} for i := range names { - link, err := os.Readlink(filepath.Join(dir, names[i])) + nodeID, err := readChildNodeFromLink(filepath.Join(dir, names[i])) if err != nil { // TODO log continue } - child, err := node.ReadNode(ctx, t.lookup, filepath.Base(link)) + child, err := node.ReadNode(ctx, t.lookup, n.SpaceID, nodeID) if err != nil { // TODO log continue @@ -394,14 +389,9 @@ func (t *Tree) Delete(ctx context.Context, n *node.Node) (err error) { deletingSharedResource := ctx.Value(appctx.DeletingSharedResource) if deletingSharedResource != nil && deletingSharedResource.(bool) { - src := filepath.Join(t.lookup.InternalPath(n.ParentID), n.Name) + src := filepath.Join(n.ParentInternalPath(), n.Name) return os.Remove(src) } - // Prepare the trash - err = os.MkdirAll(filepath.Join(t.root, "trash", n.SpaceRoot.ID), 0700) - if err != nil { - return - } // get the original path origin, err := t.lookup.Path(ctx, n) @@ -417,13 +407,24 @@ func (t *Tree) Delete(ctx context.Context, n *node.Node) (err error) { deletionTime := time.Now().UTC().Format(time.RFC3339Nano) + // Prepare the trash + trashLink := filepath.Join(t.root, "spaces", lookup.Pathify(n.SpaceRoot.ID, 1, 2), "trash", lookup.Pathify(n.ID, 4, 2)) + if err := os.MkdirAll(filepath.Dir(trashLink), 0700); err != nil { + // Roll back changes + _ = n.RemoveMetadata(xattrs.TrashOriginAttr) + return err + } + + // FIXME can we just move the node into the trash dir? instead of adding another symlink and appending a trash timestamp? + // can we just use the mtime as the trash time? 
+ // TODO store a trashed by userid + // first make node appear in the space trash // parent id and name are stored as extended attributes in the node itself - trashLink := filepath.Join(t.root, "trash", n.SpaceRoot.ID, n.ID) - err = os.Symlink("../../nodes/"+n.ID+node.TrashIDDelimiter+deletionTime, trashLink) + err = os.Symlink("../../../../../nodes/"+lookup.Pathify(n.ID, 4, 2)+node.TrashIDDelimiter+deletionTime, trashLink) if err != nil { - // To roll back changes - // TODO unset trashOriginAttr + // Roll back changes + _ = n.RemoveMetadata(xattrs.TrashOriginAttr) return } @@ -435,7 +436,8 @@ func (t *Tree) Delete(ctx context.Context, n *node.Node) (err error) { if err != nil { // To roll back changes // TODO remove symlink - // TODO unset trashOriginAttr + // Roll back changes + _ = n.RemoveMetadata(xattrs.TrashOriginAttr) return } @@ -443,13 +445,14 @@ func (t *Tree) Delete(ctx context.Context, n *node.Node) (err error) { _ = os.Remove(n.LockFilePath()) // finally remove the entry from the parent dir - src := filepath.Join(t.lookup.InternalPath(n.ParentID), n.Name) + src := filepath.Join(n.ParentInternalPath(), n.Name) err = os.Remove(src) if err != nil { // To roll back changes // TODO revert the rename // TODO remove symlink - // TODO unset trashOriginAttr + // Roll back changes + _ = n.RemoveMetadata(xattrs.TrashOriginAttr) return } @@ -490,7 +493,7 @@ func (t *Tree) RestoreRecycleItemFunc(ctx context.Context, spaceid, key, trashPa } // add the entry for the parent dir - err = os.Symlink("../"+recycleNode.ID, filepath.Join(t.lookup.InternalPath(targetNode.ParentID), targetNode.Name)) + err = os.Symlink("../../../../../"+lookup.Pathify(recycleNode.ID, 4, 2), filepath.Join(targetNode.ParentInternalPath(), targetNode.Name)) if err != nil { return err } @@ -506,21 +509,6 @@ func (t *Tree) RestoreRecycleItemFunc(ctx context.Context, spaceid, key, trashPa } } - // the new node will inherit the permissions of its parent - p, err := targetNode.Parent() - if err != 
nil { - return err - } - - po, err := p.Owner() - if err != nil { - return err - } - - if err := recycleNode.ChangeOwner(po); err != nil { - return err - } - targetNode.Exists = true // update name attribute if err := recycleNode.SetMetadata(xattrs.NameAttr, targetNode.Name); err != nil { @@ -536,7 +524,7 @@ func (t *Tree) RestoreRecycleItemFunc(ctx context.Context, spaceid, key, trashPa // delete item link in trash if err = os.Remove(trashItem); err != nil { - log.Error().Err(err).Str("trashItem", trashItem).Msg("error deleting trashitem") + log.Error().Err(err).Str("trashItem", trashItem).Msg("error deleting trash item") } return t.Propagate(ctx, targetNode) } @@ -551,6 +539,8 @@ func (t *Tree) PurgeRecycleItemFunc(ctx context.Context, spaceid, key string, pa } fn := func() error { + // delete the actual node + // TODO recursively delete children if err := os.RemoveAll(deletedNodePath); err != nil { log.Error().Err(err).Str("deletedNodePath", deletedNodePath).Msg("error deleting trash node") return err @@ -586,15 +576,7 @@ func (t *Tree) Propagate(ctx context.Context, n *node.Node) (err error) { } // is propagation enabled for the parent node? 
- - var root *node.Node - if n.SpaceRoot == nil { - if root, err = t.lookup.RootNode(ctx); err != nil { - return - } - } else { - root = n.SpaceRoot - } + root := n.SpaceRoot // use a sync time and don't rely on the mtime of the current node, as the stat might not change when a rename happened too quickly sTime := time.Now().UTC() @@ -644,7 +626,7 @@ func (t *Tree) Propagate(ctx context.Context, n *node.Node) (err error) { if updateSyncTime { // update the tree time of the parent node - if err = n.SetTMTime(sTime); err != nil { + if err = n.SetTMTime(&sTime); err != nil { sublog.Error().Err(err).Time("tmtime", sTime).Msg("could not update tmtime of parent node") } else { sublog.Debug().Time("tmtime", sTime).Msg("updated tmtime of parent node") @@ -772,56 +754,81 @@ func (t *Tree) DeleteBlob(key string) error { } // TODO check if node exists? -func (t *Tree) createNode(n *node.Node, owner *userpb.UserId) (err error) { +func (t *Tree) createNode(n *node.Node) (err error) { // create a directory node nodePath := n.InternalPath() if err = os.MkdirAll(nodePath, 0700); err != nil { return errors.Wrap(err, "Decomposedfs: error creating node") } - return n.WriteAllNodeMetadata(owner) + return n.WriteAllNodeMetadata() } -// TODO refactor the returned params into Node properties? would make all the path transformations go away... 
-func (t *Tree) readRecycleItem(ctx context.Context, spaceid, key, path string) (recycleNode *node.Node, trashItem string, deletedNodePath string, origin string, err error) { - if key == "" { - return nil, "", "", "", errtypes.InternalError("key is empty") +// readTrashLink returns nodeID and timestamp +func readTrashLink(path string) (string, string, error) { + link, err := os.Readlink(path) + if err != nil { + return "", "", err } + // ../../../../../nodes/e5/6c/75/a8/-d235-4cbb-8b4e-48b6fd0f2094.T.2022-02-16T14:38:11.769917408Z + // TODO use filepath.Separator to support windows + link = strings.ReplaceAll(link, "/", "") + // ..........nodese56c75a8-d235-4cbb-8b4e-48b6fd0f2094.T.2022-02-16T14:38:11.769917408Z + if link[0:15] != "..........nodes" || link[51:54] != ".T." { + return "", "", errtypes.InternalError("malformed trash link") + } + return link[15:51], link[54:], nil +} - trashItem = filepath.Join(t.lookup.InternalRoot(), "trash", spaceid, key, path) - - var link string - link, err = os.Readlink(trashItem) +// readTrashChildLink returns nodeID +func readTrashChildLink(path string) (string, error) { + link, err := os.Readlink(path) if err != nil { - appctx.GetLogger(ctx).Error().Err(err).Str("trashItem", trashItem).Msg("error reading trash link") - return + return "", err } + // ../../../../../e5/6c/75/a8/-d235-4cbb-8b4e-48b6fd0f2094 + // TODO use filepath.Separator to support windows + link = strings.ReplaceAll(link, "/", "") + // ..........e56c75a8-d235-4cbb-8b4e-48b6fd0f2094 + if link[0:10] != ".........." 
{ + return "", errtypes.InternalError("malformed trash child link") + } + return link[10:], nil +} - var attrStr string - trashNodeID := filepath.Base(link) - deletedNodePath = t.lookup.InternalPath(trashNodeID) - - owner := &userpb.UserId{} - // lookup ownerId in extended attributes - if attrStr, err = xattrs.Get(deletedNodePath, xattrs.OwnerIDAttr); err == nil { - owner.OpaqueId = attrStr - } else { - return +// TODO refactor the returned params into Node properties? would make all the path transformations go away... +func (t *Tree) readRecycleItem(ctx context.Context, spaceID, key, path string) (recycleNode *node.Node, trashItem string, deletedNodePath string, origin string, err error) { + if key == "" { + return nil, "", "", "", errtypes.InternalError("key is empty") } - // lookup ownerIdp in extended attributes - if attrStr, err = xattrs.Get(deletedNodePath, xattrs.OwnerIDPAttr); err == nil { - owner.Idp = attrStr + + var nodeID, timeSuffix string + + trashItem = filepath.Join(t.lookup.InternalRoot(), "spaces", lookup.Pathify(spaceID, 1, 2), "trash", lookup.Pathify(key, 4, 2), path) + if path == "" || path == "/" { + nodeID, timeSuffix, err = readTrashLink(trashItem) + if err != nil { + appctx.GetLogger(ctx).Error().Err(err).Str("trashItem", trashItem).Msg("error reading trash link") + return + } + deletedNodePath = filepath.Join(t.lookup.InternalPath(spaceID, nodeID) + node.TrashIDDelimiter + timeSuffix) } else { - return + // children of a trashed node are in the nodes folder + nodeID, err = readTrashChildLink(trashItem) + if err != nil { + appctx.GetLogger(ctx).Error().Err(err).Str("trashItem", trashItem).Msg("error reading trash child link") + return + } + deletedNodePath = t.lookup.InternalPath(spaceID, nodeID) } - // lookup ownerType in extended attributes - if attrStr, err = xattrs.Get(deletedNodePath, xattrs.OwnerTypeAttr); err == nil { - owner.Type = utils.UserTypeMap(attrStr) - } else { + + recycleNode = node.New(spaceID, nodeID, "", "", 0, "", nil, 
t.lookup) + recycleNode.SpaceRoot, err = node.ReadNode(ctx, t.lookup, spaceID, spaceID) + if err != nil { return } - recycleNode = node.New(trashNodeID, "", "", 0, "", owner, t.lookup) + var attrStr string // lookup blobID in extended attributes if attrStr, err = xattrs.Get(deletedNodePath, xattrs.BlobIDAttr); err == nil { recycleNode.BlobID = attrStr @@ -843,38 +850,15 @@ func (t *Tree) readRecycleItem(ctx context.Context, spaceid, key, path string) ( return } - // look up space root from the trashed node - err = recycleNode.FindStorageSpaceRoot() - - if path == "" || path == "/" { - parts := strings.SplitN(filepath.Base(link), node.TrashIDDelimiter, 2) - if len(parts) != 2 { - appctx.GetLogger(ctx).Error().Err(err).Str("trashItem", trashItem).Interface("parts", parts).Msg("malformed trash link") - return - } - // update the node id, drop the `.T.{timestamp}` suffix - recycleNode.ID = parts[0] - } - // get origin node, is relative to space root origin = "/" - deletedNodeRootPath := deletedNodePath - if path != "" && path != "/" { - trashItemRoot := filepath.Join(t.lookup.InternalRoot(), "trash", spaceid, key) - var rootLink string - rootLink, err = os.Readlink(trashItemRoot) - if err != nil { - appctx.GetLogger(ctx).Error().Err(err).Str("trashItem", trashItem).Msg("error reading trash link") - return - } - deletedNodeRootPath = t.lookup.InternalPath(filepath.Base(rootLink)) - } + trashRootItemPath := filepath.Join(t.lookup.InternalRoot(), "spaces", lookup.Pathify(spaceID, 1, 2), "trash", lookup.Pathify(key, 4, 2)) // lookup origin path in extended attributes - if attrStr, err = xattrs.Get(deletedNodeRootPath, xattrs.TrashOriginAttr); err == nil { + if attrStr, err = xattrs.Get(trashRootItemPath, xattrs.TrashOriginAttr); err == nil { origin = filepath.Join(attrStr, path) } else { - log.Error().Err(err).Str("trashItem", trashItem).Str("link", link).Str("deletedNodePath", deletedNodePath).Msg("could not read origin path, restoring to /") + 
log.Error().Err(err).Str("trashItem", trashItem).Str("deletedNodePath", deletedNodePath).Msg("could not read origin path, restoring to /") } return diff --git a/pkg/storage/utils/decomposedfs/tree/tree_test.go b/pkg/storage/utils/decomposedfs/tree/tree_test.go index 09d79f072e..72e61dcdac 100644 --- a/pkg/storage/utils/decomposedfs/tree/tree_test.go +++ b/pkg/storage/utils/decomposedfs/tree/tree_test.go @@ -23,6 +23,7 @@ import ( "path" provider "github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1" + "github.com/cs3org/reva/pkg/storage/utils/decomposedfs/lookup" "github.com/cs3org/reva/pkg/storage/utils/decomposedfs/node" helpers "github.com/cs3org/reva/pkg/storage/utils/decomposedfs/testhelpers" "github.com/cs3org/reva/pkg/storage/utils/decomposedfs/tree" @@ -106,7 +107,7 @@ var _ = Describe("Tree", func() { }) It("moves the file to the trash", func() { - trashPath := path.Join(env.Root, "trash", n.SpaceRoot.ID, n.ID) + trashPath := path.Join(env.Root, "spaces", lookup.Pathify(n.SpaceRoot.ID, 1, 2), "trash", lookup.Pathify(n.ID, 4, 2)) _, err := os.Stat(trashPath) Expect(err).ToNot(HaveOccurred()) }) @@ -117,7 +118,7 @@ var _ = Describe("Tree", func() { }) It("sets the trash origin xattr", func() { - trashPath := path.Join(env.Root, "trash", n.SpaceRoot.ID, n.ID) + trashPath := path.Join(env.Root, "spaces", lookup.Pathify(n.SpaceRoot.ID, 1, 2), "trash", lookup.Pathify(n.ID, 4, 2)) attr, err := xattr.Get(trashPath, xattrs.TrashOriginAttr) Expect(err).ToNot(HaveOccurred()) Expect(string(attr)).To(Equal("/dir1/file1")) @@ -136,7 +137,7 @@ var _ = Describe("Tree", func() { JustBeforeEach(func() { env.Blobstore.On("Delete", n.BlobID).Return(nil) - trashPath = path.Join(env.Root, "trash", n.SpaceRoot.ID, n.ID) + trashPath = path.Join(env.Root, "spaces", lookup.Pathify(n.SpaceRoot.ID, 1, 2), "trash", lookup.Pathify(n.ID, 4, 2)) Expect(t.Delete(env.Ctx, n)).To(Succeed()) }) @@ -238,7 +239,7 @@ var _ = Describe("Tree", func() { 
) JustBeforeEach(func() { - trashPath = path.Join(env.Root, "trash", n.SpaceRoot.ID, n.ID) + trashPath = path.Join(env.Root, "spaces", lookup.Pathify(n.SpaceRoot.ID, 1, 2), "trash", lookup.Pathify(n.ID, 4, 2)) Expect(t.Delete(env.Ctx, n)).To(Succeed()) }) @@ -278,7 +279,7 @@ var _ = Describe("Tree", func() { Describe("with TreeTimeAccounting enabled", func() { It("sets the tmtime of the parent", func() { - file, err := env.CreateTestFile("file1", "", 1, dir.ID) + file, err := env.CreateTestFile("file1", "", dir.ID, dir.SpaceID, 1) Expect(err).ToNot(HaveOccurred()) perms := node.OwnerPermissions() @@ -296,7 +297,7 @@ var _ = Describe("Tree", func() { Describe("with TreeSizeAccounting enabled", func() { It("calculates the size", func() { - file, err := env.CreateTestFile("file1", "", 1, dir.ID) + file, err := env.CreateTestFile("file1", "", dir.ID, dir.SpaceID, 1) Expect(err).ToNot(HaveOccurred()) err = env.Tree.Propagate(env.Ctx, file) @@ -307,9 +308,9 @@ var _ = Describe("Tree", func() { }) It("considers all files", func() { - _, err := env.CreateTestFile("file1", "", 1, dir.ID) + _, err := env.CreateTestFile("file1", "", dir.ID, dir.SpaceID, 1) Expect(err).ToNot(HaveOccurred()) - file2, err := env.CreateTestFile("file2", "", 100, dir.ID) + file2, err := env.CreateTestFile("file2", "", dir.ID, dir.SpaceID, 100) Expect(err).ToNot(HaveOccurred()) err = env.Tree.Propagate(env.Ctx, file2) @@ -325,7 +326,7 @@ var _ = Describe("Tree", func() { err = subdir.SetTreeSize(uint64(200)) Expect(err).ToNot(HaveOccurred()) - file, err := env.CreateTestFile("file1", "", 1, dir.ID) + file, err := env.CreateTestFile("file1", "", dir.ID, dir.SpaceID, 1) Expect(err).ToNot(HaveOccurred()) err = env.Tree.Propagate(env.Ctx, file) diff --git a/pkg/storage/utils/decomposedfs/upload.go b/pkg/storage/utils/decomposedfs/upload.go index f73342ade6..a1d5a2f7a6 100644 --- a/pkg/storage/utils/decomposedfs/upload.go +++ b/pkg/storage/utils/decomposedfs/upload.go @@ -28,6 +28,7 @@ import ( "hash" 
"hash/adler32" "io" + iofs "io/fs" "io/ioutil" "os" "path/filepath" @@ -41,6 +42,7 @@ import ( "github.com/cs3org/reva/pkg/errtypes" "github.com/cs3org/reva/pkg/logger" "github.com/cs3org/reva/pkg/storage/utils/chunking" + "github.com/cs3org/reva/pkg/storage/utils/decomposedfs/lookup" "github.com/cs3org/reva/pkg/storage/utils/decomposedfs/node" "github.com/cs3org/reva/pkg/storage/utils/decomposedfs/xattrs" "github.com/cs3org/reva/pkg/utils" @@ -245,10 +247,6 @@ func (fs *Decomposedfs) NewUpload(ctx context.Context, info tusd.FileInfo) (uplo } usr := ctxpkg.ContextMustGetUser(ctx) - owner, err := p.Owner() - if err != nil { - return nil, errors.Wrap(err, "Decomposedfs: error determining owner") - } var spaceRoot string if info.Storage != nil { if spaceRoot, ok = info.Storage["SpaceRoot"]; !ok { @@ -272,9 +270,6 @@ func (fs *Decomposedfs) NewUpload(ctx context.Context, info tusd.FileInfo) (uplo "UserType": utils.UserTypeToString(usr.Id.Type), "UserName": usr.Username, - "OwnerIdp": owner.Idp, - "OwnerId": owner.OpaqueId, - "LogLevel": log.GetLevel().String(), } // Create binary file in the upload folder with no content @@ -313,7 +308,7 @@ func (fs *Decomposedfs) GetUpload(ctx context.Context, id string) (tusd.Upload, info := tusd.FileInfo{} data, err := ioutil.ReadFile(infoPath) if err != nil { - if os.IsNotExist(err) { + if errors.Is(err, iofs.ErrNotExist) { // Interpret os.ErrNotExist as 404 Not Found err = tusd.ErrNotFound } @@ -459,7 +454,9 @@ func (upload *fileUpload) FinishUpload(ctx context.Context) (err error) { return } + spaceID := upload.info.Storage["SpaceRoot"] n := node.New( + spaceID, upload.info.Storage["NodeId"], upload.info.Storage["NodeParentId"], upload.info.Storage["NodeName"], @@ -468,7 +465,7 @@ func (upload *fileUpload) FinishUpload(ctx context.Context) (err error) { nil, upload.fs.lu, ) - n.SpaceRoot = node.New(upload.info.Storage["SpaceRoot"], "", "", 0, "", nil, upload.fs.lu) + 
n.SpaceRoot = node.New(spaceID, spaceID, "", "", 0, "", nil, upload.fs.lu) // check lock if err := n.CheckLock(ctx); err != nil { @@ -556,7 +553,7 @@ func (upload *fileUpload) FinishUpload(ctx context.Context) (err error) { // FIXME move versioning to blobs ... no need to copy all the metadata! well ... it does if we want to version metadata... // versions are stored alongside the actual file, so a rename can be efficient and does not cross storage / partition boundaries - versionsPath = upload.fs.lu.InternalPath(n.ID + ".REV." + fi.ModTime().UTC().Format(time.RFC3339Nano)) + versionsPath = upload.fs.lu.InternalPath(spaceID, n.ID+node.RevisionIDDelimiter+fi.ModTime().UTC().Format(time.RFC3339Nano)) // This move drops all metadata!!! We copy it below with CopyMetadata // FIXME the node must remain the same. otherwise we might restore share metadata @@ -589,9 +586,11 @@ func (upload *fileUpload) FinishUpload(ctx context.Context) (err error) { Msg("Decomposedfs: could not truncate") return } + if err := os.MkdirAll(filepath.Dir(targetPath), 0700); err != nil { + sublog.Warn().Err(err).Msg("Decomposedfs: could not create node dir, trying to write file anyway") + } if err = os.Rename(upload.binPath, targetPath); err != nil { - sublog.Err(err). - Msg("Decomposedfs: could not rename") + sublog.Error().Err(err).Msg("Decomposedfs: could not rename") return } if versionsPath != "" { @@ -618,16 +617,13 @@ func (upload *fileUpload) FinishUpload(ctx context.Context) (err error) { tryWritingChecksum(&sublog, n, "adler32", adler32h) // who will become the owner? the owner of the parent actually ... 
not the currently logged in user - err = n.WriteAllNodeMetadata(&userpb.UserId{ - Idp: upload.info.Storage["OwnerIdp"], - OpaqueId: upload.info.Storage["OwnerId"], - }) + err = n.WriteAllNodeMetadata() if err != nil { return errors.Wrap(err, "Decomposedfs: could not write metadata") } // link child name to parent if it is new - childNameLink := filepath.Join(upload.fs.lu.InternalPath(n.ParentID), n.Name) + childNameLink := filepath.Join(n.ParentInternalPath(), n.Name) var link string link, err = os.Readlink(childNameLink) if err == nil && link != "../"+n.ID { @@ -641,15 +637,16 @@ func (upload *fileUpload) FinishUpload(ctx context.Context) (err error) { return errors.Wrap(err, "Decomposedfs: could not remove symlink child entry") } } - if os.IsNotExist(err) || link != "../"+n.ID { - if err = os.Symlink("../"+n.ID, childNameLink); err != nil { + if errors.Is(err, iofs.ErrNotExist) || link != "../"+n.ID { + relativeNodePath := filepath.Join("../../../../../", lookup.Pathify(n.ID, 4, 2)) + if err = os.Symlink(relativeNodePath, childNameLink); err != nil { return errors.Wrap(err, "Decomposedfs: could not symlink child entry") } } // only delete the upload if it was successfully written to the storage if err = os.Remove(upload.infoPath); err != nil { - if !os.IsNotExist(err) { + if !errors.Is(err, iofs.ErrNotExist) { sublog.Err(err).Msg("Decomposedfs: could not delete upload info") return } @@ -687,13 +684,13 @@ func tryWritingChecksum(log *zerolog.Logger, n *node.Node, algo string, h hash.H func (upload *fileUpload) discardChunk() { if err := os.Remove(upload.binPath); err != nil { - if !os.IsNotExist(err) { + if !errors.Is(err, iofs.ErrNotExist) { appctx.GetLogger(upload.ctx).Err(err).Interface("info", upload.info).Str("binPath", upload.binPath).Interface("info", upload.info).Msg("Decomposedfs: could not discard chunk") return } } if err := os.Remove(upload.infoPath); err != nil { - if !os.IsNotExist(err) { + if !errors.Is(err, iofs.ErrNotExist) { 
appctx.GetLogger(upload.ctx).Err(err).Interface("info", upload.info).Str("infoPath", upload.infoPath).Interface("info", upload.info).Msg("Decomposedfs: could not discard chunk info") return } @@ -712,12 +709,12 @@ func (fs *Decomposedfs) AsTerminatableUpload(upload tusd.Upload) tusd.Terminatab // Terminate terminates the upload func (upload *fileUpload) Terminate(ctx context.Context) error { if err := os.Remove(upload.infoPath); err != nil { - if !os.IsNotExist(err) { + if !errors.Is(err, iofs.ErrNotExist) { return err } } if err := os.Remove(upload.binPath); err != nil { - if !os.IsNotExist(err) { + if !errors.Is(err, iofs.ErrNotExist) { return err } } diff --git a/pkg/storage/utils/decomposedfs/upload_test.go b/pkg/storage/utils/decomposedfs/upload_test.go index 074cd68e96..70735e5052 100644 --- a/pkg/storage/utils/decomposedfs/upload_test.go +++ b/pkg/storage/utils/decomposedfs/upload_test.go @@ -26,12 +26,14 @@ import ( "os" userpb "github.com/cs3org/go-cs3apis/cs3/identity/user/v1beta1" + cs3permissions "github.com/cs3org/go-cs3apis/cs3/permissions/v1beta1" v1beta11 "github.com/cs3org/go-cs3apis/cs3/rpc/v1beta1" provider "github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1" ruser "github.com/cs3org/reva/pkg/ctx" "github.com/cs3org/reva/pkg/errtypes" "github.com/cs3org/reva/pkg/storage" "github.com/cs3org/reva/pkg/storage/utils/decomposedfs" + "github.com/cs3org/reva/pkg/storage/utils/decomposedfs/lookup" "github.com/cs3org/reva/pkg/storage/utils/decomposedfs/mocks" "github.com/cs3org/reva/pkg/storage/utils/decomposedfs/node" "github.com/cs3org/reva/pkg/storage/utils/decomposedfs/options" @@ -55,16 +57,17 @@ var _ = Describe("File uploads", func() { ctx context.Context spaceID string - o *options.Options - lookup *decomposedfs.Lookup - permissions *mocks.PermissionsChecker - bs *treemocks.Blobstore + o *options.Options + lu *lookup.Lookup + permissions 
*mocks.PermissionsChecker + cs3permissionsclient *mocks.CS3PermissionsClient + bs *treemocks.Blobstore ) BeforeEach(func() { ref = &provider.Reference{ ResourceId: &provider.ResourceId{ - StorageId: "userid", + StorageId: "u-s-e-r-id", }, Path: "/foo", } @@ -72,7 +75,7 @@ var _ = Describe("File uploads", func() { user = &userpb.User{ Id: &userpb.UserId{ Idp: "idp", - OpaqueId: "userid", + OpaqueId: "u-s-e-r-id", Type: userpb.UserType_USER_TYPE_PRIMARY, }, Username: "username", @@ -80,7 +83,7 @@ var _ = Describe("File uploads", func() { rootRef = &provider.Reference{ ResourceId: &provider.ResourceId{ - StorageId: "userid", + StorageId: "u-s-e-r-id", }, Path: "/", } @@ -94,8 +97,9 @@ var _ = Describe("File uploads", func() { "root": tmpRoot, }) Expect(err).ToNot(HaveOccurred()) - lookup = &decomposedfs.Lookup{Options: o} + lu = &lookup.Lookup{Options: o} permissions = &mocks.PermissionsChecker{} + cs3permissionsclient = &mocks.CS3PermissionsClient{} bs = &treemocks.Blobstore{} }) @@ -107,10 +111,12 @@ var _ = Describe("File uploads", func() { }) BeforeEach(func() { - permissions.On("HasPermission", mock.Anything, mock.Anything, mock.Anything).Times(1).Return(true, nil) + cs3permissionsclient.On("CheckPermission", mock.Anything, mock.Anything, mock.Anything).Return(&cs3permissions.CheckPermissionResponse{ + Status: &v1beta11.Status{Code: v1beta11.Code_CODE_OK}, + }, nil) var err error - tree := tree.New(o.Root, true, true, lookup, bs) - fs, err = decomposedfs.New(o, lookup, permissions, tree) + tree := tree.New(o.Root, true, true, lu, bs) + fs, err = decomposedfs.New(o, lu, permissions, tree, cs3permissionsclient) Expect(err).ToNot(HaveOccurred()) resp, err := fs.CreateStorageSpace(ctx, &provider.CreateStorageSpaceRequest{Owner: user, Type: "personal"}) @@ -141,7 +147,7 @@ var _ = Describe("File uploads", func() { When("the user wants to initiate a file upload", func() { It("fails", func() { - msg := "error: permission denied: userid/foo" + msg := "error: permission 
denied: u-s-e-r-id/foo" _, err := fs.InitiateUpload(ctx, ref, 10, map[string]string{}) Expect(err).To(MatchError(msg)) }) @@ -152,7 +158,7 @@ var _ = Describe("File uploads", func() { JustBeforeEach(func() { var err error // the space name attribute is the stop condition in the lookup - h, err := lookup.RootNode(ctx) + h, err := lu.NodeFromResource(ctx, rootRef) Expect(err).ToNot(HaveOccurred()) err = xattr.Set(h.InternalPath(), xattrs.SpaceNameAttr, []byte("username")) Expect(err).ToNot(HaveOccurred()) @@ -161,7 +167,7 @@ var _ = Describe("File uploads", func() { When("the user wants to initiate a file upload", func() { It("fails", func() { - msg := "error: permission denied: userid/foo" + msg := "error: permission denied: u-s-e-r-id/foo" _, err := fs.InitiateUpload(ctx, ref, 10, map[string]string{}) Expect(err).To(MatchError(msg)) }) diff --git a/pkg/storage/utils/decomposedfs/xattrs/xattrs.go b/pkg/storage/utils/decomposedfs/xattrs/xattrs.go index 49f8e86fa0..36f5a506c9 100644 --- a/pkg/storage/utils/decomposedfs/xattrs/xattrs.go +++ b/pkg/storage/utils/decomposedfs/xattrs/xattrs.go @@ -76,6 +76,11 @@ const ( // stored as a readable time.RFC3339Nano TreeMTimeAttr string = OcisPrefix + "tmtime" + // the deletion/disabled time of a space or node + // used to mark space roots as disabled + // stored as a readable time.RFC3339Nano + DTimeAttr string = OcisPrefix + "dtime" + // the size of the tree below this node, // propagated when treesize_accounting is true and // user.ocis.propagation=1 is set @@ -87,6 +92,7 @@ const ( // the name given to a storage space. It should not contain any semantics as its only purpose is to be read. 
SpaceNameAttr string = OcisPrefix + "space.name" + SpaceTypeAttr string = OcisPrefix + "space.type" SpaceDescriptionAttr string = OcisPrefix + "space.description" SpaceReadmeAttr string = OcisPrefix + "space.readme" SpaceImageAttr string = OcisPrefix + "space.image" @@ -185,13 +191,19 @@ func CopyMetadata(src, target string, filter func(attributeName string) bool) (e // No file locking is involved here as writing a single xattr is // considered to be atomic. func Set(filePath string, key string, val string) error { - if err := xattr.Set(filePath, key, []byte(val)); err != nil { return err } return nil } +// Remove an extended attribute key +// No file locking is involved here as writing a single xattr is +// considered to be atomic. +func Remove(filePath string, key string) error { + return xattr.Remove(filePath, key) +} + // SetMultiple allows setting multiple key value pairs at once // the changes are protected with an file lock // If the file lock can not be acquired the function returns a diff --git a/tests/acceptance/expected-failures-on-OCIS-storage.md b/tests/acceptance/expected-failures-on-OCIS-storage.md index 089ea06169..9ec1ac111a 100644 --- a/tests/acceptance/expected-failures-on-OCIS-storage.md +++ b/tests/acceptance/expected-failures-on-OCIS-storage.md @@ -480,8 +480,6 @@ The first two tests work against ocis. 
There must be something wrong in the CI s - [apiSharePublicLink1/createPublicLinkShare.feature:375](https://github.com/owncloud/core/blob/master/tests/acceptance/features/apiSharePublicLink1/createPublicLinkShare.feature#L375) - [apiSharePublicLink1/createPublicLinkShare.feature:376](https://github.com/owncloud/core/blob/master/tests/acceptance/features/apiSharePublicLink1/createPublicLinkShare.feature#L376) -- [apiSharePublicLink1/createPublicLinkShare.feature:477](https://github.com/owncloud/core/blob/master/tests/acceptance/features/apiSharePublicLink1/createPublicLinkShare.feature#L477) -- [apiSharePublicLink1/createPublicLinkShare.feature:478](https://github.com/owncloud/core/blob/master/tests/acceptance/features/apiSharePublicLink1/createPublicLinkShare.feature#L478) - [apiShareManagementBasicToShares/deleteShareFromShares.feature:212](https://github.com/owncloud/core/blob/master/tests/acceptance/features/apiShareManagementBasicToShares/deleteShareFromShares.feature#L212) - [apiShareManagementBasicToShares/deleteShareFromShares.feature:213](https://github.com/owncloud/core/blob/master/tests/acceptance/features/apiShareManagementBasicToShares/deleteShareFromShares.feature#L213) - [apiShareManagementBasicToShares/deleteShareFromShares.feature:214](https://github.com/owncloud/core/blob/master/tests/acceptance/features/apiShareManagementBasicToShares/deleteShareFromShares.feature#L214) diff --git a/tests/acceptance/expected-failures-on-S3NG-storage.md b/tests/acceptance/expected-failures-on-S3NG-storage.md index 1ed8ede453..164294783f 100644 --- a/tests/acceptance/expected-failures-on-S3NG-storage.md +++ b/tests/acceptance/expected-failures-on-S3NG-storage.md @@ -501,8 +501,6 @@ File and sync features in a shared scenario - 
[apiSharePublicLink1/createPublicLinkShare.feature:375](https://github.com/owncloud/core/blob/master/tests/acceptance/features/apiSharePublicLink1/createPublicLinkShare.feature#L375) - [apiSharePublicLink1/createPublicLinkShare.feature:376](https://github.com/owncloud/core/blob/master/tests/acceptance/features/apiSharePublicLink1/createPublicLinkShare.feature#L376) -- [apiSharePublicLink1/createPublicLinkShare.feature:477](https://github.com/owncloud/core/blob/master/tests/acceptance/features/apiSharePublicLink1/createPublicLinkShare.feature#L477) -- [apiSharePublicLink1/createPublicLinkShare.feature:478](https://github.com/owncloud/core/blob/master/tests/acceptance/features/apiSharePublicLink1/createPublicLinkShare.feature#L478) - [apiShareManagementBasicToShares/deleteShareFromShares.feature:212](https://github.com/owncloud/core/blob/master/tests/acceptance/features/apiShareManagementBasicToShares/deleteShareFromShares.feature#L212) - [apiShareManagementBasicToShares/deleteShareFromShares.feature:213](https://github.com/owncloud/core/blob/master/tests/acceptance/features/apiShareManagementBasicToShares/deleteShareFromShares.feature#L213) - [apiShareManagementBasicToShares/deleteShareFromShares.feature:214](https://github.com/owncloud/core/blob/master/tests/acceptance/features/apiShareManagementBasicToShares/deleteShareFromShares.feature#L214) diff --git a/tests/integration/grpc/fixtures/storageprovider-ocis.toml b/tests/integration/grpc/fixtures/storageprovider-ocis.toml index 49c0dd1af5..b9012140d2 100644 --- a/tests/integration/grpc/fixtures/storageprovider-ocis.toml +++ b/tests/integration/grpc/fixtures/storageprovider-ocis.toml @@ -9,5 +9,4 @@ root = "{{root}}/storage" treetime_accounting = true treesize_accounting = true enable_home = true -userprovidersvc = "localhost:18000" -gateway_addr = "{{gateway_address}}" \ No newline at end of file +permissionssvc = "{{permissions_address}}" diff --git 
a/tests/integration/grpc/gateway_storageprovider_test.go b/tests/integration/grpc/gateway_storageprovider_test.go index 2a3abbb5a6..3cb75a8b45 100644 --- a/tests/integration/grpc/gateway_storageprovider_test.go +++ b/tests/integration/grpc/gateway_storageprovider_test.go @@ -30,13 +30,13 @@ import ( userpb "github.com/cs3org/go-cs3apis/cs3/identity/user/v1beta1" rpcv1beta1 "github.com/cs3org/go-cs3apis/cs3/rpc/v1beta1" storagep "github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1" - typesv1beta1 "github.com/cs3org/go-cs3apis/cs3/types/v1beta1" "github.com/cs3org/reva/pkg/auth/scope" ctxpkg "github.com/cs3org/reva/pkg/ctx" "github.com/cs3org/reva/pkg/rgrpc/todo/pool" "github.com/cs3org/reva/pkg/storage" "github.com/cs3org/reva/pkg/storage/fs/ocis" jwt "github.com/cs3org/reva/pkg/token/manager/jwt" + "github.com/cs3org/reva/pkg/utils" "github.com/cs3org/reva/tests/helpers" . "github.com/onsi/ginkgo/v2" @@ -177,6 +177,7 @@ var _ = Describe("gateway", func() { shard1Fs, err = ocis.New(map[string]interface{}{ "root": revads["storage"].StorageRoot, "userprovidersvc": revads["users"].GrpcAddress, + "permissionssvc": revads["permissions"].GrpcAddress, "enable_home": true, "treesize_accounting": true, "treetime_accounting": true, @@ -201,6 +202,7 @@ var _ = Describe("gateway", func() { shard2Fs, err = ocis.New(map[string]interface{}{ "root": revads["storage"].StorageRoot, "userprovidersvc": revads["users"].GrpcAddress, + "permissionssvc": revads["permissions"].GrpcAddress, "enable_home": true, "treesize_accounting": true, "treetime_accounting": true, @@ -285,14 +287,6 @@ var _ = Describe("gateway", func() { It("places new spaces in the correct shard", func() { createRes, err := serviceClient.CreateStorageSpace(ctx, &storagep.CreateStorageSpaceRequest{ - Opaque: &typesv1beta1.Opaque{ - Map: map[string]*typesv1beta1.OpaqueEntry{ - "path": { - Decoder: "plain", - 
Value: []byte("/projects"), - }, - }, - }, Owner: user, Type: "project", Name: "o - project", @@ -310,9 +304,9 @@ var _ = Describe("gateway", func() { Expect(err).ToNot(HaveOccurred()) Expect(listRes.Status.Code).To(Equal(rpcv1beta1.Code_CODE_OK)) - _, err = os.Stat(path.Join(revads["storage"].StorageRoot, "/spaces/project", space.Id.OpaqueId)) + _, err = os.Stat(path.Join(revads["storage"].StorageRoot, "/spacetypes/project", space.Id.OpaqueId)) Expect(err).To(HaveOccurred()) - _, err = os.Stat(path.Join(revads["storage2"].StorageRoot, "/spaces/project", space.Id.OpaqueId)) + _, err = os.Stat(path.Join(revads["storage2"].StorageRoot, "/spacetypes/project", space.Id.OpaqueId)) Expect(err).ToNot(HaveOccurred()) }) @@ -347,12 +341,11 @@ var _ = Describe("gateway", func() { Context("with a basic user storage", func() { var ( - fs storage.FS - embeddedFs storage.FS - homeSpace *storagep.StorageSpace - embeddedSpace *storagep.StorageSpace - embeddedSpaceID string - embeddedRef *storagep.Reference + fs storage.FS + embeddedFs storage.FS + homeSpace *storagep.StorageSpace + embeddedSpace *storagep.StorageSpace + embeddedRef *storagep.Reference ) BeforeEach(func() { @@ -369,8 +362,7 @@ var _ = Describe("gateway", func() { var err error fs, err = ocis.New(map[string]interface{}{ "root": revads["storage"].StorageRoot, - "userprovidersvc": revads["users"].GrpcAddress, - "gateway_addr": revads["gateway"].GrpcAddress, + "permissionssvc": revads["permissions"].GrpcAddress, "enable_home": true, "treesize_accounting": true, "treetime_accounting": true, @@ -395,6 +387,7 @@ var _ = Describe("gateway", func() { embeddedFs, err = ocis.New(map[string]interface{}{ "root": revads["storage2"].StorageRoot, "userprovidersvc": revads["users"].GrpcAddress, + "permissionssvc": revads["permissions"].GrpcAddress, "enable_home": true, "treesize_accounting": true, "treetime_accounting": true, @@ -421,7 +414,6 @@ var _ = Describe("gateway", func() { []byte("22"), ) Expect(err).ToNot(HaveOccurred()) 
- embeddedSpaceID = embeddedSpace.Id.OpaqueId }) Describe("ListContainer", func() { @@ -480,21 +472,23 @@ var _ = Describe("gateway", func() { info := statRes.Info Expect(info.Type).To(Equal(storagep.ResourceType_RESOURCE_TYPE_CONTAINER)) - Expect(info.Path).To(Equal(user.Id.OpaqueId)) + Expect(utils.ResourceIDEqual(info.Id, homeRef.ResourceId)).To(BeTrue()) + Expect(info.Path).To(Equal("")) // path of a root node of a space is always "" Expect(info.Owner.OpaqueId).To(Equal(user.Id.OpaqueId)) // TODO: size aggregating is done by the client now - so no chance testing that here // Expect(info.Size).To(Equal(uint64(3))) // home: 1, embedded: 2 }) - It("stats the embedded space", func() { + It("stats the root of embedded space", func() { statRes, err := serviceClient.Stat(ctx, &storagep.StatRequest{Ref: embeddedRef}) Expect(err).ToNot(HaveOccurred()) Expect(statRes.Status.Code).To(Equal(rpcv1beta1.Code_CODE_OK)) info := statRes.Info Expect(info.Type).To(Equal(storagep.ResourceType_RESOURCE_TYPE_CONTAINER)) - Expect(info.Path).To(Equal(embeddedSpaceID)) + Expect(utils.ResourceIDEqual(info.Id, embeddedRef.ResourceId)).To(BeTrue()) + Expect(info.Path).To(Equal("")) // path of a root node of a space is always "" Expect(info.Size).To(Equal(uint64(2))) }) diff --git a/tests/integration/grpc/grpc_suite_test.go b/tests/integration/grpc/grpc_suite_test.go index 042725da66..b9bbc7fe9a 100644 --- a/tests/integration/grpc/grpc_suite_test.go +++ b/tests/integration/grpc/grpc_suite_test.go @@ -58,7 +58,7 @@ type Revad struct { Cleanup cleanupFunc // Function to kill the process and cleanup the temp. root. If the given parameter is true the files will be kept to make debugging failures easier. } -// stardRevads takes a list of revad configuration files plus a map of +// startRevads takes a list of revad configuration files plus a map of // variables that need to be substituted in them and starts them. // // A unique port is assigned to each spawned instance. 
@@ -152,7 +152,7 @@ func startRevads(configs map[string]string, variables map[string]string) (map[st } // even the port is open the service might not be available yet - time.Sleep(1 * time.Second) + time.Sleep(2 * time.Second) revad := &Revad{ TmpRoot: tmpRoot, diff --git a/tests/integration/grpc/storageprovider_test.go b/tests/integration/grpc/storageprovider_test.go index 2679c98b19..5656619f64 100644 --- a/tests/integration/grpc/storageprovider_test.go +++ b/tests/integration/grpc/storageprovider_test.go @@ -30,6 +30,7 @@ import ( ctxpkg "github.com/cs3org/reva/pkg/ctx" "github.com/cs3org/reva/pkg/rgrpc/todo/pool" "github.com/cs3org/reva/pkg/storage" + "github.com/cs3org/reva/pkg/storage/fs/nextcloud" "github.com/cs3org/reva/pkg/storage/fs/ocis" jwt "github.com/cs3org/reva/pkg/token/manager/jwt" "github.com/cs3org/reva/pkg/utils" @@ -59,11 +60,12 @@ func createFS(provider string, revads map[string]*Revad) (storage.FS, error) { switch provider { case "ocis": conf["root"] = revads["storage"].StorageRoot + conf["permissionssvc"] = revads["permissions"].GrpcAddress f = ocis.New case "nextcloud": - conf["root"] = revads["storage"].StorageRoot - conf["enable_home"] = true - f = ocis.New + conf["endpoint"] = "http://localhost:8080/apps/sciencemesh/" + conf["mock_http"] = true + f = nextcloud.New } return f(conf) } @@ -130,19 +132,19 @@ var _ = Describe("storage providers", func() { assertCreateHome := func(provider string) { It("creates a home directory", func() { homeRef := ref(provider, homePath) - _, err := serviceClient.Stat(ctx, &storagep.StatRequest{Ref: homeRef}) + statRes, err := serviceClient.Stat(ctx, &storagep.StatRequest{Ref: homeRef}) Expect(err).ToNot(HaveOccurred()) - // Expect(statRes.Status.Code).To(Equal(rpcv1beta1.Code_CODE_NOT_FOUND)) + Expect(statRes.Status.Code).To(Equal(rpcv1beta1.Code_CODE_NOT_FOUND)) res, err := serviceClient.CreateStorageSpace(ctx, &storagep.CreateStorageSpaceRequest{ 
Owner: user, Type: "personal", Name: user.Id.OpaqueId, }) - Expect(res.Status.Code).To(Equal(rpcv1beta1.Code_CODE_OK)) Expect(err).ToNot(HaveOccurred()) + Expect(res.Status.Code).To(Equal(rpcv1beta1.Code_CODE_OK)) - statRes, err := serviceClient.Stat(ctx, &storagep.StatRequest{Ref: ref(provider, homePath)}) + statRes, err = serviceClient.Stat(ctx, &storagep.StatRequest{Ref: ref(provider, homePath)}) Expect(err).ToNot(HaveOccurred()) Expect(statRes.Status.Code).To(Equal(rpcv1beta1.Code_CODE_OK)) @@ -705,8 +707,6 @@ var _ = Describe("storage providers", func() { vRef.ResourceId = &storagep.ResourceId{StorageId: user.Id.OpaqueId} } - ctx := ctxpkg.ContextSetUser(context.Background(), user) - _, err = fs.CreateStorageSpace(ctx, &storagep.CreateStorageSpaceRequest{ Owner: user, Type: "personal", @@ -729,7 +729,8 @@ var _ = Describe("storage providers", func() { }) suite("ocis", map[string]string{ - "storage": "storageprovider-ocis.toml", + "storage": "storageprovider-ocis.toml", + "permissions": "permissions-ocis-ci.toml", }) }) diff --git a/tests/oc-integration-tests/drone/storage-users-0-9.toml b/tests/oc-integration-tests/drone/storage-users-0-9.toml index 5fc9255a41..805d57d2a1 100644 --- a/tests/oc-integration-tests/drone/storage-users-0-9.toml +++ b/tests/oc-integration-tests/drone/storage-users-0-9.toml @@ -26,6 +26,7 @@ data_server_url = "http://revad-services:11001/data" root = "/drone/src/tmp/reva/data-0-9" treetime_accounting = true treesize_accounting = true +permissionssvc = "localhost:10000" # we have a locally running dataprovider [http] @@ -39,3 +40,4 @@ temp_folder = "/var/tmp/reva/tmp" root = "/drone/src/tmp/reva/data-0-9" treetime_accounting = true treesize_accounting = true +permissionssvc = "localhost:10000" diff --git a/tests/oc-integration-tests/drone/storage-users-a-f.toml b/tests/oc-integration-tests/drone/storage-users-a-f.toml index 2a868acd93..e72c368f0e 100644 --- a/tests/oc-integration-tests/drone/storage-users-a-f.toml +++ 
b/tests/oc-integration-tests/drone/storage-users-a-f.toml @@ -26,6 +26,7 @@ data_server_url = "http://revad-services:11011/data" root = "/drone/src/tmp/reva/data-a-f" treetime_accounting = true treesize_accounting = true +permissionssvc = "localhost:10000" # we have a locally running dataprovider [http] @@ -39,3 +40,4 @@ temp_folder = "/var/tmp/reva/tmp" root = "/drone/src/tmp/reva/data-a-f" treetime_accounting = true treesize_accounting = true +permissionssvc = "localhost:10000" diff --git a/tests/oc-integration-tests/drone/storage-users-ocis.toml b/tests/oc-integration-tests/drone/storage-users-ocis.toml index 0cb5a88f86..ed175d2850 100644 --- a/tests/oc-integration-tests/drone/storage-users-ocis.toml +++ b/tests/oc-integration-tests/drone/storage-users-ocis.toml @@ -17,13 +17,12 @@ address = "0.0.0.0:11000" driver = "ocis" expose_data_server = true data_server_url = "http://revad-services:11001/data" -gateway_addr = "0.0.0.0:19000" [grpc.services.storageprovider.drivers.ocis] root = "/drone/src/tmp/reva/data" treetime_accounting = true treesize_accounting = true -gateway_addr = "0.0.0.0:19000" +permissionssvc = "localhost:10000" # we have a locally running dataprovider [http] @@ -37,3 +36,4 @@ temp_folder = "/drone/src/tmp/reva/tmp" root = "/drone/src/tmp/reva/data" treetime_accounting = true treesize_accounting = true +permissionssvc = "localhost:10000" diff --git a/tests/oc-integration-tests/drone/storage-users-s3ng.toml b/tests/oc-integration-tests/drone/storage-users-s3ng.toml index 90d98068ae..88b707ae45 100644 --- a/tests/oc-integration-tests/drone/storage-users-s3ng.toml +++ b/tests/oc-integration-tests/drone/storage-users-s3ng.toml @@ -21,6 +21,7 @@ data_server_url = "http://revad-services:11001/data" root = "/drone/src/tmp/reva/data" treetime_accounting = true treesize_accounting = true +permissionssvc = "localhost:10000" "s3.endpoint" = "http://ceph:8080" "s3.region" = "default" "s3.bucket" = "test" @@ -39,6 +40,7 @@ temp_folder = 
"/drone/src/tmp/reva/tmp" root = "/drone/src/tmp/reva/data" treetime_accounting = true treesize_accounting = true +permissionssvc = "localhost:10000" "s3.endpoint" = "http://ceph:8080" "s3.region" = "default" "s3.bucket" = "test" diff --git a/tests/oc-integration-tests/local/ldap-users.toml b/tests/oc-integration-tests/local/ldap-users.toml index 65e2d00f6a..b853c480c3 100644 --- a/tests/oc-integration-tests/local/ldap-users.toml +++ b/tests/oc-integration-tests/local/ldap-users.toml @@ -16,7 +16,7 @@ address = "0.0.0.0:18000" auth_manager = "ldap" [grpc.services.authprovider.auth_managers.ldap] -hostname="localhost" +hostname="ldap" port=636 insecure=true base_dn="dc=owncloud,dc=com" @@ -34,7 +34,7 @@ cn="cn" driver = "ldap" [grpc.services.userprovider.drivers.ldap] -hostname="localhost" +hostname="ldap" port=636 insecure=true base_dn="dc=owncloud,dc=com" @@ -57,7 +57,7 @@ gid="entryuuid" driver = "ldap" [grpc.services.groupprovider.drivers.ldap] -hostname="localhost" +hostname="ldap" port=636 insecure=true base_dn="dc=owncloud,dc=com" diff --git a/tests/oc-integration-tests/local/storage-users-0-9.toml b/tests/oc-integration-tests/local/storage-users-0-9.toml index 89c7975c7b..bd79f5917f 100644 --- a/tests/oc-integration-tests/local/storage-users-0-9.toml +++ b/tests/oc-integration-tests/local/storage-users-0-9.toml @@ -26,6 +26,7 @@ data_server_url = "http://localhost:11001/data" root = "/var/tmp/reva/data-0-9" treetime_accounting = true treesize_accounting = true +permissionssvc = "localhost:10000" # we have a locally running dataprovider [http] @@ -39,3 +40,4 @@ temp_folder = "/var/tmp/reva/tmp" root = "/var/tmp/reva/data-0-9" treetime_accounting = true treesize_accounting = true +permissionssvc = "localhost:10000" diff --git a/tests/oc-integration-tests/local/storage-users-a-f.toml b/tests/oc-integration-tests/local/storage-users-a-f.toml index 18f201df34..f4941fd7e2 100644 --- a/tests/oc-integration-tests/local/storage-users-a-f.toml +++ 
b/tests/oc-integration-tests/local/storage-users-a-f.toml @@ -26,6 +26,7 @@ data_server_url = "http://localhost:11011/data" root = "/var/tmp/reva/data-a-f" treetime_accounting = true treesize_accounting = true +permissionssvc = "localhost:10000" # we have a locally running dataprovider [http] @@ -39,3 +40,4 @@ temp_folder = "/var/tmp/reva/tmp" root = "/var/tmp/reva/data-a-f" treetime_accounting = true treesize_accounting = true +permissionssvc = "localhost:10000" diff --git a/tests/oc-integration-tests/local/storage-users.toml b/tests/oc-integration-tests/local/storage-users.toml index 350cd8ef6e..0c50bcb3d8 100644 --- a/tests/oc-integration-tests/local/storage-users.toml +++ b/tests/oc-integration-tests/local/storage-users.toml @@ -26,6 +26,7 @@ data_server_url = "http://localhost:11001/data" root = "/var/tmp/reva/data" treetime_accounting = true treesize_accounting = true +permissionssvc = "localhost:10000" # we have a locally running dataprovider [http] @@ -39,4 +40,4 @@ temp_folder = "/var/tmp/reva/tmp" root = "/var/tmp/reva/data" treetime_accounting = true treesize_accounting = true -gateway_addr = "0.0.0.0:19000" +permissionssvc = "localhost:10000"