From 7ba39c19c4b4d49cdbcdbcc10db53752a60fdc2f Mon Sep 17 00:00:00 2001
From: Hugo Gonzalez Labrador
Date: Mon, 15 Feb 2021 14:47:54 +0100
Subject: [PATCH 1/6] eosfs: set quota

---
 .../config/packages/cbox/group/rest/_index.md |  4 +--
 .../services/gateway/usershareprovider.go     |  3 +++
 pkg/eosclient/eosbinary/eosbinary.go          | 12 +++++++++
 pkg/eosclient/eosclient.go                    | 10 +++++++
 pkg/eosclient/eosgrpc/eosgrpc.go              |  5 ++++
 pkg/storage/utils/eosfs/config.go             |  9 +++++++
 pkg/storage/utils/eosfs/eosfs.go              | 27 +++++++++++++++++++
 7 files changed, 68 insertions(+), 2 deletions(-)

diff --git a/docs/content/en/docs/config/packages/cbox/group/rest/_index.md b/docs/content/en/docs/config/packages/cbox/group/rest/_index.md
index de5cbcec53..7c32ff8126 100644
--- a/docs/content/en/docs/config/packages/cbox/group/rest/_index.md
+++ b/docs/content/en/docs/config/packages/cbox/group/rest/_index.md
@@ -16,11 +16,11 @@ redis_address = "localhost:6379"
 {{< /highlight >}}
 {{% /dir %}}
 
-{{% dir name="user_groups_cache_expiration" type="int" default=5 %}}
+{{% dir name="group_members_cache_expiration" type="int" default=5 %}}
 The time in minutes for which the members of a group would be cached
 [[Ref]](https://github.com/cs3org/reva/tree/master/pkg/cbox/group/rest/rest.go#L75)
 {{< highlight toml >}}
 [cbox.group.rest]
-user_groups_cache_expiration = 5
+group_members_cache_expiration = 5
 {{< /highlight >}}
 {{% /dir %}}
diff --git a/internal/grpc/services/gateway/usershareprovider.go b/internal/grpc/services/gateway/usershareprovider.go
index 53066fdb90..c3a638f2fe 100644
--- a/internal/grpc/services/gateway/usershareprovider.go
+++ b/internal/grpc/services/gateway/usershareprovider.go
@@ -64,6 +64,9 @@ func (s *svc) CreateShare(ctx context.Context, req *collaboration.CreateShareReq
 	// TODO(labkode): if both commits are enabled they could be done concurrently.
 	if s.c.CommitShareToStorageGrant {
 		addGrantStatus, err := s.addGrant(ctx, req.ResourceInfo.Id, req.Grant.Grantee, req.Grant.Permissions.Permissions)
+		if err != nil {
+			return nil, errors.New("gateway: error adding grant to storage")
+		}
 		if addGrantStatus.Code != rpc.Code_CODE_OK {
 			return &collaboration.CreateShareResponse{
 				Status: addGrantStatus,
diff --git a/pkg/eosclient/eosbinary/eosbinary.go b/pkg/eosclient/eosbinary/eosbinary.go
index 8db63bdcc2..b49ce42606 100644
--- a/pkg/eosclient/eosbinary/eosbinary.go
+++ b/pkg/eosclient/eosbinary/eosbinary.go
@@ -489,6 +489,18 @@ func (c *Client) GetQuota(ctx context.Context, username, rootUID, rootGID, path
 	return c.parseQuota(path, stdout)
 }
 
+// SetQuota sets the quota of a user on the quota node defined by path
+func (c *Client) SetQuota(ctx context.Context, rootUID, rootGID string, info *eosclient.SetQuotaInfo) error {
+	maxBytes := fmt.Sprintf("%d", info.MaxBytes)
+	maxFiles := fmt.Sprintf("%d", info.MaxFiles)
+	cmd := exec.CommandContext(ctx, c.opt.EosBinary, "-r", rootUID, rootGID, "quota", "set", "-u", info.Username, "-p", info.QuotaNode, "-v", maxBytes, "-i", maxFiles)
+	_, _, err := c.executeEOS(ctx, cmd)
+	if err != nil {
+		return err
+	}
+	return nil
+}
+
 // Touch creates a 0-size,0-replica file in the EOS namespace.
 func (c *Client) Touch(ctx context.Context, uid, gid, path string) error {
 	cmd := exec.CommandContext(ctx, "/usr/bin/eos", "-r", uid, gid, "file", "touch", path)
diff --git a/pkg/eosclient/eosclient.go b/pkg/eosclient/eosclient.go
index ec38bcf376..502630260d 100644
--- a/pkg/eosclient/eosclient.go
+++ b/pkg/eosclient/eosclient.go
@@ -38,6 +38,7 @@ type EOSClient interface {
 	SetAttr(ctx context.Context, uid, gid string, attr *Attribute, recursive bool, path string) error
 	UnsetAttr(ctx context.Context, uid, gid string, attr *Attribute, path string) error
 	GetQuota(ctx context.Context, username, rootUID, rootGID, path string) (*QuotaInfo, error)
+	SetQuota(ctx context.Context, rootUID, rootGID string, info *SetQuotaInfo) error
 	Touch(ctx context.Context, uid, gid, path string) error
 	Chown(ctx context.Context, uid, gid, chownUID, chownGID, path string) error
 	Chmod(ctx context.Context, uid, gid, mode, path string) error
@@ -99,3 +100,12 @@ type QuotaInfo struct {
 	AvailableBytes, UsedBytes   int
 	AvailableInodes, UsedInodes int
 }
+
+// SetQuotaInfo encapsulates the information needed to
+// create a quota space in EOS for a user
+type SetQuotaInfo struct {
+	Username  string
+	QuotaNode string
+	MaxBytes  uint64
+	MaxFiles  uint64
+}
diff --git a/pkg/eosclient/eosgrpc/eosgrpc.go b/pkg/eosclient/eosgrpc/eosgrpc.go
index 154dcdb1c5..b8a22e1782 100644
--- a/pkg/eosclient/eosgrpc/eosgrpc.go
+++ b/pkg/eosclient/eosgrpc/eosgrpc.go
@@ -588,6 +588,11 @@ func (c *Client) GetQuota(ctx context.Context, username, rootUID, rootGID, path
 	return nil, errtypes.NotSupported("eosgrpc: GetQuota not implemented")
 }
 
+// SetQuota sets the quota of a user on the quota node defined by path
+func (c *Client) SetQuota(ctx context.Context, rootUID, rootGID string, info *eosclient.SetQuotaInfo) error {
+	return errtypes.NotSupported("eosgrpc: SetQuota not implemented")
+}
+
 // Touch creates a 0-size,0-replica file in the EOS namespace.
 func (c *Client) Touch(ctx context.Context, uid, gid, path string) error {
 	log := appctx.GetLogger(ctx)
diff --git a/pkg/storage/utils/eosfs/config.go b/pkg/storage/utils/eosfs/config.go
index a05f41c843..515d7c0452 100644
--- a/pkg/storage/utils/eosfs/config.go
+++ b/pkg/storage/utils/eosfs/config.go
@@ -23,6 +23,15 @@ type Config struct {
 	// Namespace for metadata operations
 	Namespace string `mapstructure:"namespace"`
 
+	// QuotaNode for storing quota information
+	QuotaNode string `mapstructure:"quota_node"`
+
+	// DefaultQuotaBytes sets the default maximum bytes available for a user
+	DefaultQuotaBytes uint64 `mapstructure:"default_quota_bytes"`
+
+	// DefaultQuotaFiles sets the default maximum files available for a user
+	DefaultQuotaFiles uint64 `mapstructure:"default_quota_files"`
+
 	// ShadowNamespace for storing shadow data
 	ShadowNamespace string `mapstructure:"shadow_namespace"`
diff --git a/pkg/storage/utils/eosfs/eosfs.go b/pkg/storage/utils/eosfs/eosfs.go
index 01574982c8..0b6ea00556 100644
--- a/pkg/storage/utils/eosfs/eosfs.go
+++ b/pkg/storage/utils/eosfs/eosfs.go
@@ -76,6 +76,18 @@ func (c *Config) init() {
 		c.ShadowNamespace = path.Join(c.Namespace, ".shadow")
 	}
 
+	// Quota node defaults to namespace if empty
+	if c.QuotaNode == "" {
+		c.QuotaNode = c.Namespace
+	}
+
+	if c.DefaultQuotaBytes == 0 {
+		c.DefaultQuotaBytes = 1000000000000 // 1 TB
+	}
+	if c.DefaultQuotaFiles == 0 {
+		c.DefaultQuotaFiles = 1000000 // 1 Million
+	}
+
 	if c.ShareFolder == "" {
 		c.ShareFolder = "/MyShares"
 	}
@@ -913,6 +925,21 @@ func (fs *eosfs) createUserDir(ctx context.Context, u *userpb.User, path string,
 			return errors.Wrap(err, "eos: error setting attribute")
 		}
 	}
+
+	// set quota for user
+	quotaInfo := &eosclient.SetQuotaInfo{
+		Username:  u.Username,
+		MaxBytes:  fs.conf.DefaultQuotaBytes,
+		MaxFiles:  fs.conf.DefaultQuotaFiles,
+		QuotaNode: fs.conf.QuotaNode,
+	}
+
+	err = fs.c.SetQuota(ctx, uid, gid, quotaInfo)
+	if err != nil {
+		err := errors.Wrap(err, "eosfs: error setting quota")
+		return err
+	}
+
 	return nil
 }
 

From 02477a4b65daf9ccc207264ee707c3f7df55a6cd Mon Sep 17 00:00:00 2001
From: Hugo Gonzalez Labrador
Date: Mon, 15 Feb 2021 15:03:11 +0100
Subject: [PATCH 2/6] set changelog

---
 changelog/unreleased/eos-set-quota.md | 3 +++
 1 file changed, 3 insertions(+)
 create mode 100644 changelog/unreleased/eos-set-quota.md

diff --git a/changelog/unreleased/eos-set-quota.md b/changelog/unreleased/eos-set-quota.md
new file mode 100644
index 0000000000..ae63927da3
--- /dev/null
+++ b/changelog/unreleased/eos-set-quota.md
@@ -0,0 +1,3 @@
+Enhancement: Set quota when creating home directory in EOS
+
+https://github.com/cs3org/reva/pull/1477

From f21e233e81558c1c81383bc42678b279f71b9926 Mon Sep 17 00:00:00 2001
From: Hugo Gonzalez Labrador
Date: Mon, 15 Feb 2021 15:29:19 +0100
Subject: [PATCH 3/6] wrap error when setting grant

---
 1                                             | 528 ++++++++++++++++++
 .../services/gateway/usershareprovider.go     |   2 +-
 2 files changed, 529 insertions(+), 1 deletion(-)
 create mode 100644 1

diff --git a/1 b/1
new file mode 100644
index 0000000000..f9d0395c37
--- /dev/null
+++ b/1
@@ -0,0 +1,528 @@
+// Copyright 2018-2021 CERN
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// In applying this license, CERN does not waive the privileges and immunities
+// granted to it by virtue of its status as an Intergovernmental Organization
+// or submit itself to any jurisdiction.
+
+package gateway
+
+import (
+	"context"
+	"fmt"
+	"path"
+
+	rpc "github.com/cs3org/go-cs3apis/cs3/rpc/v1beta1"
+	collaboration "github.com/cs3org/go-cs3apis/cs3/sharing/collaboration/v1beta1"
+	provider "github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1"
+	"github.com/cs3org/reva/pkg/appctx"
+	"github.com/cs3org/reva/pkg/errtypes"
+	"github.com/cs3org/reva/pkg/rgrpc/status"
+	"github.com/cs3org/reva/pkg/rgrpc/todo/pool"
+	"github.com/pkg/errors"
+)
+
+// TODO(labkode): add multi-phase commit logic when commit share or commit ref is enabled.
+func (s *svc) CreateShare(ctx context.Context, req *collaboration.CreateShareRequest) (*collaboration.CreateShareResponse, error) {
+
+	if s.isSharedFolder(ctx, req.ResourceInfo.GetPath()) {
+		return nil, errors.New("gateway: can't share the share folder itself")
+	}
+
+	c, err := pool.GetUserShareProviderClient(s.c.UserShareProviderEndpoint)
+	if err != nil {
+		return &collaboration.CreateShareResponse{
+			Status: status.NewInternal(ctx, err, "error getting user share provider client"),
+		}, nil
+	}
+	// TODO the user share manager needs to be able to decide if the current user is allowed to create that share (and not eg. increase permissions)
+	// jfd: AFAICT this can only be determined by a storage driver - either the storage provider is queried first or the share manager needs to access the storage using a storage driver
+	res, err := c.CreateShare(ctx, req)
+	if err != nil {
+		return nil, errors.Wrap(err, "gateway: error calling CreateShare")
+	}
+	if res.Status.Code != rpc.Code_CODE_OK {
+		return res, nil
+	}
+
+	// if we don't need to commit we return earlier
+	if !s.c.CommitShareToStorageGrant && !s.c.CommitShareToStorageRef {
+		return res, nil
+	}
+
+	// TODO(labkode): if both commits are enabled they could be done concurrently.
+	if s.c.CommitShareToStorageGrant {
+		addGrantStatus, err := s.addGrant(ctx, req.ResourceInfo.Id, req.Grant.Grantee, req.Grant.Permissions.Permissions)
+		if err != nil {
+			return nil, errors.Wrap(err, "gateway: error adding grant to storage")
+		}
+		if addGrantStatus.Code != rpc.Code_CODE_OK {
+			return &collaboration.CreateShareResponse{
+				Status: addGrantStatus,
+			}, err
+		}
+	}
+
+	return res, nil
+}
+
+func (s *svc) RemoveShare(ctx context.Context, req *collaboration.RemoveShareRequest) (*collaboration.RemoveShareResponse, error) {
+	c, err := pool.GetUserShareProviderClient(s.c.UserShareProviderEndpoint)
+	if err != nil {
+		return &collaboration.RemoveShareResponse{
+			Status: status.NewInternal(ctx, err, "error getting user share provider client"),
+		}, nil
+	}
+
+	// if we need to commit the share, we need the resource it points to.
+	var share *collaboration.Share
+	if s.c.CommitShareToStorageGrant || s.c.CommitShareToStorageRef {
+		getShareReq := &collaboration.GetShareRequest{
+			Ref: req.Ref,
+		}
+		getShareRes, err := c.GetShare(ctx, getShareReq)
+		if err != nil {
+			return nil, errors.Wrap(err, "gateway: error calling GetShare")
+		}
+
+		if getShareRes.Status.Code != rpc.Code_CODE_OK {
+			res := &collaboration.RemoveShareResponse{
+				Status: status.NewInternal(ctx, status.NewErrorFromCode(getShareRes.Status.Code, "gateway"),
+					"error getting share when committing to the storage"),
+			}
+			return res, nil
+		}
+		share = getShareRes.Share
+	}
+
+	res, err := c.RemoveShare(ctx, req)
+	if err != nil {
+		return nil, errors.Wrap(err, "gateway: error calling RemoveShare")
+	}
+
+	// if we don't need to commit we return earlier
+	if !s.c.CommitShareToStorageGrant && !s.c.CommitShareToStorageRef {
+		return res, nil
+	}
+
+	// TODO(labkode): if both commits are enabled they could be done concurrently.
+	if s.c.CommitShareToStorageGrant {
+		removeGrantStatus, err := s.removeGrant(ctx, share.ResourceId, share.Grantee, share.Permissions.Permissions)
+		if removeGrantStatus.Code != rpc.Code_CODE_OK {
+			return &collaboration.RemoveShareResponse{
+				Status: removeGrantStatus,
+			}, err
+		}
+	}
+
+	return res, nil
+}
+
+// TODO(labkode): we need to validate share state vs storage grant and storage ref
+// If there are any inconsistencies, the share needs to be flagged as invalid and a background process
+// or active fix needs to be performed.
+func (s *svc) GetShare(ctx context.Context, req *collaboration.GetShareRequest) (*collaboration.GetShareResponse, error) {
+	return s.getShare(ctx, req)
+}
+
+func (s *svc) getShare(ctx context.Context, req *collaboration.GetShareRequest) (*collaboration.GetShareResponse, error) {
+	c, err := pool.GetUserShareProviderClient(s.c.UserShareProviderEndpoint)
+	if err != nil {
+		err = errors.Wrap(err, "gateway: error calling GetUserShareProviderClient")
+		return &collaboration.GetShareResponse{
+			Status: status.NewInternal(ctx, err, "error getting user share provider client"),
+		}, nil
+	}
+
+	res, err := c.GetShare(ctx, req)
+	if err != nil {
+		return nil, errors.Wrap(err, "gateway: error calling GetShare")
+	}
+
+	return res, nil
+}
+
+// TODO(labkode): read GetShare comment.
+func (s *svc) ListShares(ctx context.Context, req *collaboration.ListSharesRequest) (*collaboration.ListSharesResponse, error) {
+	c, err := pool.GetUserShareProviderClient(s.c.UserShareProviderEndpoint)
+	if err != nil {
+		err = errors.Wrap(err, "gateway: error calling GetUserShareProviderClient")
+		return &collaboration.ListSharesResponse{
+			Status: status.NewInternal(ctx, err, "error getting user share provider client"),
+		}, nil
+	}
+
+	res, err := c.ListShares(ctx, req)
+	if err != nil {
+		return nil, errors.Wrap(err, "gateway: error calling ListShares")
+	}
+
+	return res, nil
+}
+
+func (s *svc) UpdateShare(ctx context.Context, req *collaboration.UpdateShareRequest) (*collaboration.UpdateShareResponse, error) {
+	c, err := pool.GetUserShareProviderClient(s.c.UserShareProviderEndpoint)
+	if err != nil {
+		err = errors.Wrap(err, "gateway: error calling GetUserShareProviderClient")
+		return &collaboration.UpdateShareResponse{
+			Status: status.NewInternal(ctx, err, "error getting share provider client"),
+		}, nil
+	}
+
+	res, err := c.UpdateShare(ctx, req)
+	if err != nil {
+		return nil, errors.Wrap(err, "gateway: error calling UpdateShare")
+	}
+
+	// if we don't need to commit we return earlier
+	if !s.c.CommitShareToStorageGrant && !s.c.CommitShareToStorageRef {
+		return res, nil
+	}
+
+	// TODO(labkode): if both commits are enabled they could be done concurrently.
+
+	if s.c.CommitShareToStorageGrant {
+		getShareReq := &collaboration.GetShareRequest{
+			Ref: req.Ref,
+		}
+		getShareRes, err := c.GetShare(ctx, getShareReq)
+		if err != nil {
+			return nil, errors.Wrap(err, "gateway: error calling GetShare")
+		}
+
+		if getShareRes.Status.Code != rpc.Code_CODE_OK {
+			return &collaboration.UpdateShareResponse{
+				Status: status.NewInternal(ctx, status.NewErrorFromCode(getShareRes.Status.Code, "gateway"),
+					"error getting share when committing to the share"),
+			}, nil
+		}
+		updateGrantStatus, err := s.updateGrant(ctx, getShareRes.GetShare().GetResourceId(),
+			getShareRes.GetShare().GetGrantee(),
+			getShareRes.GetShare().GetPermissions().GetPermissions())
+
+		if err != nil {
+			return nil, errors.Wrap(err, "gateway: error calling updateGrant")
+		}
+
+		if updateGrantStatus.Code != rpc.Code_CODE_OK {
+			return &collaboration.UpdateShareResponse{
+				Status: updateGrantStatus,
+			}, nil
+		}
+	}
+
+	return res, nil
+}
+
+// TODO(labkode): listing received shares just goes to the user share manager and gets the list of
+// received shares. The display name of the shares should be a friendly name, like the basename
+// of the original file.
+func (s *svc) ListReceivedShares(ctx context.Context, req *collaboration.ListReceivedSharesRequest) (*collaboration.ListReceivedSharesResponse, error) {
+	c, err := pool.GetUserShareProviderClient(s.c.UserShareProviderEndpoint)
+	if err != nil {
+		err = errors.Wrap(err, "gateway: error calling GetUserShareProviderClient")
+		return &collaboration.ListReceivedSharesResponse{
+			Status: status.NewInternal(ctx, err, "error getting share provider client"),
+		}, nil
+	}
+
+	res, err := c.ListReceivedShares(ctx, req)
+	if err != nil {
+		return nil, errors.Wrap(err, "gateway: error calling ListReceivedShares")
+	}
+	return res, nil
+}
+
+func (s *svc) GetReceivedShare(ctx context.Context, req *collaboration.GetReceivedShareRequest) (*collaboration.GetReceivedShareResponse, error) {
+	c, err := pool.GetUserShareProviderClient(s.c.UserShareProviderEndpoint)
+	if err != nil {
+		err := errors.Wrap(err, "gateway: error getting user share provider client")
+		return &collaboration.GetReceivedShareResponse{
+			Status: status.NewInternal(ctx, err, "error getting received share"),
+		}, nil
+	}
+
+	res, err := c.GetReceivedShare(ctx, req)
+	if err != nil {
+		return nil, errors.Wrap(err, "gateway: error calling GetReceivedShare")
+	}
+
+	return res, nil
+}
+
+// When updating a received share:
+// if the update contains update for displayName:
+// 1) if received share is mounted: we also do a rename in the storage
+// 2) if received share is not mounted: we only rename in user share provider.
+func (s *svc) UpdateReceivedShare(ctx context.Context, req *collaboration.UpdateReceivedShareRequest) (*collaboration.UpdateReceivedShareResponse, error) {
+	log := appctx.GetLogger(ctx)
+	c, err := pool.GetUserShareProviderClient(s.c.UserShareProviderEndpoint)
+	if err != nil {
+		err = errors.Wrap(err, "gateway: error calling GetUserShareProviderClient")
+		return &collaboration.UpdateReceivedShareResponse{
+			Status: status.NewInternal(ctx, err, "error getting share provider client"),
+		}, nil
+	}
+
+	res, err := c.UpdateReceivedShare(ctx, req)
+	if err != nil {
+		log.Err(err).Msg("gateway: error calling UpdateReceivedShare")
+		return &collaboration.UpdateReceivedShareResponse{
+			Status: &rpc.Status{
+				Code: rpc.Code_CODE_INTERNAL,
+			},
+		}, nil
+	}
+
+	// error failing to update share state.
+	if res.Status.Code != rpc.Code_CODE_OK {
+		return res, nil
+	}
+
+	// if we don't need to create/delete references then we return early.
+	if !s.c.CommitShareToStorageRef {
+		return res, nil
+	}
+
+	// we don't commit to storage invalid update fields or empty display names.
+	if req.Field.GetState() == collaboration.ShareState_SHARE_STATE_INVALID && req.Field.GetDisplayName() == "" {
+		log.Error().Msg("the update field is invalid, aborting reference manipulation")
+		return res, nil
+
+	}
+
+	// TODO(labkode): if update field is displayName we need to do a rename on the storage to align
+	// share display name and storage filename.
+	if req.Field.GetState() != collaboration.ShareState_SHARE_STATE_INVALID {
+		if req.Field.GetState() == collaboration.ShareState_SHARE_STATE_ACCEPTED {
+			getShareReq := &collaboration.GetReceivedShareRequest{Ref: req.Ref}
+			getShareRes, err := s.GetReceivedShare(ctx, getShareReq)
+			if err != nil {
+				log.Err(err).Msg("gateway: error calling GetReceivedShare")
+				return &collaboration.UpdateReceivedShareResponse{
+					Status: &rpc.Status{
+						Code: rpc.Code_CODE_INTERNAL,
+					},
+				}, nil
+			}
+
+			if getShareRes.Status.Code != rpc.Code_CODE_OK {
+				log.Error().Msg("gateway: error calling GetReceivedShare")
+				return &collaboration.UpdateReceivedShareResponse{
+					Status: &rpc.Status{
+						Code: rpc.Code_CODE_INTERNAL,
+					},
+				}, nil
+			}
+
+			share := getShareRes.Share
+			if share == nil {
+				panic("gateway: error updating a received share: the share is nil")
+			}
+			createRefStatus, err := s.createReference(ctx, share.Share.ResourceId)
+			return &collaboration.UpdateReceivedShareResponse{
+				Status: createRefStatus,
+			}, err
+		}
+	}
+
+	// TODO(labkode): implement updating the display name
+	err = errors.New("gateway: update of display name is not yet implemented")
+	return &collaboration.UpdateReceivedShareResponse{
+		Status: status.NewUnimplemented(ctx, err, "error updating received share"),
+	}, nil
+}
+
+func (s *svc) createReference(ctx context.Context, resourceID *provider.ResourceId) (*rpc.Status, error) {
+
+	log := appctx.GetLogger(ctx)
+
+	// get the metadata about the share
+	c, err := s.findByID(ctx, resourceID)
+	if err != nil {
+		if _, ok := err.(errtypes.IsNotFound); ok {
+			return status.NewNotFound(ctx, "storage provider not found"), nil
+		}
+		return status.NewInternal(ctx, err, "error finding storage provider"), nil
+	}
+
+	statReq := &provider.StatRequest{
+		Ref: &provider.Reference{
+			Spec: &provider.Reference_Id{
+				Id: resourceID,
+			},
+		},
+	}
+
+	statRes, err := c.Stat(ctx, statReq)
+	if err != nil {
+		return status.NewInternal(ctx, err, "gateway: error calling Stat for the share resource id: "+resourceID.String()), nil
+	}
+
+	if statRes.Status.Code != rpc.Code_CODE_OK {
+		err := status.NewErrorFromCode(statRes.Status.GetCode(), "gateway")
+		log.Err(err).Msg("gateway: Stat failed on the share resource id: " + resourceID.String())
+		return status.NewInternal(ctx, err, "error updating received share"), nil
+	}
+
+	homeRes, err := s.GetHome(ctx, &provider.GetHomeRequest{})
+	if err != nil {
+		err := errors.Wrap(err, "gateway: error calling GetHome")
+		return status.NewInternal(ctx, err, "error updating received share"), nil
+	}
+
+	// reference path is the home path + some name
+	// CreateReference(cs3://home/MyShares/x)
+	// that can end up in the storage provider like:
+	// /eos/user/.shadow/g/gonzalhu/MyShares/x
+	// A reference can point to any place, for that reason the namespace starts with cs3://
+	// For example, a reference can point also to a dropbox resource:
+	// CreateReference(dropbox://x/y/z)
+	// It is the responsibility of the gateway to resolve these references and merge the response back
+	// from the main request.
+	// TODO(labkode): the name of the share should be the filename it points to by default.
+	refPath := path.Join(homeRes.Path, s.c.ShareFolder, path.Base(statRes.Info.Path))
+	log.Info().Msg("mount path will be:" + refPath)
+
+	createRefReq := &provider.CreateReferenceRequest{
+		Path: refPath,
+		// cs3 is the Scheme and %s/%s is the Opaque parts of a net.URL.
+		TargetUri: fmt.Sprintf("cs3:%s/%s", resourceID.GetStorageId(), resourceID.GetOpaqueId()),
+	}
+
+	c, err = s.findByPath(ctx, refPath)
+	if err != nil {
+		if _, ok := err.(errtypes.IsNotFound); ok {
+			return status.NewNotFound(ctx, "storage provider not found"), nil
+		}
+		return status.NewInternal(ctx, err, "error finding storage provider"), nil
+	}
+
+	createRefRes, err := c.CreateReference(ctx, createRefReq)
+	if err != nil {
+		log.Err(err).Msg("gateway: error calling GetHome")
+		return &rpc.Status{
+			Code: rpc.Code_CODE_INTERNAL,
+		}, nil
+	}
+
+	if createRefRes.Status.Code != rpc.Code_CODE_OK {
+		err := status.NewErrorFromCode(createRefRes.Status.GetCode(), "gateway")
+		return status.NewInternal(ctx, err, "error updating received share"), nil
+	}
+
+	return status.NewOK(ctx), nil
+}
+
+func (s *svc) addGrant(ctx context.Context, id *provider.ResourceId, g *provider.Grantee, p *provider.ResourcePermissions) (*rpc.Status, error) {
+
+	grantReq := &provider.AddGrantRequest{
+		Ref: &provider.Reference{
+			Spec: &provider.Reference_Id{
+				Id: id,
+			},
+		},
+		Grant: &provider.Grant{
+			Grantee:     g,
+			Permissions: p,
+		},
+	}
+
+	c, err := s.findByID(ctx, id)
+	if err != nil {
+		if _, ok := err.(errtypes.IsNotFound); ok {
+			return status.NewNotFound(ctx, "storage provider not found"), nil
+		}
+		return status.NewInternal(ctx, err, "error finding storage provider"), nil
+	}
+
+	grantRes, err := c.AddGrant(ctx, grantReq)
+	if err != nil {
+		return nil, errors.Wrap(err, "gateway: error calling AddGrant")
+	}
+	if grantRes.Status.Code != rpc.Code_CODE_OK {
+		return status.NewInternal(ctx, status.NewErrorFromCode(grantRes.Status.Code, "gateway"),
+			"error committing share to storage grant"), nil
+	}
+
+	return status.NewOK(ctx), nil
+}
+
+func (s *svc) updateGrant(ctx context.Context, id *provider.ResourceId, g *provider.Grantee, p *provider.ResourcePermissions) (*rpc.Status, error) {
+
+	grantReq := &provider.UpdateGrantRequest{
+		Ref: &provider.Reference{
+			Spec: &provider.Reference_Id{
+				Id: id,
+			},
+		},
+		Grant: &provider.Grant{
+			Grantee:     g,
+			Permissions: p,
+		},
+	}
+
+	c, err := s.findByID(ctx, id)
+	if err != nil {
+		if _, ok := err.(errtypes.IsNotFound); ok {
+			return status.NewNotFound(ctx, "storage provider not found"), nil
+		}
+		return status.NewInternal(ctx, err, "error finding storage provider"), nil
+	}
+
+	grantRes, err := c.UpdateGrant(ctx, grantReq)
+	if err != nil {
+		return nil, errors.Wrap(err, "gateway: error calling UpdateGrant")
+	}
+	if grantRes.Status.Code != rpc.Code_CODE_OK {
+		return status.NewInternal(ctx, status.NewErrorFromCode(grantRes.Status.Code, "gateway"),
+			"error committing share to storage grant"), nil
+	}
+
+	return status.NewOK(ctx), nil
+}
+
+func (s *svc) removeGrant(ctx context.Context, id *provider.ResourceId, g *provider.Grantee, p *provider.ResourcePermissions) (*rpc.Status, error) {
+
+	grantReq := &provider.RemoveGrantRequest{
+		Ref: &provider.Reference{
+			Spec: &provider.Reference_Id{
+				Id: id,
+			},
+		},
+		Grant: &provider.Grant{
+			Grantee:     g,
+			Permissions: p,
+		},
+	}
+
+	c, err := s.findByID(ctx, id)
+	if err != nil {
+		if _, ok := err.(errtypes.IsNotFound); ok {
+			return status.NewNotFound(ctx, "storage provider not found"), nil
+		}
+		return status.NewInternal(ctx, err, "error finding storage provider"), nil
+	}
+
+	grantRes, err := c.RemoveGrant(ctx, grantReq)
+	if err != nil {
+		return nil, errors.Wrap(err, "gateway: error calling RemoveGrant")
+	}
+	if grantRes.Status.Code != rpc.Code_CODE_OK {
+		return status.NewInternal(ctx,
+			status.NewErrorFromCode(grantRes.Status.Code, "gateway"),
+			"error removing storage grant"), nil
+	}
+
+	return status.NewOK(ctx), nil
+}
diff --git a/internal/grpc/services/gateway/usershareprovider.go b/internal/grpc/services/gateway/usershareprovider.go
index c3a638f2fe..f9d0395c37 100644
--- a/internal/grpc/services/gateway/usershareprovider.go
+++ b/internal/grpc/services/gateway/usershareprovider.go
@@ -65,7 +65,7 @@ func (s *svc) CreateShare(ctx context.Context, req *collaboration.CreateShareReq
 	if s.c.CommitShareToStorageGrant {
 		addGrantStatus, err := s.addGrant(ctx, req.ResourceInfo.Id, req.Grant.Grantee, req.Grant.Permissions.Permissions)
 		if err != nil {
-			return nil, errors.New("gateway: error adding grant to storage")
+			return nil, errors.Wrap(err, "gateway: error adding grant to storage")
 		}
 		if addGrantStatus.Code != rpc.Code_CODE_OK {
 			return &collaboration.CreateShareResponse{

From 5c723ead59f7d9eef6d405059d816fe24832364f Mon Sep 17 00:00:00 2001
From: Hugo Gonzalez Labrador
Date: Mon, 15 Feb 2021 15:35:51 +0100
Subject: [PATCH 4/6] eosfs: move set quota logic to nominal home creation
 branch

---
 pkg/storage/utils/eosfs/eosfs.go | 34 +++++++++++++++++++-------------
 1 file changed, 20 insertions(+), 14 deletions(-)

diff --git a/pkg/storage/utils/eosfs/eosfs.go b/pkg/storage/utils/eosfs/eosfs.go
index 0b6ea00556..123c5286ea 100644
--- a/pkg/storage/utils/eosfs/eosfs.go
+++ b/pkg/storage/utils/eosfs/eosfs.go
@@ -850,6 +850,25 @@ func (fs *eosfs) createNominalHome(ctx context.Context) error {
 	}
 
 	err = fs.createUserDir(ctx, u, home, false)
+	if err != nil {
+		err := errors.Wrap(err, "eosfs: error creating user dir")
+		return err
+	}
+
+	// set quota for user
+	quotaInfo := &eosclient.SetQuotaInfo{
+		Username:  u.Username,
+		MaxBytes:  fs.conf.DefaultQuotaBytes,
+		MaxFiles:  fs.conf.DefaultQuotaFiles,
+		QuotaNode: fs.conf.QuotaNode,
+	}
+
+	err = fs.c.SetQuota(ctx, uid, gid, quotaInfo)
+	if err != nil {
+		err := errors.Wrap(err, "eosfs: error setting quota")
+		return err
+	}
+
 	return err
 }
 
@@ -926,20 +945,6 @@ func (fs *eosfs) createUserDir(ctx context.Context, u *userpb.User, path string,
 		}
 	}
 
-	// set quota for user
-	quotaInfo := &eosclient.SetQuotaInfo{
-		Username:  u.Username,
-		MaxBytes:  fs.conf.DefaultQuotaBytes,
-		MaxFiles:  fs.conf.DefaultQuotaFiles,
-		QuotaNode: fs.conf.QuotaNode,
-	}
-
-	err = fs.c.SetQuota(ctx, uid, gid, quotaInfo)
-	if err != nil {
-		err := errors.Wrap(err, "eosfs: error setting quota")
-		return err
-	}
-
 	return nil
 }
 
@@ -987,6 +992,7 @@ func (fs *eosfs) CreateReference(ctx context.Context, p string, targetURI *url.U
 	if err != nil {
 		return nil
 	}
+
 	if err := fs.createUserDir(ctx, u, tmp, false); err != nil {
 		err = errors.Wrapf(err, "eos: error creating temporary ref file")
 		return err

From 8e0b23b9e714bdbdb616cd38ac1e60d3f253c0ca Mon Sep 17 00:00:00 2001
From: Hugo Gonzalez Labrador
Date: Mon, 15 Feb 2021 15:45:06 +0100
Subject: [PATCH 5/6] just create ref-dir

---
 pkg/storage/utils/eosfs/eosfs.go | 12 ++++--------
 1 file changed, 4 insertions(+), 8 deletions(-)

diff --git a/pkg/storage/utils/eosfs/eosfs.go b/pkg/storage/utils/eosfs/eosfs.go
index 123c5286ea..4b71e2f071 100644
--- a/pkg/storage/utils/eosfs/eosfs.go
+++ b/pkg/storage/utils/eosfs/eosfs.go
@@ -973,11 +973,6 @@ func (fs *eosfs) CreateDir(ctx context.Context, p string) error {
 func (fs *eosfs) CreateReference(ctx context.Context, p string, targetURI *url.URL) error {
 	// TODO(labkode): for the time being we only allow to create references
 	// on the virtual share folder to not pollute the nominal user tree.
-	u, err := getUser(ctx)
-	if err != nil {
-		return errors.Wrap(err, "eos: no user in ctx")
-	}
-
 	if !fs.isShareFolder(ctx, p) {
 		return errtypes.PermissionDenied("eos: cannot create references outside the share folder: share_folder=" + fs.conf.ShareFolder + " path=" + p)
 	}
@@ -993,9 +988,10 @@ func (fs *eosfs) CreateReference(ctx context.Context, p string, targetURI *url.U
 		return nil
 	}
 
-	if err := fs.createUserDir(ctx, u, tmp, false); err != nil {
-		err = errors.Wrapf(err, "eos: error creating temporary ref file")
-		return err
+	err = fs.c.CreateDir(ctx, uid, gid, tmp)
+	if err != nil {
+		// EOS will return success on mkdir over an existing directory.
+		return errors.Wrap(err, "eos: error creating ref-dir")
 	}
 
 	// set xattr on ref

From 04660f933962d70a7d3a713b0c9777406adac4ea Mon Sep 17 00:00:00 2001
From: Hugo Gonzalez Labrador
Date: Mon, 15 Feb 2021 17:15:17 +0100
Subject: [PATCH 6/6] remove 1

---
 1 | 528 --------------------------------------------------------------
 1 file changed, 528 deletions(-)
 delete mode 100644 1

diff --git a/1 b/1
deleted file mode 100644
index f9d0395c37..0000000000
--- a/1
+++ /dev/null
@@ -1,528 +0,0 @@
-// Copyright 2018-2021 CERN
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-// In applying this license, CERN does not waive the privileges and immunities
-// granted to it by virtue of its status as an Intergovernmental Organization
-// or submit itself to any jurisdiction.
-
-package gateway
-
-import (
-	"context"
-	"fmt"
-	"path"
-
-	rpc "github.com/cs3org/go-cs3apis/cs3/rpc/v1beta1"
-	collaboration "github.com/cs3org/go-cs3apis/cs3/sharing/collaboration/v1beta1"
-	provider "github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1"
-	"github.com/cs3org/reva/pkg/appctx"
-	"github.com/cs3org/reva/pkg/errtypes"
-	"github.com/cs3org/reva/pkg/rgrpc/status"
-	"github.com/cs3org/reva/pkg/rgrpc/todo/pool"
-	"github.com/pkg/errors"
-)
-
-// TODO(labkode): add multi-phase commit logic when commit share or commit ref is enabled.
-func (s *svc) CreateShare(ctx context.Context, req *collaboration.CreateShareRequest) (*collaboration.CreateShareResponse, error) {
-
-	if s.isSharedFolder(ctx, req.ResourceInfo.GetPath()) {
-		return nil, errors.New("gateway: can't share the share folder itself")
-	}
-
-	c, err := pool.GetUserShareProviderClient(s.c.UserShareProviderEndpoint)
-	if err != nil {
-		return &collaboration.CreateShareResponse{
-			Status: status.NewInternal(ctx, err, "error getting user share provider client"),
-		}, nil
-	}
-	// TODO the user share manager needs to be able to decide if the current user is allowed to create that share (and not eg. increase permissions)
-	// jfd: AFAICT this can only be determined by a storage driver - either the storage provider is queried first or the share manager needs to access the storage using a storage driver
-	res, err := c.CreateShare(ctx, req)
-	if err != nil {
-		return nil, errors.Wrap(err, "gateway: error calling CreateShare")
-	}
-	if res.Status.Code != rpc.Code_CODE_OK {
-		return res, nil
-	}
-
-	// if we don't need to commit we return earlier
-	if !s.c.CommitShareToStorageGrant && !s.c.CommitShareToStorageRef {
-		return res, nil
-	}
-
-	// TODO(labkode): if both commits are enabled they could be done concurrently.
-	if s.c.CommitShareToStorageGrant {
-		addGrantStatus, err := s.addGrant(ctx, req.ResourceInfo.Id, req.Grant.Grantee, req.Grant.Permissions.Permissions)
-		if err != nil {
-			return nil, errors.Wrap(err, "gateway: error adding grant to storage")
-		}
-		if addGrantStatus.Code != rpc.Code_CODE_OK {
-			return &collaboration.CreateShareResponse{
-				Status: addGrantStatus,
-			}, err
-		}
-	}
-
-	return res, nil
-}
-
-func (s *svc) RemoveShare(ctx context.Context, req *collaboration.RemoveShareRequest) (*collaboration.RemoveShareResponse, error) {
-	c, err := pool.GetUserShareProviderClient(s.c.UserShareProviderEndpoint)
-	if err != nil {
-		return &collaboration.RemoveShareResponse{
-			Status: status.NewInternal(ctx, err, "error getting user share provider client"),
-		}, nil
-	}
-
-	// if we need to commit the share, we need the resource it points to.
-	var share *collaboration.Share
-	if s.c.CommitShareToStorageGrant || s.c.CommitShareToStorageRef {
-		getShareReq := &collaboration.GetShareRequest{
-			Ref: req.Ref,
-		}
-		getShareRes, err := c.GetShare(ctx, getShareReq)
-		if err != nil {
-			return nil, errors.Wrap(err, "gateway: error calling GetShare")
-		}
-
-		if getShareRes.Status.Code != rpc.Code_CODE_OK {
-			res := &collaboration.RemoveShareResponse{
-				Status: status.NewInternal(ctx, status.NewErrorFromCode(getShareRes.Status.Code, "gateway"),
-					"error getting share when committing to the storage"),
-			}
-			return res, nil
-		}
-		share = getShareRes.Share
-	}
-
-	res, err := c.RemoveShare(ctx, req)
-	if err != nil {
-		return nil, errors.Wrap(err, "gateway: error calling RemoveShare")
-	}
-
-	// if we don't need to commit we return earlier
-	if !s.c.CommitShareToStorageGrant && !s.c.CommitShareToStorageRef {
-		return res, nil
-	}
-
-	// TODO(labkode): if both commits are enabled they could be done concurrently.
-	if s.c.CommitShareToStorageGrant {
-		removeGrantStatus, err := s.removeGrant(ctx, share.ResourceId, share.Grantee, share.Permissions.Permissions)
-		if removeGrantStatus.Code != rpc.Code_CODE_OK {
-			return &collaboration.RemoveShareResponse{
-				Status: removeGrantStatus,
-			}, err
-		}
-	}
-
-	return res, nil
-}
-
-// TODO(labkode): we need to validate share state vs storage grant and storage ref
-// If there are any inconsistencies, the share needs to be flagged as invalid and a background process
-// or active fix needs to be performed.
-func (s *svc) GetShare(ctx context.Context, req *collaboration.GetShareRequest) (*collaboration.GetShareResponse, error) {
-	return s.getShare(ctx, req)
-}
-
-func (s *svc) getShare(ctx context.Context, req *collaboration.GetShareRequest) (*collaboration.GetShareResponse, error) {
-	c, err := pool.GetUserShareProviderClient(s.c.UserShareProviderEndpoint)
-	if err != nil {
-		err = errors.Wrap(err, "gateway: error calling GetUserShareProviderClient")
-		return &collaboration.GetShareResponse{
-			Status: status.NewInternal(ctx, err, "error getting user share provider client"),
-		}, nil
-	}
-
-	res, err := c.GetShare(ctx, req)
-	if err != nil {
-		return nil, errors.Wrap(err, "gateway: error calling GetShare")
-	}
-
-	return res, nil
-}
-
-// TODO(labkode): read GetShare comment.
-func (s *svc) ListShares(ctx context.Context, req *collaboration.ListSharesRequest) (*collaboration.ListSharesResponse, error) {
-	c, err := pool.GetUserShareProviderClient(s.c.UserShareProviderEndpoint)
-	if err != nil {
-		err = errors.Wrap(err, "gateway: error calling GetUserShareProviderClient")
-		return &collaboration.ListSharesResponse{
-			Status: status.NewInternal(ctx, err, "error getting user share provider client"),
-		}, nil
-	}
-
-	res, err := c.ListShares(ctx, req)
-	if err != nil {
-		return nil, errors.Wrap(err, "gateway: error calling ListShares")
-	}
-
-	return res, nil
-}
-
-func (s *svc) UpdateShare(ctx context.Context, req *collaboration.UpdateShareRequest) (*collaboration.UpdateShareResponse, error) {
-	c, err := pool.GetUserShareProviderClient(s.c.UserShareProviderEndpoint)
-	if err != nil {
-		err = errors.Wrap(err, "gateway: error calling GetUserShareProviderClient")
-		return &collaboration.UpdateShareResponse{
-			Status: status.NewInternal(ctx, err, "error getting share provider client"),
-		}, nil
-	}
-
-	res, err := c.UpdateShare(ctx, req)
-	if err != nil {
-		return nil, errors.Wrap(err, "gateway: error calling UpdateShare")
-	}
-
-	// if we don't need to commit we return earlier
-	if !s.c.CommitShareToStorageGrant && !s.c.CommitShareToStorageRef {
-		return res, nil
-	}
-
-	// TODO(labkode): if both commits are enabled they could be done concurrently.
-
-	if s.c.CommitShareToStorageGrant {
-		getShareReq := &collaboration.GetShareRequest{
-			Ref: req.Ref,
-		}
-		getShareRes, err := c.GetShare(ctx, getShareReq)
-		if err != nil {
-			return nil, errors.Wrap(err, "gateway: error calling GetShare")
-		}
-
-		if getShareRes.Status.Code != rpc.Code_CODE_OK {
-			return &collaboration.UpdateShareResponse{
-				Status: status.NewInternal(ctx, status.NewErrorFromCode(getShareRes.Status.Code, "gateway"),
-					"error getting share when committing to the share"),
-			}, nil
-		}
-		updateGrantStatus, err := s.updateGrant(ctx, getShareRes.GetShare().GetResourceId(),
-			getShareRes.GetShare().GetGrantee(),
-			getShareRes.GetShare().GetPermissions().GetPermissions())
-
-		if err != nil {
-			return nil, errors.Wrap(err, "gateway: error calling updateGrant")
-		}
-
-		if updateGrantStatus.Code != rpc.Code_CODE_OK {
-			return &collaboration.UpdateShareResponse{
-				Status: updateGrantStatus,
-			}, nil
-		}
-	}
-
-	return res, nil
-}
-
-// TODO(labkode): listing received shares just goes to the user share manager and gets the list of
-// received shares. The display name of the shares should be a friendly name, like the basename
-// of the original file.
-func (s *svc) ListReceivedShares(ctx context.Context, req *collaboration.ListReceivedSharesRequest) (*collaboration.ListReceivedSharesResponse, error) {
-	c, err := pool.GetUserShareProviderClient(s.c.UserShareProviderEndpoint)
-	if err != nil {
-		err = errors.Wrap(err, "gateway: error calling GetUserShareProviderClient")
-		return &collaboration.ListReceivedSharesResponse{
-			Status: status.NewInternal(ctx, err, "error getting share provider client"),
-		}, nil
-	}
-
-	res, err := c.ListReceivedShares(ctx, req)
-	if err != nil {
-		return nil, errors.Wrap(err, "gateway: error calling ListReceivedShares")
-	}
-	return res, nil
-}
-
-func (s *svc) GetReceivedShare(ctx context.Context, req *collaboration.GetReceivedShareRequest) (*collaboration.GetReceivedShareResponse, error) {
-	c, err := pool.GetUserShareProviderClient(s.c.UserShareProviderEndpoint)
-	if err != nil {
-		err := errors.Wrap(err, "gateway: error getting user share provider client")
-		return &collaboration.GetReceivedShareResponse{
-			Status: status.NewInternal(ctx, err, "error getting received share"),
-		}, nil
-	}
-
-	res, err := c.GetReceivedShare(ctx, req)
-	if err != nil {
-		return nil, errors.Wrap(err, "gateway: error calling GetReceivedShare")
-	}
-
-	return res, nil
-}
-
-// When updating a received share:
-// if the update contains update for displayName:
-// 1) if received share is mounted: we also do a rename in the storage
-// 2) if received share is not mounted: we only rename in user share provider.
-func (s *svc) UpdateReceivedShare(ctx context.Context, req *collaboration.UpdateReceivedShareRequest) (*collaboration.UpdateReceivedShareResponse, error) {
-	log := appctx.GetLogger(ctx)
-	c, err := pool.GetUserShareProviderClient(s.c.UserShareProviderEndpoint)
-	if err != nil {
-		err = errors.Wrap(err, "gateway: error calling GetUserShareProviderClient")
-		return &collaboration.UpdateReceivedShareResponse{
-			Status: status.NewInternal(ctx, err, "error getting share provider client"),
-		}, nil
-	}
-
-	res, err := c.UpdateReceivedShare(ctx, req)
-	if err != nil {
-		log.Err(err).Msg("gateway: error calling UpdateReceivedShare")
-		return &collaboration.UpdateReceivedShareResponse{
-			Status: &rpc.Status{
-				Code: rpc.Code_CODE_INTERNAL,
-			},
-		}, nil
-	}
-
-	// error failing to update share state.
-	if res.Status.Code != rpc.Code_CODE_OK {
-		return res, nil
-	}
-
-	// if we don't need to create/delete references then we return early.
-	if !s.c.CommitShareToStorageRef {
-		return res, nil
-	}
-
-	// we don't commit to storage invalid update fields or empty display names.
-	if req.Field.GetState() == collaboration.ShareState_SHARE_STATE_INVALID && req.Field.GetDisplayName() == "" {
-		log.Error().Msg("the update field is invalid, aborting reference manipulation")
-		return res, nil
-
-	}
-
-	// TODO(labkode): if update field is displayName we need to do a rename on the storage to align
-	// share display name and storage filename.
-	if req.Field.GetState() != collaboration.ShareState_SHARE_STATE_INVALID {
-		if req.Field.GetState() == collaboration.ShareState_SHARE_STATE_ACCEPTED {
-			getShareReq := &collaboration.GetReceivedShareRequest{Ref: req.Ref}
-			getShareRes, err := s.GetReceivedShare(ctx, getShareReq)
-			if err != nil {
-				log.Err(err).Msg("gateway: error calling GetReceivedShare")
-				return &collaboration.UpdateReceivedShareResponse{
-					Status: &rpc.Status{
-						Code: rpc.Code_CODE_INTERNAL,
-					},
-				}, nil
-			}
-
-			if getShareRes.Status.Code != rpc.Code_CODE_OK {
-				log.Error().Msg("gateway: error calling GetReceivedShare")
-				return &collaboration.UpdateReceivedShareResponse{
-					Status: &rpc.Status{
-						Code: rpc.Code_CODE_INTERNAL,
-					},
-				}, nil
-			}
-
-			share := getShareRes.Share
-			if share == nil {
-				panic("gateway: error updating a received share: the share is nil")
-			}
-			createRefStatus, err := s.createReference(ctx, share.Share.ResourceId)
-			return &collaboration.UpdateReceivedShareResponse{
-				Status: createRefStatus,
-			}, err
-		}
-	}
-
-	// TODO(labkode): implement updating the display name
-	err = errors.New("gateway: update of display name is not yet implemented")
-	return &collaboration.UpdateReceivedShareResponse{
-		Status: status.NewUnimplemented(ctx, err, "error updating received share"),
-	}, nil
-}
-
-func (s *svc) createReference(ctx context.Context, resourceID *provider.ResourceId) (*rpc.Status, error) {
-
-	log := appctx.GetLogger(ctx)
-
-	// get the metadata about the share
-	c, err := s.findByID(ctx, resourceID)
-	if err != nil {
-		if _, ok := err.(errtypes.IsNotFound); ok {
-			return status.NewNotFound(ctx, "storage provider not found"), nil
-		}
-		return status.NewInternal(ctx, err, "error finding storage provider"), nil
-	}
-
-	statReq := &provider.StatRequest{
-		Ref: &provider.Reference{
-			Spec: &provider.Reference_Id{
-				Id: resourceID,
-			},
-		},
-	}
-
-	statRes, err := c.Stat(ctx, statReq)
-	if err != nil {
-		return status.NewInternal(ctx, err, "gateway: error calling Stat for the share resource id: "+resourceID.String()), nil
-	}
-
-	if statRes.Status.Code != rpc.Code_CODE_OK {
-		err := status.NewErrorFromCode(statRes.Status.GetCode(), "gateway")
-		log.Err(err).Msg("gateway: Stat failed on the share resource id: " + resourceID.String())
-		return status.NewInternal(ctx, err, "error updating received share"), nil
-	}
-
-	homeRes, err := s.GetHome(ctx, &provider.GetHomeRequest{})
-	if err != nil {
-		err := errors.Wrap(err, "gateway: error calling GetHome")
-		return status.NewInternal(ctx, err, "error updating received share"), nil
-	}
-
-	// reference path is the home path + some name
-	// CreateReference(cs3://home/MyShares/x)
-	// that can end up in the storage provider like:
-	// /eos/user/.shadow/g/gonzalhu/MyShares/x
-	// A reference can point to any place, for that reason the namespace starts with cs3://
-	// For example, a reference can point also to a dropbox resource:
-	// CreateReference(dropbox://x/y/z)
-	// It is the responsibility of the gateway to resolve these references and merge the response back
-	// from the main request.
-	// TODO(labkode): the name of the share should be the filename it points to by default.
-	refPath := path.Join(homeRes.Path, s.c.ShareFolder, path.Base(statRes.Info.Path))
-	log.Info().Msg("mount path will be:" + refPath)
-
-	createRefReq := &provider.CreateReferenceRequest{
-		Path: refPath,
-		// cs3 is the Scheme and %s/%s is the Opaque parts of a net.URL.
-		TargetUri: fmt.Sprintf("cs3:%s/%s", resourceID.GetStorageId(), resourceID.GetOpaqueId()),
-	}
-
-	c, err = s.findByPath(ctx, refPath)
-	if err != nil {
-		if _, ok := err.(errtypes.IsNotFound); ok {
-			return status.NewNotFound(ctx, "storage provider not found"), nil
-		}
-		return status.NewInternal(ctx, err, "error finding storage provider"), nil
-	}
-
-	createRefRes, err := c.CreateReference(ctx, createRefReq)
-	if err != nil {
-		log.Err(err).Msg("gateway: error calling GetHome")
-		return &rpc.Status{
-			Code: rpc.Code_CODE_INTERNAL,
-		}, nil
-	}
-
-	if createRefRes.Status.Code != rpc.Code_CODE_OK {
-		err := status.NewErrorFromCode(createRefRes.Status.GetCode(), "gateway")
-		return status.NewInternal(ctx, err, "error updating received share"), nil
-	}
-
-	return status.NewOK(ctx), nil
-}
-
-func (s *svc) addGrant(ctx context.Context, id *provider.ResourceId, g *provider.Grantee, p *provider.ResourcePermissions) (*rpc.Status, error) {
-
-	grantReq := &provider.AddGrantRequest{
-		Ref: &provider.Reference{
-			Spec: &provider.Reference_Id{
-				Id: id,
-			},
-		},
-		Grant: &provider.Grant{
-			Grantee:     g,
-			Permissions: p,
-		},
-	}
-
-	c, err := s.findByID(ctx, id)
-	if err != nil {
-		if _, ok := err.(errtypes.IsNotFound); ok {
-			return status.NewNotFound(ctx, "storage provider not found"), nil
-		}
-		return status.NewInternal(ctx, err, "error finding storage provider"), nil
-	}
-
-	grantRes, err := c.AddGrant(ctx, grantReq)
-	if err != nil {
-		return nil, errors.Wrap(err, "gateway: error calling AddGrant")
-	}
-	if grantRes.Status.Code != rpc.Code_CODE_OK {
-		return status.NewInternal(ctx, status.NewErrorFromCode(grantRes.Status.Code, "gateway"),
-			"error committing share to storage grant"), nil
-	}
-
-	return status.NewOK(ctx), nil
-}
-
-func (s *svc) updateGrant(ctx context.Context, id *provider.ResourceId, g *provider.Grantee, p *provider.ResourcePermissions) (*rpc.Status, error) {
-
-	grantReq := &provider.UpdateGrantRequest{
-		Ref: &provider.Reference{
-			Spec: &provider.Reference_Id{
-				Id: id,
-			},
-		},
-		Grant: &provider.Grant{
-			Grantee:     g,
-			Permissions: p,
-		},
-	}
-
-	c, err := s.findByID(ctx, id)
-	if err != nil {
-		if _, ok := err.(errtypes.IsNotFound); ok {
-			return status.NewNotFound(ctx, "storage provider not found"), nil
-		}
-		return status.NewInternal(ctx, err, "error finding storage provider"), nil
-	}
-
-	grantRes, err := c.UpdateGrant(ctx, grantReq)
-	if err != nil {
-		return nil, errors.Wrap(err, "gateway: error calling UpdateGrant")
-	}
-	if grantRes.Status.Code != rpc.Code_CODE_OK {
-		return status.NewInternal(ctx, status.NewErrorFromCode(grantRes.Status.Code, "gateway"),
-			"error committing share to storage grant"), nil
-	}
-
-	return status.NewOK(ctx), nil
-}
-
-func (s *svc) removeGrant(ctx context.Context, id *provider.ResourceId, g *provider.Grantee, p *provider.ResourcePermissions) (*rpc.Status, error) {
-
-	grantReq := &provider.RemoveGrantRequest{
-		Ref: &provider.Reference{
-			Spec: &provider.Reference_Id{
-				Id: id,
-			},
-		},
-		Grant: &provider.Grant{
-			Grantee:     g,
-			Permissions: p,
-		},
-	}
-
-	c, err := s.findByID(ctx, id)
-	if err != nil {
-		if _, ok := err.(errtypes.IsNotFound); ok {
-			return status.NewNotFound(ctx, "storage provider not found"), nil
-		}
-		return status.NewInternal(ctx, err, "error finding storage provider"), nil
-	}
-
-	grantRes, err := c.RemoveGrant(ctx, grantReq)
-	if err != nil {
-		return nil, errors.Wrap(err, "gateway: error calling RemoveGrant")
-	}
-	if grantRes.Status.Code != rpc.Code_CODE_OK {
-		return status.NewInternal(ctx,
-			status.NewErrorFromCode(grantRes.Status.Code, "gateway"),
-			"error removing storage grant"), nil
-	}
-
-	return status.NewOK(ctx), nil
-}
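
Reviewer note: a minimal out-of-tree sketch of how the SetQuota call from patch 1 is meant to be driven. Only SetQuotaInfo, the SetQuota signature, and the underlying `eos quota set` invocation come from this series; the eosbinary.Options fields, the New constructor, and the example values (username, quota node, endpoint) are assumptions for illustration.

```go
package main

import (
	"context"
	"log"

	"github.com/cs3org/reva/pkg/eosclient"
	"github.com/cs3org/reva/pkg/eosclient/eosbinary"
)

func main() {
	// Hypothetical client setup; the Options fields shown here are an
	// assumption about the surrounding package, not part of this patch.
	c := eosbinary.New(&eosbinary.Options{
		URL:       "root://eos.example.org",
		EosBinary: "/usr/bin/eos",
	})

	// Mirrors what eosfs does in createNominalHome after patch 4: the quota
	// is set for the user on the configured quota node, not on the home path.
	info := &eosclient.SetQuotaInfo{
		Username:  "gonzalhu",      // hypothetical user
		QuotaNode: "/eos/user",     // fs.conf.QuotaNode
		MaxBytes:  1000000000000,   // fs.conf.DefaultQuotaBytes (1 TB default)
		MaxFiles:  1000000,         // fs.conf.DefaultQuotaFiles (1M default)
	}

	// rootUID/rootGID are plain strings; per patch 1 the binary client shells out to:
	//   eos -r <rootUID> <rootGID> quota set -u gonzalhu -p /eos/user -v 1000000000000 -i 1000000
	if err := c.SetQuota(context.Background(), "0", "0", info); err != nil {
		log.Fatalf("setting quota: %v", err)
	}
}
```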
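The three new eosfs options map directly onto the mapstructure tags added in patch 1, so a driver section along these lines should exercise them. The table name below is only an assumption about how the storage provider is wired in a given deployment; the keys and fallback behaviour are from the patch.

```toml
# Assumed driver section name; adjust to your storageprovider wiring.
[grpc.services.storageprovider.drivers.eoshome]
namespace = "/eos/user"
# quota_node falls back to namespace when unset (Config.init, patch 1)
quota_node = "/eos/user"
default_quota_bytes = 2000000000000 # override the 1 TB default
default_quota_files = 5000000       # override the 1M-files default
```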
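Patches 1 and 3 together change the CreateShare failure path from errors.New to errors.Wrap. A standalone sketch of why that matters, using github.com/pkg/errors (already imported by the gateway): Wrap keeps the root cause reachable for callers, while New discards it.

```go
package main

import (
	"fmt"

	"github.com/pkg/errors"
)

func main() {
	cause := fmt.Errorf("eos: permission denied") // stand-in for the addGrant failure

	// What patch 1 introduced: a fresh error, the underlying failure is dropped.
	e1 := errors.New("gateway: error adding grant to storage")
	// What patch 3 changes it to: the same message with the cause attached.
	e2 := errors.Wrap(cause, "gateway: error adding grant to storage")

	fmt.Println(errors.Cause(e1) == cause) // false: callers can no longer see why it failed
	fmt.Println(errors.Cause(e2) == cause) // true: the root cause is still inspectable
	fmt.Printf("%+v\n", e2)                // Wrap also records a stack trace for %+v
}
```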