Skip to content

Commit

Permalink
Refactor handling of blob id and size attrs to allow specialization
Browse files Browse the repository at this point in the history
  • Loading branch information
aduffeck committed Aug 14, 2024
1 parent 35d40e5 commit ba7048e
Show file tree
Hide file tree
Showing 9 changed files with 43 additions and 54 deletions.
18 changes: 4 additions & 14 deletions pkg/storage/fs/posix/lookup/lookup.go
Original file line number Diff line number Diff line change
Expand Up @@ -124,22 +124,12 @@ func (lu *Lookup) MetadataBackend() metadata.Backend {
return lu.metadataBackend
}

// ReadBlobSizeAttr reads the blobsize from the xattrs
func (lu *Lookup) ReadBlobSizeAttr(ctx context.Context, path string) (int64, error) {
blobSize, err := lu.metadataBackend.GetInt64(ctx, path, prefixes.BlobsizeAttr)
func (lu *Lookup) ReadBlobIDAndSizeAttr(ctx context.Context, path string, _ node.Attributes) (string, int64, error) {
fi, err := os.Stat(path)
if err != nil {
return 0, errors.Wrapf(err, "error reading blobsize xattr")
return "", 0, errors.Wrap(err, "error stating file")
}
return blobSize, nil
}

// ReadBlobIDAttr reads the blobsize from the xattrs
func (lu *Lookup) ReadBlobIDAttr(ctx context.Context, path string) (string, error) {
attr, err := lu.metadataBackend.Get(ctx, path, prefixes.BlobIDAttr)
if err != nil {
return "", errors.Wrapf(err, "error reading blobid xattr")
}
return string(attr), nil
return "", fi.Size(), nil
}

// TypeFromPath returns the type of the node at the given path
Expand Down
3 changes: 0 additions & 3 deletions pkg/storage/fs/posix/tree/assimilation.go
Original file line number Diff line number Diff line change
Expand Up @@ -497,9 +497,6 @@ assimilate:
attributes[prefixes.PropagationAttr] = []byte("1")
} else {
attributes.SetInt64(prefixes.TypeAttr, int64(provider.ResourceType_RESOURCE_TYPE_FILE))
attributes.SetString(prefixes.BlobIDAttr, id)
attributes.SetInt64(prefixes.BlobsizeAttr, fi.Size())

}

n := node.New(spaceID, id, parentID, filepath.Base(path), fi.Size(), "", provider.ResourceType_RESOURCE_TYPE_FILE, nil, t.lookup)
Expand Down
8 changes: 0 additions & 8 deletions pkg/storage/fs/posix/tree/tree.go
Original file line number Diff line number Diff line change
Expand Up @@ -709,10 +709,6 @@ func (t *Tree) WriteBlob(node *node.Node, source string) error {

// ReadBlob reads a blob from the blobstore
func (t *Tree) ReadBlob(node *node.Node) (io.ReadCloser, error) {
if node.BlobID == "" {
// there is no blob yet - we are dealing with a 0 byte file
return io.NopCloser(bytes.NewReader([]byte{})), nil
}
return t.blobstore.Download(node)
}

Expand All @@ -721,10 +717,6 @@ func (t *Tree) DeleteBlob(node *node.Node) error {
if node == nil {
return fmt.Errorf("could not delete blob, nil node was given")
}
if node.BlobID == "" {
return fmt.Errorf("could not delete blob, node with empty blob id was given")
}

return t.blobstore.Delete(node)
}

Expand Down
32 changes: 30 additions & 2 deletions pkg/storage/utils/decomposedfs/lookup/lookup.go
Original file line number Diff line number Diff line change
Expand Up @@ -72,8 +72,36 @@ func (lu *Lookup) MetadataBackend() metadata.Backend {
return lu.metadataBackend
}

// ReadBlobIDAndSizeAttr reads the blob id and blob size of the node at the
// given path. When attrs is non-nil the values are taken from it directly;
// otherwise all attributes are loaded from the metadata backend first.
// An empty blob id with a zero size (and nil error) means the node has no
// blob yet, e.g. a 0 byte file.
func (lu *Lookup) ReadBlobIDAndSizeAttr(ctx context.Context, path string, attrs node.Attributes) (string, int64, error) {
	if attrs != nil {
		blobID := attrs.String(prefixes.BlobIDAttr)
		if blobID == "" {
			// no blob yet - nothing more to read
			return "", 0, nil
		}
		blobSize, err := attrs.Int64(prefixes.BlobsizeAttr)
		if err != nil {
			return "", 0, err
		}
		return blobID, blobSize, nil
	}

	// no attributes were passed in - read them from the metadata backend
	all, err := lu.metadataBackend.All(ctx, path)
	if err != nil {
		return "", 0, errors.Wrapf(err, "error reading blobid xattr")
	}
	nodeAttrs := node.Attributes(all)
	blobID := nodeAttrs.String(prefixes.BlobIDAttr)
	blobSize, err := nodeAttrs.Int64(prefixes.BlobsizeAttr)
	if err != nil {
		return "", 0, errors.Wrapf(err, "error reading blobsize xattr")
	}
	return blobID, blobSize, nil
}

// readBlobSizeAttr reads the blobsize from the xattrs
func (lu *Lookup) ReadBlobSizeAttr(ctx context.Context, path string) (int64, error) {
func (lu *Lookup) readBlobSizeAttr(ctx context.Context, path string) (int64, error) {
blobSize, err := lu.metadataBackend.GetInt64(ctx, path, prefixes.BlobsizeAttr)
if err != nil {
return 0, errors.Wrapf(err, "error reading blobsize xattr")
Expand All @@ -82,7 +110,7 @@ func (lu *Lookup) ReadBlobSizeAttr(ctx context.Context, path string) (int64, err
}

// readBlobIDAttr reads the blobid from the xattrs
func (lu *Lookup) ReadBlobIDAttr(ctx context.Context, path string) (string, error) {
func (lu *Lookup) readBlobIDAttr(ctx context.Context, path string) (string, error) {
attr, err := lu.metadataBackend.Get(ctx, path, prefixes.BlobIDAttr)
if err != nil {
return "", errors.Wrapf(err, "error reading blobid xattr")
Expand Down
19 changes: 4 additions & 15 deletions pkg/storage/utils/decomposedfs/node/node.go
Original file line number Diff line number Diff line change
Expand Up @@ -150,8 +150,7 @@ type PathLookup interface {
Path(ctx context.Context, n *Node, hasPermission PermissionFunc) (path string, err error)
MetadataBackend() metadata.Backend
TimeManager() TimeManager
ReadBlobSizeAttr(ctx context.Context, path string) (int64, error)
ReadBlobIDAttr(ctx context.Context, path string) (string, error)
ReadBlobIDAndSizeAttr(ctx context.Context, path string, attrs Attributes) (string, int64, error)
TypeFromPath(ctx context.Context, path string) provider.ResourceType
CopyMetadataWithSourceLock(ctx context.Context, sourcePath, targetPath string, filter func(attributeName string, value []byte) (newValue []byte, copy bool), lockedSource *lockedfile.File, acquireTargetLock bool) (err error)
CopyMetadata(ctx context.Context, src, target string, filter func(attributeName string, value []byte) (newValue []byte, copy bool), acquireTargetLock bool) (err error)
Expand Down Expand Up @@ -396,22 +395,12 @@ func ReadNode(ctx context.Context, lu PathLookup, spaceID, nodeID string, canLis
}

if revisionSuffix == "" {
n.BlobID = attrs.String(prefixes.BlobIDAttr)
if n.BlobID != "" {
blobSize, err := attrs.Int64(prefixes.BlobsizeAttr)
if err != nil {
return nil, err
}
n.Blobsize = blobSize
}
} else {
n.BlobID, err = lu.ReadBlobIDAttr(ctx, nodePath+revisionSuffix)
n.BlobID, n.Blobsize, err = lu.ReadBlobIDAndSizeAttr(ctx, nodePath, attrs)
if err != nil {
return nil, err
}

// Lookup blobsize
n.Blobsize, err = lu.ReadBlobSizeAttr(ctx, nodePath+revisionSuffix)
} else {
n.BlobID, n.Blobsize, err = lu.ReadBlobIDAndSizeAttr(ctx, nodePath+revisionSuffix, nil)
if err != nil {
return nil, err
}
Expand Down
2 changes: 1 addition & 1 deletion pkg/storage/utils/decomposedfs/recycle.go
Original file line number Diff line number Diff line change
Expand Up @@ -173,7 +173,7 @@ func (tb *DecomposedfsTrashbin) ListRecycle(ctx context.Context, ref *provider.R
nodeType = tb.fs.lu.TypeFromPath(ctx, resolvedChildPath)
switch nodeType {
case provider.ResourceType_RESOURCE_TYPE_FILE:
size, err = tb.fs.lu.ReadBlobSizeAttr(ctx, resolvedChildPath)
_, size, err = tb.fs.lu.ReadBlobIDAndSizeAttr(ctx, resolvedChildPath, nil)
if err != nil {
sublog.Error().Err(err).Str("name", name).Msg("invalid blob size, skipping")
continue
Expand Down
11 changes: 2 additions & 9 deletions pkg/storage/utils/decomposedfs/revisions.go
Original file line number Diff line number Diff line change
Expand Up @@ -86,7 +86,7 @@ func (fs *Decomposedfs) ListRevisions(ctx context.Context, ref *provider.Referen
Key: n.ID + node.RevisionIDDelimiter + parts[1],
Mtime: uint64(mtime.Unix()),
}
blobSize, err := fs.lu.ReadBlobSizeAttr(ctx, items[i])
_, blobSize, err := fs.lu.ReadBlobIDAndSizeAttr(ctx, items[i], nil)
if err != nil {
appctx.GetLogger(ctx).Error().Err(err).Str("name", fi.Name()).Msg("error reading blobsize xattr, using 0")
}
Expand Down Expand Up @@ -148,14 +148,7 @@ func (fs *Decomposedfs) DownloadRevision(ctx context.Context, ref *provider.Refe

contentPath := fs.lu.InternalPath(spaceID, revisionKey)

blobid, err := fs.lu.ReadBlobIDAttr(ctx, contentPath)
if err != nil {
return nil, errors.Wrapf(err, "Decomposedfs: could not read blob id of revision '%s' for node '%s'", n.ID, revisionKey)
}
blobsize, err := fs.lu.ReadBlobSizeAttr(ctx, contentPath)
if err != nil {
return nil, errors.Wrapf(err, "Decomposedfs: could not read blob size of revision '%s' for node '%s'", n.ID, revisionKey)
}
blobid, blobsize, err := fs.lu.ReadBlobIDAndSizeAttr(ctx, contentPath, nil)

revisionNode := node.Node{SpaceID: spaceID, BlobID: blobid, Blobsize: blobsize} // blobsize is needed for the s3ng blobstore

Expand Down
2 changes: 1 addition & 1 deletion pkg/storage/utils/decomposedfs/tree/tree.go
Original file line number Diff line number Diff line change
Expand Up @@ -759,7 +759,7 @@ func (t *Tree) removeNode(ctx context.Context, path, timeSuffix string, n *node.
continue
}

bID, err := t.lookup.ReadBlobIDAttr(ctx, rev)
bID, _, err := t.lookup.ReadBlobIDAndSizeAttr(ctx, rev, nil)
if err != nil {
logger.Error().Err(err).Str("revision", rev).Msg("error reading blobid attribute")
return err
Expand Down
2 changes: 1 addition & 1 deletion pkg/storage/utils/decomposedfs/upload/store.go
Original file line number Diff line number Diff line change
Expand Up @@ -371,7 +371,7 @@ func (store OcisStore) updateExistingNode(ctx context.Context, session *OcisSess
}

// delete old blob
bID, err := session.store.lu.ReadBlobIDAttr(ctx, versionPath)
bID, _, err := session.store.lu.ReadBlobIDAndSizeAttr(ctx, versionPath, nil)
if err != nil {
return unlock, err
}
Expand Down

0 comments on commit ba7048e

Please sign in to comment.