Skip to content

Commit

Permalink
fuse passthrough: address review comments
Browse files Browse the repository at this point in the history
Signed-off-by: abushwang <[email protected]>
  • Loading branch information
wswsmao committed Nov 19, 2024
1 parent 39a2e55 commit e328127
Show file tree
Hide file tree
Showing 5 changed files with 35 additions and 10 deletions.
2 changes: 2 additions & 0 deletions cache/cache.go
Original file line number Diff line number Diff line change
Expand Up @@ -82,6 +82,8 @@ type BlobCache interface {
// Reader is the read handle handed out by a BlobCache lookup.
// NOTE(review): this is a diff hunk from cache/cache.go — the enclosing
// BlobCache interface begins above this visible span.
type Reader interface
io.ReaderAt
// Close releases the resources held by this reader.
Close() error

// GetReaderAt returns the underlying io.ReaderAt.
// If a blob is backed by a file, it should return *os.File so that it can be used for FUSE passthrough
GetReaderAt() io.ReaderAt
}

Expand Down
4 changes: 4 additions & 0 deletions cmd/containerd-stargz-grpc/main.go
Original file line number Diff line number Diff line change
Expand Up @@ -138,6 +138,10 @@ func main() {
// Configure FUSE passthrough
if config.Config.Config.FuseConfig.PassThrough, err = isFusePthEnable(); err != nil {
log.G(ctx).Warnf("failed to check FUSE passthrough support")
} else if config.Config.Config.FuseConfig.PassThrough {
// Always set Direct to true to ensure that
// *directoryCache.Get always returns an *os.File instead of a buffer
config.Config.Config.DirectoryCacheConfig.Direct = true
}

// Configure keychain
Expand Down
2 changes: 1 addition & 1 deletion fs/config/config.go
Original file line number Diff line number Diff line change
Expand Up @@ -150,5 +150,5 @@ type FuseConfig struct {
EntryTimeout int64 `toml:"entry_timeout"`

// PassThrough indicates whether to enable FUSE passthrough mode to improve local file read performance. Default is false.
PassThrough bool `toml:"passthrough"`
PassThrough bool `toml:"passthrough" default:"false"`
}
7 changes: 4 additions & 3 deletions fs/layer/node.go
Original file line number Diff line number Diff line change
Expand Up @@ -356,10 +356,11 @@ func (n *node) Open(ctx context.Context, flags uint32) (fh fusefs.FileHandle, fu
if getter, ok := ra.(reader.PassthroughFdGetter); ok {
fd, err := getter.GetPassthroughFd()
if err != nil {
n.fs.s.report(fmt.Errorf("node.Open: %v", err))
return nil, 0, syscall.EIO
n.fs.s.report(fmt.Errorf("passThrough model failed due to node.Open: %v", err))
n.fs.passThrough = false
} else {
f.InitFd(int(fd))
}
f.InitFd(int(fd))
}
}

Expand Down
30 changes: 24 additions & 6 deletions fs/reader/reader.go
Original file line number Diff line number Diff line change
Expand Up @@ -496,9 +496,9 @@ func (sf *file) ReadAt(p []byte, offset int64) (int, error) {

func (sf *file) GetPassthroughFd() (uintptr, error) {
var (
offset int64 = 0
offset int64
firstChunkOffset int64 = -1
totalSize int64 = 0
totalSize int64
)

for {
Expand Down Expand Up @@ -538,9 +538,9 @@ func (sf *file) GetPassthroughFd() (uintptr, error) {

func (sf *file) prefetchEntireFile() error {
var (
offset int64 = 0
offset int64
firstChunkOffset int64 = -1
totalSize int64 = 0
totalSize int64
)
combinedBuffer := sf.gr.bufPool.Get().(*bytes.Buffer)
combinedBuffer.Reset()
Expand All @@ -554,10 +554,28 @@ func (sf *file) prefetchEntireFile() error {
if firstChunkOffset == -1 {
firstChunkOffset = chunkOffset
}

id := genID(sf.id, chunkOffset, chunkSize)
b := sf.gr.bufPool.Get().(*bytes.Buffer)
b.Reset()
b.Grow(int(chunkSize))
ip := b.Bytes()[:chunkSize]

// Check if the content exists in the cache
if r, err := sf.gr.cache.Get(id); err == nil {
n, err := r.ReadAt(ip, 0)
if (err == nil || err == io.EOF) && int64(n) == chunkSize {
combinedBuffer.Write(ip[:n])
totalSize += int64(n)
offset = chunkOffset + int64(n)
r.Close()
sf.gr.putBuffer(b)
continue
}
r.Close()
}

// cache miss, prefetch the whole chunk
if _, err := sf.fr.ReadAt(ip, chunkOffset); err != nil && err != io.EOF {
sf.gr.putBuffer(b)
return fmt.Errorf("failed to read data: %w", err)
Expand All @@ -572,8 +590,8 @@ func (sf *file) prefetchEntireFile() error {
sf.gr.putBuffer(b)
}
combinedIp := combinedBuffer.Bytes()

Check failure on line 592 in fs/reader/reader.go

View workflow job for this annotation

GitHub Actions / Linter (.)

var-naming: var combinedIp should be combinedIP (revive)
id := genID(sf.id, firstChunkOffset, totalSize)
sf.gr.cacheData(combinedIp, id)
combinedId := genID(sf.id, firstChunkOffset, totalSize)

Check failure on line 593 in fs/reader/reader.go

View workflow job for this annotation

GitHub Actions / Linter (.)

var-naming: var combinedId should be combinedID (revive)
sf.gr.cacheData(combinedIp, combinedId)
return nil
}

Expand Down

0 comments on commit e328127

Please sign in to comment.