FUSE: add chunk cache for recently accessed file chunks
commit 826bc0b7e3 (parent 4aa82c95e6)
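In outline, this commit threads a new in-memory chunk cache (pb_cache.ChunkCache) through three paths: the FUSE write path caches each chunk it uploads, and the FUSE and WebDAV read paths consult the cache before contacting a volume server. The read side is the classic cache-aside pattern; a minimal sketch under assumed names (ChunkCacher and fetchFromVolumeServer are stand-ins for the real cache type and for the lookup-and-stream code in the diff below):

package sketch

// ChunkCacher is a stand-in for pb_cache.ChunkCache's two methods.
type ChunkCacher interface {
	GetChunk(fileId string) []byte
	SetChunk(fileId string, data []byte)
}

// readChunk shows the cache-aside flow that fetchChunkData adopts below;
// fetchFromVolumeServer is hypothetical, standing in for lookupFileId +
// util.ReadUrlAsStream.
func readChunk(cache ChunkCacher, fileId string,
	fetchFromVolumeServer func(string) ([]byte, error)) ([]byte, error) {
	if data := cache.GetChunk(fileId); data != nil {
		return data, nil // hit: no volume-server round trip
	}
	data, err := fetchFromVolumeServer(fileId)
	if err != nil {
		return nil, err
	}
	cache.SetChunk(fileId, data) // miss: populate for subsequent readers
	return data, nil
}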
weed/filesys/dirty_page.go
@@ -183,6 +183,7 @@ func (pages *ContinuousDirtyPages) saveToStorage(reader io.Reader, offset int64,
 		glog.V(0).Infof("upload failure %v to %s: %v", pages.f.Name, fileUrl, err)
 		return nil, fmt.Errorf("upload result: %v", uploadResult.Error)
 	}
+	pages.f.wfs.chunkCache.SetChunk(fileId, data)
 
 	return &filer_pb.FileChunk{
 		FileId: fileId,
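The write side is the other half of the same idea: a chunk that was just flushed is the one most likely to be read back, so saveToStorage caches the bytes it uploaded. A hedged sketch of the ordering this one-line change establishes (uploadAndCache and its parameters are assumed names, not the repository's API):

package sketch

// uploadAndCache mirrors the change above: only a successful upload
// populates the cache, so the cache never holds bytes the volume server
// rejected.
func uploadAndCache(
	upload func(data []byte) (fileId string, err error),
	cache interface{ SetChunk(fileId string, data []byte) },
	data []byte,
) (string, error) {
	fileId, err := upload(data)
	if err != nil {
		return "", err // no caching on failure
	}
	cache.SetChunk(fileId, data) // warm the cache on the write path
	return fileId, nil
}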
weed/filesys/filehandle.go
@@ -92,7 +92,7 @@ func (fh *FileHandle) readFromChunks(buff []byte, offset int64) (int64, error) {
 
 	if fh.f.reader == nil {
 		chunkViews := filer2.ViewFromVisibleIntervals(fh.f.entryViewCache, 0, math.MaxInt32)
-		fh.f.reader = filer2.NewChunkReaderAtFromClient(fh.f.wfs, chunkViews)
+		fh.f.reader = NewChunkReaderAtFromClient(fh.f.wfs, chunkViews, fh.f.wfs.chunkCache)
 	}
 
 	totalRead, err := fh.f.reader.ReadAt(buff, offset)
@@ -153,6 +153,8 @@ func (fh *FileHandle) Release(ctx context.Context, req *fuse.ReleaseRequest) err
 		fh.dirtyPages.releaseResource()
 		fh.f.wfs.ReleaseHandle(fh.f.fullpath(), fuse.HandleID(fh.handle))
 	}
+	fh.f.entryViewCache = nil
+	fh.f.reader = nil
 
 	return nil
 }
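These two nils pair with the fh.f.reader == nil check in readFromChunks above: Release invalidates the cached entry view and reader so the next read on a reopened file rebuilds both against the file's current chunk list. The pattern in isolation (a sketch with a stand-in type, not the repository's code):

package sketch

import "io"

// file sketches the lazy-build / invalidate-on-release pattern used by
// FileHandle; reader stands in for *ChunkReadAt.
type file struct {
	reader io.ReaderAt
	build  func() io.ReaderAt
}

func (f *file) readAt(p []byte, off int64) (int, error) {
	if f.reader == nil { // rebuilt on first read after open or release
		f.reader = f.build()
	}
	return f.reader.ReadAt(p, off)
}

func (f *file) release() {
	f.reader = nil // as in Release above: drop the stale reader
}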
weed/filesys/reader_at.go (renamed from weed/filer2/reader_at.go)
@@ -1,4 +1,4 @@
-package filer2
+package filesys
 
 import (
 	"bytes"
@@ -7,30 +7,34 @@ import (
 	"io"
 	"sync"
 
+	"github.com/chrislusf/seaweedfs/weed/filer2"
 	"github.com/chrislusf/seaweedfs/weed/glog"
 	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+	"github.com/chrislusf/seaweedfs/weed/pb/pb_cache"
 	"github.com/chrislusf/seaweedfs/weed/util"
 	"github.com/chrislusf/seaweedfs/weed/wdclient"
 )
 
 type ChunkReadAt struct {
 	masterClient *wdclient.MasterClient
-	chunkViews   []*ChunkView
+	chunkViews   []*filer2.ChunkView
 	buffer       []byte
 	bufferOffset int64
 	lookupFileId func(fileId string) (targetUrl string, err error)
 	readerLock   sync.Mutex
+
+	chunkCache *pb_cache.ChunkCache
 }
 
 // var _ = io.ReaderAt(&ChunkReadAt{})
 
-func NewChunkReaderAtFromClient(filerClient filer_pb.FilerClient, chunkViews []*ChunkView) *ChunkReadAt {
+func NewChunkReaderAtFromClient(filerClient filer_pb.FilerClient, chunkViews []*filer2.ChunkView, chunkCache *pb_cache.ChunkCache) *ChunkReadAt {
 
 	return &ChunkReadAt{
 		chunkViews: chunkViews,
 		lookupFileId: func(fileId string) (targetUrl string, err error) {
 			err = filerClient.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {
-				vid := VolumeId(fileId)
+				vid := filer2.VolumeId(fileId)
 				resp, err := client.LookupVolume(context.Background(), &filer_pb.LookupVolumeRequest{
 					VolumeIds: []string{vid},
 				})
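The commented-out var _ = io.ReaderAt(&ChunkReadAt{}) above gestures at the usual compile-time interface check; both call sites of this reader rely on it satisfying io.ReaderAt. The idiom, shown with a stand-in type rather than ChunkReadAt itself:

package sketch

import "io"

// chunkReader stands in for ChunkReadAt; the blank-identifier assignment
// below fails to compile if ReadAt's signature ever drifts from io.ReaderAt.
type chunkReader struct{}

func (c *chunkReader) ReadAt(p []byte, off int64) (n int, err error) {
	return 0, io.EOF
}

var _ io.ReaderAt = (*chunkReader)(nil)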
@@ -61,7 +65,6 @@ func (c *ChunkReadAt) ReadAt(p []byte, offset int64) (n int, err error) {
 	c.readerLock.Lock()
 	defer c.readerLock.Unlock()
 
-
 	for n < len(p) && err == nil {
 		readCount, readErr := c.doReadAt(p[n:], offset+int64(n))
 		n += readCount
@@ -80,7 +83,8 @@ func (c *ChunkReadAt) doReadAt(p []byte, offset int64) (n int, err error) {
 		if chunk.LogicOffset <= offset && offset < chunk.LogicOffset+int64(chunk.Size) {
 			found = true
 			if c.bufferOffset != chunk.LogicOffset {
-				c.fetchChunkToBuffer(chunk)
+				c.buffer, err = c.fetchChunkData(chunk)
+				c.bufferOffset = chunk.LogicOffset
 			}
 			break
 		}
@@ -97,27 +101,34 @@ func (c *ChunkReadAt) doReadAt(p []byte, offset int64) (n int, err error) {
 
 }
 
-func (c *ChunkReadAt) fetchChunkToBuffer(chunkView *ChunkView) error {
+func (c *ChunkReadAt) fetchChunkData(chunkView *filer2.ChunkView) ([]byte, error) {
 
-	// fmt.Printf("fetching %s [%d,%d)\n", chunkView.FileId, chunkView.LogicOffset, chunkView.LogicOffset+int64(chunkView.Size))
+	fmt.Printf("fetching %s [%d,%d)\n", chunkView.FileId, chunkView.LogicOffset, chunkView.LogicOffset+int64(chunkView.Size))
 
+	chunkData := c.chunkCache.GetChunk(chunkView.FileId)
+	if chunkData != nil {
+		glog.V(3).Infof("cache hit %s [%d,%d)", chunkView.FileId, chunkView.LogicOffset, chunkView.LogicOffset+int64(chunkView.Size))
+		return chunkData, nil
+	}
+
 	urlString, err := c.lookupFileId(chunkView.FileId)
 	if err != nil {
 		glog.V(1).Infof("operation LookupFileId %s failed, err: %v", chunkView.FileId, err)
-		return err
+		return nil, err
 	}
 	var buffer bytes.Buffer
-	err = util.ReadUrlAsStream(urlString, chunkView.CipherKey, chunkView.isGzipped, chunkView.IsFullChunk, chunkView.Offset, int(chunkView.Size), func(data []byte) {
+	err = util.ReadUrlAsStream(urlString, chunkView.CipherKey, chunkView.IsGzipped, chunkView.IsFullChunk, chunkView.Offset, int(chunkView.Size), func(data []byte) {
 		buffer.Write(data)
 	})
 	if err != nil {
 		glog.V(1).Infof("read %s failed, err: %v", chunkView.FileId, err)
-		return nil, err
+		return nil, err
 	}
-	c.buffer = buffer.Bytes()
-	c.bufferOffset = chunkView.LogicOffset
 
 	glog.V(3).Infof("read %s [%d,%d)", chunkView.FileId, chunkView.LogicOffset, chunkView.LogicOffset+int64(chunkView.Size))
 
-	return nil
+	chunkData = buffer.Bytes()
+	c.chunkCache.SetChunk(chunkView.FileId, chunkData)
+
+	return chunkData, nil
 }
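Note the split of responsibilities after this hunk: fetchChunkData is now a pure fetch (cache first, then volume server), while the caller in doReadAt owns c.buffer and c.bufferOffset and refetches only when the wanted offset leaves the buffered chunk. (Also note the fmt.Printf was uncommented here, so every cache miss now prints to stdout; it reads like debugging output left enabled.) A stripped-down sketch of that single-chunk buffering, with fetch as a hypothetical stand-in that returns the data starting at the given logical offset:

package sketch

// bufferedChunk models doReadAt's reuse of the last fetched chunk.
type bufferedChunk struct {
	buffer       []byte
	bufferOffset int64 // logical offset of buffer[0]
	fetch        func(offset int64) ([]byte, error)
}

func (b *bufferedChunk) readAt(p []byte, off int64) (int, error) {
	inBuffer := b.buffer != nil &&
		off >= b.bufferOffset && off < b.bufferOffset+int64(len(b.buffer))
	if !inBuffer { // refetch only when off falls outside the buffered chunk
		data, err := b.fetch(off)
		if err != nil {
			return 0, err
		}
		b.buffer, b.bufferOffset = data, off
	}
	return copy(p, b.buffer[off-b.bufferOffset:]), nil
}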
weed/filesys/wfs.go
@@ -15,6 +15,7 @@ import (
 	"github.com/chrislusf/seaweedfs/weed/glog"
 	"github.com/chrislusf/seaweedfs/weed/pb"
 	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+	"github.com/chrislusf/seaweedfs/weed/pb/pb_cache"
 	"github.com/chrislusf/seaweedfs/weed/util"
 	"github.com/seaweedfs/fuse"
 	"github.com/seaweedfs/fuse/fs"
@@ -62,6 +63,8 @@ type WFS struct {
 	root        fs.Node
 	fsNodeCache *FsCache
+
+	chunkCache *pb_cache.ChunkCache
 }
 
 type statsCache struct {
 	filer_pb.StatisticsResponse
@@ -78,6 +81,7 @@ func NewSeaweedFileSystem(option *Option) *WFS {
 				return make([]byte, option.ChunkSizeLimit)
 			},
 		},
+		chunkCache: pb_cache.NewChunkCache(),
 	}
 
 	wfs.root = &Dir{name: wfs.option.FilerMountRootPath, wfs: wfs}
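One ChunkCache is created per mounted filesystem, so every FileHandle on the mount shares it: SetChunk from the write path and GetChunk from any handle's read path hit the same instance. That sharing needs no extra locking assuming ccache (the library used below) synchronizes internally, which is its stated purpose; a small sketch under that assumption:

package main

import (
	"fmt"
	"sync"

	"github.com/chrislusf/seaweedfs/weed/pb/pb_cache"
)

func main() {
	cache := pb_cache.NewChunkCache() // one per WFS, as in the hunk above

	var wg sync.WaitGroup
	for i := 0; i < 4; i++ { // stand-ins for concurrent file handles
		wg.Add(1)
		go func(n int) {
			defer wg.Done()
			fileId := fmt.Sprintf("3,%02x", n) // made-up fileIds for illustration
			cache.SetChunk(fileId, []byte{byte(n)})
			_ = cache.GetChunk(fileId)
		}(i)
	}
	wg.Wait()
	fmt.Println("done: concurrent Get/Set without external locks")
}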
weed/pb/pb_cache/chunk_cache.go (new file, 32 lines)
@@ -0,0 +1,32 @@
+package pb_cache
+
+import (
+	"time"
+
+	"github.com/karlseguin/ccache"
+)
+
+// a global cache for recently accessed file chunks
+type ChunkCache struct {
+	cache *ccache.Cache
+}
+
+func NewChunkCache() *ChunkCache {
+	return &ChunkCache{
+		cache: ccache.New(ccache.Configure().MaxSize(1000).ItemsToPrune(100)),
+	}
+}
+
+func (c *ChunkCache) GetChunk(fileId string) []byte {
+	item := c.cache.Get(fileId)
+	if item == nil {
+		return nil
+	}
+	data := item.Value().([]byte)
+	item.Extend(time.Hour)
+	return data
+}
+
+func (c *ChunkCache) SetChunk(fileId string, data []byte) {
+	c.cache.Set(fileId, data, time.Hour)
+}
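A few properties of this cache are worth spelling out: ccache's MaxSize(1000) counts items, not bytes, so worst-case memory is roughly 1000 times the chunk size limit; entries expire an hour after last use, since GetChunk extends the TTL on every hit; and ItemsToPrune(100) evicts in batches from the least-recently-used end once the limit is reached. A quick usage sketch (the fileIds are made-up examples in the volumeId,fileKey format):

package main

import (
	"fmt"

	"github.com/chrislusf/seaweedfs/weed/pb/pb_cache"
)

func main() {
	cache := pb_cache.NewChunkCache()

	cache.SetChunk("3,01637037d6", []byte("chunk bytes")) // keyed by fileId
	if data := cache.GetChunk("3,01637037d6"); data != nil {
		fmt.Printf("hit: %d bytes\n", len(data)) // a hit also extends the TTL by an hour
	}
	if cache.GetChunk("4,0263a7f1e2") == nil {
		fmt.Println("miss: GetChunk returns nil for unknown fileIds")
	}
}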
weed/server/webdav_server.go
@@ -13,9 +13,11 @@ import (
 	"golang.org/x/net/webdav"
 	"google.golang.org/grpc"
 
+	"github.com/chrislusf/seaweedfs/weed/filesys"
 	"github.com/chrislusf/seaweedfs/weed/operation"
 	"github.com/chrislusf/seaweedfs/weed/pb"
 	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+	"github.com/chrislusf/seaweedfs/weed/pb/pb_cache"
 	"github.com/chrislusf/seaweedfs/weed/util"
 
 	"github.com/chrislusf/seaweedfs/weed/filer2"
@@ -66,6 +68,7 @@ type WebDavFileSystem struct {
 	secret         security.SigningKey
 	filer          *filer2.Filer
 	grpcDialOption grpc.DialOption
+	chunkCache     *pb_cache.ChunkCache
 }
 
 type FileInfo struct {
@@ -96,6 +99,7 @@ type WebDavFile struct {
 func NewWebDavFileSystem(option *WebDavOption) (webdav.FileSystem, error) {
 	return &WebDavFileSystem{
 		option:     option,
+		chunkCache: pb_cache.NewChunkCache(),
 	}, nil
 }
 
@@ -476,7 +480,7 @@ func (f *WebDavFile) Read(p []byte) (readSize int, err error) {
 	}
 	if f.reader == nil {
 		chunkViews := filer2.ViewFromVisibleIntervals(f.entryViewCache, 0, math.MaxInt32)
-		f.reader = filer2.NewChunkReaderAtFromClient(f.fs, chunkViews)
+		f.reader = filesys.NewChunkReaderAtFromClient(f.fs, chunkViews, f.fs.chunkCache)
 	}
 
 	readSize, err = f.reader.ReadAt(p, f.off)
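With this change WebDAV reuses the same cache-backed reader as FUSE, though each WebDavFileSystem holds its own cache instance, separate from any mount's. The surrounding Read method keeps a cursor f.off and delegates to ReadAt; the standard library expresses the same adapter, shown here as a reference point rather than the server's code:

package sketch

import (
	"io"
	"strings"
)

// sequential wraps an io.ReaderAt (like ChunkReadAt) as an io.Reader with
// an internal offset, which is what WebDavFile.Read maintains by hand.
func sequential(r io.ReaderAt, size int64) io.Reader {
	return io.NewSectionReader(r, 0, size)
}

var _ = sequential(strings.NewReader("example"), 7) // strings.Reader is an io.ReaderAt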