Mirror of https://github.com/seaweedfs/seaweedfs.git, synced 2025-09-22 22:53:33 +08:00
Revert "weed mount, weed dav add option to force cache"

This reverts commit 7367b976b0.
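This revert removes the forceCache option end to end: the weed mount and weed dav forceCache flags, the ForceCache fields on the mount and WebDAV option structs, the forceCache branch in the ChunkReadAt read path, and the extra forceCache parameter on chunk_cache.NewTieredChunkCache, restoring the four-argument constructor.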
@@ -20,7 +20,6 @@ type MountOptions struct {
 	cacheDirForRead    *string
 	cacheDirForWrite   *string
 	cacheSizeMBForRead *int64
-	forceCache         *bool
 	dataCenter         *string
 	allowOthers        *bool
 	umaskString        *string
@@ -59,7 +58,6 @@ func init() {
 	mountOptions.cacheDirForRead = cmdMount.Flag.String("cacheDir", os.TempDir(), "local cache directory for file chunks and meta data")
 	mountOptions.cacheSizeMBForRead = cmdMount.Flag.Int64("cacheCapacityMB", 0, "file chunk read cache capacity in MB")
 	mountOptions.cacheDirForWrite = cmdMount.Flag.String("cacheDirWrite", "", "buffer writes mostly for large files")
-	mountOptions.forceCache = cmdMount.Flag.Bool("forceCache", true, "force to cache all reads")
 	mountOptions.dataCenter = cmdMount.Flag.String("dataCenter", "", "prefer to write to the data center")
 	mountOptions.allowOthers = cmdMount.Flag.Bool("allowOthers", true, "allows other users to access the file system")
 	mountOptions.umaskString = cmdMount.Flag.String("umask", "022", "octal umask, e.g., 022, 0111")
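The two mount hunks above remove the same option twice over: once as a MountOptions field and once as its flag registration. As a minimal standalone sketch of the pointer-per-flag pattern these lines use, with the standard library flag package standing in for cmdMount.Flag (illustrative only, not the repo's code):

```go
package main

import (
	"flag"
	"fmt"
	"os"
)

// mountOptions mirrors the pointer-per-flag style of MountOptions:
// each field stores the *T returned by the flag definition.
type mountOptions struct {
	cacheDirForRead *string
	forceCache      *bool // the option this commit reverts away
}

func main() {
	opts := mountOptions{
		cacheDirForRead: flag.String("cacheDir", os.TempDir(), "local cache directory for file chunks and meta data"),
		forceCache:      flag.Bool("forceCache", true, "force to cache all reads"),
	}
	flag.Parse()
	fmt.Println("cacheDir:", *opts.cacheDirForRead, "forceCache:", *opts.forceCache)
}
```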
@@ -235,7 +235,6 @@ func RunMount(option *MountOptions, umask os.FileMode) bool {
 		CacheDirForRead:    *option.cacheDirForRead,
 		CacheSizeMBForRead: *option.cacheSizeMBForRead,
 		CacheDirForWrite:   cacheDirForWrite,
-		ForceCache:         *option.forceCache,
 		DataCenter:         *option.dataCenter,
 		Quota:              int64(*option.collectionQuota) * 1024 * 1024,
 		MountUid:           uid,
@@ -32,7 +32,6 @@ type WebDavOption struct {
 	tlsCertificate *string
 	cacheDir       *string
 	cacheSizeMB    *int64
-	forceCache     *bool
 	maxMB          *int
 }
@@ -47,7 +46,6 @@ func init() {
 	webDavStandaloneOptions.tlsCertificate = cmdWebDav.Flag.String("cert.file", "", "path to the TLS certificate file")
 	webDavStandaloneOptions.cacheDir = cmdWebDav.Flag.String("cacheDir", os.TempDir(), "local cache directory for file chunks")
 	webDavStandaloneOptions.cacheSizeMB = cmdWebDav.Flag.Int64("cacheCapacityMB", 0, "local cache capacity in MB")
-	webDavStandaloneOptions.forceCache = cmdWebDav.Flag.Bool("forceCache", false, "force to cache reads to local disk")
 	webDavStandaloneOptions.maxMB = cmdWebDav.Flag.Int("maxMB", 4, "split files larger than the limit")
 	webDavStandaloneOptions.filerRootPath = cmdWebDav.Flag.String("filer.path", "/", "use this remote path from filer server")
 }
@@ -120,7 +118,6 @@ func (wo *WebDavOption) startWebDav() bool {
 		Cipher:      cipher,
 		CacheDir:    util.ResolvePath(*wo.cacheDir),
 		CacheSizeMB: *wo.cacheSizeMB,
-		ForceCache:  *wo.forceCache,
 		MaxMB:       *wo.maxMB,
 	})
 	if webdavServer_err != nil {
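As with weed mount, the WebDAV server receives plain values: startWebDav dereferences each flag pointer once when it builds the server's option struct. A hedged sketch of that flags-to-options handoff, with stand-in types rather than the repo's actual definitions:

```go
package main

import "fmt"

// cliOptions holds flag pointers; serverOptions holds plain values.
type cliOptions struct {
	cacheDir    *string
	cacheSizeMB *int64
	maxMB       *int
}

type serverOptions struct {
	CacheDir    string
	CacheSizeMB int64
	MaxMB       int
}

func main() {
	dir, size, max := "/tmp/cache", int64(512), 4
	cli := cliOptions{cacheDir: &dir, cacheSizeMB: &size, maxMB: &max}

	// Dereference once at startup; the server never touches the flags again.
	srv := serverOptions{
		CacheDir:    *cli.cacheDir,
		CacheSizeMB: *cli.cacheSizeMB,
		MaxMB:       *cli.maxMB,
	}
	fmt.Printf("%+v\n", srv)
}
```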
@@ -19,7 +19,6 @@ type ChunkReadAt struct {
 	fileSize      int64
 	readerCache   *ReaderCache
 	readerPattern *ReaderPattern
-	forceCache    bool
 	lastChunkFid  string
 }
@@ -197,9 +196,7 @@ func (c *ChunkReadAt) readChunkSliceAt(buffer []byte, chunkView *ChunkView, next
 		if n > 0 {
 			return n, err
 		}
-		if !c.forceCache {
-			return fetchChunkRange(buffer, c.readerCache.lookupFileIdFn, chunkView.FileId, chunkView.CipherKey, chunkView.IsGzipped, int64(offset))
-		}
+		return fetchChunkRange(buffer, c.readerCache.lookupFileIdFn, chunkView.FileId, chunkView.CipherKey, chunkView.IsGzipped, int64(offset))
 	}

 	n, err = c.readerCache.ReadChunkAt(buffer, chunkView.FileId, chunkView.CipherKey, chunkView.IsGzipped, int64(offset), int(chunkView.ChunkSize), chunkView.ViewOffset == 0)
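With the forceCache guard gone, a random-mode read that finds data in the local chunk cache returns it, and otherwise always falls back to a direct ranged fetch; there is no longer a path that forces the fetched bytes into the cache. A simplified sketch of that post-revert miss path (the function parameters and the file id are illustrative stand-ins):

```go
package main

import "fmt"

// readAt sketches the post-revert miss path: try the local cache first,
// then fall back to a direct ranged fetch from the volume server.
func readAt(buffer []byte, fileID string, offset int64,
	cacheRead func([]byte, string, int64) (int, error),
	fetchRange func([]byte, string, int64) (int, error),
) (int, error) {
	if n, err := cacheRead(buffer, fileID, offset); n > 0 {
		return n, err // served from the local chunk cache
	}
	// Cache miss: fetch the byte range directly; nothing forces it into cache.
	return fetchRange(buffer, fileID, offset)
}

func main() {
	buf := make([]byte, 8)
	n, err := readAt(buf, "3,01637037d6", 0,
		func(b []byte, id string, off int64) (int, error) { return 0, nil }, // miss
		func(b []byte, id string, off int64) (int, error) { return copy(b, "chunkdat"), nil },
	)
	fmt.Println(n, err, string(buf))
}
```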
@@ -7,9 +7,9 @@ import (
 	"time"

 	"github.com/seaweedfs/seaweedfs/weed/util/chunk_cache"
-	util_http "github.com/seaweedfs/seaweedfs/weed/util/http"
 	"github.com/seaweedfs/seaweedfs/weed/util/mem"
 	"github.com/seaweedfs/seaweedfs/weed/wdclient"

+	util_http "github.com/seaweedfs/seaweedfs/weed/util/http"
 )
@@ -69,7 +69,7 @@ func (rc *ReaderCache) MaybeCache(chunkViews *Interval[*ChunkView]) {

 		// glog.V(4).Infof("prefetch %s offset %d", chunkView.FileId, chunkView.ViewOffset)
 		// cache this chunk if not yet
-		cacher := newSingleChunkCacher(rc, chunkView.FileId, chunkView.CipherKey, chunkView.IsGzipped, int(chunkView.ChunkSize), chunkView.ViewOffset == 0)
+		cacher := newSingleChunkCacher(rc, chunkView.FileId, chunkView.CipherKey, chunkView.IsGzipped, int(chunkView.ChunkSize), false)
 		go cacher.startCaching()
 		<-cacher.cacheStartedCh
 		rc.downloaders[chunkView.FileId] = cacher
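The prefetch path is unchanged apart from its last argument: MaybeCache spawns a per-chunk downloader and blocks only until the download has started, not until it finishes. A minimal sketch of that start-then-detach handoff (the type and field names here are illustrative, not the repo's):

```go
package main

import (
	"fmt"
	"time"
)

type chunkCacher struct {
	fileID         string
	cacheStartedCh chan struct{}
}

// startCaching signals readiness immediately, then keeps downloading in the
// background, so the caller can register the downloader without waiting.
func (c *chunkCacher) startCaching() {
	close(c.cacheStartedCh)
	time.Sleep(10 * time.Millisecond) // stand-in for the actual download
	fmt.Println("cached", c.fileID)
}

func main() {
	downloaders := map[string]*chunkCacher{}
	cacher := &chunkCacher{fileID: "3,0163", cacheStartedCh: make(chan struct{})}
	go cacher.startCaching()
	<-cacher.cacheStartedCh // wait only for the goroutine to begin
	downloaders[cacher.fileID] = cacher
	time.Sleep(50 * time.Millisecond) // let the background download finish
}
```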
@@ -41,7 +41,6 @@ type Option struct {
 	CacheDirForRead    string
 	CacheSizeMBForRead int64
 	CacheDirForWrite   string
-	ForceCache         bool
 	DataCenter         string
 	Umask              os.FileMode
 	Quota              int64
@@ -96,7 +95,7 @@ func NewSeaweedFileSystem(option *Option) *WFS {
 	wfs.option.filerIndex = int32(rand.Intn(len(option.FilerAddresses)))
 	wfs.option.setupUniqueCacheDirectory()
 	if option.CacheSizeMBForRead > 0 {
-		wfs.chunkCache = chunk_cache.NewTieredChunkCache(256, option.getUniqueCacheDirForRead(), option.CacheSizeMBForRead, 1024*1024, option.ForceCache)
+		wfs.chunkCache = chunk_cache.NewTieredChunkCache(256, option.getUniqueCacheDirForRead(), option.CacheSizeMBForRead, 1024*1024)
 	}

 	wfs.metaCache = meta_cache.NewMetaCache(path.Join(option.getUniqueCacheDirForRead(), "meta"), option.UidGidMapper,
@@ -38,7 +38,6 @@ type WebDavOption struct {
 	Cipher      bool
 	CacheDir    string
 	CacheSizeMB int64
-	ForceCache  bool
 	MaxMB       int
 }
@@ -134,7 +133,7 @@ func NewWebDavFileSystem(option *WebDavOption) (webdav.FileSystem, error) {
 	cacheDir := path.Join(option.CacheDir, cacheUniqueId)

 	os.MkdirAll(cacheDir, os.FileMode(0755))
-	chunkCache := chunk_cache.NewTieredChunkCache(256, cacheDir, option.CacheSizeMB, 1024*1024, option.ForceCache)
+	chunkCache := chunk_cache.NewTieredChunkCache(256, cacheDir, option.CacheSizeMB, 1024*1024)
 	t := &WebDavFileSystem{
 		option:     option,
 		chunkCache: chunkCache,
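Before constructing the cache, NewWebDavFileSystem derives a per-instance directory and creates it with 0755 permissions. A small standalone sketch of that cache-directory setup, with a stand-in for the derived unique id:

```go
package main

import (
	"fmt"
	"os"
	"path"
)

func main() {
	// Mirror of the cache-dir setup pattern: a unique subdirectory per
	// server instance, created with 0755 bits (further masked by umask).
	base := os.TempDir()
	cacheUniqueId := "example-id" // stand-in for the derived unique id
	cacheDir := path.Join(base, cacheUniqueId)
	if err := os.MkdirAll(cacheDir, os.FileMode(0755)); err != nil {
		fmt.Fprintln(os.Stderr, "mkdir:", err)
		os.Exit(1)
	}
	fmt.Println("webdav chunk cache dir:", cacheDir)
}
```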
@@ -23,16 +23,14 @@ type TieredChunkCache struct {
 	onDiskCacheSizeLimit0 uint64
 	onDiskCacheSizeLimit1 uint64
 	onDiskCacheSizeLimit2 uint64
-	forceCache            bool
 }

 var _ ChunkCache = &TieredChunkCache{}

-func NewTieredChunkCache(maxEntries int64, dir string, diskSizeInUnit int64, unitSize int64, forceCache bool) *TieredChunkCache {
+func NewTieredChunkCache(maxEntries int64, dir string, diskSizeInUnit int64, unitSize int64) *TieredChunkCache {

 	c := &TieredChunkCache{
 		memCache: NewChunkCacheInMemory(maxEntries),
-		forceCache: forceCache,
 	}
 	c.diskCaches = make([]*OnDiskCacheLayer, 3)
 	c.onDiskCacheSizeLimit0 = uint64(unitSize)
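After the revert, NewTieredChunkCache is back to four parameters: maxEntries, dir, diskSizeInUnit, and unitSize. A hedged usage sketch against the real package; the capacity numbers are illustrative, and only the constructor and Shutdown that appear in this diff are assumed:

```go
package main

import (
	"fmt"
	"os"

	"github.com/seaweedfs/seaweedfs/weed/util/chunk_cache"
)

func main() {
	dir, err := os.MkdirTemp("", "chunkcache")
	if err != nil {
		panic(err)
	}
	defer os.RemoveAll(dir)

	// Post-revert signature: maxEntries, dir, diskSizeInUnit, unitSize.
	// Here: 256 in-memory entries; 1024 on-disk units of 1 MiB each.
	cache := chunk_cache.NewTieredChunkCache(256, dir, 1024, 1024*1024)
	defer cache.Shutdown()

	fmt.Println("tiered chunk cache ready at", dir)
}
```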
@@ -13,7 +13,7 @@ func TestOnDisk(t *testing.T) {

 	totalDiskSizeInKB := int64(32)

-	cache := NewTieredChunkCache(2, tmpDir, totalDiskSizeInKB, 1024, false)
+	cache := NewTieredChunkCache(2, tmpDir, totalDiskSizeInKB, 1024)

 	writeCount := 5
 	type test_data struct {
@@ -61,7 +61,7 @@ func TestOnDisk(t *testing.T) {

 	cache.Shutdown()

-	cache = NewTieredChunkCache(2, tmpDir, totalDiskSizeInKB, 1024, false)
+	cache = NewTieredChunkCache(2, tmpDir, totalDiskSizeInKB, 1024)

 	for i := 0; i < 2; i++ {
 		data := mem.Allocate(testData[i].size)