mirror of
https://github.com/seaweedfs/seaweedfs.git
synced 2025-09-20 01:49:23 +08:00
Revert "Changing needle_byte_cache so that it doesn't grow so big when larger files are added."
This reverts commit 87fee21ef5.
This commit is contained in:
@@ -8,7 +8,6 @@ import (
 	"github.com/hashicorp/golang-lru"
 	"github.com/chrislusf/seaweedfs/weed/util"
-	"github.com/chrislusf/seaweedfs/weed/glog"
 )
 
 var (
@@ -25,7 +24,7 @@ In caching, the string~[]byte mapping is cached
 */
 func init() {
 	bytesPool = util.NewBytesPool()
-	bytesCache, _ = lru.NewWithEvict(50, func(key interface{}, value interface{}) {
+	bytesCache, _ = lru.NewWithEvict(512, func(key interface{}, value interface{}) {
 		value.(*Block).decreaseReference()
 	})
 }
@@ -47,37 +46,22 @@ func (block *Block) increaseReference() {
 // get bytes from the LRU cache of []byte first, then from the bytes pool
 // when []byte in LRU cache is evicted, it will be put back to the bytes pool
 func getBytesForFileBlock(r *os.File, offset int64, readSize int) (dataSlice []byte, block *Block, err error) {
-	//Skip the cache if we are looking for a block that is too big to fit in the cache (defaulting to 10MB)
-	cacheable := readSize <= (1024*1024*10)
-	if !cacheable {
-		glog.V(4).Infoln("Block too big to keep in cache. Size:", readSize)
-	}
-	cacheKey := string("")
-	if cacheable {
-		// check cache, return if found
-		cacheKey = fmt.Sprintf("%d:%d:%d", r.Fd(), offset >> 3, readSize)
-		if obj, found := bytesCache.Get(cacheKey); found {
-			glog.V(4).Infoln("Found block in cache. Size:", readSize)
-			block = obj.(*Block)
-			block.increaseReference()
-			dataSlice = block.Bytes[0:readSize]
-			return dataSlice, block, nil
-		}
+	// check cache, return if found
+	cacheKey := fmt.Sprintf("%d:%d:%d", r.Fd(), offset>>3, readSize)
+	if obj, found := bytesCache.Get(cacheKey); found {
+		block = obj.(*Block)
+		block.increaseReference()
+		dataSlice = block.Bytes[0:readSize]
+		return dataSlice, block, nil
 	}
 	// get the []byte from pool
 	b := bytesPool.Get(readSize)
 	// refCount = 2, one by the bytesCache, one by the actual needle object
-	refCount := int32(1)
-	if cacheable {
-		refCount = 2
-	}
-	block = &Block{Bytes: b, refCount: refCount}
+	block = &Block{Bytes: b, refCount: 2}
 	dataSlice = block.Bytes[0:readSize]
 	_, err = r.ReadAt(dataSlice, offset)
-	if cacheable {
-		bytesCache.Add(cacheKey, block)
-	}
+	bytesCache.Add(cacheKey, block)
 	return dataSlice, block, err
 }
|
|
||||||
|
Reference in New Issue
Block a user