Merge branch 'master' into support_ssd_volume
@@ -234,12 +234,12 @@ func adjustHeaderContentDisposition(w http.ResponseWriter, r *http.Request, file
}
}

-func processRangeRequest(r *http.Request, w http.ResponseWriter, totalSize int64, mimeType string, writeFn func(writer io.Writer, offset int64, size int64) error) {
+func processRangeRequest(r *http.Request, w http.ResponseWriter, totalSize int64, mimeType string, writeFn func(writer io.Writer, offset int64, size int64, httpStatusCode int) error) {
rangeReq := r.Header.Get("Range")

if rangeReq == "" {
w.Header().Set("Content-Length", strconv.FormatInt(totalSize, 10))
-if err := writeFn(w, 0, totalSize); err != nil {
+if err := writeFn(w, 0, totalSize, 0); err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
@@ -278,9 +278,8 @@ func processRangeRequest(r *http.Request, w http.ResponseWriter, totalSize int64
ra := ranges[0]
w.Header().Set("Content-Length", strconv.FormatInt(ra.length, 10))
w.Header().Set("Content-Range", ra.contentRange(totalSize))
-w.WriteHeader(http.StatusPartialContent)

-err = writeFn(w, ra.start, ra.length)
+err = writeFn(w, ra.start, ra.length, http.StatusPartialContent)
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
@@ -308,7 +307,7 @@ func processRangeRequest(r *http.Request, w http.ResponseWriter, totalSize int64
pw.CloseWithError(e)
return
}
-if e = writeFn(part, ra.start, ra.length); e != nil {
+if e = writeFn(part, ra.start, ra.length, 0); e != nil {
pw.CloseWithError(e)
return
}
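Editor's note on the hunks above: writeFn now carries an httpStatusCode so processRangeRequest can defer the status line until the caller actually starts streaming the body (0 meaning "nothing to write here"). Below is a minimal sketch of a caller-side callback in that shape; rangeWriterFor is a hypothetical helper, not part of the commit, and it mirrors the writeResponseContent change further down.

```go
package weed_server

import (
	"io"
	"net/http"
)

// rangeWriterFor sketches a callback matching the new writeFn signature.
// A non-zero httpStatusCode (e.g. http.StatusPartialContent for a single
// range) is written just before the body; 0 means the status is handled
// elsewhere, as in the multi-range and whole-file paths.
func rangeWriterFor(w http.ResponseWriter, rs io.ReadSeeker) func(io.Writer, int64, int64, int) error {
	return func(writer io.Writer, offset int64, size int64, httpStatusCode int) error {
		if httpStatusCode != 0 {
			w.WriteHeader(httpStatusCode)
		}
		// position the underlying reader, then copy exactly size bytes
		if _, err := rs.Seek(offset, io.SeekStart); err != nil {
			return err
		}
		_, err := io.CopyN(writer, rs, size)
		return err
	}
}
```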
@@ -44,7 +44,7 @@ func (fs *FilerServer) LookupDirectoryEntry(ctx context.Context, req *filer_pb.L
}, nil
}

-func (fs *FilerServer) ListEntries(req *filer_pb.ListEntriesRequest, stream filer_pb.SeaweedFiler_ListEntriesServer) error {
+func (fs *FilerServer) ListEntries(req *filer_pb.ListEntriesRequest, stream filer_pb.SeaweedFiler_ListEntriesServer) (err error) {

glog.V(4).Infof("ListEntries %v", req)

@@ -60,23 +60,12 @@ func (fs *FilerServer) ListEntries(req *filer_pb.ListEntriesRequest, stream file

lastFileName := req.StartFromFileName
includeLastFile := req.InclusiveStartFrom
+var listErr error
for limit > 0 {
-entries, err := fs.filer.ListDirectoryEntries(stream.Context(), util.FullPath(req.Directory), lastFileName, includeLastFile, paginationLimit, req.Prefix)
-
-if err != nil {
-return err
-}
-if len(entries) == 0 {
-return nil
-}
-
-includeLastFile = false
-
-for _, entry := range entries {
-
-lastFileName = entry.Name()
-
-if err := stream.Send(&filer_pb.ListEntriesResponse{
+var hasEntries bool
+lastFileName, listErr = fs.filer.StreamListDirectoryEntries(stream.Context(), util.FullPath(req.Directory), lastFileName, includeLastFile, int64(paginationLimit), req.Prefix, "", func(entry *filer.Entry) bool {
+hasEntries = true
+if err = stream.Send(&filer_pb.ListEntriesResponse{
Entry: &filer_pb.Entry{
Name: entry.Name(),
IsDirectory: entry.IsDirectory(),
@@ -88,18 +77,27 @@ func (fs *FilerServer) ListEntries(req *filer_pb.ListEntriesRequest, stream file
Content: entry.Content,
},
}); err != nil {
-return err
+return false
}

limit--
if limit == 0 {
-return nil
+return false
}
+return true
+})

+if listErr != nil {
+return listErr
+}
+if err != nil {
+return err
+}
+if !hasEntries {
+return nil
+}

-if len(entries) < paginationLimit {
-break
-}
+includeLastFile = false

}
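Editor's note on the ListEntries hunks above: the handler no longer pulls pages itself; it hands StreamListDirectoryEntries a visitor callback that returns true to keep receiving entries and false to stop (limit reached or Send failed). A small self-contained sketch of that control flow, with stand-in types rather than the real filer API:

```go
package main

import "fmt"

// entry and streamList are stand-ins for filer.Entry and the filer's
// StreamListDirectoryEntries; only the callback contract is the point:
// the visitor returns false to stop the stream early.
type entry struct{ name string }

func streamList(startFrom string, visit func(e entry) bool) (lastFileName string, err error) {
	store := []entry{{"a.txt"}, {"b.txt"}, {"c.txt"}, {"d.txt"}}
	for _, e := range store {
		if e.name <= startFrom {
			continue // resume after the last file name of the previous page
		}
		lastFileName = e.name
		if !visit(e) {
			return lastFileName, nil // caller asked to stop
		}
	}
	return lastFileName, nil
}

func main() {
	limit := 2 // client-requested limit, as in the gRPC handler
	last, _ := streamList("", func(e entry) bool {
		fmt.Println("send", e.name)
		limit--
		return limit > 0 // mirrors returning false once the limit is hit
	})
	fmt.Println("lastFileName:", last)
}
```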
@@ -327,7 +325,7 @@ func (fs *FilerServer) DeleteEntry(ctx context.Context, req *filer_pb.DeleteEntr

err = fs.filer.DeleteEntryMetaAndData(ctx, util.JoinPath(req.Directory, req.Name), req.IsRecursive, req.IgnoreRecursiveError, req.IsDeleteData, req.IsFromOtherCluster, req.Signatures)
resp = &filer_pb.DeleteEntryResponse{}
-if err != nil {
+if err != nil && err != filer_pb.ErrNotFound {
resp.Error = err.Error()
}
return resp, nil
@@ -75,7 +75,7 @@ func (fs *FilerServer) moveFolderSubEntries(ctx context.Context, oldParent util.
includeLastFile := false
for {

-entries, err := fs.filer.ListDirectoryEntries(ctx, currentDirPath, lastFileName, includeLastFile, 1024, "")
+entries, hasMore, err := fs.filer.ListDirectoryEntries(ctx, currentDirPath, lastFileName, includeLastFile, 1024, "", "")
if err != nil {
return err
}
@@ -90,7 +90,7 @@ func (fs *FilerServer) moveFolderSubEntries(ctx context.Context, oldParent util.
return err
}
}
-if len(entries) < 1024 {
+if !hasMore {
break
}
}
@@ -29,16 +29,20 @@ func (fs *FilerServer) SubscribeMetadata(req *filer_pb.SubscribeMetadataRequest,

eachLogEntryFn := eachLogEntryFn(eachEventNotificationFn)

-processedTsNs, err := fs.filer.ReadPersistedLogBuffer(lastReadTime, eachLogEntryFn)
-if err != nil {
-return fmt.Errorf("reading from persisted logs: %v", err)
-}
-
-if processedTsNs != 0 {
-lastReadTime = time.Unix(0, processedTsNs)
-}
+var processedTsNs int64
+var err error

for {
+
+processedTsNs, err = fs.filer.ReadPersistedLogBuffer(lastReadTime, eachLogEntryFn)
+if err != nil {
+return fmt.Errorf("reading from persisted logs: %v", err)
+}
+
+if processedTsNs != 0 {
+lastReadTime = time.Unix(0, processedTsNs)
+}
+
lastReadTime, err = fs.filer.MetaAggregator.MetaLogBuffer.LoopProcessLogData(lastReadTime, func() bool {
fs.filer.MetaAggregator.ListenersLock.Lock()
fs.filer.MetaAggregator.ListenersCond.Wait()
@@ -46,6 +50,9 @@ func (fs *FilerServer) SubscribeMetadata(req *filer_pb.SubscribeMetadataRequest,
return true
}, eachLogEntryFn)
if err != nil {
+if err == log_buffer.ResumeFromDiskError {
+continue
+}
glog.Errorf("processed to %v: %v", lastReadTime, err)
time.Sleep(3127 * time.Millisecond)
if err != log_buffer.ResumeError {
@@ -73,19 +80,23 @@ func (fs *FilerServer) SubscribeLocalMetadata(req *filer_pb.SubscribeMetadataReq

eachLogEntryFn := eachLogEntryFn(eachEventNotificationFn)

-// println("reading from persisted logs ...")
-processedTsNs, err := fs.filer.ReadPersistedLogBuffer(lastReadTime, eachLogEntryFn)
-if err != nil {
-return fmt.Errorf("reading from persisted logs: %v", err)
-}
+var processedTsNs int64
+var err error

-if processedTsNs != 0 {
-lastReadTime = time.Unix(0, processedTsNs)
-}
-glog.V(0).Infof("after local log reads, %v local subscribe %s from %+v", clientName, req.PathPrefix, lastReadTime)

-// println("reading from in memory logs ...")
for {
+// println("reading from persisted logs ...")
+processedTsNs, err = fs.filer.ReadPersistedLogBuffer(lastReadTime, eachLogEntryFn)
+if err != nil {
+return fmt.Errorf("reading from persisted logs: %v", err)
+}
+
+if processedTsNs != 0 {
+lastReadTime = time.Unix(0, processedTsNs)
+}
+// glog.V(0).Infof("after local log reads, %v local subscribe %s from %+v", clientName, req.PathPrefix, lastReadTime)
+
+// println("reading from in memory logs ...")
+
lastReadTime, err = fs.filer.LocalMetaLogBuffer.LoopProcessLogData(lastReadTime, func() bool {
fs.listenersLock.Lock()
fs.listenersCond.Wait()
@@ -93,6 +104,9 @@ func (fs *FilerServer) SubscribeLocalMetadata(req *filer_pb.SubscribeMetadataReq
return true
}, eachLogEntryFn)
if err != nil {
+if err == log_buffer.ResumeFromDiskError {
+continue
+}
glog.Errorf("processed to %v: %v", lastReadTime, err)
time.Sleep(3127 * time.Millisecond)
if err != log_buffer.ResumeError {
@@ -23,11 +23,15 @@ import (
_ "github.com/chrislusf/seaweedfs/weed/filer/cassandra"
_ "github.com/chrislusf/seaweedfs/weed/filer/elastic/v7"
_ "github.com/chrislusf/seaweedfs/weed/filer/etcd"
+_ "github.com/chrislusf/seaweedfs/weed/filer/hbase"
_ "github.com/chrislusf/seaweedfs/weed/filer/leveldb"
_ "github.com/chrislusf/seaweedfs/weed/filer/leveldb2"
+_ "github.com/chrislusf/seaweedfs/weed/filer/leveldb3"
_ "github.com/chrislusf/seaweedfs/weed/filer/mongodb"
_ "github.com/chrislusf/seaweedfs/weed/filer/mysql"
+_ "github.com/chrislusf/seaweedfs/weed/filer/mysql2"
_ "github.com/chrislusf/seaweedfs/weed/filer/postgres"
+_ "github.com/chrislusf/seaweedfs/weed/filer/postgres2"
_ "github.com/chrislusf/seaweedfs/weed/filer/redis"
_ "github.com/chrislusf/seaweedfs/weed/filer/redis2"
"github.com/chrislusf/seaweedfs/weed/glog"
@@ -55,7 +59,7 @@ type FilerOption struct {
Port uint32
recursiveDelete bool
Cipher bool
-CacheToFilerLimit int64
+SaveToFilerLimit int
Filers []string
}

@@ -3,18 +3,33 @@ package weed_server
import (
"github.com/chrislusf/seaweedfs/weed/util"
"net/http"
"strings"
"time"

"github.com/chrislusf/seaweedfs/weed/stats"
)

func (fs *FilerServer) filerHandler(w http.ResponseWriter, r *http.Request) {

+start := time.Now()
+
+// proxy to volume servers
+var fileId string
+if strings.HasPrefix(r.RequestURI, "/?proxyChunkId=") {
+fileId = r.RequestURI[len("/?proxyChunkId="):]
+}
+if fileId != "" {
+stats.FilerRequestCounter.WithLabelValues("proxy").Inc()
+fs.proxyToVolumeServer(w, r, fileId)
+stats.FilerRequestHistogram.WithLabelValues("proxy").Observe(time.Since(start).Seconds())
+return
+}
+
w.Header().Set("Server", "SeaweedFS Filer "+util.VERSION)
if r.Header.Get("Origin") != "" {
w.Header().Set("Access-Control-Allow-Origin", "*")
w.Header().Set("Access-Control-Allow-Credentials", "true")
}
-start := time.Now()
switch r.Method {
case "GET":
stats.FilerRequestCounter.WithLabelValues("get").Inc()
weed/server/filer_server_handlers_proxy.go (new file, 66 lines)
@@ -0,0 +1,66 @@
package weed_server

import (
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/util"
"io"
"math/rand"
"net/http"
)

var (
client *http.Client
)

func init() {
client = &http.Client{Transport: &http.Transport{
MaxIdleConnsPerHost: 1024,
}}
}

func (fs *FilerServer) proxyToVolumeServer(w http.ResponseWriter, r *http.Request, fileId string) {

urlStrings, err := fs.filer.MasterClient.GetLookupFileIdFunction()(fileId)
if err != nil {
glog.Errorf("locate %s: %v", fileId, err)
w.WriteHeader(http.StatusInternalServerError)
return
}

if len(urlStrings) == 0 {
w.WriteHeader(http.StatusNotFound)
return
}

proxyReq, err := http.NewRequest(r.Method, urlStrings[rand.Intn(len(urlStrings))], r.Body)
if err != nil {
glog.Errorf("NewRequest %s: %v", urlStrings[0], err)
w.WriteHeader(http.StatusInternalServerError)
return
}

proxyReq.Header.Set("Host", r.Host)
proxyReq.Header.Set("X-Forwarded-For", r.RemoteAddr)

for header, values := range r.Header {
for _, value := range values {
proxyReq.Header.Add(header, value)
}
}

proxyResponse, postErr := client.Do(proxyReq)

if postErr != nil {
glog.Errorf("post to filer: %v", postErr)
w.WriteHeader(http.StatusInternalServerError)
return
}
defer util.CloseResponse(proxyResponse)

for k, v := range proxyResponse.Header {
w.Header()[k] = v
}
w.WriteHeader(proxyResponse.StatusCode)
io.Copy(w, proxyResponse.Body)

}
@@ -6,6 +6,7 @@ import (
"io"
"mime"
"net/http"
+"net/url"
"path/filepath"
"strconv"
"strings"
@@ -99,6 +100,16 @@ func (fs *FilerServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request,
w.Header().Set(k, string(v))
}

+//Seaweed custom header are not visible to Vue or javascript
+seaweedHeaders := []string{}
+for header, _ := range w.Header() {
+if strings.HasPrefix(header, "Seaweed-") {
+seaweedHeaders = append(seaweedHeaders, header)
+}
+}
+seaweedHeaders = append(seaweedHeaders, "Content-Disposition")
+w.Header().Set("Access-Control-Expose-Headers", strings.Join(seaweedHeaders, ","))
+
//set tag count
if r.Method == "GET" {
tagCount := 0
@@ -121,6 +132,7 @@ func (fs *FilerServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request,
setEtag(w, etag)

filename := entry.Name()
+filename = url.QueryEscape(filename)
adjustHeaderContentDisposition(w, r, filename)

totalSize := int64(entry.Size())
@@ -146,7 +158,10 @@ func (fs *FilerServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request,
}
}

-processRangeRequest(r, w, totalSize, mimeType, func(writer io.Writer, offset int64, size int64) error {
+processRangeRequest(r, w, totalSize, mimeType, func(writer io.Writer, offset int64, size int64, httpStatusCode int) error {
+if httpStatusCode != 0 {
+w.WriteHeader(httpStatusCode)
+}
if offset+size <= int64(len(entry.Content)) {
_, err := writer.Write(entry.Content[offset : offset+size])
return err
@@ -34,8 +34,9 @@ func (fs *FilerServer) listDirectoryHandler(w http.ResponseWriter, r *http.Reque
}

lastFileName := r.FormValue("lastFileName")
+namePattern := r.FormValue("namePattern")

-entries, err := fs.filer.ListDirectoryEntries(context.Background(), util.FullPath(path), lastFileName, false, limit, "")
+entries, shouldDisplayLoadMore, err := fs.filer.ListDirectoryEntries(context.Background(), util.FullPath(path), lastFileName, false, int64(limit), "", namePattern)

if err != nil {
glog.V(0).Infof("listDirectory %s %s %d: %s", path, lastFileName, limit, err)
@@ -43,7 +44,6 @@ func (fs *FilerServer) listDirectoryHandler(w http.ResponseWriter, r *http.Reque
return
}

-shouldDisplayLoadMore := len(entries) == limit
if path == "/" {
path = ""
}
@@ -96,7 +96,7 @@ func (fs *FilerServer) DeleteHandler(w http.ResponseWriter, r *http.Request) {
glog.V(1).Infoln("deleting", objectPath, ":", err.Error())
httpStatus := http.StatusInternalServerError
if err == filer_pb.ErrNotFound {
-httpStatus = http.StatusNotFound
+httpStatus = http.StatusNoContent
}
writeJsonError(w, r, httpStatus, err)
return
@@ -96,12 +96,6 @@ func (fs *FilerServer) doPostAutoChunk(ctx context.Context, w http.ResponseWrite
return nil, nil, err
}

-fileChunks, replyerr = filer.MaybeManifestize(fs.saveAsChunk(so), fileChunks)
-if replyerr != nil {
-glog.V(0).Infof("manifestize %s: %v", r.RequestURI, replyerr)
-return
-}
-
md5bytes = md5Hash.Sum(nil)
filerResult, replyerr = fs.saveMetaData(ctx, r, fileName, contentType, so, md5bytes, fileChunks, chunkOffset, smallContent)
@@ -111,25 +105,26 @@ func (fs *FilerServer) doPostAutoChunk(ctx context.Context, w http.ResponseWrite
func (fs *FilerServer) doPutAutoChunk(ctx context.Context, w http.ResponseWriter, r *http.Request, chunkSize int32, so *operation.StorageOption) (filerResult *FilerPostResult, md5bytes []byte, replyerr error) {

fileName := ""
-contentType := ""
+contentType := r.Header.Get("Content-Type")
+if contentType == "application/octet-stream" {
+contentType = ""
+}

fileChunks, md5Hash, chunkOffset, err, smallContent := fs.uploadReaderToChunks(w, r, r.Body, chunkSize, fileName, contentType, so)
if err != nil {
return nil, nil, err
}

-fileChunks, replyerr = filer.MaybeManifestize(fs.saveAsChunk(so), fileChunks)
-if replyerr != nil {
-glog.V(0).Infof("manifestize %s: %v", r.RequestURI, replyerr)
-return
-}
-
md5bytes = md5Hash.Sum(nil)
filerResult, replyerr = fs.saveMetaData(ctx, r, fileName, contentType, so, md5bytes, fileChunks, chunkOffset, smallContent)

return
}

+func isAppend(r *http.Request) bool {
+return r.URL.Query().Get("op") == "append"
+}
+
func (fs *FilerServer) saveMetaData(ctx context.Context, r *http.Request, fileName string, contentType string, so *operation.StorageOption, md5bytes []byte, fileChunks []*filer_pb.FileChunk, chunkOffset int64, content []byte) (filerResult *FilerPostResult, replyerr error) {

// detect file mode
@@ -151,30 +146,66 @@ func (fs *FilerServer) saveMetaData(ctx context.Context, r *http.Request, fileNa
}
}

-glog.V(4).Infoln("saving", path)
-entry := &filer.Entry{
-FullPath: util.FullPath(path),
-Attr: filer.Attr{
-Mtime: time.Now(),
-Crtime: time.Now(),
-Mode: os.FileMode(mode),
-Uid: OS_UID,
-Gid: OS_GID,
-Replication: so.Replication,
-Collection: so.Collection,
-TtlSec: so.TtlSeconds,
-DiskType: so.DiskType,
-Mime: contentType,
-Md5: md5bytes,
-FileSize: uint64(chunkOffset),
-},
-Chunks: fileChunks,
-Content: content,
+var entry *filer.Entry
+var mergedChunks []*filer_pb.FileChunk
+// when it is an append
+if isAppend(r) {
+existingEntry, findErr := fs.filer.FindEntry(ctx, util.FullPath(path))
+if findErr != nil && findErr != filer_pb.ErrNotFound {
+glog.V(0).Infof("failing to find %s: %v", path, findErr)
+}
+entry = existingEntry
+}
+if entry != nil {
+entry.Mtime = time.Now()
+entry.Md5 = nil
+// adjust chunk offsets
+for _, chunk := range fileChunks {
+chunk.Offset += int64(entry.FileSize)
+}
+mergedChunks = append(entry.Chunks, fileChunks...)
+entry.FileSize += uint64(chunkOffset)
+
+// TODO
+if len(entry.Content) > 0 {
+replyerr = fmt.Errorf("append to small file is not supported yet")
+return
+}
+
+} else {
+glog.V(4).Infoln("saving", path)
+mergedChunks = fileChunks
+entry = &filer.Entry{
+FullPath: util.FullPath(path),
+Attr: filer.Attr{
+Mtime: time.Now(),
+Crtime: time.Now(),
+Mode: os.FileMode(mode),
+Uid: OS_UID,
+Gid: OS_GID,
+Replication: so.Replication,
+Collection: so.Collection,
+TtlSec: so.TtlSeconds,
+DiskType: so.DiskType,
+Mime: contentType,
+Md5: md5bytes,
+FileSize: uint64(chunkOffset),
+},
+Content: content,
+}
}

+// maybe compact entry chunks
+mergedChunks, replyerr = filer.MaybeManifestize(fs.saveAsChunk(so), mergedChunks)
+if replyerr != nil {
+glog.V(0).Infof("manifestize %s: %v", r.RequestURI, replyerr)
+return
+}
+entry.Chunks = mergedChunks

filerResult = &FilerPostResult{
Name: fileName,
-Size: chunkOffset,
+Size: int64(entry.FileSize),
}

if entry.Extended == nil {
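Editor's note on the append branch above: newly uploaded chunks are shifted by the current entry.FileSize so the byte ranges of old and new chunks stay contiguous, then the two chunk lists are concatenated and the file size grows by the uploaded bytes. A minimal self-contained sketch of that offset arithmetic; FileChunk here is a stand-in for filer_pb.FileChunk with only the fields the math needs.

```go
package main

import "fmt"

// FileChunk is a stand-in for filer_pb.FileChunk.
type FileChunk struct {
	Offset int64
	Size   int64
}

func main() {
	// existing entry: two chunks covering bytes [0,4) and [4,10)
	existing := []FileChunk{{Offset: 0, Size: 4}, {Offset: 4, Size: 6}}
	existingFileSize := int64(10)

	// freshly uploaded chunks, offsets relative to the new upload
	uploaded := []FileChunk{{Offset: 0, Size: 3}, {Offset: 3, Size: 5}}
	uploadedBytes := int64(3 + 5)

	// same adjustment as the commit: shift new chunks past the old data
	for i := range uploaded {
		uploaded[i].Offset += existingFileSize
	}
	merged := append(existing, uploaded...)
	newFileSize := existingFileSize + uploadedBytes

	fmt.Println("merged chunks:", merged)     // offsets 0, 4, 10, 13
	fmt.Println("new file size:", newFileSize) // 18
}
```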
@@ -190,7 +221,7 @@ func (fs *FilerServer) saveMetaData(ctx context.Context, r *http.Request, fileNa
}

if dbErr := fs.filer.CreateEntry(ctx, entry, false, false, nil); dbErr != nil {
-fs.filer.DeleteChunks(entry.Chunks)
+fs.filer.DeleteChunks(fileChunks)
replyerr = dbErr
filerResult.Error = dbErr.Error()
glog.V(0).Infof("failing to write %s to filer server : %v", path, dbErr)
@@ -205,23 +236,47 @@ func (fs *FilerServer) uploadReaderToChunks(w http.ResponseWriter, r *http.Reque
var partReader = ioutil.NopCloser(io.TeeReader(reader, md5Hash))

chunkOffset := int64(0)
-var smallContent, content []byte
+var smallContent []byte

for {
limitedReader := io.LimitReader(partReader, int64(chunkSize))

-// assign one file id for one chunk
-fileId, urlLocation, auth, assignErr := fs.assignNewFileInfo(so)
-if assignErr != nil {
-return nil, nil, 0, assignErr, nil
+data, err := ioutil.ReadAll(limitedReader)
+if err != nil {
+return nil, nil, 0, err, nil
}
+if chunkOffset == 0 && !isAppend(r) {
+if len(data) < fs.option.SaveToFilerLimit || strings.HasPrefix(r.URL.Path, filer.DirectoryEtcRoot) && len(data) < 4*1024 {
+smallContent = data
+chunkOffset += int64(len(data))
+break
+}
+}
+dataReader := util.NewBytesReader(data)

-// upload the chunk to the volume server
-uploadResult, uploadErr, data := fs.doUpload(urlLocation, w, r, limitedReader, fileName, contentType, nil, auth)
+// retry to assign a different file id
+var fileId, urlLocation string
+var auth security.EncodedJwt
+var assignErr, uploadErr error
+var uploadResult *operation.UploadResult
+for i := 0; i < 3; i++ {
+// assign one file id for one chunk
+fileId, urlLocation, auth, assignErr = fs.assignNewFileInfo(so)
+if assignErr != nil {
+return nil, nil, 0, assignErr, nil
+}
+
+// upload the chunk to the volume server
+uploadResult, uploadErr, _ = fs.doUpload(urlLocation, w, r, dataReader, fileName, contentType, nil, auth)
+if uploadErr != nil {
+time.Sleep(251 * time.Millisecond)
+continue
+}
+break
+}
if uploadErr != nil {
return nil, nil, 0, uploadErr, nil
}
-content = data

// if last chunk exhausted the reader exactly at the border
if uploadResult.Size == 0 {
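Editor's note on the hunk above: two related choices show up here. The chunk is now read fully into memory (ioutil.ReadAll plus util.NewBytesReader) so it can be re-posted, and the upload is retried up to three times, assigning a fresh file id on every attempt instead of reusing the failed destination. A generic sketch of that retry shape, with placeholder assign/upload functions standing in for assignNewFileInfo and doUpload:

```go
package main

import (
	"errors"
	"fmt"
	"time"
)

// uploadChunkWithRetry sketches the loop: a new destination is assigned on
// every attempt, and the in-memory chunk can be resent because it is a byte
// slice rather than a one-shot reader.
func uploadChunkWithRetry(data []byte, assign func() (string, error), upload func(target string, data []byte) error) error {
	var lastErr error
	for i := 0; i < 3; i++ {
		target, err := assign()
		if err != nil {
			return err // assignment failures are not retried in the commit either
		}
		if lastErr = upload(target, data); lastErr == nil {
			return nil
		}
		time.Sleep(251 * time.Millisecond) // same pause as the commit
	}
	return lastErr
}

func main() {
	attempt := 0
	err := uploadChunkWithRetry([]byte("chunk"),
		func() (string, error) { attempt++; return fmt.Sprintf("volume-%d", attempt), nil },
		func(target string, data []byte) error {
			if attempt < 3 {
				return errors.New("simulated upload failure")
			}
			return nil
		})
	fmt.Println("result:", err) // succeeds on the third assigned target
}
```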
@@ -242,9 +297,6 @@ func (fs *FilerServer) uploadReaderToChunks(w http.ResponseWriter, r *http.Reque
}
}

-if chunkOffset < fs.option.CacheToFilerLimit || strings.HasPrefix(r.URL.Path, filer.DirectoryEtcRoot) && chunkOffset < 4*1024 {
-smallContent = content
-}
return fileChunks, md5Hash, chunkOffset, nil, smallContent
}

weed/server/filer_server_rocksdb.go (new file, 7 lines)
@@ -0,0 +1,7 @@
// +build rocksdb

package weed_server

import (
_ "github.com/chrislusf/seaweedfs/weed/filer/rocksdb"
)
@@ -27,7 +27,7 @@ func (vs *VolumeServer) VolumeIncrementalCopy(req *volume_server_pb.VolumeIncrem
return nil
}

-startOffset := foundOffset.ToAcutalOffset()
+startOffset := foundOffset.ToActualOffset()

buf := make([]byte, 1024*1024*2)
return sendFileContent(v.DataBackend, buf, startOffset, int64(stopOffset), stream)
@@ -72,7 +72,7 @@ func sendNeedlesSince(stream volume_server_pb.VolumeServer_VolumeTailSenderServe
stream: stream,
}

-err = storage.ScanVolumeFileFrom(v.Version(), v.DataBackend, foundOffset.ToAcutalOffset(), scanner)
+err = storage.ScanVolumeFileFrom(v.Version(), v.DataBackend, foundOffset.ToActualOffset(), scanner)

return scanner.lastProcessedTimestampNs, err

@@ -24,7 +24,7 @@ type VolumeServer struct {
guard *security.Guard
grpcDialOption grpc.DialOption

-needleMapKind storage.NeedleMapType
+needleMapKind storage.NeedleMapKind
FixJpgOrientation bool
ReadRedirect bool
compactionBytePerSecond int64
@@ -39,7 +39,7 @@ func NewVolumeServer(adminMux, publicMux *http.ServeMux, ip string,
port int, publicUrl string,
folders []string, maxCounts []int, minFreeSpacePercents []float32, diskTypes []storage.DiskType,
idxFolder string,
-needleMapKind storage.NeedleMapType,
+needleMapKind storage.NeedleMapKind,
masterNodes []string, pulseSeconds int,
dataCenter string, rack string,
whiteList []string,
@@ -261,10 +261,13 @@ func writeResponseContent(filename, mimeType string, rs io.ReadSeeker, w http.Re
return nil
}

-processRangeRequest(r, w, totalSize, mimeType, func(writer io.Writer, offset int64, size int64) error {
+processRangeRequest(r, w, totalSize, mimeType, func(writer io.Writer, offset int64, size int64, httpStatusCode int) error {
if _, e = rs.Seek(offset, 0); e != nil {
return e
}
+if httpStatusCode != 0 {
+w.WriteHeader(httpStatusCode)
+}
_, e = io.CopyN(writer, rs, size)
return e
})
@@ -106,7 +106,11 @@ type WebDavFile struct {

func NewWebDavFileSystem(option *WebDavOption) (webdav.FileSystem, error) {

-chunkCache := chunk_cache.NewTieredChunkCache(256, option.CacheDir, option.CacheSizeMB, 1024*1024)
+cacheUniqueId := util.Md5String([]byte("webdav" + option.FilerGrpcAddress + util.Version()))[0:8]
+cacheDir := path.Join(option.CacheDir, cacheUniqueId)
+
+os.MkdirAll(cacheDir, os.FileMode(0755))
+chunkCache := chunk_cache.NewTieredChunkCache(256, cacheDir, option.CacheSizeMB, 1024*1024)
return &WebDavFileSystem{
option: option,
chunkCache: chunkCache,
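Editor's note on the hunk above: the WebDAV chunk cache directory is now derived from a short hash of "webdav" plus the filer gRPC address and the SeaweedFS version, so two instances pointed at different filers (or running different versions) no longer share one on-disk cache. A standard-library sketch of the same derivation; util.Md5String is SeaweedFS's helper, crypto/md5 is used here instead, and the address, version, and base directory values are only examples.

```go
package main

import (
	"crypto/md5"
	"fmt"
	"path"
)

func main() {
	filerGrpcAddress := "localhost:18888" // example value
	version := "30GB 2.25"                // stand-in for util.Version()

	// hash the tuple and keep the first 8 hex characters, as the commit does
	sum := md5.Sum([]byte("webdav" + filerGrpcAddress + version))
	cacheUniqueId := fmt.Sprintf("%x", sum)[0:8]
	cacheDir := path.Join("/tmp/seaweedfs-cache", cacheUniqueId)

	fmt.Println(cacheDir) // e.g. /tmp/seaweedfs-cache/3f1a9c2b
}
```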
@@ -525,7 +529,7 @@ func (f *WebDavFile) Read(p []byte) (readSize int, err error) {
}
if f.reader == nil {
chunkViews := filer.ViewFromVisibleIntervals(f.entryViewCache, 0, math.MaxInt64)
-f.reader = filer.NewChunkReaderAtFromClient(f.fs, chunkViews, f.fs.chunkCache, fileSize)
+f.reader = filer.NewChunkReaderAtFromClient(filer.LookupFn(f.fs), chunkViews, f.fs.chunkCache, fileSize)
}

readSize, err = f.reader.ReadAt(p, f.off)