filer: remember content is gzipped or not

Chris Lu
2020-03-08 21:39:33 -07:00
parent 5ac6297c68
commit 2e3f6ad3a9
25 changed files with 338 additions and 212 deletions

View File

@@ -1,7 +1,6 @@
package weed_server
import (
"bytes"
"encoding/json"
"errors"
"fmt"
@@ -136,7 +135,7 @@ func submitForClientHandler(w http.ResponseWriter, r *http.Request, masterUrl st
}
debug("upload file to store", url)
uploadResult, err := operation.Upload(url, pu.FileName, false, bytes.NewReader(pu.Data), pu.IsGzipped, pu.MimeType, pu.PairMap, assignResult.Auth)
uploadResult, err := operation.UploadData(url, pu.FileName, false, pu.Data, pu.IsGzipped, pu.MimeType, pu.PairMap, assignResult.Auth)
if err != nil {
writeJsonError(w, r, http.StatusInternalServerError, err)
return
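
The Upload → UploadData change above repeats across several files in this commit: call sites that already hold the whole payload in memory now pass the byte slice directly instead of wrapping it in a bytes.Reader, which removes the need for the "bytes" import at those call sites. Below is a minimal sketch of how the two helpers could relate; the signatures and the UploadResult fields are inferred from the call sites in this diff, not taken from weed/operation itself, so treat every name here as an assumption.

package operation

import (
	"bytes"
	"io"
)

// UploadResult lists only the fields this diff reads (ETag, Md5, CipherKey,
// Gzip); the real struct in weed/operation has more.
type UploadResult struct {
	Name      string
	Size      uint32
	ETag      string
	Md5       string
	CipherKey []byte
	Gzip      uint32 // non-zero when the bytes were stored gzipped
}

// UploadData is a convenience wrapper for callers that already hold the whole
// payload in memory, so they no longer build a bytes.Reader themselves.
func UploadData(url, filename string, cipher bool, data []byte, isGzipped bool, mtype string, pairMap map[string]string, jwt string) (*UploadResult, error) {
	return Upload(url, filename, cipher, bytes.NewReader(data), isGzipped, mtype, pairMap, jwt)
}

// Upload performs the multipart POST to the volume server (elided in this sketch).
func Upload(url, filename string, cipher bool, reader io.Reader, isGzipped bool, mtype string, pairMap map[string]string, jwt string) (*UploadResult, error) {
	_ = reader // the real implementation streams this to the volume server
	return &UploadResult{}, nil
}
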

View File

@@ -2,6 +2,7 @@ package weed_server
import (
"context"
"fmt"
"io"
"net/http"
"path"
@@ -87,6 +88,9 @@ func (fs *FilerServer) doAutoChunk(ctx context.Context, w http.ResponseWriter, r
if fileName != "" {
fileName = path.Base(fileName)
}
contentType := part1.Header.Get("Content-Type")
fmt.Printf("autochunk part header: %+v\n", part1.Header)
var fileChunks []*filer_pb.FileChunk
@@ -102,7 +106,7 @@ func (fs *FilerServer) doAutoChunk(ctx context.Context, w http.ResponseWriter, r
}
// upload the chunk to the volume server
uploadResult, uploadErr := fs.doUpload(urlLocation, w, r, limitedReader, "", "", nil, auth)
uploadResult, uploadErr := fs.doUpload(urlLocation, w, r, limitedReader, fileName, contentType, nil, auth)
if uploadErr != nil {
return nil, uploadErr
}
@@ -121,6 +125,7 @@ func (fs *FilerServer) doAutoChunk(ctx context.Context, w http.ResponseWriter, r
Mtime: time.Now().UnixNano(),
ETag: uploadResult.ETag,
CipherKey: uploadResult.CipherKey,
IsGzipped: uploadResult.Gzip > 0,
},
)
@@ -154,6 +159,7 @@ func (fs *FilerServer) doAutoChunk(ctx context.Context, w http.ResponseWriter, r
Replication: replication,
Collection: collection,
TtlSec: int32(util.ParseInt(r.URL.Query().Get("ttl"), 0)),
Mime: contentType,
},
Chunks: fileChunks,
}
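
This hunk carries the point of the commit for the autochunk path: each uploaded chunk now records whether the volume server stored it gzipped (IsGzipped, derived from uploadResult.Gzip), and the detected Content-Type is kept on the entry attributes as Mime. A hedged sketch of that chunk-building pattern follows, using locally declared stand-ins for the filer_pb and operation types; the field names follow the diff, everything else (including the file id in main) is illustrative.

package main

import (
	"fmt"
	"time"
)

// Stand-in for filer_pb.FileChunk, limited to the fields used in the diff.
type FileChunk struct {
	FileId    string
	Offset    int64
	Size      uint64
	Mtime     int64
	ETag      string
	CipherKey []byte
	IsGzipped bool
}

// Stand-in for the operation.UploadResult fields read here.
type UploadResult struct {
	Size      uint32
	ETag      string
	CipherKey []byte
	Gzip      uint32
}

// buildChunk mirrors the append in doAutoChunk: the per-chunk metadata now
// remembers whether the stored bytes are gzipped.
func buildChunk(fileId string, offset int64, res *UploadResult) *FileChunk {
	return &FileChunk{
		FileId:    fileId,
		Offset:    offset,
		Size:      uint64(res.Size),
		Mtime:     time.Now().UnixNano(),
		ETag:      res.ETag,
		CipherKey: res.CipherKey,
		IsGzipped: res.Gzip > 0,
	}
}

func main() {
	// Hypothetical file id and upload result, for illustration only.
	chunk := buildChunk("3,01637037d6", 0, &UploadResult{Size: 2048, Gzip: 1})
	fmt.Printf("chunk with gzip flag: %+v\n", chunk)
}
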

View File

@@ -1,7 +1,6 @@
package weed_server
import (
"bytes"
"context"
"fmt"
"net/http"
@@ -28,7 +27,7 @@ func (fs *FilerServer) encrypt(ctx context.Context, w http.ResponseWriter, r *ht
glog.V(4).Infof("write %s to %v", r.URL.Path, urlLocation)
// Note: gzip(cipher(data)), cipher data first, then gzip
// Note: encrypt(gzip(data)), encrypt data first, then gzip
sizeLimit := int64(fs.option.MaxMB) * 1024 * 1024
@@ -41,7 +40,7 @@ func (fs *FilerServer) encrypt(ctx context.Context, w http.ResponseWriter, r *ht
pu.MimeType = http.DetectContentType(uncompressedData)
}
uploadResult, uploadError := operation.Upload(urlLocation, pu.FileName, true, bytes.NewReader(uncompressedData), false, pu.MimeType, pu.PairMap, auth)
uploadResult, uploadError := operation.UploadData(urlLocation, pu.FileName, true, uncompressedData, false, pu.MimeType, pu.PairMap, auth)
if uploadError != nil {
return nil, fmt.Errorf("upload to volume server: %v", uploadError)
}
@@ -53,11 +52,14 @@ func (fs *FilerServer) encrypt(ctx context.Context, w http.ResponseWriter, r *ht
Offset: 0,
Size: uint64(uploadResult.Size),
Mtime: time.Now().UnixNano(),
ETag: uploadResult.ETag,
ETag: uploadResult.Md5,
CipherKey: uploadResult.CipherKey,
IsGzipped: uploadResult.Gzip > 0,
},
}
fmt.Printf("uploaded: %+v\n", uploadResult)
path := r.URL.Path
if strings.HasSuffix(path, "/") {
if pu.FileName != "" {
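
Persisting IsGzipped on the chunk means a reader can tell from the metadata alone whether the stored bytes need decompressing before use. The following is a minimal read-side sketch of that idea, not the filer's actual read path; maybeUngzip is a hypothetical helper introduced only for this example.

package main

import (
	"bytes"
	"compress/gzip"
	"fmt"
	"io"
	"log"
)

// maybeUngzip returns the stored bytes as-is unless the chunk's IsGzipped flag
// is set, in which case it decompresses them first.
func maybeUngzip(stored []byte, isGzipped bool) ([]byte, error) {
	if !isGzipped {
		return stored, nil
	}
	zr, err := gzip.NewReader(bytes.NewReader(stored))
	if err != nil {
		return nil, err
	}
	defer zr.Close()
	return io.ReadAll(zr)
}

func main() {
	// Round trip: gzip a payload, then recover it by honouring the flag.
	var buf bytes.Buffer
	zw := gzip.NewWriter(&buf)
	if _, err := zw.Write([]byte("remember content is gzipped or not")); err != nil {
		log.Fatal(err)
	}
	zw.Close()

	plain, err := maybeUngzip(buf.Bytes(), true)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(plain))
}
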

View File

@@ -1,7 +1,6 @@
package weed_server
import (
"bytes"
"context"
"fmt"
"io"
@@ -418,8 +417,7 @@ func (f *WebDavFile) Write(buf []byte) (int, error) {
}
fileUrl := fmt.Sprintf("http://%s/%s", host, fileId)
bufReader := bytes.NewReader(buf)
uploadResult, err := operation.Upload(fileUrl, f.name, f.fs.option.Cipher, bufReader, false, "", nil, auth)
uploadResult, err := operation.UploadData(fileUrl, f.name, f.fs.option.Cipher, buf, false, "", nil, auth)
if err != nil {
glog.V(0).Infof("upload data %v to %s: %v", f.name, fileUrl, err)
return 0, fmt.Errorf("upload data: %v", err)
@@ -436,6 +434,7 @@ func (f *WebDavFile) Write(buf []byte) (int, error) {
Mtime: time.Now().UnixNano(),
ETag: uploadResult.ETag,
CipherKey: uploadResult.CipherKey,
IsGzipped: uploadResult.Gzip > 0,
}
f.entry.Chunks = append(f.entry.Chunks, chunk)