filer: option to encrypt data on volume server

This commit is contained in:
Chris Lu
2020-03-06 00:49:47 -08:00
parent 31c481e3fc
commit 13e215ee5c
34 changed files with 419 additions and 247 deletions

View File

@@ -134,7 +134,7 @@ func submitForClientHandler(w http.ResponseWriter, r *http.Request, masterUrl st
}
debug("upload file to store", url)
uploadResult, err := operation.Upload(url, fname, bytes.NewReader(data), isGzipped, mimeType, pairMap, assignResult.Auth)
uploadResult, err := operation.Upload(url, fname, false, bytes.NewReader(data), isGzipped, mimeType, pairMap, assignResult.Auth)
if err != nil {
writeJsonError(w, r, http.StatusInternalServerError, err)
return

View File

@@ -338,5 +338,6 @@ func (fs *FilerServer) GetFilerConfiguration(ctx context.Context, req *filer_pb.
MaxMb: uint32(fs.option.MaxMB),
DirBuckets: fs.filer.DirBucketsPath,
DirQueues: fs.filer.DirQueuesPath,
Cipher: fs.filer.Cipher,
}, nil
}

View File

@@ -46,6 +46,7 @@ type FilerOption struct {
DisableHttp bool
Port uint32
recursiveDelete bool
Cipher bool
}
type FilerServer struct {
@@ -67,6 +68,7 @@ func NewFilerServer(defaultMux, readonlyMux *http.ServeMux, option *FilerOption)
}
fs.filer = filer2.NewFiler(option.Masters, fs.grpcDialOption, option.Port+10000)
fs.filer.Cipher = option.Cipher
go fs.filer.KeepConnectedToMaster()

View File

@@ -14,6 +14,7 @@ import (
"github.com/chrislusf/seaweedfs/weed/filer2"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/stats"
"github.com/chrislusf/seaweedfs/weed/util"
)
@@ -93,7 +94,7 @@ func (fs *FilerServer) handleSingleChunk(w http.ResponseWriter, r *http.Request,
return
}
if fs.option.RedirectOnRead {
if fs.option.RedirectOnRead && entry.Chunks[0].CipherKey == nil {
stats.FilerRequestCounter.WithLabelValues("redirect").Inc()
http.Redirect(w, r, urlString, http.StatusFound)
return
@@ -136,7 +137,27 @@ func (fs *FilerServer) handleSingleChunk(w http.ResponseWriter, r *http.Request,
w.Header().Set("Content-Type", entry.Attr.Mime)
}
w.WriteHeader(resp.StatusCode)
io.Copy(w, resp.Body)
if entry.Chunks[0].CipherKey == nil {
io.Copy(w, resp.Body)
} else {
fs.writeEncryptedChunk(w, resp, entry.Chunks[0])
}
}
// writeEncryptedChunk reads the full encrypted chunk body from the upstream
// volume-server response, decrypts it with the chunk's per-chunk cipher key,
// and writes the plaintext to the client.
//
// NOTE(review): the caller (handleSingleChunk) has already called
// w.WriteHeader(resp.StatusCode) before invoking this method, so the response
// header is committed. Calling w.WriteHeader again on the error paths is a
// superfluous WriteHeader call — net/http ignores it and only logs a warning —
// so on failure we can only log and return, leaving the body empty.
func (fs *FilerServer) writeEncryptedChunk(w http.ResponseWriter, resp *http.Response, chunk *filer_pb.FileChunk) {
	// Buffer the whole ciphertext: AES-GCM style decryption needs the full
	// payload before any plaintext can be produced.
	encryptedData, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		glog.V(1).Infof("read encrypted %s failed, err: %v", chunk.FileId, err)
		return
	}
	decryptedData, err := util.Decrypt(encryptedData, util.CipherKey(chunk.CipherKey))
	if err != nil {
		glog.V(1).Infof("decrypt %s failed, err: %v", chunk.FileId, err)
		return
	}
	w.Write(decryptedData)
}
func (fs *FilerServer) handleMultipleChunks(w http.ResponseWriter, r *http.Request, entry *filer2.Entry) {

View File

@@ -182,7 +182,7 @@ func (fs *FilerServer) doUpload(urlLocation string, w http.ResponseWriter, r *ht
stats.FilerRequestHistogram.WithLabelValues("postAutoChunkUpload").Observe(time.Since(start).Seconds())
}()
uploadResult, uploadError := operation.Upload(urlLocation, fileName, limitedReader, false, contentType, nil, auth)
uploadResult, uploadError := operation.Upload(urlLocation, fileName, fs.option.Cipher, limitedReader, false, contentType, nil, auth)
if uploadError != nil {
return 0, uploadError
}

View File

@@ -32,6 +32,7 @@ type WebDavOption struct {
Collection string
Uid uint32
Gid uint32
Cipher bool
}
type WebDavServer struct {
@@ -418,7 +419,7 @@ func (f *WebDavFile) Write(buf []byte) (int, error) {
fileUrl := fmt.Sprintf("http://%s/%s", host, fileId)
bufReader := bytes.NewReader(buf)
uploadResult, err := operation.Upload(fileUrl, f.name, bufReader, false, "", nil, auth)
uploadResult, err := operation.Upload(fileUrl, f.name, f.fs.option.Cipher, bufReader, false, "", nil, auth)
if err != nil {
glog.V(0).Infof("upload data %v to %s: %v", f.name, fileUrl, err)
return 0, fmt.Errorf("upload data: %v", err)
@@ -429,11 +430,12 @@ func (f *WebDavFile) Write(buf []byte) (int, error) {
}
chunk := &filer_pb.FileChunk{
FileId: fileId,
Offset: f.off,
Size: uint64(len(buf)),
Mtime: time.Now().UnixNano(),
ETag: uploadResult.ETag,
FileId: fileId,
Offset: f.off,
Size: uint64(len(buf)),
Mtime: time.Now().UnixNano(),
ETag: uploadResult.ETag,
CipherKey: uploadResult.CipherKey,
}
f.entry.Chunks = append(f.entry.Chunks, chunk)