cp file now works

1. consolidate to filer_pb.FileChunk
2. dir: add file create, mkdir
3. file: flush, write

updates still have issues
Chris Lu
2018-05-16 00:08:44 -07:00
parent c7a71d35b0
commit b303a02461
14 changed files with 619 additions and 102 deletions
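For context, below is a minimal sketch of the chunk bookkeeping this commit consolidates on, using only the filer_pb.FileChunk fields that appear in the diff (FileId, Offset, Size); the recordChunk helper is an illustrative assumption, not code from the commit:

package filesys

import "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"

// recordChunk (hypothetical helper): each successful upload of one FUSE write
// becomes a filer_pb.FileChunk; Flush later sends the accumulated chunks to
// the filer in a single AppendFileChunks call.
func recordChunk(chunks []*filer_pb.FileChunk, fileId string, offset int64, size uint64) []*filer_pb.FileChunk {
	return append(chunks, &filer_pb.FileChunk{
		FileId: fileId, // volume-server file id from AssignVolume/Upload
		Offset: offset, // byte offset of this write within the file
		Size:   size,   // number of bytes uploaded
	})
}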

weed/filesys/dir.go

@@ -9,7 +9,6 @@ import (
	"bazil.org/fuse/fs"
	"bazil.org/fuse"
	"github.com/chrislusf/seaweedfs/weed/filer"
	"github.com/chrislusf/seaweedfs/weed/glog"
	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
	"time"
@@ -27,16 +26,77 @@ func (dir *Dir) Attr(context context.Context, attr *fuse.Attr) error {
	return nil
}

func (dir *Dir) Create(ctx context.Context, req *fuse.CreateRequest,
	resp *fuse.CreateResponse) (fs.Node, fs.Handle, error) {

	err := dir.wfs.withFilerClient(func(client filer_pb.SeaweedFilerClient) error {

		request := &filer_pb.CreateEntryRequest{
			Directory: dir.Path,
			Entry: &filer_pb.Entry{
				Name:        req.Name,
				IsDirectory: req.Mode&os.ModeDir > 0,
				Attributes: &filer_pb.FuseAttributes{
					Mtime:    time.Now().Unix(),
					FileMode: uint32(req.Mode),
					Uid:      req.Uid,
					Gid:      req.Gid,
				},
			},
		}

		glog.V(1).Infof("create: %v", request)
		if _, err := client.CreateEntry(ctx, request); err != nil {
			return fmt.Errorf("create file: %v", err)
		}

		return nil
	})

	if err == nil {
		node := &File{Name: req.Name, dir: dir, wfs: dir.wfs}
		dir.NodeMap[req.Name] = node
		return node, node, nil
	}

	return nil, nil, err
}
func (dir *Dir) Mkdir(ctx context.Context, req *fuse.MkdirRequest) (fs.Node, error) {

	dir.NodeMapLock.Lock()
	defer dir.NodeMapLock.Unlock()

	fmt.Printf("mkdir %+v\n", req)

	err := dir.wfs.withFilerClient(func(client filer_pb.SeaweedFilerClient) error {

		request := &filer_pb.CreateEntryRequest{
			Directory: dir.Path,
			Entry: &filer_pb.Entry{
				Name:        req.Name,
				IsDirectory: true,
				Attributes: &filer_pb.FuseAttributes{
					Mtime:    time.Now().Unix(),
					FileMode: uint32(req.Mode),
					Uid:      req.Uid,
					Gid:      req.Gid,
				},
			},
		}

		glog.V(1).Infof("mkdir: %v", request)
		if _, err := client.CreateEntry(ctx, request); err != nil {
			return fmt.Errorf("make dir: %v", err)
		}

		return nil
	})

	if err == nil {
		node := &Dir{Path: path.Join(dir.Path, req.Name), wfs: dir.wfs}
		dir.NodeMap[req.Name] = node
		return node, nil
	}

	return nil, err
}
func (dir *Dir) Lookup(ctx context.Context, name string) (node fs.Node, err error) {
@@ -75,13 +135,13 @@ func (dir *Dir) Lookup(ctx context.Context, name string) (node fs.Node, err erro
		if entry.IsDirectory {
			node = &Dir{Path: path.Join(dir.Path, name), wfs: dir.wfs}
		} else {
			node = &File{Chunks: entry.Chunks, Name: name, dir: dir, wfs: dir.wfs}
		}

		dir.NodeMap[name] = node
		return node, nil
	}

	return nil, fuse.ENOENT
}
func (dir *Dir) ReadDirAll(ctx context.Context) (ret []fuse.Dirent, err error) {

weed/filesys/file.go

@@ -5,13 +5,14 @@ import (
	"fmt"
	"bazil.org/fuse"
	"github.com/chrislusf/seaweedfs/weed/filer"
	"bazil.org/fuse/fs"
	"github.com/chrislusf/seaweedfs/weed/glog"
	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
	"path/filepath"
	"os"
	"time"
	"bytes"
	"github.com/chrislusf/seaweedfs/weed/operation"
)
var _ = fs.Node(&File{})
@@ -20,10 +21,11 @@ var _ = fs.Node(&File{})
var _ = fs.Handle(&File{})
var _ = fs.HandleReadAller(&File{})
// var _ = fs.HandleReader(&File{})
var _ = fs.HandleFlusher(&File{})
var _ = fs.HandleWriter(&File{})
type File struct {
	Chunks []*filer_pb.FileChunk
	Name   string
	dir    *Dir
	wfs    *WFS
@@ -71,10 +73,15 @@ func (file *File) Attr(context context.Context, attr *fuse.Attr) error {
func (file *File) ReadAll(ctx context.Context) (content []byte, err error) {

	if len(file.Chunks) == 0 {
		return
	}

	err = file.wfs.withFilerClient(func(client filer_pb.SeaweedFilerClient) error {

		// FIXME: need to either use Read() or implement differently
		request := &filer_pb.GetFileContentRequest{
			FileId: file.Chunks[0].FileId,
		}

		glog.V(1).Infof("read file content: %v", request)
@@ -91,7 +98,75 @@ func (file *File) ReadAll(ctx context.Context) (content []byte, err error) {
	return content, err
}

func (file *File) Flush(ctx context.Context, req *fuse.FlushRequest) error {
	// write the file chunks to the filer
	fmt.Printf("flush file %+v\n", req)

	err := file.wfs.withFilerClient(func(client filer_pb.SeaweedFilerClient) error {

		request := &filer_pb.AppendFileChunksRequest{
			Directory: file.dir.Path,
			Entry: &filer_pb.Entry{
				Name:   file.Name,
				Chunks: file.Chunks,
			},
		}

		glog.V(1).Infof("append chunks: %v", request)
		if _, err := client.AppendFileChunks(ctx, request); err != nil {
			return fmt.Errorf("append file chunks: %v", err)
		}

		return nil
	})

	return err
}
func (file *File) Write(ctx context.Context, req *fuse.WriteRequest, resp *fuse.WriteResponse) error {
	// write the request to volume servers
	fmt.Printf("write file %+v\n", req)

	var fileId, host string

	if err := file.wfs.withFilerClient(func(client filer_pb.SeaweedFilerClient) error {

		request := &filer_pb.AssignVolumeRequest{
			Count:       1,
			Replication: "000",
			Collection:  "",
		}

		glog.V(1).Infof("assign volume: %v", request)
		resp, err := client.AssignVolume(ctx, request)
		if err != nil {
			return err
		}

		fileId, host = resp.FileId, resp.Url

		return nil
	}); err != nil {
		return fmt.Errorf("filer assign volume: %v", err)
	}

	fileUrl := fmt.Sprintf("http://%s/%s", host, fileId)
	bufReader := bytes.NewReader(req.Data)
	uploadResult, err := operation.Upload(fileUrl, file.Name, bufReader, false, "application/octet-stream", nil, "")
	if err != nil {
		return fmt.Errorf("upload data: %v", err)
	}
	if uploadResult.Error != "" {
		return fmt.Errorf("upload result: %v", uploadResult.Error)
	}

	glog.V(1).Infof("uploaded %s/%s to: %v", file.dir.Path, file.Name, fileUrl)

	file.Chunks = append(file.Chunks, &filer_pb.FileChunk{
		FileId: fileId,
		Offset: req.Offset,
		Size:   uint64(uploadResult.Size),
	})

	return nil
}