mirror of
https://github.com/seaweedfs/seaweedfs.git
synced 2026-02-09 09:17:28 +08:00
writing meta logs is working
This commit is contained in:
@@ -35,6 +35,7 @@ type Filer struct {
|
||||
DirQueuesPath string
|
||||
buckets *FilerBuckets
|
||||
Cipher bool
|
||||
metaLogBuffer *LogBuffer
|
||||
}
|
||||
|
||||
func NewFiler(masters []string, grpcDialOption grpc.DialOption, filerGrpcPort uint32) *Filer {
|
||||
@@ -44,6 +45,7 @@ func NewFiler(masters []string, grpcDialOption grpc.DialOption, filerGrpcPort ui
|
||||
fileIdDeletionQueue: util.NewUnboundedQueue(),
|
||||
GrpcDialOption: grpcDialOption,
|
||||
}
|
||||
f.metaLogBuffer = NewLogBuffer(time.Minute, f.logFlushFunc)
|
||||
|
||||
go f.loopProcessingDeletion()
|
||||
|
||||
|
||||
@@ -1,9 +1,17 @@
|
||||
package filer2
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/golang/protobuf/proto"
|
||||
|
||||
"github.com/chrislusf/seaweedfs/weed/glog"
|
||||
"github.com/chrislusf/seaweedfs/weed/notification"
|
||||
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
|
||||
"github.com/chrislusf/seaweedfs/weed/util"
|
||||
)
|
||||
|
||||
func (f *Filer) NotifyUpdateEvent(oldEntry, newEntry *Entry, deleteChunks bool) {
|
||||
@@ -16,24 +24,123 @@ func (f *Filer) NotifyUpdateEvent(oldEntry, newEntry *Entry, deleteChunks bool)
|
||||
return
|
||||
}
|
||||
|
||||
println("key:", key)
|
||||
|
||||
if strings.HasPrefix(key, "/.meta") {
|
||||
return
|
||||
}
|
||||
|
||||
newParentPath := ""
|
||||
if newEntry != nil {
|
||||
newParentPath, _ = newEntry.FullPath.DirAndName()
|
||||
}
|
||||
eventNotification := &filer_pb.EventNotification{
|
||||
OldEntry: oldEntry.ToProtoEntry(),
|
||||
NewEntry: newEntry.ToProtoEntry(),
|
||||
DeleteChunks: deleteChunks,
|
||||
NewParentPath: newParentPath,
|
||||
}
|
||||
|
||||
if notification.Queue != nil {
|
||||
|
||||
glog.V(3).Infof("notifying entry update %v", key)
|
||||
notification.Queue.SendMessage(key, eventNotification)
|
||||
}
|
||||
|
||||
newParentPath := ""
|
||||
if newEntry != nil {
|
||||
newParentPath, _ = newEntry.FullPath.DirAndName()
|
||||
}
|
||||
f.logMetaEvent(time.Now(), key, eventNotification)
|
||||
|
||||
notification.Queue.SendMessage(
|
||||
key,
|
||||
&filer_pb.EventNotification{
|
||||
OldEntry: oldEntry.ToProtoEntry(),
|
||||
NewEntry: newEntry.ToProtoEntry(),
|
||||
DeleteChunks: deleteChunks,
|
||||
NewParentPath: newParentPath,
|
||||
},
|
||||
)
|
||||
}
|
||||
|
||||
func (f *Filer) logMetaEvent(ts time.Time, dir string, eventNotification *filer_pb.EventNotification) {
|
||||
event := &filer_pb.FullEventNotification{
|
||||
Directory: dir,
|
||||
EventNotification: eventNotification,
|
||||
}
|
||||
data, err := proto.Marshal(event)
|
||||
if err != nil {
|
||||
glog.Errorf("failed to marshal filer_pb.FullEventNotification %+v: %v", event, err)
|
||||
return
|
||||
}
|
||||
|
||||
f.metaLogBuffer.AddToBuffer(ts, []byte(dir), data)
|
||||
|
||||
}
|
||||
|
||||
func (f *Filer) logFlushFunc(startTime, stopTime time.Time, buf []byte) {
|
||||
targetFile := fmt.Sprintf("/.meta/log/%04d/%02d/%02d/%02d/%02d/%02d-%02d.log",
|
||||
startTime.Year(), startTime.Month(), startTime.Day(), startTime.Hour(), startTime.Minute(),
|
||||
startTime.Second(), stopTime.Second())
|
||||
|
||||
if err := f.appendToFile(targetFile, buf); err != nil {
|
||||
glog.V(0).Infof("log write failed %s: %v", targetFile, err)
|
||||
}
|
||||
}
|
||||
|
||||
// LogBuffer accumulates serialized log entries in memory and hands them to
// flushFn in batches, either when the batch grows too old or when the buffer
// runs out of room. All access is serialized by the embedded mutex.
type LogBuffer struct {
	buf           []byte    // backing storage; entries are appended as [4-byte size][payload]
	pos           int       // number of bytes of buf currently in use
	startTime     time.Time // timestamp of the first entry in the current batch
	stopTime      time.Time // timestamp of the most recently added entry
	sizeBuf       []byte    // 4-byte scratch buffer for encoding each entry's size prefix
	flushInterval time.Duration // maximum age of a batch before it is flushed
	flushFn       func(startTime, stopTime time.Time, buf []byte) // receives each flushed batch
	sync.Mutex    // guards all fields above; a LogBuffer must not be copied
}
|
||||
|
||||
func NewLogBuffer(flushInterval time.Duration, flushFn func(startTime, stopTime time.Time, buf []byte)) *LogBuffer {
|
||||
lb := &LogBuffer{
|
||||
buf: make([]byte, 4*0124*1024),
|
||||
sizeBuf: make([]byte, 4),
|
||||
flushInterval: 2 * time.Second, // flushInterval,
|
||||
flushFn: flushFn,
|
||||
}
|
||||
go lb.loopFlush()
|
||||
return lb
|
||||
}
|
||||
|
||||
func (m *LogBuffer) loopFlush() {
|
||||
for {
|
||||
m.Lock()
|
||||
m.flush()
|
||||
m.Unlock()
|
||||
time.Sleep(m.flushInterval)
|
||||
}
|
||||
}
|
||||
|
||||
func (m *LogBuffer) flush() {
|
||||
if m.flushFn != nil && m.pos > 0 {
|
||||
m.flushFn(m.startTime, m.stopTime, m.buf[:m.pos])
|
||||
m.pos = 0
|
||||
}
|
||||
}
|
||||
|
||||
func (m *LogBuffer) AddToBuffer(ts time.Time, key, data []byte) {
|
||||
|
||||
logEntry := &filer_pb.LogEntry{
|
||||
TsNs: ts.UnixNano(),
|
||||
PartitionKeyHash: util.HashToInt32(key),
|
||||
Data: data,
|
||||
}
|
||||
|
||||
logEntryData, _ := proto.Marshal(logEntry)
|
||||
|
||||
size := len(logEntryData)
|
||||
|
||||
m.Lock()
|
||||
defer m.Unlock()
|
||||
|
||||
if m.pos == 0 {
|
||||
m.startTime = ts
|
||||
}
|
||||
|
||||
if m.startTime.Add(m.flushInterval).Before(ts) || len(m.buf)-m.pos < size+4 {
|
||||
m.flush()
|
||||
m.startTime = ts
|
||||
}
|
||||
m.stopTime = ts
|
||||
|
||||
util.Uint32toBytes(m.sizeBuf, uint32(size))
|
||||
copy(m.buf[m.pos:m.pos+4], m.sizeBuf)
|
||||
|
||||
copy(m.buf[m.pos+4:m.pos+4+size], logEntryData)
|
||||
m.pos += size + 4
|
||||
}
|
||||
|
||||
70
weed/filer2/filer_notify_append.go
Normal file
70
weed/filer2/filer_notify_append.go
Normal file
@@ -0,0 +1,70 @@
|
||||
package filer2
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"os"
|
||||
"time"
|
||||
|
||||
"github.com/chrislusf/seaweedfs/weed/operation"
|
||||
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
|
||||
"github.com/chrislusf/seaweedfs/weed/util"
|
||||
)
|
||||
|
||||
func (f *Filer) appendToFile(targetFile string, data []byte) error {
|
||||
|
||||
// assign a volume location
|
||||
assignRequest := &operation.VolumeAssignRequest{
|
||||
Count: 1,
|
||||
}
|
||||
assignResult, err := operation.Assign(f.GetMaster(), f.GrpcDialOption, assignRequest)
|
||||
if err != nil {
|
||||
return fmt.Errorf("AssignVolume: %v", err)
|
||||
}
|
||||
if assignResult.Error != "" {
|
||||
return fmt.Errorf("AssignVolume error: %v", assignResult.Error)
|
||||
}
|
||||
|
||||
// upload data
|
||||
targetUrl := "http://" + assignResult.Url + "/" + assignResult.Fid
|
||||
uploadResult, err := operation.UploadData(targetUrl, "", false, data, false, "", nil, assignResult.Auth)
|
||||
if err != nil {
|
||||
return fmt.Errorf("upload data %s: %v", targetUrl, err)
|
||||
}
|
||||
println("uploaded to", targetUrl)
|
||||
|
||||
// find out existing entry
|
||||
fullpath := util.FullPath(targetFile)
|
||||
entry, err := f.FindEntry(context.Background(), fullpath)
|
||||
var offset int64 = 0
|
||||
if err == filer_pb.ErrNotFound {
|
||||
entry = &Entry{
|
||||
FullPath: fullpath,
|
||||
Attr: Attr{
|
||||
Crtime: time.Now(),
|
||||
Mtime: time.Now(),
|
||||
Mode: os.FileMode(0644),
|
||||
Uid: OS_UID,
|
||||
Gid: OS_GID,
|
||||
},
|
||||
}
|
||||
} else {
|
||||
offset = int64(TotalSize(entry.Chunks))
|
||||
}
|
||||
|
||||
// append to existing chunks
|
||||
chunk := &filer_pb.FileChunk{
|
||||
FileId: assignResult.Fid,
|
||||
Offset: offset,
|
||||
Size: uint64(uploadResult.Size),
|
||||
Mtime: time.Now().UnixNano(),
|
||||
ETag: uploadResult.ETag,
|
||||
IsGzipped: uploadResult.Gzip > 0,
|
||||
}
|
||||
entry.Chunks = append(entry.Chunks, chunk)
|
||||
|
||||
// update the entry
|
||||
err = f.CreateEntry(context.Background(), entry, false)
|
||||
|
||||
return err
|
||||
}
|
||||
Reference in New Issue
Block a user