Mirror of https://github.com/seaweedfs/seaweedfs.git, synced 2025-10-21 10:07:24 +08:00
rename filer2 to filer
@@ -8,7 +8,7 @@ import (
 	"strings"
 
 	"github.com/Azure/azure-storage-blob-go/azblob"
-	"github.com/chrislusf/seaweedfs/weed/filer2"
+	"github.com/chrislusf/seaweedfs/weed/filer"
 	"github.com/chrislusf/seaweedfs/weed/glog"
 	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
 	"github.com/chrislusf/seaweedfs/weed/replication/sink"
@@ -95,8 +95,8 @@ func (g *AzureSink) CreateEntry(key string, entry *filer_pb.Entry) error {
 		return nil
 	}
 
-	totalSize := filer2.FileSize(entry)
-	chunkViews := filer2.ViewFromChunks(g.filerSource.LookupFileId, entry.Chunks, 0, int64(totalSize))
+	totalSize := filer.FileSize(entry)
+	chunkViews := filer.ViewFromChunks(g.filerSource.LookupFileId, entry.Chunks, 0, int64(totalSize))
 
 	// Create a URL that references a to-be-created blob in your
 	// Azure Storage account's container.
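All of the cloud sinks touched by this commit share the same read path: compute the entry's total size, expand its chunk list into chunk views, then copy each view to the destination. A minimal sketch of that loop follows; ChunkView and LookupFileIdFunc here are hypothetical stand-ins that mirror only the fields this diff actually touches, not the real filer types.

package main

import "fmt"

// Hypothetical stand-in for filer.ChunkView; the real type lives in
// weed/filer and carries more fields.
type ChunkView struct {
	FileId string
	Offset int64
	Size   uint64
}

// Hypothetical stand-in for the filerSource.LookupFileId callback that
// resolves a chunk's file id to a volume-server URL.
type LookupFileIdFunc func(fileId string) (fileUrl string, err error)

// copyChunks sketches the shared sink pattern: resolve each chunk view to a
// URL, then stream that byte range to the destination store.
func copyChunks(lookup LookupFileIdFunc, chunkViews []*ChunkView) error {
	for _, chunk := range chunkViews {
		fileUrl, err := lookup(chunk.FileId)
		if err != nil {
			return err
		}
		// A real sink would read chunk.Size bytes at chunk.Offset from
		// fileUrl and append them to the destination blob/object.
		fmt.Printf("copy %d bytes of %s from %s\n", chunk.Size, chunk.FileId, fileUrl)
	}
	return nil
}

func main() {
	lookup := func(fileId string) (string, error) {
		return "http://volume-server:8080/" + fileId, nil // hypothetical address
	}
	_ = copyChunks(lookup, []*ChunkView{{FileId: "3,01637037d6", Size: 1024}})
}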
@@ -4,7 +4,7 @@ import (
 	"context"
 	"strings"
 
-	"github.com/chrislusf/seaweedfs/weed/filer2"
+	"github.com/chrislusf/seaweedfs/weed/filer"
 	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
 	"github.com/chrislusf/seaweedfs/weed/replication/sink"
 	"github.com/chrislusf/seaweedfs/weed/replication/source"
@@ -84,8 +84,8 @@ func (g *B2Sink) CreateEntry(key string, entry *filer_pb.Entry) error {
 		return nil
 	}
 
-	totalSize := filer2.FileSize(entry)
-	chunkViews := filer2.ViewFromChunks(g.filerSource.LookupFileId, entry.Chunks, 0, int64(totalSize))
+	totalSize := filer.FileSize(entry)
+	chunkViews := filer.ViewFromChunks(g.filerSource.LookupFileId, entry.Chunks, 0, int64(totalSize))
 
 	bucket, err := g.client.Bucket(context.Background(), g.bucket)
 	if err != nil {
@@ -8,7 +8,7 @@ import (
 
 	"github.com/chrislusf/seaweedfs/weed/security"
 
-	"github.com/chrislusf/seaweedfs/weed/filer2"
+	"github.com/chrislusf/seaweedfs/weed/filer"
 	"github.com/chrislusf/seaweedfs/weed/glog"
 	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
 	"github.com/chrislusf/seaweedfs/weed/replication/sink"
@@ -92,7 +92,7 @@ func (fs *FilerSink) CreateEntry(key string, entry *filer_pb.Entry) error {
 	}
 	glog.V(1).Infof("lookup: %v", lookupRequest)
 	if resp, err := filer_pb.LookupEntry(client, lookupRequest); err == nil {
-		if filer2.ETag(resp.Entry) == filer2.ETag(entry) {
+		if filer.ETag(resp.Entry) == filer.ETag(entry) {
 			glog.V(0).Infof("already replicated %s", key)
 			return nil
 		}
@@ -164,13 +164,13 @@ func (fs *FilerSink) UpdateEntry(key string, oldEntry *filer_pb.Entry, newParent
 			// skip if already changed
 			// this usually happens when the messages are not ordered
 			glog.V(0).Infof("late updates %s", key)
-		} else if filer2.ETag(newEntry) == filer2.ETag(existingEntry) {
+		} else if filer.ETag(newEntry) == filer.ETag(existingEntry) {
 			// skip if no change
 			// this usually happens when retrying the replication
 			glog.V(0).Infof("already replicated %s", key)
 		} else {
 			// find out what changed
-			deletedChunks, newChunks, err := compareChunks(filer2.LookupFn(fs), oldEntry, newEntry)
+			deletedChunks, newChunks, err := compareChunks(filer.LookupFn(fs), oldEntry, newEntry)
 			if err != nil {
 				return true, fmt.Errorf("replicte %s compare chunks error: %v", key, err)
 			}
@@ -178,7 +178,7 @@ func (fs *FilerSink) UpdateEntry(key string, oldEntry *filer_pb.Entry, newParent
 		// delete the chunks that are deleted from the source
 		if deleteIncludeChunks {
 			// remove the deleted chunks. Actual data deletion happens in filer UpdateEntry FindUnusedFileChunks
-			existingEntry.Chunks = filer2.DoMinusChunks(existingEntry.Chunks, deletedChunks)
+			existingEntry.Chunks = filer.DoMinusChunks(existingEntry.Chunks, deletedChunks)
 		}
 
 		// replicate the chunks that are new in the source
@@ -207,21 +207,21 @@ func (fs *FilerSink) UpdateEntry(key string, oldEntry *filer_pb.Entry, newParent
 	})
 
 }
 
-func compareChunks(lookupFileIdFn filer2.LookupFileIdFunctionType, oldEntry, newEntry *filer_pb.Entry) (deletedChunks, newChunks []*filer_pb.FileChunk, err error) {
-	aData, aMeta, aErr := filer2.ResolveChunkManifest(lookupFileIdFn, oldEntry.Chunks)
+func compareChunks(lookupFileIdFn filer.LookupFileIdFunctionType, oldEntry, newEntry *filer_pb.Entry) (deletedChunks, newChunks []*filer_pb.FileChunk, err error) {
+	aData, aMeta, aErr := filer.ResolveChunkManifest(lookupFileIdFn, oldEntry.Chunks)
 	if aErr != nil {
 		return nil, nil, aErr
 	}
-	bData, bMeta, bErr := filer2.ResolveChunkManifest(lookupFileIdFn, newEntry.Chunks)
+	bData, bMeta, bErr := filer.ResolveChunkManifest(lookupFileIdFn, newEntry.Chunks)
 	if bErr != nil {
 		return nil, nil, bErr
 	}
 
-	deletedChunks = append(deletedChunks, filer2.DoMinusChunks(aData, bData)...)
-	deletedChunks = append(deletedChunks, filer2.DoMinusChunks(aMeta, bMeta)...)
+	deletedChunks = append(deletedChunks, filer.DoMinusChunks(aData, bData)...)
+	deletedChunks = append(deletedChunks, filer.DoMinusChunks(aMeta, bMeta)...)
 
-	newChunks = append(newChunks, filer2.DoMinusChunks(bData, aData)...)
-	newChunks = append(newChunks, filer2.DoMinusChunks(bMeta, aMeta)...)
+	newChunks = append(newChunks, filer.DoMinusChunks(bData, aData)...)
+	newChunks = append(newChunks, filer.DoMinusChunks(bMeta, aMeta)...)
 
 	return
 }
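compareChunks derives the delta between the old and new entry with two set differences: chunks in old-but-not-new are deletions, chunks in new-but-not-old are additions (manifest chunks are resolved first, then diffed the same way). A minimal sketch of the set difference this relies on, assuming chunks can be compared by their file id; FileChunk below is a hypothetical stand-in for filer_pb.FileChunk:

package main

import "fmt"

// FileChunk is a hypothetical stand-in for filer_pb.FileChunk, reduced to
// the file id this sketch compares on.
type FileChunk struct{ FileId string }

// doMinusChunks returns as \ bs: every chunk of as whose file id does not
// appear in bs, mirroring how the diff above uses filer.DoMinusChunks.
func doMinusChunks(as, bs []*FileChunk) (delta []*FileChunk) {
	inB := make(map[string]bool, len(bs))
	for _, b := range bs {
		inB[b.FileId] = true
	}
	for _, a := range as {
		if !inB[a.FileId] {
			delta = append(delta, a)
		}
	}
	return
}

func main() {
	oldChunks := []*FileChunk{{"1,a"}, {"1,b"}}
	newChunks := []*FileChunk{{"1,b"}, {"1,c"}}
	fmt.Println(len(doMinusChunks(oldChunks, newChunks)), "deleted,",
		len(doMinusChunks(newChunks, oldChunks)), "new") // 1 deleted, 1 new
}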
@@ -8,7 +8,7 @@ import (
 	"cloud.google.com/go/storage"
 	"google.golang.org/api/option"
 
-	"github.com/chrislusf/seaweedfs/weed/filer2"
+	"github.com/chrislusf/seaweedfs/weed/filer"
 	"github.com/chrislusf/seaweedfs/weed/glog"
 	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
 	"github.com/chrislusf/seaweedfs/weed/replication/sink"
@@ -89,8 +89,8 @@ func (g *GcsSink) CreateEntry(key string, entry *filer_pb.Entry) error {
 		return nil
 	}
 
-	totalSize := filer2.FileSize(entry)
-	chunkViews := filer2.ViewFromChunks(g.filerSource.LookupFileId, entry.Chunks, 0, int64(totalSize))
+	totalSize := filer.FileSize(entry)
+	chunkViews := filer.ViewFromChunks(g.filerSource.LookupFileId, entry.Chunks, 0, int64(totalSize))
 
 	wc := g.client.Bucket(g.bucket).Object(key).NewWriter(context.Background())
 
@@ -12,7 +12,7 @@ import (
 	"github.com/aws/aws-sdk-go/service/s3"
 	"github.com/aws/aws-sdk-go/service/s3/s3iface"
 
-	"github.com/chrislusf/seaweedfs/weed/filer2"
+	"github.com/chrislusf/seaweedfs/weed/filer"
 	"github.com/chrislusf/seaweedfs/weed/glog"
 	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
 	"github.com/chrislusf/seaweedfs/weed/replication/sink"
@@ -107,8 +107,8 @@ func (s3sink *S3Sink) CreateEntry(key string, entry *filer_pb.Entry) error {
 		return err
 	}
 
-	totalSize := filer2.FileSize(entry)
-	chunkViews := filer2.ViewFromChunks(s3sink.filerSource.LookupFileId, entry.Chunks, 0, int64(totalSize))
+	totalSize := filer.FileSize(entry)
+	chunkViews := filer.ViewFromChunks(s3sink.filerSource.LookupFileId, entry.Chunks, 0, int64(totalSize))
 
 	parts := make([]*s3.CompletedPart, len(chunkViews))
 
@@ -116,7 +116,7 @@ func (s3sink *S3Sink) CreateEntry(key string, entry *filer_pb.Entry) error {
 	for chunkIndex, chunk := range chunkViews {
 		partId := chunkIndex + 1
 		wg.Add(1)
-		go func(chunk *filer2.ChunkView, index int) {
+		go func(chunk *filer.ChunkView, index int) {
 			defer wg.Done()
 			if part, uploadErr := s3sink.uploadPart(key, uploadId, partId, chunk); uploadErr != nil {
 				err = uploadErr
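Passing chunk into the goroutine as an argument, rather than closing over the range variable, is what keeps this fan-out correct: before Go 1.22 the range variable is reused across iterations, so a closure capturing it directly could upload the same chunk several times. A minimal standalone sketch of the pattern, with strings standing in for chunk views and the print standing in for a hypothetical uploadPart call:

package main

import (
	"fmt"
	"sync"
)

func main() {
	chunkViews := []string{"1,a", "1,b", "1,c"} // stand-ins for *filer.ChunkView
	parts := make([]string, len(chunkViews))    // each goroutine writes only its own slot

	var wg sync.WaitGroup
	for chunkIndex, chunk := range chunkViews {
		partId := chunkIndex + 1 // declared fresh per iteration, safe to capture
		wg.Add(1)
		go func(chunk string, index int) {
			defer wg.Done()
			// the real sink calls s3sink.uploadPart here
			parts[index] = fmt.Sprintf("part %d <- %s", partId, chunk)
		}(chunk, chunkIndex)
	}
	wg.Wait()
	fmt.Println(parts)
}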
@@ -9,7 +9,7 @@ import (
 	"github.com/aws/aws-sdk-go/aws"
 	"github.com/aws/aws-sdk-go/aws/awserr"
 	"github.com/aws/aws-sdk-go/service/s3"
-	"github.com/chrislusf/seaweedfs/weed/filer2"
+	"github.com/chrislusf/seaweedfs/weed/filer"
 	"github.com/chrislusf/seaweedfs/weed/glog"
 	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
 	"github.com/chrislusf/seaweedfs/weed/util"
@@ -103,7 +103,7 @@ func (s3sink *S3Sink) completeMultipartUpload(ctx context.Context, key, uploadId
 }
 
 // To upload a part
-func (s3sink *S3Sink) uploadPart(key, uploadId string, partId int, chunk *filer2.ChunkView) (*s3.CompletedPart, error) {
+func (s3sink *S3Sink) uploadPart(key, uploadId string, partId int, chunk *filer.ChunkView) (*s3.CompletedPart, error) {
	var readSeeker io.ReadSeeker
 
 	readSeeker, err := s3sink.buildReadSeeker(chunk)
@@ -156,7 +156,7 @@ func (s3sink *S3Sink) uploadPartCopy(key, uploadId string, partId int64, copySou
 	return err
 }
 
-func (s3sink *S3Sink) buildReadSeeker(chunk *filer2.ChunkView) (io.ReadSeeker, error) {
+func (s3sink *S3Sink) buildReadSeeker(chunk *filer.ChunkView) (io.ReadSeeker, error) {
 	fileUrl, err := s3sink.filerSource.LookupFileId(chunk.FileId)
 	if err != nil {
 		return nil, err
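buildReadSeeker exists because the AWS SDK's UploadPart takes an io.ReadSeeker (it needs to rewind on retry and to know the content length), while a chunk is only addressable through the URL its file id resolves to. A minimal sketch of bridging the two, assuming a plain HTTP GET rather than SeaweedFS's own reader utilities:

package main

import (
	"bytes"
	"io"
	"net/http"
)

// readSeekerFromUrl fetches a chunk by URL and wraps the bytes in a
// bytes.Reader, which satisfies io.ReadSeeker.
func readSeekerFromUrl(fileUrl string) (io.ReadSeeker, error) {
	resp, err := http.Get(fileUrl)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()

	data, err := io.ReadAll(resp.Body)
	if err != nil {
		return nil, err
	}
	return bytes.NewReader(data), nil
}

func main() {
	// hypothetical volume-server URL for illustration only
	rs, err := readSeekerFromUrl("http://volume-server:8080/3,01637037d6")
	if err == nil {
		rs.Seek(0, io.SeekStart) // rewind, as the SDK would on a retry
	}
}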