Changes logging function (#6919)

* updated logging methods for stores

* updated logging methods for filer

* updated logging methods for uploader and http_util

* updated logging methods for weed server

---------

Co-authored-by: akosov <a.kosov@kryptonite.ru>
Author: Aleksey Kosov
Date: 2025-06-24 18:44:06 +03:00
Committed by: GitHub
Commit: 4511c2cc1f (parent: 2cdd8092cc)
49 changed files with 206 additions and 196 deletions
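Every hunk below follows the same mechanical pattern: a plain glog call is replaced by its *Ctx variant, which takes the request's context.Context as its first argument so that request-scoped information can be attached to the log line (exactly what the wrapper attaches depends on the glog package's implementation). A minimal before/after sketch of the pattern — the findEntry functions and their arguments are illustrative, not taken from the diff:

package example

import (
	"context"

	"github.com/seaweedfs/seaweedfs/weed/glog"
)

// Before: the log line knows nothing about the request being served.
func findEntryBefore(path string, err error) {
	glog.Errorf("find %s: %v", path, err)
}

// After: ctx is threaded through, so the *Ctx variant can include
// request-scoped metadata when it formats the entry.
func findEntryAfter(ctx context.Context, path string, err error) {
	glog.ErrorfCtx(ctx, "find %s: %v", path, err)
}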


@@ -169,7 +169,7 @@ func (store *AbstractSqlStore) InsertEntry(ctx context.Context, entry *filer.Ent
if err != nil && strings.Contains(strings.ToLower(err.Error()), "duplicate entry") {
// now the insert failed possibly due to duplication constraints
sqlInsert = "falls back to update"
glog.V(1).Infof("insert %s %s: %v", entry.FullPath, sqlInsert, err)
glog.V(1).InfofCtx(ctx, "insert %s %s: %v", entry.FullPath, sqlInsert, err)
res, err = db.ExecContext(ctx, store.GetSqlUpdate(bucket), meta, util.HashStringToLong(dir), name, dir)
}
if err != nil {
@@ -277,7 +277,7 @@ func (store *AbstractSqlStore) DeleteFolderChildren(ctx context.Context, fullpat
}
}
glog.V(4).Infof("delete %s SQL %s %d", string(shortPath), store.GetSqlDeleteFolderChildren(bucket), util.HashStringToLong(string(shortPath)))
glog.V(4).InfofCtx(ctx, "delete %s SQL %s %d", string(shortPath), store.GetSqlDeleteFolderChildren(bucket), util.HashStringToLong(string(shortPath)))
res, err := db.ExecContext(ctx, store.GetSqlDeleteFolderChildren(bucket), util.HashStringToLong(string(shortPath)), string(shortPath))
if err != nil {
return fmt.Errorf("deleteFolderChildren %s: %s", fullpath, err)
@@ -312,7 +312,7 @@ func (store *AbstractSqlStore) ListDirectoryPrefixedEntries(ctx context.Context,
var name string
var data []byte
if err = rows.Scan(&name, &data); err != nil {
glog.V(0).Infof("scan %s : %v", dirPath, err)
glog.V(0).InfofCtx(ctx, "scan %s : %v", dirPath, err)
return lastFileName, fmt.Errorf("scan %s: %v", dirPath, err)
}
lastFileName = name
@@ -321,7 +321,7 @@ func (store *AbstractSqlStore) ListDirectoryPrefixedEntries(ctx context.Context,
FullPath: util.NewFullPath(string(dirPath), name),
}
if err = entry.DecodeAttributesAndChunks(util.MaybeDecompressData(data)); err != nil {
glog.V(0).Infof("scan decode %s : %v", entry.FullPath, err)
glog.V(0).InfofCtx(ctx, "scan decode %s : %v", entry.FullPath, err)
return lastFileName, fmt.Errorf("scan decode %s : %v", entry.FullPath, err)
}


@@ -31,7 +31,7 @@ func (store *AbstractSqlStore) KvPut(ctx context.Context, key []byte, value []by
}
// now the insert failed possibly due to duplication constraints
glog.V(1).Infof("kv insert falls back to update: %s", err)
glog.V(1).InfofCtx(ctx, "kv insert falls back to update: %s", err)
res, err = db.ExecContext(ctx, store.GetSqlUpdate(DEFAULT_TABLE), value, dirHash, name, dirStr)
if err != nil {


@@ -233,7 +233,7 @@ func (store *ArangodbStore) FindEntry(ctx context.Context, fullpath util.FullPat
if driver.IsNotFound(err) {
return nil, filer_pb.ErrNotFound
}
glog.Errorf("find %s: %v", fullpath, err)
glog.ErrorfCtx(ctx, "find %s: %v", fullpath, err)
return nil, filer_pb.ErrNotFound
}
if len(data.Meta) == 0 {
@@ -257,7 +257,7 @@ func (store *ArangodbStore) DeleteEntry(ctx context.Context, fullpath util.FullP
}
_, err = targetCollection.RemoveDocument(ctx, hashString(string(fullpath)))
if err != nil && !driver.IsNotFound(err) {
glog.Errorf("find %s: %v", fullpath, err)
glog.ErrorfCtx(ctx, "find %s: %v", fullpath, err)
return fmt.Errorf("delete %s : %v", fullpath, err)
}
return nil
@@ -331,7 +331,7 @@ sort d.name asc
converted := arrayToBytes(data.Meta)
if decodeErr := entry.DecodeAttributesAndChunks(util.MaybeDecompressData(converted)); decodeErr != nil {
err = decodeErr
glog.V(0).Infof("list %s : %v", entry.FullPath, err)
glog.V(0).InfofCtx(ctx, "list %s : %v", entry.FullPath, err)
break
}


@@ -38,7 +38,7 @@ func (store *ArangodbStore) KvGet(ctx context.Context, key []byte) (value []byte
return nil, filer.ErrKvNotFound
}
if err != nil {
glog.Errorf("kv get: %s %v", string(key), err)
glog.ErrorfCtx(ctx, "kv get: %s %v", string(key), err)
return nil, filer.ErrKvNotFound
}
return arrayToBytes(model.Meta), nil
@@ -47,7 +47,7 @@ func (store *ArangodbStore) KvGet(ctx context.Context, key []byte) (value []byte
func (store *ArangodbStore) KvDelete(ctx context.Context, key []byte) (err error) {
_, err = store.kvCollection.RemoveDocument(ctx, hashString(".kvstore."+string(key)))
if err != nil {
glog.Errorf("kv del: %v", err)
glog.ErrorfCtx(ctx, "kv del: %v", err)
return filer.ErrKvNotFound
}
return nil


@@ -4,9 +4,10 @@ import (
"context"
"errors"
"fmt"
"github.com/gocql/gocql"
"time"
"github.com/gocql/gocql"
"github.com/seaweedfs/seaweedfs/weed/filer"
"github.com/seaweedfs/seaweedfs/weed/glog"
"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
@@ -202,7 +203,7 @@ func (store *CassandraStore) ListDirectoryEntries(ctx context.Context, dirPath u
lastFileName = name
if decodeErr := entry.DecodeAttributesAndChunks(util.MaybeDecompressData(data)); decodeErr != nil {
err = decodeErr
glog.V(0).Infof("list %s : %v", entry.FullPath, err)
glog.V(0).InfofCtx(ctx, "list %s : %v", entry.FullPath, err)
break
}
if !eachEntryFunc(entry) {
@@ -210,7 +211,7 @@ func (store *CassandraStore) ListDirectoryEntries(ctx context.Context, dirPath u
}
}
if err = iter.Close(); err != nil {
glog.V(0).Infof("list iterator close: %v", err)
glog.V(0).InfofCtx(ctx, "list iterator close: %v", err)
}
return lastFileName, err


@@ -4,9 +4,10 @@ import (
"context"
"errors"
"fmt"
"github.com/gocql/gocql"
"time"
"github.com/gocql/gocql"
"github.com/seaweedfs/seaweedfs/weed/filer"
"github.com/seaweedfs/seaweedfs/weed/glog"
"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
@@ -202,7 +203,7 @@ func (store *Cassandra2Store) ListDirectoryEntries(ctx context.Context, dirPath
lastFileName = name
if decodeErr := entry.DecodeAttributesAndChunks(util.MaybeDecompressData(data)); decodeErr != nil {
err = decodeErr
glog.V(0).Infof("list %s : %v", entry.FullPath, err)
glog.V(0).InfofCtx(ctx, "list %s : %v", entry.FullPath, err)
break
}
if !eachEntryFunc(entry) {
@@ -210,7 +211,7 @@ func (store *Cassandra2Store) ListDirectoryEntries(ctx context.Context, dirPath
}
}
if err = iter.Close(); err != nil {
glog.V(0).Infof("list iterator close: %v", err)
glog.V(0).InfofCtx(ctx, "list iterator close: %v", err)
}
return lastFileName, err


@@ -113,7 +113,7 @@ func (store *ElasticStore) InsertEntry(ctx context.Context, entry *filer.Entry)
}
value, err := jsoniter.Marshal(esEntry)
if err != nil {
glog.Errorf("insert entry(%s) %v.", string(entry.FullPath), err)
glog.ErrorfCtx(ctx, "insert entry(%s) %v.", string(entry.FullPath), err)
return fmt.Errorf("insert entry marshal %v", err)
}
_, err = store.client.Index().
@@ -123,7 +123,7 @@ func (store *ElasticStore) InsertEntry(ctx context.Context, entry *filer.Entry)
BodyJson(string(value)).
Do(ctx)
if err != nil {
glog.Errorf("insert entry(%s) %v.", string(entry.FullPath), err)
glog.ErrorfCtx(ctx, "insert entry(%s) %v.", string(entry.FullPath), err)
return fmt.Errorf("insert entry %v", err)
}
return nil
@@ -152,7 +152,7 @@ func (store *ElasticStore) FindEntry(ctx context.Context, fullpath weed_util.Ful
err := jsoniter.Unmarshal(searchResult.Source, esEntry)
return esEntry.Entry, err
}
glog.Errorf("find entry(%s),%v.", string(fullpath), err)
glog.ErrorfCtx(ctx, "find entry(%s),%v.", string(fullpath), err)
return nil, filer_pb.ErrNotFound
}
@@ -178,7 +178,7 @@ func (store *ElasticStore) deleteIndex(ctx context.Context, index string) (err e
if elastic.IsNotFound(err) || (err == nil && deleteResult.Acknowledged) {
return nil
}
glog.Errorf("delete index(%s) %v.", index, err)
glog.ErrorfCtx(ctx, "delete index(%s) %v.", index, err)
return err
}
@@ -193,14 +193,14 @@ func (store *ElasticStore) deleteEntry(ctx context.Context, index, id string) (e
return nil
}
}
glog.Errorf("delete entry(index:%s,_id:%s) %v.", index, id, err)
glog.ErrorfCtx(ctx, "delete entry(index:%s,_id:%s) %v.", index, id, err)
return fmt.Errorf("delete entry %v", err)
}
func (store *ElasticStore) DeleteFolderChildren(ctx context.Context, fullpath weed_util.FullPath) (err error) {
_, err = store.ListDirectoryEntries(ctx, fullpath, "", false, math.MaxInt32, func(entry *filer.Entry) bool {
if err := store.DeleteEntry(ctx, entry.FullPath); err != nil {
glog.Errorf("elastic delete %s: %v.", entry.FullPath, err)
glog.ErrorfCtx(ctx, "elastic delete %s: %v.", entry.FullPath, err)
return false
}
return true
@@ -228,7 +228,7 @@ func (store *ElasticStore) listDirectoryEntries(
result := &elastic.SearchResult{}
if (startFileName == "" && first) || inclusive {
if result, err = store.search(ctx, index, parentId); err != nil {
glog.Errorf("search (%s,%s,%t,%d) %v.", string(fullpath), startFileName, inclusive, limit, err)
glog.ErrorfCtx(ctx, "search (%s,%s,%t,%d) %v.", string(fullpath), startFileName, inclusive, limit, err)
return
}
} else {
@@ -238,7 +238,7 @@ func (store *ElasticStore) listDirectoryEntries(
}
after := weed_util.Md5String([]byte(fullPath))
if result, err = store.searchAfter(ctx, index, parentId, after); err != nil {
glog.Errorf("searchAfter (%s,%s,%t,%d) %v.", string(fullpath), startFileName, inclusive, limit, err)
glog.ErrorfCtx(ctx, "searchAfter (%s,%s,%t,%d) %v.", string(fullpath), startFileName, inclusive, limit, err)
return
}
}


@@ -25,7 +25,7 @@ func (store *ElasticStore) KvDelete(ctx context.Context, key []byte) (err error)
return nil
}
}
glog.Errorf("delete key(id:%s) %v.", string(key), err)
glog.ErrorfCtx(ctx, "delete key(id:%s) %v.", string(key), err)
return fmt.Errorf("delete key %v", err)
}
@@ -44,7 +44,7 @@ func (store *ElasticStore) KvGet(ctx context.Context, key []byte) (value []byte,
return esEntry.Value, nil
}
}
glog.Errorf("find key(%s),%v.", string(key), err)
glog.ErrorfCtx(ctx, "find key(%s),%v.", string(key), err)
return value, filer.ErrKvNotFound
}
@@ -52,7 +52,7 @@ func (store *ElasticStore) KvPut(ctx context.Context, key []byte, value []byte)
esEntry := &ESKVEntry{value}
val, err := jsoniter.Marshal(esEntry)
if err != nil {
glog.Errorf("insert key(%s) %v.", string(key), err)
glog.ErrorfCtx(ctx, "insert key(%s) %v.", string(key), err)
return fmt.Errorf("insert key %v", err)
}
_, err = store.client.Index().


@@ -4,10 +4,11 @@ import (
"context"
"crypto/tls"
"fmt"
"go.etcd.io/etcd/client/pkg/v3/transport"
"strings"
"time"
"go.etcd.io/etcd/client/pkg/v3/transport"
"go.etcd.io/etcd/client/v3"
"github.com/seaweedfs/seaweedfs/weed/filer"
@@ -95,7 +96,7 @@ func (store *EtcdStore) initialize(servers, username, password string, timeout t
return fmt.Errorf("error checking etcd connection: %s", err)
}
glog.V(0).Infof("сonnection to etcd has been successfully verified. etcd version: %s", resp.Version)
glog.V(0).InfofCtx(ctx, "сonnection to etcd has been successfully verified. etcd version: %s", resp.Version)
store.client = client
return nil
@@ -208,7 +209,7 @@ func (store *EtcdStore) ListDirectoryPrefixedEntries(ctx context.Context, dirPat
}
if decodeErr := entry.DecodeAttributesAndChunks(weed_util.MaybeDecompressData(kv.Value)); decodeErr != nil {
err = decodeErr
glog.V(0).Infof("list %s : %v", entry.FullPath, err)
glog.V(0).InfofCtx(ctx, "list %s : %v", entry.FullPath, err)
break
}
if !eachEntryFunc(entry) {


@@ -106,7 +106,7 @@ func ResolveOneChunkManifest(ctx context.Context, lookupFileIdFn wdclient.Lookup
func fetchWholeChunk(ctx context.Context, bytesBuffer *bytes.Buffer, lookupFileIdFn wdclient.LookupFileIdFunctionType, fileId string, cipherKey []byte, isGzipped bool) error {
urlStrings, err := lookupFileIdFn(ctx, fileId)
if err != nil {
glog.Errorf("operation LookupFileId %s failed, err: %v", fileId, err)
glog.ErrorfCtx(ctx, "operation LookupFileId %s failed, err: %v", fileId, err)
return err
}
err = retriedStreamFetchChunkData(ctx, bytesBuffer, urlStrings, "", cipherKey, isGzipped, true, 0, 0)
@@ -159,7 +159,7 @@ func retriedStreamFetchChunkData(ctx context.Context, writer io.Writer, urlStrin
break
}
if err != nil {
glog.V(0).Infof("read %s failed, err: %v", urlString, err)
glog.V(0).InfofCtx(ctx, "read %s failed, err: %v", urlString, err)
} else {
break
}
@@ -169,7 +169,7 @@ func retriedStreamFetchChunkData(ctx context.Context, writer io.Writer, urlStrin
break
}
if err != nil && shouldRetry {
glog.V(0).Infof("retry reading in %v", waitTime)
glog.V(0).InfofCtx(ctx, "retry reading in %v", waitTime)
time.Sleep(waitTime)
} else {
break


@@ -220,19 +220,19 @@ func (f *Filer) CreateEntry(ctx context.Context, entry *Entry, o_excl bool, isFr
}
}
glog.V(4).Infof("InsertEntry %s: new entry: %v", entry.FullPath, entry.Name())
glog.V(4).InfofCtx(ctx, "InsertEntry %s: new entry: %v", entry.FullPath, entry.Name())
if err := f.Store.InsertEntry(ctx, entry); err != nil {
glog.Errorf("insert entry %s: %v", entry.FullPath, err)
glog.ErrorfCtx(ctx, "insert entry %s: %v", entry.FullPath, err)
return fmt.Errorf("insert entry %s: %v", entry.FullPath, err)
}
} else {
if o_excl {
glog.V(3).Infof("EEXIST: entry %s already exists", entry.FullPath)
glog.V(3).InfofCtx(ctx, "EEXIST: entry %s already exists", entry.FullPath)
return fmt.Errorf("EEXIST: entry %s already exists", entry.FullPath)
}
glog.V(4).Infof("UpdateEntry %s: old entry: %v", entry.FullPath, oldEntry.Name())
glog.V(4).InfofCtx(ctx, "UpdateEntry %s: old entry: %v", entry.FullPath, oldEntry.Name())
if err := f.UpdateEntry(ctx, oldEntry, entry); err != nil {
glog.Errorf("update entry %s: %v", entry.FullPath, err)
glog.ErrorfCtx(ctx, "update entry %s: %v", entry.FullPath, err)
return fmt.Errorf("update entry %s: %v", entry.FullPath, err)
}
}
@@ -241,7 +241,7 @@ func (f *Filer) CreateEntry(ctx context.Context, entry *Entry, o_excl bool, isFr
f.deleteChunksIfNotNew(ctx, oldEntry, entry)
glog.V(4).Infof("CreateEntry %s: created", entry.FullPath)
glog.V(4).InfofCtx(ctx, "CreateEntry %s: created", entry.FullPath)
return nil
}
@@ -256,7 +256,7 @@ func (f *Filer) ensureParentDirectoryEntry(ctx context.Context, entry *Entry, di
// fmt.Printf("%d dirPath: %+v\n", level, dirPath)
// check the store directly
glog.V(4).Infof("find uncached directory: %s", dirPath)
glog.V(4).InfofCtx(ctx, "find uncached directory: %s", dirPath)
dirEntry, _ := f.FindEntry(ctx, util.FullPath(dirPath))
// no such existing directory
@@ -291,11 +291,11 @@ func (f *Filer) ensureParentDirectoryEntry(ctx context.Context, entry *Entry, di
},
}
glog.V(2).Infof("create directory: %s %v", dirPath, dirEntry.Mode)
glog.V(2).InfofCtx(ctx, "create directory: %s %v", dirPath, dirEntry.Mode)
mkdirErr := f.Store.InsertEntry(ctx, dirEntry)
if mkdirErr != nil {
if fEntry, err := f.FindEntry(ctx, util.FullPath(dirPath)); err == filer_pb.ErrNotFound || fEntry == nil {
glog.V(3).Infof("mkdir %s: %v", dirPath, mkdirErr)
glog.V(3).InfofCtx(ctx, "mkdir %s: %v", dirPath, mkdirErr)
return fmt.Errorf("mkdir %s: %v", dirPath, mkdirErr)
}
} else {
@@ -305,7 +305,7 @@ func (f *Filer) ensureParentDirectoryEntry(ctx context.Context, entry *Entry, di
}
} else if !dirEntry.IsDirectory() {
glog.Errorf("CreateEntry %s: %s should be a directory", entry.FullPath, dirPath)
glog.ErrorfCtx(ctx, "CreateEntry %s: %s should be a directory", entry.FullPath, dirPath)
return fmt.Errorf("%s is a file", dirPath)
}
@@ -316,11 +316,11 @@ func (f *Filer) UpdateEntry(ctx context.Context, oldEntry, entry *Entry) (err er
if oldEntry != nil {
entry.Attr.Crtime = oldEntry.Attr.Crtime
if oldEntry.IsDirectory() && !entry.IsDirectory() {
glog.Errorf("existing %s is a directory", oldEntry.FullPath)
glog.ErrorfCtx(ctx, "existing %s is a directory", oldEntry.FullPath)
return fmt.Errorf("existing %s is a directory", oldEntry.FullPath)
}
if !oldEntry.IsDirectory() && entry.IsDirectory() {
glog.Errorf("existing %s is a file", oldEntry.FullPath)
glog.ErrorfCtx(ctx, "existing %s is a file", oldEntry.FullPath)
return fmt.Errorf("existing %s is a file", oldEntry.FullPath)
}
}


@@ -41,7 +41,7 @@ func (f *Filer) DeleteEntryMetaAndData(ctx context.Context, p util.FullPath, isR
return nil
})
if err != nil {
glog.V(2).Infof("delete directory %s: %v", p, err)
glog.V(2).InfofCtx(ctx, "delete directory %s: %v", p, err)
return fmt.Errorf("delete directory %s: %v", p, err)
}
}
@@ -74,12 +74,12 @@ func (f *Filer) doBatchDeleteFolderMetaAndData(ctx context.Context, entry *Entry
for {
entries, _, err := f.ListDirectoryEntries(ctx, entry.FullPath, lastFileName, includeLastFile, PaginationSize, "", "", "")
if err != nil {
glog.Errorf("list folder %s: %v", entry.FullPath, err)
glog.ErrorfCtx(ctx, "list folder %s: %v", entry.FullPath, err)
return fmt.Errorf("list folder %s: %v", entry.FullPath, err)
}
if lastFileName == "" && !isRecursive && len(entries) > 0 {
// only for first iteration in the loop
glog.V(2).Infof("deleting a folder %s has children: %+v ...", entry.FullPath, entries[0].Name())
glog.V(2).InfofCtx(ctx, "deleting a folder %s has children: %+v ...", entry.FullPath, entries[0].Name())
return fmt.Errorf("%s: %s", MsgFailDelNonEmptyFolder, entry.FullPath)
}
@@ -110,7 +110,7 @@ func (f *Filer) doBatchDeleteFolderMetaAndData(ctx context.Context, entry *Entry
}
}
glog.V(3).Infof("deleting directory %v delete chunks: %v", entry.FullPath, shouldDeleteChunks)
glog.V(3).InfofCtx(ctx, "deleting directory %v delete chunks: %v", entry.FullPath, shouldDeleteChunks)
if storeDeletionErr := f.Store.DeleteFolderChildren(ctx, entry.FullPath); storeDeletionErr != nil {
return fmt.Errorf("filer store delete: %v", storeDeletionErr)
@@ -124,7 +124,7 @@ func (f *Filer) doBatchDeleteFolderMetaAndData(ctx context.Context, entry *Entry
func (f *Filer) doDeleteEntryMetaAndData(ctx context.Context, entry *Entry, shouldDeleteChunks bool, isFromOtherCluster bool, signatures []int32) (err error) {
glog.V(3).Infof("deleting entry %v, delete chunks: %v", entry.FullPath, shouldDeleteChunks)
glog.V(3).InfofCtx(ctx, "deleting entry %v, delete chunks: %v", entry.FullPath, shouldDeleteChunks)
if storeDeletionErr := f.Store.DeleteOneEntry(ctx, entry); storeDeletionErr != nil {
return fmt.Errorf("filer store delete: %v", storeDeletionErr)
@@ -153,7 +153,7 @@ func (f *Filer) DoDeleteCollection(collectionName string) (err error) {
func (f *Filer) maybeDeleteHardLinks(ctx context.Context, hardLinkIds []HardLinkId) {
for _, hardLinkId := range hardLinkIds {
if err := f.Store.DeleteHardLink(ctx, hardLinkId); err != nil {
glog.Errorf("delete hard link id %d : %v", hardLinkId, err)
glog.ErrorfCtx(ctx, "delete hard link id %d : %v", hardLinkId, err)
}
}
}


@@ -93,7 +93,7 @@ func (f *Filer) doDeleteChunks(ctx context.Context, chunks []*filer_pb.FileChunk
}
dataChunks, manifestResolveErr := ResolveOneChunkManifest(ctx, f.MasterClient.LookupFileId, chunk)
if manifestResolveErr != nil {
glog.V(0).Infof("failed to resolve manifest %s: %v", chunk.FileId, manifestResolveErr)
glog.V(0).InfofCtx(ctx, "failed to resolve manifest %s: %v", chunk.FileId, manifestResolveErr)
}
for _, dChunk := range dataChunks {
f.fileIdDeletionQueue.EnQueue(dChunk.GetFileIdString())
@@ -119,7 +119,7 @@ func (f *Filer) deleteChunksIfNotNew(ctx context.Context, oldEntry, newEntry *En
toDelete, err := MinusChunks(ctx, f.MasterClient.GetLookupFileIdFunction(), oldChunks, newChunks)
if err != nil {
glog.Errorf("Failed to resolve old entry chunks when delete old entry chunks. new: %s, old: %s", newChunks, oldChunks)
glog.ErrorfCtx(ctx, "Failed to resolve old entry chunks when delete old entry chunks. new: %s, old: %s", newChunks, oldChunks)
return
}
f.DeleteChunksNotRecursive(toDelete)


@@ -4,6 +4,7 @@ import (
"bytes"
"context"
"fmt"
"github.com/seaweedfs/seaweedfs/weed/glog"
"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
)
@@ -31,7 +32,7 @@ func (fsw *FilerStoreWrapper) handleUpdateToHardLinks(ctx context.Context, entry
// remove old hard link
if err == nil && len(existingEntry.HardLinkId) != 0 && bytes.Compare(existingEntry.HardLinkId, entry.HardLinkId) != 0 {
glog.V(4).Infof("handleUpdateToHardLinks DeleteHardLink %s", entry.FullPath)
glog.V(4).InfofCtx(ctx, "handleUpdateToHardLinks DeleteHardLink %s", entry.FullPath)
if err = fsw.DeleteHardLink(ctx, existingEntry.HardLinkId); err != nil {
return err
}
@@ -50,7 +51,7 @@ func (fsw *FilerStoreWrapper) setHardLink(ctx context.Context, entry *Entry) err
return encodeErr
}
glog.V(4).Infof("setHardLink %v nlink:%d", entry.FullPath, entry.HardLinkCounter)
glog.V(4).InfofCtx(ctx, "setHardLink %v nlink:%d", entry.FullPath, entry.HardLinkCounter)
return fsw.KvPut(ctx, key, newBlob)
}
@@ -63,16 +64,16 @@ func (fsw *FilerStoreWrapper) maybeReadHardLink(ctx context.Context, entry *Entr
value, err := fsw.KvGet(ctx, key)
if err != nil {
glog.Errorf("read %s hardlink %d: %v", entry.FullPath, entry.HardLinkId, err)
glog.ErrorfCtx(ctx, "read %s hardlink %d: %v", entry.FullPath, entry.HardLinkId, err)
return err
}
if err = entry.DecodeAttributesAndChunks(value); err != nil {
glog.Errorf("decode %s hardlink %d: %v", entry.FullPath, entry.HardLinkId, err)
glog.ErrorfCtx(ctx, "decode %s hardlink %d: %v", entry.FullPath, entry.HardLinkId, err)
return err
}
glog.V(4).Infof("maybeReadHardLink %v nlink:%d", entry.FullPath, entry.HardLinkCounter)
glog.V(4).InfofCtx(ctx, "maybeReadHardLink %v nlink:%d", entry.FullPath, entry.HardLinkCounter)
return nil
}
@@ -94,7 +95,7 @@ func (fsw *FilerStoreWrapper) DeleteHardLink(ctx context.Context, hardLinkId Har
entry.HardLinkCounter--
if entry.HardLinkCounter <= 0 {
glog.V(4).Infof("DeleteHardLink KvDelete %v", key)
glog.V(4).InfofCtx(ctx, "DeleteHardLink KvDelete %v", key)
return fsw.KvDelete(ctx, key)
}
@@ -103,7 +104,7 @@ func (fsw *FilerStoreWrapper) DeleteHardLink(ctx context.Context, hardLinkId Har
return encodeErr
}
glog.V(4).Infof("DeleteHardLink KvPut %v", key)
glog.V(4).InfofCtx(ctx, "DeleteHardLink KvPut %v", key)
return fsw.KvPut(ctx, key, newBlob)
}


@@ -192,7 +192,7 @@ func (fsw *FilerStoreWrapper) DeleteEntry(ctx context.Context, fp util.FullPath)
// remove hard link
op := ctx.Value("OP")
if op != "MV" {
glog.V(4).Infof("DeleteHardLink %s", existingEntry.FullPath)
glog.V(4).InfofCtx(ctx, "DeleteHardLink %s", existingEntry.FullPath)
if err = fsw.DeleteHardLink(ctx, existingEntry.HardLinkId); err != nil {
return err
}
@@ -215,7 +215,7 @@ func (fsw *FilerStoreWrapper) DeleteOneEntry(ctx context.Context, existingEntry
// remove hard link
op := ctx.Value("OP")
if op != "MV" {
glog.V(4).Infof("DeleteHardLink %s", existingEntry.FullPath)
glog.V(4).InfofCtx(ctx, "DeleteHardLink %s", existingEntry.FullPath)
if err = fsw.DeleteHardLink(ctx, existingEntry.HardLinkId); err != nil {
return err
}


@@ -203,7 +203,7 @@ func (store *HbaseStore) ListDirectoryPrefixedEntries(ctx context.Context, dirPa
}
if decodeErr := entry.DecodeAttributesAndChunks(util.MaybeDecompressData(value)); decodeErr != nil {
err = decodeErr
glog.V(0).Infof("list %s : %v", entry.FullPath, err)
glog.V(0).InfofCtx(ctx, "list %s : %v", entry.FullPath, err)
break
}
if !eachEntryFunc(entry) {


@@ -4,13 +4,14 @@ import (
"bytes"
"context"
"fmt"
"io"
"os"
"github.com/syndtr/goleveldb/leveldb"
leveldb_errors "github.com/syndtr/goleveldb/leveldb/errors"
"github.com/syndtr/goleveldb/leveldb/filter"
"github.com/syndtr/goleveldb/leveldb/opt"
leveldb_util "github.com/syndtr/goleveldb/leveldb/util"
"io"
"os"
"github.com/seaweedfs/seaweedfs/weed/filer"
"github.com/seaweedfs/seaweedfs/weed/glog"
@@ -205,7 +206,7 @@ func (store *LevelDBStore) ListDirectoryPrefixedEntries(ctx context.Context, dir
}
if decodeErr := entry.DecodeAttributesAndChunks(weed_util.MaybeDecompressData(iter.Value())); decodeErr != nil {
err = decodeErr
glog.V(0).Infof("list %s : %v", entry.FullPath, err)
glog.V(0).InfofCtx(ctx, "list %s : %v", entry.FullPath, err)
break
}
if !eachEntryFunc(entry) {


@@ -213,7 +213,7 @@ func (store *LevelDB2Store) ListDirectoryPrefixedEntries(ctx context.Context, di
// println("list", entry.FullPath, "chunks", len(entry.GetChunks()))
if decodeErr := entry.DecodeAttributesAndChunks(weed_util.MaybeDecompressData(iter.Value())); decodeErr != nil {
err = decodeErr
glog.V(0).Infof("list %s : %v", entry.FullPath, err)
glog.V(0).InfofCtx(ctx, "list %s : %v", entry.FullPath, err)
break
}
if !eachEntryFunc(entry) {


@@ -342,7 +342,7 @@ func (store *LevelDB3Store) ListDirectoryPrefixedEntries(ctx context.Context, di
// println("list", entry.FullPath, "chunks", len(entry.GetChunks()))
if decodeErr := entry.DecodeAttributesAndChunks(weed_util.MaybeDecompressData(iter.Value())); decodeErr != nil {
err = decodeErr
glog.V(0).Infof("list %s : %v", entry.FullPath, err)
glog.V(0).InfofCtx(ctx, "list %s : %v", entry.FullPath, err)
break
}
if !eachEntryFunc(entry) {


@@ -187,7 +187,7 @@ func (store *MongodbStore) FindEntry(ctx context.Context, fullpath util.FullPath
var where = bson.M{"directory": dir, "name": name}
err = store.connect.Database(store.database).Collection(store.collectionName).FindOne(ctx, where).Decode(&data)
if err != mongo.ErrNoDocuments && err != nil {
glog.Errorf("find %s: %v", fullpath, err)
glog.ErrorfCtx(ctx, "find %s: %v", fullpath, err)
return nil, filer_pb.ErrNotFound
}
@@ -272,7 +272,7 @@ func (store *MongodbStore) ListDirectoryPrefixedEntries(ctx context.Context, dir
lastFileName = data.Name
if decodeErr := entry.DecodeAttributesAndChunks(util.MaybeDecompressData(data.Meta)); decodeErr != nil {
err = decodeErr
glog.V(0).Infof("list %s : %v", entry.FullPath, err)
glog.V(0).InfofCtx(ctx, "list %s : %v", entry.FullPath, err)
break
}
@@ -283,7 +283,7 @@ func (store *MongodbStore) ListDirectoryPrefixedEntries(ctx context.Context, dir
}
if err := cur.Close(ctx); err != nil {
glog.V(0).Infof("list iterator close: %v", err)
glog.V(0).InfofCtx(ctx, "list iterator close: %v", err)
}
return lastFileName, err


@@ -3,6 +3,7 @@ package mongodb
import (
"context"
"fmt"
"github.com/seaweedfs/seaweedfs/weed/filer"
"github.com/seaweedfs/seaweedfs/weed/glog"
"go.mongodb.org/mongo-driver/bson"
@@ -37,7 +38,7 @@ func (store *MongodbStore) KvGet(ctx context.Context, key []byte) (value []byte,
var where = bson.M{"directory": dir, "name": name}
err = store.connect.Database(store.database).Collection(store.collectionName).FindOne(ctx, where).Decode(&data)
if err != mongo.ErrNoDocuments && err != nil {
glog.Errorf("kv get: %v", err)
glog.ErrorfCtx(ctx, "kv get: %v", err)
return nil, filer.ErrKvNotFound
}


@@ -47,7 +47,7 @@ func LookupFn(filerClient filer_pb.FilerClient) wdclient.LookupFileIdFunctionTyp
locations = resp.LocationsMap[vid]
if locations == nil || len(locations.Locations) == 0 {
glog.V(0).Infof("failed to locate %s", fileId)
glog.V(0).InfofCtx(ctx, "failed to locate %s", fileId)
return fmt.Errorf("failed to locate %s", fileId)
}
vicCacheLock.Lock()


@@ -179,7 +179,7 @@ func (store *UniversalRedisStore) ListDirectoryEntries(ctx context.Context, dirP
entry, err := store.FindEntry(ctx, path)
lastFileName = fileName
if err != nil {
glog.V(0).Infof("list %s : %v", path, err)
glog.V(0).InfofCtx(ctx, "list %s : %v", path, err)
if err == filer_pb.ErrNotFound {
continue
}


@@ -194,7 +194,7 @@ func (store *UniversalRedis2Store) ListDirectoryEntries(ctx context.Context, dir
entry, err := store.FindEntry(ctx, path)
lastFileName = fileName
if err != nil {
glog.V(0).Infof("list %s : %v", path, err)
glog.V(0).InfofCtx(ctx, "list %s : %v", path, err)
if err == filer_pb.ErrNotFound {
continue
}


@@ -3,6 +3,7 @@ package redis3
import (
"context"
"fmt"
"github.com/redis/go-redis/v9"
"github.com/seaweedfs/seaweedfs/weed/glog"
)
@@ -31,7 +32,7 @@ func insertChild(ctx context.Context, redisStore *UniversalRedis3Store, key stri
nameList := LoadItemList([]byte(data), key, client, store, maxNameBatchSizeLimit)
if err := nameList.WriteName(name); err != nil {
glog.Errorf("add %s %s: %v", key, name, err)
glog.ErrorfCtx(ctx, "add %s %s: %v", key, name, err)
return err
}
@@ -100,7 +101,7 @@ func removeChildren(ctx context.Context, redisStore *UniversalRedis3Store, key s
if err = nameList.ListNames("", func(name string) bool {
if err := onDeleteFn(name); err != nil {
glog.Errorf("delete %s child %s: %v", key, name, err)
glog.ErrorfCtx(ctx, "delete %s child %s: %v", key, name, err)
return false
}
return true


@@ -151,7 +151,7 @@ func (store *UniversalRedis3Store) ListDirectoryEntries(ctx context.Context, dir
entry, err := store.FindEntry(ctx, path)
lastFileName = fileName
if err != nil {
glog.V(0).Infof("list %s : %v", path, err)
glog.V(0).InfofCtx(ctx, "list %s : %v", path, err)
if err == filer_pb.ErrNotFound {
return true
}


@@ -162,7 +162,7 @@ func (store *UniversalRedisLuaStore) ListDirectoryEntries(ctx context.Context, d
entry, err := store.FindEntry(ctx, path)
lastFileName = fileName
if err != nil {
glog.V(0).Infof("list %s : %v", path, err)
glog.V(0).InfofCtx(ctx, "list %s : %v", path, err)
if err == filer_pb.ErrNotFound {
continue
}


@@ -266,7 +266,7 @@ func (store *RocksDBStore) ListDirectoryPrefixedEntries(ctx context.Context, dir
// println("list", entry.FullPath, "chunks", len(entry.GetChunks()))
if decodeErr := entry.DecodeAttributesAndChunks(value); decodeErr != nil {
err = decodeErr
glog.V(0).Infof("list %s : %v", entry.FullPath, err)
glog.V(0).InfofCtx(ctx, "list %s : %v", entry.FullPath, err)
return false
}
if !eachEntryFunc(entry) {


@@ -82,7 +82,7 @@ func noJwtFunc(string) string {
}
func PrepareStreamContentWithThrottler(ctx context.Context, masterClient wdclient.HasLookupFileIdFunction, jwtFunc VolumeServerJwtFunction, chunks []*filer_pb.FileChunk, offset int64, size int64, downloadMaxBytesPs int64) (DoStreamContent, error) {
glog.V(4).Infof("prepare to stream content for chunks: %d", len(chunks))
glog.V(4).InfofCtx(ctx, "prepare to stream content for chunks: %d", len(chunks))
chunkViews := ViewFromChunks(ctx, masterClient.GetLookupFileIdFunction(), chunks, offset, size)
fileId2Url := make(map[string][]string)
@@ -96,15 +96,15 @@ func PrepareStreamContentWithThrottler(ctx context.Context, masterClient wdclien
if err == nil && len(urlStrings) > 0 {
break
}
glog.V(4).Infof("waiting for chunk: %s", chunkView.FileId)
glog.V(4).InfofCtx(ctx, "waiting for chunk: %s", chunkView.FileId)
time.Sleep(backoff)
}
if err != nil {
glog.V(1).Infof("operation LookupFileId %s failed, err: %v", chunkView.FileId, err)
glog.V(1).InfofCtx(ctx, "operation LookupFileId %s failed, err: %v", chunkView.FileId, err)
return nil, err
} else if len(urlStrings) == 0 {
errUrlNotFound := fmt.Errorf("operation LookupFileId %s failed, err: urls not found", chunkView.FileId)
-glog.Error(errUrlNotFound)
+glog.ErrorCtx(ctx, errUrlNotFound)
return nil, errUrlNotFound
}
fileId2Url[chunkView.FileId] = urlStrings
@@ -118,7 +118,7 @@ func PrepareStreamContentWithThrottler(ctx context.Context, masterClient wdclien
if offset < chunkView.ViewOffset {
gap := chunkView.ViewOffset - offset
remaining -= gap
glog.V(4).Infof("zero [%d,%d)", offset, chunkView.ViewOffset)
glog.V(4).InfofCtx(ctx, "zero [%d,%d)", offset, chunkView.ViewOffset)
err := writeZero(writer, gap)
if err != nil {
return fmt.Errorf("write zero [%d,%d)", offset, chunkView.ViewOffset)
@@ -140,7 +140,7 @@ func PrepareStreamContentWithThrottler(ctx context.Context, masterClient wdclien
downloadThrottler.MaybeSlowdown(int64(chunkView.ViewSize))
}
if remaining > 0 {
glog.V(4).Infof("zero [%d,%d)", offset, offset+remaining)
glog.V(4).InfofCtx(ctx, "zero [%d,%d)", offset, offset+remaining)
err := writeZero(writer, remaining)
if err != nil {
return fmt.Errorf("write zero [%d,%d)", offset, offset+remaining)
@@ -192,7 +192,7 @@ func ReadAll(ctx context.Context, buffer []byte, masterClient *wdclient.MasterCl
chunkView := x.Value
urlStrings, err := lookupFileIdFn(ctx, chunkView.FileId)
if err != nil {
glog.V(1).Infof("operation LookupFileId %s failed, err: %v", chunkView.FileId, err)
glog.V(1).InfofCtx(ctx, "operation LookupFileId %s failed, err: %v", chunkView.FileId, err)
return err
}


@@ -260,39 +260,39 @@ func (store *TarantoolStore) ListDirectoryEntries(ctx context.Context, dirPath w
}
if len(results) < 1 {
glog.Errorf("Can't find results, data is empty")
glog.ErrorfCtx(ctx, "Can't find results, data is empty")
return
}
rows, ok := results[0].([]interface{})
if !ok {
glog.Errorf("Can't convert results[0] to list")
glog.ErrorfCtx(ctx, "Can't convert results[0] to list")
return
}
for _, result := range rows {
row, ok := result.([]interface{})
if !ok {
glog.Errorf("Can't convert result to list")
glog.ErrorfCtx(ctx, "Can't convert result to list")
return
}
if len(row) < 5 {
glog.Errorf("Length of result is less than needed: %v", len(row))
glog.ErrorfCtx(ctx, "Length of result is less than needed: %v", len(row))
return
}
nameRaw := row[2]
name, ok := nameRaw.(string)
if !ok {
glog.Errorf("Can't convert name field to string. Actual type: %v, value: %v", reflect.TypeOf(nameRaw), nameRaw)
glog.ErrorfCtx(ctx, "Can't convert name field to string. Actual type: %v, value: %v", reflect.TypeOf(nameRaw), nameRaw)
return
}
dataRaw := row[4]
data, ok := dataRaw.(string)
if !ok {
glog.Errorf("Can't convert data field to string. Actual type: %v, value: %v", reflect.TypeOf(dataRaw), dataRaw)
glog.ErrorfCtx(ctx, "Can't convert data field to string. Actual type: %v, value: %v", reflect.TypeOf(dataRaw), dataRaw)
return
}
@@ -302,7 +302,7 @@ func (store *TarantoolStore) ListDirectoryEntries(ctx context.Context, dirPath w
lastFileName = name
if decodeErr := entry.DecodeAttributesAndChunks(util.MaybeDecompressData([]byte(data))); decodeErr != nil {
err = decodeErr
glog.V(0).Infof("list %s : %v", entry.FullPath, err)
glog.V(0).InfofCtx(ctx, "list %s : %v", entry.FullPath, err)
break
}
if !eachEntryFunc(entry) {


@@ -249,7 +249,7 @@ func (store *TikvStore) ListDirectoryPrefixedEntries(ctx context.Context, dirPat
// println("list", entry.FullPath, "chunks", len(entry.GetChunks()))
if decodeErr := entry.DecodeAttributesAndChunks(util.MaybeDecompressData(iter.Value())); decodeErr != nil {
err = decodeErr
glog.V(0).Infof("list %s : %v", entry.FullPath, err)
glog.V(0).InfofCtx(ctx, "list %s : %v", entry.FullPath, err)
break
}
if err := iter.Next(); !eachEntryFunc(entry) || err != nil {


@@ -6,14 +6,15 @@ package ydb
import (
"context"
"fmt"
"github.com/ydb-platform/ydb-go-sdk/v3/query"
"github.com/ydb-platform/ydb-go-sdk/v3/table/options"
"os"
"path"
"strings"
"sync"
"time"
"github.com/ydb-platform/ydb-go-sdk/v3/query"
"github.com/ydb-platform/ydb-go-sdk/v3/table/options"
"github.com/seaweedfs/seaweedfs/weed/filer"
"github.com/seaweedfs/seaweedfs/weed/filer/abstract_sql"
"github.com/seaweedfs/seaweedfs/weed/glog"
@@ -234,7 +235,7 @@ func (store *YdbStore) DeleteEntry(ctx context.Context, fullpath util.FullPath)
dir, name := fullpath.DirAndName()
tablePathPrefix, shortDir := store.getPrefix(ctx, &dir)
q := withPragma(tablePathPrefix, deleteQuery)
glog.V(4).Infof("DeleteEntry %s, tablePathPrefix %s, shortDir %s", fullpath, *tablePathPrefix, *shortDir)
glog.V(4).InfofCtx(ctx, "DeleteEntry %s, tablePathPrefix %s, shortDir %s", fullpath, *tablePathPrefix, *shortDir)
queryParams := table.NewQueryParameters(
table.ValueParam("$dir_hash", types.Int64Value(util.HashStringToLong(*shortDir))),
table.ValueParam("$directory", types.UTF8Value(*shortDir)),
@@ -433,7 +434,7 @@ func (store *YdbStore) deleteTable(ctx context.Context, prefix string) error {
}); err != nil {
return err
}
glog.V(4).Infof("deleted table %s", prefix)
glog.V(4).InfofCtx(ctx, "deleted table %s", prefix)
return nil
}
@@ -446,11 +447,11 @@ func (store *YdbStore) getPrefix(ctx context.Context, dir *string) (tablePathPre
}
prefixBuckets := store.dirBuckets + "/"
glog.V(4).Infof("dir: %s, prefixBuckets: %s", *dir, prefixBuckets)
glog.V(4).InfofCtx(ctx, "dir: %s, prefixBuckets: %s", *dir, prefixBuckets)
if strings.HasPrefix(*dir, prefixBuckets) {
// detect bucket
bucketAndDir := (*dir)[len(prefixBuckets):]
glog.V(4).Infof("bucketAndDir: %s", bucketAndDir)
glog.V(4).InfofCtx(ctx, "bucketAndDir: %s", bucketAndDir)
var bucket string
if t := strings.Index(bucketAndDir, "/"); t > 0 {
bucket = bucketAndDir[:t]
@@ -465,17 +466,17 @@ func (store *YdbStore) getPrefix(ctx context.Context, dir *string) (tablePathPre
defer store.dbsLock.Unlock()
if _, found := store.dbs[bucket]; !found {
glog.V(4).Infof("bucket %q not in cache, verifying existence via DescribeTable", bucket)
glog.V(4).InfofCtx(ctx, "bucket %q not in cache, verifying existence via DescribeTable", bucket)
tablePath := path.Join(store.tablePathPrefix, bucket, abstract_sql.DEFAULT_TABLE)
err2 := store.DB.Table().Do(ctx, func(ctx context.Context, s table.Session) error {
_, err3 := s.DescribeTable(ctx, tablePath)
return err3
})
if err2 != nil {
glog.V(4).Infof("bucket %q not found (DescribeTable %s failed)", bucket, tablePath)
glog.V(4).InfofCtx(ctx, "bucket %q not found (DescribeTable %s failed)", bucket, tablePath)
return
}
glog.V(4).Infof("bucket %q exists, adding to cache", bucket)
glog.V(4).InfofCtx(ctx, "bucket %q exists, adding to cache", bucket)
store.dbs[bucket] = true
}
bucketPrefix := path.Join(store.tablePathPrefix, bucket)
@@ -487,7 +488,7 @@ func (store *YdbStore) getPrefix(ctx context.Context, dir *string) (tablePathPre
func (store *YdbStore) ensureTables(ctx context.Context) error {
prefixFull := store.tablePathPrefix
glog.V(4).Infof("creating base table %s", prefixFull)
glog.V(4).InfofCtx(ctx, "creating base table %s", prefixFull)
baseTable := path.Join(prefixFull, abstract_sql.DEFAULT_TABLE)
if err := store.DB.Table().Do(ctx, func(ctx context.Context, s table.Session) error {
return s.CreateTable(ctx, baseTable, store.createTableOptions()...)
@@ -495,17 +496,17 @@ func (store *YdbStore) ensureTables(ctx context.Context) error {
return fmt.Errorf("failed to create base table %s: %v", baseTable, err)
}
glog.V(4).Infof("creating bucket tables")
glog.V(4).InfofCtx(ctx, "creating bucket tables")
if store.SupportBucketTable {
store.dbsLock.Lock()
defer store.dbsLock.Unlock()
for bucket := range store.dbs {
glog.V(4).Infof("creating bucket table %s", bucket)
glog.V(4).InfofCtx(ctx, "creating bucket table %s", bucket)
bucketTable := path.Join(prefixFull, bucket, abstract_sql.DEFAULT_TABLE)
if err := store.DB.Table().Do(ctx, func(ctx context.Context, s table.Session) error {
return s.CreateTable(ctx, bucketTable, store.createTableOptions()...)
}); err != nil {
glog.Errorf("failed to create bucket table %s: %v", bucketTable, err)
glog.ErrorfCtx(ctx, "failed to create bucket table %s: %v", bucketTable, err)
}
}
}