Merge branch 'master' into support_ssd_volume

Chris Lu
2021-02-09 11:37:07 -08:00
212 changed files with 5765 additions and 2102 deletions


@@ -15,6 +15,8 @@ var Commands = []*Command{
cmdDownload,
cmdExport,
cmdFiler,
cmdFilerCat,
cmdFilerMetaTail,
cmdFilerReplicate,
cmdFilerSynchronize,
cmdFix,
@@ -25,7 +27,6 @@ var Commands = []*Command{
cmdScaffold,
cmdServer,
cmdShell,
cmdWatch,
cmdUpload,
cmdVersion,
cmdVolume,


@@ -113,7 +113,7 @@ func (scanner *VolumeFileScanner4Export) VisitNeedle(n *needle.Needle, offset in
nv, ok := needleMap.Get(n.Id)
glog.V(3).Infof("key %d offset %d size %d disk_size %d compressed %v ok %v nv %+v",
n.Id, offset, n.Size, n.DiskSize(scanner.version), n.IsCompressed(), ok, nv)
if *showDeleted && n.Size > 0 || ok && nv.Size.IsValid() && nv.Offset.ToAcutalOffset() == offset {
if *showDeleted && n.Size > 0 || ok && nv.Size.IsValid() && nv.Offset.ToActualOffset() == offset {
if newerThanUnix >= 0 && n.HasLastModifiedDate() && n.LastModified < uint64(newerThanUnix) {
glog.V(3).Infof("Skipping this file, as it's old enough: LastModified %d vs %d",
n.LastModified, newerThanUnix)


@@ -3,6 +3,7 @@ package command
import (
"fmt"
"net/http"
"os"
"strconv"
"strings"
"time"
@@ -19,9 +20,11 @@ import (
)
var (
f FilerOptions
filerStartS3 *bool
filerS3Options S3Options
f FilerOptions
filerStartS3 *bool
filerS3Options S3Options
filerStartWebDav *bool
filerWebDavOptions WebDavOption
)
type FilerOptions struct {
@@ -42,7 +45,7 @@ type FilerOptions struct {
cipher *bool
peers *string
metricsHttpPort *int
cacheToFilerLimit *int
saveToFilerLimit *int
defaultLevelDbDirectory *string
}
@@ -64,7 +67,7 @@ func init() {
f.cipher = cmdFiler.Flag.Bool("encryptVolumeData", false, "encrypt data on volume servers")
f.peers = cmdFiler.Flag.String("peers", "", "all filers sharing the same filer store in comma separated ip:port list")
f.metricsHttpPort = cmdFiler.Flag.Int("metricsPort", 0, "Prometheus metrics listen port")
f.cacheToFilerLimit = cmdFiler.Flag.Int("cacheToFilerLimit", 0, "Small files smaller than this limit can be cached in filer store.")
f.saveToFilerLimit = cmdFiler.Flag.Int("saveToFilerLimit", 0, "files smaller than this limit will be saved in filer store")
f.defaultLevelDbDirectory = cmdFiler.Flag.String("defaultStoreDir", ".", "if filer.toml is empty, use an embedded filer store in the directory")
// start s3 on filer
@@ -74,6 +77,16 @@ func init() {
filerS3Options.tlsPrivateKey = cmdFiler.Flag.String("s3.key.file", "", "path to the TLS private key file")
filerS3Options.tlsCertificate = cmdFiler.Flag.String("s3.cert.file", "", "path to the TLS certificate file")
filerS3Options.config = cmdFiler.Flag.String("s3.config", "", "path to the config file")
filerS3Options.allowEmptyFolder = cmdFiler.Flag.Bool("s3.allowEmptyFolder", false, "allow empty folders")
// start webdav on filer
filerStartWebDav = cmdFiler.Flag.Bool("webdav", false, "whether to start webdav gateway")
filerWebDavOptions.port = cmdFiler.Flag.Int("webdav.port", 7333, "webdav server http listen port")
filerWebDavOptions.collection = cmdFiler.Flag.String("webdav.collection", "", "collection to create the files")
filerWebDavOptions.tlsPrivateKey = cmdFiler.Flag.String("webdav.key.file", "", "path to the TLS private key file")
filerWebDavOptions.tlsCertificate = cmdFiler.Flag.String("webdav.cert.file", "", "path to the TLS certificate file")
filerWebDavOptions.cacheDir = cmdFiler.Flag.String("webdav.cacheDir", os.TempDir(), "local cache directory for file chunks")
filerWebDavOptions.cacheSizeMB = cmdFiler.Flag.Int64("webdav.cacheCapacityMB", 1000, "local cache capacity in MB")
}
var cmdFiler = &Command{
@@ -113,6 +126,15 @@ func runFiler(cmd *Command, args []string) bool {
}()
}
if *filerStartWebDav {
filerAddress := fmt.Sprintf("%s:%d", *f.ip, *f.port)
filerWebDavOptions.filer = &filerAddress
go func() {
time.Sleep(2 * time.Second)
filerWebDavOptions.startWebDav()
}()
}
f.startFiler()
return true
@@ -148,7 +170,7 @@ func (fo *FilerOptions) startFiler() {
Host: *fo.ip,
Port: uint32(*fo.port),
Cipher: *fo.cipher,
CacheToFilerLimit: int64(*fo.cacheToFilerLimit),
SaveToFilerLimit: *fo.saveToFilerLimit,
Filers: peers,
})
if nfs_err != nil {

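Taken together, the flags above let a single filer process also expose a WebDAV gateway. A usage sketch, using only flags registered in this file (defaults as declared):

weed filer -webdav -webdav.port=7333 -saveToFilerLimit=1024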
weed/command/filer_cat.go Normal file

@@ -0,0 +1,118 @@
package command
import (
"context"
"fmt"
"github.com/chrislusf/seaweedfs/weed/filer"
"github.com/chrislusf/seaweedfs/weed/pb"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/wdclient"
"google.golang.org/grpc"
"math"
"net/url"
"os"
"strings"
"github.com/chrislusf/seaweedfs/weed/security"
"github.com/chrislusf/seaweedfs/weed/util"
)
var (
filerCat FilerCatOptions
)
type FilerCatOptions struct {
grpcDialOption grpc.DialOption
filerAddress string
filerClient filer_pb.SeaweedFilerClient
output *string
}
func (fco *FilerCatOptions) GetLookupFileIdFunction() wdclient.LookupFileIdFunctionType {
return func(fileId string) (targetUrls []string, err error) {
vid := filer.VolumeId(fileId)
resp, err := fco.filerClient.LookupVolume(context.Background(), &filer_pb.LookupVolumeRequest{
VolumeIds: []string{vid},
})
if err != nil {
return nil, err
}
locations := resp.LocationsMap[vid]
for _, loc := range locations.Locations {
targetUrls = append(targetUrls, fmt.Sprintf("http://%s/%s", loc.Url, fileId))
}
return
}
}
func init() {
cmdFilerCat.Run = runFilerCat // break init cycle
filerCat.output = cmdFilerCat.Flag.String("o", "", "write to file instead of stdout")
}
var cmdFilerCat = &Command{
UsageLine: "filer.cat [-o <file>] http://localhost:8888/path/to/file",
Short: "copy one file to stdout or a local file",
Long: `read one file to stdout or write to a file
`,
}
func runFilerCat(cmd *Command, args []string) bool {
util.LoadConfiguration("security", false)
if len(args) == 0 {
return false
}
filerSource := args[len(args)-1]
filerUrl, err := url.Parse(filerSource)
if err != nil {
fmt.Printf("The last argument should be a URL on filer: %v\n", err)
return false
}
urlPath := filerUrl.Path
if strings.HasSuffix(urlPath, "/") {
fmt.Printf("The last argument should be a file: %v\n", err)
return false
}
filerCat.filerAddress = filerUrl.Host
filerCat.grpcDialOption = security.LoadClientTLS(util.GetViper(), "grpc.client")
dir, name := util.FullPath(urlPath).DirAndName()
writer := os.Stdout
if *filerCat.output != "" {
fmt.Printf("saving %s to %s\n", filerSource, *filerCat.output)
f, err := os.OpenFile(*filerCat.output, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0755)
if err != nil {
fmt.Printf("open file %s: %v\n", *filerCat.output, err)
return false
}
defer f.Close()
writer = f
}
pb.WithFilerClient(filerCat.filerAddress, filerCat.grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
request := &filer_pb.LookupDirectoryEntryRequest{
Name: name,
Directory: dir,
}
respLookupEntry, err := filer_pb.LookupEntry(client, request)
if err != nil {
return err
}
filerCat.filerClient = client
return filer.StreamContent(&filerCat, writer, respLookupEntry.Entry.Chunks, 0, math.MaxInt64)
})
return true
}
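A usage sketch for the new filer.cat command, per its UsageLine and the -o flag registered above:

weed filer.cat http://localhost:8888/path/to/file
weed filer.cat -o /tmp/file.copy http://localhost:8888/path/to/file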


@@ -94,7 +94,7 @@ func runCopy(cmd *Command, args []string) bool {
}
urlPath := filerUrl.Path
if !strings.HasSuffix(urlPath, "/") {
fmt.Printf("The last argument should be a folder and end with \"/\": %v\n", err)
fmt.Printf("The last argument should be a folder and end with \"/\"\n")
return false
}


@@ -0,0 +1,211 @@
package command
import (
"context"
"fmt"
"github.com/golang/protobuf/jsonpb"
jsoniter "github.com/json-iterator/go"
"github.com/olivere/elastic/v7"
"io"
"os"
"path/filepath"
"strings"
"time"
"github.com/chrislusf/seaweedfs/weed/pb"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/security"
"github.com/chrislusf/seaweedfs/weed/util"
)
func init() {
cmdFilerMetaTail.Run = runFilerMetaTail // break init cycle
}
var cmdFilerMetaTail = &Command{
UsageLine: "filer.meta.tail [-filer=localhost:8888] [-pathPrefix=/]",
Short: "see recent changes on a filer",
Long: `See recent changes on a filer.
weed filer.meta.tail -timeAgo=30h | grep truncate
weed filer.meta.tail -timeAgo=30h | jq .
weed filer.meta.tail -timeAgo=30h | jq .eventNotification.newEntry.name
`,
}
var (
tailFiler = cmdFilerMetaTail.Flag.String("filer", "localhost:8888", "filer hostname:port")
tailTarget = cmdFilerMetaTail.Flag.String("pathPrefix", "/", "path to a folder or file, or common prefix for the folders or files on filer")
tailStart = cmdFilerMetaTail.Flag.Duration("timeAgo", 0, "start time before now. \"300ms\", \"1.5h\" or \"2h45m\". Valid time units are \"ns\", \"us\" (or \"µs\"), \"ms\", \"s\", \"m\", \"h\"")
tailPattern = cmdFilerMetaTail.Flag.String("pattern", "", "full path or just filename pattern, ex: \"/home/?opher\", \"*.pdf\", see https://golang.org/pkg/path/filepath/#Match ")
esServers = cmdFilerMetaTail.Flag.String("es", "", "comma-separated elastic servers http://<host:port>")
esIndex = cmdFilerMetaTail.Flag.String("es.index", "seaweedfs", "ES index name")
)
func runFilerMetaTail(cmd *Command, args []string) bool {
grpcDialOption := security.LoadClientTLS(util.GetViper(), "grpc.client")
var filterFunc func(dir, fname string) bool
if *tailPattern != "" {
if strings.Contains(*tailPattern, "/") {
println("watch path pattern", *tailPattern)
filterFunc = func(dir, fname string) bool {
matched, err := filepath.Match(*tailPattern, dir+"/"+fname)
if err != nil {
fmt.Printf("error: %v", err)
}
return matched
}
} else {
println("watch file pattern", *tailPattern)
filterFunc = func(dir, fname string) bool {
matched, err := filepath.Match(*tailPattern, fname)
if err != nil {
fmt.Printf("error: %v", err)
}
return matched
}
}
}
shouldPrint := func(resp *filer_pb.SubscribeMetadataResponse) bool {
if filterFunc == nil {
return true
}
if resp.EventNotification.OldEntry == nil && resp.EventNotification.NewEntry == nil {
return false
}
if resp.EventNotification.OldEntry != nil && filterFunc(resp.Directory, resp.EventNotification.OldEntry.Name) {
return true
}
if resp.EventNotification.NewEntry != nil && filterFunc(resp.EventNotification.NewParentPath, resp.EventNotification.NewEntry.Name) {
return true
}
return false
}
jsonpbMarshaler := jsonpb.Marshaler{
EmitDefaults: false,
}
eachEntryFunc := func(resp *filer_pb.SubscribeMetadataResponse) error {
jsonpbMarshaler.Marshal(os.Stdout, resp)
fmt.Fprintln(os.Stdout)
return nil
}
if *esServers != "" {
var err error
eachEntryFunc, err = sendToElasticSearchFunc(*esServers, *esIndex)
if err != nil {
fmt.Printf("create elastic search client to %s: %+v\n", *esServers, err)
return false
}
}
tailErr := pb.WithFilerClient(*tailFiler, grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
stream, err := client.SubscribeMetadata(ctx, &filer_pb.SubscribeMetadataRequest{
ClientName: "tail",
PathPrefix: *tailTarget,
SinceNs: time.Now().Add(-*tailStart).UnixNano(),
})
if err != nil {
return fmt.Errorf("listen: %v", err)
}
for {
resp, listenErr := stream.Recv()
if listenErr == io.EOF {
return nil
}
if listenErr != nil {
return listenErr
}
if !shouldPrint(resp) {
continue
}
if err = eachEntryFunc(resp); err != nil {
return err
}
}
})
if tailErr != nil {
fmt.Printf("tail %s: %v\n", *tailFiler, tailErr)
}
return true
}
type EsDocument struct {
Dir string `json:"dir,omitempty"`
Name string `json:"name,omitempty"`
IsDirectory bool `json:"isDir,omitempty"`
Size uint64 `json:"size,omitempty"`
Uid uint32 `json:"uid,omitempty"`
Gid uint32 `json:"gid,omitempty"`
UserName string `json:"userName,omitempty"`
Collection string `json:"collection,omitempty"`
Crtime int64 `json:"crtime,omitempty"`
Mtime int64 `json:"mtime,omitempty"`
Mime string `json:"mime,omitempty"`
}
func toEsEntry(event *filer_pb.EventNotification) (*EsDocument, string) {
entry := event.NewEntry
dir, name := event.NewParentPath, entry.Name
id := util.Md5String([]byte(util.NewFullPath(dir, name)))
esEntry := &EsDocument{
Dir: dir,
Name: name,
IsDirectory: entry.IsDirectory,
Size: entry.Attributes.FileSize,
Uid: entry.Attributes.Uid,
Gid: entry.Attributes.Gid,
UserName: entry.Attributes.UserName,
Collection: entry.Attributes.Collection,
Crtime: entry.Attributes.Crtime,
Mtime: entry.Attributes.Mtime,
Mime: entry.Attributes.Mime,
}
return esEntry, id
}
func sendToElasticSearchFunc(servers string, esIndex string) (func(resp *filer_pb.SubscribeMetadataResponse) error, error) {
options := []elastic.ClientOptionFunc{}
options = append(options, elastic.SetURL(strings.Split(servers, ",")...))
options = append(options, elastic.SetSniff(false))
options = append(options, elastic.SetHealthcheck(false))
client, err := elastic.NewClient(options...)
if err != nil {
return nil, err
}
return func(resp *filer_pb.SubscribeMetadataResponse) error {
event := resp.EventNotification
if event.OldEntry != nil &&
(event.NewEntry == nil || resp.Directory != event.NewParentPath || event.OldEntry.Name != event.NewEntry.Name) {
// a delete, or a rename/move away (not an in-place update of the same file)
dir, name := resp.Directory, event.OldEntry.Name
id := util.Md5String([]byte(util.NewFullPath(dir, name)))
println("delete", id)
_, err := client.Delete().Index(esIndex).Id(id).Do(context.Background())
return err
}
if event.NewEntry != nil {
// add a new file or update the same file
esEntry, id := toEsEntry(event)
value, err := jsoniter.Marshal(esEntry)
if err != nil {
return err
}
println(string(value))
_, err = client.Index().Index(esIndex).Id(id).BodyJson(string(value)).Do(context.Background())
return err
}
return nil
}, nil
}
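Usage sketches combining the flags defined above; the Elasticsearch URL is a placeholder:

weed filer.meta.tail -filer=localhost:8888 -pathPrefix=/buckets -pattern="*.pdf"
weed filer.meta.tail -filer=localhost:8888 -es=http://localhost:9200 -es.index=seaweedfs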


@@ -11,10 +11,10 @@ import (
_ "github.com/chrislusf/seaweedfs/weed/replication/sink/b2sink"
_ "github.com/chrislusf/seaweedfs/weed/replication/sink/filersink"
_ "github.com/chrislusf/seaweedfs/weed/replication/sink/gcssink"
_ "github.com/chrislusf/seaweedfs/weed/replication/sink/localsink"
_ "github.com/chrislusf/seaweedfs/weed/replication/sink/s3sink"
"github.com/chrislusf/seaweedfs/weed/replication/sub"
"github.com/chrislusf/seaweedfs/weed/util"
"github.com/spf13/viper"
)
func init() {
@@ -98,13 +98,19 @@ func runFilerReplicate(cmd *Command, args []string) bool {
replicator := replication.NewReplicator(config, "source.filer.", dataSink)
for {
key, m, err := notificationInput.ReceiveMessage()
key, m, onSuccessFn, onFailureFn, err := notificationInput.ReceiveMessage()
if err != nil {
glog.Errorf("receive %s: %+v", key, err)
if onFailureFn != nil {
onFailureFn()
}
continue
}
if key == "" {
// long poll received no messages
if onSuccessFn != nil {
onSuccessFn()
}
continue
}
if m.OldEntry != nil && m.NewEntry == nil {
@@ -116,14 +122,20 @@ func runFilerReplicate(cmd *Command, args []string) bool {
}
if err = replicator.Replicate(context.Background(), key, m); err != nil {
glog.Errorf("replicate %s: %+v", key, err)
if onFailureFn != nil {
onFailureFn()
}
} else {
glog.V(1).Infof("replicated %s", key)
if onSuccessFn != nil {
onSuccessFn()
}
}
}
}
func validateOneEnabledInput(config *viper.Viper) {
func validateOneEnabledInput(config *util.ViperProxy) {
enabledInput := ""
for _, input := range sub.NotificationInputs {
if config.GetBool("notification." + input.GetName() + ".enabled") {

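The new onSuccessFn/onFailureFn callbacks let a queue-backed notification input acknowledge a message only after it is replicated, so a failed replication can be redelivered instead of silently dropped. The invocation itself appears unchanged:

weed filer.replicate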

@@ -35,6 +35,8 @@ type SyncOptions struct {
bDiskType *string
aDebug *bool
bDebug *bool
aProxyByFiler *bool
bProxyByFiler *bool
}
var (
@@ -45,7 +47,7 @@ var (
func init() {
cmdFilerSynchronize.Run = runFilerSynchronize // break init cycle
syncOptions.isActivePassive = cmdFilerSynchronize.Flag.Bool("isActivePassive", false, "one directional follow if true")
syncOptions.isActivePassive = cmdFilerSynchronize.Flag.Bool("isActivePassive", false, "one directional follow from A to B if true")
syncOptions.filerA = cmdFilerSynchronize.Flag.String("a", "", "filer A in one SeaweedFS cluster")
syncOptions.filerB = cmdFilerSynchronize.Flag.String("b", "", "filer B in the other SeaweedFS cluster")
syncOptions.aPath = cmdFilerSynchronize.Flag.String("a.path", "/", "directory to sync on filer A")
@@ -58,6 +60,8 @@ func init() {
syncOptions.bTtlSec = cmdFilerSynchronize.Flag.Int("b.ttlSec", 0, "ttl in seconds on filer B")
syncOptions.aDiskType = cmdFilerSynchronize.Flag.String("a.disk", "", "[hdd|ssd] choose between hard drive or solid state drive on filer A")
syncOptions.bDiskType = cmdFilerSynchronize.Flag.String("b.disk", "", "[hdd|ssd] choose between hard drive or solid state drive on filer B")
syncOptions.aProxyByFiler = cmdFilerSynchronize.Flag.Bool("a.filerProxy", false, "read and write file chunks by filer A instead of volume servers")
syncOptions.bProxyByFiler = cmdFilerSynchronize.Flag.Bool("b.filerProxy", false, "read and write file chunks by filer B instead of volume servers")
syncOptions.aDebug = cmdFilerSynchronize.Flag.Bool("a.debug", false, "debug mode to print out filer A received files")
syncOptions.bDebug = cmdFilerSynchronize.Flag.Bool("b.debug", false, "debug mode to print out filer B received files")
syncCpuProfile = cmdFilerSynchronize.Flag.String("cpuprofile", "", "cpu profile output file")
@@ -66,8 +70,8 @@ func init() {
var cmdFilerSynchronize = &Command{
UsageLine: "filer.sync -a=<oneFilerHost>:<oneFilerPort> -b=<otherFilerHost>:<otherFilerPort>",
Short: "continuously synchronize between two active-active or active-passive SeaweedFS clusters",
Long: `continuously synchronize file changes between two active-active or active-passive filers
Short: "resumable continuous synchronization between two active-active or active-passive SeaweedFS clusters",
Long: `resumable continuous synchronization for file changes between two active-active or active-passive filers
filer.sync listens on filer notifications. If any file is updated, it will fetch the updated content,
and write to the other destination. Different from filer.replicate:
@@ -90,8 +94,8 @@ func runFilerSynchronize(cmd *Command, args []string) bool {
go func() {
for {
err := doSubscribeFilerMetaChanges(grpcDialOption, *syncOptions.filerA, *syncOptions.aPath, *syncOptions.filerB,
*syncOptions.bPath, *syncOptions.bReplication, *syncOptions.bCollection, *syncOptions.bTtlSec, *syncOptions.bDiskType, *syncOptions.bDebug)
err := doSubscribeFilerMetaChanges(grpcDialOption, *syncOptions.filerA, *syncOptions.aPath, *syncOptions.aProxyByFiler, *syncOptions.filerB,
*syncOptions.bPath, *syncOptions.bReplication, *syncOptions.bCollection, *syncOptions.bTtlSec, *syncOptions.bProxyByFiler, *syncOptions.bDiskType, *syncOptions.bDebug)
if err != nil {
glog.Errorf("sync from %s to %s: %v", *syncOptions.filerA, *syncOptions.filerB, err)
time.Sleep(1747 * time.Millisecond)
@@ -102,8 +106,8 @@ func runFilerSynchronize(cmd *Command, args []string) bool {
if !*syncOptions.isActivePassive {
go func() {
for {
err := doSubscribeFilerMetaChanges(grpcDialOption, *syncOptions.filerB, *syncOptions.bPath, *syncOptions.filerA,
*syncOptions.aPath, *syncOptions.aReplication, *syncOptions.aCollection, *syncOptions.aTtlSec, *syncOptions.aDiskType, *syncOptions.aDebug)
err := doSubscribeFilerMetaChanges(grpcDialOption, *syncOptions.filerB, *syncOptions.bPath, *syncOptions.bProxyByFiler, *syncOptions.filerA,
*syncOptions.aPath, *syncOptions.aReplication, *syncOptions.aCollection, *syncOptions.aTtlSec, *syncOptions.aProxyByFiler, *syncOptions.aDiskType, *syncOptions.aDebug)
if err != nil {
glog.Errorf("sync from %s to %s: %v", *syncOptions.filerB, *syncOptions.filerA, err)
time.Sleep(2147 * time.Millisecond)
@@ -117,8 +121,8 @@ func runFilerSynchronize(cmd *Command, args []string) bool {
return true
}
func doSubscribeFilerMetaChanges(grpcDialOption grpc.DialOption, sourceFiler, sourcePath, targetFiler, targetPath string,
replicationStr, collection string, ttlSec int, diskType string, debug bool) error {
func doSubscribeFilerMetaChanges(grpcDialOption grpc.DialOption, sourceFiler, sourcePath string, sourceReadChunkFromFiler bool, targetFiler, targetPath string,
replicationStr, collection string, ttlSec int, sinkWriteChunkByFiler bool, diskType string, debug bool) error {
// read source filer signature
sourceFilerSignature, sourceErr := replication.ReadFilerSignature(grpcDialOption, sourceFiler)
@@ -142,9 +146,9 @@ func doSubscribeFilerMetaChanges(grpcDialOption grpc.DialOption, sourceFiler, so
// create filer sink
filerSource := &source.FilerSource{}
filerSource.DoInitialize(pb.ServerToGrpcAddress(sourceFiler), sourcePath)
filerSource.DoInitialize(sourceFiler, pb.ServerToGrpcAddress(sourceFiler), sourcePath, sourceReadChunkFromFiler)
filerSink := &filersink.FilerSink{}
filerSink.DoInitialize(pb.ServerToGrpcAddress(targetFiler), targetPath, replicationStr, collection, ttlSec, diskType, grpcDialOption)
filerSink.DoInitialize(targetFiler, pb.ServerToGrpcAddress(targetFiler), targetPath, replicationStr, collection, ttlSec, diskType, grpcDialOption, sinkWriteChunkByFiler)
filerSink.SetSourceFiler(filerSource)
processEventFn := func(resp *filer_pb.SubscribeMetadataResponse) error {

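A usage sketch for the new proxy mode, using only flags registered above; with -a.filerProxy/-b.filerProxy, chunk reads and writes go through the filers instead of the volume servers:

weed filer.sync -a=filerA:8888 -b=filerB:8888 -a.filerProxy -b.filerProxy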

@@ -6,25 +6,25 @@ import (
)
type MountOptions struct {
filer *string
filerMountRootPath *string
dir *string
dirAutoCreate *bool
collection *string
replication *string
diskType *string
ttlSec *int
chunkSizeLimitMB *int
concurrentWriters *int
cacheDir *string
cacheSizeMB *int64
dataCenter *string
allowOthers *bool
umaskString *string
nonempty *bool
outsideContainerClusterMode *bool
uidMap *string
gidMap *string
filer *string
filerMountRootPath *string
dir *string
dirAutoCreate *bool
collection *string
replication *string
diskType *string
ttlSec *int
chunkSizeLimitMB *int
concurrentWriters *int
cacheDir *string
cacheSizeMB *int64
dataCenter *string
allowOthers *bool
umaskString *string
nonempty *bool
volumeServerAccess *string
uidMap *string
gidMap *string
}
var (
@@ -45,14 +45,14 @@ func init() {
mountOptions.diskType = cmdMount.Flag.String("disk", "", "[hdd|ssd] choose between hard drive or solid state drive")
mountOptions.ttlSec = cmdMount.Flag.Int("ttl", 0, "file ttl in seconds")
mountOptions.chunkSizeLimitMB = cmdMount.Flag.Int("chunkSizeLimitMB", 2, "local write buffer size, also chunk large files")
mountOptions.concurrentWriters = cmdMount.Flag.Int("concurrentWriters", 0, "limit concurrent goroutine writers if not 0")
mountOptions.concurrentWriters = cmdMount.Flag.Int("concurrentWriters", 128, "limit concurrent goroutine writers if not 0")
mountOptions.cacheDir = cmdMount.Flag.String("cacheDir", os.TempDir(), "local cache directory for file chunks and meta data")
mountOptions.cacheSizeMB = cmdMount.Flag.Int64("cacheCapacityMB", 1000, "local file chunk cache capacity in MB (0 will disable cache)")
mountOptions.dataCenter = cmdMount.Flag.String("dataCenter", "", "prefer to write to the data center")
mountOptions.allowOthers = cmdMount.Flag.Bool("allowOthers", true, "allows other users to access the file system")
mountOptions.umaskString = cmdMount.Flag.String("umask", "022", "octal umask, e.g., 022, 0111")
mountOptions.nonempty = cmdMount.Flag.Bool("nonempty", false, "allows the mounting over a non-empty directory")
mountOptions.outsideContainerClusterMode = cmdMount.Flag.Bool("outsideContainerClusterMode", false, "allows other users to access volume servers with publicUrl")
mountOptions.volumeServerAccess = cmdMount.Flag.String("volumeServerAccess", "direct", "access volume servers by [direct|publicUrl|filerProxy]")
mountOptions.uidMap = cmdMount.Flag.String("map.uid", "", "map local uid to uid on filer, comma-separated <local_uid>:<filer_uid>")
mountOptions.gidMap = cmdMount.Flag.String("map.gid", "", "map local gid to gid on filer, comma-separated <local_gid>:<filer_gid>")
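A usage sketch; -volumeServerAccess generalizes the removed -outsideContainerClusterMode flag (whose old behavior corresponds to -volumeServerAccess=publicUrl), and the -filer flag name is assumed from the MountOptions.filer field above:

weed mount -filer=localhost:8888 -dir=/mnt/seaweedfs -volumeServerAccess=filerProxy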


@@ -59,6 +59,7 @@ func RunMount(option *MountOptions, umask os.FileMode) bool {
return true
}
util.LoadConfiguration("security", false)
// try to connect to filer, filerBucketsPath may be useful later
grpcDialOption := security.LoadClientTLS(util.GetViper(), "grpc.client")
var cipher bool
@@ -79,8 +80,6 @@ func RunMount(option *MountOptions, umask os.FileMode) bool {
dir := util.ResolvePath(*option.dir)
chunkSizeLimitMB := *mountOptions.chunkSizeLimitMB
util.LoadConfiguration("security", false)
fmt.Printf("This is SeaweedFS version %s %s %s\n", util.Version(), runtime.GOOS, runtime.GOARCH)
if dir == "" {
fmt.Printf("Please specify the mount directory via \"-dir\"")
@@ -102,9 +101,9 @@ func RunMount(option *MountOptions, umask os.FileMode) bool {
uid, gid := uint32(0), uint32(0)
mountMode := os.ModeDir | 0777
if err == nil {
mountMode = os.ModeDir | fileInfo.Mode()
mountMode = os.ModeDir | os.FileMode(0777)&^umask
uid, gid = util.GetFileUidGid(fileInfo)
fmt.Printf("mount point owner uid=%d gid=%d mode=%s\n", uid, gid, fileInfo.Mode())
fmt.Printf("mount point owner uid=%d gid=%d mode=%s\n", uid, gid, mountMode)
} else {
fmt.Printf("can not stat %s\n", dir)
return false
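For example, with the default -umask=022, os.FileMode(0777)&^umask evaluates to 0755, so the reported mount point mode is now derived from the umask rather than from the mode of the underlying directory.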
@@ -152,6 +151,8 @@ func RunMount(option *MountOptions, umask os.FileMode) bool {
fuse.MaxReadahead(1024 * 128),
fuse.AsyncRead(),
fuse.WritebackCache(),
fuse.MaxBackground(128),
fuse.CongestionThreshold(128),
}
options = append(options, osSpecificMountOptions()...)
@@ -175,28 +176,30 @@ func RunMount(option *MountOptions, umask os.FileMode) bool {
}
seaweedFileSystem := filesys.NewSeaweedFileSystem(&filesys.Option{
FilerGrpcAddress: filerGrpcAddress,
GrpcDialOption: grpcDialOption,
FilerMountRootPath: mountRoot,
Collection: *option.collection,
Replication: *option.replication,
TtlSec: int32(*option.ttlSec),
DiskType: diskType,
ChunkSizeLimit: int64(chunkSizeLimitMB) * 1024 * 1024,
ConcurrentWriters: *option.concurrentWriters,
CacheDir: *option.cacheDir,
CacheSizeMB: *option.cacheSizeMB,
DataCenter: *option.dataCenter,
EntryCacheTtl: 3 * time.Second,
MountUid: uid,
MountGid: gid,
MountMode: mountMode,
MountCtime: fileInfo.ModTime(),
MountMtime: time.Now(),
Umask: umask,
OutsideContainerClusterMode: *mountOptions.outsideContainerClusterMode,
Cipher: cipher,
UidGidMapper: uidGidMapper,
MountDirectory: dir,
FilerAddress: filer,
FilerGrpcAddress: filerGrpcAddress,
GrpcDialOption: grpcDialOption,
FilerMountRootPath: mountRoot,
Collection: *option.collection,
Replication: *option.replication,
TtlSec: int32(*option.ttlSec),
DiskType: diskType,
ChunkSizeLimit: int64(chunkSizeLimitMB) * 1024 * 1024,
ConcurrentWriters: *option.concurrentWriters,
CacheDir: *option.cacheDir,
CacheSizeMB: *option.cacheSizeMB,
DataCenter: *option.dataCenter,
EntryCacheTtl: 3 * time.Second,
MountUid: uid,
MountGid: gid,
MountMode: mountMode,
MountCtime: fileInfo.ModTime(),
MountMtime: time.Now(),
Umask: umask,
VolumeServerAccess: *mountOptions.volumeServerAccess,
Cipher: cipher,
UidGidMapper: uidGidMapper,
})
// mount
@@ -213,7 +216,9 @@ func RunMount(option *MountOptions, umask os.FileMode) bool {
})
glog.V(0).Infof("mounted %s%s to %s", filer, mountRoot, dir)
err = fs.Serve(c, seaweedFileSystem)
server := fs.New(c, nil)
seaweedFileSystem.Server = server
err = server.Serve(seaweedFileSystem)
// check if the mount process has an error to report
<-c.Ready


@@ -23,13 +23,14 @@ var (
)
type S3Options struct {
filer *string
port *int
config *string
domainName *string
tlsPrivateKey *string
tlsCertificate *string
metricsHttpPort *int
filer *string
port *int
config *string
domainName *string
tlsPrivateKey *string
tlsCertificate *string
metricsHttpPort *int
allowEmptyFolder *bool
}
func init() {
@@ -41,6 +42,7 @@ func init() {
s3StandaloneOptions.tlsPrivateKey = cmdS3.Flag.String("key.file", "", "path to the TLS private key file")
s3StandaloneOptions.tlsCertificate = cmdS3.Flag.String("cert.file", "", "path to the TLS certificate file")
s3StandaloneOptions.metricsHttpPort = cmdS3.Flag.Int("metricsPort", 0, "Prometheus metrics listen port")
s3StandaloneOptions.allowEmptyFolder = cmdS3.Flag.Bool("allowEmptyFolder", false, "allow empty folders")
}
var cmdS3 = &Command{
@@ -181,6 +183,7 @@ func (s3opt *S3Options) startS3Server() bool {
DomainName: *s3opt.domainName,
BucketsPath: filerBucketsPath,
GrpcDialOption: grpcDialOption,
AllowEmptyFolder: *s3opt.allowEmptyFolder,
})
if s3ApiServer_err != nil {
glog.Fatalf("S3 API Server startup error: %v", s3ApiServer_err)

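A usage sketch for the standalone gateway with the new flag; the -filer flag name is assumed from the S3Options.filer field:

weed s3 -filer=localhost:8888 -allowEmptyFolder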

@@ -44,6 +44,8 @@ func runScaffold(cmd *Command, args []string) bool {
content = SECURITY_TOML_EXAMPLE
case "master":
content = MASTER_TOML_EXAMPLE
case "shell":
content = SHELL_TOML_EXAMPLE
}
if content == "" {
println("need a valid -config option")
@@ -85,9 +87,21 @@ buckets_folder = "/buckets"
# local on disk, mostly for simple single-machine setup, fairly scalable
# faster than previous leveldb, recommended.
enabled = true
dir = "." # directory to store level db files
dir = "./filerldb2" # directory to store level db files
[mysql] # or tidb
[leveldb3]
# similar to leveldb2.
# each bucket has its own meta store.
enabled = false
dir = "./filerldb3" # directory to store level db files
[rocksdb]
# local on disk, similar to leveldb
# since it is using a C wrapper, you need to install rocksdb and build it by yourself
enabled = false
dir = "./filerrdb" # directory to store rocksdb files
[mysql] # or memsql, tidb
# CREATE TABLE IF NOT EXISTS filemeta (
# dirhash BIGINT COMMENT 'first 64 bits of MD5 hash value of directory field',
# name VARCHAR(1000) COMMENT 'directory or file name',
@@ -104,9 +118,31 @@ password = ""
database = "" # create or use an existing database
connection_max_idle = 2
connection_max_open = 100
connection_max_lifetime_seconds = 0
interpolateParams = false
[postgres] # or cockroachdb
[mysql2] # or memsql, tidb
enabled = false
createTable = """
CREATE TABLE IF NOT EXISTS %s (
dirhash BIGINT,
name VARCHAR(1000),
directory TEXT,
meta LONGBLOB,
PRIMARY KEY (dirhash, name)
) DEFAULT CHARSET=utf8;
"""
hostname = "localhost"
port = 3306
username = "root"
password = ""
database = "" # create or use an existing database
connection_max_idle = 2
connection_max_open = 100
connection_max_lifetime_seconds = 0
interpolateParams = false
[postgres] # or cockroachdb, YugabyteDB
# CREATE TABLE IF NOT EXISTS filemeta (
# dirhash BIGINT,
# name VARCHAR(65535),
@@ -119,7 +155,29 @@ hostname = "localhost"
port = 5432
username = "postgres"
password = ""
database = "" # create or use an existing database
database = "postgres" # create or use an existing database
schema = ""
sslmode = "disable"
connection_max_idle = 100
connection_max_open = 100
[postgres2]
enabled = false
createTable = """
CREATE TABLE IF NOT EXISTS %s (
dirhash BIGINT,
name VARCHAR(65535),
directory VARCHAR(65535),
meta bytea,
PRIMARY KEY (dirhash, name)
);
"""
hostname = "localhost"
port = 5432
username = "postgres"
password = ""
database = "postgres" # create or use an existing database
schema = ""
sslmode = "disable"
connection_max_idle = 100
connection_max_open = 100
@@ -141,6 +199,11 @@ password=""
# This changes the data layout. Only add new directories. Removing/Updating will cause data loss.
superLargeDirectories = []
[hbase]
enabled = false
zkquorum = ""
table = "seaweedfs"
[redis2]
enabled = false
address = "localhost:6379"
@@ -161,9 +224,9 @@ addresses = [
]
password = ""
# allows reads from slave servers or the master, but all writes still go to the master
readOnly = true
readOnly = false
# automatically use the closest Redis server for reads
routeByLatency = true
routeByLatency = false
# This changes the data layout. Only add new directories. Removing/Updating will cause data loss.
superLargeDirectories = []
@@ -266,7 +329,8 @@ enabled = false
# This URL will Dial the RabbitMQ server at the URL in the environment
# variable RABBIT_SERVER_URL and open the exchange "myexchange".
# The exchange must have already been created by some other means, like
# the RabbitMQ management plugin.
# the RabbitMQ management plugin. Create myexchange of type fanout and myqueue, then
# create a binding myexchange => myqueue.
topic_url = "rabbit://myexchange"
sub_url = "rabbit://myqueue"
`
@@ -287,6 +351,16 @@ grpcAddress = "localhost:18888"
# i.e., all files with this "prefix" are sent to notification message queue.
directory = "/buckets"
[sink.local]
enabled = false
directory = "/data"
[sink.local_incremental]
# all replicated files are under modified time as yyyy-mm-dd directories
# so each date directory contains all new and updated files.
enabled = false
directory = "/backup"
[sink.filer]
enabled = false
grpcAddress = "localhost:18888"
@@ -454,5 +528,19 @@ copy_other = 1 # create n x 1 = n actual volumes
# if you are doing your own replication or periodic sync of volumes.
treat_replication_as_minimums = false
`
SHELL_TOML_EXAMPLE = `
[cluster]
default = "c1"
[cluster.c1]
master = "localhost:9333" # comma-separated master servers
filer = "localhost:8888" # filer host and port
[cluster.c2]
master = ""
filer = ""
`
)
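Usage sketches for regenerating the templates above; the filer case is assumed to exist alongside the new shell case in the switch:

weed scaffold -config=filer
weed scaffold -config=shell > shell.toml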


@@ -24,6 +24,7 @@ var (
masterOptions MasterOptions
filerOptions FilerOptions
s3Options S3Options
webdavOptions WebDavOption
msgBrokerOptions MessageBrokerOptions
)
@@ -61,9 +62,11 @@ var (
serverMetricsHttpPort = cmdServer.Flag.Int("metricsPort", 0, "Prometheus metrics listen port")
// pulseSeconds = cmdServer.Flag.Int("pulseSeconds", 5, "number of seconds between heartbeats")
isStartingMasterServer = cmdServer.Flag.Bool("master", true, "whether to start master server")
isStartingVolumeServer = cmdServer.Flag.Bool("volume", true, "whether to start volume server")
isStartingFiler = cmdServer.Flag.Bool("filer", false, "whether to start filer")
isStartingS3 = cmdServer.Flag.Bool("s3", false, "whether to start S3 gateway")
isStartingWebDav = cmdServer.Flag.Bool("webdav", false, "whether to start WebDAV gateway")
isStartingMsgBroker = cmdServer.Flag.Bool("msgBroker", false, "whether to start message broker")
serverWhiteList []string
@@ -94,7 +97,7 @@ func init() {
filerOptions.dirListingLimit = cmdServer.Flag.Int("filer.dirListLimit", 1000, "limit sub dir listing size")
filerOptions.cipher = cmdServer.Flag.Bool("filer.encryptVolumeData", false, "encrypt data on volume servers")
filerOptions.peers = cmdServer.Flag.String("filer.peers", "", "all filers sharing the same filer store in comma separated ip:port list")
filerOptions.cacheToFilerLimit = cmdServer.Flag.Int("filer.cacheToFilerLimit", 0, "Small files smaller than this limit can be cached in filer store.")
filerOptions.saveToFilerLimit = cmdServer.Flag.Int("filer.saveToFilerLimit", 0, "files smaller than this limit will be saved in filer store")
serverOptions.v.port = cmdServer.Flag.Int("volume.port", 8080, "volume server http listen port")
serverOptions.v.publicPort = cmdServer.Flag.Int("volume.port.public", 0, "volume server public port")
@@ -114,6 +117,14 @@ func init() {
s3Options.tlsPrivateKey = cmdServer.Flag.String("s3.key.file", "", "path to the TLS private key file")
s3Options.tlsCertificate = cmdServer.Flag.String("s3.cert.file", "", "path to the TLS certificate file")
s3Options.config = cmdServer.Flag.String("s3.config", "", "path to the config file")
s3Options.allowEmptyFolder = cmdServer.Flag.Bool("s3.allowEmptyFolder", false, "allow empty folders")
webdavOptions.port = cmdServer.Flag.Int("webdav.port", 7333, "webdav server http listen port")
webdavOptions.collection = cmdServer.Flag.String("webdav.collection", "", "collection to create the files")
webdavOptions.tlsPrivateKey = cmdServer.Flag.String("webdav.key.file", "", "path to the TLS private key file")
webdavOptions.tlsCertificate = cmdServer.Flag.String("webdav.cert.file", "", "path to the TLS certificate file")
webdavOptions.cacheDir = cmdServer.Flag.String("webdav.cacheDir", os.TempDir(), "local cache directory for file chunks")
webdavOptions.cacheSizeMB = cmdServer.Flag.Int64("webdav.cacheCapacityMB", 1000, "local cache capacity in MB")
msgBrokerOptions.port = cmdServer.Flag.Int("msgBroker.port", 17777, "broker gRPC listen port")
@@ -136,6 +147,9 @@ func runServer(cmd *Command, args []string) bool {
if *isStartingS3 {
*isStartingFiler = true
}
if *isStartingWebDav {
*isStartingFiler = true
}
if *isStartingMsgBroker {
*isStartingFiler = true
}
@@ -170,6 +184,7 @@ func runServer(cmd *Command, args []string) bool {
filerAddress := fmt.Sprintf("%s:%d", *serverIp, *filerOptions.port)
s3Options.filer = &filerAddress
webdavOptions.filer = &filerAddress
msgBrokerOptions.filer = &filerAddress
runtime.GOMAXPROCS(runtime.NumCPU())
@@ -211,6 +226,15 @@ func runServer(cmd *Command, args []string) bool {
}()
}
if *isStartingWebDav {
go func() {
time.Sleep(2 * time.Second)
webdavOptions.startWebDav()
}()
}
if *isStartingMsgBroker {
go func() {
time.Sleep(2 * time.Second)
@@ -224,7 +248,11 @@ func runServer(cmd *Command, args []string) bool {
}
startMaster(masterOptions, serverWhiteList)
if *isStartingMasterServer {
go startMaster(masterOptions, serverWhiteList)
}
select {}
return true
}
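With the wiring above, one process can start every gateway; a sketch using only flags defined in this file:

weed server -filer -s3 -webdav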


@@ -11,12 +11,14 @@ import (
var (
shellOptions shell.ShellOptions
shellInitialFiler *string
shellCluster *string
)
func init() {
cmdShell.Run = runShell // break init cycle
shellOptions.Masters = cmdShell.Flag.String("master", "localhost:9333", "comma-separated master servers")
shellInitialFiler = cmdShell.Flag.String("filer", "localhost:8888", "filer host and port")
shellOptions.Masters = cmdShell.Flag.String("master", "", "comma-separated master servers, e.g. localhost:9333")
shellInitialFiler = cmdShell.Flag.String("filer", "", "filer host and port, e.g. localhost:8888")
shellCluster = cmdShell.Flag.String("cluster", "", "cluster defined in shell.toml")
}
var cmdShell = &Command{
@@ -24,6 +26,8 @@ var cmdShell = &Command{
Short: "run interactive administrative commands",
Long: `run interactive administrative commands.
Generate shell.toml via "weed scaffold -config=shell"
`,
}
@@ -32,6 +36,23 @@ func runShell(command *Command, args []string) bool {
util.LoadConfiguration("security", false)
shellOptions.GrpcDialOption = security.LoadClientTLS(util.GetViper(), "grpc.client")
if *shellOptions.Masters == "" && *shellInitialFiler == "" {
util.LoadConfiguration("shell", false)
v := util.GetViper()
cluster := v.GetString("cluster.default")
if *shellCluster != "" {
cluster = *shellCluster
}
if cluster == "" {
*shellOptions.Masters, *shellInitialFiler = "localhost:9333", "localhost:8888"
} else {
*shellOptions.Masters = v.GetString("cluster." + cluster + ".master")
*shellInitialFiler = v.GetString("cluster." + cluster + ".filer")
}
}
fmt.Printf("master: %s filer: %s\n", *shellOptions.Masters, *shellInitialFiler)
var err error
shellOptions.FilerHost, shellOptions.FilerPort, err = util.ParseHostPort(*shellInitialFiler)
if err != nil {

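A sketch of the new lookup order: explicit -master/-filer values win; otherwise -cluster (or cluster.default from shell.toml) selects the servers; with neither, the old localhost defaults apply:

weed shell -cluster=c2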

@@ -1,8 +1,12 @@
package command
import (
"context"
"encoding/json"
"fmt"
"github.com/chrislusf/seaweedfs/weed/pb"
"github.com/chrislusf/seaweedfs/weed/pb/master_pb"
"google.golang.org/grpc"
"os"
"path/filepath"
@@ -67,6 +71,15 @@ func runUpload(cmd *Command, args []string) bool {
util.LoadConfiguration("security", false)
grpcDialOption := security.LoadClientTLS(util.GetViper(), "grpc.client")
defaultReplication, err := readMasterConfiguration(grpcDialOption, *upload.master)
if err != nil {
fmt.Printf("upload: %v", err)
return false
}
if *upload.replication == "" {
*upload.replication = defaultReplication
}
if len(args) == 0 {
if *upload.dir == "" {
return false
@@ -106,3 +119,15 @@ func runUpload(cmd *Command, args []string) bool {
}
return true
}
func readMasterConfiguration(grpcDialOption grpc.DialOption, masterAddress string) (replication string, err error) {
err = pb.WithMasterClient(masterAddress, grpcDialOption, func(client master_pb.SeaweedClient) error {
resp, err := client.GetMasterConfiguration(context.Background(), &master_pb.GetMasterConfigurationRequest{})
if err != nil {
return fmt.Errorf("get master %s configuration: %v", masterAddress, err)
}
replication = resp.DefaultReplication
return nil
})
return
}
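With this change, an empty -replication falls back to the master's default replication; a usage sketch, assuming the master flag is named -master as *upload.master suggests:

weed upload -master=localhost:9333 ./some/file.txt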


@@ -1,113 +0,0 @@
package command
import (
"context"
"fmt"
"io"
"path/filepath"
"strings"
"time"
"github.com/chrislusf/seaweedfs/weed/pb"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/security"
"github.com/chrislusf/seaweedfs/weed/util"
)
func init() {
cmdWatch.Run = runWatch // break init cycle
}
var cmdWatch = &Command{
UsageLine: "watch [-filer=localhost:8888] [-target=/]",
Short: "see recent changes on a filer",
Long: `See recent changes on a filer.
`,
}
var (
watchFiler = cmdWatch.Flag.String("filer", "localhost:8888", "filer hostname:port")
watchTarget = cmdWatch.Flag.String("pathPrefix", "/", "path to a folder or file, or common prefix for the folders or files on filer")
watchStart = cmdWatch.Flag.Duration("timeAgo", 0, "start time before now. \"300ms\", \"1.5h\" or \"2h45m\". Valid time units are \"ns\", \"us\" (or \"µs\"), \"ms\", \"s\", \"m\", \"h\"")
watchPattern = cmdWatch.Flag.String("pattern", "", "full path or just filename pattern, ex: \"/home/?opher\", \"*.pdf\", see https://golang.org/pkg/path/filepath/#Match ")
)
func runWatch(cmd *Command, args []string) bool {
grpcDialOption := security.LoadClientTLS(util.GetViper(), "grpc.client")
var filterFunc func(dir, fname string) bool
if *watchPattern != "" {
if strings.Contains(*watchPattern, "/") {
println("watch path pattern", *watchPattern)
filterFunc = func(dir, fname string) bool {
matched, err := filepath.Match(*watchPattern, dir+"/"+fname)
if err != nil {
fmt.Printf("error: %v", err)
}
return matched
}
} else {
println("watch file pattern", *watchPattern)
filterFunc = func(dir, fname string) bool {
matched, err := filepath.Match(*watchPattern, fname)
if err != nil {
fmt.Printf("error: %v", err)
}
return matched
}
}
}
shouldPrint := func(resp *filer_pb.SubscribeMetadataResponse) bool {
if filterFunc == nil {
return true
}
if resp.EventNotification.OldEntry == nil && resp.EventNotification.NewEntry == nil {
return false
}
if resp.EventNotification.OldEntry != nil && filterFunc(resp.Directory, resp.EventNotification.OldEntry.Name) {
return true
}
if resp.EventNotification.NewEntry != nil && filterFunc(resp.EventNotification.NewParentPath, resp.EventNotification.NewEntry.Name) {
return true
}
return false
}
watchErr := pb.WithFilerClient(*watchFiler, grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
stream, err := client.SubscribeMetadata(ctx, &filer_pb.SubscribeMetadataRequest{
ClientName: "watch",
PathPrefix: *watchTarget,
SinceNs: time.Now().Add(-*watchStart).UnixNano(),
})
if err != nil {
return fmt.Errorf("listen: %v", err)
}
for {
resp, listenErr := stream.Recv()
if listenErr == io.EOF {
return nil
}
if listenErr != nil {
return listenErr
}
if !shouldPrint(resp) {
continue
}
fmt.Printf("dir:%s %+v\n", resp.Directory, resp.EventNotification)
}
})
if watchErr != nil {
fmt.Printf("watch %s: %v\n", *watchFiler, watchErr)
}
return true
}