merge master, resolve conflicts

Bl1tz23 committed 2021-08-10 13:45:24 +03:00
224 changed files with 8532 additions and 4013 deletions

View File

@@ -16,7 +16,7 @@ debug_shell:
debug_mount:
go build -gcflags="all=-N -l"
dlv --listen=:2345 --headless=true --api-version=2 --accept-multiclient exec weed -- -v=4 mount -dir=~/tmp/mm -cacheCapacityMB=0 -filer.path=/buckets
dlv --listen=:2345 --headless=true --api-version=2 --accept-multiclient exec weed -- -v=4 mount -dir=~/tmp/mm -cacheCapacityMB=0 -filer.path=/
debug_server:
go build -gcflags="all=-N -l"
@@ -37,3 +37,7 @@ debug_s3:
debug_filer_copy:
go build -gcflags="all=-N -l"
dlv --listen=:2345 --headless=true --api-version=2 --accept-multiclient exec weed -- -v=4 filer.backup -filer=localhost:8888 -filerProxy -timeAgo=10h
debug_filer_remote_sync:
go build -gcflags="all=-N -l"
dlv --listen=:2345 --headless=true --api-version=2 --accept-multiclient exec weed -- -v=4 filer.remote.sync -filer="localhost:8888" -dir=/buckets/b2 -timeAgo=10000h

View File

@@ -0,0 +1,109 @@
package command
import (
"fmt"
flag "github.com/chrislusf/seaweedfs/weed/util/fla9"
"github.com/posener/complete"
completeinstall "github.com/posener/complete/cmd/install"
"runtime"
)
func AutocompleteMain(commands []*Command) bool {
subCommands := make(map[string]complete.Command)
helpSubCommands := make(map[string]complete.Command)
for _, cmd := range commands {
flags := make(map[string]complete.Predictor)
cmd.Flag.VisitAll(func(flag *flag.Flag) {
flags["-"+flag.Name] = complete.PredictAnything
})
subCommands[cmd.Name()] = complete.Command{
Flags: flags,
}
helpSubCommands[cmd.Name()] = complete.Command{}
}
subCommands["help"] = complete.Command{Sub: helpSubCommands}
globalFlags := make(map[string]complete.Predictor)
flag.VisitAll(func(flag *flag.Flag) {
globalFlags["-"+flag.Name] = complete.PredictAnything
})
weedCmd := complete.Command{
Sub: subCommands,
Flags: globalFlags,
GlobalFlags: complete.Flags{"-h": complete.PredictNothing},
}
cmp := complete.New("weed", weedCmd)
return cmp.Complete()
}
func installAutoCompletion() bool {
if runtime.GOOS == "windows" {
fmt.Println("windows is not supported")
return false
}
err := completeinstall.Install("weed")
if err != nil {
fmt.Printf("install failed! %s\n", err)
return false
}
fmt.Printf("autocompletion is enabled. Please restart your shell.\n")
return true
}
func uninstallAutoCompletion() bool {
if runtime.GOOS == "windows" {
fmt.Println("windows is not supported")
return false
}
err := completeinstall.Uninstall("weed")
if err != nil {
fmt.Printf("uninstall failed! %s\n", err)
return false
}
fmt.Printf("autocompletion is disable. Please restart your shell.\n")
return true
}
var cmdAutocomplete = &Command{
Run: runAutocomplete,
UsageLine: "autocomplete",
Short: "install autocomplete",
Long: `weed autocomplete is installed in the shell.
Supported shells are bash, zsh, and fish.
Windows is not supported.
`,
}
func runAutocomplete(cmd *Command, args []string) bool {
if len(args) != 0 {
cmd.Usage()
}
return installAutoCompletion()
}
var cmdUnautocomplete = &Command{
Run: runUnautocomplete,
UsageLine: "autocomplete.uninstall",
Short: "uninstall autocomplete",
Long: `weed autocomplete is uninstalled in the shell.
Windows is not supported.
`,
}
func runUnautocomplete(cmd *Command, args []string) bool {
if len(args) != 0 {
cmd.Usage()
}
return uninstallAutoCompletion()
}
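For context on how the posener/complete flow is driven: when the shell invokes the binary with its completion environment set, cmp.Complete() prints candidates and returns true, so the caller can exit before normal command dispatch. A minimal standalone sketch (the command tree here is a hypothetical reduction of what AutocompleteMain builds from Commands):

package main

import (
	"fmt"

	"github.com/posener/complete"
)

func main() {
	// Hypothetical reduced command tree; weed builds the real one from
	// the registered Commands and their flag sets.
	weedCmd := complete.Command{
		Sub: complete.Commands{
			"mount": {
				Flags: complete.Flags{"-dir": complete.PredictDirs("*")},
			},
		},
	}
	// Complete() returns true only when invoked as a completion helper.
	if complete.New("weed", weedCmd).Complete() {
		return
	}
	fmt.Println("normal command dispatch runs here")
}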

View File

@@ -8,17 +8,20 @@ import (
)
var Commands = []*Command{
cmdBenchmark,
cmdAutocomplete,
cmdUnautocomplete,
cmdBackup,
cmdBenchmark,
cmdCompact,
cmdCopy,
cmdDownload,
cmdExport,
cmdFiler,
cmdFilerBackup,
cmdFilerCat,
cmdFilerCopy,
cmdFilerMetaBackup,
cmdFilerMetaTail,
cmdFilerRemoteSynchronize,
cmdFilerReplicate,
cmdFilerSynchronize,
cmdFix,

View File

@@ -50,6 +50,8 @@ type FilerOptions struct {
saveToFilerLimit *int
defaultLevelDbDirectory *string
concurrentUploadLimitMB *int
debug *bool
debugPort *int
}
func init() {
@@ -73,6 +75,8 @@ func init() {
f.saveToFilerLimit = cmdFiler.Flag.Int("saveToFilerLimit", 0, "files smaller than this limit will be saved in filer store")
f.defaultLevelDbDirectory = cmdFiler.Flag.String("defaultStoreDir", ".", "if filer.toml is empty, use an embedded filer store in the directory")
f.concurrentUploadLimitMB = cmdFiler.Flag.Int("concurrentUploadLimitMB", 128, "limit total concurrent upload size")
f.debug = cmdFiler.Flag.Bool("debug", false, "serves runtime profiling data, e.g., http://localhost:<debug.port>/debug/pprof/goroutine?debug=2")
f.debugPort = cmdFiler.Flag.Int("debug.port", 6060, "http port for debugging")
// start s3 on filer
filerStartS3 = cmdFiler.Flag.Bool("s3", false, "whether to start S3 gateway")
@@ -122,6 +126,9 @@ var cmdFiler = &Command{
}
func runFiler(cmd *Command, args []string) bool {
if *f.debug {
go http.ListenAndServe(fmt.Sprintf(":%d", *f.debugPort), nil)
}
util.LoadConfiguration("security", false)
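The new -debug flag works together with the blank import of net/http/pprof added in weed/command/imports.go later in this commit: that import registers the /debug/pprof/* handlers on http.DefaultServeMux, which is why passing nil as the handler is enough. A minimal standalone sketch of the same pattern:

package main

import (
	"fmt"
	"net/http"
	_ "net/http/pprof" // registers /debug/pprof/* on http.DefaultServeMux
)

func main() {
	// Serves profiling data, e.g. http://localhost:6060/debug/pprof/goroutine?debug=2
	fmt.Println(http.ListenAndServe(":6060", nil))
}

Profiles can then be inspected with, e.g., go tool pprof http://localhost:6060/debug/pprof/heap.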

View File

@@ -1,16 +1,13 @@
package command
import (
"context"
"fmt"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/pb"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/replication/source"
"github.com/chrislusf/seaweedfs/weed/security"
"github.com/chrislusf/seaweedfs/weed/util"
"google.golang.org/grpc"
"io"
"time"
)
@@ -52,11 +49,11 @@ var cmdFilerBackup = &Command{
func runFilerBackup(cmd *Command, args []string) bool {
grpcDialOption := security.LoadClientTLS(util.GetViper(), "grpc.client")
util.LoadConfiguration("security", false)
util.LoadConfiguration("replication", true)
grpcDialOption := security.LoadClientTLS(util.GetViper(), "grpc.client")
for {
err := doFilerBackup(grpcDialOption, &filerBackupOptions)
if err != nil {
@@ -110,48 +107,12 @@ func doFilerBackup(grpcDialOption grpc.DialOption, backupOption *FilerBackupOpti
processEventFn := genProcessFunction(sourcePath, targetPath, dataSink, debug)
return pb.WithFilerClient(sourceFiler, grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
stream, err := client.SubscribeMetadata(ctx, &filer_pb.SubscribeMetadataRequest{
ClientName: "backup_" + dataSink.GetName(),
PathPrefix: sourcePath,
SinceNs: startFrom.UnixNano(),
})
if err != nil {
return fmt.Errorf("listen: %v", err)
}
var counter int64
var lastWriteTime time.Time
for {
resp, listenErr := stream.Recv()
if listenErr == io.EOF {
return nil
}
if listenErr != nil {
return listenErr
}
if err := processEventFn(resp); err != nil {
return fmt.Errorf("processEventFn: %v", err)
}
counter++
if lastWriteTime.Add(3 * time.Second).Before(time.Now()) {
glog.V(0).Infof("backup %s progressed to %v %0.2f/sec", sourceFiler, time.Unix(0, resp.TsNs), float64(counter)/float64(3))
counter = 0
lastWriteTime = time.Now()
if err := setOffset(grpcDialOption, sourceFiler, BackupKeyPrefix, int32(sinkId), resp.TsNs); err != nil {
return fmt.Errorf("setOffset: %v", err)
}
}
}
processEventFnWithOffset := pb.AddOffsetFunc(processEventFn, 3*time.Second, func(counter int64, lastTsNs int64) error {
glog.V(0).Infof("backup %s progressed to %v %0.2f/sec", sourceFiler, time.Unix(0, lastTsNs), float64(counter)/float64(3))
return setOffset(grpcDialOption, sourceFiler, BackupKeyPrefix, int32(sinkId), lastTsNs)
})
return pb.FollowMetadata(sourceFiler, grpcDialOption, "backup_"+dataSink.GetName(),
sourcePath, startFrom.UnixNano(), 0, processEventFnWithOffset, false)
}
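This refactor, repeated below for filer.meta.backup, filer.meta.tail, filer.sync, and the new filer.remote.sync, replaces each hand-rolled SubscribeMetadata receive loop with pb.FollowMetadata, which owns the subscription, plus pb.AddOffsetFunc, which wraps the event handler so the offset-saving callback fires at most once per interval. Judging from the removed loops, a self-contained sketch of the wrapper could look like this (a plain struct stands in for *filer_pb.SubscribeMetadataResponse so the sketch compiles on its own):

package main

import (
	"fmt"
	"time"
)

type event struct{ TsNs int64 } // stand-in for the real metadata response

func addOffsetFunc(process func(*event) error, interval time.Duration,
	saveOffset func(counter int64, lastTsNs int64) error) func(*event) error {
	var counter int64
	var lastWriteTime time.Time
	return func(e *event) error {
		if err := process(e); err != nil {
			return err
		}
		counter++
		// mirror the removed loops: persist progress at most once per interval
		if lastWriteTime.Add(interval).Before(time.Now()) {
			lastWriteTime = time.Now()
			if err := saveOffset(counter, e.TsNs); err != nil {
				return err
			}
			counter = 0
		}
		return nil
	}
}

func main() {
	fn := addOffsetFunc(func(*event) error { return nil }, 3*time.Second,
		func(counter, lastTsNs int64) error {
			fmt.Printf("progressed to %v %0.2f/sec\n", time.Unix(0, lastTsNs), float64(counter)/3)
			return nil
		})
	_ = fn(&event{TsNs: time.Now().UnixNano()})
}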

View File

@@ -52,21 +52,21 @@ type CopyOptions struct {
}
func init() {
cmdCopy.Run = runCopy // break init cycle
cmdCopy.IsDebug = cmdCopy.Flag.Bool("debug", false, "verbose debug information")
copy.include = cmdCopy.Flag.String("include", "", "pattens of files to copy, e.g., *.pdf, *.html, ab?d.txt, works together with -dir")
copy.replication = cmdCopy.Flag.String("replication", "", "replication type")
copy.collection = cmdCopy.Flag.String("collection", "", "optional collection name")
copy.ttl = cmdCopy.Flag.String("ttl", "", "time to live, e.g.: 1m, 1h, 1d, 1M, 1y")
copy.diskType = cmdCopy.Flag.String("disk", "", "[hdd|ssd|<tag>] hard drive or solid state drive or any tag")
copy.maxMB = cmdCopy.Flag.Int("maxMB", 4, "split files larger than the limit")
copy.concurrenctFiles = cmdCopy.Flag.Int("c", 8, "concurrent file copy goroutines")
copy.concurrenctChunks = cmdCopy.Flag.Int("concurrentChunks", 8, "concurrent chunk copy goroutines for each file")
copy.checkSize = cmdCopy.Flag.Bool("check.size", false, "copy when the target file size is different from the source file")
copy.verbose = cmdCopy.Flag.Bool("verbose", false, "print out details during copying")
cmdFilerCopy.Run = runCopy // break init cycle
cmdFilerCopy.IsDebug = cmdFilerCopy.Flag.Bool("debug", false, "verbose debug information")
copy.include = cmdFilerCopy.Flag.String("include", "", "pattens of files to copy, e.g., *.pdf, *.html, ab?d.txt, works together with -dir")
copy.replication = cmdFilerCopy.Flag.String("replication", "", "replication type")
copy.collection = cmdFilerCopy.Flag.String("collection", "", "optional collection name")
copy.ttl = cmdFilerCopy.Flag.String("ttl", "", "time to live, e.g.: 1m, 1h, 1d, 1M, 1y")
copy.diskType = cmdFilerCopy.Flag.String("disk", "", "[hdd|ssd|<tag>] hard drive or solid state drive or any tag")
copy.maxMB = cmdFilerCopy.Flag.Int("maxMB", 4, "split files larger than the limit")
copy.concurrenctFiles = cmdFilerCopy.Flag.Int("c", 8, "concurrent file copy goroutines")
copy.concurrenctChunks = cmdFilerCopy.Flag.Int("concurrentChunks", 8, "concurrent chunk copy goroutines for each file")
copy.checkSize = cmdFilerCopy.Flag.Bool("check.size", false, "copy when the target file size is different from the source file")
copy.verbose = cmdFilerCopy.Flag.Bool("verbose", false, "print out details during copying")
}
var cmdCopy = &Command{
var cmdFilerCopy = &Command{
UsageLine: "filer.copy file_or_dir1 [file_or_dir2 file_or_dir3] http://localhost:8888/path/to/a/folder/",
Short: "copy one or a list of files to a filer folder",
Long: `copy one or a list of files, or batch copy one whole folder recursively, to a filer folder
@@ -154,7 +154,7 @@ func runCopy(cmd *Command, args []string) bool {
}
copy.ttlSec = int32(ttl.Minutes()) * 60
if *cmdCopy.IsDebug {
if *cmdFilerCopy.IsDebug {
grace.SetupProfiling("filer.copy.cpu.pprof", "filer.copy.mem.pprof")
}
@@ -213,11 +213,15 @@ func genFileCopyTask(fileOrDir string, destPath string, fileCopyTaskChan chan Fi
mode := fi.Mode()
uid, gid := util.GetFileUidGid(fi)
fileSize := fi.Size()
if mode.IsDir() {
fileSize = 0
}
fileCopyTaskChan <- FileCopyTask{
sourceLocation: fileOrDir,
destinationUrlPath: destPath,
fileSize: fi.Size(),
fileSize: fileSize,
fileMode: fi.Mode(),
uid: uid,
gid: gid,
@@ -377,6 +381,9 @@ func (worker *FileCopyWorker) uploadFileAsOne(task FileCopyTask, f *os.File) err
if assignResult.Error != "" {
return fmt.Errorf("assign volume failure %v: %v", request, assignResult.Error)
}
if assignResult.Url == "" {
return fmt.Errorf("assign volume failure %v: %v", request, assignResult)
}
return nil
})
})

View File

@@ -7,7 +7,6 @@ import (
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/spf13/viper"
"google.golang.org/grpc"
"io"
"reflect"
"time"
@@ -53,6 +52,7 @@ The backup writes to another filer store specified in a backup_filer.toml.
func runFilerMetaBackup(cmd *Command, args []string) bool {
util.LoadConfiguration("security", false)
metaBackup.grpcDialOption = security.LoadClientTLS(util.GetViper(), "grpc.client")
// load backup_filer.toml
@@ -189,48 +189,15 @@ func (metaBackup *FilerMetaBackupOptions) streamMetadataBackup() error {
return nil
}
tailErr := pb.WithFilerClient(*metaBackup.filerAddress, metaBackup.grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
stream, err := client.SubscribeMetadata(ctx, &filer_pb.SubscribeMetadataRequest{
ClientName: "meta_backup",
PathPrefix: *metaBackup.filerDirectory,
SinceNs: startTime.UnixNano(),
})
if err != nil {
return fmt.Errorf("listen: %v", err)
}
var counter int64
var lastWriteTime time.Time
for {
resp, listenErr := stream.Recv()
if listenErr == io.EOF {
return nil
}
if listenErr != nil {
return listenErr
}
if err = eachEntryFunc(resp); err != nil {
return err
}
counter++
if lastWriteTime.Add(3 * time.Second).Before(time.Now()) {
glog.V(0).Infof("meta backup %s progressed to %v %0.2f/sec", *metaBackup.filerAddress, time.Unix(0, resp.TsNs), float64(counter)/float64(3))
counter = 0
lastWriteTime = time.Now()
if err2 := metaBackup.setOffset(lastWriteTime); err2 != nil {
return err2
}
}
}
processEventFnWithOffset := pb.AddOffsetFunc(eachEntryFunc, 3*time.Second, func(counter int64, lastTsNs int64) error {
lastTime := time.Unix(0, lastTsNs)
glog.V(0).Infof("meta backup %s progressed to %v %0.2f/sec", *metaBackup.filerAddress, lastTime, float64(counter)/float64(3))
return metaBackup.setOffset(lastTime)
})
return tailErr
return pb.FollowMetadata(*metaBackup.filerAddress, metaBackup.grpcDialOption, "meta_backup",
*metaBackup.filerDirectory, startTime.UnixNano(), 0, processEventFnWithOffset, false)
}
func (metaBackup *FilerMetaBackupOptions) getOffset() (lastWriteTime time.Time, err error) {

View File

@@ -3,16 +3,15 @@ package command
import (
"context"
"fmt"
"github.com/chrislusf/seaweedfs/weed/pb"
"github.com/golang/protobuf/jsonpb"
jsoniter "github.com/json-iterator/go"
"github.com/olivere/elastic/v7"
"io"
"os"
"path/filepath"
"strings"
"time"
"github.com/chrislusf/seaweedfs/weed/pb"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/security"
"github.com/chrislusf/seaweedfs/weed/util"
@@ -45,6 +44,7 @@ var (
func runFilerMetaTail(cmd *Command, args []string) bool {
util.LoadConfiguration("security", false)
grpcDialOption := security.LoadClientTLS(util.GetViper(), "grpc.client")
var filterFunc func(dir, fname string) bool
@@ -103,37 +103,18 @@ func runFilerMetaTail(cmd *Command, args []string) bool {
}
}
tailErr := pb.WithFilerClient(*tailFiler, grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
stream, err := client.SubscribeMetadata(ctx, &filer_pb.SubscribeMetadataRequest{
ClientName: "tail",
PathPrefix: *tailTarget,
SinceNs: time.Now().Add(-*tailStart).UnixNano(),
})
if err != nil {
return fmt.Errorf("listen: %v", err)
}
for {
resp, listenErr := stream.Recv()
if listenErr == io.EOF {
tailErr := pb.FollowMetadata(*tailFiler, grpcDialOption, "tail",
*tailTarget, time.Now().Add(-*tailStart).UnixNano(), 0,
func(resp *filer_pb.SubscribeMetadataResponse) error {
if !shouldPrint(resp) {
return nil
}
if listenErr != nil {
return listenErr
}
if !shouldPrint(resp) {
continue
}
if err = eachEntryFunc(resp); err != nil {
if err := eachEntryFunc(resp); err != nil {
return err
}
}
return nil
}, false)
})
if tailErr != nil {
fmt.Printf("tail %s: %v\n", *tailFiler, tailErr)
}

View File

@@ -0,0 +1,257 @@
package command
import (
"context"
"fmt"
"github.com/chrislusf/seaweedfs/weed/filer"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/pb"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/remote_storage"
"github.com/chrislusf/seaweedfs/weed/replication/source"
"github.com/chrislusf/seaweedfs/weed/security"
"github.com/chrislusf/seaweedfs/weed/util"
"github.com/golang/protobuf/proto"
"google.golang.org/grpc"
"time"
)
type RemoteSyncOptions struct {
filerAddress *string
grpcDialOption grpc.DialOption
readChunkFromFiler *bool
debug *bool
timeAgo *time.Duration
dir *string
}
const (
RemoteSyncKeyPrefix = "remote.sync."
)
var _ = filer_pb.FilerClient(&RemoteSyncOptions{})
func (option *RemoteSyncOptions) WithFilerClient(fn func(filer_pb.SeaweedFilerClient) error) error {
return pb.WithFilerClient(*option.filerAddress, option.grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
return fn(client)
})
}
func (option *RemoteSyncOptions) AdjustedUrl(location *filer_pb.Location) string {
return location.Url
}
var (
remoteSyncOptions RemoteSyncOptions
)
func init() {
cmdFilerRemoteSynchronize.Run = runFilerRemoteSynchronize // break init cycle
remoteSyncOptions.filerAddress = cmdFilerRemoteSynchronize.Flag.String("filer", "localhost:8888", "filer of the SeaweedFS cluster")
remoteSyncOptions.dir = cmdFilerRemoteSynchronize.Flag.String("dir", "/", "a mounted directory on filer")
remoteSyncOptions.readChunkFromFiler = cmdFilerRemoteSynchronize.Flag.Bool("filerProxy", false, "read file chunks from filer instead of volume servers")
remoteSyncOptions.debug = cmdFilerRemoteSynchronize.Flag.Bool("debug", false, "debug mode to print out filer updated remote files")
remoteSyncOptions.timeAgo = cmdFilerRemoteSynchronize.Flag.Duration("timeAgo", 0, "start time before now. \"300ms\", \"1.5h\" or \"2h45m\". Valid time units are \"ns\", \"us\" (or \"µs\"), \"ms\", \"s\", \"m\", \"h\"")
}
var cmdFilerRemoteSynchronize = &Command{
UsageLine: "filer.remote.sync -filer=<filerHost>:<filerPort> -dir=/mount/s3_on_cloud",
Short: "resumable continuously write back updates to remote storage if the directory is mounted to the remote storage",
Long: `resumable continuously write back updates to remote storage if the directory is mounted to the remote storage
filer.remote.sync listens on filer update events.
If any mounted remote file is updated, it will fetch the updated content,
and write to the remote storage.
`,
}
func runFilerRemoteSynchronize(cmd *Command, args []string) bool {
util.LoadConfiguration("security", false)
grpcDialOption := security.LoadClientTLS(util.GetViper(), "grpc.client")
remoteSyncOptions.grpcDialOption = grpcDialOption
// read filer remote storage mount mappings
mappings, readErr := filer.ReadMountMappings(grpcDialOption, *remoteSyncOptions.filerAddress)
if readErr != nil {
fmt.Printf("read mount mapping: %v", readErr)
return false
}
filerSource := &source.FilerSource{}
filerSource.DoInitialize(
*remoteSyncOptions.filerAddress,
pb.ServerToGrpcAddress(*remoteSyncOptions.filerAddress),
"/", // does not matter
*remoteSyncOptions.readChunkFromFiler,
)
var found bool
for dir, remoteStorageMountLocation := range mappings.Mappings {
if *remoteSyncOptions.dir == dir {
found = true
storageConf, readErr := filer.ReadRemoteStorageConf(grpcDialOption, *remoteSyncOptions.filerAddress, remoteStorageMountLocation.Name)
if readErr != nil {
fmt.Printf("read remote storage configuration for %s: %v", dir, readErr)
continue
}
fmt.Printf("synchronize %s to remote storage...\n", *remoteSyncOptions.dir)
if err := util.Retry("filer.remote.sync "+dir, func() error {
return followUpdatesAndUploadToRemote(&remoteSyncOptions, filerSource, dir, storageConf, remoteStorageMountLocation)
}); err != nil {
fmt.Printf("synchronize %s: %v\n", *remoteSyncOptions.dir, err)
}
break
}
}
if !found {
fmt.Printf("directory %s is not mounted to any remote storage\n", *remoteSyncOptions.dir)
return false
}
return true
}
func followUpdatesAndUploadToRemote(option *RemoteSyncOptions, filerSource *source.FilerSource, mountedDir string, remoteStorage *filer_pb.RemoteConf, remoteStorageMountLocation *filer_pb.RemoteStorageLocation) error {
dirHash := util.HashStringToLong(mountedDir)
// 1. specified by timeAgo
// 2. last offset timestamp for this directory
// 3. directory creation time
var lastOffsetTs time.Time
if *option.timeAgo == 0 {
mountedDirEntry, err := filer_pb.GetEntry(option, util.FullPath(mountedDir))
if err != nil {
return fmt.Errorf("lookup %s: %v", mountedDir, err)
}
lastOffsetTsNs, err := getOffset(option.grpcDialOption, *option.filerAddress, RemoteSyncKeyPrefix, int32(dirHash))
if err == nil && mountedDirEntry.Attributes.Crtime < lastOffsetTsNs/1000000 {
lastOffsetTs = time.Unix(0, lastOffsetTsNs)
glog.V(0).Infof("resume from %v", lastOffsetTs)
} else {
lastOffsetTs = time.Unix(mountedDirEntry.Attributes.Crtime, 0)
}
} else {
lastOffsetTs = time.Now().Add(-*option.timeAgo)
}
client, err := remote_storage.GetRemoteStorage(remoteStorage)
if err != nil {
return err
}
eachEntryFunc := func(resp *filer_pb.SubscribeMetadataResponse) error {
message := resp.EventNotification
if message.OldEntry == nil && message.NewEntry == nil {
return nil
}
if message.OldEntry == nil && message.NewEntry != nil {
if len(message.NewEntry.Chunks) == 0 {
return nil
}
fmt.Printf("create: %+v\n", resp)
if !shouldSendToRemote(message.NewEntry) {
fmt.Printf("skipping creating: %+v\n", resp)
return nil
}
dest := toRemoteStorageLocation(util.FullPath(mountedDir), util.NewFullPath(message.NewParentPath, message.NewEntry.Name), remoteStorageMountLocation)
if message.NewEntry.IsDirectory {
return client.WriteDirectory(dest, message.NewEntry)
}
reader := filer.NewChunkStreamReader(filerSource, message.NewEntry.Chunks)
remoteEntry, writeErr := client.WriteFile(dest, message.NewEntry, reader)
if writeErr != nil {
return writeErr
}
return updateLocalEntry(&remoteSyncOptions, message.NewParentPath, message.NewEntry, remoteEntry)
}
if message.OldEntry != nil && message.NewEntry == nil {
fmt.Printf("delete: %+v\n", resp)
dest := toRemoteStorageLocation(util.FullPath(mountedDir), util.NewFullPath(resp.Directory, message.OldEntry.Name), remoteStorageMountLocation)
return client.DeleteFile(dest)
}
if message.OldEntry != nil && message.NewEntry != nil {
oldDest := toRemoteStorageLocation(util.FullPath(mountedDir), util.NewFullPath(resp.Directory, message.OldEntry.Name), remoteStorageMountLocation)
dest := toRemoteStorageLocation(util.FullPath(mountedDir), util.NewFullPath(message.NewParentPath, message.NewEntry.Name), remoteStorageMountLocation)
if !shouldSendToRemote(message.NewEntry) {
fmt.Printf("skipping updating: %+v\n", resp)
return nil
}
if message.NewEntry.IsDirectory {
return client.WriteDirectory(dest, message.NewEntry)
}
if resp.Directory == message.NewParentPath && message.OldEntry.Name == message.NewEntry.Name {
if isSameChunks(message.OldEntry.Chunks, message.NewEntry.Chunks) {
fmt.Printf("update meta: %+v\n", resp)
return client.UpdateFileMetadata(dest, message.NewEntry)
}
}
fmt.Printf("update: %+v\n", resp)
if err := client.DeleteFile(oldDest); err != nil {
return err
}
reader := filer.NewChunkStreamReader(filerSource, message.NewEntry.Chunks)
remoteEntry, writeErr := client.WriteFile(dest, message.NewEntry, reader)
if writeErr != nil {
return writeErr
}
return updateLocalEntry(&remoteSyncOptions, message.NewParentPath, message.NewEntry, remoteEntry)
}
return nil
}
processEventFnWithOffset := pb.AddOffsetFunc(eachEntryFunc, 3*time.Second, func(counter int64, lastTsNs int64) error {
lastTime := time.Unix(0, lastTsNs)
glog.V(0).Infof("remote sync %s progressed to %v %0.2f/sec", *option.filerAddress, lastTime, float64(counter)/float64(3))
return setOffset(option.grpcDialOption, *option.filerAddress, RemoteSyncKeyPrefix, int32(dirHash), lastTsNs)
})
return pb.FollowMetadata(*option.filerAddress, option.grpcDialOption,
"filer.remote.sync", mountedDir, lastOffsetTs.UnixNano(), 0, processEventFnWithOffset, false)
}
func toRemoteStorageLocation(mountDir, sourcePath util.FullPath, remoteMountLocation *filer_pb.RemoteStorageLocation) *filer_pb.RemoteStorageLocation {
source := string(sourcePath[len(mountDir):])
dest := util.FullPath(remoteMountLocation.Path).Child(source)
return &filer_pb.RemoteStorageLocation{
Name: remoteMountLocation.Name,
Bucket: remoteMountLocation.Bucket,
Path: string(dest),
}
}
func isSameChunks(a, b []*filer_pb.FileChunk) bool {
if len(a) != len(b) {
return false
}
for i := 0; i < len(a); i++ {
x, y := a[i], b[i]
if !proto.Equal(x, y) {
return false
}
}
return true
}
func shouldSendToRemote(entry *filer_pb.Entry) bool {
if entry.RemoteEntry == nil {
return true
}
if entry.RemoteEntry.LocalMtime < entry.Attributes.Mtime {
return true
}
return false
}
func updateLocalEntry(filerClient filer_pb.FilerClient, dir string, entry *filer_pb.Entry, remoteEntry *filer_pb.RemoteEntry) error {
entry.RemoteEntry = remoteEntry
return filerClient.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {
_, err := client.UpdateEntry(context.Background(), &filer_pb.UpdateEntryRequest{
Directory: dir,
Entry: entry,
})
return err
})
}
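The handler above dispatches on which of OldEntry and NewEntry are set, which is how the filer metadata stream encodes the operation. A small runnable sketch of that classification, with simplified stand-in types:

package main

import "fmt"

type entry struct{ Name string } // stand-in for *filer_pb.Entry

func kind(oldEntry, newEntry *entry) string {
	switch {
	case oldEntry == nil && newEntry == nil:
		return "no-op"
	case oldEntry == nil:
		return "create: upload chunks, then update the local entry"
	case newEntry == nil:
		return "delete: remove the remote object"
	default:
		return "update or rename: metadata-only if the chunks are equal"
	}
}

func main() {
	fmt.Println(kind(nil, &entry{"a.txt"}))     // create
	fmt.Println(kind(&entry{"a.txt"}, nil))     // delete
	fmt.Println(kind(&entry{"a"}, &entry{"b"})) // update or rename
}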

View File

@@ -7,12 +7,6 @@ import (
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/replication"
"github.com/chrislusf/seaweedfs/weed/replication/sink"
_ "github.com/chrislusf/seaweedfs/weed/replication/sink/azuresink"
_ "github.com/chrislusf/seaweedfs/weed/replication/sink/b2sink"
_ "github.com/chrislusf/seaweedfs/weed/replication/sink/filersink"
_ "github.com/chrislusf/seaweedfs/weed/replication/sink/gcssink"
_ "github.com/chrislusf/seaweedfs/weed/replication/sink/localsink"
_ "github.com/chrislusf/seaweedfs/weed/replication/sink/s3sink"
"github.com/chrislusf/seaweedfs/weed/replication/sub"
"github.com/chrislusf/seaweedfs/weed/util"
)

View File

@@ -15,7 +15,6 @@ import (
"github.com/chrislusf/seaweedfs/weed/util"
"github.com/chrislusf/seaweedfs/weed/util/grace"
"google.golang.org/grpc"
"io"
"strings"
"time"
)
@@ -71,8 +70,8 @@ func init() {
var cmdFilerSynchronize = &Command{
UsageLine: "filer.sync -a=<oneFilerHost>:<oneFilerPort> -b=<otherFilerHost>:<otherFilerPort>",
Short: "resumeable continuous synchronization between two active-active or active-passive SeaweedFS clusters",
Long: `resumeable continuous synchronization for file changes between two active-active or active-passive filers
Short: "resumable continuous synchronization between two active-active or active-passive SeaweedFS clusters",
Long: `resumable continuous synchronization for file changes between two active-active or active-passive filers
filer.sync listens on filer notifications. If any file is updated, it will fetch the updated content,
and write to the other destination. Different from filer.replicate:
@@ -89,6 +88,7 @@ var cmdFilerSynchronize = &Command{
func runFilerSynchronize(cmd *Command, args []string) bool {
util.LoadConfiguration("security", false)
grpcDialOption := security.LoadClientTLS(util.GetViper(), "grpc.client")
grace.SetupProfiling(*syncCpuProfile, *syncMemProfile)
@@ -165,50 +165,14 @@ func doSubscribeFilerMetaChanges(grpcDialOption grpc.DialOption, sourceFiler, so
return persistEventFn(resp)
}
return pb.WithFilerClient(sourceFiler, grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
stream, err := client.SubscribeMetadata(ctx, &filer_pb.SubscribeMetadataRequest{
ClientName: "syncTo_" + targetFiler,
PathPrefix: sourcePath,
SinceNs: sourceFilerOffsetTsNs,
Signature: targetFilerSignature,
})
if err != nil {
return fmt.Errorf("listen: %v", err)
}
var counter int64
var lastWriteTime time.Time
for {
resp, listenErr := stream.Recv()
if listenErr == io.EOF {
return nil
}
if listenErr != nil {
return listenErr
}
if err := processEventFn(resp); err != nil {
return err
}
counter++
if lastWriteTime.Add(3 * time.Second).Before(time.Now()) {
glog.V(0).Infof("sync %s => %s progressed to %v %0.2f/sec", sourceFiler, targetFiler, time.Unix(0, resp.TsNs), float64(counter)/float64(3))
counter = 0
lastWriteTime = time.Now()
if err := setOffset(grpcDialOption, targetFiler, SyncKeyPrefix, sourceFilerSignature, resp.TsNs); err != nil {
return err
}
}
}
processEventFnWithOffset := pb.AddOffsetFunc(processEventFn, 3*time.Second, func(counter int64, lastTsNs int64) error {
glog.V(0).Infof("sync %s to %s progressed to %v %0.2f/sec", sourceFiler, targetFiler, time.Unix(0, lastTsNs), float64(counter)/float64(3))
return setOffset(grpcDialOption, targetFiler, SyncKeyPrefix, sourceFilerSignature, lastTsNs)
})
return pb.FollowMetadata(sourceFiler, grpcDialOption, "syncTo_"+targetFiler,
sourcePath, sourceFilerOffsetTsNs, targetFilerSignature, processEventFnWithOffset, false)
}
const (
@@ -359,16 +323,19 @@ func genProcessFunction(sourcePath string, targetPath string, dataSink sink.Repl
return processEventFn
}
func buildKey(dataSink sink.ReplicationSink, message *filer_pb.EventNotification, targetPath string, sourceKey util.FullPath, sourcePath string) string {
func buildKey(dataSink sink.ReplicationSink, message *filer_pb.EventNotification, targetPath string, sourceKey util.FullPath, sourcePath string) (key string) {
if !dataSink.IsIncremental() {
return util.Join(targetPath, string(sourceKey)[len(sourcePath):])
key = util.Join(targetPath, string(sourceKey)[len(sourcePath):])
} else {
var mTime int64
if message.NewEntry != nil {
mTime = message.NewEntry.Attributes.Mtime
} else if message.OldEntry != nil {
mTime = message.OldEntry.Attributes.Mtime
}
dateKey := time.Unix(mTime, 0).Format("2006-01-02")
key = util.Join(targetPath, dateKey, string(sourceKey)[len(sourcePath):])
}
var mTime int64
if message.NewEntry != nil {
mTime = message.NewEntry.Attributes.Mtime
} else if message.OldEntry != nil {
mTime = message.OldEntry.Attributes.Mtime
}
dateKey := time.Unix(mTime, 0).Format("2006-01-02")
return util.Join(targetPath, dateKey, string(sourceKey)[len(sourcePath):])
return escapeKey(key)
}
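For an incremental sink, buildKey now inserts a yyyy-mm-dd directory derived from the entry's mtime before the source-relative path. A worked standalone sketch of the resulting layout (filepath.Join stands in for the util.Join used above):

package main

import (
	"fmt"
	"path/filepath"
	"time"
)

func main() {
	targetPath := "/backup"
	sourcePath := "/buckets"
	sourceKey := "/buckets/b1/x.txt"
	mTime := time.Date(2021, 8, 10, 13, 45, 24, 0, time.UTC)

	dateKey := mTime.Format("2006-01-02") // same layout string as buildKey
	key := filepath.Join(targetPath, dateKey, sourceKey[len(sourcePath):])
	fmt.Println(key) // /backup/2021-08-10/b1/x.txt
}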

View File

@@ -0,0 +1,7 @@
// +build !windows
package command
func escapeKey(key string) string {
return key
}

View File

@@ -0,0 +1,12 @@
package command
import (
"strings"
)
func escapeKey(key string) string {
if strings.Contains(key, ":") {
return strings.ReplaceAll(key, ":", "")
}
return key
}
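The two escapeKey variants are selected per platform: the first via its build tag, the second presumably via a _windows.go file name lost in this view. Since ':' is illegal in Windows file names, replicated keys drop it there; a quick standalone demonstration of the Windows behavior:

package main

import (
	"fmt"
	"strings"
)

// Stand-in for the Windows variant above; other platforms return the key unchanged.
func escapeKeyWindows(key string) string {
	return strings.ReplaceAll(key, ":", "")
}

func main() {
	fmt.Println(escapeKeyWindows("logs/2021-08-10T13:45:24.log"))
	// Output: logs/2021-08-10T134524.log
}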

View File

@@ -2,140 +2,232 @@ package command
import (
"fmt"
"strings"
"strconv"
"time"
"os"
"strconv"
"strings"
"time"
)
func init() {
cmdFuse.Run = runFuse // break init cycle
}
type parameter struct {
name string
value string
}
func runFuse(cmd *Command, args []string) bool {
argsLen := len(args)
options := []string{}
rawArgs := strings.Join(args, " ")
rawArgsLen := len(rawArgs)
option := strings.Builder{}
options := []parameter{}
masterProcess := true
fusermountPath := ""
// at least target mount path should be passed
if argsLen < 1 {
return false
// first parameter
i := 0
for i = 0; i < rawArgsLen && rawArgs[i] != ' '; i++ {
option.WriteByte(rawArgs[i])
}
options = append(options, parameter{"arg0", option.String()})
option.Reset()
for i++; i < rawArgsLen; i++ {
// space separator check for filled option
if rawArgs[i] == ' ' {
if option.Len() > 0 {
options = append(options, parameter{option.String(), "true"})
option.Reset()
}
// dash separator read option until next space
} else if rawArgs[i] == '-' {
for i++; i < rawArgsLen && rawArgs[i] != ' '; i++ {
option.WriteByte(rawArgs[i])
}
options = append(options, parameter{option.String(), "true"})
option.Reset()
// equal separator start option with pending value
} else if rawArgs[i] == '=' {
name := option.String()
option.Reset()
for i++; i < rawArgsLen && rawArgs[i] != ',' && rawArgs[i] != ' '; i++ {
// double quote separator read option until next double quote
if rawArgs[i] == '"' {
for i++; i < rawArgsLen && rawArgs[i] != '"'; i++ {
option.WriteByte(rawArgs[i])
}
// single quote separator read option until next single quote
} else if rawArgs[i] == '\'' {
for i++; i < rawArgsLen && rawArgs[i] != '\''; i++ {
option.WriteByte(rawArgs[i])
}
// add chars before comma
} else if rawArgs[i] != ' ' {
option.WriteByte(rawArgs[i])
}
}
options = append(options, parameter{name, option.String()})
option.Reset()
// comma separator just read current option
} else if rawArgs[i] == ',' {
options = append(options, parameter{option.String(), "true"})
option.Reset()
// what is not a separator fill option buffer
} else {
option.WriteByte(rawArgs[i])
}
}
// first option is always target mount path
mountOptions.dir = &args[0]
// get residual option data
if option.Len() > 0 {
// add value to pending option
options = append(options, parameter{option.String(), "true"})
option.Reset()
}
// scan parameters looking for one or more -o options
// -o options receive parameters on format key=value[,key=value]...
for i := 0; i < argsLen; i++ {
if args[i] == "-o" && i+1 <= argsLen {
options = strings.Split(args[i+1], ",")
// scan each parameter
for i := 0; i < len(options); i++ {
parameter := options[i]
switch parameter.name {
case "child":
masterProcess = false
case "arg0":
mountOptions.dir = &parameter.value
case "filer":
mountOptions.filer = &parameter.value
case "filer.path":
mountOptions.filerMountRootPath = &parameter.value
case "dirAutoCreate":
if parsed, err := strconv.ParseBool(parameter.value); err == nil {
mountOptions.dirAutoCreate = &parsed
} else {
panic(fmt.Errorf("dirAutoCreate: %s", err))
}
case "collection":
mountOptions.collection = &parameter.value
case "replication":
mountOptions.replication = &parameter.value
case "disk":
mountOptions.diskType = &parameter.value
case "ttl":
if parsed, err := strconv.ParseInt(parameter.value, 0, 32); err == nil {
intValue := int(parsed)
mountOptions.ttlSec = &intValue
} else {
panic(fmt.Errorf("ttl: %s", err))
}
case "chunkSizeLimitMB":
if parsed, err := strconv.ParseInt(parameter.value, 0, 32); err == nil {
intValue := int(parsed)
mountOptions.chunkSizeLimitMB = &intValue
} else {
panic(fmt.Errorf("chunkSizeLimitMB: %s", err))
}
case "concurrentWriters":
i++
if parsed, err := strconv.ParseInt(parameter.value, 0, 32); err == nil {
intValue := int(parsed)
mountOptions.concurrentWriters = &intValue
} else {
panic(fmt.Errorf("concurrentWriters: %s", err))
}
case "cacheDir":
mountOptions.cacheDir = &parameter.value
case "cacheCapacityMB":
if parsed, err := strconv.ParseInt(parameter.value, 0, 64); err == nil {
mountOptions.cacheSizeMB = &parsed
} else {
panic(fmt.Errorf("cacheCapacityMB: %s", err))
}
case "dataCenter":
mountOptions.dataCenter = &parameter.value
case "allowOthers":
if parsed, err := strconv.ParseBool(parameter.value); err == nil {
mountOptions.allowOthers = &parsed
} else {
panic(fmt.Errorf("allowOthers: %s", err))
}
case "umask":
mountOptions.umaskString = &parameter.value
case "nonempty":
if parsed, err := strconv.ParseBool(parameter.value); err == nil {
mountOptions.nonempty = &parsed
} else {
panic(fmt.Errorf("nonempty: %s", err))
}
case "volumeServerAccess":
mountOptions.volumeServerAccess = &parameter.value
case "map.uid":
mountOptions.uidMap = &parameter.value
case "map.gid":
mountOptions.gidMap = &parameter.value
case "readOnly":
if parsed, err := strconv.ParseBool(parameter.value); err == nil {
mountOptions.readOnly = &parsed
} else {
panic(fmt.Errorf("readOnly: %s", err))
}
case "cpuprofile":
mountCpuProfile = &parameter.value
case "memprofile":
mountMemProfile = &parameter.value
case "readRetryTime":
if parsed, err := time.ParseDuration(parameter.value); err == nil {
mountReadRetryTime = &parsed
} else {
panic(fmt.Errorf("readRetryTime: %s", err))
}
case "fusermount.path":
fusermountPath = parameter.value
}
}
// for each option passed with -o
for _, option := range options {
// split just first = character
parts := strings.SplitN(option, "=", 2)
// if doesn't key and value skip
if len(parts) != 2 {
continue
// the master starts the child, releases it, then finishes itself
if masterProcess {
arg0, err := os.Executable()
if err != nil {
panic(err)
}
key, value := parts[0], parts[1]
argv := append(os.Args, "-o", "child")
// switch key keeping "weed mount" parameters
switch key {
case "filer":
mountOptions.filer = &value
case "filer.path":
mountOptions.filerMountRootPath = &value
case "dirAutoCreate":
if parsed, err := strconv.ParseBool(value); err != nil {
mountOptions.dirAutoCreate = &parsed
} else {
panic(fmt.Errorf("dirAutoCreate: %s", err))
}
case "collection":
mountOptions.collection = &value
case "replication":
mountOptions.replication = &value
case "disk":
mountOptions.diskType = &value
case "ttl":
if parsed, err := strconv.ParseInt(value, 0, 32); err != nil {
intValue := int(parsed)
mountOptions.ttlSec = &intValue
} else {
panic(fmt.Errorf("ttl: %s", err))
}
case "chunkSizeLimitMB":
if parsed, err := strconv.ParseInt(value, 0, 32); err != nil {
intValue := int(parsed)
mountOptions.chunkSizeLimitMB = &intValue
} else {
panic(fmt.Errorf("chunkSizeLimitMB: %s", err))
}
case "concurrentWriters":
if parsed, err := strconv.ParseInt(value, 0, 32); err != nil {
intValue := int(parsed)
mountOptions.concurrentWriters = &intValue
} else {
panic(fmt.Errorf("concurrentWriters: %s", err))
}
case "cacheDir":
mountOptions.cacheDir = &value
case "cacheCapacityMB":
if parsed, err := strconv.ParseInt(value, 0, 64); err != nil {
mountOptions.cacheSizeMB = &parsed
} else {
panic(fmt.Errorf("cacheCapacityMB: %s", err))
}
case "dataCenter":
mountOptions.dataCenter = &value
case "allowOthers":
if parsed, err := strconv.ParseBool(value); err != nil {
mountOptions.allowOthers = &parsed
} else {
panic(fmt.Errorf("allowOthers: %s", err))
}
case "umask":
mountOptions.umaskString = &value
case "nonempty":
if parsed, err := strconv.ParseBool(value); err != nil {
mountOptions.nonempty = &parsed
} else {
panic(fmt.Errorf("nonempty: %s", err))
}
case "volumeServerAccess":
mountOptions.volumeServerAccess = &value
case "map.uid":
mountOptions.uidMap = &value
case "map.gid":
mountOptions.gidMap = &value
case "readOnly":
if parsed, err := strconv.ParseBool(value); err != nil {
mountOptions.readOnly = &parsed
} else {
panic(fmt.Errorf("readOnly: %s", err))
}
case "cpuprofile":
mountCpuProfile = &value
case "memprofile":
mountMemProfile = &value
case "readRetryTime":
if parsed, err := time.ParseDuration(value); err != nil {
mountReadRetryTime = &parsed
} else {
panic(fmt.Errorf("readRetryTime: %s", err))
}
attr := os.ProcAttr{}
attr.Env = os.Environ()
child, err := os.StartProcess(arg0, argv, &attr)
if err != nil {
panic(fmt.Errorf("master process can not start child process: %s", err))
}
err = child.Release()
if err != nil {
panic(fmt.Errorf("master process can not release child process: %s", err))
}
return true
}
// I don't know why PATH environment variable is lost
if err := os.Setenv("PATH", "/bin:/sbin:/usr/bin:/usr/sbin:/usr/local/bin:/usr/local/sbin"); err != nil {
panic(fmt.Errorf("setenv: %s", err))
if fusermountPath != "" {
if err := os.Setenv("PATH", fusermountPath); err != nil {
panic(fmt.Errorf("setenv: %s", err))
}
} else if os.Getenv("PATH") == "" {
if err := os.Setenv("PATH", "/bin:/sbin:/usr/bin:/usr/sbin"); err != nil {
panic(fmt.Errorf("setenv: %s", err))
}
}
// just call "weed mount" command
@@ -144,7 +236,7 @@ func runFuse(cmd *Command, args []string) bool {
var cmdFuse = &Command{
UsageLine: "fuse /mnt/mount/point -o \"filer=localhost:8888,filer.path=/\"",
Short: "Allow use weed with linux's mount command",
Short: "Allow use weed with linux's mount command",
Long: `Allow use weed with linux's mount command
You can use -t weed on mount command:
@@ -160,6 +252,9 @@ var cmdFuse = &Command{
mount -t fuse./home/user/bin/weed fuse /mnt -o "filer=localhost:8888,filer.path=/"
mount -t fuse "/home/user/bin/weed#fuse" /mnt -o "filer=localhost:8888,filer.path=/"
To pass more than one parameter, use quotes, for example:
mount -t weed fuse /mnt -o "filer='192.168.0.1:8888,192.168.0.2:8888',filer.path=/"
To check valid options look "weed mount --help"
`,
}
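The hand-rolled tokenizer above replaces the old strings.Split approach so that quoted values may contain commas, which is what the new doc example with two filer addresses relies on. A simplified self-contained sketch of just the quote-aware key=value splitting (the real parser additionally handles the leading mount path and bare dash flags):

package main

import "fmt"

func splitOptions(s string) map[string]string {
	out := map[string]string{}
	name, val := "", ""
	inName, quote := true, byte(0)
	flush := func() {
		if name != "" {
			if inName {
				out[name] = "true" // bare option, no value
			} else {
				out[name] = val
			}
		}
		name, val, inName = "", "", true
	}
	for i := 0; i < len(s); i++ {
		c := s[i]
		switch {
		case quote != 0: // inside quotes, commas are literal
			if c == quote {
				quote = 0
			} else {
				val += string(c)
			}
		case c == '\'' || c == '"':
			quote = c
		case c == '=' && inName:
			inName = false
		case c == ',':
			flush()
		default:
			if inName {
				name += string(c)
			} else {
				val += string(c)
			}
		}
	}
	flush()
	return out
}

func main() {
	fmt.Println(splitOptions("filer='192.168.0.1:8888,192.168.0.2:8888',filer.path=/,nonempty"))
	// map[filer:192.168.0.1:8888,192.168.0.2:8888 filer.path:/ nonempty:true]
}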

View File

@@ -49,6 +49,7 @@ func (iamopt *IamOptions) startIamServer() bool {
return false
}
util.LoadConfiguration("security", false)
grpcDialOption := security.LoadClientTLS(util.GetViper(), "grpc.client")
for {
err = pb.WithGrpcFilerClient(filerGrpcAddress, grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {

weed/command/imports.go (new file, 30 lines)
View File

@@ -0,0 +1,30 @@
package command
import (
_ "net/http/pprof"
_ "github.com/chrislusf/seaweedfs/weed/remote_storage/s3"
_ "github.com/chrislusf/seaweedfs/weed/replication/sink/azuresink"
_ "github.com/chrislusf/seaweedfs/weed/replication/sink/b2sink"
_ "github.com/chrislusf/seaweedfs/weed/replication/sink/filersink"
_ "github.com/chrislusf/seaweedfs/weed/replication/sink/gcssink"
_ "github.com/chrislusf/seaweedfs/weed/replication/sink/localsink"
_ "github.com/chrislusf/seaweedfs/weed/replication/sink/s3sink"
_ "github.com/chrislusf/seaweedfs/weed/filer/cassandra"
_ "github.com/chrislusf/seaweedfs/weed/filer/elastic/v7"
_ "github.com/chrislusf/seaweedfs/weed/filer/etcd"
_ "github.com/chrislusf/seaweedfs/weed/filer/hbase"
_ "github.com/chrislusf/seaweedfs/weed/filer/leveldb"
_ "github.com/chrislusf/seaweedfs/weed/filer/leveldb2"
_ "github.com/chrislusf/seaweedfs/weed/filer/leveldb3"
_ "github.com/chrislusf/seaweedfs/weed/filer/mongodb"
_ "github.com/chrislusf/seaweedfs/weed/filer/mysql"
_ "github.com/chrislusf/seaweedfs/weed/filer/mysql2"
_ "github.com/chrislusf/seaweedfs/weed/filer/postgres"
_ "github.com/chrislusf/seaweedfs/weed/filer/postgres2"
_ "github.com/chrislusf/seaweedfs/weed/filer/redis"
_ "github.com/chrislusf/seaweedfs/weed/filer/redis2"
_ "github.com/chrislusf/seaweedfs/weed/filer/sqlite"
)
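Collecting these blank imports into one file works because each store, sink, and remote-storage package registers itself from an init function when imported for side effects. A minimal sketch of that registration pattern (the names here are illustrative, not the actual SeaweedFS API):

package main

import "fmt"

// Illustrative registry; the real ones live in the filer/replication packages.
var stores = map[string]func() string{}

func register(name string, factory func() string) { stores[name] = factory }

// In a real store package this init runs as soon as the package is
// imported, even with a blank import.
func init() { register("leveldb2", func() string { return "leveldb2 store" }) }

func main() {
	for name, newStore := range stores {
		fmt.Println(name, "->", newStore())
	}
}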

View File

@@ -5,15 +5,18 @@ package command
import (
"context"
"fmt"
"github.com/chrislusf/seaweedfs/weed/storage/types"
"os"
"os/user"
"path"
"path/filepath"
"runtime"
"strconv"
"strings"
"syscall"
"time"
"github.com/chrislusf/seaweedfs/weed/storage/types"
"github.com/chrislusf/seaweedfs/weed/filesys/meta_cache"
"github.com/seaweedfs/fuse"
@@ -49,6 +52,21 @@ func runMount(cmd *Command, args []string) bool {
return RunMount(&mountOptions, os.FileMode(umask))
}
func getParentInode(mountDir string) (uint64, error) {
parentDir := filepath.Clean(filepath.Join(mountDir, ".."))
fi, err := os.Stat(parentDir)
if err != nil {
return 0, err
}
stat, ok := fi.Sys().(*syscall.Stat_t)
if !ok {
return 0, nil
}
return stat.Ino, nil
}
func RunMount(option *MountOptions, umask os.FileMode) bool {
filers := strings.Split(*option.filer, ",")
@@ -85,13 +103,19 @@ func RunMount(option *MountOptions, umask os.FileMode) bool {
filerMountRootPath := *option.filerMountRootPath
dir := util.ResolvePath(*option.dir)
chunkSizeLimitMB := *mountOptions.chunkSizeLimitMB
parentInode, err := getParentInode(dir)
if err != nil {
glog.Errorf("failed to retrieve inode for parent directory of %s: %v", dir, err)
return true
}
fmt.Printf("This is SeaweedFS version %s %s %s\n", util.Version(), runtime.GOOS, runtime.GOARCH)
if dir == "" {
fmt.Printf("Please specify the mount directory via \"-dir\"")
return false
}
chunkSizeLimitMB := *mountOptions.chunkSizeLimitMB
if chunkSizeLimitMB <= 0 {
fmt.Printf("Please specify a reasonable buffer size.")
return false
@@ -199,6 +223,7 @@ func RunMount(option *MountOptions, umask os.FileMode) bool {
MountMode: mountMode,
MountCtime: fileInfo.ModTime(),
MountMtime: time.Now(),
MountParentInode: parentInode,
Umask: umask,
VolumeServerAccess: *mountOptions.volumeServerAccess,
Cipher: cipher,
@@ -221,6 +246,7 @@ func RunMount(option *MountOptions, umask os.FileMode) bool {
glog.V(0).Infof("mounted %s%s to %v", *option.filer, mountRoot, dir)
server := fs.New(c, nil)
seaweedFileSystem.Server = server
seaweedFileSystem.StartBackgroundTasks()
err = server.Serve(seaweedFileSystem)
// check if the mount process has an error to report

View File

@@ -1,6 +1,8 @@
package command
import (
"fmt"
"github.com/chrislusf/seaweedfs/weed/command/scaffold"
"io/ioutil"
"path/filepath"
)
@@ -35,17 +37,17 @@ func runScaffold(cmd *Command, args []string) bool {
content := ""
switch *config {
case "filer":
content = FILER_TOML_EXAMPLE
content = scaffold.Filer
case "notification":
content = NOTIFICATION_TOML_EXAMPLE
content = scaffold.Notification
case "replication":
content = REPLICATION_TOML_EXAMPLE
content = scaffold.Replication
case "security":
content = SECURITY_TOML_EXAMPLE
content = scaffold.Security
case "master":
content = MASTER_TOML_EXAMPLE
content = scaffold.Master
case "shell":
content = SHELL_TOML_EXAMPLE
content = scaffold.Shell
}
if content == "" {
println("need a valid -config option")
@@ -55,519 +57,7 @@ func runScaffold(cmd *Command, args []string) bool {
if *outputPath != "" {
ioutil.WriteFile(filepath.Join(*outputPath, *config+".toml"), []byte(content), 0644)
} else {
println(content)
fmt.Println(content)
}
return true
}
const (
FILER_TOML_EXAMPLE = `
# A sample TOML config file for SeaweedFS filer store
# Used with "weed filer" or "weed server -filer"
# Put this file in one of these locations, with descending priority
# ./filer.toml
# $HOME/.seaweedfs/filer.toml
# /etc/seaweedfs/filer.toml
####################################################
# Customizable filer server options
####################################################
[filer.options]
# with http DELETE, by default the filer would check whether a folder is empty.
# recursive_delete will delete all sub folders and files, similar to "rm -Rf"
recursive_delete = false
# directories under this folder will automatically become separate buckets
buckets_folder = "/buckets"
####################################################
# The following are filer store options
####################################################
[leveldb2]
# local on disk, mostly for simple single-machine setup, fairly scalable
# faster than previous leveldb, recommended.
enabled = true
dir = "./filerldb2" # directory to store level db files
[leveldb3]
# similar to leveldb2.
# each bucket has its own meta store.
enabled = false
dir = "./filerldb3" # directory to store level db files
[rocksdb]
# local on disk, similar to leveldb
# since it is using a C wrapper, you need to install rocksdb and build it by yourself
enabled = false
dir = "./filerrdb" # directory to store rocksdb files
[sqlite]
# local on disk, similar to leveldb
enabled = false
dbFile = "./filer.db" # sqlite db file
[mysql] # or memsql, tidb
# CREATE TABLE IF NOT EXISTS filemeta (
# dirhash BIGINT COMMENT 'first 64 bits of MD5 hash value of directory field',
# name VARCHAR(1000) BINARY COMMENT 'directory or file name',
# directory TEXT COMMENT 'full path to parent directory',
# meta LONGBLOB,
# PRIMARY KEY (dirhash, name)
# ) DEFAULT CHARSET=utf8;
enabled = false
hostname = "localhost"
port = 3306
username = "root"
password = ""
database = "" # create or use an existing database
connection_max_idle = 2
connection_max_open = 100
connection_max_lifetime_seconds = 0
interpolateParams = false
# if insert/upsert is failing, you can disable upsert or update the query syntax to match your RDBMS:
enableUpsert = true
upsertQuery = """INSERT INTO ` + "`%s`" + ` (dirhash,name,directory,meta) VALUES(?,?,?,?) ON DUPLICATE KEY UPDATE meta = VALUES(meta)"""
[mysql2] # or memsql, tidb
enabled = false
createTable = """
CREATE TABLE IF NOT EXISTS ` + "`%s`" + ` (
dirhash BIGINT,
name VARCHAR(1000) BINARY,
directory TEXT,
meta LONGBLOB,
PRIMARY KEY (dirhash, name)
) DEFAULT CHARSET=utf8;
"""
hostname = "localhost"
port = 3306
username = "root"
password = ""
database = "" # create or use an existing database
connection_max_idle = 2
connection_max_open = 100
connection_max_lifetime_seconds = 0
interpolateParams = false
# if insert/upsert is failing, you can disable upsert or update the query syntax to match your RDBMS:
enableUpsert = true
upsertQuery = """INSERT INTO ` + "`%s`" + ` (dirhash,name,directory,meta) VALUES(?,?,?,?) ON DUPLICATE KEY UPDATE meta = VALUES(meta)"""
[postgres] # or cockroachdb, YugabyteDB
# CREATE TABLE IF NOT EXISTS filemeta (
# dirhash BIGINT,
# name VARCHAR(65535),
# directory VARCHAR(65535),
# meta bytea,
# PRIMARY KEY (dirhash, name)
# );
enabled = false
hostname = "localhost"
port = 5432
username = "postgres"
password = ""
database = "postgres" # create or use an existing database
schema = ""
sslmode = "disable"
connection_max_idle = 100
connection_max_open = 100
connection_max_lifetime_seconds = 0
# if insert/upsert is failing, you can disable upsert or update the query syntax to match your RDBMS:
enableUpsert = true
upsertQuery = """INSERT INTO "%[1]s" (dirhash,name,directory,meta) VALUES($1,$2,$3,$4) ON CONFLICT (dirhash,name) DO UPDATE SET meta = EXCLUDED.meta WHERE "%[1]s".meta != EXCLUDED.meta"""
[postgres2]
enabled = false
createTable = """
CREATE TABLE IF NOT EXISTS "%s" (
dirhash BIGINT,
name VARCHAR(65535),
directory VARCHAR(65535),
meta bytea,
PRIMARY KEY (dirhash, name)
);
"""
hostname = "localhost"
port = 5432
username = "postgres"
password = ""
database = "postgres" # create or use an existing database
schema = ""
sslmode = "disable"
connection_max_idle = 100
connection_max_open = 100
connection_max_lifetime_seconds = 0
# if insert/upsert is failing, you can disable upsert or update the query syntax to match your RDBMS:
enableUpsert = true
upsertQuery = """INSERT INTO "%[1]s" (dirhash,name,directory,meta) VALUES($1,$2,$3,$4) ON CONFLICT (dirhash,name) DO UPDATE SET meta = EXCLUDED.meta WHERE "%[1]s".meta != EXCLUDED.meta"""
[cassandra]
# CREATE TABLE filemeta (
# directory varchar,
# name varchar,
# meta blob,
# PRIMARY KEY (directory, name)
# ) WITH CLUSTERING ORDER BY (name ASC);
enabled = false
keyspace="seaweedfs"
hosts=[
"localhost:9042",
]
username=""
password=""
# This changes the data layout. Only add new directories. Removing/Updating will cause data loss.
superLargeDirectories = []
[hbase]
enabled = false
zkquorum = ""
table = "seaweedfs"
[redis2]
enabled = false
address = "localhost:6379"
password = ""
database = 0
# This changes the data layout. Only add new directories. Removing/Updating will cause data loss.
superLargeDirectories = []
[redis_cluster2]
enabled = false
addresses = [
"localhost:30001",
"localhost:30002",
"localhost:30003",
"localhost:30004",
"localhost:30005",
"localhost:30006",
]
password = ""
# allows reads from slave servers or the master, but all writes still go to the master
readOnly = false
# automatically use the closest Redis server for reads
routeByLatency = false
# This changes the data layout. Only add new directories. Removing/Updating will cause data loss.
superLargeDirectories = []
[etcd]
enabled = false
servers = "localhost:2379"
timeout = "3s"
[mongodb]
enabled = false
uri = "mongodb://localhost:27017"
option_pool_size = 0
database = "seaweedfs"
[elastic7]
enabled = false
servers = [
"http://localhost1:9200",
"http://localhost2:9200",
"http://localhost3:9200",
]
username = ""
password = ""
sniff_enabled = false
healthcheck_enabled = false
# increasing the value is recommended; make sure the value in Elastic is greater than or equal to the value here
index.max_result_window = 10000
##########################
##########################
# To add path-specific filer store:
#
# 1. Add a name following the store type separated by a dot ".". E.g., cassandra.tmp
# 2. Add a location configuration. E.g., location = "/tmp/"
# 3. Copy and customize all other configurations.
# Make sure they are not the same if using the same store type!
# 4. Set enabled to true
#
# The following is just using redis as an example
##########################
[redis2.tmp]
enabled = false
location = "/tmp/"
address = "localhost:6379"
password = ""
database = 1
`
NOTIFICATION_TOML_EXAMPLE = `
# A sample TOML config file for SeaweedFS filer store
# Used by both "weed filer" or "weed server -filer" and "weed filer.replicate"
# Put this file in one of these locations, with descending priority
# ./notification.toml
# $HOME/.seaweedfs/notification.toml
# /etc/seaweedfs/notification.toml
####################################################
# notification
# send and receive filer updates for each file to an external message queue
####################################################
[notification.log]
# this is only for debugging purposes and does not work with "weed filer.replicate"
enabled = false
[notification.kafka]
enabled = false
hosts = [
"localhost:9092"
]
topic = "seaweedfs_filer"
offsetFile = "./last.offset"
offsetSaveIntervalSeconds = 10
[notification.aws_sqs]
# experimental, let me know if it works
enabled = false
aws_access_key_id = "" # if empty, loads from the shared credentials file (~/.aws/credentials).
aws_secret_access_key = "" # if empty, loads from the shared credentials file (~/.aws/credentials).
region = "us-east-2"
sqs_queue_name = "my_filer_queue" # an existing queue name
[notification.google_pub_sub]
# read credentials doc at https://cloud.google.com/docs/authentication/getting-started
enabled = false
google_application_credentials = "/path/to/x.json" # path to json credential file
project_id = "" # an existing project id
topic = "seaweedfs_filer_topic" # a topic, auto created if does not exists
[notification.gocdk_pub_sub]
# The Go Cloud Development Kit (https://gocloud.dev).
# PubSub API (https://godoc.org/gocloud.dev/pubsub).
# Supports AWS SNS/SQS, Azure Service Bus, Google PubSub, NATS and RabbitMQ.
enabled = false
# This URL will Dial the RabbitMQ server at the URL in the environment
# variable RABBIT_SERVER_URL and open the exchange "myexchange".
# The exchange must have already been created by some other means, like
# the RabbitMQ management plugin. Create myexchange of type fanout and myqueue, then
# create the binding myexchange => myqueue
topic_url = "rabbit://myexchange"
sub_url = "rabbit://myqueue"
`
REPLICATION_TOML_EXAMPLE = `
# A sample TOML config file for replicating SeaweedFS filer
# Used with "weed filer.backup"
# Using with "weed filer.replicate" is deprecated.
# Put this file in one of these locations, with descending priority
# ./replication.toml
# $HOME/.seaweedfs/replication.toml
# /etc/seaweedfs/replication.toml
[source.filer] # deprecated. Only useful with "weed filer.replicate"
enabled = true
grpcAddress = "localhost:18888"
# all files under this directory tree are replicated.
# this is not a directory on your hard drive, but on your filer.
# i.e., all files with this "prefix" are sent to notification message queue.
directory = "/buckets"
[sink.local]
enabled = false
directory = "/data"
# all replicated files are placed under yyyy-mm-dd directories by modified time,
# so each date directory contains all new and updated files.
is_incremental = false
[sink.filer]
enabled = false
grpcAddress = "localhost:18888"
# all replicated files are under this directory tree
# this is not a directory on your hard drive, but on your filer.
# i.e., all received files will be "prefixed" to this directory.
directory = "/backup"
replication = ""
collection = ""
ttlSec = 0
is_incremental = false
[sink.s3]
# read credentials doc at https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/sessions.html
# default loads credentials from the shared credentials file (~/.aws/credentials).
enabled = false
aws_access_key_id = "" # if empty, loads from the shared credentials file (~/.aws/credentials).
aws_secret_access_key = "" # if empty, loads from the shared credentials file (~/.aws/credentials).
region = "us-east-2"
bucket = "your_bucket_name" # an existing bucket
directory = "/" # destination directory
endpoint = ""
is_incremental = false
[sink.google_cloud_storage]
# read credentials doc at https://cloud.google.com/docs/authentication/getting-started
enabled = false
google_application_credentials = "/path/to/x.json" # path to json credential file
bucket = "your_bucket_seaweedfs" # an existing bucket
directory = "/" # destination directory
is_incremental = false
[sink.azure]
# experimental, let me know if it works
enabled = false
account_name = ""
account_key = ""
container = "mycontainer" # an existing container
directory = "/" # destination directory
is_incremental = false
[sink.backblaze]
enabled = false
b2_account_id = ""
b2_master_application_key = ""
bucket = "mybucket" # an existing bucket
directory = "/" # destination directory
is_incremental = false
`
SECURITY_TOML_EXAMPLE = `
# Put this file in one of these locations, with descending priority
# ./security.toml
# $HOME/.seaweedfs/security.toml
# /etc/seaweedfs/security.toml
# this file is read by master, volume server, and filer
# the jwt signing key is read by master and volume server.
# a jwt defaults to expire after 10 seconds.
[jwt.signing]
key = ""
expires_after_seconds = 10 # seconds
# jwt for read is only supported with master+volume setup. Filer does not support this mode.
[jwt.signing.read]
key = ""
expires_after_seconds = 10 # seconds
# all grpc tls authentications are mutual
# the values for the following ca, cert, and key are paths to the PEM files.
# the host name is not checked, so the PEM files can be shared.
[grpc]
ca = ""
# Set a wildcard domain to enable TLS authentication by common names
allowed_wildcard_domain = "" # .mycompany.com
[grpc.volume]
cert = ""
key = ""
allowed_commonNames = "" # comma-separated SSL certificate common names
[grpc.master]
cert = ""
key = ""
allowed_commonNames = "" # comma-separated SSL certificate common names
[grpc.filer]
cert = ""
key = ""
allowed_commonNames = "" # comma-separated SSL certificate common names
[grpc.msg_broker]
cert = ""
key = ""
allowed_commonNames = "" # comma-separated SSL certificate common names
# use this for any place needs a grpc client
# i.e., "weed backup|benchmark|filer.copy|filer.replicate|mount|s3|upload"
[grpc.client]
cert = ""
key = ""
# volume server https options
# Note: work in progress!
# this does not work with other clients, e.g., "weed filer|mount" etc, yet.
[https.client]
enabled = true
[https.volume]
cert = ""
key = ""
`
MASTER_TOML_EXAMPLE = `
# Put this file in one of these locations, with descending priority
# ./master.toml
# $HOME/.seaweedfs/master.toml
# /etc/seaweedfs/master.toml
# this file is read by master
[master.maintenance]
# periodically running these scripts is the same as running them from 'weed shell'
scripts = """
lock
ec.encode -fullPercent=95 -quietFor=1h
ec.rebuild -force
ec.balance -force
volume.balance -force
volume.fix.replication
unlock
"""
sleep_minutes = 17 # sleep minutes between each script execution
[master.filer]
default = "localhost:8888" # used by maintenance scripts if the scripts needs to use fs related commands
[master.sequencer]
type = "raft" # Choose [raft|etcd|snowflake] type for storing the file id sequence
# when sequencer.type = etcd, set the listen-client-urls of the etcd cluster that stores the file id sequence
# example: http://127.0.0.1:2379,http://127.0.0.1:2389
sequencer_etcd_urls = "http://127.0.0.1:2379"
# configurations for tiered cloud storage
# old volumes are transparently moved to cloud for cost efficiency
[storage.backend]
[storage.backend.s3.default]
enabled = false
aws_access_key_id = "" # if empty, loads from the shared credentials file (~/.aws/credentials).
aws_secret_access_key = "" # if empty, loads from the shared credentials file (~/.aws/credentials).
region = "us-east-2"
bucket = "your_bucket_name" # an existing bucket
endpoint = ""
# create this number of logical volumes if there are no more writable volumes
# count_x means how many copies of the data.
# e.g.:
# 000 has only one copy, copy_1
# 010 and 001 have two copies, copy_2
# 011 has three copies, copy_3
[master.volume_growth]
copy_1 = 7 # create 1 x 7 = 7 actual volumes
copy_2 = 6 # create 2 x 6 = 12 actual volumes
copy_3 = 3 # create 3 x 3 = 9 actual volumes
copy_other = 1 # create n x 1 = n actual volumes
# configuration flags for replication
[master.replication]
# any replication counts should be considered minimums. If you specify 010 and
# have 3 different racks, that's still considered writable. Writes will still
# try to replicate to all available volumes. You should only use this option
# if you are doing your own replication or periodic sync of volumes.
treat_replication_as_minimums = false
`
SHELL_TOML_EXAMPLE = `
[cluster]
default = "c1"
[cluster.c1]
master = "localhost:9333" # comma-separated master servers
filer = "localhost:8888" # filer host and port
[cluster.c2]
master = ""
filer = ""
`
)
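The config headers above all document the same descending-priority lookup: ./ first, then $HOME/.seaweedfs, then /etc/seaweedfs. Below is a minimal sketch of that lookup using spf13/viper; the use of viper here is an assumption for illustration only, since the actual loader in this codebase is util.LoadConfiguration:

package main

import (
    "fmt"

    "github.com/spf13/viper"
)

func main() {
    viper.SetConfigName("master") // looks for master.toml (extension inferred from type)
    viper.SetConfigType("toml")
    // paths are searched in the order added, matching "descending priority"
    viper.AddConfigPath(".")
    viper.AddConfigPath("$HOME/.seaweedfs")
    viper.AddConfigPath("/etc/seaweedfs")
    if err := viper.ReadInConfig(); err != nil {
        fmt.Println("no master.toml found:", err)
        return
    }
    fmt.Println("sleep_minutes =", viper.GetInt("master.maintenance.sleep_minutes"))
}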

View File

@@ -0,0 +1,21 @@
package scaffold
import _ "embed"
//go:embed filer.toml
var Filer string
//go:embed notification.toml
var Notification string
//go:embed replication.toml
var Replication string
//go:embed security.toml
var Security string
//go:embed master.toml
var Master string
//go:embed shell.toml
var Shell string
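The new scaffold package above compiles the .toml templates into the binary with go:embed (Go 1.16+), replacing the long string constants. A self-contained sketch of the mechanism, using a hypothetical hello.txt:

package main

import (
    _ "embed" // blank import required for //go:embed on a plain string var
    "fmt"
)

// hello.txt must sit next to this source file at build time;
// its contents are baked into the binary.
//go:embed hello.txt
var hello string

func main() {
    fmt.Print(hello)
}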

View File

@@ -0,0 +1,232 @@
# A sample TOML config file for SeaweedFS filer store
# Used with "weed filer" or "weed server -filer"
# Put this file in one of these locations, with descending priority
# ./filer.toml
# $HOME/.seaweedfs/filer.toml
# /etc/seaweedfs/filer.toml
####################################################
# Customizable filer server options
####################################################
[filer.options]
# with http DELETE, by default the filer would check whether a folder is empty.
# recursive_delete will delete all sub folders and files, similar to "rm -Rf"
recursive_delete = false
####################################################
# The following are filer store options
####################################################
[leveldb2]
# local on disk, mostly for simple single-machine setup, fairly scalable
# faster than previous leveldb, recommended.
enabled = true
dir = "./filerldb2" # directory to store level db files
[leveldb3]
# similar to leveldb2.
# each bucket has its own meta store.
enabled = false
dir = "./filerldb3" # directory to store level db files
[rocksdb]
# local on disk, similar to leveldb
# since it uses a C wrapper, you need to install RocksDB and build it yourself
enabled = false
dir = "./filerrdb" # directory to store rocksdb files
[sqlite]
# local on disk, similar to leveldb
enabled = false
dbFile = "./filer.db" # sqlite db file
[mysql] # or memsql, tidb
# CREATE TABLE IF NOT EXISTS filemeta (
# dirhash BIGINT COMMENT 'first 64 bits of MD5 hash value of directory field',
# name VARCHAR(1000) BINARY COMMENT 'directory or file name',
# directory TEXT COMMENT 'full path to parent directory',
# meta LONGBLOB,
# PRIMARY KEY (dirhash, name)
# ) DEFAULT CHARSET=utf8;
enabled = false
hostname = "localhost"
port = 3306
username = "root"
password = ""
database = "" # create or use an existing database
connection_max_idle = 2
connection_max_open = 100
connection_max_lifetime_seconds = 0
interpolateParams = false
# if insert/upsert is failing, you can disable upsert or adjust the upsert query syntax to match your RDBMS:
enableUpsert = true
upsertQuery = """INSERT INTO ` + "`%s`" + ` (dirhash,name,directory,meta) VALUES(?,?,?,?) ON DUPLICATE KEY UPDATE meta = VALUES(meta)"""
[mysql2] # or memsql, tidb
enabled = false
createTable = """
CREATE TABLE IF NOT EXISTS ` + "`%s`" + ` (
dirhash BIGINT,
name VARCHAR(1000) BINARY,
directory TEXT,
meta LONGBLOB,
PRIMARY KEY (dirhash, name)
) DEFAULT CHARSET=utf8;
"""
hostname = "localhost"
port = 3306
username = "root"
password = ""
database = "" # create or use an existing database
connection_max_idle = 2
connection_max_open = 100
connection_max_lifetime_seconds = 0
interpolateParams = false
# if insert/upsert is failing, you can disable upsert or adjust the upsert query syntax to match your RDBMS:
enableUpsert = true
upsertQuery = """INSERT INTO ` + "`%s`" + ` (dirhash,name,directory,meta) VALUES(?,?,?,?) ON DUPLICATE KEY UPDATE meta = VALUES(meta)"""
[postgres] # or cockroachdb, YugabyteDB
# CREATE TABLE IF NOT EXISTS filemeta (
# dirhash BIGINT,
# name VARCHAR(65535),
# directory VARCHAR(65535),
# meta bytea,
# PRIMARY KEY (dirhash, name)
# );
enabled = false
hostname = "localhost"
port = 5432
username = "postgres"
password = ""
database = "postgres" # create or use an existing database
schema = ""
sslmode = "disable"
connection_max_idle = 100
connection_max_open = 100
connection_max_lifetime_seconds = 0
# if insert/upsert is failing, you can disable upsert or adjust the upsert query syntax to match your RDBMS:
enableUpsert = true
upsertQuery = """INSERT INTO "%[1]s" (dirhash,name,directory,meta) VALUES($1,$2,$3,$4) ON CONFLICT (dirhash,name) DO UPDATE SET meta = EXCLUDED.meta WHERE "%[1]s".meta != EXCLUDED.meta"""
[postgres2]
enabled = false
createTable = """
CREATE TABLE IF NOT EXISTS "%s" (
dirhash BIGINT,
name VARCHAR(65535),
directory VARCHAR(65535),
meta bytea,
PRIMARY KEY (dirhash, name)
);
"""
hostname = "localhost"
port = 5432
username = "postgres"
password = ""
database = "postgres" # create or use an existing database
schema = ""
sslmode = "disable"
connection_max_idle = 100
connection_max_open = 100
connection_max_lifetime_seconds = 0
# if insert/upsert is failing, you can disable upsert or adjust the upsert query syntax to match your RDBMS:
enableUpsert = true
upsertQuery = """INSERT INTO "%[1]s" (dirhash,name,directory,meta) VALUES($1,$2,$3,$4) ON CONFLICT (dirhash,name) DO UPDATE SET meta = EXCLUDED.meta WHERE "%[1]s".meta != EXCLUDED.meta"""
[cassandra]
# CREATE TABLE filemeta (
# directory varchar,
# name varchar,
# meta blob,
# PRIMARY KEY (directory, name)
# ) WITH CLUSTERING ORDER BY (name ASC);
enabled = false
keyspace = "seaweedfs"
hosts = [
"localhost:9042",
]
username = ""
password = ""
# This changes the data layout. Only add new directories. Removing/Updating will cause data loss.
superLargeDirectories = []
# Name of the datacenter local to this filer, used as host selection fallback.
localDC = ""
[hbase]
enabled = false
zkquorum = ""
table = "seaweedfs"
[redis2]
enabled = false
address = "localhost:6379"
password = ""
database = 0
# This changes the data layout. Only add new directories. Removing/Updating will cause data loss.
superLargeDirectories = []
[redis_cluster2]
enabled = false
addresses = [
"localhost:30001",
"localhost:30002",
"localhost:30003",
"localhost:30004",
"localhost:30005",
"localhost:30006",
]
password = ""
# allows reads from slave servers or the master, but all writes still go to the master
readOnly = false
# automatically use the closest Redis server for reads
routeByLatency = false
# This changes the data layout. Only add new directories. Removing/Updating will cause data loss.
superLargeDirectories = []
[etcd]
enabled = false
servers = "localhost:2379"
timeout = "3s"
[mongodb]
enabled = false
uri = "mongodb://localhost:27017"
option_pool_size = 0
database = "seaweedfs"
[elastic7]
enabled = false
servers = [
"http://localhost1:9200",
"http://localhost2:9200",
"http://localhost3:9200",
]
username = ""
password = ""
sniff_enabled = false
healthcheck_enabled = false
# increasing the value is recommended; be sure the value in Elasticsearch is greater than or equal to this
index.max_result_window = 10000
##########################
##########################
# To add path-specific filer store:
#
# 1. Add a name following the store type separated by a dot ".". E.g., cassandra.tmp
# 2. Add a location configuration. E.g., location = "/tmp/"
# 3. Copy and customize all other configurations.
# Make sure they are not the same if using the same store type!
# 4. Set enabled to true
#
# The following is just using redis as an example
##########################
[redis2.tmp]
enabled = false
location = "/tmp/"
address = "localhost:6379"
password = ""
database = 1

View File

@@ -0,0 +1,63 @@
# Put this file in one of these locations, with descending priority
# ./master.toml
# $HOME/.seaweedfs/master.toml
# /etc/seaweedfs/master.toml
# this file is read by master
[master.maintenance]
# periodically running these scripts is the same as running them from 'weed shell'
scripts = """
lock
ec.encode -fullPercent=95 -quietFor=1h
ec.rebuild -force
ec.balance -force
volume.deleteEmpty -quietFor=24h -force
volume.balance -force
volume.fix.replication
unlock
"""
sleep_minutes = 17 # sleep minutes between each script execution
[master.filer]
default = "localhost:8888" # used by maintenance scripts if the scripts needs to use fs related commands
[master.sequencer]
type = "raft" # Choose [raft|etcd|snowflake] type for storing the file id sequence
# when sequencer.type = etcd, set the listen-client-urls of the etcd cluster that stores the file id sequence
# example: http://127.0.0.1:2379,http://127.0.0.1:2389
sequencer_etcd_urls = "http://127.0.0.1:2379"
# when sequencer.type = snowflake, the snowflake id must be different from that of the other masters
sequencer_snowflake_id = 0 # any number between 1 and 1023
# configurations for tiered cloud storage
# old volumes are transparently moved to cloud for cost efficiency
[storage.backend]
[storage.backend.s3.default]
enabled = false
aws_access_key_id = "" # if empty, loads from the shared credentials file (~/.aws/credentials).
aws_secret_access_key = "" # if empty, loads from the shared credentials file (~/.aws/credentials).
region = "us-east-2"
bucket = "your_bucket_name" # an existing bucket
endpoint = ""
# create this number of logical volumes if there are no more writable volumes
# count_x means how many copies of the data.
# e.g.:
# 000 has only one copy, copy_1
# 010 and 001 have two copies, copy_2
# 011 has three copies, copy_3
[master.volume_growth]
copy_1 = 7 # create 1 x 7 = 7 actual volumes
copy_2 = 6 # create 2 x 6 = 12 actual volumes
copy_3 = 3 # create 3 x 3 = 9 actual volumes
copy_other = 1 # create n x 1 = n actual volumes
# configuration flags for replication
[master.replication]
# any replication counts should be considered minimums. If you specify 010 and
# have 3 different racks, that's still considered writable. Writes will still
# try to replicate to all available volumes. You should only use this option
# if you are doing your own replication or periodic sync of volumes.
treat_replication_as_minimums = false

View File

@@ -0,0 +1,54 @@
# A sample TOML config file for SeaweedFS filer store
# Used by "weed filer" or "weed server -filer", and by "weed filer.replicate"
# Put this file in one of these locations, with descending priority
# ./notification.toml
# $HOME/.seaweedfs/notification.toml
# /etc/seaweedfs/notification.toml
####################################################
# notification
# send and receive filer updates for each file to an external message queue
####################################################
[notification.log]
# this is only for debugging purposes and does not work with "weed filer.replicate"
enabled = false
[notification.kafka]
enabled = false
hosts = [
"localhost:9092"
]
topic = "seaweedfs_filer"
offsetFile = "./last.offset"
offsetSaveIntervalSeconds = 10
[notification.aws_sqs]
# experimental, let me know if it works
enabled = false
aws_access_key_id = "" # if empty, loads from the shared credentials file (~/.aws/credentials).
aws_secret_access_key = "" # if empty, loads from the shared credentials file (~/.aws/credentials).
region = "us-east-2"
sqs_queue_name = "my_filer_queue" # an existing queue name
[notification.google_pub_sub]
# read credentials doc at https://cloud.google.com/docs/authentication/getting-started
enabled = false
google_application_credentials = "/path/to/x.json" # path to json credential file
project_id = "" # an existing project id
topic = "seaweedfs_filer_topic" # a topic, auto created if does not exists
[notification.gocdk_pub_sub]
# The Go Cloud Development Kit (https://gocloud.dev).
# PubSub API (https://godoc.org/gocloud.dev/pubsub).
# Supports AWS SNS/SQS, Azure Service Bus, Google PubSub, NATS and RabbitMQ.
enabled = false
# This URL will dial the RabbitMQ server at the URL in the environment
# variable RABBIT_SERVER_URL and open the exchange "myexchange".
# The exchange must have already been created by some other means, like
# the RabbitMQ management plugin. Create myexchange of type fanout and myqueue,
# then create a binding myexchange => myqueue
topic_url = "rabbit://myexchange"
sub_url = "rabbit://myqueue"
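For the gocdk_pub_sub sink above, here is a minimal sketch of publishing through the Go CDK to the rabbit:// topic URL; the broker address and exchange name are the example values from this config, not verified endpoints:

package main

import (
    "context"
    "log"

    "gocloud.dev/pubsub"
    _ "gocloud.dev/pubsub/rabbitpubsub" // registers the rabbit:// URL scheme
)

func main() {
    ctx := context.Background()
    // RABBIT_SERVER_URL must point at the broker, e.g. amqp://guest:guest@localhost:5672/
    topic, err := pubsub.OpenTopic(ctx, "rabbit://myexchange")
    if err != nil {
        log.Fatal(err)
    }
    defer topic.Shutdown(ctx)
    if err := topic.Send(ctx, &pubsub.Message{Body: []byte("file updated")}); err != nil {
        log.Fatal(err)
    }
}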

View File

@@ -0,0 +1,71 @@
# A sample TOML config file for replicating SeaweedFS filer
# Used with "weed filer.backup"
# Using it with "weed filer.replicate" is deprecated.
# Put this file in one of these locations, with descending priority
# ./replication.toml
# $HOME/.seaweedfs/replication.toml
# /etc/seaweedfs/replication.toml
[source.filer] # deprecated. Only useful with "weed filer.replicate"
enabled = true
grpcAddress = "localhost:18888"
# all files under this directory tree are replicated.
# this is not a directory on your hard drive, but on your filer.
# i.e., all files with this "prefix" are sent to the notification message queue.
directory = "/buckets"
[sink.local]
enabled = false
directory = "/data"
# all replicated files are placed under yyyy-mm-dd directories based on modified time,
# so each date directory contains all new and updated files.
is_incremental = false
[sink.filer]
enabled = false
grpcAddress = "localhost:18888"
# all replicated files are under this directory tree
# this is not a directory on your hard drive, but on your filer.
# i.e., all received files will be "prefixed" to this directory.
directory = "/backup"
replication = ""
collection = ""
ttlSec = 0
is_incremental = false
[sink.s3]
# read credentials doc at https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/sessions.html
# default loads credentials from the shared credentials file (~/.aws/credentials).
enabled = false
aws_access_key_id = "" # if empty, loads from the shared credentials file (~/.aws/credentials).
aws_secret_access_key = "" # if empty, loads from the shared credentials file (~/.aws/credentials).
region = "us-east-2"
bucket = "your_bucket_name" # an existing bucket
directory = "/" # destination directory
endpoint = ""
is_incremental = false
[sink.google_cloud_storage]
# read credentials doc at https://cloud.google.com/docs/authentication/getting-started
enabled = false
google_application_credentials = "/path/to/x.json" # path to json credential file
bucket = "your_bucket_seaweedfs" # an existing bucket
directory = "/" # destination directory
is_incremental = false
[sink.azure]
# experimental, let me know if it works
enabled = false
account_name = ""
account_key = ""
container = "mycontainer" # an existing container
directory = "/" # destination directory
is_incremental = false
[sink.backblaze]
enabled = false
b2_account_id = ""
b2_master_application_key = ""
bucket = "mybucket" # an existing bucket
directory = "/" # destination directory
is_incremental = false

View File

@@ -0,0 +1,60 @@
# Put this file in one of these locations, with descending priority
# ./security.toml
# $HOME/.seaweedfs/security.toml
# /etc/seaweedfs/security.toml
# this file is read by master, volume server, and filer
# the jwt signing key is read by master and volume server.
# a jwt expires after 10 seconds by default.
[jwt.signing]
key = ""
expires_after_seconds = 10 # seconds
# jwt for read is only supported with master+volume setup. Filer does not support this mode.
[jwt.signing.read]
key = ""
expires_after_seconds = 10 # seconds
# all grpc tls authentications are mutual
# the values for the following ca, cert, and key are paths to the PEM files.
# the host name is not checked, so the PEM files can be shared.
[grpc]
ca = ""
# Set a wildcard domain to enable TLS authentication by common names
allowed_wildcard_domain = "" # .mycompany.com
[grpc.volume]
cert = ""
key = ""
allowed_commonNames = "" # comma-separated SSL certificate common names
[grpc.master]
cert = ""
key = ""
allowed_commonNames = "" # comma-separated SSL certificate common names
[grpc.filer]
cert = ""
key = ""
allowed_commonNames = "" # comma-separated SSL certificate common names
[grpc.msg_broker]
cert = ""
key = ""
allowed_commonNames = "" # comma-separated SSL certificate common names
# use this for any place that needs a grpc client
# i.e., "weed backup|benchmark|filer.copy|filer.replicate|mount|s3|upload"
[grpc.client]
cert = ""
key = ""
# volume server https options
# Note: work in progress!
# this does not yet work with other clients, e.g., "weed filer|mount"
[https.client]
enabled = true
[https.volume]
cert = ""
key = ""

View File

@@ -0,0 +1,10 @@
[cluster]
default = "c1"
[cluster.c1]
master = "localhost:9333" # comma-separated master servers
filer = "localhost:8888" # filer host and port
[cluster.c2]
master = ""
filer = ""

View File

@@ -3,6 +3,7 @@ package command
import (
"fmt"
"github.com/chrislusf/seaweedfs/weed/util/grace"
"net/http"
"os"
"strings"
"time"
@@ -16,6 +17,8 @@ import (
type ServerOptions struct {
cpuprofile *string
memprofile *string
debug *bool
debugPort *int
v VolumeServerOptions
}
@@ -78,6 +81,8 @@ var (
func init() {
serverOptions.cpuprofile = cmdServer.Flag.String("cpuprofile", "", "cpu profile output file")
serverOptions.memprofile = cmdServer.Flag.String("memprofile", "", "memory profile output file")
serverOptions.debug = cmdServer.Flag.Bool("debug", false, "serves runtime profiling data, e.g., http://localhost:6060/debug/pprof/goroutine?debug=2")
serverOptions.debugPort = cmdServer.Flag.Int("debug.port", 6060, "http port for debugging")
masterOptions.port = cmdServer.Flag.Int("master.port", 9333, "master server http listen port")
masterOptions.metaFolder = cmdServer.Flag.String("master.dir", "", "data directory to store meta data, default to same as -dir specified")
@@ -107,10 +112,11 @@ func init() {
serverOptions.v.indexType = cmdServer.Flag.String("volume.index", "memory", "Choose [memory|leveldb|leveldbMedium|leveldbLarge] mode for memory~performance balance.")
serverOptions.v.diskType = cmdServer.Flag.String("volume.disk", "", "[hdd|ssd|<tag>] hard drive or solid state drive or any tag")
serverOptions.v.fixJpgOrientation = cmdServer.Flag.Bool("volume.images.fix.orientation", false, "Adjust jpg orientation when uploading.")
serverOptions.v.readRedirect = cmdServer.Flag.Bool("volume.read.redirect", true, "Redirect moved or non-local volumes.")
serverOptions.v.readMode = cmdServer.Flag.String("volume.readMode", "proxy", "[local|proxy|redirect] how to deal with non-local volume: 'not found|read in remote node|redirect volume location'.")
serverOptions.v.compactionMBPerSecond = cmdServer.Flag.Int("volume.compactionMBps", 0, "limit compaction speed in mega bytes per second")
serverOptions.v.fileSizeLimitMB = cmdServer.Flag.Int("volume.fileSizeLimitMB", 256, "limit file size to avoid out of memory")
serverOptions.v.concurrentUploadLimitMB = cmdServer.Flag.Int("volume.concurrentUploadLimitMB", 64, "limit total concurrent upload size")
serverOptions.v.concurrentDownloadLimitMB = cmdServer.Flag.Int("volume.concurrentDownloadLimitMB", 64, "limit total concurrent download size")
serverOptions.v.publicUrl = cmdServer.Flag.String("volume.publicUrl", "", "publicly accessible address")
serverOptions.v.preStopSeconds = cmdServer.Flag.Int("volume.preStopSeconds", 10, "number of seconds between stop send heartbeats and stop volume server")
serverOptions.v.pprof = cmdServer.Flag.Bool("volume.pprof", false, "enable pprof http handlers. precludes --memprofile and --cpuprofile")
@@ -139,6 +145,10 @@ func init() {
func runServer(cmd *Command, args []string) bool {
if *serverOptions.debug {
go http.ListenAndServe(fmt.Sprintf(":%d", *serverOptions.debugPort), nil)
}
util.LoadConfiguration("security", false)
util.LoadConfiguration("master", false)
@@ -245,7 +255,7 @@ func runServer(cmd *Command, args []string) bool {
// start volume server
if *isStartingVolumeServer {
minFreeSpaces := util.MustParseMinFreeSpace(*minFreeSpace, *minFreeSpacePercent)
minFreeSpaces := util.MustParseMinFreeSpace(*volumeMinFreeSpace, *volumeMinFreeSpacePercent)
go serverOptions.v.startVolumeServer(*volumeDataFolders, *volumeMaxDataVolumeCounts, *serverWhiteListOption, minFreeSpaces)
}
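The new -debug flag serves profiling data with http.ListenAndServe and a nil handler, which falls back to http.DefaultServeMux; the /debug/pprof endpoints only exist on that mux if net/http/pprof is imported somewhere in the binary (the hunk above does not show that import). A standalone sketch of the pattern:

package main

import (
    "log"
    "net/http"
    _ "net/http/pprof" // side effect: registers /debug/pprof/* on DefaultServeMux
)

func main() {
    // then visit e.g. http://localhost:6060/debug/pprof/goroutine?debug=2
    log.Println(http.ListenAndServe(":6060", nil))
}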

View File

@@ -55,6 +55,7 @@ func runShell(command *Command, args []string) bool {
var err error
shellOptions.FilerHost, shellOptions.FilerPort, err = util.ParseHostPort(*shellInitialFiler)
shellOptions.FilerAddress = *shellInitialFiler
if err != nil {
fmt.Printf("failed to parse filer %s: %v\n", *shellInitialFiler, err)
return false

View File

@@ -71,20 +71,20 @@ func runUpload(cmd *Command, args []string) bool {
util.LoadConfiguration("security", false)
grpcDialOption := security.LoadClientTLS(util.GetViper(), "grpc.client")
defaultCollection, err := readMasterConfiguration(grpcDialOption, *upload.master)
defaultReplication, err := readMasterConfiguration(grpcDialOption, *upload.master)
if err != nil {
fmt.Printf("upload: %v", err)
return false
}
if *upload.replication == "" {
*upload.replication = defaultCollection
*upload.replication = defaultReplication
}
if len(args) == 0 {
if *upload.dir == "" {
return false
}
filepath.Walk(util.ResolvePath(*upload.dir), func(path string, info os.FileInfo, err error) error {
err = filepath.Walk(util.ResolvePath(*upload.dir), func(path string, info os.FileInfo, err error) error {
if err == nil {
if !info.IsDir() {
if *upload.include != "" {
@@ -108,12 +108,21 @@ func runUpload(cmd *Command, args []string) bool {
}
return err
})
if err != nil {
fmt.Println(err.Error())
return false
}
} else {
parts, e := operation.NewFileParts(args)
if e != nil {
fmt.Println(e.Error())
return false
}
results, err := operation.SubmitFiles(func() string { return *upload.master }, grpcDialOption, parts, *upload.replication, *upload.collection, *upload.dataCenter, *upload.ttl, *upload.diskType, *upload.maxMB, *upload.usePublicUrl)
if err != nil {
fmt.Println(err.Error())
return false
}
results, _ := operation.SubmitFiles(func() string { return *upload.master }, grpcDialOption, parts, *upload.replication, *upload.collection, *upload.dataCenter, *upload.ttl, *upload.diskType, *upload.maxMB, *upload.usePublicUrl)
bytes, _ := json.Marshal(results)
fmt.Println(string(bytes))
}

View File

@@ -35,31 +35,32 @@ var (
)
type VolumeServerOptions struct {
port *int
publicPort *int
folders []string
folderMaxLimits []int
idxFolder *string
ip *string
publicUrl *string
bindIp *string
masters *string
idleConnectionTimeout *int
dataCenter *string
rack *string
whiteList []string
indexType *string
diskType *string
fixJpgOrientation *bool
readRedirect *bool
cpuProfile *string
memProfile *string
compactionMBPerSecond *int
fileSizeLimitMB *int
concurrentUploadLimitMB *int
pprof *bool
preStopSeconds *int
metricsHttpPort *int
port *int
publicPort *int
folders []string
folderMaxLimits []int
idxFolder *string
ip *string
publicUrl *string
bindIp *string
masters *string
idleConnectionTimeout *int
dataCenter *string
rack *string
whiteList []string
indexType *string
diskType *string
fixJpgOrientation *bool
readMode *string
cpuProfile *string
memProfile *string
compactionMBPerSecond *int
fileSizeLimitMB *int
concurrentUploadLimitMB *int
concurrentDownloadLimitMB *int
pprof *bool
preStopSeconds *int
metricsHttpPort *int
// pulseSeconds *int
enableTcp *bool
}
@@ -80,12 +81,13 @@ func init() {
v.indexType = cmdVolume.Flag.String("index", "memory", "Choose [memory|leveldb|leveldbMedium|leveldbLarge] mode for memory~performance balance.")
v.diskType = cmdVolume.Flag.String("disk", "", "[hdd|ssd|<tag>] hard drive or solid state drive or any tag")
v.fixJpgOrientation = cmdVolume.Flag.Bool("images.fix.orientation", false, "Adjust jpg orientation when uploading.")
v.readRedirect = cmdVolume.Flag.Bool("read.redirect", true, "Redirect moved or non-local volumes.")
v.readMode = cmdVolume.Flag.String("readMode", "proxy", "[local|proxy|redirect] how to deal with non-local volume: 'not found|proxy to remote node|redirect volume location'.")
v.cpuProfile = cmdVolume.Flag.String("cpuprofile", "", "cpu profile output file")
v.memProfile = cmdVolume.Flag.String("memprofile", "", "memory profile output file")
v.compactionMBPerSecond = cmdVolume.Flag.Int("compactionMBps", 0, "limit background compaction or copying speed in mega bytes per second")
v.fileSizeLimitMB = cmdVolume.Flag.Int("fileSizeLimitMB", 256, "limit file size to avoid out of memory")
v.concurrentUploadLimitMB = cmdVolume.Flag.Int("concurrentUploadLimitMB", 128, "limit total concurrent upload size")
v.concurrentUploadLimitMB = cmdVolume.Flag.Int("concurrentUploadLimitMB", 256, "limit total concurrent upload size")
v.concurrentDownloadLimitMB = cmdVolume.Flag.Int("concurrentDownloadLimitMB", 256, "limit total concurrent download size")
v.pprof = cmdVolume.Flag.Bool("pprof", false, "enable pprof http handlers. precludes --memprofile and --cpuprofile")
v.metricsHttpPort = cmdVolume.Flag.Int("metricsPort", 0, "Prometheus metrics listen port")
v.idxFolder = cmdVolume.Flag.String("dir.idx", "", "directory to store .idx files")
@@ -228,10 +230,11 @@ func (v VolumeServerOptions) startVolumeServer(volumeFolders, maxVolumeCounts, v
volumeNeedleMapKind,
strings.Split(masters, ","), 5, *v.dataCenter, *v.rack,
v.whiteList,
*v.fixJpgOrientation, *v.readRedirect,
*v.fixJpgOrientation, *v.readMode,
*v.compactionMBPerSecond,
*v.fileSizeLimitMB,
int64(*v.concurrentUploadLimitMB)*1024*1024,
int64(*v.concurrentDownloadLimitMB)*1024*1024,
)
// starting grpc server
grpcS := v.startGrpcService(volumeServer)
@@ -259,6 +262,7 @@ func (v VolumeServerOptions) startVolumeServer(volumeFolders, maxVolumeCounts, v
// Stop heartbeats
if !volumeServer.StopHeartbeat() {
volumeServer.SetStopping()
glog.V(0).Infof("stop send heartbeat and wait %d seconds until shutdown ...", *v.preStopSeconds)
time.Sleep(time.Duration(*v.preStopSeconds) * time.Second)
}
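The -readMode flag above replaces the boolean -read.redirect with three strategies for handling a volume that does not live on the receiving node. A sketch of the dispatch; the function name and bodies here are illustrative, not taken from the diff:

package main

import "fmt"

// handleNonLocalVolume sketches the three -readMode strategies.
func handleNonLocalVolume(readMode string) {
    switch readMode {
    case "local":
        fmt.Println("behave as if the volume does not exist: respond 404")
    case "proxy":
        fmt.Println("fetch the data from the node that has the volume and stream it back")
    case "redirect":
        fmt.Println("respond 302 with the URL of a node that has the volume")
    }
}

func main() { handleNonLocalVolume("proxy") }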

View File

@@ -32,6 +32,9 @@ type AbstractSqlStore struct {
dbsLock sync.Mutex
}
func (store *AbstractSqlStore) CanDropWholeBucket() bool {
return store.SupportBucketTable
}
func (store *AbstractSqlStore) OnBucketCreation(bucket string) {
store.dbsLock.Lock()
defer store.dbsLock.Unlock()
@@ -277,7 +280,6 @@ func (store *AbstractSqlStore) DeleteFolderChildren(ctx context.Context, fullpat
}
glog.V(4).Infof("delete %s SQL %s %d", string(shortPath), store.GetSqlDeleteFolderChildren(bucket), util.HashStringToLong(string(shortPath)))
res, err := db.ExecContext(ctx, store.GetSqlDeleteFolderChildren(bucket), util.HashStringToLong(string(shortPath)), string(shortPath))
if err != nil {
return fmt.Errorf("deleteFolderChildren %s: %s", fullpath, err)
@@ -287,7 +289,6 @@ func (store *AbstractSqlStore) DeleteFolderChildren(ctx context.Context, fullpat
if err != nil {
return fmt.Errorf("deleteFolderChildren %s but no rows affected: %s", fullpath, err)
}
return nil
}

View File

@@ -32,6 +32,7 @@ func (store *CassandraStore) Initialize(configuration util.Configuration, prefix
configuration.GetString(prefix+"username"),
configuration.GetString(prefix+"password"),
configuration.GetStringSlice(prefix+"superLargeDirectories"),
configuration.GetString(prefix+"localDC"),
)
}
@@ -40,13 +41,19 @@ func (store *CassandraStore) isSuperLargeDirectory(dir string) (dirHash string,
return
}
func (store *CassandraStore) initialize(keyspace string, hosts []string, username string, password string, superLargeDirectories []string) (err error) {
func (store *CassandraStore) initialize(keyspace string, hosts []string, username string, password string, superLargeDirectories []string, localDC string) (err error) {
store.cluster = gocql.NewCluster(hosts...)
if username != "" && password != "" {
store.cluster.Authenticator = gocql.PasswordAuthenticator{Username: username, Password: password}
}
store.cluster.Keyspace = keyspace
fallback := gocql.RoundRobinHostPolicy()
if localDC != "" {
fallback = gocql.DCAwareRoundRobinPolicy(localDC)
}
store.cluster.PoolConfig.HostSelectionPolicy = gocql.TokenAwareHostPolicy(fallback)
store.cluster.Consistency = gocql.LocalQuorum
store.session, err = store.cluster.CreateSession()
if err != nil {
glog.V(0).Infof("Failed to open cassandra store, hosts %v, keyspace %s", hosts, keyspace)
@@ -117,7 +124,7 @@ func (store *CassandraStore) FindEntry(ctx context.Context, fullpath util.FullPa
var data []byte
if err := store.session.Query(
"SELECT meta FROM filemeta WHERE directory=? AND name=?",
dir, name).Consistency(gocql.One).Scan(&data); err != nil {
dir, name).Scan(&data); err != nil {
if err != gocql.ErrNotFound {
return nil, filer_pb.ErrNotFound
}

View File

@@ -42,6 +42,7 @@ type Entry struct {
HardLinkId HardLinkId
HardLinkCounter int32
Content []byte
Remote *filer_pb.RemoteEntry
}
func (entry *Entry) Size() uint64 {
@@ -56,20 +57,55 @@ func (entry *Entry) Timestamp() time.Time {
}
}
func (entry *Entry) ShallowClone() *Entry {
if entry == nil {
return nil
}
newEntry := &Entry{}
newEntry.FullPath = entry.FullPath
newEntry.Attr = entry.Attr
newEntry.Chunks = entry.Chunks
newEntry.Extended = entry.Extended
newEntry.HardLinkId = entry.HardLinkId
newEntry.HardLinkCounter = entry.HardLinkCounter
newEntry.Content = entry.Content
newEntry.Remote = entry.Remote
return newEntry
}
func (entry *Entry) ToProtoEntry() *filer_pb.Entry {
if entry == nil {
return nil
}
return &filer_pb.Entry{
Name: entry.FullPath.Name(),
IsDirectory: entry.IsDirectory(),
Attributes: EntryAttributeToPb(entry),
Chunks: entry.Chunks,
Extended: entry.Extended,
HardLinkId: entry.HardLinkId,
HardLinkCounter: entry.HardLinkCounter,
Content: entry.Content,
message := &filer_pb.Entry{}
message.Name = entry.FullPath.Name()
entry.ToExistingProtoEntry(message)
return message
}
func (entry *Entry) ToExistingProtoEntry(message *filer_pb.Entry) {
if entry == nil {
return
}
message.IsDirectory = entry.IsDirectory()
message.Attributes = EntryAttributeToPb(entry)
message.Chunks = entry.Chunks
message.Extended = entry.Extended
message.HardLinkId = entry.HardLinkId
message.HardLinkCounter = entry.HardLinkCounter
message.Content = entry.Content
message.RemoteEntry = entry.Remote
}
func FromPbEntryToExistingEntry(message *filer_pb.Entry, fsEntry *Entry) {
fsEntry.Attr = PbToEntryAttribute(message.Attributes)
fsEntry.Chunks = message.Chunks
fsEntry.Extended = message.Extended
fsEntry.HardLinkId = HardLinkId(message.HardLinkId)
fsEntry.HardLinkCounter = message.HardLinkCounter
fsEntry.Content = message.Content
fsEntry.Remote = message.RemoteEntry
}
func (entry *Entry) ToProtoFullEntry() *filer_pb.FullEntry {
@@ -83,26 +119,11 @@ func (entry *Entry) ToProtoFullEntry() *filer_pb.FullEntry {
}
}
func (entry *Entry) Clone() *Entry {
return &Entry{
FullPath: entry.FullPath,
Attr: entry.Attr,
Chunks: entry.Chunks,
Extended: entry.Extended,
HardLinkId: entry.HardLinkId,
HardLinkCounter: entry.HardLinkCounter,
}
}
func FromPbEntry(dir string, entry *filer_pb.Entry) *Entry {
return &Entry{
FullPath: util.NewFullPath(dir, entry.Name),
Attr: PbToEntryAttribute(entry.Attributes),
Chunks: entry.Chunks,
HardLinkId: HardLinkId(entry.HardLinkId),
HardLinkCounter: entry.HardLinkCounter,
Content: entry.Content,
}
t := &Entry{}
t.FullPath = util.NewFullPath(dir, entry.Name)
FromPbEntryToExistingEntry(entry, t)
return t
}
func maxUint64(x, y uint64) uint64 {

View File

@@ -12,14 +12,8 @@ import (
)
func (entry *Entry) EncodeAttributesAndChunks() ([]byte, error) {
message := &filer_pb.Entry{
Attributes: EntryAttributeToPb(entry),
Chunks: entry.Chunks,
Extended: entry.Extended,
HardLinkId: entry.HardLinkId,
HardLinkCounter: entry.HardLinkCounter,
Content: entry.Content,
}
message := &filer_pb.Entry{}
entry.ToExistingProtoEntry(message)
return proto.Marshal(message)
}
@@ -31,15 +25,7 @@ func (entry *Entry) DecodeAttributesAndChunks(blob []byte) error {
return fmt.Errorf("decoding value blob for %s: %v", entry.FullPath, err)
}
entry.Attr = PbToEntryAttribute(message.Attributes)
entry.Extended = message.Extended
entry.Chunks = message.Chunks
entry.HardLinkId = message.HardLinkId
entry.HardLinkCounter = message.HardLinkCounter
entry.Content = message.Content
FromPbEntryToExistingEntry(message, entry)
return nil
}
@@ -129,6 +115,9 @@ func EqualEntry(a, b *Entry) bool {
if !bytes.Equal(a.Content, b.Content) {
return false
}
if !proto.Equal(a.Remote, b.Remote) {
return false
}
return true
}

View File

@@ -16,7 +16,7 @@ import (
)
const (
ManifestBatch = 1000
ManifestBatch = 10000
)
func HasChunkManifest(chunks []*filer_pb.FileChunk) bool {
@@ -39,9 +39,14 @@ func SeparateManifestChunks(chunks []*filer_pb.FileChunk) (manifestChunks, nonMa
return
}
func ResolveChunkManifest(lookupFileIdFn wdclient.LookupFileIdFunctionType, chunks []*filer_pb.FileChunk) (dataChunks, manifestChunks []*filer_pb.FileChunk, manifestResolveErr error) {
func ResolveChunkManifest(lookupFileIdFn wdclient.LookupFileIdFunctionType, chunks []*filer_pb.FileChunk, startOffset, stopOffset int64) (dataChunks, manifestChunks []*filer_pb.FileChunk, manifestResolveErr error) {
// TODO maybe parallel this
for _, chunk := range chunks {
if max(chunk.Offset, startOffset) >= min(chunk.Offset+int64(chunk.Size), stopOffset) {
continue
}
if !chunk.IsChunkManifest {
dataChunks = append(dataChunks, chunk)
continue
@@ -54,7 +59,7 @@ func ResolveChunkManifest(lookupFileIdFn wdclient.LookupFileIdFunctionType, chun
manifestChunks = append(manifestChunks, chunk)
// recursive
dchunks, mchunks, subErr := ResolveChunkManifest(lookupFileIdFn, resolvedChunks)
dchunks, mchunks, subErr := ResolveChunkManifest(lookupFileIdFn, resolvedChunks, startOffset, stopOffset)
if subErr != nil {
return chunks, nil, subErr
}
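The new startOffset/stopOffset parameters let ResolveChunkManifest prune chunks before resolving manifests: the continue above fires exactly when the chunk's half-open range [Offset, Offset+Size) is disjoint from [startOffset, stopOffset). A restatement of that test as a runnable sketch (the diff itself relies on package-local max/min int64 helpers, which is an assumption here):

// overlaps reports whether [aStart, aStop) intersects [bStart, bStop).
// The diff's skip condition is exactly !overlaps(...).
func overlaps(aStart, aStop, bStart, bStop int64) bool {
    // the intervals intersect iff the later start is before the earlier stop
    return max64(aStart, bStart) < min64(aStop, bStop)
}

func max64(a, b int64) int64 {
    if a > b {
        return a
    }
    return b
}

func min64(a, b int64) int64 {
    if a < b {
        return a
    }
    return b
}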

View File

@@ -53,7 +53,7 @@ func ETagChunks(chunks []*filer_pb.FileChunk) (etag string) {
func CompactFileChunks(lookupFileIdFn wdclient.LookupFileIdFunctionType, chunks []*filer_pb.FileChunk) (compacted, garbage []*filer_pb.FileChunk) {
visibles, _ := NonOverlappingVisibleIntervals(lookupFileIdFn, chunks)
visibles, _ := NonOverlappingVisibleIntervals(lookupFileIdFn, chunks, 0, math.MaxInt64)
fileIds := make(map[string]bool)
for _, interval := range visibles {
@@ -72,11 +72,11 @@ func CompactFileChunks(lookupFileIdFn wdclient.LookupFileIdFunctionType, chunks
func MinusChunks(lookupFileIdFn wdclient.LookupFileIdFunctionType, as, bs []*filer_pb.FileChunk) (delta []*filer_pb.FileChunk, err error) {
aData, aMeta, aErr := ResolveChunkManifest(lookupFileIdFn, as)
aData, aMeta, aErr := ResolveChunkManifest(lookupFileIdFn, as, 0, math.MaxInt64)
if aErr != nil {
return nil, aErr
}
bData, bMeta, bErr := ResolveChunkManifest(lookupFileIdFn, bs)
bData, bMeta, bErr := ResolveChunkManifest(lookupFileIdFn, bs, 0, math.MaxInt64)
if bErr != nil {
return nil, bErr
}
@@ -117,7 +117,7 @@ func (cv *ChunkView) IsFullChunk() bool {
func ViewFromChunks(lookupFileIdFn wdclient.LookupFileIdFunctionType, chunks []*filer_pb.FileChunk, offset int64, size int64) (views []*ChunkView) {
visibles, _ := NonOverlappingVisibleIntervals(lookupFileIdFn, chunks)
visibles, _ := NonOverlappingVisibleIntervals(lookupFileIdFn, chunks, offset, offset+size)
return ViewFromVisibleIntervals(visibles, offset, size)
@@ -221,9 +221,9 @@ func MergeIntoVisibles(visibles []VisibleInterval, chunk *filer_pb.FileChunk) (n
// NonOverlappingVisibleIntervals translates the file chunk into VisibleInterval in memory
// If the file chunk content is a chunk manifest
func NonOverlappingVisibleIntervals(lookupFileIdFn wdclient.LookupFileIdFunctionType, chunks []*filer_pb.FileChunk) (visibles []VisibleInterval, err error) {
func NonOverlappingVisibleIntervals(lookupFileIdFn wdclient.LookupFileIdFunctionType, chunks []*filer_pb.FileChunk, startOffset int64, stopOffset int64) (visibles []VisibleInterval, err error) {
chunks, _, err = ResolveChunkManifest(lookupFileIdFn, chunks)
chunks, _, err = ResolveChunkManifest(lookupFileIdFn, chunks, startOffset, stopOffset)
sort.Slice(chunks, func(i, j int) bool {
if chunks[i].Mtime == chunks[j].Mtime {

View File

@@ -90,7 +90,7 @@ func TestRandomFileChunksCompact(t *testing.T) {
}
}
visibles, _ := NonOverlappingVisibleIntervals(nil, chunks)
visibles, _ := NonOverlappingVisibleIntervals(nil, chunks, 0, math.MaxInt64)
for _, v := range visibles {
for x := v.start; x < v.stop; x++ {
@@ -227,7 +227,7 @@ func TestIntervalMerging(t *testing.T) {
for i, testcase := range testcases {
log.Printf("++++++++++ merged test case %d ++++++++++++++++++++", i)
intervals, _ := NonOverlappingVisibleIntervals(nil, testcase.Chunks)
intervals, _ := NonOverlappingVisibleIntervals(nil, testcase.Chunks, 0, math.MaxInt64)
for x, interval := range intervals {
log.Printf("test case %d, interval %d, start=%d, stop=%d, fileId=%s",
i, x, interval.start, interval.stop, interval.fileId)

View File

@@ -42,6 +42,7 @@ type Filer struct {
MetaAggregator *MetaAggregator
Signature int32
FilerConf *FilerConf
RemoteStorage *FilerRemoteStorage
}
func NewFiler(masters []string, grpcDialOption grpc.DialOption,
@@ -51,8 +52,9 @@ func NewFiler(masters []string, grpcDialOption grpc.DialOption,
fileIdDeletionQueue: util.NewUnboundedQueue(),
GrpcDialOption: grpcDialOption,
FilerConf: NewFilerConf(),
RemoteStorage: NewFilerRemoteStorage(),
}
f.LocalMetaLogBuffer = log_buffer.NewLogBuffer(LogFlushInterval, f.logFlushFunc, notifyFn)
f.LocalMetaLogBuffer = log_buffer.NewLogBuffer("local", LogFlushInterval, f.logFlushFunc, notifyFn)
f.metaLogCollection = collection
f.metaLogReplication = replication
@@ -207,7 +209,7 @@ func (f *Filer) ensureParentDirecotryEntry(ctx context.Context, entry *Entry, di
Attr: Attr{
Mtime: now,
Crtime: now,
Mode: os.ModeDir | entry.Mode | 0110,
Mode: os.ModeDir | entry.Mode | 0111,
Uid: entry.Uid,
Gid: entry.Gid,
Collection: entry.Collection,
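The 0110 → 0111 change above adds the world execute (search) bit when the filer auto-creates parent directories; without the x bit a directory cannot be traversed. A quick worked example of the bit arithmetic:

package main

import (
    "fmt"
    "os"
)

func main() {
    m := os.FileMode(0644) // a typical file mode inherited from entry.Mode
    fmt.Println(os.ModeDir | m | 0110) // drwxr-xr--  (others cannot traverse)
    fmt.Println(os.ModeDir | m | 0111) // drwxr-xr-x  (others can traverse)
}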

View File

@@ -3,6 +3,7 @@ package filer
import (
"context"
"math"
"strings"
"sync"
"github.com/chrislusf/seaweedfs/weed/glog"
@@ -78,6 +79,9 @@ func (f *Filer) isBucket(entry *Entry) bool {
if parent != f.DirBucketsPath {
return false
}
if strings.HasPrefix(dirName, ".") {
return false
}
f.buckets.RLock()
defer f.buckets.RUnlock()

View File

@@ -15,6 +15,7 @@ import (
const (
DirectoryEtcRoot = "/etc"
DirectoryEtcSeaweedFS = "/etc/seaweedfs"
DirectoryEtcRemote = "/etc/remote"
FilerConfName = "filer.conf"
IamConfigDirecotry = "/etc/iam"
IamIdentityFile = "identity.json"
@@ -126,6 +127,9 @@ func mergePathConf(a, b *filer_pb.FilerConf_PathConf) {
if b.VolumeGrowthCount > 0 {
a.VolumeGrowthCount = b.VolumeGrowthCount
}
if b.ReadOnly {
a.ReadOnly = b.ReadOnly
}
}
func (fc *FilerConf) ToProto() *filer_pb.FilerConf {

View File

@@ -24,6 +24,18 @@ func TestFilerConf(t *testing.T) {
LocationPrefix: "/buckets/",
Replication: "001",
},
{
LocationPrefix: "/buckets",
ReadOnly: false,
},
{
LocationPrefix: "/buckets/xxx",
ReadOnly: true,
},
{
LocationPrefix: "/buckets/xxx/yyy",
ReadOnly: false,
},
}}
fc.doLoadConf(conf)
@@ -31,4 +43,7 @@ func TestFilerConf(t *testing.T) {
assert.Equal(t, "abcd", fc.MatchStorageRule("/buckets/abcd/jasdf").Collection)
assert.Equal(t, "001", fc.MatchStorageRule("/buckets/abc/jasdf").Replication)
assert.Equal(t, true, fc.MatchStorageRule("/buckets/xxx/yyy/zzz").ReadOnly)
assert.Equal(t, false, fc.MatchStorageRule("/buckets/other").ReadOnly)
}

View File

@@ -71,7 +71,7 @@ func (f *Filer) doBatchDeleteFolderMetaAndData(ctx context.Context, entry *Entry
lastFileName := ""
includeLastFile := false
if !isDeletingBucket {
if !isDeletingBucket || !f.Store.CanDropWholeBucket() {
for {
entries, _, err := f.ListDirectoryEntries(ctx, entry.FullPath, lastFileName, includeLastFile, PaginationSize, "", "", "")
if err != nil {

View File

@@ -12,6 +12,7 @@ import (
// onMetadataChangeEvent is triggered after filer processed change events from local or remote filers
func (f *Filer) onMetadataChangeEvent(event *filer_pb.SubscribeMetadataResponse) {
f.maybeReloadFilerConfiguration(event)
f.maybeReloadRemoteStorageConfigurationAndMapping(event)
f.onBucketEvents(event)
}
@@ -24,10 +25,14 @@ func (f *Filer) onBucketEvents(event *filer_pb.SubscribeMetadataResponse) {
}
if f.DirBucketsPath == event.Directory {
if message.OldEntry == nil && message.NewEntry != nil {
f.Store.OnBucketCreation(message.NewEntry.Name)
if message.NewEntry.IsDirectory {
f.Store.OnBucketCreation(message.NewEntry.Name)
}
}
if message.OldEntry != nil && message.NewEntry == nil {
f.Store.OnBucketDeletion(message.OldEntry.Name)
if message.OldEntry.IsDirectory {
f.Store.OnBucketDeletion(message.OldEntry.Name)
}
}
}
}
@@ -80,3 +85,16 @@ func (f *Filer) LoadFilerConf() {
}
f.FilerConf = fc
}
////////////////////////////////////
// load and maintain remote storages
////////////////////////////////////
func (f *Filer) LoadRemoteStorageConfAndMapping() {
if err := f.RemoteStorage.LoadRemoteStorageConfigurationsAndMapping(f); err != nil {
glog.Errorf("read remote conf and mapping: %v", err)
return
}
}
func (f *Filer) maybeReloadRemoteStorageConfigurationAndMapping(event *filer_pb.SubscribeMetadataResponse) {
// FIXME add reloading
}

View File

@@ -0,0 +1,197 @@
package filer
import (
"context"
"fmt"
"github.com/chrislusf/seaweedfs/weed/pb"
"github.com/chrislusf/seaweedfs/weed/remote_storage"
"github.com/chrislusf/seaweedfs/weed/util"
"github.com/golang/protobuf/proto"
"google.golang.org/grpc"
"math"
"strings"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/viant/ptrie"
)
const REMOTE_STORAGE_CONF_SUFFIX = ".conf"
const REMOTE_STORAGE_MOUNT_FILE = "mount.mapping"
type FilerRemoteStorage struct {
rules ptrie.Trie
storageNameToConf map[string]*filer_pb.RemoteConf
}
func NewFilerRemoteStorage() (rs *FilerRemoteStorage) {
rs = &FilerRemoteStorage{
rules: ptrie.New(),
storageNameToConf: make(map[string]*filer_pb.RemoteConf),
}
return rs
}
func (rs *FilerRemoteStorage) LoadRemoteStorageConfigurationsAndMapping(filer *Filer) (err error) {
// execute this on filer
entries, _, err := filer.ListDirectoryEntries(context.Background(), DirectoryEtcRemote, "", false, math.MaxInt64, "", "", "")
if err != nil {
if err == filer_pb.ErrNotFound {
return nil
}
glog.Errorf("read remote storage %s: %v", DirectoryEtcRemote, err)
return
}
for _, entry := range entries {
if entry.Name() == REMOTE_STORAGE_MOUNT_FILE {
if err := rs.loadRemoteStorageMountMapping(entry.Content); err != nil {
return err
}
continue
}
if !strings.HasSuffix(entry.Name(), REMOTE_STORAGE_CONF_SUFFIX) {
return nil
}
conf := &filer_pb.RemoteConf{}
if err := proto.Unmarshal(entry.Content, conf); err != nil {
return fmt.Errorf("unmarshal %s/%s: %v", DirectoryEtcRemote, entry.Name(), err)
}
rs.storageNameToConf[conf.Name] = conf
}
return nil
}
func (rs *FilerRemoteStorage) loadRemoteStorageMountMapping(data []byte) (err error) {
mappings := &filer_pb.RemoteStorageMapping{}
if err := proto.Unmarshal(data, mappings); err != nil {
return fmt.Errorf("unmarshal %s/%s: %v", DirectoryEtcRemote, REMOTE_STORAGE_MOUNT_FILE, err)
}
for dir, storageLocation := range mappings.Mappings {
rs.mapDirectoryToRemoteStorage(util.FullPath(dir), storageLocation)
}
return nil
}
func (rs *FilerRemoteStorage) mapDirectoryToRemoteStorage(dir util.FullPath, loc *filer_pb.RemoteStorageLocation) {
rs.rules.Put([]byte(dir+"/"), loc)
}
func (rs *FilerRemoteStorage) FindMountDirectory(p util.FullPath) (mountDir util.FullPath, remoteLocation *filer_pb.RemoteStorageLocation) {
rs.rules.MatchPrefix([]byte(p), func(key []byte, value interface{}) bool {
mountDir = util.FullPath(string(key[:len(key)-1]))
remoteLocation = value.(*filer_pb.RemoteStorageLocation)
return true
})
return
}
func (rs *FilerRemoteStorage) FindRemoteStorageClient(p util.FullPath) (client remote_storage.RemoteStorageClient, remoteConf *filer_pb.RemoteConf, found bool) {
var storageLocation *filer_pb.RemoteStorageLocation
rs.rules.MatchPrefix([]byte(p), func(key []byte, value interface{}) bool {
storageLocation = value.(*filer_pb.RemoteStorageLocation)
return true
})
if storageLocation == nil {
found = false
return
}
return rs.GetRemoteStorageClient(storageLocation.Name)
}
func (rs *FilerRemoteStorage) GetRemoteStorageClient(storageName string) (client remote_storage.RemoteStorageClient, remoteConf *filer_pb.RemoteConf, found bool) {
remoteConf, found = rs.storageNameToConf[storageName]
if !found {
return
}
var err error
if client, err = remote_storage.GetRemoteStorage(remoteConf); err == nil {
found = true
return
}
return
}
func UnmarshalRemoteStorageMappings(oldContent []byte) (mappings *filer_pb.RemoteStorageMapping, err error) {
mappings = &filer_pb.RemoteStorageMapping{
Mappings: make(map[string]*filer_pb.RemoteStorageLocation),
}
if len(oldContent) > 0 {
if err = proto.Unmarshal(oldContent, mappings); err != nil {
glog.Warningf("unmarshal existing mappings: %v", err)
}
}
return
}
func AddRemoteStorageMapping(oldContent []byte, dir string, storageLocation *filer_pb.RemoteStorageLocation) (newContent []byte, err error) {
mappings, unmarshalErr := UnmarshalRemoteStorageMappings(oldContent)
if unmarshalErr != nil {
// ignore the unmarshal error and keep the freshly initialized empty mapping
}
// set the new mapping
mappings.Mappings[dir] = storageLocation
if newContent, err = proto.Marshal(mappings); err != nil {
return oldContent, fmt.Errorf("marshal mappings: %v", err)
}
return
}
func RemoveRemoteStorageMapping(oldContent []byte, dir string) (newContent []byte, err error) {
mappings, unmarshalErr := UnmarshalRemoteStorageMappings(oldContent)
if unmarshalErr != nil {
return nil, unmarshalErr
}
// set the new mapping
delete(mappings.Mappings, dir)
if newContent, err = proto.Marshal(mappings); err != nil {
return oldContent, fmt.Errorf("marshal mappings: %v", err)
}
return
}
func ReadMountMappings(grpcDialOption grpc.DialOption, filerAddress string) (mappings *filer_pb.RemoteStorageMapping, readErr error) {
var oldContent []byte
if readErr = pb.WithFilerClient(filerAddress, grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
oldContent, readErr = ReadInsideFiler(client, DirectoryEtcRemote, REMOTE_STORAGE_MOUNT_FILE)
return readErr
}); readErr != nil {
return nil, readErr
}
mappings, readErr = UnmarshalRemoteStorageMappings(oldContent)
if readErr != nil {
return nil, fmt.Errorf("unmarshal mappings: %v", readErr)
}
return
}
func ReadRemoteStorageConf(grpcDialOption grpc.DialOption, filerAddress string, storageName string) (conf *filer_pb.RemoteConf, readErr error) {
var oldContent []byte
if readErr = pb.WithFilerClient(filerAddress, grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
oldContent, readErr = ReadInsideFiler(client, DirectoryEtcRemote, storageName+REMOTE_STORAGE_CONF_SUFFIX)
return readErr
}); readErr != nil {
return nil, readErr
}
// unmarshal storage configuration
conf = &filer_pb.RemoteConf{}
if unMarshalErr := proto.Unmarshal(oldContent, conf); unMarshalErr != nil {
readErr = fmt.Errorf("unmarshal %s/%s: %v", DirectoryEtcRemote, storageName+REMOTE_STORAGE_CONF_SUFFIX, unMarshalErr)
return
}
return
}

View File

@@ -0,0 +1,34 @@
package filer
import (
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/stretchr/testify/assert"
"testing"
)
func TestFilerRemoteStorage_FindRemoteStorageClient(t *testing.T) {
conf := &filer_pb.RemoteConf{
Name: "s7",
Type: "s3",
}
rs := NewFilerRemoteStorage()
rs.storageNameToConf[conf.Name] = conf
rs.mapDirectoryToRemoteStorage("/a/b/c", &filer_pb.RemoteStorageLocation{
Name: "s7",
Bucket: "some",
Path: "/dir",
})
_, _, found := rs.FindRemoteStorageClient("/a/b/c/d/e/f")
assert.Equal(t, true, found, "find storage client")
_, _, found2 := rs.FindRemoteStorageClient("/a/b")
assert.Equal(t, false, found2, "should not find storage client")
_, _, found3 := rs.FindRemoteStorageClient("/a/b/c")
assert.Equal(t, false, found3, "should not find storage client")
_, _, found4 := rs.FindRemoteStorageClient("/a/b/cc")
assert.Equal(t, false, found4, "should not find storage client")
}

View File

@@ -3,6 +3,7 @@ package filer
import (
"context"
"github.com/chrislusf/seaweedfs/weed/util"
"math"
"path/filepath"
"strings"
)
@@ -27,6 +28,10 @@ func (f *Filer) ListDirectoryEntries(ctx context.Context, p util.FullPath, start
return true
})
if limit == math.MaxInt64 {
limit = math.MaxInt64 - 1
}
hasMore = int64(len(entries)) >= limit+1
if hasMore {
entries = entries[:limit]
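The clamp above exists because limit+1 overflows when a caller passes math.MaxInt64 (as the remote-storage loader above does): Go's signed integers wrap on overflow, the hasMore comparison against a negative number becomes trivially true, and entries[:limit] would then panic. A tiny demonstration:

package main

import (
    "fmt"
    "math"
)

func main() {
    var limit int64 = math.MaxInt64
    fmt.Println(limit + 1) // -9223372036854775808: wraps negative
    // so int64(len(entries)) >= limit+1 would hold for any slice length,
    // which is why limit is first clamped to math.MaxInt64 - 1
}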

View File

@@ -43,4 +43,5 @@ type FilerStore interface {
type BucketAware interface {
OnBucketCreation(bucket string)
OnBucketDeletion(bucket string)
CanDropWholeBucket() bool
}

View File

@@ -23,6 +23,7 @@ type VirtualFilerStore interface {
AddPathSpecificStore(path string, storeId string, store FilerStore)
OnBucketCreation(bucket string)
OnBucketDeletion(bucket string)
CanDropWholeBucket() bool
}
type FilerStoreWrapper struct {
@@ -42,6 +43,13 @@ func NewFilerStoreWrapper(store FilerStore) *FilerStoreWrapper {
}
}
func (fsw *FilerStoreWrapper) CanDropWholeBucket() bool {
if ba, ok := fsw.defaultStore.(BucketAware); ok {
return ba.CanDropWholeBucket()
}
return false
}
func (fsw *FilerStoreWrapper) OnBucketCreation(bucket string) {
for _, store := range fsw.storeIdToStore {
if ba, ok := store.(BucketAware); ok {

View File

@@ -5,13 +5,15 @@ import (
"context"
"crypto/md5"
"fmt"
"github.com/syndtr/goleveldb/leveldb"
leveldb_errors "github.com/syndtr/goleveldb/leveldb/errors"
"github.com/syndtr/goleveldb/leveldb/opt"
leveldb_util "github.com/syndtr/goleveldb/leveldb/util"
"io"
"os"
"github.com/syndtr/goleveldb/leveldb"
leveldb_errors "github.com/syndtr/goleveldb/leveldb/errors"
"github.com/syndtr/goleveldb/leveldb/filter"
"github.com/syndtr/goleveldb/leveldb/opt"
leveldb_util "github.com/syndtr/goleveldb/leveldb/util"
"github.com/chrislusf/seaweedfs/weed/filer"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
@@ -47,6 +49,7 @@ func (store *LevelDB2Store) initialize(dir string, dbCount int) (err error) {
BlockCacheCapacity: 32 * 1024 * 1024, // default value is 8MiB
WriteBuffer: 16 * 1024 * 1024, // default value is 4MiB
CompactionTableSizeMultiplier: 4,
Filter: filter.NewBloomFilter(8), // false positive rate 0.02
}
for d := 0; d < dbCount; d++ {
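The "false positive rate 0.02" comment on the bloom filter matches the standard approximation fp ≈ (1 − e^(−k·n/m))^k with m/n = 8 bits per key and k hash functions; taking k ≈ bitsPerKey·ln 2 ≈ 5 is an assumption about goleveldb's internals, used here only as a sanity check:

package main

import (
    "fmt"
    "math"
)

func main() {
    bitsPerKey := 8.0
    k := 5.0 // assumed number of hash functions for 8 bits/key
    fp := math.Pow(1-math.Exp(-k/bitsPerKey), k)
    fmt.Printf("%.3f\n", fp) // ≈ 0.022, close to the commented 0.02
}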

View File

@@ -5,15 +5,17 @@ import (
"context"
"crypto/md5"
"fmt"
"github.com/syndtr/goleveldb/leveldb"
leveldb_errors "github.com/syndtr/goleveldb/leveldb/errors"
"github.com/syndtr/goleveldb/leveldb/opt"
leveldb_util "github.com/syndtr/goleveldb/leveldb/util"
"io"
"os"
"strings"
"sync"
"github.com/syndtr/goleveldb/leveldb"
leveldb_errors "github.com/syndtr/goleveldb/leveldb/errors"
"github.com/syndtr/goleveldb/leveldb/filter"
"github.com/syndtr/goleveldb/leveldb/opt"
leveldb_util "github.com/syndtr/goleveldb/leveldb/util"
"github.com/chrislusf/seaweedfs/weed/filer"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
@@ -62,17 +64,19 @@ func (store *LevelDB3Store) initialize(dir string) (err error) {
}
func (store *LevelDB3Store) loadDB(name string) (*leveldb.DB, error) {
bloom := filter.NewBloomFilter(8) // false positive rate 0.02
opts := &opt.Options{
BlockCacheCapacity: 32 * 1024 * 1024, // default value is 8MiB
WriteBuffer: 16 * 1024 * 1024, // default value is 4MiB
CompactionTableSizeMultiplier: 4,
Filter: bloom,
}
if name != DEFAULT {
opts = &opt.Options{
BlockCacheCapacity: 4 * 1024 * 1024, // default value is 8MiB
WriteBuffer: 2 * 1024 * 1024, // default value is 4MiB
CompactionTableSizeMultiplier: 4,
Filter: bloom,
}
}

View File

@@ -34,7 +34,7 @@ func NewMetaAggregator(filers []string, grpcDialOption grpc.DialOption) *MetaAgg
grpcDialOption: grpcDialOption,
}
t.ListenersCond = sync.NewCond(&t.ListenersLock)
t.MetaLogBuffer = log_buffer.NewLogBuffer(LogFlushInterval, nil, func() {
t.MetaLogBuffer = log_buffer.NewLogBuffer("aggr", LogFlushInterval, nil, func() {
t.ListenersCond.Broadcast()
})
return t
@@ -118,6 +118,7 @@ func (ma *MetaAggregator) subscribeToOneFiler(f *Filer, self string, peer string
}
for {
glog.V(4).Infof("subscribing remote %s meta change: %v", peer, time.Unix(0, lastTsNs))
err := pb.WithFilerClient(peer, ma.grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()

View File

@@ -7,6 +7,7 @@ import (
"github.com/chrislusf/seaweedfs/weed/glog"
"go.mongodb.org/mongo-driver/bson"
"go.mongodb.org/mongo-driver/mongo"
"go.mongodb.org/mongo-driver/mongo/options"
)
func (store *MongodbStore) KvPut(ctx context.Context, key []byte, value []byte) (err error) {
@@ -15,11 +16,11 @@ func (store *MongodbStore) KvPut(ctx context.Context, key []byte, value []byte)
c := store.connect.Database(store.database).Collection(store.collectionName)
_, err = c.InsertOne(ctx, Model{
Directory: dir,
Name: name,
Meta: value,
})
opts := options.Update().SetUpsert(true)
filter := bson.D{{"directory", dir}, {"name", name}}
update := bson.D{{"$set", bson.D{{"meta", value}}}}
_, err = c.UpdateOne(ctx, filter, update, opts)
if err != nil {
return fmt.Errorf("kv put: %v", err)

weed/filer/read_remote.go
View File

@@ -0,0 +1,45 @@
package filer
import (
"context"
"fmt"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/util"
)
func (entry *Entry) IsInRemoteOnly() bool {
return len(entry.Chunks) == 0 && entry.Remote != nil && entry.Remote.RemoteSize > 0
}
func (f *Filer) ReadRemote(entry *Entry, offset int64, size int64) (data []byte, err error) {
client, _, found := f.RemoteStorage.GetRemoteStorageClient(entry.Remote.StorageName)
if !found {
return nil, fmt.Errorf("remote storage %v not found", entry.Remote.StorageName)
}
mountDir, remoteLocation := f.RemoteStorage.FindMountDirectory(entry.FullPath)
sourceLoc := MapFullPathToRemoteStorageLocation(mountDir, remoteLocation, entry.FullPath)
return client.ReadFile(sourceLoc, offset, size)
}
func MapFullPathToRemoteStorageLocation(localMountedDir util.FullPath, remoteMountedLocation *filer_pb.RemoteStorageLocation, fp util.FullPath) *filer_pb.RemoteStorageLocation {
remoteLocation := &filer_pb.RemoteStorageLocation{
Name: remoteMountedLocation.Name,
Bucket: remoteMountedLocation.Bucket,
Path: remoteMountedLocation.Path,
}
remoteLocation.Path += string(fp)[len(localMountedDir):]
return remoteLocation
}
func DownloadToLocal(filerClient filer_pb.FilerClient, remoteConf *filer_pb.RemoteConf, remoteLocation *filer_pb.RemoteStorageLocation, parent util.FullPath, entry *filer_pb.Entry) error {
return filerClient.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {
_, err := client.DownloadToLocal(context.Background(), &filer_pb.DownloadToLocalRequest{
Directory: string(parent),
Name: entry.Name,
})
return err
})
}
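MapFullPathToRemoteStorageLocation above appends the local path's suffix (everything past the mount point) to the remote path prefix. A worked fragment, assuming the filer package's imports; the values are illustrative:

// Suppose /buckets/b2 is mounted at remote storage "s7", bucket "some", path "/dir".
mountDir := util.FullPath("/buckets/b2")
remote := &filer_pb.RemoteStorageLocation{Name: "s7", Bucket: "some", Path: "/dir"}
loc := MapFullPathToRemoteStorageLocation(mountDir, remote, util.FullPath("/buckets/b2/x/y.txt"))
fmt.Println(loc.Path) // "/dir" + "/x/y.txt" => "/dir/x/y.txt"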

View File

@@ -2,13 +2,9 @@ package filer
import (
"bytes"
"fmt"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/util"
"github.com/chrislusf/seaweedfs/weed/wdclient"
"io/ioutil"
"math"
"net/http"
"time"
)
@@ -31,50 +27,17 @@ func ReadEntry(masterClient *wdclient.MasterClient, filerClient filer_pb.Seaweed
}
func ReadContent(filerAddress string, dir, name string) ([]byte, error) {
target := fmt.Sprintf("http://%s%s/%s", filerAddress, dir, name)
data, _, err := util.Get(target)
return data, err
}
func SaveAs(host string, port int, dir, name string, contentType string, byteBuffer *bytes.Buffer) error {
var target string
if port == 0 {
target = fmt.Sprintf("http://%s%s/%s", host, dir, name)
} else {
target = fmt.Sprintf("http://%s:%d%s/%s", host, port, dir, name)
func ReadInsideFiler(filerClient filer_pb.SeaweedFilerClient, dir, name string) (content []byte, err error) {
request := &filer_pb.LookupDirectoryEntryRequest{
Directory: dir,
Name: name,
}
// set the HTTP method, url, and request body
req, err := http.NewRequest(http.MethodPut, target, byteBuffer)
respLookupEntry, err := filer_pb.LookupEntry(filerClient, request)
if err != nil {
return err
return
}
// set the request header Content-Type for json
if contentType != "" {
req.Header.Set("Content-Type", contentType)
}
resp, err := http.DefaultClient.Do(req)
if err != nil {
return err
}
defer util.CloseResponse(resp)
b, err := ioutil.ReadAll(resp.Body)
if err != nil {
return err
}
if resp.StatusCode >= 400 {
return fmt.Errorf("%s: %s %v", target, resp.Status, string(b))
}
return nil
content = respLookupEntry.Entry.Content
return
}
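// Usage sketch (directory and file names are illustrative, log import assumed): reading a small
// object whose bytes live inline in the entry's Content, over an existing filer gRPC connection
// instead of the HTTP round trip that the removed SaveAs/ReadContent pair performed.
err := filerClient.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {
	content, readErr := ReadInsideFiler(client, "/etc/iam", "identity.json")
	if readErr != nil {
		return readErr
	}
	log.Printf("read %d bytes", len(content))
	return nil
})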
func SaveInsideFiler(client filer_pb.SeaweedFilerClient, dir, name string, content []byte) error {

View File

@@ -24,18 +24,21 @@ func init() {
type options struct {
opt *gorocksdb.Options
bto *gorocksdb.BlockBasedTableOptions
ro *gorocksdb.ReadOptions
wo *gorocksdb.WriteOptions
}
func (opt *options) init() {
opt.opt = gorocksdb.NewDefaultOptions()
opt.bto = gorocksdb.NewDefaultBlockBasedTableOptions()
opt.ro = gorocksdb.NewDefaultReadOptions()
opt.wo = gorocksdb.NewDefaultWriteOptions()
}
func (opt *options) close() {
opt.opt.Destroy()
opt.bto.Destroy()
opt.ro.Destroy()
opt.wo.Destroy()
}
@@ -69,6 +72,11 @@ func (store *RocksDBStore) initialize(dir string) (err error) {
store.opt.SetCompactionFilter(NewTTLFilter())
// store.opt.SetMaxBackgroundCompactions(2)
// https://github.com/tecbot/gorocksdb/issues/132
store.bto.SetFilterPolicy(gorocksdb.NewBloomFilterFull(8))
store.opt.SetBlockBasedTableFactory(store.bto)
// store.opt.EnableStatistics()
store.db, err = gorocksdb.OpenDb(store.opt, dir)
return
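// Why the Bloom filter (a note, not new code in this commit): with a filter policy on the
// block-based table, point lookups for absent keys can skip SST blocks entirely; at 8 bits
// per key the false-positive rate is roughly 2%, which is typically a win for metadata
// lookups that miss.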

View File

@@ -4,6 +4,7 @@ import (
"bytes"
"github.com/chrislusf/seaweedfs/weed/pb/iam_pb"
"github.com/golang/protobuf/jsonpb"
"github.com/golang/protobuf/proto"
"io"
)
@@ -14,7 +15,7 @@ func ParseS3ConfigurationFromBytes(content []byte, config *iam_pb.S3ApiConfigura
return nil
}
func S3ConfigurationToText(writer io.Writer, config *iam_pb.S3ApiConfiguration) error {
func ProtoToText(writer io.Writer, config proto.Message) error {
m := jsonpb.Marshaler{
EmitDefaults: false,

View File

@@ -44,7 +44,7 @@ func TestS3Conf(t *testing.T) {
},
}
var buf bytes.Buffer
err := S3ConfigurationToText(&buf, s3Conf)
err := ProtoToText(&buf, s3Conf)
assert.Equal(t, err, nil)
s3ConfSaved := &iam_pb.S3ApiConfiguration{}
err = ParseS3ConfigurationFromBytes(buf.Bytes(), s3ConfSaved)

View File

@@ -1,4 +1,4 @@
// +build !linux,!darwin,!windows
// +build !linux,!darwin,!windows,!s390,!ppc64le,!mips64
// limited GOOS due to modernc.org/libc/unistd
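// In the old "// +build" constraint syntax a comma is AND and a space is OR, so the new line
// reads: build only when none of these GOOS/GOARCH tags are set. For comparison:
//
//	// +build !linux,!darwin    ->  (NOT linux) AND (NOT darwin)
//	// +build linux darwin      ->  linux OR darwin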

View File

@@ -5,6 +5,7 @@ import (
"fmt"
"io"
"math"
"sort"
"strings"
"time"
@@ -88,16 +89,37 @@ func ReadAll(masterClient *wdclient.MasterClient, chunks []*filer_pb.FileChunk)
// ---------------- ChunkStreamReader ----------------------------------
type ChunkStreamReader struct {
chunkViews []*ChunkView
logicOffset int64
buffer []byte
bufferOffset int64
bufferPos int
chunkIndex int
lookupFileId wdclient.LookupFileIdFunctionType
chunkViews []*ChunkView
totalSize int64
logicOffset int64
buffer []byte
bufferOffset int64
bufferPos int
nextChunkViewIndex int
lookupFileId wdclient.LookupFileIdFunctionType
}
var _ = io.ReadSeeker(&ChunkStreamReader{})
var _ = io.ReaderAt(&ChunkStreamReader{})
func doNewChunkStreamReader(lookupFileIdFn wdclient.LookupFileIdFunctionType, chunks []*filer_pb.FileChunk) *ChunkStreamReader {
chunkViews := ViewFromChunks(lookupFileIdFn, chunks, 0, math.MaxInt64)
sort.Slice(chunkViews, func(i, j int) bool {
return chunkViews[i].LogicOffset < chunkViews[j].LogicOffset
})
var totalSize int64
for _, chunk := range chunkViews {
totalSize += int64(chunk.Size)
}
return &ChunkStreamReader{
chunkViews: chunkViews,
lookupFileId: lookupFileIdFn,
totalSize: totalSize,
}
}
func NewChunkStreamReaderFromFiler(masterClient *wdclient.MasterClient, chunks []*filer_pb.FileChunk) *ChunkStreamReader {
@@ -105,39 +127,39 @@ func NewChunkStreamReaderFromFiler(masterClient *wdclient.MasterClient, chunks [
return masterClient.LookupFileId(fileId)
}
chunkViews := ViewFromChunks(lookupFileIdFn, chunks, 0, math.MaxInt64)
return &ChunkStreamReader{
chunkViews: chunkViews,
lookupFileId: lookupFileIdFn,
}
return doNewChunkStreamReader(lookupFileIdFn, chunks)
}
func NewChunkStreamReader(filerClient filer_pb.FilerClient, chunks []*filer_pb.FileChunk) *ChunkStreamReader {
lookupFileIdFn := LookupFn(filerClient)
chunkViews := ViewFromChunks(lookupFileIdFn, chunks, 0, math.MaxInt64)
return doNewChunkStreamReader(lookupFileIdFn, chunks)
}
return &ChunkStreamReader{
chunkViews: chunkViews,
lookupFileId: lookupFileIdFn,
func (c *ChunkStreamReader) ReadAt(p []byte, off int64) (n int, err error) {
if err = c.prepareBufferFor(off); err != nil { // position at the requested off, honoring the io.ReaderAt contract
return
}
return c.Read(p)
}
func (c *ChunkStreamReader) Read(p []byte) (n int, err error) {
for n < len(p) {
if c.isBufferEmpty() {
if c.chunkIndex >= len(c.chunkViews) {
if c.nextChunkViewIndex >= len(c.chunkViews) {
return n, io.EOF
}
chunkView := c.chunkViews[c.chunkIndex]
c.fetchChunkToBuffer(chunkView)
c.chunkIndex++
chunkView := c.chunkViews[c.nextChunkViewIndex]
if err = c.fetchChunkToBuffer(chunkView); err != nil {
return
}
c.nextChunkViewIndex++
}
t := copy(p[n:], c.buffer[c.bufferPos:])
c.bufferPos += t
n += t
c.logicOffset += int64(t)
}
return
}
@@ -148,38 +170,55 @@ func (c *ChunkStreamReader) isBufferEmpty() bool {
func (c *ChunkStreamReader) Seek(offset int64, whence int) (int64, error) {
var totalSize int64
for _, chunk := range c.chunkViews {
totalSize += int64(chunk.Size)
}
var err error
switch whence {
case io.SeekStart:
case io.SeekCurrent:
offset += c.bufferOffset + int64(c.bufferPos)
offset += c.logicOffset
case io.SeekEnd:
offset = totalSize + offset
offset = c.totalSize + offset
}
if offset > totalSize {
if offset > c.totalSize {
err = io.ErrUnexpectedEOF
} else {
c.logicOffset = offset
}
for i, chunk := range c.chunkViews {
if chunk.LogicOffset <= offset && offset < chunk.LogicOffset+int64(chunk.Size) {
if c.isBufferEmpty() || c.bufferOffset != chunk.LogicOffset {
c.fetchChunkToBuffer(chunk)
c.chunkIndex = i + 1
break
}
}
}
c.bufferPos = int(offset - c.bufferOffset)
return offset, err
}
func (c *ChunkStreamReader) prepareBufferFor(offset int64) (err error) {
// stay in the same chunk
if !c.isBufferEmpty() {
if c.bufferOffset <= offset && offset < c.bufferOffset+int64(len(c.buffer)) {
c.bufferPos = int(offset - c.bufferOffset)
return nil
}
}
// need to seek to a different chunk
currentChunkIndex := sort.Search(len(c.chunkViews), func(i int) bool {
// sort.Search expects the predicate to flip from false to true, so look for the first chunk that ends past the offset
return offset < c.chunkViews[i].LogicOffset+int64(c.chunkViews[i].Size)
})
if currentChunkIndex == len(c.chunkViews) {
return io.EOF
}
// positioning within the new chunk
chunk := c.chunkViews[currentChunkIndex]
if chunk.LogicOffset <= offset && offset < chunk.LogicOffset+int64(chunk.Size) {
if c.isBufferEmpty() || c.bufferOffset != chunk.LogicOffset {
if err = c.fetchChunkToBuffer(chunk); err != nil {
return
}
c.nextChunkViewIndex = currentChunkIndex + 1
}
c.bufferPos = int(offset - c.bufferOffset)
}
return
}
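// Standalone sketch (hypothetical offsets) of the sort.Search contract relied on above: the
// predicate must be false for a prefix and true for a suffix, and Search returns the first
// index where it is true.
package main

import (
	"fmt"
	"sort"
)

func main() {
	ends := []int64{100, 250, 300} // end offsets of three sorted chunks: [0,100) [100,250) [250,300)
	offset := int64(120)
	i := sort.Search(len(ends), func(i int) bool { return offset < ends[i] })
	fmt.Println(i) // 1: offset 120 falls inside the second chunk
}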
func (c *ChunkStreamReader) fetchChunkToBuffer(chunkView *ChunkView) error {
urlStrings, err := c.lookupFileId(chunkView.FileId)
if err != nil {

View File

@@ -54,28 +54,27 @@ func (dir *Dir) Id() uint64 {
func (dir *Dir) Attr(ctx context.Context, attr *fuse.Attr) error {
// https://github.com/bazil/fuse/issues/196
attr.Valid = time.Second
if dir.FullPath() == dir.wfs.option.FilerMountRootPath {
dir.setRootDirAttributes(attr)
glog.V(3).Infof("root dir Attr %s, attr: %+v", dir.FullPath(), attr)
return nil
}
entry, err := dir.maybeLoadEntry()
if err != nil {
glog.V(3).Infof("dir Attr %s,err: %+v", dir.FullPath(), err)
glog.V(3).Infof("dir Attr %s, err: %+v", dir.FullPath(), err)
return err
}
// https://github.com/bazil/fuse/issues/196
attr.Valid = time.Second
attr.Inode = dir.Id()
attr.Mode = os.FileMode(entry.Attributes.FileMode) | os.ModeDir
attr.Mtime = time.Unix(entry.Attributes.Mtime, 0)
attr.Crtime = time.Unix(entry.Attributes.Crtime, 0)
attr.Ctime = time.Unix(entry.Attributes.Crtime, 0)
attr.Atime = time.Unix(entry.Attributes.Mtime, 0)
attr.Gid = entry.Attributes.Gid
attr.Uid = entry.Attributes.Uid
if dir.FullPath() == dir.wfs.option.FilerMountRootPath {
attr.BlockSize = blockSize
}
glog.V(4).Infof("dir Attr %s, attr: %+v", dir.FullPath(), attr)
return nil
@@ -93,20 +92,6 @@ func (dir *Dir) Getxattr(ctx context.Context, req *fuse.GetxattrRequest, resp *f
return getxattr(entry, req, resp)
}
func (dir *Dir) setRootDirAttributes(attr *fuse.Attr) {
// attr.Inode = 1 // filer2.FullPath(dir.Path).AsInode()
attr.Valid = time.Second
attr.Inode = dir.Id()
attr.Uid = dir.wfs.option.MountUid
attr.Gid = dir.wfs.option.MountGid
attr.Mode = dir.wfs.option.MountMode
attr.Crtime = dir.wfs.option.MountCtime
attr.Ctime = dir.wfs.option.MountCtime
attr.Mtime = dir.wfs.option.MountMtime
attr.Atime = dir.wfs.option.MountMtime
attr.BlockSize = blockSize
}
func (dir *Dir) Fsync(ctx context.Context, req *fuse.FsyncRequest) error {
// fsync works at OS level
// write the file chunks to the filerGrpcAddress
@@ -156,7 +141,7 @@ func (dir *Dir) Create(ctx context.Context, req *fuse.CreateRequest,
var node fs.Node
if isDirectory {
node = dir.newDirectory(util.NewFullPath(dir.FullPath(), req.Name))
return node, nil, nil
return node, node, nil
}
node = dir.newFile(req.Name)
@@ -375,6 +360,28 @@ func (dir *Dir) ReadDirAll(ctx context.Context) (ret []fuse.Dirent, err error) {
glog.Errorf("list meta cache: %v", listErr)
return nil, fuse.EIO
}
// create proper . and .. directories
ret = append(ret, fuse.Dirent{
Inode: dirPath.AsInode(),
Name: ".",
Type: fuse.DT_Dir,
})
// return the correct parent inode for the mount root
var inode uint64
if string(dirPath) == dir.wfs.option.FilerMountRootPath {
inode = dir.wfs.option.MountParentInode
} else {
inode = util.FullPath(dir.parent.FullPath()).AsInode()
}
ret = append(ret, fuse.Dirent{
Inode: inode,
Name: "..",
Type: fuse.DT_Dir,
})
return
}
@@ -436,7 +443,10 @@ func (dir *Dir) removeOneFile(req *fuse.RemoveRequest) error {
dir.wfs.handlesLock.Lock()
defer dir.wfs.handlesLock.Unlock()
inodeId := filePath.AsInode()
delete(dir.wfs.handles, inodeId)
if fh, ok := dir.wfs.handles[inodeId]; ok {
delete(dir.wfs.handles, inodeId)
fh.isDeleted = true
}
return nil

View File

@@ -2,6 +2,9 @@ package filesys
import (
"context"
"fmt"
"github.com/chrislusf/seaweedfs/weed/filer"
"math"
"github.com/seaweedfs/fuse"
"github.com/seaweedfs/fuse/fs"
@@ -37,6 +40,7 @@ func (dir *Dir) Rename(ctx context.Context, req *fuse.RenameRequest, newDirector
OldName: req.OldName,
NewDirectory: newDir.FullPath(),
NewName: req.NewName,
Signatures: []int32{dir.wfs.signature},
}
_, err := client.AtomicRenameEntry(ctx, request)
@@ -53,46 +57,118 @@ func (dir *Dir) Rename(ctx context.Context, req *fuse.RenameRequest, newDirector
return fuse.EIO
}
// TODO: replicate renaming logic on filer
if err := dir.wfs.metaCache.DeleteEntry(context.Background(), oldPath); err != nil {
glog.V(0).Infof("dir Rename delete local %s => %s : %v", oldPath, newPath, err)
err = dir.moveEntry(context.Background(), util.FullPath(dir.FullPath()), oldEntry, util.FullPath(newDir.FullPath()), req.NewName)
if err != nil {
glog.V(0).Infof("dir local Rename %s => %s : %v", oldPath, newPath, err)
return fuse.EIO
}
oldEntry.FullPath = newPath
if err := dir.wfs.metaCache.InsertEntry(context.Background(), oldEntry); err != nil {
return nil
}
func (dir *Dir) moveEntry(ctx context.Context, oldParent util.FullPath, entry *filer.Entry, newParent util.FullPath, newName string) error {
oldName := entry.Name()
oldPath := oldParent.Child(oldName)
newPath := newParent.Child(newName)
if err := dir.moveSelfEntry(ctx, oldParent, entry, newParent, newName, func() error {
oldFsNode := NodeWithId(oldPath.AsInode())
newFsNode := NodeWithId(newPath.AsInode())
newDirNode, found := dir.wfs.Server.FindInternalNode(NodeWithId(newParent.AsInode()))
var newDir *Dir
if found {
newDir = newDirNode.(*Dir)
}
dir.wfs.Server.InvalidateInternalNode(oldFsNode, newFsNode, func(internalNode fs.Node) {
if file, ok := internalNode.(*File); ok {
glog.V(4).Infof("internal file node %s", oldParent.Child(oldName))
file.Name = newName
file.id = uint64(newFsNode)
if found {
file.dir = newDir
}
}
if dir, ok := internalNode.(*Dir); ok {
glog.V(4).Infof("internal dir node %s", oldParent.Child(oldName))
dir.name = newName
dir.id = uint64(newFsNode)
if found {
dir.parent = newDir
}
}
})
// change file handle
inodeId := oldPath.AsInode()
dir.wfs.handlesLock.Lock()
if existingHandle, found := dir.wfs.handles[inodeId]; found && existingHandle != nil { // move the handle only when one is actually open
glog.V(4).Infof("opened file handle %s => %s", oldPath, newPath)
delete(dir.wfs.handles, inodeId)
dir.wfs.handles[newPath.AsInode()] = existingHandle
}
dir.wfs.handlesLock.Unlock()
if entry.IsDirectory() {
if err := dir.moveFolderSubEntries(ctx, oldParent, oldName, newParent, newName); err != nil {
return err
}
}
return nil
}); err != nil {
return fmt.Errorf("fail to move %s => %s: %v", oldPath, newPath, err)
}
return nil
}
func (dir *Dir) moveFolderSubEntries(ctx context.Context, oldParent util.FullPath, oldName string, newParent util.FullPath, newName string) error {
currentDirPath := oldParent.Child(oldName)
newDirPath := newParent.Child(newName)
glog.V(1).Infof("moving folder %s => %s", currentDirPath, newDirPath)
var moveErr error
listErr := dir.wfs.metaCache.ListDirectoryEntries(ctx, currentDirPath, "", false, int64(math.MaxInt32), func(item *filer.Entry) bool {
moveErr = dir.moveEntry(ctx, currentDirPath, item, newDirPath, item.Name())
if moveErr != nil {
return false
}
return true
})
if listErr != nil {
return listErr
}
if moveErr != nil {
return moveErr
}
return nil
}
func (dir *Dir) moveSelfEntry(ctx context.Context, oldParent util.FullPath, entry *filer.Entry, newParent util.FullPath, newName string, moveFolderSubEntries func() error) error {
newPath := newParent.Child(newName)
oldPath := oldParent.Child(entry.Name())
entry.FullPath = newPath
if err := dir.wfs.metaCache.InsertEntry(ctx, entry); err != nil {
glog.V(0).Infof("dir Rename insert local %s => %s : %v", oldPath, newPath, err)
return fuse.EIO
}
oldFsNode := NodeWithId(oldPath.AsInode())
newFsNode := NodeWithId(newPath.AsInode())
dir.wfs.Server.InvalidateInternalNode(oldFsNode, newFsNode, func(internalNode fs.Node) {
if file, ok := internalNode.(*File); ok {
glog.V(4).Infof("internal file node %s", file.Name)
file.Name = req.NewName
file.id = uint64(newFsNode)
file.dir = newDir
if moveFolderSubEntries != nil {
if moveChildrenErr := moveFolderSubEntries(); moveChildrenErr != nil {
return moveChildrenErr
}
if dir, ok := internalNode.(*Dir); ok {
glog.V(4).Infof("internal dir node %s", dir.name)
dir.name = req.NewName
dir.id = uint64(newFsNode)
dir.parent = newDir
}
})
// change file handle
dir.wfs.handlesLock.Lock()
defer dir.wfs.handlesLock.Unlock()
inodeId := oldPath.AsInode()
existingHandle, found := dir.wfs.handles[inodeId]
glog.V(4).Infof("has open filehandle %s: %v", oldPath, found)
if !found || existingHandle == nil {
return nil
}
glog.V(4).Infof("opened filehandle %s => %s", oldPath, newPath)
delete(dir.wfs.handles, inodeId)
dir.wfs.handles[newPath.AsInode()] = existingHandle
if err := dir.wfs.metaCache.DeleteEntry(ctx, oldPath); err != nil {
glog.V(0).Infof("dir Rename delete local %s => %s : %v", oldPath, newPath, err)
return fuse.EIO
}
return nil
}

View File

@@ -97,7 +97,7 @@ func (pages *TempFileDirtyPages) saveExistingPagesToStorage() {
for _, list := range pages.writtenIntervals.lists {
listStopOffset := list.Offset() + list.Size()
for uploadedOffset:=int64(0); uploadedOffset < listStopOffset; uploadedOffset += pageSize {
for uploadedOffset := int64(0); uploadedOffset < listStopOffset; uploadedOffset += pageSize {
start, stop := max(list.Offset(), uploadedOffset), min(listStopOffset, uploadedOffset+pageSize)
if start >= stop {
continue

View File

@@ -54,7 +54,7 @@ func (list *WrittenIntervalLinkedList) ReadData(buf []byte, start, stop int64) {
nodeStart, nodeStop := max(start, t.DataOffset), min(stop, t.DataOffset+t.Size)
if nodeStart < nodeStop {
// glog.V(4).Infof("copying start=%d stop=%d t=[%d,%d) => bufSize=%d nodeStart=%d, nodeStop=%d", start, stop, t.DataOffset, t.DataOffset+t.Size, len(buf), nodeStart, nodeStop)
list.tempFile.ReadAt(buf[nodeStart-start:nodeStop-start], t.TempOffset + nodeStart - t.DataOffset)
list.tempFile.ReadAt(buf[nodeStart-start:nodeStop-start], t.TempOffset+nodeStart-t.DataOffset)
}
if t.Next == nil {

View File

@@ -115,16 +115,6 @@ func (file *File) Setattr(ctx context.Context, req *fuse.SetattrRequest, resp *f
if err != nil {
return err
}
if file.isOpen > 0 {
file.wfs.handlesLock.Lock()
fileHandle := file.wfs.handles[file.Id()]
file.wfs.handlesLock.Unlock()
if fileHandle != nil {
fileHandle.Lock()
defer fileHandle.Unlock()
}
}
if req.Valid.Size() {
@@ -154,17 +144,17 @@ func (file *File) Setattr(ctx context.Context, req *fuse.SetattrRequest, resp *f
file.dirtyMetadata = true
}
if req.Valid.Mode() {
if req.Valid.Mode() && entry.Attributes.FileMode != uint32(req.Mode) {
entry.Attributes.FileMode = uint32(req.Mode)
file.dirtyMetadata = true
}
if req.Valid.Uid() {
if req.Valid.Uid() && entry.Attributes.Uid != req.Uid {
entry.Attributes.Uid = req.Uid
file.dirtyMetadata = true
}
if req.Valid.Gid() {
if req.Valid.Gid() && entry.Attributes.Gid != req.Gid {
entry.Attributes.Gid = req.Gid
file.dirtyMetadata = true
}
@@ -174,7 +164,7 @@ func (file *File) Setattr(ctx context.Context, req *fuse.SetattrRequest, resp *f
file.dirtyMetadata = true
}
if req.Valid.Mtime() {
if req.Valid.Mtime() && entry.Attributes.Mtime != req.Mtime.Unix() {
entry.Attributes.Mtime = req.Mtime.Unix()
file.dirtyMetadata = true
}
@@ -207,6 +197,11 @@ func (file *File) Setxattr(ctx context.Context, req *fuse.SetxattrRequest) error
if err := setxattr(entry, req); err != nil {
return err
}
file.dirtyMetadata = true
if file.isOpen > 0 {
return nil
}
return file.saveEntry(entry)
@@ -224,6 +219,11 @@ func (file *File) Removexattr(ctx context.Context, req *fuse.RemovexattrRequest)
if err := removexattr(entry, req); err != nil {
return err
}
file.dirtyMetadata = true
if file.isOpen > 0 {
return nil
}
return file.saveEntry(entry)
@@ -351,6 +351,8 @@ func (file *File) saveEntry(entry *filer_pb.Entry) error {
file.wfs.metaCache.InsertEntry(context.Background(), filer.FromPbEntry(request.Directory, request.Entry))
file.dirtyMetadata = false
return nil
})
}
@@ -358,3 +360,30 @@ func (file *File) saveEntry(entry *filer_pb.Entry) error {
func (file *File) getEntry() *filer_pb.Entry {
return file.entry
}
func (file *File) downloadRemoteEntry(entry *filer_pb.Entry) (*filer_pb.Entry, error) {
err := file.wfs.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {
request := &filer_pb.DownloadToLocalRequest{
Directory: file.dir.FullPath(),
Name: entry.Name,
}
glog.V(4).Infof("download entry: %v", request)
resp, err := client.DownloadToLocal(context.Background(), request)
if err != nil {
glog.Errorf("DownloadToLocal file %s/%s: %v", file.dir.FullPath(), file.Name, err)
return fuse.EIO
}
entry = resp.Entry
file.wfs.metaCache.InsertEntry(context.Background(), filer.FromPbEntry(request.Directory, resp.Entry))
file.dirtyMetadata = false
return nil
})
return entry, err
}

View File

@@ -33,11 +33,12 @@ type FileHandle struct {
Uid uint32 // user ID of process making request
Gid uint32 // group ID of process making request
writeOnly bool
isDeleted bool
}
func newFileHandle(file *File, uid, gid uint32, writeOnly bool) *FileHandle {
fh := &FileHandle{
f: file,
f: file,
// dirtyPages: newContinuousDirtyPages(file, writeOnly),
dirtyPages: newTempFileDirtyPages(file, writeOnly),
Uid: uid,
@@ -113,6 +114,16 @@ func (fh *FileHandle) readFromChunks(buff []byte, offset int64) (int64, error) {
return 0, io.EOF
}
if entry.IsInRemoteOnly() {
glog.V(4).Infof("download remote entry %s", fh.f.fullpath())
newEntry, err := fh.f.downloadRemoteEntry(entry)
if err != nil {
glog.V(1).Infof("download remote entry %s: %v", fh.f.fullpath(), err)
return 0, err
}
entry = newEntry
}
fileSize := int64(filer.FileSize(entry))
fileFullPath := fh.f.fullpath()
@@ -129,7 +140,7 @@ func (fh *FileHandle) readFromChunks(buff []byte, offset int64) (int64, error) {
var chunkResolveErr error
if fh.entryViewCache == nil {
fh.entryViewCache, chunkResolveErr = filer.NonOverlappingVisibleIntervals(fh.f.wfs.LookupFn(), entry.Chunks)
fh.entryViewCache, chunkResolveErr = filer.NonOverlappingVisibleIntervals(fh.f.wfs.LookupFn(), entry.Chunks, 0, math.MaxInt64)
if chunkResolveErr != nil {
return 0, fmt.Errorf("fail to resolve chunk manifest: %v", chunkResolveErr)
}
@@ -222,6 +233,11 @@ func (fh *FileHandle) Flush(ctx context.Context, req *fuse.FlushRequest) error {
glog.V(4).Infof("Flush %v fh %d", fh.f.fullpath(), fh.handle)
if fh.isDeleted {
glog.V(4).Infof("Flush %v fh %d skip deleted", fh.f.fullpath(), fh.handle)
return nil
}
fh.Lock()
defer fh.Unlock()

View File

@@ -3,14 +3,12 @@ package meta_cache
import (
"context"
"fmt"
"os"
"sync"
"github.com/chrislusf/seaweedfs/weed/filer"
"github.com/chrislusf/seaweedfs/weed/filer/leveldb"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/util"
"github.com/chrislusf/seaweedfs/weed/util/bounded_tree"
"os"
)
// need to have logic similar to FilerStoreWrapper
@@ -18,7 +16,7 @@ import (
type MetaCache struct {
localStore filer.VirtualFilerStore
sync.RWMutex
// sync.RWMutex
visitedBoundary *bounded_tree.BoundedTree
uidGidMapper *UidGidMapper
invalidateFunc func(util.FullPath)
@@ -54,8 +52,8 @@ func openMetaStore(dbFolder string) filer.VirtualFilerStore {
}
func (mc *MetaCache) InsertEntry(ctx context.Context, entry *filer.Entry) error {
mc.Lock()
defer mc.Unlock()
//mc.Lock()
//defer mc.Unlock()
return mc.doInsertEntry(ctx, entry)
}
@@ -64,8 +62,8 @@ func (mc *MetaCache) doInsertEntry(ctx context.Context, entry *filer.Entry) erro
}
func (mc *MetaCache) AtomicUpdateEntryFromFiler(ctx context.Context, oldPath util.FullPath, newEntry *filer.Entry) error {
mc.Lock()
defer mc.Unlock()
//mc.Lock()
//defer mc.Unlock()
oldDir, _ := oldPath.DirAndName()
if mc.visitedBoundary.HasVisited(util.FullPath(oldDir)) {
@@ -97,14 +95,14 @@ func (mc *MetaCache) AtomicUpdateEntryFromFiler(ctx context.Context, oldPath uti
}
func (mc *MetaCache) UpdateEntry(ctx context.Context, entry *filer.Entry) error {
mc.Lock()
defer mc.Unlock()
//mc.Lock()
//defer mc.Unlock()
return mc.localStore.UpdateEntry(ctx, entry)
}
func (mc *MetaCache) FindEntry(ctx context.Context, fp util.FullPath) (entry *filer.Entry, err error) {
mc.RLock()
defer mc.RUnlock()
//mc.RLock()
//defer mc.RUnlock()
entry, err = mc.localStore.FindEntry(ctx, fp)
if err != nil {
return nil, err
@@ -114,14 +112,14 @@ func (mc *MetaCache) FindEntry(ctx context.Context, fp util.FullPath) (entry *fi
}
func (mc *MetaCache) DeleteEntry(ctx context.Context, fp util.FullPath) (err error) {
mc.Lock()
defer mc.Unlock()
//mc.Lock()
//defer mc.Unlock()
return mc.localStore.DeleteEntry(ctx, fp)
}
func (mc *MetaCache) ListDirectoryEntries(ctx context.Context, dirPath util.FullPath, startFileName string, includeStartFile bool, limit int64, eachEntryFunc filer.ListEachEntryFunc) error {
mc.RLock()
defer mc.RUnlock()
//mc.RLock()
//defer mc.RUnlock()
if !mc.visitedBoundary.HasVisited(dirPath) {
return fmt.Errorf("unsynchronized dir: %v", dirPath)
@@ -138,8 +136,8 @@ func (mc *MetaCache) ListDirectoryEntries(ctx context.Context, dirPath util.Full
}
func (mc *MetaCache) Shutdown() {
mc.Lock()
defer mc.Unlock()
//mc.Lock()
//defer mc.Unlock()
mc.localStore.Shutdown()
}

View File

@@ -43,5 +43,5 @@ func EnsureVisited(mc *MetaCache, client filer_pb.FilerClient, dirPath util.Full
}
func IsHiddenSystemEntry(dir, name string) bool {
return dir == "/" && name == "topics"
return dir == "/" && (name == "topics" || name == "etc")
}

View File

@@ -2,12 +2,9 @@ package meta_cache
import (
"context"
"fmt"
"io"
"time"
"github.com/chrislusf/seaweedfs/weed/filer"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/pb"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/util"
)
@@ -40,47 +37,30 @@ func SubscribeMetaEvents(mc *MetaCache, selfSignature int32, client filer_pb.Fil
newEntry = filer.FromPbEntry(dir, message.NewEntry)
}
err := mc.AtomicUpdateEntryFromFiler(context.Background(), oldPath, newEntry)
if err == nil && message.OldEntry != nil && message.NewEntry != nil {
key := util.NewFullPath(dir, message.NewEntry.Name)
mc.invalidateFunc(key)
if err == nil {
if message.OldEntry != nil && message.NewEntry != nil {
if message.OldEntry.Name == message.NewEntry.Name {
// no need to invalidate
} else {
oldKey := util.NewFullPath(resp.Directory, message.OldEntry.Name)
mc.invalidateFunc(oldKey)
newKey := util.NewFullPath(dir, message.NewEntry.Name)
mc.invalidateFunc(newKey)
}
} else if message.OldEntry == nil && message.NewEntry != nil {
// no need to invalidate
} else if message.OldEntry != nil && message.NewEntry == nil {
oldKey := util.NewFullPath(resp.Directory, message.OldEntry.Name)
mc.invalidateFunc(oldKey)
}
}
return err
}
for {
err := client.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
stream, err := client.SubscribeMetadata(ctx, &filer_pb.SubscribeMetadataRequest{
ClientName: "mount",
PathPrefix: dir,
SinceNs: lastTsNs,
Signature: selfSignature,
})
if err != nil {
return fmt.Errorf("subscribe: %v", err)
}
return util.Retry("followMetaUpdates", func() error {
return pb.WithFilerClientFollowMetadata(client, "mount", dir, lastTsNs, selfSignature, processEventFn, true)
})
for {
resp, listenErr := stream.Recv()
if listenErr == io.EOF {
return nil
}
if listenErr != nil {
return listenErr
}
if err := processEventFn(resp); err != nil {
glog.Fatalf("process %v: %v", resp, err)
}
lastTsNs = resp.TsNs
}
})
if err != nil {
glog.Errorf("subscribing filer meta change: %v", err)
}
time.Sleep(time.Second)
}
}
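// A minimal approximation (not the actual weed/util implementation; time import assumed) of the
// retry loop the mount now delegates to: re-run the job with a growing sleep while it keeps
// failing, so a dropped SubscribeMetadata stream is re-established from lastTsNs instead of
// killing the mount.
func retrySketch(name string, job func() error) (err error) {
	wait := time.Second
	for i := 0; i < 10; i++ { // bounded here for the sketch; the real helper decides differently
		if err = job(); err == nil {
			return nil
		}
		glog.V(0).Infof("retry %s: %v", name, err)
		time.Sleep(wait)
		wait *= 2
	}
	return err
}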

View File

@@ -3,9 +3,6 @@ package filesys
import (
"context"
"fmt"
"github.com/chrislusf/seaweedfs/weed/filer"
"github.com/chrislusf/seaweedfs/weed/storage/types"
"github.com/chrislusf/seaweedfs/weed/wdclient"
"math"
"math/rand"
"os"
@@ -14,6 +11,10 @@ import (
"sync"
"time"
"github.com/chrislusf/seaweedfs/weed/filer"
"github.com/chrislusf/seaweedfs/weed/storage/types"
"github.com/chrislusf/seaweedfs/weed/wdclient"
"google.golang.org/grpc"
"github.com/chrislusf/seaweedfs/weed/util/grace"
@@ -46,11 +47,12 @@ type Option struct {
DataCenter string
Umask os.FileMode
MountUid uint32
MountGid uint32
MountMode os.FileMode
MountCtime time.Time
MountMtime time.Time
MountUid uint32
MountGid uint32
MountMode os.FileMode
MountCtime time.Time
MountMtime time.Time
MountParentInode uint64
VolumeServerAccess string // how to access volume servers
Cipher bool // whether encrypt data on volume server
@@ -123,8 +125,6 @@ func NewSeaweedFileSystem(option *Option) *WFS {
glog.V(4).Infof("InvalidateEntry %s : %v", filePath, err)
}
})
startTime := time.Now()
go meta_cache.SubscribeMetaEvents(wfs.metaCache, wfs.signature, wfs, wfs.option.FilerMountRootPath, startTime.UnixNano())
grace.OnInterrupt(func() {
wfs.metaCache.Shutdown()
})
@@ -139,6 +139,11 @@ func NewSeaweedFileSystem(option *Option) *WFS {
return wfs
}
func (wfs *WFS) StartBackgroundTasks() {
startTime := time.Now()
go meta_cache.SubscribeMetaEvents(wfs.metaCache, wfs.signature, wfs, wfs.option.FilerMountRootPath, startTime.UnixNano())
}
func (wfs *WFS) Root() (fs.Node, error) {
return wfs.root, nil
}

View File

@@ -113,6 +113,21 @@ func (wfs *WFS) maybeLoadEntry(dir, name string) (entry *filer_pb.Entry, err err
fullpath := util.NewFullPath(dir, name)
// glog.V(3).Infof("read entry cache miss %s", fullpath)
// return a valid entry for the mount root
if string(fullpath) == wfs.option.FilerMountRootPath {
return &filer_pb.Entry{
Name: wfs.option.FilerMountRootPath,
IsDirectory: true,
Attributes: &filer_pb.FuseAttributes{
Mtime: wfs.option.MountMtime.Unix(),
FileMode: uint32(wfs.option.MountMode),
Uid: wfs.option.MountUid,
Gid: wfs.option.MountGid,
Crtime: wfs.option.MountCtime.Unix(),
},
}, nil
}
// read from async meta cache
meta_cache.EnsureVisited(wfs.metaCache, wfs, util.FullPath(dir))
cachedEntry, cacheErr := wfs.metaCache.FindEntry(context.Background(), fullpath)

View File

@@ -1,55 +1,13 @@
package iamapi
import (
"bytes"
"encoding/xml"
"fmt"
"strconv"
"net/http"
"net/url"
"time"
"github.com/aws/aws-sdk-go/service/iam"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/s3api/s3err"
"github.com/aws/aws-sdk-go/service/iam"
"net/http"
)
type mimeType string
const (
mimeNone mimeType = ""
mimeXML mimeType = "application/xml"
)
func setCommonHeaders(w http.ResponseWriter) {
w.Header().Set("x-amz-request-id", fmt.Sprintf("%d", time.Now().UnixNano()))
w.Header().Set("Accept-Ranges", "bytes")
}
// Encodes the response headers into XML format.
func encodeResponse(response interface{}) []byte {
var bytesBuffer bytes.Buffer
bytesBuffer.WriteString(xml.Header)
e := xml.NewEncoder(&bytesBuffer)
e.Encode(response)
return bytesBuffer.Bytes()
}
// If none of the http routes match respond with MethodNotAllowed
func notFoundHandler(w http.ResponseWriter, r *http.Request) {
glog.V(0).Infof("unsupported %s %s", r.Method, r.RequestURI)
writeErrorResponse(w, s3err.ErrMethodNotAllowed, r.URL)
}
func writeErrorResponse(w http.ResponseWriter, errorCode s3err.ErrorCode, reqURL *url.URL) {
apiError := s3err.GetAPIError(errorCode)
errorResponse := getRESTErrorResponse(apiError, reqURL.Path)
encodedErrorResponse := encodeResponse(errorResponse)
writeResponse(w, apiError.HTTPStatusCode, encodedErrorResponse, mimeXML)
}
func writeIamErrorResponse(w http.ResponseWriter, err error, object string, value string, msg error) {
errCode := err.Error()
errorResp := ErrorResponse{}
@@ -64,42 +22,10 @@ func writeIamErrorResponse(w http.ResponseWriter, err error, object string, valu
case iam.ErrCodeNoSuchEntityException:
msg := fmt.Sprintf("The %s with name %s cannot be found.", object, value)
errorResp.Error.Message = &msg
writeResponse(w, http.StatusNotFound, encodeResponse(errorResp), mimeXML)
s3err.WriteXMLResponse(w, http.StatusNotFound, errorResp)
case iam.ErrCodeServiceFailureException:
writeResponse(w, http.StatusInternalServerError, encodeResponse(errorResp), mimeXML)
s3err.WriteXMLResponse(w, http.StatusInternalServerError, errorResp)
default:
writeResponse(w, http.StatusInternalServerError, encodeResponse(errorResp), mimeXML)
s3err.WriteXMLResponse(w, http.StatusInternalServerError, errorResp)
}
}
func getRESTErrorResponse(err s3err.APIError, resource string) s3err.RESTErrorResponse {
return s3err.RESTErrorResponse{
Code: err.Code,
Message: err.Description,
Resource: resource,
RequestID: fmt.Sprintf("%d", time.Now().UnixNano()),
}
}
func writeResponse(w http.ResponseWriter, statusCode int, response []byte, mType mimeType) {
setCommonHeaders(w)
if response != nil {
w.Header().Set("Content-Length", strconv.Itoa(len(response)))
}
if mType != mimeNone {
w.Header().Set("Content-Type", string(mType))
}
w.WriteHeader(statusCode)
if response != nil {
glog.V(4).Infof("status %d %s: %s", statusCode, mType, string(response))
_, err := w.Write(response)
if err != nil {
glog.V(0).Infof("write err: %v", err)
}
w.(http.Flusher).Flush()
}
}
func writeSuccessResponseXML(w http.ResponseWriter, response []byte) {
writeResponse(w, http.StatusOK, response, mimeXML)
}

View File

@@ -362,7 +362,7 @@ func (iama *IamApiServer) DeleteAccessKey(s3cfg *iam_pb.S3ApiConfiguration, valu
func (iama *IamApiServer) DoActions(w http.ResponseWriter, r *http.Request) {
if err := r.ParseForm(); err != nil {
writeErrorResponse(w, s3err.ErrInvalidRequest, r.URL)
s3err.WriteErrorResponse(w, s3err.ErrInvalidRequest, r)
return
}
values := r.PostForm
@@ -370,7 +370,7 @@ func (iama *IamApiServer) DoActions(w http.ResponseWriter, r *http.Request) {
s3cfgLock.RLock()
s3cfg := &iam_pb.S3ApiConfiguration{}
if err := iama.s3ApiConfig.GetS3ApiConfiguration(s3cfg); err != nil {
writeErrorResponse(w, s3err.ErrInternalError, r.URL)
s3err.WriteErrorResponse(w, s3err.ErrInternalError, r)
return
}
s3cfgLock.RUnlock()
@@ -411,14 +411,14 @@ func (iama *IamApiServer) DoActions(w http.ResponseWriter, r *http.Request) {
response, err = iama.CreatePolicy(s3cfg, values)
if err != nil {
glog.Errorf("CreatePolicy: %+v", err)
writeErrorResponse(w, s3err.ErrInvalidRequest, r.URL)
s3err.WriteErrorResponse(w, s3err.ErrInvalidRequest, r)
return
}
case "PutUserPolicy":
response, err = iama.PutUserPolicy(s3cfg, values)
if err != nil {
glog.Errorf("PutUserPolicy: %+v", err)
writeErrorResponse(w, s3err.ErrInvalidRequest, r.URL)
s3err.WriteErrorResponse(w, s3err.ErrInvalidRequest, r)
return
}
case "GetUserPolicy":
@@ -437,7 +437,7 @@ func (iama *IamApiServer) DoActions(w http.ResponseWriter, r *http.Request) {
errorResponse := ErrorResponse{}
errorResponse.Error.Code = &errNotImplemented.Code
errorResponse.Error.Message = &errNotImplemented.Description
writeResponse(w, errNotImplemented.HTTPStatusCode, encodeResponse(errorResponse), mimeXML)
s3err.WriteXMLResponse(w, errNotImplemented.HTTPStatusCode, errorResponse)
return
}
if changed {
@@ -449,5 +449,5 @@ func (iama *IamApiServer) DoActions(w http.ResponseWriter, r *http.Request) {
return
}
}
writeSuccessResponseXML(w, encodeResponse(response))
s3err.WriteXMLResponse(w, http.StatusOK, response)
}

View File

@@ -12,6 +12,7 @@ import (
"github.com/chrislusf/seaweedfs/weed/pb/iam_pb"
"github.com/chrislusf/seaweedfs/weed/s3api"
. "github.com/chrislusf/seaweedfs/weed/s3api/s3_constants"
"github.com/chrislusf/seaweedfs/weed/s3api/s3err"
"github.com/chrislusf/seaweedfs/weed/wdclient"
"github.com/gorilla/mux"
"google.golang.org/grpc"
@@ -71,7 +72,7 @@ func (iama *IamApiServer) registerRouter(router *mux.Router) {
apiRouter.Methods("POST").Path("/").HandlerFunc(iama.iam.Auth(iama.DoActions, ACTION_ADMIN))
//
// NotFound
apiRouter.NotFoundHandler = http.HandlerFunc(notFoundHandler)
apiRouter.NotFoundHandler = http.HandlerFunc(s3err.NotFoundHandler)
}
func (iam IamS3ApiConfigure) GetS3ApiConfiguration(s3cfg *iam_pb.S3ApiConfiguration) (err error) {
@@ -95,8 +96,8 @@ func (iam IamS3ApiConfigure) GetS3ApiConfiguration(s3cfg *iam_pb.S3ApiConfigurat
func (iam IamS3ApiConfigure) PutS3ApiConfiguration(s3cfg *iam_pb.S3ApiConfiguration) (err error) {
buf := bytes.Buffer{}
if err := filer.S3ConfigurationToText(&buf, s3cfg); err != nil {
return fmt.Errorf("S3ConfigurationToText: %s", err)
if err := filer.ProtoToText(&buf, s3cfg); err != nil {
return fmt.Errorf("ProtoToText: %s", err)
}
return pb.WithGrpcFilerClient(
iam.option.FilerGrpcAddress,

View File

@@ -11,6 +11,8 @@ import (
"github.com/disintegration/imaging"
"github.com/chrislusf/seaweedfs/weed/glog"
_ "golang.org/x/image/webp"
)
func Resized(ext string, read io.ReadSeeker, width, height int, mode string) (resized io.ReadSeeker, w int, h int) {
@@ -47,6 +49,9 @@ func Resized(ext string, read io.ReadSeeker, width, height int, mode string) (re
jpeg.Encode(&buf, dstImage, nil)
case ".gif":
gif.Encode(&buf, dstImage, nil)
case ".webp":
// Webp does not have a golang encoder, so fall back to PNG.
png.Encode(&buf, dstImage)
}
return bytes.NewReader(buf.Bytes()), dstImage.Bounds().Dx(), dstImage.Bounds().Dy()
} else {

View File

@@ -0,0 +1,23 @@
package images
import (
"bytes"
"io/ioutil"
"os"
"testing"
)
func TestResizing(t *testing.T) {
fname := "sample2.webp"
dat, _ := ioutil.ReadFile(fname)
resized, _, _ := Resized(".webp", bytes.NewReader(dat), 100, 30, "")
buf := new(bytes.Buffer)
buf.ReadFrom(resized)
ioutil.WriteFile("resized1.png", buf.Bytes(), 0644)
os.Remove("resized1.png")
}

weed/images/sample2.webp Normal file (binary, 208 KiB; not shown)

View File

@@ -116,7 +116,7 @@ func (broker *MessageBroker) Subscribe(stream messaging_pb.SeaweedMessaging_Subs
lastReadTime = time.Unix(0, processedTsNs)
}
lastReadTime, err = lock.logBuffer.LoopProcessLogData(lastReadTime, func() bool {
lastReadTime, err = lock.logBuffer.LoopProcessLogData("broker", lastReadTime, func() bool {
lock.Mutex.Lock()
lock.cond.Wait()
lock.Mutex.Unlock()

View File

@@ -68,7 +68,7 @@ func (tm *TopicManager) buildLogBuffer(tl *TopicControl, tp TopicPartition, topi
glog.V(0).Infof("log write failed %s: %v", targetFile, err)
}
}
logBuffer := log_buffer.NewLogBuffer(time.Minute, flushFn, func() {
logBuffer := log_buffer.NewLogBuffer("broker", time.Minute, flushFn, func() {
tl.cond.Broadcast()
})

View File

@@ -73,6 +73,10 @@ func Assign(masterFn GetMasterFn, grpcDialOption grpc.DialOption, primaryRequest
ret.Error = resp.Error
ret.Auth = security.EncodedJwt(resp.Auth)
if resp.Error != "" {
return fmt.Errorf("assignRequest: %v", resp.Error)
}
return nil
})

View File

@@ -100,7 +100,7 @@ func DeleteFilesWithLookupVolumeId(grpcDialOption grpc.DialOption, fileIds []str
go func(server string, fidList []string) {
defer wg.Done()
if deleteResults, deleteErr := DeleteFilesAtOneVolumeServer(server, grpcDialOption, fidList, true); deleteErr != nil {
if deleteResults, deleteErr := DeleteFilesAtOneVolumeServer(server, grpcDialOption, fidList, false); deleteErr != nil {
err = deleteErr
} else if deleteResults != nil {
resultChan <- deleteResults

View File

@@ -18,7 +18,7 @@ type MockClient struct {
}
func (m *MockClient) Do(req *http.Request) (*http.Response, error) {
n, originalSize, _, err := needle.CreateNeedleFromRequest(req, false, 1024*1024)
n, originalSize, _, err := needle.CreateNeedleFromRequest(req, false, 1024*1024, &bytes.Buffer{})
if m.needleHandling != nil {
m.needleHandling(n, originalSize, err)
}

View File

@@ -71,7 +71,10 @@ func TailVolumeFromSource(volumeServer string, grpcDialOption grpc.DialOption, v
n := new(needle.Needle)
n.ParseNeedleHeader(needleHeader)
n.ReadNeedleBodyBytes(needleBody, needle.CurrentVersion)
err = n.ReadNeedleBodyBytes(needleBody, needle.CurrentVersion)
if err != nil {
return err
}
err = fn(n)

View File

@@ -67,6 +67,8 @@ service SeaweedFiler {
rpc KvPut (KvPutRequest) returns (KvPutResponse) {
}
rpc DownloadToLocal (DownloadToLocalRequest) returns (DownloadToLocalResponse) {
}
}
//////////////////////////////////////////////////
@@ -92,6 +94,13 @@ message ListEntriesResponse {
Entry entry = 1;
}
message RemoteEntry {
string storage_name = 1;
int64 local_mtime = 2;
string remote_e_tag = 3;
int64 remote_mtime = 4;
int64 remote_size = 5;
}
message Entry {
string name = 1;
bool is_directory = 2;
@@ -101,6 +110,8 @@ message Entry {
bytes hard_link_id = 7;
int32 hard_link_counter = 8; // only exists in hard link meta data
bytes content = 9; // if not empty, the file content
RemoteEntry remote_entry = 10;
}
message FullEntry {
@@ -208,6 +219,7 @@ message AtomicRenameEntryRequest {
string old_name = 2;
string new_directory = 3;
string new_name = 4;
repeated int32 signatures = 5;
}
message AtomicRenameEntryResponse {
@@ -292,6 +304,7 @@ message GetFilerConfigurationResponse {
int32 signature = 8;
string metrics_address = 9;
int32 metrics_interval_sec = 10;
string version = 11;
}
message SubscribeMetadataRequest {
@@ -334,7 +347,9 @@ message LocateBrokerResponse {
repeated Resource resources = 2;
}
/////////////////////////
// Key-Value operations
/////////////////////////
message KvGetRequest {
bytes key = 1;
}
@@ -350,7 +365,9 @@ message KvPutResponse {
string error = 1;
}
/////////////////////////
// path-based configurations
/////////////////////////
message FilerConf {
int32 version = 1;
message PathConf {
@@ -361,6 +378,35 @@ message FilerConf {
string disk_type = 5;
bool fsync = 6;
uint32 volume_growth_count = 7;
bool read_only = 8;
}
repeated PathConf locations = 2;
}
/////////////////////////
// Remote Storage related
/////////////////////////
message RemoteConf {
string type = 1;
string name = 2;
string s3_access_key = 4;
string s3_secret_key = 5;
string s3_region = 6;
string s3_endpoint = 7;
}
message RemoteStorageMapping {
map<string,RemoteStorageLocation> mappings = 1;
}
message RemoteStorageLocation {
string name = 1;
string bucket = 2;
string path = 3;
}
message DownloadToLocalRequest {
string directory = 1;
string name = 2;
}
message DownloadToLocalResponse {
Entry entry = 1;
}

File diff suppressed because it is too large.

View File

@@ -204,39 +204,42 @@ func Touch(filerClient FilerClient, parentDirectoryPath string, entryName string
func Mkdir(filerClient FilerClient, parentDirectoryPath string, dirName string, fn func(entry *Entry)) error {
return filerClient.WithFilerClient(func(client SeaweedFilerClient) error {
entry := &Entry{
Name: dirName,
IsDirectory: true,
Attributes: &FuseAttributes{
Mtime: time.Now().Unix(),
Crtime: time.Now().Unix(),
FileMode: uint32(0777 | os.ModeDir),
Uid: OS_UID,
Gid: OS_GID,
},
}
if fn != nil {
fn(entry)
}
request := &CreateEntryRequest{
Directory: parentDirectoryPath,
Entry: entry,
}
glog.V(1).Infof("mkdir: %v", request)
if err := CreateEntry(client, request); err != nil {
glog.V(0).Infof("mkdir %v: %v", request, err)
return fmt.Errorf("mkdir %s/%s: %v", parentDirectoryPath, dirName, err)
}
return nil
return DoMkdir(client, parentDirectoryPath, dirName, fn)
})
}
func MkFile(filerClient FilerClient, parentDirectoryPath string, fileName string, chunks []*FileChunk) error {
func DoMkdir(client SeaweedFilerClient, parentDirectoryPath string, dirName string, fn func(entry *Entry)) error {
entry := &Entry{
Name: dirName,
IsDirectory: true,
Attributes: &FuseAttributes{
Mtime: time.Now().Unix(),
Crtime: time.Now().Unix(),
FileMode: uint32(0777 | os.ModeDir),
Uid: OS_UID,
Gid: OS_GID,
},
}
if fn != nil {
fn(entry)
}
request := &CreateEntryRequest{
Directory: parentDirectoryPath,
Entry: entry,
}
glog.V(1).Infof("mkdir: %v", request)
if err := CreateEntry(client, request); err != nil {
glog.V(0).Infof("mkdir %v: %v", request, err)
return fmt.Errorf("mkdir %s/%s: %v", parentDirectoryPath, dirName, err)
}
return nil
}
func MkFile(filerClient FilerClient, parentDirectoryPath string, fileName string, chunks []*FileChunk, fn func(entry *Entry)) error {
return filerClient.WithFilerClient(func(client SeaweedFilerClient) error {
entry := &Entry{
@@ -252,6 +255,10 @@ func MkFile(filerClient FilerClient, parentDirectoryPath string, fileName string
Chunks: chunks,
}
if fn != nil {
fn(entry)
}
request := &CreateEntryRequest{
Directory: parentDirectoryPath,
Entry: entry,
@@ -269,31 +276,33 @@ func MkFile(filerClient FilerClient, parentDirectoryPath string, fileName string
func Remove(filerClient FilerClient, parentDirectoryPath, name string, isDeleteData, isRecursive, ignoreRecursiveErr, isFromOtherCluster bool, signatures []int32) error {
return filerClient.WithFilerClient(func(client SeaweedFilerClient) error {
deleteEntryRequest := &DeleteEntryRequest{
Directory: parentDirectoryPath,
Name: name,
IsDeleteData: isDeleteData,
IsRecursive: isRecursive,
IgnoreRecursiveError: ignoreRecursiveErr,
IsFromOtherCluster: isFromOtherCluster,
Signatures: signatures,
}
if resp, err := client.DeleteEntry(context.Background(), deleteEntryRequest); err != nil {
if strings.Contains(err.Error(), ErrNotFound.Error()) {
return nil
}
return err
} else {
if resp.Error != "" {
if strings.Contains(resp.Error, ErrNotFound.Error()) {
return nil
}
return errors.New(resp.Error)
}
}
return nil
return DoRemove(client, parentDirectoryPath, name, isDeleteData, isRecursive, ignoreRecursiveErr, isFromOtherCluster, signatures)
})
}
func DoRemove(client SeaweedFilerClient, parentDirectoryPath string, name string, isDeleteData bool, isRecursive bool, ignoreRecursiveErr bool, isFromOtherCluster bool, signatures []int32) error {
deleteEntryRequest := &DeleteEntryRequest{
Directory: parentDirectoryPath,
Name: name,
IsDeleteData: isDeleteData,
IsRecursive: isRecursive,
IgnoreRecursiveError: ignoreRecursiveErr,
IsFromOtherCluster: isFromOtherCluster,
Signatures: signatures,
}
if resp, err := client.DeleteEntry(context.Background(), deleteEntryRequest); err != nil {
if strings.Contains(err.Error(), ErrNotFound.Error()) {
return nil
}
return err
} else {
if resp.Error != "" {
if strings.Contains(resp.Error, ErrNotFound.Error()) {
return nil
}
return errors.New(resp.Error)
}
}
return nil
}
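// Usage sketch (paths are illustrative): the new Do* variants let a caller that already holds
// an open SeaweedFilerClient (for example inside another WithFilerClient callback) reuse the
// connection instead of dialing the filer again:
err := filerClient.WithFilerClient(func(client SeaweedFilerClient) error {
	if err := DoMkdir(client, "/buckets", "b2", nil); err != nil {
		return err
	}
	return DoRemove(client, "/buckets", "old", true, true, true, false, nil)
})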

View File

@@ -12,6 +12,10 @@ import (
"github.com/viant/ptrie"
)
func (entry *Entry) IsInRemoteOnly() bool {
return len(entry.Chunks) == 0 && entry.RemoteEntry != nil && entry.RemoteEntry.RemoteSize > 0
}
func ToFileIdObject(fileIdStr string) (*FileId, error) {
t, err := needle.ParseFileIdFromString(fileIdStr)
if err != nil {
@@ -147,3 +151,7 @@ func (fp *FilerConf_PathConf) Key() interface{} {
key, _ := proto.Marshal(fp)
return string(key)
}
func (fp *RemoteStorageLocation) Key() interface{} {
key, _ := proto.Marshal(fp)
return string(key)
}

weed/pb/filer_pb_tail.go Normal file (94 lines added)
View File

@@ -0,0 +1,94 @@
package pb
import (
"context"
"fmt"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"google.golang.org/grpc"
"io"
"time"
)
type ProcessMetadataFunc func(resp *filer_pb.SubscribeMetadataResponse) error
func FollowMetadata(filerAddress string, grpcDialOption grpc.DialOption,
clientName string, pathPrefix string, lastTsNs int64, selfSignature int32,
processEventFn ProcessMetadataFunc, fatalOnError bool) error {
err := WithFilerClient(filerAddress, grpcDialOption, makeFunc(
clientName, pathPrefix, lastTsNs, selfSignature, processEventFn, fatalOnError))
if err != nil {
return fmt.Errorf("subscribing filer meta change: %v", err)
}
return nil
}
func WithFilerClientFollowMetadata(filerClient filer_pb.FilerClient,
clientName string, pathPrefix string, lastTsNs int64, selfSignature int32,
processEventFn ProcessMetadataFunc, fatalOnError bool) error {
err := filerClient.WithFilerClient(makeFunc(
clientName, pathPrefix, lastTsNs, selfSignature, processEventFn, fatalOnError))
if err != nil {
return fmt.Errorf("subscribing filer meta change: %v", err)
}
return nil
}
func makeFunc(clientName string, pathPrefix string, lastTsNs int64, selfSignature int32,
processEventFn ProcessMetadataFunc, fatalOnError bool) func(client filer_pb.SeaweedFilerClient) error {
return func(client filer_pb.SeaweedFilerClient) error {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
stream, err := client.SubscribeMetadata(ctx, &filer_pb.SubscribeMetadataRequest{
ClientName: clientName,
PathPrefix: pathPrefix,
SinceNs: lastTsNs,
Signature: selfSignature,
})
if err != nil {
return fmt.Errorf("subscribe: %v", err)
}
for {
resp, listenErr := stream.Recv()
if listenErr == io.EOF {
return nil
}
if listenErr != nil {
return listenErr
}
if err := processEventFn(resp); err != nil {
if fatalOnError {
glog.Fatalf("process %v: %v", resp, err)
} else {
glog.Errorf("process %v: %v", resp, err)
}
}
lastTsNs = resp.TsNs
}
}
}
func AddOffsetFunc(processEventFn ProcessMetadataFunc, offsetInterval time.Duration, offsetFunc func(counter int64, offset int64) error) ProcessMetadataFunc {
var counter int64
var lastWriteTime time.Time
return func(resp *filer_pb.SubscribeMetadataResponse) error {
if err := processEventFn(resp); err != nil {
return err
}
counter++
if lastWriteTime.Add(offsetInterval).Before(time.Now()) {
lastWriteTime = time.Now()
if err := offsetFunc(counter, resp.TsNs); err != nil {
return err
}
// reset only after the offset callback succeeds, so it observes the accumulated event count
counter = 0
}
return nil
}
}
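// Usage sketch (setOffset is a hypothetical persistence helper, not part of this file): wrap an
// event processor so the consumed timestamp is saved at most once per interval, letting a
// restarted subscriber resume from the last stored offset.
processEventFnWithOffset := AddOffsetFunc(processEventFn, 3*time.Second, func(counter int64, lastTsNs int64) error {
	glog.V(0).Infof("processed %d events, saving offset %d", counter, lastTsNs)
	return setOffset(grpcDialOption, filerAddress, "sync.offset", lastTsNs) // hypothetical helper
})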

View File

@@ -31,7 +31,8 @@ var (
type versionedGrpcClient struct {
*grpc.ClientConn
version int
version int
errCount int
}
func init() {
@@ -49,7 +50,7 @@ func NewGrpcServer(opts ...grpc.ServerOption) *grpc.Server {
}),
grpc.KeepaliveEnforcementPolicy(keepalive.EnforcementPolicy{
MinTime: 60 * time.Second, // min time a client should wait before sending a ping
PermitWithoutStream: false,
PermitWithoutStream: true,
}),
grpc.MaxRecvMsgSize(Max_Message_Size),
grpc.MaxSendMsgSize(Max_Message_Size),
@@ -75,7 +76,7 @@ func GrpcDial(ctx context.Context, address string, opts ...grpc.DialOption) (*gr
grpc.WithKeepaliveParams(keepalive.ClientParameters{
Time: 30 * time.Second, // client ping server if no activity for this long
Timeout: 20 * time.Second,
PermitWithoutStream: false,
PermitWithoutStream: true,
}))
for _, opt := range opts {
if opt != nil {
@@ -103,6 +104,7 @@ func getOrCreateConnection(address string, opts ...grpc.DialOption) (*versionedG
vgc := &versionedGrpcClient{
grpcConnection,
rand.Int(),
0,
}
grpcClients[address] = vgc
@@ -116,15 +118,20 @@ func WithCachedGrpcClient(fn func(*grpc.ClientConn) error, address string, opts
return fmt.Errorf("getOrCreateConnection %s: %v", address, err)
}
executionErr := fn(vgc.ClientConn)
if executionErr != nil && strings.Contains(executionErr.Error(), "transport") {
grpcClientsLock.Lock()
if t, ok := grpcClients[address]; ok {
if t.version == vgc.version {
vgc.Close()
delete(grpcClients, address)
if executionErr != nil {
vgc.errCount++
if vgc.errCount > 3 ||
strings.Contains(executionErr.Error(), "transport") ||
strings.Contains(executionErr.Error(), "connection closed") {
grpcClientsLock.Lock()
if t, ok := grpcClients[address]; ok {
if t.version == vgc.version {
vgc.Close()
delete(grpcClients, address)
}
}
grpcClientsLock.Unlock()
}
grpcClientsLock.Unlock()
}
return executionErr
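// Note: a cached connection is now dropped not only on "transport" errors but also after more
// than three failed calls or a "connection closed" error, so a bad connection is not reused
// indefinitely. (errCount is cumulative here; it is never reset on success.)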

View File

@@ -215,13 +215,13 @@ message CollectionDeleteResponse {
//
message DiskInfo {
string type = 1;
uint64 volume_count = 2;
uint64 max_volume_count = 3;
uint64 free_volume_count = 4;
uint64 active_volume_count = 5;
int64 volume_count = 2;
int64 max_volume_count = 3;
int64 free_volume_count = 4;
int64 active_volume_count = 5;
repeated VolumeInformationMessage volume_infos = 6;
repeated VolumeEcShardInformationMessage ec_shard_infos = 7;
uint64 remote_volume_count = 8;
int64 remote_volume_count = 8;
}
message DataNodeInfo {
string id = 1;
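// Likely motivation for the uint64 -> int64 switch above (a standalone sketch, not part of the
// commit): capacity math on these counters can go negative transiently, and unsigned
// subtraction silently wraps instead of producing a checkable delta.
package main

import "fmt"

func main() {
	free, pending := uint64(1), uint64(2)
	fmt.Println(free - pending) // 18446744073709551615 (silent wrap-around)
	f, p := int64(1), int64(2)
	fmt.Println(f - p) // -1 (a meaningful, checkable delta)
}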

View File

@@ -1628,13 +1628,13 @@ type DiskInfo struct {
unknownFields protoimpl.UnknownFields
Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"`
VolumeCount uint64 `protobuf:"varint,2,opt,name=volume_count,json=volumeCount,proto3" json:"volume_count,omitempty"`
MaxVolumeCount uint64 `protobuf:"varint,3,opt,name=max_volume_count,json=maxVolumeCount,proto3" json:"max_volume_count,omitempty"`
FreeVolumeCount uint64 `protobuf:"varint,4,opt,name=free_volume_count,json=freeVolumeCount,proto3" json:"free_volume_count,omitempty"`
ActiveVolumeCount uint64 `protobuf:"varint,5,opt,name=active_volume_count,json=activeVolumeCount,proto3" json:"active_volume_count,omitempty"`
VolumeCount int64 `protobuf:"varint,2,opt,name=volume_count,json=volumeCount,proto3" json:"volume_count,omitempty"`
MaxVolumeCount int64 `protobuf:"varint,3,opt,name=max_volume_count,json=maxVolumeCount,proto3" json:"max_volume_count,omitempty"`
FreeVolumeCount int64 `protobuf:"varint,4,opt,name=free_volume_count,json=freeVolumeCount,proto3" json:"free_volume_count,omitempty"`
ActiveVolumeCount int64 `protobuf:"varint,5,opt,name=active_volume_count,json=activeVolumeCount,proto3" json:"active_volume_count,omitempty"`
VolumeInfos []*VolumeInformationMessage `protobuf:"bytes,6,rep,name=volume_infos,json=volumeInfos,proto3" json:"volume_infos,omitempty"`
EcShardInfos []*VolumeEcShardInformationMessage `protobuf:"bytes,7,rep,name=ec_shard_infos,json=ecShardInfos,proto3" json:"ec_shard_infos,omitempty"`
RemoteVolumeCount uint64 `protobuf:"varint,8,opt,name=remote_volume_count,json=remoteVolumeCount,proto3" json:"remote_volume_count,omitempty"`
RemoteVolumeCount int64 `protobuf:"varint,8,opt,name=remote_volume_count,json=remoteVolumeCount,proto3" json:"remote_volume_count,omitempty"`
}
func (x *DiskInfo) Reset() {
@@ -1676,28 +1676,28 @@ func (x *DiskInfo) GetType() string {
return ""
}
func (x *DiskInfo) GetVolumeCount() uint64 {
func (x *DiskInfo) GetVolumeCount() int64 {
if x != nil {
return x.VolumeCount
}
return 0
}
func (x *DiskInfo) GetMaxVolumeCount() uint64 {
func (x *DiskInfo) GetMaxVolumeCount() int64 {
if x != nil {
return x.MaxVolumeCount
}
return 0
}
func (x *DiskInfo) GetFreeVolumeCount() uint64 {
func (x *DiskInfo) GetFreeVolumeCount() int64 {
if x != nil {
return x.FreeVolumeCount
}
return 0
}
func (x *DiskInfo) GetActiveVolumeCount() uint64 {
func (x *DiskInfo) GetActiveVolumeCount() int64 {
if x != nil {
return x.ActiveVolumeCount
}
@@ -1718,7 +1718,7 @@ func (x *DiskInfo) GetEcShardInfos() []*VolumeEcShardInformationMessage {
return nil
}
func (x *DiskInfo) GetRemoteVolumeCount() uint64 {
func (x *DiskInfo) GetRemoteVolumeCount() int64 {
if x != nil {
return x.RemoteVolumeCount
}
@@ -3134,15 +3134,15 @@ var file_master_proto_rawDesc = []byte{
0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x91, 0x03, 0x0a, 0x08, 0x44, 0x69, 0x73,
0x6b, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20,
0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x76, 0x6f, 0x6c,
0x75, 0x6d, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52,
0x75, 0x6d, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52,
0x0b, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x28, 0x0a, 0x10,
0x6d, 0x61, 0x78, 0x5f, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74,
0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0e, 0x6d, 0x61, 0x78, 0x56, 0x6f, 0x6c, 0x75, 0x6d,
0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0e, 0x6d, 0x61, 0x78, 0x56, 0x6f, 0x6c, 0x75, 0x6d,
0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x2a, 0x0a, 0x11, 0x66, 0x72, 0x65, 0x65, 0x5f, 0x76,
0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28,
0x04, 0x52, 0x0f, 0x66, 0x72, 0x65, 0x65, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x6f, 0x75,
0x03, 0x52, 0x0f, 0x66, 0x72, 0x65, 0x65, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x6f, 0x75,
0x6e, 0x74, 0x12, 0x2e, 0x0a, 0x13, 0x61, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x76, 0x6f, 0x6c,
0x75, 0x6d, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x04, 0x52,
0x75, 0x6d, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52,
0x11, 0x61, 0x63, 0x74, 0x69, 0x76, 0x65, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x6f, 0x75,
0x6e, 0x74, 0x12, 0x46, 0x0a, 0x0c, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x69, 0x6e, 0x66,
0x6f, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65,
@@ -3155,7 +3155,7 @@ var file_master_proto_rawDesc = []byte{
0x72, 0x6d, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x52, 0x0c,
0x65, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x49, 0x6e, 0x66, 0x6f, 0x73, 0x12, 0x2e, 0x0a, 0x13,
0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x5f, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x63, 0x6f,
0x75, 0x6e, 0x74, 0x18, 0x08, 0x20, 0x01, 0x28, 0x04, 0x52, 0x11, 0x72, 0x65, 0x6d, 0x6f, 0x74,
0x75, 0x6e, 0x74, 0x18, 0x08, 0x20, 0x01, 0x28, 0x03, 0x52, 0x11, 0x72, 0x65, 0x6d, 0x6f, 0x74,
0x65, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x22, 0xb7, 0x01, 0x0a,
0x0c, 0x44, 0x61, 0x74, 0x61, 0x4e, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x0e, 0x0a,
0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x44, 0x0a,

View File

@@ -93,6 +93,10 @@ service VolumeServer {
rpc VolumeServerLeave (VolumeServerLeaveRequest) returns (VolumeServerLeaveResponse) {
}
// remote storage
rpc FetchAndWriteNeedle (FetchAndWriteNeedleRequest) returns (FetchAndWriteNeedleResponse) {
}
// <experimental> query
rpc Query (QueryRequest) returns (stream QueriedStripe) {
}
@@ -426,6 +430,7 @@ message VolumeInfo {
string replication = 3;
}
// tiered storage
message VolumeTierMoveDatToRemoteRequest {
uint32 volume_id = 1;
string collection = 2;
@@ -460,6 +465,26 @@ message VolumeServerLeaveRequest {
message VolumeServerLeaveResponse {
}
// remote storage
message FetchAndWriteNeedleRequest {
uint32 volume_id = 1;
uint64 needle_id = 2;
uint32 cookie = 3;
int64 offset = 4;
int64 size = 5;
// remote conf
string remote_type = 6;
string remote_name = 7;
string s3_access_key = 8;
string s3_secret_key = 9;
string s3_region = 10;
string s3_endpoint = 11;
string remote_bucket = 12;
string remote_path = 13;
}
message FetchAndWriteNeedleResponse {
}
// select on volume servers
message QueryRequest {
repeated string selections = 1;

File diff suppressed because it is too large.

View File

@@ -0,0 +1,76 @@
package remote_storage
import (
"fmt"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"io"
"strings"
"sync"
)
func ParseLocation(remote string) (loc *filer_pb.RemoteStorageLocation) {
loc = &filer_pb.RemoteStorageLocation{}
if strings.HasSuffix(string(remote), "/") {
remote = remote[:len(remote)-1]
}
parts := strings.SplitN(string(remote), "/", 3)
if len(parts) >= 1 {
loc.Name = parts[0]
}
if len(parts) >= 2 {
loc.Bucket = parts[1]
// only slice past the "name/bucket" prefix when it exists; a bare name would otherwise slice out of range
loc.Path = remote[len(loc.Name)+1+len(loc.Bucket):]
}
if loc.Path == "" {
loc.Path = "/"
}
return
}
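// Example: ParseLocation("cloud1/bucket2/path/to/dir") yields
// Name "cloud1", Bucket "bucket2", Path "/path/to/dir";
// ParseLocation("cloud1/bucket2") yields Path "/".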
type VisitFunc func(dir string, name string, isDirectory bool, remoteEntry *filer_pb.RemoteEntry) error
type RemoteStorageClient interface {
Traverse(loc *filer_pb.RemoteStorageLocation, visitFn VisitFunc) error
ReadFile(loc *filer_pb.RemoteStorageLocation, offset int64, size int64) (data []byte, err error)
WriteDirectory(loc *filer_pb.RemoteStorageLocation, entry *filer_pb.Entry) (err error)
WriteFile(loc *filer_pb.RemoteStorageLocation, entry *filer_pb.Entry, reader io.Reader) (remoteEntry *filer_pb.RemoteEntry, err error)
UpdateFileMetadata(loc *filer_pb.RemoteStorageLocation, entry *filer_pb.Entry) (err error)
DeleteFile(loc *filer_pb.RemoteStorageLocation) (err error)
}
type RemoteStorageClientMaker interface {
Make(remoteConf *filer_pb.RemoteConf) (RemoteStorageClient, error)
}
var (
RemoteStorageClientMakers = make(map[string]RemoteStorageClientMaker)
remoteStorageClients = make(map[string]RemoteStorageClient)
remoteStorageClientsLock sync.Mutex
)
func makeRemoteStorageClient(remoteConf *filer_pb.RemoteConf) (RemoteStorageClient, error) {
maker, found := RemoteStorageClientMakers[remoteConf.Type]
if !found {
return nil, fmt.Errorf("remote storage type %s not found", remoteConf.Type)
}
return maker.Make(remoteConf)
}
func GetRemoteStorage(remoteConf *filer_pb.RemoteConf) (RemoteStorageClient, error) {
remoteStorageClientsLock.Lock()
defer remoteStorageClientsLock.Unlock()
existingRemoteStorageClient, found := remoteStorageClients[remoteConf.Name]
if found {
return existingRemoteStorageClient, nil
}
newRemoteStorageClient, err := makeRemoteStorageClient(remoteConf)
if err != nil {
return nil, fmt.Errorf("make remote storage client %s: %v", remoteConf.Name, err)
}
remoteStorageClients[remoteConf.Name] = newRemoteStorageClient
return newRemoteStorageClient, nil
}
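A short usage sketch tying the pieces together. The location string and RemoteConf values are hypothetical; Type selects the registered maker and Name is the cache key, as the code above shows, and the s3 import path is assumed from the package layout of this commit:

package example

import (
	"fmt"

	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
	"github.com/chrislusf/seaweedfs/weed/remote_storage"
	_ "github.com/chrislusf/seaweedfs/weed/remote_storage/s3" // registers the "s3" maker via init()
)

func listRemote() error {
	loc := remote_storage.ParseLocation("cloud1/bucket2/path/to/dir")
	client, err := remote_storage.GetRemoteStorage(&filer_pb.RemoteConf{
		Type: "s3",     // selects the maker registered under "s3"
		Name: "cloud1", // one client is cached and reused per name
		// credential/region/endpoint fields omitted in this sketch
	})
	if err != nil {
		return err
	}
	return client.Traverse(loc, func(dir string, name string, isDirectory bool, remoteEntry *filer_pb.RemoteEntry) error {
		fmt.Printf("%s/%s size=%d\n", dir, name, remoteEntry.RemoteSize)
		return nil
	})
}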

View File

@@ -0,0 +1,219 @@
package s3
import (
"fmt"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/s3"
"github.com/aws/aws-sdk-go/service/s3/s3iface"
"github.com/aws/aws-sdk-go/service/s3/s3manager"
"github.com/chrislusf/seaweedfs/weed/filer"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/remote_storage"
"github.com/chrislusf/seaweedfs/weed/util"
"io"
)
func init() {
remote_storage.RemoteStorageClientMakers["s3"] = new(s3RemoteStorageMaker)
}
type s3RemoteStorageMaker struct{}
func (s s3RemoteStorageMaker) Make(conf *filer_pb.RemoteConf) (remote_storage.RemoteStorageClient, error) {
client := &s3RemoteStorageClient{
conf: conf,
}
config := &aws.Config{
Region: aws.String(conf.S3Region),
Endpoint: aws.String(conf.S3Endpoint),
S3ForcePathStyle: aws.Bool(true),
}
if conf.S3AccessKey != "" && conf.S3SecretKey != "" {
config.Credentials = credentials.NewStaticCredentials(conf.S3AccessKey, conf.S3SecretKey, "")
}
sess, err := session.NewSession(config)
if err != nil {
return nil, fmt.Errorf("create aws session: %v", err)
}
client.conn = s3.New(sess)
return client, nil
}
type s3RemoteStorageClient struct {
conf *filer_pb.RemoteConf
conn s3iface.S3API
}
var _ = remote_storage.RemoteStorageClient(&s3RemoteStorageClient{})
func (s *s3RemoteStorageClient) Traverse(remote *filer_pb.RemoteStorageLocation, visitFn remote_storage.VisitFunc) (err error) {
pathKey := remote.Path[1:]
listInput := &s3.ListObjectsV2Input{
Bucket: aws.String(remote.Bucket),
ContinuationToken: nil,
Delimiter: nil, // no "/" delimiter, so the listing recurses through all entries
EncodingType: nil,
ExpectedBucketOwner: nil,
FetchOwner: nil,
MaxKeys: nil, // nil falls back to the service default (1,000 keys per page)
Prefix: aws.String(pathKey),
RequestPayer: nil,
StartAfter: nil,
}
isLastPage := false
for !isLastPage && err == nil {
listErr := s.conn.ListObjectsV2Pages(listInput, func(page *s3.ListObjectsV2Output, lastPage bool) bool {
for _, content := range page.Contents {
key := *content.Key
if len(pathKey) == 0 {
key = "/" + key
} else {
key = key[len(pathKey):]
}
dir, name := util.FullPath(key).DirAndName()
if err = visitFn(dir, name, false, &filer_pb.RemoteEntry{
RemoteMtime: (*content.LastModified).Unix(),
RemoteSize: *content.Size,
RemoteETag: *content.ETag,
StorageName: s.conf.Name,
}); err != nil {
// assign to the outer err so the paging loop stops and the visit error is returned
return false
}
}
listInput.ContinuationToken = page.NextContinuationToken
isLastPage = lastPage
return true
})
if listErr != nil {
err = fmt.Errorf("list %v: %v", remote, listErr)
}
}
return
}
func (s *s3RemoteStorageClient) ReadFile(loc *filer_pb.RemoteStorageLocation, offset int64, size int64) (data []byte, err error) {
downloader := s3manager.NewDownloaderWithClient(s.conn, func(u *s3manager.Downloader) {
u.PartSize = int64(4 * 1024 * 1024)
u.Concurrency = 1
})
dataSlice := make([]byte, int(size))
writerAt := aws.NewWriteAtBuffer(dataSlice)
_, err = downloader.Download(writerAt, &s3.GetObjectInput{
Bucket: aws.String(loc.Bucket),
Key: aws.String(loc.Path[1:]),
Range: aws.String(fmt.Sprintf("bytes=%d-%d", offset, offset+size-1)),
})
if err != nil {
return nil, fmt.Errorf("failed to download file %s%s: %v", loc.Bucket, loc.Path, err)
}
return writerAt.Bytes(), nil
}
func (s *s3RemoteStorageClient) WriteDirectory(loc *filer_pb.RemoteStorageLocation, entry *filer_pb.Entry) (err error) {
return nil
}
func (s *s3RemoteStorageClient) WriteFile(loc *filer_pb.RemoteStorageLocation, entry *filer_pb.Entry, reader io.Reader) (remoteEntry *filer_pb.RemoteEntry, err error) {
fileSize := int64(filer.FileSize(entry))
partSize := int64(8 * 1024 * 1024) // S3's minimum part size is 5MB; start at 8MB and grow below as needed
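// S3 caps a multipart upload at 10,000 parts; keep fileSize/partSize under 1,000
// (a wide safety margin) by quadrupling the part size as needed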
for partSize*1000 < fileSize {
partSize *= 4
}
// Create an uploader with the session and custom options
uploader := s3manager.NewUploaderWithClient(s.conn, func(u *s3manager.Uploader) {
u.PartSize = partSize
u.Concurrency = 5
})
// process tagging
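// keys and values are concatenated verbatim into a query-style string
// ("k1=v1&k2=v2"); they are assumed to be URL-safe, since no encoding is applied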
tags := ""
for k, v := range entry.Extended {
if len(tags) > 0 {
tags = tags + "&"
}
tags = tags + k + "=" + string(v)
}
// Upload the file to S3.
_, err = uploader.Upload(&s3manager.UploadInput{
Bucket: aws.String(loc.Bucket),
Key: aws.String(loc.Path[1:]),
Body: reader,
ACL: aws.String("private"),
ServerSideEncryption: aws.String("AES256"),
StorageClass: aws.String("STANDARD_IA"),
Tagging: aws.String(tags),
})
// in case the upload fails
if err != nil {
return nil, fmt.Errorf("upload to s3 %s/%s%s: %v", loc.Name, loc.Bucket, loc.Path, err)
}
// read back the remote entry
return s.readFileRemoteEntry(loc)
}
func toTagging(attributes map[string][]byte) *s3.Tagging {
tagging := &s3.Tagging{}
for k, v := range attributes {
tagging.TagSet = append(tagging.TagSet, &s3.Tag{
Key: aws.String(k),
Value: aws.String(string(v)),
})
}
return tagging
}
func (s *s3RemoteStorageClient) readFileRemoteEntry(loc *filer_pb.RemoteStorageLocation) (*filer_pb.RemoteEntry, error) {
resp, err := s.conn.HeadObject(&s3.HeadObjectInput{
Bucket: aws.String(loc.Bucket),
Key: aws.String(loc.Path[1:]),
})
if err != nil {
return nil, err
}
return &filer_pb.RemoteEntry{
RemoteMtime: resp.LastModified.Unix(),
RemoteSize: *resp.ContentLength,
RemoteETag: *resp.ETag,
StorageName: s.conf.Name,
}, nil
}
func (s *s3RemoteStorageClient) UpdateFileMetadata(loc *filer_pb.RemoteStorageLocation, entry *filer_pb.Entry) (err error) {
tagging := toTagging(entry.Extended)
if len(tagging.TagSet) > 0 {
_, err = s.conn.PutObjectTagging(&s3.PutObjectTaggingInput{
Bucket: aws.String(loc.Bucket),
Key: aws.String(loc.Path[1:]),
Tagging: tagging, // reuse the value computed above instead of rebuilding it
})
} else {
_, err = s.conn.DeleteObjectTagging(&s3.DeleteObjectTaggingInput{
Bucket: aws.String(loc.Bucket),
Key: aws.String(loc.Path[1:]),
})
}
return
}
func (s *s3RemoteStorageClient) DeleteFile(loc *filer_pb.RemoteStorageLocation) (err error) {
_, err = s.conn.DeleteObject(&s3.DeleteObjectInput{
Bucket: aws.String(loc.Bucket),
Key: aws.String(loc.Path[1:]),
})
return
}

View File

@@ -5,6 +5,7 @@ import (
"fmt"
"github.com/chrislusf/seaweedfs/weed/pb"
"github.com/chrislusf/seaweedfs/weed/wdclient"
"math"
"google.golang.org/grpc"
@@ -133,6 +134,7 @@ func (fs *FilerSink) CreateEntry(key string, entry *filer_pb.Entry, signatures [
Attributes: entry.Attributes,
Chunks: replicatedChunks,
Content: entry.Content,
RemoteEntry: entry.RemoteEntry,
},
IsFromOtherCluster: true,
Signatures: signatures,
@@ -227,11 +229,11 @@ func (fs *FilerSink) UpdateEntry(key string, oldEntry *filer_pb.Entry, newParent
}
func compareChunks(lookupFileIdFn wdclient.LookupFileIdFunctionType, oldEntry, newEntry *filer_pb.Entry) (deletedChunks, newChunks []*filer_pb.FileChunk, err error) {
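// the two new arguments are assumed to be an offset/size byte range;
// 0 and math.MaxInt64 ask ResolveChunkManifest to resolve the entire file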
aData, aMeta, aErr := filer.ResolveChunkManifest(lookupFileIdFn, oldEntry.Chunks)
aData, aMeta, aErr := filer.ResolveChunkManifest(lookupFileIdFn, oldEntry.Chunks, 0, math.MaxInt64)
if aErr != nil {
return nil, nil, aErr
}
bData, bMeta, bErr := filer.ResolveChunkManifest(lookupFileIdFn, newEntry.Chunks)
bData, bMeta, bErr := filer.ResolveChunkManifest(lookupFileIdFn, newEntry.Chunks, 0, math.MaxInt64)
if bErr != nil {
return nil, nil, bErr
}

View File

@@ -4,6 +4,8 @@ import (
"fmt"
"github.com/chrislusf/seaweedfs/weed/filer"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/pb"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/pb/iam_pb"
xhttp "github.com/chrislusf/seaweedfs/weed/s3api/http"
"github.com/chrislusf/seaweedfs/weed/s3api/s3_constants"
@@ -51,8 +53,12 @@ func NewIdentityAccessManagement(option *S3ApiServerOption) *IdentityAccessManag
return iam
}
func (iam *IdentityAccessManagement) loadS3ApiConfigurationFromFiler(option *S3ApiServerOption) error {
content, err := filer.ReadContent(option.Filer, filer.IamConfigDirecotry, filer.IamIdentityFile)
func (iam *IdentityAccessManagement) loadS3ApiConfigurationFromFiler(option *S3ApiServerOption) (err error) {
var content []byte
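// read the config through the filer's gRPC API; the removed filer.ReadContent
// path went over HTTP (an assumption based on the old helper's usage)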
err = pb.WithFilerClient(option.Filer, option.GrpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
content, err = filer.ReadInsideFiler(client, filer.IamConfigDirecotry, filer.IamIdentityFile)
return err
})
if err != nil {
return fmt.Errorf("read S3 config: %v", err)
}
@@ -150,7 +156,7 @@ func (iam *IdentityAccessManagement) Auth(f http.HandlerFunc, action Action) htt
f(w, r)
return
}
writeErrorResponse(w, errCode, r.URL)
s3err.WriteErrorResponse(w, errCode, r)
}
}
@@ -190,7 +196,7 @@ func (iam *IdentityAccessManagement) authRequest(r *http.Request, action Action)
return identity, s3Err
}
glog.V(3).Infof("user name: %v actions: %v", identity.Name, identity.Actions)
glog.V(3).Infof("user name: %v actions: %v, action: %v", identity.Name, identity.Actions, action)
bucket, _ := getBucketAndObject(r)

View File

@@ -1,13 +1,11 @@
package s3api
import (
"context"
"fmt"
"github.com/chrislusf/seaweedfs/weed/filer"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/pb"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"io"
"time"
"github.com/chrislusf/seaweedfs/weed/util"
)
func (s3a *S3ApiServer) subscribeMetaEvents(clientName string, prefix string, lastTsNs int64) error {
@@ -34,37 +32,8 @@ func (s3a *S3ApiServer) subscribeMetaEvents(clientName string, prefix string, la
return nil
}
for {
err := s3a.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
stream, err := client.SubscribeMetadata(ctx, &filer_pb.SubscribeMetadataRequest{
ClientName: clientName,
PathPrefix: prefix,
SinceNs: lastTsNs,
})
if err != nil {
return fmt.Errorf("subscribe: %v", err)
}
for {
resp, listenErr := stream.Recv()
if listenErr == io.EOF {
return nil
}
if listenErr != nil {
return listenErr
}
if err := processEventFn(resp); err != nil {
glog.Fatalf("process %v: %v", resp, err)
}
lastTsNs = resp.TsNs
}
})
if err != nil {
glog.Errorf("subscribing filer meta change: %v", err)
}
time.Sleep(time.Second)
}
return util.Retry("followIamChanges", func() error {
return pb.WithFilerClientFollowMetadata(s3a, clientName, prefix, lastTsNs, 0, processEventFn, true)
})
}

View File

@@ -85,11 +85,17 @@ func (iam *IdentityAccessManagement) calculateSeedSignature(r *http.Request) (cr
return nil, "", "", time.Time{}, errCode
}
// Verify if the access key id matches.
_, cred, found := iam.lookupByAccessKey(signV4Values.Credential.accessKey)
identity, cred, found := iam.lookupByAccessKey(signV4Values.Credential.accessKey)
if !found {
return nil, "", "", time.Time{}, s3err.ErrInvalidAccessKeyID
}
bucket, _ := getBucketAndObject(r)
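// a streaming (signature v4 chunked) upload always writes, so the access
// key's identity must have Write permission on the target bucket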
if !identity.canDo("Write", bucket) {
errCode = s3err.ErrAccessDenied
return
}
// Verify if region is valid.
region = signV4Values.Credential.scope.region

View File

@@ -35,6 +35,9 @@ func (s3a *S3ApiServer) createMultipartUpload(input *s3.CreateMultipartUploadInp
entry.Extended = make(map[string][]byte)
}
entry.Extended["key"] = []byte(*input.Key)
for k, v := range input.Metadata {
entry.Extended[k] = []byte(*v)
}
}); err != nil {
glog.Errorf("NewMultipartUpload error: %v", err)
return nil, s3err.ErrInternalError
@@ -68,6 +71,12 @@ func (s3a *S3ApiServer) completeMultipartUpload(input *s3.CompleteMultipartUploa
return nil, s3err.ErrNoSuchUpload
}
pentry, err := s3a.getEntry(s3a.genUploadsFolder(*input.Bucket), *input.UploadId)
if err != nil {
glog.Errorf("completeMultipartUpload %s %s error: %v", *input.Bucket, *input.UploadId, err)
return nil, s3err.ErrNoSuchUpload
}
var finalParts []*filer_pb.FileChunk
var offset int64
@@ -103,7 +112,16 @@ func (s3a *S3ApiServer) completeMultipartUpload(input *s3.CompleteMultipartUploa
dirName = dirName[:len(dirName)-1]
}
err = s3a.mkFile(dirName, entryName, finalParts)
err = s3a.mkFile(dirName, entryName, finalParts, func(entry *filer_pb.Entry) {
if entry.Extended == nil {
entry.Extended = make(map[string][]byte)
}
for k, v := range pentry.Extended {
if k != "key" {
entry.Extended[k] = v
}
}
})
if err != nil {
glog.Errorf("completeMultipartUpload %s/%s error: %v", dirName, entryName, err)

View File

@@ -3,6 +3,7 @@ package s3api
import (
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/s3"
"github.com/chrislusf/seaweedfs/weed/s3api/s3err"
"testing"
"time"
)
@@ -19,7 +20,7 @@ func TestInitiateMultipartUploadResult(t *testing.T) {
},
}
encoded := string(encodeResponse(response))
encoded := string(s3err.EncodeXMLResponse(response))
if encoded != expected {
t.Errorf("unexpected output: %s\nexpecting:%s", encoded, expected)
}
@@ -41,7 +42,7 @@ func TestListPartsResult(t *testing.T) {
},
}
encoded := string(encodeResponse(response))
encoded := string(s3err.EncodeXMLResponse(response))
if encoded != expected {
t.Errorf("unexpected output: %s\nexpecting:%s", encoded, expected)
}

View File

@@ -15,9 +15,9 @@ func (s3a *S3ApiServer) mkdir(parentDirectoryPath string, dirName string, fn fun
}
func (s3a *S3ApiServer) mkFile(parentDirectoryPath string, fileName string, chunks []*filer_pb.FileChunk) error {
func (s3a *S3ApiServer) mkFile(parentDirectoryPath string, fileName string, chunks []*filer_pb.FileChunk, fn func(entry *filer_pb.Entry)) error {
return filer_pb.MkFile(s3a, parentDirectoryPath, fileName, chunks)
return filer_pb.MkFile(s3a, parentDirectoryPath, fileName, chunks, fn)
}
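A hypothetical caller of the new mkFile signature, mirroring the completeMultipartUpload change above (the metadata key is illustrative):

err := s3a.mkFile(dirName, entryName, finalParts, func(entry *filer_pb.Entry) {
	if entry.Extended == nil {
		entry.Extended = make(map[string][]byte)
	}
	entry.Extended["x-amz-meta-owner"] = []byte("alice") // illustrative metadata key
})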

Some files were not shown because too many files have changed in this diff