improve: large file sync throughput for remote.cache and filer.sync (#8676)
* improve large file sync throughput for remote.cache and filer.sync
Three main throughput improvements:
1. Adaptive chunk sizing for remote.cache: targets ~32 chunks per file
instead of always starting at 5MB. A 500MB file now uses ~16MB chunks
(32 chunks) instead of 5MB chunks (100 chunks), reducing per-chunk
overhead (volume assign, gRPC call, needle write) by 3x.
2. Configurable concurrency at every layer:
- remote.cache chunk concurrency: -chunkConcurrency flag (default 8)
- remote.cache S3 download concurrency: -downloadConcurrency flag
(default raised from 1 to 5 per chunk)
- filer.sync chunk concurrency: -chunkConcurrency flag (default 32)
3. S3 multipart download concurrency raised from 1 to 5: the S3 manager
downloader was using Concurrency=1, serializing all part downloads
within each chunk. This change alone can speed up per-chunk downloads by up to 5x.
The concurrency values flow through the gRPC request chain:
shell command → CacheRemoteObjectToLocalClusterRequest →
FetchAndWriteNeedleRequest → S3 downloader
Zero values in the request mean "use server defaults", maintaining
full backward compatibility with existing callers.
Ref #8481
* fix: use full maxMB for chunk size cap and remove loop guard
Address review feedback:
- Use full maxMB instead of maxMB/2 for maxChunkSize to avoid
unnecessarily limiting chunk size for very large files.
- Remove chunkSize < maxChunkSize guard from the safety loop so it
can always grow past maxChunkSize when needed to stay under 1000
chunks (e.g., extremely large files with small maxMB).
* address review feedback: help text, validation, naming, docs
- Fix help text for -chunkConcurrency and -downloadConcurrency flags
to say "0 = server default" instead of advertising specific numeric
defaults that could drift from the server implementation.
- Validate chunkConcurrency and downloadConcurrency are within int32
range before narrowing, returning a user-facing error if out of range.
- Rename ReadRemoteErr to readRemoteErr to follow Go naming conventions.
- Add doc comment to SetChunkConcurrency noting it must be called
during initialization before replication goroutines start.
- Replace doubling loop in chunk size safety check with direct
ceil(remoteSize/1000) computation to guarantee the 1000-chunk cap.
* address Copilot review: clamp concurrency, fix chunk count, clarify proto docs
- Use ceiling division for chunk count check to avoid overcounting
when file size is an exact multiple of chunk size.
- Clamp chunkConcurrency (max 1024) and downloadConcurrency (max 1024
at filer, max 64 at volume server) to prevent excessive goroutines.
- Always use ReadFileWithConcurrency when the client supports it,
falling back to the implementation's default when value is 0.
- Clarify proto comments that download_concurrency only applies when
the remote storage client supports it (currently S3).
- Include specific server defaults in help text (e.g., "0 = server
default 8") so users see the actual values in -h output.
* fix data race on executionErr and use %w for error wrapping
- Protect concurrent writes to executionErr in remote.cache worker
goroutines with a sync.Mutex to eliminate the data race.
- Use %w instead of %v in volume_grpc_remote.go error formatting
to preserve the error chain for errors.Is/errors.As callers.
This commit is contained in:
@@ -5,6 +5,7 @@ import (
|
||||
"flag"
|
||||
"fmt"
|
||||
"io"
|
||||
"math"
|
||||
"sync"
|
||||
|
||||
"github.com/seaweedfs/seaweedfs/weed/filer"
|
||||
@@ -37,7 +38,9 @@ func (c *commandRemoteCache) Help() string {
|
||||
remote.cache -dir=/xxx # sync metadata, cache content, and remove deleted files (default)
|
||||
remote.cache -dir=/xxx -cacheContent=false # sync metadata and cleanup only, no caching
|
||||
remote.cache -dir=/xxx -deleteLocalExtra=false # skip removal of local files missing from remote
|
||||
remote.cache -dir=/xxx -concurrent=32 # with custom concurrency
|
||||
remote.cache -dir=/xxx -concurrent=32 # with custom file-level concurrency
|
||||
remote.cache -dir=/xxx -chunkConcurrency=16 # parallel chunk downloads per file (0 = server default 8)
|
||||
remote.cache -dir=/xxx -downloadConcurrency=10 # S3 multipart download concurrency per chunk (0 = server default 5)
|
||||
remote.cache -dir=/xxx -include=*.pdf # only sync PDF files
|
||||
remote.cache -dir=/xxx -exclude=*.tmp # exclude temporary files
|
||||
remote.cache -dir=/xxx -dryRun=true # show what would be done without making changes
|
||||
@@ -64,6 +67,8 @@ func (c *commandRemoteCache) Do(args []string, commandEnv *CommandEnv, writer io
|
||||
cache := remoteCacheCommand.Bool("cacheContent", true, "cache file content from remote")
|
||||
deleteLocalExtra := remoteCacheCommand.Bool("deleteLocalExtra", true, "delete local files that no longer exist on remote")
|
||||
concurrency := remoteCacheCommand.Int("concurrent", 16, "concurrent file operations")
|
||||
chunkConcurrency := remoteCacheCommand.Int("chunkConcurrency", 0, "parallel chunk downloads per file (0 = server default 8)")
|
||||
downloadConcurrency := remoteCacheCommand.Int("downloadConcurrency", 0, "S3 multipart download concurrency per chunk (0 = server default 5)")
|
||||
dryRun := remoteCacheCommand.Bool("dryRun", false, "show what would be done without making changes")
|
||||
fileFiler := newFileFilter(remoteCacheCommand)
|
||||
|
||||
@@ -74,6 +79,12 @@ func (c *commandRemoteCache) Do(args []string, commandEnv *CommandEnv, writer io
|
||||
if *dir == "" {
|
||||
return fmt.Errorf("need to specify -dir option")
|
||||
}
|
||||
if *chunkConcurrency < 0 || *chunkConcurrency > math.MaxInt32 {
|
||||
return fmt.Errorf("chunkConcurrency must be between 0 and %d", math.MaxInt32)
|
||||
}
|
||||
if *downloadConcurrency < 0 || *downloadConcurrency > math.MaxInt32 {
|
||||
return fmt.Errorf("downloadConcurrency must be between 0 and %d", math.MaxInt32)
|
||||
}
|
||||
|
||||
mappings, localMountedDir, remoteStorageMountedLocation, remoteStorageConf, detectErr := detectMountInfo(commandEnv, writer, *dir)
|
||||
if detectErr != nil {
|
||||
@@ -82,10 +93,10 @@ func (c *commandRemoteCache) Do(args []string, commandEnv *CommandEnv, writer io
|
||||
}
|
||||
|
||||
// perform comprehensive sync
|
||||
return c.doComprehensiveSync(commandEnv, writer, util.FullPath(localMountedDir), remoteStorageMountedLocation, util.FullPath(*dir), remoteStorageConf, *cache, *deleteLocalExtra, *concurrency, *dryRun, fileFiler)
|
||||
return c.doComprehensiveSync(commandEnv, writer, util.FullPath(localMountedDir), remoteStorageMountedLocation, util.FullPath(*dir), remoteStorageConf, *cache, *deleteLocalExtra, *concurrency, int32(*chunkConcurrency), int32(*downloadConcurrency), *dryRun, fileFiler)
|
||||
}
|
||||
|
||||
func (c *commandRemoteCache) doComprehensiveSync(commandEnv *CommandEnv, writer io.Writer, localMountedDir util.FullPath, remoteMountedLocation *remote_pb.RemoteStorageLocation, dirToSync util.FullPath, remoteConf *remote_pb.RemoteConf, shouldCache bool, deleteLocalExtra bool, concurrency int, dryRun bool, fileFilter *FileFilter) error {
|
||||
func (c *commandRemoteCache) doComprehensiveSync(commandEnv *CommandEnv, writer io.Writer, localMountedDir util.FullPath, remoteMountedLocation *remote_pb.RemoteStorageLocation, dirToSync util.FullPath, remoteConf *remote_pb.RemoteConf, shouldCache bool, deleteLocalExtra bool, concurrency int, chunkConcurrency int32, downloadConcurrency int32, dryRun bool, fileFilter *FileFilter) error {
|
||||
|
||||
// visit remote storage
|
||||
remoteStorage, err := remote_storage.GetRemoteStorage(remoteConf)
|
||||
@@ -306,6 +317,7 @@ func (c *commandRemoteCache) doComprehensiveSync(commandEnv *CommandEnv, writer
|
||||
var wg sync.WaitGroup
|
||||
limitedConcurrentExecutor := util.NewLimitedConcurrentExecutor(concurrency)
|
||||
var executionErr error
|
||||
var execErrMu sync.Mutex
|
||||
|
||||
for _, pathToCache := range filesToCache {
|
||||
wg.Add(1)
|
||||
@@ -341,15 +353,16 @@ func (c *commandRemoteCache) doComprehensiveSync(commandEnv *CommandEnv, writer
|
||||
}
|
||||
|
||||
dir, _ := util.FullPath(pathToCacheCopy).DirAndName()
|
||||
remoteLocation := filer.MapFullPathToRemoteStorageLocation(localMountedDir, remoteMountedLocation, util.FullPath(pathToCacheCopy))
|
||||
|
||||
fmt.Fprintf(writer, "Caching %s... ", pathToCacheCopy)
|
||||
|
||||
if _, err := filer.CacheRemoteObjectToLocalCluster(commandEnv, remoteConf, remoteLocation, util.FullPath(dir), localEntry); err != nil {
|
||||
if _, err := filer.CacheRemoteObjectToLocalCluster(commandEnv, util.FullPath(dir), localEntry, chunkConcurrency, downloadConcurrency); err != nil {
|
||||
fmt.Fprintf(writer, "failed: %v\n", err)
|
||||
execErrMu.Lock()
|
||||
if executionErr == nil {
|
||||
executionErr = err
|
||||
}
|
||||
execErrMu.Unlock()
|
||||
return
|
||||
}
|
||||
fmt.Fprintf(writer, "done\n")
|
||||
|
||||
Reference in New Issue
Block a user