Files
seaweedFS/weed/mount/weedfs_filehandle.go
Chris Lu e47054a7e7 mount: improve small file write performance (#8769)
* mount: defer file creation gRPC to flush time for faster small file writes

When creating a file via FUSE Create(), skip the synchronous gRPC
CreateEntry call to the filer. Instead, allocate the inode and build
the entry locally, deferring the filer create to the Flush/Release path
where flushMetadataToFiler already sends a CreateEntry with chunk data.

This eliminates one synchronous gRPC round-trip per file during creation.
For workloads with many small files (e.g. 30K files), this reduces the
per-file overhead from ~2 gRPC calls to ~1.

Mknod retains synchronous filer creation since it has no file handle
and thus no flush path.

* mount: use bounded worker pool for async flush operations

Replace unbounded goroutine spawning in writebackCache async flush
with a fixed-size worker pool backed by a channel. When many files
are closed rapidly (e.g., cp -r of 30K files), the previous approach
spawned one goroutine per file, leading to resource contention on
gRPC/HTTP connections and high goroutine overhead.

The worker pool size matches ConcurrentWriters (default 128), which
provides good parallelism while bounding resource usage. Work items
are queued into a buffered channel and processed by persistent worker
goroutines.

* mount: fix deferred create cache visibility and async flush race

Three fixes for the deferred create and async flush changes:

1. Insert a local placeholder entry into the metadata cache during
   deferred file creation so that maybeLoadEntry() can find the file
   for duplicate-create checks, stat, and readdir. Uses InsertEntry
   directly (not applyLocalMetadataEvent) to avoid triggering the
   directory hot-threshold eviction that would wipe the entry.

2. Fix race in ReleaseHandle where asyncFlushWg.Add(1) and the
   channel send happened after pendingAsyncFlushMu was unlocked.
   A concurrent WaitForAsyncFlush could observe a zero counter,
   close the channel, and cause a send-on-closed panic. Move Add(1)
   before the unlock; keep the send after unlock to avoid deadlock
   with workers that acquire the same mutex during cleanup.

3. Update TestCreateCreatesAndOpensFile to flush the file handle
   before verifying the CreateEntry gRPC call, since file creation
   is now deferred to flush time.
2026-03-24 20:31:53 -07:00

87 lines
3.3 KiB
Go

package mount
import (
"github.com/seaweedfs/go-fuse/v2/fuse"
"github.com/seaweedfs/seaweedfs/weed/glog"
"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
"github.com/seaweedfs/seaweedfs/weed/util"
)
// AcquireHandle opens a FUSE handle for inode, enforcing WORM and unix
// permission checks before registering the handle in fhMap. It returns
// the new handle and fuse.OK on success, or nil and an error status.
func (wfs *WFS) AcquireHandle(inode uint64, flags, uid, gid uint32) (fileHandle *FileHandle, status fuse.Status) {
	// If there is an in-flight async flush for this inode, wait for it to
	// complete before reopening. Otherwise the new handle would be built
	// from pre-close filer metadata and its next flush could overwrite the
	// data that was just written asynchronously.
	wfs.waitForPendingAsyncFlush(inode)

	var (
		entry *filer_pb.Entry
		path  util.FullPath
	)
	path, _, entry, status = wfs.maybeReadEntry(inode)
	if status != fuse.OK {
		return nil, status
	}

	// WORM-enforced entries reject any write-capable open.
	if wormEnforced, _ := wfs.wormEnforcedForEntry(path, entry); wormEnforced && flags&fuse.O_ANYWRITE != 0 {
		return nil, fuse.EPERM
	}

	// Check unix permission bits for the requested access mode.
	if entry != nil && entry.Attributes != nil {
		mask := openFlagsToAccessMask(flags)
		if mask != 0 && !hasAccess(uid, gid, entry.Attributes.Uid, entry.Attributes.Gid, entry.Attributes.FileMode, mask) {
			return nil, fuse.EACCES
		}
	}

	// need to AcquireFileHandle again to ensure correct handle counter
	fileHandle = wfs.fhMap.AcquireFileHandle(wfs, inode, entry)
	fileHandle.RememberPath(path)
	return fileHandle, fuse.OK
}
// ReleaseHandle is called from FUSE Release. For handles with a pending
// async flush, the map removal and the pendingAsyncFlush registration are
// done under a single lock hold so that a concurrent AcquireHandle cannot
// slip through the gap between the two (P1-1 TOCTOU fix).
//
// The handle intentionally stays in fhMap during the drain so that rename
// and unlink can still find it via FindFileHandle (P1-2 fix). It is
// removed from fhMap only after the drain completes (RemoveFileHandle).
func (wfs *WFS) ReleaseHandle(handleId FileHandleId) {
	// Hold pendingAsyncFlushMu across the counter decrement and the
	// pending-flush registration. Lock ordering: pendingAsyncFlushMu → fhMap.
	wfs.pendingAsyncFlushMu.Lock()
	fhToRelease := wfs.fhMap.ReleaseByHandle(handleId)
	if fhToRelease != nil && fhToRelease.asyncFlushPending {
		// Register a per-inode done channel; waitForPendingAsyncFlush
		// blocks on it until the flush worker signals completion.
		done := make(chan struct{})
		wfs.pendingAsyncFlush[fhToRelease.inode] = done
		// Add(1) while holding the mutex so WaitForAsyncFlush cannot
		// observe a zero counter and close the channel before we send.
		wfs.asyncFlushWg.Add(1)
		wfs.pendingAsyncFlushMu.Unlock()
		// Send after unlock to avoid deadlock — workers acquire the
		// same mutex during cleanup.
		wfs.asyncFlushCh <- &asyncFlushItem{fh: fhToRelease, done: done}
		return
	}
	wfs.pendingAsyncFlushMu.Unlock()
	// No async flush pending: release the handle synchronously.
	if fhToRelease != nil {
		fhToRelease.ReleaseHandle()
	}
}
// waitForPendingAsyncFlush blocks until any in-flight async flush for
// the given inode completes. Called from AcquireHandle before building
// new handle state, so the filer metadata reflects the flushed data.
func (wfs *WFS) waitForPendingAsyncFlush(inode uint64) {
	// Snapshot the done channel under the lock, then wait outside it so
	// a slow drain does not hold up other pendingAsyncFlush users.
	wfs.pendingAsyncFlushMu.Lock()
	done, ok := wfs.pendingAsyncFlush[inode]
	wfs.pendingAsyncFlushMu.Unlock()
	if !ok {
		return
	}
	glog.V(3).Infof("waitForPendingAsyncFlush: waiting for inode %d", inode)
	<-done
}
// GetHandle looks up the file handle registered for handleId in fhMap.
func (wfs *WFS) GetHandle(handleId FileHandleId) *FileHandle {
	fh := wfs.fhMap.GetFileHandle(handleId)
	return fh
}