Files
seaweedFS/weed/mount/weedfs_filehandle.go
Chris Lu d5ee35c8df Fix S3 delete for non-empty directory markers (#8740)
* Fix S3 delete for non-empty directory markers

* Address review feedback on directory marker deletes

* Stabilize FUSE concurrent directory operations
2026-03-23 13:35:16 -07:00

89 lines
3.1 KiB
Go

package mount
import (
"github.com/seaweedfs/go-fuse/v2/fuse"
"github.com/seaweedfs/seaweedfs/weed/glog"
"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
"github.com/seaweedfs/seaweedfs/weed/util"
)
// AcquireHandle opens (or re-opens) a FileHandle for the given inode,
// returning fuse.EPERM for write-capable opens of WORM-enforced entries.
// uid/gid are currently unused but kept for the FUSE call signature.
func (wfs *WFS) AcquireHandle(inode uint64, flags, uid, gid uint32) (fileHandle *FileHandle, status fuse.Status) {
	// Block until any async flush for this inode has drained; otherwise the
	// new handle would be built from stale pre-close filer metadata and its
	// next flush could clobber the data that was just written asynchronously.
	wfs.waitForPendingAsyncFlush(inode)

	var (
		entry *filer_pb.Entry
		path  util.FullPath
	)
	path, _, entry, status = wfs.maybeReadEntry(inode)
	if status != fuse.OK {
		return
	}
	// WORM-protected entries must reject any open that can write.
	if enforced, _ := wfs.wormEnforcedForEntry(path, entry); enforced && flags&fuse.O_ANYWRITE != 0 {
		return nil, fuse.EPERM
	}
	// Go through fhMap so the per-handle reference counter stays correct.
	fileHandle = wfs.fhMap.AcquireFileHandle(wfs, inode, entry)
	fileHandle.RememberPath(path)
	return
}
// ReleaseHandle is called from FUSE Release. For handles with a pending
// async flush, the map removal and the pendingAsyncFlush registration are
// done under a single lock hold so that a concurrent AcquireHandle cannot
// slip through the gap between the two (P1-1 TOCTOU fix).
//
// The handle intentionally stays in fhMap during the drain so that rename
// and unlink can still find it via FindFileHandle (P1-2 fix). It is
// removed from fhMap only after the drain completes (RemoveFileHandle).
func (wfs *WFS) ReleaseHandle(handleId FileHandleId) {
	// Hold pendingAsyncFlushMu across the counter decrement and the
	// pending-flush registration. Lock ordering: pendingAsyncFlushMu → fhMap.
	wfs.pendingAsyncFlushMu.Lock()
	fhToRelease := wfs.fhMap.ReleaseByHandle(handleId)
	// nil means the handle counter has not dropped to zero (or the id is
	// unknown); asyncFlushPending selects the async-drain path below.
	if fhToRelease != nil && fhToRelease.asyncFlushPending {
		// done is closed when the drain finishes; waitForPendingAsyncFlush
		// blocks on it. Registered while still holding the mutex so a
		// concurrent AcquireHandle cannot miss it.
		done := make(chan struct{})
		wfs.pendingAsyncFlush[fhToRelease.inode] = done
		wfs.pendingAsyncFlushMu.Unlock()
		// asyncFlushWg lets shutdown wait for all in-flight drains.
		wfs.asyncFlushWg.Add(1)
		go func() {
			defer wfs.asyncFlushWg.Done()
			defer func() {
				// Ordering matters here:
				// Remove from fhMap first (so AcquireFileHandle creates a fresh handle).
				wfs.fhMap.RemoveFileHandle(fhToRelease.fh, fhToRelease.inode)
				// Then signal completion (unblocks waitForPendingAsyncFlush).
				close(done)
				// Finally drop the registration; waiters that already grabbed
				// the channel are satisfied by the close above.
				wfs.pendingAsyncFlushMu.Lock()
				delete(wfs.pendingAsyncFlush, fhToRelease.inode)
				wfs.pendingAsyncFlushMu.Unlock()
			}()
			wfs.completeAsyncFlush(fhToRelease)
		}()
		return
	}
	wfs.pendingAsyncFlushMu.Unlock()
	// Synchronous path: no async flush pending, release immediately.
	if fhToRelease != nil {
		fhToRelease.ReleaseHandle()
	}
}
// waitForPendingAsyncFlush blocks until any in-flight async flush for
// the given inode completes. Called from AcquireHandle before building
// new handle state, so the filer metadata reflects the flushed data.
func (wfs *WFS) waitForPendingAsyncFlush(inode uint64) {
	// Snapshot the channel under the lock; the drain goroutine closes it
	// before deleting the map entry, so waiting outside the lock is safe.
	wfs.pendingAsyncFlushMu.Lock()
	doneCh, pending := wfs.pendingAsyncFlush[inode]
	wfs.pendingAsyncFlushMu.Unlock()
	if !pending {
		return
	}
	glog.V(3).Infof("waitForPendingAsyncFlush: waiting for inode %d", inode)
	<-doneCh
}
// GetHandle returns the FileHandle registered under handleId, or nil when
// the map holds no entry for it.
func (wfs *WFS) GetHandle(handleId FileHandleId) *FileHandle {
	fh := wfs.fhMap.GetFileHandle(handleId)
	return fh
}