mount: add mutex to DirectoryHandle to fix race condition (#7674)

* mount: add mutex to DirectoryHandle to fix race condition

When using Ganesha NFS on top of FUSE mount, ls operations would hang
forever on directories with hundreds of files. This was caused by a
race condition in DirectoryHandle where multiple concurrent readdir
operations could modify shared state (entryStream, entryStreamOffset,
isFinished) without synchronization.

The fix adds a mutex to DirectoryHandle and holds it for the entire
duration of doReadDirectory. This serializes concurrent readdir calls
on the same handle, which is the correct behavior for a directory
handle and fixes the race condition.

Key changes:
- Added sync.Mutex to DirectoryHandle struct
- Lock the mutex at the start of doReadDirectory
- This ensures thread-safe access to entryStream and other state

The lock is per-handle (not global), so different directories can
still be listed concurrently. Only concurrent operations on the
same directory handle are serialized.

Fixes: https://github.com/seaweedfs/seaweedfs/issues/7672

* mount: add mutex to DirectoryHandle to fix race condition

When using Ganesha NFS on top of FUSE mount, ls operations would hang
forever on directories with hundreds of files. This was caused by a
race condition in DirectoryHandle where multiple concurrent readdir
operations could modify shared state (entryStream, entryStreamOffset,
isFinished) without synchronization.

The fix adds a mutex to DirectoryHandle and holds it for the entire
duration of doReadDirectory. This serializes concurrent readdir calls
on the same handle, which is the correct behavior for a directory
handle and fixes the race condition.

Key changes:
- Added sync.Mutex to DirectoryHandle struct
- Lock the mutex at the start of doReadDirectory
- Optimized reset() to reuse slice capacity and allow GC of old entries

The lock is per-handle (not global), so different directories can
still be listed concurrently. Only concurrent operations on the
same directory handle are serialized.

Fixes: https://github.com/seaweedfs/seaweedfs/issues/7672
This commit is contained in:
Chris Lu
2025-12-08 21:38:09 -08:00
committed by GitHub
parent ff4855dcbe
commit 9196696278

View File

@@ -18,22 +18,28 @@ const (
 	directoryStreamBaseOffset = 2 // . & ..
 )
 
+// DirectoryHandle represents an open directory handle.
+// It maintains state for directory listing pagination and is protected by a mutex
+// to handle concurrent readdir operations from NFS-Ganesha and other multi-threaded clients.
 type DirectoryHandle struct {
+	sync.Mutex
 	isFinished        bool
 	entryStream       []*filer.Entry
 	entryStreamOffset uint64
 }
 
 func (dh *DirectoryHandle) reset() {
-	*dh = DirectoryHandle{
-		isFinished:        false,
-		entryStream:       []*filer.Entry{},
-		entryStreamOffset: directoryStreamBaseOffset,
-	}
+	dh.isFinished = false
+	// Nil out pointers to allow garbage collection of old entries,
+	// then reuse the slice's capacity to avoid re-allocations.
+	for i := range dh.entryStream {
+		dh.entryStream[i] = nil
+	}
+	dh.entryStream = dh.entryStream[:0]
+	dh.entryStreamOffset = directoryStreamBaseOffset
 }
 
 type DirectoryHandleToInode struct {
-	// shares the file handle id sequencer with FileHandleToInode{nextFh}
 	sync.Mutex
 	dir2inode map[DirectoryHandleId]*DirectoryHandle
 }
@@ -45,14 +51,14 @@ func NewDirectoryHandleToInode() *DirectoryHandleToInode {
} }
func (wfs *WFS) AcquireDirectoryHandle() (DirectoryHandleId, *DirectoryHandle) { func (wfs *WFS) AcquireDirectoryHandle() (DirectoryHandleId, *DirectoryHandle) {
fh := FileHandleId(util.RandomUint64()) fh := DirectoryHandleId(util.RandomUint64())
wfs.dhMap.Lock() wfs.dhMap.Lock()
defer wfs.dhMap.Unlock() defer wfs.dhMap.Unlock()
dh := new(DirectoryHandle) dh := &DirectoryHandle{}
dh.reset() dh.reset()
wfs.dhMap.dir2inode[DirectoryHandleId(fh)] = dh wfs.dhMap.dir2inode[fh] = dh
return DirectoryHandleId(fh), dh return fh, dh
} }
func (wfs *WFS) GetDirectoryHandle(dhid DirectoryHandleId) *DirectoryHandle { func (wfs *WFS) GetDirectoryHandle(dhid DirectoryHandleId) *DirectoryHandle {
@@ -61,7 +67,7 @@ func (wfs *WFS) GetDirectoryHandle(dhid DirectoryHandleId) *DirectoryHandle {
 	if dh, found := wfs.dhMap.dir2inode[dhid]; found {
 		return dh
 	}
-	dh := new(DirectoryHandle)
+	dh := &DirectoryHandle{}
 	dh.reset()
 	wfs.dhMap.dir2inode[dhid] = dh
 	return dh
@@ -137,7 +143,13 @@ func (wfs *WFS) ReadDirPlus(cancel <-chan struct{}, input *fuse.ReadIn, out *fuse.DirEntryList) fuse.Status {
 }
 
 func (wfs *WFS) doReadDirectory(input *fuse.ReadIn, out *fuse.DirEntryList, isPlusMode bool) fuse.Status {
+	// Get the directory handle and lock it for the duration of this operation.
+	// This serializes concurrent readdir calls on the same handle, fixing the
+	// race condition that caused hangs with NFS-Ganesha.
 	dh := wfs.GetDirectoryHandle(DirectoryHandleId(input.Fh))
+	dh.Lock()
+	defer dh.Unlock()
+
 	if input.Offset == 0 {
 		dh.reset()
 	} else if dh.isFinished && input.Offset >= dh.entryStreamOffset {
@@ -154,7 +166,7 @@ func (wfs *WFS) doReadDirectory(input *fuse.ReadIn, out *fuse.DirEntryList, isPlusMode bool) fuse.Status {
 	}
 
 	var dirEntry fuse.DirEntry
-	processEachEntryFn := func(entry *filer.Entry) (bool, error) {
+	processEachEntryFn := func(entry *filer.Entry) bool {
 		dirEntry.Name = entry.Name()
 		dirEntry.Mode = toSyscallMode(entry.Mode)
 		inode := wfs.inodeToPath.Lookup(dirPath.Child(dirEntry.Name), entry.Crtime.Unix(), entry.IsDirectory(), len(entry.HardLinkId) > 0, entry.Inode, isPlusMode)
@@ -162,13 +174,13 @@ func (wfs *WFS) doReadDirectory(input *fuse.ReadIn, out *fuse.DirEntryList, isPlusMode bool) fuse.Status {
 		if !isPlusMode {
 			if !out.AddDirEntry(dirEntry) {
 				isEarlyTerminated = true
-				return false, nil
+				return false
 			}
 		} else {
 			entryOut := out.AddDirLookupEntry(dirEntry)
 			if entryOut == nil {
 				isEarlyTerminated = true
-				return false, nil
+				return false
 			}
 			if fh, found := wfs.fhMap.FindFileHandle(inode); found {
 				glog.V(4).Infof("readdir opened file %s", dirPath.Child(dirEntry.Name))
@@ -176,7 +188,7 @@ func (wfs *WFS) doReadDirectory(input *fuse.ReadIn, out *fuse.DirEntryList, isPlusMode bool) fuse.Status {
 			}
 			wfs.outputFilerEntry(entryOut, inode, entry)
 		}
-		return true, nil
+		return true
 	}
 
 	if input.Offset < directoryStreamBaseOffset {
@@ -207,7 +219,7 @@ func (wfs *WFS) doReadDirectory(input *fuse.ReadIn, out *fuse.DirEntryList, isPlusMode bool) fuse.Status {
 		entryCurrentIndex := input.Offset - dh.entryStreamOffset
 		for uint64(len(dh.entryStream)) > entryCurrentIndex {
 			entry := dh.entryStream[entryCurrentIndex]
-			if process, _ := processEachEntryFn(entry); process {
+			if processEachEntryFn(entry) {
 				lastEntryName = entry.Name()
 				entryCurrentIndex++
 			} else {
@@ -217,14 +229,16 @@ func (wfs *WFS) doReadDirectory(input *fuse.ReadIn, out *fuse.DirEntryList, isPlusMode bool) fuse.Status {
 		}
 	}
 
-	var err error
-	if err = meta_cache.EnsureVisited(wfs.metaCache, wfs, dirPath); err != nil {
+	if err := meta_cache.EnsureVisited(wfs.metaCache, wfs, dirPath); err != nil {
 		glog.Errorf("dir ReadDirAll %s: %v", dirPath, err)
 		return fuse.EIO
 	}
 	listErr := wfs.metaCache.ListDirectoryEntries(context.Background(), dirPath, lastEntryName, false, int64(math.MaxInt32), func(entry *filer.Entry) (bool, error) {
 		dh.entryStream = append(dh.entryStream, entry)
-		return processEachEntryFn(entry)
+		if !processEachEntryFn(entry) {
+			return false, nil
+		}
+		return true, nil
 	})
 	if listErr != nil {
 		glog.Errorf("list meta cache: %v", listErr)