Files
seaweedFS/weed/mount/filehandle.go
Chris Lu cca1555cc7 mount: implement create for rsync temp files (#8749)
* mount: implement create for rsync temp files

* mount: move access implementation out of unsupported

* mount: tighten access checks

* mount: log access group lookup failures

* mount: reset dirty pages on truncate

* mount: tighten create and root access handling

* mount: handle existing creates before quota checks

* mount: restrict access fallback when group lookup fails

When lookupSupplementaryGroupIDs returns an error, the previous code
fell through to checking only the "other" permission bits, which could
overgrant access.  Require both group and other permission classes to
satisfy the mask so access is never broader than intended.

* mount: guard against nil entry in Create existing-file path

maybeLoadEntry can return OK with a nil entry or nil Attributes in
edge cases.  Check before dereferencing to prevent a panic.

* mount: reopen existing file on create race without O_EXCL

When createRegularFile returns EEXIST because another process won the
race, and O_EXCL is not set, reload the winner's entry and open it
instead of propagating the error to the caller.

* mount: check parent directory permission in createRegularFile

Verify the caller has write+search (W_OK|X_OK) permission on the
parent directory before creating a file.  This applies to both
Create and Mknod.  Update test fixture mount mode to 0o777 so the
existing tests pass with the new check.

* mount: enforce file permission bits in AcquireHandle

Map the open flags (O_RDONLY/O_WRONLY/O_RDWR) to an access mask and
call hasAccess before handing out a file handle.  This makes
AcquireHandle the single source of truth for mode-based access
control across Open, Create-existing, and Create-new paths.

---------

Co-authored-by: Copilot <copilot@github.com>
2026-03-24 11:43:41 -07:00

200 lines
5.4 KiB
Go

package mount
import (
"os"
"sync"
"github.com/seaweedfs/go-fuse/v2/fuse"
"github.com/seaweedfs/seaweedfs/weed/filer"
"github.com/seaweedfs/seaweedfs/weed/glog"
"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
"github.com/seaweedfs/seaweedfs/weed/util"
)
// FileHandleId uniquely identifies an open file handle within this mount.
type FileHandleId uint64

// IsDebugFileReadWrite, when true, mirrors file content to a local file
// under /tmp/sw/ so reads/writes can be inspected for debugging.
var IsDebugFileReadWrite = false

// FileHandle carries the per-open state of a file: the locked entry, the
// write buffer (dirty pages), the reader, and bookkeeping used to keep
// the path resolvable and chunk offsets cached.
type FileHandle struct {
	fh      FileHandleId // handle id handed back to the FUSE layer
	counter int64        // reference count of concurrent opens sharing this handle

	entry           *LockedEntry
	entryLock       sync.RWMutex
	entryChunkGroup *filer.ChunkGroup // resolved chunk group used for reads

	inode uint64
	wfs   *WFS

	// cache file has been written to
	dirtyMetadata bool
	dirtyPages    *PageWriter

	reader      *filer.ChunkReadAt
	contentType string

	asyncFlushPending bool   // set in writebackCache mode to defer flush to Release
	asyncFlushUid     uint32 // saved uid for deferred metadata flush
	asyncFlushGid     uint32 // saved gid for deferred metadata flush

	savedDir  string // last known parent path if inode-to-path state is forgotten
	savedName string // last known file name if inode-to-path state is forgotten

	isDeleted bool

	// RDMA chunk offset cache for performance optimization
	chunkOffsetCache []int64
	chunkCacheValid  bool
	chunkCacheLock   sync.RWMutex

	// for debugging; may be nil if the mirror file could not be opened
	mirrorFile *os.File
}
// newFileHandle constructs a FileHandle for the given inode with an initial
// reference count of 1.  entry may be nil (e.g. the entry is loaded later);
// when non-nil it is installed via SetEntry so file size and chunk group
// are initialized.
func newFileHandle(wfs *WFS, handleId FileHandleId, inode uint64, entry *filer_pb.Entry) *FileHandle {
	fh := &FileHandle{
		fh:      handleId,
		counter: 1,
		inode:   inode,
		wfs:     wfs,
	}
	// dirtyPages: newContinuousDirtyPages(file, writeOnly),
	fh.dirtyPages = newPageWriter(fh, wfs.option.ChunkSizeLimit)
	fh.entry = &LockedEntry{
		Entry: entry,
	}
	if entry != nil {
		fh.SetEntry(entry)
	}

	if IsDebugFileReadWrite {
		// entry may legitimately be nil here (see the guard above);
		// dereferencing entry.Name would panic, so only open the mirror
		// file when we actually have a name.  On failure fh.mirrorFile
		// stays nil and mirroring is simply skipped.
		if entry != nil {
			var err error
			fh.mirrorFile, err = os.OpenFile("/tmp/sw/"+entry.Name, os.O_RDWR|os.O_CREATE, 0600)
			if err != nil {
				println("failed to create mirror:", err.Error())
			}
		}
	}

	return fh
}
// FullPath resolves the handle's current full path.  The inode-to-path
// table is authoritative; if that mapping has been forgotten, fall back
// to the directory/name remembered via RememberPath.  Returns "" when
// neither source can supply a path.
func (fh *FileHandle) FullPath() util.FullPath {
	fullPath, status := fh.wfs.inodeToPath.GetPath(fh.inode)
	if status == fuse.OK {
		return fullPath
	}
	if fh.savedName == "" {
		return ""
	}
	return util.FullPath(fh.savedDir).Child(fh.savedName)
}
// RememberPath records the parent directory and file name of fullPath so
// that FullPath can still answer after the inode-to-path mapping is
// forgotten.  An empty path is ignored.
func (fh *FileHandle) RememberPath(fullPath util.FullPath) {
	if fullPath == "" {
		return
	}
	dir, name := fullPath.DirAndName()
	fh.savedDir = dir
	fh.savedName = name
}
// GetEntry returns the locked entry wrapper backing this handle.
func (fh *FileHandle) GetEntry() *LockedEntry {
	return fh.entry
}
// SetEntry installs entry as the handle's current entry: it recomputes the
// cached file size, rebuilds the chunk group used for reads, and drops the
// cumulative-offset cache.  Passing nil is a programming error and aborts
// the process.
func (fh *FileHandle) SetEntry(entry *filer_pb.Entry) {
	if entry != nil {
		// Entries loaded from the filer can surface with nil Attributes in
		// edge cases; allocate them so the FileSize assignment below cannot
		// panic.
		if entry.Attributes == nil {
			entry.Attributes = &filer_pb.FuseAttributes{}
		}
		fileSize := filer.FileSize(entry)
		entry.Attributes.FileSize = fileSize
		var resolveManifestErr error
		fh.entryChunkGroup, resolveManifestErr = filer.NewChunkGroup(fh.wfs.LookupFn(), fh.wfs.chunkCache, entry.Chunks, fh.wfs.option.ConcurrentReaders)
		if resolveManifestErr != nil {
			glog.Warningf("failed to resolve manifest chunks in %+v", entry)
		}
	} else {
		glog.Fatalf("setting file handle entry to nil")
	}
	fh.entry.SetEntry(entry)

	// Invalidate chunk offset cache since chunks may have changed
	fh.invalidateChunkCache()
}
// ResetDirtyPages discards all buffered-but-unflushed writes (used e.g.
// on truncate): the page writer is destroyed and replaced with a fresh
// one, and the dirty-metadata and content-type markers are cleared.
func (fh *FileHandle) ResetDirtyPages() {
	fh.dirtyPages.Destroy()
	fh.dirtyPages = newPageWriter(fh, fh.wfs.option.ChunkSizeLimit)
	fh.dirtyMetadata = false
	fh.contentType = ""
}
// UpdateEntry applies fn to the underlying entry (under the entry's own
// locking) and returns the updated entry.  The cumulative-offset cache is
// dropped because fn may have altered the chunk list.
func (fh *FileHandle) UpdateEntry(fn func(entry *filer_pb.Entry)) *filer_pb.Entry {
	updated := fh.entry.UpdateEntry(fn)
	fh.invalidateChunkCache() // entry may have been modified
	return updated
}
// AddChunks appends newly written chunks to the entry and invalidates the
// cached cumulative chunk offsets, which the new chunks have shifted.
func (fh *FileHandle) AddChunks(chunks []*filer_pb.FileChunk) {
	fh.entry.AppendChunks(chunks)
	fh.invalidateChunkCache() // new chunks change cumulative offsets
}
// ReleaseHandle tears down the handle's write buffers under the handle's
// exclusive lock.  Called when the last reference to the handle is
// released.
func (fh *FileHandle) ReleaseHandle() {
	fhActiveLock := fh.wfs.fhLockTable.AcquireLock("ReleaseHandle", fh.fh, util.ExclusiveLock)
	defer fh.wfs.fhLockTable.ReleaseLock(fh.fh, fhActiveLock)

	fh.dirtyPages.Destroy()
	if IsDebugFileReadWrite {
		// mirrorFile is nil when opening it failed in newFileHandle (or the
		// handle was created with a nil entry), so guard before closing.
		if fh.mirrorFile != nil {
			fh.mirrorFile.Close()
		}
	}
}
// lessThan orders chunks by modification time, breaking ties by file key
// so that the ordering is total and deterministic.
func lessThan(a, b *filer_pb.FileChunk) bool {
	if a.ModifiedTsNs != b.ModifiedTsNs {
		return a.ModifiedTsNs < b.ModifiedTsNs
	}
	return a.Fid.FileKey < b.Fid.FileKey
}
// getCumulativeOffsets returns the cumulative byte offsets for chunks:
// element i holds the total size of chunks[0:i], so the result has
// len(chunks)+1 entries.  The offsets are cached on the handle; the cache
// is trusted only while its length matches the current chunk count and it
// has not been explicitly invalidated.  Callers always receive a private
// copy, so they may use it without holding any lock.
func (fh *FileHandle) getCumulativeOffsets(chunks []*filer_pb.FileChunk) []int64 {
	// Fast path: serve from the cache under the read lock.
	fh.chunkCacheLock.RLock()
	cached := fh.copyCachedOffsetsLocked(len(chunks))
	fh.chunkCacheLock.RUnlock()
	if cached != nil {
		return cached
	}

	fh.chunkCacheLock.Lock()
	defer fh.chunkCacheLock.Unlock()

	// Another goroutine may have filled the cache while we waited for the
	// write lock.
	if cached := fh.copyCachedOffsetsLocked(len(chunks)); cached != nil {
		return cached
	}

	// Compute cumulative offsets from scratch.
	offsets := make([]int64, len(chunks)+1)
	var total int64
	for i, chunk := range chunks {
		total += int64(chunk.Size)
		offsets[i+1] = total
	}

	// Store a private copy in the cache and return the freshly built slice.
	fh.chunkOffsetCache = make([]int64, len(offsets))
	copy(fh.chunkOffsetCache, offsets)
	fh.chunkCacheValid = true
	return offsets
}

// copyCachedOffsetsLocked returns a copy of the cached offsets when the
// cache is valid for numChunks chunks, or nil otherwise.  The caller must
// hold chunkCacheLock (read or write).
func (fh *FileHandle) copyCachedOffsetsLocked(numChunks int) []int64 {
	if !fh.chunkCacheValid || len(fh.chunkOffsetCache) != numChunks+1 {
		return nil
	}
	out := make([]int64, len(fh.chunkOffsetCache))
	copy(out, fh.chunkOffsetCache)
	return out
}
// invalidateChunkCache drops the cached cumulative offsets.  It must be
// called whenever the entry's chunk list may have changed so that stale
// offsets are never served.
func (fh *FileHandle) invalidateChunkCache() {
	fh.chunkCacheLock.Lock()
	defer fh.chunkCacheLock.Unlock()
	fh.chunkOffsetCache = nil
	fh.chunkCacheValid = false
}