seaweedfs/weed/mount/filehandle.go
Chris Lu 995dfc4d5d chore: remove ~50k lines of unreachable dead code (#8913)
* chore: remove unreachable dead code across the codebase

Remove ~50,000 lines of unreachable code identified by static analysis.
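
The commit message does not name the analyzer; as one hypothetical illustration, the Go team's deadcode tool produces this kind of unreachable-function report when pointed at the module root:

	go run golang.org/x/tools/cmd/deadcode@latest ./...

Functions it lists are reachable from no main entry point (test entry points can be included with its -test flag) and are candidates for removal.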

Major removals:
- weed/filer/redis_lua: entire unused Redis Lua filer store implementation
- weed/wdclient/net2, resource_pool: unused connection/resource pool packages
- weed/plugin/worker/lifecycle: unused lifecycle plugin worker
- weed/s3api: unused S3 policy templates, presigned URL IAM, streaming copy,
  multipart IAM, key rotation, and various SSE helper functions
- weed/mq/kafka: unused partition mapping, compression, schema, and protocol functions
- weed/mq/offset: unused SQL storage and migration code
- weed/worker: unused registry, task, and monitoring functions
- weed/query: unused SQL engine, parquet scanner, and type functions
- weed/shell: unused EC proportional rebalance functions
- weed/storage/erasure_coding/distribution: unused distribution analysis functions
- Individual unreachable functions removed from 150+ files across admin,
  credential, filer, iam, kms, mount, mq, operation, pb, s3api, server,
  shell, storage, topology, and util packages

* fix(s3): reset shared memory store in IAM test to prevent flaky failure

TestLoadIAMManagerFromConfig_EmptyConfigWithFallbackKey was flaky because
the MemoryStore credential backend is a singleton registered via init().
Earlier tests that create anonymous identities pollute the shared store,
causing LookupAnonymous() to unexpectedly return true.

Fix by calling Reset() on the memory store before the test runs.
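
The pattern generalizes: any test that reads from a process-wide singleton must reset it first. A self-contained sketch of the failure mode and the fix, with illustrative stand-in types (not the real credential package):

	package iamtest

	import (
		"sync"
		"testing"
	)

	// memoryStore mimics a process-wide singleton credential store.
	type memoryStore struct {
		mu         sync.Mutex
		identities map[string]bool
	}

	var shared = &memoryStore{identities: map[string]bool{}}

	func (s *memoryStore) Add(name string) {
		s.mu.Lock()
		s.identities[name] = true
		s.mu.Unlock()
	}

	func (s *memoryStore) Reset() {
		s.mu.Lock()
		s.identities = map[string]bool{}
		s.mu.Unlock()
	}

	func (s *memoryStore) LookupAnonymous() bool {
		s.mu.Lock()
		defer s.mu.Unlock()
		return s.identities["anonymous"]
	}

	// An earlier test leaks an identity into the shared singleton.
	func TestEarlierTestPollutesStore(t *testing.T) {
		shared.Add("anonymous")
	}

	// The fix: start from a clean shared store before asserting.
	func TestAnonymousLookupIsClean(t *testing.T) {
		shared.Reset()
		if shared.LookupAnonymous() {
			t.Fatal("expected LookupAnonymous to be false on a fresh store")
		}
	}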

* style: run gofmt on changed files

* fix: restore KMS functions used by integration tests

* fix(plugin): prevent panic on send to closed worker session channel

The Plugin.sendToWorker method could panic with "send on closed channel"
when a worker disconnected while a message was being sent. The race was
between streamSession.close() closing the outgoing channel and sendToWorker
writing to it concurrently.

Add a done channel to streamSession that is closed before the outgoing
channel, and check it in sendToWorker's select to safely detect closed
sessions without panicking.
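
A minimal sketch of that pattern, with illustrative types and names; it is simplified in that close() only closes done and never closes outgoing, so a sender blocked in the select simply falls through to the done case:

	package worker

	import "errors"

	type streamSession struct {
		outgoing chan []byte
		done     chan struct{}
	}

	func newStreamSession() *streamSession {
		return &streamSession{
			outgoing: make(chan []byte, 16),
			done:     make(chan struct{}),
		}
	}

	// close signals shutdown; senders observe done in their select.
	func (s *streamSession) close() {
		close(s.done)
	}

	// sendToWorker returns an error instead of panicking when the
	// session is closing concurrently.
	func (s *streamSession) sendToWorker(msg []byte) error {
		select {
		case <-s.done:
			return errors.New("session closed")
		case s.outgoing <- msg:
			return nil
		}
	}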
2026-04-03 16:04:27 -07:00


package mount

import (
	"os"
	"sync"

	"github.com/seaweedfs/go-fuse/v2/fuse"

	"github.com/seaweedfs/seaweedfs/weed/filer"
	"github.com/seaweedfs/seaweedfs/weed/glog"
	"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
	"github.com/seaweedfs/seaweedfs/weed/util"
)

type FileHandleId uint64

var IsDebugFileReadWrite = false

type FileHandle struct {
	fh      FileHandleId
	counter int64

	entry           *LockedEntry
	entryLock       sync.RWMutex
	entryChunkGroup *filer.ChunkGroup

	inode uint64
	wfs   *WFS

	// cache file has been written to
	dirtyMetadata bool
	dirtyPages    *PageWriter
	reader        *filer.ChunkReadAt
	contentType   string

	asyncFlushPending bool   // set in writebackCache mode to defer flush to Release
	asyncFlushUid     uint32 // saved uid for deferred metadata flush
	asyncFlushGid     uint32 // saved gid for deferred metadata flush

	savedDir  string // last known parent path if inode-to-path state is forgotten
	savedName string // last known file name if inode-to-path state is forgotten

	isDeleted bool
	isRenamed bool // set by Rename before waiting for async flush; skips old-path metadata flush

	// RDMA chunk offset cache for performance optimization
	chunkOffsetCache []int64
	chunkCacheValid  bool
	chunkCacheLock   sync.RWMutex

	// for debugging
	mirrorFile *os.File
}
func newFileHandle(wfs *WFS, handleId FileHandleId, inode uint64, entry *filer_pb.Entry) *FileHandle {
	fh := &FileHandle{
		fh:      handleId,
		counter: 1,
		inode:   inode,
		wfs:     wfs,
	}
	// dirtyPages: newContinuousDirtyPages(file, writeOnly),
	fh.dirtyPages = newPageWriter(fh, wfs.option.ChunkSizeLimit)
	fh.entry = &LockedEntry{
		Entry: entry,
	}
	if entry != nil {
		fh.SetEntry(entry)
	}
	// entry may be nil here, so guard the debug mirror to avoid a nil dereference
	if IsDebugFileReadWrite && entry != nil {
		var err error
		fh.mirrorFile, err = os.OpenFile("/tmp/sw/"+entry.Name, os.O_RDWR|os.O_CREATE, 0600)
		if err != nil {
			println("failed to create mirror:", err.Error())
		}
	}
	return fh
}
func (fh *FileHandle) FullPath() util.FullPath {
	if fp, status := fh.wfs.inodeToPath.GetPath(fh.inode); status == fuse.OK {
		return fp
	}
	if fh.savedName != "" {
		return util.FullPath(fh.savedDir).Child(fh.savedName)
	}
	return ""
}

func (fh *FileHandle) RememberPath(fullPath util.FullPath) {
	if fullPath == "" {
		return
	}
	fh.savedDir, fh.savedName = fullPath.DirAndName()
}

func (fh *FileHandle) GetEntry() *LockedEntry {
	return fh.entry
}
func (fh *FileHandle) SetEntry(entry *filer_pb.Entry) {
	if entry != nil {
		fileSize := filer.FileSize(entry)
		entry.Attributes.FileSize = fileSize
		var resolveManifestErr error
		fh.entryChunkGroup, resolveManifestErr = filer.NewChunkGroup(fh.wfs.LookupFn(), fh.wfs.chunkCache, entry.Chunks, fh.wfs.option.ConcurrentReaders)
		if resolveManifestErr != nil {
			glog.Warningf("failed to resolve manifest chunks in %+v: %v", entry, resolveManifestErr)
		}
	} else {
		glog.Fatalf("setting file handle entry to nil")
	}
	fh.entry.SetEntry(entry)
	// Invalidate chunk offset cache since chunks may have changed
	fh.invalidateChunkCache()
}
func (fh *FileHandle) ResetDirtyPages() {
	fh.dirtyPages.Destroy()
	fh.dirtyPages = newPageWriter(fh, fh.wfs.option.ChunkSizeLimit)
	fh.dirtyMetadata = false
	fh.contentType = ""
}

func (fh *FileHandle) UpdateEntry(fn func(entry *filer_pb.Entry)) *filer_pb.Entry {
	result := fh.entry.UpdateEntry(fn)
	// Invalidate chunk offset cache since entry may have been modified
	fh.invalidateChunkCache()
	return result
}

func (fh *FileHandle) AddChunks(chunks []*filer_pb.FileChunk) {
	fh.entry.AppendChunks(chunks)
	// Invalidate chunk offset cache since new chunks were added
	fh.invalidateChunkCache()
}
func (fh *FileHandle) ReleaseHandle() {
	fhActiveLock := fh.wfs.fhLockTable.AcquireLock("ReleaseHandle", fh.fh, util.ExclusiveLock)
	defer fh.wfs.fhLockTable.ReleaseLock(fh.fh, fhActiveLock)

	fh.dirtyPages.Destroy()
	// mirrorFile is nil when the debug mirror was never created
	if IsDebugFileReadWrite && fh.mirrorFile != nil {
		fh.mirrorFile.Close()
	}
}
// getCumulativeOffsets returns cached cumulative offsets for chunks, computing them if necessary
func (fh *FileHandle) getCumulativeOffsets(chunks []*filer_pb.FileChunk) []int64 {
	fh.chunkCacheLock.RLock()
	if fh.chunkCacheValid && len(fh.chunkOffsetCache) == len(chunks)+1 {
		// Cache is valid and matches current chunk count
		result := make([]int64, len(fh.chunkOffsetCache))
		copy(result, fh.chunkOffsetCache)
		fh.chunkCacheLock.RUnlock()
		return result
	}
	fh.chunkCacheLock.RUnlock()

	// Need to compute/recompute cache
	fh.chunkCacheLock.Lock()
	defer fh.chunkCacheLock.Unlock()

	// Double-check in case another goroutine computed it while we waited for the lock
	if fh.chunkCacheValid && len(fh.chunkOffsetCache) == len(chunks)+1 {
		result := make([]int64, len(fh.chunkOffsetCache))
		copy(result, fh.chunkOffsetCache)
		return result
	}

	// Compute cumulative offsets
	cumulativeOffsets := make([]int64, len(chunks)+1)
	for i, chunk := range chunks {
		cumulativeOffsets[i+1] = cumulativeOffsets[i] + int64(chunk.Size)
	}

	// Cache the result
	fh.chunkOffsetCache = make([]int64, len(cumulativeOffsets))
	copy(fh.chunkOffsetCache, cumulativeOffsets)
	fh.chunkCacheValid = true

	return cumulativeOffsets
}

// invalidateChunkCache invalidates the chunk offset cache when chunks are modified
func (fh *FileHandle) invalidateChunkCache() {
	fh.chunkCacheLock.Lock()
	fh.chunkCacheValid = false
	fh.chunkOffsetCache = nil
	fh.chunkCacheLock.Unlock()
}
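
The cache holds len(chunks)+1 prefix sums, so chunk i covers the byte range [offsets[i], offsets[i+1]) and offsets[len(chunks)] is the total file size. A hypothetical standalone helper (findChunkIndex is not part of this file) showing how such a cache could be consumed, assuming the standard library "sort" package is imported:

	// findChunkIndex binary-searches the cached prefix sums for the chunk
	// containing fileOffset, in O(log n).
	func findChunkIndex(offsets []int64, fileOffset int64) int {
		// offsets has len(chunks)+1 entries; chunk i covers [offsets[i], offsets[i+1]).
		n := len(offsets) - 1
		i := sort.Search(n, func(i int) bool { return offsets[i+1] > fileOffset })
		if i == n {
			return -1 // fileOffset is at or past the end of the file
		}
		return i
	}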