* fix: use keyed fields in struct literals - Replace unsafe reflect.StringHeader/SliceHeader with safe unsafe.String/Slice (weed/query/sqltypes/unsafe.go) - Add field names to Type_ScalarType struct literals (weed/mq/schema/schema_builder.go) - Add Duration field name to FlexibleDuration struct literals across test files - Add field names to bson.D struct literals (weed/filer/mongodb/mongodb_store_kv.go) Fixes go vet warnings about unkeyed struct literals. * fix: remove unreachable code - Remove unreachable return statements after infinite for loops - Remove unreachable code after if/else blocks where all paths return - Simplify recursive logic by removing unnecessary for loop (inode_to_path.go) - Fix Type_ScalarType literal to use enum value directly (schema_builder.go) - Call onCompletionFn on stream error (subscribe_session.go) Files fixed: - weed/query/sqltypes/unsafe.go - weed/mq/schema/schema_builder.go - weed/mq/client/sub_client/connect_to_sub_coordinator.go - weed/filer/redis3/ItemList.go - weed/mq/client/agent_client/subscribe_session.go - weed/mq/broker/broker_grpc_pub_balancer.go - weed/mount/inode_to_path.go - weed/util/skiplist/name_list.go * fix: avoid copying lock values in protobuf messages - Use proto.Merge() instead of direct assignment to avoid copying sync.Mutex in S3ApiConfiguration (iamapi_server.go) - Add explicit comments noting that channel-received values are already copies before taking addresses (volume_grpc_client_to_master.go) The protobuf messages contain sync.Mutex fields from the message state, which should not be copied. Using proto.Merge() properly merges messages without copying the embedded mutex. * fix: correct byte array size for uint32 bit shift operations The generateAccountId() function only needs 4 bytes to create a uint32 value. Changed from allocating 8 bytes to 4 bytes to match the actual usage. This fixes go vet warning about shifting 8-bit values (bytes) by more than 8 bits. 
* fix: ensure context cancellation on all error paths In broker_client_subscribe.go, ensure subscriberCancel() is called on all error return paths: - When stream creation fails - When partition assignment fails - When sending initialization message fails This prevents context leaks when an error occurs during subscriber creation. * fix: ensure subscriberCancel called for CreateFreshSubscriber stream.Send error Ensure subscriberCancel() is called when stream.Send fails in CreateFreshSubscriber. * ci: add go vet step to prevent future lint regressions - Add go vet step to GitHub Actions workflow - Filter known protobuf lock warnings (MessageState sync.Mutex) These are expected in generated protobuf code and are safe - Prevents accumulation of go vet errors in future PRs - Step runs before build to catch issues early * fix: resolve remaining syntax and logic errors in vet fixes - Fixed syntax errors in filer_sync.go caused by missing closing braces - Added missing closing brace for if block and function - Synchronized fixes to match previous commits on branch * fix: add missing return statements to daemon functions - Add 'return false' after infinite loops in filer_backup.go and filer_meta_backup.go - Satisfies declared bool return type signatures - Maintains consistency with other daemon functions (runMaster, runFilerSynchronize, runWorker) - While unreachable, explicitly declares the return satisfies function signature contract * fix: add nil check for onCompletionFn in SubscribeMessageRecord - Check if onCompletionFn is not nil before calling it - Prevents potential panic if nil function is passed - Matches pattern used in other callback functions * docs: clarify unreachable return statements in daemon functions - Add comments documenting that return statements satisfy function signature - Explains that these returns follow infinite loops and are unreachable - Improves code clarity for future maintainers
304 lines
7.2 KiB
Go
304 lines
7.2 KiB
Go
package mount
|
|
|
|
import (
|
|
"sync"
|
|
"time"
|
|
|
|
"github.com/hanwen/go-fuse/v2/fuse"
|
|
"github.com/seaweedfs/seaweedfs/weed/glog"
|
|
"github.com/seaweedfs/seaweedfs/weed/util"
|
|
)
|
|
|
|
// InodeToPath maintains the bidirectional mapping between FUSE inode numbers
// and full file paths. All methods that touch the maps take the embedded
// RWMutex, so the type is safe for concurrent use.
type InodeToPath struct {
	sync.RWMutex
	// nextInodeId is not referenced anywhere in this file.
	// NOTE(review): confirm it is used elsewhere, or remove it.
	nextInodeId uint64
	// cacheMetaTtlSec is how long a cached directory listing stays valid;
	// 0 means cached listings never expire.
	cacheMetaTtlSec time.Duration
	// inode2path maps inode -> entry (paths, kernel lookup count, cache state).
	inode2path map[uint64]*InodeEntry
	// path2inode maps full path -> inode (reverse of inode2path).
	path2inode map[util.FullPath]uint64
}
|
|
// InodeEntry is the per-inode bookkeeping record: every path that resolves to
// the inode (more than one when hardlinks exist), the kernel lookup count, and
// the directory children-cache state.
type InodeEntry struct {
	// paths lists all full paths mapped to this inode; paths[0] is treated
	// as the primary path (see GetPath).
	paths []util.FullPath
	// nlookup is the kernel's lookup reference count (see Forget).
	nlookup uint64
	// isDirectory records whether this inode refers to a directory.
	isDirectory bool
	// isChildrenCached is true once the directory listing has been cached.
	isChildrenCached bool
	// cachedExpiresTime is when the cached listing expires; the zero value
	// means it never expires.
	cachedExpiresTime time.Time
}
|
|
|
|
func (ie *InodeEntry) removeOnePath(p util.FullPath) bool {
|
|
if len(ie.paths) == 0 {
|
|
return false
|
|
}
|
|
idx := -1
|
|
for i, x := range ie.paths {
|
|
if x == p {
|
|
idx = i
|
|
break
|
|
}
|
|
}
|
|
if idx < 0 {
|
|
return false
|
|
}
|
|
for x := idx; x < len(ie.paths)-1; x++ {
|
|
ie.paths[x] = ie.paths[x+1]
|
|
}
|
|
ie.paths = ie.paths[0 : len(ie.paths)-1]
|
|
return true
|
|
}
|
|
|
|
func NewInodeToPath(root util.FullPath, ttlSec int) *InodeToPath {
|
|
t := &InodeToPath{
|
|
inode2path: make(map[uint64]*InodeEntry),
|
|
path2inode: make(map[util.FullPath]uint64),
|
|
cacheMetaTtlSec: time.Second * time.Duration(ttlSec),
|
|
}
|
|
t.inode2path[1] = &InodeEntry{[]util.FullPath{root}, 1, true, false, time.Time{}}
|
|
t.path2inode[root] = 1
|
|
|
|
return t
|
|
}
|
|
|
|
// EnsurePath make sure the full path is tracked, used by symlink.
|
|
func (i *InodeToPath) EnsurePath(path util.FullPath, isDirectory bool) bool {
|
|
dir, _ := path.DirAndName()
|
|
if dir == "/" {
|
|
return true
|
|
}
|
|
if i.EnsurePath(util.FullPath(dir), true) {
|
|
i.Lookup(path, time.Now().Unix(), isDirectory, false, 0, false)
|
|
return true
|
|
}
|
|
return false
|
|
}
|
|
|
|
func (i *InodeToPath) Lookup(path util.FullPath, unixTime int64, isDirectory bool, isHardlink bool, possibleInode uint64, isLookup bool) uint64 {
|
|
i.Lock()
|
|
defer i.Unlock()
|
|
inode, found := i.path2inode[path]
|
|
if !found {
|
|
if possibleInode == 0 {
|
|
inode = path.AsInode(unixTime)
|
|
} else {
|
|
inode = possibleInode
|
|
}
|
|
if !isHardlink {
|
|
for _, found := i.inode2path[inode]; found; inode++ {
|
|
_, found = i.inode2path[inode+1]
|
|
}
|
|
}
|
|
}
|
|
i.path2inode[path] = inode
|
|
|
|
if _, found := i.inode2path[inode]; found {
|
|
if isLookup {
|
|
i.inode2path[inode].nlookup++
|
|
}
|
|
} else {
|
|
if !isLookup {
|
|
i.inode2path[inode] = &InodeEntry{[]util.FullPath{path}, 0, isDirectory, false, time.Time{}}
|
|
} else {
|
|
i.inode2path[inode] = &InodeEntry{[]util.FullPath{path}, 1, isDirectory, false, time.Time{}}
|
|
}
|
|
}
|
|
|
|
return inode
|
|
}
|
|
|
|
func (i *InodeToPath) AllocateInode(path util.FullPath, unixTime int64) uint64 {
|
|
if path == "/" {
|
|
return 1
|
|
}
|
|
i.Lock()
|
|
defer i.Unlock()
|
|
inode := path.AsInode(unixTime)
|
|
for _, found := i.inode2path[inode]; found; inode++ {
|
|
_, found = i.inode2path[inode]
|
|
}
|
|
return inode
|
|
}
|
|
|
|
func (i *InodeToPath) GetInode(path util.FullPath) (uint64, bool) {
|
|
if path == "/" {
|
|
return 1, true
|
|
}
|
|
i.Lock()
|
|
defer i.Unlock()
|
|
inode, found := i.path2inode[path]
|
|
if !found {
|
|
// glog.Fatalf("GetInode unknown inode for %s", path)
|
|
// this could be the parent for mount point
|
|
}
|
|
return inode, found
|
|
}
|
|
|
|
func (i *InodeToPath) GetPath(inode uint64) (util.FullPath, fuse.Status) {
|
|
i.RLock()
|
|
defer i.RUnlock()
|
|
path, found := i.inode2path[inode]
|
|
if !found || len(path.paths) == 0 {
|
|
return "", fuse.ENOENT
|
|
}
|
|
return path.paths[0], fuse.OK
|
|
}
|
|
|
|
func (i *InodeToPath) HasPath(path util.FullPath) bool {
|
|
i.RLock()
|
|
defer i.RUnlock()
|
|
_, found := i.path2inode[path]
|
|
return found
|
|
}
|
|
|
|
func (i *InodeToPath) MarkChildrenCached(fullpath util.FullPath) {
|
|
i.Lock()
|
|
defer i.Unlock()
|
|
inode, found := i.path2inode[fullpath]
|
|
if !found {
|
|
// https://github.com/seaweedfs/seaweedfs/issues/4968
|
|
// glog.Fatalf("MarkChildrenCached not found inode %v", fullpath)
|
|
glog.Warningf("MarkChildrenCached not found inode %v", fullpath)
|
|
return
|
|
}
|
|
path, found := i.inode2path[inode]
|
|
if !found {
|
|
glog.Warningf("MarkChildrenCached inode %d not found in inode2path for %v", inode, fullpath)
|
|
return
|
|
}
|
|
path.isChildrenCached = true
|
|
if i.cacheMetaTtlSec > 0 {
|
|
path.cachedExpiresTime = time.Now().Add(i.cacheMetaTtlSec)
|
|
}
|
|
}
|
|
|
|
func (i *InodeToPath) IsChildrenCached(fullpath util.FullPath) bool {
|
|
i.RLock()
|
|
defer i.RUnlock()
|
|
inode, found := i.path2inode[fullpath]
|
|
if !found {
|
|
return false
|
|
}
|
|
path, found := i.inode2path[inode]
|
|
if !found {
|
|
return false
|
|
}
|
|
if path.isChildrenCached {
|
|
return path.cachedExpiresTime.IsZero() || time.Now().Before(path.cachedExpiresTime)
|
|
}
|
|
return false
|
|
}
|
|
|
|
func (i *InodeToPath) HasInode(inode uint64) bool {
|
|
if inode == 1 {
|
|
return true
|
|
}
|
|
i.RLock()
|
|
defer i.RUnlock()
|
|
_, found := i.inode2path[inode]
|
|
return found
|
|
}
|
|
|
|
func (i *InodeToPath) AddPath(inode uint64, path util.FullPath) {
|
|
i.Lock()
|
|
defer i.Unlock()
|
|
i.path2inode[path] = inode
|
|
|
|
ie, found := i.inode2path[inode]
|
|
if found {
|
|
ie.paths = append(ie.paths, path)
|
|
ie.nlookup++
|
|
} else {
|
|
i.inode2path[inode] = &InodeEntry{
|
|
paths: []util.FullPath{path},
|
|
nlookup: 1,
|
|
isDirectory: false,
|
|
isChildrenCached: false,
|
|
}
|
|
}
|
|
}
|
|
|
|
func (i *InodeToPath) RemovePath(path util.FullPath) {
|
|
i.Lock()
|
|
defer i.Unlock()
|
|
inode, found := i.path2inode[path]
|
|
if found {
|
|
delete(i.path2inode, path)
|
|
i.removePathFromInode2Path(inode, path)
|
|
}
|
|
}
|
|
|
|
func (i *InodeToPath) removePathFromInode2Path(inode uint64, path util.FullPath) {
|
|
ie, found := i.inode2path[inode]
|
|
if !found {
|
|
return
|
|
}
|
|
if !ie.removeOnePath(path) {
|
|
return
|
|
}
|
|
}
|
|
|
|
func (i *InodeToPath) MovePath(sourcePath, targetPath util.FullPath) (sourceInode, targetInode uint64) {
|
|
i.Lock()
|
|
defer i.Unlock()
|
|
sourceInode, sourceFound := i.path2inode[sourcePath]
|
|
targetInode, targetFound := i.path2inode[targetPath]
|
|
if targetFound {
|
|
i.removePathFromInode2Path(targetInode, targetPath)
|
|
delete(i.path2inode, targetPath)
|
|
}
|
|
if sourceFound {
|
|
delete(i.path2inode, sourcePath)
|
|
i.path2inode[targetPath] = sourceInode
|
|
} else {
|
|
// it is possible some source folder items has not been visited before
|
|
// so no need to worry about their source inodes
|
|
return
|
|
}
|
|
if entry, entryFound := i.inode2path[sourceInode]; entryFound {
|
|
for i, p := range entry.paths {
|
|
if p == sourcePath {
|
|
entry.paths[i] = targetPath
|
|
}
|
|
}
|
|
entry.isChildrenCached = false
|
|
} else {
|
|
glog.Errorf("MovePath %s to %s: sourceInode %d not found", sourcePath, targetPath, sourceInode)
|
|
}
|
|
return
|
|
}
|
|
|
|
// Forget decrements the kernel lookup count for inode by nlookup. When the
// count reaches zero the inode is removed from both maps, and if it is a
// directory and onForgetDir is non-nil, onForgetDir is called for each of the
// inode's paths AFTER the lock is released, so the callback may safely call
// back into this struct without deadlocking.
func (i *InodeToPath) Forget(inode, nlookup uint64, onForgetDir func(dir util.FullPath)) {
	var dirPaths []util.FullPath
	callOnForgetDir := false

	// Manual Lock/Unlock (no defer): the callback below must run unlocked.
	i.Lock()
	path, found := i.inode2path[inode]
	if found {
		if nlookup > path.nlookup {
			// The kernel should never forget more references than were handed out;
			// clamp at zero rather than underflowing the unsigned counter.
			glog.Errorf("kernel forget over-decrement: inode %d paths %v current %d forget %d", inode, path.paths, path.nlookup, nlookup)
			path.nlookup = 0
		} else {
			path.nlookup -= nlookup
		}
		glog.V(4).Infof("kernel forget: inode %d paths %v nlookup %d", inode, path.paths, path.nlookup)
		if path.nlookup == 0 {
			if path.isDirectory && onForgetDir != nil {
				// Snapshot the paths: the entry is deleted below, and the
				// callback runs only after the lock is dropped.
				dirPaths = append([]util.FullPath(nil), path.paths...)
				callOnForgetDir = true
			}
			// Drop every path mapping for this inode, then the inode itself.
			for _, p := range path.paths {
				delete(i.path2inode, p)
			}
			delete(i.inode2path, inode)
		} else {
			glog.V(4).Infof("kernel forget but nlookup not zero: inode %d paths %v nlookup %d", inode, path.paths, path.nlookup)
		}
	} else {
		glog.Warningf("kernel forget but inode not found: inode %d", inode)
	}
	i.Unlock()

	// Invoke the directory-forget callback outside the lock.
	if callOnForgetDir {
		for _, p := range dirPaths {
			onForgetDir(p)
		}
	}
}
|