seaweedFS/weed/storage/store_state.go
Chris Lu 995dfc4d5d chore: remove ~50k lines of unreachable dead code (#8913)
* chore: remove unreachable dead code across the codebase

Remove ~50,000 lines of unreachable code identified by static analysis.

Major removals:
- weed/filer/redis_lua: entire unused Redis Lua filer store implementation
- weed/wdclient/net2, resource_pool: unused connection/resource pool packages
- weed/plugin/worker/lifecycle: unused lifecycle plugin worker
- weed/s3api: unused S3 policy templates, presigned URL IAM, streaming copy,
  multipart IAM, key rotation, and various SSE helper functions
- weed/mq/kafka: unused partition mapping, compression, schema, and protocol functions
- weed/mq/offset: unused SQL storage and migration code
- weed/worker: unused registry, task, and monitoring functions
- weed/query: unused SQL engine, parquet scanner, and type functions
- weed/shell: unused EC proportional rebalance functions
- weed/storage/erasure_coding/distribution: unused distribution analysis functions
- Individual unreachable functions removed from 150+ files across admin,
  credential, filer, iam, kms, mount, mq, operation, pb, s3api, server,
  shell, storage, topology, and util packages

* fix(s3): reset shared memory store in IAM test to prevent flaky failure

TestLoadIAMManagerFromConfig_EmptyConfigWithFallbackKey was flaky because
the MemoryStore credential backend is a singleton registered via init().
Earlier tests that create anonymous identities pollute the shared store,
causing LookupAnonymous() to unexpectedly return true.

Fix by calling Reset() on the memory store before the test runs.
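
A minimal sketch of the singleton pattern behind the flake and the fix; only Reset() and the init()-time registration come from the commit message, while the MemoryStore fields and the test body are illustrative:

package credential

import (
	"sync"
	"testing"
)

// MemoryStore stands in for the singleton credential backend registered via
// init(). The identities map is an illustrative stand-in for its real state.
type MemoryStore struct {
	mu         sync.Mutex
	identities map[string]bool
}

var memoryStore = &MemoryStore{identities: map[string]bool{}}

// Reset returns the shared store to a pristine state.
func (s *MemoryStore) Reset() {
	s.mu.Lock()
	defer s.mu.Unlock()
	s.identities = map[string]bool{}
}

// The fixed test resets the shared store up front, so anonymous identities
// created by earlier tests can no longer leak into its assertions.
func TestLoadIAMManagerFromConfig_EmptyConfigWithFallbackKey(t *testing.T) {
	memoryStore.Reset()
	// ... load the IAM config and assert LookupAnonymous() finds nothing ...
}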

* style: run gofmt on changed files

* fix: restore KMS functions used by integration tests

* fix(plugin): prevent panic on send to closed worker session channel

The Plugin.sendToWorker method could panic with "send on closed channel"
when a worker disconnected while a message was being sent. The race was
between streamSession.close() closing the outgoing channel and sendToWorker
writing to it concurrently.

Add a done channel to streamSession that is closed before the outgoing
channel, and check it in sendToWorker's select to safely detect closed
sessions without panicking.
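
A sketch of the done-channel pattern (type and field names are illustrative, not the plugin package's real ones). Unlike the fix described above, this variant never closes the outgoing channel at all, which removes even the residual window between closing done and closing outgoing:

package plugin

import (
	"errors"
	"sync"
)

type streamSession struct {
	outgoing  chan []byte
	done      chan struct{} // closed on shutdown, observed by senders
	closeOnce sync.Once
}

var errSessionClosed = errors.New("worker session closed")

// close signals shutdown by closing done. The outgoing channel is left open
// for the garbage collector, so no sender can ever hit a closed channel.
func (s *streamSession) close() {
	s.closeOnce.Do(func() { close(s.done) })
}

// sendToWorker checks done in the same select as the send, so a session
// closed mid-send yields an error instead of a "send on closed channel" panic.
func (s *streamSession) sendToWorker(msg []byte) error {
	select {
	case <-s.done:
		return errSessionClosed
	case s.outgoing <- msg:
		return nil
	}
}
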
2026-04-03 16:04:27 -07:00


package storage

import (
	"fmt"
	"os"
	"path/filepath"
	"sync"

	"github.com/seaweedfs/seaweedfs/weed/glog"
	"github.com/seaweedfs/seaweedfs/weed/pb/volume_server_pb"
	"github.com/seaweedfs/seaweedfs/weed/util"
	"google.golang.org/protobuf/proto"
)

const (
	StateFileName = "state.pb"
	StateFileMode = 0644
)

// State holds a volume server's state proto together with the path of the
// file it is persisted to. All access goes through mu.
type State struct {
	filePath string
	pb       *volume_server_pb.VolumeServerState
	mu       sync.Mutex
}

// NewState creates a State backed by dir/state.pb and loads any existing
// state from disk.
func NewState(dir string) (*State, error) {
	state := &State{
		filePath: filepath.Join(dir, StateFileName),
		pb:       nil,
	}
	err := state.Load()
	return state, err
}

// Proto returns the current in-memory state proto.
func (st *State) Proto() *volume_server_pb.VolumeServerState {
	st.mu.Lock()
	defer st.mu.Unlock()
	return st.pb
}

// Load reads the state proto from disk, falling back to an empty proto when
// no state file exists yet.
func (st *State) Load() error {
	st.mu.Lock()
	defer st.mu.Unlock()

	st.pb = &volume_server_pb.VolumeServerState{}
	if !util.FileExists(st.filePath) {
		glog.V(1).Infof("No preexisting store state at %s", st.filePath)
		return nil
	}

	binPb, err := os.ReadFile(st.filePath)
	if err != nil {
		st.pb = nil
		return fmt.Errorf("failed to read store state from %s : %v", st.filePath, err)
	}
	if err := proto.Unmarshal(binPb, st.pb); err != nil {
		st.pb = nil
		return fmt.Errorf("failed to parse store state from %s : %v", st.filePath, err)
	}

	glog.V(1).Infof("Got store state from %s: %v", st.filePath, st.pb)
	return nil
}

// save serializes the state proto to disk. Locking is optional so callers
// that already hold the mutex (such as Update) can reuse it.
func (st *State) save(locking bool) error {
	if locking {
		st.mu.Lock()
		defer st.mu.Unlock()
	}

	if st.pb == nil {
		st.pb = &volume_server_pb.VolumeServerState{}
	}
	binPb, err := proto.Marshal(st.pb)
	if err != nil {
		return fmt.Errorf("failed to serialize store state %v: %s", st.pb, err)
	}
	if err := util.WriteFile(st.filePath, binPb, StateFileMode); err != nil {
		return fmt.Errorf("failed to write store state to %s : %v", st.filePath, err)
	}

	glog.V(1).Infof("Saved store state %v to %s", st.pb, st.filePath)
	return nil
}

// Save persists the current state proto to disk.
func (st *State) Save() error {
	return st.save(true)
}

// Update replaces the in-memory state with the given proto, bumps its
// version, and persists it. Updates whose version does not match the current
// one are rejected, so stale concurrent writers fail fast.
func (st *State) Update(state *volume_server_pb.VolumeServerState) error {
	st.mu.Lock()
	defer st.mu.Unlock()

	if state == nil {
		return nil
	}
	if got, want := st.pb.GetVersion(), state.GetVersion(); got != want {
		return fmt.Errorf("version mismatch for VolumeServerState (got %d, want %d)", got, want)
	}

	origState := st.pb
	st.pb = &volume_server_pb.VolumeServerState{}
	proto.Merge(st.pb, state)
	st.pb.Version = st.pb.GetVersion() + 1

	err := st.save(false)
	if err != nil {
		// Restore the original state upon save failures, to avoid skew
		// between the in-memory and on-disk state protos.
		st.pb = origState
	}
	return err
}
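
A hypothetical caller, illustrating the optimistic-concurrency contract Update enforces: clone the current proto, mutate the clone, and submit it still carrying the version that was read. The directory path below is illustrative:

package main

import (
	"log"

	"github.com/seaweedfs/seaweedfs/weed/pb/volume_server_pb"
	"github.com/seaweedfs/seaweedfs/weed/storage"
	"google.golang.org/protobuf/proto"
)

func main() {
	// Loads dir/state.pb if present, otherwise starts from an empty proto.
	st, err := storage.NewState("/data/volume1")
	if err != nil {
		log.Fatalf("load state: %v", err)
	}

	// Update merges the submitted proto into a fresh one, bumps the version,
	// and rolls the in-memory proto back if the save fails. A clone whose
	// version was already bumped by another writer is rejected with a
	// version-mismatch error.
	next := proto.Clone(st.Proto()).(*volume_server_pb.VolumeServerState)
	if err := st.Update(next); err != nil {
		log.Fatalf("update state: %v", err)
	}
}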