Files
seaweedFS/weed/server/raft_server.go
Chris Lu 7b8df39cf7 s3api: add AttachUserPolicy/DetachUserPolicy/ListAttachedUserPolicies (#8379)
* iam: add XML responses for managed user policy APIs

* s3api: implement attach/detach/list attached user policies

* s3api: add embedded IAM tests for managed user policies

* iam: update CredentialStore interface and Manager for managed policies

Updated the `CredentialStore` interface to include `AttachUserPolicy`,
`DetachUserPolicy`, and `ListAttachedUserPolicies` methods.
The `CredentialManager` was updated to delegate these calls to the store.
Added common error variables for policy management.
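
As a rough sketch (the signatures, context parameter, and error names here are illustrative assumptions, not the exact upstream definitions), the interface additions might look like:

```go
package credential

import (
	"context"
	"errors"
)

// Sketch only: the real CredentialStore has more methods and may use
// different parameter types.
type CredentialStore interface {
	// AttachUserPolicy associates a managed policy with a user.
	AttachUserPolicy(ctx context.Context, username, policyName string) error
	// DetachUserPolicy removes a managed policy association from a user.
	DetachUserPolicy(ctx context.Context, username, policyName string) error
	// ListAttachedUserPolicies returns the names of the policies attached to a user.
	ListAttachedUserPolicies(ctx context.Context, username string) ([]string, error)
}

// Common error variables for policy management (names are illustrative).
var (
	ErrUserNotFound   = errors.New("user not found")
	ErrPolicyNotFound = errors.New("policy not found")
)
```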

* iam: implement managed policy methods in MemoryStore

Implemented `AttachUserPolicy`, `DetachUserPolicy`, and
`ListAttachedUserPolicies` in the MemoryStore.
Also ensured deep copying of identities includes PolicyNames.
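
A minimal in-memory version of the attach/list path might look like the sketch below; the `users` map and the `Identity.PolicyNames` field are assumptions based on the description above:

```go
package credential

import (
	"context"
	"fmt"
	"slices"
	"sync"
)

// Identity is a simplified stand-in for the real identity record.
type Identity struct {
	Name        string
	PolicyNames []string
}

// MemoryStore sketches an in-memory credential store guarded by a mutex.
type MemoryStore struct {
	mu    sync.RWMutex
	users map[string]*Identity
}

// AttachUserPolicy adds policyName to the user's managed policies, ignoring duplicates.
func (m *MemoryStore) AttachUserPolicy(_ context.Context, username, policyName string) error {
	m.mu.Lock()
	defer m.mu.Unlock()
	user, ok := m.users[username]
	if !ok {
		return fmt.Errorf("user %q not found", username)
	}
	if slices.Contains(user.PolicyNames, policyName) {
		return nil // already attached
	}
	user.PolicyNames = append(user.PolicyNames, policyName)
	return nil
}

// ListAttachedUserPolicies returns a copy so callers cannot mutate store state.
func (m *MemoryStore) ListAttachedUserPolicies(_ context.Context, username string) ([]string, error) {
	m.mu.RLock()
	defer m.mu.RUnlock()
	user, ok := m.users[username]
	if !ok {
		return nil, fmt.Errorf("user %q not found", username)
	}
	return slices.Clone(user.PolicyNames), nil
}
```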

* iam: implement managed policy methods in PostgresStore

Modified Postgres schema to include `policy_names` JSONB column in `users`.
Implemented `AttachUserPolicy`, `DetachUserPolicy`, and `ListAttachedUserPolicies`.
Updated user CRUD operations to handle policy names persistence.
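
A rough idea of the Postgres read-modify-write against the `policy_names` JSONB column, using the standard `database/sql` package (everything about the table layout beyond the `policy_names` column mentioned above is assumed):

```go
package credential

import (
	"context"
	"database/sql"
	"encoding/json"
	"fmt"
)

// PostgresStore sketches a SQL-backed credential store.
type PostgresStore struct {
	db *sql.DB
}

// AttachUserPolicy reads the current policy_names JSONB value, appends the
// new policy if missing, and writes the list back.
func (p *PostgresStore) AttachUserPolicy(ctx context.Context, username, policyName string) error {
	var raw []byte
	err := p.db.QueryRowContext(ctx,
		`SELECT COALESCE(policy_names, '[]'::jsonb) FROM users WHERE username = $1`,
		username).Scan(&raw)
	if err != nil {
		return fmt.Errorf("load user %q: %w", username, err)
	}
	var names []string
	if err := json.Unmarshal(raw, &names); err != nil {
		return fmt.Errorf("decode policy_names: %w", err)
	}
	for _, n := range names {
		if n == policyName {
			return nil // already attached
		}
	}
	names = append(names, policyName)
	updated, err := json.Marshal(names)
	if err != nil {
		return err
	}
	_, err = p.db.ExecContext(ctx,
		`UPDATE users SET policy_names = $1 WHERE username = $2`, updated, username)
	return err
}
```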

* iam: implement managed policy methods in remaining stores

Implemented user policy management in:
- `FilerEtcStore` (partial implementation)
- `IamGrpcStore` (delegated via GetUser/UpdateUser)
- `PropagatingCredentialStore` (to broadcast updates)
Ensures cluster-wide consistency for policy attachments.
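
For stores that delegate through GetUser/UpdateUser, attaching a policy reduces to a read-modify-write of the whole identity. A generic sketch of that pattern (the interface, helper, and types are assumptions):

```go
package credential

import (
	"context"
	"fmt"
)

// Identity has the same shape as in the MemoryStore sketch above.
type Identity struct {
	Name        string
	PolicyNames []string
}

// userBackend stands in for whatever GetUser/UpdateUser calls a store exposes.
type userBackend interface {
	GetUser(ctx context.Context, username string) (*Identity, error)
	UpdateUser(ctx context.Context, identity *Identity) error
}

// attachViaReadModifyWrite attaches a policy by loading the full identity,
// mutating PolicyNames, and writing the identity back through the backend.
func attachViaReadModifyWrite(ctx context.Context, b userBackend, username, policyName string) error {
	identity, err := b.GetUser(ctx, username)
	if err != nil {
		return fmt.Errorf("get user %q: %w", username, err)
	}
	for _, n := range identity.PolicyNames {
		if n == policyName {
			return nil // already attached
		}
	}
	identity.PolicyNames = append(identity.PolicyNames, policyName)
	return b.UpdateUser(ctx, identity)
}
```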

* s3api: refactor EmbeddedIamApi to use managed policy APIs

- Refactored `AttachUserPolicy`, `DetachUserPolicy`, and `ListAttachedUserPolicies`
  to use `e.credentialManager` directly.
- Fixed a critical error suppression bug in `ExecuteAction` that always
  returned success even on failure.
- Implemented robust error matching using string comparison fallbacks.
- Improved consistency by reloading configuration after policy changes.
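
The "robust error matching" mentioned above typically means preferring sentinel comparison but falling back to the message text, for example when the error has crossed a gRPC boundary and lost its concrete type. A hedged illustration of that pattern (not the actual SeaweedFS helper):

```go
package s3api

import (
	"errors"
	"strings"
)

// matchesSentinel reports whether err corresponds to the given sentinel error,
// first via errors.Is and then via a string-comparison fallback for errors
// that only carry the message text.
func matchesSentinel(err, sentinel error) bool {
	if err == nil {
		return false
	}
	if errors.Is(err, sentinel) {
		return true
	}
	return strings.Contains(err.Error(), sentinel.Error())
}
```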

* s3api: update and refine IAM integration tests

- Updated tests to use a real `MemoryStore`-backed `CredentialManager`.
- Refined test configuration synchronization using `sync.Once` and
  manual deep-copying to prevent state corruption.
- Improved `extractEmbeddedIamErrorCodeAndMessage` to handle more XML
  formats robustly.
- Adjusted test expectations to match current AWS IAM behavior.
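
The `sync.Once` plus deep-copy pattern described above might look roughly like this in a test helper (all names are illustrative):

```go
package s3api

import (
	"sync"
	"testing"
)

// testConfig stands in for the shared IAM test configuration.
type testConfig struct {
	Identities []testIdentity
}

type testIdentity struct {
	Name        string
	PolicyNames []string
}

var (
	baseConfigOnce sync.Once
	baseConfig     *testConfig
)

// freshTestConfig builds the shared baseline exactly once, then hands each
// test a deep copy so mutations in one test cannot corrupt another.
func freshTestConfig(t *testing.T) *testConfig {
	t.Helper()
	baseConfigOnce.Do(func() {
		baseConfig = &testConfig{
			Identities: []testIdentity{{Name: "alice", PolicyNames: []string{"ReadOnly"}}},
		}
	})
	cp := &testConfig{Identities: make([]testIdentity, len(baseConfig.Identities))}
	for i, id := range baseConfig.Identities {
		id.PolicyNames = append([]string(nil), id.PolicyNames...) // copy the slice, not just the header
		cp.Identities[i] = id
	}
	return cp
}
```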

* fix compilation

* visibility

* ensure 10 policies

* reload

* add integration tests

* Guard raft command registration

* Allow IAM actions in policy tests

* Validate gRPC policy attachments

* Revert Validate gRPC policy attachments

* Tighten gRPC policy attach/detach

* Improve IAM managed policy handling

* Improve managed policy filters
2026-02-19 12:26:27 -08:00

223 lines
5.6 KiB
Go

package weed_server

import (
	"encoding/json"
	"io"
	"math/rand/v2"
	"os"
	"path"
	"sync"
	"time"

	transport "github.com/Jille/raft-grpc-transport"
	hashicorpRaft "github.com/hashicorp/raft"
	"google.golang.org/grpc"

	"github.com/seaweedfs/raft"
	"github.com/seaweedfs/seaweedfs/weed/glog"
	"github.com/seaweedfs/seaweedfs/weed/pb"
	"github.com/seaweedfs/seaweedfs/weed/topology"
)

type RaftServerOption struct {
	GrpcDialOption    grpc.DialOption
	Peers             map[string]pb.ServerAddress
	ServerAddr        pb.ServerAddress
	DataDir           string
	Topo              *topology.Topology
	RaftResumeState   bool
	HeartbeatInterval time.Duration
	ElectionTimeout   time.Duration
	RaftBootstrap     bool
}

type RaftServer struct {
	peers            map[string]pb.ServerAddress // initial peers to join with
	raftServer       raft.Server
	RaftHashicorp    *hashicorpRaft.Raft
	TransportManager *transport.Manager
	dataDir          string
	serverAddr       pb.ServerAddress
	topo             *topology.Topology
	*raft.GrpcServer
}

// StateMachine adapts the topology's max-volume-id state to both the
// seaweedfs/raft StateMachine and the hashicorp/raft FSM interfaces.
type StateMachine struct {
	raft.StateMachine
	topo *topology.Topology
}

var _ hashicorpRaft.FSM = &StateMachine{}

// Save serializes the current topology state for the seaweedfs/raft snapshot path.
func (s StateMachine) Save() ([]byte, error) {
	state := topology.MaxVolumeIdCommand{
		MaxVolumeId: s.topo.GetMaxVolumeId(),
		TopologyId:  s.topo.GetTopologyId(),
	}
	glog.V(1).Infof("Save raft state %+v", state)
	return json.Marshal(state)
}

// Recovery restores topology state from a previously saved snapshot.
func (s StateMachine) Recovery(data []byte) error {
	state := topology.MaxVolumeIdCommand{}
	err := json.Unmarshal(data, &state)
	if err != nil {
		return err
	}
	glog.V(1).Infof("Recovery raft state %+v", state)
	s.topo.UpAdjustMaxVolumeId(state.MaxVolumeId)
	if state.TopologyId != "" {
		s.topo.SetTopologyId(state.TopologyId)
		glog.V(0).Infof("Recovered TopologyId: %s", state.TopologyId)
	}
	return nil
}

// Apply replays a single raft log entry against the topology (hashicorp/raft FSM).
func (s *StateMachine) Apply(l *hashicorpRaft.Log) interface{} {
	before := s.topo.GetMaxVolumeId()
	state := topology.MaxVolumeIdCommand{}
	err := json.Unmarshal(l.Data, &state)
	if err != nil {
		return err
	}
	s.topo.UpAdjustMaxVolumeId(state.MaxVolumeId)
	if state.TopologyId != "" {
		prevTopologyId := s.topo.GetTopologyId()
		s.topo.SetTopologyId(state.TopologyId)
		// Log when recovering TopologyId from Raft log replay, or setting it for the first time.
		if prevTopologyId == "" {
			glog.V(0).Infof("Set TopologyId from raft log: %s", state.TopologyId)
		}
	}
	glog.V(1).Infoln("max volume id", before, "==>", s.topo.GetMaxVolumeId())
	return nil
}

// Snapshot returns the current state as an FSMSnapshot (hashicorp/raft FSM).
func (s *StateMachine) Snapshot() (hashicorpRaft.FSMSnapshot, error) {
	return &topology.MaxVolumeIdCommand{
		MaxVolumeId: s.topo.GetMaxVolumeId(),
		TopologyId:  s.topo.GetTopologyId(),
	}, nil
}

// Restore rebuilds the state machine from a snapshot stream (hashicorp/raft FSM).
func (s *StateMachine) Restore(r io.ReadCloser) error {
	b, err := io.ReadAll(r)
	if err != nil {
		return err
	}
	if err := s.Recovery(b); err != nil {
		return err
	}
	return nil
}

var registerMaxVolumeIdCommandOnce sync.Once

// registerMaxVolumeIdCommand registers the MaxVolumeIdCommand with the raft
// library at most once per process, guarding against duplicate registration
// when multiple servers start in the same process.
func registerMaxVolumeIdCommand() {
	registerMaxVolumeIdCommandOnce.Do(func() {
		raft.RegisterCommand(&topology.MaxVolumeIdCommand{})
	})
}

// NewRaftServer creates and starts a seaweedfs/raft server for the master topology.
func NewRaftServer(option *RaftServerOption) (*RaftServer, error) {
	s := &RaftServer{
		peers:      option.Peers,
		serverAddr: option.ServerAddr,
		dataDir:    option.DataDir,
		topo:       option.Topo,
	}

	if glog.V(4) {
		raft.SetLogLevel(2)
	}

	registerMaxVolumeIdCommand()

	var err error
	transporter := raft.NewGrpcTransporter(option.GrpcDialOption)
	glog.V(0).Infof("Starting RaftServer with %v", option.ServerAddr)

	if !option.RaftResumeState {
		// clear previous log to ensure fresh start
		os.RemoveAll(path.Join(s.dataDir, "log"))
		// always clear previous metadata
		os.RemoveAll(path.Join(s.dataDir, "conf"))
		os.RemoveAll(path.Join(s.dataDir, "snapshot"))
	}
	if err := os.MkdirAll(path.Join(s.dataDir, "snapshot"), os.ModePerm); err != nil {
		return nil, err
	}

	stateMachine := StateMachine{topo: option.Topo}
	s.raftServer, err = raft.NewServer(string(s.serverAddr), s.dataDir, transporter, stateMachine, option.Topo, s.serverAddr.ToGrpcAddress())
	if err != nil {
		glog.V(0).Infoln(err)
		return nil, err
	}

	// Add up to 25% jitter to the heartbeat interval so peers do not time out in lockstep.
	heartbeatInterval := time.Duration(float64(option.HeartbeatInterval) * (rand.Float64()*0.25 + 1))
	s.raftServer.SetHeartbeatInterval(heartbeatInterval)
	s.raftServer.SetElectionTimeout(option.ElectionTimeout)
	if err := s.raftServer.LoadSnapshot(); err != nil {
		return nil, err
	}
	if err := s.raftServer.Start(); err != nil {
		return nil, err
	}

	for name, peer := range s.peers {
		if err := s.raftServer.AddPeer(name, peer.ToGrpcAddress()); err != nil {
			return nil, err
		}
	}

	// Remove peers that are no longer in the configured peer list.
	for existsPeerName := range s.raftServer.Peers() {
		if _, found := s.peers[existsPeerName]; !found {
			if err := s.raftServer.RemovePeer(existsPeerName); err != nil {
				glog.V(0).Infoln(err)
				return nil, err
			}
			glog.V(0).Infof("removing old peer: %s", existsPeerName)
		}
	}

	s.GrpcServer = raft.NewGrpcServer(s.raftServer)

	glog.V(0).Infof("current cluster leader: %v", s.raftServer.Leader())

	return s, nil
}

// Peers returns the names of the current raft cluster members.
func (s *RaftServer) Peers() (members []string) {
	if s.raftServer != nil {
		peers := s.raftServer.Peers()
		for _, p := range peers {
			members = append(members, p.Name)
		}
	} else if s.RaftHashicorp != nil {
		cfg := s.RaftHashicorp.GetConfiguration()
		for _, p := range cfg.Configuration().Servers {
			members = append(members, string(p.ID))
		}
	}
	return
}

// DoJoinCommand bootstraps a new cluster by having this server join itself.
func (s *RaftServer) DoJoinCommand() {
	glog.V(0).Infoln("Initializing new cluster")

	if _, err := s.raftServer.Do(&raft.DefaultJoinCommand{
		Name:             s.raftServer.Name(),
		ConnectionString: s.serverAddr.ToGrpcAddress(),
	}); err != nil {
		glog.Errorf("fail to send join command: %v", err)
	}
}