* fix(s3): include static identities in listing operations

Static identities loaded from the -s3.config file were only stored in the S3 API server's in-memory state. Listing operations (the s3.configure shell command, aws iam list-users) queried the credential manager, which only returned dynamic identities from the backend store.

Register static identities with the credential manager after loading so they are included in LoadConfiguration and ListUsers results, and filter them out before SaveConfiguration to avoid persisting them to the dynamic store (a sketch of this bookkeeping appears below).

Fixes https://github.com/seaweedfs/seaweedfs/discussions/8896

* fix: avoid mutating the caller's config and add defensive copies

- SaveConfiguration: use a shallow struct copy instead of mutating the caller's config.Identities field
- SetStaticIdentities: skip nil entries to avoid panics
- GetStaticIdentities: defensively copy the PolicyNames slice to avoid aliasing the original

* fix: filter nil static identities and sync on config reload

- SetStaticIdentities: filter nil entries from the stored slice (not just from staticNames) to prevent panics in LoadConfiguration/ListUsers
- Extract an updateCredentialManagerStaticIdentities helper and call it from both startup and the grace.OnReload handler so the credential manager's static snapshot stays current after config file reloads

* fix: add mutex for static identity fields and fix ListUsers for store callers

- Add a sync.RWMutex to protect staticIdentities/staticNames against concurrent reads during config reload
- Revert CredentialManager.ListUsers to return only store users, since internal callers (e.g. DeletePolicy) look up each user in the store and fail on non-existent static entries
- Merge static usernames in the filer gRPC ListUsers handler instead, via the new GetStaticUsernames method
- Fix CI: TestIAMPolicyManagement/managed_policy_crud_lifecycle was failing because DeletePolicy iterated static users that don't exist in the store

* fix: show static identities in admin UI and weed shell

The admin UI and the weed shell s3.configure command query the filer's credential manager via gRPC, which is a separate instance from the S3 server's credential manager. Static identities were only registered on the S3 server's credential manager, so they never appeared in the filer's responses.

- Add CredentialManager.LoadS3ConfigFile to parse a static S3 config file and register its identities
- Add FilerOptions.s3ConfigFile so the filer can load the same static config that the S3 server uses
- Wire s3ConfigFile through in weed mini and weed server modes
- Merge static usernames in the filer gRPC ListUsers handler
- Add a CredentialManager.GetStaticUsernames helper
- Add a sync.RWMutex to protect concurrent access to static identity fields
- Avoid importing weed/filer from weed/credential (which pulled in filer store init() registrations and broke test isolation)
- Add docker/compose/s3_static_users_example.json
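A minimal sketch of the static-identity bookkeeping described in the commits above. The field and method names (staticIdentities, staticNames, SetStaticIdentities) come from the commit notes, but the struct layout, the signatures, and the dynamicOnly helper are assumptions for illustration, not the actual weed/credential implementation:

// Sketch only: not the real weed/credential package. Names follow the commit
// messages above; exact signatures are assumptions.
package credential

import (
	"sync"

	"github.com/seaweedfs/seaweedfs/weed/pb/iam_pb"
)

type CredentialManager struct {
	mu               sync.RWMutex       // protects the static snapshot during config reloads
	staticIdentities []*iam_pb.Identity // identities loaded from the -s3.config file
	staticNames      map[string]bool    // fast "is this user static?" lookup
	// dynamic store fields elided
}

// SetStaticIdentities replaces the static snapshot, skipping nil entries so
// LoadConfiguration and ListUsers cannot panic on a malformed config file.
func (cm *CredentialManager) SetStaticIdentities(identities []*iam_pb.Identity) {
	cm.mu.Lock()
	defer cm.mu.Unlock()
	cm.staticIdentities = nil
	cm.staticNames = make(map[string]bool)
	for _, identity := range identities {
		if identity == nil {
			continue
		}
		cm.staticIdentities = append(cm.staticIdentities, identity)
		cm.staticNames[identity.Name] = true
	}
}

// dynamicOnly (hypothetical helper) returns the identities SaveConfiguration
// should persist, leaving file-managed entries out of the dynamic store. The
// real change also shallow-copies the config so the caller's Identities slice
// is never mutated; only the filtering step is shown here.
func (cm *CredentialManager) dynamicOnly(identities []*iam_pb.Identity) []*iam_pb.Identity {
	cm.mu.RLock()
	defer cm.mu.RUnlock()
	var dynamic []*iam_pb.Identity
	for _, identity := range identities {
		if identity != nil && !cm.staticNames[identity.Name] {
			dynamic = append(dynamic, identity)
		}
	}
	return dynamic
}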
* fix(admin): make static users read-only in admin UI

Static users loaded from the -s3.config file should not be editable or deletable through the admin UI, since they are managed via the config file.

- Add an IsStatic field to ObjectStoreUser, set from the credential manager
- Hide the edit, delete, and access key buttons for static users in the users table template
- Show a "static" badge next to static user names
- Return 403 Forbidden from the UpdateUser and DeleteUser API handlers when the target user is a static identity

* fix(admin): show details for static users

GetObjectStoreUserDetails called credentialManager.GetUser, which only queries the dynamic store. For static users this returned ErrUserNotFound. Fall back to GetStaticIdentity when the store lookup fails.

* fix(admin): load static S3 identities in admin server

The admin server has its own credential manager (gRPC store), which is a separate instance from the S3 server's and the filer's. It had no static identity data, so IsStaticIdentity returned false (edit/delete buttons shown) and GetStaticIdentity returned nil (details page failed).

Pass the -s3.config file path through to the admin server and call LoadS3ConfigFile on its credential manager, matching the approach used for the filer.

* fix: use protobuf is_static field instead of passing config file path

The previous approach passed the -s3.config file path to every component (filer, admin). This is wrong because the admin server should not need to know about S3 config files.

Instead, add an is_static field to the Identity protobuf message. The field is set when static identities are serialized (in GetStaticIdentities and LoadS3ConfigFile). Any gRPC client that loads configuration via GetConfiguration automatically sees which identities are static, without needing the config file (see the sketch below).

- Add the is_static field (tag 8) to the iam_pb.Identity proto message
- Set IsStatic=true in GetStaticIdentities and LoadS3ConfigFile
- Admin GetObjectStoreUsers reads identity.IsStatic from the proto
- Admin IsStaticUser helper loads the configuration via gRPC to check the flag
- Filer GetUser gRPC handler falls back to GetStaticIdentity
- Remove s3ConfigFile from AdminOptions and the NewAdminServer signature
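To illustrate why the is_static flag removes the need for any other component to read the -s3.config file, here is a hypothetical helper for a gRPC consumer such as the admin server. It assumes the iam_pb.Identity message carries the new IsStatic field alongside the existing Name and Credentials fields, and it maps into the ObjectStoreUser type defined in the admin file that follows; the helper itself (toObjectStoreUsers) is not part of the actual code.

// Hypothetical sketch, written as if it sat next to the package dash file below.
package dash

import "github.com/seaweedfs/seaweedfs/weed/pb/iam_pb"

// toObjectStoreUsers marks static users using only the configuration loaded
// over gRPC; the is_static flag travels with each Identity message, so the
// client never needs the -s3.config file itself.
func toObjectStoreUsers(config *iam_pb.S3ApiConfiguration) []ObjectStoreUser {
	var users []ObjectStoreUser
	for _, identity := range config.Identities {
		if identity == nil {
			continue
		}
		user := ObjectStoreUser{
			Username: identity.Name,
			IsStatic: identity.IsStatic, // set by GetStaticIdentities / LoadS3ConfigFile
		}
		if len(identity.Credentials) > 0 {
			user.AccessKey = identity.Credentials[0].AccessKey
			user.SecretKey = identity.Credentials[0].SecretKey
		}
		users = append(users, user)
	}
	return users
}

Handlers such as UpdateUser and DeleteUser can then return 403 Forbidden whenever the target user's IsStatic is true, which is the read-only behavior described above. The admin data structures that consume this flag are in the file below.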
333 lines · 10 KiB · Go
package dash

import (
	"context"
	"net/http"
	"sort"
	"time"

	"github.com/seaweedfs/seaweedfs/weed/cluster"
	"github.com/seaweedfs/seaweedfs/weed/glog"
	"github.com/seaweedfs/seaweedfs/weed/iam"
	"github.com/seaweedfs/seaweedfs/weed/pb"
	"github.com/seaweedfs/seaweedfs/weed/pb/master_pb"
)

// Access key status constants
const (
	AccessKeyStatusActive   = iam.AccessKeyStatusActive
	AccessKeyStatusInactive = iam.AccessKeyStatusInactive
)

type AdminData struct {
	Username          string              `json:"username"`
	TotalVolumes      int                 `json:"total_volumes"`
	TotalFiles        int64               `json:"total_files"`
	TotalSize         int64               `json:"total_size"`
	VolumeSizeLimitMB uint64              `json:"volume_size_limit_mb"`
	MasterNodes       []MasterNode        `json:"master_nodes"`
	VolumeServers     []VolumeServer      `json:"volume_servers"`
	FilerNodes        []FilerNode         `json:"filer_nodes"`
	MessageBrokers    []MessageBrokerNode `json:"message_brokers"`
	DataCenters       []DataCenter        `json:"datacenters"`
	LastUpdated       time.Time           `json:"last_updated"`

	// EC shard totals for dashboard
	TotalEcVolumes int `json:"total_ec_volumes"` // Total number of EC volumes across all servers
	TotalEcShards  int `json:"total_ec_shards"`  // Total number of EC shards across all servers
}

// Object Store Users management structures
type ObjectStoreUser struct {
	Username    string   `json:"username"`
	Email       string   `json:"email"`
	AccessKey   string   `json:"access_key"`
	SecretKey   string   `json:"secret_key"`
	Permissions []string `json:"permissions"`
	PolicyNames []string `json:"policy_names"`
	IsStatic    bool     `json:"is_static"` // loaded from static config file, not editable
}

type ObjectStoreUsersData struct {
	Username         string            `json:"username"`
	Users            []ObjectStoreUser `json:"users"`
	TotalUsers       int               `json:"total_users"`
	HasAnonymousUser bool              `json:"has_anonymous_user"`
	LastUpdated      time.Time         `json:"last_updated"`
}

// User management request structures
type CreateUserRequest struct {
	Username    string   `json:"username" binding:"required"`
	Email       string   `json:"email"`
	Actions     []string `json:"actions"`
	GenerateKey bool     `json:"generate_key"`
	PolicyNames []string `json:"policy_names"`
}

type UpdateUserRequest struct {
	Email       string   `json:"email"`
	Actions     []string `json:"actions"`
	PolicyNames []string `json:"policy_names"`
}

type UpdateUserPoliciesRequest struct {
	Actions []string `json:"actions" binding:"required"`
}

type AccessKeyInfo struct {
	AccessKey string    `json:"access_key"`
	SecretKey string    `json:"secret_key"`
	Status    string    `json:"status"`
	CreatedAt time.Time `json:"created_at"`
}

type CreateAccessKeyRequest struct {
	AccessKey string `json:"access_key"`
	SecretKey string `json:"secret_key"`
}

type UpdateAccessKeyStatusRequest struct {
	Status string `json:"status" binding:"required"`
}

type UserDetails struct {
	Username    string          `json:"username"`
	Email       string          `json:"email"`
	Actions     []string        `json:"actions"`
	PolicyNames []string        `json:"policy_names"`
	AccessKeys  []AccessKeyInfo `json:"access_keys"`
	Groups      []string        `json:"groups"`
}

type FilerNode struct {
	Address     string    `json:"address"`
	DataCenter  string    `json:"datacenter"`
	Rack        string    `json:"rack"`
	LastUpdated time.Time `json:"last_updated"`
}

type MessageBrokerNode struct {
	Address     string    `json:"address"`
	DataCenter  string    `json:"datacenter"`
	Rack        string    `json:"rack"`
	LastUpdated time.Time `json:"last_updated"`
}

// GetAdminData retrieves admin data as a struct (for reuse by both JSON and HTML handlers)
func (s *AdminServer) GetAdminData(username string) (AdminData, error) {
	if username == "" {
		username = "admin"
	}

	// Get cluster topology
	topology, err := s.GetClusterTopology()
	if err != nil {
		glog.Errorf("Failed to get cluster topology: %v", err)
		return AdminData{}, err
	}

	// Get volume servers data with EC shard information
	volumeServersData, err := s.GetClusterVolumeServers()
	if err != nil {
		glog.Errorf("Failed to get cluster volume servers: %v", err)
		return AdminData{}, err
	}

	// Get master nodes status
	masterNodes := s.getMasterNodesStatus()

	// Get filer nodes status
	filerNodes := s.getFilerNodesStatus()

	// Get message broker nodes status
	messageBrokers := s.getMessageBrokerNodesStatus()

	// Get volume size limit from master configuration
	var volumeSizeLimitMB uint64 = 30000 // Default to 30GB
	err = s.WithMasterClient(func(client master_pb.SeaweedClient) error {
		resp, err := client.GetMasterConfiguration(context.Background(), &master_pb.GetMasterConfigurationRequest{})
		if err != nil {
			return err
		}
		volumeSizeLimitMB = uint64(resp.VolumeSizeLimitMB)
		return nil
	})
	if err != nil {
		glog.Warningf("Failed to get volume size limit from master: %v", err)
		// Keep default value on error
	}

	// Calculate EC shard totals
	var totalEcVolumes, totalEcShards int
	ecVolumeSet := make(map[uint32]bool) // To avoid counting the same EC volume multiple times

	for _, vs := range volumeServersData.VolumeServers {
		totalEcShards += vs.EcShards
		// Count unique EC volumes across all servers
		for _, ecInfo := range vs.EcShardDetails {
			ecVolumeSet[ecInfo.VolumeID] = true
		}
	}
	totalEcVolumes = len(ecVolumeSet)

	// Prepare admin data
	adminData := AdminData{
		Username:          username,
		TotalVolumes:      topology.TotalVolumes,
		TotalFiles:        topology.TotalFiles,
		TotalSize:         topology.TotalSize,
		VolumeSizeLimitMB: volumeSizeLimitMB,
		MasterNodes:       masterNodes,
		VolumeServers:     volumeServersData.VolumeServers,
		FilerNodes:        filerNodes,
		MessageBrokers:    messageBrokers,
		DataCenters:       topology.DataCenters,
		LastUpdated:       topology.UpdatedAt,
		TotalEcVolumes:    totalEcVolumes,
		TotalEcShards:     totalEcShards,
	}

	return adminData, nil
}

// ShowAdmin displays the main admin page (now uses GetAdminData)
func (s *AdminServer) ShowAdmin(w http.ResponseWriter, r *http.Request) {
	username := UsernameFromContext(r.Context())

	adminData, err := s.GetAdminData(username)
	if err != nil {
		writeJSONError(w, http.StatusInternalServerError, "Failed to get admin data: "+err.Error())
		return
	}

	// Return JSON for API calls
	writeJSON(w, http.StatusOK, adminData)
}

// ShowOverview displays cluster overview
func (s *AdminServer) ShowOverview(w http.ResponseWriter, r *http.Request) {
	topology, err := s.GetClusterTopology()
	if err != nil {
		writeJSONError(w, http.StatusInternalServerError, err.Error())
		return
	}

	writeJSON(w, http.StatusOK, topology)
}

// getMasterNodesStatus checks status of all master nodes
func (s *AdminServer) getMasterNodesStatus() []MasterNode {
	var masterNodes []MasterNode

	// Since we have a single master address, create one entry
	var isLeader bool = true // Assume leader since it's the only master we know about

	// Try to get leader info from this master
	err := s.WithMasterClient(func(client master_pb.SeaweedClient) error {
		_, err := client.GetMasterConfiguration(context.Background(), &master_pb.GetMasterConfigurationRequest{})
		if err != nil {
			return err
		}
		// For now, assume this master is the leader since we can connect to it
		isLeader = true
		return nil
	})

	if err != nil {
		isLeader = false
	}

	currentMaster := s.masterClient.GetMaster(context.Background())
	if currentMaster != "" {
		masterNodes = append(masterNodes, MasterNode{
			Address:  pb.ServerAddress(currentMaster).ToHttpAddress(),
			IsLeader: isLeader,
		})
	}

	return masterNodes
}

// getFilerNodesStatus checks status of all filer nodes using master's ListClusterNodes
func (s *AdminServer) getFilerNodesStatus() []FilerNode {
	var filerNodes []FilerNode

	// Get filer nodes from master using ListClusterNodes
	err := s.WithMasterClient(func(client master_pb.SeaweedClient) error {
		resp, err := client.ListClusterNodes(context.Background(), &master_pb.ListClusterNodesRequest{
			ClientType: cluster.FilerType,
		})
		if err != nil {
			return err
		}

		// Process each filer node
		for _, node := range resp.ClusterNodes {
			filerNodes = append(filerNodes, FilerNode{
				Address:     pb.ServerAddress(node.Address).ToHttpAddress(),
				DataCenter:  node.DataCenter,
				Rack:        node.Rack,
				LastUpdated: time.Now(),
			})
		}

		return nil
	})

	if err != nil {
		currentMaster := s.masterClient.GetMaster(context.Background())
		glog.Errorf("Failed to get filer nodes from master %s: %v", currentMaster, err)
		// Return empty list if we can't get filer info from master
		return []FilerNode{}
	}

	// Sort filer nodes by address for consistent ordering on page refresh
	sort.Slice(filerNodes, func(i, j int) bool {
		return filerNodes[i].Address < filerNodes[j].Address
	})

	return filerNodes
}

// getMessageBrokerNodesStatus checks status of all message broker nodes using master's ListClusterNodes
func (s *AdminServer) getMessageBrokerNodesStatus() []MessageBrokerNode {
	var messageBrokers []MessageBrokerNode

	// Get message broker nodes from master using ListClusterNodes
	err := s.WithMasterClient(func(client master_pb.SeaweedClient) error {
		resp, err := client.ListClusterNodes(context.Background(), &master_pb.ListClusterNodesRequest{
			ClientType: cluster.BrokerType,
		})
		if err != nil {
			return err
		}

		// Process each message broker node
		for _, node := range resp.ClusterNodes {
			messageBrokers = append(messageBrokers, MessageBrokerNode{
				Address:     node.Address,
				DataCenter:  node.DataCenter,
				Rack:        node.Rack,
				LastUpdated: time.Now(),
			})
		}

		return nil
	})

	if err != nil {
		currentMaster := s.masterClient.GetMaster(context.Background())
		glog.Errorf("Failed to get message broker nodes from master %s: %v", currentMaster, err)
		// Return empty list if we can't get broker info from master
		return []MessageBrokerNode{}
	}

	// Sort message broker nodes by address for consistent ordering on page refresh
	sort.Slice(messageBrokers, func(i, j int) bool {
		return messageBrokers[i].Address < messageBrokers[j].Address
	})

	return messageBrokers
}