IAM Policy Management via gRPC (#8109)

* Add IAM gRPC service definition

- Add GetConfiguration/PutConfiguration for config management
- Add CreateUser/GetUser/UpdateUser/DeleteUser/ListUsers for user management
- Add CreateAccessKey/DeleteAccessKey/GetUserByAccessKey for access key management
- Methods mirror existing IAM HTTP API functionality

* Add IAM gRPC handlers on filer server

- Implement IamGrpcServer with CredentialManager integration
- Handle configuration get/put operations
- Handle user CRUD operations
- Handle access key create/delete operations
- All methods delegate to CredentialManager for actual storage

* Wire IAM gRPC service to filer server

- Add CredentialManager field to FilerOption and FilerServer
- Import credential store implementations in filer command
- Initialize CredentialManager from credential.toml if available
- Register IAM gRPC service on filer gRPC server
- Enable credential management via gRPC alongside existing filer services

* Regenerate IAM protobuf with gRPC service methods

* iam_pb: add Policy Management to protobuf definitions

* credential: implement PolicyManager in credential stores

* filer: implement IAM Policy Management RPCs

* shell: add s3.policy command

* test: add integration test for s3.policy

* test: fix compilation errors in policy_test

* pb

* fmt

* test

* weed shell: add -policies flag to s3.configure

This allows linking/unlinking IAM policies to/from identities
directly from the s3.configure command.

* test: verify s3.configure policy linking and fix port allocation

- Added test case for linking policies to users via s3.configure
- Implemented findAvailablePortPair to ensure HTTP and gRPC ports
  are both available, avoiding conflicts with randomized port assignments.
- Updated assertion to match jsonpb output (policyNames)

* credential: add StoreTypeGrpc constant

* credential: add IAM gRPC store boilerplate

* credential: implement identity methods in gRPC store

* credential: implement policy methods in gRPC store

* admin: use gRPC credential store for AdminServer

This ensures that all IAM and policy changes made through the Admin UI
are persisted via the Filer's IAM gRPC service instead of direct file manipulation.

* shell: s3.configure use granular IAM gRPC APIs instead of full config patching

* shell: s3.configure use granular IAM gRPC APIs

* shell: replace deprecated ioutil with os in s3.policy

* filer: use gRPC FailedPrecondition for unconfigured credential manager

* test: improve s3.policy integration tests and fix error checks

* ci: add s3 policy shell integration tests to github workflow

* filer: fix LoadCredentialConfiguration error handling

* credential/grpc: propagate unmarshal errors in GetPolicies

* filer/grpc: improve error handling and validation

* shell: use gRPC status codes in s3.configure

* credential: document PutPolicy as create-or-replace

* credential/postgres: reuse CreatePolicy in PutPolicy to deduplicate logic

* shell: add timeout context and strictly enforce flags in s3.policy

* iam: standardize policy content field naming in gRPC and proto

* shell: extract slice helper functions in s3.configure

* filer: map credential store errors to gRPC status codes

* filer: add input validation for UpdateUser and CreateAccessKey

* iam: improve validation in policy and config handlers

* filer: ensure IAM service registration by defaulting credential manager

* credential: add GetStoreName method to manager

* test: verify policy deletion in integration test
This commit is contained in:
Chris Lu
2026-01-25 13:39:30 -08:00
committed by GitHub
parent 59d40f7186
commit 6bf088cec9
38 changed files with 3853 additions and 741 deletions

View File

@@ -0,0 +1,296 @@
package policy
import (
"context"
"fmt"
"net"
"net/http"
"os"
"os/exec"
"path/filepath"
"strconv"
"strings"
"sync"
"testing"
"time"
"github.com/seaweedfs/seaweedfs/weed/command"
"github.com/seaweedfs/seaweedfs/weed/glog"
flag "github.com/seaweedfs/seaweedfs/weed/util/fla9"
"github.com/stretchr/testify/require"
)
// TestCluster manages the weed mini instance for integration testing
type TestCluster struct {
	dataDir string // per-test temp dir holding server data and the generated config files

	// ctx/cancel control the lifetime of the goroutine that runs the in-process server.
	ctx    context.Context
	cancel context.CancelFunc

	isRunning bool           // set once the S3 endpoint has answered an HTTP request
	wg        sync.WaitGroup // tracks the goroutine running the "mini" command

	// HTTP ports for each component; the matching gRPC ports are each HTTP port + 10000
	// (see findAvailablePortPair).
	masterPort int
	volumePort int
	filerPort  int
	s3Port     int

	s3Endpoint string // e.g. "http://127.0.0.1:<s3Port>"
}
// TestS3PolicyShellRevised is an end-to-end test of the s3.policy and
// s3.configure shell commands against a live mini cluster. It exercises
// policy put/list/get/delete, linking a policy to an identity, granular
// action add/delete, access key create/delete, and user deletion.
//
// NOTE(review): execShell runs a "weed" binary found on PATH; this assumes
// that binary matches the code under test — confirm in CI setup.
func TestS3PolicyShellRevised(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration test in short mode")
	}
	cluster, err := startMiniCluster(t)
	require.NoError(t, err)
	defer cluster.Stop()
	// Minimal allow-all IAM policy document used throughout the test.
	policyContent := `{"Version":"2012-10-17","Statement":[{"Effect":"Allow","Action":"*","Resource":"*"}]}`
	tmpPolicyFile, err := os.CreateTemp("", "test_policy_*.json")
	if err != nil {
		t.Fatalf("Failed to create temp policy file: %v", err)
	}
	defer os.Remove(tmpPolicyFile.Name())
	_, err = tmpPolicyFile.WriteString(policyContent)
	require.NoError(t, err)
	require.NoError(t, tmpPolicyFile.Close())
	weedCmd := "weed"
	masterAddr := fmt.Sprintf("127.0.0.1:%d", cluster.masterPort)
	filerAddr := fmt.Sprintf("127.0.0.1:%d", cluster.filerPort)
	// Put
	execShell(t, weedCmd, masterAddr, filerAddr, fmt.Sprintf("s3.policy -put -name=testpolicy -file=%s", tmpPolicyFile.Name()))
	// List
	out := execShell(t, weedCmd, masterAddr, filerAddr, "s3.policy -list")
	if !contains(out, "Name: testpolicy") {
		t.Errorf("List failed: %s", out)
	}
	// Get
	out = execShell(t, weedCmd, masterAddr, filerAddr, "s3.policy -get -name=testpolicy")
	if !contains(out, "Statement") {
		t.Errorf("Get failed: %s", out)
	}
	// Delete
	execShell(t, weedCmd, masterAddr, filerAddr, "s3.policy -delete -name=testpolicy")
	// Verify
	out = execShell(t, weedCmd, masterAddr, filerAddr, "s3.policy -list")
	if contains(out, "Name: testpolicy") {
		t.Errorf("delete failed, policy 'testpolicy' should not be in the list: %s", out)
	}
	// Verify s3.configure linking policies
	execShell(t, weedCmd, masterAddr, filerAddr, "s3.configure -user=test -actions=Read -policies=testpolicy -apply")
	out = execShell(t, weedCmd, masterAddr, filerAddr, "s3.configure")
	// First check the exact jsonpb-indented rendering; fall back to a looser
	// check so the test does not depend on jsonpb's precise whitespace.
	if !contains(out, "\"policyNames\": [\n \"testpolicy\"\n ]") {
		// relaxed check
		if !contains(out, "\"testpolicy\"") || !contains(out, "policyNames") {
			t.Errorf("s3.configure failed to link policy: %s", out)
		}
	}
	// 1. Update User: Add Write action
	execShell(t, weedCmd, masterAddr, filerAddr, "s3.configure -user=test -actions=Write -apply")
	out = execShell(t, weedCmd, masterAddr, filerAddr, "s3.configure")
	if !contains(out, "Write") {
		t.Errorf("s3.configure failed to add Write action: %s", out)
	}
	// 2. Granular Delete: Delete Read action
	execShell(t, weedCmd, masterAddr, filerAddr, "s3.configure -user=test -actions=Read -delete -apply")
	out = execShell(t, weedCmd, masterAddr, filerAddr, "s3.configure")
	if contains(out, "\"Read\"") { // Quote to avoid matching partial words if any
		t.Errorf("s3.configure failed to delete Read action: %s", out)
	}
	if !contains(out, "Write") {
		t.Errorf("s3.configure deleted Write action unnecessarily: %s", out)
	}
	// 3. Access Key Management
	execShell(t, weedCmd, masterAddr, filerAddr, "s3.configure -user=test -access_key=testkey -secret_key=testsecret -apply")
	out = execShell(t, weedCmd, masterAddr, filerAddr, "s3.configure")
	if !contains(out, "testkey") {
		t.Errorf("s3.configure failed to add access key: %s", out)
	}
	execShell(t, weedCmd, masterAddr, filerAddr, "s3.configure -user=test -access_key=testkey -delete -apply")
	out = execShell(t, weedCmd, masterAddr, filerAddr, "s3.configure")
	if contains(out, "testkey") {
		t.Errorf("s3.configure failed to delete access key: %s", out)
	}
	// 4. Delete User
	execShell(t, weedCmd, masterAddr, filerAddr, "s3.configure -user=test -delete -apply")
	out = execShell(t, weedCmd, masterAddr, filerAddr, "s3.configure")
	if contains(out, "\"Name\": \"test\"") {
		t.Errorf("s3.configure failed to delete user: %s", out)
	}
}
// execShell pipes a single command into "weed shell" connected to the given
// master and filer, and returns the combined stdout/stderr. The test fails
// immediately if the process exits with an error.
func execShell(t *testing.T, weedCmd, master, filer, shellCmd string) string {
	// weed shell -master=... -filer=...
	shellArgs := []string{"shell", "-master=" + master, "-filer=" + filer}
	t.Logf("Running: %s %v <<< %s", weedCmd, shellArgs, shellCmd)
	proc := exec.Command(weedCmd, shellArgs...)
	proc.Stdin = strings.NewReader(shellCmd + "\n")
	output, err := proc.CombinedOutput()
	if err != nil {
		t.Fatalf("Failed to run %s: %v\nOutput: %s", shellCmd, err, string(output))
	}
	return string(output)
}
// --- Test setup helpers ---
// findAvailablePort asks the kernel for a free TCP port on loopback by
// binding to port 0 and reading back the port it was assigned.
func findAvailablePort() (int, error) {
	probe, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		return 0, err
	}
	port := probe.Addr().(*net.TCPAddr).Port
	probe.Close()
	return port, nil
}
// findAvailablePortPair finds an available http port P such that P and
// P+10000 (grpc) are both available.
//
// The HTTP-port listener is kept open while the gRPC port is probed, which
// shrinks (though cannot eliminate) the race window in which another process
// could grab the HTTP port between the check and the caller's actual bind.
// Candidates whose gRPC counterpart would exceed 65535 are skipped outright
// instead of attempting a listen that is guaranteed to fail.
func findAvailablePortPair() (int, int, error) {
	const grpcOffset = 10000
	for i := 0; i < 100; i++ {
		httpListener, err := net.Listen("tcp", "127.0.0.1:0")
		if err != nil {
			return 0, 0, err
		}
		httpPort := httpListener.Addr().(*net.TCPAddr).Port
		grpcPort := httpPort + grpcOffset
		if grpcPort > 65535 {
			// P+10000 is not a valid TCP port; try another candidate.
			httpListener.Close()
			continue
		}
		// check if grpc port is available while still holding the http port
		grpcListener, err := net.Listen("tcp", fmt.Sprintf("127.0.0.1:%d", grpcPort))
		httpListener.Close()
		if err == nil {
			grpcListener.Close()
			return httpPort, grpcPort, nil
		}
	}
	return 0, 0, fmt.Errorf("failed to find available port pair")
}
// startMiniCluster boots an all-in-one SeaweedFS "mini" server (master,
// volume, filer, s3) in a background goroutine and waits until the S3
// endpoint answers HTTP requests. Config files (security.toml,
// credential.toml) are written into a per-test temp dir so state is
// isolated per test run.
//
// NOTE(review): this mutates process-global state (os.Args, working
// directory) for the lifetime of the cluster, so tests using it must not
// run in parallel within the same process.
func startMiniCluster(t *testing.T) (*TestCluster, error) {
	// Each component needs an HTTP port P plus a gRPC port at P+10000.
	masterPort, masterGrpcPort, err := findAvailablePortPair()
	require.NoError(t, err)
	volumePort, volumeGrpcPort, err := findAvailablePortPair()
	require.NoError(t, err)
	filerPort, filerGrpcPort, err := findAvailablePortPair()
	require.NoError(t, err)
	s3Port, s3GrpcPort, err := findAvailablePortPair()
	require.NoError(t, err)
	testDir := t.TempDir()
	ctx, cancel := context.WithCancel(context.Background())
	s3Endpoint := fmt.Sprintf("http://127.0.0.1:%d", s3Port)
	cluster := &TestCluster{
		dataDir:    testDir,
		ctx:        ctx,
		cancel:     cancel,
		masterPort: masterPort,
		volumePort: volumePort,
		filerPort:  filerPort,
		s3Port:     s3Port,
		s3Endpoint: s3Endpoint,
	}
	// Disable authentication for tests
	securityToml := filepath.Join(testDir, "security.toml")
	err = os.WriteFile(securityToml, []byte("# Empty security config\n"), 0644)
	require.NoError(t, err)
	// Configure credential store for IAM tests
	credentialToml := filepath.Join(testDir, "credential.toml")
	credentialConfig := `
[credential.memory]
enabled = true
`
	err = os.WriteFile(credentialToml, []byte(credentialConfig), 0644)
	require.NoError(t, err)
	cluster.wg.Add(1)
	go func() {
		defer cluster.wg.Done()
		// The "mini" command reads its config files from the working
		// directory and its flags from os.Args, so both are swapped in for
		// the duration of the run and restored on exit.
		oldDir, _ := os.Getwd()
		oldArgs := os.Args
		defer func() {
			os.Chdir(oldDir)
			os.Args = oldArgs
		}()
		os.Chdir(testDir)
		os.Args = []string{
			"weed",
			"-dir=" + testDir,
			"-master.port=" + strconv.Itoa(masterPort),
			"-master.port.grpc=" + strconv.Itoa(masterGrpcPort),
			"-volume.port=" + strconv.Itoa(volumePort),
			"-volume.port.grpc=" + strconv.Itoa(volumeGrpcPort),
			"-filer.port=" + strconv.Itoa(filerPort),
			"-filer.port.grpc=" + strconv.Itoa(filerGrpcPort),
			"-s3.port=" + strconv.Itoa(s3Port),
			"-s3.port.grpc=" + strconv.Itoa(s3GrpcPort),
			"-webdav.port=0",
			"-admin.ui=false",
			"-master.volumeSizeLimitMB=32",
			"-ip=127.0.0.1",
			"-master.peers=none",
		}
		glog.MaxSize = 1024 * 1024
		// Find and run the "mini" command in-process; Run blocks until the
		// server exits.
		for _, cmd := range command.Commands {
			if cmd.Name() == "mini" && cmd.Run != nil {
				cmd.Flag.Parse(os.Args[1:])
				cmd.Run(cmd, cmd.Flag.Args())
				return
			}
		}
	}()
	// Wait for S3
	err = waitForS3Ready(cluster.s3Endpoint, 60*time.Second)
	if err != nil {
		cancel()
		return nil, err
	}
	cluster.isRunning = true
	return cluster, nil
}
// waitForS3Ready polls the S3 endpoint until any HTTP response arrives or
// the timeout elapses. A response with any status code counts as ready;
// only transport-level errors cause another retry.
func waitForS3Ready(endpoint string, timeout time.Duration) error {
	httpClient := &http.Client{Timeout: 1 * time.Second}
	giveUp := time.Now().Add(timeout)
	for time.Now().Before(giveUp) {
		if resp, err := httpClient.Get(endpoint); err == nil {
			resp.Body.Close()
			return nil
		}
		time.Sleep(200 * time.Millisecond)
	}
	return fmt.Errorf("timeout waiting for S3")
}
// Stop cancels the cluster context and best-effort resets the "mini"
// command's flags to their defaults so the shared command object can be
// reused by a later test in the same process.
func (c *TestCluster) Stop() {
	if c.cancel != nil {
		c.cancel()
	}
	if c.isRunning {
		// Give the in-process servers a moment to observe cancellation.
		// NOTE(review): a fixed sleep is a heuristic — there is no explicit
		// shutdown signal to wait on here (wg.Wait would block if the
		// server goroutine never returns).
		time.Sleep(500 * time.Millisecond)
	}
	// Simplified stop
	for _, cmd := range command.Commands {
		if cmd.Name() == "mini" {
			// Restore every flag to its default value; Set errors are
			// intentionally ignored in this best-effort cleanup.
			cmd.Flag.VisitAll(func(f *flag.Flag) {
				f.Value.Set(f.DefValue)
			})
			break
		}
	}
}
// contains reports whether substr occurs within s.
func contains(s, substr string) bool {
	return strings.Index(s, substr) != -1
}