* S3: Implement IAM defaults and STS signing key fallback logic * S3: Refactor startup order to init SSE-S3 key manager before IAM * S3: Derive STS signing key from KEK using HKDF for security isolation * S3: Document STS signing key fallback in security.toml * fix(s3api): refine anonymous access logic and secure-by-default behavior - Initialize anonymous identity by default in `NewIdentityAccessManagement` to prevent nil pointer exceptions. - Ensure `ReplaceS3ApiConfiguration` preserves the anonymous identity if not present in the new configuration. - Update `NewIdentityAccessManagement` signature to accept `filerClient`. - In legacy mode (no policy engine), anonymous defaults to Deny (no actions), preserving secure-by-default behavior. - Use specific `LookupAnonymous` method instead of generic map lookup. - Update tests to accommodate signature changes and verify improved anonymous handling. * feat(s3api): make IAM configuration optional - Start S3 API server without a configuration file if `EnableIam` option is set. - Default to `Allow` effect for policy engine when no configuration is provided (Zero-Config mode). - Handle empty configuration path gracefully in `loadIAMManagerFromConfig`. - Add integration test `iam_optional_test.go` to verify empty config behavior. * fix(iamapi): fix signature mismatch in NewIdentityAccessManagementWithStore * fix(iamapi): properly initialize FilerClient instead of passing nil * fix(iamapi): properly initialize filer client for IAM management - Instead of passing `nil`, construct a `wdclient.FilerClient` using the provided `Filers` addresses. - Ensure `NewIdentityAccessManagementWithStore` receives a valid `filerClient` to avoid potential nil pointer dereferences or limited functionality. 
* clean: remove dead code in s3api_server.go * refactor(s3api): improve IAM initialization, safety and anonymous access security * fix(s3api): ensure IAM config loads from filer after client init * fix(s3): resolve test failures in integration, CORS, and tagging tests - Fix CORS tests by providing explicit anonymous permissions config - Fix S3 integration tests by setting admin credentials in init - Align tagging test credentials in CI with IAM defaults - Added goroutine to retry IAM config load in iamapi server * fix(s3): allow anonymous access to health targets and S3 Tables when identities are present * fix(ci): use /healthz for Caddy health check in awscli tests * iam, s3api: expose DefaultAllow from IAM and Policy Engine This allows checking the global "Open by Default" configuration from other components like S3 Tables. * s3api/s3tables: support DefaultAllow in permission logic and handler Updated CheckPermissionWithContext to respect the DefaultAllow flag in PolicyContext. This enables "Open by Default" behavior for unauthenticated access in zero-config environments. Added a targeted unit test to verify the logic. * s3api/s3tables: propagate DefaultAllow through handlers Propagated the DefaultAllow flag to individual handlers for namespaces, buckets, tables, policies, and tagging. This ensures consistent "Open by Default" behavior across all S3 Tables API endpoints. * s3api: wire up DefaultAllow for S3 Tables API initialization Updated registerS3TablesRoutes to query the global IAM configuration and set the DefaultAllow flag on the S3 Tables API server. This completes the end-to-end propagation required for anonymous access in zero-config environments. Added a SetDefaultAllow method to S3TablesApiServer to facilitate this. * s3api: fix tests by adding DefaultAllow to mock IAM integrations The IAMIntegration interface was updated to include DefaultAllow(), breaking several mock implementations in tests. 
This commit fixes the build errors by adding the missing method to the mocks. * env * ensure ports * env * env * fix default allow * add one more test using non-anonymous user * debug * add more debug * less logs
309 lines
8.9 KiB
Go
309 lines
8.9 KiB
Go
package policy
|
|
|
|
import (
|
|
"context"
|
|
"fmt"
|
|
"net"
|
|
"net/http"
|
|
"os"
|
|
"os/exec"
|
|
"path/filepath"
|
|
"strconv"
|
|
"strings"
|
|
"sync"
|
|
"testing"
|
|
"time"
|
|
|
|
"github.com/seaweedfs/seaweedfs/weed/command"
|
|
"github.com/seaweedfs/seaweedfs/weed/glog"
|
|
"github.com/seaweedfs/seaweedfs/weed/pb"
|
|
flag "github.com/seaweedfs/seaweedfs/weed/util/fla9"
|
|
"github.com/stretchr/testify/require"
|
|
)
|
|
|
|
// TestCluster manages the weed mini instance for integration testing
type TestCluster struct {
	dataDir string             // temp directory holding all server data and config files
	ctx     context.Context    // lifetime of the embedded cluster
	cancel  context.CancelFunc // cancels ctx to request cluster shutdown
	// isRunning is set only after the S3 endpoint answered an HTTP request.
	isRunning bool
	// wg tracks the goroutine that runs the "mini" command in-process.
	wg sync.WaitGroup
	// Dynamically allocated loopback ports for each server role.
	masterPort     int
	masterGrpcPort int
	volumePort     int
	filerPort      int
	filerGrpcPort  int
	s3Port         int
	// s3Endpoint is the base URL, e.g. "http://127.0.0.1:<s3Port>".
	s3Endpoint string
}
|
|
|
|
func TestS3PolicyShellRevised(t *testing.T) {
|
|
if testing.Short() {
|
|
t.Skip("Skipping integration test in short mode")
|
|
}
|
|
cluster, err := startMiniCluster(t)
|
|
require.NoError(t, err)
|
|
defer cluster.Stop()
|
|
|
|
policyContent := `{"Version":"2012-10-17","Statement":[{"Effect":"Allow","Action":"*","Resource":"*"}]}`
|
|
tmpPolicyFile, err := os.CreateTemp("", "test_policy_*.json")
|
|
if err != nil {
|
|
t.Fatalf("Failed to create temp policy file: %v", err)
|
|
}
|
|
defer os.Remove(tmpPolicyFile.Name())
|
|
_, err = tmpPolicyFile.WriteString(policyContent)
|
|
require.NoError(t, err)
|
|
require.NoError(t, tmpPolicyFile.Close())
|
|
|
|
weedCmd := "weed"
|
|
masterAddr := string(pb.NewServerAddress("127.0.0.1", cluster.masterPort, cluster.masterGrpcPort))
|
|
filerAddr := string(pb.NewServerAddress("127.0.0.1", cluster.filerPort, cluster.filerGrpcPort))
|
|
|
|
// Put
|
|
execShell(t, weedCmd, masterAddr, filerAddr, fmt.Sprintf("s3.policy -put -name=testpolicy -file=%s", tmpPolicyFile.Name()))
|
|
|
|
// List
|
|
out := execShell(t, weedCmd, masterAddr, filerAddr, "s3.policy -list")
|
|
if !contains(out, "Name: testpolicy") {
|
|
t.Errorf("List failed: %s", out)
|
|
}
|
|
|
|
// Get
|
|
out = execShell(t, weedCmd, masterAddr, filerAddr, "s3.policy -get -name=testpolicy")
|
|
if !contains(out, "Statement") {
|
|
t.Errorf("Get failed: %s", out)
|
|
}
|
|
|
|
// Delete
|
|
execShell(t, weedCmd, masterAddr, filerAddr, "s3.policy -delete -name=testpolicy")
|
|
|
|
// Verify
|
|
out = execShell(t, weedCmd, masterAddr, filerAddr, "s3.policy -list")
|
|
if contains(out, "Name: testpolicy") {
|
|
t.Errorf("delete failed, policy 'testpolicy' should not be in the list: %s", out)
|
|
}
|
|
// Verify s3.configure linking policies
|
|
execShell(t, weedCmd, masterAddr, filerAddr, "s3.configure -user=test -actions=Read -policies=testpolicy -apply")
|
|
out = execShell(t, weedCmd, masterAddr, filerAddr, "s3.configure")
|
|
if !contains(out, "\"policyNames\": [\n \"testpolicy\"\n ]") {
|
|
// relaxed check
|
|
if !contains(out, "\"testpolicy\"") || !contains(out, "policyNames") {
|
|
t.Errorf("s3.configure failed to link policy: %s", out)
|
|
}
|
|
}
|
|
|
|
// 1. Update User: Add Write action
|
|
execShell(t, weedCmd, masterAddr, filerAddr, "s3.configure -user=test -actions=Write -apply")
|
|
out = execShell(t, weedCmd, masterAddr, filerAddr, "s3.configure")
|
|
if !contains(out, "Write") {
|
|
t.Errorf("s3.configure failed to add Write action: %s", out)
|
|
}
|
|
|
|
// 2. Granular Delete: Delete Read action
|
|
execShell(t, weedCmd, masterAddr, filerAddr, "s3.configure -user=test -actions=Read -delete -apply")
|
|
out = execShell(t, weedCmd, masterAddr, filerAddr, "s3.configure")
|
|
if contains(out, "\"Read\"") { // Quote to avoid matching partial words if any
|
|
t.Errorf("s3.configure failed to delete Read action: %s", out)
|
|
}
|
|
if !contains(out, "Write") {
|
|
t.Errorf("s3.configure deleted Write action unnecessarily: %s", out)
|
|
}
|
|
|
|
// 3. Access Key Management
|
|
execShell(t, weedCmd, masterAddr, filerAddr, "s3.configure -user=test -access_key=testkey -secret_key=testsecret -apply")
|
|
out = execShell(t, weedCmd, masterAddr, filerAddr, "s3.configure")
|
|
if !contains(out, "testkey") {
|
|
t.Errorf("s3.configure failed to add access key: %s", out)
|
|
}
|
|
|
|
execShell(t, weedCmd, masterAddr, filerAddr, "s3.configure -user=test -access_key=testkey -delete -apply")
|
|
out = execShell(t, weedCmd, masterAddr, filerAddr, "s3.configure")
|
|
if contains(out, "testkey") {
|
|
t.Errorf("s3.configure failed to delete access key: %s", out)
|
|
}
|
|
|
|
// 4. Delete User
|
|
execShell(t, weedCmd, masterAddr, filerAddr, "s3.configure -user=test -delete -apply")
|
|
out = execShell(t, weedCmd, masterAddr, filerAddr, "s3.configure")
|
|
if contains(out, "\"Name\": \"test\"") {
|
|
t.Errorf("s3.configure failed to delete user: %s", out)
|
|
}
|
|
}
|
|
|
|
func execShell(t *testing.T, weedCmd, master, filer, shellCmd string) string {
|
|
// weed shell -master=... -filer=...
|
|
args := []string{"shell", "-master=" + master, "-filer=" + filer}
|
|
t.Logf("Running: %s %v <<< %s", weedCmd, args, shellCmd)
|
|
|
|
cmd := exec.Command(weedCmd, args...)
|
|
cmd.Stdin = strings.NewReader(shellCmd + "\n")
|
|
|
|
out, err := cmd.CombinedOutput()
|
|
if err != nil {
|
|
t.Fatalf("Failed to run %s: %v\nOutput: %s", shellCmd, err, string(out))
|
|
}
|
|
return string(out)
|
|
}
|
|
|
|
// --- Test setup helpers ---
|
|
|
|
// findAvailablePort asks the OS for a free TCP port on the loopback
// interface by binding to port 0, then releases the listener and returns
// the port number it was assigned.
func findAvailablePort() (int, error) {
	l, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		return 0, err
	}
	defer l.Close()
	return l.Addr().(*net.TCPAddr).Port, nil
}
|
|
|
|
// findAvailablePortPair finds an available http port P such that P and P+10000 (grpc) are both available
|
|
func findAvailablePortPair() (int, int, error) {
|
|
httpPort, err := findAvailablePort()
|
|
if err != nil {
|
|
return 0, 0, err
|
|
}
|
|
for i := 0; i < 100; i++ {
|
|
grpcPort, err := findAvailablePort()
|
|
if err != nil {
|
|
return 0, 0, err
|
|
}
|
|
if grpcPort != httpPort {
|
|
return httpPort, grpcPort, nil
|
|
}
|
|
}
|
|
return 0, 0, fmt.Errorf("failed to find available port pair")
|
|
}
|
|
|
|
// startMiniCluster boots an in-process "weed mini" cluster (master, volume,
// filer, S3) on dynamically chosen loopback ports, writes test config files
// into a temp dir, and blocks until the S3 endpoint answers HTTP or a
// 60-second deadline passes.
//
// NOTE(review): the goroutine below mutates process-global state (os.Args and
// the working directory via os.Chdir) while the rest of the test keeps
// running — this is unsafe if other tests run in parallel in this process;
// confirm callers are serialized.
func startMiniCluster(t *testing.T) (*TestCluster, error) {
	// Pick non-conflicting http/grpc port pairs for every server role.
	masterPort, masterGrpcPort, err := findAvailablePortPair()
	require.NoError(t, err)
	volumePort, volumeGrpcPort, err := findAvailablePortPair()
	require.NoError(t, err)
	filerPort, filerGrpcPort, err := findAvailablePortPair()
	require.NoError(t, err)
	s3Port, s3GrpcPort, err := findAvailablePortPair()
	require.NoError(t, err)

	testDir := t.TempDir()

	ctx, cancel := context.WithCancel(context.Background())
	s3Endpoint := fmt.Sprintf("http://127.0.0.1:%d", s3Port)
	cluster := &TestCluster{
		dataDir:        testDir,
		ctx:            ctx,
		cancel:         cancel,
		masterPort:     masterPort,
		masterGrpcPort: masterGrpcPort,
		volumePort:     volumePort,
		filerPort:      filerPort,
		filerGrpcPort:  filerGrpcPort,
		s3Port:         s3Port,
		s3Endpoint:     s3Endpoint,
	}

	// Disable authentication for tests
	securityToml := filepath.Join(testDir, "security.toml")
	err = os.WriteFile(securityToml, []byte("# Empty security config\n"), 0644)
	require.NoError(t, err)

	// Configure credential store for IAM tests
	credentialToml := filepath.Join(testDir, "credential.toml")
	credentialConfig := `
[credential.memory]
enabled = true
`
	err = os.WriteFile(credentialToml, []byte(credentialConfig), 0644)
	require.NoError(t, err)

	// Set environment variables for admin credentials safely for this test
	// (t.Setenv restores them automatically; pre-set values are respected).
	if os.Getenv("AWS_ACCESS_KEY_ID") == "" {
		t.Setenv("AWS_ACCESS_KEY_ID", "admin")
	}
	if os.Getenv("AWS_SECRET_ACCESS_KEY") == "" {
		t.Setenv("AWS_SECRET_ACCESS_KEY", "admin")
	}

	cluster.wg.Add(1)
	go func() {
		defer cluster.wg.Done()
		// Save and later restore the process-global state mutated below.
		oldDir, _ := os.Getwd()
		oldArgs := os.Args
		defer func() {
			os.Chdir(oldDir)
			os.Args = oldArgs
		}()
		os.Chdir(testDir)
		// Build the argv the "mini" command parses: all server roles in one
		// process, bound to loopback, with webdav/admin UI disabled.
		os.Args = []string{
			"weed",
			"-dir=" + testDir,
			"-master.port=" + strconv.Itoa(masterPort),
			"-master.port.grpc=" + strconv.Itoa(masterGrpcPort),
			"-volume.port=" + strconv.Itoa(volumePort),
			"-volume.port.grpc=" + strconv.Itoa(volumeGrpcPort),
			"-filer.port=" + strconv.Itoa(filerPort),
			"-filer.port.grpc=" + strconv.Itoa(filerGrpcPort),
			"-s3.port=" + strconv.Itoa(s3Port),
			"-s3.port.grpc=" + strconv.Itoa(s3GrpcPort),
			"-webdav.port=0",
			"-admin.ui=false",
			"-master.volumeSizeLimitMB=32",
			"-ip=127.0.0.1",
			"-master.peers=none",
		}
		glog.MaxSize = 1024 * 1024
		// Locate and run the "mini" subcommand; Run blocks for the life of
		// the cluster, so this goroutine exits only on shutdown.
		for _, cmd := range command.Commands {
			if cmd.Name() == "mini" && cmd.Run != nil {
				cmd.Flag.Parse(os.Args[1:])
				cmd.Run(cmd, cmd.Flag.Args())
				return
			}
		}
	}()

	// Wait for S3
	err = waitForS3Ready(cluster.s3Endpoint, 60*time.Second)
	if err != nil {
		cancel()
		return nil, err
	}
	cluster.isRunning = true
	return cluster, nil
}
|
|
|
|
func waitForS3Ready(endpoint string, timeout time.Duration) error {
|
|
client := &http.Client{Timeout: 1 * time.Second}
|
|
deadline := time.Now().Add(timeout)
|
|
for time.Now().Before(deadline) {
|
|
resp, err := client.Get(endpoint)
|
|
if err == nil {
|
|
resp.Body.Close()
|
|
return nil
|
|
}
|
|
time.Sleep(200 * time.Millisecond)
|
|
}
|
|
return fmt.Errorf("timeout waiting for S3")
|
|
}
|
|
|
|
func (c *TestCluster) Stop() {
|
|
if c.cancel != nil {
|
|
c.cancel()
|
|
}
|
|
if c.isRunning {
|
|
time.Sleep(500 * time.Millisecond)
|
|
}
|
|
// Simplified stop
|
|
for _, cmd := range command.Commands {
|
|
if cmd.Name() == "mini" {
|
|
cmd.Flag.VisitAll(func(f *flag.Flag) {
|
|
f.Value.Set(f.DefValue)
|
|
})
|
|
break
|
|
}
|
|
}
|
|
}
|
|
|
|
// contains reports whether substr occurs anywhere within s.
func contains(s, substr string) bool {
	return strings.Index(s, substr) != -1
}
|