fix: admin UI bucket deletion with filer group configured (#7735)

This commit is contained in:
Chris Lu
2025-12-13 19:04:12 -08:00
committed by GitHub
parent f70cd05404
commit 51c2ab0107
8 changed files with 767 additions and 41 deletions

View File

@@ -0,0 +1,166 @@
# S3 Filer Group Test Makefile
# This Makefile provides targets for running S3 filer group integration tests
# These tests verify that S3 bucket operations work correctly when a filer group is configured.

.PHONY: help build-weed check-deps start-server stop-server test test-with-server clean logs health-check

# Configuration (all values can be overridden on the command line,
# e.g. `make test FILER_GROUP=other`)
WEED_BINARY := ../../../weed/weed_binary
S3_PORT := 8333
MASTER_PORT := 9333
# Master gRPC port = HTTP port + 10000; the Go tests talk to the master over gRPC.
MASTER_GRPC_PORT := 19333
VOLUME_PORT := 8080
FILER_PORT := 8888
TEST_TIMEOUT := 10m
# `go test -run` pattern; the default "Test" matches every test.
TEST_PATTERN := Test

# Filer group configuration
# With a filer group configured, collections are named {FILER_GROUP}_{bucket}.
FILER_GROUP := testgroup
# Default target: print the available targets and the effective configuration.
help:
	@echo "S3 Filer Group Test Makefile"
	@echo ""
	@echo "Available targets:"
	@echo " help - Show this help message"
	@echo " build-weed - Build the SeaweedFS binary"
	@echo " check-deps - Check dependencies and build binary if needed"
	@echo " start-server - Start SeaweedFS server with filer group"
	@echo " stop-server - Stop SeaweedFS server"
	@echo " test - Run filer group tests (server must be running)"
	@echo " test-with-server - Start server, run tests, stop server"
	@echo " logs - Show server logs"
	@echo " clean - Clean up test artifacts and stop server"
	@echo " health-check - Check if server is accessible"
	@echo ""
	@echo "Configuration:"
	@echo " S3_PORT=${S3_PORT}"
	@echo " MASTER_GRPC_PORT=${MASTER_GRPC_PORT}"
	@echo " FILER_GROUP=${FILER_GROUP}"
	@echo " TEST_TIMEOUT=${TEST_TIMEOUT}"
# Build the SeaweedFS binary
# Builds from ../../../weed into ./weed_binary, which is the same file
# that $(WEED_BINARY) points at from this directory.
build-weed:
	@echo "Building SeaweedFS binary..."
	@cd ../../../weed && go build -o weed_binary .
	@chmod +x $(WEED_BINARY)
	@echo "✅ SeaweedFS binary built at $(WEED_BINARY)"
# Verify the Go toolchain, the weed binary, and the Go module test
# dependencies are available. Depends on build-weed so the binary is
# always (re)built before tests run.
check-deps: build-weed
	@echo "Checking dependencies..."
	@command -v go >/dev/null 2>&1 || (echo "Go is required but not installed" && exit 1)
	@echo "Go version: $$(go version)"
	@test -f $(WEED_BINARY) || (echo "SeaweedFS binary not found at $(WEED_BINARY)" && exit 1)
	@go list -m github.com/aws/aws-sdk-go-v2 >/dev/null 2>&1 || (echo "AWS SDK Go v2 not found. Run 'go mod tidy'." && exit 1)
	@go list -m github.com/stretchr/testify >/dev/null 2>&1 || (echo "Testify not found. Run 'go mod tidy'." && exit 1)
	@echo "✅ All dependencies are available"
# Start SeaweedFS server with filer group configured
# Launches an all-in-one `weed server` (master, volume, filer, S3) in the
# background, records its PID in weed-server.pid, and polls the S3 port
# for up to 90 seconds. All server output goes to weed-test.log.
start-server: check-deps
	@echo "Starting SeaweedFS server with filer group: $(FILER_GROUP)..."
	@rm -f weed-server.pid
	@mkdir -p ./test-volume-data
	@# Anchor the match on ":PORT " so unrelated numbers (PIDs, or ports
	@# such as 18333 that merely contain "8333") cannot produce a false
	@# "port in use" result; -p is dropped since it needs root and the
	@# owning process is not used here.
	@if netstat -tln 2>/dev/null | grep ":$(S3_PORT) " >/dev/null; then \
		echo "⚠️ Port $(S3_PORT) is already in use"; \
		exit 1; \
	fi
	@echo "Launching SeaweedFS server with filer group $(FILER_GROUP)..."
	@$(WEED_BINARY) server \
		-debug \
		-s3 \
		-s3.port=$(S3_PORT) \
		-s3.allowDeleteBucketNotEmpty=true \
		-s3.config=../../../docker/compose/s3.json \
		-filer \
		-filer.maxMB=64 \
		-filer.filerGroup=$(FILER_GROUP) \
		-master.volumeSizeLimitMB=50 \
		-volume.max=100 \
		-dir=./test-volume-data \
		-volume.preStopSeconds=1 \
		-metricsPort=9325 \
		> weed-test.log 2>&1 & echo $$! > weed-server.pid
	@echo "Server PID: $$(cat weed-server.pid 2>/dev/null || echo 'PID file not found')"
	@echo "Waiting for server to start (up to 90 seconds)..."
	@for i in $$(seq 1 90); do \
		if curl -s http://localhost:$(S3_PORT) >/dev/null 2>&1; then \
			echo "✅ SeaweedFS server started successfully on port $(S3_PORT) with filer group $(FILER_GROUP)"; \
			exit 0; \
		fi; \
		if [ $$i -eq 30 ]; then \
			echo "⚠️ Server taking longer than expected (30s), checking logs..."; \
			if [ -f weed-test.log ]; then \
				tail -20 weed-test.log; \
			fi; \
		fi; \
		sleep 1; \
	done; \
	echo "❌ Server failed to start within 90 seconds"; \
	if [ -f weed-test.log ]; then \
		cat weed-test.log; \
	fi; \
	exit 1
# Stop SeaweedFS server
# Sends SIGTERM first and escalates to SIGKILL if the process is still
# alive after 2 seconds. Safe (and silent about it) when no server is
# running: a missing PID file or an already-dead PID is not an error.
stop-server:
	@echo "Stopping SeaweedFS server..."
	@if [ -f weed-server.pid ]; then \
		SERVER_PID=$$(cat weed-server.pid); \
		echo "Killing server PID $$SERVER_PID"; \
		if ps -p $$SERVER_PID >/dev/null 2>&1; then \
			kill -TERM $$SERVER_PID 2>/dev/null || true; \
			sleep 2; \
			if ps -p $$SERVER_PID >/dev/null 2>&1; then \
				echo "Process still running, sending KILL signal..."; \
				kill -KILL $$SERVER_PID 2>/dev/null || true; \
				sleep 1; \
			fi; \
		else \
			echo "Process $$SERVER_PID not found (already stopped)"; \
		fi; \
		rm -f weed-server.pid; \
	else \
		echo "No PID file found"; \
	fi
	@echo "✅ SeaweedFS server stopped"
# Show server logs
# Follows weed-test.log with `tail -f` (Ctrl-C to stop following).
logs:
	@if test -f weed-test.log; then \
		echo "=== SeaweedFS Server Logs ==="; \
		tail -f weed-test.log; \
	else \
		echo "No log file found. Server may not be running."; \
	fi
# Run filer group tests (assumes server is already running)
# Endpoints are handed to the Go tests via environment variables; note
# MASTER_ADDRESS carries the master *gRPC* port, not the HTTP port.
test: check-deps
	@echo "Running filer group S3 tests..."
	@FILER_GROUP=$(FILER_GROUP) S3_ENDPOINT=http://localhost:$(S3_PORT) MASTER_ADDRESS=localhost:$(MASTER_GRPC_PORT) \
		go test -v -timeout=$(TEST_TIMEOUT) -run "$(TEST_PATTERN)" .
	@echo "✅ Filer group tests completed"
# Run tests with automatic server management
# The EXIT trap guarantees the server is stopped no matter how the test
# shell exits (success or failure), so no separate stop-server invocation
# is needed afterwards — the previous explicit call ran stop-server twice.
test-with-server: start-server
	@echo "Server started successfully, running filer group tests..."
	@echo "Test pattern: $(TEST_PATTERN)"
	@echo "Test timeout: $(TEST_TIMEOUT)"
	@trap "$(MAKE) stop-server" EXIT; \
	$(MAKE) test || (echo "❌ Tests failed, showing server logs:" && echo "=== Last 50 lines of server logs ===" && tail -50 weed-test.log && echo "=== End of server logs ===" && exit 1)
	@echo "✅ Tests completed and server stopped"
# Clean up test artifacts
# Stops the server first, then removes logs, the PID file, the volume
# data directory, and cached Go test results.
clean:
	@echo "Cleaning up test artifacts..."
	@$(MAKE) stop-server
	@rm -f weed-test*.log weed-server.pid
	@rm -rf test-volume-data/
	@go clean -testcache
	@echo "✅ Cleanup completed"
# Quick health check
# Probes the S3 API port and the metrics endpoint (9325, as configured in
# start-server); always exits 0 — results are reported via echo only.
health-check:
	@echo "Running health check..."
	@curl -s http://localhost:$(S3_PORT) >/dev/null 2>&1 && echo "✅ S3 API is accessible" || echo "❌ S3 API is not accessible"
	@curl -s http://localhost:9325/metrics >/dev/null 2>&1 && echo "✅ Metrics endpoint is accessible" || echo "❌ Metrics endpoint is not accessible"

View File

@@ -0,0 +1,99 @@
# Filer Group S3 Tests
These tests verify that S3 bucket operations work correctly when a filer group is configured.
## Background
When SeaweedFS is configured with a filer group (e.g. via the `weed server -filer.filerGroup` option, as used by the Makefile in this directory), collections are named with the filer group prefix:
```text
Collection name = {filerGroup}_{bucketName}
```
For example, with filer group `mygroup` and bucket `mybucket`, the collection will be named `mygroup_mybucket`.
## Issue Being Tested
This test suite was created to verify the fix for a bug where:
- The admin UI was using just the bucket name when deleting collections
- This caused collection deletion to fail when a filer group was configured
- After bucket deletion via admin UI, the collection data would be orphaned
## Running the Tests
### Prerequisites
1. SeaweedFS servers must be running with a filer group configured
2. The S3 gateway must be accessible
3. Master server must be accessible for collection verification
### Quick Start
```bash
# Set environment variables
export FILER_GROUP=testgroup
export S3_ENDPOINT=http://localhost:8333
export MASTER_ADDRESS=localhost:9333
# Run tests
go test -v ./...
```
### Using the Makefile
```bash
# Start a test server with the filer group configured
make start-server
# Run tests
make test
# Stop the server
make stop-server
# Or run the full cycle (start server, run tests, stop server)
make test-with-server
```
### Configuration
Tests can be configured via:
1. Environment variables:
- `FILER_GROUP`: The filer group name (required for tests to run)
- `S3_ENDPOINT`: S3 API endpoint (default: `http://localhost:8333`)
- `MASTER_ADDRESS`: Master server gRPC address (default: `localhost:19333`, i.e. 10000 + the master HTTP port)
2. `test_config.json` file
## Test Cases
### TestFilerGroupCollectionNaming
Verifies that when a bucket is created and objects are uploaded:
1. The collection is created with the correct filer group prefix
2. Bucket deletion removes the correctly-named collection
### TestBucketDeletionWithFilerGroup
Specifically tests that bucket deletion via S3 API correctly deletes
the collection when filer group is configured.
### TestMultipleBucketsWithFilerGroup
Tests creating and deleting multiple buckets to ensure the filer group
prefix is correctly applied and removed for all buckets.
## Expected Behavior
With filer group `testgroup`:
| Bucket Name | Expected Collection Name |
|-------------|-------------------------|
| mybucket | testgroup_mybucket |
| test-123 | testgroup_test-123 |
Without filer group:
| Bucket Name | Expected Collection Name |
|-------------|-------------------------|
| mybucket | mybucket |
| test-123 | test-123 |

View File

@@ -0,0 +1,332 @@
package filer_group
import (
"context"
"encoding/json"
"fmt"
"os"
"strings"
"testing"
"time"
"github.com/aws/aws-sdk-go-v2/aws"
"github.com/aws/aws-sdk-go-v2/config"
"github.com/aws/aws-sdk-go-v2/credentials"
"github.com/aws/aws-sdk-go-v2/service/s3"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials/insecure"
"github.com/seaweedfs/seaweedfs/weed/pb/master_pb"
)
// TestConfig holds configuration for filer group S3 tests.
// Values are loaded from test_config.json (if present) and then
// overridden by environment variables in init().
type TestConfig struct {
	S3Endpoint    string `json:"s3_endpoint"`    // S3 gateway base URL
	MasterAddress string `json:"master_address"` // master gRPC address (host:port)
	AccessKey     string `json:"access_key"`     // static S3 access key
	SecretKey     string `json:"secret_key"`     // static S3 secret key
	Region        string `json:"region"`         // AWS region passed to the SDK
	FilerGroup    string `json:"filer_group"`    // filer group; tests skip when empty
}
// testConfig holds the effective test configuration; these defaults match
// the Makefile in this directory and may be overridden by
// test_config.json and/or environment variables (see init).
var testConfig = &TestConfig{
	S3Endpoint:    "http://localhost:8333",
	MasterAddress: "localhost:19333", // gRPC port = 10000 + master HTTP port (9333)
	AccessKey:     "some_access_key1",
	SecretKey:     "some_secret_key1",
	Region:        "us-east-1",
	FilerGroup:    "testgroup", // Expected filer group for these tests
}
// init layers the test configuration: compiled-in defaults, then
// test_config.json (if readable), then environment variables, which win.
func init() {
	// Optional JSON config file; a parse failure is reported but not fatal
	// because the environment can still supply a usable configuration.
	if raw, readErr := os.ReadFile("test_config.json"); readErr == nil {
		if parseErr := json.Unmarshal(raw, testConfig); parseErr != nil {
			fmt.Fprintf(os.Stderr, "Warning: failed to parse test_config.json: %v\n", parseErr)
		}
	}
	// Environment variables override everything; empty values are ignored.
	envOverrides := []struct {
		key  string
		dest *string
	}{
		{"S3_ENDPOINT", &testConfig.S3Endpoint},
		{"MASTER_ADDRESS", &testConfig.MasterAddress},
		{"FILER_GROUP", &testConfig.FilerGroup},
	}
	for _, override := range envOverrides {
		if value := os.Getenv(override.key); value != "" {
			*override.dest = value
		}
	}
}
// getS3Client builds an S3 client aimed at the configured test endpoint,
// using the static test credentials and path-style bucket addressing
// (required because the test gateway is addressed by host:port, not by
// bucket subdomain).
func getS3Client(t *testing.T) *s3.Client {
	staticCreds := credentials.NewStaticCredentialsProvider(
		testConfig.AccessKey,
		testConfig.SecretKey,
		"",
	)
	awsCfg, err := config.LoadDefaultConfig(context.TODO(),
		config.WithRegion(testConfig.Region),
		config.WithCredentialsProvider(staticCreds),
	)
	require.NoError(t, err)
	endpointOpts := func(o *s3.Options) {
		o.BaseEndpoint = aws.String(testConfig.S3Endpoint)
		o.UsePathStyle = true
	}
	return s3.NewFromConfig(awsCfg, endpointOpts)
}
// getMasterClient dials the master gRPC endpoint without TLS and registers
// a cleanup that closes the connection when the test finishes.
func getMasterClient(t *testing.T) master_pb.SeaweedClient {
	conn, err := grpc.NewClient(testConfig.MasterAddress, grpc.WithTransportCredentials(insecure.NewCredentials()))
	require.NoError(t, err)
	t.Cleanup(func() { conn.Close() })
	return master_pb.NewSeaweedClient(conn)
}
func getNewBucketName() string {
return fmt.Sprintf("filergroup-test-%d", time.Now().UnixNano())
}
// getExpectedCollectionName returns the collection name the master should
// use for bucketName. With a filer group configured the name is
// {filerGroup}_{bucketName}; without one it is the bucket name itself.
func getExpectedCollectionName(bucketName string) string {
	group := testConfig.FilerGroup
	if group == "" {
		return bucketName
	}
	return group + "_" + bucketName
}
// listAllCollections returns the names of all collections known to the
// master (normal and EC volumes). On a listing error it logs a warning
// and returns nil rather than failing the test, so pollers can retry.
func listAllCollections(t *testing.T, masterClient master_pb.SeaweedClient) []string {
	request := &master_pb.CollectionListRequest{
		IncludeNormalVolumes: true,
		IncludeEcVolumes:     true,
	}
	response, err := masterClient.CollectionList(context.Background(), request)
	if err != nil {
		t.Logf("Warning: failed to list collections: %v", err)
		return nil
	}
	var names []string
	for _, collection := range response.Collections {
		names = append(names, collection.Name)
	}
	return names
}
// collectionExists reports whether collectionName is currently listed by
// the master. A listing failure (nil list) reads as "does not exist".
func collectionExists(t *testing.T, masterClient master_pb.SeaweedClient, collectionName string) bool {
	existing := listAllCollections(t, masterClient)
	for i := range existing {
		if existing[i] == collectionName {
			return true
		}
	}
	return false
}
// waitForCollectionExists waits for a collection to exist using polling.
// Polls the master every 200ms for up to 10s; on timeout it fails the
// test with the most recently observed collection list for diagnosis.
func waitForCollectionExists(t *testing.T, masterClient master_pb.SeaweedClient, collectionName string) {
	var lastCollections []string
	// assert.Eventually (not require) so the failure path below can report
	// the collections seen on the final poll.
	success := assert.Eventually(t, func() bool {
		lastCollections = listAllCollections(t, masterClient)
		for _, name := range lastCollections {
			if name == collectionName {
				return true
			}
		}
		return false
	}, 10*time.Second, 200*time.Millisecond)
	if !success {
		t.Fatalf("collection %s should be created; existing collections: %v", collectionName, lastCollections)
	}
}
// waitForCollectionDeleted waits for a collection to be deleted using
// polling (every 200ms, up to 10s); fails the test on timeout.
func waitForCollectionDeleted(t *testing.T, masterClient master_pb.SeaweedClient, collectionName string) {
	require.Eventually(t, func() bool {
		return !collectionExists(t, masterClient, collectionName)
	}, 10*time.Second, 200*time.Millisecond, "collection %s should be deleted", collectionName)
}
// TestFilerGroupCollectionNaming verifies that when a filer group is configured,
// collections are created with the correct prefix ({filerGroup}_{bucketName})
// and that deleting the bucket removes that prefixed collection.
func TestFilerGroupCollectionNaming(t *testing.T) {
	if testConfig.FilerGroup == "" {
		t.Skip("Skipping test: FILER_GROUP not configured. Set FILER_GROUP environment variable or configure in test_config.json")
	}
	s3Client := getS3Client(t)
	masterClient := getMasterClient(t)
	bucketName := getNewBucketName()
	expectedCollection := getExpectedCollectionName(bucketName)
	t.Logf("Testing with filer group: %s", testConfig.FilerGroup)
	t.Logf("Bucket name: %s", bucketName)
	t.Logf("Expected collection name: %s", expectedCollection)
	// Create bucket
	_, err := s3Client.CreateBucket(context.Background(), &s3.CreateBucketInput{
		Bucket: aws.String(bucketName),
	})
	require.NoError(t, err, "CreateBucket should succeed")
	// Upload an object to trigger collection creation (the collection only
	// appears on the master once data is written to the bucket).
	_, err = s3Client.PutObject(context.Background(), &s3.PutObjectInput{
		Bucket: aws.String(bucketName),
		Key:    aws.String("test-object"),
		Body:   strings.NewReader("test content"),
	})
	require.NoError(t, err, "PutObject should succeed")
	// Wait for collection to be visible using polling
	waitForCollectionExists(t, masterClient, expectedCollection)
	// Verify collection exists with correct name
	require.True(t, collectionExists(t, masterClient, expectedCollection),
		"Collection %s should exist (filer group prefix applied)", expectedCollection)
	// Cleanup: delete object and bucket (object first, so DeleteBucket
	// operates on an empty bucket)
	_, err = s3Client.DeleteObject(context.Background(), &s3.DeleteObjectInput{
		Bucket: aws.String(bucketName),
		Key:    aws.String("test-object"),
	})
	require.NoError(t, err, "DeleteObject should succeed")
	_, err = s3Client.DeleteBucket(context.Background(), &s3.DeleteBucketInput{
		Bucket: aws.String(bucketName),
	})
	require.NoError(t, err, "DeleteBucket should succeed")
	// Wait for collection to be deleted using polling
	waitForCollectionDeleted(t, masterClient, expectedCollection)
	t.Log("SUCCESS: Collection naming with filer group works correctly")
}
// TestBucketDeletionWithFilerGroup verifies that bucket deletion correctly
// deletes the collection when filer group is configured — the regression
// this test suite exists for (collection previously looked up by bare
// bucket name, leaving {filerGroup}_{bucket} orphaned).
func TestBucketDeletionWithFilerGroup(t *testing.T) {
	if testConfig.FilerGroup == "" {
		t.Skip("Skipping test: FILER_GROUP not configured")
	}
	s3Client := getS3Client(t)
	masterClient := getMasterClient(t)
	bucketName := getNewBucketName()
	expectedCollection := getExpectedCollectionName(bucketName)
	// Create bucket and add an object (writing data triggers collection
	// creation on the master)
	_, err := s3Client.CreateBucket(context.Background(), &s3.CreateBucketInput{
		Bucket: aws.String(bucketName),
	})
	require.NoError(t, err)
	_, err = s3Client.PutObject(context.Background(), &s3.PutObjectInput{
		Bucket: aws.String(bucketName),
		Key:    aws.String("test-object"),
		Body:   strings.NewReader("test content"),
	})
	require.NoError(t, err)
	// Wait for collection to be created using polling
	waitForCollectionExists(t, masterClient, expectedCollection)
	// Verify collection exists before deletion
	require.True(t, collectionExists(t, masterClient, expectedCollection),
		"Collection should exist before bucket deletion")
	// Delete object first so the bucket is empty when deleted
	_, err = s3Client.DeleteObject(context.Background(), &s3.DeleteObjectInput{
		Bucket: aws.String(bucketName),
		Key:    aws.String("test-object"),
	})
	require.NoError(t, err)
	// Delete bucket
	_, err = s3Client.DeleteBucket(context.Background(), &s3.DeleteBucketInput{
		Bucket: aws.String(bucketName),
	})
	require.NoError(t, err, "DeleteBucket should succeed")
	// Wait for collection to be deleted using polling
	waitForCollectionDeleted(t, masterClient, expectedCollection)
	// Verify collection was deleted
	require.False(t, collectionExists(t, masterClient, expectedCollection),
		"Collection %s should be deleted after bucket deletion", expectedCollection)
	t.Log("SUCCESS: Bucket deletion with filer group correctly deletes collection")
}
// TestMultipleBucketsWithFilerGroup tests creating and deleting multiple
// buckets, verifying the filer group prefix is applied to — and removed
// from — every bucket's collection, not just a single one.
func TestMultipleBucketsWithFilerGroup(t *testing.T) {
	if testConfig.FilerGroup == "" {
		t.Skip("Skipping test: FILER_GROUP not configured")
	}
	s3Client := getS3Client(t)
	masterClient := getMasterClient(t)
	// Three time-based names; getNewBucketName uses nanosecond timestamps,
	// so successive calls yield distinct names.
	buckets := make([]string, 3)
	for i := 0; i < 3; i++ {
		buckets[i] = getNewBucketName()
	}
	// Create all buckets and add objects (a write is what triggers
	// collection creation on the master)
	for _, bucket := range buckets {
		_, err := s3Client.CreateBucket(context.Background(), &s3.CreateBucketInput{
			Bucket: aws.String(bucket),
		})
		require.NoError(t, err)
		_, err = s3Client.PutObject(context.Background(), &s3.PutObjectInput{
			Bucket: aws.String(bucket),
			Key:    aws.String("test-object"),
			Body:   strings.NewReader("test content"),
		})
		require.NoError(t, err)
	}
	// Wait for all collections to be created using polling
	for _, bucket := range buckets {
		expectedCollection := getExpectedCollectionName(bucket)
		waitForCollectionExists(t, masterClient, expectedCollection)
	}
	// Verify all collections exist with correct naming
	for _, bucket := range buckets {
		expectedCollection := getExpectedCollectionName(bucket)
		require.True(t, collectionExists(t, masterClient, expectedCollection),
			"Collection %s should exist for bucket %s", expectedCollection, bucket)
	}
	// Delete all buckets (object first, then bucket, per bucket)
	for _, bucket := range buckets {
		_, err := s3Client.DeleteObject(context.Background(), &s3.DeleteObjectInput{
			Bucket: aws.String(bucket),
			Key:    aws.String("test-object"),
		})
		require.NoError(t, err)
		_, err = s3Client.DeleteBucket(context.Background(), &s3.DeleteBucketInput{
			Bucket: aws.String(bucket),
		})
		require.NoError(t, err)
	}
	// Wait for all collections to be deleted using polling
	for _, bucket := range buckets {
		expectedCollection := getExpectedCollectionName(bucket)
		waitForCollectionDeleted(t, masterClient, expectedCollection)
	}
	// Verify all collections are deleted
	for _, bucket := range buckets {
		expectedCollection := getExpectedCollectionName(bucket)
		require.False(t, collectionExists(t, masterClient, expectedCollection),
			"Collection %s should be deleted for bucket %s", expectedCollection, bucket)
	}
	t.Log("SUCCESS: Multiple bucket operations with filer group work correctly")
}

View File

@@ -0,0 +1,8 @@
{
"s3_endpoint": "http://localhost:8333",
    "master_address": "localhost:19333",
"access_key": "some_access_key1",
"secret_key": "some_secret_key1",
"region": "us-east-1",
"filer_group": "testgroup"
}