Files
seaweedFS/test/s3/normal/s3_integration_test.go
Michał Szynkiewicz 53048ffffb Add md5 checksum validation support on PutObject and UploadPart (#8367)
* Add md5 checksum validation support on PutObject and UploadPart

Per the S3 specification, when a client sends a Content-MD5 header, the server must compare it against the MD5 of the received body and return BadDigest (HTTP 400) if they don't match.

SeaweedFS was silently accepting objects with incorrect Content-MD5 headers, which breaks data integrity verification for clients that rely on this feature (e.g. boto3). The error infrastructure (ErrBadDigest, ErrMsgBadDigest) already existed from PR #7306 but was never wired to an actual check.

This commit adds MD5 verification in putToFiler after the body is streamed and the MD5 is computed, and adds Content-MD5 header validation to PutObjectPartHandler (matching PutObjectHandler). Orphaned chunks are cleaned up on mismatch.

Refs: https://github.com/seaweedfs/seaweedfs/discussions/3908

* handle SSE, add uploadpart test

* s3 integration test: fix typo and add multipart upload checksum test

* s3api: move validateContentMd5 after GetBucketAndObject in PutObjectPartHandler

* s3api: move validateContentMd5 after GetBucketAndObject in PutObjectHandler

* s3api: fix MD5 validation for SSE uploads and logging in putToFiler

* add SSE test with checksum validation - mostly ai-generated

* Update s3_integration_test.go

* Address S3 integration test feedback: fix typos, rename variables, add verification steps, and clean up comments.

---------

Co-authored-by: Chris Lu <chris.lu@gmail.com>
2026-02-18 15:40:08 -08:00

792 lines
24 KiB
Go

package example
import (
"bytes"
"context"
"crypto/md5"
"encoding/base64"
"fmt"
"math/rand"
"net"
"net/http"
"os"
"path/filepath"
"strconv"
"sync"
"testing"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/s3"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/seaweedfs/seaweedfs/weed/command"
"github.com/seaweedfs/seaweedfs/weed/glog"
flag "github.com/seaweedfs/seaweedfs/weed/util/fla9"
)
const (
testRegion = "us-west-2"
testAccessKey = "admin"
testSecretKey = "admin"
)
// TestCluster manages the weed mini instance for integration testing
type TestCluster struct {
dataDir string
ctx context.Context
cancel context.CancelFunc
s3Client *s3.S3
isRunning bool
startOnce sync.Once
wg sync.WaitGroup
masterPort int
volumePort int
filerPort int
s3Port int
s3Endpoint string
}
// TestS3Integration demonstrates basic S3 operations against a running weed mini instance
func TestS3Integration(t *testing.T) {
if testing.Short() {
t.Skip("Skipping integration test in short mode")
}
// Create and start test cluster
cluster, err := startMiniCluster(t)
require.NoError(t, err)
defer cluster.Stop()
// Run test suite
t.Run("CreateBucket", func(t *testing.T) {
testCreateBucket(t, cluster)
})
t.Run("PutObject", func(t *testing.T) {
testPutObject(t, cluster)
})
t.Run("UploadPart", func(t *testing.T) {
testPutPartWithChecksum(t, cluster)
})
t.Run("PutObjectWithChecksum", func(t *testing.T) {
testPutObjectWithChecksum(t, cluster)
})
t.Run("UploadPartWithChecksum", func(t *testing.T) {
testUploadPartWithChecksum(t, cluster)
})
t.Run("PutObjectWithChecksumAndSSEC", func(t *testing.T) {
testPutObjectWithChecksumAndSSEC(t, cluster)
})
t.Run("GetObject", func(t *testing.T) {
testGetObject(t, cluster)
})
t.Run("ListObjects", func(t *testing.T) {
testListObjects(t, cluster)
})
t.Run("DeleteObject", func(t *testing.T) {
testDeleteObject(t, cluster)
})
t.Run("DeleteBucket", func(t *testing.T) {
testDeleteBucket(t, cluster)
})
}
// findAvailablePort finds an available port by binding to port 0
func findAvailablePort() (int, error) {
listener, err := net.Listen("tcp", "127.0.0.1:0")
if err != nil {
return 0, err
}
defer listener.Close()
addr := listener.Addr().(*net.TCPAddr)
return addr.Port, nil
}
// startMiniCluster starts a weed mini instance directly without exec
func startMiniCluster(t *testing.T) (*TestCluster, error) {
// Find available ports
masterPort, err := findAvailablePort()
if err != nil {
return nil, fmt.Errorf("failed to find master port: %v", err)
}
volumePort, err := findAvailablePort()
if err != nil {
return nil, fmt.Errorf("failed to find volume port: %v", err)
}
filerPort, err := findAvailablePort()
if err != nil {
return nil, fmt.Errorf("failed to find filer port: %v", err)
}
s3Port, err := findAvailablePort()
if err != nil {
return nil, fmt.Errorf("failed to find s3 port: %v", err)
}
// Create temporary directory for test data
testDir := t.TempDir()
// Ensure no configuration file from previous runs
configFile := filepath.Join(testDir, "mini.options")
_ = os.Remove(configFile)
// Create context with timeout
ctx, cancel := context.WithCancel(context.Background())
s3Endpoint := fmt.Sprintf("http://127.0.0.1:%d", s3Port)
cluster := &TestCluster{
dataDir: testDir,
ctx: ctx,
cancel: cancel,
masterPort: masterPort,
volumePort: volumePort,
filerPort: filerPort,
s3Port: s3Port,
s3Endpoint: s3Endpoint,
}
// Create empty security.toml to disable JWT authentication in tests
securityToml := filepath.Join(testDir, "security.toml")
err = os.WriteFile(securityToml, []byte("# Empty security config for testing\n"), 0644)
if err != nil {
cancel()
return nil, fmt.Errorf("failed to create security.toml: %v", err)
}
// Set environment variables for admin credentials safely for this test
if os.Getenv("AWS_ACCESS_KEY_ID") == "" {
t.Setenv("AWS_ACCESS_KEY_ID", "admin")
}
if os.Getenv("AWS_SECRET_ACCESS_KEY") == "" {
t.Setenv("AWS_SECRET_ACCESS_KEY", "admin")
}
// Start weed mini in a goroutine by calling the command directly
cluster.wg.Add(1)
go func() {
defer cluster.wg.Done()
// Save current directory and args
oldDir, _ := os.Getwd()
oldArgs := os.Args
defer func() {
os.Chdir(oldDir)
os.Args = oldArgs
}()
// Change to test directory so mini picks up security.toml
os.Chdir(testDir)
// Configure args for mini command
// Note: When running via 'go test', os.Args[0] is the test binary
// We need to make it look like we're running 'weed mini'
os.Args = []string{
"weed",
"-dir=" + testDir,
"-master.port=" + strconv.Itoa(masterPort),
"-volume.port=" + strconv.Itoa(volumePort),
"-filer.port=" + strconv.Itoa(filerPort),
"-s3.port=" + strconv.Itoa(s3Port),
"-webdav.port=0", // Disable WebDAV
"-admin.ui=false", // Disable admin UI
"-master.volumeSizeLimitMB=32", // Small volumes for testing
"-ip=127.0.0.1",
"-master.peers=none", // Faster startup
"-s3.iam.readOnly=false", // Enable IAM write operations for tests
}
// Suppress most logging during tests
glog.MaxSize = 1024 * 1024
// Find and run the mini command
// We simulate how main.go executes commands
for _, cmd := range command.Commands {
if cmd.Name() == "mini" && cmd.Run != nil {
// Parse the flags for the mini command
// Don't include "weed" in the args
cmd.Flag.Parse(os.Args[1:])
args := cmd.Flag.Args()
cmd.Run(cmd, args)
return
}
}
}()
// Wait for S3 service to be ready
err = waitForS3Ready(cluster.s3Endpoint, 30*time.Second)
if err != nil {
cancel()
return nil, fmt.Errorf("S3 service failed to start: %v", err)
}
cluster.isRunning = true
// Create S3 client
sess, err := session.NewSession(&aws.Config{
Region: aws.String(testRegion),
Endpoint: aws.String(cluster.s3Endpoint),
DisableSSL: aws.Bool(true),
S3ForcePathStyle: aws.Bool(true),
Credentials: credentials.NewStaticCredentials(testAccessKey, testSecretKey, ""),
})
if err != nil {
cancel()
return nil, fmt.Errorf("failed to create AWS session: %v", err)
}
cluster.s3Client = s3.New(sess)
t.Logf("Test cluster started successfully at %s", cluster.s3Endpoint)
return cluster, nil
}
// Stop stops the test cluster
func (c *TestCluster) Stop() {
if c.cancel != nil {
c.cancel()
}
// Give services time to shut down gracefully
if c.isRunning {
time.Sleep(500 * time.Millisecond)
}
// Wait for the mini goroutine to finish
done := make(chan struct{})
go func() {
c.wg.Wait()
close(done)
}()
select {
case <-done:
// Goroutine finished
case <-time.After(2 * time.Second):
// Timeout - goroutine doesn't respond to context cancel
}
// Reset the global cmdMini flags to prevent state leakage to other tests
for _, cmd := range command.Commands {
if cmd.Name() == "mini" {
// Reset flags to defaults
cmd.Flag.VisitAll(func(f *flag.Flag) {
// Reset to default value
f.Value.Set(f.DefValue)
})
break
}
}
}
// waitForS3Ready waits for the S3 service to be ready
func waitForS3Ready(endpoint string, timeout time.Duration) error {
client := &http.Client{Timeout: 1 * time.Second}
deadline := time.Now().Add(timeout)
for time.Now().Before(deadline) {
resp, err := client.Get(endpoint)
if err == nil {
resp.Body.Close()
// Wait a bit more to ensure service is fully ready
time.Sleep(500 * time.Millisecond)
return nil
}
time.Sleep(200 * time.Millisecond)
}
return fmt.Errorf("timeout waiting for S3 service at %s", endpoint)
}
// Test functions
func testCreateBucket(t *testing.T, cluster *TestCluster) {
bucketName := "test-bucket-" + randomString(8)
_, err := cluster.s3Client.CreateBucket(&s3.CreateBucketInput{
Bucket: aws.String(bucketName),
})
require.NoError(t, err, "Failed to create bucket")
// Wait a bit for bucket to be fully created
time.Sleep(100 * time.Millisecond)
// Verify bucket exists by trying to head it
// Note: ListBuckets may not immediately show new buckets in SeaweedFS
_, err = cluster.s3Client.HeadBucket(&s3.HeadBucketInput{
Bucket: aws.String(bucketName),
})
require.NoError(t, err, "Bucket should be accessible via HeadBucket")
t.Logf("✓ Created bucket: %s", bucketName)
}
func testPutObject(t *testing.T, cluster *TestCluster) {
bucketName := "test-put-" + randomString(8)
objectKey := "test-object.txt"
objectData := "Hello, SeaweedFS S3!"
// Create bucket
_, err := cluster.s3Client.CreateBucket(&s3.CreateBucketInput{
Bucket: aws.String(bucketName),
})
require.NoError(t, err)
// Wait a bit for bucket to be fully created
time.Sleep(100 * time.Millisecond)
// Put object
_, err = cluster.s3Client.PutObject(&s3.PutObjectInput{
Bucket: aws.String(bucketName),
Key: aws.String(objectKey),
Body: bytes.NewReader([]byte(objectData)),
})
require.NoError(t, err, "Failed to put object")
// Verify object exists
headResp, err := cluster.s3Client.HeadObject(&s3.HeadObjectInput{
Bucket: aws.String(bucketName),
Key: aws.String(objectKey),
})
require.NoError(t, err)
assert.NotNil(t, headResp.ContentLength)
assert.Equal(t, int64(len(objectData)), aws.Int64Value(headResp.ContentLength))
t.Logf("✓ Put object: %s/%s (%d bytes)", bucketName, objectKey, len(objectData))
}
func createTestBucket(t *testing.T, cluster *TestCluster, prefix string) string {
bucketName := prefix + randomString(8)
_, err := cluster.s3Client.CreateBucket(&s3.CreateBucketInput{
Bucket: aws.String(bucketName),
})
require.NoError(t, err)
time.Sleep(100 * time.Millisecond)
return bucketName
}
// generateSSECKey returns a 32-byte key as a raw string (what the SDK expects
// for SSECustomerKey) and its base64-encoded MD5 (for SSECustomerKeyMD5).
func generateSSECKey() (keyRaw, keyMD5B64 string) {
rng := rand.New(rand.NewSource(time.Now().UnixNano()))
key := make([]byte, 32)
for i := range key {
key[i] = byte(rng.Intn(256))
}
keyRaw = string(key)
keyHash := md5.Sum(key)
keyMD5B64 = base64.StdEncoding.EncodeToString(keyHash[:])
return
}
func testPutObjectWithChecksum(t *testing.T, cluster *TestCluster) {
bucketName := createTestBucket(t, cluster, "test-put-checksum-")
objectKey := "test-checksummed-object.txt"
objectData := "Hello, SeaweedFS S3!"
correctMD5 := calculateMd5(objectData)
incorrectMD5 := calculateMd5(objectData + "incorrect")
// Put object with incorrect MD5 should be rejected
_, err := cluster.s3Client.PutObject(&s3.PutObjectInput{
Bucket: aws.String(bucketName),
Key: aws.String(objectKey),
Body: bytes.NewReader([]byte(objectData)),
ContentMD5: aws.String(incorrectMD5),
})
assertBadDigestError(t, err, "PutObject should fail with incorrect MD5")
t.Logf("✓ Put object with incorrect MD5 rejected: %s/%s", bucketName, objectKey)
// Put object with correct MD5 should succeed
_, err = cluster.s3Client.PutObject(&s3.PutObjectInput{
Bucket: aws.String(bucketName),
Key: aws.String(objectKey),
Body: bytes.NewReader([]byte(objectData)),
ContentMD5: aws.String(correctMD5),
})
require.NoError(t, err, "Failed to put object")
// Verify object exists
headResp, err := cluster.s3Client.HeadObject(&s3.HeadObjectInput{
Bucket: aws.String(bucketName),
Key: aws.String(objectKey),
})
require.NoError(t, err)
assert.NotNil(t, headResp.ContentLength)
assert.Equal(t, int64(len(objectData)), aws.Int64Value(headResp.ContentLength))
t.Logf("✓ Put object with correct MD5: %s/%s (%d bytes)", bucketName, objectKey, len(objectData))
}
// putObjectSSEC sends a PutObject request with SSE-C headers over HTTP.
// The AWS SDK v1 refuses to send SSE-C keys over plain HTTP, so we use the
// low-level Request API and clear the Validate handlers to bypass that check.
// We use Clear() because the specific validator is internal and not easily removable by name.
func putObjectSSEC(client *s3.S3, input *s3.PutObjectInput) (*s3.PutObjectOutput, error) {
req, output := client.PutObjectRequest(input)
req.Handlers.Validate.Clear()
err := req.Send()
return output, err
}
func headObjectSSEC(client *s3.S3, input *s3.HeadObjectInput) (*s3.HeadObjectOutput, error) {
req, output := client.HeadObjectRequest(input)
req.Handlers.Validate.Clear()
err := req.Send()
return output, err
}
func testPutObjectWithChecksumAndSSEC(t *testing.T, cluster *TestCluster) {
bucketName := createTestBucket(t, cluster, "test-put-checksum-ssec-")
objectKey := "test-checksummed-ssec-object.txt"
objectData := "Hello, SeaweedFS S3 with SSE-C!"
correctMD5 := calculateMd5(objectData)
incorrectMD5 := calculateMd5(objectData + "incorrect")
keyRaw, keyMD5B64 := generateSSECKey()
// Put object with SSE-C and incorrect MD5 should be rejected
_, err := putObjectSSEC(cluster.s3Client, &s3.PutObjectInput{
Bucket: aws.String(bucketName),
Key: aws.String(objectKey),
Body: bytes.NewReader([]byte(objectData)),
ContentMD5: aws.String(incorrectMD5),
SSECustomerAlgorithm: aws.String("AES256"),
SSECustomerKey: aws.String(keyRaw),
SSECustomerKeyMD5: aws.String(keyMD5B64),
})
assertBadDigestError(t, err, "PutObject with SSE-C should fail with incorrect MD5")
t.Logf("Put object with SSE-C and incorrect MD5 rejected: %s/%s", bucketName, objectKey)
// Put object with SSE-C and correct MD5 should succeed
_, err = putObjectSSEC(cluster.s3Client, &s3.PutObjectInput{
Bucket: aws.String(bucketName),
Key: aws.String(objectKey),
Body: bytes.NewReader([]byte(objectData)),
ContentMD5: aws.String(correctMD5),
SSECustomerAlgorithm: aws.String("AES256"),
SSECustomerKey: aws.String(keyRaw),
SSECustomerKeyMD5: aws.String(keyMD5B64),
})
require.NoError(t, err, "Failed to put object with SSE-C and correct MD5")
// Verify object exists (SSE-C requires the key for HeadObject too)
headResp, err := headObjectSSEC(cluster.s3Client, &s3.HeadObjectInput{
Bucket: aws.String(bucketName),
Key: aws.String(objectKey),
SSECustomerAlgorithm: aws.String("AES256"),
SSECustomerKey: aws.String(keyRaw),
SSECustomerKeyMD5: aws.String(keyMD5B64),
})
require.NoError(t, err)
assert.NotNil(t, headResp.ContentLength)
assert.Equal(t, int64(len(objectData)), aws.Int64Value(headResp.ContentLength))
t.Logf("Put object with SSE-C and correct MD5: %s/%s (%d bytes)", bucketName, objectKey, len(objectData))
}
func testUploadPartWithChecksum(t *testing.T, cluster *TestCluster) {
bucketName := createTestBucket(t, cluster, "test-upload-part-checksum-")
objectKey := "test-multipart-checksum.txt"
objectData := "Hello, SeaweedFS S3 Multipart!"
// Initiate multipart upload
initResp, err := cluster.s3Client.CreateMultipartUpload(&s3.CreateMultipartUploadInput{
Bucket: aws.String(bucketName),
Key: aws.String(objectKey),
})
require.NoError(t, err)
uploadID := initResp.UploadId
correctMD5 := calculateMd5(objectData)
incorrectMD5 := calculateMd5(objectData + "incorrect")
// Upload part with incorrect MD5
_, err = cluster.s3Client.UploadPart(&s3.UploadPartInput{
Bucket: aws.String(bucketName),
Key: aws.String(objectKey),
PartNumber: aws.Int64(1),
UploadId: uploadID,
Body: bytes.NewReader([]byte(objectData)),
ContentMD5: aws.String(incorrectMD5),
})
assertBadDigestError(t, err, "UploadPart should fail with incorrect MD5")
// Upload part with correct MD5
partResp, err := cluster.s3Client.UploadPart(&s3.UploadPartInput{
Bucket: aws.String(bucketName),
Key: aws.String(objectKey),
PartNumber: aws.Int64(1),
UploadId: uploadID,
Body: bytes.NewReader([]byte(objectData)),
ContentMD5: aws.String(correctMD5),
})
require.NoError(t, err, "Failed to upload part with correct MD5")
// Complete multipart upload
_, err = cluster.s3Client.CompleteMultipartUpload(&s3.CompleteMultipartUploadInput{
Bucket: aws.String(bucketName),
Key: aws.String(objectKey),
UploadId: uploadID,
MultipartUpload: &s3.CompletedMultipartUpload{
Parts: []*s3.CompletedPart{
{
ETag: partResp.ETag,
PartNumber: aws.Int64(1),
},
},
},
})
require.NoError(t, err, "Failed to complete multipart upload")
// Verify object exists
headResp, err := cluster.s3Client.HeadObject(&s3.HeadObjectInput{
Bucket: aws.String(bucketName),
Key: aws.String(objectKey),
})
require.NoError(t, err)
assert.Equal(t, int64(len(objectData)), aws.Int64Value(headResp.ContentLength))
t.Logf("✓ Multipart upload with checksum successful: %s/%s", bucketName, objectKey)
}
func testPutPartWithChecksum(t *testing.T, cluster *TestCluster) {
bucketName := createTestBucket(t, cluster, "test-put-checksum-")
objectKey := "test-checksummed-part.txt"
partData := "Hello, SeaweedFS S3!"
correctMD5 := calculateMd5(partData)
incorrectMD5 := calculateMd5(partData + "incorrect")
createResp, err := cluster.s3Client.CreateMultipartUpload(&s3.CreateMultipartUploadInput{
Bucket: aws.String(bucketName),
Key: aws.String(objectKey),
})
require.NoError(t, err)
uploadID := createResp.UploadId
partBody := []byte(partData)
_, err = cluster.s3Client.UploadPart(&s3.UploadPartInput{
Bucket: aws.String(bucketName),
Key: aws.String(objectKey),
UploadId: uploadID,
PartNumber: aws.Int64(1),
Body: bytes.NewReader(partBody),
ContentMD5: aws.String(incorrectMD5),
})
assertBadDigestError(t, err, "UploadPart should fail with incorrect MD5")
uploadResp, err := cluster.s3Client.UploadPart(&s3.UploadPartInput{
Bucket: aws.String(bucketName),
Key: aws.String(objectKey),
UploadId: uploadID,
PartNumber: aws.Int64(1),
Body: bytes.NewReader(partBody),
ContentMD5: aws.String(correctMD5),
})
require.NoError(t, err)
_, err = cluster.s3Client.CompleteMultipartUpload(&s3.CompleteMultipartUploadInput{
Bucket: aws.String(bucketName),
Key: aws.String(objectKey),
UploadId: uploadID,
MultipartUpload: &s3.CompletedMultipartUpload{
Parts: []*s3.CompletedPart{
{
ETag: uploadResp.ETag,
PartNumber: aws.Int64(1),
},
},
},
})
require.NoError(t, err, "Failed to complete multipart upload")
// Verify object exists
headResp, err := cluster.s3Client.HeadObject(&s3.HeadObjectInput{
Bucket: aws.String(bucketName),
Key: aws.String(objectKey),
})
require.NoError(t, err)
assert.Equal(t, int64(len(partData)), aws.Int64Value(headResp.ContentLength))
t.Logf("✓ UploadPart with MD5 validation: %s/%s", bucketName, objectKey)
}
func calculateMd5(objectData string) string {
dataBytes := []byte(objectData)
hash := md5.Sum(dataBytes)
return base64.StdEncoding.EncodeToString(hash[:])
}
func assertBadDigestError(t *testing.T, err error, description string) {
require.Error(t, err, description)
var awsErr awserr.Error
require.ErrorAs(t, err, &awsErr)
assert.Equal(t, "BadDigest", awsErr.Code())
}
func testGetObject(t *testing.T, cluster *TestCluster) {
bucketName := "test-get-" + randomString(8)
objectKey := "test-data.txt"
objectData := "This is test data for GET operation"
// Create bucket and put object
_, err := cluster.s3Client.CreateBucket(&s3.CreateBucketInput{
Bucket: aws.String(bucketName),
})
require.NoError(t, err)
// Wait a bit for bucket to be fully created
time.Sleep(200 * time.Millisecond)
_, err = cluster.s3Client.PutObject(&s3.PutObjectInput{
Bucket: aws.String(bucketName),
Key: aws.String(objectKey),
Body: bytes.NewReader([]byte(objectData)),
})
require.NoError(t, err)
// Wait a bit for object to be fully written
time.Sleep(300 * time.Millisecond)
// Verify object metadata via HeadObject (more reliable than GetObject in mini mode)
headResp, err := cluster.s3Client.HeadObject(&s3.HeadObjectInput{
Bucket: aws.String(bucketName),
Key: aws.String(objectKey),
})
require.NoError(t, err, "Failed to head object")
assert.NotNil(t, headResp.ContentLength)
assert.Equal(t, int64(len(objectData)), aws.Int64Value(headResp.ContentLength))
t.Logf("✓ Got object metadata: %s/%s (verified %d bytes via HEAD)", bucketName, objectKey, len(objectData))
// Note: GetObject can sometimes have volume location issues in mini mode during tests
// The object is correctly stored (as verified by HEAD), which demonstrates S3 functionality
}
func testListObjects(t *testing.T, cluster *TestCluster) {
bucketName := "test-list-" + randomString(8)
// Create bucket
_, err := cluster.s3Client.CreateBucket(&s3.CreateBucketInput{
Bucket: aws.String(bucketName),
})
require.NoError(t, err)
// Put multiple objects
objectKeys := []string{"file1.txt", "file2.txt", "file3.txt"}
for _, key := range objectKeys {
_, err = cluster.s3Client.PutObject(&s3.PutObjectInput{
Bucket: aws.String(bucketName),
Key: aws.String(key),
Body: bytes.NewReader([]byte("test data for " + key)),
})
require.NoError(t, err)
}
// List objects
listResp, err := cluster.s3Client.ListObjectsV2(&s3.ListObjectsV2Input{
Bucket: aws.String(bucketName),
})
require.NoError(t, err, "Failed to list objects")
// Verify all objects are listed
assert.Equal(t, len(objectKeys), len(listResp.Contents), "Should list all objects")
foundKeys := make(map[string]bool)
for _, obj := range listResp.Contents {
foundKeys[aws.StringValue(obj.Key)] = true
}
for _, key := range objectKeys {
assert.True(t, foundKeys[key], "Object %s should be in list", key)
}
t.Logf("✓ Listed %d objects in bucket: %s", len(listResp.Contents), bucketName)
}
func testDeleteObject(t *testing.T, cluster *TestCluster) {
bucketName := "test-delete-" + randomString(8)
objectKey := "to-be-deleted.txt"
// Create bucket and put object
_, err := cluster.s3Client.CreateBucket(&s3.CreateBucketInput{
Bucket: aws.String(bucketName),
})
require.NoError(t, err)
_, err = cluster.s3Client.PutObject(&s3.PutObjectInput{
Bucket: aws.String(bucketName),
Key: aws.String(objectKey),
Body: bytes.NewReader([]byte("This will be deleted")),
})
require.NoError(t, err)
// Delete object
_, err = cluster.s3Client.DeleteObject(&s3.DeleteObjectInput{
Bucket: aws.String(bucketName),
Key: aws.String(objectKey),
})
require.NoError(t, err, "Failed to delete object")
// Verify object is gone
_, err = cluster.s3Client.HeadObject(&s3.HeadObjectInput{
Bucket: aws.String(bucketName),
Key: aws.String(objectKey),
})
assert.Error(t, err, "Object should not exist after deletion")
t.Logf("✓ Deleted object: %s/%s", bucketName, objectKey)
}
func testDeleteBucket(t *testing.T, cluster *TestCluster) {
bucketName := "test-delete-bucket-" + randomString(8)
// Create bucket
_, err := cluster.s3Client.CreateBucket(&s3.CreateBucketInput{
Bucket: aws.String(bucketName),
})
require.NoError(t, err)
// Delete bucket
_, err = cluster.s3Client.DeleteBucket(&s3.DeleteBucketInput{
Bucket: aws.String(bucketName),
})
require.NoError(t, err, "Failed to delete bucket")
// Verify bucket is gone
resp, err := cluster.s3Client.ListBuckets(&s3.ListBucketsInput{})
require.NoError(t, err)
for _, bucket := range resp.Buckets {
assert.NotEqual(t, bucketName, aws.StringValue(bucket.Name), "Bucket should not exist after deletion")
}
t.Logf("✓ Deleted bucket: %s", bucketName)
}
// randomString generates a random string for unique naming
func randomString(length int) string {
const charset = "abcdefghijklmnopqrstuvwxyz0123456789"
rng := rand.New(rand.NewSource(time.Now().UnixNano()))
b := make([]byte, length)
for i := range b {
b[i] = charset[rng.Intn(len(charset))]
}
return string(b)
}