chore: execute goimports to format the code (#7983)

* chore: execute goimports to format the code

Signed-off-by: promalert <promalert@outlook.com>

* goimports -w .

---------

Signed-off-by: promalert <promalert@outlook.com>
Co-authored-by: Chris Lu <chris.lu@gmail.com>
This commit is contained in:
promalert
2026-01-08 05:06:08 +08:00
committed by GitHub
parent 6432019d08
commit 9012069bd7
271 changed files with 608 additions and 461 deletions

View File

@@ -3,15 +3,16 @@ package s3api
import (
"encoding/json"
"fmt"
"reflect"
"sync"
"testing"
"time"
"github.com/aws/aws-sdk-go/service/s3"
"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
"github.com/seaweedfs/seaweedfs/weed/pb/iam_pb"
"github.com/seaweedfs/seaweedfs/weed/s3api/s3_constants"
"github.com/seaweedfs/seaweedfs/weed/s3api/s3err"
"reflect"
"sync"
"testing"
"time"
)
type BucketMetadataTestCase struct {

View File

@@ -84,11 +84,11 @@ func TestGetEntryNameAndDir(t *testing.T) {
}
tests := []struct {
name string
bucket string
key string
expectedName string
expectedDirEnd string // We check the suffix since dir includes BucketsPath
name string
bucket string
key string
expectedName string
expectedDirEnd string // We check the suffix since dir includes BucketsPath
}{
{
name: "simple file at root",

View File

@@ -2,10 +2,10 @@ package s3api
import (
"context"
"github.com/seaweedfs/seaweedfs/weed/s3api/s3_constants"
"strings"
"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
"github.com/seaweedfs/seaweedfs/weed/s3api/s3_constants"
)
const (

View File

@@ -20,10 +20,11 @@ package policy
import (
"encoding/base64"
"fmt"
"github.com/seaweedfs/seaweedfs/weed/s3api/s3err"
"net/http"
"strings"
"time"
"github.com/seaweedfs/seaweedfs/weed/s3api/s3err"
)
// expirationDateFormat date format for expiration key in json policy.

View File

@@ -4,4 +4,3 @@ const (
// DefaultBucketsPath is the default path for S3 buckets in the filer
DefaultBucketsPath = "/buckets"
)

View File

@@ -12,12 +12,12 @@ const (
ExtLatestVersionIdKey = "Seaweed-X-Amz-Latest-Version-Id"
ExtLatestVersionFileNameKey = "Seaweed-X-Amz-Latest-Version-File-Name"
// Cached list metadata in .versions directory for single-scan efficiency
ExtLatestVersionSizeKey = "Seaweed-X-Amz-Latest-Version-Size"
ExtLatestVersionETagKey = "Seaweed-X-Amz-Latest-Version-ETag"
ExtLatestVersionMtimeKey = "Seaweed-X-Amz-Latest-Version-Mtime"
ExtLatestVersionOwnerKey = "Seaweed-X-Amz-Latest-Version-Owner"
ExtLatestVersionIsDeleteMarker = "Seaweed-X-Amz-Latest-Version-Is-Delete-Marker"
ExtMultipartObjectKey = "key"
ExtLatestVersionSizeKey = "Seaweed-X-Amz-Latest-Version-Size"
ExtLatestVersionETagKey = "Seaweed-X-Amz-Latest-Version-ETag"
ExtLatestVersionMtimeKey = "Seaweed-X-Amz-Latest-Version-Mtime"
ExtLatestVersionOwnerKey = "Seaweed-X-Amz-Latest-Version-Owner"
ExtLatestVersionIsDeleteMarker = "Seaweed-X-Amz-Latest-Version-Is-Delete-Marker"
ExtMultipartObjectKey = "key"
// Bucket Policy
ExtBucketPolicyKey = "Seaweed-X-Amz-Bucket-Policy"

View File

@@ -257,4 +257,3 @@ func TestHasPolicyForBucket(t *testing.T) {
// No longer has policy
assert.False(t, engine.HasPolicyForBucket("test-bucket"))
}

View File

@@ -3,15 +3,16 @@ package s3api
import (
"bytes"
"encoding/json"
"io"
"net/http"
"testing"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/s3"
"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
"github.com/seaweedfs/seaweedfs/weed/pb/iam_pb"
"github.com/seaweedfs/seaweedfs/weed/s3api/s3_constants"
"github.com/seaweedfs/seaweedfs/weed/s3api/s3err"
"io"
"net/http"
"testing"
)
var accountManager *IdentityAccessManagement

View File

@@ -1,9 +1,10 @@
package s3api
import (
"net/http"
"github.com/seaweedfs/seaweedfs/weed/s3api/s3_constants"
"github.com/seaweedfs/seaweedfs/weed/s3api/s3err"
"net/http"
)
func getAccountId(r *http.Request) string {

View File

@@ -88,7 +88,7 @@ func (s3a *S3ApiServer) GetObjectLockConfigurationHandler(w http.ResponseWriter,
if bucketConfig.ObjectLockConfig != nil {
// Set namespace for S3 compatibility
bucketConfig.ObjectLockConfig.XMLNS = s3_constants.S3Namespace
// Use cached configuration and marshal it to XML for response
marshaledXML, err := xml.Marshal(bucketConfig.ObjectLockConfig)
if err != nil {

View File

@@ -93,9 +93,9 @@ func (cb *CircuitBreaker) loadCircuitBreakerConfig(cfg *s3_pb.S3CircuitBreakerCo
func (cb *CircuitBreaker) Limit(f func(w http.ResponseWriter, r *http.Request), action string) (http.HandlerFunc, Action) {
return func(w http.ResponseWriter, r *http.Request) {
// Apply upload limiting for write actions if configured
if cb.s3a != nil && (action == s3_constants.ACTION_WRITE) &&
if cb.s3a != nil && (action == s3_constants.ACTION_WRITE) &&
(cb.s3a.option.ConcurrentUploadLimit != 0 || cb.s3a.option.ConcurrentFileUploadLimit != 0) {
// Get content length, default to 0 if not provided
contentLength := r.ContentLength
if contentLength < 0 {
@@ -108,12 +108,12 @@ func (cb *CircuitBreaker) Limit(f func(w http.ResponseWriter, r *http.Request),
inFlightUploads := atomic.LoadInt64(&cb.s3a.inFlightUploads)
// Wait if either data size limit or file count limit is exceeded
for (cb.s3a.option.ConcurrentUploadLimit != 0 && inFlightDataSize > cb.s3a.option.ConcurrentUploadLimit) ||
for (cb.s3a.option.ConcurrentUploadLimit != 0 && inFlightDataSize > cb.s3a.option.ConcurrentUploadLimit) ||
(cb.s3a.option.ConcurrentFileUploadLimit != 0 && inFlightUploads >= cb.s3a.option.ConcurrentFileUploadLimit) {
if (cb.s3a.option.ConcurrentUploadLimit != 0 && inFlightDataSize > cb.s3a.option.ConcurrentUploadLimit) {
if cb.s3a.option.ConcurrentUploadLimit != 0 && inFlightDataSize > cb.s3a.option.ConcurrentUploadLimit {
glog.V(4).Infof("wait because inflight data %d > %d", inFlightDataSize, cb.s3a.option.ConcurrentUploadLimit)
}
if (cb.s3a.option.ConcurrentFileUploadLimit != 0 && inFlightUploads >= cb.s3a.option.ConcurrentFileUploadLimit) {
if cb.s3a.option.ConcurrentFileUploadLimit != 0 && inFlightUploads >= cb.s3a.option.ConcurrentFileUploadLimit {
glog.V(4).Infof("wait because inflight uploads %d >= %d", inFlightUploads, cb.s3a.option.ConcurrentFileUploadLimit)
}
cb.s3a.inFlightDataLimitCond.Wait()

View File

@@ -1,13 +1,14 @@
package s3api
import (
"github.com/seaweedfs/seaweedfs/weed/pb/s3_pb"
"github.com/seaweedfs/seaweedfs/weed/s3api/s3_constants"
"github.com/seaweedfs/seaweedfs/weed/s3api/s3err"
"net/http"
"sync"
"sync/atomic"
"testing"
"github.com/seaweedfs/seaweedfs/weed/pb/s3_pb"
"github.com/seaweedfs/seaweedfs/weed/s3api/s3_constants"
"github.com/seaweedfs/seaweedfs/weed/s3api/s3err"
)
type TestLimitCase struct {

View File

@@ -173,4 +173,3 @@ func TestEncryptedVolumeCopyScenario(t *testing.T) {
t.Log("✓ All chunk metadata properly preserved for encrypted volume copy scenario")
})
}

View File

@@ -20,7 +20,7 @@ func (s3a *S3ApiServer) WithFilerClient(streamingMode bool, fn func(filer_pb.Sea
if s3a.filerClient != nil {
return s3a.withFilerClientFailover(streamingMode, fn)
}
// Fallback to direct connection if filerClient not initialized
// This should only happen during initialization or testing
return pb.WithGrpcClient(streamingMode, s3a.randomClientId, func(grpcConnection *grpc.ClientConn) error {
@@ -34,41 +34,41 @@ func (s3a *S3ApiServer) WithFilerClient(streamingMode bool, fn func(filer_pb.Sea
func (s3a *S3ApiServer) withFilerClientFailover(streamingMode bool, fn func(filer_pb.SeaweedFilerClient) error) error {
// Get current filer as starting point
currentFiler := s3a.filerClient.GetCurrentFiler()
// Try current filer first (fast path)
err := pb.WithGrpcClient(streamingMode, s3a.randomClientId, func(grpcConnection *grpc.ClientConn) error {
client := filer_pb.NewSeaweedFilerClient(grpcConnection)
return fn(client)
}, currentFiler.ToGrpcAddress(), false, s3a.option.GrpcDialOption)
if err == nil {
s3a.filerClient.RecordFilerSuccess(currentFiler)
return nil
}
// Record failure for current filer
s3a.filerClient.RecordFilerFailure(currentFiler)
// Current filer failed - try all other filers with health-aware selection
filers := s3a.filerClient.GetAllFilers()
var lastErr error = err
for _, filer := range filers {
if filer == currentFiler {
continue // Already tried this one
}
// Skip filers known to be unhealthy (circuit breaker pattern)
if s3a.filerClient.ShouldSkipUnhealthyFiler(filer) {
glog.V(2).Infof("WithFilerClient: skipping unhealthy filer %s", filer)
continue
}
err = pb.WithGrpcClient(streamingMode, s3a.randomClientId, func(grpcConnection *grpc.ClientConn) error {
client := filer_pb.NewSeaweedFilerClient(grpcConnection)
return fn(client)
}, filer.ToGrpcAddress(), false, s3a.option.GrpcDialOption)
if err == nil {
// Success! Record success and update current filer for future requests
s3a.filerClient.RecordFilerSuccess(filer)
@@ -76,13 +76,13 @@ func (s3a *S3ApiServer) withFilerClientFailover(streamingMode bool, fn func(file
glog.V(1).Infof("WithFilerClient: failover from %s to %s succeeded", currentFiler, filer)
return nil
}
// Record failure for health tracking
s3a.filerClient.RecordFilerFailure(filer)
glog.V(2).Infof("WithFilerClient: failover to %s failed: %v", filer, err)
lastErr = err
}
// All filers failed
return fmt.Errorf("all filers failed, last error: %w", lastErr)
}

View File

@@ -44,7 +44,7 @@ func TestPrefixNormalizationInList(t *testing.T) {
t.Run(tt.name, func(t *testing.T) {
// Normalize using NormalizeObjectKey (same as object keys)
normalizedPrefix := s3_constants.NormalizeObjectKey(tt.inputPrefix)
if normalizedPrefix != tt.expectedPrefix {
t.Errorf("Prefix normalization mismatch:\n Input: %q\n Expected: %q\n Got: %q\n Desc: %s",
tt.inputPrefix, tt.expectedPrefix, normalizedPrefix, tt.description)
@@ -75,11 +75,11 @@ func TestListPrefixConsistency(t *testing.T) {
// startsWithPrefix reports whether objectKey begins with the given S3 list
// prefix, after normalizing the prefix with the same rules applied to object
// keys (s3_constants.NormalizeObjectKey), so listing and storage agree.
func startsWithPrefix(objectKey, prefix string) bool {
	// Normalize the prefix using the same logic as NormalizeObjectKey
	normalizedPrefix := s3_constants.NormalizeObjectKey(prefix)

	// An empty (post-normalization) prefix matches every object key.
	if normalizedPrefix == "" {
		return true
	}

	// Guard the slice below: objectKey[:len(normalizedPrefix)] would panic
	// with "slice bounds out of range" when the key is shorter than the
	// prefix (e.g. key "a" vs prefix "abc/"). A shorter key can never have
	// the prefix, so answer false directly.
	if len(objectKey) < len(normalizedPrefix) {
		return false
	}

	// Check if the object key starts with the normalized prefix. Exact
	// equality is subsumed by this comparison once lengths are validated.
	return objectKey[:len(normalizedPrefix)] == normalizedPrefix
}

View File

@@ -2,6 +2,7 @@ package s3api
import (
"context"
"github.com/seaweedfs/seaweedfs/weed/pb/s3_pb"
)

View File

@@ -1,8 +1,9 @@
package s3api
import (
"github.com/seaweedfs/seaweedfs/weed/s3api/s3err"
"net/http"
"github.com/seaweedfs/seaweedfs/weed/s3api/s3err"
)
func (s3a *S3ApiServer) StatusHandler(w http.ResponseWriter, r *http.Request) {

View File

@@ -459,8 +459,8 @@ func (scm *StreamingCopyManager) streamToChunks(ctx context.Context, reader io.R
for {
n, err := reader.Read(buffer)
if n > 0 {
// Create chunk for this data, setting SSE type and per-chunk metadata (including chunk-specific IVs for SSE-S3)
chunk, chunkErr := scm.createChunkFromData(buffer[:n], offset, dstPath, spec.EncryptionSpec)
// Create chunk for this data, setting SSE type and per-chunk metadata (including chunk-specific IVs for SSE-S3)
chunk, chunkErr := scm.createChunkFromData(buffer[:n], offset, dstPath, spec.EncryptionSpec)
if chunkErr != nil {
return nil, fmt.Errorf("create chunk from data: %w", chunkErr)
}

View File

@@ -1,9 +1,10 @@
package s3api
import (
"github.com/seaweedfs/seaweedfs/weed/s3api/s3err"
"testing"
"time"
"github.com/seaweedfs/seaweedfs/weed/s3api/s3err"
)
func TestCopyObjectResponse(t *testing.T) {

View File

@@ -372,4 +372,3 @@ func TestMixedFormatTransition(t *testing.T) {
t.Errorf("compareVersionIds(newest_new_format, oldest_old_format) = %d, want negative", result)
}
}

View File

@@ -1,8 +1,9 @@
package s3bucket
import (
"github.com/stretchr/testify/assert"
"testing"
"github.com/stretchr/testify/assert"
)
func Test_verifyBucketName(t *testing.T) {