Files
seaweedFS/weed/s3api/s3api_object_handlers_put_test.go
Chris Lu 0b3867dca3 filer: add structured error codes to CreateEntryResponse (#8767)
* filer: add FilerError enum and error_code field to CreateEntryResponse

Add a machine-readable error code alongside the existing string error
field. This follows the precedent set by PublishMessageResponse in the
MQ broker proto. The string field is kept for human readability and
backward compatibility.

Defined codes: OK, ENTRY_NAME_TOO_LONG, PARENT_IS_FILE,
EXISTING_IS_DIRECTORY, EXISTING_IS_FILE, ENTRY_ALREADY_EXISTS.

* filer: add sentinel errors and error code mapping in filer_pb

Define sentinel errors (ErrEntryNameTooLong, ErrParentIsFile, etc.) in
the filer_pb package so both the filer and consumers can reference them
without circular imports.

Add FilerErrorToSentinel() to map proto error codes to sentinels, and
update CreateEntryWithResponse() to check error_code first, falling back
to the string-based path for backward compatibility with old servers.

* filer: return wrapped sentinel errors and set proto error codes

Replace fmt.Errorf string errors in filer.CreateEntry, UpdateEntry, and
ensureParentDirectoryEntry with wrapped filer_pb sentinel errors (using
%w). This preserves errors.Is() traversal on the server side.

In the gRPC CreateEntry handler, map sentinel errors to the
corresponding FilerError proto codes using errors.Is(), setting both
resp.Error (string, for backward compat) and resp.ErrorCode (enum).

* S3: use errors.Is() with filer sentinels instead of string matching

Replace fragile string-based error matching in filerErrorToS3Error and
other S3 API consumers with errors.Is() checks against filer_pb sentinel
errors. This works because the updated CreateEntryWithResponse helper
reconstructs sentinel errors from the proto FilerError code.

Update iceberg stage_create and metadata_files to check resp.ErrorCode
instead of parsing resp.Error strings. Update SSE-S3 to use errors.Is()
for the already-exists check.

String matching is retained only for non-filer errors (gRPC transport
errors, checksum validation) that don't go through CreateEntryResponse.

* filer: remove backward-compat string fallbacks for error codes

Clients and servers in this codebase are deployed together from the same
release, so there is no need for backward-compatibility fallback paths that
parse resp.Error strings when resp.ErrorCode is unset. Simplify all consumers
to rely solely on the structured error code.

* iceberg: ensure unknown non-OK error codes are not silently ignored

When FilerErrorToSentinel returns nil for an unrecognized error code,
return an error including the code and message rather than falling
through to return nil.

* filer: fix redundant error message and restore error wrapping in helper

Use request path instead of resp.Error in the sentinel error format
string to avoid duplicating the sentinel message (e.g. "entry already
exists: entry already exists"). Restore %w wrapping with errors.New()
in the fallback paths so callers can use errors.Is()/errors.As().

* filer: promote file to directory on path conflict instead of erroring

S3 allows both "foo/bar" (object) and "foo/bar/xyzzy" (another object)
to coexist because S3 has a flat key space. When ensureParentDirectoryEntry
finds a parent path that is a file instead of a directory, promote it to
a directory by setting ModeDir while preserving the original content and
chunks. Use Store.UpdateEntry directly to bypass the Filer.UpdateEntry
type-change guard.

This fixes the S3 compatibility test failures where creating overlapping
keys (e.g. "foo/bar" then "foo/bar/xyzzy") returned ExistingObjectIsFile.
2026-03-24 17:08:22 -07:00

202 lines
6.5 KiB
Go

package s3api
import (
"encoding/xml"
"errors"
"fmt"
"net/http"
"net/http/httptest"
"strings"
"testing"
"github.com/gorilla/mux"
"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
"github.com/seaweedfs/seaweedfs/weed/s3api/s3_constants"
"github.com/seaweedfs/seaweedfs/weed/s3api/s3err"
weed_server "github.com/seaweedfs/seaweedfs/weed/server"
"github.com/seaweedfs/seaweedfs/weed/util/constants"
)
// TestFilerErrorToS3Error verifies that filer-side errors — bare sentinels,
// sentinels wrapped with %w, and plain string errors — map to the expected
// S3 API error codes, and that unknown errors fall back to ErrInternalError.
func TestFilerErrorToS3Error(t *testing.T) {
	type testCase struct {
		name        string
		err         error
		expectedErr s3err.ErrorCode
	}

	cases := []testCase{
		{
			name:        "nil error",
			err:         nil,
			expectedErr: s3err.ErrNone,
		},
		{
			name:        "MD5 mismatch error",
			err:         errors.New(constants.ErrMsgBadDigest),
			expectedErr: s3err.ErrBadDigest,
		},
		{
			name:        "Read only error (direct)",
			err:         weed_server.ErrReadOnly,
			expectedErr: s3err.ErrAccessDenied,
		},
		{
			name:        "Read only error (wrapped)",
			err:         fmt.Errorf("create file /buckets/test/file.txt: %w", weed_server.ErrReadOnly),
			expectedErr: s3err.ErrAccessDenied,
		},
		{
			name:        "Context canceled error",
			err:         errors.New("rpc error: code = Canceled desc = context canceled"),
			expectedErr: s3err.ErrInvalidRequest,
		},
		{
			name:        "Context canceled error (simple)",
			err:         errors.New("context canceled"),
			expectedErr: s3err.ErrInvalidRequest,
		},
		{
			name:        "Directory exists error (sentinel)",
			err:         fmt.Errorf("CreateEntry /path: %w", filer_pb.ErrExistingIsDirectory),
			expectedErr: s3err.ErrExistingObjectIsDirectory,
		},
		{
			name:        "Parent is file error (sentinel)",
			err:         fmt.Errorf("CreateEntry /path: %w", filer_pb.ErrParentIsFile),
			expectedErr: s3err.ErrExistingObjectIsFile,
		},
		{
			name:        "Existing is file error (sentinel)",
			err:         fmt.Errorf("CreateEntry /path: %w", filer_pb.ErrExistingIsFile),
			expectedErr: s3err.ErrExistingObjectIsFile,
		},
		{
			name:        "Entry name too long (sentinel)",
			err:         fmt.Errorf("CreateEntry: %w", filer_pb.ErrEntryNameTooLong),
			expectedErr: s3err.ErrKeyTooLongError,
		},
		{
			name:        "Entry name too long (bare sentinel)",
			err:         filer_pb.ErrEntryNameTooLong,
			expectedErr: s3err.ErrKeyTooLongError,
		},
		{
			name:        "Unknown error",
			err:         errors.New("some random error"),
			expectedErr: s3err.ErrInternalError,
		},
	}

	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			// The mapper must honor errors.Is() traversal, so wrapped and
			// bare sentinels are expected to yield the same code.
			if got := filerErrorToS3Error(tc.err); got != tc.expectedErr {
				t.Errorf("filerErrorToS3Error(%v) = %v, want %v", tc.err, got, tc.expectedErr)
			}
		})
	}
}
// setupKeyLengthTestRouter builds a bare-bones mux router that routes any
// "/{bucket}/{object}" request straight to handler, populating the {bucket}
// and {object} mux vars. Authentication and all other middleware are skipped.
func setupKeyLengthTestRouter(handler http.HandlerFunc) *mux.Router {
	r := mux.NewRouter()
	// {object:.+} requires a non-empty object key, mirroring the real routes.
	r.PathPrefix("/{bucket}").Subrouter().Path("/{object:.+}").HandlerFunc(handler)
	return r
}
func TestPutObjectHandler_KeyTooLong(t *testing.T) {
s3a := &S3ApiServer{}
router := setupKeyLengthTestRouter(s3a.PutObjectHandler)
longKey := strings.Repeat("a", s3_constants.MaxS3ObjectKeyLength+1)
req := httptest.NewRequest(http.MethodPut, "/bucket/"+longKey, nil)
rr := httptest.NewRecorder()
router.ServeHTTP(rr, req)
if rr.Code != http.StatusBadRequest {
t.Errorf("expected status %d, got %d", http.StatusBadRequest, rr.Code)
}
var errResp s3err.RESTErrorResponse
if err := xml.Unmarshal(rr.Body.Bytes(), &errResp); err != nil {
t.Fatalf("failed to parse error XML: %v", err)
}
if errResp.Code != "KeyTooLongError" {
t.Errorf("expected error code KeyTooLongError, got %s", errResp.Code)
}
}
// TestPutObjectHandler_KeyAtLimit checks that a key of exactly
// MaxS3ObjectKeyLength bytes passes the length check. The server here is
// uninitialized, so the handler will likely panic once it gets past the
// check; that panic is converted to a 500, which is acceptable — the test
// only fails if the response is specifically 400 KeyTooLongError.
func TestPutObjectHandler_KeyAtLimit(t *testing.T) {
	s3a := &S3ApiServer{}

	// Convert panics from the uninitialized server into 500 responses.
	// Reaching a panic means the key was accepted and the handler continued
	// past the length check, which is exactly what we want to observe.
	withRecovery := func(w http.ResponseWriter, r *http.Request) {
		defer func() {
			if recover() != nil {
				w.WriteHeader(http.StatusInternalServerError)
			}
		}()
		s3a.PutObjectHandler(w, r)
	}

	router := setupKeyLengthTestRouter(withRecovery)
	atLimitKey := strings.Repeat("a", s3_constants.MaxS3ObjectKeyLength)
	rec := httptest.NewRecorder()
	router.ServeHTTP(rec, httptest.NewRequest(http.MethodPut, "/bucket/"+atLimitKey, nil))

	// Any outcome other than a 400 KeyTooLongError proves the key passed
	// the length check (including a 500 from uninitialized server state).
	if rec.Code == http.StatusBadRequest {
		var errResp s3err.RESTErrorResponse
		if xml.Unmarshal(rec.Body.Bytes(), &errResp) == nil && errResp.Code == "KeyTooLongError" {
			t.Errorf("key at exactly %d bytes should not be rejected as too long", s3_constants.MaxS3ObjectKeyLength)
		}
	}
}
func TestCopyObjectHandler_KeyTooLong(t *testing.T) {
s3a := &S3ApiServer{}
router := setupKeyLengthTestRouter(s3a.CopyObjectHandler)
longKey := strings.Repeat("a", s3_constants.MaxS3ObjectKeyLength+1)
req := httptest.NewRequest(http.MethodPut, "/bucket/"+longKey, nil)
req.Header.Set("X-Amz-Copy-Source", "/src-bucket/src-object")
rr := httptest.NewRecorder()
router.ServeHTTP(rr, req)
if rr.Code != http.StatusBadRequest {
t.Errorf("expected status %d, got %d", http.StatusBadRequest, rr.Code)
}
var errResp s3err.RESTErrorResponse
if err := xml.Unmarshal(rr.Body.Bytes(), &errResp); err != nil {
t.Fatalf("failed to parse error XML: %v", err)
}
if errResp.Code != "KeyTooLongError" {
t.Errorf("expected error code KeyTooLongError, got %s", errResp.Code)
}
}
func TestNewMultipartUploadHandler_KeyTooLong(t *testing.T) {
s3a := &S3ApiServer{}
router := setupKeyLengthTestRouter(s3a.NewMultipartUploadHandler)
longKey := strings.Repeat("a", s3_constants.MaxS3ObjectKeyLength+1)
req := httptest.NewRequest(http.MethodPost, "/bucket/"+longKey+"?uploads", nil)
rr := httptest.NewRecorder()
router.ServeHTTP(rr, req)
if rr.Code != http.StatusBadRequest {
t.Errorf("expected status %d, got %d", http.StatusBadRequest, rr.Code)
}
var errResp s3err.RESTErrorResponse
if err := xml.Unmarshal(rr.Body.Bytes(), &errResp); err != nil {
t.Fatalf("failed to parse error XML: %v", err)
}
if errResp.Code != "KeyTooLongError" {
t.Errorf("expected error code KeyTooLongError, got %s", errResp.Code)
}
}