* S3: Implement IAM defaults and STS signing key fallback logic * S3: Refactor startup order to init SSE-S3 key manager before IAM * S3: Derive STS signing key from KEK using HKDF for security isolation * S3: Document STS signing key fallback in security.toml * fix(s3api): refine anonymous access logic and secure-by-default behavior - Initialize anonymous identity by default in `NewIdentityAccessManagement` to prevent nil pointer exceptions. - Ensure `ReplaceS3ApiConfiguration` preserves the anonymous identity if not present in the new configuration. - Update `NewIdentityAccessManagement` signature to accept `filerClient`. - In legacy mode (no policy engine), anonymous defaults to Deny (no actions), preserving secure-by-default behavior. - Use specific `LookupAnonymous` method instead of generic map lookup. - Update tests to accommodate signature changes and verify improved anonymous handling. * feat(s3api): make IAM configuration optional - Start S3 API server without a configuration file if `EnableIam` option is set. - Default to `Allow` effect for policy engine when no configuration is provided (Zero-Config mode). - Handle empty configuration path gracefully in `loadIAMManagerFromConfig`. - Add integration test `iam_optional_test.go` to verify empty config behavior. * fix(iamapi): fix signature mismatch in NewIdentityAccessManagementWithStore * fix(iamapi): properly initialize FilerClient instead of passing nil * fix(iamapi): properly initialize filer client for IAM management - Instead of passing `nil`, construct a `wdclient.FilerClient` using the provided `Filers` addresses. - Ensure `NewIdentityAccessManagementWithStore` receives a valid `filerClient` to avoid potential nil pointer dereferences or limited functionality. 
* clean: remove dead code in s3api_server.go * refactor(s3api): improve IAM initialization, safety and anonymous access security * fix(s3api): ensure IAM config loads from filer after client init * fix(s3): resolve test failures in integration, CORS, and tagging tests - Fix CORS tests by providing explicit anonymous permissions config - Fix S3 integration tests by setting admin credentials in init - Align tagging test credentials in CI with IAM defaults - Added goroutine to retry IAM config load in iamapi server * fix(s3): allow anonymous access to health targets and S3 Tables when identities are present * fix(ci): use /healthz for Caddy health check in awscli tests * iam, s3api: expose DefaultAllow from IAM and Policy Engine This allows checking the global "Open by Default" configuration from other components like S3 Tables. * s3api/s3tables: support DefaultAllow in permission logic and handler Updated CheckPermissionWithContext to respect the DefaultAllow flag in PolicyContext. This enables "Open by Default" behavior for unauthenticated access in zero-config environments. Added a targeted unit test to verify the logic. * s3api/s3tables: propagate DefaultAllow through handlers Propagated the DefaultAllow flag to individual handlers for namespaces, buckets, tables, policies, and tagging. This ensures consistent "Open by Default" behavior across all S3 Tables API endpoints. * s3api: wire up DefaultAllow for S3 Tables API initialization Updated registerS3TablesRoutes to query the global IAM configuration and set the DefaultAllow flag on the S3 Tables API server. This completes the end-to-end propagation required for anonymous access in zero-config environments. Added a SetDefaultAllow method to S3TablesApiServer to facilitate this. * s3api: fix tests by adding DefaultAllow to mock IAM integrations The IAMIntegration interface was updated to include DefaultAllow(), breaking several mock implementations in tests. 
This commit fixes the build errors by adding the missing method to the mocks. * env * ensure ports * env * env * fix default allow * add one more test using non-anonymous user * debug * add more debug * less logs
1248 lines
41 KiB
Go
1248 lines
41 KiB
Go
package s3tables
|
|
|
|
import (
|
|
"context"
|
|
"encoding/json"
|
|
"errors"
|
|
"fmt"
|
|
"io"
|
|
"net/http"
|
|
"strings"
|
|
"time"
|
|
|
|
"github.com/seaweedfs/seaweedfs/weed/glog"
|
|
"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
|
|
)
|
|
|
|
// handleCreateTable creates a new table in a namespace.
//
// Request flow:
//  1. Decode and validate the CreateTableRequest (bucket ARN, namespace,
//     name, format — only "ICEBERG" is accepted).
//  2. Verify the target namespace exists by reading its metadata attribute
//     from the filer.
//  3. Authorize "CreateTable" against both the namespace policy and the
//     bucket policy; EITHER one granting access is sufficient.
//  4. Idempotency: if the table already exists, respond 200 OK with the
//     existing table's ARN, version token, and metadata location.
//  5. Otherwise create the table directory plus a "data" subdirectory for
//     Iceberg files, persist metadata/tags as extended attributes, and
//     best-effort register the metadata-location -> table-path mapping.
//
// Errors are reported to the client via h.writeError and also returned to
// the caller for logging.
func (h *S3TablesHandler) handleCreateTable(w http.ResponseWriter, r *http.Request, filerClient FilerClient) error {

	var req CreateTableRequest
	if err := h.readRequestBody(r, &req); err != nil {
		h.writeError(w, http.StatusBadRequest, ErrCodeInvalidRequest, err.Error())
		return err
	}

	if req.TableBucketARN == "" {
		h.writeError(w, http.StatusBadRequest, ErrCodeInvalidRequest, "tableBucketARN is required")
		return fmt.Errorf("tableBucketARN is required")
	}

	namespaceName, err := validateNamespace(req.Namespace)
	if err != nil {
		h.writeError(w, http.StatusBadRequest, ErrCodeInvalidRequest, err.Error())
		return err
	}

	if req.Name == "" {
		h.writeError(w, http.StatusBadRequest, ErrCodeInvalidRequest, "name is required")
		return fmt.Errorf("name is required")
	}

	if req.Format == "" {
		h.writeError(w, http.StatusBadRequest, ErrCodeInvalidRequest, "format is required")
		return fmt.Errorf("format is required")
	}

	// Validate format: only Iceberg tables are supported.
	if req.Format != "ICEBERG" {
		h.writeError(w, http.StatusBadRequest, ErrCodeInvalidRequest, "only ICEBERG format is supported")
		return fmt.Errorf("invalid format")
	}

	bucketName, err := parseBucketNameFromARN(req.TableBucketARN)
	if err != nil {
		h.writeError(w, http.StatusBadRequest, ErrCodeInvalidRequest, err.Error())
		return err
	}

	// Validate table name
	tableName, err := validateTableName(req.Name)
	if err != nil {
		h.writeError(w, http.StatusBadRequest, ErrCodeInvalidRequest, err.Error())
		return err
	}

	// Check if namespace exists; its metadata is stored as an extended
	// attribute on the namespace directory.
	namespacePath := GetNamespacePath(bucketName, namespaceName)
	var namespaceMetadata namespaceMetadata
	err = filerClient.WithFilerClient(false, func(client filer_pb.SeaweedFilerClient) error {
		data, err := h.getExtendedAttribute(r.Context(), client, namespacePath, ExtendedKeyMetadata)
		if err != nil {
			return err
		}
		if err := json.Unmarshal(data, &namespaceMetadata); err != nil {
			return fmt.Errorf("failed to unmarshal namespace metadata: %w", err)
		}
		return nil
	})

	if err != nil {
		if errors.Is(err, filer_pb.ErrNotFound) {
			h.writeError(w, http.StatusNotFound, ErrCodeNoSuchNamespace, fmt.Sprintf("namespace %s not found", namespaceName))
		} else {
			h.writeError(w, http.StatusInternalServerError, ErrCodeInternalError, fmt.Sprintf("failed to check namespace: %v", err))
		}
		return err
	}

	// Authorize table creation using policy framework (namespace + bucket policies)
	accountID := h.getAccountID(r)
	bucketPath := GetTableBucketPath(bucketName)
	namespacePolicy := ""
	bucketPolicy := ""
	bucketTags := map[string]string{}
	var data []byte
	var bucketMetadata tableBucketMetadata

	// Single filer round-trip to gather everything policy evaluation needs:
	// bucket metadata (for the owner account), both policies, and bucket tags.
	// A missing attribute (ErrAttributeNotFound) is not an error here.
	err = filerClient.WithFilerClient(false, func(client filer_pb.SeaweedFilerClient) error {
		// Fetch bucket metadata to use correct owner for bucket policy evaluation
		data, err = h.getExtendedAttribute(r.Context(), client, bucketPath, ExtendedKeyMetadata)
		if err == nil {
			if err := json.Unmarshal(data, &bucketMetadata); err != nil {
				return fmt.Errorf("failed to unmarshal bucket metadata: %w", err)
			}
		} else if !errors.Is(err, ErrAttributeNotFound) {
			return fmt.Errorf("failed to fetch bucket metadata: %v", err)
		}

		// Fetch namespace policy if it exists
		policyData, err := h.getExtendedAttribute(r.Context(), client, namespacePath, ExtendedKeyPolicy)
		if err == nil {
			namespacePolicy = string(policyData)
		} else if !errors.Is(err, ErrAttributeNotFound) {
			return fmt.Errorf("failed to fetch namespace policy: %v", err)
		}

		// Fetch bucket policy if it exists
		policyData, err = h.getExtendedAttribute(r.Context(), client, bucketPath, ExtendedKeyPolicy)
		if err == nil {
			bucketPolicy = string(policyData)
		} else if !errors.Is(err, ErrAttributeNotFound) {
			return fmt.Errorf("failed to fetch bucket policy: %v", err)
		}
		if tags, err := h.readTags(r.Context(), client, bucketPath); err != nil {
			return err
		} else if tags != nil {
			bucketTags = tags
		}

		return nil
	})

	if err != nil {
		h.writeError(w, http.StatusInternalServerError, ErrCodeInternalError, fmt.Sprintf("failed to fetch policies: %v", err))
		return err
	}

	bucketARN := h.generateTableBucketARN(bucketMetadata.OwnerAccountID, bucketName)
	identityActions := getIdentityActions(r)
	// Evaluate both the namespace policy (with the namespace owner) and the
	// bucket policy (with the bucket owner). DefaultAllow propagates the
	// zero-config "open by default" mode into policy evaluation.
	nsAllowed := CheckPermissionWithContext("CreateTable", accountID, namespaceMetadata.OwnerAccountID, namespacePolicy, bucketARN, &PolicyContext{
		TableBucketName: bucketName,
		Namespace:       namespaceName,
		TableName:       tableName,
		RequestTags:     req.Tags,
		TagKeys:         mapKeys(req.Tags),
		TableBucketTags: bucketTags,
		IdentityActions: identityActions,
		DefaultAllow:    h.defaultAllow,
	})
	bucketAllowed := CheckPermissionWithContext("CreateTable", accountID, bucketMetadata.OwnerAccountID, bucketPolicy, bucketARN, &PolicyContext{
		TableBucketName: bucketName,
		Namespace:       namespaceName,
		TableName:       tableName,
		RequestTags:     req.Tags,
		TagKeys:         mapKeys(req.Tags),
		TableBucketTags: bucketTags,
		IdentityActions: identityActions,
		DefaultAllow:    h.defaultAllow,
	})

	if !nsAllowed && !bucketAllowed {
		h.writeError(w, http.StatusForbidden, ErrCodeAccessDenied, "not authorized to create table in this namespace")
		return ErrAccessDenied
	}

	tablePath := GetTablePath(bucketName, namespaceName, tableName)

	// Check if table already exists
	var existingMetadata tableMetadataInternal
	err = filerClient.WithFilerClient(false, func(client filer_pb.SeaweedFilerClient) error {
		data, err := h.getExtendedAttribute(r.Context(), client, tablePath, ExtendedKeyMetadata)
		if err != nil {
			return err
		}
		if unmarshalErr := json.Unmarshal(data, &existingMetadata); unmarshalErr != nil {
			return fmt.Errorf("failed to parse existing table metadata: %w", unmarshalErr)
		}
		return nil
	})

	if err == nil {
		// Table already exists: idempotent success with the existing
		// table's identifiers rather than a conflict error.
		tableARN := h.generateTableARN(existingMetadata.OwnerAccountID, bucketName, namespaceName+"/"+tableName)
		h.writeJSON(w, http.StatusOK, &CreateTableResponse{
			TableARN:         tableARN,
			VersionToken:     existingMetadata.VersionToken,
			MetadataLocation: existingMetadata.MetadataLocation,
		})
		return nil
	} else if !errors.Is(err, filer_pb.ErrNotFound) && !errors.Is(err, ErrAttributeNotFound) {
		h.writeError(w, http.StatusInternalServerError, ErrCodeInternalError, fmt.Sprintf("failed to check table: %v", err))
		return err
	}

	// Create the table
	now := time.Now()
	versionToken := generateVersionToken()

	metadata := &tableMetadataInternal{
		Name:             tableName,
		Namespace:        namespaceName,
		Format:           req.Format,
		CreatedAt:        now,
		ModifiedAt:       now,
		OwnerAccountID:   namespaceMetadata.OwnerAccountID, // Inherit namespace owner for consistency
		VersionToken:     versionToken,
		MetadataVersion:  max(req.MetadataVersion, 1),
		MetadataLocation: req.MetadataLocation,
		Metadata:         req.Metadata,
	}

	metadataBytes, err := json.Marshal(metadata)
	if err != nil {
		h.writeError(w, http.StatusInternalServerError, ErrCodeInternalError, "failed to marshal table metadata")
		return fmt.Errorf("failed to marshal metadata: %w", err)
	}

	err = filerClient.WithFilerClient(false, func(client filer_pb.SeaweedFilerClient) error {
		// Ensure table directory exists (may already be created by object storage clients)
		if err := h.ensureDirectory(r.Context(), client, tablePath); err != nil {
			return err
		}

		// Create data subdirectory for Iceberg files
		dataPath := tablePath + "/data"
		if err := h.ensureDirectory(r.Context(), client, dataPath); err != nil {
			return err
		}

		// Set metadata as extended attribute
		if err := h.setExtendedAttribute(r.Context(), client, tablePath, ExtendedKeyMetadata, metadataBytes); err != nil {
			return err
		}

		// Set tags if provided
		if len(req.Tags) > 0 {
			tagsBytes, err := json.Marshal(req.Tags)
			if err != nil {
				return fmt.Errorf("failed to marshal tags: %w", err)
			}
			if err := h.setExtendedAttribute(r.Context(), client, tablePath, ExtendedKeyTags, tagsBytes); err != nil {
				return err
			}
		}

		// Best-effort: failure to record the metadata-location mapping is
		// logged but not fatal, since the table itself was created.
		if err := h.updateTableLocationMapping(r.Context(), client, "", req.MetadataLocation, tablePath); err != nil {
			glog.V(1).Infof("failed to update table location mapping for %s: %v", req.MetadataLocation, err)
		}

		return nil
	})

	if err != nil {
		h.writeError(w, http.StatusInternalServerError, ErrCodeInternalError, "failed to create table")
		return err
	}

	tableARN := h.generateTableARN(metadata.OwnerAccountID, bucketName, namespaceName+"/"+tableName)

	resp := &CreateTableResponse{
		TableARN:     tableARN,
		VersionToken: versionToken,
	}

	h.writeJSON(w, http.StatusOK, resp)
	return nil
}
|
|
|
|
// handleGetTable gets details of a table.
//
// The table may be addressed either by a full table ARN or by the triple
// (tableBucketARN, namespace, name). After loading the table metadata, the
// caller is authorized for "GetTable" against both the table policy and the
// bucket policy — access is granted if EITHER allows it. On a deny, the
// handler responds 404 (NoSuchTable) instead of 403 so that unauthorized
// callers cannot probe for table existence.
func (h *S3TablesHandler) handleGetTable(w http.ResponseWriter, r *http.Request, filerClient FilerClient) error {

	var req GetTableRequest
	if err := h.readRequestBody(r, &req); err != nil {
		h.writeError(w, http.StatusBadRequest, ErrCodeInvalidRequest, err.Error())
		return err
	}

	var bucketName, namespace, tableName string
	var err error

	// Support getting by ARN or by bucket/namespace/name
	if req.TableARN != "" {
		bucketName, namespace, tableName, err = parseTableFromARN(req.TableARN)
		if err != nil {
			h.writeError(w, http.StatusBadRequest, ErrCodeInvalidRequest, err.Error())
			return err
		}
	} else if req.TableBucketARN != "" && len(req.Namespace) > 0 && req.Name != "" {
		bucketName, err = parseBucketNameFromARN(req.TableBucketARN)
		if err != nil {
			h.writeError(w, http.StatusBadRequest, ErrCodeInvalidRequest, err.Error())
			return err
		}
		namespace, err = validateNamespace(req.Namespace)
		if err != nil {
			h.writeError(w, http.StatusBadRequest, ErrCodeInvalidRequest, err.Error())
			return err
		}
		tableName, err = validateTableName(req.Name)
		if err != nil {
			h.writeError(w, http.StatusBadRequest, ErrCodeInvalidRequest, err.Error())
			return err
		}
	} else {
		h.writeError(w, http.StatusBadRequest, ErrCodeInvalidRequest, "either tableARN or (tableBucketARN, namespace, name) is required")
		return fmt.Errorf("missing required parameters")
	}

	tablePath := GetTablePath(bucketName, namespace, tableName)

	// Load the table's metadata from its extended attribute.
	var metadata tableMetadataInternal
	err = filerClient.WithFilerClient(false, func(client filer_pb.SeaweedFilerClient) error {
		data, err := h.getExtendedAttribute(r.Context(), client, tablePath, ExtendedKeyMetadata)
		if err != nil {
			return err
		}
		if err := json.Unmarshal(data, &metadata); err != nil {
			return fmt.Errorf("failed to unmarshal table metadata: %w", err)
		}
		return nil
	})

	if err != nil {
		if errors.Is(err, filer_pb.ErrNotFound) {
			h.writeError(w, http.StatusNotFound, ErrCodeNoSuchTable, fmt.Sprintf("table %s not found", tableName))
		} else {
			h.writeError(w, http.StatusInternalServerError, ErrCodeInternalError, fmt.Sprintf("failed to get table: %v", err))
		}
		return err
	}

	// Authorize access to the table using policy framework
	accountID := h.getAccountID(r)
	bucketPath := GetTableBucketPath(bucketName)
	tablePolicy := ""
	bucketPolicy := ""
	bucketTags := map[string]string{}
	tableTags := map[string]string{}
	var bucketMetadata tableBucketMetadata

	// Single filer round-trip to collect bucket metadata (owner account),
	// both policies, and both tag sets for policy evaluation. A missing
	// attribute (ErrAttributeNotFound) is expected and not an error.
	err = filerClient.WithFilerClient(false, func(client filer_pb.SeaweedFilerClient) error {
		// Fetch bucket metadata to use correct owner for bucket policy evaluation
		data, err := h.getExtendedAttribute(r.Context(), client, bucketPath, ExtendedKeyMetadata)
		if err == nil {
			if err := json.Unmarshal(data, &bucketMetadata); err != nil {
				return fmt.Errorf("failed to unmarshal bucket metadata: %w", err)
			}
		} else if !errors.Is(err, ErrAttributeNotFound) {
			return fmt.Errorf("failed to fetch bucket metadata: %v", err)
		}

		// Fetch table policy if it exists
		policyData, err := h.getExtendedAttribute(r.Context(), client, tablePath, ExtendedKeyPolicy)
		if err == nil {
			tablePolicy = string(policyData)
		} else if !errors.Is(err, ErrAttributeNotFound) {
			return fmt.Errorf("failed to fetch table policy: %v", err)
		}
		if tags, err := h.readTags(r.Context(), client, tablePath); err != nil {
			return err
		} else if tags != nil {
			tableTags = tags
		}

		// Fetch bucket policy if it exists
		policyData, err = h.getExtendedAttribute(r.Context(), client, bucketPath, ExtendedKeyPolicy)
		if err == nil {
			bucketPolicy = string(policyData)
		} else if !errors.Is(err, ErrAttributeNotFound) {
			return fmt.Errorf("failed to fetch bucket policy: %v", err)
		}
		if tags, err := h.readTags(r.Context(), client, bucketPath); err != nil {
			return err
		} else if tags != nil {
			bucketTags = tags
		}

		return nil
	})

	if err != nil {
		h.writeError(w, http.StatusInternalServerError, ErrCodeInternalError, fmt.Sprintf("failed to fetch policies: %v", err))
		return err
	}

	tableARN := h.generateTableARN(metadata.OwnerAccountID, bucketName, namespace+"/"+tableName)
	bucketARN := h.generateTableBucketARN(bucketMetadata.OwnerAccountID, bucketName)
	identityActions := getIdentityActions(r)
	// Access is granted if EITHER the table policy or the bucket policy
	// allows "GetTable". DefaultAllow carries the zero-config open mode.
	tableAllowed := CheckPermissionWithContext("GetTable", accountID, metadata.OwnerAccountID, tablePolicy, tableARN, &PolicyContext{
		TableBucketName: bucketName,
		Namespace:       namespace,
		TableName:       tableName,
		TableBucketTags: bucketTags,
		ResourceTags:    tableTags,
		IdentityActions: identityActions,
		DefaultAllow:    h.defaultAllow,
	})
	bucketAllowed := CheckPermissionWithContext("GetTable", accountID, bucketMetadata.OwnerAccountID, bucketPolicy, bucketARN, &PolicyContext{
		TableBucketName: bucketName,
		Namespace:       namespace,
		TableName:       tableName,
		TableBucketTags: bucketTags,
		ResourceTags:    tableTags,
		IdentityActions: identityActions,
		DefaultAllow:    h.defaultAllow,
	})

	if !tableAllowed && !bucketAllowed {
		// Report 404 rather than 403 to avoid leaking table existence.
		h.writeError(w, http.StatusNotFound, ErrCodeNoSuchTable, fmt.Sprintf("table %s not found", tableName))
		return ErrAccessDenied
	}

	resp := &GetTableResponse{
		Name:             metadata.Name,
		TableARN:         tableARN,
		Namespace:        expandNamespace(metadata.Namespace),
		Format:           metadata.Format,
		CreatedAt:        metadata.CreatedAt,
		ModifiedAt:       metadata.ModifiedAt,
		OwnerAccountID:   metadata.OwnerAccountID,
		MetadataLocation: metadata.MetadataLocation,
		MetadataVersion:  metadata.MetadataVersion,
		VersionToken:     metadata.VersionToken,
		Metadata:         metadata.Metadata,
	}

	h.writeJSON(w, http.StatusOK, resp)
	return nil
}
|
|
|
|
// handleListTables lists all tables in a namespace or bucket.
//
// If the request carries a namespace, only that namespace is listed and the
// caller must pass EITHER the namespace policy or the bucket policy check;
// otherwise all namespaces in the bucket are listed and only the bucket
// policy is consulted. Results are paginated via MaxTables (default 100,
// hard-capped at 1000 to avoid uint32 overflow in the listing limit) and an
// opaque continuation token. A missing bucket/namespace directory yields an
// empty listing rather than an error.
func (h *S3TablesHandler) handleListTables(w http.ResponseWriter, r *http.Request, filerClient FilerClient) error {

	var req ListTablesRequest
	if err := h.readRequestBody(r, &req); err != nil {
		h.writeError(w, http.StatusBadRequest, ErrCodeInvalidRequest, err.Error())
		return err
	}

	if req.TableBucketARN == "" {
		h.writeError(w, http.StatusBadRequest, ErrCodeInvalidRequest, "tableBucketARN is required")
		return fmt.Errorf("tableBucketARN is required")
	}

	bucketName, err := parseBucketNameFromARN(req.TableBucketARN)
	if err != nil {
		h.writeError(w, http.StatusBadRequest, ErrCodeInvalidRequest, err.Error())
		return err
	}

	maxTables := req.MaxTables
	if maxTables <= 0 {
		maxTables = 100
	}
	// Cap to prevent uint32 overflow when used in uint32(maxTables*2)
	const maxTablesLimit = 1000
	if maxTables > maxTablesLimit {
		h.writeError(w, http.StatusBadRequest, ErrCodeInvalidRequest, "MaxTables exceeds maximum allowed value")
		return fmt.Errorf("invalid maxTables value: %d", maxTables)
	}

	// Pre-validate namespace before calling WithFilerClient to return 400 on validation errors
	var namespaceName string
	if len(req.Namespace) > 0 {
		var err error
		namespaceName, err = validateNamespace(req.Namespace)
		if err != nil {
			h.writeError(w, http.StatusBadRequest, ErrCodeInvalidRequest, err.Error())
			return err
		}
	}

	var tables []TableSummary
	var paginationToken string

	err = filerClient.WithFilerClient(false, func(client filer_pb.SeaweedFilerClient) error {
		var err error
		accountID := h.getAccountID(r)

		if len(req.Namespace) > 0 {
			// Namespace-scoped listing: authorize against both the
			// namespace and bucket policies; either may grant access.
			// Namespace has already been validated above
			namespacePath := GetNamespacePath(bucketName, namespaceName)
			bucketPath := GetTableBucketPath(bucketName)
			var nsMeta namespaceMetadata
			var bucketMeta tableBucketMetadata
			var namespacePolicy, bucketPolicy string
			bucketTags := map[string]string{}

			// Fetch namespace metadata and policy
			data, err := h.getExtendedAttribute(r.Context(), client, namespacePath, ExtendedKeyMetadata)
			if err != nil {
				return err // Not Found handled by caller
			}
			if err := json.Unmarshal(data, &nsMeta); err != nil {
				return err
			}

			// Fetch namespace policy if it exists
			policyData, err := h.getExtendedAttribute(r.Context(), client, namespacePath, ExtendedKeyPolicy)
			if err == nil {
				namespacePolicy = string(policyData)
			} else if !errors.Is(err, ErrAttributeNotFound) {
				return fmt.Errorf("failed to fetch namespace policy: %v", err)
			}

			// Fetch bucket metadata and policy
			data, err = h.getExtendedAttribute(r.Context(), client, bucketPath, ExtendedKeyMetadata)
			if err == nil {
				if err := json.Unmarshal(data, &bucketMeta); err != nil {
					return fmt.Errorf("failed to unmarshal bucket metadata: %w", err)
				}
			} else if !errors.Is(err, ErrAttributeNotFound) {
				return fmt.Errorf("failed to fetch bucket metadata: %v", err)
			}

			policyData, err = h.getExtendedAttribute(r.Context(), client, bucketPath, ExtendedKeyPolicy)
			if err == nil {
				bucketPolicy = string(policyData)
			} else if !errors.Is(err, ErrAttributeNotFound) {
				return fmt.Errorf("failed to fetch bucket policy: %v", err)
			}
			if tags, err := h.readTags(r.Context(), client, bucketPath); err != nil {
				return err
			} else if tags != nil {
				bucketTags = tags
			}

			bucketARN := h.generateTableBucketARN(bucketMeta.OwnerAccountID, bucketName)
			identityActions := getIdentityActions(r)
			// DefaultAllow propagates the zero-config "open by default" mode.
			nsAllowed := CheckPermissionWithContext("ListTables", accountID, nsMeta.OwnerAccountID, namespacePolicy, bucketARN, &PolicyContext{
				TableBucketName: bucketName,
				Namespace:       namespaceName,
				TableBucketTags: bucketTags,
				IdentityActions: identityActions,
				DefaultAllow:    h.defaultAllow,
			})
			bucketAllowed := CheckPermissionWithContext("ListTables", accountID, bucketMeta.OwnerAccountID, bucketPolicy, bucketARN, &PolicyContext{
				TableBucketName: bucketName,
				Namespace:       namespaceName,
				TableBucketTags: bucketTags,
				IdentityActions: identityActions,
				DefaultAllow:    h.defaultAllow,
			})
			if !nsAllowed && !bucketAllowed {
				return ErrAccessDenied
			}

			tables, paginationToken, err = h.listTablesInNamespaceWithClient(r, client, bucketName, namespaceName, req.Prefix, req.ContinuationToken, maxTables)
		} else {
			// List tables across all namespaces in bucket
			bucketPath := GetTableBucketPath(bucketName)
			var bucketMeta tableBucketMetadata
			var bucketPolicy string
			bucketTags := map[string]string{}

			// Fetch bucket metadata and policy
			data, err := h.getExtendedAttribute(r.Context(), client, bucketPath, ExtendedKeyMetadata)
			if err != nil {
				return err
			}
			if err := json.Unmarshal(data, &bucketMeta); err != nil {
				return err
			}

			// Fetch bucket policy if it exists
			policyData, err := h.getExtendedAttribute(r.Context(), client, bucketPath, ExtendedKeyPolicy)
			if err == nil {
				bucketPolicy = string(policyData)
			} else if !errors.Is(err, ErrAttributeNotFound) {
				return fmt.Errorf("failed to fetch bucket policy: %v", err)
			}
			if tags, err := h.readTags(r.Context(), client, bucketPath); err != nil {
				return err
			} else if tags != nil {
				bucketTags = tags
			}

			bucketARN := h.generateTableBucketARN(bucketMeta.OwnerAccountID, bucketName)
			identityActions := getIdentityActions(r)
			if !CheckPermissionWithContext("ListTables", accountID, bucketMeta.OwnerAccountID, bucketPolicy, bucketARN, &PolicyContext{
				TableBucketName: bucketName,
				TableBucketTags: bucketTags,
				IdentityActions: identityActions,
				DefaultAllow:    h.defaultAllow,
			}) {
				return ErrAccessDenied
			}

			tables, paginationToken, err = h.listTablesInAllNamespaces(r, client, bucketName, req.Prefix, req.ContinuationToken, maxTables)
		}
		return err
	})

	if err != nil {
		if errors.Is(err, filer_pb.ErrNotFound) {
			// If the bucket or namespace directory is not found, return an empty result
			tables = []TableSummary{}
			paginationToken = ""
		} else if isAuthError(err) {
			h.writeError(w, http.StatusForbidden, ErrCodeAccessDenied, "Access Denied")
			return err
		} else {
			h.writeError(w, http.StatusInternalServerError, ErrCodeInternalError, fmt.Sprintf("failed to list tables: %v", err))
			return err
		}
	}

	resp := &ListTablesResponse{
		Tables:            tables,
		ContinuationToken: paginationToken,
	}

	h.writeJSON(w, http.StatusOK, resp)
	return nil
}
|
|
|
|
// listTablesInNamespaceWithClient lists tables in a specific namespace
|
|
func (h *S3TablesHandler) listTablesInNamespaceWithClient(r *http.Request, client filer_pb.SeaweedFilerClient, bucketName, namespaceName, prefix, continuationToken string, maxTables int) ([]TableSummary, string, error) {
|
|
namespacePath := GetNamespacePath(bucketName, namespaceName)
|
|
return h.listTablesWithClient(r, client, namespacePath, bucketName, namespaceName, prefix, continuationToken, maxTables)
|
|
}
|
|
|
|
// listTablesWithClient streams directory entries from dirPath and collects up
// to maxTables table summaries that match the optional name prefix.
//
// Pagination contract: continuationToken is the name of the last table
// returned on the previous page. The listing starts from that name
// (inclusive, then explicitly skipped below) and the returned token is the
// last file name consumed — empty when the listing is exhausted.
//
// Only directory entries that carry a parseable metadata extended attribute
// are counted; hidden (dot-prefixed) entries, files, and entries with
// missing/corrupt metadata are silently skipped.
//
// Note: authorization is checked at the handler level before this function
// is called; no per-table ownership filtering happens here.
func (h *S3TablesHandler) listTablesWithClient(r *http.Request, client filer_pb.SeaweedFilerClient, dirPath, bucketName, namespaceName, prefix, continuationToken string, maxTables int) ([]TableSummary, string, error) {
	var tables []TableSummary
	lastFileName := continuationToken
	ctx := r.Context()

	// Outer loop fetches pages from the filer until we have maxTables
	// results or the directory is exhausted.
	for len(tables) < maxTables {
		resp, err := client.ListEntries(ctx, &filer_pb.ListEntriesRequest{
			Directory: dirPath,
			// Over-fetch (2x) because entries may be filtered out below.
			// maxTables is capped by the caller, so this cannot overflow.
			Limit:             uint32(maxTables * 2),
			StartFromFileName: lastFileName,
			// Inclusive on the first page (or when resuming exactly at the
			// continuation token); the token entry itself is skipped below.
			InclusiveStartFrom: lastFileName == "" || lastFileName == continuationToken,
		})
		if err != nil {
			return nil, "", err
		}

		hasMore := false
		for {
			entry, respErr := resp.Recv()
			if respErr != nil {
				if respErr == io.EOF {
					break
				}
				return nil, "", respErr
			}
			if entry.Entry == nil {
				continue
			}

			// Skip the start item if it was included in the previous page
			if len(tables) == 0 && continuationToken != "" && entry.Entry.Name == continuationToken {
				continue
			}

			hasMore = true
			lastFileName = entry.Entry.Name

			// Tables are represented as directories; skip plain files.
			if !entry.Entry.IsDirectory {
				continue
			}

			// Skip hidden entries
			if strings.HasPrefix(entry.Entry.Name, ".") {
				continue
			}

			// Apply prefix filter
			if prefix != "" && !strings.HasPrefix(entry.Entry.Name, prefix) {
				continue
			}

			// Read table metadata from extended attribute
			data, ok := entry.Entry.Extended[ExtendedKeyMetadata]
			if !ok {
				continue
			}

			var metadata tableMetadataInternal
			if err := json.Unmarshal(data, &metadata); err != nil {
				continue
			}

			// Note: Authorization (ownership or policy-based access) is checked at the handler level
			// before calling this function. This filter is removed to allow policy-based sharing.
			// The caller has already been verified to have ListTables permission for this namespace/bucket.

			tableARN := h.generateTableARN(metadata.OwnerAccountID, bucketName, namespaceName+"/"+entry.Entry.Name)

			tables = append(tables, TableSummary{
				Name:       entry.Entry.Name,
				TableARN:   tableARN,
				Namespace:  expandNamespace(namespaceName),
				CreatedAt:  metadata.CreatedAt,
				ModifiedAt: metadata.ModifiedAt,
			})

			// Page is full: return the last consumed name as the next token.
			if len(tables) >= maxTables {
				return tables, lastFileName, nil
			}
		}

		// An empty page means the directory is exhausted.
		if !hasMore {
			break
		}
	}

	// Fewer than maxTables collected means there is nothing left to page
	// through, so clear the token.
	if len(tables) < maxTables {
		lastFileName = ""
	}
	return tables, lastFileName, nil
}
|
|
|
|
// listTablesInAllNamespaces lists tables across every namespace of a bucket,
// in namespace directory order, up to maxTables results.
//
// Pagination uses a composite token of the form "namespace/tableName"
// (resume inside that namespace after tableName) or just "namespace"
// (resume at the namespace AFTER the named one). Namespaces that fail to
// list are logged and skipped rather than aborting the whole listing.
func (h *S3TablesHandler) listTablesInAllNamespaces(r *http.Request, client filer_pb.SeaweedFilerClient, bucketName, prefix, continuationToken string, maxTables int) ([]TableSummary, string, error) {
	bucketPath := GetTableBucketPath(bucketName)
	ctx := r.Context()

	// Decode the composite continuation token.
	var continuationNamespace string
	var startTableName string
	if continuationToken != "" {
		if parts := strings.SplitN(continuationToken, "/", 2); len(parts) == 2 {
			continuationNamespace = parts[0]
			startTableName = parts[1]
		} else {
			// Bare namespace token: resume at the following namespace.
			continuationNamespace = continuationToken
		}
	}

	var tables []TableSummary
	lastNamespace := continuationNamespace
	for {
		// List namespaces in batches
		resp, err := client.ListEntries(ctx, &filer_pb.ListEntriesRequest{
			Directory:         bucketPath,
			Limit:             100,
			StartFromFileName: lastNamespace,
			// Inclusive when resuming INSIDE a namespace (a table-level
			// token) or when starting from the very beginning; exclusive
			// for a bare namespace token so we move to the next namespace.
			InclusiveStartFrom: (lastNamespace == continuationNamespace && startTableName != "") || (lastNamespace == "" && continuationNamespace == ""),
		})
		if err != nil {
			return nil, "", err
		}

		hasMore := false
		for {
			entry, respErr := resp.Recv()
			if respErr != nil {
				if respErr == io.EOF {
					break
				}
				return nil, "", respErr
			}
			if entry.Entry == nil {
				continue
			}

			hasMore = true
			lastNamespace = entry.Entry.Name

			// Namespaces are directories; skip files and hidden entries.
			if !entry.Entry.IsDirectory || strings.HasPrefix(entry.Entry.Name, ".") {
				continue
			}

			namespace := entry.Entry.Name
			// Only the namespace we are resuming into gets an intra-namespace
			// start position; all later namespaces list from their beginning.
			tableNameFilter := ""
			if namespace == continuationNamespace {
				tableNameFilter = startTableName
			}

			nsTables, nsToken, err := h.listTablesInNamespaceWithClient(r, client, bucketName, namespace, prefix, tableNameFilter, maxTables-len(tables))
			if err != nil {
				glog.Warningf("S3Tables: failed to list tables in namespace %s/%s: %v", bucketName, namespace, err)
				continue
			}

			tables = append(tables, nsTables...)

			// The resume position has been consumed; later passes over this
			// namespace (if any) must not reuse it.
			if namespace == continuationNamespace {
				startTableName = ""
			}

			if len(tables) >= maxTables {
				paginationToken := namespace + "/" + nsToken
				if nsToken == "" {
					// If we hit the limit exactly at the end of a namespace, the next token should be the next namespace
					paginationToken = namespace // This will start from the NEXT namespace in the outer loop
				}
				return tables, paginationToken, nil
			}
		}

		// An empty namespace page means the bucket is exhausted.
		if !hasMore {
			break
		}
	}

	// Exhausted all namespaces without filling the page: no more results.
	return tables, "", nil
}
|
|
|
|
// handleDeleteTable deletes a table from a namespace.
//
// Flow:
//  1. Parse and validate the request (bucket ARN, namespace, table name).
//  2. In a single filer session, load the table metadata (enforcing the
//     optional VersionToken), the table/bucket policies, and the
//     table/bucket tags required for authorization.
//  3. Authorize: deletion proceeds if EITHER the table policy OR the
//     bucket policy permits DeleteTable for the caller.
//  4. Delete the table directory and its metadata-location mapping entry.
//
// On any failure a JSON error response is written to w and the error is
// also returned to the caller.
func (h *S3TablesHandler) handleDeleteTable(w http.ResponseWriter, r *http.Request, filerClient FilerClient) error {

	var req DeleteTableRequest
	if err := h.readRequestBody(r, &req); err != nil {
		h.writeError(w, http.StatusBadRequest, ErrCodeInvalidRequest, err.Error())
		return err
	}

	// All three identifiers are mandatory to locate the table.
	if req.TableBucketARN == "" || len(req.Namespace) == 0 || req.Name == "" {
		h.writeError(w, http.StatusBadRequest, ErrCodeInvalidRequest, "tableBucketARN, namespace, and name are required")
		return fmt.Errorf("missing required parameters")
	}

	namespaceName, err := validateNamespace(req.Namespace)
	if err != nil {
		h.writeError(w, http.StatusBadRequest, ErrCodeInvalidRequest, err.Error())
		return err
	}

	bucketName, err := parseBucketNameFromARN(req.TableBucketARN)
	if err != nil {
		h.writeError(w, http.StatusBadRequest, ErrCodeInvalidRequest, err.Error())
		return err
	}

	tableName, err := validateTableName(req.Name)
	if err != nil {
		h.writeError(w, http.StatusBadRequest, ErrCodeInvalidRequest, err.Error())
		return err
	}

	tablePath := GetTablePath(bucketName, namespaceName, tableName)

	// Check if table exists and enforce VersionToken if provided
	var metadata tableMetadataInternal
	var tablePolicy string
	var bucketPolicy string
	var bucketTags map[string]string
	var tableTags map[string]string
	var bucketMetadata tableBucketMetadata
	err = filerClient.WithFilerClient(false, func(client filer_pb.SeaweedFilerClient) error {
		data, err := h.getExtendedAttribute(r.Context(), client, tablePath, ExtendedKeyMetadata)
		if err != nil {
			return err
		}

		if err := json.Unmarshal(data, &metadata); err != nil {
			return fmt.Errorf("failed to unmarshal table metadata: %w", err)
		}

		// Optimistic concurrency: when the caller supplies a token it must
		// match the stored one; a mismatch is surfaced as 409 below.
		if req.VersionToken != "" {
			if metadata.VersionToken != req.VersionToken {
				return ErrVersionTokenMismatch
			}
		}

		// Fetch table policy if it exists
		policyData, err := h.getExtendedAttribute(r.Context(), client, tablePath, ExtendedKeyPolicy)
		if err != nil {
			if errors.Is(err, ErrAttributeNotFound) {
				// No table policy set; proceed with empty policy
			} else {
				return fmt.Errorf("failed to fetch table policy: %w", err)
			}
		} else {
			tablePolicy = string(policyData)
		}

		tableTags, err = h.readTags(r.Context(), client, tablePath)
		if err != nil {
			return err
		}

		// Bucket-level metadata/policy/tags feed the second (bucket-scoped)
		// permission check below. A missing attribute is tolerated; any
		// other lookup error aborts the request.
		bucketPath := GetTableBucketPath(bucketName)
		data, err = h.getExtendedAttribute(r.Context(), client, bucketPath, ExtendedKeyMetadata)
		if err == nil {
			if err := json.Unmarshal(data, &bucketMetadata); err != nil {
				return fmt.Errorf("failed to unmarshal bucket metadata: %w", err)
			}
		} else if !errors.Is(err, ErrAttributeNotFound) {
			return fmt.Errorf("failed to fetch bucket metadata: %w", err)
		}
		policyData, err = h.getExtendedAttribute(r.Context(), client, bucketPath, ExtendedKeyPolicy)
		if err != nil {
			if !errors.Is(err, ErrAttributeNotFound) {
				return fmt.Errorf("failed to fetch bucket policy: %w", err)
			}
		} else {
			bucketPolicy = string(policyData)
		}
		bucketTags, err = h.readTags(r.Context(), client, bucketPath)
		if err != nil {
			return err
		}

		return nil
	})

	if err != nil {
		// Map well-known sentinel errors onto their HTTP equivalents.
		if errors.Is(err, filer_pb.ErrNotFound) {
			h.writeError(w, http.StatusNotFound, ErrCodeNoSuchTable, fmt.Sprintf("table %s not found", tableName))
		} else if errors.Is(err, ErrVersionTokenMismatch) {
			h.writeError(w, http.StatusConflict, ErrCodeConflict, "version token mismatch")
		} else {
			h.writeError(w, http.StatusInternalServerError, ErrCodeInternalError, fmt.Sprintf("failed to check table: %v", err))
		}
		return err
	}

	tableARN := h.generateTableARN(metadata.OwnerAccountID, bucketName, namespaceName+"/"+tableName)
	bucketARN := h.generateTableBucketARN(bucketMetadata.OwnerAccountID, bucketName)
	principal := h.getAccountID(r)
	identityActions := getIdentityActions(r)
	// Evaluate both the table policy and the bucket policy; either one
	// granting DeleteTable is sufficient. DefaultAllow propagates the
	// zero-config "open by default" setting into the policy evaluation.
	tableAllowed := CheckPermissionWithContext("DeleteTable", principal, metadata.OwnerAccountID, tablePolicy, tableARN, &PolicyContext{
		TableBucketName: bucketName,
		Namespace:       namespaceName,
		TableName:       tableName,
		TableBucketTags: bucketTags,
		ResourceTags:    tableTags,
		IdentityActions: identityActions,
		DefaultAllow:    h.defaultAllow,
	})
	bucketAllowed := CheckPermissionWithContext("DeleteTable", principal, bucketMetadata.OwnerAccountID, bucketPolicy, bucketARN, &PolicyContext{
		TableBucketName: bucketName,
		Namespace:       namespaceName,
		TableName:       tableName,
		TableBucketTags: bucketTags,
		ResourceTags:    tableTags,
		IdentityActions: identityActions,
		DefaultAllow:    h.defaultAllow,
	})
	if !tableAllowed && !bucketAllowed {
		h.writeError(w, http.StatusForbidden, ErrCodeAccessDenied, "not authorized to delete table")
		return NewAuthError("DeleteTable", principal, "not authorized to delete table")
	}

	// Delete the table
	err = filerClient.WithFilerClient(false, func(client filer_pb.SeaweedFilerClient) error {
		if err := h.deleteDirectory(r.Context(), client, tablePath); err != nil {
			return err
		}
		// Mapping cleanup is best-effort: the table directory is already
		// gone, so a stale index entry is only logged, not surfaced.
		if err := h.deleteTableLocationMapping(r.Context(), client, metadata.MetadataLocation, tablePath); err != nil {
			glog.V(1).Infof("failed to delete table location mapping for %s: %v", metadata.MetadataLocation, err)
		}
		return nil
	})

	if err != nil {
		h.writeError(w, http.StatusInternalServerError, ErrCodeInternalError, "failed to delete table")
		return err
	}

	h.writeJSON(w, http.StatusOK, nil)
	return nil
}
|
|
|
|
// handleUpdateTable updates table metadata.
//
// Flow:
//  1. Parse and validate the request (bucket ARN, namespace, table name).
//  2. Load existing table metadata plus table/bucket policies and tags in
//     one filer session for the authorization check.
//  3. Authorize: the update proceeds if EITHER the table policy OR the
//     bucket policy permits UpdateTable for the caller.
//  4. Enforce the optional VersionToken, merge the requested metadata
//     changes in place, stamp ModifiedAt and a fresh VersionToken, and
//     persist the result along with the metadata-location index mapping.
//
// On any failure a JSON error response is written to w and the error is
// also returned to the caller.
func (h *S3TablesHandler) handleUpdateTable(w http.ResponseWriter, r *http.Request, filerClient FilerClient) error {
	var req UpdateTableRequest
	if err := h.readRequestBody(r, &req); err != nil {
		h.writeError(w, http.StatusBadRequest, ErrCodeInvalidRequest, err.Error())
		return err
	}

	// All three identifiers are mandatory to locate the table.
	if req.TableBucketARN == "" || len(req.Namespace) == 0 || req.Name == "" {
		h.writeError(w, http.StatusBadRequest, ErrCodeInvalidRequest, "tableBucketARN, namespace, and name are required")
		return fmt.Errorf("missing required parameters")
	}

	namespaceName, err := validateNamespace(req.Namespace)
	if err != nil {
		h.writeError(w, http.StatusBadRequest, ErrCodeInvalidRequest, err.Error())
		return err
	}

	bucketName, err := parseBucketNameFromARN(req.TableBucketARN)
	if err != nil {
		h.writeError(w, http.StatusBadRequest, ErrCodeInvalidRequest, err.Error())
		return err
	}

	tableName, err := validateTableName(req.Name)
	if err != nil {
		h.writeError(w, http.StatusBadRequest, ErrCodeInvalidRequest, err.Error())
		return err
	}

	tablePath := GetTablePath(bucketName, namespaceName, tableName)

	// Load existing metadata and policies for authorization
	var metadata tableMetadataInternal
	var tablePolicy string
	var bucketPolicy string
	var bucketTags map[string]string
	var tableTags map[string]string
	var bucketMetadata tableBucketMetadata

	err = filerClient.WithFilerClient(false, func(client filer_pb.SeaweedFilerClient) error {
		// 1. Get Table Metadata
		data, err := h.getExtendedAttribute(r.Context(), client, tablePath, ExtendedKeyMetadata)
		if err != nil {
			return err
		}
		if err := json.Unmarshal(data, &metadata); err != nil {
			return fmt.Errorf("failed to unmarshal table metadata: %w", err)
		}

		// 2. Get Table Policy & Tags
		// A missing policy attribute is tolerated (empty policy); any other
		// lookup error aborts the request.
		policyData, err := h.getExtendedAttribute(r.Context(), client, tablePath, ExtendedKeyPolicy)
		if err == nil {
			tablePolicy = string(policyData)
		} else if !errors.Is(err, ErrAttributeNotFound) {
			return fmt.Errorf("failed to fetch table policy: %w", err)
		}
		tableTags, err = h.readTags(r.Context(), client, tablePath)
		if err != nil {
			return err
		}

		// 3. Get Bucket Metadata, Policy & Tags
		bucketPath := GetTableBucketPath(bucketName)
		data, err = h.getExtendedAttribute(r.Context(), client, bucketPath, ExtendedKeyMetadata)
		if err == nil {
			if err := json.Unmarshal(data, &bucketMetadata); err != nil {
				return fmt.Errorf("failed to unmarshal bucket metadata: %w", err)
			}
		} else if !errors.Is(err, ErrAttributeNotFound) {
			return fmt.Errorf("failed to fetch bucket metadata: %w", err)
		}
		policyData, err = h.getExtendedAttribute(r.Context(), client, bucketPath, ExtendedKeyPolicy)
		if err == nil {
			bucketPolicy = string(policyData)
		} else if !errors.Is(err, ErrAttributeNotFound) {
			return fmt.Errorf("failed to fetch bucket policy: %w", err)
		}
		bucketTags, err = h.readTags(r.Context(), client, bucketPath)
		if err != nil {
			return err
		}

		return nil
	})

	if err != nil {
		if errors.Is(err, filer_pb.ErrNotFound) {
			h.writeError(w, http.StatusNotFound, ErrCodeNoSuchTable, "table not found")
		} else {
			h.writeError(w, http.StatusInternalServerError, ErrCodeInternalError, err.Error())
		}
		return err
	}

	// Authorization Check
	tableARN := h.generateTableARN(metadata.OwnerAccountID, bucketName, namespaceName+"/"+tableName)
	bucketARN := h.generateTableBucketARN(bucketMetadata.OwnerAccountID, bucketName)
	principal := h.getAccountID(r)
	identityActions := getIdentityActions(r)

	// Either the table policy or the bucket policy granting UpdateTable is
	// sufficient. DefaultAllow propagates the zero-config "open by default"
	// setting into the policy evaluation.
	tableAllowed := CheckPermissionWithContext("UpdateTable", principal, metadata.OwnerAccountID, tablePolicy, tableARN, &PolicyContext{
		TableBucketName: bucketName,
		Namespace:       namespaceName,
		TableName:       tableName,
		TableBucketTags: bucketTags,
		ResourceTags:    tableTags,
		IdentityActions: identityActions,
		DefaultAllow:    h.defaultAllow,
	})
	bucketAllowed := CheckPermissionWithContext("UpdateTable", principal, bucketMetadata.OwnerAccountID, bucketPolicy, bucketARN, &PolicyContext{
		TableBucketName: bucketName,
		Namespace:       namespaceName,
		TableName:       tableName,
		TableBucketTags: bucketTags,
		ResourceTags:    tableTags,
		IdentityActions: identityActions,
		DefaultAllow:    h.defaultAllow,
	})

	if !tableAllowed && !bucketAllowed {
		h.writeError(w, http.StatusForbidden, ErrCodeAccessDenied, "not authorized to update table")
		return NewAuthError("UpdateTable", principal, "not authorized to update table")
	}

	// Check version token if provided
	if req.VersionToken != "" && req.VersionToken != metadata.VersionToken {
		h.writeError(w, http.StatusConflict, ErrCodeConflict, "Version token mismatch")
		return ErrVersionTokenMismatch
	}

	// Capture old metadata location before mutation for stale mapping cleanup
	oldMetadataLocation := metadata.MetadataLocation

	// Update metadata
	// Only fields present in the request are merged; unset fields keep
	// their stored values.
	if req.Metadata != nil {
		if metadata.Metadata == nil {
			metadata.Metadata = &TableMetadata{}
		}
		if req.Metadata.Iceberg != nil {
			if metadata.Metadata.Iceberg == nil {
				metadata.Metadata.Iceberg = &IcebergMetadata{}
			}
			if req.Metadata.Iceberg.TableUUID != "" {
				metadata.Metadata.Iceberg.TableUUID = req.Metadata.Iceberg.TableUUID
			}
		}
		if len(req.Metadata.FullMetadata) > 0 {
			metadata.Metadata.FullMetadata = req.Metadata.FullMetadata
		}
	}
	if req.MetadataLocation != "" {
		metadata.MetadataLocation = req.MetadataLocation
	}
	if req.MetadataVersion > 0 {
		metadata.MetadataVersion = req.MetadataVersion
	} else if metadata.MetadataVersion == 0 {
		// Backfill a version for legacy records that never had one.
		metadata.MetadataVersion = 1
	}
	metadata.ModifiedAt = time.Now()
	// Every successful update invalidates previously issued version tokens.
	metadata.VersionToken = generateVersionToken()

	metadataBytes, err := json.Marshal(metadata)
	if err != nil {
		h.writeError(w, http.StatusInternalServerError, ErrCodeInternalError, "failed to marshal metadata")
		return err
	}

	err = filerClient.WithFilerClient(false, func(client filer_pb.SeaweedFilerClient) error {
		if err := h.setExtendedAttribute(r.Context(), client, tablePath, ExtendedKeyMetadata, metadataBytes); err != nil {
			return err
		}
		// Index maintenance is best-effort: the metadata write already
		// succeeded, so a mapping failure is only logged.
		if err := h.updateTableLocationMapping(r.Context(), client, oldMetadataLocation, metadata.MetadataLocation, tablePath); err != nil {
			glog.V(1).Infof("failed to update table location mapping for %s -> %s: %v", oldMetadataLocation, metadata.MetadataLocation, err)
		}
		return nil
	})

	if err != nil {
		h.writeError(w, http.StatusInternalServerError, ErrCodeInternalError, "failed to update metadata")
		return err
	}

	h.writeJSON(w, http.StatusOK, &UpdateTableResponse{
		TableARN:         tableARN,
		MetadataLocation: metadata.MetadataLocation,
		VersionToken:     metadata.VersionToken,
	})
	return nil
}
|
|
|
|
func (h *S3TablesHandler) updateTableLocationMapping(ctx context.Context, client filer_pb.SeaweedFilerClient, oldMetadataLocation, newMetadataLocation, tablePath string) error {
|
|
newTableLocationBucket, ok := parseTableLocationBucket(newMetadataLocation)
|
|
if !ok {
|
|
return nil
|
|
}
|
|
tableBucketPath, ok := tableBucketPathFromTablePath(tablePath)
|
|
if !ok {
|
|
return fmt.Errorf("invalid table path for location mapping: %s", tablePath)
|
|
}
|
|
|
|
if err := h.ensureDirectory(ctx, client, GetTableLocationMappingDir()); err != nil {
|
|
return err
|
|
}
|
|
if err := h.ensureTableLocationMappingBucketDir(ctx, client, newTableLocationBucket); err != nil {
|
|
return err
|
|
}
|
|
|
|
// If the metadata location changed, remove this table's stale mapping entry from the old bucket.
|
|
if oldMetadataLocation != "" && oldMetadataLocation != newMetadataLocation {
|
|
oldTableLocationBucket, ok := parseTableLocationBucket(oldMetadataLocation)
|
|
if ok && oldTableLocationBucket != newTableLocationBucket {
|
|
if err := h.removeTableLocationMappingEntry(ctx, client, oldTableLocationBucket, tablePath); err != nil {
|
|
glog.V(1).Infof("failed to delete stale mapping for %s: %v", oldTableLocationBucket, err)
|
|
}
|
|
}
|
|
}
|
|
|
|
return h.upsertFile(ctx, client, GetTableLocationMappingEntryPath(newTableLocationBucket, tablePath), []byte(tableBucketPath))
|
|
}
|
|
|
|
func (h *S3TablesHandler) deleteTableLocationMapping(ctx context.Context, client filer_pb.SeaweedFilerClient, metadataLocation, tablePath string) error {
|
|
tableLocationBucket, ok := parseTableLocationBucket(metadataLocation)
|
|
if !ok {
|
|
return nil
|
|
}
|
|
return h.removeTableLocationMappingEntry(ctx, client, tableLocationBucket, tablePath)
|
|
}
|
|
|
|
func (h *S3TablesHandler) ensureTableLocationMappingBucketDir(ctx context.Context, client filer_pb.SeaweedFilerClient, tableLocationBucket string) error {
|
|
mappingDir := GetTableLocationMappingDir()
|
|
bucketMappingPath := GetTableLocationMappingPath(tableLocationBucket)
|
|
|
|
resp, err := filer_pb.LookupEntry(ctx, client, &filer_pb.LookupDirectoryEntryRequest{
|
|
Directory: mappingDir,
|
|
Name: tableLocationBucket,
|
|
})
|
|
if err == nil {
|
|
if resp != nil && resp.Entry != nil && resp.Entry.IsDirectory {
|
|
return nil
|
|
}
|
|
if removeErr := h.deleteEntryIfExists(ctx, client, bucketMappingPath); removeErr != nil && !errors.Is(removeErr, filer_pb.ErrNotFound) {
|
|
return removeErr
|
|
}
|
|
} else if !errors.Is(err, filer_pb.ErrNotFound) {
|
|
return err
|
|
}
|
|
|
|
return h.ensureDirectory(ctx, client, bucketMappingPath)
|
|
}
|
|
|
|
func (h *S3TablesHandler) removeTableLocationMappingEntry(ctx context.Context, client filer_pb.SeaweedFilerClient, tableLocationBucket, tablePath string) error {
|
|
entryPath := GetTableLocationMappingEntryPath(tableLocationBucket, tablePath)
|
|
if err := h.deleteEntryIfExists(ctx, client, entryPath); err != nil && !errors.Is(err, filer_pb.ErrNotFound) {
|
|
return err
|
|
}
|
|
return h.removeTableLocationMappingBucketDirIfEmpty(ctx, client, tableLocationBucket)
|
|
}
|
|
|
|
func (h *S3TablesHandler) removeTableLocationMappingBucketDirIfEmpty(ctx context.Context, client filer_pb.SeaweedFilerClient, tableLocationBucket string) error {
|
|
bucketMappingPath := GetTableLocationMappingPath(tableLocationBucket)
|
|
|
|
stream, err := client.ListEntries(ctx, &filer_pb.ListEntriesRequest{
|
|
Directory: bucketMappingPath,
|
|
Limit: 1,
|
|
})
|
|
if err != nil {
|
|
if errors.Is(err, filer_pb.ErrNotFound) {
|
|
return nil
|
|
}
|
|
return err
|
|
}
|
|
|
|
for {
|
|
resp, recvErr := stream.Recv()
|
|
if recvErr == io.EOF {
|
|
break
|
|
}
|
|
if recvErr != nil {
|
|
return recvErr
|
|
}
|
|
if resp != nil && resp.Entry != nil {
|
|
return nil
|
|
}
|
|
}
|
|
|
|
if err := h.deleteEntryIfExists(ctx, client, bucketMappingPath); err != nil && !errors.Is(err, filer_pb.ErrNotFound) {
|
|
return err
|
|
}
|
|
return nil
|
|
}
|