Files
seaweedFS/weed/s3api/bucket_paths.go
Chris Lu 8eba7ba5b2 feat: drop table location mapping support (#8458)
* feat: drop table location mapping support

Disable external metadata locations for S3 Tables and remove the table location
mapping index entirely. Table metadata must live under the table bucket paths,
so lookups no longer use mapping directories.

Changes:
- Remove mapping lookup and cache from bucket path resolution
- Reject metadataLocation in CreateTable and UpdateTable
- Remove mapping helpers and tests

* compile

* refactor

* fix: accept metadataLocation in S3 Tables API requests

We removed the external table location mapping feature, but still need to
accept and store metadataLocation values from clients like Trino. The mapping
feature was an internal implementation detail that mapped external buckets to
internal table paths. The metadataLocation field itself is part of the S3 Tables
API and should be preserved.

* fmt

* fix: handle MetadataLocation in UpdateTable requests

Mirror handleCreateTable behavior by updating metadata.MetadataLocation
when req.MetadataLocation is provided in UpdateTable requests. This ensures
table metadata location can be updated, not just set during creation.
2026-02-26 16:36:24 -08:00

100 lines
2.6 KiB
Go

package s3api
import (
"errors"
"path"
"strings"
"github.com/seaweedfs/seaweedfs/weed/glog"
"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
"github.com/seaweedfs/seaweedfs/weed/s3api/s3tables"
)
// tableBucketFileValidator checks uploads against the expected Iceberg
// table-bucket object layout; a single shared instance serves all requests.
var tableBucketFileValidator = s3tables.NewTableBucketFileValidator()
// isTableBucket reports whether the named bucket is an S3 Tables bucket.
// It consults the bucket registry's metadata cache first and falls back to a
// filer lookup, refreshing the cache on a successful read. An empty name or a
// bucket that cannot be found yields false.
func (s3a *S3ApiServer) isTableBucket(bucket string) bool {
	if bucket == "" {
		return false
	}
	// Check cache first
	if s3a.bucketRegistry != nil {
		s3a.bucketRegistry.metadataCacheLock.RLock()
		if metadata, ok := s3a.bucketRegistry.metadataCache[bucket]; ok {
			// Release the read lock before returning on a cache hit.
			s3a.bucketRegistry.metadataCacheLock.RUnlock()
			return metadata.IsTableBucket
		}
		s3a.bucketRegistry.metadataCacheLock.RUnlock()
	}
	// Cache miss: look up the bucket entry under the unified buckets root.
	entry, err := s3a.getEntry(s3a.option.BucketsPath, bucket)
	if err == nil && entry != nil {
		// Warm the registry cache so subsequent calls hit the fast path.
		if s3a.bucketRegistry != nil {
			s3a.bucketRegistry.LoadBucketMetadata(entry)
		}
		return s3tables.IsTableBucketEntry(entry)
	}
	// Not-found is expected for nonexistent buckets; only log other failures.
	if err != nil && !errors.Is(err, filer_pb.ErrNotFound) {
		glog.V(1).Infof("bucket lookup failed for %s: %v", bucket, err)
	}
	return false
}
// bucketRoot returns the single root directory under which every bucket
// lives, regardless of bucket type. The bucket argument is kept for
// call-site symmetry and does not affect the result.
func (s3a *S3ApiServer) bucketRoot(bucket string) string {
	return s3a.option.BucketsPath
}
// bucketDir returns the filer directory that backs the given bucket.
func (s3a *S3ApiServer) bucketDir(bucket string) string {
	root := s3a.bucketRoot(bucket)
	return path.Join(root, bucket)
}
// validateTableBucketObjectPath enforces the Iceberg layout for objects in
// table buckets: keys must live under namespace/table/data or metadata.
// Regular (non-table) buckets are never rejected. It returns nil on success
// or an IcebergLayoutError / validator error describing the violation.
func (s3a *S3ApiServer) validateTableBucketObjectPath(bucket, object string) error {
	if !s3a.isTableBucket(bucket) {
		return nil
	}
	layoutErr := &s3tables.IcebergLayoutError{
		Code:    s3tables.ErrCodeInvalidIcebergLayout,
		Message: "object must be under namespace/table/data or metadata",
	}
	key := strings.TrimPrefix(object, "/")
	if key == "" {
		return layoutErr
	}
	// Build the absolute filer path and run the upload validator first, so
	// its more specific errors take precedence over the generic layout error.
	prefix := s3a.bucketDir(bucket)
	if !strings.HasSuffix(prefix, "/") {
		prefix += "/"
	}
	if err := tableBucketFileValidator.ValidateTableBucketUpload(prefix + key); err != nil {
		return err
	}
	// Require at least namespace/table/kind/file, i.e. three separators.
	if strings.Count(key, "/") < 3 {
		return layoutErr
	}
	return nil
}
// bucketPrefix returns the bucket's directory path with a trailing slash,
// suitable for prefix-matching object paths beneath it.
func (s3a *S3ApiServer) bucketPrefix(bucket string) string {
	dir := s3a.bucketDir(bucket)
	return dir + "/"
}
// bucketExists reports whether the bucket has a filer entry. A missing
// entry is not treated as an error; any other lookup failure is returned.
func (s3a *S3ApiServer) bucketExists(bucket string) (bool, error) {
	entry, err := s3a.getBucketEntry(bucket)
	switch {
	case errors.Is(err, filer_pb.ErrNotFound):
		return false, nil
	case err != nil:
		return false, err
	default:
		return entry != nil, nil
	}
}
// getBucketEntry fetches the bucket's filer entry from the unified buckets
// root. Callers treat a filer_pb.ErrNotFound error as "bucket absent".
func (s3a *S3ApiServer) getBucketEntry(bucket string) (*filer_pb.Entry, error) {
	return s3a.getEntry(s3a.option.BucketsPath, bucket)
}