* fix: paginate bucket listing in Admin UI to show all buckets The Admin UI's GetS3Buckets() had a hardcoded Limit of 1000 in the ListEntries request, causing the Total Buckets count to cap at 1000 even when more buckets exist. This adds pagination to iterate through all buckets by continuing from the last entry name when a full page is returned. Fixes seaweedfs/seaweedfs#8564 * feat: add server-side pagination and sorting to S3 buckets page Add pagination controls, page size selector, and sortable column headers to the Admin UI's Object Store buckets page, following the same pattern used by the Cluster Volumes page. This ensures the UI remains responsive with thousands of buckets. - Add CurrentPage, TotalPages, PageSize, SortBy, SortOrder to S3BucketsData - Accept page/pageSize/sortBy/sortOrder query params in ShowS3Buckets handler - Sort buckets by name, owner, created, objects, logical/physical size - Paginate results server-side (default 100 per page) - Add pagination nav, page size dropdown, and sort indicators to template * Update s3_buckets_templ.go * Update object_store_users_templ.go * fix: use errors.Is(err, io.EOF) instead of string comparison Replace brittle err.Error() == "EOF" string comparison with idiomatic errors.Is(err, io.EOF) for checking stream end in bucket listing. 
* fix: address PR review findings for bucket pagination - Clamp page to totalPages when page exceeds total, preventing empty results with misleading pagination state - Fix sort comparator to use explicit ascending/descending comparisons with a name tie-breaker, satisfying strict weak ordering for sort.Slice - Capture SnapshotTsNs from first ListEntries response and pass it to subsequent requests for consistent pagination across pages - Replace non-focusable <th onclick> sort headers with <a> tags and reuse getSortIcon, matching the cluster_volumes accessibility pattern - Change exportBucketList() to fetch all buckets from /api/s3/buckets instead of scraping DOM rows (which now only contain the current page)
539 lines
17 KiB
Go
539 lines
17 KiB
Go
package dash
|
|
|
|
import (
	"context"
	"fmt"
	"net/http"
	"os"
	"strconv"
	"strings"
	"time"

	"github.com/gorilla/mux"

	"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
	"github.com/seaweedfs/seaweedfs/weed/s3api"
	"github.com/seaweedfs/seaweedfs/weed/s3api/s3_constants"
)
|
|
|
|
// MaxOwnerNameLength is the maximum allowed length for bucket owner identity names.
|
|
// This is a reasonable limit to prevent abuse; AWS IAM user names are limited to 64 chars,
|
|
// but we use 256 to allow for more complex identity formats (e.g., email addresses).
|
|
const MaxOwnerNameLength = 256
|
|
|
|
// S3 Bucket management data structures for templates

// S3BucketsData is the view model for the Object Store buckets page.
// It carries the bucket rows for one page plus the server-side pagination
// and sorting state needed to render navigation controls.
type S3BucketsData struct {
	Username     string     `json:"username"`      // logged-in admin user shown in the page header
	Buckets      []S3Bucket `json:"buckets"`       // buckets for the current page (server-side paginated)
	TotalBuckets int        `json:"total_buckets"` // total bucket count across all pages
	TotalSize    int64      `json:"total_size"`    // aggregate size in bytes across all buckets
	LastUpdated  time.Time  `json:"last_updated"`  // when this snapshot was produced

	// Pagination
	CurrentPage int `json:"current_page"` // 1-based page index
	TotalPages  int `json:"total_pages"`
	PageSize    int `json:"page_size"`

	// Sorting
	SortBy    string `json:"sort_by"`    // column key, e.g. "name"
	SortOrder string `json:"sort_order"` // "asc" or "desc"
}
|
|
|
|
// CreateBucketRequest is the JSON payload accepted by CreateBucket.
// Quota, versioning, and object-lock fields are validated by the handler;
// note that enabling object lock implicitly forces versioning on.
type CreateBucketRequest struct {
	Name                string `json:"name"` // validated manually in CreateBucket
	Region              string `json:"region"`
	QuotaSize           int64  `json:"quota_size"`            // Quota size in bytes
	QuotaUnit           string `json:"quota_unit"`            // Unit: MB, GB, TB
	QuotaEnabled        bool   `json:"quota_enabled"`         // Whether quota is enabled
	VersioningEnabled   bool   `json:"versioning_enabled"`    // Whether versioning is enabled
	ObjectLockEnabled   bool   `json:"object_lock_enabled"`   // Whether object lock is enabled
	ObjectLockMode      string `json:"object_lock_mode"`      // Object lock mode: "GOVERNANCE" or "COMPLIANCE"
	SetDefaultRetention bool   `json:"set_default_retention"` // Whether to set default retention
	ObjectLockDuration  int32  `json:"object_lock_duration"`  // Default retention duration in days
	Owner               string `json:"owner"`                 // Bucket owner identity (for S3 IAM authentication)
}
|
|
|
|
// S3 Bucket Management Handlers
|
|
|
|
// ShowS3Buckets displays the Object Store buckets management page
|
|
func (s *AdminServer) ShowS3Buckets(w http.ResponseWriter, r *http.Request) {
|
|
username := UsernameFromContext(r.Context())
|
|
|
|
data, err := s.GetS3BucketsData(1, 100, "name", "asc")
|
|
if err != nil {
|
|
writeJSONError(w, http.StatusInternalServerError, "Failed to get Object Store buckets: "+err.Error())
|
|
return
|
|
}
|
|
|
|
data.Username = username
|
|
writeJSON(w, http.StatusOK, data)
|
|
}
|
|
|
|
// ShowBucketDetails displays detailed information about a specific bucket
|
|
func (s *AdminServer) ShowBucketDetails(w http.ResponseWriter, r *http.Request) {
|
|
bucketName := mux.Vars(r)["bucket"]
|
|
if bucketName == "" {
|
|
writeJSONError(w, http.StatusBadRequest, "Bucket name is required")
|
|
return
|
|
}
|
|
|
|
details, err := s.GetBucketDetails(bucketName)
|
|
if err != nil {
|
|
writeJSONError(w, http.StatusInternalServerError, "Failed to get bucket details: "+err.Error())
|
|
return
|
|
}
|
|
|
|
writeJSON(w, http.StatusOK, details)
|
|
}
|
|
|
|
// CreateBucket creates a new S3 bucket
|
|
func (s *AdminServer) CreateBucket(w http.ResponseWriter, r *http.Request) {
|
|
var req CreateBucketRequest
|
|
if err := decodeJSONBody(newJSONMaxReader(w, r), &req); err != nil {
|
|
writeJSONError(w, http.StatusBadRequest, "Invalid request: "+err.Error())
|
|
return
|
|
}
|
|
if strings.TrimSpace(req.Name) == "" {
|
|
writeJSONError(w, http.StatusBadRequest, "Bucket name is required")
|
|
return
|
|
}
|
|
|
|
// Validate bucket name (basic validation)
|
|
if len(req.Name) < 3 || len(req.Name) > 63 {
|
|
writeJSONError(w, http.StatusBadRequest, "Bucket name must be between 3 and 63 characters")
|
|
return
|
|
}
|
|
|
|
// Validate object lock settings
|
|
if req.ObjectLockEnabled {
|
|
// Object lock requires versioning to be enabled
|
|
req.VersioningEnabled = true
|
|
|
|
// Validate object lock mode
|
|
if req.ObjectLockMode != "GOVERNANCE" && req.ObjectLockMode != "COMPLIANCE" {
|
|
writeJSONError(w, http.StatusBadRequest, "Object lock mode must be either GOVERNANCE or COMPLIANCE")
|
|
return
|
|
}
|
|
|
|
// Validate retention duration if default retention is enabled
|
|
if req.SetDefaultRetention {
|
|
if req.ObjectLockDuration <= 0 {
|
|
writeJSONError(w, http.StatusBadRequest, "Object lock duration must be greater than 0 days when default retention is enabled")
|
|
return
|
|
}
|
|
}
|
|
}
|
|
|
|
normalizedUnit, err := normalizeQuotaUnit(req.QuotaUnit)
|
|
if err != nil {
|
|
writeJSONError(w, http.StatusBadRequest, err.Error())
|
|
return
|
|
}
|
|
req.QuotaUnit = normalizedUnit
|
|
quotaBytes := convertQuotaToBytes(req.QuotaSize, normalizedUnit)
|
|
|
|
// Validate quota: if enabled, size must be greater than 0
|
|
if req.QuotaEnabled && quotaBytes <= 0 {
|
|
writeJSONError(w, http.StatusBadRequest, "Quota size must be greater than 0 when quota is enabled")
|
|
return
|
|
}
|
|
|
|
// Sanitize owner: trim whitespace and enforce max length
|
|
owner := strings.TrimSpace(req.Owner)
|
|
if len(owner) > MaxOwnerNameLength {
|
|
writeJSONError(w, http.StatusBadRequest, fmt.Sprintf("Owner name must be %d characters or less", MaxOwnerNameLength))
|
|
return
|
|
}
|
|
|
|
err = s.CreateS3BucketWithObjectLock(req.Name, quotaBytes, req.QuotaEnabled, req.VersioningEnabled, req.ObjectLockEnabled, req.ObjectLockMode, req.SetDefaultRetention, req.ObjectLockDuration, owner)
|
|
if err != nil {
|
|
writeJSONError(w, http.StatusInternalServerError, "Failed to create bucket: "+err.Error())
|
|
return
|
|
}
|
|
|
|
writeJSON(w, http.StatusCreated, map[string]interface{}{
|
|
"message": "Bucket created successfully",
|
|
"bucket": req.Name,
|
|
"quota_size": req.QuotaSize,
|
|
"quota_unit": req.QuotaUnit,
|
|
"quota_enabled": req.QuotaEnabled,
|
|
"versioning_enabled": req.VersioningEnabled,
|
|
"object_lock_enabled": req.ObjectLockEnabled,
|
|
"object_lock_mode": req.ObjectLockMode,
|
|
"object_lock_duration": req.ObjectLockDuration,
|
|
"owner": owner,
|
|
})
|
|
}
|
|
|
|
// UpdateBucketQuota updates the quota settings for a bucket
|
|
func (s *AdminServer) UpdateBucketQuota(w http.ResponseWriter, r *http.Request) {
|
|
bucketName := mux.Vars(r)["bucket"]
|
|
if bucketName == "" {
|
|
writeJSONError(w, http.StatusBadRequest, "Bucket name is required")
|
|
return
|
|
}
|
|
|
|
var req struct {
|
|
QuotaSize int64 `json:"quota_size"`
|
|
QuotaUnit string `json:"quota_unit"`
|
|
QuotaEnabled bool `json:"quota_enabled"`
|
|
}
|
|
if err := decodeJSONBody(newJSONMaxReader(w, r), &req); err != nil {
|
|
writeJSONError(w, http.StatusBadRequest, "Invalid request: "+err.Error())
|
|
return
|
|
}
|
|
|
|
if req.QuotaEnabled && req.QuotaSize <= 0 {
|
|
writeJSONError(w, http.StatusBadRequest, "quota_size must be > 0 when quota_enabled is true")
|
|
return
|
|
}
|
|
|
|
normalizedUnit, err := normalizeQuotaUnit(req.QuotaUnit)
|
|
if err != nil {
|
|
writeJSONError(w, http.StatusBadRequest, err.Error())
|
|
return
|
|
}
|
|
req.QuotaUnit = normalizedUnit
|
|
// Convert quota to bytes
|
|
quotaBytes := convertQuotaToBytes(req.QuotaSize, normalizedUnit)
|
|
|
|
err = s.SetBucketQuota(bucketName, quotaBytes, req.QuotaEnabled)
|
|
if err != nil {
|
|
writeJSONError(w, http.StatusInternalServerError, "Failed to update bucket quota: "+err.Error())
|
|
return
|
|
}
|
|
|
|
writeJSON(w, http.StatusOK, map[string]interface{}{
|
|
"message": "Bucket quota updated successfully",
|
|
"bucket": bucketName,
|
|
"quota_size": req.QuotaSize,
|
|
"quota_unit": req.QuotaUnit,
|
|
"quota_enabled": req.QuotaEnabled,
|
|
})
|
|
}
|
|
|
|
// DeleteBucket deletes an S3 bucket
|
|
func (s *AdminServer) DeleteBucket(w http.ResponseWriter, r *http.Request) {
|
|
bucketName := mux.Vars(r)["bucket"]
|
|
if bucketName == "" {
|
|
writeJSONError(w, http.StatusBadRequest, "Bucket name is required")
|
|
return
|
|
}
|
|
|
|
err := s.DeleteS3Bucket(bucketName)
|
|
if err != nil {
|
|
writeJSONError(w, http.StatusInternalServerError, "Failed to delete bucket: "+err.Error())
|
|
return
|
|
}
|
|
|
|
writeJSON(w, http.StatusOK, map[string]interface{}{
|
|
"message": "Bucket deleted successfully",
|
|
"bucket": bucketName,
|
|
})
|
|
}
|
|
|
|
// UpdateBucketOwner updates the owner of an S3 bucket
|
|
func (s *AdminServer) UpdateBucketOwner(w http.ResponseWriter, r *http.Request) {
|
|
bucketName := mux.Vars(r)["bucket"]
|
|
if bucketName == "" {
|
|
writeJSONError(w, http.StatusBadRequest, "Bucket name is required")
|
|
return
|
|
}
|
|
|
|
// Use pointer to detect if owner field was explicitly provided
|
|
var req struct {
|
|
Owner *string `json:"owner"`
|
|
}
|
|
if err := decodeJSONBody(newJSONMaxReader(w, r), &req); err != nil {
|
|
writeJSONError(w, http.StatusBadRequest, "Invalid request: "+err.Error())
|
|
return
|
|
}
|
|
|
|
// Require owner field to be explicitly provided
|
|
if req.Owner == nil {
|
|
writeJSONError(w, http.StatusBadRequest, "Owner field is required (use empty string to clear owner)")
|
|
return
|
|
}
|
|
|
|
// Trim and validate owner
|
|
owner := strings.TrimSpace(*req.Owner)
|
|
if len(owner) > MaxOwnerNameLength {
|
|
writeJSONError(w, http.StatusBadRequest, fmt.Sprintf("Owner name must be %d characters or less", MaxOwnerNameLength))
|
|
return
|
|
}
|
|
|
|
err := s.SetBucketOwner(bucketName, owner)
|
|
if err != nil {
|
|
writeJSONError(w, http.StatusInternalServerError, "Failed to update bucket owner: "+err.Error())
|
|
return
|
|
}
|
|
|
|
writeJSON(w, http.StatusOK, map[string]interface{}{
|
|
"message": "Bucket owner updated successfully",
|
|
"bucket": bucketName,
|
|
"owner": owner,
|
|
})
|
|
}
|
|
|
|
// SetBucketOwner sets the owner of a bucket
|
|
func (s *AdminServer) SetBucketOwner(bucketName string, owner string) error {
|
|
return s.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {
|
|
// Get the current bucket entry
|
|
lookupResp, err := client.LookupDirectoryEntry(context.Background(), &filer_pb.LookupDirectoryEntryRequest{
|
|
Directory: "/buckets",
|
|
Name: bucketName,
|
|
})
|
|
if err != nil {
|
|
return fmt.Errorf("lookup bucket %s: %w", bucketName, err)
|
|
}
|
|
|
|
bucketEntry := lookupResp.Entry
|
|
|
|
// Initialize Extended map if nil
|
|
if bucketEntry.Extended == nil {
|
|
bucketEntry.Extended = make(map[string][]byte)
|
|
}
|
|
|
|
// Set or remove the owner
|
|
if owner == "" {
|
|
delete(bucketEntry.Extended, s3_constants.AmzIdentityId)
|
|
} else {
|
|
bucketEntry.Extended[s3_constants.AmzIdentityId] = []byte(owner)
|
|
}
|
|
|
|
// Update the entry
|
|
_, err = client.UpdateEntry(context.Background(), &filer_pb.UpdateEntryRequest{
|
|
Directory: "/buckets",
|
|
Entry: bucketEntry,
|
|
})
|
|
if err != nil {
|
|
return fmt.Errorf("failed to update bucket owner: %w", err)
|
|
}
|
|
|
|
return nil
|
|
})
|
|
}
|
|
|
|
// ListBucketsAPI returns the list of buckets as JSON
|
|
func (s *AdminServer) ListBucketsAPI(w http.ResponseWriter, r *http.Request) {
|
|
buckets, err := s.GetS3Buckets()
|
|
if err != nil {
|
|
writeJSONError(w, http.StatusInternalServerError, "Failed to get buckets: "+err.Error())
|
|
return
|
|
}
|
|
|
|
writeJSON(w, http.StatusOK, map[string]interface{}{
|
|
"buckets": buckets,
|
|
"total": len(buckets),
|
|
})
|
|
}
|
|
|
|
// convertQuotaToBytes converts a quota size in the given unit to bytes.
// Non-positive sizes and unknown units both yield 0 (meaning "no quota").
func convertQuotaToBytes(size int64, unit string) int64 {
	if size <= 0 {
		return 0
	}

	// Binary multipliers: each unit is a power-of-two step of 1024.
	var multiplier int64
	switch unit {
	case "B":
		multiplier = 1
	case "KB":
		multiplier = 1 << 10
	case "MB":
		multiplier = 1 << 20
	case "GB":
		multiplier = 1 << 30
	case "TB":
		multiplier = 1 << 40
	default:
		return 0
	}
	return size * multiplier
}
|
|
|
|
// normalizeQuotaUnit upper-cases and trims a quota unit string, defaulting
// an empty input to "MB". Units outside B/KB/MB/GB/TB are rejected.
func normalizeQuotaUnit(unit string) (string, error) {
	u := strings.ToUpper(strings.TrimSpace(unit))
	switch u {
	case "":
		// Missing unit defaults to megabytes.
		return "MB", nil
	case "B", "KB", "MB", "GB", "TB":
		return u, nil
	}
	// Report the caller's original (un-normalized) input.
	return "", fmt.Errorf("unsupported quota unit: %s", unit)
}
|
|
|
|
// convertBytesToQuota picks the largest unit (TB, GB, or MB) that divides
// the byte count evenly and returns the size in that unit. Zero maps to
// (0, "MB"); anything not evenly divisible falls through to MB.
func convertBytesToQuota(bytes int64) (int64, string) {
	const (
		mb = int64(1) << 20
		gb = int64(1) << 30
		tb = int64(1) << 40
	)

	switch {
	case bytes == 0:
		return 0, "MB"
	case bytes >= tb && bytes%tb == 0:
		return bytes / tb, "TB"
	case bytes >= gb && bytes%gb == 0:
		return bytes / gb, "GB"
	default:
		// MB is the fallback unit (integer division truncates).
		return bytes / mb, "MB"
	}
}
|
|
|
|
// SetBucketQuota sets the quota for a bucket
|
|
func (s *AdminServer) SetBucketQuota(bucketName string, quotaBytes int64, quotaEnabled bool) error {
|
|
return s.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {
|
|
// Get the current bucket entry
|
|
lookupResp, err := client.LookupDirectoryEntry(context.Background(), &filer_pb.LookupDirectoryEntryRequest{
|
|
Directory: "/buckets",
|
|
Name: bucketName,
|
|
})
|
|
if err != nil {
|
|
return fmt.Errorf("bucket not found: %w", err)
|
|
}
|
|
|
|
bucketEntry := lookupResp.Entry
|
|
|
|
// Determine quota value (negative if disabled)
|
|
var quota int64
|
|
if quotaEnabled && quotaBytes > 0 {
|
|
quota = quotaBytes
|
|
} else if !quotaEnabled && quotaBytes > 0 {
|
|
quota = -quotaBytes
|
|
} else {
|
|
quota = 0
|
|
}
|
|
|
|
// Update the quota
|
|
bucketEntry.Quota = quota
|
|
|
|
// Update the entry
|
|
_, err = client.UpdateEntry(context.Background(), &filer_pb.UpdateEntryRequest{
|
|
Directory: "/buckets",
|
|
Entry: bucketEntry,
|
|
})
|
|
if err != nil {
|
|
return fmt.Errorf("failed to update bucket quota: %w", err)
|
|
}
|
|
|
|
return nil
|
|
})
|
|
}
|
|
|
|
// CreateS3BucketWithQuota creates a new S3 bucket with quota settings.
// It is a convenience wrapper around CreateS3BucketWithObjectLock with
// versioning, object lock, default retention, and owner all left at
// their zero values.
func (s *AdminServer) CreateS3BucketWithQuota(bucketName string, quotaBytes int64, quotaEnabled bool) error {
	return s.CreateS3BucketWithObjectLock(bucketName, quotaBytes, quotaEnabled, false, false, "", false, 0, "")
}
|
|
|
// CreateS3BucketWithObjectLock creates a new S3 bucket with quota, versioning,
// object lock settings, and owner.
//
// The sequence is: ensure /buckets exists, reject duplicates, encode the
// quota (positive = enforced, negative = configured-but-disabled, zero = none),
// build the bucket entry with versioning/object-lock metadata in its extended
// attributes, and finally create the directory entry under /buckets.
func (s *AdminServer) CreateS3BucketWithObjectLock(bucketName string, quotaBytes int64, quotaEnabled, versioningEnabled, objectLockEnabled bool, objectLockMode string, setDefaultRetention bool, objectLockDuration int32, owner string) error {
	return s.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {
		// First ensure /buckets directory exists
		_, err := client.CreateEntry(context.Background(), &filer_pb.CreateEntryRequest{
			Directory: "/",
			Entry: &filer_pb.Entry{
				Name:        "buckets",
				IsDirectory: true,
				Attributes: &filer_pb.FuseAttributes{
					FileMode: uint32(0755 | os.ModeDir), // Directory mode
					Uid:      uint32(1000),
					Gid:      uint32(1000),
					Crtime:   time.Now().Unix(),
					Mtime:    time.Now().Unix(),
					TtlSec:   0,
				},
			},
		})
		// Ignore error if directory already exists
		// NOTE(review): matched by error-string substring — assumes the filer
		// reports duplicates with "already exists" or "existing entry".
		if err != nil && !strings.Contains(err.Error(), "already exists") && !strings.Contains(err.Error(), "existing entry") {
			return fmt.Errorf("failed to create /buckets directory: %w", err)
		}

		// Check if bucket already exists; a successful lookup means duplicate.
		_, err = client.LookupDirectoryEntry(context.Background(), &filer_pb.LookupDirectoryEntryRequest{
			Directory: "/buckets",
			Name:      bucketName,
		})
		if err == nil {
			return fmt.Errorf("bucket %s already exists", bucketName)
		}

		// Determine quota value (negative if disabled, zero if no quota).
		var quota int64
		if quotaEnabled && quotaBytes > 0 {
			quota = quotaBytes
		} else if !quotaEnabled && quotaBytes > 0 {
			quota = -quotaBytes
		} else {
			quota = 0
		}

		// Prepare bucket attributes with versioning and object lock metadata
		attributes := &filer_pb.FuseAttributes{
			FileMode: uint32(0755 | os.ModeDir), // Directory mode
			Uid:      filer_pb.OS_UID,
			Gid:      filer_pb.OS_GID,
			Crtime:   time.Now().Unix(),
			Mtime:    time.Now().Unix(),
			TtlSec:   0,
		}

		// Create extended attributes map for versioning and owner
		extended := make(map[string][]byte)

		// Set bucket owner if specified
		if owner != "" {
			extended[s3_constants.AmzIdentityId] = []byte(owner)
		}

		// Create bucket entry
		bucketEntry := &filer_pb.Entry{
			Name:        bucketName,
			IsDirectory: true,
			Attributes:  attributes,
			Extended:    extended,
			Quota:       quota,
		}

		// Handle versioning using shared utilities
		if err := s3api.StoreVersioningInExtended(bucketEntry, versioningEnabled); err != nil {
			return fmt.Errorf("failed to store versioning configuration: %w", err)
		}

		// Handle Object Lock configuration using shared utilities
		if objectLockEnabled {
			var duration int32 = 0
			var mode string = ""

			if setDefaultRetention {
				// Validate Object Lock parameters only when setting default retention
				if err := s3api.ValidateObjectLockParameters(objectLockEnabled, objectLockMode, objectLockDuration); err != nil {
					return fmt.Errorf("invalid Object Lock parameters: %w", err)
				}
				duration = objectLockDuration
				mode = objectLockMode
			}

			// Create Object Lock configuration using shared utility
			objectLockConfig := s3api.CreateObjectLockConfigurationFromParams(objectLockEnabled, mode, duration)

			// Store Object Lock configuration in extended attributes using shared utility
			if err := s3api.StoreObjectLockConfigurationInExtended(bucketEntry, objectLockConfig); err != nil {
				return fmt.Errorf("failed to store Object Lock configuration: %w", err)
			}
		}

		// Create bucket directory under /buckets
		_, err = client.CreateEntry(context.Background(), &filer_pb.CreateEntryRequest{
			Directory: "/buckets",
			Entry:     bucketEntry,
		})
		if err != nil {
			return fmt.Errorf("failed to create bucket directory: %w", err)
		}

		return nil
	})
}