Merge branch 'master' into s3tables-by-claude
@@ -15,14 +15,18 @@
 {{- $existingConfigSecret = or .Values.allInOne.s3.existingConfigSecret .Values.s3.existingConfigSecret .Values.filer.s3.existingConfigSecret }}
 {{- end }}
 {{- else if .Values.master.enabled }}
-{{- /* Check standalone filer.s3 mode */}}
-{{- if .Values.filer.s3.enabled }}
+{{- /* Check if embedded (in filer) or standalone S3 gateway is enabled */}}
+{{- if or .Values.filer.s3.enabled .Values.s3.enabled }}
 {{- $s3Enabled = true }}
-{{- if .Values.filer.s3.createBuckets }}
+{{- if .Values.s3.createBuckets }}
+{{- $createBuckets = .Values.s3.createBuckets }}
+{{- $enableAuth = .Values.s3.enableAuth }}
+{{- $existingConfigSecret = .Values.s3.existingConfigSecret }}
+{{- else if .Values.filer.s3.createBuckets }}
 {{- $createBuckets = .Values.filer.s3.createBuckets }}
+{{- $enableAuth = .Values.filer.s3.enableAuth }}
+{{- $existingConfigSecret = .Values.filer.s3.existingConfigSecret }}
 {{- end }}
-{{- $enableAuth = .Values.filer.s3.enableAuth }}
-{{- $existingConfigSecret = .Values.filer.s3.existingConfigSecret }}
 {{- end }}
 {{- end }}
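The rewritten block above changes the bucket-creation wiring: instead of only honoring the embedded filer.s3 settings, the template now prefers the standalone s3 gateway's values and falls back to filer.s3. A minimal Go sketch of that precedence rule (the type and function names are illustrative, not part of the chart):

// bucketConfig holds the three values the template selects together.
type bucketConfig struct {
	CreateBuckets        []string
	EnableAuth           bool
	ExistingConfigSecret string
}

// pickBucketConfig mirrors the template: if the standalone s3 gateway
// declares createBuckets, its settings win; otherwise the embedded
// filer.s3 settings are used as the fallback.
func pickBucketConfig(s3, filer bucketConfig) bucketConfig {
	if len(s3.CreateBuckets) > 0 {
		return s3
	}
	return filer
}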
@@ -36,7 +40,7 @@ metadata:
     app.kubernetes.io/managed-by: {{ .Release.Service | quote }}
     app.kubernetes.io/instance: {{ .Release.Name | quote }}
   annotations:
-    "helm.sh/hook": post-install
+    "helm.sh/hook": post-install,post-upgrade
     "helm.sh/hook-weight": "-5"
     "helm.sh/hook-delete-policy": hook-succeeded
 spec:
@@ -105,9 +109,12 @@ spec:
           wait_for_service "http://$WEED_CLUSTER_SW_FILER{{ .Values.filer.readinessProbe.httpGet.path }}"
           {{- end }}
           {{- range $createBuckets }}
-          /bin/echo \
-            "s3.bucket.create --name {{ .name }}" |\
-            /usr/bin/weed shell
+          if /bin/echo "s3.bucket.list" | /usr/bin/weed shell | awk '{print $1}' | grep -Fxq "{{ .name }}"; then
+            echo "Bucket '{{ .name }}' already exists, skipping creation."
+          else
+            echo "Creating bucket '{{ .name }}'..."
+            /bin/echo "s3.bucket.create --name {{ .name }}" | /usr/bin/weed shell
+          fi
           {{- end }}
           {{- range $createBuckets }}
           {{- if .anonymousRead }}
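The new script makes the hook job idempotent for post-upgrade runs: it lists buckets, takes the first whitespace-separated field of each line, and only creates a bucket when no line matches exactly (`grep -Fxq` is fixed-string, full-line, quiet). A Go sketch of the same matching semantics, useful for seeing why "bucket-a" does not falsely match "bucket-a-logs":

package main

import (
	"fmt"
	"strings"
)

// bucketExists mirrors `awk '{print $1}' | grep -Fxq name`: compare the
// first field of each listing line against name as a fixed, whole string.
func bucketExists(listOutput, name string) bool {
	for _, line := range strings.Split(listOutput, "\n") {
		fields := strings.Fields(line)
		if len(fields) > 0 && fields[0] == name {
			return true
		}
	}
	return false
}

func main() {
	listing := "bucket-a-logs 2024\nbucket-a 2024"
	fmt.Println(bucketExists(listing, "bucket-a")) // true: exact match only
	fmt.Println(bucketExists(listing, "bucket"))   // false: no prefix match
}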
@@ -891,7 +891,7 @@ filer:
     # should have a secret key called seaweedfs_s3_config with an inline json configure
     existingConfigSecret: null
     auditLogConfig: {}
-    # You may specify buckets to be created during the install process.
+    # You may specify buckets to be created during the install or upgrade process.
     # Buckets may be exposed publicly by setting `anonymousRead` to `true`
     # createBuckets:
     #   - name: bucket-a
@@ -916,6 +916,13 @@ s3:
   # should have a secret key called seaweedfs_s3_config with an inline json config
   existingConfigSecret: null
   auditLogConfig: {}
+  # You may specify buckets to be created during the install or upgrade process.
+  # Buckets may be exposed publicly by setting `anonymousRead` to `true`
+  # createBuckets:
+  #   - name: bucket-a
+  #     anonymousRead: true
+  #   - name: bucket-b
+  #     anonymousRead: false
 
   # Suffix of the host name, {bucket}.{domainName}
   domainName: ""
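Each createBuckets entry documented in the commented example above is a bucket name plus an optional anonymousRead flag. A sketch of that shape in Go (the struct name and yaml tags are assumptions for illustration, not chart code):

// createBucket models one entry of the createBuckets list.
type createBucket struct {
	Name          string `yaml:"name"`
	AnonymousRead bool   `yaml:"anonymousRead"`
}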
@@ -11,6 +11,13 @@ import (
 	"github.com/seaweedfs/seaweedfs/weed/util"
 )
 
+// closeEcVolumes closes all EC volumes in the given DiskLocation to release file handles.
+func closeEcVolumes(dl *DiskLocation) {
+	for _, ecVol := range dl.ecVolumes {
+		ecVol.Close()
+	}
+}
+
 // TestIncompleteEcEncodingCleanup tests the cleanup logic for incomplete EC encoding scenarios
 func TestIncompleteEcEncodingCleanup(t *testing.T) {
 	tests := []struct {
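The hunks that follow register this helper with t.Cleanup so the EC file handles are released even when a test fails early, which lets the test's temporary directories be removed. The recurring usage pattern, as it appears below:

t.Cleanup(func() {
	closeEcVolumes(diskLocation)
})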
@@ -182,11 +189,18 @@ func TestIncompleteEcEncodingCleanup(t *testing.T) {
 				t.Logf("loadAllEcShards returned error (expected in some cases): %v", loadErr)
 			}
 
+			// Close EC volumes before idempotency test to avoid leaking file handles
+			closeEcVolumes(diskLocation)
+			diskLocation.ecVolumes = make(map[needle.VolumeId]*erasure_coding.EcVolume)
+
 			// Test idempotency - running again should not cause issues
 			loadErr2 := diskLocation.loadAllEcShards(nil)
 			if loadErr2 != nil {
 				t.Logf("Second loadAllEcShards returned error: %v", loadErr2)
 			}
+			t.Cleanup(func() {
+				closeEcVolumes(diskLocation)
+			})
 
 			// Verify cleanup expectations
 			if tt.expectCleanup {
@@ -554,6 +568,9 @@ func TestEcCleanupWithSeparateIdxDirectory(t *testing.T) {
 	if loadErr != nil {
 		t.Logf("loadAllEcShards error: %v", loadErr)
 	}
+	t.Cleanup(func() {
+		closeEcVolumes(diskLocation)
+	})
 
 	// Verify cleanup occurred in data directory (shards)
 	for i := 0; i < erasure_coding.TotalShardsCount; i++ {
@@ -625,6 +642,9 @@ func TestDistributedEcVolumeNoFileDeletion(t *testing.T) {
 	if loadErr != nil {
 		t.Logf("loadAllEcShards returned error (expected): %v", loadErr)
 	}
+	t.Cleanup(func() {
+		closeEcVolumes(diskLocation)
+	})
 
 	// CRITICAL CHECK: Verify shard files still exist (should NOT be deleted)
 	for i := 0; i < 5; i++ {
@@ -18,6 +18,7 @@ func TestFirstInvalidIndex(t *testing.T) {
 	if err != nil {
 		t.Fatalf("volume creation: %v", err)
 	}
+	defer v.Close()
 	type WriteInfo struct {
 		offset int64
 		size   int32
@@ -45,7 +45,10 @@ func newTestStore(t *testing.T, numDirs int) *Store {
 			}
 		}
 	}()
-	t.Cleanup(func() { close(done) })
+	t.Cleanup(func() {
+		store.Close()
+		close(done)
+	})
 
 	return store
 }
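Folding store.Close() and close(done) into a single t.Cleanup callback pins their relative order: the store is closed, then the background goroutine is signalled to stop. With separate registrations the order is implicit, because t.Cleanup callbacks run last-in, first-out. A standalone sketch of that ordering (an illustrative test, not part of this change):

package example

import (
	"fmt"
	"testing"
)

// TestCleanupOrder shows that t.Cleanup callbacks run in LIFO order
// once the test (and its subtests) have finished.
func TestCleanupOrder(t *testing.T) {
	t.Cleanup(func() { fmt.Println("registered first, runs second") })
	t.Cleanup(func() { fmt.Println("registered second, runs first") })
}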
@@ -16,6 +16,7 @@ func TestReadNeedMetaWithWritesAndUpdates(t *testing.T) {
 	if err != nil {
 		t.Fatalf("volume creation: %v", err)
 	}
+	defer v.Close()
 	type WriteInfo struct {
 		offset int64
 		size   int32
@@ -55,6 +56,7 @@ func TestReadNeedMetaWithDeletesThenWrites(t *testing.T) {
 	if err != nil {
 		t.Fatalf("volume creation: %v", err)
 	}
+	defer v.Close()
 	type WriteInfo struct {
 		offset int64
 		size   int32
@@ -119,6 +119,7 @@ func testCompaction(t *testing.T, needleMapKind NeedleMapKind) {
 	if err != nil {
 		t.Fatalf("volume reloading: %v", err)
 	}
+	defer v.Close()
 
 	for i := 1; i <= beforeCommitFileCount+afterCommitFileCount; i++ {
@@ -21,6 +21,7 @@ func TestSearchVolumesWithDeletedNeedles(t *testing.T) {
 	if err != nil {
 		t.Fatalf("volume creation: %v", err)
 	}
+	defer v.Close()
 
 	count := 20
@@ -119,6 +120,7 @@ func TestDestroyNonemptyVolumeWithOnlyEmpty(t *testing.T) {
 	if err != nil {
 		t.Fatalf("volume creation: %v", err)
 	}
+	defer v.Close()
 	path := v.DataBackend.Name()
 
 	// should return "volume not empty" error and do not delete file when Destroy non-empty volume
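The trailing comment states the expectation under test: destroying a non-empty volume in only-empty mode must return a "volume not empty" error and leave the data file on disk. A hedged sketch of those assertions (assuming Destroy takes an onlyEmpty flag, as the test name suggests; not code from this diff):

// Assumption: Destroy(true) refuses to remove a non-empty volume.
if err := v.Destroy(true); err == nil {
	t.Fatal("expected 'volume not empty' error")
}
if _, statErr := os.Stat(path); statErr != nil {
	t.Fatalf("data file should survive the failed Destroy: %v", statErr)
}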