chore: remove ~50k lines of unreachable dead code (#8913)
* chore: remove unreachable dead code across the codebase Remove ~50,000 lines of unreachable code identified by static analysis. Major removals: - weed/filer/redis_lua: entire unused Redis Lua filer store implementation - weed/wdclient/net2, resource_pool: unused connection/resource pool packages - weed/plugin/worker/lifecycle: unused lifecycle plugin worker - weed/s3api: unused S3 policy templates, presigned URL IAM, streaming copy, multipart IAM, key rotation, and various SSE helper functions - weed/mq/kafka: unused partition mapping, compression, schema, and protocol functions - weed/mq/offset: unused SQL storage and migration code - weed/worker: unused registry, task, and monitoring functions - weed/query: unused SQL engine, parquet scanner, and type functions - weed/shell: unused EC proportional rebalance functions - weed/storage/erasure_coding/distribution: unused distribution analysis functions - Individual unreachable functions removed from 150+ files across admin, credential, filer, iam, kms, mount, mq, operation, pb, s3api, server, shell, storage, topology, and util packages * fix(s3): reset shared memory store in IAM test to prevent flaky failure TestLoadIAMManagerFromConfig_EmptyConfigWithFallbackKey was flaky because the MemoryStore credential backend is a singleton registered via init(). Earlier tests that create anonymous identities pollute the shared store, causing LookupAnonymous() to unexpectedly return true. Fix by calling Reset() on the memory store before the test runs. * style: run gofmt on changed files * fix: restore KMS functions used by integration tests * fix(plugin): prevent panic on send to closed worker session channel The Plugin.sendToWorker method could panic with "send on closed channel" when a worker disconnected while a message was being sent. The race was between streamSession.close() closing the outgoing channel and sendToWorker writing to it concurrently. 
Add a done channel to streamSession that is closed before the outgoing channel, and check it in sendToWorker's select to safely detect closed sessions without panicking.
This commit is contained in:
@@ -1,127 +0,0 @@
|
||||
package s3lifecycle
|
||||
|
||||
import "time"
|
||||
|
||||
// Evaluate checks the given lifecycle rules against an object and returns
|
||||
// the highest-priority action that applies. The evaluation follows S3's
|
||||
// action priority:
|
||||
// 1. ExpiredObjectDeleteMarker (delete marker is sole version)
|
||||
// 2. NoncurrentVersionExpiration (non-current version age/count)
|
||||
// 3. Current version Expiration (Days or Date)
|
||||
//
|
||||
// AbortIncompleteMultipartUpload is evaluated separately since it applies
|
||||
// to uploads, not objects. Use EvaluateMPUAbort for that.
|
||||
func Evaluate(rules []Rule, obj ObjectInfo, now time.Time) EvalResult {
|
||||
// Phase 1: ExpiredObjectDeleteMarker
|
||||
if obj.IsDeleteMarker && obj.IsLatest && obj.NumVersions == 1 {
|
||||
for _, rule := range rules {
|
||||
if rule.Status != "Enabled" {
|
||||
continue
|
||||
}
|
||||
if !MatchesFilter(rule, obj) {
|
||||
continue
|
||||
}
|
||||
if rule.ExpiredObjectDeleteMarker {
|
||||
return EvalResult{Action: ActionExpireDeleteMarker, RuleID: rule.ID}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Phase 2: NoncurrentVersionExpiration
|
||||
if !obj.IsLatest && !obj.SuccessorModTime.IsZero() {
|
||||
for _, rule := range rules {
|
||||
if ShouldExpireNoncurrentVersion(rule, obj, obj.NoncurrentIndex, now) {
|
||||
return EvalResult{Action: ActionDeleteVersion, RuleID: rule.ID}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Phase 3: Current version Expiration
|
||||
if obj.IsLatest && !obj.IsDeleteMarker {
|
||||
for _, rule := range rules {
|
||||
if rule.Status != "Enabled" {
|
||||
continue
|
||||
}
|
||||
if !MatchesFilter(rule, obj) {
|
||||
continue
|
||||
}
|
||||
// Date-based expiration
|
||||
if !rule.ExpirationDate.IsZero() && !now.Before(rule.ExpirationDate) {
|
||||
return EvalResult{Action: ActionDeleteObject, RuleID: rule.ID}
|
||||
}
|
||||
// Days-based expiration
|
||||
if rule.ExpirationDays > 0 {
|
||||
expiryTime := expectedExpiryTime(obj.ModTime, rule.ExpirationDays)
|
||||
if !now.Before(expiryTime) {
|
||||
return EvalResult{Action: ActionDeleteObject, RuleID: rule.ID}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return EvalResult{Action: ActionNone}
|
||||
}
|
||||
|
||||
// ShouldExpireNoncurrentVersion checks whether a non-current version should
|
||||
// be expired considering both NoncurrentDays and NewerNoncurrentVersions.
|
||||
// noncurrentIndex is the 0-based position among non-current versions sorted
|
||||
// newest-first (0 = newest non-current version).
|
||||
func ShouldExpireNoncurrentVersion(rule Rule, obj ObjectInfo, noncurrentIndex int, now time.Time) bool {
|
||||
if rule.Status != "Enabled" {
|
||||
return false
|
||||
}
|
||||
if rule.NoncurrentVersionExpirationDays <= 0 {
|
||||
return false
|
||||
}
|
||||
if obj.IsLatest || obj.SuccessorModTime.IsZero() {
|
||||
return false
|
||||
}
|
||||
if !MatchesFilter(rule, obj) {
|
||||
return false
|
||||
}
|
||||
|
||||
// Check age threshold.
|
||||
expiryTime := expectedExpiryTime(obj.SuccessorModTime, rule.NoncurrentVersionExpirationDays)
|
||||
if now.Before(expiryTime) {
|
||||
return false
|
||||
}
|
||||
|
||||
// Check NewerNoncurrentVersions count threshold.
|
||||
if rule.NewerNoncurrentVersions > 0 && noncurrentIndex < rule.NewerNoncurrentVersions {
|
||||
return false
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
// EvaluateMPUAbort finds the applicable AbortIncompleteMultipartUpload rule
|
||||
// for a multipart upload with the given key prefix and creation time.
|
||||
func EvaluateMPUAbort(rules []Rule, uploadKey string, createdAt time.Time, now time.Time) EvalResult {
|
||||
for _, rule := range rules {
|
||||
if rule.Status != "Enabled" {
|
||||
continue
|
||||
}
|
||||
if rule.AbortMPUDaysAfterInitiation <= 0 {
|
||||
continue
|
||||
}
|
||||
if !matchesPrefix(rule.Prefix, uploadKey) {
|
||||
continue
|
||||
}
|
||||
cutoff := expectedExpiryTime(createdAt, rule.AbortMPUDaysAfterInitiation)
|
||||
if !now.Before(cutoff) {
|
||||
return EvalResult{Action: ActionAbortMultipartUpload, RuleID: rule.ID}
|
||||
}
|
||||
}
|
||||
return EvalResult{Action: ActionNone}
|
||||
}
|
||||
|
||||
// expectedExpiryTime computes the expiration time given a reference time and
|
||||
// a number of days. Following S3 semantics, expiration happens at midnight UTC
|
||||
// of the day after the specified number of days.
|
||||
func expectedExpiryTime(refTime time.Time, days int) time.Time {
|
||||
if days == 0 {
|
||||
return refTime
|
||||
}
|
||||
t := refTime.UTC().Add(time.Duration(days+1) * 24 * time.Hour)
|
||||
return t.Truncate(24 * time.Hour)
|
||||
}
|
||||
@@ -1,495 +0,0 @@
|
||||
package s3lifecycle
|
||||
|
||||
import (
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
// now is the fixed reference instant shared by the tests in this file so
// that age-based assertions stay deterministic.
var now = time.Date(2026, 3, 27, 12, 0, 0, 0, time.UTC)
|
||||
|
||||
func TestEvaluate_ExpirationDays(t *testing.T) {
|
||||
rules := []Rule{{
|
||||
ID: "expire-30d", Status: "Enabled",
|
||||
ExpirationDays: 30,
|
||||
}}
|
||||
|
||||
t.Run("object_older_than_days_is_expired", func(t *testing.T) {
|
||||
obj := ObjectInfo{
|
||||
Key: "data/file.txt", IsLatest: true,
|
||||
ModTime: now.Add(-31 * 24 * time.Hour),
|
||||
}
|
||||
result := Evaluate(rules, obj, now)
|
||||
assertAction(t, ActionDeleteObject, result.Action)
|
||||
assertEqual(t, "expire-30d", result.RuleID)
|
||||
})
|
||||
|
||||
t.Run("object_younger_than_days_is_not_expired", func(t *testing.T) {
|
||||
obj := ObjectInfo{
|
||||
Key: "data/file.txt", IsLatest: true,
|
||||
ModTime: now.Add(-10 * 24 * time.Hour),
|
||||
}
|
||||
result := Evaluate(rules, obj, now)
|
||||
assertAction(t, ActionNone, result.Action)
|
||||
})
|
||||
|
||||
t.Run("non_latest_version_not_affected_by_expiration_days", func(t *testing.T) {
|
||||
obj := ObjectInfo{
|
||||
Key: "data/file.txt", IsLatest: false,
|
||||
ModTime: now.Add(-60 * 24 * time.Hour),
|
||||
}
|
||||
result := Evaluate(rules, obj, now)
|
||||
assertAction(t, ActionNone, result.Action)
|
||||
})
|
||||
|
||||
t.Run("delete_marker_not_affected_by_expiration_days", func(t *testing.T) {
|
||||
obj := ObjectInfo{
|
||||
Key: "data/file.txt", IsLatest: true, IsDeleteMarker: true,
|
||||
ModTime: now.Add(-60 * 24 * time.Hour), NumVersions: 3,
|
||||
}
|
||||
result := Evaluate(rules, obj, now)
|
||||
assertAction(t, ActionNone, result.Action)
|
||||
})
|
||||
}
|
||||
|
||||
func TestEvaluate_ExpirationDate(t *testing.T) {
|
||||
expirationDate := time.Date(2026, 3, 15, 0, 0, 0, 0, time.UTC)
|
||||
rules := []Rule{{
|
||||
ID: "expire-date", Status: "Enabled",
|
||||
ExpirationDate: expirationDate,
|
||||
}}
|
||||
|
||||
t.Run("object_expired_after_date", func(t *testing.T) {
|
||||
obj := ObjectInfo{
|
||||
Key: "file.txt", IsLatest: true,
|
||||
ModTime: now.Add(-60 * 24 * time.Hour),
|
||||
}
|
||||
result := Evaluate(rules, obj, now)
|
||||
assertAction(t, ActionDeleteObject, result.Action)
|
||||
})
|
||||
|
||||
t.Run("object_not_expired_before_date", func(t *testing.T) {
|
||||
obj := ObjectInfo{
|
||||
Key: "file.txt", IsLatest: true,
|
||||
ModTime: now.Add(-1 * time.Hour),
|
||||
}
|
||||
beforeDate := time.Date(2026, 3, 10, 0, 0, 0, 0, time.UTC)
|
||||
result := Evaluate(rules, obj, beforeDate)
|
||||
assertAction(t, ActionNone, result.Action)
|
||||
})
|
||||
}
|
||||
|
||||
func TestEvaluate_ExpiredObjectDeleteMarker(t *testing.T) {
|
||||
rules := []Rule{{
|
||||
ID: "cleanup-markers", Status: "Enabled",
|
||||
ExpiredObjectDeleteMarker: true,
|
||||
}}
|
||||
|
||||
t.Run("sole_delete_marker_is_expired", func(t *testing.T) {
|
||||
obj := ObjectInfo{
|
||||
Key: "file.txt", IsLatest: true, IsDeleteMarker: true,
|
||||
NumVersions: 1,
|
||||
}
|
||||
result := Evaluate(rules, obj, now)
|
||||
assertAction(t, ActionExpireDeleteMarker, result.Action)
|
||||
})
|
||||
|
||||
t.Run("delete_marker_with_other_versions_not_expired", func(t *testing.T) {
|
||||
obj := ObjectInfo{
|
||||
Key: "file.txt", IsLatest: true, IsDeleteMarker: true,
|
||||
NumVersions: 3,
|
||||
}
|
||||
result := Evaluate(rules, obj, now)
|
||||
assertAction(t, ActionNone, result.Action)
|
||||
})
|
||||
|
||||
t.Run("non_latest_delete_marker_not_expired", func(t *testing.T) {
|
||||
obj := ObjectInfo{
|
||||
Key: "file.txt", IsLatest: false, IsDeleteMarker: true,
|
||||
NumVersions: 1,
|
||||
}
|
||||
result := Evaluate(rules, obj, now)
|
||||
assertAction(t, ActionNone, result.Action)
|
||||
})
|
||||
|
||||
t.Run("non_delete_marker_not_affected", func(t *testing.T) {
|
||||
obj := ObjectInfo{
|
||||
Key: "file.txt", IsLatest: true, IsDeleteMarker: false,
|
||||
NumVersions: 1,
|
||||
}
|
||||
result := Evaluate(rules, obj, now)
|
||||
assertAction(t, ActionNone, result.Action)
|
||||
})
|
||||
}
|
||||
|
||||
func TestEvaluate_NoncurrentVersionExpiration(t *testing.T) {
|
||||
rules := []Rule{{
|
||||
ID: "expire-noncurrent", Status: "Enabled",
|
||||
NoncurrentVersionExpirationDays: 30,
|
||||
}}
|
||||
|
||||
t.Run("old_noncurrent_version_is_expired", func(t *testing.T) {
|
||||
obj := ObjectInfo{
|
||||
Key: "file.txt", IsLatest: false,
|
||||
SuccessorModTime: now.Add(-45 * 24 * time.Hour),
|
||||
}
|
||||
result := Evaluate(rules, obj, now)
|
||||
assertAction(t, ActionDeleteVersion, result.Action)
|
||||
})
|
||||
|
||||
t.Run("recent_noncurrent_version_is_not_expired", func(t *testing.T) {
|
||||
obj := ObjectInfo{
|
||||
Key: "file.txt", IsLatest: false,
|
||||
SuccessorModTime: now.Add(-10 * 24 * time.Hour),
|
||||
}
|
||||
result := Evaluate(rules, obj, now)
|
||||
assertAction(t, ActionNone, result.Action)
|
||||
})
|
||||
|
||||
t.Run("latest_version_not_affected", func(t *testing.T) {
|
||||
obj := ObjectInfo{
|
||||
Key: "file.txt", IsLatest: true,
|
||||
ModTime: now.Add(-60 * 24 * time.Hour),
|
||||
}
|
||||
result := Evaluate(rules, obj, now)
|
||||
assertAction(t, ActionNone, result.Action)
|
||||
})
|
||||
}
|
||||
|
||||
func TestShouldExpireNoncurrentVersion(t *testing.T) {
|
||||
rule := Rule{
|
||||
ID: "noncurrent-rule", Status: "Enabled",
|
||||
NoncurrentVersionExpirationDays: 30,
|
||||
NewerNoncurrentVersions: 2,
|
||||
}
|
||||
|
||||
t.Run("old_version_beyond_count_is_expired", func(t *testing.T) {
|
||||
obj := ObjectInfo{
|
||||
Key: "file.txt", IsLatest: false,
|
||||
SuccessorModTime: now.Add(-45 * 24 * time.Hour),
|
||||
}
|
||||
// noncurrentIndex=2 means this is the 3rd noncurrent version (0-indexed)
|
||||
// With NewerNoncurrentVersions=2, indices 0 and 1 are kept.
|
||||
if !ShouldExpireNoncurrentVersion(rule, obj, 2, now) {
|
||||
t.Error("expected version at index 2 to be expired")
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("old_version_within_count_is_kept", func(t *testing.T) {
|
||||
obj := ObjectInfo{
|
||||
Key: "file.txt", IsLatest: false,
|
||||
SuccessorModTime: now.Add(-45 * 24 * time.Hour),
|
||||
}
|
||||
// noncurrentIndex=1 is within the keep threshold (NewerNoncurrentVersions=2).
|
||||
if ShouldExpireNoncurrentVersion(rule, obj, 1, now) {
|
||||
t.Error("expected version at index 1 to be kept")
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("recent_version_beyond_count_is_kept", func(t *testing.T) {
|
||||
obj := ObjectInfo{
|
||||
Key: "file.txt", IsLatest: false,
|
||||
SuccessorModTime: now.Add(-5 * 24 * time.Hour),
|
||||
}
|
||||
// Even at index 5 (beyond count), if too young, it's kept.
|
||||
if ShouldExpireNoncurrentVersion(rule, obj, 5, now) {
|
||||
t.Error("expected recent version to be kept regardless of index")
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("disabled_rule_never_expires", func(t *testing.T) {
|
||||
disabled := Rule{
|
||||
ID: "disabled", Status: "Disabled",
|
||||
NoncurrentVersionExpirationDays: 1,
|
||||
}
|
||||
obj := ObjectInfo{
|
||||
Key: "file.txt", IsLatest: false,
|
||||
SuccessorModTime: now.Add(-365 * 24 * time.Hour),
|
||||
}
|
||||
if ShouldExpireNoncurrentVersion(disabled, obj, 10, now) {
|
||||
t.Error("disabled rule should never expire")
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func TestEvaluate_PrefixFilter(t *testing.T) {
|
||||
rules := []Rule{{
|
||||
ID: "logs-only", Status: "Enabled",
|
||||
Prefix: "logs/",
|
||||
ExpirationDays: 7,
|
||||
}}
|
||||
|
||||
t.Run("matching_prefix", func(t *testing.T) {
|
||||
obj := ObjectInfo{
|
||||
Key: "logs/app.log", IsLatest: true,
|
||||
ModTime: now.Add(-10 * 24 * time.Hour),
|
||||
}
|
||||
result := Evaluate(rules, obj, now)
|
||||
assertAction(t, ActionDeleteObject, result.Action)
|
||||
})
|
||||
|
||||
t.Run("non_matching_prefix", func(t *testing.T) {
|
||||
obj := ObjectInfo{
|
||||
Key: "data/file.txt", IsLatest: true,
|
||||
ModTime: now.Add(-10 * 24 * time.Hour),
|
||||
}
|
||||
result := Evaluate(rules, obj, now)
|
||||
assertAction(t, ActionNone, result.Action)
|
||||
})
|
||||
}
|
||||
|
||||
func TestEvaluate_TagFilter(t *testing.T) {
|
||||
rules := []Rule{{
|
||||
ID: "temp-only", Status: "Enabled",
|
||||
ExpirationDays: 1,
|
||||
FilterTags: map[string]string{"env": "temp"},
|
||||
}}
|
||||
|
||||
t.Run("matching_tags", func(t *testing.T) {
|
||||
obj := ObjectInfo{
|
||||
Key: "file.txt", IsLatest: true,
|
||||
ModTime: now.Add(-5 * 24 * time.Hour),
|
||||
Tags: map[string]string{"env": "temp", "project": "foo"},
|
||||
}
|
||||
result := Evaluate(rules, obj, now)
|
||||
assertAction(t, ActionDeleteObject, result.Action)
|
||||
})
|
||||
|
||||
t.Run("missing_tag", func(t *testing.T) {
|
||||
obj := ObjectInfo{
|
||||
Key: "file.txt", IsLatest: true,
|
||||
ModTime: now.Add(-5 * 24 * time.Hour),
|
||||
Tags: map[string]string{"project": "foo"},
|
||||
}
|
||||
result := Evaluate(rules, obj, now)
|
||||
assertAction(t, ActionNone, result.Action)
|
||||
})
|
||||
|
||||
t.Run("wrong_tag_value", func(t *testing.T) {
|
||||
obj := ObjectInfo{
|
||||
Key: "file.txt", IsLatest: true,
|
||||
ModTime: now.Add(-5 * 24 * time.Hour),
|
||||
Tags: map[string]string{"env": "prod"},
|
||||
}
|
||||
result := Evaluate(rules, obj, now)
|
||||
assertAction(t, ActionNone, result.Action)
|
||||
})
|
||||
|
||||
t.Run("nil_object_tags", func(t *testing.T) {
|
||||
obj := ObjectInfo{
|
||||
Key: "file.txt", IsLatest: true,
|
||||
ModTime: now.Add(-5 * 24 * time.Hour),
|
||||
}
|
||||
result := Evaluate(rules, obj, now)
|
||||
assertAction(t, ActionNone, result.Action)
|
||||
})
|
||||
}
|
||||
|
||||
func TestEvaluate_SizeFilter(t *testing.T) {
|
||||
rules := []Rule{{
|
||||
ID: "large-files", Status: "Enabled",
|
||||
ExpirationDays: 7,
|
||||
FilterSizeGreaterThan: 1024 * 1024, // > 1 MB
|
||||
FilterSizeLessThan: 100 * 1024 * 1024, // < 100 MB
|
||||
}}
|
||||
|
||||
t.Run("matching_size", func(t *testing.T) {
|
||||
obj := ObjectInfo{
|
||||
Key: "file.bin", IsLatest: true,
|
||||
ModTime: now.Add(-10 * 24 * time.Hour),
|
||||
Size: 10 * 1024 * 1024, // 10 MB
|
||||
}
|
||||
result := Evaluate(rules, obj, now)
|
||||
assertAction(t, ActionDeleteObject, result.Action)
|
||||
})
|
||||
|
||||
t.Run("too_small", func(t *testing.T) {
|
||||
obj := ObjectInfo{
|
||||
Key: "file.bin", IsLatest: true,
|
||||
ModTime: now.Add(-10 * 24 * time.Hour),
|
||||
Size: 512, // 512 bytes
|
||||
}
|
||||
result := Evaluate(rules, obj, now)
|
||||
assertAction(t, ActionNone, result.Action)
|
||||
})
|
||||
|
||||
t.Run("too_large", func(t *testing.T) {
|
||||
obj := ObjectInfo{
|
||||
Key: "file.bin", IsLatest: true,
|
||||
ModTime: now.Add(-10 * 24 * time.Hour),
|
||||
Size: 200 * 1024 * 1024, // 200 MB
|
||||
}
|
||||
result := Evaluate(rules, obj, now)
|
||||
assertAction(t, ActionNone, result.Action)
|
||||
})
|
||||
}
|
||||
|
||||
func TestEvaluate_CombinedFilters(t *testing.T) {
|
||||
rules := []Rule{{
|
||||
ID: "combined", Status: "Enabled",
|
||||
Prefix: "logs/",
|
||||
ExpirationDays: 7,
|
||||
FilterTags: map[string]string{"env": "dev"},
|
||||
FilterSizeGreaterThan: 100,
|
||||
}}
|
||||
|
||||
t.Run("all_filters_match", func(t *testing.T) {
|
||||
obj := ObjectInfo{
|
||||
Key: "logs/app.log", IsLatest: true,
|
||||
ModTime: now.Add(-10 * 24 * time.Hour),
|
||||
Size: 1024,
|
||||
Tags: map[string]string{"env": "dev"},
|
||||
}
|
||||
result := Evaluate(rules, obj, now)
|
||||
assertAction(t, ActionDeleteObject, result.Action)
|
||||
})
|
||||
|
||||
t.Run("prefix_doesnt_match", func(t *testing.T) {
|
||||
obj := ObjectInfo{
|
||||
Key: "data/app.log", IsLatest: true,
|
||||
ModTime: now.Add(-10 * 24 * time.Hour),
|
||||
Size: 1024,
|
||||
Tags: map[string]string{"env": "dev"},
|
||||
}
|
||||
result := Evaluate(rules, obj, now)
|
||||
assertAction(t, ActionNone, result.Action)
|
||||
})
|
||||
|
||||
t.Run("tag_doesnt_match", func(t *testing.T) {
|
||||
obj := ObjectInfo{
|
||||
Key: "logs/app.log", IsLatest: true,
|
||||
ModTime: now.Add(-10 * 24 * time.Hour),
|
||||
Size: 1024,
|
||||
Tags: map[string]string{"env": "prod"},
|
||||
}
|
||||
result := Evaluate(rules, obj, now)
|
||||
assertAction(t, ActionNone, result.Action)
|
||||
})
|
||||
|
||||
t.Run("size_doesnt_match", func(t *testing.T) {
|
||||
obj := ObjectInfo{
|
||||
Key: "logs/app.log", IsLatest: true,
|
||||
ModTime: now.Add(-10 * 24 * time.Hour),
|
||||
Size: 50, // too small
|
||||
Tags: map[string]string{"env": "dev"},
|
||||
}
|
||||
result := Evaluate(rules, obj, now)
|
||||
assertAction(t, ActionNone, result.Action)
|
||||
})
|
||||
}
|
||||
|
||||
func TestEvaluate_DisabledRule(t *testing.T) {
|
||||
rules := []Rule{{
|
||||
ID: "disabled", Status: "Disabled",
|
||||
ExpirationDays: 1,
|
||||
}}
|
||||
obj := ObjectInfo{
|
||||
Key: "file.txt", IsLatest: true,
|
||||
ModTime: now.Add(-365 * 24 * time.Hour),
|
||||
}
|
||||
result := Evaluate(rules, obj, now)
|
||||
assertAction(t, ActionNone, result.Action)
|
||||
}
|
||||
|
||||
func TestEvaluate_MultipleRules_Priority(t *testing.T) {
|
||||
t.Run("delete_marker_takes_priority_over_expiration", func(t *testing.T) {
|
||||
rules := []Rule{
|
||||
{ID: "expire", Status: "Enabled", ExpirationDays: 1},
|
||||
{ID: "marker", Status: "Enabled", ExpiredObjectDeleteMarker: true},
|
||||
}
|
||||
obj := ObjectInfo{
|
||||
Key: "file.txt", IsLatest: true, IsDeleteMarker: true,
|
||||
NumVersions: 1, ModTime: now.Add(-10 * 24 * time.Hour),
|
||||
}
|
||||
result := Evaluate(rules, obj, now)
|
||||
assertAction(t, ActionExpireDeleteMarker, result.Action)
|
||||
assertEqual(t, "marker", result.RuleID)
|
||||
})
|
||||
|
||||
t.Run("first_matching_expiration_rule_wins", func(t *testing.T) {
|
||||
rules := []Rule{
|
||||
{ID: "rule1", Status: "Enabled", ExpirationDays: 30, Prefix: "logs/"},
|
||||
{ID: "rule2", Status: "Enabled", ExpirationDays: 7},
|
||||
}
|
||||
obj := ObjectInfo{
|
||||
Key: "logs/app.log", IsLatest: true,
|
||||
ModTime: now.Add(-31 * 24 * time.Hour),
|
||||
}
|
||||
result := Evaluate(rules, obj, now)
|
||||
assertAction(t, ActionDeleteObject, result.Action)
|
||||
assertEqual(t, "rule1", result.RuleID)
|
||||
})
|
||||
}
|
||||
|
||||
func TestEvaluate_EmptyPrefix(t *testing.T) {
|
||||
rules := []Rule{{
|
||||
ID: "all", Status: "Enabled",
|
||||
ExpirationDays: 30,
|
||||
}}
|
||||
obj := ObjectInfo{
|
||||
Key: "any/path/file.txt", IsLatest: true,
|
||||
ModTime: now.Add(-31 * 24 * time.Hour),
|
||||
}
|
||||
result := Evaluate(rules, obj, now)
|
||||
assertAction(t, ActionDeleteObject, result.Action)
|
||||
}
|
||||
|
||||
func TestEvaluateMPUAbort(t *testing.T) {
|
||||
rules := []Rule{{
|
||||
ID: "abort-mpu", Status: "Enabled",
|
||||
AbortMPUDaysAfterInitiation: 7,
|
||||
}}
|
||||
|
||||
t.Run("old_upload_is_aborted", func(t *testing.T) {
|
||||
result := EvaluateMPUAbort(rules, "uploads/file.bin", now.Add(-10*24*time.Hour), now)
|
||||
assertAction(t, ActionAbortMultipartUpload, result.Action)
|
||||
})
|
||||
|
||||
t.Run("recent_upload_is_not_aborted", func(t *testing.T) {
|
||||
result := EvaluateMPUAbort(rules, "uploads/file.bin", now.Add(-3*24*time.Hour), now)
|
||||
assertAction(t, ActionNone, result.Action)
|
||||
})
|
||||
|
||||
t.Run("prefix_scoped_abort", func(t *testing.T) {
|
||||
prefixRules := []Rule{{
|
||||
ID: "abort-logs", Status: "Enabled",
|
||||
Prefix: "logs/",
|
||||
AbortMPUDaysAfterInitiation: 1,
|
||||
}}
|
||||
result := EvaluateMPUAbort(prefixRules, "data/file.bin", now.Add(-5*24*time.Hour), now)
|
||||
assertAction(t, ActionNone, result.Action)
|
||||
})
|
||||
}
|
||||
|
||||
func TestExpectedExpiryTime(t *testing.T) {
|
||||
ref := time.Date(2026, 3, 1, 15, 30, 0, 0, time.UTC)
|
||||
|
||||
t.Run("30_days", func(t *testing.T) {
|
||||
// S3 spec: expires at midnight UTC of day 32 (ref + 31 days, truncated).
|
||||
expiry := expectedExpiryTime(ref, 30)
|
||||
expected := time.Date(2026, 4, 1, 0, 0, 0, 0, time.UTC)
|
||||
if !expiry.Equal(expected) {
|
||||
t.Errorf("expected %v, got %v", expected, expiry)
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("zero_days_returns_ref", func(t *testing.T) {
|
||||
expiry := expectedExpiryTime(ref, 0)
|
||||
if !expiry.Equal(ref) {
|
||||
t.Errorf("expected %v, got %v", ref, expiry)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func assertAction(t *testing.T, expected, actual Action) {
|
||||
t.Helper()
|
||||
if expected != actual {
|
||||
t.Errorf("expected action %d, got %d", expected, actual)
|
||||
}
|
||||
}
|
||||
|
||||
// assertEqual fails the test when the actual string differs from the expected one.
func assertEqual(t *testing.T, expected, actual string) {
	t.Helper()
	if actual != expected {
		t.Errorf("expected %q, got %q", expected, actual)
	}
}
|
||||
@@ -1,56 +0,0 @@
|
||||
package s3lifecycle
|
||||
|
||||
import "strings"
|
||||
|
||||
// MatchesFilter checks if an object matches the rule's filter criteria
|
||||
// (prefix, tags, and size constraints).
|
||||
func MatchesFilter(rule Rule, obj ObjectInfo) bool {
|
||||
if !matchesPrefix(rule.Prefix, obj.Key) {
|
||||
return false
|
||||
}
|
||||
if !matchesTags(rule.FilterTags, obj.Tags) {
|
||||
return false
|
||||
}
|
||||
if !matchesSize(rule.FilterSizeGreaterThan, rule.FilterSizeLessThan, obj.Size) {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// matchesPrefix reports whether key begins with prefix; an empty prefix
// matches every key.
func matchesPrefix(prefix, key string) bool {
	return prefix == "" || strings.HasPrefix(key, prefix)
}
|
||||
|
||||
// matchesTags reports whether every rule tag is present on the object with an
// identical value. A nil/empty rule tag set matches any object.
func matchesTags(ruleTags, objTags map[string]string) bool {
	switch {
	case len(ruleTags) == 0:
		return true
	case len(objTags) == 0:
		return false
	}
	for key, want := range ruleTags {
		got, ok := objTags[key]
		if !ok || got != want {
			return false
		}
	}
	return true
}
|
||||
|
||||
// matchesSize reports whether objSize lies strictly inside the configured
// bounds; a zero bound disables that side of the check.
func matchesSize(greaterThan, lessThan, objSize int64) bool {
	aboveLower := greaterThan <= 0 || objSize > greaterThan
	belowUpper := lessThan <= 0 || objSize < lessThan
	return aboveLower && belowUpper
}
|
||||
@@ -1,79 +0,0 @@
|
||||
package s3lifecycle
|
||||
|
||||
import "testing"
|
||||
|
||||
func TestMatchesPrefix(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
prefix string
|
||||
key string
|
||||
want bool
|
||||
}{
|
||||
{"empty_prefix_matches_all", "", "any/key.txt", true},
|
||||
{"exact_prefix_match", "logs/", "logs/app.log", true},
|
||||
{"prefix_mismatch", "logs/", "data/file.txt", false},
|
||||
{"key_shorter_than_prefix", "very/long/prefix/", "short", false},
|
||||
{"prefix_equals_key", "exact", "exact", true},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
if got := matchesPrefix(tt.prefix, tt.key); got != tt.want {
|
||||
t.Errorf("matchesPrefix(%q, %q) = %v, want %v", tt.prefix, tt.key, got, tt.want)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestMatchesTags(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
ruleTags map[string]string
|
||||
objTags map[string]string
|
||||
want bool
|
||||
}{
|
||||
{"nil_rule_tags_match_all", nil, map[string]string{"a": "1"}, true},
|
||||
{"empty_rule_tags_match_all", map[string]string{}, map[string]string{"a": "1"}, true},
|
||||
{"nil_obj_tags_no_match", map[string]string{"a": "1"}, nil, false},
|
||||
{"single_tag_match", map[string]string{"env": "dev"}, map[string]string{"env": "dev", "foo": "bar"}, true},
|
||||
{"single_tag_value_mismatch", map[string]string{"env": "dev"}, map[string]string{"env": "prod"}, false},
|
||||
{"single_tag_key_missing", map[string]string{"env": "dev"}, map[string]string{"foo": "bar"}, false},
|
||||
{"multi_tag_all_match", map[string]string{"env": "dev", "tier": "hot"}, map[string]string{"env": "dev", "tier": "hot", "extra": "x"}, true},
|
||||
{"multi_tag_partial_match", map[string]string{"env": "dev", "tier": "hot"}, map[string]string{"env": "dev"}, false},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
if got := matchesTags(tt.ruleTags, tt.objTags); got != tt.want {
|
||||
t.Errorf("matchesTags() = %v, want %v", got, tt.want)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestMatchesSize(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
greaterThan int64
|
||||
lessThan int64
|
||||
objSize int64
|
||||
want bool
|
||||
}{
|
||||
{"no_constraints", 0, 0, 1000, true},
|
||||
{"only_greater_than_pass", 100, 0, 200, true},
|
||||
{"only_greater_than_fail", 100, 0, 50, false},
|
||||
{"only_greater_than_equal_fail", 100, 0, 100, false},
|
||||
{"only_less_than_pass", 0, 1000, 500, true},
|
||||
{"only_less_than_fail", 0, 1000, 2000, false},
|
||||
{"only_less_than_equal_fail", 0, 1000, 1000, false},
|
||||
{"both_constraints_pass", 100, 1000, 500, true},
|
||||
{"both_constraints_too_small", 100, 1000, 50, false},
|
||||
{"both_constraints_too_large", 100, 1000, 2000, false},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
if got := matchesSize(tt.greaterThan, tt.lessThan, tt.objSize); got != tt.want {
|
||||
t.Errorf("matchesSize(%d, %d, %d) = %v, want %v",
|
||||
tt.greaterThan, tt.lessThan, tt.objSize, got, tt.want)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -1,34 +1,3 @@
|
||||
package s3lifecycle
|
||||
|
||||
import "strings"
|
||||
|
||||
// tagPrefix is the Extended-metadata key prefix under which S3 object tags are stored.
const tagPrefix = "X-Amz-Tagging-"

// ExtractTags pulls S3 object tags out of a filer entry's Extended metadata.
// Each tag is stored under "X-Amz-Tagging-<tagKey>"; entries without that
// prefix are ignored. Returns nil when no tags are present.
func ExtractTags(extended map[string][]byte) map[string]string {
	if len(extended) == 0 {
		return nil
	}
	var tags map[string]string
	for key, raw := range extended {
		if !strings.HasPrefix(key, tagPrefix) {
			continue
		}
		// Allocate lazily so entries with no tags return nil.
		if tags == nil {
			tags = make(map[string]string)
		}
		tags[strings.TrimPrefix(key, tagPrefix)] = string(raw)
	}
	return tags
}
|
||||
|
||||
// HasTagRules returns true if any enabled rule in the set uses tag-based filtering.
|
||||
// This is used as an optimization to skip tag extraction when no rules need it.
|
||||
func HasTagRules(rules []Rule) bool {
|
||||
for _, r := range rules {
|
||||
if r.Status == "Enabled" && len(r.FilterTags) > 0 {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
@@ -1,89 +0,0 @@
|
||||
package s3lifecycle
|
||||
|
||||
import "testing"
|
||||
|
||||
func TestExtractTags(t *testing.T) {
|
||||
t.Run("extracts_tags_with_prefix", func(t *testing.T) {
|
||||
extended := map[string][]byte{
|
||||
"X-Amz-Tagging-env": []byte("prod"),
|
||||
"X-Amz-Tagging-project": []byte("foo"),
|
||||
"Content-Type": []byte("text/plain"),
|
||||
"X-Amz-Meta-Custom": []byte("value"),
|
||||
}
|
||||
tags := ExtractTags(extended)
|
||||
if len(tags) != 2 {
|
||||
t.Fatalf("expected 2 tags, got %d", len(tags))
|
||||
}
|
||||
if tags["env"] != "prod" {
|
||||
t.Errorf("expected env=prod, got %q", tags["env"])
|
||||
}
|
||||
if tags["project"] != "foo" {
|
||||
t.Errorf("expected project=foo, got %q", tags["project"])
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("nil_extended_returns_nil", func(t *testing.T) {
|
||||
tags := ExtractTags(nil)
|
||||
if tags != nil {
|
||||
t.Errorf("expected nil, got %v", tags)
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("no_tags_returns_nil", func(t *testing.T) {
|
||||
extended := map[string][]byte{
|
||||
"Content-Type": []byte("text/plain"),
|
||||
}
|
||||
tags := ExtractTags(extended)
|
||||
if tags != nil {
|
||||
t.Errorf("expected nil, got %v", tags)
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("empty_tag_value", func(t *testing.T) {
|
||||
extended := map[string][]byte{
|
||||
"X-Amz-Tagging-empty": []byte(""),
|
||||
}
|
||||
tags := ExtractTags(extended)
|
||||
if len(tags) != 1 {
|
||||
t.Fatalf("expected 1 tag, got %d", len(tags))
|
||||
}
|
||||
if tags["empty"] != "" {
|
||||
t.Errorf("expected empty value, got %q", tags["empty"])
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func TestHasTagRules(t *testing.T) {
|
||||
t.Run("has_tag_rules", func(t *testing.T) {
|
||||
rules := []Rule{
|
||||
{Status: "Enabled", FilterTags: map[string]string{"env": "dev"}},
|
||||
}
|
||||
if !HasTagRules(rules) {
|
||||
t.Error("expected true")
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("no_tag_rules", func(t *testing.T) {
|
||||
rules := []Rule{
|
||||
{Status: "Enabled", ExpirationDays: 30},
|
||||
}
|
||||
if HasTagRules(rules) {
|
||||
t.Error("expected false")
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("disabled_tag_rule", func(t *testing.T) {
|
||||
rules := []Rule{
|
||||
{Status: "Disabled", FilterTags: map[string]string{"env": "dev"}},
|
||||
}
|
||||
if HasTagRules(rules) {
|
||||
t.Error("expected false for disabled rule")
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("empty_rules", func(t *testing.T) {
|
||||
if HasTagRules(nil) {
|
||||
t.Error("expected false for nil rules")
|
||||
}
|
||||
})
|
||||
}
|
||||
@@ -1,99 +1,6 @@
|
||||
package s3lifecycle
|
||||
|
||||
import (
|
||||
"math"
|
||||
"strconv"
|
||||
"time"
|
||||
)
|
||||
|
||||
// versionIdFormatThreshold distinguishes old vs new format version IDs.
// New format (inverted timestamps) produces values above this threshold;
// old format (raw timestamps) produces values below it.
const versionIdFormatThreshold = 0x4000000000000000

// GetVersionTimestamp extracts the actual timestamp from a SeaweedFS version ID,
// handling both old (raw nanosecond) and new (inverted nanosecond) formats.
// Returns zero time if the version ID is invalid or "null".
func GetVersionTimestamp(versionId string) time.Time {
	if ns := getVersionTimestampNanos(versionId); ns != 0 {
		return time.Unix(0, ns)
	}
	return time.Time{}
}

// getVersionTimestampNanos extracts the raw nanosecond timestamp from a version ID,
// returning 0 for "null", too-short, non-hex, or overflowing version IDs.
func getVersionTimestampNanos(versionId string) int64 {
	// The "null" check is kept for clarity even though len("null") < 16.
	if versionId == "null" || len(versionId) < 16 {
		return 0
	}
	raw, err := strconv.ParseUint(versionId[:16], 16, 64)
	if err != nil {
		return 0
	}
	switch {
	case raw > math.MaxInt64:
		// High bit set: converting to int64 would wrap negative.
		return 0
	case raw > versionIdFormatThreshold:
		// New format stores math.MaxInt64 - UnixNano; invert it back.
		return int64(math.MaxInt64 - raw)
	default:
		return int64(raw)
	}
}
|
||||
|
||||
// isNewFormatVersionId returns true if the version ID uses inverted timestamps.
|
||||
func isNewFormatVersionId(versionId string) bool {
|
||||
if len(versionId) < 16 || versionId == "null" {
|
||||
return false
|
||||
}
|
||||
timestampPart, err := strconv.ParseUint(versionId[:16], 16, 64)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
return timestampPart > versionIdFormatThreshold && timestampPart <= math.MaxInt64
|
||||
}
|
||||
|
||||
// CompareVersionIds compares two version IDs for sorting (newest first).
|
||||
// Returns negative if a is newer, positive if b is newer, 0 if equal.
|
||||
// Handles both old and new format version IDs and uses full lexicographic
|
||||
// comparison (not just timestamps) to break ties from the random suffix.
|
||||
func CompareVersionIds(a, b string) int {
|
||||
if a == b {
|
||||
return 0
|
||||
}
|
||||
if a == "null" {
|
||||
return 1
|
||||
}
|
||||
if b == "null" {
|
||||
return -1
|
||||
}
|
||||
|
||||
aIsNew := isNewFormatVersionId(a)
|
||||
bIsNew := isNewFormatVersionId(b)
|
||||
|
||||
if aIsNew == bIsNew {
|
||||
if aIsNew {
|
||||
// New format: smaller hex = newer (inverted timestamps).
|
||||
if a < b {
|
||||
return -1
|
||||
}
|
||||
return 1
|
||||
}
|
||||
// Old format: smaller hex = older.
|
||||
if a < b {
|
||||
return 1
|
||||
}
|
||||
return -1
|
||||
}
|
||||
|
||||
// Mixed formats: compare by actual timestamp.
|
||||
aTime := getVersionTimestampNanos(a)
|
||||
bTime := getVersionTimestampNanos(b)
|
||||
if aTime > bTime {
|
||||
return -1
|
||||
}
|
||||
if aTime < bTime {
|
||||
return 1
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
@@ -1,74 +0,0 @@
|
||||
package s3lifecycle
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"math"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
func TestGetVersionTimestamp(t *testing.T) {
|
||||
t.Run("new_format_inverted_timestamp", func(t *testing.T) {
|
||||
// Simulate a new-format version ID (inverted timestamp above threshold).
|
||||
now := time.Now()
|
||||
inverted := math.MaxInt64 - now.UnixNano()
|
||||
versionId := fmt.Sprintf("%016x", inverted) + "0000000000000000"
|
||||
|
||||
got := GetVersionTimestamp(versionId)
|
||||
// Should recover the original timestamp within 1 second.
|
||||
diff := got.Sub(now)
|
||||
if diff < -time.Second || diff > time.Second {
|
||||
t.Errorf("timestamp diff too large: %v (got %v, want ~%v)", diff, got, now)
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("old_format_raw_timestamp", func(t *testing.T) {
|
||||
// Simulate an old-format version ID (raw nanosecond timestamp below threshold).
|
||||
// Use a timestamp from 2023 which would be below threshold.
|
||||
ts := time.Date(2023, 6, 15, 12, 0, 0, 0, time.UTC)
|
||||
versionId := fmt.Sprintf("%016x", ts.UnixNano()) + "abcdef0123456789"
|
||||
|
||||
got := GetVersionTimestamp(versionId)
|
||||
if !got.Equal(ts) {
|
||||
t.Errorf("expected %v, got %v", ts, got)
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("null_version_id", func(t *testing.T) {
|
||||
got := GetVersionTimestamp("null")
|
||||
if !got.IsZero() {
|
||||
t.Errorf("expected zero time for null version, got %v", got)
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("empty_version_id", func(t *testing.T) {
|
||||
got := GetVersionTimestamp("")
|
||||
if !got.IsZero() {
|
||||
t.Errorf("expected zero time for empty version, got %v", got)
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("short_version_id", func(t *testing.T) {
|
||||
got := GetVersionTimestamp("abc")
|
||||
if !got.IsZero() {
|
||||
t.Errorf("expected zero time for short version, got %v", got)
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("high_bit_overflow_returns_zero", func(t *testing.T) {
|
||||
// Version ID with first 16 hex chars > math.MaxInt64 should return zero,
|
||||
// not a wrapped negative timestamp.
|
||||
versionId := "80000000000000000000000000000000"
|
||||
got := GetVersionTimestamp(versionId)
|
||||
if !got.IsZero() {
|
||||
t.Errorf("expected zero time for overflow version ID, got %v", got)
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("invalid_hex", func(t *testing.T) {
|
||||
got := GetVersionTimestamp("zzzzzzzzzzzzzzzz0000000000000000")
|
||||
if !got.IsZero() {
|
||||
t.Errorf("expected zero time for invalid hex, got %v", got)
|
||||
}
|
||||
})
|
||||
}
|
||||
Reference in New Issue
Block a user