glog: add gzip compression for rotated log files (#8709)

* glog: add gzip compression for rotated log files

Add opt-in gzip compression that automatically compresses log files
after rotation, reducing disk usage in long-running deployments.

- Add --log_compress flag to enable compression at startup
- Add SetCompressRotated()/IsCompressRotated() for runtime toggle
- Compress rotated files in a background goroutine (non-blocking)
- Use gzip.BestSpeed for minimal CPU overhead
- Fix .gz file cleanup: use a TrimSuffix-based approach so compressed
  files are correctly counted toward the MaxFileCount limit
- Include 6 unit tests covering normal, empty, large, and edge cases

Enabled via --log_compress flag. Default behavior unchanged.

* glog: fix compressFile to check gz/dst close errors and use atomic rename

Write to a temp file (.gz.tmp) and rename atomically to prevent
exposing partial archives. Check gz.Close() and dst.Close() errors
to avoid deleting the original log when flush fails (e.g. ENOSPC).
Use defer for robust resource cleanup.

* glog: deduplicate .log/.log.gz pairs in rotation cleanup

During concurrent compression, both foo.log and foo.log.gz can exist
simultaneously. Count them as one entry against MaxFileCount to prevent
premature eviction of rotated logs.

* glog: use portable temp path in TestCompressFile_NonExistent

Replace hardcoded /nonexistent/path with t.TempDir() for portability.

---------

Co-authored-by: Copilot <copilot@github.com>
This commit is contained in:
JARDEL ALVES
2026-03-20 01:35:08 -03:00
committed by GitHub
parent 51ec0d2122
commit 5f2244d25d
4 changed files with 270 additions and 8 deletions

View File

@@ -64,6 +64,10 @@ var logMaxFiles = flag.Int("log_max_files", 5, "Maximum number of log files to k
// The default is 168 hours (7 days). Set to 0 to disable time-based rotation.
var logRotateHours = flag.Int("log_rotate_hours", 168, "Rotate log files after this many hours (default: 168 = 7 days, 0 = disabled)")
// logCompress enables gzip compression of rotated log files.
// Compressed files get a .gz suffix. Compression runs in the background.
var logCompress = flag.Bool("log_compress", false, "Gzip-compress rotated log files to save disk space")
func createLogDirs() {
// Apply flag values now that flags have been parsed.
if *logMaxSizeMB > 0 {
@@ -73,6 +77,10 @@ func createLogDirs() {
MaxFileCount = *logMaxFiles
}
if *logCompress {
SetCompressRotated(true)
}
if *logDir != "" {
logDirs = append(logDirs, *logDir)
} else {
@@ -160,21 +168,31 @@ func create(tag string, t time.Time) (f *os.File, filename string, err error) {
var lastErr error
for _, dir := range logDirs {
// remove old logs
// remove old logs (including .gz compressed rotated files)
// Deduplicate .log/.log.gz pairs so concurrent compression
// doesn't cause double-counting against MaxFileCount.
entries, _ := os.ReadDir(dir)
var previousLogs []string
previousLogs := make(map[string][]string) // bare name -> actual file names
for _, entry := range entries {
if strings.HasPrefix(entry.Name(), logPrefix) {
previousLogs = append(previousLogs, entry.Name())
name := entry.Name()
bare := strings.TrimSuffix(name, ".gz")
if strings.HasPrefix(bare, logPrefix) {
previousLogs[bare] = append(previousLogs[bare], name)
}
}
if len(previousLogs) >= MaxFileCount {
sort.Strings(previousLogs)
for i, entry := range previousLogs {
if i > len(previousLogs)-MaxFileCount {
var keys []string
for bare := range previousLogs {
keys = append(keys, bare)
}
sort.Strings(keys)
for i, bare := range keys {
if i > len(keys)-MaxFileCount {
break
}
os.Remove(filepath.Join(dir, entry))
for _, name := range previousLogs[bare] {
os.Remove(filepath.Join(dir, name))
}
}
}