fix: honor SSE-C chunk offsets in decryption for large chunked uploads (#8216)

* fix: honor SSE-C chunk offsets in decryption for large chunked uploads

Fixes issue #8215 where SSE-C decryption for large objects could corrupt
data by ignoring per-chunk PartOffset values.

Changes:
- Add TestSSECLargeObjectChunkReassembly unit test to verify correct
  decryption of 19MB object split into 8MB chunks using PartOffset
- Update decryptSSECChunkView and createMultipartSSECDecryptedReaderDirect
  to extract PartOffset from SSE-C metadata and pass to
  CreateSSECDecryptedReaderWithOffset for offset-aware decryption
- Fix createCTRStreamWithOffset to use calculateIVWithOffset for proper
  block-aligned counter advancement, matching SSE-KMS/S3 behavior
- Update comments to clarify SSE-C IV handling uses per-chunk offsets
  (unlike base IV approach used by KMS/S3)

All tests pass: go test ./weed/s3api ✓

* fix: close chunkReader on error paths in createMultipartSSECDecryptedReader

Address resource leak issue reported in PR #8216: ensure chunkReader is
properly closed before returning on all error paths, including:
- DeserializeSSECMetadata failures
- IV decoding errors
- Invalid PartOffset values
- SSE-C reader creation failures
- Missing per-chunk metadata

This prevents leaking network connections and file handles during
SSE-C multipart decryption error scenarios.

* docs: clarify SSE-C IV handling in decryptSSECChunkView comment

Replace misleading warning 'Do NOT call calculateIVWithOffset' with
accurate explanation that:
- CreateSSECDecryptedReaderWithOffset internally uses calculateIVWithOffset
  to advance the CTR counter to reach PartOffset
- calculateIVWithOffset is applied only to the per-part IV, NOT to derive
  a global base IV for all parts
- This differs fundamentally from SSE-KMS/SSE-S3 which use base IV +
  calculateIVWithOffset(ChunkOffset)

This clarifies the IV advancement mechanism while contrasting it with
the base IV approach used by other encryption schemes.
This commit is contained in:
Chris Lu
2026-02-04 22:57:41 -08:00
committed by GitHub
parent 19c18d827a
commit c2bfd7b524
3 changed files with 95 additions and 33 deletions

View File

@@ -422,6 +422,58 @@ func TestMultipartSSEMixedScenarios(t *testing.T) {
})
}
// TestSSECLargeObjectChunkReassembly verifies that a large SSE-C object,
// split into putToFiler-sized chunks, decrypts back to the original
// plaintext when each chunk is decrypted with its own absolute PartOffset
// via CreateSSECDecryptedReaderWithOffset.
func TestSSECLargeObjectChunkReassembly(t *testing.T) {
	testKey := GenerateTestSSECKey(1)
	sseKey := &SSECustomerKey{
		Algorithm: "AES256",
		Key:       testKey.Key,
		KeyMD5:    testKey.KeyMD5,
	}

	// 19MB total: two full 8MB chunks plus a 3MB tail, matching the
	// chunk size used by putToFiler for large uploads.
	const chunkSize = 8 * 1024 * 1024 // matches putToFiler chunk size
	totalSize := chunkSize*2 + 3*1024*1024

	original := make([]byte, totalSize)
	for idx := range original {
		original[idx] = byte(idx % 251) // 251 is prime, so the pattern doesn't align with chunk boundaries
	}

	encReader, iv, err := CreateSSECEncryptedReader(bytes.NewReader(original), sseKey)
	if err != nil {
		t.Fatalf("Failed to create encrypted reader: %v", err)
	}
	ciphertext, err := io.ReadAll(encReader)
	if err != nil {
		t.Fatalf("Failed to read encrypted data: %v", err)
	}

	// Decrypt chunk by chunk, handing each chunk's absolute offset to the
	// offset-aware reader, then reassemble and compare against the original.
	var rebuilt bytes.Buffer
	for start := int64(0); start < int64(len(ciphertext)); {
		stop := start + chunkSize
		if limit := int64(len(ciphertext)); stop > limit {
			stop = limit
		}

		// Each chunk receives its own copy of the object IV; the reader is
		// expected to advance the CTR counter internally based on the offset.
		ivCopy := append([]byte(nil), iv...)
		chunk := bytes.NewReader(ciphertext[start:stop])
		decReader, decErr := CreateSSECDecryptedReaderWithOffset(chunk, sseKey, ivCopy, uint64(start))
		if decErr != nil {
			t.Fatalf("Failed to create decrypted reader for offset %d: %v", start, decErr)
		}
		plainChunk, decErr := io.ReadAll(decReader)
		if decErr != nil {
			t.Fatalf("Failed to read decrypted chunk at offset %d: %v", start, decErr)
		}
		rebuilt.Write(plainChunk)
		start = stop
	}

	if !bytes.Equal(rebuilt.Bytes(), original) {
		t.Fatalf("Reconstructed data mismatch: expected %d bytes, got %d", len(original), rebuilt.Len())
	}
}
// TestMultipartSSEPerformance tests performance characteristics of SSE with multipart
func TestMultipartSSEPerformance(t *testing.T) {
if testing.Short() {