Clean up logs and deprecated functions (#7339)
* Reduce log verbosity * Fix use of the deprecated grpc.Dial
This commit is contained in:
@@ -486,7 +486,6 @@ func (s3a *S3ApiServer) completeMultipartUpload(r *http.Request, input *s3.Compl
|
||||
|
||||
for _, deleteEntry := range deleteEntries {
|
||||
//delete unused part data
|
||||
glog.Infof("completeMultipartUpload cleanup %s upload %s unused %s", *input.Bucket, *input.UploadId, deleteEntry.Name)
|
||||
if err = s3a.rm(uploadDirectory, deleteEntry.Name, true, true); err != nil {
|
||||
glog.Warningf("completeMultipartUpload cleanup %s upload %s unused %s : %v", *input.Bucket, *input.UploadId, deleteEntry.Name, err)
|
||||
}
|
||||
|
||||
@@ -423,10 +423,8 @@ func CreateSSEKMSDecryptedReader(r io.Reader, sseKey *SSEKMSKey) (io.Reader, err
|
||||
var iv []byte
|
||||
if sseKey.ChunkOffset > 0 {
|
||||
iv = calculateIVWithOffset(sseKey.IV, sseKey.ChunkOffset)
|
||||
glog.Infof("Using calculated IV with offset %d for chunk decryption", sseKey.ChunkOffset)
|
||||
} else {
|
||||
iv = sseKey.IV
|
||||
// glog.Infof("Using base IV for chunk decryption (offset=0)")
|
||||
}
|
||||
|
||||
// Create AES cipher with the decrypted data key
|
||||
|
||||
@@ -32,7 +32,6 @@ func NewCircuitBreaker(option *S3ApiServerOption) *CircuitBreaker {
|
||||
err := pb.WithFilerClient(false, 0, option.Filer, option.GrpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
|
||||
content, err := filer.ReadInsideFiler(client, s3_constants.CircuitBreakerConfigDir, s3_constants.CircuitBreakerConfigFile)
|
||||
if errors.Is(err, filer_pb.ErrNotFound) {
|
||||
glog.Infof("s3 circuit breaker not configured")
|
||||
return nil
|
||||
}
|
||||
if err != nil {
|
||||
@@ -42,7 +41,6 @@ func NewCircuitBreaker(option *S3ApiServerOption) *CircuitBreaker {
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
glog.Infof("s3 circuit breaker not configured correctly: %v", err)
|
||||
}
|
||||
|
||||
return cb
|
||||
|
||||
@@ -921,9 +921,6 @@ func (s3a *S3ApiServer) handleSSEKMSResponse(r *http.Request, proxyResponse *htt
|
||||
}
|
||||
}
|
||||
isMultipartSSEKMS = sseKMSChunks > 1
|
||||
|
||||
glog.Infof("SSE-KMS object detection: chunks=%d, sseKMSChunks=%d, isMultipartSSEKMS=%t",
|
||||
len(entry.GetChunks()), sseKMSChunks, isMultipartSSEKMS)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1131,10 +1128,7 @@ func (s3a *S3ApiServer) createMultipartSSEKMSDecryptedReader(r *http.Request, pr
|
||||
// Create readers for each chunk, decrypting them independently
|
||||
var readers []io.Reader
|
||||
|
||||
for i, chunk := range chunks {
|
||||
glog.Infof("Processing chunk %d/%d: fileId=%s, offset=%d, size=%d, sse_type=%d",
|
||||
i+1, len(entry.GetChunks()), chunk.GetFileIdString(), chunk.GetOffset(), chunk.GetSize(), chunk.GetSseType())
|
||||
|
||||
for _, chunk := range chunks {
|
||||
// Get this chunk's encrypted data
|
||||
chunkReader, err := s3a.createEncryptedChunkReader(chunk)
|
||||
if err != nil {
|
||||
@@ -1153,8 +1147,6 @@ func (s3a *S3ApiServer) createMultipartSSEKMSDecryptedReader(r *http.Request, pr
|
||||
} else {
|
||||
// ChunkOffset is already set from the stored metadata (PartOffset)
|
||||
chunkSSEKMSKey = kmsKey
|
||||
glog.Infof("Using per-chunk SSE-KMS metadata for chunk %s: keyID=%s, IV=%x, partOffset=%d",
|
||||
chunk.GetFileIdString(), kmsKey.KeyID, kmsKey.IV[:8], kmsKey.ChunkOffset)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1170,7 +1162,6 @@ func (s3a *S3ApiServer) createMultipartSSEKMSDecryptedReader(r *http.Request, pr
|
||||
kmsKey.ChunkOffset = chunk.GetOffset()
|
||||
chunkSSEKMSKey = kmsKey
|
||||
}
|
||||
glog.Infof("Using fallback object-level SSE-KMS metadata for chunk %s with offset %d", chunk.GetFileIdString(), chunk.GetOffset())
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1410,7 +1401,6 @@ func (s3a *S3ApiServer) createMultipartSSECDecryptedReader(r *http.Request, prox
|
||||
return nil, fmt.Errorf("failed to create SSE-C decrypted reader for chunk %s: %v", chunk.GetFileIdString(), decErr)
|
||||
}
|
||||
readers = append(readers, decryptedReader)
|
||||
glog.Infof("Created SSE-C decrypted reader for chunk %s using stored metadata", chunk.GetFileIdString())
|
||||
} else {
|
||||
return nil, fmt.Errorf("SSE-C chunk %s missing required metadata", chunk.GetFileIdString())
|
||||
}
|
||||
|
||||
@@ -1113,20 +1113,9 @@ func (s3a *S3ApiServer) downloadChunkData(srcUrl string, offset, size int64) ([]
|
||||
// copyMultipartSSECChunks handles copying multipart SSE-C objects
|
||||
// Returns chunks and destination metadata that should be applied to the destination entry
|
||||
func (s3a *S3ApiServer) copyMultipartSSECChunks(entry *filer_pb.Entry, copySourceKey *SSECustomerKey, destKey *SSECustomerKey, dstPath string) ([]*filer_pb.FileChunk, map[string][]byte, error) {
|
||||
glog.Infof("copyMultipartSSECChunks called: copySourceKey=%v, destKey=%v, path=%s", copySourceKey != nil, destKey != nil, dstPath)
|
||||
|
||||
var sourceKeyMD5, destKeyMD5 string
|
||||
if copySourceKey != nil {
|
||||
sourceKeyMD5 = copySourceKey.KeyMD5
|
||||
}
|
||||
if destKey != nil {
|
||||
destKeyMD5 = destKey.KeyMD5
|
||||
}
|
||||
glog.Infof("Key MD5 comparison: source=%s, dest=%s, equal=%t", sourceKeyMD5, destKeyMD5, sourceKeyMD5 == destKeyMD5)
|
||||
|
||||
// For multipart SSE-C, always use decrypt/reencrypt path to ensure proper metadata handling
|
||||
// The standard copyChunks() doesn't preserve SSE metadata, so we need per-chunk processing
|
||||
glog.Infof("Taking multipart SSE-C reencrypt path to preserve metadata: %s", dstPath)
|
||||
|
||||
// Different keys or key changes: decrypt and re-encrypt each chunk individually
|
||||
glog.V(2).Infof("Multipart SSE-C reencrypt copy (different keys): %s", dstPath)
|
||||
@@ -1175,11 +1164,9 @@ func (s3a *S3ApiServer) copyMultipartSSECChunks(entry *filer_pb.Entry, copySourc
|
||||
// copyMultipartSSEKMSChunks handles copying multipart SSE-KMS objects (unified with SSE-C approach)
|
||||
// Returns chunks and destination metadata that should be applied to the destination entry
|
||||
func (s3a *S3ApiServer) copyMultipartSSEKMSChunks(entry *filer_pb.Entry, destKeyID string, encryptionContext map[string]string, bucketKeyEnabled bool, dstPath, bucket string) ([]*filer_pb.FileChunk, map[string][]byte, error) {
|
||||
glog.Infof("copyMultipartSSEKMSChunks called: destKeyID=%s, path=%s", destKeyID, dstPath)
|
||||
|
||||
// For multipart SSE-KMS, always use decrypt/reencrypt path to ensure proper metadata handling
|
||||
// The standard copyChunks() doesn't preserve SSE metadata, so we need per-chunk processing
|
||||
glog.Infof("Taking multipart SSE-KMS reencrypt path to preserve metadata: %s", dstPath)
|
||||
|
||||
var dstChunks []*filer_pb.FileChunk
|
||||
|
||||
@@ -1217,7 +1204,6 @@ func (s3a *S3ApiServer) copyMultipartSSEKMSChunks(entry *filer_pb.Entry, destKey
|
||||
}
|
||||
if kmsMetadata, serErr := SerializeSSEKMSMetadata(sseKey); serErr == nil {
|
||||
dstMetadata[s3_constants.SeaweedFSSSEKMSKey] = kmsMetadata
|
||||
glog.Infof("Created object-level KMS metadata for GET compatibility")
|
||||
} else {
|
||||
glog.Errorf("Failed to serialize SSE-KMS metadata: %v", serErr)
|
||||
}
|
||||
@@ -1444,10 +1430,6 @@ func (s3a *S3ApiServer) copyMultipartSSECChunk(chunk *filer_pb.FileChunk, copySo
|
||||
// copyMultipartCrossEncryption handles all cross-encryption and decrypt-only copy scenarios
|
||||
// This unified function supports: SSE-C↔SSE-KMS, SSE-C→Plain, SSE-KMS→Plain
|
||||
func (s3a *S3ApiServer) copyMultipartCrossEncryption(entry *filer_pb.Entry, r *http.Request, state *EncryptionState, dstBucket, dstPath string) ([]*filer_pb.FileChunk, map[string][]byte, error) {
|
||||
glog.Infof("copyMultipartCrossEncryption called: %s→%s, path=%s",
|
||||
s3a.getEncryptionTypeString(state.SrcSSEC, state.SrcSSEKMS, false),
|
||||
s3a.getEncryptionTypeString(state.DstSSEC, state.DstSSEKMS, false), dstPath)
|
||||
|
||||
var dstChunks []*filer_pb.FileChunk
|
||||
|
||||
// Parse destination encryption parameters
|
||||
@@ -1462,16 +1444,13 @@ func (s3a *S3ApiServer) copyMultipartCrossEncryption(entry *filer_pb.Entry, r *h
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("failed to parse destination SSE-C headers: %w", err)
|
||||
}
|
||||
glog.Infof("Destination SSE-C: keyMD5=%s", destSSECKey.KeyMD5)
|
||||
} else if state.DstSSEKMS {
|
||||
var err error
|
||||
destKMSKeyID, destKMSEncryptionContext, destKMSBucketKeyEnabled, err = ParseSSEKMSCopyHeaders(r)
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("failed to parse destination SSE-KMS headers: %w", err)
|
||||
}
|
||||
glog.Infof("Destination SSE-KMS: keyID=%s, bucketKey=%t", destKMSKeyID, destKMSBucketKeyEnabled)
|
||||
} else {
|
||||
glog.Infof("Destination: Unencrypted")
|
||||
}
|
||||
|
||||
// Parse source encryption parameters
|
||||
@@ -1482,7 +1461,6 @@ func (s3a *S3ApiServer) copyMultipartCrossEncryption(entry *filer_pb.Entry, r *h
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("failed to parse source SSE-C headers: %w", err)
|
||||
}
|
||||
glog.Infof("Source SSE-C: keyMD5=%s", sourceSSECKey.KeyMD5)
|
||||
}
|
||||
|
||||
// Process each chunk with unified cross-encryption logic
|
||||
@@ -1529,7 +1507,6 @@ func (s3a *S3ApiServer) copyMultipartCrossEncryption(entry *filer_pb.Entry, r *h
|
||||
StoreIVInMetadata(dstMetadata, iv)
|
||||
dstMetadata[s3_constants.AmzServerSideEncryptionCustomerAlgorithm] = []byte("AES256")
|
||||
dstMetadata[s3_constants.AmzServerSideEncryptionCustomerKeyMD5] = []byte(destSSECKey.KeyMD5)
|
||||
glog.Infof("Created SSE-C object-level metadata from first chunk")
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1545,7 +1522,6 @@ func (s3a *S3ApiServer) copyMultipartCrossEncryption(entry *filer_pb.Entry, r *h
|
||||
}
|
||||
if kmsMetadata, serErr := SerializeSSEKMSMetadata(sseKey); serErr == nil {
|
||||
dstMetadata[s3_constants.SeaweedFSSSEKMSKey] = kmsMetadata
|
||||
glog.Infof("Created SSE-KMS object-level metadata")
|
||||
} else {
|
||||
glog.Errorf("Failed to serialize SSE-KMS metadata: %v", serErr)
|
||||
}
|
||||
@@ -1738,7 +1714,6 @@ func (s3a *S3ApiServer) getEncryptionTypeString(isSSEC, isSSEKMS, isSSES3 bool)
|
||||
// copyChunksWithSSEC handles SSE-C aware copying with smart fast/slow path selection
|
||||
// Returns chunks and destination metadata that should be applied to the destination entry
|
||||
func (s3a *S3ApiServer) copyChunksWithSSEC(entry *filer_pb.Entry, r *http.Request) ([]*filer_pb.FileChunk, map[string][]byte, error) {
|
||||
glog.Infof("copyChunksWithSSEC called for %s with %d chunks", r.URL.Path, len(entry.GetChunks()))
|
||||
|
||||
// Parse SSE-C headers
|
||||
copySourceKey, err := ParseSSECCopySourceHeaders(r)
|
||||
@@ -1764,8 +1739,6 @@ func (s3a *S3ApiServer) copyChunksWithSSEC(entry *filer_pb.Entry, r *http.Reques
|
||||
}
|
||||
isMultipartSSEC = sseCChunks > 1
|
||||
|
||||
glog.Infof("SSE-C copy analysis: total chunks=%d, sseC chunks=%d, isMultipart=%t", len(entry.GetChunks()), sseCChunks, isMultipartSSEC)
|
||||
|
||||
if isMultipartSSEC {
|
||||
glog.V(2).Infof("Detected multipart SSE-C object with %d encrypted chunks for copy", sseCChunks)
|
||||
return s3a.copyMultipartSSECChunks(entry, copySourceKey, destKey, r.URL.Path)
|
||||
@@ -1933,7 +1906,6 @@ func (s3a *S3ApiServer) copyChunkWithReencryption(chunk *filer_pb.FileChunk, cop
|
||||
// copyChunksWithSSEKMS handles SSE-KMS aware copying with smart fast/slow path selection
|
||||
// Returns chunks and destination metadata like SSE-C for consistency
|
||||
func (s3a *S3ApiServer) copyChunksWithSSEKMS(entry *filer_pb.Entry, r *http.Request, bucket string) ([]*filer_pb.FileChunk, map[string][]byte, error) {
|
||||
glog.Infof("copyChunksWithSSEKMS called for %s with %d chunks", r.URL.Path, len(entry.GetChunks()))
|
||||
|
||||
// Parse SSE-KMS headers from copy request
|
||||
destKeyID, encryptionContext, bucketKeyEnabled, err := ParseSSEKMSCopyHeaders(r)
|
||||
@@ -1952,8 +1924,6 @@ func (s3a *S3ApiServer) copyChunksWithSSEKMS(entry *filer_pb.Entry, r *http.Requ
|
||||
}
|
||||
isMultipartSSEKMS = sseKMSChunks > 1
|
||||
|
||||
glog.Infof("SSE-KMS copy analysis: total chunks=%d, sseKMS chunks=%d, isMultipart=%t", len(entry.GetChunks()), sseKMSChunks, isMultipartSSEKMS)
|
||||
|
||||
if isMultipartSSEKMS {
|
||||
glog.V(2).Infof("Detected multipart SSE-KMS object with %d encrypted chunks for copy", sseKMSChunks)
|
||||
return s3a.copyMultipartSSEKMSChunks(entry, destKeyID, encryptionContext, bucketKeyEnabled, r.URL.Path, bucket)
|
||||
|
||||
@@ -318,16 +318,12 @@ func (s3a *S3ApiServer) PutObjectPartHandler(w http.ResponseWriter, r *http.Requ
|
||||
// Check for SSE-C headers in the current request first
|
||||
sseCustomerAlgorithm := r.Header.Get(s3_constants.AmzServerSideEncryptionCustomerAlgorithm)
|
||||
if sseCustomerAlgorithm != "" {
|
||||
glog.Infof("PutObjectPartHandler: detected SSE-C headers, handling as SSE-C part upload")
|
||||
// SSE-C part upload - headers are already present, let putToFiler handle it
|
||||
} else {
|
||||
// No SSE-C headers, check for SSE-KMS settings from upload directory
|
||||
glog.Infof("PutObjectPartHandler: attempting to retrieve upload entry for bucket %s, uploadID %s", bucket, uploadID)
|
||||
if uploadEntry, err := s3a.getEntry(s3a.genUploadsFolder(bucket), uploadID); err == nil {
|
||||
glog.Infof("PutObjectPartHandler: upload entry found, Extended metadata: %v", uploadEntry.Extended != nil)
|
||||
if uploadEntry.Extended != nil {
|
||||
// Check if this upload uses SSE-KMS
|
||||
glog.Infof("PutObjectPartHandler: checking for SSE-KMS key in extended metadata")
|
||||
if keyIDBytes, exists := uploadEntry.Extended[s3_constants.SeaweedFSSSEKMSKeyID]; exists {
|
||||
keyID := string(keyIDBytes)
|
||||
|
||||
@@ -385,7 +381,6 @@ func (s3a *S3ApiServer) PutObjectPartHandler(w http.ResponseWriter, r *http.Requ
|
||||
// Pass the base IV to putToFiler via header
|
||||
r.Header.Set(s3_constants.SeaweedFSSSEKMSBaseIVHeader, base64.StdEncoding.EncodeToString(baseIV))
|
||||
|
||||
glog.Infof("PutObjectPartHandler: inherited SSE-KMS settings from upload %s, keyID %s - letting putToFiler handle encryption", uploadID, keyID)
|
||||
} else {
|
||||
// Check if this upload uses SSE-S3
|
||||
if err := s3a.handleSSES3MultipartHeaders(r, uploadEntry, uploadID); err != nil {
|
||||
@@ -396,7 +391,6 @@ func (s3a *S3ApiServer) PutObjectPartHandler(w http.ResponseWriter, r *http.Requ
|
||||
}
|
||||
}
|
||||
} else {
|
||||
glog.Infof("PutObjectPartHandler: failed to retrieve upload entry: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -501,9 +495,7 @@ type CompletedPart struct {
|
||||
|
||||
// handleSSES3MultipartHeaders handles SSE-S3 multipart upload header setup to reduce nesting complexity
|
||||
func (s3a *S3ApiServer) handleSSES3MultipartHeaders(r *http.Request, uploadEntry *filer_pb.Entry, uploadID string) error {
|
||||
glog.Infof("PutObjectPartHandler: checking for SSE-S3 settings in extended metadata")
|
||||
if encryptionTypeBytes, exists := uploadEntry.Extended[s3_constants.SeaweedFSSSES3Encryption]; exists && string(encryptionTypeBytes) == s3_constants.SSEAlgorithmAES256 {
|
||||
glog.Infof("PutObjectPartHandler: found SSE-S3 encryption type, setting up headers")
|
||||
|
||||
// Set SSE-S3 headers to indicate server-side encryption
|
||||
r.Header.Set(s3_constants.AmzServerSideEncryption, s3_constants.SSEAlgorithmAES256)
|
||||
@@ -538,7 +530,6 @@ func (s3a *S3ApiServer) handleSSES3MultipartHeaders(r *http.Request, uploadEntry
|
||||
// Pass the base IV to putToFiler via header for offset calculation
|
||||
r.Header.Set(s3_constants.SeaweedFSSSES3BaseIVHeader, base64.StdEncoding.EncodeToString(baseIV))
|
||||
|
||||
glog.Infof("PutObjectPartHandler: inherited SSE-S3 settings from upload %s - letting putToFiler handle encryption", uploadID)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
Reference in New Issue
Block a user