convert error formatting to %w everywhere (#6995)

This commit is contained in:
Chris Lu
2025-07-16 23:39:27 -07:00
committed by GitHub
parent a524b4f485
commit 69553e5ba6
174 changed files with 524 additions and 524 deletions

View File

@@ -121,12 +121,12 @@ func (l *DiskLocation) loadEcShards(shards []string, collection string, vid need
for _, shard := range shards {
shardId, err := strconv.ParseInt(path.Ext(shard)[3:], 10, 64)
if err != nil {
return fmt.Errorf("failed to parse ec shard name %v: %v", shard, err)
return fmt.Errorf("failed to parse ec shard name %v: %w", shard, err)
}
_, err = l.LoadEcShard(collection, vid, erasure_coding.ShardId(shardId))
if err != nil {
return fmt.Errorf("failed to load ec shard %v: %v", shard, err)
return fmt.Errorf("failed to load ec shard %v: %w", shard, err)
}
}

View File

@@ -31,12 +31,12 @@ func WriteSortedFileFromIdx(baseFileName string, ext string) (e error) {
defer nm.Close()
}
if err != nil {
return fmt.Errorf("readNeedleMap: %v", err)
return fmt.Errorf("readNeedleMap: %w", err)
}
ecxFile, err := os.OpenFile(baseFileName+ext, os.O_TRUNC|os.O_CREATE|os.O_WRONLY, 0644)
if err != nil {
return fmt.Errorf("failed to open ecx file: %v", err)
return fmt.Errorf("failed to open ecx file: %w", err)
}
defer ecxFile.Close()
@@ -47,7 +47,7 @@ func WriteSortedFileFromIdx(baseFileName string, ext string) (e error) {
})
if err != nil {
return fmt.Errorf("failed to visit idx file: %v", err)
return fmt.Errorf("failed to visit idx file: %w", err)
}
return nil
@@ -69,19 +69,19 @@ func ToExt(ecIndex int) string {
func generateEcFiles(baseFileName string, bufferSize int, largeBlockSize int64, smallBlockSize int64) error {
file, err := os.OpenFile(baseFileName+".dat", os.O_RDONLY, 0)
if err != nil {
return fmt.Errorf("failed to open dat file: %v", err)
return fmt.Errorf("failed to open dat file: %w", err)
}
defer file.Close()
fi, err := file.Stat()
if err != nil {
return fmt.Errorf("failed to stat dat file: %v", err)
return fmt.Errorf("failed to stat dat file: %w", err)
}
glog.V(0).Infof("encodeDatFile %s.dat size:%d", baseFileName, fi.Size())
err = encodeDatFile(fi.Size(), baseFileName, bufferSize, largeBlockSize, file, smallBlockSize)
if err != nil {
return fmt.Errorf("encodeDatFile: %v", err)
return fmt.Errorf("encodeDatFile: %w", err)
}
return nil
}
@@ -112,7 +112,7 @@ func generateMissingEcFiles(baseFileName string, bufferSize int, largeBlockSize
err = rebuildEcFiles(shardHasData, inputFiles, outputFiles)
if err != nil {
return nil, fmt.Errorf("rebuildEcFiles: %v", err)
return nil, fmt.Errorf("rebuildEcFiles: %w", err)
}
return
}
@@ -201,7 +201,7 @@ func encodeDatFile(remainingSize int64, baseFileName string, bufferSize int, lar
enc, err := reedsolomon.New(DataShardsCount, ParityShardsCount)
if err != nil {
return fmt.Errorf("failed to create encoder: %v", err)
return fmt.Errorf("failed to create encoder: %w", err)
}
buffers := make([][]byte, TotalShardsCount)
@@ -218,7 +218,7 @@ func encodeDatFile(remainingSize int64, baseFileName string, bufferSize int, lar
for remainingSize > largeBlockSize*DataShardsCount {
err = encodeData(file, enc, processedSize, largeBlockSize, buffers, outputs)
if err != nil {
return fmt.Errorf("failed to encode large chunk data: %v", err)
return fmt.Errorf("failed to encode large chunk data: %w", err)
}
remainingSize -= largeBlockSize * DataShardsCount
processedSize += largeBlockSize * DataShardsCount
@@ -226,7 +226,7 @@ func encodeDatFile(remainingSize int64, baseFileName string, bufferSize int, lar
for remainingSize > 0 {
err = encodeData(file, enc, processedSize, smallBlockSize, buffers, outputs)
if err != nil {
return fmt.Errorf("failed to encode small chunk data: %v", err)
return fmt.Errorf("failed to encode small chunk data: %w", err)
}
remainingSize -= smallBlockSize * DataShardsCount
processedSize += smallBlockSize * DataShardsCount
@@ -238,7 +238,7 @@ func rebuildEcFiles(shardHasData []bool, inputFiles []*os.File, outputFiles []*o
enc, err := reedsolomon.New(DataShardsCount, ParityShardsCount)
if err != nil {
return fmt.Errorf("failed to create encoder: %v", err)
return fmt.Errorf("failed to create encoder: %w", err)
}
buffers := make([][]byte, TotalShardsCount)
@@ -273,7 +273,7 @@ func rebuildEcFiles(shardHasData []bool, inputFiles []*os.File, outputFiles []*o
// encode the data
err = enc.Reconstruct(buffers)
if err != nil {
return fmt.Errorf("reconstruct: %v", err)
return fmt.Errorf("reconstruct: %w", err)
}
// write the data to output files

View File

@@ -220,7 +220,7 @@ func (ev *EcVolume) LocateEcShardNeedle(needleId types.NeedleId, version needle.
// find the needle from ecx file
offset, size, err = ev.FindNeedleFromEcx(needleId)
if err != nil {
return types.Offset{}, 0, nil, fmt.Errorf("FindNeedleFromEcx: %v", err)
return types.Offset{}, 0, nil, fmt.Errorf("FindNeedleFromEcx: %w", err)
}
intervals = ev.LocateEcShardNeedleInterval(version, offset.ToActualOffset(), types.Size(needle.GetActualSize(size, version)))

View File

@@ -15,7 +15,7 @@ var (
types.SizeToBytes(b, types.TombstoneFileSize)
n, err := file.WriteAt(b, offset+types.NeedleIdSize+types.OffsetSize)
if err != nil {
return fmt.Errorf("sorted needle write error: %v", err)
return fmt.Errorf("sorted needle write error: %w", err)
}
if n != types.SizeSize {
return fmt.Errorf("sorted needle written %d bytes, expecting %d", n, types.SizeSize)
@@ -56,7 +56,7 @@ func RebuildEcxFile(baseFileName string) error {
ecxFile, err := os.OpenFile(baseFileName+".ecx", os.O_RDWR, 0644)
if err != nil {
return fmt.Errorf("rebuild: failed to open ecx file: %v", err)
return fmt.Errorf("rebuild: failed to open ecx file: %w", err)
}
defer ecxFile.Close()
@@ -69,7 +69,7 @@ func RebuildEcxFile(baseFileName string) error {
ecjFile, err := os.OpenFile(baseFileName+".ecj", os.O_RDWR, 0644)
if err != nil {
return fmt.Errorf("rebuild: failed to open ecj file: %v", err)
return fmt.Errorf("rebuild: failed to open ecj file: %w", err)
}
buf := make([]byte, types.NeedleIdSize)

View File

@@ -160,11 +160,11 @@ func ParseNeedleIdCookie(key_hash_string string) (NeedleId, Cookie, error) {
split := len(key_hash_string) - CookieSize*2
needleId, err := ParseNeedleId(key_hash_string[:split])
if err != nil {
return NeedleIdEmpty, 0, fmt.Errorf("Parse needleId error: %v", err)
return NeedleIdEmpty, 0, fmt.Errorf("Parse needleId error: %w", err)
}
cookie, err := ParseCookie(key_hash_string[split:])
if err != nil {
return NeedleIdEmpty, 0, fmt.Errorf("Parse cookie error: %v", err)
return NeedleIdEmpty, 0, fmt.Errorf("Parse cookie error: %w", err)
}
return needleId, cookie, nil
}

View File

@@ -38,7 +38,7 @@ func (cm *MemDb) Set(key NeedleId, offset Offset, size Size) error {
bytes := ToBytes(key, offset, size)
if err := cm.db.Put(bytes[0:NeedleIdSize], bytes[NeedleIdSize:NeedleIdSize+OffsetSize+SizeSize], nil); err != nil {
return fmt.Errorf("failed to write temp leveldb: %v", err)
return fmt.Errorf("failed to write temp leveldb: %w", err)
}
return nil
}

View File

@@ -194,7 +194,7 @@ func setWatermark(db *leveldb.DB, watermark uint64) error {
var wmBytes = make([]byte, 8)
util.Uint64toBytes(wmBytes, watermark)
if err := db.Put(watermarkKey, wmBytes, nil); err != nil {
return fmt.Errorf("failed to setWatermark: %v", err)
return fmt.Errorf("failed to setWatermark: %w", err)
}
return nil
}
@@ -204,7 +204,7 @@ func levelDbWrite(db *leveldb.DB, key NeedleId, offset Offset, size Size, update
bytes := needle_map.ToBytes(key, offset, size)
if err := db.Put(bytes[0:NeedleIdSize], bytes[NeedleIdSize:NeedleIdSize+OffsetSize+SizeSize], nil); err != nil {
return fmt.Errorf("failed to write leveldb: %v", err)
return fmt.Errorf("failed to write leveldb: %w", err)
}
// set watermark
if updateWatermark {

View File

@@ -140,7 +140,7 @@ func (s *Store) ReadEcShardNeedle(vid needle.VolumeId, n *needle.Needle, onReadS
offset, size, intervals, err := localEcVolume.LocateEcShardNeedle(n.Id, localEcVolume.Version)
if err != nil {
return 0, fmt.Errorf("locate in local ec volume: %v", err)
return 0, fmt.Errorf("locate in local ec volume: %w", err)
}
if size.IsDeleted() {
return 0, ErrorDeleted
@@ -157,7 +157,7 @@ func (s *Store) ReadEcShardNeedle(vid needle.VolumeId, n *needle.Needle, onReadS
}
bytes, isDeleted, err := s.readEcShardIntervals(vid, n.Id, localEcVolume, intervals)
if err != nil {
return 0, fmt.Errorf("ReadEcShardIntervals: %v", err)
return 0, fmt.Errorf("ReadEcShardIntervals: %w", err)
}
if isDeleted {
return 0, ErrorDeleted
@@ -165,7 +165,7 @@ func (s *Store) ReadEcShardNeedle(vid needle.VolumeId, n *needle.Needle, onReadS
err = n.ReadBytes(bytes, offset.ToActualOffset(), size, localEcVolume.Version)
if err != nil {
return 0, fmt.Errorf("readbytes: %v", err)
return 0, fmt.Errorf("readbytes: %w", err)
}
return len(bytes), nil
@@ -345,7 +345,7 @@ func (s *Store) recoverOneRemoteEcShardInterval(needleId types.NeedleId, ecVolum
enc, err := reedsolomon.New(erasure_coding.DataShardsCount, erasure_coding.ParityShardsCount)
if err != nil {
return 0, false, fmt.Errorf("failed to create encoder: %v", err)
return 0, false, fmt.Errorf("failed to create encoder: %w", err)
}
bufs := make([][]byte, erasure_coding.TotalShardsCount)

View File

@@ -142,7 +142,7 @@ func verifyDeletedNeedleIntegrity(datFile backend.BackendStorageFile, v needle.V
var fileSize int64
fileSize, _, err = datFile.GetStat()
if err != nil {
return 0, fmt.Errorf("GetStat: %v", err)
return 0, fmt.Errorf("GetStat: %w", err)
}
if err = n.ReadData(datFile, fileSize-size, Size(0), v); err != nil {
return n.AppendAtNs, fmt.Errorf("read data [%d,%d) : %v", fileSize-size, size, err)

View File

@@ -46,7 +46,7 @@ func MaybeLoadVolumeInfo(fileName string) (volumeInfo *volume_server_pb.VolumeIn
if err = jsonpb.Unmarshal(fileData, volumeInfo); err != nil {
if oldVersionErr := tryOldVersionVolumeInfo(fileData, volumeInfo); oldVersionErr != nil {
glog.Warningf("unmarshal error: %v oldFormat: %v", err, oldVersionErr)
err = fmt.Errorf("unmarshal error: %v oldFormat: %v", err, oldVersionErr)
err = fmt.Errorf("unmarshal error: %w oldFormat: %v", err, oldVersionErr)
return
} else {
err = nil
@@ -89,7 +89,7 @@ func SaveVolumeInfo(fileName string, volumeInfo *volume_server_pb.VolumeInfo) er
func tryOldVersionVolumeInfo(data []byte, volumeInfo *volume_server_pb.VolumeInfo) error {
oldVersionVolumeInfo := &volume_server_pb.OldVersionVolumeInfo{}
if err := jsonpb.Unmarshal(data, oldVersionVolumeInfo); err != nil {
return fmt.Errorf("failed to unmarshal old version volume info: %v", err)
return fmt.Errorf("failed to unmarshal old version volume info: %w", err)
}
volumeInfo.Files = oldVersionVolumeInfo.Files
volumeInfo.Version = oldVersionVolumeInfo.Version

View File

@@ -53,7 +53,7 @@ func (v *Volume) load(alsoLoadIndex bool, createDatIfMissing bool, needleMapKind
v.noWriteOrDelete = false
glog.V(0).Infof("loading volume %d from remote %v", v.Id, v.volumeInfo)
if err := v.LoadRemoteFile(); err != nil {
return fmt.Errorf("load remote file %v: %v", v.volumeInfo, err)
return fmt.Errorf("load remote file %v: %w", v.volumeInfo, err)
}
alreadyHasSuperBlock = true
} else if exists, canRead, canWrite, modifiedTime, fileSize := util.CheckFile(v.FileName(".dat")); exists {

View File

@@ -176,7 +176,7 @@ func (v *Volume) readNeedleDataInto(n *needle.Needle, readOption *ReadOption, wr
return fmt.Errorf("ReadNeedleData checksum %v expected %v for Needle: %v,%v", crc, n.Checksum, v.Id, n)
}
if _, err = writer.Write(buf[0:toWrite]); err != nil {
return fmt.Errorf("ReadNeedleData write: %v", err)
return fmt.Errorf("ReadNeedleData write: %w", err)
}
}
if err != nil {
@@ -184,7 +184,7 @@ func (v *Volume) readNeedleDataInto(n *needle.Needle, readOption *ReadOption, wr
err = nil
break
}
return fmt.Errorf("ReadNeedleData: %v", err)
return fmt.Errorf("ReadNeedleData: %w", err)
}
if count <= 0 {
break
@@ -265,7 +265,7 @@ func ScanVolumeFileFrom(version needle.Version, datBackend backend.BackendStorag
}
if err != nil {
glog.V(0).Infof("visit needle error: %v", err)
return fmt.Errorf("visit needle error: %v", err)
return fmt.Errorf("visit needle error: %w", err)
}
offset += NeedleHeaderSize + rest
glog.V(4).Infof("==> new entry offset %d", offset)

View File

@@ -149,7 +149,7 @@ func (v *Volume) doWriteRequest(n *needle.Needle, checkCookie bool) (offset uint
if ok {
existingNeedle, _, _, existingNeedleReadErr := needle.ReadNeedleHeader(v.DataBackend, v.Version(), nv.Offset.ToActualOffset())
if existingNeedleReadErr != nil {
err = fmt.Errorf("reading existing needle: %v", existingNeedleReadErr)
err = fmt.Errorf("reading existing needle: %w", existingNeedleReadErr)
return
}
if n.Cookie == 0 && !checkCookie {