convert error formatting to %w everywhere (#6995)
This commit is contained in:
@@ -310,13 +310,13 @@ func ProcessRangeRequest(r *http.Request, w http.ResponseWriter, totalSize int64
|
||||
glog.Errorf("ProcessRangeRequest: %v", err)
|
||||
w.Header().Del("Content-Length")
|
||||
http.Error(w, err.Error(), http.StatusInternalServerError)
|
||||
return fmt.Errorf("ProcessRangeRequest: %v", err)
|
||||
return fmt.Errorf("ProcessRangeRequest: %w", err)
|
||||
}
|
||||
if err = writeFn(bufferedWriter); err != nil {
|
||||
glog.Errorf("ProcessRangeRequest: %v", err)
|
||||
w.Header().Del("Content-Length")
|
||||
http.Error(w, err.Error(), http.StatusInternalServerError)
|
||||
return fmt.Errorf("ProcessRangeRequest: %v", err)
|
||||
return fmt.Errorf("ProcessRangeRequest: %w", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@@ -327,7 +327,7 @@ func ProcessRangeRequest(r *http.Request, w http.ResponseWriter, totalSize int64
|
||||
if err != nil {
|
||||
glog.Errorf("ProcessRangeRequest headers: %+v err: %v", w.Header(), err)
|
||||
http.Error(w, err.Error(), http.StatusRequestedRangeNotSatisfiable)
|
||||
return fmt.Errorf("ProcessRangeRequest header: %v", err)
|
||||
return fmt.Errorf("ProcessRangeRequest header: %w", err)
|
||||
}
|
||||
if sumRangesSize(ranges) > totalSize {
|
||||
// The total number of bytes in all the ranges
|
||||
@@ -360,7 +360,7 @@ func ProcessRangeRequest(r *http.Request, w http.ResponseWriter, totalSize int64
|
||||
glog.Errorf("ProcessRangeRequest range[0]: %+v err: %v", w.Header(), err)
|
||||
w.Header().Del("Content-Length")
|
||||
http.Error(w, err.Error(), http.StatusInternalServerError)
|
||||
return fmt.Errorf("ProcessRangeRequest: %v", err)
|
||||
return fmt.Errorf("ProcessRangeRequest: %w", err)
|
||||
}
|
||||
w.WriteHeader(http.StatusPartialContent)
|
||||
err = writeFn(bufferedWriter)
|
||||
@@ -368,7 +368,7 @@ func ProcessRangeRequest(r *http.Request, w http.ResponseWriter, totalSize int64
|
||||
glog.Errorf("ProcessRangeRequest range[0]: %+v err: %v", w.Header(), err)
|
||||
w.Header().Del("Content-Length")
|
||||
http.Error(w, err.Error(), http.StatusInternalServerError)
|
||||
return fmt.Errorf("ProcessRangeRequest range[0]: %v", err)
|
||||
return fmt.Errorf("ProcessRangeRequest range[0]: %w", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@@ -379,7 +379,7 @@ func ProcessRangeRequest(r *http.Request, w http.ResponseWriter, totalSize int64
|
||||
for i, ra := range ranges {
|
||||
if ra.start > totalSize {
|
||||
http.Error(w, "Out of Range", http.StatusRequestedRangeNotSatisfiable)
|
||||
return fmt.Errorf("out of range: %v", err)
|
||||
return fmt.Errorf("out of range: %w", err)
|
||||
}
|
||||
writeFn, err := prepareWriteFn(ra.start, ra.length)
|
||||
if err != nil {
|
||||
@@ -422,7 +422,7 @@ func ProcessRangeRequest(r *http.Request, w http.ResponseWriter, totalSize int64
|
||||
if _, err := io.CopyN(bufferedWriter, sendContent, sendSize); err != nil {
|
||||
glog.Errorf("ProcessRangeRequest err: %v", err)
|
||||
http.Error(w, "Internal Error", http.StatusInternalServerError)
|
||||
return fmt.Errorf("ProcessRangeRequest err: %v", err)
|
||||
return fmt.Errorf("ProcessRangeRequest err: %w", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -207,7 +207,7 @@ func (fs *FilerServer) cleanupChunks(ctx context.Context, fullpath string, exist
|
||||
if existingEntry != nil {
|
||||
garbage, err = filer.MinusChunks(ctx, fs.lookupFileId, existingEntry.GetChunks(), newEntry.GetChunks())
|
||||
if err != nil {
|
||||
return newEntry.GetChunks(), nil, fmt.Errorf("MinusChunks: %v", err)
|
||||
return newEntry.GetChunks(), nil, fmt.Errorf("MinusChunks: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -59,7 +59,7 @@ func (fs *FilerServer) SubscribeMetadata(req *filer_pb.SubscribeMetadataRequest,
|
||||
|
||||
processedTsNs, isDone, readPersistedLogErr = fs.filer.ReadPersistedLogBuffer(lastReadTime, req.UntilNs, eachLogEntryFn)
|
||||
if readPersistedLogErr != nil {
|
||||
return fmt.Errorf("reading from persisted logs: %v", readPersistedLogErr)
|
||||
return fmt.Errorf("reading from persisted logs: %w", readPersistedLogErr)
|
||||
}
|
||||
if isDone {
|
||||
return nil
|
||||
@@ -73,7 +73,7 @@ func (fs *FilerServer) SubscribeMetadata(req *filer_pb.SubscribeMetadataRequest,
|
||||
position := log_buffer.NewMessagePosition(nextDayTs, -2)
|
||||
found, err := fs.filer.HasPersistedLogFiles(position)
|
||||
if err != nil {
|
||||
return fmt.Errorf("checking persisted log files: %v", err)
|
||||
return fmt.Errorf("checking persisted log files: %w", err)
|
||||
}
|
||||
if found {
|
||||
lastReadTime = position
|
||||
@@ -157,7 +157,7 @@ func (fs *FilerServer) SubscribeLocalMetadata(req *filer_pb.SubscribeMetadataReq
|
||||
processedTsNs, isDone, readPersistedLogErr = fs.filer.ReadPersistedLogBuffer(lastReadTime, req.UntilNs, eachLogEntryFn)
|
||||
if readPersistedLogErr != nil {
|
||||
glog.V(0).Infof("read on disk %v local subscribe %s from %+v: %v", clientName, req.PathPrefix, lastReadTime, readPersistedLogErr)
|
||||
return fmt.Errorf("reading from persisted logs: %v", readPersistedLogErr)
|
||||
return fmt.Errorf("reading from persisted logs: %w", readPersistedLogErr)
|
||||
}
|
||||
if isDone {
|
||||
return nil
|
||||
@@ -219,7 +219,7 @@ func eachLogEntryFn(eachEventNotificationFn func(dirPath string, eventNotificati
|
||||
event := &filer_pb.SubscribeMetadataResponse{}
|
||||
if err := proto.Unmarshal(logEntry.Data, event); err != nil {
|
||||
glog.Errorf("unexpected unmarshal filer_pb.SubscribeMetadataResponse: %v", err)
|
||||
return false, fmt.Errorf("unexpected unmarshal filer_pb.SubscribeMetadataResponse: %v", err)
|
||||
return false, fmt.Errorf("unexpected unmarshal filer_pb.SubscribeMetadataResponse: %w", err)
|
||||
}
|
||||
|
||||
if err := eachEventNotificationFn(event.Directory, event.EventNotification, event.TsNs); err != nil {
|
||||
|
||||
@@ -40,7 +40,7 @@ func (fs *FilerServer) TraverseBfsMetadata(req *filer_pb.TraverseBfsMetadataRequ
|
||||
Directory: parent,
|
||||
Entry: item.ToProtoEntry(),
|
||||
}); err != nil {
|
||||
return fmt.Errorf("send traverse bfs metadata response: %v", err)
|
||||
return fmt.Errorf("send traverse bfs metadata response: %w", err)
|
||||
}
|
||||
|
||||
if !item.IsDirectory() {
|
||||
|
||||
@@ -56,12 +56,12 @@ func (fs *FilerServer) encrypt(ctx context.Context, w http.ResponseWriter, r *ht
|
||||
|
||||
uploader, uploaderErr := operation.NewUploader()
|
||||
if uploaderErr != nil {
|
||||
return nil, fmt.Errorf("uploader initialization error: %v", uploaderErr)
|
||||
return nil, fmt.Errorf("uploader initialization error: %w", uploaderErr)
|
||||
}
|
||||
|
||||
uploadResult, uploadError := uploader.UploadData(ctx, uncompressedData, uploadOption)
|
||||
if uploadError != nil {
|
||||
return nil, fmt.Errorf("upload to volume server: %v", uploadError)
|
||||
return nil, fmt.Errorf("upload to volume server: %w", uploadError)
|
||||
}
|
||||
|
||||
// Save to chunk manifest structure
|
||||
|
||||
@@ -139,7 +139,7 @@ func NewHashicorpRaftServer(option *RaftServerOption) (*RaftServer, error) {
|
||||
}
|
||||
|
||||
if err := raft.ValidateConfig(c); err != nil {
|
||||
return nil, fmt.Errorf(`raft.ValidateConfig: %v`, err)
|
||||
return nil, fmt.Errorf("raft.ValidateConfig: %w", err)
|
||||
}
|
||||
|
||||
if option.RaftBootstrap {
|
||||
@@ -154,17 +154,17 @@ func NewHashicorpRaftServer(option *RaftServerOption) (*RaftServer, error) {
|
||||
|
||||
ldb, err := boltdb.NewBoltStore(filepath.Join(baseDir, ldbFile))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf(`boltdb.NewBoltStore(%q): %v`, filepath.Join(baseDir, "logs.dat"), err)
|
||||
return nil, fmt.Errorf("boltdb.NewBoltStore(%q): %v", filepath.Join(baseDir, "logs.dat"), err)
|
||||
}
|
||||
|
||||
sdb, err := boltdb.NewBoltStore(filepath.Join(baseDir, sdbFile))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf(`boltdb.NewBoltStore(%q): %v`, filepath.Join(baseDir, "stable.dat"), err)
|
||||
return nil, fmt.Errorf("boltdb.NewBoltStore(%q): %v", filepath.Join(baseDir, "stable.dat"), err)
|
||||
}
|
||||
|
||||
fss, err := raft.NewFileSnapshotStore(baseDir, 3, os.Stderr)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf(`raft.NewFileSnapshotStore(%q, ...): %v`, baseDir, err)
|
||||
return nil, fmt.Errorf("raft.NewFileSnapshotStore(%q, ...): %v", baseDir, err)
|
||||
}
|
||||
|
||||
s.TransportManager = transport.New(raft.ServerAddress(s.serverAddr), []grpc.DialOption{option.GrpcDialOption})
|
||||
@@ -172,7 +172,7 @@ func NewHashicorpRaftServer(option *RaftServerOption) (*RaftServer, error) {
|
||||
stateMachine := StateMachine{topo: option.Topo}
|
||||
s.RaftHashicorp, err = raft.NewRaft(c, &stateMachine, ldb, sdb, fss, s.TransportManager.Transport())
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("raft.NewRaft: %v", err)
|
||||
return nil, fmt.Errorf("raft.NewRaft: %w", err)
|
||||
}
|
||||
|
||||
updatePeers := false
|
||||
@@ -185,7 +185,7 @@ func NewHashicorpRaftServer(option *RaftServerOption) (*RaftServer, error) {
|
||||
time.Sleep(timeSleep)
|
||||
f := s.RaftHashicorp.BootstrapCluster(cfg)
|
||||
if err := f.Error(); err != nil {
|
||||
return nil, fmt.Errorf("raft.Raft.BootstrapCluster: %v", err)
|
||||
return nil, fmt.Errorf("raft.Raft.BootstrapCluster: %w", err)
|
||||
}
|
||||
} else {
|
||||
updatePeers = true
|
||||
@@ -214,12 +214,12 @@ func NewHashicorpRaftServer(option *RaftServerOption) (*RaftServer, error) {
|
||||
if sink, err := prometheus.NewPrometheusSinkFrom(prometheus.PrometheusOpts{
|
||||
Registerer: stats.Gather,
|
||||
}); err != nil {
|
||||
return nil, fmt.Errorf("NewPrometheusSink: %v", err)
|
||||
return nil, fmt.Errorf("NewPrometheusSink: %w", err)
|
||||
} else {
|
||||
metricsConf := metrics.DefaultConfig(stats.Namespace)
|
||||
metricsConf.EnableRuntimeMetrics = false
|
||||
if _, err = metrics.NewGlobal(metricsConf, sink); err != nil {
|
||||
return nil, fmt.Errorf("metrics.NewGlobal: %v", err)
|
||||
return nil, fmt.Errorf("metrics.NewGlobal: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -56,7 +56,7 @@ func (vs *VolumeServer) VolumeCopy(req *volume_server_pb.VolumeCopyRequest, stre
|
||||
VolumeId: req.VolumeId,
|
||||
})
|
||||
if nil != err {
|
||||
return fmt.Errorf("read volume file status failed, %v", err)
|
||||
return fmt.Errorf("read volume file status failed, %w", err)
|
||||
}
|
||||
|
||||
diskType := volFileInfoResp.DiskType
|
||||
@@ -247,7 +247,7 @@ func checkCopyFiles(originFileInf *volume_server_pb.ReadVolumeFileStatusResponse
|
||||
|
||||
stat, err = os.Stat(datFileName)
|
||||
if err != nil {
|
||||
return fmt.Errorf("get dat file info failed, %v", err)
|
||||
return fmt.Errorf("get dat file info failed, %w", err)
|
||||
}
|
||||
if originFileInf.DatFileSize != uint64(stat.Size()) {
|
||||
return fmt.Errorf("the dat file size [%v] is not same as origin file size [%v]",
|
||||
|
||||
@@ -408,7 +408,7 @@ func (vs *VolumeServer) VolumeEcBlobDelete(ctx context.Context, req *volume_serv
|
||||
|
||||
_, size, _, err := localEcVolume.LocateEcShardNeedle(types.NeedleId(req.FileKey), needle.Version(req.Version))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("locate in local ec volume: %v", err)
|
||||
return nil, fmt.Errorf("locate in local ec volume: %w", err)
|
||||
}
|
||||
if size.IsDeleted() {
|
||||
return resp, nil
|
||||
|
||||
@@ -24,7 +24,7 @@ func (vs *VolumeServer) FetchAndWriteNeedle(ctx context.Context, req *volume_ser
|
||||
|
||||
client, getClientErr := remote_storage.GetRemoteStorage(remoteConf)
|
||||
if getClientErr != nil {
|
||||
return nil, fmt.Errorf("get remote client: %v", getClientErr)
|
||||
return nil, fmt.Errorf("get remote client: %w", getClientErr)
|
||||
}
|
||||
|
||||
remoteStorageLocation := req.RemoteLocation
|
||||
|
||||
@@ -31,7 +31,7 @@ func (vs *VolumeServer) VolumeTailSender(req *volume_server_pb.VolumeTailSenderR
|
||||
lastProcessedTimestampNs, err := sendNeedlesSince(stream, v, lastTimestampNs)
|
||||
if err != nil {
|
||||
glog.Infof("sendNeedlesSince: %v", err)
|
||||
return fmt.Errorf("streamFollow: %v", err)
|
||||
return fmt.Errorf("streamFollow: %w", err)
|
||||
}
|
||||
time.Sleep(2 * time.Second)
|
||||
|
||||
|
||||
@@ -144,7 +144,7 @@ func writeDeleteResult(err error, count int64, w http.ResponseWriter, r *http.Re
|
||||
m["size"] = count
|
||||
writeJsonQuiet(w, r, http.StatusAccepted, m)
|
||||
} else {
|
||||
writeJsonError(w, r, http.StatusInternalServerError, fmt.Errorf("Deletion Failed: %v", err))
|
||||
writeJsonError(w, r, http.StatusInternalServerError, fmt.Errorf("Deletion Failed: %w", err))
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -407,7 +407,7 @@ func (f *WebDavFile) saveDataAsChunk(reader io.Reader, name string, offset int64
|
||||
uploader, uploaderErr := operation.NewUploader()
|
||||
if uploaderErr != nil {
|
||||
glog.V(0).Infof("upload data %v: %v", f.name, uploaderErr)
|
||||
return nil, fmt.Errorf("upload data: %v", uploaderErr)
|
||||
return nil, fmt.Errorf("upload data: %w", uploaderErr)
|
||||
}
|
||||
|
||||
fileId, uploadResult, flushErr, _ := uploader.UploadWithRetry(
|
||||
@@ -434,7 +434,7 @@ func (f *WebDavFile) saveDataAsChunk(reader io.Reader, name string, offset int64
|
||||
|
||||
if flushErr != nil {
|
||||
glog.V(0).Infof("upload data %v: %v", f.name, flushErr)
|
||||
return nil, fmt.Errorf("upload data: %v", flushErr)
|
||||
return nil, fmt.Errorf("upload data: %w", flushErr)
|
||||
}
|
||||
if uploadResult.Error != "" {
|
||||
glog.V(0).Infof("upload failure %v: %v", f.name, flushErr)
|
||||
|
||||
Reference in New Issue
Block a user