Merge branch 'master' into mq-subscribe

This commit is contained in:
chrislu
2024-04-25 23:42:19 -07:00
10 changed files with 79 additions and 62 deletions

View File

@@ -72,7 +72,9 @@ func (r *LockRing) SetSnapshot(servers []pb.ServerAddress) {
return servers[i] < servers[j]
})
r.Lock()
r.lastUpdateTime = time.Now()
r.Unlock()
r.addOneSnapshot(servers)

View File

@@ -8,7 +8,7 @@ import (
"image/png"
"io"
"github.com/disintegration/imaging"
"github.com/cognusion/imaging"
"github.com/seaweedfs/seaweedfs/weed/glog"
)

View File

@@ -8,7 +8,7 @@ import (
"image/png"
"io"
"github.com/disintegration/imaging"
"github.com/cognusion/imaging"
"github.com/seaweedfs/seaweedfs/weed/glog"

View File

@@ -402,14 +402,12 @@ func adjustAfterMove(v *master_pb.VolumeInformationMessage, volumeReplicas map[u
replica.location = &loc
for diskType, diskInfo := range fullNode.info.DiskInfos {
if diskType == v.DiskType {
diskInfo.VolumeCount--
diskInfo.FreeVolumeCount++
addVolumeCount(diskInfo, -1)
}
}
for diskType, diskInfo := range emptyNode.info.DiskInfos {
if diskType == v.DiskType {
diskInfo.VolumeCount++
diskInfo.FreeVolumeCount--
addVolumeCount(diskInfo, 1)
}
}
return

View File

@@ -4,16 +4,17 @@ import (
"context"
"flag"
"fmt"
"io"
"path/filepath"
"strconv"
"time"
"github.com/seaweedfs/seaweedfs/weed/pb"
"github.com/seaweedfs/seaweedfs/weed/storage/needle"
"github.com/seaweedfs/seaweedfs/weed/storage/needle_map"
"github.com/seaweedfs/seaweedfs/weed/storage/types"
"golang.org/x/exp/slices"
"google.golang.org/grpc"
"io"
"path/filepath"
"strconv"
"time"
"github.com/seaweedfs/seaweedfs/weed/operation"
"github.com/seaweedfs/seaweedfs/weed/pb/master_pb"
@@ -316,7 +317,7 @@ func (c *commandVolumeFixReplication) fixOneUnderReplicatedVolume(commandEnv *Co
if !takeAction {
// adjust volume count
dst.dataNode.DiskInfos[replica.info.DiskType].VolumeCount++
addVolumeCount(dst.dataNode.DiskInfos[replica.info.DiskType], 1)
break
}
@@ -350,7 +351,7 @@ func (c *commandVolumeFixReplication) fixOneUnderReplicatedVolume(commandEnv *Co
}
// adjust volume count
dst.dataNode.DiskInfos[replica.info.DiskType].VolumeCount++
addVolumeCount(dst.dataNode.DiskInfos[replica.info.DiskType], 1)
break
}
}
@@ -361,6 +362,14 @@ func (c *commandVolumeFixReplication) fixOneUnderReplicatedVolume(commandEnv *Co
return nil
}
// addVolumeCount adjusts the bookkeeping on a disk after volumes are moved
// or created: count is added to VolumeCount and the same amount is taken
// from FreeVolumeCount (pass a negative count to undo a placement).
// A nil DiskInfo is tolerated and left untouched.
func addVolumeCount(info *master_pb.DiskInfo, count int) {
	if info != nil {
		delta := int64(count)
		info.VolumeCount += delta
		info.FreeVolumeCount -= delta
	}
}
func keepDataNodesSorted(dataNodes []location, diskType types.DiskType) {
fn := capacityByFreeVolumeCount(diskType)
slices.SortFunc(dataNodes, func(a, b location) int {

View File

@@ -5,15 +5,16 @@ import (
"errors"
"flag"
"fmt"
"io"
"path/filepath"
"sync"
"time"
"github.com/seaweedfs/seaweedfs/weed/glog"
"github.com/seaweedfs/seaweedfs/weed/pb"
"github.com/seaweedfs/seaweedfs/weed/pb/master_pb"
"github.com/seaweedfs/seaweedfs/weed/storage/types"
"github.com/seaweedfs/seaweedfs/weed/wdclient"
"io"
"path/filepath"
"sync"
"time"
"github.com/seaweedfs/seaweedfs/weed/operation"
"github.com/seaweedfs/seaweedfs/weed/pb/volume_server_pb"
@@ -212,7 +213,7 @@ func (c *commandVolumeTierMove) doVolumeTierMove(commandEnv *CommandEnv, writer
hasFoundTarget = true
// adjust volume count
dst.dataNode.DiskInfos[string(toDiskType)].VolumeCount++
addVolumeCount(dst.dataNode.DiskInfos[string(toDiskType)], 1)
destServerAddress := pb.NewServerAddressFromDataNode(dst.dataNode)
c.queues[destServerAddress] <- volumeTierMoveJob{sourceVolumeServer, vid}

View File

@@ -328,7 +328,7 @@ func ReadUrlAsStreamAuthenticated(fileUrl, jwt string, cipherKey []byte, isConte
}
defer CloseResponse(r)
if r.StatusCode >= 400 {
retryable = r.StatusCode == http.StatusNotFound || r.StatusCode >= 500
retryable = r.StatusCode == http.StatusNotFound || r.StatusCode >= 499
return retryable, fmt.Errorf("%s: %s", fileUrl, r.Status)
}

View File

@@ -66,9 +66,17 @@ func (logBuffer *LogBuffer) LoopProcessLogData(readerName string, startPosition
isDone = true
return
}
logBuffer.RLock()
lastTsNs := logBuffer.LastTsNs
for lastTsNs == logBuffer.LastTsNs {
logBuffer.RUnlock()
loopTsNs := lastTsNs // make a copy
for lastTsNs == loopTsNs {
if waitForDataFn() {
// Update loopTsNs and loop again
logBuffer.RLock()
loopTsNs = logBuffer.LastTsNs
logBuffer.RUnlock()
continue
} else {
isDone = true