seaweedfs/weed/storage/store_load_balancing_test.go
Chris Lu 5ed0b00fb9 Support separate volume server ID independent of RPC bind address (#7609)
* pb: add id field to Heartbeat message for stable volume server identification

This adds an 'id' field to the Heartbeat protobuf message that allows
volume servers to identify themselves independently of their IP:port address.

Ref: https://github.com/seaweedfs/seaweedfs/issues/7487
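
A minimal sketch of a volume server filling the new field when reporting to the master; Ip and Port already exist on Heartbeat, and the literal values here are illustrative:

    heartbeat := &master_pb.Heartbeat{
        Ip:   "10.0.0.1",
        Port: 8080,
        Id:   "node-1", // new field: stable identity; empty lets the master fall back to ip:port
    }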

* storage: add Id field to Store struct

Add Id field to Store struct and include it in CollectHeartbeat().
The Id field provides a stable volume server identity independent of IP:port.

Ref: https://github.com/seaweedfs/seaweedfs/issues/7487
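
Sketched shape of the change, with Store trimmed to the relevant fields (the real struct and CollectHeartbeat carry much more state):

    type Store struct {
        Ip   string
        Port int
        Id   string // stable identity, independent of Ip and Port
        // ... other fields elided
    }

    func (s *Store) CollectHeartbeat() *master_pb.Heartbeat {
        return &master_pb.Heartbeat{
            Ip:   s.Ip,
            Port: uint32(s.Port),
            Id:   s.Id, // sent on every heartbeat so the master can key the node by it
            // ... volume statistics elided
        }
    }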

* topology: support id-based DataNode identification

Update GetOrCreateDataNode to accept an id parameter for stable node
identification. When id is provided, the DataNode can maintain its identity
even when its IP address changes (e.g., in Kubernetes pod reschedules).

For backward compatibility (see the sketch below):
- If id is provided, use it as the node ID
- If id is empty, fall back to ip:port

Ref: https://github.com/seaweedfs/seaweedfs/issues/7487
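
A minimal sketch of the fallback; the signature is simplified, and findDataNode/createDataNode are stand-ins for the real rack internals:

    func (r *Rack) GetOrCreateDataNode(id, ip string, port int, publicUrl string) *DataNode {
        if id == "" {
            id = fmt.Sprintf("%s:%d", ip, port) // legacy key, backward compatible
        }
        if dn, found := r.findDataNode(id); found {
            dn.Ip = ip // same id, possibly a new address after a reschedule
            return dn
        }
        return r.createDataNode(id, ip, port, publicUrl)
    }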

* volume: add -id flag for stable volume server identity

Add -id command line flag to volume server that allows specifying a stable
identifier independent of the IP address. This is useful for Kubernetes
deployments with hostPath volumes where pods can be rescheduled to different
nodes while the persisted data remains on the original node.

Usage: weed volume -id=node-1 -ip=10.0.0.1 ...

If -id is not specified, it defaults to ip:port for backward compatibility.

Fixes https://github.com/seaweedfs/seaweedfs/issues/7487

* server: add -volume.id flag to weed server command

Support the -volume.id flag in the all-in-one 'weed server' command,
consistent with the standalone 'weed volume' command.

Usage: weed server -volume.id=node-1 ...

Ref: https://github.com/seaweedfs/seaweedfs/issues/7487

* topology: add test for id-based DataNode identification

Test the key scenarios (scenario 2 sketched after this list):
1. Create DataNode with explicit id
2. Same id with different IP returns same DataNode (K8s reschedule)
3. IP/PublicUrl are updated when node reconnects with new address
4. Different id creates new DataNode
5. Empty id falls back to ip:port (backward compatibility)

Ref: https://github.com/seaweedfs/seaweedfs/issues/7487
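
Scenario 2, roughly, with argument lists abbreviated relative to the real API:

    dn1 := rack.GetOrCreateDataNode("node-1", "10.0.0.1", 8080, "10.0.0.1:8080", nil)
    dn2 := rack.GetOrCreateDataNode("node-1", "10.0.0.2", 8080, "10.0.0.2:8080", nil)
    if dn1 != dn2 {
        t.Fatal("expected the same DataNode when the id matches across IP changes")
    }
    if dn1.Ip != "10.0.0.2" {
        t.Fatal("expected the IP to be updated on reconnect")
    }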

* pb: add address field to DataNodeInfo for proper node addressing

Previously, DataNodeInfo.Id was used as the node address, which worked
when Id was always ip:port. Now that Id can be an explicit string,
we need a separate Address field for connection purposes.

Changes (fallback sketched below):
- Add 'address' field to DataNodeInfo protobuf message
- Update ToDataNodeInfo() to populate the address field
- Update NewServerAddressFromDataNode() to use Address (with Id fallback)
- Fix LookupEcVolume to use dn.Url() instead of dn.Id()

Ref: https://github.com/seaweedfs/seaweedfs/issues/7487
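
The fallback amounts to the following sketch, assuming DataNodeInfo also carries a grpc port and NewServerAddressWithGrpcPort is the existing pb constructor:

    func NewServerAddressFromDataNode(dn *master_pb.DataNodeInfo) ServerAddress {
        if dn.Address != "" {
            return NewServerAddressWithGrpcPort(dn.Address, int(dn.GrpcPort))
        }
        // Older nodes: Id is still ip:port, so it doubles as the address.
        return NewServerAddressWithGrpcPort(dn.Id, int(dn.GrpcPort))
    }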

* fix: trim whitespace from volume server id and fix test

- Trim whitespace from the -id flag so whitespace-only values are treated as empty
- Fix store_load_balancing_test.go to include id parameter in NewStore call

Ref: https://github.com/seaweedfs/seaweedfs/issues/7487

* refactor: extract GetVolumeServerId to util package

Move the volume server ID determination logic to a shared utility function
to avoid code duplication between volume.go and rack.go.

Ref: https://github.com/seaweedfs/seaweedfs/issues/7487
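
A plausible shape for the shared helper, assuming util.JoinHostPort as the host:port formatter:

    // GetVolumeServerId returns the explicit id when one is set,
    // otherwise the legacy ip:port identity.
    func GetVolumeServerId(id, ip string, port int) string {
        if id = strings.TrimSpace(id); id != "" {
            return id
        }
        return JoinHostPort(ip, port)
    }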

* fix: improve transition logic for legacy nodes

- Use exact ip:port match instead of net.SplitHostPort heuristic
- Update GrpcPort and PublicUrl during transition for consistency
- Remove unused net import

Ref: https://github.com/seaweedfs/seaweedfs/issues/7487
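
The exact-match check amounts to something like this sketch (names illustrative):

    // Before: any existing id that merely parsed as host:port was treated as legacy.
    // After: only an exact match of this server's own ip:port counts as legacy.
    legacyId := fmt.Sprintf("%s:%d", ip, port)
    isLegacyNode := string(existingNode.Id()) == legacyId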

* fix: add id normalization and address change logging

- Normalize id parameter at function boundary (trim whitespace)
- Log when DataNode IP:Port changes (helps debug K8s pod rescheduling)

Ref: https://github.com/seaweedfs/seaweedfs/issues/7487
2025-12-02 22:08:11 -08:00


package storage

import (
	"os"
	"path/filepath"
	"strconv"
	"testing"

	"github.com/seaweedfs/seaweedfs/weed/pb/volume_server_pb"
	"github.com/seaweedfs/seaweedfs/weed/storage/needle"
	"github.com/seaweedfs/seaweedfs/weed/storage/super_block"
	"github.com/seaweedfs/seaweedfs/weed/storage/types"
	"github.com/seaweedfs/seaweedfs/weed/util"
)

// newTestStore creates a test store with the specified number of directories.
func newTestStore(t *testing.T, numDirs int) *Store {
	tempDir := t.TempDir()

	var dirs []string
	var maxCounts []int32
	var minFreeSpaces []util.MinFreeSpace
	var diskTypes []types.DiskType
	for i := 0; i < numDirs; i++ {
		dir := filepath.Join(tempDir, "dir"+strconv.Itoa(i))
		if err := os.MkdirAll(dir, 0755); err != nil {
			t.Fatalf("failed to create %s: %v", dir, err)
		}
		dirs = append(dirs, dir)
		maxCounts = append(maxCounts, 100) // high limit so placement is never capacity-bound
		minFreeSpaces = append(minFreeSpaces, util.MinFreeSpace{})
		diskTypes = append(diskTypes, types.HardDriveType)
	}

	// The empty string after the public URL is the volume server id;
	// leaving it empty falls back to ip:port for backward compatibility.
	store := NewStore(nil, "localhost", 8080, 18080, "http://localhost:8080", "",
		dirs, maxCounts, minFreeSpaces, "", NeedleMapInMemory, diskTypes, 3)

	// Consume channel messages to prevent blocking
	done := make(chan bool)
	go func() {
		for {
			select {
			case <-store.NewVolumesChan:
			case <-done:
				return
			}
		}
	}()
	t.Cleanup(func() { close(done) })

	return store
}

func TestLocalVolumesLen(t *testing.T) {
	testCases := []struct {
		name               string
		totalVolumes       int
		remoteVolumes      int
		expectedLocalCount int
	}{
		{
			name:               "all local volumes",
			totalVolumes:       5,
			remoteVolumes:      0,
			expectedLocalCount: 5,
		},
		{
			name:               "all remote volumes",
			totalVolumes:       5,
			remoteVolumes:      5,
			expectedLocalCount: 0,
		},
		{
			name:               "mixed local and remote",
			totalVolumes:       10,
			remoteVolumes:      3,
			expectedLocalCount: 7,
		},
		{
			name:               "no volumes",
			totalVolumes:       0,
			remoteVolumes:      0,
			expectedLocalCount: 0,
		},
	}

	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			diskLocation := &DiskLocation{
				volumes: make(map[needle.VolumeId]*Volume),
			}

			// Add volumes
			for i := 0; i < tc.totalVolumes; i++ {
				vol := &Volume{
					Id:         needle.VolumeId(i + 1),
					volumeInfo: &volume_server_pb.VolumeInfo{},
				}
				// Mark some as remote
				if i < tc.remoteVolumes {
					vol.hasRemoteFile = true
					vol.volumeInfo.Files = []*volume_server_pb.RemoteFile{
						{BackendType: "s3", BackendId: "test", Key: "test-key"},
					}
				}
				diskLocation.volumes[vol.Id] = vol
			}

			result := diskLocation.LocalVolumesLen()
			if result != tc.expectedLocalCount {
				t.Errorf("Expected LocalVolumesLen() = %d; got %d (total: %d, remote: %d)",
					tc.expectedLocalCount, result, tc.totalVolumes, tc.remoteVolumes)
			}
		})
	}
}

func TestVolumeLoadBalancing(t *testing.T) {
	testCases := []struct {
		name              string
		locations         []locationSetup
		expectedLocations []int // which location index should get each volume
	}{
		{
			name: "even distribution across empty locations",
			locations: []locationSetup{
				{localVolumes: 0, remoteVolumes: 0},
				{localVolumes: 0, remoteVolumes: 0},
				{localVolumes: 0, remoteVolumes: 0},
			},
			expectedLocations: []int{0, 1, 2, 0, 1, 2}, // round-robin
		},
		{
			name: "prefers location with fewer local volumes",
			locations: []locationSetup{
				{localVolumes: 5, remoteVolumes: 0},
				{localVolumes: 2, remoteVolumes: 0},
				{localVolumes: 8, remoteVolumes: 0},
			},
			expectedLocations: []int{1, 1, 1}, // all go to location 1 (has fewest)
		},
		{
			name: "ignores remote volumes in count",
			locations: []locationSetup{
				{localVolumes: 2, remoteVolumes: 10}, // 2 local, 10 remote
				{localVolumes: 5, remoteVolumes: 0},  // 5 local
				{localVolumes: 3, remoteVolumes: 0},  // 3 local
			},
			// Explanation:
			// 1. Initial local counts: [2, 5, 3]. First volume goes to location 0 (2 local, ignoring 10 remote).
			// 2. New local counts: [3, 5, 3]. Second volume goes to location 0 (first with min count 3).
			// 3. New local counts: [4, 5, 3]. Third volume goes to location 2 (3 local < 4 local).
			expectedLocations: []int{0, 0, 2},
		},
		{
			name: "balances when some locations have remote volumes",
			locations: []locationSetup{
				{localVolumes: 1, remoteVolumes: 5},
				{localVolumes: 1, remoteVolumes: 0},
				{localVolumes: 0, remoteVolumes: 3},
			},
			// Explanation:
			// 1. Initial local counts: [1, 1, 0]. First volume goes to location 2 (0 local).
			// 2. New local counts: [1, 1, 1]. Second volume goes to location 0 (first with min count 1).
			// 3. New local counts: [2, 1, 1]. Third volume goes to location 1 (next with min count 1).
			expectedLocations: []int{2, 0, 1},
		},
	}

	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			// Create test store with multiple directories
			store := newTestStore(t, len(tc.locations))

			// Pre-populate locations with volumes
			for locIdx, setup := range tc.locations {
				location := store.Locations[locIdx]
				vidCounter := 1000 + locIdx*100 // unique volume IDs per location

				// Add local volumes
				for i := 0; i < setup.localVolumes; i++ {
					vol := createTestVolume(needle.VolumeId(vidCounter), false)
					location.SetVolume(vol.Id, vol)
					vidCounter++
				}
				// Add remote volumes
				for i := 0; i < setup.remoteVolumes; i++ {
					vol := createTestVolume(needle.VolumeId(vidCounter), true)
					location.SetVolume(vol.Id, vol)
					vidCounter++
				}
			}

			// Create volumes and verify they go to expected locations
			for i, expectedLoc := range tc.expectedLocations {
				volumeId := needle.VolumeId(i + 1)
				err := store.AddVolume(volumeId, "", NeedleMapInMemory, "000", "",
					0, needle.GetCurrentVersion(), 0, types.HardDriveType, 3)
				if err != nil {
					t.Fatalf("Failed to add volume %d: %v", volumeId, err)
				}

				// Find which location got the volume
				actualLoc := -1
				for locIdx, location := range store.Locations {
					if _, found := location.FindVolume(volumeId); found {
						actualLoc = locIdx
						break
					}
				}

				if actualLoc != expectedLoc {
					t.Errorf("Volume %d: expected location %d, got location %d",
						volumeId, expectedLoc, actualLoc)
					// Debug info
					for locIdx, loc := range store.Locations {
						localCount := loc.LocalVolumesLen()
						totalCount := loc.VolumesLen()
						t.Logf("  Location %d: %d local, %d total", locIdx, localCount, totalCount)
					}
				}
			}
		})
	}
}

// Helper types and functions

type locationSetup struct {
	localVolumes  int
	remoteVolumes int
}

func createTestVolume(vid needle.VolumeId, isRemote bool) *Volume {
	vol := &Volume{
		Id:         vid,
		SuperBlock: super_block.SuperBlock{},
		volumeInfo: &volume_server_pb.VolumeInfo{},
	}
	if isRemote {
		vol.hasRemoteFile = true
		vol.volumeInfo.Files = []*volume_server_pb.RemoteFile{
			{BackendType: "s3", BackendId: "test", Key: "remote-key-" + strconv.Itoa(int(vid))},
		}
	}
	return vol
}