Humanize output for weed shell by default (#7758)
* Implement a `weed shell` command to return a status overview of the cluster.
  Detailed file information will be implemented in a follow-up MR. Note also
  that masters are currently not reporting back EC shard sizes correctly, via
  `master_pb.VolumeEcShardInformationMessage.shard_sizes`. For example:

  ```
  > status
  cluster:
  	id: topo
  	status: LOCKED
  	nodes: 10
  	topology: 1 DC(s), 1 disk(s) on 1 rack(s)

  volumes:
  	total: 3 volumes on 1 collections
  	max size: 31457280000 bytes
  	regular: 2/80 volumes on 6 replicas, 6 writable (100.00%), 0 read-only (0.00%)
  	EC: 1 EC volumes on 14 shards (14.00 shards/volume)

  storage:
  	total: 186024424 bytes
  	regular volumes: 186024424 bytes
  	EC volumes: 0 bytes
  	raw: 558073152 bytes on volume replicas, 0 bytes on EC shard files
  ```

* Humanize output for `weed shell` by default. Makes things more readable :)

  ```
  > cluster.status
  cluster:
  	id: topo
  	status: LOCKED
  	nodes: 10
  	topology: 1 DC, 10 disks on 1 rack

  volumes:
  	total: 3 volumes, 1 collection
  	max size: 32 GB
  	regular: 2/80 volumes on 6 replicas, 6 writable (100%), 0 read-only (0%)
  	EC: 1 EC volume on 14 shards (14 shards/volume)

  storage:
  	total: 172 MB
  	regular volumes: 172 MB
  	EC volumes: 0 B
  	raw: 516 MB on volume replicas, 0 B on EC shards
  ```

  ```
  > cluster.status --humanize=false
  cluster:
  	id: topo
  	status: LOCKED
  	nodes: 10
  	topology: 1 DC(s), 10 disk(s) on 1 rack(s)

  volumes:
  	total: 3 volume(s), 1 collection(s)
  	max size: 31457280000 byte(s)
  	regular: 2/80 volume(s) on 6 replica(s), 5 writable (83.33%), 1 read-only (16.67%)
  	EC: 1 EC volume(s) on 14 shard(s) (14.00 shards/volume)

  storage:
  	total: 172128072 byte(s)
  	regular volumes: 172128072 byte(s)
  	EC volumes: 0 byte(s)
  	raw: 516384216 byte(s) on volume replicas, 0 byte(s) on EC shards
  ```

Also adds unit tests, and reshuffles test file handling for clarity.
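For context, the human-readable figures come from the `github.com/dustin/go-humanize` dependency this change pulls in. Below is a minimal, self-contained sketch (illustrative only, not part of this commit) of the library primitives the new `ClusterStatusPrinter` helpers wrap; the sample values mirror the outputs above:

```go
package main

import (
	"fmt"

	"github.com/dustin/go-humanize"
	"github.com/dustin/go-humanize/english"
)

func main() {
	// Thousands separators for counts.
	fmt.Println(humanize.Comma(12056)) // 12,056

	// SI byte sizes (base 1000), as used for volume and storage totals.
	fmt.Println(humanize.Bytes(172128072)) // 172 MB

	// Floats trimmed to at most two digits, with trailing zeros dropped —
	// which is why 100.00% prints as 100% in humanized mode.
	fmt.Println(humanize.FtoaWithDigits(13.571, 2)) // 13.57
	fmt.Println(humanize.FtoaWithDigits(100.0, 2))  // 100

	// Quantity-aware pluralization; an empty plural form means
	// "derive it from the singular".
	fmt.Println(english.PluralWord(14, "shard", "")) // shards
	fmt.Println(english.PluralWord(1, "volume", "")) // volume
}
```

With `--humanize=false`, the printer skips these calls and falls back to plain `fmt.Sprintf` formatting with `(s)` suffixes, as the diff below shows.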
weed/shell/command_cluster_status.go
@@ -5,6 +5,8 @@ import (
 	"fmt"
 	"strings"
 
+	"github.com/dustin/go-humanize"
+	"github.com/dustin/go-humanize/english"
 	"github.com/seaweedfs/seaweedfs/weed/pb/master_pb"
 	"github.com/seaweedfs/seaweedfs/weed/storage/erasure_coding"
 	"github.com/seaweedfs/seaweedfs/weed/storage/needle"
@@ -18,7 +20,8 @@ func init() {
 type commandClusterStatus struct{}
 
 type ClusterStatusPrinter struct {
-	writer io.Writer
+	writer   io.Writer
+	humanize bool
 
 	locked      bool
 	collections []string
@@ -40,6 +43,7 @@ func (c *commandClusterStatus) HasTag(CommandTag) bool {
 
 func (c *commandClusterStatus) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) {
 	flags := flag.NewFlagSet(c.Name(), flag.ContinueOnError)
+	humanize := flags.Bool("humanize", true, "human-readable output")
 
 	if err = flags.Parse(args); err != nil {
 		return err
@@ -55,7 +59,8 @@ func (c *commandClusterStatus) Do(args []string, commandEnv *CommandEnv, writer
 	}
 
 	sp := &ClusterStatusPrinter{
-		writer: writer,
+		writer:   writer,
+		humanize: *humanize,
 
 		locked:      commandEnv.isLocked(),
 		collections: collections,
@@ -67,7 +72,66 @@ func (c *commandClusterStatus) Do(args []string, commandEnv *CommandEnv, writer
 	return nil
 }
 
-// TODO: humanize figures in output
+func (sp *ClusterStatusPrinter) uint64(n uint64) string {
+	if !sp.humanize {
+		return fmt.Sprintf("%d", n)
+	}
+	return humanize.Comma(int64(n))
+}
+
+func (sp *ClusterStatusPrinter) int(n int) string {
+	return sp.uint64(uint64(n))
+}
+
+func (sp *ClusterStatusPrinter) plural(n int, str string) string {
+	if !sp.humanize {
+		return fmt.Sprintf("%s(s)", str)
+	}
+	return english.PluralWord(n, str, "")
+}
+
+func (sp *ClusterStatusPrinter) bytes(b uint64) string {
+	if !sp.humanize {
+		return fmt.Sprintf("%d %s", b, sp.plural(int(b), "byte"))
+	}
+	return fmt.Sprintf("%s", humanize.Bytes(b))
+}
+
+func (sp *ClusterStatusPrinter) uint64Ratio(a, b uint64) string {
+	var p float64
+	if b != 0 {
+		p = float64(a) / float64(b)
+	}
+	if !sp.humanize {
+		return fmt.Sprintf("%.02f", p)
+	}
+	return fmt.Sprintf("%s", humanize.FtoaWithDigits(p, 2))
+}
+
+func (sp *ClusterStatusPrinter) intRatio(a, b int) string {
+	return sp.uint64Ratio(uint64(a), uint64(b))
+}
+
+func (sp *ClusterStatusPrinter) uint64Pct(a, b uint64) string {
+	var p float64
+	if b != 0 {
+		p = 100 * float64(a) / float64(b)
+	}
+	if !sp.humanize {
+		return fmt.Sprintf("%.02f%%", p)
+	}
+	return fmt.Sprintf("%s%%", humanize.FtoaWithDigits(p, 2))
+}
+
+func (sp *ClusterStatusPrinter) intPct(a, b int) string {
+	return sp.uint64Pct(uint64(a), uint64(b))
+}
+
+func (sp *ClusterStatusPrinter) write(format string, a ...any) {
+	fmt.Fprintf(sp.writer, strings.TrimRight(format, "\r\n "), a...)
+	fmt.Fprint(sp.writer, "\n")
+}
+
+// TODO: add option to collect detailed file stats
 func (sp *ClusterStatusPrinter) Print() {
 	sp.write("")
@@ -76,11 +140,6 @@ func (sp *ClusterStatusPrinter) Print() {
 	sp.printStorageInfo()
 }
 
-func (sp *ClusterStatusPrinter) write(format string, a ...any) {
-	fmt.Fprintf(sp.writer, strings.TrimRight(format, "\r\n "), a...)
-	fmt.Fprint(sp.writer, "\n")
-}
-
 func (sp *ClusterStatusPrinter) printClusterInfo() {
 	dcs := len(sp.topology.DataCenterInfos)
@@ -105,18 +164,22 @@ func (sp *ClusterStatusPrinter) printClusterInfo() {
 	sp.write("cluster:")
 	sp.write("\tid: %s", sp.topology.Id)
 	sp.write("\tstatus: %s", status)
-	sp.write("\tnodes: %d", nodes)
-	sp.write("\ttopology: %d DC(s), %d disk(s) on %d rack(s)", dcs, disks, racks)
+	sp.write("\tnodes: %s", sp.int(nodes))
+	sp.write("\ttopology: %s %s, %s %s on %s %s",
+		sp.int(dcs), sp.plural(dcs, "DC"),
+		sp.int(disks), sp.plural(disks, "disk"),
+		sp.int(racks), sp.plural(racks, "rack"))
 	sp.write("")
 }
 
 func (sp *ClusterStatusPrinter) printVolumeInfo() {
 	collections := len(sp.collections)
 	var maxVolumes uint64
-	volumes := map[needle.VolumeId]bool{}
-	ecVolumes := map[needle.VolumeId]bool{}
+	volumeIds := map[needle.VolumeId]bool{}
+	ecVolumeIds := map[needle.VolumeId]bool{}
 
-	var replicas, roReplicas, rwReplicas, ecShards uint64
+	var replicas, roReplicas, rwReplicas, ecShards int
 
 	for _, dci := range sp.topology.DataCenterInfos {
 		for _, ri := range dci.RackInfos {
@@ -125,7 +188,7 @@ func (sp *ClusterStatusPrinter) printVolumeInfo() {
 					maxVolumes += uint64(di.MaxVolumeCount)
 					for _, vi := range di.VolumeInfos {
 						vid := needle.VolumeId(vi.Id)
-						volumes[vid] = true
+						volumeIds[vid] = true
 						replicas++
 						if vi.ReadOnly {
 							roReplicas++
@@ -135,31 +198,34 @@ func (sp *ClusterStatusPrinter) printVolumeInfo() {
 					}
 					for _, eci := range di.EcShardInfos {
 						vid := needle.VolumeId(eci.Id)
-						ecVolumes[vid] = true
-						ecShards += uint64(erasure_coding.ShardBits(eci.EcIndexBits).ShardIdCount())
+						ecVolumeIds[vid] = true
+						ecShards += erasure_coding.ShardBits(eci.EcIndexBits).ShardIdCount()
 					}
 				}
 			}
 		}
 	}
 
-	var roReplicasRatio, rwReplicasRatio, ecShardsPerVolume float64
-	if replicas != 0 {
-		roReplicasRatio = float64(roReplicas) / float64(replicas)
-		rwReplicasRatio = float64(rwReplicas) / float64(replicas)
-	}
-	if len(ecVolumes) != 0 {
-		ecShardsPerVolume = float64(ecShards) / float64(len(ecVolumes))
-	}
-
-	totalVolumes := len(volumes) + len(ecVolumes)
+	volumes := len(volumeIds)
+	ecVolumes := len(ecVolumeIds)
+	totalVolumes := volumes + ecVolumes
 
 	sp.write("volumes:")
-	sp.write("\ttotal: %d volumes on %d collections", totalVolumes, collections)
-	sp.write("\tmax size: %d bytes", sp.volumeSizeLimitMb*1024*1024)
-	sp.write("\tregular: %d/%d volumes on %d replicas, %d writable (%.02f%%), %d read-only (%.02f%%)", len(volumes), maxVolumes, replicas, rwReplicas, 100*rwReplicasRatio, roReplicas, 100*roReplicasRatio)
-	sp.write("\tEC: %d EC volumes on %d shards (%.02f shards/volume)", len(ecVolumes), ecShards, ecShardsPerVolume)
+	sp.write("\ttotal: %s %s, %s %s",
+		sp.int(totalVolumes), sp.plural(totalVolumes, "volume"),
+		sp.int(collections), sp.plural(collections, "collection"))
+	sp.write("\tmax size: %s", sp.bytes(sp.volumeSizeLimitMb*1024*1024))
+	sp.write("\tregular: %s/%s %s on %s %s, %s writable (%s), %s read-only (%s)",
+		sp.int(volumes), sp.uint64(maxVolumes), sp.plural(volumes, "volume"),
+		sp.int(replicas), sp.plural(replicas, "replica"),
+		sp.int(rwReplicas), sp.intPct(rwReplicas, replicas),
+		sp.int(roReplicas), sp.intPct(roReplicas, replicas))
+	sp.write("\tEC: %s EC %s on %s %s (%s shards/volume)",
+		sp.int(ecVolumes), sp.plural(ecVolumes, "volume"),
+		sp.int(ecShards), sp.plural(ecShards, "shard"),
+		sp.intRatio(ecShards, ecVolumes))
 	sp.write("")
 }
 
 func (sp *ClusterStatusPrinter) printStorageInfo() {
@@ -202,13 +268,12 @@ func (sp *ClusterStatusPrinter) printStorageInfo() {
 	for _, s := range perEcVolumeSize {
 		ecVolumeSize += s
 	}
 
 	totalSize := volumeSize + ecVolumeSize
 
 	sp.write("storage:")
-	sp.write("\ttotal: %d bytes", totalSize)
-	sp.write("\tregular volumes: %d bytes", volumeSize)
-	sp.write("\tEC volumes: %d bytes", ecVolumeSize)
-	sp.write("\traw: %d bytes on volume replicas, %d bytes on EC shard files", rawVolumeSize, rawEcVolumeSize)
+	sp.write("\ttotal: %s", sp.bytes(totalSize))
+	sp.write("\tregular volumes: %s", sp.bytes(volumeSize))
+	sp.write("\tEC volumes: %s", sp.bytes(ecVolumeSize))
+	sp.write("\traw: %s on volume replicas, %s on EC shards", sp.bytes(rawVolumeSize), sp.bytes(rawEcVolumeSize))
 	sp.write("")
 }
weed/shell/command_cluster_status_test.go (new file, 140 lines)
@@ -0,0 +1,140 @@
+package shell
+
+import (
+	"bytes"
+	"testing"
+
+	"github.com/seaweedfs/seaweedfs/weed/pb/master_pb"
+)
+
+func TestPrintClusterInfo(t *testing.T) {
+	testCases := []struct {
+		topology *master_pb.TopologyInfo
+		humanize bool
+		want     string
+	}{
+		{
+			testTopology1, true,
+			`cluster:
+	id: test_topo_1
+	status: unlocked
+	nodes: 5
+	topology: 5 DCs, 5 disks on 6 racks
+
+`,
+		},
+		{
+			testTopology1, false,
+			`cluster:
+	id: test_topo_1
+	status: unlocked
+	nodes: 5
+	topology: 5 DC(s), 5 disk(s) on 6 rack(s)
+
+`,
+		},
+	}
+
+	for _, tc := range testCases {
+		var buf bytes.Buffer
+		sp := &ClusterStatusPrinter{
+			writer:   &buf,
+			humanize: tc.humanize,
+			topology: tc.topology,
+		}
+		sp.printClusterInfo()
+		got := buf.String()
+
+		if got != tc.want {
+			t.Errorf("for %v: got %v, want %v", tc.topology.Id, got, tc.want)
+		}
+	}
+}
+
+func TestPrintVolumeInfo(t *testing.T) {
+	testCases := []struct {
+		topology *master_pb.TopologyInfo
+		humanize bool
+		want     string
+	}{
+		{
+			testTopology2, true,
+			`volumes:
+	total: 12,056 volumes, 0 collections
+	max size: 0 B
+	regular: 5,302/25,063 volumes on 15,900 replicas, 15,900 writable (100%), 0 read-only (0%)
+	EC: 6,754 EC volumes on 91,662 shards (13.57 shards/volume)
+
+`,
+		},
+		{
+			testTopology2, false,
+			`volumes:
+	total: 12056 volume(s), 0 collection(s)
+	max size: 0 byte(s)
+	regular: 5302/25063 volume(s) on 15900 replica(s), 15900 writable (100.00%), 0 read-only (0.00%)
+	EC: 6754 EC volume(s) on 91662 shard(s) (13.57 shards/volume)
+
+`,
+		},
+	}
+
+	for _, tc := range testCases {
+		var buf bytes.Buffer
+		sp := &ClusterStatusPrinter{
+			writer:   &buf,
+			humanize: tc.humanize,
+			topology: tc.topology,
+		}
+		sp.printVolumeInfo()
+		got := buf.String()
+
+		if got != tc.want {
+			t.Errorf("for %v: got %v, want %v", tc.topology.Id, got, tc.want)
+		}
+	}
+}
+
+func TestPrintStorageInfo(t *testing.T) {
+	testCases := []struct {
+		topology *master_pb.TopologyInfo
+		humanize bool
+		want     string
+	}{
+		{
+			testTopology2, true,
+			`storage:
+	total: 5.9 TB
+	regular volumes: 5.9 TB
+	EC volumes: 0 B
+	raw: 18 TB on volume replicas, 0 B on EC shards
+
+`,
+		},
+		{
+			testTopology2, false,
+			`storage:
+	total: 5892610895448 byte(s)
+	regular volumes: 5892610895448 byte(s)
+	EC volumes: 0 byte(s)
+	raw: 17676186754616 byte(s) on volume replicas, 0 byte(s) on EC shards
+
+`,
+		},
+	}
+
+	for _, tc := range testCases {
+		var buf bytes.Buffer
+		sp := &ClusterStatusPrinter{
+			writer:   &buf,
+			humanize: tc.humanize,
+			topology: tc.topology,
+		}
+		sp.printStorageInfo()
+		got := buf.String()
+
+		if got != tc.want {
+			t.Errorf("for %v: got %v, want %v", tc.topology.Id, got, tc.want)
+		}
+	}
+}
weed/shell/command_ec_common_test.go
@@ -13,12 +13,6 @@ import (
 	"github.com/seaweedfs/seaweedfs/weed/storage/types"
 )
 
-var (
-	topology1  = parseOutput(topoData)
-	topology2  = parseOutput(topoData2)
-	topologyEc = parseOutput(topoDataEc)
-)
-
 func errorCheck(got error, want string) error {
 	if got == nil && want == "" {
 		return nil
@@ -42,22 +36,22 @@ func TestCollectCollectionsForVolumeIds(t *testing.T) {
 		want     []string
 	}{
 		// normal volumes
-		{topology1, nil, nil},
-		{topology1, []needle.VolumeId{}, nil},
-		{topology1, []needle.VolumeId{needle.VolumeId(9999)}, nil},
-		{topology1, []needle.VolumeId{needle.VolumeId(2)}, []string{""}},
-		{topology1, []needle.VolumeId{needle.VolumeId(2), needle.VolumeId(272)}, []string{"", "collection2"}},
-		{topology1, []needle.VolumeId{needle.VolumeId(2), needle.VolumeId(272), needle.VolumeId(299)}, []string{"", "collection2"}},
-		{topology1, []needle.VolumeId{needle.VolumeId(272), needle.VolumeId(299), needle.VolumeId(95)}, []string{"collection1", "collection2"}},
-		{topology1, []needle.VolumeId{needle.VolumeId(272), needle.VolumeId(299), needle.VolumeId(95), needle.VolumeId(51)}, []string{"collection1", "collection2"}},
-		{topology1, []needle.VolumeId{needle.VolumeId(272), needle.VolumeId(299), needle.VolumeId(95), needle.VolumeId(51), needle.VolumeId(15)}, []string{"collection0", "collection1", "collection2"}},
+		{testTopology1, nil, nil},
+		{testTopology1, []needle.VolumeId{}, nil},
+		{testTopology1, []needle.VolumeId{needle.VolumeId(9999)}, nil},
+		{testTopology1, []needle.VolumeId{needle.VolumeId(2)}, []string{""}},
+		{testTopology1, []needle.VolumeId{needle.VolumeId(2), needle.VolumeId(272)}, []string{"", "collection2"}},
+		{testTopology1, []needle.VolumeId{needle.VolumeId(2), needle.VolumeId(272), needle.VolumeId(299)}, []string{"", "collection2"}},
+		{testTopology1, []needle.VolumeId{needle.VolumeId(272), needle.VolumeId(299), needle.VolumeId(95)}, []string{"collection1", "collection2"}},
+		{testTopology1, []needle.VolumeId{needle.VolumeId(272), needle.VolumeId(299), needle.VolumeId(95), needle.VolumeId(51)}, []string{"collection1", "collection2"}},
+		{testTopology1, []needle.VolumeId{needle.VolumeId(272), needle.VolumeId(299), needle.VolumeId(95), needle.VolumeId(51), needle.VolumeId(15)}, []string{"collection0", "collection1", "collection2"}},
 		// EC volumes
-		{topology2, []needle.VolumeId{needle.VolumeId(9577)}, []string{"s3qldata"}},
-		{topology2, []needle.VolumeId{needle.VolumeId(9577), needle.VolumeId(12549)}, []string{"s3qldata"}},
+		{testTopology2, []needle.VolumeId{needle.VolumeId(9577)}, []string{"s3qldata"}},
+		{testTopology2, []needle.VolumeId{needle.VolumeId(9577), needle.VolumeId(12549)}, []string{"s3qldata"}},
 		// normal + EC volumes
-		{topology2, []needle.VolumeId{needle.VolumeId(18111)}, []string{"s3qldata"}},
-		{topology2, []needle.VolumeId{needle.VolumeId(8677)}, []string{"s3qldata"}},
-		{topology2, []needle.VolumeId{needle.VolumeId(18111), needle.VolumeId(8677)}, []string{"s3qldata"}},
+		{testTopology2, []needle.VolumeId{needle.VolumeId(18111)}, []string{"s3qldata"}},
+		{testTopology2, []needle.VolumeId{needle.VolumeId(8677)}, []string{"s3qldata"}},
+		{testTopology2, []needle.VolumeId{needle.VolumeId(18111), needle.VolumeId(8677)}, []string{"s3qldata"}},
 	}
 
 	for _, tc := range testCases {
@@ -106,7 +100,7 @@ func TestParseReplicaPlacementArg(t *testing.T) {
 func TestEcDistribution(t *testing.T) {
 
 	// find out all volume servers with one slot left.
-	ecNodes, totalFreeEcSlots := collectEcVolumeServersByDc(topology1, "", types.HardDriveType)
+	ecNodes, totalFreeEcSlots := collectEcVolumeServersByDc(testTopology1, "", types.HardDriveType)
 
 	sortEcNodesByFreeslotsDescending(ecNodes)
 
@@ -133,18 +127,18 @@ func TestPickRackToBalanceShardsInto(t *testing.T) {
 		wantErr  string
 	}{
 		// Non-EC volumes. We don't care about these, but the function should return all racks as a safeguard.
-		{topologyEc, "", "123", []string{"rack1", "rack2", "rack3", "rack4", "rack5", "rack6"}, ""},
-		{topologyEc, "6225", "123", []string{"rack1", "rack2", "rack3", "rack4", "rack5", "rack6"}, ""},
-		{topologyEc, "6226", "123", []string{"rack1", "rack2", "rack3", "rack4", "rack5", "rack6"}, ""},
-		{topologyEc, "6241", "123", []string{"rack1", "rack2", "rack3", "rack4", "rack5", "rack6"}, ""},
-		{topologyEc, "6242", "123", []string{"rack1", "rack2", "rack3", "rack4", "rack5", "rack6"}, ""},
+		{testTopologyEc, "", "123", []string{"rack1", "rack2", "rack3", "rack4", "rack5", "rack6"}, ""},
+		{testTopologyEc, "6225", "123", []string{"rack1", "rack2", "rack3", "rack4", "rack5", "rack6"}, ""},
+		{testTopologyEc, "6226", "123", []string{"rack1", "rack2", "rack3", "rack4", "rack5", "rack6"}, ""},
+		{testTopologyEc, "6241", "123", []string{"rack1", "rack2", "rack3", "rack4", "rack5", "rack6"}, ""},
+		{testTopologyEc, "6242", "123", []string{"rack1", "rack2", "rack3", "rack4", "rack5", "rack6"}, ""},
 		// EC volumes.
-		{topologyEc, "9577", "", nil, "shards 1 > replica placement limit for other racks (0)"},
-		{topologyEc, "9577", "111", []string{"rack1", "rack2", "rack3"}, ""},
-		{topologyEc, "9577", "222", []string{"rack1", "rack2", "rack3"}, ""},
-		{topologyEc, "10457", "222", []string{"rack1"}, ""},
-		{topologyEc, "12737", "222", []string{"rack2"}, ""},
-		{topologyEc, "14322", "222", []string{"rack3"}, ""},
+		{testTopologyEc, "9577", "", nil, "shards 1 > replica placement limit for other racks (0)"},
+		{testTopologyEc, "9577", "111", []string{"rack1", "rack2", "rack3"}, ""},
+		{testTopologyEc, "9577", "222", []string{"rack1", "rack2", "rack3"}, ""},
+		{testTopologyEc, "10457", "222", []string{"rack1"}, ""},
+		{testTopologyEc, "12737", "222", []string{"rack2"}, ""},
+		{testTopologyEc, "14322", "222", []string{"rack3"}, ""},
 	}
 
 	for _, tc := range testCases {
@@ -190,11 +184,11 @@ func TestPickEcNodeToBalanceShardsInto(t *testing.T) {
 		wantOneOf []string
 		wantErr   string
 	}{
-		{topologyEc, "", "", nil, "INTERNAL: missing source nodes"},
-		{topologyEc, "idontexist", "12737", nil, "INTERNAL: missing source nodes"},
+		{testTopologyEc, "", "", nil, "INTERNAL: missing source nodes"},
+		{testTopologyEc, "idontexist", "12737", nil, "INTERNAL: missing source nodes"},
 		// Non-EC nodes. We don't care about these, but the function should return all available target nodes as a safeguard.
 		{
-			topologyEc, "172.19.0.10:8702", "6225", []string{
+			testTopologyEc, "172.19.0.10:8702", "6225", []string{
 				"172.19.0.13:8701", "172.19.0.14:8711", "172.19.0.16:8704", "172.19.0.17:8703",
 				"172.19.0.19:8700", "172.19.0.20:8706", "172.19.0.21:8710", "172.19.0.3:8708",
 				"172.19.0.4:8707", "172.19.0.5:8705", "172.19.0.6:8713", "172.19.0.8:8709",
@@ -202,7 +196,7 @@ func TestPickEcNodeToBalanceShardsInto(t *testing.T) {
 			"",
 		},
 		{
-			topologyEc, "172.19.0.8:8709", "6226", []string{
+			testTopologyEc, "172.19.0.8:8709", "6226", []string{
 				"172.19.0.10:8702", "172.19.0.13:8701", "172.19.0.14:8711", "172.19.0.16:8704",
 				"172.19.0.17:8703", "172.19.0.19:8700", "172.19.0.20:8706", "172.19.0.21:8710",
 				"172.19.0.3:8708", "172.19.0.4:8707", "172.19.0.5:8705", "172.19.0.6:8713",
@@ -210,16 +204,16 @@ func TestPickEcNodeToBalanceShardsInto(t *testing.T) {
 			"",
 		},
 		// EC volumes.
-		{topologyEc, "172.19.0.10:8702", "14322", []string{
+		{testTopologyEc, "172.19.0.10:8702", "14322", []string{
 			"172.19.0.14:8711", "172.19.0.5:8705", "172.19.0.6:8713"},
 			""},
-		{topologyEc, "172.19.0.13:8701", "10457", []string{
+		{testTopologyEc, "172.19.0.13:8701", "10457", []string{
 			"172.19.0.10:8702", "172.19.0.6:8713"},
 			""},
-		{topologyEc, "172.19.0.17:8703", "12737", []string{
+		{testTopologyEc, "172.19.0.17:8703", "12737", []string{
 			"172.19.0.13:8701"},
 			""},
-		{topologyEc, "172.19.0.20:8706", "14322", []string{
+		{testTopologyEc, "172.19.0.20:8706", "14322", []string{
 			"172.19.0.14:8711", "172.19.0.5:8705", "172.19.0.6:8713"},
 			""},
 	}
@@ -277,7 +271,7 @@ func TestCountFreeShardSlots(t *testing.T) {
 	}{
 		{
 			name:     "topology #1, free HDD shards",
-			topology: topology1,
+			topology: testTopology1,
 			diskType: types.HardDriveType,
 			want: map[string]int{
 				"192.168.1.1:8080": 17330,
@@ -289,7 +283,7 @@ func TestCountFreeShardSlots(t *testing.T) {
 		},
 		{
 			name:     "topology #1, no free SSD shards available",
-			topology: topology1,
+			topology: testTopology1,
 			diskType: types.SsdType,
 			want: map[string]int{
 				"192.168.1.1:8080": 0,
@@ -301,7 +295,7 @@ func TestCountFreeShardSlots(t *testing.T) {
 		},
 		{
 			name:     "topology #2, no negative free HDD shards",
-			topology: topology2,
+			topology: testTopology2,
 			diskType: types.HardDriveType,
 			want: map[string]int{
 				"172.19.0.3:8708": 0,
@@ -322,7 +316,7 @@ func TestCountFreeShardSlots(t *testing.T) {
 		},
 		{
 			name:     "topology #2, no free SSD shards available",
-			topology: topology2,
+			topology: testTopology2,
 			diskType: types.SsdType,
 			want: map[string]int{
 				"172.19.0.10:8702": 0,
weed/shell/command_volume_list_test.go
@@ -1,8 +1,6 @@
 package shell
 
 import (
-	_ "embed"
-
 	"github.com/seaweedfs/seaweedfs/weed/storage/erasure_coding"
 	"github.com/seaweedfs/seaweedfs/weed/storage/types"
 	"github.com/stretchr/testify/assert"
@@ -32,6 +30,7 @@ func TestParsing(t *testing.T) {
 
 }
 
+// TODO: actually parsing all fields would be nice...
 func parseOutput(output string) *master_pb.TopologyInfo {
 	lines := strings.Split(output, "\n")
 	var topo *master_pb.TopologyInfo
@@ -45,7 +44,9 @@ func parseOutput(output string) *master_pb.TopologyInfo {
 		switch parts[0] {
 		case "Topology":
 			if topo == nil {
-				topo = &master_pb.TopologyInfo{}
+				topo = &master_pb.TopologyInfo{
+					Id: parts[1],
+				}
 			}
 		case "DataCenter":
 			if dc == nil {
@@ -124,12 +125,3 @@ func parseOutput(output string) *master_pb.TopologyInfo {
 
 	return topo
 }
-
-//go:embed volume.list.txt
-var topoData string
-
-//go:embed volume.list2.txt
-var topoData2 string
-
-//go:embed volume.ecshards.txt
-var topoDataEc string
weed/shell/common_test.go (new file, 20 lines)
@@ -0,0 +1,20 @@
+package shell
+
+import (
+	_ "embed"
+)
+
+//go:embed volume.list.txt
+var topoData string
+
+//go:embed volume.list2.txt
+var topoData2 string
+
+//go:embed volume.ecshards.txt
+var topoDataEc string
+
+var (
+	testTopology1  = parseOutput(topoData)
+	testTopology2  = parseOutput(topoData2)
+	testTopologyEc = parseOutput(topoDataEc)
+)
weed/shell/volume.ecshards.txt
@@ -1,4 +1,4 @@
-Topology volumeSizeLimit:1024 MB hdd(volume:15900/25063 active:15900 free:9163 remote:0)
+Topology test_topo_ec volumeSizeLimit:1024 MB hdd(volume:15900/25063 active:15900 free:9163 remote:0)
   DataCenter DefaultDataCenter hdd(volume:15900/25063 active:15900 free:9163 remote:0)
     Rack rack1 hdd(volume:15900/25063 active:15900 free:9163 remote:0)
       DataNode 172.19.0.10:8702 hdd(volume:7/2225 active:7 free:2225 remote:0)
weed/shell/volume.list.txt
@@ -1,4 +1,4 @@
-Topology volumeSizeLimit:1024 MB hdd(volume:760/7280 active:760 free:6520 remote:0)
+Topology test_topo_1 volumeSizeLimit:1024 MB hdd(volume:760/7280 active:760 free:6520 remote:0)
   DataCenter dc1 hdd(volume:0/0 active:0 free:0 remote:0)
     Rack DefaultRack hdd(volume:0/0 active:0 free:0 remote:0)
     Rack DefaultRack total size:0 file_count:0
weed/shell/volume.list2.txt
@@ -1,4 +1,4 @@
-Topology volumeSizeLimit:1024 MB hdd(volume:15900/25063 active:15900 free:9163 remote:0)
+Topology test_topo_2 volumeSizeLimit:1024 MB hdd(volume:15900/25063 active:15900 free:9163 remote:0)
   DataCenter DefaultDataCenter hdd(volume:15900/25063 active:15900 free:9163 remote:0)
     Rack DefaultRack hdd(volume:15900/25063 active:15900 free:9163 remote:0)
       DataNode 172.19.0.10:8702 hdd(volume:1559/2232 active:1559 free:673 remote:0)