Files
seaweedFS/weed/shell/command_volume_list_test.go
Lisandro Pin 187ef65e8f Humanize output for weed.server by default (#7758)
* Implement a `weed shell` command to return a status overview of the cluster.

Detailed file information will be implemented in a follow-up MR. Note also
that masters are currently not reporting back EC shard sizes correctly, via
`master_pb.VolumeEcShardInformationMessage.shard_sizes`.

F.ex:

```
> status

cluster:
	id:       topo
	status:   LOCKED
	nodes:    10
	topology: 1 DC(s), 1 disk(s) on 1 rack(s)

volumes:
	total:    3 volumes on 1 collections
	max size: 31457280000 bytes
	regular:  2/80 volumes on 6 replicas, 6 writable (100.00%), 0 read-only (0.00%)
	EC:       1 EC volumes on 14 shards (14.00 shards/volume)

storage:
	total:           186024424 bytes
	regular volumes: 186024424 bytes
	EC volumes:      0 bytes
	raw:             558073152 bytes on volume replicas, 0 bytes on EC shard files
```

* Humanize output for `weed.server` by default.

Makes things more readable :)

```
> cluster.status

cluster:
	id:       topo
	status:   LOCKED
	nodes:    10
	topology: 1 DC, 10 disks on 1 rack

volumes:
	total:    3 volumes, 1 collection
	max size: 32 GB
	regular:  2/80 volumes on 6 replicas, 6 writable (100%), 0 read-only (0%)
	EC:       1 EC volume on 14 shards (14 shards/volume)

storage:
	total:           172 MB
	regular volumes: 172 MB
	EC volumes:      0 B
	raw:             516 MB on volume replicas, 0 B on EC shards
```

```
> cluster.status --humanize=false

cluster:
	id:       topo
	status:   LOCKED
	nodes:    10
	topology: 1 DC(s), 10 disk(s) on 1 rack(s)

volumes:
	total:    3 volume(s), 1 collection(s)
	max size: 31457280000 byte(s)
	regular:  2/80 volume(s) on 6 replica(s), 5 writable (83.33%), 1 read-only (16.67%)
	EC:       1 EC volume(s) on 14 shard(s) (14.00 shards/volume)

storage:
	total:           172128072 byte(s)
	regular volumes: 172128072 byte(s)
	EC volumes:      0 byte(s)
	raw:             516384216 byte(s) on volume replicas, 0 byte(s) on EC shards
```

Also adds unit tests, and reshuffles test file handling for clarity.
2025-12-15 11:18:45 -08:00

128 lines
3.5 KiB
Go

package shell
import (
"github.com/seaweedfs/seaweedfs/weed/storage/erasure_coding"
"github.com/seaweedfs/seaweedfs/weed/storage/types"
"github.com/stretchr/testify/assert"
//"google.golang.org/protobuf/proto"
"strconv"
"strings"
"testing"
"github.com/golang/protobuf/proto"
"github.com/seaweedfs/seaweedfs/weed/pb/master_pb"
)
// TestParsing checks that parseOutput rebuilds the expected topology shape
// from the two canned volume.list dumps (topoData and topoData2).
func TestParsing(t *testing.T) {
	first := parseOutput(topoData)
	assert.Equal(t, 5, len(first.DataCenterInfos))

	second := parseOutput(topoData2)
	nodes := second.DataCenterInfos[0].RackInfos[0].DataNodeInfos
	assert.Equal(t, 14, len(nodes))

	defaultDisk := nodes[0].DiskInfos[""]
	assert.Equal(t, 1559, len(defaultDisk.VolumeInfos))
	assert.Equal(t, 6740, len(defaultDisk.EcShardInfos))
}
// TODO: actually parsing all fields would be nice...

// parseOutput rebuilds a master_pb.TopologyInfo from the plain-text topology
// listing produced by the volume.list shell command. Only the fields the
// tests need are recovered; all other information in the dump is dropped.
//
// The listing nests Topology > DataCenter > Rack > DataNode > Disk. The
// parser keeps one cursor pointer per level and toggles it: a nil cursor on
// a section line means "opening" (allocate and attach to the parent), a
// non-nil cursor means "closing" (reset to nil).
// NOTE(review): this assumes every section header line occurs exactly twice
// per section (an opening line and a closing/summary line) — confirm against
// the writer that produces this output.
func parseOutput(output string) *master_pb.TopologyInfo {
	lines := strings.Split(output, "\n")
	var topo *master_pb.TopologyInfo
	var dc *master_pb.DataCenterInfo
	var rack *master_pb.RackInfo
	var dn *master_pb.DataNodeInfo
	var disk *master_pb.DiskInfo
	for _, line := range lines {
		line = strings.TrimSpace(line)
		// parts[0] is the section keyword; an empty line yields parts[0] == "".
		parts := strings.Split(line, " ")
		switch parts[0] {
		case "Topology":
			if topo == nil {
				topo = &master_pb.TopologyInfo{
					Id: parts[1],
				}
			}
		case "DataCenter":
			if dc == nil {
				dc = &master_pb.DataCenterInfo{
					Id: parts[1],
				}
				topo.DataCenterInfos = append(topo.DataCenterInfos, dc)
			} else {
				dc = nil
			}
		case "Rack":
			if rack == nil {
				rack = &master_pb.RackInfo{
					Id: parts[1],
				}
				dc.RackInfos = append(dc.RackInfos, rack)
			} else {
				rack = nil
			}
		case "DataNode":
			if dn == nil {
				dn = &master_pb.DataNodeInfo{
					Id:        parts[1],
					DiskInfos: make(map[string]*master_pb.DiskInfo),
				}
				rack.DataNodeInfos = append(rack.DataNodeInfos, dn)
			} else {
				dn = nil
			}
		case "Disk":
			if disk == nil {
				// A Disk line looks like "Disk hdd(volume:123/456 ...)":
				// the disk type precedes "(", the used volume count sits
				// between ":" and "/", and the max count follows "/".
				// Atoi errors are deliberately ignored, so a count with
				// trailing non-digits (e.g. a closing ")") parses as 0.
				diskType := parts[1][:strings.Index(parts[1], "(")]
				volumeCountStr := parts[1][strings.Index(parts[1], ":")+1 : strings.Index(parts[1], "/")]
				maxVolumeCountStr := parts[1][strings.Index(parts[1], "/")+1:]
				maxVolumeCount, _ := strconv.Atoi(maxVolumeCountStr)
				volumeCount, _ := strconv.Atoi(volumeCountStr)
				disk = &master_pb.DiskInfo{
					Type:           diskType,
					MaxVolumeCount: int64(maxVolumeCount),
					VolumeCount:    int64(volumeCount),
				}
				// Key by the canonical disk-type name. NOTE(review): the test
				// looks disks up under the "" key, so ToDiskType appears to
				// map the default type to the empty string — confirm.
				dn.DiskInfos[types.ToDiskType(diskType).String()] = disk
			} else {
				disk = nil
			}
		case "volume":
			// The remainder of a "volume " line is a proto text-format
			// message; UnmarshalText (deprecated golang/protobuf API)
			// decodes it in place. Decode errors are ignored.
			volumeLine := line[len("volume "):]
			volume := &master_pb.VolumeInformationMessage{}
			proto.UnmarshalText(volumeLine, volume)
			disk.VolumeInfos = append(disk.VolumeInfos, volume)
		case "ec":
			// EC lines ("ec volume id:1 collection:c shards:[0,1,...]") are
			// parsed field-by-field by hand, presumably because the
			// shards:[...] syntax is not valid proto text format.
			ecVolumeLine := line[len("ec volume "):]
			ecShard := &master_pb.VolumeEcShardInformationMessage{}
			for _, part := range strings.Split(ecVolumeLine, " ") {
				if strings.HasPrefix(part, "id:") {
					id, _ := strconv.ParseInt(part[len("id:"):], 10, 64)
					ecShard.Id = uint32(id)
				}
				if strings.HasPrefix(part, "collection:") {
					ecShard.Collection = part[len("collection:"):]
				}
				if strings.HasPrefix(part, "shards:") {
					// Fold the comma-separated shard id list into the
					// ShardBits bitmask stored in EcIndexBits.
					shards := part[len("shards:["):]
					shards = strings.TrimRight(shards, "]")
					shardBits := erasure_coding.ShardBits(0)
					for _, shardId := range strings.Split(shards, ",") {
						sid, _ := strconv.Atoi(shardId)
						shardBits = shardBits.AddShardId(erasure_coding.ShardId(sid))
					}
					ecShard.EcIndexBits = uint32(shardBits)
				}
			}
			disk.EcShardInfos = append(disk.EcShardInfos, ecShard)
		}
	}
	return topo
}