this can compile now!!!

Chris Lu
2021-02-16 02:47:02 -08:00
parent 71f0c19515
commit f8446b42ab
50 changed files with 1686 additions and 1363 deletions

View File

@@ -2,7 +2,7 @@ package topology
import (
"fmt"
"github.com/chrislusf/seaweedfs/weed/storage"
"github.com/chrislusf/seaweedfs/weed/storage/types"
"github.com/chrislusf/seaweedfs/weed/storage/needle"
"github.com/chrislusf/seaweedfs/weed/storage/super_block"
@@ -30,12 +30,12 @@ func (c *Collection) String() string {
return fmt.Sprintf("Name:%s, volumeSizeLimit:%d, storageType2VolumeLayout:%v", c.Name, c.volumeSizeLimit, c.storageType2VolumeLayout)
}
func (c *Collection) GetOrCreateVolumeLayout(rp *super_block.ReplicaPlacement, ttl *needle.TTL, diskType storage.DiskType) *VolumeLayout {
func (c *Collection) GetOrCreateVolumeLayout(rp *super_block.ReplicaPlacement, ttl *needle.TTL, diskType types.DiskType) *VolumeLayout {
keyString := rp.String()
if ttl != nil {
keyString += ttl.String()
}
if diskType != storage.HardDriveType {
if diskType != types.HardDriveType {
keyString += string(diskType)
}
vl := c.storageType2VolumeLayout.Get(keyString, func() interface{} {
@@ -44,12 +44,12 @@ func (c *Collection) GetOrCreateVolumeLayout(rp *super_block.ReplicaPlacement, t
return vl.(*VolumeLayout)
}
func (c *Collection) DeleteVolumeLayout(rp *super_block.ReplicaPlacement, ttl *needle.TTL, diskType storage.DiskType) {
func (c *Collection) DeleteVolumeLayout(rp *super_block.ReplicaPlacement, ttl *needle.TTL, diskType types.DiskType) {
keyString := rp.String()
if ttl != nil {
keyString += ttl.String()
}
if diskType != storage.HardDriveType {
if diskType != types.HardDriveType {
keyString += string(diskType)
}
c.storageType2VolumeLayout.Delete(keyString)
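
A minimal standalone sketch of the layout-key scheme above (hypothetical helper; it assumes, as the omitted-key branch suggests, that the hard-drive disk type is the empty string so legacy keys stay unchanged):

package main

import "fmt"

// layoutKey mirrors GetOrCreateVolumeLayout's key composition:
// replica placement, then optional TTL, then any non-default disk type.
func layoutKey(rp, ttl, diskType string) string {
    key := rp
    if ttl != "" {
        key += ttl
    }
    if diskType != "" { // assumed: types.HardDriveType == ""
        key += diskType
    }
    return key
}

func main() {
    fmt.Println(layoutKey("000", "", ""))      // "000": hard drive, no TTL
    fmt.Println(layoutKey("001", "3d", "ssd")) // "0013dssd"
}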

View File

@@ -1,6 +1,8 @@
package topology
import "github.com/chrislusf/seaweedfs/weed/pb/master_pb"
import (
"github.com/chrislusf/seaweedfs/weed/pb/master_pb"
)
type DataCenter struct {
NodeImpl
@@ -10,6 +12,7 @@ func NewDataCenter(id string) *DataCenter {
dc := &DataCenter{}
dc.id = NodeId(id)
dc.nodeType = "DataCenter"
dc.diskUsages = newDiskUsages()
dc.children = make(map[NodeId]Node)
dc.NodeImpl.value = dc
return dc
@@ -30,9 +33,6 @@ func (dc *DataCenter) GetOrCreateRack(rackName string) *Rack {
func (dc *DataCenter) ToMap() interface{} {
m := make(map[string]interface{})
m["Id"] = dc.Id()
m["Max"] = dc.GetMaxVolumeCount()
m["MaxSsd"] = dc.GetMaxSsdVolumeCount()
m["Free"] = dc.FreeSpace()
var racks []interface{}
for _, c := range dc.Children() {
rack := c.(*Rack)
@@ -45,13 +45,6 @@ func (dc *DataCenter) ToMap() interface{} {
func (dc *DataCenter) ToDataCenterInfo() *master_pb.DataCenterInfo {
m := &master_pb.DataCenterInfo{
Id: string(dc.Id()),
VolumeCount: uint64(dc.GetVolumeCount()),
MaxVolumeCount: uint64(dc.GetMaxVolumeCount()),
MaxSsdVolumeCount: uint64(dc.GetMaxSsdVolumeCount()),
SsdVolumeCount: uint64(dc.GetSsdVolumeCount()),
FreeVolumeCount: uint64(dc.FreeSpace()),
ActiveVolumeCount: uint64(dc.GetActiveVolumeCount()),
RemoteVolumeCount: uint64(dc.GetRemoteVolumeCount()),
}
for _, c := range dc.Children() {
rack := c.(*Rack)

View File

@@ -2,13 +2,11 @@ package topology
import (
"fmt"
"github.com/chrislusf/seaweedfs/weed/pb/master_pb"
"github.com/chrislusf/seaweedfs/weed/storage/needle"
"github.com/chrislusf/seaweedfs/weed/storage/types"
"github.com/chrislusf/seaweedfs/weed/util"
"strconv"
"sync"
"github.com/chrislusf/seaweedfs/weed/pb/master_pb"
"github.com/chrislusf/seaweedfs/weed/storage/erasure_coding"
"github.com/chrislusf/seaweedfs/weed/storage/needle"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/storage"
@@ -16,29 +14,26 @@ import (
type DataNode struct {
NodeImpl
volumes map[needle.VolumeId]storage.VolumeInfo
Ip string
Port int
PublicUrl string
LastSeen int64 // unix time in seconds
ecShards map[needle.VolumeId]*erasure_coding.EcVolumeInfo
ecShardsLock sync.RWMutex
}
func NewDataNode(id string) *DataNode {
s := &DataNode{}
s.id = NodeId(id)
s.nodeType = "DataNode"
s.volumes = make(map[needle.VolumeId]storage.VolumeInfo)
s.ecShards = make(map[needle.VolumeId]*erasure_coding.EcVolumeInfo)
s.NodeImpl.value = s
return s
dn := &DataNode{}
dn.id = NodeId(id)
dn.nodeType = "DataNode"
dn.diskUsages = newDiskUsages()
dn.children = make(map[NodeId]Node)
dn.NodeImpl.value = dn
return dn
}
func (dn *DataNode) String() string {
dn.RLock()
defer dn.RUnlock()
return fmt.Sprintf("Node:%s, volumes:%v, Ip:%s, Port:%d, PublicUrl:%s", dn.NodeImpl.String(), dn.volumes, dn.Ip, dn.Port, dn.PublicUrl)
return fmt.Sprintf("Node:%s, Ip:%s, Port:%d, PublicUrl:%s", dn.NodeImpl.String(), dn.Ip, dn.Port, dn.PublicUrl)
}
func (dn *DataNode) AddOrUpdateVolume(v storage.VolumeInfo) (isNew, isChangedRO bool) {
@@ -47,37 +42,23 @@ func (dn *DataNode) AddOrUpdateVolume(v storage.VolumeInfo) (isNew, isChangedRO
return dn.doAddOrUpdateVolume(v)
}
func (dn *DataNode) doAddOrUpdateVolume(v storage.VolumeInfo) (isNew, isChangedRO bool) {
if oldV, ok := dn.volumes[v.Id]; !ok {
dn.volumes[v.Id] = v
if v.DiskType == storage.SsdType {
dn.UpAdjustSsdVolumeCountDelta(1)
} else {
dn.UpAdjustVolumeCountDelta(1)
}
if v.IsRemote() {
dn.UpAdjustRemoteVolumeCountDelta(1)
}
if !v.ReadOnly {
dn.UpAdjustActiveVolumeCountDelta(1)
}
dn.UpAdjustMaxVolumeId(v.Id)
isNew = true
} else {
if oldV.IsRemote() != v.IsRemote() {
if v.IsRemote() {
dn.UpAdjustRemoteVolumeCountDelta(1)
}
if oldV.IsRemote() {
dn.UpAdjustRemoteVolumeCountDelta(-1)
}
}
isChangedRO = dn.volumes[v.Id].ReadOnly != v.ReadOnly
dn.volumes[v.Id] = v
func (dn *DataNode) getOrCreateDisk(diskType string) *Disk {
c, found := dn.children[NodeId(diskType)]
if !found {
c = NewDisk(diskType)
dn.LinkChildNode(c)
}
return
disk := c.(*Disk)
return disk
}
func (dn *DataNode) doAddOrUpdateVolume(v storage.VolumeInfo) (isNew, isChangedRO bool) {
disk := dn.getOrCreateDisk(v.DiskType)
return disk.AddOrUpdateVolume(v)
}
// UpdateVolumes detects new/deleted/changed volumes on a volume server
// used in master to notify master clients of these changes.
func (dn *DataNode) UpdateVolumes(actualVolumes []storage.VolumeInfo) (newVolumes, deletedVolumes, changeRO []storage.VolumeInfo) {
actualVolumeMap := make(map[needle.VolumeId]storage.VolumeInfo)
@@ -88,22 +69,26 @@ func (dn *DataNode) UpdateVolumes(actualVolumes []storage.VolumeInfo) (newVolume
dn.Lock()
defer dn.Unlock()
for vid, v := range dn.volumes {
existingVolumes := dn.getVolumes()
for _, v := range existingVolumes {
vid := v.Id
if _, ok := actualVolumeMap[vid]; !ok {
glog.V(0).Infoln("Deleting volume id:", vid)
delete(dn.volumes, vid)
disk := dn.getOrCreateDisk(v.DiskType)
delete(disk.volumes, vid)
deletedVolumes = append(deletedVolumes, v)
if v.DiskType == storage.SsdType {
dn.UpAdjustSsdVolumeCountDelta(-1)
} else {
dn.UpAdjustVolumeCountDelta(-1)
}
deltaDiskUsages := newDiskUsages()
deltaDiskUsage := deltaDiskUsages.getOrCreateDisk(types.DiskType(v.DiskType))
deltaDiskUsage.volumeCount = -1
if v.IsRemote() {
dn.UpAdjustRemoteVolumeCountDelta(-1)
deltaDiskUsage.remoteVolumeCount = -1
}
if !v.ReadOnly {
dn.UpAdjustActiveVolumeCountDelta(-1)
deltaDiskUsage.activeVolumeCount = -1
}
disk.UpAdjustDiskUsageDelta(deltaDiskUsages)
}
}
for _, v := range actualVolumes {
@@ -123,18 +108,19 @@ func (dn *DataNode) DeltaUpdateVolumes(newVolumes, deletedVolumes []storage.Volu
defer dn.Unlock()
for _, v := range deletedVolumes {
delete(dn.volumes, v.Id)
if v.DiskType == storage.SsdType {
dn.UpAdjustSsdVolumeCountDelta(-1)
} else {
dn.UpAdjustVolumeCountDelta(-1)
}
disk := dn.getOrCreateDisk(v.DiskType)
delete(disk.volumes, v.Id)
deltaDiskUsages := newDiskUsages()
deltaDiskUsage := deltaDiskUsages.getOrCreateDisk(types.DiskType(v.DiskType))
deltaDiskUsage.volumeCount = -1
if v.IsRemote() {
dn.UpAdjustRemoteVolumeCountDelta(-1)
deltaDiskUsage.remoteVolumeCount = -1
}
if !v.ReadOnly {
dn.UpAdjustActiveVolumeCountDelta(-1)
deltaDiskUsage.activeVolumeCount = -1
}
disk.UpAdjustDiskUsageDelta(deltaDiskUsages)
}
for _, v := range newVolumes {
dn.doAddOrUpdateVolume(v)
@@ -144,18 +130,26 @@ func (dn *DataNode) DeltaUpdateVolumes(newVolumes, deletedVolumes []storage.Volu
func (dn *DataNode) GetVolumes() (ret []storage.VolumeInfo) {
dn.RLock()
for _, v := range dn.volumes {
ret = append(ret, v)
for _, c := range dn.children {
disk := c.(*Disk)
ret = append(ret, disk.GetVolumes()...)
}
dn.RUnlock()
return ret
}
func (dn *DataNode) GetVolumesById(id needle.VolumeId) (storage.VolumeInfo, error) {
func (dn *DataNode) GetVolumesById(id needle.VolumeId) (vInfo storage.VolumeInfo, err error) {
dn.RLock()
defer dn.RUnlock()
vInfo, ok := dn.volumes[id]
if ok {
found := false
for _, c := range dn.children {
disk := c.(*Disk)
vInfo, found = disk.volumes[id]
if found {
break
}
}
if found {
return vInfo, nil
} else {
return storage.VolumeInfo{}, fmt.Errorf("volumeInfo not found")
@@ -193,31 +187,18 @@ func (dn *DataNode) Url() string {
func (dn *DataNode) ToMap() interface{} {
ret := make(map[string]interface{})
ret["Url"] = dn.Url()
ret["Volumes"] = dn.GetVolumeCount() + dn.GetSsdVolumeCount()
ret["VolumeIds"] = dn.GetVolumeIds()
ret["EcShards"] = dn.GetEcShardCount()
ret["Max"] = dn.GetMaxVolumeCount() + dn.GetMaxSsdVolumeCount()
ret["Free"] = dn.FreeSpace()
ret["PublicUrl"] = dn.PublicUrl
ret["Disks"] = dn.diskUsages.ToMap()
return ret
}
func (dn *DataNode) ToDataNodeInfo() *master_pb.DataNodeInfo {
m := &master_pb.DataNodeInfo{
Id: string(dn.Id()),
VolumeCount: uint64(dn.GetVolumeCount()),
MaxVolumeCount: uint64(dn.GetMaxVolumeCount()),
MaxSsdVolumeCount: uint64(dn.GetMaxSsdVolumeCount()),
SsdVolumeCount: uint64(dn.GetSsdVolumeCount()),
FreeVolumeCount: uint64(dn.FreeSpace()),
ActiveVolumeCount: uint64(dn.GetActiveVolumeCount()),
RemoteVolumeCount: uint64(dn.GetRemoteVolumeCount()),
}
for _, v := range dn.GetVolumes() {
m.VolumeInfos = append(m.VolumeInfos, v.ToVolumeInformationMessage())
}
for _, ecv := range dn.GetEcShards() {
m.EcShardInfos = append(m.EcShardInfos, ecv.ToVolumeEcShardInformationMessage())
for _, c := range dn.Children() {
disk := c.(*Disk)
m.DiskInfos[string(disk.Id())] = disk.ToDiskInfo()
}
return m
}
@@ -226,11 +207,21 @@ func (dn *DataNode) ToDataNodeInfo() *master_pb.DataNodeInfo {
func (dn *DataNode) GetVolumeIds() string {
dn.RLock()
defer dn.RUnlock()
ids := make([]int, 0, len(dn.volumes))
existingVolumes := dn.getVolumes()
ids := make([]int, 0, len(existingVolumes))
for k := range dn.volumes {
for _, vInfo := range existingVolumes {
ids = append(ids, int(vInfo.Id))
}
return util.HumanReadableIntsMax(100, ids...)
}
func (dn *DataNode) getVolumes() []storage.VolumeInfo {
var existingVolumes []storage.VolumeInfo
for _, c := range dn.children {
disk := c.(*Disk)
existingVolumes = append(existingVolumes, disk.GetVolumes()...)
}
return existingVolumes
}
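
The file above repeats one pattern for every mutation: build a DiskUsages delta, set the signed counters, and hand it to the owning Disk, which propagates it up the tree. A package-internal sketch of that pattern (assumes it compiles inside package topology; all names except the helper itself are from this commit):

// applyVolumeRemoved is a hypothetical helper showing the delta pattern:
// negative counts flow from the Disk up through DataNode, Rack,
// DataCenter and Topology via UpAdjustDiskUsageDelta.
func applyVolumeRemoved(disk *Disk, diskType types.DiskType, wasWritable bool) {
    deltaDiskUsages := newDiskUsages()
    delta := deltaDiskUsages.getOrCreateDisk(diskType)
    delta.volumeCount = -1
    if wasWritable {
        delta.activeVolumeCount = -1
    }
    disk.UpAdjustDiskUsageDelta(deltaDiskUsages)
}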

View File

@@ -3,12 +3,14 @@ package topology
import (
"github.com/chrislusf/seaweedfs/weed/storage/erasure_coding"
"github.com/chrislusf/seaweedfs/weed/storage/needle"
"github.com/chrislusf/seaweedfs/weed/storage/types"
)
func (dn *DataNode) GetEcShards() (ret []*erasure_coding.EcVolumeInfo) {
dn.RLock()
for _, ecVolumeInfo := range dn.ecShards {
ret = append(ret, ecVolumeInfo)
for _, c := range dn.children {
disk := c.(*Disk)
ret = append(ret, disk.GetEcShards()...)
}
dn.RUnlock()
return ret
@@ -21,10 +23,17 @@ func (dn *DataNode) UpdateEcShards(actualShards []*erasure_coding.EcVolumeInfo)
actualEcShardMap[ecShards.VolumeId] = ecShards
}
existingEcShards := dn.GetEcShards()
// work out the newShards and deletedShards
var newShardCount, deletedShardCount int
dn.ecShardsLock.RLock()
for vid, ecShards := range dn.ecShards {
for _, ecShards := range existingEcShards {
disk := dn.getOrCreateDisk(ecShards.DiskType)
deltaDiskUsages := newDiskUsages()
deltaDiskUsage := deltaDiskUsages.getOrCreateDisk(types.DiskType(ecShards.DiskType))
vid := ecShards.VolumeId
if actualEcShards, ok := actualEcShardMap[vid]; !ok {
// ec shards registered on dn but missing from the new set of ec shards
deletedShards = append(deletedShards, ecShards)
@@ -42,26 +51,61 @@ func (dn *DataNode) UpdateEcShards(actualShards []*erasure_coding.EcVolumeInfo)
deletedShardCount += d.ShardIdCount()
}
}
deltaDiskUsage.ecShardCount = int64(newShardCount - deletedShardCount)
disk.UpAdjustDiskUsageDelta(deltaDiskUsages)
}
for _, ecShards := range actualShards {
if _, found := dn.ecShards[ecShards.VolumeId]; !found {
disk := dn.getOrCreateDisk(ecShards.DiskType)
deltaDiskUsages := newDiskUsages()
deltaDiskUsage := deltaDiskUsages.getOrCreateDisk(types.DiskType(ecShards.DiskType))
if !dn.hasEcShards(ecShards.VolumeId) {
newShards = append(newShards, ecShards)
newShardCount += ecShards.ShardIdCount()
deltaDiskUsage.ecShardCount = int64(ecShards.ShardIdCount())
disk.UpAdjustDiskUsageDelta(deltaDiskUsages)
}
}
dn.ecShardsLock.RUnlock()
if len(newShards) > 0 || len(deletedShards) > 0 {
// if changed, set to the new ec shard map
dn.ecShardsLock.Lock()
dn.ecShards = actualEcShardMap
dn.UpAdjustEcShardCountDelta(int64(newShardCount - deletedShardCount))
dn.ecShardsLock.Unlock()
dn.doUpdateEcShards(actualShards)
}
return
}
func (dn *DataNode) hasEcShards(volumeId needle.VolumeId) (found bool) {
dn.RLock()
defer dn.RUnlock()
for _, c := range dn.children {
disk := c.(*Disk)
_, found = disk.ecShards[volumeId]
if found {
return
}
}
return
}
func (dn *DataNode) doUpdateEcShards(actualShards []*erasure_coding.EcVolumeInfo) {
dn.Lock()
for _, c := range dn.children {
disk := c.(*Disk)
disk.ecShards = make(map[needle.VolumeId]*erasure_coding.EcVolumeInfo)
}
for _, shard := range actualShards {
disk := dn.getOrCreateDisk(shard.DiskType)
disk.ecShards[shard.VolumeId] = shard
}
dn.Unlock()
}
func (dn *DataNode) DeltaUpdateEcShards(newShards, deletedShards []*erasure_coding.EcVolumeInfo) {
for _, newShard := range newShards {
@@ -75,61 +119,25 @@ func (dn *DataNode) DeltaUpdateEcShards(newShards, deletedShards []*erasure_codi
}
func (dn *DataNode) AddOrUpdateEcShard(s *erasure_coding.EcVolumeInfo) {
dn.ecShardsLock.Lock()
defer dn.ecShardsLock.Unlock()
delta := 0
if existing, ok := dn.ecShards[s.VolumeId]; !ok {
dn.ecShards[s.VolumeId] = s
delta = s.ShardBits.ShardIdCount()
} else {
oldCount := existing.ShardBits.ShardIdCount()
existing.ShardBits = existing.ShardBits.Plus(s.ShardBits)
delta = existing.ShardBits.ShardIdCount() - oldCount
}
dn.UpAdjustEcShardCountDelta(int64(delta))
disk := dn.getOrCreateDisk(s.DiskType)
disk.AddOrUpdateEcShard(s)
}
func (dn *DataNode) DeleteEcShard(s *erasure_coding.EcVolumeInfo) {
dn.ecShardsLock.Lock()
defer dn.ecShardsLock.Unlock()
disk := dn.getOrCreateDisk(s.DiskType)
disk.DeleteEcShard(s)
}
if existing, ok := dn.ecShards[s.VolumeId]; ok {
oldCount := existing.ShardBits.ShardIdCount()
existing.ShardBits = existing.ShardBits.Minus(s.ShardBits)
delta := existing.ShardBits.ShardIdCount() - oldCount
dn.UpAdjustEcShardCountDelta(int64(delta))
if existing.ShardBits.ShardIdCount() == 0 {
delete(dn.ecShards, s.VolumeId)
func (dn *DataNode) HasVolumesById(volumeId needle.VolumeId) (hasVolumeId bool) {
dn.RLock()
defer dn.RUnlock()
for _, c := range dn.children {
disk := c.(*Disk)
if disk.HasVolumesById(volumeId) {
return true
}
}
}
func (dn *DataNode) HasVolumesById(id needle.VolumeId) (hasVolumeId bool) {
// check whether normal volumes has this volume id
dn.RLock()
_, ok := dn.volumes[id]
if ok {
hasVolumeId = true
}
dn.RUnlock()
if hasVolumeId {
return
}
// check whether ec shards has this volume id
dn.ecShardsLock.RLock()
_, ok = dn.ecShards[id]
if ok {
hasVolumeId = true
}
dn.ecShardsLock.RUnlock()
return
return false
}

weed/topology/disk.go (new file, 275 lines)
View File

@@ -0,0 +1,275 @@
package topology
import (
"fmt"
"github.com/chrislusf/seaweedfs/weed/storage/types"
"github.com/chrislusf/seaweedfs/weed/util"
"sync"
"github.com/chrislusf/seaweedfs/weed/pb/master_pb"
"github.com/chrislusf/seaweedfs/weed/storage/erasure_coding"
"github.com/chrislusf/seaweedfs/weed/storage/needle"
"github.com/chrislusf/seaweedfs/weed/storage"
)
type Disk struct {
NodeImpl
volumes map[needle.VolumeId]storage.VolumeInfo
ecShards map[needle.VolumeId]*erasure_coding.EcVolumeInfo
ecShardsLock sync.RWMutex
}
func NewDisk(diskType string) *Disk {
s := &Disk{}
s.id = NodeId(diskType)
s.nodeType = "Disk"
s.diskUsages = newDiskUsages()
s.volumes = make(map[needle.VolumeId]storage.VolumeInfo, 2)
s.ecShards = make(map[needle.VolumeId]*erasure_coding.EcVolumeInfo, 2)
s.NodeImpl.value = s
return s
}
type DiskUsages struct {
sync.RWMutex
usages map[types.DiskType]*DiskUsageCounts
}
func newDiskUsages() *DiskUsages {
return &DiskUsages{
usages: make(map[types.DiskType]*DiskUsageCounts),
}
}
func (d *DiskUsages) negative() *DiskUsages {
d.RLock()
defer d.RUnlock()
t := newDiskUsages()
for diskType, b := range d.usages {
a := t.getOrCreateDisk(diskType)
a.volumeCount = -b.volumeCount
a.remoteVolumeCount = -b.remoteVolumeCount
a.activeVolumeCount = -b.activeVolumeCount
a.ecShardCount = -b.ecShardCount
a.maxVolumeCount = -b.maxVolumeCount
}
return t
}
func (d *DiskUsages) ToMap() interface{} {
d.RLock()
defer d.RUnlock()
ret := make(map[string]interface{})
for diskType, diskUsage := range d.usages {
ret[diskType.String()] = diskUsage.ToMap()
}
return ret
}
func (d *DiskUsages) FreeSpace() (freeSpace int64) {
d.RLock()
defer d.RUnlock()
for _, diskUsage := range d.usages {
freeSpace += diskUsage.FreeSpace()
}
return
}
func (d *DiskUsages) GetMaxVolumeCount() (maxVolumeCount int64) {
d.RLock()
defer d.RUnlock()
for _, diskUsage := range d.usages {
maxVolumeCount += diskUsage.maxVolumeCount
}
return
}
type DiskUsageCounts struct {
volumeCount int64
remoteVolumeCount int64
activeVolumeCount int64
ecShardCount int64
maxVolumeCount int64
}
func (a *DiskUsageCounts) addDiskUsageCounts(b *DiskUsageCounts) {
a.volumeCount += b.volumeCount
a.remoteVolumeCount += b.remoteVolumeCount
a.activeVolumeCount += b.activeVolumeCount
a.ecShardCount += b.ecShardCount
a.maxVolumeCount += b.maxVolumeCount
}
func (a *DiskUsageCounts) FreeSpace() int64 {
freeVolumeSlotCount := a.maxVolumeCount + a.remoteVolumeCount - a.volumeCount
if a.ecShardCount > 0 {
freeVolumeSlotCount = freeVolumeSlotCount - a.ecShardCount/erasure_coding.DataShardsCount - 1
}
return freeVolumeSlotCount
}
func (a *DiskUsageCounts) minus(b *DiskUsageCounts) *DiskUsageCounts {
return &DiskUsageCounts{
volumeCount: a.volumeCount - b.volumeCount,
remoteVolumeCount: a.remoteVolumeCount - b.remoteVolumeCount,
activeVolumeCount: a.activeVolumeCount - b.activeVolumeCount,
ecShardCount: a.ecShardCount - b.ecShardCount,
maxVolumeCount: a.maxVolumeCount - b.maxVolumeCount,
}
}
func (diskUsage *DiskUsageCounts) ToMap() interface{} {
ret := make(map[string]interface{})
ret["Volumes"] = diskUsage.volumeCount
ret["EcShards"] = diskUsage.ecShardCount
ret["Max"] = diskUsage.maxVolumeCount
ret["Free"] = diskUsage.FreeSpace()
return ret
}
func (du *DiskUsages) getOrCreateDisk(diskType types.DiskType) *DiskUsageCounts {
du.Lock()
defer du.Unlock()
t, found := du.usages[diskType]
if found {
return t
}
t = &DiskUsageCounts{}
du.usages[diskType] = t
return t
}
func (d *Disk) String() string {
d.RLock()
defer d.RUnlock()
return fmt.Sprintf("Disk:%s, volumes:%v, ecShards:%v, Port:%d, PublicUrl:%s", d.NodeImpl.String(), d.volumes, d.ecShards)
}
func (d *Disk) AddOrUpdateVolume(v storage.VolumeInfo) (isNew, isChangedRO bool) {
d.Lock()
defer d.Unlock()
return d.doAddOrUpdateVolume(v)
}
func (d *Disk) doAddOrUpdateVolume(v storage.VolumeInfo) (isNew, isChangedRO bool) {
deltaDiskUsages := newDiskUsages()
deltaDiskUsage := deltaDiskUsages.getOrCreateDisk(types.DiskType(v.DiskType))
if oldV, ok := d.volumes[v.Id]; !ok {
d.volumes[v.Id] = v
deltaDiskUsage.volumeCount = 1
if v.IsRemote() {
deltaDiskUsage.remoteVolumeCount = 1
}
if !v.ReadOnly {
deltaDiskUsage.activeVolumeCount = 1
}
d.UpAdjustMaxVolumeId(v.Id)
d.UpAdjustDiskUsageDelta(deltaDiskUsages)
isNew = true
} else {
if oldV.IsRemote() != v.IsRemote() {
if v.IsRemote() {
deltaDiskUsage.remoteVolumeCount = 1
}
if oldV.IsRemote() {
deltaDiskUsage.remoteVolumeCount = -1
}
d.UpAdjustDiskUsageDelta(deltaDiskUsages)
}
isChangedRO = d.volumes[v.Id].ReadOnly != v.ReadOnly
d.volumes[v.Id] = v
}
return
}
func (d *Disk) GetVolumes() (ret []storage.VolumeInfo) {
d.RLock()
for _, v := range d.volumes {
ret = append(ret, v)
}
d.RUnlock()
return ret
}
func (d *Disk) GetVolumesById(id needle.VolumeId) (storage.VolumeInfo, error) {
d.RLock()
defer d.RUnlock()
vInfo, ok := d.volumes[id]
if ok {
return vInfo, nil
} else {
return storage.VolumeInfo{}, fmt.Errorf("volumeInfo not found")
}
}
func (d *Disk) GetDataCenter() *DataCenter {
dn := d.Parent()
rack := dn.Parent()
dcNode := rack.Parent()
dcValue := dcNode.GetValue()
return dcValue.(*DataCenter)
}
func (d *Disk) GetRack() *Rack {
return d.Parent().Parent().(*NodeImpl).value.(*Rack)
}
func (d *Disk) GetTopology() *Topology {
p := d.Parent()
for p.Parent() != nil {
p = p.Parent()
}
t := p.GetValue().(*Topology)
return t
}
func (d *Disk) ToMap() interface{} {
ret := make(map[string]interface{})
diskUsage := d.diskUsages.getOrCreateDisk(types.DiskType(d.Id()))
ret["Volumes"] = diskUsage.volumeCount
ret["VolumeIds"] = d.GetVolumeIds()
ret["EcShards"] = diskUsage.ecShardCount
ret["Max"] = diskUsage.maxVolumeCount
ret["Free"] = d.FreeSpace()
return ret
}
func (d *Disk) FreeSpace() int64 {
t := d.diskUsages.getOrCreateDisk(types.DiskType(d.Id()))
return t.FreeSpace()
}
func (d *Disk) ToDiskInfo() *master_pb.DiskInfo {
diskUsage := d.diskUsages.getOrCreateDisk(types.DiskType(d.Id()))
m := &master_pb.DiskInfo{
Type: string(d.Id()),
VolumeCount: uint64(diskUsage.volumeCount),
MaxVolumeCount: uint64(diskUsage.maxVolumeCount),
FreeVolumeCount: uint64(d.FreeSpace()),
ActiveVolumeCount: uint64(diskUsage.activeVolumeCount),
RemoteVolumeCount: uint64(diskUsage.remoteVolumeCount),
}
for _, v := range d.GetVolumes() {
m.VolumeInfos = append(m.VolumeInfos, v.ToVolumeInformationMessage())
}
for _, ecv := range d.GetEcShards() {
m.EcShardInfos = append(m.EcShardInfos, ecv.ToVolumeEcShardInformationMessage())
}
return m
}
// GetVolumeIds returns the human-readable volume ids, capped at 100 entries.
func (d *Disk) GetVolumeIds() string {
d.RLock()
defer d.RUnlock()
ids := make([]int, 0, len(d.volumes))
for k := range d.volumes {
ids = append(ids, int(k))
}
return util.HumanReadableIntsMax(100, ids...)
}
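
A standalone sketch of the FreeSpace arithmetic above (it assumes erasure_coding.DataShardsCount is 10, as in SeaweedFS; one extra slot is reserved whenever any ec shards are present):

package main

import "fmt"

const dataShardsCount = 10 // assumed value of erasure_coding.DataShardsCount

// freeSlots mirrors DiskUsageCounts.FreeSpace: remote volumes do not
// consume local slots, and ec shards consume slots in groups of data shards.
func freeSlots(maxVolumes, remoteVolumes, volumes, ecShards int64) int64 {
    free := maxVolumes + remoteVolumes - volumes
    if ecShards > 0 {
        free = free - ecShards/dataShardsCount - 1
    }
    return free
}

func main() {
    fmt.Println(freeSlots(100, 0, 60, 25)) // 100-60 - 25/10 - 1 = 37
}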

weed/topology/disk_ec.go (new file, 84 lines)
View File

@@ -0,0 +1,84 @@
package topology
import (
"github.com/chrislusf/seaweedfs/weed/storage/erasure_coding"
"github.com/chrislusf/seaweedfs/weed/storage/needle"
"github.com/chrislusf/seaweedfs/weed/storage/types"
)
func (d *Disk) GetEcShards() (ret []*erasure_coding.EcVolumeInfo) {
d.RLock()
for _, ecVolumeInfo := range d.ecShards {
ret = append(ret, ecVolumeInfo)
}
d.RUnlock()
return ret
}
func (d *Disk) AddOrUpdateEcShard(s *erasure_coding.EcVolumeInfo) {
d.ecShardsLock.Lock()
defer d.ecShardsLock.Unlock()
delta := 0
if existing, ok := d.ecShards[s.VolumeId]; !ok {
d.ecShards[s.VolumeId] = s
delta = s.ShardBits.ShardIdCount()
} else {
oldCount := existing.ShardBits.ShardIdCount()
existing.ShardBits = existing.ShardBits.Plus(s.ShardBits)
delta = existing.ShardBits.ShardIdCount() - oldCount
}
deltaDiskUsages := newDiskUsages()
deltaDiskUsage := deltaDiskUsages.getOrCreateDisk(types.DiskType(d.Id()))
deltaDiskUsage.ecShardCount = int64(delta)
d.UpAdjustDiskUsageDelta(deltaDiskUsages)
}
func (d *Disk) DeleteEcShard(s *erasure_coding.EcVolumeInfo) {
d.ecShardsLock.Lock()
defer d.ecShardsLock.Unlock()
if existing, ok := d.ecShards[s.VolumeId]; ok {
oldCount := existing.ShardBits.ShardIdCount()
existing.ShardBits = existing.ShardBits.Minus(s.ShardBits)
delta := existing.ShardBits.ShardIdCount() - oldCount
deltaDiskUsages := newDiskUsages()
deltaDiskUsage := deltaDiskUsages.getOrCreateDisk(types.DiskType(d.Id()))
deltaDiskUsage.ecShardCount = int64(delta)
d.UpAdjustDiskUsageDelta(deltaDiskUsages)
if existing.ShardBits.ShardIdCount() == 0 {
delete(d.ecShards, s.VolumeId)
}
}
}
func (d *Disk) HasVolumesById(id needle.VolumeId) (hasVolumeId bool) {
// check whether normal volumes has this volume id
d.RLock()
_, ok := d.volumes[id]
if ok {
hasVolumeId = true
}
d.RUnlock()
if hasVolumeId {
return
}
// check whether ec shards has this volume id
d.ecShardsLock.RLock()
_, ok = d.ecShards[id]
if ok {
hasVolumeId = true
}
d.ecShardsLock.RUnlock()
return
}
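
A standalone sketch of the shard-count deltas used in AddOrUpdateEcShard and DeleteEcShard above (shardBits here is a hypothetical stand-in for erasure_coding.ShardBits; Plus and Minus are assumed to be bitwise union and clear):

package main

import (
    "fmt"
    "math/bits"
)

type shardBits uint32

func (b shardBits) count() int                  { return bits.OnesCount32(uint32(b)) }
func (b shardBits) plus(o shardBits) shardBits  { return b | o }
func (b shardBits) minus(o shardBits) shardBits { return b &^ o }

func main() {
    existing := shardBits(0b001111) // shards 0-3
    incoming := shardBits(0b110000) // shards 4-5
    merged := existing.plus(incoming)
    fmt.Println(merged.count() - existing.count()) // +2: the ecShardCount delta
    removed := merged.minus(shardBits(0b000011))
    fmt.Println(removed.count() - merged.count()) // -2: delta after deletion
}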

View File

@@ -2,40 +2,25 @@ package topology
import (
"errors"
"github.com/chrislusf/seaweedfs/weed/storage"
"math/rand"
"strings"
"sync"
"sync/atomic"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/storage/erasure_coding"
"github.com/chrislusf/seaweedfs/weed/storage/needle"
"github.com/chrislusf/seaweedfs/weed/storage/types"
"math/rand"
"strings"
"sync"
)
type NodeId string
type Node interface {
Id() NodeId
String() string
FreeSpace() int64
AvailableSpaceFor(option *VolumeGrowOption) int64
ReserveOneVolume(r int64, option *VolumeGrowOption) (*DataNode, error)
UpAdjustMaxVolumeCountDelta(maxVolumeCountDelta int64)
UpAdjustMaxSsdVolumeCountDelta(maxSsdVolumeCountDelta int64)
UpAdjustVolumeCountDelta(volumeCountDelta int64)
UpAdjustSsdVolumeCountDelta(ssdVolumeCountDelta int64)
UpAdjustRemoteVolumeCountDelta(remoteVolumeCountDelta int64)
UpAdjustEcShardCountDelta(ecShardCountDelta int64)
UpAdjustActiveVolumeCountDelta(activeVolumeCountDelta int64)
UpAdjustDiskUsageDelta(deltaDiskUsages *DiskUsages)
UpAdjustMaxVolumeId(vid needle.VolumeId)
GetDiskUsages() *DiskUsages
GetVolumeCount() int64
GetSsdVolumeCount() int64
GetEcShardCount() int64
GetActiveVolumeCount() int64
GetRemoteVolumeCount() int64
GetMaxVolumeCount() int64
GetMaxSsdVolumeCount() int64
GetMaxVolumeId() needle.VolumeId
SetParent(Node)
LinkChildNode(node Node)
@@ -51,24 +36,22 @@ type Node interface {
GetValue() interface{} //get reference to the topology,dc,rack,datanode
}
type NodeImpl struct {
volumeCount int64
remoteVolumeCount int64
ssdVolumeCount int64
activeVolumeCount int64
ecShardCount int64
maxVolumeCount int64
maxSsdVolumeCount int64
id NodeId
parent Node
sync.RWMutex // lock children
children map[NodeId]Node
maxVolumeId needle.VolumeId
diskUsages *DiskUsages
id NodeId
parent Node
sync.RWMutex // lock children
children map[NodeId]Node
maxVolumeId needle.VolumeId
//for rack, data center, topology
nodeType string
value interface{}
}
func (n *NodeImpl) GetDiskUsages() *DiskUsages {
return n.diskUsages
}
// the first node must satisfy filterFirstNodeFn(); the remaining nodes must each have one free slot
func (n *NodeImpl) PickNodesByWeight(numberOfNodes int, option *VolumeGrowOption, filterFirstNodeFn func(dn Node) error) (firstNode Node, restNodes []Node, err error) {
var totalWeights int64
@@ -150,20 +133,14 @@ func (n *NodeImpl) String() string {
func (n *NodeImpl) Id() NodeId {
return n.id
}
func (n *NodeImpl) AvailableSpaceFor(option *VolumeGrowOption) int64 {
freeVolumeSlotCount := n.maxVolumeCount + n.remoteVolumeCount - n.volumeCount
if option.DiskType == storage.SsdType {
freeVolumeSlotCount = n.maxSsdVolumeCount - n.ssdVolumeCount
}
if n.ecShardCount > 0 {
freeVolumeSlotCount = freeVolumeSlotCount - n.ecShardCount/erasure_coding.DataShardsCount - 1
}
return freeVolumeSlotCount
func (n *NodeImpl) getOrCreateDisk(diskType types.DiskType) *DiskUsageCounts {
return n.diskUsages.getOrCreateDisk(diskType)
}
func (n *NodeImpl) FreeSpace() int64 {
freeVolumeSlotCount := n.maxVolumeCount + n.maxSsdVolumeCount + n.remoteVolumeCount - n.volumeCount - n.ssdVolumeCount
if n.ecShardCount > 0 {
freeVolumeSlotCount = freeVolumeSlotCount - n.ecShardCount/erasure_coding.DataShardsCount - 1
func (n *NodeImpl) AvailableSpaceFor(option *VolumeGrowOption) int64 {
t := n.getOrCreateDisk(option.DiskType)
freeVolumeSlotCount := t.maxVolumeCount + t.remoteVolumeCount - t.volumeCount
if t.ecShardCount > 0 {
freeVolumeSlotCount = freeVolumeSlotCount - t.ecShardCount/erasure_coding.DataShardsCount - 1
}
return freeVolumeSlotCount
}
@@ -209,67 +186,29 @@ func (n *NodeImpl) ReserveOneVolume(r int64, option *VolumeGrowOption) (assigned
return nil, errors.New("No free volume slot found!")
}
func (n *NodeImpl) UpAdjustMaxVolumeCountDelta(maxVolumeCountDelta int64) { //can be negative
if maxVolumeCountDelta == 0 {
return
}
atomic.AddInt64(&n.maxVolumeCount, maxVolumeCountDelta)
if n.parent != nil {
n.parent.UpAdjustMaxVolumeCountDelta(maxVolumeCountDelta)
func (n *NodeImpl) AdjustMaxVolumeCounts(maxVolumeCounts map[string]uint32) {
deltaDiskUsages := newDiskUsages()
for diskType, maxVolumeCount := range maxVolumeCounts {
if maxVolumeCount == 0 {
// the volume server may have set the max to zero
continue
}
dt := types.DiskType(diskType)
deltaDiskUsage := deltaDiskUsages.getOrCreateDisk(dt)
currentDiskUsage := n.diskUsages.getOrCreateDisk(dt)
deltaDiskUsage.maxVolumeCount = int64(maxVolumeCount) - currentDiskUsage.maxVolumeCount
}
n.UpAdjustDiskUsageDelta(deltaDiskUsages)
}
func (n *NodeImpl) UpAdjustMaxSsdVolumeCountDelta(maxSsdVolumeCountDelta int64) { //can be negative
if maxSsdVolumeCountDelta == 0 {
return
func (n *NodeImpl) UpAdjustDiskUsageDelta(deltaDiskUsages *DiskUsages) { //can be negative
for diskType, diskUsage := range deltaDiskUsages.usages {
existingDisk := n.getOrCreateDisk(diskType)
existingDisk.addDiskUsageCounts(diskUsage)
}
atomic.AddInt64(&n.maxSsdVolumeCount, maxSsdVolumeCountDelta)
if n.parent != nil {
n.parent.UpAdjustMaxSsdVolumeCountDelta(maxSsdVolumeCountDelta)
}
}
func (n *NodeImpl) UpAdjustVolumeCountDelta(volumeCountDelta int64) { //can be negative
if volumeCountDelta == 0 {
return
}
atomic.AddInt64(&n.volumeCount, volumeCountDelta)
if n.parent != nil {
n.parent.UpAdjustVolumeCountDelta(volumeCountDelta)
}
}
func (n *NodeImpl) UpAdjustRemoteVolumeCountDelta(remoteVolumeCountDelta int64) { //can be negative
if remoteVolumeCountDelta == 0 {
return
}
atomic.AddInt64(&n.remoteVolumeCount, remoteVolumeCountDelta)
if n.parent != nil {
n.parent.UpAdjustRemoteVolumeCountDelta(remoteVolumeCountDelta)
}
}
func (n *NodeImpl) UpAdjustSsdVolumeCountDelta(ssdVolumeCountDelta int64) { //can be negative
if ssdVolumeCountDelta == 0 {
return
}
atomic.AddInt64(&n.ssdVolumeCount, ssdVolumeCountDelta)
if n.parent != nil {
n.parent.UpAdjustSsdVolumeCountDelta(ssdVolumeCountDelta)
}
}
func (n *NodeImpl) UpAdjustEcShardCountDelta(ecShardCountDelta int64) { //can be negative
if ecShardCountDelta == 0 {
return
}
atomic.AddInt64(&n.ecShardCount, ecShardCountDelta)
if n.parent != nil {
n.parent.UpAdjustEcShardCountDelta(ecShardCountDelta)
}
}
func (n *NodeImpl) UpAdjustActiveVolumeCountDelta(activeVolumeCountDelta int64) { //can be negative
if activeVolumeCountDelta == 0 {
return
}
atomic.AddInt64(&n.activeVolumeCount, activeVolumeCountDelta)
if n.parent != nil {
n.parent.UpAdjustActiveVolumeCountDelta(activeVolumeCountDelta)
n.parent.UpAdjustDiskUsageDelta(deltaDiskUsages)
}
}
func (n *NodeImpl) UpAdjustMaxVolumeId(vid needle.VolumeId) { //can be negative
@@ -283,41 +222,14 @@ func (n *NodeImpl) UpAdjustMaxVolumeId(vid needle.VolumeId) { //can be negative
func (n *NodeImpl) GetMaxVolumeId() needle.VolumeId {
return n.maxVolumeId
}
func (n *NodeImpl) GetVolumeCount() int64 {
return n.volumeCount
}
func (n *NodeImpl) GetSsdVolumeCount() int64 {
return n.ssdVolumeCount
}
func (n *NodeImpl) GetEcShardCount() int64 {
return n.ecShardCount
}
func (n *NodeImpl) GetRemoteVolumeCount() int64 {
return n.remoteVolumeCount
}
func (n *NodeImpl) GetActiveVolumeCount() int64 {
return n.activeVolumeCount
}
func (n *NodeImpl) GetMaxVolumeCount() int64 {
return n.maxVolumeCount
}
func (n *NodeImpl) GetMaxSsdVolumeCount() int64 {
return n.maxSsdVolumeCount
}
func (n *NodeImpl) LinkChildNode(node Node) {
n.Lock()
defer n.Unlock()
if n.children[node.Id()] == nil {
n.children[node.Id()] = node
n.UpAdjustMaxVolumeCountDelta(node.GetMaxVolumeCount())
n.UpAdjustMaxSsdVolumeCountDelta(node.GetMaxSsdVolumeCount())
n.UpAdjustDiskUsageDelta(node.GetDiskUsages())
n.UpAdjustMaxVolumeId(node.GetMaxVolumeId())
n.UpAdjustVolumeCountDelta(node.GetVolumeCount())
n.UpAdjustSsdVolumeCountDelta(node.GetSsdVolumeCount())
n.UpAdjustRemoteVolumeCountDelta(node.GetRemoteVolumeCount())
n.UpAdjustEcShardCountDelta(node.GetEcShardCount())
n.UpAdjustActiveVolumeCountDelta(node.GetActiveVolumeCount())
node.SetParent(n)
glog.V(0).Infoln(n, "adds child", node.Id())
}
@@ -330,13 +242,7 @@ func (n *NodeImpl) UnlinkChildNode(nodeId NodeId) {
if node != nil {
node.SetParent(nil)
delete(n.children, node.Id())
n.UpAdjustVolumeCountDelta(-node.GetVolumeCount())
n.UpAdjustSsdVolumeCountDelta(-node.GetSsdVolumeCount())
n.UpAdjustRemoteVolumeCountDelta(-node.GetRemoteVolumeCount())
n.UpAdjustEcShardCountDelta(-node.GetEcShardCount())
n.UpAdjustActiveVolumeCountDelta(-node.GetActiveVolumeCount())
n.UpAdjustMaxVolumeCountDelta(-node.GetMaxVolumeCount())
n.UpAdjustMaxSsdVolumeCountDelta(-node.GetMaxSsdVolumeCount())
n.UpAdjustDiskUsageDelta(node.GetDiskUsages().negative())
glog.V(0).Infoln(n, "removes", node.Id())
}
}
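
A package-internal sketch of how a heartbeat's capacity report is applied with the new API (assumes package topology; the empty-string key standing for the hard-drive type is an assumption consistent with the code above):

// onHeartbeat is a hypothetical handler: the volume server reports absolute
// per-disk-type capacities, and AdjustMaxVolumeCounts turns them into deltas
// that UpAdjustDiskUsageDelta pushes up to the topology root.
func onHeartbeat(dn *DataNode) {
    dn.AdjustMaxVolumeCounts(map[string]uint32{
        "":    100, // hard drive slots
        "ssd": 20,  // ssd slots
    })
}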

View File

@@ -2,6 +2,7 @@ package topology
import (
"github.com/chrislusf/seaweedfs/weed/pb/master_pb"
"github.com/chrislusf/seaweedfs/weed/storage/types"
"strconv"
"time"
)
@@ -14,6 +15,7 @@ func NewRack(id string) *Rack {
r := &Rack{}
r.id = NodeId(id)
r.nodeType = "Rack"
r.diskUsages = newDiskUsages()
r.children = make(map[NodeId]Node)
r.NodeImpl.value = r
return r
@@ -28,7 +30,7 @@ func (r *Rack) FindDataNode(ip string, port int) *DataNode {
}
return nil
}
func (r *Rack) GetOrCreateDataNode(ip string, port int, publicUrl string, maxVolumeCount int64, maxSsdVolumeCount int64) *DataNode {
func (r *Rack) GetOrCreateDataNode(ip string, port int, publicUrl string, maxVolumeCounts map[string]uint32) *DataNode {
for _, c := range r.Children() {
dn := c.(*DataNode)
if dn.MatchLocation(ip, port) {
@@ -40,19 +42,19 @@ func (r *Rack) GetOrCreateDataNode(ip string, port int, publicUrl string, maxVol
dn.Ip = ip
dn.Port = port
dn.PublicUrl = publicUrl
dn.maxVolumeCount = maxVolumeCount
dn.maxSsdVolumeCount = maxSsdVolumeCount
dn.LastSeen = time.Now().Unix()
r.LinkChildNode(dn)
for diskType, maxVolumeCount := range maxVolumeCounts {
disk := NewDisk(diskType)
disk.diskUsages.getOrCreateDisk(types.DiskType(diskType)).maxVolumeCount = int64(maxVolumeCount)
dn.LinkChildNode(disk)
}
return dn
}
func (r *Rack) ToMap() interface{} {
m := make(map[string]interface{})
m["Id"] = r.Id()
m["Max"] = r.GetMaxVolumeCount()
m["MaxSsd"] = r.GetMaxSsdVolumeCount()
m["Free"] = r.FreeSpace()
var dns []interface{}
for _, c := range r.Children() {
dn := c.(*DataNode)
@@ -65,13 +67,6 @@ func (r *Rack) ToMap() interface{} {
func (r *Rack) ToRackInfo() *master_pb.RackInfo {
m := &master_pb.RackInfo{
Id: string(r.Id()),
VolumeCount: uint64(r.GetVolumeCount()),
MaxVolumeCount: uint64(r.GetMaxVolumeCount()),
MaxSsdVolumeCount: uint64(r.GetMaxSsdVolumeCount()),
SsdVolumeCount: uint64(r.GetSsdVolumeCount()),
FreeVolumeCount: uint64(r.FreeSpace()),
ActiveVolumeCount: uint64(r.GetActiveVolumeCount()),
RemoteVolumeCount: uint64(r.GetRemoteVolumeCount()),
}
for _, c := range r.Children() {
dn := c.(*DataNode)
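
A package-internal sketch of the new registration call above (assumes package topology; the map keys are the disk-type strings reported by the volume server, with "" assumed to mean hard drive):

// registerNode is a hypothetical caller of GetOrCreateDataNode: instead of
// two scalar capacities (hdd + ssd), the server now reports one capacity per
// disk type, and the Rack links one Disk child per entry.
func registerNode(r *Rack) *DataNode {
    return r.GetOrCreateDataNode("10.0.0.1", 8080, "10.0.0.1:8080",
        map[string]uint32{"": 100, "ssd": 20})
}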

View File

@@ -3,6 +3,7 @@ package topology
import (
"errors"
"fmt"
"github.com/chrislusf/seaweedfs/weed/storage/types"
"math/rand"
"sync"
"time"
@@ -45,6 +46,7 @@ func NewTopology(id string, seq sequence.Sequencer, volumeSizeLimit uint64, puls
t.id = NodeId(id)
t.nodeType = "Topology"
t.NodeImpl.value = t
t.diskUsages = newDiskUsages()
t.children = make(map[NodeId]Node)
t.collectionMap = util.NewConcurrentReadMap()
t.ecShardMap = make(map[needle.VolumeId]*EcShardLocations)
@@ -137,7 +139,7 @@ func (t *Topology) PickForWrite(count uint64, option *VolumeGrowOption) (string,
return needle.NewFileId(*vid, fileId, rand.Uint32()).String(), count, datanodes.Head(), nil
}
func (t *Topology) GetVolumeLayout(collectionName string, rp *super_block.ReplicaPlacement, ttl *needle.TTL, diskType storage.DiskType) *VolumeLayout {
func (t *Topology) GetVolumeLayout(collectionName string, rp *super_block.ReplicaPlacement, ttl *needle.TTL, diskType types.DiskType) *VolumeLayout {
return t.collectionMap.Get(collectionName, func() interface{} {
return NewCollection(collectionName, t.volumeSizeLimit, t.replicationAsMin)
}).(*Collection).GetOrCreateVolumeLayout(rp, ttl, diskType)
@@ -176,7 +178,7 @@ func (t *Topology) DeleteCollection(collectionName string) {
t.collectionMap.Delete(collectionName)
}
func (t *Topology) DeleteLayout(collectionName string, rp *super_block.ReplicaPlacement, ttl *needle.TTL, diskType storage.DiskType) {
func (t *Topology) DeleteLayout(collectionName string, rp *super_block.ReplicaPlacement, ttl *needle.TTL, diskType types.DiskType) {
collection, found := t.FindCollection(collectionName)
if !found {
return
@@ -188,14 +190,14 @@ func (t *Topology) DeleteLayout(collectionName string, rp *super_block.ReplicaPl
}
func (t *Topology) RegisterVolumeLayout(v storage.VolumeInfo, dn *DataNode) {
diskType := storage.ToDiskType(v.DiskType)
diskType := types.ToDiskType(v.DiskType)
vl := t.GetVolumeLayout(v.Collection, v.ReplicaPlacement, v.Ttl, diskType)
vl.RegisterVolume(&v, dn)
vl.EnsureCorrectWritables(&v)
}
func (t *Topology) UnRegisterVolumeLayout(v storage.VolumeInfo, dn *DataNode) {
glog.Infof("removing volume info: %+v", v)
diskType := storage.ToDiskType(v.DiskType)
diskType := types.ToDiskType(v.DiskType)
volumeLayout := t.GetVolumeLayout(v.Collection, v.ReplicaPlacement, v.Ttl, diskType)
volumeLayout.UnRegisterVolume(&v, dn)
if volumeLayout.isEmpty() {
@@ -235,7 +237,7 @@ func (t *Topology) SyncDataNodeRegistration(volumes []*master_pb.VolumeInformati
t.UnRegisterVolumeLayout(v, dn)
}
for _, v := range changedVolumes {
diskType := storage.ToDiskType(v.DiskType)
diskType := types.ToDiskType(v.DiskType)
vl := t.GetVolumeLayout(v.Collection, v.ReplicaPlacement, v.Ttl, diskType)
vl.EnsureCorrectWritables(&v)
}

View File

@@ -18,6 +18,7 @@ func (t *Topology) SyncDataNodeEcShards(shardInfos []*master_pb.VolumeEcShardInf
for _, shardInfo := range shardInfos {
shards = append(shards,
erasure_coding.NewEcVolumeInfo(
shardInfo.DiskType,
shardInfo.Collection,
needle.VolumeId(shardInfo.Id),
erasure_coding.ShardBits(shardInfo.EcIndexBits)))
@@ -39,6 +40,7 @@ func (t *Topology) IncrementalSyncDataNodeEcShards(newEcShards, deletedEcShards
for _, shardInfo := range newEcShards {
newShards = append(newShards,
erasure_coding.NewEcVolumeInfo(
shardInfo.DiskType,
shardInfo.Collection,
needle.VolumeId(shardInfo.Id),
erasure_coding.ShardBits(shardInfo.EcIndexBits)))
@@ -46,6 +48,7 @@ func (t *Topology) IncrementalSyncDataNodeEcShards(newEcShards, deletedEcShards
for _, shardInfo := range deletedEcShards {
deletedShards = append(deletedShards,
erasure_coding.NewEcVolumeInfo(
shardInfo.DiskType,
shardInfo.Collection,
needle.VolumeId(shardInfo.Id),
erasure_coding.ShardBits(shardInfo.EcIndexBits)))

View File

@@ -1,6 +1,7 @@
package topology
import (
"github.com/chrislusf/seaweedfs/weed/storage/types"
"google.golang.org/grpc"
"math/rand"
"time"
@@ -37,7 +38,7 @@ func (t *Topology) StartRefreshWritableVolumes(grpcDialOption grpc.DialOption, g
}()
}
func (t *Topology) SetVolumeCapacityFull(volumeInfo storage.VolumeInfo) bool {
diskType := storage.ToDiskType(volumeInfo.DiskType)
diskType := types.ToDiskType(volumeInfo.DiskType)
vl := t.GetVolumeLayout(volumeInfo.Collection, volumeInfo.ReplicaPlacement, volumeInfo.Ttl, diskType)
if !vl.SetVolumeCapacityFull(volumeInfo.Id) {
return false
@@ -48,7 +49,13 @@ func (t *Topology) SetVolumeCapacityFull(volumeInfo storage.VolumeInfo) bool {
for _, dn := range vl.vid2location[volumeInfo.Id].list {
if !volumeInfo.ReadOnly {
dn.UpAdjustActiveVolumeCountDelta(-1)
disk := dn.getOrCreateDisk(volumeInfo.DiskType)
deltaDiskUsages := newDiskUsages()
deltaDiskUsage := deltaDiskUsages.getOrCreateDisk(types.DiskType(volumeInfo.DiskType))
deltaDiskUsage.activeVolumeCount = -1
disk.UpAdjustDiskUsageDelta(deltaDiskUsages)
}
}
return true
@@ -56,16 +63,14 @@ func (t *Topology) SetVolumeCapacityFull(volumeInfo storage.VolumeInfo) bool {
func (t *Topology) UnRegisterDataNode(dn *DataNode) {
for _, v := range dn.GetVolumes() {
glog.V(0).Infoln("Removing Volume", v.Id, "from the dead volume server", dn.Id())
diskType := storage.ToDiskType(v.DiskType)
diskType := types.ToDiskType(v.DiskType)
vl := t.GetVolumeLayout(v.Collection, v.ReplicaPlacement, v.Ttl, diskType)
vl.SetVolumeUnavailable(dn, v.Id)
}
dn.UpAdjustVolumeCountDelta(-dn.GetVolumeCount())
dn.UpAdjustSsdVolumeCountDelta(-dn.GetSsdVolumeCount())
dn.UpAdjustRemoteVolumeCountDelta(-dn.GetRemoteVolumeCount())
dn.UpAdjustActiveVolumeCountDelta(-dn.GetActiveVolumeCount())
dn.UpAdjustMaxVolumeCountDelta(-dn.GetMaxVolumeCount())
dn.UpAdjustMaxSsdVolumeCountDelta(-dn.GetMaxSsdVolumeCount())
negativeUsages := dn.GetDiskUsages().negative()
dn.UpAdjustDiskUsageDelta(negativeUsages)
if dn.Parent() != nil {
dn.Parent().UnlinkChildNode(dn.Id())
}

View File

@@ -4,8 +4,8 @@ import "github.com/chrislusf/seaweedfs/weed/pb/master_pb"
func (t *Topology) ToMap() interface{} {
m := make(map[string]interface{})
m["Max"] = t.GetMaxVolumeCount() + t.GetMaxSsdVolumeCount()
m["Free"] = t.FreeSpace()
m["Max"] = t.diskUsages.GetMaxVolumeCount()
m["Free"] = t.diskUsages.FreeSpace()
var dcs []interface{}
for _, c := range t.Children() {
dc := c.(*DataCenter)
@@ -29,8 +29,8 @@ func (t *Topology) ToMap() interface{} {
func (t *Topology) ToVolumeMap() interface{} {
m := make(map[string]interface{})
m["Max"] = t.GetMaxVolumeCount() + t.GetMaxSsdVolumeCount()
m["Free"] = t.FreeSpace()
m["Max"] = t.diskUsages.GetMaxVolumeCount()
m["Free"] = t.diskUsages.FreeSpace()
dcs := make(map[NodeId]interface{})
for _, c := range t.Children() {
dc := c.(*DataCenter)
@@ -81,13 +81,6 @@ func (t *Topology) ToVolumeLocations() (volumeLocations []*master_pb.VolumeLocat
func (t *Topology) ToTopologyInfo() *master_pb.TopologyInfo {
m := &master_pb.TopologyInfo{
Id: string(t.Id()),
VolumeCount: uint64(t.GetVolumeCount()),
MaxVolumeCount: uint64(t.GetMaxVolumeCount()),
MaxSsdVolumeCount: uint64(t.GetMaxSsdVolumeCount()),
FreeVolumeCount: uint64(t.FreeSpace()),
ActiveVolumeCount: uint64(t.GetActiveVolumeCount()),
RemoteVolumeCount: uint64(t.GetRemoteVolumeCount()),
SsdVolumeCount: uint64(t.GetSsdVolumeCount()),
}
for _, c := range t.Children() {
dc := c.(*DataCenter)
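
A package-internal sketch of the aggregated capacity reads used above (assumes package topology):

// capacitySummary is a hypothetical helper: the topology no longer keeps
// separate hdd and ssd scalars, so totals come from summing the per-type
// DiskUsages counters.
func capacitySummary(t *Topology) (max, free int64) {
    return t.diskUsages.GetMaxVolumeCount(), t.diskUsages.FreeSpace()
}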

View File

@@ -6,6 +6,7 @@ import (
"github.com/chrislusf/seaweedfs/weed/storage"
"github.com/chrislusf/seaweedfs/weed/storage/needle"
"github.com/chrislusf/seaweedfs/weed/storage/super_block"
"github.com/chrislusf/seaweedfs/weed/storage/types"
"testing"
)
@@ -114,7 +115,7 @@ func TestHandlingVolumeServerHeartbeat(t *testing.T) {
nil,
dn)
rp, _ := super_block.NewReplicaPlacementFromString("000")
layout := topo.GetVolumeLayout("", rp, needle.EMPTY_TTL, storage.HardDriveType)
layout := topo.GetVolumeLayout("", rp, needle.EMPTY_TTL, types.HardDriveType)
assert(t, "writables after repeated add", len(layout.writables), volumeCount)
assert(t, "activeVolumeCount1", int(topo.activeVolumeCount), volumeCount)

View File

@@ -2,6 +2,7 @@ package topology
import (
"fmt"
"github.com/chrislusf/seaweedfs/weed/storage/types"
"math/rand"
"sync"
@@ -27,7 +28,7 @@ type VolumeGrowOption struct {
Collection string
ReplicaPlacement *super_block.ReplicaPlacement
Ttl *needle.TTL
DiskType storage.DiskType
DiskType types.DiskType
Prealloacte int64
DataCenter string
Rack string
@@ -219,6 +220,7 @@ func (vg *VolumeGrowth) grow(grpcDialOption grpc.DialOption, topo *Topology, vid
ReplicaPlacement: option.ReplicaPlacement,
Ttl: option.Ttl,
Version: needle.CurrentVersion,
DiskType: string(option.DiskType),
}
server.AddOrUpdateVolume(vi)
topo.RegisterVolumeLayout(vi, server)
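
A package-internal sketch of a grow request under the new typed field (assumes package topology; types.SsdType is assumed to be the "ssd" constant that this commit moves from the storage package into types):

// growOption is a hypothetical example: DiskType is now a types.DiskType
// (it was a storage.DiskType before this commit), and grow() stamps it onto
// the VolumeInfo so layouts are selected per disk type.
func growOption(rp *super_block.ReplicaPlacement, ttl *needle.TTL) *VolumeGrowOption {
    return &VolumeGrowOption{
        Collection:       "pictures",
        ReplicaPlacement: rp,
        Ttl:              ttl,
        DiskType:         types.SsdType,
    }
}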

View File

@@ -3,6 +3,7 @@ package topology
import (
"errors"
"fmt"
"github.com/chrislusf/seaweedfs/weed/storage/types"
"math/rand"
"sync"
"time"
@@ -103,7 +104,7 @@ func (v *volumesBinaryState) copyState(list *VolumeLocationList) copyState {
type VolumeLayout struct {
rp *super_block.ReplicaPlacement
ttl *needle.TTL
diskType storage.DiskType
diskType types.DiskType
vid2location map[needle.VolumeId]*VolumeLocationList
writables []needle.VolumeId // transient array of writable volume id
readonlyVolumes *volumesBinaryState // readonly volumes
@@ -119,7 +120,7 @@ type VolumeLayoutStats struct {
FileCount uint64
}
func NewVolumeLayout(rp *super_block.ReplicaPlacement, ttl *needle.TTL, diskType storage.DiskType, volumeSizeLimit uint64, replicationAsMin bool) *VolumeLayout {
func NewVolumeLayout(rp *super_block.ReplicaPlacement, ttl *needle.TTL, diskType types.DiskType, volumeSizeLimit uint64, replicationAsMin bool) *VolumeLayout {
return &VolumeLayout{
rp: rp,
ttl: ttl,