Merge branch 'master' into mq-subscribe
@@ -52,7 +52,7 @@ var cmdMasterFollower = &Command{
 
   In most cases, the master follower is not needed. In big data centers with thousands of volume
   servers. In theory, the master may have trouble to keep up with the write requests and read requests.
 
-  The master follower can relieve the master from from read requests, which only needs to
+  The master follower can relieve the master from read requests, which only needs to
   lookup a fileId or volumeId.
 
   The master follower currently can handle fileId lookup requests:
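The help text above describes the follower as a read-only answerer of fileId/volumeId lookups. As a rough illustration of the kind of request it offloads, here is a minimal Go sketch that queries the master's /dir/lookup HTTP endpoint; the default address localhost:9333 and the assumption that a follower serves the same lookup endpoint are illustrative, not taken from this commit.

// A minimal sketch of the read-only lookup traffic a master follower is meant
// to absorb. The address and the /dir/lookup endpoint are assumptions for
// illustration, not part of this diff.
package main

import (
	"fmt"
	"io"
	"net/http"
)

func main() {
	// Ask where volume 3 currently lives; a follower can answer this kind of
	// request without bothering the master leader.
	resp, err := http.Get("http://localhost:9333/dir/lookup?volumeId=3")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(body)) // JSON listing the volume's locations
}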
@@ -21,13 +21,13 @@ import (
 )
 
 type MetaAggregator struct {
-	filer           *Filer
-	self            pb.ServerAddress
-	isLeader        bool
-	grpcDialOption  grpc.DialOption
-	MetaLogBuffer   *log_buffer.LogBuffer
-	peerStatues     map[pb.ServerAddress]int
-	peerStatuesLock sync.Mutex
+	filer          *Filer
+	self           pb.ServerAddress
+	isLeader       bool
+	grpcDialOption grpc.DialOption
+	MetaLogBuffer  *log_buffer.LogBuffer
+	peerChans      map[pb.ServerAddress]chan struct{}
+	peerChansLock  sync.Mutex
 	// notifying clients
 	ListenersLock sync.Mutex
 	ListenersCond *sync.Cond
@@ -40,7 +40,7 @@ func NewMetaAggregator(filer *Filer, self pb.ServerAddress, grpcDialOption grpc.
 		filer:          filer,
 		self:           self,
 		grpcDialOption: grpcDialOption,
-		peerStatues:    make(map[pb.ServerAddress]int),
+		peerChans:      make(map[pb.ServerAddress]chan struct{}),
 	}
 	t.ListenersCond = sync.NewCond(&t.ListenersLock)
 	t.MetaLogBuffer = log_buffer.NewLogBuffer("aggr", LogFlushInterval, nil, nil, func() {
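The struct change above swaps the reference-counted peerStatues map for peerChans, a map of one stop channel per peer, closed to cancel that peer's subscription goroutine. A standalone sketch of that bookkeeping pattern follows; the names registry, Add, Remove and subscribe are illustrative, not part of SeaweedFS.

// Standalone sketch of per-peer stop-channel bookkeeping: adding a peer cancels
// any previous subscription and starts a new goroutine; removing a peer closes
// its channel and forgets it.
package main

import (
	"fmt"
	"sync"
	"time"
)

type registry struct {
	mu    sync.Mutex
	chans map[string]chan struct{}
}

func (r *registry) Add(peer string) {
	r.mu.Lock()
	defer r.mu.Unlock()
	// Cancel any previous subscription for this peer before starting a new one.
	if prev, found := r.chans[peer]; found {
		close(prev)
	}
	stop := make(chan struct{})
	r.chans[peer] = stop
	go subscribe(peer, stop)
}

func (r *registry) Remove(peer string) {
	r.mu.Lock()
	defer r.mu.Unlock()
	if prev, found := r.chans[peer]; found {
		close(prev)
		delete(r.chans, peer)
	}
}

// subscribe stands in for loopSubscribeToOneFiler: it loops until its stop
// channel is closed.
func subscribe(peer string, stop chan struct{}) {
	for {
		select {
		case <-stop:
			fmt.Println("stop subscribing", peer)
			return
		default:
			time.Sleep(100 * time.Millisecond) // pretend to pull one batch of metadata
		}
	}
}

func main() {
	r := &registry{chans: make(map[string]chan struct{})}
	r.Add("filer-1:8888")
	time.Sleep(300 * time.Millisecond)
	r.Remove("filer-1:8888")
	time.Sleep(100 * time.Millisecond)
}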
@@ -50,51 +50,40 @@ func NewMetaAggregator(filer *Filer, self pb.ServerAddress, grpcDialOption grpc.
 }
 
 func (ma *MetaAggregator) OnPeerUpdate(update *master_pb.ClusterNodeUpdate, startFrom time.Time) {
+	ma.peerChansLock.Lock()
+	defer ma.peerChansLock.Unlock()
+
 	address := pb.ServerAddress(update.Address)
 	if update.IsAdd {
 		// every filer should subscribe to a new filer
-		if ma.setActive(address, true) {
-			go ma.loopSubscribeToOneFiler(ma.filer, ma.self, address, startFrom)
+		// cancel previous subscription if any
+		if prevChan, found := ma.peerChans[address]; found {
+			close(prevChan)
 		}
+		stopChan := make(chan struct{})
+		ma.peerChans[address] = stopChan
+		go ma.loopSubscribeToOneFiler(ma.filer, ma.self, address, startFrom, stopChan)
 	} else {
-		ma.setActive(address, false)
+		if prevChan, found := ma.peerChans[address]; found {
+			close(prevChan)
+			delete(ma.peerChans, address)
+		}
 	}
 }
 
-func (ma *MetaAggregator) setActive(address pb.ServerAddress, isActive bool) (notDuplicated bool) {
-	ma.peerStatuesLock.Lock()
-	defer ma.peerStatuesLock.Unlock()
-	if isActive {
-		if _, found := ma.peerStatues[address]; found {
-			ma.peerStatues[address] += 1
-		} else {
-			ma.peerStatues[address] = 1
-			notDuplicated = true
-		}
-	} else {
-		if _, found := ma.peerStatues[address]; found {
-			delete(ma.peerStatues, address)
-		}
-	}
-	return
-}
-func (ma *MetaAggregator) isActive(address pb.ServerAddress) (isActive bool) {
-	ma.peerStatuesLock.Lock()
-	defer ma.peerStatuesLock.Unlock()
-	var count int
-	count, isActive = ma.peerStatues[address]
-	return count > 0 && isActive
-}
-
-func (ma *MetaAggregator) loopSubscribeToOneFiler(f *Filer, self pb.ServerAddress, peer pb.ServerAddress, startFrom time.Time) {
+func (ma *MetaAggregator) loopSubscribeToOneFiler(f *Filer, self pb.ServerAddress, peer pb.ServerAddress, startFrom time.Time, stopChan chan struct{}) {
 	lastTsNs := startFrom.UnixNano()
 	for {
 		glog.V(0).Infof("loopSubscribeToOneFiler read %s start from %v %d", peer, time.Unix(0, lastTsNs), lastTsNs)
 		nextLastTsNs, err := ma.doSubscribeToOneFiler(f, self, peer, lastTsNs)
-		if !ma.isActive(peer) {
-			glog.V(0).Infof("stop subscribing remote %s meta change", peer)
+
+		// check stopChan to see if we should stop
+		select {
+		case <-stopChan:
+			glog.V(0).Infof("stop subscribing peer %s meta change", peer)
 			return
+		default:
 		}
+
 		if err != nil {
 			errLvl := glog.Level(0)
 			if strings.Contains(err.Error(), "duplicated local subscription detected") {
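The rewritten loop replaces the isActive poll with a non-blocking select on stopChan. The property it relies on is that a closed channel is always ready to receive, so a single close() is seen by every goroutine that checks the channel. A small self-contained illustration; worker and the timings are made up for the demo.

// Non-blocking cancellation check: a select with a default case inspects the
// stop channel without blocking, and once the channel is closed the receive
// fires on the next iteration.
package main

import (
	"fmt"
	"time"
)

func worker(id int, stop <-chan struct{}) {
	for i := 0; ; i++ {
		select {
		case <-stop: // receives immediately once stop is closed
			fmt.Printf("worker %d: stopping after %d iterations\n", id, i)
			return
		default:
			time.Sleep(50 * time.Millisecond) // stand-in for doSubscribeToOneFiler
		}
	}
}

func main() {
	stop := make(chan struct{})
	for id := 1; id <= 3; id++ {
		go worker(id, stop)
	}
	time.Sleep(200 * time.Millisecond)
	close(stop) // one close cancels every worker listening on the channel
	time.Sleep(100 * time.Millisecond)
}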
@@ -226,7 +226,7 @@ func (vs *VolumeServer) doCopyFileWithThrottler(client volume_server_pb.VolumeSe
 
 /*
 *
-only check the the differ of the file size
+only check the differ of the file size
 todo: maybe should check the received count and deleted count of the volume
 */
 func checkCopyFiles(originFileInf *volume_server_pb.ReadVolumeFileStatusResponse, hasRemoteDatFile bool, idxFileName, datFileName string) error {
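The comment above notes that checkCopyFiles only compares file sizes and leaves the received/deleted count check as a todo. A rough sketch of such a size-only check; checkSize, the file names, and the expected sizes are illustrative stand-ins, not the actual SeaweedFS helper.

// Illustrative size-only verification: compare a copied file's on-disk size
// against the size reported by the source server.
package main

import (
	"fmt"
	"os"
)

func checkSize(path string, expected int64) error {
	fi, err := os.Stat(path)
	if err != nil {
		return err
	}
	if fi.Size() != expected {
		return fmt.Errorf("%s size mismatch: got %d, want %d", path, fi.Size(), expected)
	}
	return nil
}

func main() {
	// Hypothetical copied files and expected sizes.
	if err := checkSize("3.idx", 1024); err != nil {
		fmt.Println(err)
	}
	if err := checkSize("3.dat", 8388608); err != nil {
		fmt.Println(err)
	}
}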
@@ -150,7 +150,7 @@ func iterateEcjFile(baseFileName string, processNeedleFn func(key types.NeedleId
 
 }
 
-// WriteDatFile generates .dat from from .ec00 ~ .ec09 files
+// WriteDatFile generates .dat from .ec00 ~ .ec09 files
 func WriteDatFile(baseFileName string, datFileSize int64, shardFileNames []string) error {
 
 	datFile, openErr := os.OpenFile(baseFileName+".dat", os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644)
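WriteDatFile reconstructs a volume's .dat file from the ten data shards .ec00 ~ .ec09. As a simplified sketch only: the version below just concatenates the shards in order and truncates to datFileSize, which glosses over how the real implementation lays data out across shards; writeDatFromShards is an illustrative name.

// Simplified illustration of rebuilding a .dat file from data shards: open the
// target with O_WRONLY|O_CREATE|O_TRUNC, append each shard, then trim to the
// recorded data size. Not the actual SeaweedFS layout.
package main

import (
	"fmt"
	"io"
	"os"
)

func writeDatFromShards(baseFileName string, datFileSize int64) error {
	datFile, err := os.OpenFile(baseFileName+".dat", os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644)
	if err != nil {
		return err
	}
	defer datFile.Close()

	for i := 0; i < 10; i++ {
		shardName := fmt.Sprintf("%s.ec%02d", baseFileName, i)
		shard, err := os.Open(shardName)
		if err != nil {
			return err
		}
		if _, err := io.Copy(datFile, shard); err != nil {
			shard.Close()
			return err
		}
		shard.Close()
	}
	// Shards are padded, so trim the result back to the original data size.
	return datFile.Truncate(datFileSize)
}

func main() {
	if err := writeDatFromShards("3", 123456789); err != nil {
		fmt.Println("rebuild failed:", err)
	}
}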
@@ -345,7 +345,7 @@ func (s *Store) CollectHeartbeat() *master_pb.Heartbeat {
 		stats.VolumeServerDiskSizeGauge.WithLabelValues(col, "normal").Set(float64(size))
 	}
 
-	for col, deletedBytes := range collectionVolumeDeletedBytes{
+	for col, deletedBytes := range collectionVolumeDeletedBytes {
 		stats.VolumeServerDiskSizeGauge.WithLabelValues(col, "deleted_bytes").Set(float64(deletedBytes))
 	}
 