2022-03-28 16:50:28 +00:00
55 changed files with 1057 additions and 483 deletions

View File

@@ -393,7 +393,7 @@ func (fs *FilerServer) GetFilerConfiguration(ctx context.Context, req *filer_pb.
clusterId, _ := fs.filer.Store.KvGet(context.Background(), []byte("clusterId"))
t := &filer_pb.GetFilerConfigurationResponse{
- Masters: pb.ToAddressStrings(fs.option.Masters),
+ Masters: pb.ToAddressStringsFromMap(fs.option.Masters),
Collection: fs.option.Collection,
Replication: fs.option.DefaultReplication,
MaxMb: uint32(fs.option.MaxMB),
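
Note: with Masters now a map keyed by server name, the response builder switches from pb.ToAddressStrings to pb.ToAddressStringsFromMap. Below is a minimal sketch of what such a map-aware helper could look like; the behavior is assumed, not the verified SeaweedFS implementation (only the fact that pb.ServerAddress is a string-typed address is taken from the codebase).

```go
package pb

// ServerAddress is SeaweedFS's string-typed "host:port" address.
type ServerAddress string

// ToAddressStringsFromMap is sketched as the map-keyed counterpart of
// ToAddressStrings: collect the map values into a plain []string.
// Assumed behavior only; the real helper may differ.
func ToAddressStringsFromMap(addresses map[string]ServerAddress) []string {
	var result []string
	for _, addr := range addresses {
		result = append(result, string(addr))
	}
	return result
}
```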

View File

@@ -49,7 +49,7 @@ import (
)
type FilerOption struct {
- Masters []pb.ServerAddress
+ Masters map[string]pb.ServerAddress
Collection string
DefaultReplication string
DisableDirListing bool

View File

@@ -82,7 +82,9 @@ func (fs *FilerServer) DeleteTaggingHandler(w http.ResponseWriter, r *http.Reque
toDelete := strings.Split(r.URL.Query().Get("tagging"), ",")
deletions := make(map[string]struct{})
for _, deletion := range toDelete {
- deletions[deletion] = struct{}{}
+ if deletion != "" {
+ deletions[deletion] = struct{}{}
+ }
}
// delete all tags or specific tags
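
Note: the guard matters because strings.Split never returns an empty slice for a non-empty separator; an empty or trailing-comma "tagging" parameter yields "" elements that would otherwise be stored as bogus tag names to delete. A quick self-contained illustration (tag names are made up):

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	// An empty "tagging" value still yields one element: [""]
	fmt.Printf("%q\n", strings.Split("", ",")) // [""]
	// A trailing comma yields an empty last element.
	fmt.Printf("%q\n", strings.Split("color,owner,", ",")) // ["color" "owner" ""]

	// Filtering as in the handler keeps only real tag names.
	deletions := make(map[string]struct{})
	for _, d := range strings.Split("color,owner,", ",") {
		if d != "" {
			deletions[d] = struct{}{}
		}
	}
	fmt.Println(len(deletions)) // 2
}
```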

View File

@@ -131,7 +131,7 @@ func isAppend(r *http.Request) bool {
}
func skipCheckParentDirEntry(r *http.Request) bool {
- return r.URL.Query().Get("skipCheckParentDir") != "true"
+ return r.URL.Query().Get("skipCheckParentDir") == "true"
}
func (fs *FilerServer) saveMetaData(ctx context.Context, r *http.Request, fileName string, contentType string, so *operation.StorageOption, md5bytes []byte, fileChunks []*filer_pb.FileChunk, chunkOffset int64, content []byte) (filerResult *FilerPostResult, replyerr error) {
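
Note: the old comparison returned true for every request that did not set the flag, so the parent-directory check was effectively skipped by default; the corrected version skips it only on explicit opt-in. A small standalone check of that behavior (test harness only, not filer code):

```go
package main

import (
	"fmt"
	"net/http"
	"net/url"
)

// skipCheckParentDirEntry mirrors the corrected helper: skip the parent
// directory check only when the client opts in explicitly.
func skipCheckParentDirEntry(r *http.Request) bool {
	return r.URL.Query().Get("skipCheckParentDir") == "true"
}

func main() {
	// No parameter: the parent directory is checked (helper returns false).
	r1 := &http.Request{URL: &url.URL{RawQuery: ""}}
	// Explicit opt-out of the check (helper returns true).
	r2 := &http.Request{URL: &url.URL{RawQuery: "skipCheckParentDir=true"}}
	fmt.Println(skipCheckParentDirEntry(r1), skipCheckParentDirEntry(r2)) // false true
}
```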

View File

@@ -75,7 +75,7 @@ type MasterServer struct {
Cluster *cluster.Cluster
}
- func NewMasterServer(r *mux.Router, option *MasterOption, peers []pb.ServerAddress) *MasterServer {
+ func NewMasterServer(r *mux.Router, option *MasterOption, peers map[string]pb.ServerAddress) *MasterServer {
v := util.GetViper()
signingKey := v.GetString("jwt.signing.key")

View File

@@ -5,8 +5,6 @@ import (
"math/rand"
"os"
"path"
"sort"
"strings"
"time"
"google.golang.org/grpc"
@@ -21,7 +19,7 @@ import (
type RaftServerOption struct {
GrpcDialOption grpc.DialOption
- Peers []pb.ServerAddress
+ Peers map[string]pb.ServerAddress
ServerAddr pb.ServerAddress
DataDir string
Topo *topology.Topology
@@ -31,7 +29,7 @@ type RaftServerOption struct {
}
type RaftServer struct {
- peers []pb.ServerAddress // initial peers to join with
+ peers map[string]pb.ServerAddress // initial peers to join with
raftServer raft.Server
dataDir string
serverAddr pb.ServerAddress
@@ -81,10 +79,11 @@ func NewRaftServer(option *RaftServerOption) (*RaftServer, error) {
transporter := raft.NewGrpcTransporter(option.GrpcDialOption)
glog.V(0).Infof("Starting RaftServer with %v", option.ServerAddr)
// always clear previous log to avoid server is promotable
os.RemoveAll(path.Join(s.dataDir, "log"))
if !option.RaftResumeState {
// always clear previous metadata
os.RemoveAll(path.Join(s.dataDir, "conf"))
os.RemoveAll(path.Join(s.dataDir, "log"))
os.RemoveAll(path.Join(s.dataDir, "snapshot"))
}
if err := os.MkdirAll(path.Join(s.dataDir, "snapshot"), 0600); err != nil {
@@ -107,39 +106,26 @@ func NewRaftServer(option *RaftServerOption) (*RaftServer, error) {
return nil, err
}
- for _, peer := range s.peers {
- if err := s.raftServer.AddPeer(string(peer), peer.ToGrpcAddress()); err != nil {
+ for name, peer := range s.peers {
+ if err := s.raftServer.AddPeer(name, peer.ToGrpcAddress()); err != nil {
return nil, err
}
}
// Remove deleted peers
for existsPeerName := range s.raftServer.Peers() {
- exists := false
- var existingPeer pb.ServerAddress
- for _, peer := range s.peers {
- if peer.ToGrpcAddress() == existsPeerName {
- exists, existingPeer = true, peer
- break
- }
- }
- if exists {
+ if existingPeer, found := s.peers[existsPeerName]; !found {
if err := s.raftServer.RemovePeer(existsPeerName); err != nil {
glog.V(0).Infoln(err)
return nil, err
} else {
glog.V(0).Infof("removing old peer %s", existingPeer)
glog.V(0).Infof("removing old peer: %s", existingPeer)
}
}
}
s.GrpcServer = raft.NewGrpcServer(s.raftServer)
- if s.raftServer.IsLogEmpty() && isTheFirstOne(option.ServerAddr, s.peers) {
- // Initialize the server by joining itself.
- // s.DoJoinCommand()
- }
glog.V(0).Infof("current cluster leader: %v", s.raftServer.Leader())
return s, nil
@@ -155,16 +141,6 @@ func (s *RaftServer) Peers() (members []string) {
return
}
- func isTheFirstOne(self pb.ServerAddress, peers []pb.ServerAddress) bool {
- sort.Slice(peers, func(i, j int) bool {
- return strings.Compare(string(peers[i]), string(peers[j])) < 0
- })
- if len(peers) <= 0 {
- return true
- }
- return self == peers[0]
- }
func (s *RaftServer) DoJoinCommand() {
glog.V(0).Infoln("Initializing new cluster")

View File

@@ -23,7 +23,6 @@ type VolumeServer struct {
inFlightDownloadDataSize int64
concurrentUploadLimit int64
concurrentDownloadLimit int64
- inFlightUploadDataLimitCond *sync.Cond
inFlightDownloadDataLimitCond *sync.Cond
SeedMasterNodes []pb.ServerAddress
@@ -84,7 +83,6 @@ func NewVolumeServer(adminMux, publicMux *http.ServeMux, ip string,
fileSizeLimitBytes: int64(fileSizeLimitMB) * 1024 * 1024,
isHeartbeating: true,
stopChan: make(chan bool),
- inFlightUploadDataLimitCond: sync.NewCond(new(sync.Mutex)),
inFlightDownloadDataLimitCond: sync.NewCond(new(sync.Mutex)),
concurrentUploadLimit: concurrentUploadLimit,
concurrentDownloadLimit: concurrentDownloadLimit,

View File

@@ -1,6 +1,7 @@
package weed_server
import (
"fmt"
"net/http"
"strconv"
"strings"
@@ -39,8 +40,14 @@ func (vs *VolumeServer) privateStoreHandler(w http.ResponseWriter, r *http.Reque
stats.ReadRequest()
vs.inFlightDownloadDataLimitCond.L.Lock()
for vs.concurrentDownloadLimit != 0 && atomic.LoadInt64(&vs.inFlightDownloadDataSize) > vs.concurrentDownloadLimit {
glog.V(4).Infof("wait because inflight download data %d > %d", vs.inFlightDownloadDataSize, vs.concurrentDownloadLimit)
vs.inFlightDownloadDataLimitCond.Wait()
select {
case <-r.Context().Done():
glog.V(4).Infof("request cancelled from %s: %v", r.RemoteAddr, r.Context().Err())
return
default:
glog.V(4).Infof("wait because inflight download data %d > %d", vs.inFlightDownloadDataSize, vs.concurrentDownloadLimit)
vs.inFlightDownloadDataLimitCond.Wait()
}
}
vs.inFlightDownloadDataLimitCond.L.Unlock()
vs.GetOrHeadHandler(w, r)
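
Note: the download path now re-checks the request context before each wait, so a cancelled client stops tying up the handler; the select's other branch is default, so the check itself never blocks, and an already-waiting handler only observes the cancellation after the next Signal/Broadcast. A reduced sketch of the pattern (function and variable names are illustrative, not SeaweedFS APIs):

```go
package main

import (
	"context"
	"fmt"
	"sync"
	"time"
)

// waitForCapacity mirrors the handler's loop: keep waiting while over the
// limit, but give up as soon as the caller's context is cancelled.
func waitForCapacity(ctx context.Context, cond *sync.Cond, overLimit func() bool) bool {
	cond.L.Lock()
	defer cond.L.Unlock()
	for overLimit() {
		select {
		case <-ctx.Done():
			return false // request cancelled; stop waiting
		default:
			cond.Wait() // woken by Signal/Broadcast when in-flight data drops
		}
	}
	return true
}

func main() {
	cond := sync.NewCond(new(sync.Mutex))
	ctx, cancel := context.WithCancel(context.Background())

	go func() {
		time.Sleep(50 * time.Millisecond)
		cancel()
		cond.Broadcast() // wake the waiter so it can observe the cancellation
	}()

	fmt.Println(waitForCapacity(ctx, cond, func() bool { return true })) // false
}
```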
@@ -51,16 +58,18 @@ func (vs *VolumeServer) privateStoreHandler(w http.ResponseWriter, r *http.Reque
// wait until in flight data is less than the limit
contentLength := getContentLength(r)
- vs.inFlightUploadDataLimitCond.L.Lock()
- for vs.concurrentUploadLimit != 0 && atomic.LoadInt64(&vs.inFlightUploadDataSize) > vs.concurrentUploadLimit {
- glog.V(4).Infof("wait because inflight upload data %d > %d", vs.inFlightUploadDataSize, vs.concurrentUploadLimit)
- vs.inFlightUploadDataLimitCond.Wait()
+ // exclude the replication from the concurrentUploadLimitMB
+ if vs.concurrentUploadLimit != 0 && r.URL.Query().Get("type") != "replicate" &&
+ atomic.LoadInt64(&vs.inFlightUploadDataSize) > vs.concurrentUploadLimit {
+ err := fmt.Errorf("reject because inflight upload data %d > %d", vs.inFlightUploadDataSize, vs.concurrentUploadLimit)
+ glog.V(1).Infof("too many requests: %v", err)
+ writeJsonError(w, r, http.StatusTooManyRequests, err)
+ return
}
- vs.inFlightUploadDataLimitCond.L.Unlock()
atomic.AddInt64(&vs.inFlightUploadDataSize, contentLength)
defer func() {
atomic.AddInt64(&vs.inFlightUploadDataSize, -contentLength)
- vs.inFlightUploadDataLimitCond.Signal()
}()
// processs uploads
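
Note: uploads over the in-flight limit are now rejected immediately with 429 Too Many Requests instead of queueing on the condition variable, and replication traffic (type=replicate) is exempt from the limit. Clients that relied on the old blocking behavior would need to retry; a minimal client-side sketch (endpoint, file id, and backoff policy are placeholders, not part of this commit):

```go
package main

import (
	"bytes"
	"fmt"
	"net/http"
	"time"
)

// postWithRetry retries an upload a few times when the volume server
// answers 429 Too Many Requests.
func postWithRetry(url string, data []byte) (*http.Response, error) {
	backoff := 100 * time.Millisecond
	for attempt := 0; attempt < 5; attempt++ {
		resp, err := http.Post(url, "application/octet-stream", bytes.NewReader(data))
		if err != nil {
			return nil, err
		}
		if resp.StatusCode != http.StatusTooManyRequests {
			return resp, nil
		}
		resp.Body.Close()
		time.Sleep(backoff)
		backoff *= 2
	}
	return nil, fmt.Errorf("upload still rejected after retries")
}

func main() {
	resp, err := postWithRetry("http://volume:8080/3,01637037d6", []byte("hello"))
	if err != nil {
		fmt.Println(err)
		return
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status)
}
```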