Merge branch 'master' into a
@@ -3,7 +3,6 @@ package weed_server
import (
    "context"
    "fmt"
    "github.com/chrislusf/seaweedfs/weed/pb"
    "os"
    "path/filepath"
    "strconv"
@@ -357,128 +356,3 @@ func (fs *FilerServer) DeleteCollection(ctx context.Context, req *filer_pb.Delet

    return &filer_pb.DeleteCollectionResponse{}, err
}
-
-func (fs *FilerServer) Statistics(ctx context.Context, req *filer_pb.StatisticsRequest) (resp *filer_pb.StatisticsResponse, err error) {
-
-    var output *master_pb.StatisticsResponse
-
-    err = fs.filer.MasterClient.WithClient(false, func(masterClient master_pb.SeaweedClient) error {
-        grpcResponse, grpcErr := masterClient.Statistics(context.Background(), &master_pb.StatisticsRequest{
-            Replication: req.Replication,
-            Collection:  req.Collection,
-            Ttl:         req.Ttl,
-            DiskType:    req.DiskType,
-        })
-        if grpcErr != nil {
-            return grpcErr
-        }
-
-        output = grpcResponse
-        return nil
-    })
-
-    if err != nil {
-        return nil, err
-    }
-
-    return &filer_pb.StatisticsResponse{
-        TotalSize: output.TotalSize,
-        UsedSize:  output.UsedSize,
-        FileCount: output.FileCount,
-    }, nil
-}
-
-func (fs *FilerServer) GetFilerConfiguration(ctx context.Context, req *filer_pb.GetFilerConfigurationRequest) (resp *filer_pb.GetFilerConfigurationResponse, err error) {
-
-    clusterId, _ := fs.filer.Store.KvGet(context.Background(), []byte("clusterId"))
-
-    t := &filer_pb.GetFilerConfigurationResponse{
-        Masters:            pb.ToAddressStringsFromMap(fs.option.Masters),
-        Collection:         fs.option.Collection,
-        Replication:        fs.option.DefaultReplication,
-        MaxMb:              uint32(fs.option.MaxMB),
-        DirBuckets:         fs.filer.DirBucketsPath,
-        Cipher:             fs.filer.Cipher,
-        Signature:          fs.filer.Signature,
-        MetricsAddress:     fs.metricsAddress,
-        MetricsIntervalSec: int32(fs.metricsIntervalSec),
-        Version:            util.Version(),
-        ClusterId:          string(clusterId),
-    }
-
-    glog.V(4).Infof("GetFilerConfiguration: %v", t)
-
-    return t, nil
-}
-
-func (fs *FilerServer) KeepConnected(stream filer_pb.SeaweedFiler_KeepConnectedServer) error {
-
-    req, err := stream.Recv()
-    if err != nil {
-        return err
-    }
-
-    clientName := util.JoinHostPort(req.Name, int(req.GrpcPort))
-    m := make(map[string]bool)
-    for _, tp := range req.Resources {
-        m[tp] = true
-    }
-    fs.brokersLock.Lock()
-    fs.brokers[clientName] = m
-    glog.V(0).Infof("+ broker %v", clientName)
-    fs.brokersLock.Unlock()
-
-    defer func() {
-        fs.brokersLock.Lock()
-        delete(fs.brokers, clientName)
-        glog.V(0).Infof("- broker %v: %v", clientName, err)
-        fs.brokersLock.Unlock()
-    }()
-
-    for {
-        if err := stream.Send(&filer_pb.KeepConnectedResponse{}); err != nil {
-            glog.V(0).Infof("send broker %v: %+v", clientName, err)
-            return err
-        }
-        // println("replied")
-
-        if _, err := stream.Recv(); err != nil {
-            glog.V(0).Infof("recv broker %v: %v", clientName, err)
-            return err
-        }
-        // println("received")
-    }
-
-}
-
-func (fs *FilerServer) LocateBroker(ctx context.Context, req *filer_pb.LocateBrokerRequest) (resp *filer_pb.LocateBrokerResponse, err error) {
-
-    resp = &filer_pb.LocateBrokerResponse{}
-
-    fs.brokersLock.Lock()
-    defer fs.brokersLock.Unlock()
-
-    var localBrokers []*filer_pb.LocateBrokerResponse_Resource
-
-    for b, m := range fs.brokers {
-        if _, found := m[req.Resource]; found {
-            resp.Found = true
-            resp.Resources = []*filer_pb.LocateBrokerResponse_Resource{
-                {
-                    GrpcAddresses: b,
-                    ResourceCount: int32(len(m)),
-                },
-            }
-            return
-        }
-        localBrokers = append(localBrokers, &filer_pb.LocateBrokerResponse_Resource{
-            GrpcAddresses: b,
-            ResourceCount: int32(len(m)),
-        })
-    }
-
-    resp.Resources = localBrokers
-
-    return resp, nil
-
-}
weed/server/filer_grpc_server_admin.go (new file, 164 lines)
@@ -0,0 +1,164 @@
package weed_server

import (
    "context"
    "fmt"
    "github.com/chrislusf/seaweedfs/weed/cluster"
    "github.com/chrislusf/seaweedfs/weed/glog"
    "github.com/chrislusf/seaweedfs/weed/pb"
    "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
    "github.com/chrislusf/seaweedfs/weed/pb/master_pb"
    "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb"
    "github.com/chrislusf/seaweedfs/weed/util"
)

func (fs *FilerServer) Statistics(ctx context.Context, req *filer_pb.StatisticsRequest) (resp *filer_pb.StatisticsResponse, err error) {

    var output *master_pb.StatisticsResponse

    err = fs.filer.MasterClient.WithClient(false, func(masterClient master_pb.SeaweedClient) error {
        grpcResponse, grpcErr := masterClient.Statistics(context.Background(), &master_pb.StatisticsRequest{
            Replication: req.Replication,
            Collection:  req.Collection,
            Ttl:         req.Ttl,
            DiskType:    req.DiskType,
        })
        if grpcErr != nil {
            return grpcErr
        }

        output = grpcResponse
        return nil
    })

    if err != nil {
        return nil, err
    }

    return &filer_pb.StatisticsResponse{
        TotalSize: output.TotalSize,
        UsedSize:  output.UsedSize,
        FileCount: output.FileCount,
    }, nil
}

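As a usage sketch (not part of this commit), the endpoint can be exercised through the generated filer stub; the address and plaintext credentials below are assumptions for illustration:

package main

import (
    "context"
    "fmt"
    "log"

    "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
    "google.golang.org/grpc"
    "google.golang.org/grpc/credentials/insecure"
)

func main() {
    // assumed filer gRPC address; adjust to your deployment
    conn, err := grpc.Dial("localhost:18888", grpc.WithTransportCredentials(insecure.NewCredentials()))
    if err != nil {
        log.Fatal(err)
    }
    defer conn.Close()

    resp, err := filer_pb.NewSeaweedFilerClient(conn).Statistics(context.Background(), &filer_pb.StatisticsRequest{
        Replication: "000", // example value; the filer forwards the request fields to the master as-is
    })
    if err != nil {
        log.Fatal(err)
    }
    fmt.Printf("total=%d used=%d files=%d\n", resp.TotalSize, resp.UsedSize, resp.FileCount)
}
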
func (fs *FilerServer) Ping(ctx context.Context, req *filer_pb.PingRequest) (resp *filer_pb.PingResponse, pingErr error) {
    resp = &filer_pb.PingResponse{}
    if req.TargetType == cluster.FilerType {
        pingErr = pb.WithFilerClient(false, pb.ServerAddress(req.Target), fs.grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
            _, err := client.Ping(ctx, &filer_pb.PingRequest{})
            return err
        })
    }
    if req.TargetType == cluster.VolumeServerType {
        pingErr = pb.WithVolumeServerClient(false, pb.ServerAddress(req.Target), fs.grpcDialOption, func(client volume_server_pb.VolumeServerClient) error {
            _, err := client.Ping(ctx, &volume_server_pb.PingRequest{})
            return err
        })
    }
    if req.TargetType == cluster.MasterType {
        pingErr = pb.WithMasterClient(false, pb.ServerAddress(req.Target), fs.grpcDialOption, func(client master_pb.SeaweedClient) error {
            _, err := client.Ping(ctx, &master_pb.PingRequest{})
            return err
        })
    }
    if pingErr != nil {
        pingErr = fmt.Errorf("ping %s %s: %v", req.TargetType, req.Target, pingErr)
    }
    return
}

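The handler lets any component act as a ping relay. A minimal caller sketch, reusing the pb helpers visible above (imports: context, google.golang.org/grpc, weed/pb, weed/pb/filer_pb; the target values are illustrative):

// checkPeer asks a filer to ping some other cluster member on our behalf.
// TargetType takes the cluster.* constants used in the handler above.
func checkPeer(filer pb.ServerAddress, opt grpc.DialOption, target, targetType string) error {
    return pb.WithFilerClient(false, filer, opt, func(client filer_pb.SeaweedFilerClient) error {
        _, err := client.Ping(context.Background(), &filer_pb.PingRequest{
            Target:     target,     // e.g. "localhost:19333"
            TargetType: targetType, // e.g. cluster.MasterType
        })
        return err
    })
}
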
func (fs *FilerServer) GetFilerConfiguration(ctx context.Context, req *filer_pb.GetFilerConfigurationRequest) (resp *filer_pb.GetFilerConfigurationResponse, err error) {

    clusterId, _ := fs.filer.Store.KvGet(context.Background(), []byte("clusterId"))

    t := &filer_pb.GetFilerConfigurationResponse{
        Masters:            pb.ToAddressStringsFromMap(fs.option.Masters),
        Collection:         fs.option.Collection,
        Replication:        fs.option.DefaultReplication,
        MaxMb:              uint32(fs.option.MaxMB),
        DirBuckets:         fs.filer.DirBucketsPath,
        Cipher:             fs.filer.Cipher,
        Signature:          fs.filer.Signature,
        MetricsAddress:     fs.metricsAddress,
        MetricsIntervalSec: int32(fs.metricsIntervalSec),
        Version:            util.Version(),
        ClusterId:          string(clusterId),
    }

    glog.V(4).Infof("GetFilerConfiguration: %v", t)

    return t, nil
}

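Clients typically call this once at startup to discover masters and upload limits; a minimal sketch (same imports as the snippet above, fmt added; field names match the response struct populated in the handler):

// fetchFilerConfig pulls the filer's self-description; illustrative helper,
// not part of this commit.
func fetchFilerConfig(filer pb.ServerAddress, opt grpc.DialOption) error {
    return pb.WithFilerClient(false, filer, opt, func(client filer_pb.SeaweedFilerClient) error {
        conf, err := client.GetFilerConfiguration(context.Background(), &filer_pb.GetFilerConfigurationRequest{})
        if err != nil {
            return err
        }
        fmt.Printf("masters=%v collection=%q maxMB=%d cipher=%v\n",
            conf.Masters, conf.Collection, conf.MaxMb, conf.Cipher)
        return nil
    })
}
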
func (fs *FilerServer) KeepConnected(stream filer_pb.SeaweedFiler_KeepConnectedServer) error {

    req, err := stream.Recv()
    if err != nil {
        return err
    }

    clientName := util.JoinHostPort(req.Name, int(req.GrpcPort))
    m := make(map[string]bool)
    for _, tp := range req.Resources {
        m[tp] = true
    }
    fs.brokersLock.Lock()
    fs.brokers[clientName] = m
    glog.V(0).Infof("+ broker %v", clientName)
    fs.brokersLock.Unlock()

    defer func() {
        fs.brokersLock.Lock()
        delete(fs.brokers, clientName)
        glog.V(0).Infof("- broker %v: %v", clientName, err)
        fs.brokersLock.Unlock()
    }()

    for {
        if err := stream.Send(&filer_pb.KeepConnectedResponse{}); err != nil {
            glog.V(0).Infof("send broker %v: %+v", clientName, err)
            return err
        }
        // println("replied")

        if _, err := stream.Recv(); err != nil {
            glog.V(0).Infof("recv broker %v: %v", clientName, err)
            return err
        }
        // println("received")
    }

}

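On the other side of this stream, a broker registers itself and then echoes the server's heartbeats; a sketch of that client loop (field types assumed from the generated protobuf):

// keepRegistered performs the handshake KeepConnected expects: one
// introductory request carrying name/port/resources, then an echo loop.
func keepRegistered(client filer_pb.SeaweedFilerClient, name string, grpcPort uint32, resources []string) error {
    stream, err := client.KeepConnected(context.Background())
    if err != nil {
        return err
    }
    if err := stream.Send(&filer_pb.KeepConnectedRequest{
        Name:      name,
        GrpcPort:  grpcPort,
        Resources: resources,
    }); err != nil {
        return err
    }
    for {
        if _, err := stream.Recv(); err != nil { // server heartbeat
            return err
        }
        if err := stream.Send(&filer_pb.KeepConnectedRequest{}); err != nil {
            return err
        }
    }
}
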
func (fs *FilerServer) LocateBroker(ctx context.Context, req *filer_pb.LocateBrokerRequest) (resp *filer_pb.LocateBrokerResponse, err error) {

    resp = &filer_pb.LocateBrokerResponse{}

    fs.brokersLock.Lock()
    defer fs.brokersLock.Unlock()

    var localBrokers []*filer_pb.LocateBrokerResponse_Resource

    for b, m := range fs.brokers {
        if _, found := m[req.Resource]; found {
            resp.Found = true
            resp.Resources = []*filer_pb.LocateBrokerResponse_Resource{
                {
                    GrpcAddresses: b,
                    ResourceCount: int32(len(m)),
                },
            }
            return
        }
        localBrokers = append(localBrokers, &filer_pb.LocateBrokerResponse_Resource{
            GrpcAddresses: b,
            ResourceCount: int32(len(m)),
        })
    }

    resp.Resources = localBrokers

    return resp, nil

}

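A matching lookup from the client side might be (illustrative helper, not in this commit):

// findBroker returns the gRPC address serving a resource, if any registered
// broker has it; otherwise resp.Resources lists all known brokers with their
// load, so the caller can pick one (e.g. the least loaded) to host it.
func findBroker(client filer_pb.SeaweedFilerClient, resource string) (string, bool, error) {
    resp, err := client.LocateBroker(context.Background(), &filer_pb.LocateBrokerRequest{Resource: resource})
    if err != nil {
        return "", false, err
    }
    if resp.Found && len(resp.Resources) > 0 {
        return resp.Resources[0].GrpcAddresses, true, nil
    }
    return "", false, nil
}
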
@@ -164,6 +164,7 @@ func (fs *FilerServer) saveMetaData(ctx context.Context, r *http.Request, fileNa
    }

    var entry *filer.Entry
+   var newChunks []*filer_pb.FileChunk
    var mergedChunks []*filer_pb.FileChunk

    isAppend := isAppend(r)
@@ -186,7 +187,7 @@ func (fs *FilerServer) saveMetaData(ctx context.Context, r *http.Request, fileNa
        }
        entry.FileSize += uint64(chunkOffset)
    }
-   mergedChunks = append(entry.Chunks, fileChunks...)
+   newChunks = append(entry.Chunks, fileChunks...)

    // TODO
    if len(entry.Content) > 0 {
@@ -196,7 +197,7 @@ func (fs *FilerServer) saveMetaData(ctx context.Context, r *http.Request, fileNa

    } else {
        glog.V(4).Infoln("saving", path)
-       mergedChunks = fileChunks
+       newChunks = fileChunks
        entry = &filer.Entry{
            FullPath: util.FullPath(path),
            Attr: filer.Attr{
@@ -217,6 +218,13 @@ func (fs *FilerServer) saveMetaData(ctx context.Context, r *http.Request, fileNa
        }
    }

+   // maybe concatenate small chunks into one whole chunk
+   mergedChunks, replyerr = fs.maybeMergeChunks(so, newChunks)
+   if replyerr != nil {
+       glog.V(0).Infof("merge chunks %s: %v", r.RequestURI, replyerr)
+       mergedChunks = newChunks
+   }
+
    // maybe compact entry chunks
    mergedChunks, replyerr = filer.MaybeManifestize(fs.saveAsChunk(so), mergedChunks)
    if replyerr != nil {
weed/server/filer_server_handlers_write_merge.go (new file, 11 lines)
@@ -0,0 +1,11 @@
package weed_server

import (
    "github.com/chrislusf/seaweedfs/weed/operation"
    "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
)

func (fs *FilerServer) maybeMergeChunks(so *operation.StorageOption, inputChunks []*filer_pb.FileChunk) (mergedChunks []*filer_pb.FileChunk, err error) {
    // TODO: merge consecutive smaller chunks into a large chunk to reduce the number of chunks
    return inputChunks, nil
}

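The stub leaves the actual coalescing for later. One shape the TODO could take, sketched here with a hypothetical combine callback standing in for the read-back-and-re-upload step (chunks are assumed sorted by offset; only the grouping logic is shown):

// groupSmallChunks walks chunks in offset order and groups consecutive,
// contiguous runs whose combined size stays under maxMergedSize. Real code
// would also skip chunks that are already large and would implement combine
// by reading the run back and writing one replacement chunk.
func groupSmallChunks(chunks []*filer_pb.FileChunk, maxMergedSize uint64,
    combine func(run []*filer_pb.FileChunk) (*filer_pb.FileChunk, error)) ([]*filer_pb.FileChunk, error) {

    var out []*filer_pb.FileChunk
    var run []*filer_pb.FileChunk
    var runSize uint64

    flush := func() error {
        if len(run) > 1 {
            merged, err := combine(run)
            if err != nil {
                return err
            }
            out = append(out, merged)
        } else if len(run) == 1 {
            out = append(out, run[0])
        }
        run, runSize = nil, 0
        return nil
    }

    for _, c := range chunks {
        // break the run when the next chunk is not contiguous with the
        // previous one, or when merging would exceed the size limit
        if len(run) > 0 && (run[len(run)-1].Offset+int64(run[len(run)-1].Size) != c.Offset ||
            runSize+c.Size > maxMergedSize) {
            if err := flush(); err != nil {
                return nil, err
            }
        }
        run = append(run, c)
        runSize += c.Size
    }
    if err := flush(); err != nil {
        return nil, err
    }
    return out, nil
}
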
@@ -133,13 +133,13 @@ func (ms *MasterServer) SendHeartbeat(stream master_pb.Seaweed_SendHeartbeatServ
        ms.Topo.IncrementalSyncDataNodeEcShards(heartbeat.NewEcShards, heartbeat.DeletedEcShards, dn)

        for _, s := range heartbeat.NewEcShards {
-           message.NewVids = append(message.NewVids, s.Id)
+           message.NewEcVids = append(message.NewEcVids, s.Id)
        }
        for _, s := range heartbeat.DeletedEcShards {
-           if dn.HasVolumesById(needle.VolumeId(s.Id)) {
+           if dn.HasEcShards(needle.VolumeId(s.Id)) {
                continue
            }
-           message.DeletedVids = append(message.DeletedVids, s.Id)
+           message.DeletedEcVids = append(message.DeletedEcVids, s.Id)
        }

    }
@@ -151,17 +151,17 @@ func (ms *MasterServer) SendHeartbeat(stream master_pb.Seaweed_SendHeartbeatServ

        // broadcast the ec vid changes to master clients
        for _, s := range newShards {
-           message.NewVids = append(message.NewVids, uint32(s.VolumeId))
+           message.NewEcVids = append(message.NewEcVids, uint32(s.VolumeId))
        }
        for _, s := range deletedShards {
            if dn.HasVolumesById(s.VolumeId) {
                continue
            }
-           message.DeletedVids = append(message.DeletedVids, uint32(s.VolumeId))
+           message.DeletedEcVids = append(message.DeletedEcVids, uint32(s.VolumeId))
        }

    }
-   if len(message.NewVids) > 0 || len(message.DeletedVids) > 0 {
+   if len(message.NewVids) > 0 || len(message.DeletedVids) > 0 || len(message.NewEcVids) > 0 || len(message.DeletedEcVids) > 0 {
        ms.broadcastToClients(&master_pb.KeepConnectedResponse{VolumeLocation: message})
    }

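With this change the VolumeLocation broadcast distinguishes regular volume ids from EC shard ids, so subscribers can maintain the two namespaces separately; an illustrative client-side apply (the two cache maps are hypothetical):

// applyVolumeLocation updates per-type caches from one broadcast message;
// field names come from master_pb.VolumeLocation as used above.
func applyVolumeLocation(loc *master_pb.VolumeLocation, vidCache, ecVidCache map[uint32]string) {
    for _, vid := range loc.NewVids {
        vidCache[vid] = loc.Url
    }
    for _, vid := range loc.DeletedVids {
        delete(vidCache, vid)
    }
    for _, vid := range loc.NewEcVids {
        ecVidCache[vid] = loc.Url
    }
    for _, vid := range loc.DeletedEcVids {
        delete(ecVidCache, vid)
    }
}
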
@@ -3,7 +3,11 @@ package weed_server
import (
    "context"
    "fmt"
+   "github.com/chrislusf/seaweedfs/weed/cluster"
    "github.com/chrislusf/seaweedfs/weed/glog"
+   "github.com/chrislusf/seaweedfs/weed/pb"
+   "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+   "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb"
    "math/rand"
    "sync"
    "time"
@@ -142,3 +146,29 @@ func (ms *MasterServer) ReleaseAdminToken(ctx context.Context, req *master_pb.Re
    }
    return resp, nil
}
+
+func (ms *MasterServer) Ping(ctx context.Context, req *master_pb.PingRequest) (resp *master_pb.PingResponse, pingErr error) {
+   resp = &master_pb.PingResponse{}
+   if req.TargetType == cluster.FilerType {
+       pingErr = pb.WithFilerClient(false, pb.ServerAddress(req.Target), ms.grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
+           _, err := client.Ping(ctx, &filer_pb.PingRequest{})
+           return err
+       })
+   }
+   if req.TargetType == cluster.VolumeServerType {
+       pingErr = pb.WithVolumeServerClient(false, pb.ServerAddress(req.Target), ms.grpcDialOption, func(client volume_server_pb.VolumeServerClient) error {
+           _, err := client.Ping(ctx, &volume_server_pb.PingRequest{})
+           return err
+       })
+   }
+   if req.TargetType == cluster.MasterType {
+       pingErr = pb.WithMasterClient(false, pb.ServerAddress(req.Target), ms.grpcDialOption, func(client master_pb.SeaweedClient) error {
+           _, err := client.Ping(ctx, &master_pb.PingRequest{})
+           return err
+       })
+   }
+   if pingErr != nil {
+       pingErr = fmt.Errorf("ping %s %s: %v", req.TargetType, req.Target, pingErr)
+   }
+   return
+}

@@ -3,6 +3,10 @@ package weed_server
import (
    "context"
    "fmt"
+   "github.com/chrislusf/seaweedfs/weed/cluster"
+   "github.com/chrislusf/seaweedfs/weed/pb"
+   "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+   "github.com/chrislusf/seaweedfs/weed/pb/master_pb"
    "path/filepath"

    "github.com/chrislusf/seaweedfs/weed/glog"
@@ -247,3 +251,29 @@ func (vs *VolumeServer) VolumeNeedleStatus(ctx context.Context, req *volume_serv
    return resp, nil

}
+
+func (vs *VolumeServer) Ping(ctx context.Context, req *volume_server_pb.PingRequest) (resp *volume_server_pb.PingResponse, pingErr error) {
+   resp = &volume_server_pb.PingResponse{}
+   if req.TargetType == cluster.FilerType {
+       pingErr = pb.WithFilerClient(false, pb.ServerAddress(req.Target), vs.grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
+           _, err := client.Ping(ctx, &filer_pb.PingRequest{})
+           return err
+       })
+   }
+   if req.TargetType == cluster.VolumeServerType {
+       pingErr = pb.WithVolumeServerClient(false, pb.ServerAddress(req.Target), vs.grpcDialOption, func(client volume_server_pb.VolumeServerClient) error {
+           _, err := client.Ping(ctx, &volume_server_pb.PingRequest{})
+           return err
+       })
+   }
+   if req.TargetType == cluster.MasterType {
+       pingErr = pb.WithMasterClient(false, pb.ServerAddress(req.Target), vs.grpcDialOption, func(client master_pb.SeaweedClient) error {
+           _, err := client.Ping(ctx, &master_pb.PingRequest{})
+           return err
+       })
+   }
+   if pingErr != nil {
+       pingErr = fmt.Errorf("ping %s %s: %v", req.TargetType, req.Target, pingErr)
+   }
+   return
+}