Merge remote-tracking branch 'origin/master'
@@ -72,7 +72,7 @@ func runBackup(cmd *Command, args []string) bool {
vid := needle.VolumeId(*s.volumeId)
// find volume location, replication, ttl info
lookup, err := operation.Lookup(*s.master, vid.String())
lookup, err := operation.Lookup(func() string { return *s.master }, vid.String())
if err != nil {
fmt.Printf("Error looking up volume %d: %v\n", vid, err)
return true
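Note: the recurring change in this commit replaces a fixed master address string with an operation.GetMasterFn callback (declared later in this diff as "type GetMasterFn func() string"), so the current master is re-resolved on every call. A minimal sketch of the two call-site styles, reusing names that appear in this diff:

// wrap a static -master flag value in a closure
lookup, err := operation.Lookup(func() string { return *s.master }, vid.String())

// or pass a live master client's method value (no parentheses: the function itself is the argument)
assignResult, err := operation.Assign(b.masterClient.GetMaster, b.grpcDialOption, ar)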
@@ -35,6 +35,7 @@ type BenchmarkOptions struct {
sequentialRead *bool
collection *string
replication *string
diskType *string
cpuprofile *string
maxCpu *int
grpcDialOption grpc.DialOption
@@ -62,6 +63,7 @@ func init() {
b.sequentialRead = cmdBenchmark.Flag.Bool("readSequentially", false, "randomly read by ids from \"-list\" specified file")
b.collection = cmdBenchmark.Flag.String("collection", "benchmark", "write data to this collection")
b.replication = cmdBenchmark.Flag.String("replication", "000", "replication type")
b.diskType = cmdBenchmark.Flag.String("disk", "", "[hdd|ssd] hard drive or solid state drive")
b.cpuprofile = cmdBenchmark.Flag.String("cpuprofile", "", "cpu profile output file")
b.maxCpu = cmdBenchmark.Flag.Int("maxCpu", 0, "maximum number of CPUs. 0 means all available CPUs")
b.fsync = cmdBenchmark.Flag.Bool("fsync", false, "flush data to disk after write")
@@ -234,13 +236,14 @@ func writeFiles(idChan chan int, fileIdLineChan chan string, s *stat) {
Count: 1,
Collection: *b.collection,
Replication: *b.replication,
DiskType: *b.diskType,
}
if assignResult, err := operation.Assign(b.masterClient.GetMaster(), b.grpcDialOption, ar); err == nil {
if assignResult, err := operation.Assign(b.masterClient.GetMaster, b.grpcDialOption, ar); err == nil {
fp.Server, fp.Fid, fp.Collection = assignResult.Url, assignResult.Fid, *b.collection
if !isSecure && assignResult.Auth != "" {
isSecure = true
}
if _, err := fp.Upload(0, b.masterClient.GetMaster(), false, assignResult.Auth, b.grpcDialOption); err == nil {
if _, err := fp.Upload(0, b.masterClient.GetMaster, false, assignResult.Auth, b.grpcDialOption); err == nil {
if random.Intn(100) < *b.deletePercentage {
s.total++
delayedDeleteChan <- &delayedFile{time.Now().Add(time.Second), fp}
@@ -290,7 +293,7 @@ func readFiles(fileIdLineChan chan string, s *stat) {
}
var bytes []byte
for _, url := range urls {
bytes, _, err = util.Get(url)
bytes, _, err = util.FastGet(url)
if err == nil {
break
}
@@ -44,15 +44,15 @@ var cmdDownload = &Command{
func runDownload(cmd *Command, args []string) bool {
for _, fid := range args {
if e := downloadToFile(*d.server, fid, util.ResolvePath(*d.dir)); e != nil {
if e := downloadToFile(func() string { return *d.server }, fid, util.ResolvePath(*d.dir)); e != nil {
fmt.Println("Download Error: ", fid, e)
}
}
return true
}
func downloadToFile(server, fileId, saveDir string) error {
fileUrl, lookupError := operation.LookupFileId(server, fileId)
func downloadToFile(masterFn operation.GetMasterFn, fileId, saveDir string) error {
fileUrl, lookupError := operation.LookupFileId(masterFn, fileId)
if lookupError != nil {
return lookupError
}
@@ -83,7 +83,7 @@ func downloadToFile(server, fileId, saveDir string) error {
fids := strings.Split(string(content), "\n")
for _, partId := range fids {
var n int
_, part, err := fetchContent(*d.server, partId)
_, part, err := fetchContent(masterFn, partId)
if err == nil {
n, err = f.Write(part)
}
@@ -103,8 +103,8 @@ func downloadToFile(server, fileId, saveDir string) error {
return nil
}
func fetchContent(server string, fileId string) (filename string, content []byte, e error) {
fileUrl, lookupError := operation.LookupFileId(server, fileId)
func fetchContent(masterFn operation.GetMasterFn, fileId string) (filename string, content []byte, e error) {
fileUrl, lookupError := operation.LookupFileId(masterFn, fileId)
if lookupError != nil {
return "", nil, lookupError
}
@@ -37,6 +37,7 @@ type CopyOptions struct {
replication *string
collection *string
ttl *string
diskType *string
maxMB *int
masterClient *wdclient.MasterClient
concurrenctFiles *int
@@ -54,6 +55,7 @@ func init() {
copy.replication = cmdCopy.Flag.String("replication", "", "replication type")
copy.collection = cmdCopy.Flag.String("collection", "", "optional collection name")
copy.ttl = cmdCopy.Flag.String("ttl", "", "time to live, e.g.: 1m, 1h, 1d, 1M, 1y")
copy.diskType = cmdCopy.Flag.String("disk", "", "[hdd|ssd] hard drive or solid state drive")
copy.maxMB = cmdCopy.Flag.Int("maxMB", 32, "split files larger than the limit")
copy.concurrenctFiles = cmdCopy.Flag.Int("c", 8, "concurrent file copy goroutines")
copy.concurrenctChunks = cmdCopy.Flag.Int("concurrentChunks", 8, "concurrent chunk copy goroutines for each file")
@@ -311,6 +313,7 @@ func (worker *FileCopyWorker) uploadFileAsOne(task FileCopyTask, f *os.File) err
Replication: *worker.options.replication,
Collection: *worker.options.collection,
TtlSec: worker.options.ttlSec,
DiskType: *worker.options.diskType,
Path: task.destinationUrlPath,
}
@@ -405,6 +408,7 @@ func (worker *FileCopyWorker) uploadFileInChunks(task FileCopyTask, f *os.File,
Replication: *worker.options.replication,
Collection: *worker.options.collection,
TtlSec: worker.options.ttlSec,
DiskType: *worker.options.diskType,
Path: task.destinationUrlPath + fileName,
}
@@ -459,7 +463,9 @@ func (worker *FileCopyWorker) uploadFileInChunks(task FileCopyTask, f *os.File,
for _, chunk := range chunks {
fileIds = append(fileIds, chunk.FileId)
}
operation.DeleteFiles(copy.masters[0], false, worker.options.grpcDialOption, fileIds)
operation.DeleteFiles(func() string {
return copy.masters[0]
}, false, worker.options.grpcDialOption, fileIds)
return uploadError
}
@@ -31,6 +31,8 @@ type SyncOptions struct {
bCollection *string
aTtlSec *int
bTtlSec *int
aDiskType *string
bDiskType *string
aDebug *bool
bDebug *bool
aProxyByFiler *bool
@@ -56,6 +58,8 @@ func init() {
syncOptions.bCollection = cmdFilerSynchronize.Flag.String("b.collection", "", "collection on filer B")
syncOptions.aTtlSec = cmdFilerSynchronize.Flag.Int("a.ttlSec", 0, "ttl in seconds on filer A")
syncOptions.bTtlSec = cmdFilerSynchronize.Flag.Int("b.ttlSec", 0, "ttl in seconds on filer B")
syncOptions.aDiskType = cmdFilerSynchronize.Flag.String("a.disk", "", "[hdd|ssd] hard drive or solid state drive on filer A")
syncOptions.bDiskType = cmdFilerSynchronize.Flag.String("b.disk", "", "[hdd|ssd] hard drive or solid state drive on filer B")
syncOptions.aProxyByFiler = cmdFilerSynchronize.Flag.Bool("a.filerProxy", false, "read and write file chunks by filer A instead of volume servers")
syncOptions.bProxyByFiler = cmdFilerSynchronize.Flag.Bool("b.filerProxy", false, "read and write file chunks by filer B instead of volume servers")
syncOptions.aDebug = cmdFilerSynchronize.Flag.Bool("a.debug", false, "debug mode to print out filer A received files")
@@ -90,9 +94,8 @@ func runFilerSynchronize(cmd *Command, args []string) bool {
go func() {
for {
err := doSubscribeFilerMetaChanges(grpcDialOption, *syncOptions.filerA, *syncOptions.aPath, *syncOptions.aProxyByFiler,
*syncOptions.filerB, *syncOptions.bPath, *syncOptions.bReplication, *syncOptions.bCollection, *syncOptions.bTtlSec, *syncOptions.bProxyByFiler,
*syncOptions.bDebug)
err := doSubscribeFilerMetaChanges(grpcDialOption, *syncOptions.filerA, *syncOptions.aPath, *syncOptions.aProxyByFiler, *syncOptions.filerB,
*syncOptions.bPath, *syncOptions.bReplication, *syncOptions.bCollection, *syncOptions.bTtlSec, *syncOptions.bProxyByFiler, *syncOptions.bDiskType, *syncOptions.bDebug)
if err != nil {
glog.Errorf("sync from %s to %s: %v", *syncOptions.filerA, *syncOptions.filerB, err)
time.Sleep(1747 * time.Millisecond)
@@ -103,9 +106,8 @@ func runFilerSynchronize(cmd *Command, args []string) bool {
if !*syncOptions.isActivePassive {
go func() {
for {
err := doSubscribeFilerMetaChanges(grpcDialOption, *syncOptions.filerB, *syncOptions.bPath, *syncOptions.bProxyByFiler,
*syncOptions.filerA, *syncOptions.aPath, *syncOptions.aReplication, *syncOptions.aCollection, *syncOptions.aTtlSec, *syncOptions.aProxyByFiler,
*syncOptions.aDebug)
err := doSubscribeFilerMetaChanges(grpcDialOption, *syncOptions.filerB, *syncOptions.bPath, *syncOptions.bProxyByFiler, *syncOptions.filerA,
*syncOptions.aPath, *syncOptions.aReplication, *syncOptions.aCollection, *syncOptions.aTtlSec, *syncOptions.aProxyByFiler, *syncOptions.aDiskType, *syncOptions.aDebug)
if err != nil {
glog.Errorf("sync from %s to %s: %v", *syncOptions.filerB, *syncOptions.filerA, err)
time.Sleep(2147 * time.Millisecond)
@@ -120,7 +122,7 @@ func runFilerSynchronize(cmd *Command, args []string) bool {
}
func doSubscribeFilerMetaChanges(grpcDialOption grpc.DialOption, sourceFiler, sourcePath string, sourceReadChunkFromFiler bool, targetFiler, targetPath string,
replicationStr, collection string, ttlSec int, sinkWriteChunkByFiler, debug bool) error {
replicationStr, collection string, ttlSec int, sinkWriteChunkByFiler bool, diskType string, debug bool) error {
// read source filer signature
sourceFilerSignature, sourceErr := replication.ReadFilerSignature(grpcDialOption, sourceFiler)
@@ -146,7 +148,7 @@ func doSubscribeFilerMetaChanges(grpcDialOption grpc.DialOption, sourceFiler, so
filerSource := &source.FilerSource{}
filerSource.DoInitialize(sourceFiler, pb.ServerToGrpcAddress(sourceFiler), sourcePath, sourceReadChunkFromFiler)
filerSink := &filersink.FilerSink{}
filerSink.DoInitialize(targetFiler, pb.ServerToGrpcAddress(targetFiler), targetPath, replicationStr, collection, ttlSec, grpcDialOption, sinkWriteChunkByFiler)
filerSink.DoInitialize(targetFiler, pb.ServerToGrpcAddress(targetFiler), targetPath, replicationStr, collection, ttlSec, diskType, grpcDialOption, sinkWriteChunkByFiler)
filerSink.SetSourceFiler(filerSource)
processEventFn := func(resp *filer_pb.SubscribeMetadataResponse) error {
@@ -12,6 +12,7 @@ type MountOptions struct {
dirAutoCreate *bool
collection *string
replication *string
diskType *string
ttlSec *int
chunkSizeLimitMB *int
concurrentWriters *int
@@ -41,6 +42,7 @@ func init() {
mountOptions.dirAutoCreate = cmdMount.Flag.Bool("dirAutoCreate", false, "auto create the directory to mount to")
mountOptions.collection = cmdMount.Flag.String("collection", "", "collection to create the files")
mountOptions.replication = cmdMount.Flag.String("replication", "", "replication(e.g. 000, 001) to create to files. If empty, let filer decide.")
mountOptions.diskType = cmdMount.Flag.String("disk", "", "[hdd|ssd] hard drive or solid state drive")
mountOptions.ttlSec = cmdMount.Flag.Int("ttl", 0, "file ttl in seconds")
mountOptions.chunkSizeLimitMB = cmdMount.Flag.Int("chunkSizeLimitMB", 2, "local write buffer size, also chunk large files")
mountOptions.concurrentWriters = cmdMount.Flag.Int("concurrentWriters", 128, "limit concurrent goroutine writers if not 0")
@@ -5,6 +5,7 @@ package command
import (
"context"
"fmt"
"github.com/chrislusf/seaweedfs/weed/storage/types"
"os"
"os/user"
"path"
@@ -168,6 +169,8 @@ func RunMount(option *MountOptions, umask os.FileMode) bool {
mountRoot = mountRoot[0 : len(mountRoot)-1]
}
diskType := types.ToDiskType(*option.diskType)
seaweedFileSystem := filesys.NewSeaweedFileSystem(&filesys.Option{
MountDirectory: dir,
FilerAddress: filer,
@@ -177,6 +180,7 @@ func RunMount(option *MountOptions, umask os.FileMode) bool {
Collection: *option.collection,
Replication: *option.replication,
TtlSec: int32(*option.ttlSec),
DiskType: diskType,
ChunkSizeLimit: int64(chunkSizeLimitMB) * 1024 * 1024,
ConcurrentWriters: *option.concurrentWriters,
CacheDir: *option.cacheDir,
@@ -124,11 +124,11 @@ interpolateParams = false
[mysql2] # or memsql, tidb
enabled = false
createTable = """
CREATE TABLE IF NOT EXISTS %s (
dirhash BIGINT,
name VARCHAR(1000),
directory TEXT,
meta LONGBLOB,
CREATE TABLE IF NOT EXISTS ` + "`%s`" + ` (
dirhash BIGINT,
name VARCHAR(1000),
directory TEXT,
meta LONGBLOB,
PRIMARY KEY (dirhash, name)
) DEFAULT CHARSET=utf8;
"""
@@ -160,11 +160,12 @@ schema = ""
sslmode = "disable"
connection_max_idle = 100
connection_max_open = 100
connection_max_lifetime_seconds = 0
[postgres2]
enabled = false
createTable = """
CREATE TABLE IF NOT EXISTS %s (
CREATE TABLE IF NOT EXISTS "%s" (
dirhash BIGINT,
name VARCHAR(65535),
directory VARCHAR(65535),
@@ -181,6 +182,7 @@ schema = ""
sslmode = "disable"
connection_max_idle = 100
connection_max_open = 100
connection_max_lifetime_seconds = 0
[cassandra]
# CREATE TABLE filemeta (
@@ -102,6 +102,7 @@ func init() {
serverOptions.v.port = cmdServer.Flag.Int("volume.port", 8080, "volume server http listen port")
serverOptions.v.publicPort = cmdServer.Flag.Int("volume.port.public", 0, "volume server public port")
serverOptions.v.indexType = cmdServer.Flag.String("volume.index", "memory", "Choose [memory|leveldb|leveldbMedium|leveldbLarge] mode for memory~performance balance.")
serverOptions.v.diskType = cmdServer.Flag.String("volume.disk", "", "[hdd|ssd] hard drive or solid state drive")
serverOptions.v.fixJpgOrientation = cmdServer.Flag.Bool("volume.images.fix.orientation", false, "Adjust jpg orientation when uploading.")
serverOptions.v.readRedirect = cmdServer.Flag.Bool("volume.read.redirect", true, "Redirect moved or non-local volumes.")
serverOptions.v.compactionMBPerSecond = cmdServer.Flag.Int("volume.compactionMBps", 0, "limit compaction speed in mega bytes per second")
@@ -27,6 +27,7 @@ type UploadOptions struct {
collection *string
dataCenter *string
ttl *string
diskType *string
maxMB *int
usePublicUrl *bool
}
@@ -40,6 +41,7 @@ func init() {
upload.replication = cmdUpload.Flag.String("replication", "", "replication type")
upload.collection = cmdUpload.Flag.String("collection", "", "optional collection name")
upload.dataCenter = cmdUpload.Flag.String("dataCenter", "", "optional data center name")
upload.diskType = cmdUpload.Flag.String("disk", "", "[hdd|ssd] hard drive or solid state drive")
upload.ttl = cmdUpload.Flag.String("ttl", "", "time to live, e.g.: 1m, 1h, 1d, 1M, 1y")
upload.maxMB = cmdUpload.Flag.Int("maxMB", 32, "split files larger than the limit")
upload.usePublicUrl = cmdUpload.Flag.Bool("usePublicUrl", false, "upload to public url from volume server")
@@ -94,7 +96,7 @@ func runUpload(cmd *Command, args []string) bool {
if e != nil {
return e
}
results, e := operation.SubmitFiles(*upload.master, grpcDialOption, parts, *upload.replication, *upload.collection, *upload.dataCenter, *upload.ttl, *upload.maxMB, *upload.usePublicUrl)
results, e := operation.SubmitFiles(func() string { return *upload.master }, grpcDialOption, parts, *upload.replication, *upload.collection, *upload.dataCenter, *upload.ttl, *upload.diskType, *upload.maxMB, *upload.usePublicUrl)
bytes, _ := json.Marshal(results)
fmt.Println(string(bytes))
if e != nil {
@@ -111,7 +113,7 @@ func runUpload(cmd *Command, args []string) bool {
if e != nil {
fmt.Println(e.Error())
}
results, _ := operation.SubmitFiles(*upload.master, grpcDialOption, parts, *upload.replication, *upload.collection, *upload.dataCenter, *upload.ttl, *upload.maxMB, *upload.usePublicUrl)
results, _ := operation.SubmitFiles(func() string { return *upload.master }, grpcDialOption, parts, *upload.replication, *upload.collection, *upload.dataCenter, *upload.ttl, *upload.diskType, *upload.maxMB, *upload.usePublicUrl)
bytes, _ := json.Marshal(results)
fmt.Println(string(bytes))
}
@@ -2,6 +2,7 @@ package command
import (
"fmt"
"github.com/chrislusf/seaweedfs/weed/storage/types"
"net/http"
httppprof "net/http/pprof"
"os"
@@ -49,6 +50,7 @@ type VolumeServerOptions struct {
rack *string
whiteList []string
indexType *string
diskType *string
fixJpgOrientation *bool
readRedirect *bool
cpuProfile *string
@@ -76,6 +78,7 @@ func init() {
v.dataCenter = cmdVolume.Flag.String("dataCenter", "", "current volume server's data center name")
v.rack = cmdVolume.Flag.String("rack", "", "current volume server's rack name")
v.indexType = cmdVolume.Flag.String("index", "memory", "Choose [memory|leveldb|leveldbMedium|leveldbLarge] mode for memory~performance balance.")
v.diskType = cmdVolume.Flag.String("disk", "", "[hdd|ssd] hard drive or solid state drive")
v.fixJpgOrientation = cmdVolume.Flag.Bool("images.fix.orientation", false, "Adjust jpg orientation when uploading.")
v.readRedirect = cmdVolume.Flag.Bool("read.redirect", true, "Redirect moved or non-local volumes.")
v.cpuProfile = cmdVolume.Flag.String("cpuprofile", "", "cpu profile output file")
@@ -167,6 +170,21 @@ func (v VolumeServerOptions) startVolumeServer(volumeFolders, maxVolumeCounts, v
glog.Fatalf("%d directories by -dir, but only %d minFreeSpacePercent is set by -minFreeSpacePercent", len(v.folders), len(v.minFreeSpacePercents))
}
// set disk types
var diskTypes []types.DiskType
diskTypeStrings := strings.Split(*v.diskType, ",")
for _, diskTypeString := range diskTypeStrings {
diskTypes = append(diskTypes, types.ToDiskType(diskTypeString))
}
if len(diskTypes) == 1 && len(v.folders) > 1 {
for i := 0; i < len(v.folders)-1; i++ {
diskTypes = append(diskTypes, diskTypes[0])
}
}
if len(v.folders) != len(diskTypes) {
glog.Fatalf("%d directories by -dir, but only %d disk types is set by -disk", len(v.folders), len(diskTypes))
}
// security related white list configuration
if volumeWhiteListOption != "" {
v.whiteList = strings.Split(volumeWhiteListOption, ",")
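Note: with the new -disk flag above, a single value fans out to every -dir folder, while a comma-separated list must match the folder count exactly. For example (hypothetical paths), "weed volume -dir=/data1,/data2 -disk=ssd" treats both folders as ssd, and "-disk=ssd,hdd" assigns one type per folder; a count mismatch is a startup error.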
@@ -212,7 +230,7 @@ func (v VolumeServerOptions) startVolumeServer(volumeFolders, maxVolumeCounts, v
volumeServer := weed_server.NewVolumeServer(volumeMux, publicVolumeMux,
*v.ip, *v.port, *v.publicUrl,
v.folders, v.folderMaxLimits, v.minFreeSpacePercents,
v.folders, v.folderMaxLimits, v.minFreeSpacePercents, diskTypes,
*v.idxFolder,
volumeNeedleMapKind,
strings.Split(masters, ","), 5, *v.dataCenter, *v.rack,
weed/filer.toml (new file, 11 lines)
@@ -0,0 +1,11 @@
[elastic7]
enabled = true
servers = [
"http://localhost:9200",
]
username = ""
password = ""
sniff_enabled = false
healthcheck_enabled = false
# increase the value is recommend, be sure the value in Elastic is greater or equal here
index.max_result_window = 10000
@@ -107,7 +107,7 @@ func (store *AbstractSqlStore) getTxOrDB(ctx context.Context, fullpath util.Full
}
if _, found := store.dbs[bucket]; !found {
if err = store.CreateTable(ctx, bucket); err != nil {
if err = store.CreateTable(ctx, bucket); err == nil {
store.dbs[bucket] = true
}
}
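Note: the err != nil to err == nil flip above is a bug fix: the per-bucket table is now recorded in store.dbs only when CreateTable actually succeeds, instead of when it fails.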
@@ -18,6 +18,7 @@ type Attr struct {
Replication string // replication
Collection string // collection name
TtlSec int32 // ttl in seconds
DiskType string
UserName string
GroupNames []string
SymlinkTarget string
@@ -56,6 +56,7 @@ func EntryAttributeToPb(entry *Entry) *filer_pb.FuseAttributes {
Collection: entry.Attr.Collection,
Replication: entry.Attr.Replication,
TtlSec: entry.Attr.TtlSec,
DiskType: entry.Attr.DiskType,
UserName: entry.Attr.UserName,
GroupName: entry.Attr.GroupNames,
SymlinkTarget: entry.Attr.SymlinkTarget,
@@ -81,6 +82,7 @@ func PbToEntryAttribute(attr *filer_pb.FuseAttributes) Attr {
t.Collection = attr.Collection
t.Replication = attr.Replication
t.TtlSec = attr.TtlSec
t.DiskType = attr.DiskType
t.UserName = attr.UserName
t.GroupNames = attr.GroupName
t.SymlinkTarget = attr.SymlinkTarget
@@ -102,7 +102,7 @@ func retriedFetchChunkData(urlStrings []string, cipherKey []byte, isGzipped bool
for waitTime := time.Second; waitTime < util.RetryWaitTime; waitTime += waitTime / 2 {
for _, urlString := range urlStrings {
shouldRetry, err = util.ReadUrlAsStream(urlString+"?readDeleted=true", cipherKey, isGzipped, isFullChunk, offset, size, func(data []byte) {
shouldRetry, err = util.FastReadUrlAsStream(urlString+"?readDeleted=true", cipherKey, isGzipped, isFullChunk, offset, size, func(data []byte) {
buffer.Write(data)
})
if !shouldRetry {
@@ -116,7 +116,7 @@ func mergePathConf(a, b *filer_pb.FilerConf_PathConf) {
a.Collection = util.Nvl(b.Collection, a.Collection)
a.Replication = util.Nvl(b.Replication, a.Replication)
a.Ttl = util.Nvl(b.Ttl, a.Ttl)
if b.DiskType != filer_pb.FilerConf_PathConf_NONE {
if b.DiskType != "" {
a.DiskType = b.DiskType
}
a.Fsync = b.Fsync || a.Fsync
@@ -55,7 +55,10 @@ func (f *Filer) NotifyUpdateEvent(ctx context.Context, oldEntry, newEntry *Entry
if notification.Queue != nil {
glog.V(3).Infof("notifying entry update %v", fullpath)
notification.Queue.SendMessage(fullpath, eventNotification)
if err := notification.Queue.SendMessage(fullpath, eventNotification); err != nil {
// throw message
glog.Error(err)
}
}
f.logMetaEvent(ctx, fullpath, eventNotification)
@@ -56,7 +56,7 @@ func (f *Filer) assignAndUpload(targetFile string, data []byte) (*operation.Assi
WritableVolumeCount: rule.VolumeGrowthCount,
}
assignResult, err := operation.Assign(f.GetMaster(), f.GrpcDialOption, assignRequest)
assignResult, err := operation.Assign(f.GetMaster, f.GrpcDialOption, assignRequest)
if err != nil {
return nil, nil, fmt.Errorf("AssignVolume: %v", err)
}
@@ -16,31 +16,31 @@ var (
)
func (gen *SqlGenMysql) GetSqlInsert(bucket string) string {
return fmt.Sprintf("INSERT INTO %s (dirhash,name,directory,meta) VALUES(?,?,?,?)", bucket)
return fmt.Sprintf("INSERT INTO `%s` (dirhash,name,directory,meta) VALUES(?,?,?,?)", bucket)
}
func (gen *SqlGenMysql) GetSqlUpdate(bucket string) string {
return fmt.Sprintf("UPDATE %s SET meta=? WHERE dirhash=? AND name=? AND directory=?", bucket)
return fmt.Sprintf("UPDATE `%s` SET meta=? WHERE dirhash=? AND name=? AND directory=?", bucket)
}
func (gen *SqlGenMysql) GetSqlFind(bucket string) string {
return fmt.Sprintf("SELECT meta FROM %s WHERE dirhash=? AND name=? AND directory=?", bucket)
return fmt.Sprintf("SELECT meta FROM `%s` WHERE dirhash=? AND name=? AND directory=?", bucket)
}
func (gen *SqlGenMysql) GetSqlDelete(bucket string) string {
return fmt.Sprintf("DELETE FROM %s WHERE dirhash=? AND name=? AND directory=?", bucket)
return fmt.Sprintf("DELETE FROM `%s` WHERE dirhash=? AND name=? AND directory=?", bucket)
}
func (gen *SqlGenMysql) GetSqlDeleteFolderChildren(bucket string) string {
return fmt.Sprintf("DELETE FROM %s WHERE dirhash=? AND directory=?", bucket)
return fmt.Sprintf("DELETE FROM `%s` WHERE dirhash=? AND directory=?", bucket)
}
func (gen *SqlGenMysql) GetSqlListExclusive(bucket string) string {
return fmt.Sprintf("SELECT NAME, meta FROM %s WHERE dirhash=? AND name>? AND directory=? AND name like ? ORDER BY NAME ASC LIMIT ?", bucket)
return fmt.Sprintf("SELECT NAME, meta FROM `%s` WHERE dirhash=? AND name>? AND directory=? AND name like ? ORDER BY NAME ASC LIMIT ?", bucket)
}
func (gen *SqlGenMysql) GetSqlListInclusive(bucket string) string {
return fmt.Sprintf("SELECT NAME, meta FROM %s WHERE dirhash=? AND name>=? AND directory=? AND name like ? ORDER BY NAME ASC LIMIT ?", bucket)
return fmt.Sprintf("SELECT NAME, meta FROM `%s` WHERE dirhash=? AND name>=? AND directory=? AND name like ? ORDER BY NAME ASC LIMIT ?", bucket)
}
func (gen *SqlGenMysql) GetSqlCreateTable(bucket string) string {
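Note: quoting the bucket-derived table name (backticks for MySQL here, double quotes in the Postgres generator below) keeps the generated SQL valid when a bucket name clashes with a reserved word or contains characters such as '-'. A minimal sketch of the effect, using a hypothetical bucket name:

gen := &SqlGenMysql{}
fmt.Println(gen.GetSqlInsert("my-bucket"))
// prints: INSERT INTO `my-bucket` (dirhash,name,directory,meta) VALUES(?,?,?,?)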
@@ -47,12 +47,14 @@ func (store *MysqlStore) initialize(user, password, hostname string, port int, d
store.SupportBucketTable = false
store.SqlGenerator = &SqlGenMysql{
CreateTableSqlTemplate: "",
DropTableSqlTemplate: "drop table %s",
DropTableSqlTemplate: "drop table `%s`",
}
sqlUrl := fmt.Sprintf(CONNECTION_URL_PATTERN, user, password, hostname, port, database)
adaptedSqlUrl := fmt.Sprintf(CONNECTION_URL_PATTERN, user, "<ADAPTED>", hostname, port, database)
if interpolateParams {
sqlUrl += "&interpolateParams=true"
adaptedSqlUrl += "&interpolateParams=true"
}
var dbErr error
@@ -60,7 +62,7 @@ func (store *MysqlStore) initialize(user, password, hostname string, port int, d
if dbErr != nil {
store.DB.Close()
store.DB = nil
return fmt.Errorf("can not connect to %s error:%v", sqlUrl, err)
return fmt.Errorf("can not connect to %s error:%v", adaptedSqlUrl, err)
}
store.DB.SetMaxIdleConns(maxIdle)
@@ -50,12 +50,14 @@ func (store *MysqlStore2) initialize(createTable, user, password, hostname strin
store.SupportBucketTable = true
store.SqlGenerator = &mysql.SqlGenMysql{
CreateTableSqlTemplate: createTable,
DropTableSqlTemplate: "drop table %s",
DropTableSqlTemplate: "drop table `%s`",
}
sqlUrl := fmt.Sprintf(CONNECTION_URL_PATTERN, user, password, hostname, port, database)
adaptedSqlUrl := fmt.Sprintf(CONNECTION_URL_PATTERN, user, "<ADAPTED>", hostname, port, database)
if interpolateParams {
sqlUrl += "&interpolateParams=true"
adaptedSqlUrl += "&interpolateParams=true"
}
var dbErr error
@@ -63,7 +65,7 @@ func (store *MysqlStore2) initialize(createTable, user, password, hostname strin
if dbErr != nil {
store.DB.Close()
store.DB = nil
return fmt.Errorf("can not connect to %s error:%v", sqlUrl, err)
return fmt.Errorf("can not connect to %s error:%v", adaptedSqlUrl, err)
}
store.DB.SetMaxIdleConns(maxIdle)
@@ -17,31 +17,31 @@ var (
)
func (gen *SqlGenPostgres) GetSqlInsert(bucket string) string {
return fmt.Sprintf("INSERT INTO %s (dirhash,name,directory,meta) VALUES($1,$2,$3,$4)", bucket)
return fmt.Sprintf(`INSERT INTO "%s" (dirhash,name,directory,meta) VALUES($1,$2,$3,$4)`, bucket)
}
func (gen *SqlGenPostgres) GetSqlUpdate(bucket string) string {
return fmt.Sprintf("UPDATE %s SET meta=$1 WHERE dirhash=$2 AND name=$3 AND directory=$4", bucket)
return fmt.Sprintf(`UPDATE "%s" SET meta=$1 WHERE dirhash=$2 AND name=$3 AND directory=$4`, bucket)
}
func (gen *SqlGenPostgres) GetSqlFind(bucket string) string {
return fmt.Sprintf("SELECT meta FROM %s WHERE dirhash=$1 AND name=$2 AND directory=$3", bucket)
return fmt.Sprintf(`SELECT meta FROM "%s" WHERE dirhash=$1 AND name=$2 AND directory=$3`, bucket)
}
func (gen *SqlGenPostgres) GetSqlDelete(bucket string) string {
return fmt.Sprintf("DELETE FROM %s WHERE dirhash=$1 AND name=$2 AND directory=$3", bucket)
return fmt.Sprintf(`DELETE FROM "%s" WHERE dirhash=$1 AND name=$2 AND directory=$3`, bucket)
}
func (gen *SqlGenPostgres) GetSqlDeleteFolderChildren(bucket string) string {
return fmt.Sprintf("DELETE FROM %s WHERE dirhash=$1 AND directory=$2", bucket)
return fmt.Sprintf(`DELETE FROM "%s" WHERE dirhash=$1 AND directory=$2`, bucket)
}
func (gen *SqlGenPostgres) GetSqlListExclusive(bucket string) string {
return fmt.Sprintf("SELECT NAME, meta FROM %s WHERE dirhash=$1 AND name>$2 AND directory=$3 AND name like $4 ORDER BY NAME ASC LIMIT $5", bucket)
return fmt.Sprintf(`SELECT NAME, meta FROM "%s" WHERE dirhash=$1 AND name>$2 AND directory=$3 AND name like $4 ORDER BY NAME ASC LIMIT $5`, bucket)
}
func (gen *SqlGenPostgres) GetSqlListInclusive(bucket string) string {
return fmt.Sprintf("SELECT NAME, meta FROM %s WHERE dirhash=$1 AND name>=$2 AND directory=$3 AND name like $4 ORDER BY NAME ASC LIMIT $5", bucket)
return fmt.Sprintf(`SELECT NAME, meta FROM "%s" WHERE dirhash=$1 AND name>=$2 AND directory=$3 AND name like $4 ORDER BY NAME ASC LIMIT $5`, bucket)
}
func (gen *SqlGenPostgres) GetSqlCreateTable(bucket string) string {
@@ -3,6 +3,7 @@ package postgres
import (
"database/sql"
"fmt"
"time"
"github.com/chrislusf/seaweedfs/weed/filer"
"github.com/chrislusf/seaweedfs/weed/filer/abstract_sql"
@@ -37,40 +38,46 @@ func (store *PostgresStore) Initialize(configuration util.Configuration, prefix
configuration.GetString(prefix+"sslmode"),
configuration.GetInt(prefix+"connection_max_idle"),
configuration.GetInt(prefix+"connection_max_open"),
configuration.GetInt(prefix+"connection_max_lifetime_seconds"),
)
}
func (store *PostgresStore) initialize(user, password, hostname string, port int, database, schema, sslmode string, maxIdle, maxOpen int) (err error) {
func (store *PostgresStore) initialize(user, password, hostname string, port int, database, schema, sslmode string, maxIdle, maxOpen, maxLifetimeSeconds int) (err error) {
store.SupportBucketTable = false
store.SqlGenerator = &SqlGenPostgres{
CreateTableSqlTemplate: "",
DropTableSqlTemplate: "drop table %s",
DropTableSqlTemplate: `drop table "%s"`,
}
sqlUrl := fmt.Sprintf(CONNECTION_URL_PATTERN, hostname, port, sslmode)
if user != "" {
sqlUrl += " user=" + user
}
adaptedSqlUrl := sqlUrl
if password != "" {
sqlUrl += " password=" + password
adaptedSqlUrl += " password=ADAPTED"
}
if database != "" {
sqlUrl += " dbname=" + database
adaptedSqlUrl += " dbname=" + database
}
if schema != "" {
sqlUrl += " search_path=" + schema
adaptedSqlUrl += " search_path=" + schema
}
var dbErr error
store.DB, dbErr = sql.Open("postgres", sqlUrl)
if dbErr != nil {
store.DB.Close()
store.DB = nil
return fmt.Errorf("can not connect to %s error:%v", sqlUrl, err)
return fmt.Errorf("can not connect to %s error:%v", adaptedSqlUrl, err)
}
store.DB.SetMaxIdleConns(maxIdle)
store.DB.SetMaxOpenConns(maxOpen)
store.DB.SetConnMaxLifetime(time.Duration(maxLifetimeSeconds) * time.Second)
if err = store.DB.Ping(); err != nil {
return fmt.Errorf("connect to %s error:%v", sqlUrl, err)
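Note: the adaptedSqlUrl variables above keep a second copy of the connection string with the password replaced by a placeholder, so the "can not connect" errors no longer leak credentials into logs; the new connection_max_lifetime_seconds setting is likewise threaded through to store.DB.SetConnMaxLifetime.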
@@ -4,6 +4,7 @@ import (
"context"
"database/sql"
"fmt"
"time"
"github.com/chrislusf/seaweedfs/weed/filer"
"github.com/chrislusf/seaweedfs/weed/filer/abstract_sql"
@@ -40,40 +41,46 @@ func (store *PostgresStore2) Initialize(configuration util.Configuration, prefix
configuration.GetString(prefix+"sslmode"),
configuration.GetInt(prefix+"connection_max_idle"),
configuration.GetInt(prefix+"connection_max_open"),
configuration.GetInt(prefix+"connection_max_lifetime_seconds"),
)
}
func (store *PostgresStore2) initialize(createTable, user, password, hostname string, port int, database, schema, sslmode string, maxIdle, maxOpen int) (err error) {
func (store *PostgresStore2) initialize(createTable, user, password, hostname string, port int, database, schema, sslmode string, maxIdle, maxOpen, maxLifetimeSeconds int) (err error) {
store.SupportBucketTable = true
store.SqlGenerator = &postgres.SqlGenPostgres{
CreateTableSqlTemplate: createTable,
DropTableSqlTemplate: "drop table %s",
DropTableSqlTemplate: `drop table "%s"`,
}
sqlUrl := fmt.Sprintf(CONNECTION_URL_PATTERN, hostname, port, sslmode)
if user != "" {
sqlUrl += " user=" + user
}
adaptedSqlUrl := sqlUrl
if password != "" {
sqlUrl += " password=" + password
adaptedSqlUrl += " password=ADAPTED"
}
if database != "" {
sqlUrl += " dbname=" + database
adaptedSqlUrl += " dbname=" + database
}
if schema != "" {
sqlUrl += " search_path=" + schema
adaptedSqlUrl += " search_path=" + schema
}
var dbErr error
store.DB, dbErr = sql.Open("postgres", sqlUrl)
if dbErr != nil {
store.DB.Close()
store.DB = nil
return fmt.Errorf("can not connect to %s error:%v", sqlUrl, err)
return fmt.Errorf("can not connect to %s error:%v", adaptedSqlUrl, err)
}
store.DB.SetMaxIdleConns(maxIdle)
store.DB.SetMaxOpenConns(maxOpen)
store.DB.SetConnMaxLifetime(time.Duration(maxLifetimeSeconds) * time.Second)
if err = store.DB.Ping(); err != nil {
return fmt.Errorf("connect to %s error:%v", sqlUrl, err)
@@ -35,7 +35,7 @@ func ReadContent(filerAddress string, dir, name string) ([]byte, error) {
target := fmt.Sprintf("http://%s%s/%s", filerAddress, dir, name)
data, _, err := util.Get(target)
data, _, err := util.FastGet(target)
return data, err
}
@@ -181,7 +181,7 @@ func (c *ChunkStreamReader) fetchChunkToBuffer(chunkView *ChunkView) error {
var buffer bytes.Buffer
var shouldRetry bool
for _, urlString := range urlStrings {
shouldRetry, err = util.ReadUrlAsStream(urlString+"?readDeleted=true", chunkView.CipherKey, chunkView.IsGzipped, chunkView.IsFullChunk(), chunkView.Offset, int(chunkView.Size), func(data []byte) {
shouldRetry, err = util.FastReadUrlAsStream(urlString+"?readDeleted=true", chunkView.CipherKey, chunkView.IsGzipped, chunkView.IsFullChunk(), chunkView.Offset, int(chunkView.Size), func(data []byte) {
buffer.Write(data)
})
if !shouldRetry {
@@ -192,16 +192,13 @@ func (fh *FileHandle) Release(ctx context.Context, req *fuse.ReleaseRequest) err
if fh.f.isOpen == 1 {
if err := fh.doFlush(ctx, req.Header); err != nil {
glog.Errorf("Release doFlush %s: %v", fh.f.Name, err)
return err
}
fh.f.isOpen--
fh.f.wfs.ReleaseHandle(fh.f.fullpath(), fuse.HandleID(fh.handle))
if closer, ok := fh.f.reader.(io.Closer); ok {
closer.Close()
if closer != nil {
closer.Close()
}
}
fh.f.reader = nil
}
@@ -4,6 +4,7 @@ import (
"context"
"fmt"
"github.com/chrislusf/seaweedfs/weed/filer"
"github.com/chrislusf/seaweedfs/weed/storage/types"
"github.com/chrislusf/seaweedfs/weed/wdclient"
"math"
"os"
@@ -34,6 +35,7 @@ type Option struct {
Collection string
Replication string
TtlSec int32
DiskType types.DiskType
ChunkSizeLimit int64
ConcurrentWriters int
CacheDir string
@@ -194,6 +196,7 @@ func (wfs *WFS) Statfs(ctx context.Context, req *fuse.StatfsRequest, resp *fuse.
Collection: wfs.option.Collection,
Replication: wfs.option.Replication,
Ttl: fmt.Sprintf("%ds", wfs.option.TtlSec),
DiskType: string(wfs.option.DiskType),
}
glog.V(4).Infof("reading filer stats: %+v", request)
@@ -26,6 +26,7 @@ func (wfs *WFS) saveDataAsChunk(fullPath util.FullPath) filer.SaveDataAsChunkFun
Replication: wfs.option.Replication,
Collection: wfs.option.Collection,
TtlSec: wfs.option.TtlSec,
DiskType: string(wfs.option.DiskType),
DataCenter: wfs.option.DataCenter,
Path: string(fullPath),
}
@@ -17,10 +17,14 @@ package gocdk_pub_sub
import (
"context"
"fmt"
"github.com/golang/protobuf/proto"
"github.com/streadway/amqp"
"gocloud.dev/pubsub"
_ "gocloud.dev/pubsub/awssnssqs"
"gocloud.dev/pubsub/rabbitpubsub"
"net/url"
"path"
"time"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/notification"
@@ -29,12 +33,18 @@ import (
_ "gocloud.dev/pubsub/gcppubsub"
_ "gocloud.dev/pubsub/natspubsub"
_ "gocloud.dev/pubsub/rabbitpubsub"
"os"
)
func init() {
notification.MessageQueues = append(notification.MessageQueues, &GoCDKPubSub{})
}
func getPath(rawUrl string) string {
parsedUrl, _ := url.Parse(rawUrl)
return path.Join(parsedUrl.Host, parsedUrl.Path)
}
type GoCDKPubSub struct {
topicURL string
topic *pubsub.Topic
@@ -44,6 +54,28 @@ func (k *GoCDKPubSub) GetName() string {
return "gocdk_pub_sub"
}
func (k *GoCDKPubSub) doReconnect() {
var conn *amqp.Connection
if k.topic.As(&conn) {
go func() {
<-conn.NotifyClose(make(chan *amqp.Error))
conn.Close()
k.topic.Shutdown(context.Background())
for {
glog.Info("Try reconnect")
conn, err := amqp.Dial(os.Getenv("RABBIT_SERVER_URL"))
if err == nil {
k.topic = rabbitpubsub.OpenTopic(conn, getPath(k.topicURL), nil)
k.doReconnect()
break
}
glog.Error(err)
time.Sleep(time.Second)
}
}()
}
}
func (k *GoCDKPubSub) Initialize(configuration util.Configuration, prefix string) error {
k.topicURL = configuration.GetString(prefix + "topic_url")
glog.V(0).Infof("notification.gocdk_pub_sub.topic_url: %v", k.topicURL)
@@ -52,6 +84,7 @@ func (k *GoCDKPubSub) Initialize(configuration util.Configuration, prefix string
glog.Fatalf("Failed to open topic: %v", err)
}
k.topic = topic
k.doReconnect()
return nil
}
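Note: doReconnect above only arms itself when topic.As(&conn) matches, which is the case for the RabbitMQ-backed topic; for the other gocloud.dev providers imported here it is a no-op. When the AMQP connection closes, the goroutine shuts the old topic down, re-dials RABBIT_SERVER_URL until it succeeds, reopens the topic, and re-registers itself for the next disconnect.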
@@ -18,6 +18,7 @@ type VolumeAssignRequest struct {
Replication string
Collection string
Ttl string
DiskType string
DataCenter string
Rack string
DataNode string
@@ -33,7 +34,7 @@ type AssignResult struct {
Auth security.EncodedJwt `json:"auth,omitempty"`
}
func Assign(server string, grpcDialOption grpc.DialOption, primaryRequest *VolumeAssignRequest, alternativeRequests ...*VolumeAssignRequest) (*AssignResult, error) {
func Assign(masterFn GetMasterFn, grpcDialOption grpc.DialOption, primaryRequest *VolumeAssignRequest, alternativeRequests ...*VolumeAssignRequest) (*AssignResult, error) {
var requests []*VolumeAssignRequest
requests = append(requests, primaryRequest)
@@ -47,13 +48,14 @@ func Assign(server string, grpcDialOption grpc.DialOption, primaryRequest *Volum
continue
}
lastError = WithMasterServerClient(server, grpcDialOption, func(masterClient master_pb.SeaweedClient) error {
lastError = WithMasterServerClient(masterFn(), grpcDialOption, func(masterClient master_pb.SeaweedClient) error {
req := &master_pb.AssignRequest{
Count: request.Count,
Replication: request.Replication,
Collection: request.Collection,
Ttl: request.Ttl,
DiskType: request.DiskType,
DataCenter: request.DataCenter,
Rack: request.Rack,
DataNode: request.DataNode,
@@ -105,6 +107,7 @@ func LookupJwt(master string, fileId string) security.EncodedJwt {
type StorageOption struct {
Replication string
DiskType string
Collection string
DataCenter string
Rack string
@@ -123,6 +126,7 @@ func (so *StorageOption) ToAssignRequests(count int) (ar *VolumeAssignRequest, a
Replication: so.Replication,
Collection: so.Collection,
Ttl: so.TtlString(),
DiskType: so.DiskType,
DataCenter: so.DataCenter,
Rack: so.Rack,
WritableVolumeCount: so.VolumeGrowthCount,
@@ -133,6 +137,7 @@ func (so *StorageOption) ToAssignRequests(count int) (ar *VolumeAssignRequest, a
Replication: so.Replication,
Collection: so.Collection,
Ttl: so.TtlString(),
DiskType: so.DiskType,
DataCenter: "",
Rack: "",
WritableVolumeCount: so.VolumeGrowthCount,
@@ -72,12 +72,12 @@ func (cm *ChunkManifest) Marshal() ([]byte, error) {
return json.Marshal(cm)
}
func (cm *ChunkManifest) DeleteChunks(master string, usePublicUrl bool, grpcDialOption grpc.DialOption) error {
func (cm *ChunkManifest) DeleteChunks(masterFn GetMasterFn, usePublicUrl bool, grpcDialOption grpc.DialOption) error {
var fileIds []string
for _, ci := range cm.Chunks {
fileIds = append(fileIds, ci.Fid)
}
results, err := DeleteFiles(master, usePublicUrl, grpcDialOption, fileIds)
results, err := DeleteFiles(masterFn, usePublicUrl, grpcDialOption, fileIds)
if err != nil {
glog.V(0).Infof("delete %+v: %v", fileIds, err)
return fmt.Errorf("chunk delete: %v", err)
@@ -174,7 +174,9 @@ func (cf *ChunkedFileReader) WriteTo(w io.Writer) (n int64, err error) {
for ; chunkIndex < len(cf.chunkList); chunkIndex++ {
ci := cf.chunkList[chunkIndex]
// if we need read date from local volume server first?
fileUrl, lookupError := LookupFileId(cf.master, ci.Fid)
fileUrl, lookupError := LookupFileId(func() string {
return cf.master
}, ci.Fid)
if lookupError != nil {
return n, lookupError
}
@@ -28,10 +28,10 @@ func ParseFileId(fid string) (vid string, key_cookie string, err error) {
}
// DeleteFiles batch deletes a list of fileIds
func DeleteFiles(master string, usePublicUrl bool, grpcDialOption grpc.DialOption, fileIds []string) ([]*volume_server_pb.DeleteResult, error) {
func DeleteFiles(masterFn GetMasterFn, usePublicUrl bool, grpcDialOption grpc.DialOption, fileIds []string) ([]*volume_server_pb.DeleteResult, error) {
lookupFunc := func(vids []string) (results map[string]LookupResult, err error) {
results, err = LookupVolumeIds(master, grpcDialOption, vids)
results, err = LookupVolumeIds(masterFn, grpcDialOption, vids)
if err == nil && usePublicUrl {
for _, result := range results {
for _, loc := range result.Locations {
@@ -33,10 +33,10 @@ var (
vc VidCache // caching of volume locations, re-check if after 10 minutes
)
func Lookup(server string, vid string) (ret *LookupResult, err error) {
func Lookup(masterFn GetMasterFn, vid string) (ret *LookupResult, err error) {
locations, cache_err := vc.Get(vid)
if cache_err != nil {
if ret, err = do_lookup(server, vid); err == nil {
if ret, err = do_lookup(masterFn, vid); err == nil {
vc.Set(vid, ret.Locations, 10*time.Minute)
}
} else {
@@ -45,9 +45,10 @@ func Lookup(server string, vid string) (ret *LookupResult, err error) {
return
}
func do_lookup(server string, vid string) (*LookupResult, error) {
func do_lookup(masterFn GetMasterFn, vid string) (*LookupResult, error) {
values := make(url.Values)
values.Add("volumeId", vid)
server := masterFn()
jsonBlob, err := util.Post("http://"+server+"/dir/lookup", values)
if err != nil {
return nil, err
@@ -63,12 +64,12 @@ func do_lookup(server string, vid string) (*LookupResult, error) {
return &ret, nil
}
func LookupFileId(server string, fileId string) (fullUrl string, err error) {
func LookupFileId(masterFn GetMasterFn, fileId string) (fullUrl string, err error) {
parts := strings.Split(fileId, ",")
if len(parts) != 2 {
return "", errors.New("Invalid fileId " + fileId)
}
lookup, lookupError := Lookup(server, parts[0])
lookup, lookupError := Lookup(masterFn, parts[0])
if lookupError != nil {
return "", lookupError
}
@@ -79,7 +80,7 @@ func LookupFileId(server string, fileId string) (fullUrl string, err error) {
}
// LookupVolumeIds find volume locations by cache and actual lookup
func LookupVolumeIds(server string, grpcDialOption grpc.DialOption, vids []string) (map[string]LookupResult, error) {
func LookupVolumeIds(masterFn GetMasterFn, grpcDialOption grpc.DialOption, vids []string) (map[string]LookupResult, error) {
ret := make(map[string]LookupResult)
var unknown_vids []string
@@ -99,7 +100,7 @@ func LookupVolumeIds(server string, grpcDialOption grpc.DialOption, vids []strin
//only query unknown_vids
err := WithMasterServerClient(server, grpcDialOption, func(masterClient master_pb.SeaweedClient) error {
err := WithMasterServerClient(masterFn(), grpcDialOption, func(masterClient master_pb.SeaweedClient) error {
req := &master_pb.LookupVolumeRequest{
VolumeIds: unknown_vids,
@@ -25,6 +25,7 @@ type FilePart struct {
Collection string
DataCenter string
Ttl string
DiskType string
Server string //this comes from assign result
Fid string //this comes from assign result, but customizable
Fsync bool
@@ -38,7 +39,9 @@ type SubmitResult struct {
Error string `json:"error,omitempty"`
}
func SubmitFiles(master string, grpcDialOption grpc.DialOption, files []FilePart, replication string, collection string, dataCenter string, ttl string, maxMB int, usePublicUrl bool) ([]SubmitResult, error) {
type GetMasterFn func() string
func SubmitFiles(masterFn GetMasterFn, grpcDialOption grpc.DialOption, files []FilePart, replication string, collection string, dataCenter string, ttl string, diskType string, maxMB int, usePublicUrl bool) ([]SubmitResult, error) {
results := make([]SubmitResult, len(files))
for index, file := range files {
results[index].FileName = file.FileName
@@ -49,8 +52,9 @@ func SubmitFiles(master string, grpcDialOption grpc.DialOption, files []FilePart
Collection: collection,
DataCenter: dataCenter,
Ttl: ttl,
DiskType: diskType,
}
ret, err := Assign(master, grpcDialOption, ar)
ret, err := Assign(masterFn, grpcDialOption, ar)
if err != nil {
for index := range files {
results[index].Error = err.Error()
@@ -70,7 +74,8 @@ func SubmitFiles(master string, grpcDialOption grpc.DialOption, files []FilePart
file.Collection = collection
file.DataCenter = dataCenter
file.Ttl = ttl
results[index].Size, err = file.Upload(maxMB, master, usePublicUrl, ret.Auth, grpcDialOption)
file.DiskType = diskType
results[index].Size, err = file.Upload(maxMB, masterFn, usePublicUrl, ret.Auth, grpcDialOption)
if err != nil {
results[index].Error = err.Error()
}
@@ -113,7 +118,7 @@ func newFilePart(fullPathFilename string) (ret FilePart, err error) {
return ret, nil
}
func (fi FilePart) Upload(maxMB int, master string, usePublicUrl bool, jwt security.EncodedJwt, grpcDialOption grpc.DialOption) (retSize uint32, err error) {
func (fi FilePart) Upload(maxMB int, masterFn GetMasterFn, usePublicUrl bool, jwt security.EncodedJwt, grpcDialOption grpc.DialOption) (retSize uint32, err error) {
fileUrl := "http://" + fi.Server + "/" + fi.Fid
if fi.ModTime != 0 {
fileUrl += "?ts=" + strconv.Itoa(int(fi.ModTime))
@@ -143,8 +148,9 @@ func (fi FilePart) Upload(maxMB int, master string, usePublicUrl bool, jwt secur
Replication: fi.Replication,
Collection: fi.Collection,
Ttl: fi.Ttl,
DiskType: fi.DiskType,
}
ret, err = Assign(master, grpcDialOption, ar)
ret, err = Assign(masterFn, grpcDialOption, ar)
if err != nil {
return
}
@@ -156,11 +162,12 @@ func (fi FilePart) Upload(maxMB int, master string, usePublicUrl bool, jwt secur
Replication: fi.Replication,
Collection: fi.Collection,
Ttl: fi.Ttl,
DiskType: fi.DiskType,
}
ret, err = Assign(master, grpcDialOption, ar)
ret, err = Assign(masterFn, grpcDialOption, ar)
if err != nil {
// delete all uploaded chunks
cm.DeleteChunks(master, usePublicUrl, grpcDialOption)
cm.DeleteChunks(masterFn, usePublicUrl, grpcDialOption)
return
}
id = ret.Fid
@@ -177,11 +184,11 @@ func (fi FilePart) Upload(maxMB int, master string, usePublicUrl bool, jwt secur
count, e := upload_one_chunk(
baseName+"-"+strconv.FormatInt(i+1, 10),
io.LimitReader(fi.Reader, chunkSize),
master, fileUrl,
masterFn, fileUrl,
ret.Auth)
if e != nil {
// delete all uploaded chunks
cm.DeleteChunks(master, usePublicUrl, grpcDialOption)
cm.DeleteChunks(masterFn, usePublicUrl, grpcDialOption)
return 0, e
}
cm.Chunks = append(cm.Chunks,
@@ -196,7 +203,7 @@ func (fi FilePart) Upload(maxMB int, master string, usePublicUrl bool, jwt secur
err = upload_chunked_file_manifest(fileUrl, &cm, jwt)
if err != nil {
// delete all uploaded chunks
cm.DeleteChunks(master, usePublicUrl, grpcDialOption)
cm.DeleteChunks(masterFn, usePublicUrl, grpcDialOption)
}
} else {
ret, e, _ := Upload(fileUrl, baseName, false, fi.Reader, false, fi.MimeType, nil, jwt)
@@ -208,7 +215,7 @@ func (fi FilePart) Upload(maxMB int, master string, usePublicUrl bool, jwt secur
return
}
func upload_one_chunk(filename string, reader io.Reader, master,
func upload_one_chunk(filename string, reader io.Reader, masterFn GetMasterFn,
fileUrl string, jwt security.EncodedJwt,
) (size uint32, e error) {
glog.V(4).Info("Uploading part ", filename, " to ", fileUrl, "...")
@@ -11,9 +11,9 @@ import (
"github.com/chrislusf/seaweedfs/weed/storage/needle"
)
func TailVolume(master string, grpcDialOption grpc.DialOption, vid needle.VolumeId, sinceNs uint64, timeoutSeconds int, fn func(n *needle.Needle) error) error {
func TailVolume(masterFn GetMasterFn, grpcDialOption grpc.DialOption, vid needle.VolumeId, sinceNs uint64, timeoutSeconds int, fn func(n *needle.Needle) error) error {
// find volume location, replication, ttl info
lookup, err := Lookup(master, vid.String())
lookup, err := Lookup(masterFn, vid.String())
if err != nil {
return fmt.Errorf("look up volume %d: %v", vid, err)
}
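Note: the GetMasterFn type introduced above is just func() string, so any master-selection policy can be wrapped in it. A minimal sketch (not part of this commit; the round-robin policy and helper name are hypothetical) of adapting a static list of masters:

// roundRobinMaster returns a GetMasterFn that cycles through the given masters.
// Not safe for concurrent use; for illustration only.
func roundRobinMaster(masters []string) operation.GetMasterFn {
	i := 0
	return func() string {
		m := masters[i%len(masters)]
		i++
		return m
	}
}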
@@ -58,6 +58,7 @@ var (
func init() {
HttpClient = &http.Client{Transport: &http.Transport{
MaxIdleConns: 1024,
MaxIdleConnsPerHost: 1024,
}}
}
@@ -99,6 +100,7 @@ func retriedUploadData(uploadUrl string, filename string, cipher bool, data []by
} else {
glog.Warningf("uploading to %s: %v", uploadUrl, err)
}
time.Sleep(time.Millisecond * time.Duration(237*(i+1)))
}
return
}
@@ -156,6 +156,7 @@ message FuseAttributes {
repeated string group_name = 12; // for hdfs
string symlink_target = 13;
bytes md5 = 14;
string disk_type = 15;
}
message CreateEntryRequest {
@@ -220,6 +221,7 @@ message AssignVolumeRequest {
string data_center = 5;
string path = 6;
string rack = 7;
string disk_type = 8;
}
message AssignVolumeResponse {
@@ -270,11 +272,9 @@ message StatisticsRequest {
string replication = 1;
string collection = 2;
string ttl = 3;
string disk_type = 4;
}
message StatisticsResponse {
string replication = 1;
string collection = 2;
string ttl = 3;
uint64 total_size = 4;
uint64 used_size = 5;
uint64 file_count = 6;
@@ -358,12 +358,7 @@ message FilerConf {
string collection = 2;
string replication = 3;
string ttl = 4;
enum DiskType {
NONE = 0;
HDD = 1;
SSD = 2;
}
DiskType disk_type = 5;
string disk_type = 5;
bool fsync = 6;
uint32 volume_growth_count = 7;
}
File diff suppressed because it is too large
@@ -29,6 +29,7 @@ var (
func init() {
http.DefaultTransport.(*http.Transport).MaxIdleConnsPerHost = 1024
http.DefaultTransport.(*http.Transport).MaxIdleConns = 1024
}
func NewGrpcServer(opts ...grpc.ServerOption) *grpc.Server {
@@ -44,7 +44,6 @@ message Heartbeat {
string ip = 1;
uint32 port = 2;
string public_url = 3;
uint32 max_volume_count = 4;
uint64 max_file_key = 5;
string data_center = 6;
string rack = 7;
@@ -62,6 +61,8 @@ message Heartbeat {
repeated VolumeEcShardInformationMessage deleted_ec_shards = 18;
bool has_no_ec_shards = 19;
map<string, uint32> max_volume_counts = 4;
}
message HeartbeatResponse {
@@ -87,6 +88,7 @@ message VolumeInformationMessage {
int64 modified_at_second = 12;
string remote_storage_name = 13;
string remote_storage_key = 14;
string disk_type = 15;
}
message VolumeShortInformationMessage {
@@ -95,12 +97,14 @@ message VolumeShortInformationMessage {
uint32 replica_placement = 8;
uint32 version = 9;
uint32 ttl = 10;
string disk_type = 15;
}
message VolumeEcShardInformationMessage {
uint32 id = 1;
string collection = 2;
uint32 ec_index_bits = 3;
string disk_type = 4;
}
message StorageBackend {
@@ -163,6 +167,7 @@ message AssignRequest {
string data_node = 7;
uint32 memory_map_max_size_mb = 8;
uint32 Writable_volume_count = 9;
string disk_type = 10;
}
message AssignResponse {
string fid = 1;
@@ -177,11 +182,9 @@ message StatisticsRequest {
string replication = 1;
string collection = 2;
string ttl = 3;
string disk_type = 4;
}
message StatisticsResponse {
string replication = 1;
string collection = 2;
string ttl = 3;
uint64 total_size = 4;
uint64 used_size = 5;
uint64 file_count = 6;
@@ -210,8 +213,8 @@ message CollectionDeleteResponse {
//
// volume related
//
message DataNodeInfo {
string id = 1;
message DiskInfo {
string type = 1;
uint64 volume_count = 2;
uint64 max_volume_count = 3;
uint64 free_volume_count = 4;
@@ -220,32 +223,24 @@ message DataNodeInfo {
repeated VolumeEcShardInformationMessage ec_shard_infos = 7;
uint64 remote_volume_count = 8;
}
message DataNodeInfo {
string id = 1;
map<string, DiskInfo> diskInfos = 2;
}
message RackInfo {
string id = 1;
uint64 volume_count = 2;
uint64 max_volume_count = 3;
uint64 free_volume_count = 4;
uint64 active_volume_count = 5;
repeated DataNodeInfo data_node_infos = 6;
uint64 remote_volume_count = 7;
repeated DataNodeInfo data_node_infos = 2;
map<string, DiskInfo> diskInfos = 3;
}
message DataCenterInfo {
string id = 1;
uint64 volume_count = 2;
uint64 max_volume_count = 3;
uint64 free_volume_count = 4;
uint64 active_volume_count = 5;
repeated RackInfo rack_infos = 6;
uint64 remote_volume_count = 7;
repeated RackInfo rack_infos = 2;
map<string, DiskInfo> diskInfos = 3;
}
message TopologyInfo {
string id = 1;
uint64 volume_count = 2;
uint64 max_volume_count = 3;
uint64 free_volume_count = 4;
uint64 active_volume_count = 5;
repeated DataCenterInfo data_center_infos = 6;
uint64 remote_volume_count = 7;
repeated DataCenterInfo data_center_infos = 2;
map<string, DiskInfo> diskInfos = 3;
}
message VolumeListRequest {
}
File diff suppressed because it is too large
@@ -157,6 +157,7 @@ message AllocateVolumeRequest {
string replication = 4;
string ttl = 5;
uint32 memory_map_max_size_mb = 6;
string disk_type = 7;
}
message AllocateVolumeResponse {
}
@@ -233,6 +234,7 @@ message VolumeCopyRequest {
string replication = 3;
string ttl = 4;
string source_data_node = 5;
string disk_type = 6;
}
message VolumeCopyResponse {
uint64 last_append_at_ns = 1;
@@ -361,6 +363,7 @@ message ReadVolumeFileStatusResponse {
uint64 file_count = 6;
uint32 compaction_revision = 7;
string collection = 8;
string disk_type = 9;
}

message DiskStatus {
@@ -370,6 +373,7 @@ message DiskStatus {
uint64 free = 4;
float percent_free = 5;
float percent_used = 6;
string disk_type = 7;
}

message MemStatus {

File diff suppressed because it is too large
@@ -20,7 +20,7 @@ func CopyFromChunkViews(chunkViews []*filer.ChunkView, filerSource *source.Filer
var shouldRetry bool

for _, fileUrl := range fileUrls {
shouldRetry, err = util.ReadUrlAsStream(fileUrl+"?readDeleted=true", nil, false, chunk.IsFullChunk(), chunk.Offset, int(chunk.Size), func(data []byte) {
shouldRetry, err = util.FastReadUrlAsStream(fileUrl+"?readDeleted=true", nil, false, chunk.IsFullChunk(), chunk.Offset, int(chunk.Size), func(data []byte) {
writeErr = writeFunc(data)
})
if err != nil {

@@ -78,6 +78,7 @@ func (fs *FilerSink) fetchAndWrite(sourceChunk *filer_pb.FileChunk, path string)
Collection: fs.collection,
TtlSec: fs.ttlSec,
DataCenter: fs.dataCenter,
DiskType: fs.diskType,
Path: path,
}


@@ -25,6 +25,7 @@ type FilerSink struct {
replication string
collection string
ttlSec int32
diskType string
dataCenter string
grpcDialOption grpc.DialOption
address string
@@ -51,6 +52,7 @@ func (fs *FilerSink) Initialize(configuration util.Configuration, prefix string)
configuration.GetString(prefix+"replication"),
configuration.GetString(prefix+"collection"),
configuration.GetInt(prefix+"ttlSec"),
configuration.GetString(prefix+"disk"),
security.LoadClientTLS(util.GetViper(), "grpc.client"),
false)
}
@@ -60,7 +62,7 @@ func (fs *FilerSink) SetSourceFiler(s *source.FilerSource) {
}

func (fs *FilerSink) DoInitialize(address, grpcAddress string, dir string,
replication string, collection string, ttlSec int, grpcDialOption grpc.DialOption, writeChunkByFiler bool) (err error) {
replication string, collection string, ttlSec int, diskType string, grpcDialOption grpc.DialOption, writeChunkByFiler bool) (err error) {
fs.address = address
if fs.address == "" {
fs.address = pb.GrpcAddressToServerAddress(grpcAddress)
@@ -70,6 +72,7 @@ func (fs *FilerSink) DoInitialize(address, grpcAddress string, dir string,
fs.replication = replication
fs.collection = collection
fs.ttlSec = int32(ttlSec)
fs.diskType = diskType
fs.grpcDialOption = grpcDialOption
fs.writeChunkByFiler = writeChunkByFiler
return nil

@@ -9,9 +9,12 @@ import (
"github.com/streadway/amqp"
"gocloud.dev/pubsub"
_ "gocloud.dev/pubsub/awssnssqs"
"gocloud.dev/pubsub/rabbitpubsub"
"net/url"
"os"
"path"
"strings"
"time"

// _ "gocloud.dev/pubsub/azuresb"
_ "gocloud.dev/pubsub/gcppubsub"
@@ -73,7 +76,8 @@ func QueueDeclareAndBind(conn *amqp.Connection, exchangeUrl string, queueUrl str
}

type GoCDKPubSubInput struct {
sub *pubsub.Subscription
sub *pubsub.Subscription
subURL string
}

func (k *GoCDKPubSubInput) GetName() string {
@@ -82,9 +86,9 @@ func (k *GoCDKPubSubInput) GetName() string {

func (k *GoCDKPubSubInput) Initialize(configuration util.Configuration, prefix string) error {
topicUrl := configuration.GetString(prefix + "topic_url")
subURL := configuration.GetString(prefix + "sub_url")
glog.V(0).Infof("notification.gocdk_pub_sub.sub_url: %v", subURL)
sub, err := pubsub.OpenSubscription(context.Background(), subURL)
k.subURL = configuration.GetString(prefix + "sub_url")
glog.V(0).Infof("notification.gocdk_pub_sub.sub_url: %v", k.subURL)
sub, err := pubsub.OpenSubscription(context.Background(), k.subURL)
if err != nil {
return err
}
@@ -95,10 +99,10 @@ func (k *GoCDKPubSubInput) Initialize(configuration util.Configuration, prefix s
return err
}
defer ch.Close()
_, err = ch.QueueInspect(getPath(subURL))
_, err = ch.QueueInspect(getPath(k.subURL))
if err != nil {
if strings.HasPrefix(err.Error(), "Exception (404) Reason") {
if err := QueueDeclareAndBind(conn, topicUrl, subURL); err != nil {
if err := QueueDeclareAndBind(conn, topicUrl, k.subURL); err != nil {
return err
}
} else {
@@ -111,9 +115,24 @@ func (k *GoCDKPubSubInput) Initialize(configuration util.Configuration, prefix s
}

func (k *GoCDKPubSubInput) ReceiveMessage() (key string, message *filer_pb.EventNotification, onSuccessFn func(), onFailureFn func(), err error) {
msg, err := k.sub.Receive(context.Background())
ctx := context.Background()
msg, err := k.sub.Receive(ctx)
if err != nil {
return
var conn *amqp.Connection
if k.sub.As(&conn) && conn.IsClosed() {
conn.Close()
k.sub.Shutdown(ctx)
conn, err = amqp.Dial(os.Getenv("RABBIT_SERVER_URL"))
if err != nil {
glog.Error(err)
time.Sleep(time.Second)
return
}
k.sub = rabbitpubsub.OpenSubscription(conn, getPath(k.subURL), nil)
return
}
// This is permanent cached sub err
glog.Fatal(err)
}
onFailureFn = func() {
if msg.Nackable() {

@@ -27,6 +27,7 @@ var (

func init() {
client = &http.Client{Transport: &http.Transport{
MaxIdleConns: 1024,
MaxIdleConnsPerHost: 1024,
}}
}
@@ -184,7 +185,7 @@ func (s3a *S3ApiServer) DeleteMultipleObjectsHandler(w http.ResponseWriter, r *h
for _, object := range deleteObjects.Objects {

lastSeparator := strings.LastIndex(object.ObjectName, "/")
parentDirectoryPath, entryName, isDeleteData, isRecursive := "/", object.ObjectName, true, false
parentDirectoryPath, entryName, isDeleteData, isRecursive := "", object.ObjectName, true, false
if lastSeparator > 0 && lastSeparator+1 < len(object.ObjectName) {
entryName = object.ObjectName[lastSeparator+1:]
parentDirectoryPath = "/" + object.ObjectName[:lastSeparator]
@@ -207,7 +208,7 @@ func (s3a *S3ApiServer) DeleteMultipleObjectsHandler(w http.ResponseWriter, r *h

// purge empty folders, only checking folders with deletions
for len(directoriesWithDeletion) > 0 {
directoriesWithDeletion = doDeleteEmptyDirectories(client, directoriesWithDeletion)
directoriesWithDeletion = s3a.doDeleteEmptyDirectories(client, directoriesWithDeletion)
}

return nil
@@ -223,7 +224,7 @@ func (s3a *S3ApiServer) DeleteMultipleObjectsHandler(w http.ResponseWriter, r *h

}

func doDeleteEmptyDirectories(client filer_pb.SeaweedFilerClient, directoriesWithDeletion map[string]int) (newDirectoriesWithDeletion map[string]int){
func (s3a *S3ApiServer) doDeleteEmptyDirectories(client filer_pb.SeaweedFilerClient, directoriesWithDeletion map[string]int) (newDirectoriesWithDeletion map[string]int) {
var allDirs []string
for dir, _ := range directoriesWithDeletion {
allDirs = append(allDirs, dir)
@@ -234,6 +235,9 @@ func doDeleteEmptyDirectories(client filer_pb.SeaweedFilerClient, directoriesWit
newDirectoriesWithDeletion = make(map[string]int)
for _, dir := range allDirs {
parentDir, dirName := util.FullPath(dir).DirAndName()
if parentDir == s3a.option.BucketsPath {
continue
}
if err := doDeleteEntry(client, parentDir, dirName, false, false); err != nil {
glog.V(4).Infof("directory %s has %d deletion but still not empty: %v", dir, directoriesWithDeletion[dir], err)
} else {

@@ -100,7 +100,7 @@ func debug(params ...interface{}) {
glog.V(4).Infoln(params...)
}

func submitForClientHandler(w http.ResponseWriter, r *http.Request, masterUrl string, grpcDialOption grpc.DialOption) {
func submitForClientHandler(w http.ResponseWriter, r *http.Request, masterFn operation.GetMasterFn, grpcDialOption grpc.DialOption) {
m := make(map[string]interface{})
if r.Method != "POST" {
writeJsonError(w, r, http.StatusMethodNotAllowed, errors.New("Only submit via POST!"))
@@ -131,8 +131,9 @@ func submitForClientHandler(w http.ResponseWriter, r *http.Request, masterUrl st
Replication: r.FormValue("replication"),
Collection: r.FormValue("collection"),
Ttl: r.FormValue("ttl"),
DiskType: r.FormValue("disk"),
}
assignResult, ae := operation.Assign(masterUrl, grpcDialOption, ar)
assignResult, ae := operation.Assign(masterFn, grpcDialOption, ar)
if ae != nil {
writeJsonError(w, r, http.StatusInternalServerError, ae)
return

@@ -263,6 +263,7 @@ func (fs *FilerServer) cleanupChunks(fullpath string, existingEntry *filer.Entry
newEntry.Attributes.Collection,
newEntry.Attributes.Replication,
newEntry.Attributes.TtlSec,
newEntry.Attributes.DiskType,
"",
"",
)
@@ -306,7 +307,7 @@ func (fs *FilerServer) AppendToEntry(ctx context.Context, req *filer_pb.AppendTo
}

entry.Chunks = append(entry.Chunks, req.Chunks...)
so := fs.detectStorageOption(string(fullpath), entry.Collection, entry.Replication, entry.TtlSec, "", "")
so := fs.detectStorageOption(string(fullpath), entry.Collection, entry.Replication, entry.TtlSec, entry.DiskType, "", "")
entry.Chunks, err = filer.MaybeManifestize(fs.saveAsChunk(so), entry.Chunks)
if err != nil {
// not good, but should be ok
@@ -332,11 +333,11 @@ func (fs *FilerServer) DeleteEntry(ctx context.Context, req *filer_pb.DeleteEntr

func (fs *FilerServer) AssignVolume(ctx context.Context, req *filer_pb.AssignVolumeRequest) (resp *filer_pb.AssignVolumeResponse, err error) {

so := fs.detectStorageOption(req.Path, req.Collection, req.Replication, req.TtlSec, req.DataCenter, req.Rack)
so := fs.detectStorageOption(req.Path, req.Collection, req.Replication, req.TtlSec, req.DiskType, req.DataCenter, req.Rack)

assignRequest, altRequest := so.ToAssignRequests(int(req.Count))

assignResult, err := operation.Assign(fs.filer.GetMaster(), fs.grpcDialOption, assignRequest, altRequest)
assignResult, err := operation.Assign(fs.filer.GetMaster, fs.grpcDialOption, assignRequest, altRequest)
if err != nil {
glog.V(3).Infof("AssignVolume: %v", err)
return &filer_pb.AssignVolumeResponse{Error: fmt.Sprintf("assign volume: %v", err)}, nil
@@ -402,6 +403,7 @@ func (fs *FilerServer) Statistics(ctx context.Context, req *filer_pb.StatisticsR
Replication: req.Replication,
Collection: req.Collection,
Ttl: req.Ttl,
DiskType: req.DiskType,
})
if grpcErr != nil {
return grpcErr

@@ -14,6 +14,7 @@ var (

func init() {
client = &http.Client{Transport: &http.Transport{
MaxIdleConns: 1024,
MaxIdleConnsPerHost: 1024,
}}
}

@@ -37,7 +37,7 @@ func (fs *FilerServer) assignNewFileInfo(so *operation.StorageOption) (fileId, u

ar, altRequest := so.ToAssignRequests(1)

assignResult, ae := operation.Assign(fs.filer.GetMaster(), fs.grpcDialOption, ar, altRequest)
assignResult, ae := operation.Assign(fs.filer.GetMaster, fs.grpcDialOption, ar, altRequest)
if ae != nil {
glog.Errorf("failing to assign a file id: %v", ae)
err = ae
@@ -61,6 +61,7 @@ func (fs *FilerServer) PostHandler(w http.ResponseWriter, r *http.Request) {
query.Get("collection"),
query.Get("replication"),
query.Get("ttl"),
query.Get("disk"),
query.Get("dataCenter"),
query.Get("rack"),
)
@@ -104,7 +105,7 @@ func (fs *FilerServer) DeleteHandler(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(http.StatusNoContent)
}

func (fs *FilerServer) detectStorageOption(requestURI, qCollection, qReplication string, ttlSeconds int32, dataCenter, rack string) *operation.StorageOption {
func (fs *FilerServer) detectStorageOption(requestURI, qCollection, qReplication string, ttlSeconds int32, diskType string, dataCenter, rack string) *operation.StorageOption {
collection := util.Nvl(qCollection, fs.option.Collection)
replication := util.Nvl(qReplication, fs.option.DefaultReplication)

@@ -134,17 +135,18 @@ func (fs *FilerServer) detectStorageOption(requestURI, qCollection, qReplication
DataCenter: util.Nvl(dataCenter, fs.option.DataCenter),
Rack: util.Nvl(rack, fs.option.Rack),
TtlSeconds: ttlSeconds,
DiskType: util.Nvl(diskType, rule.DiskType),
Fsync: fsync || rule.Fsync,
VolumeGrowthCount: rule.VolumeGrowthCount,
}
}

func (fs *FilerServer) detectStorageOption0(requestURI, qCollection, qReplication string, qTtl string, dataCenter, rack string) *operation.StorageOption {
func (fs *FilerServer) detectStorageOption0(requestURI, qCollection, qReplication string, qTtl string, diskType string, dataCenter, rack string) *operation.StorageOption {

ttl, err := needle.ReadTTL(qTtl)
if err != nil {
glog.Errorf("fail to parse ttl %s: %v", qTtl, err)
}

return fs.detectStorageOption(requestURI, qCollection, qReplication, int32(ttl.Minutes())*60, dataCenter, rack)
return fs.detectStorageOption(requestURI, qCollection, qReplication, int32(ttl.Minutes())*60, diskType, dataCenter, rack)
}

@@ -104,7 +104,7 @@ func (fs *FilerServer) doPostAutoChunk(ctx context.Context, w http.ResponseWrite

func (fs *FilerServer) doPutAutoChunk(ctx context.Context, w http.ResponseWriter, r *http.Request, chunkSize int32, so *operation.StorageOption) (filerResult *FilerPostResult, md5bytes []byte, replyerr error) {

fileName := ""
fileName := path.Base(r.URL.Path)
contentType := r.Header.Get("Content-Type")
if contentType == "application/octet-stream" {
contentType = ""
@@ -186,6 +186,7 @@ func (fs *FilerServer) saveMetaData(ctx context.Context, r *http.Request, fileNa
Replication: so.Replication,
Collection: so.Collection,
TtlSec: so.TtlSeconds,
DiskType: so.DiskType,
Mime: contentType,
Md5: md5bytes,
FileSize: uint64(chunkOffset),

@@ -68,6 +68,7 @@ func (fs *FilerServer) encrypt(ctx context.Context, w http.ResponseWriter, r *ht
Replication: so.Replication,
Collection: so.Collection,
TtlSec: so.TtlSeconds,
DiskType: so.DiskType,
Mime: pu.MimeType,
Md5: util.Base64Md5ToBytes(pu.ContentMd5),
},

@@ -67,9 +67,7 @@ func (ms *MasterServer) SendHeartbeat(stream master_pb.Seaweed_SendHeartbeatServ
dcName, rackName := ms.Topo.Configuration.Locate(heartbeat.Ip, heartbeat.DataCenter, heartbeat.Rack)
dc := ms.Topo.GetOrCreateDataCenter(dcName)
rack := dc.GetOrCreateRack(rackName)
dn = rack.GetOrCreateDataNode(heartbeat.Ip,
int(heartbeat.Port), heartbeat.PublicUrl,
int64(heartbeat.MaxVolumeCount))
dn = rack.GetOrCreateDataNode(heartbeat.Ip, int(heartbeat.Port), heartbeat.PublicUrl, heartbeat.MaxVolumeCounts)
glog.V(0).Infof("added volume server %v:%d", heartbeat.GetIp(), heartbeat.GetPort())
if err := stream.Send(&master_pb.HeartbeatResponse{
VolumeSizeLimit: uint64(ms.option.VolumeSizeLimitMB) * 1024 * 1024,
@@ -79,10 +77,7 @@ func (ms *MasterServer) SendHeartbeat(stream master_pb.Seaweed_SendHeartbeatServ
}
}

if heartbeat.MaxVolumeCount != 0 && dn.GetMaxVolumeCount() != int64(heartbeat.MaxVolumeCount) {
delta := int64(heartbeat.MaxVolumeCount) - dn.GetMaxVolumeCount()
dn.UpAdjustMaxVolumeCountDelta(delta)
}
dn.AdjustMaxVolumeCounts(heartbeat.MaxVolumeCounts)

glog.V(4).Infof("master received heartbeat %s", heartbeat.String())
message := &master_pb.VolumeLocation{

@@ -4,6 +4,7 @@ import (
"context"
"fmt"
"github.com/chrislusf/raft"
"github.com/chrislusf/seaweedfs/weed/storage/types"

"github.com/chrislusf/seaweedfs/weed/pb/master_pb"
"github.com/chrislusf/seaweedfs/weed/security"
@@ -60,11 +61,13 @@ func (ms *MasterServer) Assign(ctx context.Context, req *master_pb.AssignRequest
if err != nil {
return nil, err
}
diskType := types.ToDiskType(req.DiskType)

option := &topology.VolumeGrowOption{
Collection: req.Collection,
ReplicaPlacement: replicaPlacement,
Ttl: ttl,
DiskType: diskType,
Prealloacte: ms.preallocateSize,
DataCenter: req.DataCenter,
Rack: req.Rack,
@@ -73,7 +76,7 @@ func (ms *MasterServer) Assign(ctx context.Context, req *master_pb.AssignRequest
}

if !ms.Topo.HasWritableVolume(option) {
if ms.Topo.FreeSpace() <= 0 {
if ms.Topo.AvailableSpaceFor(option) <= 0 {
return nil, fmt.Errorf("No free volumes left!")
}
ms.vgLock.Lock()
@@ -117,10 +120,10 @@ func (ms *MasterServer) Statistics(ctx context.Context, req *master_pb.Statistic
return nil, err
}

volumeLayout := ms.Topo.GetVolumeLayout(req.Collection, replicaPlacement, ttl)
volumeLayout := ms.Topo.GetVolumeLayout(req.Collection, replicaPlacement, ttl, types.ToDiskType(req.DiskType))
stats := volumeLayout.Stats()

totalSize := ms.Topo.GetMaxVolumeCount() * int64(ms.option.VolumeSizeLimitMB) * 1024 * 1024
totalSize := ms.Topo.GetDiskUsages().GetMaxVolumeCount() * int64(ms.option.VolumeSizeLimitMB) * 1024 * 1024

resp := &master_pb.StatisticsResponse{
TotalSize: uint64(totalSize),

@@ -112,7 +112,7 @@ func (ms *MasterServer) dirAssignHandler(w http.ResponseWriter, r *http.Request)
}

if !ms.Topo.HasWritableVolume(option) {
if ms.Topo.FreeSpace() <= 0 {
if ms.Topo.AvailableSpaceFor(option) <= 0 {
writeJsonQuiet(w, r, http.StatusNotFound, operation.AssignResult{Error: "No free volumes left!"})
return
}
@@ -136,6 +136,9 @@ func (ms *MasterServer) dirAssignHandler(w http.ResponseWriter, r *http.Request)
}

func (ms *MasterServer) maybeAddJwtAuthorization(w http.ResponseWriter, fileId string, isWrite bool) {
if fileId == "" {
return
}
var encodedJwt security.EncodedJwt
if isWrite {
encodedJwt = security.GenJwt(ms.guard.SigningKey, ms.guard.ExpiresAfterSec, fileId)

@@ -3,6 +3,7 @@ package weed_server
import (
"context"
"fmt"
"github.com/chrislusf/seaweedfs/weed/storage/types"
"math/rand"
"net/http"
"strconv"
@@ -75,8 +76,8 @@ func (ms *MasterServer) volumeGrowHandler(w http.ResponseWriter, r *http.Request
}

if count, err = strconv.Atoi(r.FormValue("count")); err == nil {
if ms.Topo.FreeSpace() < int64(count*option.ReplicaPlacement.GetCopyCount()) {
err = fmt.Errorf("only %d volumes left, not enough for %d", ms.Topo.FreeSpace(), count*option.ReplicaPlacement.GetCopyCount())
if ms.Topo.AvailableSpaceFor(option) < int64(count*option.ReplicaPlacement.GetCopyCount()) {
err = fmt.Errorf("only %d volumes left, not enough for %d", ms.Topo.AvailableSpaceFor(option), count*option.ReplicaPlacement.GetCopyCount())
} else {
count, err = ms.vg.GrowByCountAndType(ms.grpcDialOption, count, option, ms.Topo)
}
@@ -124,19 +125,19 @@ func (ms *MasterServer) selfUrl(r *http.Request) string {
}
func (ms *MasterServer) submitFromMasterServerHandler(w http.ResponseWriter, r *http.Request) {
if ms.Topo.IsLeader() {
submitForClientHandler(w, r, ms.selfUrl(r), ms.grpcDialOption)
submitForClientHandler(w, r, func() string { return ms.selfUrl(r) }, ms.grpcDialOption)
} else {
masterUrl, err := ms.Topo.Leader()
if err != nil {
writeJsonError(w, r, http.StatusInternalServerError, err)
} else {
submitForClientHandler(w, r, masterUrl, ms.grpcDialOption)
submitForClientHandler(w, r, func() string { return masterUrl }, ms.grpcDialOption)
}
}
}

func (ms *MasterServer) HasWritableVolume(option *topology.VolumeGrowOption) bool {
vl := ms.Topo.GetVolumeLayout(option.Collection, option.ReplicaPlacement, option.Ttl)
vl := ms.Topo.GetVolumeLayout(option.Collection, option.ReplicaPlacement, option.Ttl, option.DiskType)
return vl.GetActiveVolumeCount(option) > 0
}

@@ -157,6 +158,7 @@ func (ms *MasterServer) getVolumeGrowOption(r *http.Request) (*topology.VolumeGr
if err != nil {
return nil, err
}
diskType := types.ToDiskType(r.FormValue("disk"))

preallocate := ms.preallocateSize
if r.FormValue("preallocate") != "" {
@@ -169,6 +171,7 @@ func (ms *MasterServer) getVolumeGrowOption(r *http.Request) (*topology.VolumeGr
Collection: r.FormValue("collection"),
ReplicaPlacement: replicaPlacement,
Ttl: ttl,
DiskType: diskType,
Prealloacte: preallocate,
DataCenter: r.FormValue("dataCenter"),
Rack: r.FormValue("rack"),

@@ -41,6 +41,7 @@ func (vs *VolumeServer) AllocateVolume(ctx context.Context, req *volume_server_p
req.Ttl,
req.Preallocate,
req.MemoryMapMaxSizeMb,
types.ToDiskType(req.DiskType),
)

if err != nil {

@@ -219,15 +219,14 @@ func (vs *VolumeServer) doHeartbeat(masterNode, masterGrpcAddress string, grpcDi
case <-vs.stopChan:
var volumeMessages []*master_pb.VolumeInformationMessage
emptyBeat := &master_pb.Heartbeat{
Ip: vs.store.Ip,
Port: uint32(vs.store.Port),
PublicUrl: vs.store.PublicUrl,
MaxVolumeCount: uint32(0),
MaxFileKey: uint64(0),
DataCenter: vs.store.GetDataCenter(),
Rack: vs.store.GetRack(),
Volumes: volumeMessages,
HasNoVolumes: len(volumeMessages) == 0,
Ip: vs.store.Ip,
Port: uint32(vs.store.Port),
PublicUrl: vs.store.PublicUrl,
MaxFileKey: uint64(0),
DataCenter: vs.store.GetDataCenter(),
Rack: vs.store.GetRack(),
Volumes: volumeMessages,
HasNoVolumes: len(volumeMessages) == 0,
}
glog.V(1).Infof("volume server %s:%d stops and deletes all volumes", vs.store.Ip, vs.store.Port)
if err = stream.Send(emptyBeat); err != nil {

@@ -3,6 +3,7 @@ package weed_server
import (
"context"
"fmt"
"github.com/chrislusf/seaweedfs/weed/storage/types"
"io"
"io/ioutil"
"math"
@@ -36,11 +37,6 @@ func (vs *VolumeServer) VolumeCopy(ctx context.Context, req *volume_server_pb.Vo
glog.V(0).Infof("deleted existing volume %d before copying.", req.VolumeId)
}

location := vs.store.FindFreeLocation()
if location == nil {
return nil, fmt.Errorf("no space left")
}

// the master will not start compaction for read-only volumes, so it is safe to just copy files directly
// copy .dat and .idx files
// read .idx .dat file size and timestamp
@@ -59,6 +55,15 @@ func (vs *VolumeServer) VolumeCopy(ctx context.Context, req *volume_server_pb.Vo
return fmt.Errorf("read volume file status failed, %v", err)
}

diskType := volFileInfoResp.DiskType
if req.DiskType != "" {
diskType = req.DiskType
}
location := vs.store.FindFreeLocation(types.ToDiskType(diskType))
if location == nil {
return fmt.Errorf("no space left")
}

dataBaseFileName = storage.VolumeFileName(location.Directory, volFileInfoResp.Collection, int(req.VolumeId))
indexBaseFileName = storage.VolumeFileName(location.IdxDirectory, volFileInfoResp.Collection, int(req.VolumeId))

@@ -206,6 +211,7 @@ func (vs *VolumeServer) ReadVolumeFileStatus(ctx context.Context, req *volume_se
resp.FileCount = v.FileCount()
resp.CompactionRevision = uint32(v.CompactionRevision)
resp.Collection = v.Collection
resp.DiskType = string(v.DiskType())
return resp, nil
}


@@ -105,7 +105,7 @@ func (vs *VolumeServer) VolumeEcShardsCopy(ctx context.Context, req *volume_serv

glog.V(0).Infof("VolumeEcShardsCopy: %v", req)

location := vs.store.FindFreeLocation()
location := vs.store.FindFreeLocation(types.HardDriveType)
if location == nil {
return nil, fmt.Errorf("no space left")
}

@@ -2,6 +2,7 @@ package weed_server

import (
"fmt"
"github.com/chrislusf/seaweedfs/weed/storage/types"
"net/http"

"google.golang.org/grpc"
@@ -37,7 +38,7 @@ type VolumeServer struct {

func NewVolumeServer(adminMux, publicMux *http.ServeMux, ip string,
port int, publicUrl string,
folders []string, maxCounts []int, minFreeSpacePercents []float32,
folders []string, maxCounts []int, minFreeSpacePercents []float32, diskTypes []types.DiskType,
idxFolder string,
needleMapKind storage.NeedleMapKind,
masterNodes []string, pulseSeconds int,
@@ -76,7 +77,7 @@ func NewVolumeServer(adminMux, publicMux *http.ServeMux, ip string,

vs.checkWithMaster()

vs.store = storage.NewStore(vs.grpcDialOption, port, ip, publicUrl, folders, maxCounts, minFreeSpacePercents, idxFolder, vs.needleMapKind)
vs.store = storage.NewStore(vs.grpcDialOption, port, ip, publicUrl, folders, maxCounts, minFreeSpacePercents, idxFolder, vs.needleMapKind, diskTypes)
vs.guard = security.NewGuard(whiteList, signingKey, expiresAfterSec, readSigningKey, readExpiresAfterSec)

handleStaticResources(adminMux)

@@ -16,7 +16,9 @@ func (vs *VolumeServer) statusHandler(w http.ResponseWriter, r *http.Request) {
var ds []*volume_server_pb.DiskStatus
for _, loc := range vs.store.Locations {
if dir, e := filepath.Abs(loc.Directory); e == nil {
ds = append(ds, stats.NewDiskStatus(dir))
newDiskStatus := stats.NewDiskStatus(dir)
newDiskStatus.DiskType = loc.DiskType.String()
ds = append(ds, newDiskStatus)
}
}
m["DiskStatuses"] = ds
@@ -31,7 +33,9 @@ func (vs *VolumeServer) statsDiskHandler(w http.ResponseWriter, r *http.Request)
var ds []*volume_server_pb.DiskStatus
for _, loc := range vs.store.Locations {
if dir, e := filepath.Abs(loc.Directory); e == nil {
ds = append(ds, stats.NewDiskStatus(dir))
newDiskStatus := stats.NewDiskStatus(dir)
newDiskStatus.DiskType = loc.DiskType.String()
ds = append(ds, newDiskStatus)
}
}
m["DiskStatuses"] = ds

@@ -63,7 +63,7 @@ func (vs *VolumeServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request)
w.WriteHeader(http.StatusNotFound)
return
}
lookupResult, err := operation.Lookup(vs.GetMaster(), volumeId.String())
lookupResult, err := operation.Lookup(vs.GetMaster, volumeId.String())
glog.V(2).Infoln("volume", volumeId, "found on", lookupResult, "error", err)
if err == nil && len(lookupResult.Locations) > 0 {
u, _ := url.Parse(util.NormalizeUrl(lookupResult.Locations[0].PublicUrl))

@@ -19,7 +19,9 @@ func (vs *VolumeServer) uiStatusHandler(w http.ResponseWriter, r *http.Request)
var ds []*volume_server_pb.DiskStatus
for _, loc := range vs.store.Locations {
if dir, e := filepath.Abs(loc.Directory); e == nil {
ds = append(ds, stats.NewDiskStatus(dir))
newDiskStatus := stats.NewDiskStatus(dir)
newDiskStatus.DiskType = loc.DiskType.String()
ds = append(ds, newDiskStatus)
}
}
volumeInfos := vs.store.VolumeInfos()

@@ -50,7 +50,7 @@ func (vs *VolumeServer) PostHandler(w http.ResponseWriter, r *http.Request) {
}

ret := operation.UploadResult{}
isUnchanged, writeError := topology.ReplicatedWrite(vs.GetMaster(), vs.store, volumeId, reqNeedle, r)
isUnchanged, writeError := topology.ReplicatedWrite(vs.GetMaster, vs.store, volumeId, reqNeedle, r)

// http 204 status code does not allow body
if writeError == nil && isUnchanged {
@@ -128,7 +128,7 @@ func (vs *VolumeServer) DeleteHandler(w http.ResponseWriter, r *http.Request) {
return
}
// make sure all chunks had deleted before delete manifest
if e := chunkManifest.DeleteChunks(vs.GetMaster(), false, vs.grpcDialOption); e != nil {
if e := chunkManifest.DeleteChunks(vs.GetMaster, false, vs.grpcDialOption); e != nil {
writeJsonError(w, r, http.StatusInternalServerError, fmt.Errorf("Delete chunks error: %v", e))
return
}
@@ -143,7 +143,7 @@ func (vs *VolumeServer) DeleteHandler(w http.ResponseWriter, r *http.Request) {
}
}

_, err := topology.ReplicatedDelete(vs.GetMaster(), vs.store, volumeId, n, r)
_, err := topology.ReplicatedDelete(vs.GetMaster, vs.store, volumeId, n, r)

writeDeleteResult(err, count, w, r)


@@ -69,6 +69,7 @@ var StatusTpl = template.Must(template.New("status").Funcs(funcMap).Parse(`<!DOC
<thead>
<tr>
<th>Path</th>
<th>Disk</th>
<th>Total</th>
<th>Free</th>
<th>Usage</th>
@@ -78,6 +79,7 @@ var StatusTpl = template.Must(template.New("status").Funcs(funcMap).Parse(`<!DOC
{{ range .DiskStatuses }}
<tr>
<td>{{ .Dir }}</td>
<td>{{ .DiskType }}</td>
<td>{{ bytesToHumanReadable .All }}</td>
<td>{{ bytesToHumanReadable .Free }}</td>
<td>{{ percentFrom .All .Used}}%</td>
@@ -127,6 +129,7 @@ var StatusTpl = template.Must(template.New("status").Funcs(funcMap).Parse(`<!DOC
<tr>
<th>Id</th>
<th>Collection</th>
<th>Disk</th>
<th>Data Size</th>
<th>Files</th>
<th>Trash</th>
@@ -139,6 +142,7 @@ var StatusTpl = template.Must(template.New("status").Funcs(funcMap).Parse(`<!DOC
<tr>
<td><code>{{ .Id }}</code></td>
<td>{{ .Collection }}</td>
<td>{{ .DiskType }}</td>
<td>{{ bytesToHumanReadable .Size }}</td>
<td>{{ .FileCount }}</td>
<td>{{ .DeleteCount }} / {{bytesToHumanReadable .DeletedByteCount}}</td>

@@ -33,6 +33,7 @@ type WebDavOption struct {
BucketsPath string
GrpcDialOption grpc.DialOption
Collection string
DiskType string
Uid uint32
Gid uint32
Cipher bool
@@ -382,6 +383,7 @@ func (f *WebDavFile) saveDataAsChunk(reader io.Reader, name string, offset int64
Count: 1,
Replication: "",
Collection: f.fs.option.Collection,
DiskType: f.fs.option.DiskType,
Path: name,
}


@@ -3,6 +3,7 @@ package shell
import (
"flag"
"fmt"
"github.com/chrislusf/seaweedfs/weed/storage/types"
"io"
"sort"

@@ -325,7 +326,9 @@ func balanceEcShardsWithinRacks(commandEnv *CommandEnv, allEcNodes []*EcNode, ra

var possibleDestinationEcNodes []*EcNode
for _, n := range racks[RackId(rackId)].ecNodes {
possibleDestinationEcNodes = append(possibleDestinationEcNodes, n)
if _, found := n.info.DiskInfos[string(types.HardDriveType)]; found {
possibleDestinationEcNodes = append(possibleDestinationEcNodes, n)
}
}
sourceEcNodes := rackEcNodesWithVid[rackId]
averageShardsPerEcNode := ceilDivide(rackToShardCount[rackId], len(possibleDestinationEcNodes))
@@ -386,11 +389,15 @@ func doBalanceEcRack(commandEnv *CommandEnv, ecRack *EcRack, applyBalancing bool
rackEcNodes = append(rackEcNodes, node)
}

ecNodeIdToShardCount := groupByCount(rackEcNodes, func(node *EcNode) (id string, count int) {
for _, ecShardInfo := range node.info.EcShardInfos {
ecNodeIdToShardCount := groupByCount(rackEcNodes, func(ecNode *EcNode) (id string, count int) {
diskInfo, found := ecNode.info.DiskInfos[string(types.HardDriveType)]
if !found {
return
}
for _, ecShardInfo := range diskInfo.EcShardInfos {
count += erasure_coding.ShardBits(ecShardInfo.EcIndexBits).ShardIdCount()
}
return node.info.Id, count
return ecNode.info.Id, count
})

var totalShardCount int
@@ -411,26 +418,30 @@ func doBalanceEcRack(commandEnv *CommandEnv, ecRack *EcRack, applyBalancing bool
if fullNodeShardCount > averageShardCount && emptyNodeShardCount+1 <= averageShardCount {

emptyNodeIds := make(map[uint32]bool)
for _, shards := range emptyNode.info.EcShardInfos {
emptyNodeIds[shards.Id] = true
if emptyDiskInfo, found := emptyNode.info.DiskInfos[string(types.HardDriveType)]; found {
for _, shards := range emptyDiskInfo.EcShardInfos {
emptyNodeIds[shards.Id] = true
}
}
for _, shards := range fullNode.info.EcShardInfos {
if _, found := emptyNodeIds[shards.Id]; !found {
for _, shardId := range erasure_coding.ShardBits(shards.EcIndexBits).ShardIds() {
if fullDiskInfo, found := fullNode.info.DiskInfos[string(types.HardDriveType)]; found {
for _, shards := range fullDiskInfo.EcShardInfos {
if _, found := emptyNodeIds[shards.Id]; !found {
for _, shardId := range erasure_coding.ShardBits(shards.EcIndexBits).ShardIds() {

fmt.Printf("%s moves ec shards %d.%d to %s\n", fullNode.info.Id, shards.Id, shardId, emptyNode.info.Id)
fmt.Printf("%s moves ec shards %d.%d to %s\n", fullNode.info.Id, shards.Id, shardId, emptyNode.info.Id)

err := moveMountedShardToEcNode(commandEnv, fullNode, shards.Collection, needle.VolumeId(shards.Id), shardId, emptyNode, applyBalancing)
if err != nil {
return err
err := moveMountedShardToEcNode(commandEnv, fullNode, shards.Collection, needle.VolumeId(shards.Id), shardId, emptyNode, applyBalancing)
if err != nil {
return err
}

ecNodeIdToShardCount[emptyNode.info.Id]++
ecNodeIdToShardCount[fullNode.info.Id]--
hasMove = true
break
}

ecNodeIdToShardCount[emptyNode.info.Id]++
ecNodeIdToShardCount[fullNode.info.Id]--
hasMove = true
break
}
break
}
}
}
@@ -511,7 +522,11 @@ func pickNEcShardsToMoveFrom(ecNodes []*EcNode, vid needle.VolumeId, n int) map[
func collectVolumeIdToEcNodes(allEcNodes []*EcNode) map[needle.VolumeId][]*EcNode {
vidLocations := make(map[needle.VolumeId][]*EcNode)
for _, ecNode := range allEcNodes {
for _, shardInfo := range ecNode.info.EcShardInfos {
diskInfo, found := ecNode.info.DiskInfos[string(types.HardDriveType)]
if !found {
continue
}
for _, shardInfo := range diskInfo.EcShardInfos {
vidLocations[needle.VolumeId(shardInfo.Id)] = append(vidLocations[needle.VolumeId(shardInfo.Id)], ecNode)
}
}

@@ -3,6 +3,7 @@ package shell
import (
"context"
"fmt"
"github.com/chrislusf/seaweedfs/weed/storage/types"
"math"
"sort"

@@ -159,8 +160,15 @@ func countShards(ecShardInfos []*master_pb.VolumeEcShardInformationMessage) (cou
return
}

func countFreeShardSlots(dn *master_pb.DataNodeInfo) (count int) {
return int(dn.MaxVolumeCount-dn.ActiveVolumeCount)*erasure_coding.DataShardsCount - countShards(dn.EcShardInfos)
func countFreeShardSlots(dn *master_pb.DataNodeInfo, diskType types.DiskType) (count int) {
if dn.DiskInfos == nil {
return 0
}
diskInfo := dn.DiskInfos[string(diskType)]
if diskInfo == nil {
return 0
}
return int(diskInfo.MaxVolumeCount-diskInfo.ActiveVolumeCount)*erasure_coding.DataShardsCount - countShards(diskInfo.EcShardInfos)
}

type RackId string
@@ -174,10 +182,12 @@ type EcNode struct {
}

func (ecNode *EcNode) localShardIdCount(vid uint32) int {
for _, ecShardInfo := range ecNode.info.EcShardInfos {
if vid == ecShardInfo.Id {
shardBits := erasure_coding.ShardBits(ecShardInfo.EcIndexBits)
return shardBits.ShardIdCount()
for _, diskInfo := range ecNode.info.DiskInfos {
for _, ecShardInfo := range diskInfo.EcShardInfos {
if vid == ecShardInfo.Id {
shardBits := erasure_coding.ShardBits(ecShardInfo.EcIndexBits)
return shardBits.ShardIdCount()
}
}
}
return 0
@@ -214,7 +224,7 @@ func collectEcVolumeServersByDc(topo *master_pb.TopologyInfo, selectedDataCenter
return
}

freeEcSlots := countFreeShardSlots(dn)
freeEcSlots := countFreeShardSlots(dn, types.HardDriveType)
ecNodes = append(ecNodes, &EcNode{
info: dn,
dc: dc,
@@ -278,9 +288,11 @@ func ceilDivide(total, n int) int {

func findEcVolumeShards(ecNode *EcNode, vid needle.VolumeId) erasure_coding.ShardBits {

for _, shardInfo := range ecNode.info.EcShardInfos {
if needle.VolumeId(shardInfo.Id) == vid {
return erasure_coding.ShardBits(shardInfo.EcIndexBits)
if diskInfo, found := ecNode.info.DiskInfos[string(types.HardDriveType)]; found {
for _, shardInfo := range diskInfo.EcShardInfos {
if needle.VolumeId(shardInfo.Id) == vid {
return erasure_coding.ShardBits(shardInfo.EcIndexBits)
}
}
}

@@ -290,18 +302,26 @@ func findEcVolumeShards(ecNode *EcNode, vid needle.VolumeId) erasure_coding.Shar
func (ecNode *EcNode) addEcVolumeShards(vid needle.VolumeId, collection string, shardIds []uint32) *EcNode {

foundVolume := false
for _, shardInfo := range ecNode.info.EcShardInfos {
if needle.VolumeId(shardInfo.Id) == vid {
oldShardBits := erasure_coding.ShardBits(shardInfo.EcIndexBits)
newShardBits := oldShardBits
for _, shardId := range shardIds {
newShardBits = newShardBits.AddShardId(erasure_coding.ShardId(shardId))
diskInfo, found := ecNode.info.DiskInfos[string(types.HardDriveType)]
if found {
for _, shardInfo := range diskInfo.EcShardInfos {
if needle.VolumeId(shardInfo.Id) == vid {
oldShardBits := erasure_coding.ShardBits(shardInfo.EcIndexBits)
newShardBits := oldShardBits
for _, shardId := range shardIds {
newShardBits = newShardBits.AddShardId(erasure_coding.ShardId(shardId))
}
shardInfo.EcIndexBits = uint32(newShardBits)
ecNode.freeEcSlot -= newShardBits.ShardIdCount() - oldShardBits.ShardIdCount()
foundVolume = true
break
}
shardInfo.EcIndexBits = uint32(newShardBits)
ecNode.freeEcSlot -= newShardBits.ShardIdCount() - oldShardBits.ShardIdCount()
foundVolume = true
break
}
} else {
diskInfo = &master_pb.DiskInfo{
Type: string(types.HardDriveType),
}
ecNode.info.DiskInfos[string(types.HardDriveType)] = diskInfo
}

if !foundVolume {
@@ -309,10 +329,11 @@ func (ecNode *EcNode) addEcVolumeShards(vid needle.VolumeId, collection string,
for _, shardId := range shardIds {
newShardBits = newShardBits.AddShardId(erasure_coding.ShardId(shardId))
}
ecNode.info.EcShardInfos = append(ecNode.info.EcShardInfos, &master_pb.VolumeEcShardInformationMessage{
diskInfo.EcShardInfos = append(diskInfo.EcShardInfos, &master_pb.VolumeEcShardInformationMessage{
Id: uint32(vid),
Collection: collection,
EcIndexBits: uint32(newShardBits),
DiskType: string(types.HardDriveType),
})
ecNode.freeEcSlot -= len(shardIds)
}
@@ -322,15 +343,17 @@ func (ecNode *EcNode) addEcVolumeShards(vid needle.VolumeId, collection string,

func (ecNode *EcNode) deleteEcVolumeShards(vid needle.VolumeId, shardIds []uint32) *EcNode {

for _, shardInfo := range ecNode.info.EcShardInfos {
if needle.VolumeId(shardInfo.Id) == vid {
oldShardBits := erasure_coding.ShardBits(shardInfo.EcIndexBits)
newShardBits := oldShardBits
for _, shardId := range shardIds {
newShardBits = newShardBits.RemoveShardId(erasure_coding.ShardId(shardId))
if diskInfo, found := ecNode.info.DiskInfos[string(types.HardDriveType)]; found {
for _, shardInfo := range diskInfo.EcShardInfos {
if needle.VolumeId(shardInfo.Id) == vid {
oldShardBits := erasure_coding.ShardBits(shardInfo.EcIndexBits)
newShardBits := oldShardBits
for _, shardId := range shardIds {
newShardBits = newShardBits.RemoveShardId(erasure_coding.ShardId(shardId))
}
shardInfo.EcIndexBits = uint32(newShardBits)
ecNode.freeEcSlot -= newShardBits.ShardIdCount() - oldShardBits.ShardIdCount()
}
shardInfo.EcIndexBits = uint32(newShardBits)
ecNode.freeEcSlot -= newShardBits.ShardIdCount() - oldShardBits.ShardIdCount()
}
}


@@ -4,6 +4,7 @@ import (
"context"
"flag"
"fmt"
"github.com/chrislusf/seaweedfs/weed/storage/types"
"io"

"google.golang.org/grpc"
@@ -225,9 +226,11 @@ func collectTopologyInfo(commandEnv *CommandEnv) (topoInfo *master_pb.TopologyIn
func collectEcShardInfos(topoInfo *master_pb.TopologyInfo, selectedCollection string, vid needle.VolumeId) (ecShardInfos []*master_pb.VolumeEcShardInformationMessage) {

eachDataNode(topoInfo, func(dc string, rack RackId, dn *master_pb.DataNodeInfo) {
for _, v := range dn.EcShardInfos {
if v.Collection == selectedCollection && v.Id == uint32(vid) {
ecShardInfos = append(ecShardInfos, v)
if diskInfo, found := dn.DiskInfos[string(types.HardDriveType)]; found {
for _, v := range diskInfo.EcShardInfos {
if v.Collection == selectedCollection && v.Id == uint32(vid) {
ecShardInfos = append(ecShardInfos, v)
}
}
}
})
@@ -239,9 +242,11 @@ func collectEcShardIds(topoInfo *master_pb.TopologyInfo, selectedCollection stri

vidMap := make(map[uint32]bool)
eachDataNode(topoInfo, func(dc string, rack RackId, dn *master_pb.DataNodeInfo) {
for _, v := range dn.EcShardInfos {
if v.Collection == selectedCollection {
vidMap[v.Id] = true
if diskInfo, found := dn.DiskInfos[string(types.HardDriveType)]; found {
for _, v := range diskInfo.EcShardInfos {
if v.Collection == selectedCollection {
vidMap[v.Id] = true
}
}
}
})
@@ -257,9 +262,11 @@ func collectEcNodeShardBits(topoInfo *master_pb.TopologyInfo, vid needle.VolumeI

nodeToEcIndexBits := make(map[string]erasure_coding.ShardBits)
eachDataNode(topoInfo, func(dc string, rack RackId, dn *master_pb.DataNodeInfo) {
for _, v := range dn.EcShardInfos {
if v.Id == uint32(vid) {
nodeToEcIndexBits[dn.Id] = erasure_coding.ShardBits(v.EcIndexBits)
if diskInfo, found := dn.DiskInfos[string(types.HardDriveType)]; found {
for _, v := range diskInfo.EcShardInfos {
if v.Id == uint32(vid) {
nodeToEcIndexBits[dn.Id] = erasure_coding.ShardBits(v.EcIndexBits)
}
}
}
})

@@ -281,10 +281,12 @@ func collectVolumeIdsForEcEncode(commandEnv *CommandEnv, selectedCollection stri

vidMap := make(map[uint32]bool)
eachDataNode(resp.TopologyInfo, func(dc string, rack RackId, dn *master_pb.DataNodeInfo) {
for _, v := range dn.VolumeInfos {
if v.Collection == selectedCollection && v.ModifiedAtSecond+quietSeconds < nowUnixSeconds {
if float64(v.Size) > fullPercentage/100*float64(resp.VolumeSizeLimitMb)*1024*1024 {
vidMap[v.Id] = true
for _, diskInfo := range dn.DiskInfos {
for _, v := range diskInfo.VolumeInfos {
if v.Collection == selectedCollection && v.ModifiedAtSecond+quietSeconds < nowUnixSeconds {
if float64(v.Size) > fullPercentage/100*float64(resp.VolumeSizeLimitMb)*1024*1024 {
vidMap[v.Id] = true
}
}
}
}

@@ -188,10 +188,12 @@ func prepareDataToRecover(commandEnv *CommandEnv, rebuilder *EcNode, collection

needEcxFile := true
var localShardBits erasure_coding.ShardBits
for _, ecShardInfo := range rebuilder.info.EcShardInfos {
if ecShardInfo.Collection == collection && needle.VolumeId(ecShardInfo.Id) == volumeId {
needEcxFile = false
localShardBits = erasure_coding.ShardBits(ecShardInfo.EcIndexBits)
for _, diskInfo := range rebuilder.info.DiskInfos {
for _, ecShardInfo := range diskInfo.EcShardInfos {
if ecShardInfo.Collection == collection && needle.VolumeId(ecShardInfo.Id) == volumeId {
needEcxFile = false
localShardBits = erasure_coding.ShardBits(ecShardInfo.EcIndexBits)
}
}
}

@@ -247,15 +249,17 @@ type EcShardMap map[needle.VolumeId]EcShardLocations
type EcShardLocations [][]*EcNode

func (ecShardMap EcShardMap) registerEcNode(ecNode *EcNode, collection string) {
for _, shardInfo := range ecNode.info.EcShardInfos {
if shardInfo.Collection == collection {
existing, found := ecShardMap[needle.VolumeId(shardInfo.Id)]
if !found {
existing = make([][]*EcNode, erasure_coding.TotalShardsCount)
ecShardMap[needle.VolumeId(shardInfo.Id)] = existing
}
for _, shardId := range erasure_coding.ShardBits(shardInfo.EcIndexBits).ShardIds() {
existing[shardId] = append(existing[shardId], ecNode)
for _, diskInfo := range ecNode.info.DiskInfos {
for _, shardInfo := range diskInfo.EcShardInfos {
if shardInfo.Collection == collection {
existing, found := ecShardMap[needle.VolumeId(shardInfo.Id)]
if !found {
existing = make([][]*EcNode, erasure_coding.TotalShardsCount)
ecShardMap[needle.VolumeId(shardInfo.Id)] = existing
}
for _, shardId := range erasure_coding.ShardBits(shardInfo.EcIndexBits).ShardIds() {
existing[shardId] = append(existing[shardId], ecNode)
}
}
}
}

@@ -126,7 +126,8 @@ func TestCommandEcBalanceVolumeEvenButRackUneven(t *testing.T) {
func newEcNode(dc string, rack string, dataNodeId string, freeEcSlot int) *EcNode {
return &EcNode{
info: &master_pb.DataNodeInfo{
Id: dataNodeId,
Id: dataNodeId,
DiskInfos: make(map[string]*master_pb.DiskInfo),
},
dc: dc,
rack: RackId(rack),

@@ -52,6 +52,7 @@ func (c *commandFsConfigure) Do(args []string, commandEnv *CommandEnv, writer io
collection := fsConfigureCommand.String("collection", "", "assign writes to this collection")
replication := fsConfigureCommand.String("replication", "", "assign writes with this replication")
ttl := fsConfigureCommand.String("ttl", "", "assign writes with this ttl")
diskType := fsConfigureCommand.String("disk", "", "[hdd|ssd] hard drive or solid state drive")
fsync := fsConfigureCommand.Bool("fsync", false, "fsync for the writes")
volumeGrowthCount := fsConfigureCommand.Int("volumeGrowthCount", 0, "the number of physical volumes to add if no writable volumes")
isDelete := fsConfigureCommand.Bool("delete", false, "delete the configuration by locationPrefix")
@@ -81,6 +82,7 @@ func (c *commandFsConfigure) Do(args []string, commandEnv *CommandEnv, writer io
Replication: *replication,
Ttl: *ttl,
Fsync: *fsync,
DiskType: *diskType,
VolumeGrowthCount: uint32(*volumeGrowthCount),
}


@@ -5,6 +5,7 @@ import (
|
||||
"flag"
|
||||
"fmt"
|
||||
"github.com/chrislusf/seaweedfs/weed/storage/super_block"
|
||||
"github.com/chrislusf/seaweedfs/weed/storage/types"
|
||||
"io"
|
||||
"os"
|
||||
"sort"
|
||||
@@ -85,6 +86,7 @@ func (c *commandVolumeBalance) Do(args []string, commandEnv *CommandEnv, writer
|
||||
|
||||
volumeServers := collectVolumeServersByDc(resp.TopologyInfo, *dc)
|
||||
volumeReplicas, _ := collectVolumeReplicaLocations(resp)
|
||||
diskTypes := collectVolumeDiskTypes(resp.TopologyInfo)
|
||||
|
||||
if *collection == "EACH_COLLECTION" {
|
||||
collections, err := ListCollectionNames(commandEnv, true, false)
|
||||
@@ -92,16 +94,16 @@ func (c *commandVolumeBalance) Do(args []string, commandEnv *CommandEnv, writer
|
||||
return err
|
||||
}
|
||||
for _, c := range collections {
|
||||
if err = balanceVolumeServers(commandEnv, volumeReplicas, volumeServers, resp.VolumeSizeLimitMb*1024*1024, c, *applyBalancing); err != nil {
|
||||
if err = balanceVolumeServers(commandEnv, diskTypes, volumeReplicas, volumeServers, resp.VolumeSizeLimitMb*1024*1024, c, *applyBalancing); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
} else if *collection == "ALL_COLLECTIONS" {
|
||||
if err = balanceVolumeServers(commandEnv, volumeReplicas, volumeServers, resp.VolumeSizeLimitMb*1024*1024, "ALL_COLLECTIONS", *applyBalancing); err != nil {
|
||||
if err = balanceVolumeServers(commandEnv, diskTypes, volumeReplicas, volumeServers, resp.VolumeSizeLimitMb*1024*1024, "ALL_COLLECTIONS", *applyBalancing); err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
if err = balanceVolumeServers(commandEnv, volumeReplicas, volumeServers, resp.VolumeSizeLimitMb*1024*1024, *collection, *applyBalancing); err != nil {
|
||||
if err = balanceVolumeServers(commandEnv, diskTypes, volumeReplicas, volumeServers, resp.VolumeSizeLimitMb*1024*1024, *collection, *applyBalancing); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
@@ -109,7 +111,18 @@ func (c *commandVolumeBalance) Do(args []string, commandEnv *CommandEnv, writer
|
||||
return nil
|
||||
}
|
||||
|
||||
func balanceVolumeServers(commandEnv *CommandEnv, volumeReplicas map[uint32][]*VolumeReplica, nodes []*Node, volumeSizeLimit uint64, collection string, applyBalancing bool) error {
|
||||
func balanceVolumeServers(commandEnv *CommandEnv, diskTypes []types.DiskType, volumeReplicas map[uint32][]*VolumeReplica, nodes []*Node, volumeSizeLimit uint64, collection string, applyBalancing bool) error {
|
||||
|
||||
for _, diskType := range diskTypes {
|
||||
if err := balanceVolumeServersByDiskType(commandEnv, diskType, volumeReplicas, nodes, volumeSizeLimit, collection, applyBalancing); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
|
||||
}
|
||||
|
||||
func balanceVolumeServersByDiskType(commandEnv *CommandEnv, diskType types.DiskType, volumeReplicas map[uint32][]*VolumeReplica, nodes []*Node, volumeSizeLimit uint64, collection string, applyBalancing bool) error {
|
||||
|
||||
// balance writable volumes
|
||||
for _, n := range nodes {
|
||||
@@ -119,10 +132,10 @@ func balanceVolumeServers(commandEnv *CommandEnv, volumeReplicas map[uint32][]*V
|
||||
return false
|
||||
}
|
||||
}
|
||||
return !v.ReadOnly && v.Size < volumeSizeLimit
|
||||
return v.DiskType == string(diskType) && (!v.ReadOnly && v.Size < volumeSizeLimit)
|
||||
})
|
||||
}
|
||||
if err := balanceSelectedVolume(commandEnv, volumeReplicas, nodes, sortWritableVolumes, applyBalancing); err != nil {
|
||||
if err := balanceSelectedVolume(commandEnv, volumeReplicas, nodes, capacityByMaxVolumeCount(diskType), sortWritableVolumes, applyBalancing); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -134,10 +147,10 @@ func balanceVolumeServers(commandEnv *CommandEnv, volumeReplicas map[uint32][]*V
|
||||
return false
|
||||
}
|
||||
}
|
||||
return v.ReadOnly || v.Size >= volumeSizeLimit
|
||||
return v.DiskType == string(diskType) && (v.ReadOnly || v.Size >= volumeSizeLimit)
|
||||
})
|
||||
}
|
||||
if err := balanceSelectedVolume(commandEnv, volumeReplicas, nodes, sortReadOnlyVolumes, applyBalancing); err != nil {
|
||||
if err := balanceSelectedVolume(commandEnv, volumeReplicas, nodes, capacityByMaxVolumeCount(diskType), sortReadOnlyVolumes, applyBalancing); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -162,6 +175,25 @@ func collectVolumeServersByDc(t *master_pb.TopologyInfo, selectedDataCenter stri
|
||||
return
|
||||
}
|
||||
|
||||
func collectVolumeDiskTypes(t *master_pb.TopologyInfo) (diskTypes []types.DiskType) {
|
||||
knownTypes := make(map[string]bool)
|
||||
for _, dc := range t.DataCenterInfos {
|
||||
for _, r := range dc.RackInfos {
|
||||
for _, dn := range r.DataNodeInfos {
|
||||
for diskType, _ := range dn.DiskInfos {
|
||||
if _, found := knownTypes[diskType]; !found {
|
||||
knownTypes[diskType] = true
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
for diskType, _ := range knownTypes {
|
||||
diskTypes = append(diskTypes, types.ToDiskType(diskType))
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
type Node struct {
|
||||
info *master_pb.DataNodeInfo
|
||||
selectedVolumes map[uint32]*master_pb.VolumeInformationMessage
|
||||
@@ -169,19 +201,43 @@ type Node struct {
|
||||
rack string
|
||||
}
|
||||
|
||||
func (n *Node) localVolumeRatio() float64 {
|
||||
return divide(len(n.selectedVolumes), int(n.info.MaxVolumeCount))
|
||||
type CapacityFunc func(*master_pb.DataNodeInfo) int
|
||||
|
||||
func capacityByMaxVolumeCount(diskType types.DiskType) CapacityFunc {
|
||||
return func(info *master_pb.DataNodeInfo) int {
|
||||
diskInfo, found := info.DiskInfos[string(diskType)]
|
||||
if !found {
|
||||
return 0
|
||||
}
|
||||
return int(diskInfo.MaxVolumeCount)
|
||||
}
|
||||
}
|
||||
|
||||
func (n *Node) localVolumeNextRatio() float64 {
|
||||
return divide(len(n.selectedVolumes)+1, int(n.info.MaxVolumeCount))
|
||||
func capacityByFreeVolumeCount(diskType types.DiskType) CapacityFunc {
|
||||
return func(info *master_pb.DataNodeInfo) int {
|
||||
diskInfo, found := info.DiskInfos[string(diskType)]
|
||||
if !found {
|
||||
return 0
|
||||
}
|
||||
return int(diskInfo.MaxVolumeCount - diskInfo.VolumeCount)
|
||||
}
|
||||
}
|
||||
|
||||
func (n *Node) localVolumeRatio(capacityFunc CapacityFunc) float64 {
|
||||
return divide(len(n.selectedVolumes), capacityFunc(n.info))
|
||||
}
|
||||
|
||||
func (n *Node) localVolumeNextRatio(capacityFunc CapacityFunc) float64 {
|
||||
return divide(len(n.selectedVolumes)+1, capacityFunc(n.info))
|
||||
}
|
||||
|
||||
func (n *Node) selectVolumes(fn func(v *master_pb.VolumeInformationMessage) bool) {
|
||||
n.selectedVolumes = make(map[uint32]*master_pb.VolumeInformationMessage)
|
||||
for _, v := range n.info.VolumeInfos {
|
||||
if fn(v) {
|
||||
n.selectedVolumes[v.Id] = v
|
||||
for _, diskInfo := range n.info.DiskInfos {
|
||||
for _, v := range diskInfo.VolumeInfos {
|
||||
if fn(v) {
|
||||
n.selectedVolumes[v.Id] = v
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
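To illustrate how the new CapacityFunc plugs into the ratio helpers above, here is a minimal sketch with an invented DataNodeInfo (the counts and the "ssd" entry are made up for illustration and are not part of this commit):

    // sketch only: assumes the shell package context shown in this diff
    info := &master_pb.DataNodeInfo{
        DiskInfos: map[string]*master_pb.DiskInfo{
            "ssd": &master_pb.DiskInfo{MaxVolumeCount: 10, VolumeCount: 4},
        },
    }
    capFn := capacityByMaxVolumeCount(types.ToDiskType("ssd"))
    fmt.Println(capFn(info))  // 10: total ssd volume slots on this node
    freeFn := capacityByFreeVolumeCount(types.ToDiskType("ssd"))
    fmt.Println(freeFn(info)) // 6: 10 max minus 4 existing volumes
    // a Node holding 4 selected volumes on this server would report
    // localVolumeRatio(capFn) == divide(4, 10) == 0.4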
@@ -198,33 +254,40 @@ func sortReadOnlyVolumes(volumes []*master_pb.VolumeInformationMessage) {
|
||||
})
|
||||
}
|
||||
|
||||
func balanceSelectedVolume(commandEnv *CommandEnv, volumeReplicas map[uint32][]*VolumeReplica, nodes []*Node, sortCandidatesFn func(volumes []*master_pb.VolumeInformationMessage), applyBalancing bool) (err error) {
|
||||
func balanceSelectedVolume(commandEnv *CommandEnv, volumeReplicas map[uint32][]*VolumeReplica, nodes []*Node, capacityFunc CapacityFunc, sortCandidatesFn func(volumes []*master_pb.VolumeInformationMessage), applyBalancing bool) (err error) {
|
||||
selectedVolumeCount, volumeMaxCount := 0, 0
|
||||
var nodesWithCapacity []*Node
|
||||
for _, dn := range nodes {
|
||||
selectedVolumeCount += len(dn.selectedVolumes)
|
||||
volumeMaxCount += int(dn.info.MaxVolumeCount)
|
||||
capacity := capacityFunc(dn.info)
|
||||
if capacity > 0 {
|
||||
nodesWithCapacity = append(nodesWithCapacity, dn)
|
||||
}
|
||||
volumeMaxCount += capacity
|
||||
}
|
||||
|
||||
idealVolumeRatio := divide(selectedVolumeCount, volumeMaxCount)
|
||||
|
||||
hasMoved := true
|
||||
|
||||
// fmt.Fprintf(os.Stdout, " total %d volumes, max %d volumes, idealVolumeRatio %f\n", selectedVolumeCount, volumeMaxCount, idealVolumeRatio)
|
||||
|
||||
for hasMoved {
|
||||
hasMoved = false
|
||||
sort.Slice(nodes, func(i, j int) bool {
|
||||
return nodes[i].localVolumeRatio() < nodes[j].localVolumeRatio()
|
||||
sort.Slice(nodesWithCapacity, func(i, j int) bool {
|
||||
return nodesWithCapacity[i].localVolumeRatio(capacityFunc) < nodesWithCapacity[j].localVolumeRatio(capacityFunc)
|
||||
})
|
||||
|
||||
fullNode := nodes[len(nodes)-1]
|
||||
fullNode := nodesWithCapacity[len(nodesWithCapacity)-1]
|
||||
var candidateVolumes []*master_pb.VolumeInformationMessage
|
||||
for _, v := range fullNode.selectedVolumes {
|
||||
candidateVolumes = append(candidateVolumes, v)
|
||||
}
|
||||
sortCandidatesFn(candidateVolumes)
|
||||
|
||||
for i := 0; i < len(nodes)-1; i++ {
|
||||
emptyNode := nodes[i]
|
||||
if !(fullNode.localVolumeRatio() > idealVolumeRatio && emptyNode.localVolumeNextRatio() <= idealVolumeRatio) {
|
||||
for i := 0; i < len(nodesWithCapacity)-1; i++ {
|
||||
emptyNode := nodesWithCapacity[i]
|
||||
if !(fullNode.localVolumeRatio(capacityFunc) > idealVolumeRatio && emptyNode.localVolumeNextRatio(capacityFunc) <= idealVolumeRatio) {
|
||||
// no more volume servers with empty slots
|
||||
break
|
||||
}
|
||||
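To make the balancing loop above concrete, a worked example with invented numbers: suppose three nodes have capacities 10, 10 and 20 for the selected disk type and currently hold 8, 2 and 10 selected volumes.

    idealVolumeRatio         = (8 + 2 + 10) / (10 + 10 + 20) = 20 / 40 = 0.5
    fullest node ratio       = 8 / 10 = 0.8        (sorted last, so it donates volumes)
    emptiest node next ratio = (2 + 1) / 10 = 0.3  <= 0.5, so it may receive one volume

Moves repeat until the fullest node drops to the ideal ratio or no receiving node would stay at or below it.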
@@ -279,9 +342,9 @@ func moveVolume(commandEnv *CommandEnv, v *master_pb.VolumeInformationMessage, f
|
||||
if v.Collection == "" {
|
||||
collectionPrefix = ""
|
||||
}
|
||||
fmt.Fprintf(os.Stdout, "moving volume %s%d %s => %s\n", collectionPrefix, v.Id, fullNode.info.Id, emptyNode.info.Id)
|
||||
fmt.Fprintf(os.Stdout, " moving %s volume %s%d %s => %s\n", v.DiskType, collectionPrefix, v.Id, fullNode.info.Id, emptyNode.info.Id)
|
||||
if applyChange {
|
||||
return LiveMoveVolume(commandEnv.option.GrpcDialOption, needle.VolumeId(v.Id), fullNode.info.Id, emptyNode.info.Id, 5*time.Second)
|
||||
return LiveMoveVolume(commandEnv.option.GrpcDialOption, needle.VolumeId(v.Id), fullNode.info.Id, emptyNode.info.Id, 5*time.Second, v.DiskType)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -71,10 +71,12 @@ func (c *commandVolumeConfigureReplication) Do(args []string, commandEnv *Comman
|
||||
var allLocations []location
|
||||
eachDataNode(resp.TopologyInfo, func(dc string, rack RackId, dn *master_pb.DataNodeInfo) {
|
||||
loc := newLocation(dc, string(rack), dn)
|
||||
for _, v := range dn.VolumeInfos {
|
||||
if v.Id == uint32(vid) && v.ReplicaPlacement != replicaPlacementInt32 {
|
||||
allLocations = append(allLocations, loc)
|
||||
continue
|
||||
for _, diskInfo := range dn.DiskInfos {
|
||||
for _, v := range diskInfo.VolumeInfos {
|
||||
if v.Id == uint32(vid) && v.ReplicaPlacement != replicaPlacementInt32 {
|
||||
allLocations = append(allLocations, loc)
|
||||
continue
|
||||
}
|
||||
}
|
||||
}
|
||||
})
|
||||
|
||||
@@ -52,6 +52,6 @@ func (c *commandVolumeCopy) Do(args []string, commandEnv *CommandEnv, writer io.
|
||||
return fmt.Errorf("source and target volume servers are the same!")
|
||||
}
|
||||
|
||||
_, err = copyVolume(commandEnv.option.GrpcDialOption, volumeId, sourceVolumeServer, targetVolumeServer)
|
||||
_, err = copyVolume(commandEnv.option.GrpcDialOption, volumeId, sourceVolumeServer, targetVolumeServer, "")
|
||||
return
|
||||
}
|
||||
|
||||
@@ -5,6 +5,7 @@ import (
|
||||
"flag"
|
||||
"fmt"
|
||||
"github.com/chrislusf/seaweedfs/weed/storage/needle"
|
||||
"github.com/chrislusf/seaweedfs/weed/storage/types"
|
||||
"io"
|
||||
"path/filepath"
|
||||
"sort"
|
||||
@@ -102,8 +103,6 @@ func (c *commandVolumeFixReplication) Do(args []string, commandEnv *CommandEnv,
|
||||
}
|
||||
|
||||
// find the most under populated data nodes
|
||||
keepDataNodesSorted(allLocations)
|
||||
|
||||
return c.fixUnderReplicatedVolumes(commandEnv, writer, takeAction, underReplicatedVolumeIds, volumeReplicas, allLocations)
|
||||
|
||||
}
|
||||
@@ -113,11 +112,13 @@ func collectVolumeReplicaLocations(resp *master_pb.VolumeListResponse) (map[uint
|
||||
var allLocations []location
|
||||
eachDataNode(resp.TopologyInfo, func(dc string, rack RackId, dn *master_pb.DataNodeInfo) {
|
||||
loc := newLocation(dc, string(rack), dn)
|
||||
for _, v := range dn.VolumeInfos {
|
||||
volumeReplicas[v.Id] = append(volumeReplicas[v.Id], &VolumeReplica{
|
||||
location: &loc,
|
||||
info: v,
|
||||
})
|
||||
for _, diskInfo := range dn.DiskInfos {
|
||||
for _, v := range diskInfo.VolumeInfos {
|
||||
volumeReplicas[v.Id] = append(volumeReplicas[v.Id], &VolumeReplica{
|
||||
location: &loc,
|
||||
info: v,
|
||||
})
|
||||
}
|
||||
}
|
||||
allLocations = append(allLocations, loc)
|
||||
})
|
||||
@@ -157,15 +158,18 @@ func (c *commandVolumeFixReplication) fixOverReplicatedVolumes(commandEnv *Comma
|
||||
}
|
||||
|
||||
func (c *commandVolumeFixReplication) fixUnderReplicatedVolumes(commandEnv *CommandEnv, writer io.Writer, takeAction bool, underReplicatedVolumeIds []uint32, volumeReplicas map[uint32][]*VolumeReplica, allLocations []location) error {
|
||||
|
||||
for _, vid := range underReplicatedVolumeIds {
|
||||
replicas := volumeReplicas[vid]
|
||||
replica := pickOneReplicaToCopyFrom(replicas)
|
||||
replicaPlacement, _ := super_block.NewReplicaPlacementFromByte(byte(replica.info.ReplicaPlacement))
|
||||
foundNewLocation := false
|
||||
hasSkippedCollection := false
|
||||
keepDataNodesSorted(allLocations, replica.info.DiskType)
|
||||
for _, dst := range allLocations {
|
||||
// check whether data nodes satisfy the constraints
|
||||
if dst.dataNode.FreeVolumeCount > 0 && satisfyReplicaPlacement(replicaPlacement, replicas, dst) {
|
||||
fn := capacityByFreeVolumeCount(types.ToDiskType(replica.info.DiskType))
|
||||
if fn(dst.dataNode) > 0 && satisfyReplicaPlacement(replicaPlacement, replicas, dst) {
|
||||
// check collection name pattern
|
||||
if *c.collectionPattern != "" {
|
||||
matched, err := filepath.Match(*c.collectionPattern, replica.info.Collection)
|
||||
@@ -202,11 +206,11 @@ func (c *commandVolumeFixReplication) fixUnderReplicatedVolumes(commandEnv *Comm
|
||||
}
|
||||
|
||||
// adjust free volume count
|
||||
dst.dataNode.FreeVolumeCount--
|
||||
keepDataNodesSorted(allLocations)
|
||||
dst.dataNode.DiskInfos[replica.info.DiskType].FreeVolumeCount--
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if !foundNewLocation && !hasSkippedCollection {
|
||||
fmt.Fprintf(writer, "failed to place volume %d replica as %s, existing:%+v\n", replica.info.Id, replicaPlacement, len(replicas))
|
||||
}
|
||||
@@ -215,9 +219,10 @@ func (c *commandVolumeFixReplication) fixUnderReplicatedVolumes(commandEnv *Comm
|
||||
return nil
|
||||
}
|
||||
|
||||
func keepDataNodesSorted(dataNodes []location) {
|
||||
func keepDataNodesSorted(dataNodes []location, diskType string) {
|
||||
fn := capacityByFreeVolumeCount(types.ToDiskType(diskType))
|
||||
sort.Slice(dataNodes, func(i, j int) bool {
|
||||
return dataNodes[i].dataNode.FreeVolumeCount > dataNodes[j].dataNode.FreeVolumeCount
|
||||
return fn(dataNodes[i].dataNode) > fn(dataNodes[j].dataNode)
|
||||
})
|
||||
}
|
||||
|
||||
|
||||
@@ -285,18 +285,20 @@ func (c *commandVolumeFsck) collectVolumeIds(verbose bool, writer io.Writer) (vo
|
||||
}
|
||||
|
||||
eachDataNode(resp.TopologyInfo, func(dc string, rack RackId, t *master_pb.DataNodeInfo) {
|
||||
for _, vi := range t.VolumeInfos {
|
||||
volumeIdToServer[vi.Id] = VInfo{
|
||||
server: t.Id,
|
||||
collection: vi.Collection,
|
||||
isEcVolume: false,
|
||||
for _, diskInfo := range t.DiskInfos {
|
||||
for _, vi := range diskInfo.VolumeInfos {
|
||||
volumeIdToServer[vi.Id] = VInfo{
|
||||
server: t.Id,
|
||||
collection: vi.Collection,
|
||||
isEcVolume: false,
|
||||
}
|
||||
}
|
||||
}
|
||||
for _, ecShardInfo := range t.EcShardInfos {
|
||||
volumeIdToServer[ecShardInfo.Id] = VInfo{
|
||||
server: t.Id,
|
||||
collection: ecShardInfo.Collection,
|
||||
isEcVolume: true,
|
||||
for _, ecShardInfo := range diskInfo.EcShardInfos {
|
||||
volumeIdToServer[ecShardInfo.Id] = VInfo{
|
||||
server: t.Id,
|
||||
collection: ecShardInfo.Collection,
|
||||
isEcVolume: true,
|
||||
}
|
||||
}
|
||||
}
|
||||
})
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
package shell
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"fmt"
|
||||
"github.com/chrislusf/seaweedfs/weed/pb/master_pb"
|
||||
@@ -44,8 +45,25 @@ func (c *commandVolumeList) Do(args []string, commandEnv *CommandEnv, writer io.
|
||||
return nil
|
||||
}
|
||||
|
||||
func diskInfosToString(diskInfos map[string]*master_pb.DiskInfo) string {
|
||||
var buf bytes.Buffer
|
||||
for diskType, diskInfo := range diskInfos {
|
||||
if diskType == "" {
|
||||
diskType = "hdd"
|
||||
}
|
||||
fmt.Fprintf(&buf, " %s(volume:%d/%d active:%d free:%d remote:%d)", diskType, diskInfo.VolumeCount, diskInfo.MaxVolumeCount, diskInfo.ActiveVolumeCount, diskInfo.FreeVolumeCount, diskInfo.RemoteVolumeCount)
|
||||
}
|
||||
return buf.String()
|
||||
}
|
||||
|
||||
func diskInfoToString(diskInfo *master_pb.DiskInfo) string {
|
||||
var buf bytes.Buffer
|
||||
fmt.Fprintf(&buf, "volume:%d/%d active:%d free:%d remote:%d", diskInfo.VolumeCount, diskInfo.MaxVolumeCount, diskInfo.ActiveVolumeCount, diskInfo.FreeVolumeCount, diskInfo.RemoteVolumeCount)
|
||||
return buf.String()
|
||||
}
|
||||
|
||||
func writeTopologyInfo(writer io.Writer, t *master_pb.TopologyInfo, volumeSizeLimitMb uint64) statistics {
|
||||
fmt.Fprintf(writer, "Topology volume:%d/%d active:%d free:%d remote:%d volumeSizeLimit:%d MB\n", t.VolumeCount, t.MaxVolumeCount, t.ActiveVolumeCount, t.FreeVolumeCount, t.RemoteVolumeCount, volumeSizeLimitMb)
|
||||
fmt.Fprintf(writer, "Topology volumeSizeLimit:%d MB%s\n", volumeSizeLimitMb, diskInfosToString(t.DiskInfos))
|
||||
sort.Slice(t.DataCenterInfos, func(i, j int) bool {
|
||||
return t.DataCenterInfos[i].Id < t.DataCenterInfos[j].Id
|
||||
})
|
||||
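With the per-disk formatting introduced above, a topology header now renders roughly like this (counts are illustrative, derived only from the Fprintf format strings in this hunk):

    Topology volumeSizeLimit:30000 MB hdd(volume:8/100 active:8 free:92 remote:0) ssd(volume:2/20 active:2 free:18 remote:0)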
@@ -57,7 +75,7 @@ func writeTopologyInfo(writer io.Writer, t *master_pb.TopologyInfo, volumeSizeLi
|
||||
return s
|
||||
}
|
||||
func writeDataCenterInfo(writer io.Writer, t *master_pb.DataCenterInfo) statistics {
|
||||
fmt.Fprintf(writer, " DataCenter %s volume:%d/%d active:%d free:%d remote:%d\n", t.Id, t.VolumeCount, t.MaxVolumeCount, t.ActiveVolumeCount, t.FreeVolumeCount, t.RemoteVolumeCount)
|
||||
fmt.Fprintf(writer, " DataCenter %s%s\n", t.Id, diskInfosToString(t.DiskInfos))
|
||||
var s statistics
|
||||
sort.Slice(t.RackInfos, func(i, j int) bool {
|
||||
return t.RackInfos[i].Id < t.RackInfos[j].Id
|
||||
@@ -69,7 +87,7 @@ func writeDataCenterInfo(writer io.Writer, t *master_pb.DataCenterInfo) statisti
|
||||
return s
|
||||
}
|
||||
func writeRackInfo(writer io.Writer, t *master_pb.RackInfo) statistics {
|
||||
fmt.Fprintf(writer, " Rack %s volume:%d/%d active:%d free:%d remote:%d\n", t.Id, t.VolumeCount, t.MaxVolumeCount, t.ActiveVolumeCount, t.FreeVolumeCount, t.RemoteVolumeCount)
|
||||
fmt.Fprintf(writer, " Rack %s%s\n", t.Id, diskInfosToString(t.DiskInfos))
|
||||
var s statistics
|
||||
sort.Slice(t.DataNodeInfos, func(i, j int) bool {
|
||||
return t.DataNodeInfos[i].Id < t.DataNodeInfos[j].Id
|
||||
@@ -81,8 +99,22 @@ func writeRackInfo(writer io.Writer, t *master_pb.RackInfo) statistics {
|
||||
return s
|
||||
}
|
||||
func writeDataNodeInfo(writer io.Writer, t *master_pb.DataNodeInfo) statistics {
|
||||
fmt.Fprintf(writer, " DataNode %s volume:%d/%d active:%d free:%d remote:%d\n", t.Id, t.VolumeCount, t.MaxVolumeCount, t.ActiveVolumeCount, t.FreeVolumeCount, t.RemoteVolumeCount)
|
||||
fmt.Fprintf(writer, " DataNode %s%s\n", t.Id, diskInfosToString(t.DiskInfos))
|
||||
var s statistics
|
||||
for _, diskInfo := range t.DiskInfos {
|
||||
s = s.plus(writeDiskInfo(writer, diskInfo))
|
||||
}
|
||||
fmt.Fprintf(writer, " DataNode %s %+v \n", t.Id, s)
|
||||
return s
|
||||
}
|
||||
|
||||
func writeDiskInfo(writer io.Writer, t *master_pb.DiskInfo) statistics {
|
||||
var s statistics
|
||||
diskType := t.Type
|
||||
if diskType == "" {
|
||||
diskType = "hdd"
|
||||
}
|
||||
fmt.Fprintf(writer, " Disk %s(%s)\n", diskType, diskInfoToString(t))
|
||||
sort.Slice(t.VolumeInfos, func(i, j int) bool {
|
||||
return t.VolumeInfos[i].Id < t.VolumeInfos[j].Id
|
||||
})
|
||||
@@ -90,13 +122,14 @@ func writeDataNodeInfo(writer io.Writer, t *master_pb.DataNodeInfo) statistics {
|
||||
s = s.plus(writeVolumeInformationMessage(writer, vi))
|
||||
}
|
||||
for _, ecShardInfo := range t.EcShardInfos {
|
||||
fmt.Fprintf(writer, " ec volume id:%v collection:%v shards:%v\n", ecShardInfo.Id, ecShardInfo.Collection, erasure_coding.ShardBits(ecShardInfo.EcIndexBits).ShardIds())
|
||||
fmt.Fprintf(writer, " ec volume id:%v collection:%v shards:%v\n", ecShardInfo.Id, ecShardInfo.Collection, erasure_coding.ShardBits(ecShardInfo.EcIndexBits).ShardIds())
|
||||
}
|
||||
fmt.Fprintf(writer, " DataNode %s %+v \n", t.Id, s)
|
||||
fmt.Fprintf(writer, " Disk %s %+v \n", diskType, s)
|
||||
return s
|
||||
}
|
||||
|
||||
func writeVolumeInformationMessage(writer io.Writer, t *master_pb.VolumeInformationMessage) statistics {
|
||||
fmt.Fprintf(writer, " volume %+v \n", t)
|
||||
fmt.Fprintf(writer, " volume %+v \n", t)
|
||||
return newStatistics(t)
|
||||
}
|
||||
|
||||
|
||||
@@ -29,6 +29,7 @@ func (c *commandVolumeMove) Help() string {
|
||||
return `move a live volume from one volume server to another volume server
|
||||
|
||||
volume.move -source <source volume server host:port> -target <target volume server host:port> -volumeId <volume id>
|
||||
volume.move -source <source volume server host:port> -target <target volume server host:port> -volumeId <volume id> -disk [hdd|ssd]
|
||||
|
||||
This command move a live volume from one volume server to another volume server. Here are the steps:
|
||||
|
||||
@@ -40,6 +41,8 @@ func (c *commandVolumeMove) Help() string {
|
||||
Now the master will mark this volume id as writable.
|
||||
5. This command asks the source volume server to delete the source volume
|
||||
|
||||
The option "-disk [hdd|ssd]" can be used to change the volume disk type.
|
||||
|
||||
`
|
||||
}
|
||||
|
||||
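Putting the help text above together with the -disk flag registered in the next hunk, a weed shell invocation could look like this (hosts and volume id are hypothetical):

    volume.move -source 192.168.0.1:8080 -target 192.168.0.2:8080 -volumeId 37 -disk ssd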
@@ -53,6 +56,7 @@ func (c *commandVolumeMove) Do(args []string, commandEnv *CommandEnv, writer io.
|
||||
volumeIdInt := volMoveCommand.Int("volumeId", 0, "the volume id")
|
||||
sourceNodeStr := volMoveCommand.String("source", "", "the source volume server <host>:<port>")
|
||||
targetNodeStr := volMoveCommand.String("target", "", "the target volume server <host>:<port>")
|
||||
diskTypeStr := volMoveCommand.String("disk", "", "[hdd|ssd] hard drive or solid state drive")
|
||||
if err = volMoveCommand.Parse(args); err != nil {
|
||||
return nil
|
||||
}
|
||||
@@ -65,14 +69,14 @@ func (c *commandVolumeMove) Do(args []string, commandEnv *CommandEnv, writer io.
|
||||
return fmt.Errorf("source and target volume servers are the same!")
|
||||
}
|
||||
|
||||
return LiveMoveVolume(commandEnv.option.GrpcDialOption, volumeId, sourceVolumeServer, targetVolumeServer, 5*time.Second)
|
||||
return LiveMoveVolume(commandEnv.option.GrpcDialOption, volumeId, sourceVolumeServer, targetVolumeServer, 5*time.Second, *diskTypeStr)
|
||||
}
|
||||
|
||||
// LiveMoveVolume moves one volume from one source volume server to one target volume server, with idleTimeout to drain the incoming requests.
|
||||
func LiveMoveVolume(grpcDialOption grpc.DialOption, volumeId needle.VolumeId, sourceVolumeServer, targetVolumeServer string, idleTimeout time.Duration) (err error) {
|
||||
func LiveMoveVolume(grpcDialOption grpc.DialOption, volumeId needle.VolumeId, sourceVolumeServer, targetVolumeServer string, idleTimeout time.Duration, diskType string) (err error) {
|
||||
|
||||
log.Printf("copying volume %d from %s to %s", volumeId, sourceVolumeServer, targetVolumeServer)
|
||||
lastAppendAtNs, err := copyVolume(grpcDialOption, volumeId, sourceVolumeServer, targetVolumeServer)
|
||||
lastAppendAtNs, err := copyVolume(grpcDialOption, volumeId, sourceVolumeServer, targetVolumeServer, diskType)
|
||||
if err != nil {
|
||||
return fmt.Errorf("copy volume %d from %s to %s: %v", volumeId, sourceVolumeServer, targetVolumeServer, err)
|
||||
}
|
||||
@@ -91,7 +95,7 @@ func LiveMoveVolume(grpcDialOption grpc.DialOption, volumeId needle.VolumeId, so
|
||||
return nil
|
||||
}
|
||||
|
||||
func copyVolume(grpcDialOption grpc.DialOption, volumeId needle.VolumeId, sourceVolumeServer, targetVolumeServer string) (lastAppendAtNs uint64, err error) {
|
||||
func copyVolume(grpcDialOption grpc.DialOption, volumeId needle.VolumeId, sourceVolumeServer, targetVolumeServer string, diskType string) (lastAppendAtNs uint64, err error) {
|
||||
|
||||
// check to see if the volume is already read-only and if its not then we need
|
||||
// to mark it as read-only and then before we return we need to undo what we
|
||||
@@ -134,6 +138,7 @@ func copyVolume(grpcDialOption grpc.DialOption, volumeId needle.VolumeId, source
|
||||
resp, replicateErr := volumeServerClient.VolumeCopy(context.Background(), &volume_server_pb.VolumeCopyRequest{
|
||||
VolumeId: uint32(volumeId),
|
||||
SourceDataNode: sourceVolumeServer,
|
||||
DiskType: diskType,
|
||||
})
|
||||
if replicateErr == nil {
|
||||
lastAppendAtNs = resp.LastAppendAtNs
|
||||
|
||||
@@ -8,6 +8,7 @@ import (
|
||||
"github.com/chrislusf/seaweedfs/weed/storage/erasure_coding"
|
||||
"github.com/chrislusf/seaweedfs/weed/storage/needle"
|
||||
"github.com/chrislusf/seaweedfs/weed/storage/super_block"
|
||||
"github.com/chrislusf/seaweedfs/weed/storage/types"
|
||||
"io"
|
||||
"os"
|
||||
"sort"
|
||||
@@ -100,17 +101,19 @@ func evacuateNormalVolumes(commandEnv *CommandEnv, resp *master_pb.VolumeListRes
|
||||
|
||||
// move away normal volumes
|
||||
volumeReplicas, _ := collectVolumeReplicaLocations(resp)
|
||||
for _, vol := range thisNode.info.VolumeInfos {
|
||||
hasMoved, err := moveAwayOneNormalVolume(commandEnv, volumeReplicas, vol, thisNode, otherNodes, applyChange)
|
||||
if err != nil {
|
||||
return fmt.Errorf("move away volume %d from %s: %v", vol.Id, volumeServer, err)
|
||||
}
|
||||
if !hasMoved {
|
||||
if skipNonMoveable {
|
||||
replicaPlacement, _ := super_block.NewReplicaPlacementFromByte(byte(vol.ReplicaPlacement))
|
||||
fmt.Fprintf(writer, "skipping non moveable volume %d replication:%s\n", vol.Id, replicaPlacement.String())
|
||||
} else {
|
||||
return fmt.Errorf("failed to move volume %d from %s", vol.Id, volumeServer)
|
||||
for _, diskInfo := range thisNode.info.DiskInfos {
|
||||
for _, vol := range diskInfo.VolumeInfos {
|
||||
hasMoved, err := moveAwayOneNormalVolume(commandEnv, volumeReplicas, vol, thisNode, otherNodes, applyChange)
|
||||
if err != nil {
|
||||
return fmt.Errorf("move away volume %d from %s: %v", vol.Id, volumeServer, err)
|
||||
}
|
||||
if !hasMoved {
|
||||
if skipNonMoveable {
|
||||
replicaPlacement, _ := super_block.NewReplicaPlacementFromByte(byte(vol.ReplicaPlacement))
|
||||
fmt.Fprintf(writer, "skipping non moveable volume %d replication:%s\n", vol.Id, replicaPlacement.String())
|
||||
} else {
|
||||
return fmt.Errorf("failed to move volume %d from %s", vol.Id, volumeServer)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -126,16 +129,18 @@ func evacuateEcVolumes(commandEnv *CommandEnv, resp *master_pb.VolumeListRespons
|
||||
}
|
||||
|
||||
// move away ec volumes
|
||||
for _, ecShardInfo := range thisNode.info.EcShardInfos {
|
||||
hasMoved, err := moveAwayOneEcVolume(commandEnv, ecShardInfo, thisNode, otherNodes, applyChange)
|
||||
if err != nil {
|
||||
return fmt.Errorf("move away volume %d from %s: %v", ecShardInfo.Id, volumeServer, err)
|
||||
}
|
||||
if !hasMoved {
|
||||
if skipNonMoveable {
|
||||
fmt.Fprintf(writer, "failed to move away ec volume %d from %s\n", ecShardInfo.Id, volumeServer)
|
||||
} else {
|
||||
return fmt.Errorf("failed to move away ec volume %d from %s", ecShardInfo.Id, volumeServer)
|
||||
for _, diskInfo := range thisNode.info.DiskInfos {
|
||||
for _, ecShardInfo := range diskInfo.EcShardInfos {
|
||||
hasMoved, err := moveAwayOneEcVolume(commandEnv, ecShardInfo, thisNode, otherNodes, applyChange)
|
||||
if err != nil {
|
||||
return fmt.Errorf("move away volume %d from %s: %v", ecShardInfo.Id, volumeServer, err)
|
||||
}
|
||||
if !hasMoved {
|
||||
if skipNonMoveable {
|
||||
fmt.Fprintf(writer, "failed to move away ec volume %d from %s\n", ecShardInfo.Id, volumeServer)
|
||||
} else {
|
||||
return fmt.Errorf("failed to move away ec volume %d from %s", ecShardInfo.Id, volumeServer)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -174,8 +179,9 @@ func moveAwayOneEcVolume(commandEnv *CommandEnv, ecShardInfo *master_pb.VolumeEc
|
||||
}
|
||||
|
||||
func moveAwayOneNormalVolume(commandEnv *CommandEnv, volumeReplicas map[uint32][]*VolumeReplica, vol *master_pb.VolumeInformationMessage, thisNode *Node, otherNodes []*Node, applyChange bool) (hasMoved bool, err error) {
|
||||
fn := capacityByFreeVolumeCount(types.ToDiskType(vol.DiskType))
|
||||
sort.Slice(otherNodes, func(i, j int) bool {
|
||||
return otherNodes[i].localVolumeRatio() < otherNodes[j].localVolumeRatio()
|
||||
return otherNodes[i].localVolumeRatio(fn) > otherNodes[j].localVolumeRatio(fn)
|
||||
})
|
||||
|
||||
for i := 0; i < len(otherNodes); i++ {
|
||||
|
||||
@@ -86,9 +86,11 @@ func collectRemoteVolumes(topoInfo *master_pb.TopologyInfo, selectedCollection s
|
||||
|
||||
vidMap := make(map[uint32]bool)
|
||||
eachDataNode(topoInfo, func(dc string, rack RackId, dn *master_pb.DataNodeInfo) {
|
||||
for _, v := range dn.VolumeInfos {
|
||||
if v.Collection == selectedCollection && v.RemoteStorageKey != "" && v.RemoteStorageName != "" {
|
||||
vidMap[v.Id] = true
|
||||
for _, diskInfo := range dn.DiskInfos {
|
||||
for _, v := range diskInfo.VolumeInfos {
|
||||
if v.Collection == selectedCollection && v.RemoteStorageKey != "" && v.RemoteStorageName != "" {
|
||||
vidMap[v.Id] = true
|
||||
}
|
||||
}
|
||||
}
|
||||
})
|
||||
|
||||
weed/shell/command_volume_tier_move.go (new file, 108 lines)
@@ -0,0 +1,108 @@
|
||||
package shell
|
||||
|
||||
import (
|
||||
"context"
|
||||
"flag"
|
||||
"fmt"
|
||||
"github.com/chrislusf/seaweedfs/weed/pb/master_pb"
|
||||
"github.com/chrislusf/seaweedfs/weed/storage/types"
|
||||
"io"
|
||||
"time"
|
||||
|
||||
"github.com/chrislusf/seaweedfs/weed/storage/needle"
|
||||
)
|
||||
|
||||
func init() {
|
||||
Commands = append(Commands, &commandVolumeTierMove{})
|
||||
}
|
||||
|
||||
type commandVolumeTierMove struct {
|
||||
}
|
||||
|
||||
func (c *commandVolumeTierMove) Name() string {
|
||||
return "volume.tier.upload"
|
||||
}
|
||||
|
||||
func (c *commandVolumeTierMove) Help() string {
|
||||
return `change a volume from one disk type to another
|
||||
|
||||
volume.tier.move -source=hdd -target=ssd [-collection=""] [-fullPercent=95] [-quietFor=1h]
|
||||
volume.tier.move -target=hdd [-collection=""] -volumeId=<volume_id>
|
||||
|
||||
`
|
||||
}
|
||||
|
||||
func (c *commandVolumeTierMove) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) {
|
||||
|
||||
if err = commandEnv.confirmIsLocked(); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
tierCommand := flag.NewFlagSet(c.Name(), flag.ContinueOnError)
|
||||
volumeId := tierCommand.Int("volumeId", 0, "the volume id")
|
||||
collection := tierCommand.String("collection", "", "the collection name")
|
||||
fullPercentage := tierCommand.Float64("fullPercent", 95, "the volume reaches the percentage of max volume size")
|
||||
quietPeriod := tierCommand.Duration("quietFor", 24*time.Hour, "select volumes without no writes for this period")
|
||||
source := tierCommand.String("fromDiskType", "", "the source disk type")
|
||||
target := tierCommand.String("toDiskType", "", "the target disk type")
|
||||
if err = tierCommand.Parse(args); err != nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
if *source == *target {
|
||||
return fmt.Errorf("source tier %s is the same as target tier %s", *source, *target)
|
||||
}
|
||||
|
||||
vid := needle.VolumeId(*volumeId)
|
||||
|
||||
// volumeId is provided
|
||||
if vid != 0 {
|
||||
// return doVolumeTierMove(commandEnv, writer, *collection, vid, *dest, *keepLocalDatFile)
|
||||
}
|
||||
|
||||
// apply to all volumes in the collection
|
||||
// reusing collectVolumeIdsForEcEncode for now
|
||||
volumeIds, err := collectVolumeIdsForTierChange(commandEnv, *source, *collection, *fullPercentage, *quietPeriod)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
fmt.Printf("tier move volumes: %v\n", volumeIds)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func collectVolumeIdsForTierChange(commandEnv *CommandEnv, sourceTier string, selectedCollection string, fullPercentage float64, quietPeriod time.Duration) (vids []needle.VolumeId, err error) {
|
||||
|
||||
var resp *master_pb.VolumeListResponse
|
||||
err = commandEnv.MasterClient.WithClient(func(client master_pb.SeaweedClient) error {
|
||||
resp, err = client.VolumeList(context.Background(), &master_pb.VolumeListRequest{})
|
||||
return err
|
||||
})
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
quietSeconds := int64(quietPeriod / time.Second)
|
||||
nowUnixSeconds := time.Now().Unix()
|
||||
|
||||
fmt.Printf("collect %s volumes quiet for: %d seconds\n", sourceTier, quietSeconds)
|
||||
|
||||
vidMap := make(map[uint32]bool)
|
||||
eachDataNode(resp.TopologyInfo, func(dc string, rack RackId, dn *master_pb.DataNodeInfo) {
|
||||
for _, diskInfo := range dn.DiskInfos {
|
||||
for _, v := range diskInfo.VolumeInfos {
|
||||
if v.Collection == selectedCollection && v.ModifiedAtSecond+quietSeconds < nowUnixSeconds && types.ToDiskType(v.DiskType) == types.ToDiskType(sourceTier) {
|
||||
if float64(v.Size) > fullPercentage/100*float64(resp.VolumeSizeLimitMb)*1024*1024 {
|
||||
vidMap[v.Id] = true
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
})
|
||||
|
||||
for vid := range vidMap {
|
||||
vids = append(vids, needle.VolumeId(vid))
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
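As a worked example of the size filter in collectVolumeIdsForTierChange above (numbers invented): with the master's volumeSizeLimitMb at 30000 and -fullPercent left at its default of 95, a volume qualifies once its size exceeds 95/100 * 30000 * 1024 * 1024 bytes, i.e. 28500 MB, and only if it has also been quiet for the -quietFor window and currently sits on the -fromDiskType tier.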
@@ -2,6 +2,7 @@ package storage
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/chrislusf/seaweedfs/weed/storage/types"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
@@ -19,6 +20,7 @@ import (
|
||||
type DiskLocation struct {
|
||||
Directory string
|
||||
IdxDirectory string
|
||||
DiskType types.DiskType
|
||||
MaxVolumeCount int
|
||||
OriginalMaxVolumeCount int
|
||||
MinFreeSpacePercent float32
|
||||
@@ -32,7 +34,7 @@ type DiskLocation struct {
|
||||
isDiskSpaceLow bool
|
||||
}
|
||||
|
||||
func NewDiskLocation(dir string, maxVolumeCount int, minFreeSpacePercent float32, idxDir string) *DiskLocation {
|
||||
func NewDiskLocation(dir string, maxVolumeCount int, minFreeSpacePercent float32, idxDir string, diskType types.DiskType) *DiskLocation {
|
||||
dir = util.ResolvePath(dir)
|
||||
if idxDir == "" {
|
||||
idxDir = dir
|
||||
@@ -42,6 +44,7 @@ func NewDiskLocation(dir string, maxVolumeCount int, minFreeSpacePercent float32
|
||||
location := &DiskLocation{
|
||||
Directory: dir,
|
||||
IdxDirectory: idxDir,
|
||||
DiskType: diskType,
|
||||
MaxVolumeCount: maxVolumeCount,
|
||||
OriginalMaxVolumeCount: maxVolumeCount,
|
||||
MinFreeSpacePercent: minFreeSpacePercent,
|
||||
|
||||
@@ -57,7 +57,7 @@ func (l *DiskLocation) FindEcShard(vid needle.VolumeId, shardId erasure_coding.S
|
||||
|
||||
func (l *DiskLocation) LoadEcShard(collection string, vid needle.VolumeId, shardId erasure_coding.ShardId) (err error) {
|
||||
|
||||
ecVolumeShard, err := erasure_coding.NewEcVolumeShard(l.Directory, collection, vid, shardId)
|
||||
ecVolumeShard, err := erasure_coding.NewEcVolumeShard(l.DiskType, l.Directory, collection, vid, shardId)
|
||||
if err != nil {
|
||||
if err == os.ErrNotExist {
|
||||
return os.ErrNotExist
|
||||
@@ -68,7 +68,7 @@ func (l *DiskLocation) LoadEcShard(collection string, vid needle.VolumeId, shard
|
||||
defer l.ecVolumesLock.Unlock()
|
||||
ecVolume, found := l.ecVolumes[vid]
|
||||
if !found {
|
||||
ecVolume, err = erasure_coding.NewEcVolume(l.Directory, l.IdxDirectory, collection, vid)
|
||||
ecVolume, err = erasure_coding.NewEcVolume(l.DiskType, l.Directory, l.IdxDirectory, collection, vid)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create ec volume %d: %v", vid, err)
|
||||
}
|
||||
|
||||
@@ -2,6 +2,7 @@ package erasure_coding
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/chrislusf/seaweedfs/weed/storage/types"
|
||||
"os"
|
||||
"path"
|
||||
"strconv"
|
||||
@@ -20,11 +21,12 @@ type EcVolumeShard struct {
|
||||
dir string
|
||||
ecdFile *os.File
|
||||
ecdFileSize int64
|
||||
DiskType types.DiskType
|
||||
}
|
||||
|
||||
func NewEcVolumeShard(dirname string, collection string, id needle.VolumeId, shardId ShardId) (v *EcVolumeShard, e error) {
|
||||
func NewEcVolumeShard(diskType types.DiskType, dirname string, collection string, id needle.VolumeId, shardId ShardId) (v *EcVolumeShard, e error) {
|
||||
|
||||
v = &EcVolumeShard{dir: dirname, Collection: collection, VolumeId: id, ShardId: shardId}
|
||||
v = &EcVolumeShard{dir: dirname, Collection: collection, VolumeId: id, ShardId: shardId, DiskType: diskType}
|
||||
|
||||
baseFileName := v.FileName()
|
||||
|
||||
|
||||
@@ -36,10 +36,11 @@ type EcVolume struct {
|
||||
Version needle.Version
|
||||
ecjFile *os.File
|
||||
ecjFileAccessLock sync.Mutex
|
||||
diskType types.DiskType
|
||||
}
|
||||
|
||||
func NewEcVolume(dir string, dirIdx string, collection string, vid needle.VolumeId) (ev *EcVolume, err error) {
|
||||
ev = &EcVolume{dir: dir, dirIdx: dirIdx, Collection: collection, VolumeId: vid}
|
||||
func NewEcVolume(diskType types.DiskType, dir string, dirIdx string, collection string, vid needle.VolumeId) (ev *EcVolume, err error) {
|
||||
ev = &EcVolume{dir: dir, dirIdx: dirIdx, Collection: collection, VolumeId: vid, diskType: diskType}
|
||||
|
||||
dataBaseFileName := EcShardFileName(collection, dir, int(vid))
|
||||
indexBaseFileName := EcShardFileName(collection, dirIdx, int(vid))
|
||||
@@ -191,6 +192,7 @@ func (ev *EcVolume) ToVolumeEcShardInformationMessage() (messages []*master_pb.V
|
||||
m = &master_pb.VolumeEcShardInformationMessage{
|
||||
Id: uint32(s.VolumeId),
|
||||
Collection: s.Collection,
|
||||
DiskType: string(ev.diskType),
|
||||
}
|
||||
messages = append(messages, m)
|
||||
}
|
||||
|
||||
@@ -10,13 +10,15 @@ type EcVolumeInfo struct {
|
||||
VolumeId needle.VolumeId
|
||||
Collection string
|
||||
ShardBits ShardBits
|
||||
DiskType string
|
||||
}
|
||||
|
||||
func NewEcVolumeInfo(collection string, vid needle.VolumeId, shardBits ShardBits) *EcVolumeInfo {
|
||||
func NewEcVolumeInfo(diskType string, collection string, vid needle.VolumeId, shardBits ShardBits) *EcVolumeInfo {
|
||||
return &EcVolumeInfo{
|
||||
Collection: collection,
|
||||
VolumeId: vid,
|
||||
ShardBits: shardBits,
|
||||
DiskType: diskType,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -45,6 +47,7 @@ func (ecInfo *EcVolumeInfo) Minus(other *EcVolumeInfo) *EcVolumeInfo {
|
||||
VolumeId: ecInfo.VolumeId,
|
||||
Collection: ecInfo.Collection,
|
||||
ShardBits: ecInfo.ShardBits.Minus(other.ShardBits),
|
||||
DiskType: ecInfo.DiskType,
|
||||
}
|
||||
|
||||
return ret
|
||||
@@ -55,6 +58,7 @@ func (ecInfo *EcVolumeInfo) ToVolumeEcShardInformationMessage() (ret *master_pb.
|
||||
Id: uint32(ecInfo.VolumeId),
|
||||
EcIndexBits: uint32(ecInfo.ShardBits),
|
||||
Collection: ecInfo.Collection,
|
||||
DiskType: ecInfo.DiskType,
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -52,11 +52,11 @@ func (s *Store) String() (str string) {
|
||||
return
|
||||
}
|
||||
|
||||
func NewStore(grpcDialOption grpc.DialOption, port int, ip, publicUrl string, dirnames []string, maxVolumeCounts []int, minFreeSpacePercents []float32, idxFolder string, needleMapKind NeedleMapKind) (s *Store) {
|
||||
func NewStore(grpcDialOption grpc.DialOption, port int, ip, publicUrl string, dirnames []string, maxVolumeCounts []int, minFreeSpacePercents []float32, idxFolder string, needleMapKind NeedleMapKind, diskTypes []DiskType) (s *Store) {
|
||||
s = &Store{grpcDialOption: grpcDialOption, Port: port, Ip: ip, PublicUrl: publicUrl, NeedleMapKind: needleMapKind}
|
||||
s.Locations = make([]*DiskLocation, 0)
|
||||
for i := 0; i < len(dirnames); i++ {
|
||||
location := NewDiskLocation(dirnames[i], maxVolumeCounts[i], minFreeSpacePercents[i], idxFolder)
|
||||
location := NewDiskLocation(dirnames[i], maxVolumeCounts[i], minFreeSpacePercents[i], idxFolder, diskTypes[i])
|
||||
location.loadExistingVolumes(needleMapKind)
|
||||
s.Locations = append(s.Locations, location)
|
||||
stats.VolumeServerMaxVolumeCounter.Add(float64(maxVolumeCounts[i]))
|
||||
@@ -69,7 +69,7 @@ func NewStore(grpcDialOption grpc.DialOption, port int, ip, publicUrl string, di
|
||||
|
||||
return
|
||||
}
|
||||
func (s *Store) AddVolume(volumeId needle.VolumeId, collection string, needleMapKind NeedleMapKind, replicaPlacement string, ttlString string, preallocate int64, MemoryMapMaxSizeMb uint32) error {
|
||||
func (s *Store) AddVolume(volumeId needle.VolumeId, collection string, needleMapKind NeedleMapKind, replicaPlacement string, ttlString string, preallocate int64, MemoryMapMaxSizeMb uint32, diskType DiskType) error {
|
||||
rt, e := super_block.NewReplicaPlacementFromString(replicaPlacement)
|
||||
if e != nil {
|
||||
return e
|
||||
@@ -78,7 +78,7 @@ func (s *Store) AddVolume(volumeId needle.VolumeId, collection string, needleMap
|
||||
if e != nil {
|
||||
return e
|
||||
}
|
||||
e = s.addVolume(volumeId, collection, needleMapKind, rt, ttl, preallocate, MemoryMapMaxSizeMb)
|
||||
e = s.addVolume(volumeId, collection, needleMapKind, rt, ttl, preallocate, MemoryMapMaxSizeMb, diskType)
|
||||
return e
|
||||
}
|
||||
func (s *Store) DeleteCollection(collection string) (e error) {
|
||||
@@ -100,9 +100,12 @@ func (s *Store) findVolume(vid needle.VolumeId) *Volume {
|
||||
}
|
||||
return nil
|
||||
}
|
||||
func (s *Store) FindFreeLocation() (ret *DiskLocation) {
|
||||
func (s *Store) FindFreeLocation(diskType DiskType) (ret *DiskLocation) {
|
||||
max := 0
|
||||
for _, location := range s.Locations {
|
||||
if diskType != location.DiskType {
|
||||
continue
|
||||
}
|
||||
currentFreeCount := location.MaxVolumeCount - location.VolumesLen()
|
||||
currentFreeCount *= erasure_coding.DataShardsCount
|
||||
currentFreeCount -= location.EcVolumesLen()
|
||||
@@ -114,11 +117,11 @@ func (s *Store) FindFreeLocation() (ret *DiskLocation) {
|
||||
}
|
||||
return ret
|
||||
}
|
||||
func (s *Store) addVolume(vid needle.VolumeId, collection string, needleMapKind NeedleMapKind, replicaPlacement *super_block.ReplicaPlacement, ttl *needle.TTL, preallocate int64, memoryMapMaxSizeMb uint32) error {
|
||||
func (s *Store) addVolume(vid needle.VolumeId, collection string, needleMapKind NeedleMapKind, replicaPlacement *super_block.ReplicaPlacement, ttl *needle.TTL, preallocate int64, memoryMapMaxSizeMb uint32, diskType DiskType) error {
|
||||
if s.findVolume(vid) != nil {
|
||||
return fmt.Errorf("Volume Id %d already exists!", vid)
|
||||
}
|
||||
if location := s.FindFreeLocation(); location != nil {
|
||||
if location := s.FindFreeLocation(diskType); location != nil {
|
||||
glog.V(0).Infof("In dir %s adds volume:%v collection:%s replicaPlacement:%v ttl:%v",
|
||||
location.Directory, vid, collection, replicaPlacement, ttl)
|
||||
if volume, err := NewVolume(location.Directory, location.IdxDirectory, collection, vid, needleMapKind, replicaPlacement, ttl, preallocate, memoryMapMaxSizeMb); err == nil {
|
||||
@@ -130,6 +133,7 @@ func (s *Store) addVolume(vid needle.VolumeId, collection string, needleMapKind
|
||||
ReplicaPlacement: uint32(replicaPlacement.Byte()),
|
||||
Version: uint32(volume.Version()),
|
||||
Ttl: ttl.ToUint32(),
|
||||
DiskType: string(diskType),
|
||||
}
|
||||
return nil
|
||||
} else {
|
||||
@@ -169,6 +173,7 @@ func collectStatForOneVolume(vid needle.VolumeId, v *Volume) (s *VolumeInfo) {
|
||||
ReadOnly: v.IsReadOnly(),
|
||||
Ttl: v.Ttl,
|
||||
CompactRevision: uint32(v.CompactionRevision),
|
||||
DiskType: v.DiskType().String(),
|
||||
}
|
||||
s.RemoteStorageName, s.RemoteStorageKey = v.RemoteStorageNameKey()
|
||||
|
||||
@@ -202,13 +207,13 @@ func (s *Store) GetRack() string {
|
||||
|
||||
func (s *Store) CollectHeartbeat() *master_pb.Heartbeat {
|
||||
var volumeMessages []*master_pb.VolumeInformationMessage
|
||||
maxVolumeCount := 0
|
||||
maxVolumeCounts := make(map[string]uint32)
|
||||
var maxFileKey NeedleId
|
||||
collectionVolumeSize := make(map[string]uint64)
|
||||
collectionVolumeReadOnlyCount := make(map[string]map[string]uint8)
|
||||
for _, location := range s.Locations {
|
||||
var deleteVids []needle.VolumeId
|
||||
maxVolumeCount = maxVolumeCount + location.MaxVolumeCount
|
||||
maxVolumeCounts[string(location.DiskType)] += uint32(location.MaxVolumeCount)
|
||||
location.volumesLock.RLock()
|
||||
for _, v := range location.volumes {
|
||||
curMaxFileKey, volumeMessage := v.ToVolumeInformationMessage()
|
||||
@@ -280,15 +285,15 @@ func (s *Store) CollectHeartbeat() *master_pb.Heartbeat {
|
||||
}
|
||||
|
||||
return &master_pb.Heartbeat{
|
||||
Ip: s.Ip,
|
||||
Port: uint32(s.Port),
|
||||
PublicUrl: s.PublicUrl,
|
||||
MaxVolumeCount: uint32(maxVolumeCount),
|
||||
MaxFileKey: NeedleIdToUint64(maxFileKey),
|
||||
DataCenter: s.dataCenter,
|
||||
Rack: s.rack,
|
||||
Volumes: volumeMessages,
|
||||
HasNoVolumes: len(volumeMessages) == 0,
|
||||
Ip: s.Ip,
|
||||
Port: uint32(s.Port),
|
||||
PublicUrl: s.PublicUrl,
|
||||
MaxVolumeCounts: maxVolumeCounts,
|
||||
MaxFileKey: NeedleIdToUint64(maxFileKey),
|
||||
DataCenter: s.dataCenter,
|
||||
Rack: s.rack,
|
||||
Volumes: volumeMessages,
|
||||
HasNoVolumes: len(volumeMessages) == 0,
|
||||
}
|
||||
|
||||
}
|
||||
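With this change a heartbeat reports capacity per disk type instead of a single total. For a server started with, say, two hdd folders of 7 slots each and one ssd folder of 3 slots (hypothetical layout), MaxVolumeCounts would be {"": 14, "ssd": 3}, where the empty key is the default hdd disk type.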
@@ -371,6 +376,7 @@ func (s *Store) MountVolume(i needle.VolumeId) error {
|
||||
ReplicaPlacement: uint32(v.ReplicaPlacement.Byte()),
|
||||
Version: uint32(v.Version()),
|
||||
Ttl: v.Ttl.ToUint32(),
|
||||
DiskType: string(v.location.DiskType),
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@@ -390,6 +396,7 @@ func (s *Store) UnmountVolume(i needle.VolumeId) error {
|
||||
ReplicaPlacement: uint32(v.ReplicaPlacement.Byte()),
|
||||
Version: uint32(v.Version()),
|
||||
Ttl: v.Ttl.ToUint32(),
|
||||
DiskType: string(v.location.DiskType),
|
||||
}
|
||||
|
||||
for _, location := range s.Locations {
|
||||
@@ -414,6 +421,7 @@ func (s *Store) DeleteVolume(i needle.VolumeId) error {
|
||||
ReplicaPlacement: uint32(v.ReplicaPlacement.Byte()),
|
||||
Version: uint32(v.Version()),
|
||||
Ttl: v.Ttl.ToUint32(),
|
||||
DiskType: string(v.location.DiskType),
|
||||
}
|
||||
for _, location := range s.Locations {
|
||||
if err := location.DeleteVolume(i); err == nil {
|
||||
@@ -463,6 +471,9 @@ func (s *Store) GetVolumeSizeLimit() uint64 {
|
||||
|
||||
func (s *Store) MaybeAdjustVolumeMax() (hasChanges bool) {
|
||||
volumeSizeLimit := s.GetVolumeSizeLimit()
|
||||
if volumeSizeLimit == 0 {
|
||||
return
|
||||
}
|
||||
for _, diskLocation := range s.Locations {
|
||||
if diskLocation.OriginalMaxVolumeCount == 0 {
|
||||
currentMaxVolumeCount := diskLocation.MaxVolumeCount
|
||||
|
||||
@@ -58,6 +58,7 @@ func (s *Store) MountEcShards(collection string, vid needle.VolumeId, shardId er
|
||||
Id: uint32(vid),
|
||||
Collection: collection,
|
||||
EcIndexBits: uint32(shardBits.AddShardId(shardId)),
|
||||
DiskType: string(location.DiskType),
|
||||
}
|
||||
return nil
|
||||
} else if err == os.ErrNotExist {
|
||||
@@ -82,6 +83,7 @@ func (s *Store) UnmountEcShards(vid needle.VolumeId, shardId erasure_coding.Shar
|
||||
Id: uint32(vid),
|
||||
Collection: ecShard.Collection,
|
||||
EcIndexBits: uint32(shardBits.AddShardId(shardId)),
|
||||
DiskType: string(ecShard.DiskType),
|
||||
}
|
||||
|
||||
for _, location := range s.Locations {
|
||||
|
||||
weed/storage/types/volume_disk_type.go (new file, 33 lines)
@@ -0,0 +1,33 @@
package types

import (
    "strings"
)

type DiskType string

const (
    HardDriveType DiskType = ""
    SsdType                = "ssd"
)

func ToDiskType(vt string) (diskType DiskType) {
    vt = strings.ToLower(vt)
    diskType = HardDriveType
    switch vt {
    case "", "hdd":
        diskType = HardDriveType
    case "ssd":
        diskType = SsdType
    default:
        diskType = DiskType(vt)
    }
    return
}

func (diskType DiskType) String() string {
    if diskType == "" {
        return ""
    }
    return string(diskType)
}
|
||||
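A quick usage sketch of the new DiskType helpers, seen from a caller of the types package (results follow directly from the switch above):

    types.ToDiskType("SSD")      // SsdType ("ssd"); input is lower-cased first
    types.ToDiskType("hdd")      // HardDriveType ("")
    types.ToDiskType("")         // HardDriveType ("")
    types.ToDiskType("nvme")     // DiskType("nvme"): unrecognized strings pass through as-is
    types.HardDriveType.String() // "" — the default hdd type stringifies to the empty string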
@@ -171,6 +171,10 @@ func (v *Volume) IndexFileSize() uint64 {
|
||||
return v.nm.IndexFileSize()
|
||||
}
|
||||
|
||||
func (v *Volume) DiskType() types.DiskType {
|
||||
return v.location.DiskType
|
||||
}
|
||||
|
||||
// Close cleanly shuts down this volume
|
||||
func (v *Volume) Close() {
|
||||
v.dataFileAccessLock.Lock()
|
||||
@@ -262,6 +266,7 @@ func (v *Volume) ToVolumeInformationMessage() (types.NeedleId, *master_pb.Volume
|
||||
Ttl: v.Ttl.ToUint32(),
|
||||
CompactRevision: uint32(v.SuperBlock.CompactionRevision),
|
||||
ModifiedAtSecond: modTime.Unix(),
|
||||
DiskType: string(v.location.DiskType),
|
||||
}
|
||||
|
||||
volumeInfo.RemoteStorageName, volumeInfo.RemoteStorageKey = v.RemoteStorageNameKey()
|
||||
|
||||
@@ -2,6 +2,7 @@ package storage
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/chrislusf/seaweedfs/weed/storage/super_block"
|
||||
"io"
|
||||
"os"
|
||||
|
||||
@@ -148,3 +149,18 @@ func verifyDeletedNeedleIntegrity(datFile backend.BackendStorageFile, v needle.V
|
||||
}
|
||||
return n.AppendAtNs, err
|
||||
}
|
||||
|
||||
func (v *Volume) checkIdxFile() error {
|
||||
datFileSize, _, err := v.DataBackend.GetStat()
|
||||
if err != nil {
|
||||
return fmt.Errorf("get stat %s: %v", v.FileName(".dat"), err)
|
||||
}
|
||||
if datFileSize <= super_block.SuperBlockSize {
|
||||
return nil
|
||||
}
|
||||
indexFileName := v.FileName(".idx")
|
||||
if util.FileExists(indexFileName) {
|
||||
return nil
|
||||
}
|
||||
return fmt.Errorf("idx file %s does not exists", indexFileName)
|
||||
}
|
||||
|
||||
@@ -14,6 +14,7 @@ type VolumeInfo struct {
|
||||
Size uint64
|
||||
ReplicaPlacement *super_block.ReplicaPlacement
|
||||
Ttl *needle.TTL
|
||||
DiskType string
|
||||
Collection string
|
||||
Version needle.Version
|
||||
FileCount int
|
||||
@@ -40,6 +41,7 @@ func NewVolumeInfo(m *master_pb.VolumeInformationMessage) (vi VolumeInfo, err er
|
||||
ModifiedAtSecond: m.ModifiedAtSecond,
|
||||
RemoteStorageName: m.RemoteStorageName,
|
||||
RemoteStorageKey: m.RemoteStorageKey,
|
||||
DiskType: m.DiskType,
|
||||
}
|
||||
rp, e := super_block.NewReplicaPlacementFromByte(byte(m.ReplicaPlacement))
|
||||
if e != nil {
|
||||
@@ -62,6 +64,7 @@ func NewVolumeInfoFromShort(m *master_pb.VolumeShortInformationMessage) (vi Volu
|
||||
}
|
||||
vi.ReplicaPlacement = rp
|
||||
vi.Ttl = needle.LoadTTLFromUint32(m.Ttl)
|
||||
vi.DiskType = m.DiskType
|
||||
return vi, nil
|
||||
}
|
||||
|
||||
@@ -90,6 +93,7 @@ func (vi VolumeInfo) ToVolumeInformationMessage() *master_pb.VolumeInformationMe
|
||||
ModifiedAtSecond: vi.ModifiedAtSecond,
|
||||
RemoteStorageName: vi.RemoteStorageName,
|
||||
RemoteStorageKey: vi.RemoteStorageKey,
|
||||
DiskType: vi.DiskType,
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -96,6 +96,10 @@ func (v *Volume) load(alsoLoadIndex bool, createDatIfMissing bool, needleMapKind
|
||||
v.dirIdx = v.dir
|
||||
}
|
||||
}
|
||||
// check volume idx files
|
||||
if err := v.checkIdxFile(); err != nil {
|
||||
glog.Fatalf("check volume idx file %s: %v", v.FileName(".idx"), err)
|
||||
}
|
||||
var indexFile *os.File
|
||||
if v.noWriteOrDelete {
|
||||
glog.V(0).Infoln("open to read file", v.FileName(".idx"))
|
||||
|
||||
Some files were not shown because too many files have changed in this diff.