Merge branch 'master' into handle_implicit_username

guosj committed 2022-04-23 09:48:25 +08:00
35 changed files with 1065 additions and 204 deletions

View File

@@ -71,7 +71,7 @@ var cmdFilerCopy = &Command{
It can copy one or a list of files or folders.
If copying a whole folder recursively:
All files under the folder and subfolders will be copyed.
All files under the folder and sub folders will be copied.
Optional parameter "-include" allows you to specify the file name patterns.
If "maxMB" is set to a positive number, files larger than it would be split into chunks.

View File

@@ -1,12 +1,9 @@
package command
import (
"context"
"fmt"
"github.com/chrislusf/seaweedfs/weed/pb"
"github.com/golang/protobuf/jsonpb"
jsoniter "github.com/json-iterator/go"
elastic "github.com/olivere/elastic/v7"
"os"
"path/filepath"
"strings"
@@ -124,72 +121,3 @@ func runFilerMetaTail(cmd *Command, args []string) bool {
return true
}
type EsDocument struct {
Dir string `json:"dir,omitempty"`
Name string `json:"name,omitempty"`
IsDirectory bool `json:"isDir,omitempty"`
Size uint64 `json:"size,omitempty"`
Uid uint32 `json:"uid,omitempty"`
Gid uint32 `json:"gid,omitempty"`
UserName string `json:"userName,omitempty"`
Collection string `json:"collection,omitempty"`
Crtime int64 `json:"crtime,omitempty"`
Mtime int64 `json:"mtime,omitempty"`
Mime string `json:"mime,omitempty"`
}
func toEsEntry(event *filer_pb.EventNotification) (*EsDocument, string) {
entry := event.NewEntry
dir, name := event.NewParentPath, entry.Name
id := util.Md5String([]byte(util.NewFullPath(dir, name)))
esEntry := &EsDocument{
Dir: dir,
Name: name,
IsDirectory: entry.IsDirectory,
Size: entry.Attributes.FileSize,
Uid: entry.Attributes.Uid,
Gid: entry.Attributes.Gid,
UserName: entry.Attributes.UserName,
Collection: entry.Attributes.Collection,
Crtime: entry.Attributes.Crtime,
Mtime: entry.Attributes.Mtime,
Mime: entry.Attributes.Mime,
}
return esEntry, id
}
func sendToElasticSearchFunc(servers string, esIndex string) (func(resp *filer_pb.SubscribeMetadataResponse) error, error) {
options := []elastic.ClientOptionFunc{}
options = append(options, elastic.SetURL(strings.Split(servers, ",")...))
options = append(options, elastic.SetSniff(false))
options = append(options, elastic.SetHealthcheck(false))
client, err := elastic.NewClient(options...)
if err != nil {
return nil, err
}
return func(resp *filer_pb.SubscribeMetadataResponse) error {
event := resp.EventNotification
if event.OldEntry != nil &&
(event.NewEntry == nil || resp.Directory != event.NewParentPath || event.OldEntry.Name != event.NewEntry.Name) {
// the entry was deleted, or renamed/moved away (not an in-place update)
dir, name := resp.Directory, event.OldEntry.Name
id := util.Md5String([]byte(util.NewFullPath(dir, name)))
println("delete", id)
_, err := client.Delete().Index(esIndex).Id(id).Do(context.Background())
return err
}
if event.NewEntry != nil {
// add a new file or update the same file
esEntry, id := toEsEntry(event)
value, err := jsoniter.Marshal(esEntry)
if err != nil {
return err
}
println(string(value))
_, err = client.Index().Index(esIndex).Id(id).BodyJson(string(value)).Do(context.Background())
return err
}
return nil
}, nil
}

View File

@@ -0,0 +1,82 @@
//go:build elastic
// +build elastic
package command
import (
"context"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/util"
jsoniter "github.com/json-iterator/go"
elastic "github.com/olivere/elastic/v7"
"strings"
)
type EsDocument struct {
Dir string `json:"dir,omitempty"`
Name string `json:"name,omitempty"`
IsDirectory bool `json:"isDir,omitempty"`
Size uint64 `json:"size,omitempty"`
Uid uint32 `json:"uid,omitempty"`
Gid uint32 `json:"gid,omitempty"`
UserName string `json:"userName,omitempty"`
Collection string `json:"collection,omitempty"`
Crtime int64 `json:"crtime,omitempty"`
Mtime int64 `json:"mtime,omitempty"`
Mime string `json:"mime,omitempty"`
}
func toEsEntry(event *filer_pb.EventNotification) (*EsDocument, string) {
entry := event.NewEntry
dir, name := event.NewParentPath, entry.Name
id := util.Md5String([]byte(util.NewFullPath(dir, name)))
esEntry := &EsDocument{
Dir: dir,
Name: name,
IsDirectory: entry.IsDirectory,
Size: entry.Attributes.FileSize,
Uid: entry.Attributes.Uid,
Gid: entry.Attributes.Gid,
UserName: entry.Attributes.UserName,
Collection: entry.Attributes.Collection,
Crtime: entry.Attributes.Crtime,
Mtime: entry.Attributes.Mtime,
Mime: entry.Attributes.Mime,
}
return esEntry, id
}
func sendToElasticSearchFunc(servers string, esIndex string) (func(resp *filer_pb.SubscribeMetadataResponse) error, error) {
options := []elastic.ClientOptionFunc{}
options = append(options, elastic.SetURL(strings.Split(servers, ",")...))
options = append(options, elastic.SetSniff(false))
options = append(options, elastic.SetHealthcheck(false))
client, err := elastic.NewClient(options...)
if err != nil {
return nil, err
}
return func(resp *filer_pb.SubscribeMetadataResponse) error {
event := resp.EventNotification
if event.OldEntry != nil &&
(event.NewEntry == nil || resp.Directory != event.NewParentPath || event.OldEntry.Name != event.NewEntry.Name) {
// the entry was deleted, or renamed/moved away (not an in-place update)
dir, name := resp.Directory, event.OldEntry.Name
id := util.Md5String([]byte(util.NewFullPath(dir, name)))
println("delete", id)
_, err := client.Delete().Index(esIndex).Id(id).Do(context.Background())
return err
}
if event.NewEntry != nil {
// add a new file or update the same file
esEntry, id := toEsEntry(event)
value, err := jsoniter.Marshal(esEntry)
if err != nil {
return err
}
println(string(value))
_, err = client.Index().Index(esIndex).Id(id).BodyJson(string(value)).Do(context.Background())
return err
}
return nil
}, nil
}

View File

@@ -0,0 +1,14 @@
//go:build !elastic
// +build !elastic
package command
import (
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
)
func sendToElasticSearchFunc(servers string, esIndex string) (func(resp *filer_pb.SubscribeMetadataResponse) error, error) {
return func(resp *filer_pb.SubscribeMetadataResponse) error {
return nil
}, nil
}
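
For context, this pair of files uses the standard Go build-tag stub pattern: the heavy elastic dependency is only linked when building with `-tags elastic`, while default builds get the no-op stub above. A minimal sketch of the same pattern (hypothetical `feature` package, names assumed):

```
//go:build !elastic
// +build !elastic

// Package feature is the no-op stub compiled into default builds.
// Building with `go build -tags elastic` excludes this file and
// compiles the counterpart guarded by `//go:build elastic` instead.
package feature

// Send does nothing in the stub build.
func Send(msg string) error { return nil }
```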

View File

@@ -15,6 +15,7 @@ import (
_ "github.com/chrislusf/seaweedfs/weed/replication/sink/localsink"
_ "github.com/chrislusf/seaweedfs/weed/replication/sink/s3sink"
_ "github.com/chrislusf/seaweedfs/weed/filer/arangodb"
_ "github.com/chrislusf/seaweedfs/weed/filer/cassandra"
_ "github.com/chrislusf/seaweedfs/weed/filer/elastic/v7"
_ "github.com/chrislusf/seaweedfs/weed/filer/etcd"

View File

@@ -285,6 +285,16 @@ healthcheck_enabled = false
index.max_result_window = 10000
[arangodb] # in development, don't use it
enabled = false
db_name = "seaweedfs"
servers=["http://localhost:8529"] # list of servers to connect to
# only basic auth supported for now
username=""
password=""
# skip tls cert validation
insecure_skip_verify = true
##########################
##########################

View File

@@ -0,0 +1,347 @@
package arangodb
import (
"context"
"crypto/tls"
"fmt"
"strconv"
"strings"
"sync"
"time"
"github.com/arangodb/go-driver"
"github.com/arangodb/go-driver/http"
"github.com/chrislusf/seaweedfs/weed/filer"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/util"
)
func init() {
filer.Stores = append(filer.Stores, &ArangodbStore{})
}
var (
BUCKET_PREFIX = "/buckets"
DEFAULT_COLLECTION = "seaweed_no_bucket"
KVMETA_COLLECTION = "seaweed_kvmeta"
)
type ArangodbStore struct {
connect driver.Connection
client driver.Client
database driver.Database
kvCollection driver.Collection
buckets map[string]driver.Collection
mu sync.RWMutex
databaseName string
}
type Model struct {
Key string `json:"_key"`
Directory string `json:"directory,omitempty"`
Name string `json:"name,omitempty"`
Ttl string `json:"ttl,omitempty"`
// ArangoDB does not support binary blobs,
// so we encode the byte slice into a uint64 slice;
// see helpers.go
Meta []uint64 `json:"meta"`
}
func (store *ArangodbStore) GetName() string {
return "arangodb"
}
func (store *ArangodbStore) Initialize(configuration util.Configuration, prefix string) (err error) {
store.buckets = make(map[string]driver.Collection, 3)
store.databaseName = configuration.GetString(prefix + "db_name")
return store.connection(configuration.GetStringSlice(prefix+"servers"),
configuration.GetString(prefix+"username"),
configuration.GetString(prefix+"password"),
configuration.GetBool(prefix+"insecure_skip_verify"),
)
}
func (store *ArangodbStore) connection(uris []string, user string, pass string, insecure bool) (err error) {
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
store.connect, err = http.NewConnection(http.ConnectionConfig{
Endpoints: uris,
TLSConfig: &tls.Config{
InsecureSkipVerify: insecure,
},
})
if err != nil {
return err
}
store.client, err = driver.NewClient(driver.ClientConfig{
Connection: store.connect,
Authentication: driver.BasicAuthentication(user, pass),
})
if err != nil {
return err
}
ok, err := store.client.DatabaseExists(ctx, store.databaseName)
if err != nil {
return err
}
if ok {
store.database, err = store.client.Database(ctx, store.databaseName)
} else {
store.database, err = store.client.CreateDatabase(ctx, store.databaseName, &driver.CreateDatabaseOptions{})
}
if err != nil {
return err
}
if store.kvCollection, err = store.ensureCollection(ctx, KVMETA_COLLECTION); err != nil {
return err
}
return err
}
type key int
const (
transactionKey key = 0
)
func (store *ArangodbStore) BeginTransaction(ctx context.Context) (context.Context, error) {
keys := make([]string, 0, len(store.buckets)+1)
for k := range store.buckets {
keys = append(keys, k)
}
keys = append(keys, store.kvCollection.Name())
txn, err := store.database.BeginTransaction(ctx, driver.TransactionCollections{
Exclusive: keys,
}, &driver.BeginTransactionOptions{})
if err != nil {
return nil, err
}
return context.WithValue(ctx, transactionKey, txn), nil
}
func (store *ArangodbStore) CommitTransaction(ctx context.Context) error {
val := ctx.Value(transactionKey)
cast, ok := val.(driver.TransactionID)
if !ok {
return fmt.Errorf("txn cast fail %s:", val)
}
err := store.database.CommitTransaction(ctx, cast, &driver.CommitTransactionOptions{})
if err != nil {
return err
}
return nil
}
func (store *ArangodbStore) RollbackTransaction(ctx context.Context) error {
val := ctx.Value(transactionKey)
cast, ok := val.(driver.TransactionID)
if !ok {
return fmt.Errorf("txn cast fail %s:", val)
}
err := store.database.AbortTransaction(ctx, cast, &driver.AbortTransactionOptions{})
if err != nil {
return err
}
return nil
}
func (store *ArangodbStore) InsertEntry(ctx context.Context, entry *filer.Entry) (err error) {
dir, name := entry.FullPath.DirAndName()
meta, err := entry.EncodeAttributesAndChunks()
if err != nil {
return fmt.Errorf("encode %s: %s", entry.FullPath, err)
}
if len(entry.Chunks) > 50 {
meta = util.MaybeGzipData(meta)
}
model := &Model{
Key: hashString(string(entry.FullPath)),
Directory: dir,
Name: name,
Meta: bytesToArray(meta),
}
if entry.TtlSec > 0 {
model.Ttl = time.Now().Add(time.Second * time.Duration(entry.TtlSec)).Format(time.RFC3339)
} else {
model.Ttl = ""
}
targetCollection, err := store.extractBucketCollection(ctx, entry.FullPath)
if err != nil {
return err
}
_, err = targetCollection.CreateDocument(ctx, model)
if driver.IsConflict(err) {
return store.UpdateEntry(ctx, entry)
}
if err != nil {
return fmt.Errorf("InsertEntry %s: %v", entry.FullPath, err)
}
return nil
}
func (store *ArangodbStore) UpdateEntry(ctx context.Context, entry *filer.Entry) (err error) {
dir, name := entry.FullPath.DirAndName()
meta, err := entry.EncodeAttributesAndChunks()
if err != nil {
return fmt.Errorf("encode %s: %s", entry.FullPath, err)
}
if len(entry.Chunks) > 50 {
meta = util.MaybeGzipData(meta)
}
model := &Model{
Key: hashString(string(entry.FullPath)),
Directory: dir,
Name: name,
Meta: bytesToArray(meta),
}
if entry.TtlSec > 0 {
model.Ttl = time.Now().Add(time.Duration(entry.TtlSec) * time.Second).Format(time.RFC3339)
} else {
model.Ttl = "none"
}
targetCollection, err := store.extractBucketCollection(ctx, entry.FullPath)
if err != nil {
return err
}
_, err = targetCollection.UpdateDocument(ctx, model.Key, model)
if err != nil {
return fmt.Errorf("UpdateEntry %s: %v", entry.FullPath, err)
}
return nil
}
func (store *ArangodbStore) FindEntry(ctx context.Context, fullpath util.FullPath) (entry *filer.Entry, err error) {
var data Model
targetCollection, err := store.extractBucketCollection(ctx, fullpath)
if err != nil {
return nil, err
}
_, err = targetCollection.ReadDocument(ctx, hashString(string(fullpath)), &data)
if err != nil {
if driver.IsNotFound(err) {
return nil, filer_pb.ErrNotFound
}
glog.Errorf("find %s: %v", fullpath, err)
return nil, filer_pb.ErrNotFound
}
if len(data.Meta) == 0 {
return nil, filer_pb.ErrNotFound
}
entry = &filer.Entry{
FullPath: fullpath,
}
err = entry.DecodeAttributesAndChunks(util.MaybeDecompressData(arrayToBytes(data.Meta)))
if err != nil {
return entry, fmt.Errorf("decode %s : %v", entry.FullPath, err)
}
return entry, nil
}
func (store *ArangodbStore) DeleteEntry(ctx context.Context, fullpath util.FullPath) (err error) {
targetCollection, err := store.extractBucketCollection(ctx, fullpath)
if err != nil {
return err
}
_, err = targetCollection.RemoveDocument(ctx, hashString(string(fullpath)))
if err != nil && !driver.IsNotFound(err) {
glog.Errorf("find %s: %v", fullpath, err)
return fmt.Errorf("delete %s : %v", fullpath, err)
}
return nil
}
// this runs in O(log n) time
func (store *ArangodbStore) DeleteFolderChildren(ctx context.Context, fullpath util.FullPath) (err error) {
var query string
targetCollection, err := store.extractBucketCollection(ctx, fullpath)
if err != nil {
return err
}
query = query + fmt.Sprintf(`
for d in %s
filter starts_with(d.directory, "%s/") || d.directory == "%s"
remove d._key in %s`,
targetCollection.Name(),
string(fullpath),
string(fullpath),
targetCollection.Name(),
)
cur, err := store.database.Query(ctx, query, nil)
if err != nil {
return fmt.Errorf("delete %s : %v", fullpath, err)
}
defer cur.Close()
return nil
}
func (store *ArangodbStore) ListDirectoryEntries(ctx context.Context, dirPath util.FullPath, startFileName string, includeStartFile bool, limit int64, eachEntryFunc filer.ListEachEntryFunc) (lastFileName string, err error) {
return store.ListDirectoryPrefixedEntries(ctx, dirPath, startFileName, includeStartFile, limit, "", eachEntryFunc)
}
func (store *ArangodbStore) ListDirectoryPrefixedEntries(ctx context.Context, dirPath util.FullPath, startFileName string, includeStartFile bool, limit int64, prefix string, eachEntryFunc filer.ListEachEntryFunc) (lastFileName string, err error) {
targetCollection, err := store.extractBucketCollection(ctx, dirPath+"/")
if err != nil {
return lastFileName, err
}
query := "for d in " + targetCollection.Name()
if includeStartFile {
query = query + " filter d.name >= \"" + startFileName + "\" "
} else {
query = query + " filter d.name > \"" + startFileName + "\" "
}
if prefix != "" {
query = query + fmt.Sprintf(`&& starts_with(d.name, "%s")`, prefix)
}
query = query + `
filter d.directory == @dir
sort d.name asc
`
if limit > 0 {
query = query + "limit " + strconv.Itoa(int(limit))
}
query = query + "\n return d"
cur, err := store.database.Query(ctx, query, map[string]interface{}{"dir": dirPath})
if err != nil {
return lastFileName, fmt.Errorf("failed to list directory entries: find error: %w", err)
}
defer cur.Close()
for cur.HasMore() {
var data Model
_, err = cur.ReadDocument(ctx, &data)
if err != nil {
break
}
entry := &filer.Entry{
FullPath: util.NewFullPath(data.Directory, data.Name),
}
lastFileName = data.Name
converted := arrayToBytes(data.Meta)
if decodeErr := entry.DecodeAttributesAndChunks(util.MaybeDecompressData(converted)); decodeErr != nil {
err = decodeErr
glog.V(0).Infof("list %s : %v", entry.FullPath, err)
break
}
if !eachEntryFunc(entry) {
break
}
}
return lastFileName, err
}
func (store *ArangodbStore) Shutdown() {
}

View File

@@ -0,0 +1,40 @@
package arangodb
import (
"context"
"github.com/arangodb/go-driver"
"time"
"github.com/chrislusf/seaweedfs/weed/filer"
"github.com/chrislusf/seaweedfs/weed/glog"
)
var _ filer.BucketAware = (*ArangodbStore)(nil)
func (store *ArangodbStore) OnBucketCreation(bucket string) {
timeout, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
// create the collection and add it to the cache
_, err := store.ensureBucket(timeout, bucket)
if err != nil {
glog.Errorf("bucket create %s: %v", bucket, err)
}
}
func (store *ArangodbStore) OnBucketDeletion(bucket string) {
timeout, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
collection, err := store.ensureBucket(timeout, bucket)
if err != nil {
glog.Errorf("bucket delete %s: %v", bucket, err)
return
}
err = collection.Remove(timeout)
if err != nil && !driver.IsNotFound(err) {
glog.Errorf("bucket delete %s: %v", bucket, err)
return
}
}
func (store *ArangodbStore) CanDropWholeBucket() bool {
return true
}

View File

@@ -0,0 +1,54 @@
package arangodb
import (
"context"
"fmt"
"github.com/arangodb/go-driver"
"github.com/chrislusf/seaweedfs/weed/filer"
"github.com/chrislusf/seaweedfs/weed/glog"
)
func (store *ArangodbStore) KvPut(ctx context.Context, key []byte, value []byte) (err error) {
model := &Model{
Key: hashString(".kvstore." + string(key)),
Directory: ".kvstore." + string(key),
Meta: bytesToArray(value),
}
exists, err := store.kvCollection.DocumentExists(ctx, model.Key)
if err != nil {
return fmt.Errorf("kv put: %v", err)
}
if exists {
_, err = store.kvCollection.UpdateDocument(ctx, model.Key, model)
} else {
_, err = store.kvCollection.CreateDocument(ctx, model)
}
if err != nil {
return fmt.Errorf("kv put: %v", err)
}
return nil
}
func (store *ArangodbStore) KvGet(ctx context.Context, key []byte) (value []byte, err error) {
var model Model
_, err = store.kvCollection.ReadDocument(ctx, hashString(".kvstore."+string(key)), &model)
if driver.IsNotFound(err) {
return nil, filer.ErrKvNotFound
}
if err != nil {
glog.Errorf("kv get: %s %v", string(key), err)
return nil, filer.ErrKvNotFound
}
return arrayToBytes(model.Meta), nil
}
func (store *ArangodbStore) KvDelete(ctx context.Context, key []byte) (err error) {
_, err = store.kvCollection.RemoveDocument(ctx, hashString(".kvstore."+string(key)))
if err != nil {
glog.Errorf("kv del: %v", err)
return filer.ErrKvNotFound
}
return nil
}

View File

@@ -0,0 +1,136 @@
package arangodb
import (
"context"
"crypto/md5"
"encoding/binary"
"encoding/hex"
"io"
"strings"
"github.com/arangodb/go-driver"
"github.com/chrislusf/seaweedfs/weed/util"
)
// convert a string into an ArangoDB-key-safe hex hash
func hashString(dir string) string {
h := md5.New()
io.WriteString(h, dir)
b := h.Sum(nil)
return hex.EncodeToString(b)
}
// convert a slice of bytes into a slice of uint64;
// the first uint64 holds the length in bytes
func bytesToArray(bs []byte) []uint64 {
out := make([]uint64, 0, 2+len(bs)/8)
out = append(out, uint64(len(bs)))
for len(bs)%8 != 0 {
bs = append(bs, 0)
}
for i := 0; i < len(bs); i = i + 8 {
out = append(out, binary.BigEndian.Uint64(bs[i:]))
}
return out
}
// convert a slice of uint64 back into bytes;
// if the input length is 0 or 1, returns nil
func arrayToBytes(xs []uint64) []byte {
if len(xs) < 2 {
return nil
}
first := xs[0]
out := make([]byte, len(xs)*8) // len(xs)*8-8 would suffice, but the extra 8 bytes are harmless
for i := 1; i < len(xs); i = i + 1 {
binary.BigEndian.PutUint64(out[((i-1)*8):], xs[i])
}
return out[:first]
}
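// illustrative round-trip of the two helpers above:
// bytesToArray([]byte{0x01, 0x02, 0x03}) == []uint64{3, 0x0102030000000000}
// arrayToBytes([]uint64{3, 0x0102030000000000}) == []byte{0x01, 0x02, 0x03}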
// gets the collection that the bucket in fullpath maps to
func (store *ArangodbStore) extractBucketCollection(ctx context.Context, fullpath util.FullPath) (c driver.Collection, err error) {
bucket, _ := extractBucket(fullpath)
if bucket == "" {
bucket = DEFAULT_COLLECTION
}
c, err = store.ensureBucket(ctx, bucket)
if err != nil {
return nil, err
}
return c, err
}
// called by extractBucketCollection
func extractBucket(fullpath util.FullPath) (string, string) {
if !strings.HasPrefix(string(fullpath), BUCKET_PREFIX+"/") {
return "", string(fullpath)
}
if strings.Count(string(fullpath), "/") < 3 {
return "", string(fullpath)
}
bucketAndObjectKey := string(fullpath)[len(BUCKET_PREFIX+"/"):]
t := strings.Index(bucketAndObjectKey, "/")
bucket := bucketAndObjectKey
shortPath := "/"
if t > 0 {
bucket = bucketAndObjectKey[:t]
shortPath = string(util.FullPath(bucketAndObjectKey[t:]))
}
return bucket, shortPath
}
// get the bucket collection from cache; if it does not exist, create the bucket's collection and cache it
func (store *ArangodbStore) ensureBucket(ctx context.Context, bucket string) (bc driver.Collection, err error) {
var ok bool
store.mu.RLock()
bc, ok = store.buckets[bucket]
store.mu.RUnlock()
if ok {
return bc, nil
}
store.mu.Lock()
defer store.mu.Unlock()
store.buckets[bucket], err = store.ensureCollection(ctx, bucket)
if err != nil {
return nil, err
}
return store.buckets[bucket], nil
}
// creates the collection if it does not exist, and ensures its indices
func (store *ArangodbStore) ensureCollection(ctx context.Context, name string) (c driver.Collection, err error) {
ok, err := store.database.CollectionExists(ctx, name)
if err != nil {
return
}
if ok {
c, err = store.database.Collection(ctx, name)
} else {
c, err = store.database.CreateCollection(ctx, name, &driver.CreateCollectionOptions{})
}
if err != nil {
return
}
// ensure indices
if _, _, err = c.EnsurePersistentIndex(ctx, []string{"directory", "name"},
&driver.EnsurePersistentIndexOptions{
Name: "directory_name_multi", Unique: true,
}); err != nil {
return
}
if _, _, err = c.EnsurePersistentIndex(ctx, []string{"directory"},
&driver.EnsurePersistentIndexOptions{Name: "IDX_directory"}); err != nil {
return
}
if _, _, err = c.EnsureTTLIndex(ctx, "ttl", 1,
&driver.EnsureTTLIndexOptions{Name: "IDX_TTL"}); err != nil {
return
}
if _, _, err = c.EnsurePersistentIndex(ctx, []string{"name"}, &driver.EnsurePersistentIndexOptions{
Name: "IDX_name",
}); err != nil {
return
}
return c, nil
}

View File

@@ -0,0 +1,52 @@
## arangodb
database: https://github.com/arangodb/arangodb
go driver: https://github.com/arangodb/go-driver
options:
```
[arangodb]
enabled=true
db_name="seaweedfs"
servers=["http://localhost:8529"]
# basic auth (the only supported method for now)
username="root"
password="test"
# tls settings
insecure_skip_verify=true
```
Tested using this dev database:
`docker run -p 8529:8529 -e ARANGO_ROOT_PASSWORD=test arangodb/arangodb:3.9.0`
## features i don't personally need but are missing
- [ ] provide tls cert to arango
- [ ] authentication that is not basic auth
- [ ] synchronise endpoint interval config
- [ ] automatic creation of custom index
- [ ] configure default arangodb collection sharding rules
- [ ] configure default arangodb collection replication rules
## complexity
Assuming https://www.arangodb.com/docs/stable/indexing-index-basics.html#persistent-index is accurate, the expected complexities are:
O(1)
- InsertEntry
- UpdateEntry
- FindEntry
- DeleteEntry
- KvPut
- KvGet
- KvDelete
O(log(BUCKET_SIZE))
- DeleteFolderChildren
O(log(DIRECTORY_SIZE))
- ListDirectoryEntries
- ListDirectoryPrefixedEntries

View File

@@ -0,0 +1,9 @@
/*
Package elastic is for elastic filer store.
The referenced "github.com/olivere/elastic/v7" library is too big when compiled.
So this is only compiled in "make full_install".
*/
package elastic

View File

@@ -1,3 +1,6 @@
//go:build elastic
// +build elastic
package elastic
import (

View File

@@ -1,3 +1,6 @@
//go:build elastic
// +build elastic
package elastic
import (

View File

@@ -25,9 +25,7 @@ func (f *Filer) DeleteEntryMetaAndData(ctx context.Context, p util.FullPath, isR
if findErr != nil {
return findErr
}
isDeleteCollection := f.isBucket(entry)
if entry.IsDirectory() {
// delete the folder children, not including the folder itself
err = f.doBatchDeleteFolderMetaAndData(ctx, entry, isRecursive, ignoreRecursiveError, shouldDeleteChunks && !isDeleteCollection, isDeleteCollection, isFromOtherCluster, signatures, func(chunks []*filer_pb.FileChunk) error {

View File

@@ -0,0 +1,9 @@
/*
Package sqlite is for sqlite filer store.
The referenced "modernc.org/sqlite" library is too big when compiled.
So this is only compiled in "make full_install".
*/
package sqlite

View File

@@ -1,5 +1,6 @@
//go:build linux || darwin || windows
//go:build (linux || darwin || windows) && sqlite
// +build linux darwin windows
// +build sqlite
// limited GOOS due to modernc.org/libc/unistd

View File

@@ -1,5 +1,5 @@
//go:build !linux && !darwin && !windows && !s390 && !ppc64le && !mips64
// +build !linux,!darwin,!windows,!s390,!ppc64le,!mips64
//go:build !linux && !darwin && !windows && !s390 && !ppc64le && !mips64 && !sqlite
// +build !linux,!darwin,!windows,!s390,!ppc64le,!mips64,!sqlite
// limited GOOS due to modernc.org/libc/unistd

View File

@@ -385,27 +385,28 @@ func handleImplicitUsername(r *http.Request, values url.Values) {
if len(r.Header["Authorization"]) == 0 || values.Get("UserName") != "" {
return
}
// get username who signs the request
// for a typical Authorization:
// get the username that signed the request. For a typical Authorization:
// "AWS4-HMAC-SHA256 Credential=197FSAQ7HHTA48X64O3A/20220420/test1/iam/aws4_request, SignedHeaders=content-type;
// host;x-amz-date, Signature=6757dc6b3d7534d67e17842760310e99ee695408497f6edc4fdb84770c252dc8"
// host;x-amz-date, Signature=6757dc6b3d7534d67e17842760310e99ee695408497f6edc4fdb84770c252dc8",
// the "test1" will be extracted as the username
glog.V(4).Infof("Authorization field: %v", r.Header["Authorization"][0])
s := strings.Split(r.Header["Authorization"][0], "Credential=")
if len(s) < 2 {
return
}
glog.V(6).Infof("s: %v\n", s)
glog.V(4).Infof("First strip: %v", s)
s = strings.Split(s[1], ",")
if len(s) < 2 {
return
}
glog.V(6).Infof("s: %v\n", s)
glog.V(4).Infof("Second strip: %v", s)
s = strings.Split(s[0], "/")
if len(s) < 5 {
return
}
glog.V(6).Infof("s: %v\n", s)
glog.V(4).Infof("Third strip: %v", s)
userName := s[2]
glog.V(4).Infof("UserName: %v", userName)
values.Set("UserName", userName)
}
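
The parsing above can be exercised in isolation; a minimal sketch (the standalone `extractUserName` helper is an assumption for illustration, not part of this commit):

```
package main

import (
	"fmt"
	"strings"
)

// extractUserName mirrors the handler logic: take the Credential= field,
// cut it at the first comma, then pick the third "/"-separated component
// of the credential scope.
func extractUserName(authorization string) string {
	s := strings.Split(authorization, "Credential=")
	if len(s) < 2 {
		return ""
	}
	s = strings.Split(s[1], ",")
	if len(s) < 2 {
		return ""
	}
	s = strings.Split(s[0], "/")
	if len(s) < 5 {
		return ""
	}
	return s[2]
}

func main() {
	auth := "AWS4-HMAC-SHA256 Credential=197FSAQ7HHTA48X64O3A/20220420/test1/iam/aws4_request, SignedHeaders=content-type;host;x-amz-date, Signature=6757dc6b3d7534d67e17842760310e99ee695408497f6edc4fdb84770c252dc8"
	fmt.Println(extractUserName(auth)) // prints: test1
}
```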

View File

@@ -0,0 +1,9 @@
/*
Package gocdk_pub_sub is for Azure Service Bus and RabbitMQ.
The referenced "gocloud.dev/pubsub" library is too big when compiled.
So this is only compiled in "make full_install".
*/
package gocdk_pub_sub

View File

@@ -1,3 +1,6 @@
//go:build gocdk
// +build gocdk
// Package gocdk_pub_sub supports the Go CDK (Cloud Development Kit) PubSub API,
// which in turn supports many providers, including Amazon SNS/SQS, Azure Service Bus,
// Google Cloud PubSub, and RabbitMQ.

View File

@@ -0,0 +1,9 @@
/*
Package hdfs is for remote hdfs storage.
The referenced "github.com/colinmarc/hdfs/v2" library is too big when compiled.
So this is only compiled in "make full_install".
*/
package hdfs

View File

@@ -1,3 +1,6 @@
//go:build hdfs
// +build hdfs
package hdfs
import (

View File

@@ -1,3 +1,6 @@
//go:build hdfs
// +build hdfs
package hdfs
import (
@@ -7,7 +10,7 @@ import (
"github.com/chrislusf/seaweedfs/weed/pb/remote_pb"
"github.com/chrislusf/seaweedfs/weed/remote_storage"
"github.com/chrislusf/seaweedfs/weed/util"
"github.com/colinmarc/hdfs/v2"
hdfs "github.com/colinmarc/hdfs/v2"
"io"
"os"
"path"

View File

@@ -1,3 +1,6 @@
//go:build gocdk
// +build gocdk
package sub
import (

View File

@@ -21,6 +21,7 @@ import (
"github.com/chrislusf/seaweedfs/weed/util"
"github.com/chrislusf/seaweedfs/weed/filer"
_ "github.com/chrislusf/seaweedfs/weed/filer/arangodb"
_ "github.com/chrislusf/seaweedfs/weed/filer/cassandra"
_ "github.com/chrislusf/seaweedfs/weed/filer/elastic/v7"
_ "github.com/chrislusf/seaweedfs/weed/filer/etcd"

View File

@@ -5,16 +5,6 @@ import (
"context"
"flag"
"fmt"
"io"
"io/ioutil"
"math"
"net/http"
"net/url"
"os"
"path/filepath"
"strings"
"sync"
"github.com/chrislusf/seaweedfs/weed/filer"
"github.com/chrislusf/seaweedfs/weed/operation"
"github.com/chrislusf/seaweedfs/weed/pb"
@@ -25,6 +15,17 @@ import (
"github.com/chrislusf/seaweedfs/weed/storage/needle_map"
"github.com/chrislusf/seaweedfs/weed/storage/types"
"github.com/chrislusf/seaweedfs/weed/util"
"io"
"io/ioutil"
"math"
"net/http"
"net/url"
"os"
"path"
"path/filepath"
"strings"
"sync"
"time"
)
func init() {
@@ -65,8 +66,11 @@ func (c *commandVolumeFsck) Do(args []string, commandEnv *CommandEnv, writer io.
verbose := fsckCommand.Bool("v", false, "verbose mode")
findMissingChunksInFiler := fsckCommand.Bool("findMissingChunksInFiler", false, "see \"help volume.fsck\"")
findMissingChunksInFilerPath := fsckCommand.String("findMissingChunksInFilerPath", "/", "used together with findMissingChunksInFiler")
findMissingChunksInVolumeId := fsckCommand.Int("findMissingChunksInVolumeId", 0, "used together with findMissingChunksInFiler")
applyPurging := fsckCommand.Bool("reallyDeleteFromVolume", false, "<expert only!> after detection, delete missing data from volumes / delete missing file entries from filer")
purgeAbsent := fsckCommand.Bool("reallyDeleteFilerEntries", false, "<expert only!> delete missing file entries from filer if the corresponding volume is missing for any reason, please ensure all still existing/expected volumes are connected! used together with findMissingChunksInFiler")
tempPath := fsckCommand.String("tempPath", path.Join(os.TempDir()), "path for temporary idx files")
if err = fsckCommand.Parse(args); err != nil {
return nil
}
@@ -78,7 +82,7 @@ func (c *commandVolumeFsck) Do(args []string, commandEnv *CommandEnv, writer io.
c.env = commandEnv
// create a temp folder
tempFolder, err := os.MkdirTemp("", "sw_fsck")
tempFolder, err := os.MkdirTemp(*tempPath, "sw_fsck")
if err != nil {
return fmt.Errorf("failed to create temp folder: %v", err)
}
@@ -88,14 +92,14 @@ func (c *commandVolumeFsck) Do(args []string, commandEnv *CommandEnv, writer io.
defer os.RemoveAll(tempFolder)
// collect all volume id locations
volumeIdToVInfo, err := c.collectVolumeIds(commandEnv, *verbose, writer)
dataNodeVolumeIdToVInfo, err := c.collectVolumeIds(commandEnv, *verbose, writer)
if err != nil {
return fmt.Errorf("failed to collect all volume locations: %v", err)
}
isBucketsPath := false
var filerBucketsPath string
if *findMissingChunksInFiler && *findMissingChunksInFilerPath != "" {
if *findMissingChunksInFiler && *findMissingChunksInFilerPath != "/" {
filerBucketsPath, err = readFilerBucketsPath(commandEnv)
if err != nil {
return fmt.Errorf("read filer buckets path: %v", err)
@@ -108,34 +112,43 @@ func (c *commandVolumeFsck) Do(args []string, commandEnv *CommandEnv, writer io.
return fmt.Errorf("read filer buckets path: %v", err)
}
collectMtime := time.Now().Unix()
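// chunks newer than this timestamp are skipped below, so files written while the scan runs are not misreported as missing from volumes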
// collect file ids from each volume
for volumeId, vinfo := range volumeIdToVInfo {
if isBucketsPath && !strings.HasPrefix(*findMissingChunksInFilerPath, filerBucketsPath+"/"+vinfo.collection) {
delete(volumeIdToVInfo, volumeId)
continue
}
err = c.collectOneVolumeFileIds(tempFolder, volumeId, vinfo, *verbose, writer)
if err != nil {
return fmt.Errorf("failed to collect file ids from volume %d on %s: %v", volumeId, vinfo.server, err)
for dataNodeId, volumeIdToVInfo := range dataNodeVolumeIdToVInfo {
for volumeId, vinfo := range volumeIdToVInfo {
if *findMissingChunksInVolumeId > 0 && uint32(*findMissingChunksInVolumeId) != volumeId {
delete(volumeIdToVInfo, volumeId)
continue
}
if isBucketsPath && !strings.HasPrefix(*findMissingChunksInFilerPath, filerBucketsPath+"/"+vinfo.collection) {
delete(volumeIdToVInfo, volumeId)
continue
}
err = c.collectOneVolumeFileIds(tempFolder, dataNodeId, volumeId, vinfo, *verbose, writer)
if err != nil {
return fmt.Errorf("failed to collect file ids from volume %d on %s: %v", volumeId, vinfo.server, err)
}
}
}
if *findMissingChunksInFiler {
// collect all filer file ids and paths
if err = c.collectFilerFileIdAndPaths(volumeIdToVInfo, tempFolder, writer, *findMissingChunksInFilerPath, *verbose, *purgeAbsent); err != nil {
if err = c.collectFilerFileIdAndPaths(dataNodeVolumeIdToVInfo, tempFolder, writer, *findMissingChunksInFilerPath, *verbose, *purgeAbsent, collectMtime); err != nil {
return fmt.Errorf("collectFilerFileIdAndPaths: %v", err)
}
// for each volume, check filer file ids
if err = c.findFilerChunksMissingInVolumeServers(volumeIdToVInfo, tempFolder, writer, *verbose, *applyPurging); err != nil {
return fmt.Errorf("findFilerChunksMissingInVolumeServers: %v", err)
for dataNodeId, volumeIdToVInfo := range dataNodeVolumeIdToVInfo {
// for each volume, check filer file ids
if err = c.findFilerChunksMissingInVolumeServers(volumeIdToVInfo, tempFolder, dataNodeId, writer, *verbose, *applyPurging); err != nil {
return fmt.Errorf("findFilerChunksMissingInVolumeServers: %v", err)
}
}
} else {
// collect all filer file ids
if err = c.collectFilerFileIds(volumeIdToVInfo, tempFolder, writer, *verbose); err != nil {
if err = c.collectFilerFileIds(dataNodeVolumeIdToVInfo, tempFolder, writer, *verbose); err != nil {
return fmt.Errorf("failed to collect file ids from filer: %v", err)
}
// subtract filer file ids from volume file ids
if err = c.findExtraChunksInVolumeServers(volumeIdToVInfo, tempFolder, writer, *verbose, *applyPurging); err != nil {
if err = c.findExtraChunksInVolumeServers(dataNodeVolumeIdToVInfo, tempFolder, writer, *verbose, *applyPurging); err != nil {
return fmt.Errorf("findExtraChunksInVolumeServers: %v", err)
}
}
@@ -143,19 +156,24 @@ func (c *commandVolumeFsck) Do(args []string, commandEnv *CommandEnv, writer io.
return nil
}
func (c *commandVolumeFsck) collectFilerFileIdAndPaths(volumeIdToServer map[uint32]VInfo, tempFolder string, writer io.Writer, filerPath string, verbose bool, purgeAbsent bool) error {
func (c *commandVolumeFsck) collectFilerFileIdAndPaths(dataNodeVolumeIdToVInfo map[string]map[uint32]VInfo, tempFolder string, writer io.Writer, filerPath string, verbose bool, purgeAbsent bool, collectMtime int64) error {
if verbose {
fmt.Fprintf(writer, "checking each file from filer ...\n")
}
files := make(map[uint32]*os.File)
for vid := range volumeIdToServer {
dst, openErr := os.OpenFile(getFilerFileIdFile(tempFolder, vid), os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644)
if openErr != nil {
return fmt.Errorf("failed to create file %s: %v", getFilerFileIdFile(tempFolder, vid), openErr)
for _, volumeIdToServer := range dataNodeVolumeIdToVInfo {
for vid := range volumeIdToServer {
if _, ok := files[vid]; ok {
continue
}
dst, openErr := os.OpenFile(getFilerFileIdFile(tempFolder, vid), os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644)
if openErr != nil {
return fmt.Errorf("failed to create file %s: %v", getFilerFileIdFile(tempFolder, vid), openErr)
}
files[vid] = dst
}
files[vid] = dst
}
defer func() {
for _, f := range files {
@@ -179,6 +197,9 @@ func (c *commandVolumeFsck) collectFilerFileIdAndPaths(volumeIdToServer map[uint
}
dataChunks = append(dataChunks, manifestChunks...)
for _, chunk := range dataChunks {
if chunk.Mtime > collectMtime {
continue
}
outputChan <- &Item{
vid: chunk.Fid.VolumeId,
fileKey: chunk.Fid.FileKey,
@@ -210,10 +231,10 @@ func (c *commandVolumeFsck) collectFilerFileIdAndPaths(volumeIdToServer map[uint
}
func (c *commandVolumeFsck) findFilerChunksMissingInVolumeServers(volumeIdToVInfo map[uint32]VInfo, tempFolder string, writer io.Writer, verbose bool, applyPurging bool) error {
func (c *commandVolumeFsck) findFilerChunksMissingInVolumeServers(volumeIdToVInfo map[uint32]VInfo, tempFolder string, dataNodeId string, writer io.Writer, verbose bool, applyPurging bool) error {
for volumeId, vinfo := range volumeIdToVInfo {
checkErr := c.oneVolumeFileIdsCheckOneVolume(tempFolder, volumeId, writer, verbose, applyPurging)
checkErr := c.oneVolumeFileIdsCheckOneVolume(tempFolder, dataNodeId, volumeId, writer, verbose, applyPurging)
if checkErr != nil {
return fmt.Errorf("failed to collect file ids from volume %d on %s: %v", volumeId, vinfo.server, checkErr)
}
@@ -221,55 +242,93 @@ func (c *commandVolumeFsck) findFilerChunksMissingInVolumeServers(volumeIdToVInf
return nil
}
func (c *commandVolumeFsck) findExtraChunksInVolumeServers(volumeIdToVInfo map[uint32]VInfo, tempFolder string, writer io.Writer, verbose bool, applyPurging bool) error {
func (c *commandVolumeFsck) findExtraChunksInVolumeServers(dataNodeVolumeIdToVInfo map[string]map[uint32]VInfo, tempFolder string, writer io.Writer, verbose bool, applyPurging bool) error {
var totalInUseCount, totalOrphanChunkCount, totalOrphanDataSize uint64
for volumeId, vinfo := range volumeIdToVInfo {
inUseCount, orphanFileIds, orphanDataSize, checkErr := c.oneVolumeFileIdsSubtractFilerFileIds(tempFolder, volumeId, writer, verbose)
if checkErr != nil {
return fmt.Errorf("failed to collect file ids from volume %d on %s: %v", volumeId, vinfo.server, checkErr)
}
totalInUseCount += inUseCount
totalOrphanChunkCount += uint64(len(orphanFileIds))
totalOrphanDataSize += orphanDataSize
if verbose {
for _, fid := range orphanFileIds {
fmt.Fprintf(writer, "%s\n", fid)
volumeIdOrphanFileIds := make(map[uint32]map[string]bool)
isSeveralReplicas := make(map[uint32]bool)
isEcVolumeReplicas := make(map[uint32]bool)
isReadOnlyReplicas := make(map[uint32]bool)
serverReplicas := make(map[uint32][]pb.ServerAddress)
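// intersect orphan fids across replicas: the first replica of a volume seeds the set, and for later replicas a fid is kept only if already present, so a fid counts as orphaned only when every replica reports it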
for dataNodeId, volumeIdToVInfo := range dataNodeVolumeIdToVInfo {
for volumeId, vinfo := range volumeIdToVInfo {
inUseCount, orphanFileIds, orphanDataSize, checkErr := c.oneVolumeFileIdsSubtractFilerFileIds(tempFolder, dataNodeId, volumeId, writer, verbose)
if checkErr != nil {
return fmt.Errorf("failed to collect file ids from volume %d on %s: %v", volumeId, vinfo.server, checkErr)
}
isSeveralReplicas[volumeId] = false
if _, found := volumeIdOrphanFileIds[volumeId]; !found {
volumeIdOrphanFileIds[volumeId] = make(map[string]bool)
} else {
isSeveralReplicas[volumeId] = true
}
for _, fid := range orphanFileIds {
if isSeveralReplicas[volumeId] {
if _, found := volumeIdOrphanFileIds[volumeId][fid]; !found {
continue
}
}
volumeIdOrphanFileIds[volumeId][fid] = isSeveralReplicas[volumeId]
}
totalInUseCount += inUseCount
totalOrphanChunkCount += uint64(len(orphanFileIds))
totalOrphanDataSize += orphanDataSize
if verbose {
for _, fid := range orphanFileIds {
fmt.Fprintf(writer, "%s\n", fid)
}
}
isEcVolumeReplicas[volumeId] = vinfo.isEcVolume
if isReadOnly, found := isReadOnlyReplicas[volumeId]; !(found && isReadOnly) {
isReadOnlyReplicas[volumeId] = vinfo.isReadOnly
}
serverReplicas[volumeId] = append(serverReplicas[volumeId], vinfo.server)
}
if applyPurging && len(orphanFileIds) > 0 {
for volumeId, orphanReplicaFileIds := range volumeIdOrphanFileIds {
if !(applyPurging && len(orphanReplicaFileIds) > 0) {
continue
}
orphanFileIds := []string{}
for fid, foundInAllReplicas := range orphanReplicaFileIds {
if !isSeveralReplicas[volumeId] || (isSeveralReplicas[volumeId] && foundInAllReplicas) {
orphanFileIds = append(orphanFileIds, fid)
}
}
if !(len(orphanFileIds) > 0) {
continue
}
if verbose {
fmt.Fprintf(writer, "purging process for volume %d", volumeId)
}
if vinfo.isEcVolume {
if isEcVolumeReplicas[volumeId] {
fmt.Fprintf(writer, "skip purging for Erasure Coded volume %d.\n", volumeId)
continue
}
for _, server := range serverReplicas[volumeId] {
needleVID := needle.VolumeId(volumeId)
needleVID := needle.VolumeId(volumeId)
if isReadOnlyReplicas[volumeId] {
err := markVolumeWritable(c.env.option.GrpcDialOption, needleVID, server, true)
if err != nil {
return fmt.Errorf("mark volume %d read/write: %v", volumeId, err)
}
if vinfo.isReadOnly {
err := markVolumeWritable(c.env.option.GrpcDialOption, needleVID, vinfo.server, true)
if err != nil {
return fmt.Errorf("mark volume %d read/write: %v", volumeId, err)
fmt.Fprintf(writer, "temporarily marked %d on server %v writable for forced purge\n", volumeId, server)
defer markVolumeWritable(c.env.option.GrpcDialOption, needleVID, server, false)
fmt.Fprintf(writer, "marked %d on server %v writable for forced purge\n", volumeId, server)
}
if verbose {
fmt.Fprintf(writer, "purging files from volume %d\n", volumeId)
}
fmt.Fprintf(writer, "temporarily marked %d on server %v writable for forced purge\n", volumeId, vinfo.server)
defer markVolumeWritable(c.env.option.GrpcDialOption, needleVID, vinfo.server, false)
}
fmt.Fprintf(writer, "marked %d on server %v writable for forced purge\n", volumeId, vinfo.server)
if verbose {
fmt.Fprintf(writer, "purging files from volume %d\n", volumeId)
}
if err := c.purgeFileIdsForOneVolume(volumeId, orphanFileIds, writer); err != nil {
return fmt.Errorf("purging volume %d: %v", volumeId, err)
if err := c.purgeFileIdsForOneVolume(volumeId, orphanFileIds, writer); err != nil {
return fmt.Errorf("purging volume %d: %v", volumeId, err)
}
}
}
}
@@ -290,7 +349,7 @@ func (c *commandVolumeFsck) findExtraChunksInVolumeServers(volumeIdToVInfo map[u
return nil
}
func (c *commandVolumeFsck) collectOneVolumeFileIds(tempFolder string, volumeId uint32, vinfo VInfo, verbose bool, writer io.Writer) error {
func (c *commandVolumeFsck) collectOneVolumeFileIds(tempFolder string, dataNodeId string, volumeId uint32, vinfo VInfo, verbose bool, writer io.Writer) error {
if verbose {
fmt.Fprintf(writer, "collecting volume %d file ids from %s ...\n", volumeId, vinfo.server)
@@ -316,7 +375,7 @@ func (c *commandVolumeFsck) collectOneVolumeFileIds(tempFolder string, volumeId
return fmt.Errorf("failed to start copying volume %d%s: %v", volumeId, ext, err)
}
err = writeToFile(copyFileClient, getVolumeFileIdFile(tempFolder, volumeId))
err = writeToFile(copyFileClient, getVolumeFileIdFile(tempFolder, dataNodeId, volumeId))
if err != nil {
return fmt.Errorf("failed to copy %d%s from %s: %v", volumeId, ext, vinfo.server, err)
}
@@ -327,19 +386,21 @@ func (c *commandVolumeFsck) collectOneVolumeFileIds(tempFolder string, volumeId
}
func (c *commandVolumeFsck) collectFilerFileIds(volumeIdToServer map[uint32]VInfo, tempFolder string, writer io.Writer, verbose bool) error {
func (c *commandVolumeFsck) collectFilerFileIds(dataNodeVolumeIdToVInfo map[string]map[uint32]VInfo, tempFolder string, writer io.Writer, verbose bool) error {
if verbose {
fmt.Fprintf(writer, "collecting file ids from filer ...\n")
}
files := make(map[uint32]*os.File)
for vid := range volumeIdToServer {
dst, openErr := os.OpenFile(getFilerFileIdFile(tempFolder, vid), os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644)
if openErr != nil {
return fmt.Errorf("failed to create file %s: %v", getFilerFileIdFile(tempFolder, vid), openErr)
for _, volumeIdToServer := range dataNodeVolumeIdToVInfo {
for vid := range volumeIdToServer {
dst, openErr := os.OpenFile(getFilerFileIdFile(tempFolder, vid), os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644)
if openErr != nil {
return fmt.Errorf("failed to create file %s: %v", getFilerFileIdFile(tempFolder, vid), openErr)
}
files[vid] = dst
}
files[vid] = dst
}
defer func() {
for _, f := range files {
@@ -377,16 +438,16 @@ func (c *commandVolumeFsck) collectFilerFileIds(volumeIdToServer map[uint32]VInf
})
}
func (c *commandVolumeFsck) oneVolumeFileIdsCheckOneVolume(tempFolder string, volumeId uint32, writer io.Writer, verbose bool, applyPurging bool) (err error) {
func (c *commandVolumeFsck) oneVolumeFileIdsCheckOneVolume(tempFolder string, dataNodeId string, volumeId uint32, writer io.Writer, verbose bool, applyPurging bool) (err error) {
if verbose {
fmt.Fprintf(writer, "find missing file chunks in volume %d ...\n", volumeId)
fmt.Fprintf(writer, "find missing file chunks in dataNodeId %s volume %d ...\n", dataNodeId, volumeId)
}
db := needle_map.NewMemDb()
defer db.Close()
if err = db.LoadFromIdx(getVolumeFileIdFile(tempFolder, volumeId)); err != nil {
if err = db.LoadFromIdx(getVolumeFileIdFile(tempFolder, dataNodeId, volumeId)); err != nil {
return
}
@@ -473,12 +534,12 @@ func (c *commandVolumeFsck) httpDelete(path util.FullPath, verbose bool) {
}
}
func (c *commandVolumeFsck) oneVolumeFileIdsSubtractFilerFileIds(tempFolder string, volumeId uint32, writer io.Writer, verbose bool) (inUseCount uint64, orphanFileIds []string, orphanDataSize uint64, err error) {
func (c *commandVolumeFsck) oneVolumeFileIdsSubtractFilerFileIds(tempFolder string, dataNodeId string, volumeId uint32, writer io.Writer, verbose bool) (inUseCount uint64, orphanFileIds []string, orphanDataSize uint64, err error) {
db := needle_map.NewMemDb()
defer db.Close()
if err = db.LoadFromIdx(getVolumeFileIdFile(tempFolder, volumeId)); err != nil {
if err = db.LoadFromIdx(getVolumeFileIdFile(tempFolder, dataNodeId, volumeId)); err != nil {
return
}
@@ -509,8 +570,8 @@ func (c *commandVolumeFsck) oneVolumeFileIdsSubtractFilerFileIds(tempFolder stri
if orphanFileCount > 0 {
pct := float64(orphanFileCount*100) / (float64(orphanFileCount + inUseCount))
fmt.Fprintf(writer, "volume:%d\tentries:%d\torphan:%d\t%.2f%%\t%dB\n",
volumeId, orphanFileCount+inUseCount, orphanFileCount, pct, orphanDataSize)
fmt.Fprintf(writer, "dataNode:%s\tvolume:%d\tentries:%d\torphan:%d\t%.2f%%\t%dB\n",
dataNodeId, volumeId, orphanFileCount+inUseCount, orphanFileCount, pct, orphanDataSize)
}
return
@@ -524,13 +585,13 @@ type VInfo struct {
isReadOnly bool
}
func (c *commandVolumeFsck) collectVolumeIds(commandEnv *CommandEnv, verbose bool, writer io.Writer) (volumeIdToServer map[uint32]VInfo, err error) {
func (c *commandVolumeFsck) collectVolumeIds(commandEnv *CommandEnv, verbose bool, writer io.Writer) (volumeIdToServer map[string]map[uint32]VInfo, err error) {
if verbose {
fmt.Fprintf(writer, "collecting volume id and locations from master ...\n")
}
volumeIdToServer = make(map[uint32]VInfo)
volumeIdToServer = make(map[string]map[uint32]VInfo)
// collect topology information
topologyInfo, _, err := collectTopologyInfo(commandEnv, 0)
if err != nil {
@@ -539,8 +600,10 @@ func (c *commandVolumeFsck) collectVolumeIds(commandEnv *CommandEnv, verbose boo
eachDataNode(topologyInfo, func(dc string, rack RackId, t *master_pb.DataNodeInfo) {
for _, diskInfo := range t.DiskInfos {
dataNodeId := t.GetId()
volumeIdToServer[dataNodeId] = make(map[uint32]VInfo)
for _, vi := range diskInfo.VolumeInfos {
volumeIdToServer[vi.Id] = VInfo{
volumeIdToServer[dataNodeId][vi.Id] = VInfo{
server: pb.NewServerAddressFromDataNode(t),
collection: vi.Collection,
isEcVolume: false,
@@ -548,7 +611,7 @@ func (c *commandVolumeFsck) collectVolumeIds(commandEnv *CommandEnv, verbose boo
}
}
for _, ecShardInfo := range diskInfo.EcShardInfos {
volumeIdToServer[ecShardInfo.Id] = VInfo{
volumeIdToServer[dataNodeId][ecShardInfo.Id] = VInfo{
server: pb.NewServerAddressFromDataNode(t),
collection: ecShardInfo.Collection,
isEcVolume: true,
@@ -600,8 +663,8 @@ func (c *commandVolumeFsck) purgeFileIdsForOneVolume(volumeId uint32, fileIds []
return
}
func getVolumeFileIdFile(tempFolder string, vid uint32) string {
return filepath.Join(tempFolder, fmt.Sprintf("%d.idx", vid))
func getVolumeFileIdFile(tempFolder string, dataNodeId string, vid uint32) string {
return filepath.Join(tempFolder, fmt.Sprintf("%s_%d.idx", dataNodeId, vid))
}
func getFilerFileIdFile(tempFolder string, vid uint32) string {

View File

@@ -7,6 +7,7 @@ import (
"github.com/chrislusf/seaweedfs/weed/pb/master_pb"
"github.com/chrislusf/seaweedfs/weed/storage/erasure_coding"
"golang.org/x/exp/slices"
"path/filepath"
"io"
)
@@ -16,6 +17,9 @@ func init() {
}
type commandVolumeList struct {
collectionPattern *string
readonly *bool
volumeId *uint64
}
func (c *commandVolumeList) Name() string {
@@ -34,6 +38,10 @@ func (c *commandVolumeList) Do(args []string, commandEnv *CommandEnv, writer io.
volumeListCommand := flag.NewFlagSet(c.Name(), flag.ContinueOnError)
verbosityLevel := volumeListCommand.Int("v", 5, "verbose mode: 0, 1, 2, 3, 4, 5")
c.collectionPattern = volumeListCommand.String("collectionPattern", "", "match with wildcard characters '*' and '?'")
c.readonly = volumeListCommand.Bool("readonly", false, "show only readonly")
c.volumeId = volumeListCommand.Uint64("volumeId", 0, "show only volume id")
if err = volumeListCommand.Parse(args); err != nil {
return nil
}
@@ -44,7 +52,7 @@ func (c *commandVolumeList) Do(args []string, commandEnv *CommandEnv, writer io.
return err
}
writeTopologyInfo(writer, topologyInfo, volumeSizeLimitMb, *verbosityLevel)
c.writeTopologyInfo(writer, topologyInfo, volumeSizeLimitMb, *verbosityLevel)
return nil
}
@@ -65,53 +73,71 @@ func diskInfoToString(diskInfo *master_pb.DiskInfo) string {
return buf.String()
}
func writeTopologyInfo(writer io.Writer, t *master_pb.TopologyInfo, volumeSizeLimitMb uint64, verbosityLevel int) statistics {
func (c *commandVolumeList) writeTopologyInfo(writer io.Writer, t *master_pb.TopologyInfo, volumeSizeLimitMb uint64, verbosityLevel int) statistics {
output(verbosityLevel >= 0, writer, "Topology volumeSizeLimit:%d MB%s\n", volumeSizeLimitMb, diskInfosToString(t.DiskInfos))
slices.SortFunc(t.DataCenterInfos, func(a, b *master_pb.DataCenterInfo) bool {
return a.Id < b.Id
})
var s statistics
for _, dc := range t.DataCenterInfos {
s = s.plus(writeDataCenterInfo(writer, dc, verbosityLevel))
s = s.plus(c.writeDataCenterInfo(writer, dc, verbosityLevel))
}
output(verbosityLevel >= 0, writer, "%+v \n", s)
return s
}
func writeDataCenterInfo(writer io.Writer, t *master_pb.DataCenterInfo, verbosityLevel int) statistics {
func (c *commandVolumeList) writeDataCenterInfo(writer io.Writer, t *master_pb.DataCenterInfo, verbosityLevel int) statistics {
output(verbosityLevel >= 1, writer, " DataCenter %s%s\n", t.Id, diskInfosToString(t.DiskInfos))
var s statistics
slices.SortFunc(t.RackInfos, func(a, b *master_pb.RackInfo) bool {
return a.Id < b.Id
})
for _, r := range t.RackInfos {
s = s.plus(writeRackInfo(writer, r, verbosityLevel))
s = s.plus(c.writeRackInfo(writer, r, verbosityLevel))
}
output(verbosityLevel >= 1, writer, " DataCenter %s %+v \n", t.Id, s)
return s
}
func writeRackInfo(writer io.Writer, t *master_pb.RackInfo, verbosityLevel int) statistics {
func (c *commandVolumeList) writeRackInfo(writer io.Writer, t *master_pb.RackInfo, verbosityLevel int) statistics {
output(verbosityLevel >= 2, writer, " Rack %s%s\n", t.Id, diskInfosToString(t.DiskInfos))
var s statistics
slices.SortFunc(t.DataNodeInfos, func(a, b *master_pb.DataNodeInfo) bool {
return a.Id < b.Id
})
for _, dn := range t.DataNodeInfos {
s = s.plus(writeDataNodeInfo(writer, dn, verbosityLevel))
s = s.plus(c.writeDataNodeInfo(writer, dn, verbosityLevel))
}
output(verbosityLevel >= 2, writer, " Rack %s %+v \n", t.Id, s)
return s
}
func writeDataNodeInfo(writer io.Writer, t *master_pb.DataNodeInfo, verbosityLevel int) statistics {
func (c *commandVolumeList) writeDataNodeInfo(writer io.Writer, t *master_pb.DataNodeInfo, verbosityLevel int) statistics {
output(verbosityLevel >= 3, writer, " DataNode %s%s\n", t.Id, diskInfosToString(t.DiskInfos))
var s statistics
for _, diskInfo := range t.DiskInfos {
s = s.plus(writeDiskInfo(writer, diskInfo, verbosityLevel))
s = s.plus(c.writeDiskInfo(writer, diskInfo, verbosityLevel))
}
output(verbosityLevel >= 3, writer, " DataNode %s %+v \n", t.Id, s)
return s
}
func writeDiskInfo(writer io.Writer, t *master_pb.DiskInfo, verbosityLevel int) statistics {
func (c *commandVolumeList) isNotMatchDiskInfo(readOnly bool, collection string, volumeId uint32) bool {
if *c.readonly && !readOnly {
return true
}
if *c.collectionPattern != "" {
if matched, _ := filepath.Match(*c.collectionPattern, collection); !matched {
return true
}
}
if *c.volumeId > 0 && *c.volumeId != uint64(volumeId) {
return true
}
return false
}
func (c *commandVolumeList) writeDiskInfo(writer io.Writer, t *master_pb.DiskInfo, verbosityLevel int) statistics {
var s statistics
diskType := t.Type
if diskType == "" {
@@ -122,9 +148,15 @@ func writeDiskInfo(writer io.Writer, t *master_pb.DiskInfo, verbosityLevel int)
return a.Id < b.Id
})
for _, vi := range t.VolumeInfos {
if c.isNotMatchDiskInfo(vi.ReadOnly, vi.Collection, vi.Id) {
continue
}
s = s.plus(writeVolumeInformationMessage(writer, vi, verbosityLevel))
}
for _, ecShardInfo := range t.EcShardInfos {
if c.isNotMatchDiskInfo(false, ecShardInfo.Collection, ecShardInfo.Id) {
continue
}
output(verbosityLevel >= 5, writer, " ec volume id:%v collection:%v shards:%v\n", ecShardInfo.Id, ecShardInfo.Collection, erasure_coding.ShardBits(ecShardInfo.EcIndexBits).ShardIds())
}
output(verbosityLevel >= 4, writer, " Disk %s %+v \n", diskType, s)

View File

@@ -434,10 +434,13 @@ func (s *Store) UnmountVolume(i needle.VolumeId) error {
}
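// only a successful unload reports success now; ErrVolumeNotFound just tries the next location instead of being treated as an unmount (DeleteVolume below follows the same pattern, and additionally logs other errors)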
for _, location := range s.Locations {
if err := location.UnloadVolume(i); err == nil || err == ErrVolumeNotFound {
err := location.UnloadVolume(i)
if err == nil {
glog.V(0).Infof("UnmountVolume %d", i)
s.DeletedVolumesChan <- message
return nil
} else if err == ErrVolumeNotFound {
continue
}
}
@@ -458,10 +461,13 @@ func (s *Store) DeleteVolume(i needle.VolumeId) error {
DiskType: string(v.location.DiskType),
}
for _, location := range s.Locations {
if err := location.DeleteVolume(i); err == nil || err == ErrVolumeNotFound {
err := location.DeleteVolume(i)
if err == nil {
glog.V(0).Infof("DeleteVolume %d", i)
s.DeletedVolumesChan <- message
return nil
} else if err == ErrVolumeNotFound {
continue
} else {
glog.Errorf("DeleteVolume %d: %v", i, err)
}