* admin: add plugin lock coordination * shell: allow bypassing lock checks * plugin worker: add admin script handler * mini: include admin_script in plugin defaults * admin script UI: drop name and enlarge text * admin script: add default script * admin_script: make run interval configurable * plugin: gate other jobs during admin_script runs * plugin: use last completed admin_script run * admin: backfill plugin config defaults * templ Co-Authored-By: Copilot <223556219+Copilot@users.noreply.github.com> * comparable to default version Co-Authored-By: Copilot <223556219+Copilot@users.noreply.github.com> * default to run Co-Authored-By: Copilot <223556219+Copilot@users.noreply.github.com> * format Co-Authored-By: Copilot <223556219+Copilot@users.noreply.github.com> * shell: respect pre-set noLock for fix.replication * shell: add force no-lock mode for admin scripts * volume balance worker already exists Co-Authored-By: Copilot <223556219+Copilot@users.noreply.github.com> * admin: expose scheduler status JSON * shell: add sleep command * shell: restrict sleep syntax * Revert "shell: respect pre-set noLock for fix.replication" This reverts commit 2b14e8b82602a740d3a473c085e3b3a14f1ddbb3. * templ Co-Authored-By: Copilot <223556219+Copilot@users.noreply.github.com> * fix import Co-Authored-By: Copilot <223556219+Copilot@users.noreply.github.com> * less logs Co-Authored-By: Copilot <223556219+Copilot@users.noreply.github.com> * Reduce master client logs on canceled contexts * Update mini default job type count --------- Co-authored-by: Copilot <223556219+Copilot@users.noreply.github.com>
80 lines
2.6 KiB
Go
80 lines
2.6 KiB
Go
package shell
|
|
|
|
import (
|
|
"flag"
|
|
"io"
|
|
|
|
"github.com/seaweedfs/seaweedfs/weed/glog"
|
|
"github.com/seaweedfs/seaweedfs/weed/storage/types"
|
|
)
|
|
|
|
func init() {
|
|
Commands = append(Commands, &commandEcBalance{})
|
|
}
|
|
|
|
// commandEcBalance implements the "ec.balance" shell command, which balances
// erasure-coded shards across racks and volume servers. It holds no state;
// all options are parsed from the command arguments in Do.
type commandEcBalance struct {
}
|
|
|
|
// Name returns the command name used to invoke this command from the shell.
func (c *commandEcBalance) Name() string {
	return "ec.balance"
}
|
|
|
|
func (c *commandEcBalance) Help() string {
|
|
return `balance all ec shards among all racks and volume servers
|
|
|
|
ec.balance [-c EACH_COLLECTION|<collection_name>] [-apply] [-dataCenter <data_center>] [-shardReplicaPlacement <replica_placement>] [-diskType <disk_type>]
|
|
|
|
Options:
|
|
-diskType: the disk type for EC shards (hdd, ssd, or empty for default hdd)
|
|
|
|
Algorithm:
|
|
` + ecBalanceAlgorithmDescription
|
|
}
|
|
|
|
// HasTag reports whether this command carries the given tag; ec.balance has none.
func (c *commandEcBalance) HasTag(CommandTag) bool {
	return false
}
|
|
|
|
func (c *commandEcBalance) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) {
|
|
balanceCommand := flag.NewFlagSet(c.Name(), flag.ContinueOnError)
|
|
collection := balanceCommand.String("collection", "EACH_COLLECTION", "collection name, or \"EACH_COLLECTION\" for each collection")
|
|
dc := balanceCommand.String("dataCenter", "", "only apply the balancing for this dataCenter")
|
|
shardReplicaPlacement := balanceCommand.String("shardReplicaPlacement", "", "replica placement for EC shards, or master default if empty")
|
|
diskTypeStr := balanceCommand.String("diskType", "", "the disk type for EC shards (hdd, ssd, or empty for default hdd)")
|
|
maxParallelization := balanceCommand.Int("maxParallelization", DefaultMaxParallelization, "run up to X tasks in parallel, whenever possible")
|
|
applyBalancing := balanceCommand.Bool("apply", false, "apply the balancing plan")
|
|
// TODO: remove this alias
|
|
applyBalancingAlias := balanceCommand.Bool("force", false, "apply the balancing plan (alias for -apply)")
|
|
|
|
if err = balanceCommand.Parse(args); err != nil {
|
|
return nil
|
|
}
|
|
|
|
handleDeprecatedForceFlag(writer, balanceCommand, applyBalancingAlias, applyBalancing)
|
|
infoAboutSimulationMode(writer, *applyBalancing, "-apply")
|
|
|
|
if err = commandEnv.confirmIsLocked(args); err != nil {
|
|
return
|
|
}
|
|
|
|
var collections []string
|
|
if *collection == "EACH_COLLECTION" {
|
|
collections, err = ListCollectionNames(commandEnv, false, true)
|
|
if err != nil {
|
|
return err
|
|
}
|
|
} else {
|
|
collections = append(collections, *collection)
|
|
}
|
|
glog.V(1).Infof("balanceEcVolumes collections %+v\n", len(collections))
|
|
|
|
rp, err := parseReplicaPlacementArg(commandEnv, *shardReplicaPlacement)
|
|
if err != nil {
|
|
return err
|
|
}
|
|
|
|
diskType := types.ToDiskType(*diskTypeStr)
|
|
|
|
return EcBalance(commandEnv, collections, *dc, rp, diskType, *maxParallelization, *applyBalancing)
|
|
}
|