* fix(s3): include static identities in listing operations Static identities loaded from -s3.config file were only stored in the S3 API server's in-memory state. Listing operations (s3.configure shell command, aws iam list-users) queried the credential manager which only returned dynamic identities from the backend store. Register static identities with the credential manager after loading so they are included in LoadConfiguration and ListUsers results, and filtered out before SaveConfiguration to avoid persisting them to the dynamic store. Fixes https://github.com/seaweedfs/seaweedfs/discussions/8896 * fix: avoid mutating caller's config and defensive copies - SaveConfiguration: use shallow struct copy instead of mutating the caller's config.Identities field - SetStaticIdentities: skip nil entries to avoid panics - GetStaticIdentities: defensively copy PolicyNames slice to avoid aliasing the original * fix: filter nil static identities and sync on config reload - SetStaticIdentities: filter nil entries from the stored slice (not just from staticNames) to prevent panics in LoadConfiguration/ListUsers - Extract updateCredentialManagerStaticIdentities helper and call it from both startup and the grace.OnReload handler so the credential manager's static snapshot stays current after config file reloads * fix: add mutex for static identity fields and fix ListUsers for store callers - Add sync.RWMutex to protect staticIdentities/staticNames against concurrent reads during config reload - Revert CredentialManager.ListUsers to return only store users, since internal callers (e.g. 
DeletePolicy) look up each user in the store and fail on non-existent static entries - Merge static usernames in the filer gRPC ListUsers handler instead, via the new GetStaticUsernames method - Fix CI: TestIAMPolicyManagement/managed_policy_crud_lifecycle was failing because DeletePolicy iterated static users that don't exist in the store * fix: show static identities in admin UI and weed shell The admin UI and weed shell s3.configure command query the filer's credential manager via gRPC, which is a separate instance from the S3 server's credential manager. Static identities were only registered on the S3 server's credential manager, so they never appeared in the filer's responses. - Add CredentialManager.LoadS3ConfigFile to parse a static S3 config file and register its identities - Add FilerOptions.s3ConfigFile so the filer can load the same static config that the S3 server uses - Wire s3ConfigFile through in weed mini and weed server modes - Merge static usernames in filer gRPC ListUsers handler - Add CredentialManager.GetStaticUsernames helper - Add sync.RWMutex to protect concurrent access to static identity fields - Avoid importing weed/filer from weed/credential (which pulled in filer store init() registrations and broke test isolation) - Add docker/compose/s3_static_users_example.json * fix(admin): make static users read-only in admin UI Static users loaded from the -s3.config file should not be editable or deletable through the admin UI since they are managed via the config file. - Add IsStatic field to ObjectStoreUser, set from credential manager - Hide edit, delete, and access key buttons for static users in the users table template - Show a "static" badge next to static user names - Return 403 Forbidden from UpdateUser and DeleteUser API handlers when the target user is a static identity * fix(admin): show details for static users GetObjectStoreUserDetails called credentialManager.GetUser which only queries the dynamic store. 
For static users this returned ErrUserNotFound. Fall back to GetStaticIdentity when the store lookup fails. * fix(admin): load static S3 identities in admin server The admin server has its own credential manager (gRPC store) which is a separate instance from the S3 server's and filer's. It had no static identity data, so IsStaticIdentity returned false (edit/delete buttons shown) and GetStaticIdentity returned nil (details page failed). Pass the -s3.config file path through to the admin server and call LoadS3ConfigFile on its credential manager, matching the approach used for the filer. * fix: use protobuf is_static field instead of passing config file path The previous approach passed -s3.config file path to every component (filer, admin). This is wrong because the admin server should not need to know about S3 config files. Instead, add an is_static field to the Identity protobuf message. The field is set when static identities are serialized (in GetStaticIdentities and LoadS3ConfigFile). Any gRPC client that loads configuration via GetConfiguration automatically sees which identities are static, without needing the config file. - Add is_static field (tag 8) to iam_pb.Identity proto message - Set IsStatic=true in GetStaticIdentities and LoadS3ConfigFile - Admin GetObjectStoreUsers reads identity.IsStatic from proto - Admin IsStaticUser helper loads config via gRPC to check the flag - Filer GetUser gRPC handler falls back to GetStaticIdentity - Remove s3ConfigFile from AdminOptions and NewAdminServer signature
533 lines
24 KiB
Go
533 lines
24 KiB
Go
package command
|
|
|
|
import (
|
|
"context"
|
|
"crypto/tls"
|
|
"crypto/x509"
|
|
"fmt"
|
|
"net"
|
|
"net/http"
|
|
"os"
|
|
"runtime"
|
|
"sort"
|
|
"strings"
|
|
"time"
|
|
|
|
"github.com/spf13/viper"
|
|
"google.golang.org/grpc/credentials/tls/certprovider"
|
|
"google.golang.org/grpc/credentials/tls/certprovider/pemfile"
|
|
"google.golang.org/grpc/reflection"
|
|
|
|
"github.com/seaweedfs/seaweedfs/weed/credential"
|
|
_ "github.com/seaweedfs/seaweedfs/weed/credential/filer_etc"
|
|
_ "github.com/seaweedfs/seaweedfs/weed/credential/memory"
|
|
_ "github.com/seaweedfs/seaweedfs/weed/credential/postgres"
|
|
"github.com/seaweedfs/seaweedfs/weed/filer"
|
|
"github.com/seaweedfs/seaweedfs/weed/glog"
|
|
"github.com/seaweedfs/seaweedfs/weed/pb"
|
|
"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
|
|
"github.com/seaweedfs/seaweedfs/weed/pb/iam_pb"
|
|
"github.com/seaweedfs/seaweedfs/weed/security"
|
|
weed_server "github.com/seaweedfs/seaweedfs/weed/server"
|
|
stats_collect "github.com/seaweedfs/seaweedfs/weed/stats"
|
|
"github.com/seaweedfs/seaweedfs/weed/util"
|
|
"github.com/seaweedfs/seaweedfs/weed/util/version"
|
|
)
|
|
|
|
var (
	// f holds the filer server's flag-backed options (populated in init).
	f FilerOptions

	// Optional gateways that can run embedded in the same filer process;
	// each pairs an enable flag with its option set.
	filerStartS3       *bool
	filerS3Options     S3Options
	filerStartWebDav   *bool
	filerWebDavOptions WebDavOption
	filerStartIam      *bool
	filerIamOptions    IamOptions
	filerStartSftp     *bool
	filerSftpOptions   SftpOptions
)
|
|
|
|
// FilerOptions holds all command-line configurable settings for the filer
// server. The pointer fields point into flag storage registered in init();
// non-flag fields are populated at startup.
type FilerOptions struct {
	masters                   *pb.ServerDiscovery
	mastersString             *string
	ip                        *string
	bindIp                    *string
	port                      *int
	portGrpc                  *int
	publicPort                *int // read-only public HTTP port; 0 disables it
	filerGroup                *string
	collection                *string
	defaultReplicaPlacement   *string
	disableDirListing         *bool
	maxMB                     *int
	dirListingLimit           *int
	dataCenter                *string
	rack                      *string
	enableNotification        *bool // NOTE(review): no flag registers this in init() here — presumably wired elsewhere; verify
	disableHttp               *bool
	cipher                    *bool
	metricsHttpPort           *int
	metricsHttpIp             *string
	saveToFilerLimit          *int
	defaultLevelDbDirectory   *string
	concurrentUploadLimitMB   *int
	concurrentFileUploadLimit *int
	debug                     *bool
	debugPort                 *int
	localSocket               *string
	showUIDirectoryDelete     *bool
	downloadMaxMBps           *int
	diskType                  *string
	allowedOrigins            *string
	exposeDirectoryData       *bool
	tusBasePath               *string
	certProvider              certprovider.Provider // auto-refreshing TLS certificate source, set in startFiler
	s3ConfigFile              *string               // optional path to static S3 identity config
}
|
|
|
|
func init() {
|
|
cmdFiler.Run = runFiler // break init cycle
|
|
f.mastersString = cmdFiler.Flag.String("master", "localhost:9333", "comma-separated master servers or a single DNS SRV record of at least 1 master server, prepended with dnssrv+")
|
|
f.filerGroup = cmdFiler.Flag.String("filerGroup", "", "share metadata with other filers in the same filerGroup")
|
|
f.collection = cmdFiler.Flag.String("collection", "", "all data will be stored in this default collection")
|
|
f.ip = cmdFiler.Flag.String("ip", util.DetectedHostAddress(), "filer server http listen ip address")
|
|
f.bindIp = cmdFiler.Flag.String("ip.bind", "", "ip address to bind to. If empty, default to same as -ip option.")
|
|
f.port = cmdFiler.Flag.Int("port", 8888, "filer server http listen port")
|
|
f.portGrpc = cmdFiler.Flag.Int("port.grpc", 0, "filer server grpc listen port")
|
|
f.publicPort = cmdFiler.Flag.Int("port.readonly", 0, "readonly port opened to public")
|
|
f.defaultReplicaPlacement = cmdFiler.Flag.String("defaultReplicaPlacement", "", "default replication type. If not specified, use master setting.")
|
|
f.disableDirListing = cmdFiler.Flag.Bool("disableDirListing", false, "turn off directory listing")
|
|
f.maxMB = cmdFiler.Flag.Int("maxMB", 4, "split files larger than the limit")
|
|
f.dirListingLimit = cmdFiler.Flag.Int("dirListLimit", 100000, "limit sub dir listing size")
|
|
f.dataCenter = cmdFiler.Flag.String("dataCenter", "", "prefer to read and write to volumes in this data center")
|
|
f.rack = cmdFiler.Flag.String("rack", "", "prefer to write to volumes in this rack")
|
|
f.disableHttp = cmdFiler.Flag.Bool("disableHttp", false, "disable http request, only gRpc operations are allowed")
|
|
f.cipher = cmdFiler.Flag.Bool("encryptVolumeData", false, "encrypt data on volume servers")
|
|
f.metricsHttpPort = cmdFiler.Flag.Int("metricsPort", 0, "Prometheus metrics listen port")
|
|
f.metricsHttpIp = cmdFiler.Flag.String("metricsIp", "", "metrics listen ip. If empty, default to same as -ip.bind option.")
|
|
f.saveToFilerLimit = cmdFiler.Flag.Int("saveToFilerLimit", 0, "files smaller than this limit will be saved in filer store")
|
|
f.defaultLevelDbDirectory = cmdFiler.Flag.String("defaultStoreDir", ".", "if filer.toml is empty, use an embedded filer store in the directory")
|
|
f.concurrentUploadLimitMB = cmdFiler.Flag.Int("concurrentUploadLimitMB", 0, "limit total concurrent upload size, 0 means unlimited")
|
|
f.concurrentFileUploadLimit = cmdFiler.Flag.Int("concurrentFileUploadLimit", 0, "limit number of concurrent file uploads, 0 means unlimited")
|
|
f.debug = cmdFiler.Flag.Bool("debug", false, "serves runtime profiling data, e.g., http://localhost:<debug.port>/debug/pprof/goroutine?debug=2")
|
|
f.debugPort = cmdFiler.Flag.Int("debug.port", 6060, "http port for debugging")
|
|
f.localSocket = cmdFiler.Flag.String("localSocket", "", "default to /tmp/seaweedfs-filer-<port>.sock")
|
|
f.showUIDirectoryDelete = cmdFiler.Flag.Bool("ui.deleteDir", true, "enable filer UI show delete directory button")
|
|
f.downloadMaxMBps = cmdFiler.Flag.Int("downloadMaxMBps", 0, "download max speed for each download request, in MB per second")
|
|
f.diskType = cmdFiler.Flag.String("disk", "", "[hdd|ssd|<tag>] hard drive or solid state drive or any tag")
|
|
f.allowedOrigins = cmdFiler.Flag.String("allowedOrigins", "*", "comma separated list of allowed origins")
|
|
f.exposeDirectoryData = cmdFiler.Flag.Bool("exposeDirectoryData", true, "whether to return directory metadata and content in Filer UI")
|
|
f.tusBasePath = cmdFiler.Flag.String("tusBasePath", "/.tus", "TUS resumable upload endpoint base path (e.g., /.tus)")
|
|
|
|
// start s3 on filer
|
|
filerStartS3 = cmdFiler.Flag.Bool("s3", false, "whether to start S3 gateway")
|
|
filerS3Options.port = cmdFiler.Flag.Int("s3.port", 8333, "s3 server http listen port")
|
|
filerS3Options.portHttps = cmdFiler.Flag.Int("s3.port.https", 0, "s3 server https listen port")
|
|
filerS3Options.portGrpc = cmdFiler.Flag.Int("s3.port.grpc", 0, "s3 server grpc listen port")
|
|
filerS3Options.domainName = cmdFiler.Flag.String("s3.domainName", "", "suffix of the host name in comma separated list, {bucket}.{domainName}")
|
|
filerS3Options.allowedOrigins = cmdFiler.Flag.String("s3.allowedOrigins", "*", "comma separated list of allowed origins")
|
|
filerS3Options.dataCenter = cmdFiler.Flag.String("s3.dataCenter", "", "prefer to read and write to volumes in this data center")
|
|
filerS3Options.tlsPrivateKey = cmdFiler.Flag.String("s3.key.file", "", "path to the TLS private key file")
|
|
filerS3Options.tlsCertificate = cmdFiler.Flag.String("s3.cert.file", "", "path to the TLS certificate file")
|
|
filerS3Options.config = cmdFiler.Flag.String("s3.config", "", "path to the config file")
|
|
filerS3Options.iamConfig = cmdFiler.Flag.String("s3.iam.config", "", "path to the advanced IAM config file")
|
|
filerS3Options.auditLogConfig = cmdFiler.Flag.String("s3.auditLogConfig", "", "path to the audit log config file")
|
|
filerS3Options.metricsHttpPort = cmdFiler.Flag.Int("s3.metricsPort", 0, "Prometheus metrics listen port")
|
|
filerS3Options.metricsHttpIp = cmdFiler.Flag.String("s3.metricsIp", "", "metrics listen ip. If empty, default to same as -s3.ip.bind option.")
|
|
cmdFiler.Flag.Bool("s3.allowEmptyFolder", true, "deprecated, ignored. Empty folder cleanup is now automatic.")
|
|
filerS3Options.allowDeleteBucketNotEmpty = cmdFiler.Flag.Bool("s3.allowDeleteBucketNotEmpty", true, "allow recursive deleting all entries along with bucket")
|
|
filerS3Options.localSocket = cmdFiler.Flag.String("s3.localSocket", "", "default to /tmp/seaweedfs-s3-<port>.sock")
|
|
filerS3Options.tlsCACertificate = cmdFiler.Flag.String("s3.cacert.file", "", "path to the TLS CA certificate file")
|
|
filerS3Options.tlsVerifyClientCert = cmdFiler.Flag.Bool("s3.tlsVerifyClientCert", false, "whether to verify the client's certificate")
|
|
filerS3Options.bindIp = cmdFiler.Flag.String("s3.ip.bind", "", "ip address to bind to. If empty, default to same as -ip.bind option.")
|
|
filerS3Options.idleTimeout = cmdFiler.Flag.Int("s3.idleTimeout", 120, "connection idle seconds")
|
|
filerS3Options.concurrentUploadLimitMB = cmdFiler.Flag.Int("s3.concurrentUploadLimitMB", 0, "limit total concurrent upload size for S3, 0 means unlimited")
|
|
filerS3Options.concurrentFileUploadLimit = cmdFiler.Flag.Int("s3.concurrentFileUploadLimit", 0, "limit number of concurrent file uploads for S3, 0 means unlimited")
|
|
filerS3Options.enableIam = cmdFiler.Flag.Bool("s3.iam", true, "enable embedded IAM API on the same S3 port")
|
|
filerS3Options.cipher = cmdFiler.Flag.Bool("s3.encryptVolumeData", false, "encrypt data on volume servers for S3 uploads")
|
|
filerS3Options.iamReadOnly = cmdFiler.Flag.Bool("s3.iam.readOnly", true, "disable IAM write operations on this server")
|
|
filerS3Options.portIceberg = cmdFiler.Flag.Int("s3.port.iceberg", 8181, "Iceberg REST Catalog server listen port (0 to disable)")
|
|
filerS3Options.externalUrl = cmdFiler.Flag.String("s3.externalUrl", "", "the external URL clients use to connect (e.g. https://api.example.com:9000). Used for S3 signature verification behind a reverse proxy. Falls back to S3_EXTERNAL_URL env var.")
|
|
filerS3Options.defaultFileMode = cmdFiler.Flag.String("s3.defaultFileMode", "", "default file mode for S3 uploaded objects, e.g. 0660, 0644, 0666")
|
|
|
|
// start webdav on filer
|
|
filerStartWebDav = cmdFiler.Flag.Bool("webdav", false, "whether to start webdav gateway")
|
|
filerWebDavOptions.port = cmdFiler.Flag.Int("webdav.port", 7333, "webdav server http listen port")
|
|
filerWebDavOptions.collection = cmdFiler.Flag.String("webdav.collection", "", "collection to create the files")
|
|
filerWebDavOptions.replication = cmdFiler.Flag.String("webdav.replication", "", "replication to create the files")
|
|
filerWebDavOptions.disk = cmdFiler.Flag.String("webdav.disk", "", "[hdd|ssd|<tag>] hard drive or solid state drive or any tag")
|
|
filerWebDavOptions.tlsPrivateKey = cmdFiler.Flag.String("webdav.key.file", "", "path to the TLS private key file")
|
|
filerWebDavOptions.tlsCertificate = cmdFiler.Flag.String("webdav.cert.file", "", "path to the TLS certificate file")
|
|
filerWebDavOptions.cacheDir = cmdFiler.Flag.String("webdav.cacheDir", os.TempDir(), "local cache directory for file chunks")
|
|
filerWebDavOptions.cacheSizeMB = cmdFiler.Flag.Int64("webdav.cacheCapacityMB", 0, "local cache capacity in MB")
|
|
filerWebDavOptions.maxMB = cmdFiler.Flag.Int("webdav.maxMB", 4, "split files larger than the limit")
|
|
filerWebDavOptions.filerRootPath = cmdFiler.Flag.String("webdav.filer.path", "/", "use this remote path from filer server")
|
|
|
|
// start iam on filer
|
|
filerStartIam = cmdFiler.Flag.Bool("iam", false, "whether to start IAM service")
|
|
filerIamOptions.ip = cmdFiler.Flag.String("iam.ip", *f.ip, "iam server http listen ip address")
|
|
filerIamOptions.port = cmdFiler.Flag.Int("iam.port", 8111, "iam server http listen port")
|
|
|
|
filerStartSftp = cmdFiler.Flag.Bool("sftp", false, "whether to start the SFTP server")
|
|
filerSftpOptions.port = cmdFiler.Flag.Int("sftp.port", 2022, "SFTP server listen port")
|
|
filerSftpOptions.sshPrivateKey = cmdFiler.Flag.String("sftp.sshPrivateKey", "", "path to the SSH private key file for host authentication")
|
|
filerSftpOptions.hostKeysFolder = cmdFiler.Flag.String("sftp.hostKeysFolder", "", "path to folder containing SSH private key files for host authentication")
|
|
filerSftpOptions.authMethods = cmdFiler.Flag.String("sftp.authMethods", "password,publickey", "comma-separated list of allowed auth methods: password, publickey, keyboard-interactive")
|
|
filerSftpOptions.maxAuthTries = cmdFiler.Flag.Int("sftp.maxAuthTries", 6, "maximum number of authentication attempts per connection")
|
|
filerSftpOptions.bannerMessage = cmdFiler.Flag.String("sftp.bannerMessage", "SeaweedFS SFTP Server - Unauthorized access is prohibited", "message displayed before authentication")
|
|
filerSftpOptions.loginGraceTime = cmdFiler.Flag.Duration("sftp.loginGraceTime", 2*time.Minute, "timeout for authentication")
|
|
filerSftpOptions.clientAliveInterval = cmdFiler.Flag.Duration("sftp.clientAliveInterval", 5*time.Second, "interval for sending keep-alive messages")
|
|
filerSftpOptions.clientAliveCountMax = cmdFiler.Flag.Int("sftp.clientAliveCountMax", 3, "maximum number of missed keep-alive messages before disconnecting")
|
|
filerSftpOptions.userStoreFile = cmdFiler.Flag.String("sftp.userStoreFile", "", "path to JSON file containing user credentials and permissions")
|
|
filerSftpOptions.dataCenter = cmdFiler.Flag.String("sftp.dataCenter", "", "prefer to read and write to volumes in this data center")
|
|
filerSftpOptions.bindIp = cmdFiler.Flag.String("sftp.ip.bind", "", "ip address to bind to. If empty, default to same as -ip.bind option.")
|
|
filerSftpOptions.localSocket = cmdFiler.Flag.String("sftp.localSocket", "", "default to /tmp/seaweedfs-sftp-<port>.sock")
|
|
}
|
|
|
|
func filerLongDesc() string {
|
|
desc := `start a file server which accepts REST operation for any files.
|
|
|
|
//create or overwrite the file, the directories /path/to will be automatically created
|
|
POST /path/to/file
|
|
//get the file content
|
|
GET /path/to/file
|
|
//create or overwrite the file, the filename in the multipart request will be used
|
|
POST /path/to/
|
|
//return a json format subdirectory and files listing
|
|
GET /path/to/
|
|
|
|
The configuration file "filer.toml" is read from ".", "$HOME/.seaweedfs/", "/usr/local/etc/seaweedfs/", or "/etc/seaweedfs/", in that order.
|
|
If the "filer.toml" is not found, an embedded filer store will be created under "-defaultStoreDir".
|
|
|
|
The example filer.toml configuration file can be generated by "weed scaffold -config=filer"
|
|
|
|
Supported Filer Stores:
|
|
`
|
|
|
|
storeNames := make([]string, len(filer.Stores))
|
|
for i, store := range filer.Stores {
|
|
storeNames[i] = "\t" + store.GetName()
|
|
}
|
|
sort.Strings(storeNames)
|
|
storeList := strings.Join(storeNames, "\n")
|
|
return desc + storeList
|
|
}
|
|
|
|
// cmdFiler describes the "weed filer" command; its Run hook is assigned in
// init() to break the initialization cycle with runFiler.
var cmdFiler = &Command{
	UsageLine: "filer -port=8888 -master=<ip:port>[,<ip:port>]*",
	Short:     "start a file server that points to a master server, or a list of master servers",
	Long:      filerLongDesc(),
}
|
|
|
|
func runFiler(cmd *Command, args []string) bool {
|
|
if *f.debug {
|
|
go http.ListenAndServe(fmt.Sprintf(":%d", *f.debugPort), nil)
|
|
}
|
|
|
|
util.LoadSecurityConfiguration()
|
|
|
|
switch {
|
|
case *f.metricsHttpIp != "":
|
|
// noting to do, use f.metricsHttpIp
|
|
case *f.bindIp != "":
|
|
*f.metricsHttpIp = *f.bindIp
|
|
case *f.ip != "":
|
|
*f.metricsHttpIp = *f.ip
|
|
}
|
|
go stats_collect.StartMetricsServer(*f.metricsHttpIp, *f.metricsHttpPort)
|
|
|
|
filerAddress := pb.NewServerAddress(*f.ip, *f.port, *f.portGrpc).String()
|
|
startDelay := time.Duration(2)
|
|
if *filerStartS3 {
|
|
filerS3Options.filer = &filerAddress
|
|
if *filerS3Options.bindIp == "" {
|
|
filerS3Options.bindIp = f.bindIp
|
|
}
|
|
filerS3Options.localFilerSocket = f.localSocket
|
|
if *f.dataCenter != "" && *filerS3Options.dataCenter == "" {
|
|
filerS3Options.dataCenter = f.dataCenter
|
|
}
|
|
// Set S3 metrics IP based on bind IP if not explicitly set
|
|
if *filerS3Options.metricsHttpIp == "" {
|
|
*filerS3Options.metricsHttpIp = *filerS3Options.bindIp
|
|
}
|
|
go func(delay time.Duration) {
|
|
time.Sleep(delay * time.Second)
|
|
filerS3Options.startS3Server()
|
|
}(startDelay)
|
|
startDelay++
|
|
}
|
|
|
|
if *filerStartWebDav {
|
|
filerWebDavOptions.filer = &filerAddress
|
|
filerWebDavOptions.ipBind = f.bindIp
|
|
|
|
if *filerWebDavOptions.disk == "" {
|
|
filerWebDavOptions.disk = f.diskType
|
|
}
|
|
|
|
go func(delay time.Duration) {
|
|
time.Sleep(delay * time.Second)
|
|
filerWebDavOptions.startWebDav()
|
|
}(startDelay)
|
|
startDelay++
|
|
}
|
|
|
|
if *filerStartIam {
|
|
filerIamOptions.filer = &filerAddress
|
|
filerIamOptions.masters = f.mastersString
|
|
go func(delay time.Duration) {
|
|
time.Sleep(delay * time.Second)
|
|
filerIamOptions.startIamServer()
|
|
}(startDelay)
|
|
startDelay++
|
|
}
|
|
|
|
if *filerStartSftp {
|
|
filerSftpOptions.filer = &filerAddress
|
|
if *filerSftpOptions.bindIp == "" {
|
|
filerSftpOptions.bindIp = f.bindIp
|
|
}
|
|
if *f.dataCenter != "" && *filerSftpOptions.dataCenter == "" {
|
|
filerSftpOptions.dataCenter = f.dataCenter
|
|
}
|
|
go func(delay time.Duration) {
|
|
time.Sleep(delay * time.Second)
|
|
filerSftpOptions.startSftpServer()
|
|
}(startDelay)
|
|
}
|
|
|
|
f.masters = pb.ServerAddresses(*f.mastersString).ToServiceDiscovery()
|
|
|
|
f.startFiler()
|
|
|
|
return true
|
|
}
|
|
|
|
// GetCertificateWithUpdate Auto refreshing TSL certificate
|
|
func (fo *FilerOptions) GetCertificateWithUpdate(*tls.ClientHelloInfo) (*tls.Certificate, error) {
|
|
certs, err := fo.certProvider.KeyMaterial(context.Background())
|
|
if certs == nil {
|
|
return nil, err
|
|
}
|
|
return &certs.Certs[0], err
|
|
}
|
|
|
|
// startFiler configures and runs the filer: the main HTTP endpoint (and an
// optional read-only public endpoint), the gRPC endpoint (filer service plus
// optional IAM service), a local unix socket on non-Windows hosts, and -
// when "https.filer.key" is configured - TLS with auto-refreshing
// certificates. This method blocks serving HTTP until shutdown.
func (fo *FilerOptions) startFiler() {

	defaultMux := http.NewServeMux()
	// By default the public mux is the same mux; a separate one is only
	// created when a dedicated read-only public port is requested.
	publicVolumeMux := defaultMux

	if *fo.publicPort != 0 {
		publicVolumeMux = http.NewServeMux()
	}
	// Fill in defaults for unset options.
	if *fo.portGrpc == 0 {
		*fo.portGrpc = 10000 + *fo.port
	}
	if *fo.bindIp == "" {
		*fo.bindIp = *fo.ip
	}
	if *fo.allowedOrigins == "" {
		*fo.allowedOrigins = "*"
	}

	defaultLevelDbDirectory := util.ResolvePath(*fo.defaultLevelDbDirectory + "/filerldb2")

	filerAddress := pb.NewServerAddress(*fo.ip, *fo.port, *fo.portGrpc)

	// Initialize credential manager for IAM gRPC service; failure is
	// non-fatal - the filer then runs without the IAM gRPC service.
	var credentialManager *credential.CredentialManager
	var err error
	credentialManager, err = credential.NewCredentialManagerWithDefaults("")
	if err != nil {
		glog.Warningf("Failed to initialize credential manager: %v", err)
	} else {
		glog.V(0).Infof("Initialized credential manager: %s", credentialManager.GetStoreName())
	}

	// Load static S3 identities from config file if specified, so they are
	// visible in listings served by this filer's credential manager.
	if fo.s3ConfigFile != nil && *fo.s3ConfigFile != "" {
		if credentialManager != nil {
			if err := credentialManager.LoadS3ConfigFile(*fo.s3ConfigFile); err != nil {
				glog.Warningf("Failed to load S3 config file for static identities: %v", err)
			}
		}
	}

	fs, nfs_err := weed_server.NewFilerServer(defaultMux, publicVolumeMux, &weed_server.FilerOption{
		Masters:                   fo.masters,
		FilerGroup:                *fo.filerGroup,
		Collection:                *fo.collection,
		DefaultReplication:        *fo.defaultReplicaPlacement,
		DisableDirListing:         *fo.disableDirListing,
		MaxMB:                     *fo.maxMB,
		DirListingLimit:           *fo.dirListingLimit,
		DataCenter:                *fo.dataCenter,
		Rack:                      *fo.rack,
		DefaultLevelDbDir:         defaultLevelDbDirectory,
		DisableHttp:               *fo.disableHttp,
		Host:                      filerAddress,
		Cipher:                    *fo.cipher,
		SaveToFilerLimit:          int64(*fo.saveToFilerLimit),
		ConcurrentUploadLimit:     int64(*fo.concurrentUploadLimitMB) * 1024 * 1024,
		ConcurrentFileUploadLimit: int64(*fo.concurrentFileUploadLimit),
		ShowUIDirectoryDelete:     *fo.showUIDirectoryDelete,
		DownloadMaxBytesPs:        int64(*fo.downloadMaxMBps) * 1024 * 1024,
		DiskType:                  *fo.diskType,
		AllowedOrigins:            strings.Split(*fo.allowedOrigins, ","),
		TusBasePath:               *fo.tusBasePath,
		CredentialManager:         credentialManager,
	})
	if nfs_err != nil {
		glog.Fatalf("Filer startup error: %v", nfs_err)
	}

	// Optional read-only public HTTP endpoint.
	if *fo.publicPort != 0 {
		publicListeningAddress := util.JoinHostPort(*fo.bindIp, *fo.publicPort)
		glog.V(0).Infoln("Start Seaweed filer server", version.Version(), "public at", publicListeningAddress)
		publicListener, localPublicListener, e := util.NewIpAndLocalListeners(*fo.bindIp, *fo.publicPort, 0)
		if e != nil {
			glog.Fatalf("Filer server public listener error on port %d:%v", *fo.publicPort, e)
		}
		go func() {
			if e := http.Serve(publicListener, publicVolumeMux); e != nil {
				glog.Fatalf("Volume server fail to serve public: %v", e)
			}
		}()
		if localPublicListener != nil {
			go func() {
				if e := http.Serve(localPublicListener, publicVolumeMux); e != nil {
					glog.Errorf("Volume server fail to serve public: %v", e)
				}
			}()
		}
	}

	glog.V(0).Infof("Start Seaweed Filer %s at %s:%d", version.Version(), *fo.ip, *fo.port)
	filerListener, filerLocalListener, e := util.NewIpAndLocalListeners(
		*fo.bindIp, *fo.port,
		time.Duration(10)*time.Second,
	)
	if e != nil {
		glog.Fatalf("Filer listener error: %v", e)
	}

	// starting grpc server
	grpcPort := *fo.portGrpc
	grpcL, grpcLocalL, err := util.NewIpAndLocalListeners(*fo.bindIp, grpcPort, 0)
	if err != nil {
		glog.Fatalf("failed to listen on grpc port %d: %v", grpcPort, err)
	}
	grpcS := pb.NewGrpcServer(security.LoadServerTLS(util.GetViper(), "grpc.filer"))
	filer_pb.RegisterSeaweedFilerServer(grpcS, fs)

	// Register IAM gRPC service if credential manager is available
	if credentialManager != nil {
		iamGrpcServer := weed_server.NewIamGrpcServer(credentialManager)
		iam_pb.RegisterSeaweedIdentityAccessManagementServer(grpcS, iamGrpcServer)
		glog.V(0).Info("Registered IAM gRPC service on filer")
	}

	reflection.Register(grpcS)
	if grpcLocalL != nil {
		go grpcS.Serve(grpcLocalL)
	}
	go grpcS.Serve(grpcL)
	pb.ServeGrpcOnLocalSocket(grpcS, grpcPort)

	// Also serve HTTP on a local unix socket (not supported on Windows).
	if runtime.GOOS != "windows" {
		localSocket := *fo.localSocket
		if localSocket == "" {
			localSocket = fmt.Sprintf("/tmp/seaweedfs-filer-%d.sock", *fo.port)
		}
		// Remove a stale socket file left over from a previous run.
		if err := os.Remove(localSocket); err != nil && !os.IsNotExist(err) {
			glog.Fatalf("Failed to remove %s, error: %s", localSocket, err.Error())
		}
		go func() {
			// start on local unix socket
			filerSocketListener, err := net.Listen("unix", localSocket)
			if err != nil {
				glog.Fatalf("Failed to listen on %s: %v", localSocket, err)
			}
			newHttpServer(defaultMux, nil).Serve(filerSocketListener)
		}()
	}

	if viper.GetString("https.filer.key") != "" {
		// HTTPS path: build a TLS config backed by an auto-refreshing
		// pemfile certificate provider.
		certFile := viper.GetString("https.filer.cert")
		keyFile := viper.GetString("https.filer.key")
		caCertFile := viper.GetString("https.filer.ca")
		disbaleTlsVerifyClientCert := viper.GetBool("https.filer.disable_tls_verify_client_cert")

		pemfileOptions := pemfile.Options{
			CertFile:        certFile,
			KeyFile:         keyFile,
			RefreshDuration: security.CredRefreshingInterval,
		}
		if fo.certProvider, err = pemfile.NewProvider(pemfileOptions); err != nil {
			glog.Fatalf("pemfile.NewProvider(%v) failed: %v", pemfileOptions, err)
		}

		caCertPool := x509.NewCertPool()
		if caCertFile != "" {
			caCertFile, err := os.ReadFile(caCertFile)
			if err != nil {
				glog.Fatalf("error reading CA certificate: %v", err)
			}
			caCertPool.AppendCertsFromPEM(caCertFile)
		}

		// Client certificates are required and verified unless explicitly
		// disabled via "https.filer.disable_tls_verify_client_cert".
		clientAuth := tls.NoClientCert
		if !disbaleTlsVerifyClientCert {
			clientAuth = tls.RequireAndVerifyClientCert
		}

		tlsConfig := &tls.Config{
			GetCertificate: fo.GetCertificateWithUpdate,
			ClientAuth:     clientAuth,
			ClientCAs:      caCertPool,
		}

		security.FixTlsConfig(util.GetViper(), tlsConfig)

		if filerLocalListener != nil {
			go func() {
				if err := newHttpServer(defaultMux, tlsConfig).ServeTLS(filerLocalListener, "", ""); err != nil {
					glog.Errorf("Filer Fail to serve: %v", err)
				}
			}()
		}
		httpS := newHttpServer(defaultMux, tlsConfig)
		// In mini-cluster mode, shut the servers down when the cluster
		// context is cancelled.
		if MiniClusterCtx != nil {
			ctx := MiniClusterCtx
			go func() {
				<-ctx.Done()
				httpS.Shutdown(context.Background())
				grpcS.Stop()
			}()
		}
		if err := httpS.ServeTLS(filerListener, "", ""); err != nil && err != http.ErrServerClosed {
			glog.Fatalf("Filer Fail to serve: %v", err)
		}
	} else {
		// Plain HTTP path.
		if filerLocalListener != nil {
			go func() {
				if err := newHttpServer(defaultMux, nil).Serve(filerLocalListener); err != nil {
					glog.Errorf("Filer Fail to serve: %v", err)
				}
			}()
		}
		httpS := newHttpServer(defaultMux, nil)
		// In mini-cluster mode, shut the servers down when the cluster
		// context is cancelled.
		if MiniClusterCtx != nil {
			ctx := MiniClusterCtx
			go func() {
				<-ctx.Done()
				httpS.Shutdown(context.Background())
				grpcS.Stop()
			}()
		}
		if err := httpS.Serve(filerListener); err != nil && err != http.ErrServerClosed {
			glog.Fatalf("Filer Fail to serve: %v", err)
		}
	}
}
|