Use Unix sockets for gRPC in mini mode (#8856)

* Use Unix sockets for gRPC between co-located services in mini mode

In `weed mini`, all services run in one process. Previously, inter-service
gRPC traffic (volume↔master, filer↔master, S3↔filer, worker↔admin, etc.)
went through TCP loopback. This adds a gRPC Unix socket registry in the pb
package: mini mode registers a socket path per gRPC port at startup, each
gRPC server additionally listens on its socket, and GrpcDial transparently
routes to the socket via WithContextDialer when a match is found.

Standalone commands (weed master, weed filer, etc.) are unaffected since
no sockets are registered. TCP listeners are kept for external clients.

* Handle Serve error and clean up socket file in ServeGrpcOnLocalSocket

Log unexpected errors from grpcServer.Serve (ignoring
grpc.ErrServerStopped) and always remove the Unix socket file
when Serve returns, ensuring cleanup on Stop/GracefulStop.
This commit is contained in:
Chris Lu
2026-03-30 18:18:52 -07:00
committed by GitHub
parent 0ce4a857e6
commit 2eaf98a7a2
7 changed files with 85 additions and 0 deletions

View File

@@ -421,6 +421,7 @@ func (fo *FilerOptions) startFiler() {
go grpcS.Serve(grpcLocalL)
}
go grpcS.Serve(grpcL)
pb.ServeGrpcOnLocalSocket(grpcS, grpcPort)
if runtime.GOOS != "windows" {
localSocket := *fo.localSocket

View File

@@ -254,6 +254,7 @@ func startMaster(masterOption MasterOptions, masterWhiteList []string) {
go grpcS.Serve(grpcLocalL)
}
go grpcS.Serve(grpcL)
pb.ServeGrpcOnLocalSocket(grpcS, grpcPort)
// For multi-master mode with non-Hashicorp raft, wait and check if we should join
if !*masterOption.raftHashicorp && !isSingleMaster {

View File

@@ -823,6 +823,16 @@ func runMini(cmd *Command, args []string) bool {
miniS3Options.filer = &filerAddress
miniWebDavOptions.filer = &filerAddress
// Register Unix socket paths for gRPC services so local inter-service
// communication goes through Unix sockets instead of TCP.
pb.RegisterLocalGrpcSocket(*miniMasterOptions.portGrpc, fmt.Sprintf("/tmp/seaweedfs-master-grpc-%d.sock", *miniMasterOptions.portGrpc))
pb.RegisterLocalGrpcSocket(*miniOptions.v.portGrpc, fmt.Sprintf("/tmp/seaweedfs-volume-grpc-%d.sock", *miniOptions.v.portGrpc))
pb.RegisterLocalGrpcSocket(*miniFilerOptions.portGrpc, fmt.Sprintf("/tmp/seaweedfs-filer-grpc-%d.sock", *miniFilerOptions.portGrpc))
if *miniS3Options.portGrpc > 0 {
pb.RegisterLocalGrpcSocket(*miniS3Options.portGrpc, fmt.Sprintf("/tmp/seaweedfs-s3-grpc-%d.sock", *miniS3Options.portGrpc))
}
pb.RegisterLocalGrpcSocket(*miniAdminOptions.grpcPort, fmt.Sprintf("/tmp/seaweedfs-admin-grpc-%d.sock", *miniAdminOptions.grpcPort))
go stats_collect.StartMetricsServer(*miniMetricsHttpIp, *miniMetricsHttpPort)
if *miniMasterOptions.volumeSizeLimitMB > util.VolumeSizeLimitGB*1000 {

View File

@@ -376,6 +376,7 @@ func (s3opt *S3Options) startS3Server() bool {
go grpcS.Serve(grpcLocalL)
}
go grpcS.Serve(grpcL)
pb.ServeGrpcOnLocalSocket(grpcS, grpcPort)
if *s3opt.tlsPrivateKey != "" {
// Check for port conflict when both HTTP and HTTPS are enabled on the same port

View File

@@ -415,6 +415,7 @@ func (v VolumeServerOptions) startGrpcService(vs volume_server_pb.VolumeServerSe
glog.Fatalf("start gRPC service failed, %s", err)
}
}()
pb.ServeGrpcOnLocalSocket(grpcS, grpcPort)
return grpcS
}