Files
seaweedFS/weed/command/scaffold/notification.toml
Chris Lu 937a168d34 notification.kafka: add SASL authentication and TLS support (#8832)
* notification.kafka: add SASL authentication and TLS support (#8827)

Wire sarama SASL (PLAIN, SCRAM-SHA-256, SCRAM-SHA-512) and TLS
configuration into the Kafka notification producer and consumer,
enabling connections to secured Kafka clusters.

* notification.kafka: validate mTLS config

* kafka notification: validate partial mTLS config, replace panics with errors

- Reject when only one of tls_client_cert/tls_client_key is provided
- Replace three panic() calls in KafkaInput.initialize with returned errors

* kafka notification: enforce minimum TLS 1.2 for Kafka connections
2026-03-29 13:45:54 -07:00

83 lines
3.9 KiB
TOML
Raw Permalink Blame History

This file contains ambiguous Unicode characters
This file contains Unicode characters that might be confused with other characters. If you think that this is intentional, you can safely ignore this warning. Use the Escape button to reveal them.
# A sample TOML config file for SeaweedFS filer notifications
# Used by both "weed filer" or "weed server -filer" and "weed filer.replicate"
# Put this file in one of the following locations, with descending priority
# ./notification.toml
# $HOME/.seaweedfs/notification.toml
# /etc/seaweedfs/notification.toml
####################################################
# notification
# send and receive filer updates for each file to an external message queue
####################################################
[notification.log]
# this is only for debugging purposes and does not work with "weed filer.replicate"
enabled = false
[notification.kafka]
enabled = false
# Kafka broker addresses, one "host:port" entry per broker
hosts = [
"localhost:9092"
]
topic = "seaweedfs_filer"
# NOTE(review): offsetFile/offsetSaveIntervalSeconds look like consumer-side state
# for "weed filer.replicate" (last consumed offset, persisted every N seconds) —
# confirm against the Kafka input implementation.
offsetFile = "./last.offset"
offsetSaveIntervalSeconds = 10
# SASL Authentication
sasl_enabled = false
sasl_mechanism = "PLAIN" # PLAIN, SCRAM-SHA-256, SCRAM-SHA-512
sasl_username = ""
sasl_password = ""
# TLS/SSL
# A minimum of TLS 1.2 is enforced for Kafka connections.
tls_enabled = false
tls_ca_cert = "" # path to CA certificate PEM file
# For mTLS, set tls_client_cert and tls_client_key together;
# providing only one of the two is rejected as invalid configuration.
tls_client_cert = "" # path to client certificate PEM file (for mTLS)
tls_client_key = "" # path to client private key PEM file (for mTLS)
tls_insecure_skip_verify = false # true disables server certificate verification — testing only
[notification.aws_sqs]
# experimental; please report any issues
enabled = false
aws_access_key_id = "" # if empty, loads from the shared credentials file (~/.aws/credentials).
aws_secret_access_key = "" # if empty, loads from the shared credentials file (~/.aws/credentials).
region = "us-east-2" # AWS region the SQS queue lives in
sqs_queue_name = "my_filer_queue" # an existing queue name
[notification.google_pub_sub]
# read credentials doc at https://cloud.google.com/docs/authentication/getting-started
enabled = false
google_application_credentials = "/path/to/x.json" # path to json credential file
project_id = "" # an existing project id
topic = "seaweedfs_filer_topic" # a topic; auto-created if it does not exist
[notification.gocdk_pub_sub]
# The Go Cloud Development Kit (https://gocloud.dev).
# PubSub API (https://godoc.org/gocloud.dev/pubsub).
# Supports AWS SNS/SQS, Azure Service Bus, Google PubSub, NATS and RabbitMQ.
enabled = false
# This URL will Dial the RabbitMQ server at the URL in the environment
# variable RABBIT_SERVER_URL and open the exchange "myexchange".
# The exchange must have already been created by some other means, like
# the RabbitMQ management plugin. Create myexchange of type fanout and myqueue then
# create binding myexchange => myqueue
topic_url = "rabbit://myexchange"
sub_url = "rabbit://myqueue"
[notification.webhook]
# Send file system events to HTTP webhook endpoints (push model)
# BEST FOR: Low to moderate traffic (< 100 events/second sustained)
# FOR HIGH TRAFFIC: Consider using Kafka, SQS, or pull-based event logs instead
# Documentation: https://github.com/seaweedfs/seaweedfs/wiki/Filer-Notification-Webhook
enabled = false
endpoint = "https://your-server.com/webhook" # required: HTTP endpoint URL
bearer_token = "" # optional: bearer token for authentication
timeout_seconds = 10 # optional: HTTP timeout in seconds (default: 10, range: 1-300)
max_retries = 3 # optional: retry attempts per event (default: 3, range: 0-10)
backoff_seconds = 3 # optional: initial retry backoff delay in seconds (default: 3, range: 1-60)
max_backoff_seconds = 30 # optional: max retry backoff delay in seconds (default: 30, range: backoff_seconds-300)
workers = 5 # optional: concurrent delivery workers (default: 5, range: 1-100)
# NOTE(review): behavior when the buffer is full (block vs drop) is not visible here — confirm before tuning.
buffer_size = 10000 # optional: event buffer size (default: 10000, range: 100-1000000)
# event_types = ["create", "update", "delete", "rename"] # optional: filter by event types (default: all)
# path_prefixes = ["/important", "/data"] # optional: filter by path prefixes (default: all)