* fix(helm): namespace app-specific values under global.seaweedfs Move all app-specific values from the global namespace to global.seaweedfs.* to avoid polluting the shared .Values.global namespace when the chart is used as a subchart. Standard Helm conventions (global.imageRegistry, global.imagePullSecrets) remain at the global level as they are designed to be shared across subcharts. Fixes seaweedfs/seaweedfs#8699 BREAKING CHANGE: global values have been restructured. Users must update their values files to use the new paths: - global.registry → global.imageRegistry - global.repository → global.seaweedfs.image.repository - global.imageName → global.seaweedfs.image.name - global.<key> → global.seaweedfs.<key> (for all other app-specific values) * fix(ci): update helm CI tests to use new global.seaweedfs.* value paths Update all --set flags in helm_ci.yml to use the new namespaced global.seaweedfs.* paths matching the values.yaml restructuring. * fix(ci): install Claude Code via npm to avoid install.sh 403 The claude-code-action's built-in installer uses `curl https://claude.ai/install.sh | bash` which can fail with 403. Due to the pipe, bash exits 0 on empty input, masking the curl failure and leaving the `claude` binary missing. Work around this by installing Claude Code via npm before invoking the action, and passing the executable path via path_to_claude_code_executable. * revert: remove claude-code-review.yml changes from this PR The claude-code-action OIDC token exchange validates that the workflow file matches the version on the default branch. Modifying it in a PR causes the review job to fail with "Workflow validation failed". The Claude Code install fix will need to be applied directly to master or in a separate PR. 
* fix: update stale references to old global.* value paths - admin-statefulset.yaml: fix fail message to reference global.seaweedfs.masterServer - values.yaml: fix comment to reference image.name instead of imageName - helm_ci.yml: fix diagnostic message to reference global.seaweedfs.enableSecurity * feat(helm): add backward-compat shim for old global.* value paths Add _compat.tpl with a seaweedfs.compat helper that detects old-style global.* keys (e.g. global.enableSecurity, global.registry) and merges them into the new global.seaweedfs.* namespace. Since the old keys no longer have defaults in values.yaml, their presence means the user explicitly provided them. The helper uses in-place mutation via `set` so all templates see the merged values. This ensures existing deployments using old value paths continue to work without changes after upgrading. * fix: update stale comment references in values.yaml Update comments referencing global.enableSecurity and global.masterServer to the new global.seaweedfs.* paths. --------- Co-authored-by: Copilot <copilot@github.com>
98 lines · 4.1 KiB · YAML
{{- /*
Renders the <release>-security-config ConfigMap holding security.toml, the
file read by master, volume server, and filer. Only emitted when
.Values.global.seaweedfs.enableSecurity is true. The compat include below
migrates legacy top-level global.* values into global.seaweedfs.* in place,
so it must run before any global.seaweedfs.* value is read.
*/}}
{{- include "seaweedfs.compat" . -}}
{{- if .Values.global.seaweedfs.enableSecurity }}
apiVersion: v1
kind: ConfigMap
metadata:
  name: {{ include "seaweedfs.fullname" . }}-security-config
  namespace: {{ .Release.Namespace }}
  labels:
    app.kubernetes.io/name: {{ template "seaweedfs.name" . }}
    helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
    app.kubernetes.io/managed-by: {{ .Release.Service }}
    app.kubernetes.io/instance: {{ .Release.Name }}
data:
  {{- /* Keep JWT signing keys stable across `helm upgrade`: look up the
         ConfigMap already in the cluster (current fullname-based name first,
         then the older name-based legacy name) and reuse its keys via `dig`.
         `lookup` returns an empty map during `helm template`/dry-run, in
         which case fresh random keys are generated below. */}}
  {{- $fullname := include "seaweedfs.fullname" . }}
  {{- $securityConfigName := printf "%s-security-config" $fullname }}
  {{- $existing := lookup "v1" "ConfigMap" .Release.Namespace $securityConfigName }}
  {{- if not $existing }}
  {{- $legacyName := printf "%s-%s" (include "seaweedfs.name" .) "security-config" }}
  {{- $existing = lookup "v1" "ConfigMap" .Release.Namespace $legacyName }}
  {{- end }}
  {{- /* Parse the existing security.toml (or "" when none) into a dict so
         individual keys can be dug out with defaults. */}}
  {{- $securityConfig := fromToml (dig "data" "security.toml" "" $existing) }}
  security.toml: |-
    # this file is read by master, volume server, and filer

    {{- /* Each jwtSigning.* toggle emits its section, reusing the existing
           key when present and otherwise generating a random one. */}}
    {{- if .Values.global.seaweedfs.securityConfig.jwtSigning.volumeWrite }}
    # the jwt signing key is read by master and volume server
    # a jwt expires in 10 seconds
    [jwt.signing]
    key = "{{ dig "jwt" "signing" "key" (randAlphaNum 10 | b64enc) $securityConfig }}"
    {{- end }}

    {{- if .Values.global.seaweedfs.securityConfig.jwtSigning.volumeRead }}
    # this jwt signing key is read by master and volume server, and it is used for read operations:
    # - the Master server generates the JWT, which can be used to read a certain file on a volume server
    # - the Volume server validates the JWT on reading
    [jwt.signing.read]
    key = "{{ dig "jwt" "signing" "read" "key" (randAlphaNum 10 | b64enc) $securityConfig }}"
    {{- end }}

    {{- if .Values.global.seaweedfs.securityConfig.jwtSigning.filerWrite }}
    # If this JWT key is configured, Filer only accepts writes over HTTP if they are signed with this JWT:
    # - f.e. the S3 API Shim generates the JWT
    # - the Filer server validates the JWT on writing
    # the jwt defaults to expire after 10 seconds.
    [jwt.filer_signing]
    key = "{{ dig "jwt" "filer_signing" "key" (randAlphaNum 10 | b64enc) $securityConfig }}"
    {{- end }}

    {{- if .Values.global.seaweedfs.securityConfig.jwtSigning.filerRead }}
    # If this JWT key is configured, Filer only accepts reads over HTTP if they are signed with this JWT:
    # - f.e. the S3 API Shim generates the JWT
    # - the Filer server validates the JWT on reading
    # the jwt defaults to expire after 10 seconds.
    [jwt.filer_signing.read]
    key = "{{ dig "jwt" "filer_signing" "read" "key" (randAlphaNum 10 | b64enc) $securityConfig }}"
    {{- end }}

    # all grpc tls authentications are mutual
    # the values for the following ca, cert, and key are paths to the PEM files.
    [grpc]
    ca = "/usr/local/share/ca-certificates/ca/tls.crt"

    [grpc.volume]
    cert = "/usr/local/share/ca-certificates/volume/tls.crt"
    key = "/usr/local/share/ca-certificates/volume/tls.key"

    [grpc.master]
    cert = "/usr/local/share/ca-certificates/master/tls.crt"
    key = "/usr/local/share/ca-certificates/master/tls.key"

    [grpc.filer]
    cert = "/usr/local/share/ca-certificates/filer/tls.crt"
    key = "/usr/local/share/ca-certificates/filer/tls.key"

    [grpc.admin]
    cert = "/usr/local/share/ca-certificates/admin/tls.crt"
    key = "/usr/local/share/ca-certificates/admin/tls.key"

    [grpc.worker]
    cert = "/usr/local/share/ca-certificates/worker/tls.crt"
    key = "/usr/local/share/ca-certificates/worker/tls.key"

    # use this for any place needs a grpc client
    # i.e., "weed backup|benchmark|filer.copy|filer.replicate|mount|s3|upload"
    [grpc.client]
    cert = "/usr/local/share/ca-certificates/client/tls.crt"
    key = "/usr/local/share/ca-certificates/client/tls.key"

    # volume server https options
    # Note: work in progress!
    # this does not work with other clients, e.g., "weed filer|mount" etc, yet.
    [https.client]
    enabled = false

    [https.volume]
    cert = ""
    key = ""
{{- end }}