## Source: Gists/helm/opengist/values.yaml
## Author: Thomas Miceli — commit 6bd8df6a74 (v1.12.0), 2026-01-27 22:28:20 +07:00
## 297 lines, 8.7 KiB, YAML
## Kubernetes workload configuration for Opengist
## Partially overrides the chart name used when naming resources.
nameOverride: ""
## Fully overrides the generated resource name.
fullnameOverride: ""
## Target namespace for the resources; empty deploys into the release namespace.
namespace: ""
## Opengist YAML Application Config. See more at https://opengist.io/docs/configuration/cheat-sheet.html
## This will create a Kubernetes secret with the key `config.yml` containing the YAML configuration mounted in the pod.
config:
  log-level: "warn"
  log-output: "stdout"
  metrics.enabled: false
## If defined, the existing secret will be used instead of creating a new one.
## The secret must contain a key named `config.yml` with the YAML configuration.
configExistingSecret: ""
## Define the image repository and tag to use.
image:
  repository: ghcr.io/thomiceli/opengist
  pullPolicy: Always
  tag: "1.12.0"
  ## Pin by digest instead of tag when set (e.g. "sha256:...").
  digest: ""
## Secrets used to pull the image from a private registry.
imagePullSecrets: []
  # - name: "image-pull-secret"
## Define the deployment replica count
replicaCount: 1
## Define the deployment strategy type
strategy:
  type: "RollingUpdate"
  rollingUpdate:
    maxSurge: "100%"
    maxUnavailable: 0
## StatefulSet configuration
## Enables StatefulSet workload instead of Deployment (required for volumeClaimTemplates or stable pod identities).
##
## Single-replica SQLite example (default behavior):
##   statefulSet.enabled: true
##   replicaCount: 1
##   persistence.mode: perReplica  # or omit (default)
##   # Creates one PVC per pod via volumeClaimTemplates (RWO)
##
## Multi-replica requirements (replicaCount > 1):
##   1. External database: config.db-uri must be postgres:// or mysql:// (SQLite NOT supported)
##   2. Shared storage: Use ONE of:
##      a) Existing claim: persistence.existingClaim: "my-rwx-pvc"
##      b) Chart-created: persistence.mode: shared + persistence.create.enabled: true + accessModes: [ReadWriteMany]
##   3. Chart will FAIL FAST if constraints are not met to prevent data divergence
##
## Persistence decision tree:
##   - persistence.existingClaim set → mount that PVC directly (no volumeClaimTemplates)
##   - persistence.mode=shared + create.* → chart creates single RWX PVC, all pods mount it
##   - persistence.mode=perReplica (default) → volumeClaimTemplates (one PVC/pod, RWO typically)
##   - persistence.enabled=false → emptyDir (ephemeral)
statefulSet:
  enabled: false
  podManagementPolicy: OrderedReady
  updateStrategy:
    type: RollingUpdate
## Security Context settings
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
podSecurityContext:
  fsGroup: 1000
securityContext: {}
  # allowPrivilegeEscalation: false
## Pod Disruption Budget settings
## ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/
podDisruptionBudget: {}
  # maxUnavailable: 1
  # minAvailable: 1
## Set the Kubernetes service type
## ref: https://kubernetes.io/docs/concepts/services-networking/service/
service:
  http:
    type: ClusterIP
    clusterIP:
    port: 6157
    nodePort:
    loadBalancerIP:
    externalIPs: []
    labels: {}
    annotations: {}
    loadBalancerSourceRanges: []
    externalTrafficPolicy:
  ssh:
    enabled: true
    type: ClusterIP
    clusterIP:
    port: 2222
    nodePort:
    loadBalancerIP:
    externalIPs: []
    labels: {}
    annotations: {}
    loadBalancerSourceRanges: []
    externalTrafficPolicy:
  # A metrics K8S service on port 6158 is created when the Opengist config metrics.enabled: true
  metrics:
    type: ClusterIP
    clusterIP:
    port: 6158
    nodePort:
    labels: {}
    annotations: {}
    # A service monitor can be used to work with your Prometheus setup.
    serviceMonitor:
      enabled: true
      labels: {}
        # release: kube-prom-stack
      interval:
      scrapeTimeout:
      annotations: {}
      relabelings: []
      metricRelabelings: []
## HTTP Ingress for Opengist
## ref: https://kubernetes.io/docs/concepts/services-networking/ingress/
ingress:
  enabled: false
  className: ""
  labels: {}
    # node-role.kubernetes.io/ingress: platform
  annotations: {}
    # kubernetes.io/ingress.class: nginx
  hosts:
    - host: opengist.example.com
      paths:
        - path: /
          pathType: Prefix
  tls: []
    # - secretName: opengist-tls
    #   hosts:
    #     - opengist.example.com
## Service Account for Opengist pods
## ref: https://kubernetes.io/docs/concepts/security/service-accounts/
serviceAccount:
  create: true
  annotations: {}
  ## Name of the service account to use; empty generates one from the chart fullname.
  name: ""
## Persistent storage for /opengist data directory
## ref: https://kubernetes.io/docs/concepts/storage/persistent-volumes/
persistence:
  enabled: true
  ## Persistence mode controls how storage is provisioned:
  ##
  ## perReplica (DEFAULT):
  ##   - StatefulSet creates one PVC per replica via volumeClaimTemplates
  ##   - Typically RWO (ReadWriteOnce) storage
  ##   - Safe ONLY for replicaCount=1 (multi-replica causes data divergence)
  ##   - Use when: single-node dev/test, no horizontal scaling needed
  ##
  ## shared:
  ##   - Single RWX (ReadWriteMany) PVC shared by all replicas
  ##   - Required for replicaCount > 1
  ##   - Two provisioning paths:
  ##     a) existingClaim: "my-rwx-pvc" (you manage the PVC lifecycle)
  ##     b) existingClaim: "" + create.enabled: true (chart creates PVC automatically)
  ##   - Use when: multi-replica HA, horizontal scaling, shared file access
  ##
  ## WARNING: Switching modes after initial deploy requires manual data migration:
  ##   1. Scale down to 1 replica
  ##   2. Create/provision RWX PVC and copy data
  ##   3. Update values: mode=shared, existingClaim or create.enabled
  ##   4. Scale up
  mode: perReplica
  ## Reference an existing PVC (takes precedence over create.*)
  ## When set:
  ##   - Chart will NOT create a PVC
  ##   - StatefulSet mounts this claim directly (no volumeClaimTemplates)
  ##   - Must be RWX for replicaCount > 1
  ## Example: existingClaim: "opengist-shared-rwx"
  existingClaim: ""
  ## Common persistence parameters (apply to perReplica mode OR as defaults for create.*)
  storageClass: ""  # Empty = cluster default
  labels: {}
  annotations:
    helm.sh/resource-policy: keep  # Prevents PVC deletion on helm uninstall
  size: 5Gi
  accessModes:
    - ReadWriteOnce  # perReplica default; override to [ReadWriteMany] if using existingClaim
  subPath: ""  # Optional subpath within volume
  ## Chart-managed PVC creation (ONLY for mode=shared when existingClaim is empty)
  ## Renders templates/pvc-shared.yaml
  create:
    enabled: true
    nameSuffix: shared  # PVC name: <release-name>-shared
    storageClass: ""  # Empty = cluster default; override if you need specific storage class
    size: 5Gi  # Override top-level persistence.size if needed
    accessModes:
      - ReadWriteMany  # REQUIRED for multi-replica; NFS/CephFS/Longhorn RWX/etc.
    labels: {}
    annotations: {}
    ## Example for specific storage:
    ##   storageClass: "nfs-client"
    ##   size: 20Gi
## Additional volumes and mounts for the Opengist container.
extraVolumes: []
extraVolumeMounts: []
## Additional pod labels and annotations
podLabels: {}
podAnnotations: {}
## Configure resource requests and limits
## ref: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
resources: {}
  # limits:
  #   cpu: 100m
  #   memory: 128Mi
  # requests:
  #   cpu: 100m
  #   memory: 128Mi
## Configure the liveness and readiness probes
## ref: https://kubernetes.io/docs/concepts/configuration/liveness-readiness-startup-probes/
livenessProbe:
  enabled: true
  initialDelaySeconds: 200
  timeoutSeconds: 1
  periodSeconds: 10
  successThreshold: 1
  failureThreshold: 5
readinessProbe:
  enabled: true
  initialDelaySeconds: 5
  timeoutSeconds: 1
  periodSeconds: 10
  successThreshold: 1
  failureThreshold: 3
## Define autoscaling configuration using Horizontal Pod Autoscaler
## ref: https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/
autoscaling:
  enabled: false
  minReplicas: 1
  maxReplicas: 10
  targetCPUUtilizationPercentage: 80
  # targetMemoryUtilizationPercentage: 80
  annotations: {}
## Additional deployment configuration
deployment:
  ## Extra environment variables for the Opengist container.
  env: []
  terminationGracePeriodSeconds: 60
  labels: {}
  annotations: {}
## Set pod assignment with node labels
## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/
nodeSelector: {}
tolerations: []
affinity: {}
## Use PostgreSQL as a database, using Bitnami's PostgreSQL Helm chart
## ref: https://artifacthub.io/packages/helm/bitnami/postgresql/16.5.6
postgresql:
  enabled: false
  global:
    postgresql:
      auth:
        username: opengist
        password: opengist
        database: opengist
      service:
        ports:
          postgresql: 5432
  primary:
    persistence:
      size: 10Gi
## Use Meilisearch as a code indexer, using Meilisearch's Helm chart
## ref: https://github.com/meilisearch/meilisearch-kubernetes/tree/meilisearch-0.12.0
meilisearch:
  enabled: false
  environment:
    MEILI_ENV: "production"
  auth:
    ## Name of an existing secret holding the Meilisearch master key; empty leaves it unset.
    existingMasterKeySecret: