|
@@ -1,5 +1,7 @@
|
|
|
rbac:
|
|
|
create: true
|
|
|
+ ## Use an existing ClusterRole/Role (depending on rbac.namespaced false/true)
|
|
|
+ # useExistingRole: name-of-some-(cluster)role
|
|
|
pspEnabled: true
|
|
|
pspUseAppArmor: true
|
|
|
namespaced: false
|
|
@@ -15,11 +17,34 @@ serviceAccount:
|
|
|
create: true
|
|
|
name:
|
|
|
nameTest:
|
|
|
+ ## ServiceAccount labels.
|
|
|
+ labels: {}
|
|
|
+## Service account annotations. Can be templated.
|
|
|
# annotations:
|
|
|
# eks.amazonaws.com/role-arn: arn:aws:iam::123456789000:role/iam-role-name-here
|
|
|
+ autoMount: true
|
|
|
|
|
|
replicas: 1
|
|
|
|
|
|
+## Create a headless service for the deployment
|
|
|
+headlessService: false
|
|
|
+
|
|
|
+## Create HorizontalPodAutoscaler object for deployment type
|
|
|
+#
|
|
|
+autoscaling:
|
|
|
+ enabled: false
|
|
|
+# minReplicas: 1
|
|
|
+# maxReplicas: 10
|
|
|
+# metrics:
|
|
|
+# - type: Resource
|
|
|
+# resource:
|
|
|
+# name: cpu
|
|
|
+# targetAverageUtilization: 60
|
|
|
+# - type: Resource
|
|
|
+# resource:
|
|
|
+# name: memory
|
|
|
+# targetAverageUtilization: 60
|
|
|
+
|
|
|
## See `kubectl explain poddisruptionbudget.spec` for more
|
|
|
## ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/
|
|
|
podDisruptionBudget: {}
|
|
@@ -50,14 +75,16 @@ livenessProbe:
|
|
|
# schedulerName: "default-scheduler"
|
|
|
|
|
|
image:
|
|
|
- repository: registry.cn-beijing.aliyuncs.com/dotbalo/grafana
|
|
|
- tag: 7.2.1
|
|
|
+ repository: grafana/grafana
|
|
|
+ # Overrides the Grafana image tag whose default is the chart appVersion
|
|
|
+ tag: ""
|
|
|
sha: ""
|
|
|
pullPolicy: IfNotPresent
|
|
|
|
|
|
## Optionally specify an array of imagePullSecrets.
|
|
|
## Secrets must be manually created in the namespace.
|
|
|
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
|
|
|
+ ## Can be templated.
|
|
|
##
|
|
|
# pullSecrets:
|
|
|
# - myRegistrKeySecretName
|
|
@@ -65,7 +92,7 @@ image:
|
|
|
testFramework:
|
|
|
enabled: true
|
|
|
image: "bats/bats"
|
|
|
- tag: "v1.1.0"
|
|
|
+ tag: "v1.4.1"
|
|
|
imagePullPolicy: IfNotPresent
|
|
|
securityContext: {}
|
|
|
|
|
@@ -74,7 +101,14 @@ securityContext:
|
|
|
runAsGroup: 472
|
|
|
fsGroup: 472
|
|
|
|
|
|
+containerSecurityContext:
|
|
|
+ {}
|
|
|
+
|
|
|
+# Enable creating the grafana configmap
|
|
|
+createConfigmap: true
|
|
|
|
|
|
+# Extra configmaps to mount in grafana pods
|
|
|
+# Values are templated.
|
|
|
extraConfigmapMounts: []
|
|
|
# - name: certs-configmap
|
|
|
# mountPath: /etc/grafana/ssl/
|
|
@@ -88,18 +122,23 @@ extraEmptyDirMounts: []
|
|
|
# mountPath: /etc/grafana/provisioning/notifiers
|
|
|
|
|
|
|
|
|
+# Apply extra labels to common labels.
|
|
|
+extraLabels: {}
|
|
|
+
|
|
|
## Assign a PriorityClassName to pods if set
|
|
|
# priorityClassName:
|
|
|
|
|
|
downloadDashboardsImage:
|
|
|
repository: curlimages/curl
|
|
|
- tag: 7.70.0
|
|
|
+ tag: 7.85.0
|
|
|
sha: ""
|
|
|
pullPolicy: IfNotPresent
|
|
|
|
|
|
downloadDashboards:
|
|
|
env: {}
|
|
|
+ envFromSecret: ""
|
|
|
resources: {}
|
|
|
+ securityContext: {}
|
|
|
|
|
|
## Pod Annotations
|
|
|
# podAnnotations: {}
|
|
@@ -117,13 +156,17 @@ podPortName: grafana
|
|
|
## ref: http://kubernetes.io/docs/user-guide/services/
|
|
|
##
|
|
|
service:
|
|
|
+ enabled: true
|
|
|
type: ClusterIP
|
|
|
port: 80
|
|
|
targetPort: 3000
|
|
|
# targetPort: 4181 To be used with a proxy extraContainer
|
|
|
+ ## Service annotations. Can be templated.
|
|
|
annotations: {}
|
|
|
labels: {}
|
|
|
portName: service
|
|
|
+ # Adds the appProtocol field to the service. This allows to work with istio protocol selection. Ex: "http" or "tcp"
|
|
|
+ appProtocol: ""
|
|
|
|
|
|
serviceMonitor:
|
|
|
## If true, a ServiceMonitor CRD is created for a prometheus operator
|
|
@@ -134,6 +177,8 @@ serviceMonitor:
|
|
|
# namespace: monitoring (defaults to use the namespace this chart is deployed to)
|
|
|
labels: {}
|
|
|
interval: 1m
|
|
|
+ scheme: http
|
|
|
+ tlsConfig: {}
|
|
|
scrapeTimeout: 30s
|
|
|
relabelings: []
|
|
|
|
|
@@ -151,12 +196,19 @@ hostAliases: []
|
|
|
|
|
|
ingress:
|
|
|
enabled: false
|
|
|
+ # For Kubernetes >= 1.18 you should specify the ingress-controller via the field ingressClassName
|
|
|
+ # See https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/#specifying-the-class-of-an-ingress
|
|
|
+ # ingressClassName: nginx
|
|
|
# Values can be templated
|
|
|
annotations: {}
|
|
|
# kubernetes.io/ingress.class: nginx
|
|
|
# kubernetes.io/tls-acme: "true"
|
|
|
labels: {}
|
|
|
path: /
|
|
|
+
|
|
|
+  # pathType is only for k8s >= 1.18
|
|
|
+ pathType: Prefix
|
|
|
+
|
|
|
hosts:
|
|
|
- chart-example.local
|
|
|
## Extra paths to prepend to every host configuration. This is useful when working with annotation based services.
|
|
@@ -165,6 +217,16 @@ ingress:
|
|
|
# backend:
|
|
|
# serviceName: ssl-redirect
|
|
|
# servicePort: use-annotation
|
|
|
+ ## Or for k8s > 1.19
|
|
|
+ # - path: /*
|
|
|
+ # pathType: Prefix
|
|
|
+ # backend:
|
|
|
+ # service:
|
|
|
+ # name: ssl-redirect
|
|
|
+ # port:
|
|
|
+ # name: use-annotation
|
|
|
+
|
|
|
+
|
|
|
tls: []
|
|
|
# - secretName: chart-example-tls
|
|
|
# hosts:
|
|
@@ -188,15 +250,24 @@ nodeSelector: {}
|
|
|
##
|
|
|
tolerations: []
|
|
|
|
|
|
-## Affinity for pod assignment
|
|
|
+## Affinity for pod assignment (evaluated as template)
|
|
|
## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
|
|
|
##
|
|
|
affinity: {}
|
|
|
|
|
|
+## Topology Spread Constraints
|
|
|
+## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/
|
|
|
+##
|
|
|
+topologySpreadConstraints: []
|
|
|
+
|
|
|
+## Additional init containers (evaluated as template)
|
|
|
+## ref: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/
|
|
|
+##
|
|
|
extraInitContainers: []
|
|
|
|
|
|
## Enable and specify a container in extraContainers. This is meant to allow adding an authentication proxy to a grafana pod
|
|
|
-extraContainers: |
|
|
|
+extraContainers: ""
|
|
|
+# extraContainers: |
|
|
|
# - name: proxy
|
|
|
# image: quay.io/gambol99/keycloak-proxy:latest
|
|
|
# args:
|
|
@@ -233,12 +304,28 @@ persistence:
|
|
|
# annotations: {}
|
|
|
finalizers:
|
|
|
- kubernetes.io/pvc-protection
|
|
|
+ # selectorLabels: {}
|
|
|
+ ## Sub-directory of the PV to mount. Can be templated.
|
|
|
# subPath: ""
|
|
|
+ ## Name of an existing PVC. Can be templated.
|
|
|
# existingClaim:
|
|
|
+ ## Extra labels to apply to a PVC.
|
|
|
+ extraPvcLabels: {}
|
|
|
+
|
|
|
+ ## If persistence is not enabled, this allows to mount the
|
|
|
+ ## local storage in-memory to improve performance
|
|
|
+ ##
|
|
|
+ inMemory:
|
|
|
+ enabled: false
|
|
|
+ ## The maximum usage on memory medium EmptyDir would be
|
|
|
+ ## the minimum value between the SizeLimit specified
|
|
|
+ ## here and the sum of memory limits of all containers in a pod
|
|
|
+ ##
|
|
|
+ # sizeLimit: 300Mi
|
|
|
|
|
|
initChownData:
|
|
|
## If false, data ownership will not be reset at startup
|
|
|
- ## This allows the prometheus-server to be run with an arbitrary user
|
|
|
+ ## This allows the grafana-server to be run with an arbitrary user
|
|
|
##
|
|
|
enabled: true
|
|
|
|
|
@@ -260,6 +347,9 @@ initChownData:
|
|
|
# requests:
|
|
|
# cpu: 100m
|
|
|
# memory: 128Mi
|
|
|
+ securityContext:
|
|
|
+ runAsNonRoot: false
|
|
|
+ runAsUser: 0
|
|
|
|
|
|
|
|
|
# Administrator credentials when not using an existing secret (see below)
|
|
@@ -268,6 +358,7 @@ adminUser: admin
|
|
|
|
|
|
# Use an existing secret for the admin user.
|
|
|
admin:
|
|
|
+ ## Name of the secret. Can be templated.
|
|
|
existingSecret: ""
|
|
|
userKey: admin-user
|
|
|
passwordKey: admin-password
|
|
@@ -308,8 +399,8 @@ admin:
|
|
|
|
|
|
env: {}
|
|
|
|
|
|
-## "valueFrom" environment variable references that will be added to deployment pods
|
|
|
-## ref: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.17/#envvarsource-v1-core
|
|
|
+## "valueFrom" environment variable references that will be added to deployment pods. Name is templated.
|
|
|
+## ref: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.19/#envvarsource-v1-core
|
|
|
## Renders in container spec as:
|
|
|
## env:
|
|
|
## ...
|
|
@@ -317,6 +408,10 @@ env: {}
|
|
|
## valueFrom:
|
|
|
## <value rendered as YAML>
|
|
|
envValueFrom: {}
|
|
|
+ # ENV_NAME:
|
|
|
+ # configMapKeyRef:
|
|
|
+ # name: configmap-name
|
|
|
+ # key: value_key
|
|
|
|
|
|
## The name of a secret in the same kubernetes namespace which contain values to be added to the environment
|
|
|
## This can be useful for auth tokens, etc. Value is templated.
|
|
@@ -326,6 +421,25 @@ envFromSecret: ""
|
|
|
## This can be useful for auth tokens, etc
|
|
|
envRenderSecret: {}
|
|
|
|
|
|
+## The names of secrets in the same kubernetes namespace which contain values to be added to the environment
|
|
|
+## Each entry should contain a name key, and can optionally specify whether the secret must be defined with an optional key.
|
|
|
+## Name is templated.
|
|
|
+envFromSecrets: []
|
|
|
+## - name: secret-name
|
|
|
+## optional: true
|
|
|
+
|
|
|
+## The names of configmaps in the same kubernetes namespace which contain values to be added to the environment
|
|
|
+## Each entry should contain a name key, and can optionally specify whether the configmap must be defined with an optional key.
|
|
|
+## Name is templated.
|
|
|
+## ref: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#configmapenvsource-v1-core
|
|
|
+envFromConfigMaps: []
|
|
|
+## - name: configmap-name
|
|
|
+## optional: true
|
|
|
+
|
|
|
+# Inject Kubernetes services as environment variables.
|
|
|
+# See https://kubernetes.io/docs/concepts/services-networking/connect-applications-service/#environment-variables
|
|
|
+enableServiceLinks: true
|
|
|
+
|
|
|
## Additional grafana server secret mounts
|
|
|
# Defines additional mounts with secrets. Secrets must be manually created in the namespace.
|
|
|
extraSecretMounts: []
|
|
@@ -346,14 +460,43 @@ extraSecretMounts: []
|
|
|
# audience: sts.amazonaws.com
|
|
|
# expirationSeconds: 86400
|
|
|
# path: token
|
|
|
+ #
|
|
|
+ # for CSI e.g. Azure Key Vault use the following
|
|
|
+ # - name: secrets-store-inline
|
|
|
+ # mountPath: /run/secrets
|
|
|
+ # readOnly: true
|
|
|
+ # csi:
|
|
|
+ # driver: secrets-store.csi.k8s.io
|
|
|
+ # readOnly: true
|
|
|
+ # volumeAttributes:
|
|
|
+ # secretProviderClass: "akv-grafana-spc"
|
|
|
+ # nodePublishSecretRef: # Only required when using service principal mode
|
|
|
+ # name: grafana-akv-creds # Only required when using service principal mode
|
|
|
|
|
|
## Additional grafana server volume mounts
|
|
|
# Defines additional volume mounts.
|
|
|
extraVolumeMounts: []
|
|
|
- # - name: extra-volume
|
|
|
- # mountPath: /mnt/volume
|
|
|
+ # - name: extra-volume-0
|
|
|
+ # mountPath: /mnt/volume0
|
|
|
# readOnly: true
|
|
|
# existingClaim: volume-claim
|
|
|
+ # - name: extra-volume-1
|
|
|
+ # mountPath: /mnt/volume1
|
|
|
+ # readOnly: true
|
|
|
+ # hostPath: /usr/shared/
|
|
|
+ # - name: grafana-secrets
|
|
|
+ # csi: true
|
|
|
+ # data:
|
|
|
+ # driver: secrets-store.csi.k8s.io
|
|
|
+ # readOnly: true
|
|
|
+ # volumeAttributes:
|
|
|
+ # secretProviderClass: "grafana-env-spc"
|
|
|
+
|
|
|
+## Container Lifecycle Hooks. Execute a specific bash command or make an HTTP request
|
|
|
+lifecycleHooks: {}
|
|
|
+ # postStart:
|
|
|
+ # exec:
|
|
|
+ # command: []
|
|
|
|
|
|
## Pass the plugins you want installed as a list.
|
|
|
##
|
|
@@ -374,13 +517,78 @@ datasources: {}
|
|
|
# access: proxy
|
|
|
# isDefault: true
|
|
|
# - name: CloudWatch
|
|
|
-# type: cloudwatch
|
|
|
-# access: proxy
|
|
|
-# uid: cloudwatch
|
|
|
-# editable: false
|
|
|
-# jsonData:
|
|
|
-# authType: credentials
|
|
|
-# defaultRegion: us-east-1
|
|
|
+# type: cloudwatch
|
|
|
+# access: proxy
|
|
|
+# uid: cloudwatch
|
|
|
+# editable: false
|
|
|
+# jsonData:
|
|
|
+# authType: default
|
|
|
+# defaultRegion: us-east-1
|
|
|
+
|
|
|
+## Configure grafana alerting (can be templated)
|
|
|
+## ref: http://docs.grafana.org/administration/provisioning/#alerting
|
|
|
+##
|
|
|
+alerting: {}
|
|
|
+ # rules.yaml:
|
|
|
+ # apiVersion: 1
|
|
|
+ # groups:
|
|
|
+ # - orgId: 1
|
|
|
+ # name: '{{ .Chart.Name }}_my_rule_group'
|
|
|
+ # folder: my_first_folder
|
|
|
+ # interval: 60s
|
|
|
+ # rules:
|
|
|
+ # - uid: my_id_1
|
|
|
+ # title: my_first_rule
|
|
|
+ # condition: A
|
|
|
+ # data:
|
|
|
+ # - refId: A
|
|
|
+ # datasourceUid: '-100'
|
|
|
+ # model:
|
|
|
+ # conditions:
|
|
|
+ # - evaluator:
|
|
|
+ # params:
|
|
|
+ # - 3
|
|
|
+ # type: gt
|
|
|
+ # operator:
|
|
|
+ # type: and
|
|
|
+ # query:
|
|
|
+ # params:
|
|
|
+ # - A
|
|
|
+ # reducer:
|
|
|
+ # type: last
|
|
|
+ # type: query
|
|
|
+ # datasource:
|
|
|
+ # type: __expr__
|
|
|
+ # uid: '-100'
|
|
|
+ # expression: 1==0
|
|
|
+ # intervalMs: 1000
|
|
|
+ # maxDataPoints: 43200
|
|
|
+ # refId: A
|
|
|
+ # type: math
|
|
|
+ # dashboardUid: my_dashboard
|
|
|
+ # panelId: 123
|
|
|
+ # noDataState: Alerting
|
|
|
+ # for: 60s
|
|
|
+ # annotations:
|
|
|
+ # some_key: some_value
|
|
|
+ # labels:
|
|
|
+ # team: sre_team_1
|
|
|
+ # contactpoints.yaml:
|
|
|
+ # apiVersion: 1
|
|
|
+ # contactPoints:
|
|
|
+ # - orgId: 1
|
|
|
+ # name: cp_1
|
|
|
+ # receivers:
|
|
|
+ # - uid: first_uid
|
|
|
+ # type: pagerduty
|
|
|
+ # settings:
|
|
|
+ # integrationKey: XXX
|
|
|
+ # severity: critical
|
|
|
+ # class: ping failure
|
|
|
+ # component: Grafana
|
|
|
+ # group: app-stack
|
|
|
+ # summary: |
|
|
|
+ # {{ `{{ template "default.message" . }}` }}
|
|
|
|
|
|
## Configure notifiers
|
|
|
## ref: http://docs.grafana.org/administration/provisioning/#alert-notification-channels
|
|
@@ -437,9 +645,17 @@ dashboards: {}
|
|
|
# datasource: Prometheus
|
|
|
# local-dashboard:
|
|
|
# url: https://example.com/repository/test.json
|
|
|
+ # token: ''
|
|
|
# local-dashboard-base64:
|
|
|
# url: https://example.com/repository/test-b64.json
|
|
|
+ # token: ''
|
|
|
# b64content: true
|
|
|
+ # local-dashboard-gitlab:
|
|
|
+ # url: https://example.com/repository/test-gitlab.json
|
|
|
+ # gitlabToken: ''
|
|
|
+ # local-dashboard-bitbucket:
|
|
|
+ # url: https://example.com/repository/test-bitbucket.json
|
|
|
+ # bearerToken: ''
|
|
|
|
|
|
## Reference to external ConfigMap per provider. Use provider name as key and ConfigMap name as value.
|
|
|
## A provider dashboards must be defined either by external ConfigMaps or in values.yaml, not in both.
|
|
@@ -458,7 +674,7 @@ dashboardsConfigMaps: {}
|
|
|
##
|
|
|
grafana.ini:
|
|
|
paths:
|
|
|
- data: /var/lib/grafana/data
|
|
|
+ data: /var/lib/grafana/
|
|
|
logs: /var/log/grafana
|
|
|
plugins: /var/lib/grafana/plugins
|
|
|
provisioning: /etc/grafana/provisioning
|
|
@@ -468,6 +684,8 @@ grafana.ini:
|
|
|
mode: console
|
|
|
grafana_net:
|
|
|
url: https://grafana.net
|
|
|
+ server:
|
|
|
+ domain: "{{ if (and .Values.ingress.enabled .Values.ingress.hosts) }}{{ .Values.ingress.hosts | first }}{{ else }}''{{ end }}"
|
|
|
## grafana Authentication can be enabled with the following values on grafana.ini
|
|
|
# server:
|
|
|
# The full public facing url you use in browser, used for redirects and emails
|
|
@@ -528,8 +746,8 @@ smtp:
|
|
|
## Requires at least Grafana 5 to work and can't be used together with parameters dashboardProviders, datasources and dashboards
|
|
|
sidecar:
|
|
|
image:
|
|
|
- repository: kiwigrid/k8s-sidecar
|
|
|
- tag: 0.1.209
|
|
|
+ repository: quay.io/kiwigrid/k8s-sidecar
|
|
|
+ tag: 1.19.2
|
|
|
sha: ""
|
|
|
imagePullPolicy: IfNotPresent
|
|
|
resources: {}
|
|
@@ -539,22 +757,92 @@ sidecar:
|
|
|
# requests:
|
|
|
# cpu: 50m
|
|
|
# memory: 50Mi
|
|
|
+ securityContext: {}
|
|
|
# skipTlsVerify Set to true to skip tls verification for kube api calls
|
|
|
# skipTlsVerify: true
|
|
|
enableUniqueFilenames: false
|
|
|
+ readinessProbe: {}
|
|
|
+ livenessProbe: {}
|
|
|
+ # Log level default for all sidecars. Can be one of: DEBUG, INFO, WARN, ERROR, CRITICAL. Defaults to INFO
|
|
|
+ # logLevel: INFO
|
|
|
+ alerts:
|
|
|
+ enabled: false
|
|
|
+ # Additional environment variables for the alerts sidecar
|
|
|
+ env: {}
|
|
|
+ # Do not reprocess already processed unchanged resources on k8s API reconnect.
|
|
|
+ # ignoreAlreadyProcessed: true
|
|
|
+ # label that the configmaps with alert are marked with
|
|
|
+ label: grafana_alert
|
|
|
+ # value of label that the configmaps with alert are set to
|
|
|
+ labelValue: ""
|
|
|
+ # Log level. Can be one of: DEBUG, INFO, WARN, ERROR, CRITICAL.
|
|
|
+ # logLevel: INFO
|
|
|
+ # If specified, the sidecar will search for alert config-maps inside this namespace.
|
|
|
+ # Otherwise the namespace in which the sidecar is running will be used.
|
|
|
+ # It's also possible to specify ALL to search in all namespaces
|
|
|
+ searchNamespace: null
|
|
|
+ # Method to use to detect ConfigMap changes. With WATCH the sidecar will do a WATCH requests, with SLEEP it will list all ConfigMaps, then sleep for 60 seconds.
|
|
|
+ watchMethod: WATCH
|
|
|
+ # search in configmap, secret or both
|
|
|
+ resource: both
|
|
|
+ # watchServerTimeout: request to the server, asking it to cleanly close the connection after that.
|
|
|
+ # defaults to 60sec; much higher values like 3600 seconds (1h) are feasible for non-Azure K8S
|
|
|
+ # watchServerTimeout: 3600
|
|
|
+ #
|
|
|
+ # watchClientTimeout: is a client-side timeout, configuring your local socket.
|
|
|
+ # If you have a network outage dropping all packets with no RST/FIN,
|
|
|
+ # this is how long your client waits before realizing & dropping the connection.
|
|
|
+ # defaults to 66sec (sic!)
|
|
|
+ # watchClientTimeout: 60
|
|
|
+ #
|
|
|
+ # Endpoint to send request to reload alerts
|
|
|
+ reloadURL: "http://localhost:3000/api/admin/provisioning/alerting/reload"
|
|
|
+  # Absolute path to shell script to execute after an alert got reloaded
|
|
|
+ script: null
|
|
|
+ skipReload: false
|
|
|
+ # Deploy the alert sidecar as an initContainer in addition to a container.
|
|
|
+ # Sets the size limit of the alert sidecar emptyDir volume
|
|
|
+ sizeLimit: {}
|
|
|
dashboards:
|
|
|
enabled: false
|
|
|
+ # Additional environment variables for the dashboards sidecar
|
|
|
+ env: {}
|
|
|
+ # Do not reprocess already processed unchanged resources on k8s API reconnect.
|
|
|
+ # ignoreAlreadyProcessed: true
|
|
|
SCProvider: true
|
|
|
# label that the configmaps with dashboards are marked with
|
|
|
label: grafana_dashboard
|
|
|
+ # value of label that the configmaps with dashboards are set to
|
|
|
+ labelValue: ""
|
|
|
+ # Log level. Can be one of: DEBUG, INFO, WARN, ERROR, CRITICAL.
|
|
|
+ # logLevel: INFO
|
|
|
# folder in the pod that should hold the collected dashboards (unless `defaultFolderName` is set)
|
|
|
folder: /tmp/dashboards
|
|
|
# The default folder name, it will create a subfolder under the `folder` and put dashboards in there instead
|
|
|
defaultFolderName: null
|
|
|
- # If specified, the sidecar will search for dashboard config-maps inside this namespace.
|
|
|
+ # Namespaces list. If specified, the sidecar will search for config-maps/secrets inside these namespaces.
|
|
|
# Otherwise the namespace in which the sidecar is running will be used.
|
|
|
- # It's also possible to specify ALL to search in all namespaces
|
|
|
+ # It's also possible to specify ALL to search in all namespaces.
|
|
|
searchNamespace: null
|
|
|
+ # Method to use to detect ConfigMap changes. With WATCH the sidecar will do a WATCH requests, with SLEEP it will list all ConfigMaps, then sleep for 60 seconds.
|
|
|
+ watchMethod: WATCH
|
|
|
+ # search in configmap, secret or both
|
|
|
+ resource: both
|
|
|
+ # If specified, the sidecar will look for annotation with this name to create folder and put graph here.
|
|
|
+  # You can use this parameter together with `provider.foldersFromFilesStructure` to annotate configmaps and create folder structure.
|
|
|
+ folderAnnotation: null
|
|
|
+ # Absolute path to shell script to execute after a configmap got reloaded
|
|
|
+ script: null
|
|
|
+ # watchServerTimeout: request to the server, asking it to cleanly close the connection after that.
|
|
|
+ # defaults to 60sec; much higher values like 3600 seconds (1h) are feasible for non-Azure K8S
|
|
|
+ # watchServerTimeout: 3600
|
|
|
+ #
|
|
|
+ # watchClientTimeout: is a client-side timeout, configuring your local socket.
|
|
|
+ # If you have a network outage dropping all packets with no RST/FIN,
|
|
|
+ # this is how long your client waits before realizing & dropping the connection.
|
|
|
+ # defaults to 66sec (sic!)
|
|
|
+ # watchClientTimeout: 60
|
|
|
+ #
|
|
|
# provider configuration that lets grafana manage the dashboards
|
|
|
provider:
|
|
|
# name of the provider, should be unique
|
|
@@ -571,22 +859,130 @@ sidecar:
|
|
|
allowUiUpdates: false
|
|
|
# allow Grafana to replicate dashboard structure from filesystem
|
|
|
foldersFromFilesStructure: false
|
|
|
+ # Additional dashboard sidecar volume mounts
|
|
|
+ extraMounts: []
|
|
|
+ # Sets the size limit of the dashboard sidecar emptyDir volume
|
|
|
+ sizeLimit: {}
|
|
|
datasources:
|
|
|
enabled: false
|
|
|
+    # Additional environment variables for the datasources sidecar
|
|
|
+ env: {}
|
|
|
+ # Do not reprocess already processed unchanged resources on k8s API reconnect.
|
|
|
+ # ignoreAlreadyProcessed: true
|
|
|
# label that the configmaps with datasources are marked with
|
|
|
label: grafana_datasource
|
|
|
+ # value of label that the configmaps with datasources are set to
|
|
|
+ labelValue: ""
|
|
|
+ # Log level. Can be one of: DEBUG, INFO, WARN, ERROR, CRITICAL.
|
|
|
+ # logLevel: INFO
|
|
|
# If specified, the sidecar will search for datasource config-maps inside this namespace.
|
|
|
# Otherwise the namespace in which the sidecar is running will be used.
|
|
|
# It's also possible to specify ALL to search in all namespaces
|
|
|
searchNamespace: null
|
|
|
+ # Method to use to detect ConfigMap changes. With WATCH the sidecar will do a WATCH requests, with SLEEP it will list all ConfigMaps, then sleep for 60 seconds.
|
|
|
+ watchMethod: WATCH
|
|
|
+ # search in configmap, secret or both
|
|
|
+ resource: both
|
|
|
+ # watchServerTimeout: request to the server, asking it to cleanly close the connection after that.
|
|
|
+ # defaults to 60sec; much higher values like 3600 seconds (1h) are feasible for non-Azure K8S
|
|
|
+ # watchServerTimeout: 3600
|
|
|
+ #
|
|
|
+ # watchClientTimeout: is a client-side timeout, configuring your local socket.
|
|
|
+ # If you have a network outage dropping all packets with no RST/FIN,
|
|
|
+ # this is how long your client waits before realizing & dropping the connection.
|
|
|
+ # defaults to 66sec (sic!)
|
|
|
+ # watchClientTimeout: 60
|
|
|
+ #
|
|
|
+ # Endpoint to send request to reload datasources
|
|
|
+ reloadURL: "http://localhost:3000/api/admin/provisioning/datasources/reload"
|
|
|
+ # Absolute path to shell script to execute after a datasource got reloaded
|
|
|
+ script: null
|
|
|
+ skipReload: false
|
|
|
+ # Deploy the datasource sidecar as an initContainer in addition to a container.
|
|
|
+ # This is needed if skipReload is true, to load any datasources defined at startup time.
|
|
|
+ initDatasources: false
|
|
|
+ # Sets the size limit of the datasource sidecar emptyDir volume
|
|
|
+ sizeLimit: {}
|
|
|
+ plugins:
|
|
|
+ enabled: false
|
|
|
+ # Additional environment variables for the plugins sidecar
|
|
|
+ env: {}
|
|
|
+ # Do not reprocess already processed unchanged resources on k8s API reconnect.
|
|
|
+ # ignoreAlreadyProcessed: true
|
|
|
+ # label that the configmaps with plugins are marked with
|
|
|
+ label: grafana_plugin
|
|
|
+ # value of label that the configmaps with plugins are set to
|
|
|
+ labelValue: ""
|
|
|
+ # Log level. Can be one of: DEBUG, INFO, WARN, ERROR, CRITICAL.
|
|
|
+ # logLevel: INFO
|
|
|
+ # If specified, the sidecar will search for plugin config-maps inside this namespace.
|
|
|
+ # Otherwise the namespace in which the sidecar is running will be used.
|
|
|
+ # It's also possible to specify ALL to search in all namespaces
|
|
|
+ searchNamespace: null
|
|
|
+ # Method to use to detect ConfigMap changes. With WATCH the sidecar will do a WATCH requests, with SLEEP it will list all ConfigMaps, then sleep for 60 seconds.
|
|
|
+ watchMethod: WATCH
|
|
|
+ # search in configmap, secret or both
|
|
|
+ resource: both
|
|
|
+ # watchServerTimeout: request to the server, asking it to cleanly close the connection after that.
|
|
|
+ # defaults to 60sec; much higher values like 3600 seconds (1h) are feasible for non-Azure K8S
|
|
|
+ # watchServerTimeout: 3600
|
|
|
+ #
|
|
|
+ # watchClientTimeout: is a client-side timeout, configuring your local socket.
|
|
|
+ # If you have a network outage dropping all packets with no RST/FIN,
|
|
|
+ # this is how long your client waits before realizing & dropping the connection.
|
|
|
+ # defaults to 66sec (sic!)
|
|
|
+ # watchClientTimeout: 60
|
|
|
+ #
|
|
|
+ # Endpoint to send request to reload plugins
|
|
|
+ reloadURL: "http://localhost:3000/api/admin/provisioning/plugins/reload"
|
|
|
+ # Absolute path to shell script to execute after a plugin got reloaded
|
|
|
+ script: null
|
|
|
+ skipReload: false
|
|
|
+ # Deploy the datasource sidecar as an initContainer in addition to a container.
|
|
|
+ # This is needed if skipReload is true, to load any plugins defined at startup time.
|
|
|
+ initPlugins: false
|
|
|
+ # Sets the size limit of the plugin sidecar emptyDir volume
|
|
|
+ sizeLimit: {}
|
|
|
notifiers:
|
|
|
enabled: false
|
|
|
+    # Additional environment variables for the notifiers sidecar
|
|
|
+ env: {}
|
|
|
+ # Do not reprocess already processed unchanged resources on k8s API reconnect.
|
|
|
+ # ignoreAlreadyProcessed: true
|
|
|
# label that the configmaps with notifiers are marked with
|
|
|
label: grafana_notifier
|
|
|
+ # value of label that the configmaps with notifiers are set to
|
|
|
+ labelValue: ""
|
|
|
+ # Log level. Can be one of: DEBUG, INFO, WARN, ERROR, CRITICAL.
|
|
|
+ # logLevel: INFO
|
|
|
# If specified, the sidecar will search for notifier config-maps inside this namespace.
|
|
|
# Otherwise the namespace in which the sidecar is running will be used.
|
|
|
# It's also possible to specify ALL to search in all namespaces
|
|
|
searchNamespace: null
|
|
|
+ # Method to use to detect ConfigMap changes. With WATCH the sidecar will do a WATCH requests, with SLEEP it will list all ConfigMaps, then sleep for 60 seconds.
|
|
|
+ watchMethod: WATCH
|
|
|
+ # search in configmap, secret or both
|
|
|
+ resource: both
|
|
|
+ # watchServerTimeout: request to the server, asking it to cleanly close the connection after that.
|
|
|
+ # defaults to 60sec; much higher values like 3600 seconds (1h) are feasible for non-Azure K8S
|
|
|
+ # watchServerTimeout: 3600
|
|
|
+ #
|
|
|
+ # watchClientTimeout: is a client-side timeout, configuring your local socket.
|
|
|
+ # If you have a network outage dropping all packets with no RST/FIN,
|
|
|
+ # this is how long your client waits before realizing & dropping the connection.
|
|
|
+ # defaults to 66sec (sic!)
|
|
|
+ # watchClientTimeout: 60
|
|
|
+ #
|
|
|
+ # Endpoint to send request to reload notifiers
|
|
|
+ reloadURL: "http://localhost:3000/api/admin/provisioning/notifications/reload"
|
|
|
+ # Absolute path to shell script to execute after a notifier got reloaded
|
|
|
+ script: null
|
|
|
+ skipReload: false
|
|
|
+ # Deploy the notifier sidecar as an initContainer in addition to a container.
|
|
|
+ # This is needed if skipReload is true, to load any notifiers defined at startup time.
|
|
|
+ initNotifiers: false
|
|
|
+ # Sets the size limit of the notifier sidecar emptyDir volume
|
|
|
+ sizeLimit: {}
|
|
|
|
|
|
## Override the deployment namespace
|
|
|
##
|
|
@@ -598,6 +994,7 @@ revisionHistoryLimit: 10
|
|
|
|
|
|
## Add a separate remote image renderer deployment/service
|
|
|
imageRenderer:
|
|
|
+ deploymentStrategy: {}
|
|
|
# Enable the image-renderer deployment & service
|
|
|
enabled: false
|
|
|
replicas: 1
|
|
@@ -611,20 +1008,39 @@ imageRenderer:
|
|
|
# image-renderer ImagePullPolicy
|
|
|
pullPolicy: Always
|
|
|
# extra environment variables
|
|
|
- env: {}
|
|
|
- # RENDERING_ARGS: --disable-gpu,--window-size=1280x758
|
|
|
+ env:
|
|
|
+ HTTP_HOST: "0.0.0.0"
|
|
|
+ # RENDERING_ARGS: --no-sandbox,--disable-gpu,--window-size=1280x758
|
|
|
# RENDERING_MODE: clustered
|
|
|
+ # IGNORE_HTTPS_ERRORS: true
|
|
|
+ # image-renderer deployment serviceAccount
|
|
|
+ serviceAccountName: ""
|
|
|
# image-renderer deployment securityContext
|
|
|
securityContext: {}
|
|
|
+ # image-renderer deployment container securityContext
|
|
|
+ containerSecurityContext:
|
|
|
+ capabilities:
|
|
|
+ drop: ['ALL']
|
|
|
+ allowPrivilegeEscalation: false
|
|
|
+ readOnlyRootFilesystem: true
|
|
|
# image-renderer deployment Host Aliases
|
|
|
hostAliases: []
|
|
|
# image-renderer deployment priority class
|
|
|
priorityClassName: ''
|
|
|
service:
|
|
|
+ # Enable the image-renderer service
|
|
|
+ enabled: true
|
|
|
# image-renderer service port name
|
|
|
portName: 'http'
|
|
|
# image-renderer service port used by both service and deployment
|
|
|
port: 8081
|
|
|
+ targetPort: 8081
|
|
|
+ # Adds the appProtocol field to the image-renderer service. This allows to work with istio protocol selection. Ex: "http" or "tcp"
|
|
|
+ appProtocol: ""
|
|
|
+ # If https is enabled in Grafana, this needs to be set as 'https' to correctly configure the callback used in Grafana
|
|
|
+ grafanaProtocol: http
|
|
|
+ # In case a sub_path is used this needs to be added to the image renderer callback
|
|
|
+ grafanaSubPath: ""
|
|
|
# name of the image-renderer port on the pod
|
|
|
podPortName: http
|
|
|
# number of image-renderer replica sets to keep
|
|
@@ -641,3 +1057,86 @@ imageRenderer:
|
|
|
# requests:
|
|
|
# cpu: 50m
|
|
|
# memory: 50Mi
|
|
|
+ ## Node labels for pod assignment
|
|
|
+ ## ref: https://kubernetes.io/docs/user-guide/node-selection/
|
|
|
+ #
|
|
|
+ nodeSelector: {}
|
|
|
+
|
|
|
+ ## Tolerations for pod assignment
|
|
|
+ ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
|
|
|
+ ##
|
|
|
+ tolerations: []
|
|
|
+
|
|
|
+ ## Affinity for pod assignment (evaluated as template)
|
|
|
+ ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
|
|
|
+ ##
|
|
|
+ affinity: {}
|
|
|
+
|
|
|
+networkPolicy:
|
|
|
+ ## @param networkPolicy.enabled Enable creation of NetworkPolicy resources. Only Ingress traffic is filtered for now.
|
|
|
+ ##
|
|
|
+ enabled: false
|
|
|
+ ## @param networkPolicy.allowExternal Don't require client label for connections
|
|
|
+ ## The Policy model to apply. When set to false, only pods with the correct
|
|
|
+ ## client label will have network access to grafana port defined.
|
|
|
+ ## When true, grafana will accept connections from any source
|
|
|
+ ## (with the correct destination port).
|
|
|
+ ##
|
|
|
+ ingress: true
|
|
|
+ ## @param networkPolicy.ingress When true enables the creation
|
|
|
+ ## an ingress network policy
|
|
|
+ ##
|
|
|
+ allowExternal: true
|
|
|
+ ## @param networkPolicy.explicitNamespacesSelector A Kubernetes LabelSelector to explicitly select namespaces from which traffic could be allowed
|
|
|
+ ## If explicitNamespacesSelector is missing or set to {}, only client Pods that are in the networkPolicy's namespace
|
|
|
+ ## and that match other criteria, the ones that have the good label, can reach the grafana.
|
|
|
+ ## But sometimes, we want the grafana to be accessible to clients from other namespaces, in this case, we can use this
|
|
|
+ ## LabelSelector to select these namespaces, note that the networkPolicy's namespace should also be explicitly added.
|
|
|
+ ##
|
|
|
+ ## Example:
|
|
|
+ ## explicitNamespacesSelector:
|
|
|
+ ## matchLabels:
|
|
|
+ ## role: frontend
|
|
|
+ ## matchExpressions:
|
|
|
+ ## - {key: role, operator: In, values: [frontend]}
|
|
|
+ ##
|
|
|
+ explicitNamespacesSelector: {}
|
|
|
+ ##
|
|
|
+ ##
|
|
|
+ ##
|
|
|
+ ##
|
|
|
+ ##
|
|
|
+ ##
|
|
|
+ egress:
|
|
|
+ ## @param networkPolicy.egress.enabled When enabled, an egress network policy will be
|
|
|
+ ## created allowing grafana to connect to external data sources from kubernetes cluster.
|
|
|
+ enabled: false
|
|
|
+ ##
|
|
|
+ ## @param networkPolicy.egress.ports Add individual ports to be allowed by the egress
|
|
|
+ ports: []
|
|
|
+ ## Add ports to the egress by specifying - port: <port number>
|
|
|
+ ## E.X.
|
|
|
+ ## ports:
|
|
|
+ ## - port: 80
|
|
|
+ ## - port: 443
|
|
|
+ ##
|
|
|
+ ##
|
|
|
+ ##
|
|
|
+ ##
|
|
|
+ ##
|
|
|
+ ##
|
|
|
+
|
|
|
+# Enable backward compatibility of kubernetes where version below 1.13 doesn't have the enableServiceLinks option
|
|
|
+enableKubeBackwardCompatibility: false
|
|
|
+useStatefulSet: false
|
|
|
+# Create a dynamic manifests via values:
|
|
|
+extraObjects: []
|
|
|
+ # - apiVersion: "kubernetes-client.io/v1"
|
|
|
+ # kind: ExternalSecret
|
|
|
+ # metadata:
|
|
|
+ # name: grafana-secrets
|
|
|
+ # spec:
|
|
|
+ # backendType: gcpSecretsManager
|
|
|
+ # data:
|
|
|
+ # - key: grafana-admin-password
|
|
|
+ # name: adminPassword
|