Helm Values (4.1)
These are the Helm values supported in this release of Usage Engine Private Edition.
## Default values for Usage Engine Private Edition.
## This is a YAML-formatted file.
## Declare variables to be passed into your templates.
## Pure documentation comments are prefixed with ## (double hash)
## Commented out values are prefixed with # (single hash)
## Only on-premise, aws and gcp are supported for now
environment: on-premise
global:
## The domain value is used when a hostname ingress is configured.
#domain: my.domain.com
## The region that the kubernetes cluster belongs to
#region: west1
## Service Account used to apply the objects
#serviceAccountName: default
## Whether this installation is part of a multi-tenant installation or not.
## Please refer to the InfoZone documentation on the topic of Multi Tenancy for details.
multiTenant: false
## If the container images shall be pulled from a private registry,
## then uncomment this section and specify the name of the secret
## containing the credentials for that registry.
#imagePullSecrets:
#- name: regcred
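## For reference, a sketch of how such a secret could be created with kubectl
## (the registry URL and credentials are placeholders):
## kubectl create secret docker-registry regcred \
##   --docker-server=<registry URL> \
##   --docker-username=<user> \
##   --docker-password=<password>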
## Performance metrics are generated and exposed by default.
metrics:
## Monitor resources (PodMonitor / ServiceMonitor) are set up automatically if those resource definitions exist in the cluster,
## thereby making the metrics automatically discoverable by your Prometheus resource.
monitor:
## Set the label(s) required by your Prometheus resource, if any.
## For details refer to the serviceMonitorSelector.matchLabels and podMonitorSelector.matchLabels fields in the Prometheus documentation:
## https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md
labels: {}
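## A minimal sketch, assuming your Prometheus resource selects monitors by a
## "release" label (the label name and value are placeholders):
#labels:
#  release: prometheus-stack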
## Set to true to force roll of all deployments and statefulsets in this chart
forceRoll: false
namespace:
## Enable this if you have multiple Usage Engine Private Edition installations in multiple namespaces in an EKS/GKE cluster
## When setting global.namespace.enabled to false:
## - The resulting domain name will be <function>.<domain name>, e.g. desktop-online.uepe-eks.example.com
## - [postgres | oracle | saphana].db is used as per its value
## When setting global.namespace.enabled to true:
## - "-<namespace>" will be added to the domain name, resulting in <function>-<namespace>.<domain name>, e.g. desktop-online-namespace1.uepe-eks.example.com
## - If [postgres | oracle | saphana].db is empty, the suffix "<namespace>" will be added to the [postgres | oracle | saphana].db value, e.g. namespace1
## Note that if you are using a GCP managed certificate, you need to remove the existing certificate before enabling this property
enabled: false
ingressController:
## The name of the nginx ingress controller service. This is used by the alb ingress resource.
serviceName: "{{ .Release.Name }}-ingress-nginx-v4-controller"
debug:
script:
enabled: false
log:
level:
codeserver: info
jetty: 'off'
others: warn
jmx:
## Legacy configuration to expose metrics for scraping by prometheus.
## This is deprecated in favor of using the automatic service discovery capabilities of the prometheus stack.
## Refer to the global.metrics values.
export:
## Set to true to expose platform metrics for scraping by prometheus.
enabled: false
## The port on which the platform metrics are exposed.
port: 8888
log:
## Format can be "json" or "raw". Default is "raw"
format: raw
## Pattern is only for raw format, refer to log4j standard
pattern: '%d: %5p: %m%n'
## Paste the license key here, otherwise use the option '--set-file licenseKey=<licenseKey_file>' when running helm install.
licenseKey: ' '
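## For reference, a sketch of the --set-file option mentioned above
## (release name, chart reference and file path are placeholders):
## helm install uepe <chart reference> --set-file licenseKey=./license.key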
## The timezone MediationZone should run in, e.g. 'Europe/Stockholm'
timezone: UTC
## Schedule downtime for all ECDs for the purpose of cold backup.
suspend:
## The time when the downtime shall begin. Needs to be specified in crontab format.
#from: "0 3 * * *"
## The time when the downtime shall end. Needs to be specified in crontab format.
#until: "10 3 * * *"
persistence:
enabled: false
## A manually managed Persistent Volume and Claim
## If defined, the PVC must be created manually before the volume will be bound
## The value is evaluated as a template, so, for example, the name can depend on .Release or .Chart
#existingClaim:
## If existingClaim is not defined, it will fall back to a bundled PVC based on the current environment.
## Currently only the aws environment has a bundled PVC associated with it.
#bundledClaim:
## The amount of storage to request. Default is 1Gi.
#storageRequest: "10Gi"
## See https://kubernetes.io/docs/concepts/storage/persistent-volumes/#access-modes for the available access modes.
## aws default is "ReadWriteMany", others default to "ReadWriteOnce".
#accessModes: []
## Specify the storage class to be used in the bundled PVC.
## If this is not set, the default storage class will be used. aws defaults to "aws-efs".
#storageClassName:
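## A minimal sketch enabling persistence with a manually created claim
## (the claim name is a placeholder):
#enabled: true
#existingClaim: my-platform-pvc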
platform:
metadata:
annotations: {}
labels: {}
replicaCount: 1
repository: 462803626708.dkr.ecr.eu-west-1.amazonaws.com/usage-engine-private-edition
tag: 4.1.0
pullPolicy: IfNotPresent
## Add/override jvm arguments
jvmArgs:
- XX:MaxMetaspaceSize=512m
- Xms256m
- Xmx2g
## Add/override system properties
## It is possible to refer to another system property by wrapping it in ${...}
systemProperties:
#- someotherprop=${mz.home}/someothervalue
init:
## Platform init container resources
## Set this if you need to specify resource requests and/or limits
## Reference: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
resources: {}
db:
## derby, postgresql, oracle or saphana
type: derby
## The credentials of the jdbc user. I.e. the user that is used at runtime to connect to the system database.
## It is not recommended to provide the password in this way since it is a potential security risk.
## Refer to the Bootstrapping System Credentials section in the installation guide on InfoZone for additional information about this.
jdbcUser: mzadmin
#jdbcPassword: bXo=
## Keystore is stored in /opt/mz/persistent/keys even if persistence is disabled.
tls:
enabled: false
cert:
## Method to provide certificate.
## Supported values are:
## 'certManager' - Generation and renewal of cert is managed by cert-manager (needs to be installed separately).
## 'secret' - A keystore is manually stored in a K8s secret with the specified name.
## 'key' - A self-signed certificate is generated and stored on disk. This is deprecated and will be removed in a future release.
public: key
## Used when "platform.tls.cert.public=certManager" to automate certificate management using cert-manager
## Requires an Issuer or ClusterIssuer to be created separately
## See details in https://infozone.atlassian.net/wiki/spaces/UEPE4D/pages/107217404/Bootstrapping+System+Certificates+and+Secrets+-+Private+Cloud+4.0
#certManager:
# public:
# issuer:
# name: letsencrypt-prod
# kind: ClusterIssuer
## This value is deprecated, please use global.domain instead.
## domain: xxx.xxx.xxx
## Used when "platform.tls.cert.public=secret" to configure manually provisioned keystore and certificate
## See details in https://infozone.atlassian.net/wiki/spaces/UEPE4D/pages/107217404/Bootstrapping+System+Certificates+and+Secrets+-+Private+Cloud+4.0
#secret:
# public:
# name: mz-cert
key:
## Uncomment if credentials are not already provided through secret "env-secrets"
## Note that when cert-manager is used, password and storepassword must have the same values!
#password: RGVmYXVsdEtleXN0b3JlUFdE
#storepassword: RGVmYXVsdEtleXN0b3JlUFdE
alias: certificate
## Platform container resources
## Set this if you need to specify resource requests and/or limits
## Reference: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
resources: {}
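## A sketch of a possible resource specification (the values are placeholders,
## tune them for your environment):
#resources:
#  requests:
#    cpu: "1"
#    memory: 2Gi
#  limits:
#    memory: 4Gi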
probes:
## If the platform takes a very long time to start, it
## might get restarted because these thresholds have been reached.
## If a pod does not reach the ready state (readiness probe success), it will be restarted.
## If a pod's liveness probe fails failureThreshold times, the pod will be restarted.
liveness:
initialDelaySeconds: 300
periodSeconds: 15
timeoutSeconds: 10
successThreshold: 1
failureThreshold: 3
readiness:
initialDelaySeconds: 10
periodSeconds: 5
timeoutSeconds: 10
successThreshold: 1
failureThreshold: 120
## Node, affinity, tolerations for pod assignment
## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector
## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#taints-and-tolerations-beta-feature
nodeSelector: {}
affinity: {}
tolerations: []
service:
metadata:
annotations: {}
type: NodePort
ports:
- name: http
port: 9000
#nodePort: 30900 # Use this to explicitly set the external port
targetPort: 9000
protocol: TCP
- name: rcp
port: 6790
#nodePort: 30679 # Use this to explicitly set the external port
targetPort: 6790
protocol: TCP
## Use the configMaps field to mount configuration files
## like external references files
## into /opt/mz/etc
#configMaps:
#- file: extrefs.txt
# data: |
# parameter1=value1
# parameter2=value2
## Metrics configuration specific to the platform.
metrics:
podMonitor:
## Relabeling to apply to the platform podMonitor resource.
## Need to be given as an array of RelabelConfig.
## Refer to the prometheus documentation for details:
## https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md
relabelings: []
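## A minimal sketch of a RelabelConfig entry (the target label name is a placeholder):
#relabelings:
#- sourceLabels: [__meta_kubernetes_pod_node_name]
#  targetLabel: node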
## Defines additional secret mounts. Secrets must be manually created in the namespace.
extraSecretMounts: []
#- name: secret-files
# mountPath: /etc/secrets
# subPath: ""
# secretName: my-secret-files
# readOnly: true
## Defines additional config map mounts. Config maps must be manually created in the namespace.
extraConfigmapMounts: []
#- name: my-configmap
# mountPath: /etc/config
# subPath: ""
# configMap: my-configmap
# readOnly: true
## Extra general purpose volume mounts
extraVolumeMounts: []
#- name: example
# mountPath: /example
## Extra general purpose volumes
extraVolumes: []
#- name: example
# emptyDir: {}
## Optional sidecar containers.
## The items in this list must be specified according to the Kubernetes Container API.
sidecars: []
#- name: example
# image: example/example
# imagePullPolicy: IfNotPresent
# resources: {}
# ports:
# - name: example
# containerPort: 8080
# protocol: TCP
# env:
# - name: EXAMPLE
# value: example
# volumeMounts:
# - name: example
# mountPath: /example
postgres:
## The PostgreSQL database administrator username.
## Only required if the Usage Engine Private Edition system database is to be automatically created.
## Refer to the System Database section in the installation guide on InfoZone for additional information about this.
adminUsername: postgres
## The PostgreSQL database administrator password.
## Only required if the Usage Engine Private Edition system database is to be automatically created.
## Refer to the System Database section in the installation guide on InfoZone for additional information about this.
## Also, it is not recommended to provide the password in this way since it is a potential security risk.
## Refer to the Bootstrapping System Credentials section in the installation guide on InfoZone for additional information about this.
#adminPassword: dGVzdA==
## The password of the mzowner user. I.e. the user that is the owner of the system database schema.
## It is not recommended to provide the password in this way since it is a potential security risk.
## Refer to the Bootstrapping System Credentials section in the installation guide on InfoZone for additional information about this.
#mzownerPassword: bXpwb3N0Z3Jlcw==
## If postgres.db is set, the db name will be created as per its value
## Else if global.namespace.enabled is true and env is "aws" or "gcp", default to mz<namespace>, e.g. mznamespace1
## Else default to "mz"
db:
port: 5432
host: postgresql
saphana:
## The SAP HANA database administrator username.
## Only required if the Usage Engine Private Edition system database is to be automatically created.
## Refer to the System Database section in the installation guide on InfoZone for additional information about this.
adminUsername: SYSTEM
## The SAP HANA database administrator password.
## Only required if the Usage Engine Private Edition system database is to be automatically created.
## Refer to the System Database section in the installation guide on InfoZone for additional information about this.
## Also, it is not recommended to provide the password in this way since it is a potential security risk.
## Refer to the Bootstrapping System Credentials section in the installation guide on InfoZone for additional information about this.
#adminPassword: dGVzdA==
## The name of the Usage Engine Private Edition system database.
db: MZ
ports:
## The port that will be used at runtime for jdbc connections towards the Usage Engine Private Edition system database.
jdbc: 39041
## The port that will be used by the hdbsql client when first creating the Usage Engine Private Edition system database.
hdbsql: 39013
## The host of the SAP HANA database service.
host: saphana
## The name of the SAP HANA System Database
systemDb: SYSTEMDB
## The SAP HANA instance number
instance: 90
oracle:
## The Oracle database administrator username.
## Only required if the Usage Engine Private Edition system database is to be automatically created (only supported for Oracle Express Edition - see the expressEdition value below).
## Refer to the System Database section in the installation guide on InfoZone for additional information about this.
adminUsername: sys
## The Oracle database administrator password.
## Only required if the Usage Engine Private Edition system database is to be automatically created (only supported for Oracle Express Edition - see the expressEdition value below).
## Refer to the System Database section in the installation guide on InfoZone for additional information about this.
## Also, it is not recommended to provide the password in this way since it is a potential security risk.
## Refer to the Bootstrapping System Credentials section in the installation guide on InfoZone for additional information about this.
#adminPassword: T3JhY2xlMTg=
## The password of the mzowner user. I.e. the user that is the owner of the system database schema.
## It is not recommended to provide the password in this way since it is a potential security risk.
## Refer to the Bootstrapping System Credentials section in the installation guide on InfoZone for additional information about this.
#mzownerPassword: ZHI=
## The oracle home. Needs to correspond to the ORACLE_HOME env var on the database server.
## Not required when using Oracle Express Edition (see the expressEdition value below).
home:
## The host and domain of the database server.
host: oracle18xe
## The port.
port: 1521
## The name of the database. Translates to the SID (or pluggable database name if using Oracle Express Edition).
db: MZ
## The database size (small, medium or large).
size: small
## The path where the data files should be located.
data: /opt/oracle/oradata/XE
## Set to true when using an Oracle Express Edition (XE) installation. Only for dev/test purposes.
expressEdition: true
## The Oracle client version to use. Should be specified in <major>.<minor> format.
## Currently, only version 19.x is supported.
## Needs to correspond with the basicLiteRpm and sqlPlusRpm values below.
clientVersion: 19.9
## The name of the basic lite rpm file corresponding with the Oracle client version specified above.
## This file will have to be added through an extension image.
## See the documentation on the extensions.* values elsewhere in this values file for further details.
basicLiteRpm: oracle-instantclient19.9-basiclite-19.9.0.0.0-1.x86_64.rpm
## The name of the sqlplus rpm file corresponding with the Oracle client version specified above.
## This file will have to be added through an extension image.
## See the documentation on the extensions.* values elsewhere in this values file for further details.
sqlPlusRpm: oracle-instantclient19.9-sqlplus-19.9.0.0.0-1.x86_64.rpm
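## For reference, a minimal Dockerfile sketch for an extension image carrying the
## rpm files above, assuming they sit in the local build context and belong in the
## /opt/uepe/3pp folder described under the extensions.* values:
## ----------------------------------------
## FROM alpine:latest
## COPY oracle-instantclient19.9-*.rpm /opt/uepe/3pp/
## ----------------------------------------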
## Operator deployment.
operator:
metadata:
annotations: {}
labels: {}
## Enable/disable the operator. Setting this to false means the operator-related kubernetes resources will not be created.
enabled: true
## Set to false if you do not want to install the CRDs that are part of this helm chart.
## One reason for doing this is in the situation where the user installing the helm chart does not have permissions
## to create/update CRDs in the cluster.
## In this situation a cluster admin will have to manually install/update the CRDs.
## See the documentation for further details.
installCRDs: true
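## For reference, a cluster admin could install the CRDs manually with, e.g.:
## kubectl apply -f <path to the CRD manifests shipped with this chart>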
## Set a specific namespace that the operator *listens* on.
## I.e. if you have a non-clusterwide operator, it will only act
## on resources deployed to this namespace.
## Defaults to the helm release namespace!
#namespace: operatornamespace
repository: 462803626708.dkr.ecr.eu-west-1.amazonaws.com/usage-engine-private-edition
tag: 4.1.0-operator
pullPolicy: IfNotPresent
## The auth proxy protects the /metrics endpoint
rbacAuthProxy:
enabled: true
webhook:
enabled: false
tls:
cert:
## Delegate certificate management to either certManager or internal.
## Selecting certManager requires cert-manager (https://cert-manager.io) to have been deployed beforehand.
## Selecting internal means basic self-signed certificate management without auto-renewal.
delegate: certManager
## Node, affinity, tolerations for pod assignment
## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector
## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#taints-and-tolerations-beta-feature
nodeSelector: {}
affinity: {}
tolerations: []
## Common config for the resources that the operator is managing
common:
## List of common config for the ECDs that the operator manages
## This enables configuring common annotations, labels and serviceAccount on groups of ECDs.
## The grouping is achieved via the nameRegex.
##
## If you specify that an ECD shall be using a custom serviceAccount here,
## then you need to make sure that it has permissions to perform get and patch operations on the pods/status resource.
## This is an example rule specification to achieve this:
## kind: Role
## apiVersion: rbac.authorization.k8s.io/v1
## metadata:
## name: my-ecd-role
## rules:
## - apiGroups: [""]
## resources: ["pods/status"]
## verbs: ["get", "patch"]
##
ecd:
#- nameRegex: ecd1-.*
# annotations:
# annotation1: some-annotation-value
# annotation2: some-other-annotation-value
# labels:
# label1: some-label-value
# label2: some-other-label-value
# serviceAccount: serviceAccount1
#- nameRegex: ecd2-.*
# annotations:
# annotation3: some-annotation-value
# annotation4: some-other-annotation-value
# labels:
# label3: some-label-value
# label4: some-other-label-value
# serviceAccount: serviceAccount2
## Debug logging for operator
debug:
enabled: false
## Should only be changed if deploying to an environment with an offline network
kubeRbacProxyImageRepository: gcr.io/kubebuilder/kube-rbac-proxy
## Resources for the operator pod. Uncomment to use custom resources.
#resources:
#limits:
#cpu: 100m
#memory: 300Mi
#requests:
#cpu: 100m
#memory: 200Mi
## The syncPeriod is the time the operator waits in between each reconcile loop.
## For large systems (i.e. with many ECDs and many workflows) it may be required to
## increase this in order to prevent the reconcile loops from piling up.
## Default is 300s.
#syncPeriod: 300s
## The time to wait before requeuing a previously failed reconciliation.
## The value must be a parseable duration (see golang time package).
## Default is 2 seconds.
#requeueAfter: 2s
## The timeout value used when the operator places http requests against the platform as part of the process
## of reconciling workflows.
## If you see errors like "context deadline exceeded" in the operator log when reconciling workflows,
## then you can try to increase this timeout.
httpTimeout: 20s
## The password of the mzk8soperator user.
## This user is used for internal communication between the operator and the platform.
## It is not recommended to provide the password in this way since it is a potential security risk.
## Refer to the Bootstrapping System Credentials section in the installation guide on InfoZone for additional information about this.
#operatorPassword:
## aws setup
## Set up aws load balancers and route53 records for the hosted zones and
## control allowed cidrs to access the platform services
aws:
## This value is deprecated, please use global.namespace.enabled instead.
#namespace:
## Enable this if you have multiple Usage Engine Private Edition installations in multiple namespaces in an EKS cluster
## When setting aws.namespace.enabled to false:
## - The resulting domain name will be <function>.<domain name>, e.g. desktop-online.uepe-eks.example.com
## - [postgres | oracle | saphana].db is used as per its value
## When setting aws.namespace.enabled to true:
## - "-<namespace>" will be added to the domain name, resulting in <function>-<namespace>.<domain name>, e.g. desktop-online-namespace1.uepe-eks.example.com
## - If [postgres | oracle | saphana].db is empty, the suffix "<namespace>" will be added to the [postgres | oracle | saphana].db value, e.g. namespace1
#enabled: false
## The certificate to use for ingress traffic.
## Check the AWS Certificate Manager in the AWS Management Console to find out which certificates are available.
acm_certificate: arn:aws:acm:eu-west-1:1234567890:certificate/xxxxxxxx-xxxx-xxxx-xxxxxxxxxxxxxxxxx
## This list of values controls from which network ranges ingress traffic is accepted. Use CIDR notation when setting this value.
access_cidr_blocks:
- 0.0.0.0/0
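## A sketch restricting ingress to a single network range
## (the range below is a documentation placeholder):
#access_cidr_blocks:
#- 203.0.113.0/24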
ingress:
metadata:
## Annotations for the ingress-alb ingress.
annotations:
alb.ingress.kubernetes.io/scheme: internet-facing
alb.ingress.kubernetes.io/listen-ports: '[{"HTTP": 80},{"HTTPS": 443}]'
alb.ingress.kubernetes.io/ssl-policy: "ELBSecurityPolicy-FS-1-2-Res-2019-08"
alb.ingress.kubernetes.io/successCodes: "200-404"
alb.ingress.kubernetes.io/success-codes: "200-404"
## This value is deprecated, please use global.ingressController.serviceName instead.
## The name of the ingress controller service to use
#serviceName: "{{ .Release.Name }}-ingress-nginx-v4-controller"
platform:
service:
metadata:
## Annotations for the platform service.
annotations:
service.beta.kubernetes.io/aws-load-balancer-scheme: internet-facing
service.beta.kubernetes.io/aws-load-balancer-ssl-ports: "9000,443"
service.beta.kubernetes.io/aws-load-balancer-ssl-negotiation-policy: "ELBSecurityPolicy-TLS-1-2-2017-01"
service.beta.kubernetes.io/aws-load-balancer-cross-zone-load-balancing-enabled: "True"
service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout: "3600"
service.beta.kubernetes.io/aws-load-balancer-type: external
service.beta.kubernetes.io/aws-load-balancer-nlb-target-type: ip
external-dns.alpha.kubernetes.io/ttl: "60"
## This helm chart sets some aws related annotations by default on the platform service and the ingress-alb ingress.
## If this is not suitable for one reason or another, they can be excluded by adding the annotation name to this list.
excludeDefaultAnnotations: []
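## A minimal sketch excluding one of the default annotations listed above:
#excludeDefaultAnnotations:
#- external-dns.alpha.kubernetes.io/ttl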
## gcp setup
## By default, this helm chart uses GCP CRDs (ManagedCertificate, FrontendConfig and BackendConfig) to set up the
## load balancer certificate, ingress HTTP-to-HTTPS redirect and custom health check. If they are not suitable for the setup,
## you can disable them by adding the associated ingress alb or platform service annotation to "gcp.excludeDefaultAnnotations".
gcp:
managedCert:
## If this is enabled, a Google managed certificate (ManagedCertificate CRD) will be created.
## While the certificate is being provisioned, it can take a considerable amount of time to be validated.
## Note that the issued certificate will not be overwritten or removed by subsequent helm chart installs or uninstalls.
## To regenerate the certificate, you need to manually delete the ManagedCertificate CRD object and install the helm chart again.
enabled: true
name: managed-cert
ingress:
metadata:
## Annotations for the ingress-alb ingress.
annotations: {}
platform:
service:
metadata:
## Annotations for the platform service.
annotations:
cloud.google.com/l4-rbs: "enabled"
external-dns.alpha.kubernetes.io/ttl: "60"
## This helm chart sets some gcp related annotations by default on the platform service and the ingress-alb ingress.
## If this is not suitable for one reason or another, they can be excluded by adding the annotation name to this list.
excludeDefaultAnnotations: []
portal:
## Configuration of the apps that shall be present on the portal page.
## The portal page is accessible at: http(s)://<desktop online host>/desktop/portal
## If hostname ingress is set up, the $(domain) token can be used to have the domain automatically resolved based on the global.domain value.
apps:
- name: Desktop Online
url: https://desktop-online$(domain)
- name: InfoZone
url: https://infozone.atlassian.net/wiki/spaces/UEPE4D
- name: mzcli
url: https://platform$(domain)/download/mzcli
- name: Desktop Client
url: https://platform$(domain)/launch/desktop/
- name: Grafana
url: https://grafana$(domain)
- name: Kubernetes Dashboard
url: https://dashboard$(domain)
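## A sketch of adding a custom app to the list (name and url are placeholders):
#- name: My App
#  url: https://my-app$(domain)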
security:
password:
control:
## Set this to true to enforce stricter password requirements and mandate a password change upon the first login.
enabled: true
auth:
oidc:
rp:
## Activate/deactivate Usage Engine Private Edition as OIDC Relying Party
enabled: false
auth:
## Available auth methods are CLIENT_SECRET_BASIC and PRIVATE_KEY_JWT
method: "CLIENT_SECRET_BASIC"
client:
## Client id
id: ""
## Client secret is only used when the method is CLIENT_SECRET_BASIC
## Uncomment if credentials are not already provided through secret "oidc-rp-secret"
#secret: ""
## JWT section only used when method is PRIVATE_KEY_JWT
jwt:
## Optional ID Provider KeyId
keyId:
jks:
secret:
## Name of secret to store jks
name:
## Key Alias
alias:
## Key password
## Uncomment if credentials are not already provided through secret "oidc-rp-secret"
#password:
## Keystore password
## Uncomment if credentials are not already provided through secret "oidc-rp-secret"
#storePassword:
provider:
## Base URL for Identity Provider
## URL before /.well-known/openid-configuration
## E.g. https://login.microsoftonline.com/<tenant_ID>/v2.0
url:
## Name of Provider, e.g. Azure
name: ""
group:
## Path in UserInfo or the ID Token to find the access groups mapping, separated by dot (.)
## The groups should be an array of Strings.
## *** Example ***
## Here the groups array is inside an object:
## { myObject : { myGroups : [ "myGroup1", "mygroup2" ] } }
## The path should then be:
## groupPath: myObject.myGroups
## When the groups array is directly under UserInfo, then groupPath is just the
## name of the groups array.
path:
## Disable group synchronization from the Identity Provider.
## When this is true, groups are set manually on SSO Users
syncDisabled: false
## When Group Sync is disabled, a default group can be added to users logged in through SSO
defaultGroup: ""
## Claim to use for Username
userNameClaim:
## Additional scopes
scopes:
## Set this to true during implementation of SSO Access to get more information.
debug: false
ip:
## Activate/deactivate Usage Engine Private Edition as OIDC Identity Provider
enabled: false
## The oidc base url. Needs to be usable by clients to reach the platform webserver.
## The $(domain) token can be used to have the domain automatically resolved based on the global.domain value.
## The default value is set based on the assumption that hostname ingress is used. If not using hostname ingress,
## the value should point to the ip address and port of the desktop app instead.
## Example: https://192.168.205.5:31327
oidcBaseUrl: https://platform$(domain)
## Whether to use helm generated secrets or not.
## When this is true, the client id and client secret for each client in the list of clients below will be auto-generated by helm.
## When this is false, the client ids and client secrets have to be specified explicitly.
useHelmGeneratedSecrets: true
## List of clients that are allowed to use MediationZone as OIDC provider when authenticating
clients:
## -----------------------------------------------
## Example client using helm generated secrets
## -----------------------------------------------
#- clientName: Some Application
## The clientId value is just an arbitrary name of the secret in env-secrets that will hold the real clientId.
## Just make sure that it is unique relative to any other client id in this list.
## If the secret does not already exist it will be automatically generated.
# clientId: someAppClientId
## The clientSecret value is just an arbitrary name of the secret in env-secrets that will hold the real clientSecret.
## Just make sure that it is unique relative to any other client secret in this list.
## If the secret does not already exist it will be automatically generated.
# clientSecret: someAppClientSecret
## The list of roles associated with the client.
## This controls what level of access can be provisioned for a given client in the Access Controller.
# roles:
# - Editor
# - Viewer
## -----------------------------------------------
## Example client NOT using helm generated secrets
## -----------------------------------------------
#- clientName: Some Application
## The clientId value is expected to be a UUID.
# clientId: 123e4567-e89b-12d3-a456-426614174000
## The clientSecret value is expected to be a cryptographically secure random string.
# clientSecret: 33v1rxwAtBhFTl9SLtQ2lqeCAigN798cUJpZIFFMCz3Nf9PSeVd3ze4MsPMrrNSP
## The list of roles associated with the client.
## This controls what level of access can be provisioned for a given client in the Access Controller.
# roles:
# - Editor
# - Viewer
## PCC backend configuration.
## Supported production grade storages are Couchbase and Redis.
## The memory/directory storage is only meant for development and testing purposes.
pcc:
## Set to true to enable the PCC backend configuration.
## In addition to this, an ECDeployment with the system property mz.pcc.properties=/etc/pcc/pcc.properties will have to be created.
## This ECDeployment will automatically handle the communication with the PCC backend storage.
enabled: false
properties:
## PCC Config Storage Class
# mz.pcc.config.storage.class: com.digitalroute.pcc.storage.config.mysqlc.MySQLClusterStorage
mz.pcc.config.storage.class: com.digitalroute.pcc.storage.config.directory.DirectoryStorage
#mz.pcc.config.storage.class: com.digitalroute.pcc.storage.config.couchbase.CouchbaseConfigStorage
#mz.pcc.config.storage.class: com.digitalroute.pcc.storage.config.redis.RedisConfigStorage
## Directory Config Storage Properties (if used)
mz.pcc.directorystorage.directory: ${mz.home}/tmp/pcc
## PCC Bucket Storage Class
# mz.pcc.bucket.storage.class: com.digitalroute.pcc.buckets.storage.mysqlc.MySQLClusterBucketStorage
mz.pcc.bucket.storage.class: com.digitalroute.pcc.buckets.storage.memory.MemoryBucketStorage
#mz.pcc.bucket.storage.class: com.digitalroute.pcc.buckets.storage.couchbase.CouchbaseBucketStorage
#mz.pcc.bucket.storage.class: com.digitalroute.pcc.buckets.storage.redis.RedisBucketStorage
## Bucket data storage locks (transactions) time out after this many milliseconds
mz.pcc.storage.lock.timeout: 3000
## Couchbase Storage Properties (if used)
## The qualified name of the couchbase profile representing the storage area of products and rules.
#mz.pcc.storage.couchbase.config.profile:
## The qualified name of the couchbase profile representing the storage area of buckets.
#mz.pcc.storage.couchbase.buckets.profile:
## Redis Storage Properties (if used)
## The qualified name of the redis profile representing the storage area of products and rules.
#mz.pcc.storage.redis.config.profile:
## The qualified name of the redis profile representing the storage area of buckets.
#mz.pcc.storage.redis.buckets.profile:
## MySQL Cluster Storage Properties (if used)
#mz.pcc.storage.mysqlc.host:
#mz.pcc.storage.mysqlc.port: 1186
#mz.pcc.storage.mysqlc.database: pcc
#mz.pcc.storage.mysqlc.clusterj.retries: 4
#mz.pcc.storage.mysqlc.clusterj.delay: 5
#mz.pcc.storage.mysqlc.clusterj.verbose: 1
#mz.pcc.storage.mysqlc.clusterj.timeout.before: 30
#mz.pcc.storage.mysqlc.clusterj.timeout.after: 20
#mz.pcc.storage.mysqlc.clusterj.max.transactions: 1024
#mz.pcc.storage.mysqlc.clusterj.connection.pool.size: 2
## If the connection to MySQL Cluster is detected as down,
## it will try to reconnect.
## Set to false to disable reconnecting.
#mz.pcc.storage.mysqlc.auto.reconnect: true
## If the MySQL Cluster key should support strings, set this to true and
## make sure to create the commented table schema in pcc_bucket_mysqlc.sql.
#mz.pcc.storage.mysqlc.stringkey: false
## PCC Batch Storage Class (used for Batch Counting)
mz.pcc.batch.storage.class: com.digitalroute.pcc.batch.storage.directory.DirectoryBatchStorage
mz.pcc.batchdirectorystorage.directory: ${mz.home}/tmp/pccbatch
## Toggle for the system log trace (one log per action)
## Valid values are enabled or disabled
#mz.system.log.trace: enabled
authorizationServer:
enabled: false
storage:
# The storage type can be either "file-based" or "database"
type: file-based
database:
# Only used when storage type is "database". PostgreSQL or Oracle DB only
profile-name: <Path.DBProfileName>
poolsize: 8
file-based:
# Only used when storage type is "file-based"
storage-location: /opt/mz/persistent/auth-server/storage
management-api:
# HTTP Basic Authentication
enable-basic-auth: true
## Uncomment if credentials are not already provided through secret "authorization-server-secrets"
## If the username does not already exist, mzadmin will be used as the default value.
#username: mzadmin
## If the password does not already exist it will be automatically generated.
#password:
jwt:
# Only RS256, RS384 and RS512 are supported
signature-algorithm: RS256
## Uncomment if credentials are not already provided through secret "authorization-server-secrets"
## Keystore is the base64 encoded string of a local keystore file; it can be generated with the command below:
## 'base64 -i /path/to/keystore.jks -o keystore_b64Encoded.txt'
#keystore:
#key-id:
#key-password:
#keystore-password:
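## For reference, a sketch of generating such a keystore with keytool
## (alias, key size and validity are placeholders):
## keytool -genkeypair -alias authserver -keyalg RSA -keysize 2048 \
##   -keystore keystore.jks -validity 3650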
server:
# Validity period in seconds for the generated access token
access-token-expiry: 1800
## Optionally deploy DTK mzps.
## This is done from a custom container image that you need to build and maintain.
## The only requirement on this container image is that it contains one specific folder holding the DTK mzps to deploy
## (it needs to be a flat list of mzps - i.e. nested folders are not supported).
## This is a minimal Dockerfile example that can be used to build such a container image:
## ----------------------------------------
## FROM alpine:latest
## ARG DTK_MZP_DIR
## RUN mkdir -p /opt/my-dtk-mzps/
## COPY $DTK_MZP_DIR/* /opt/my-dtk-mzps/
## ----------------------------------------
## Here the DTK_MZP_DIR argument is expected to be the local folder holding the DTK mzps to deploy.
## Using a container image built using the above Dockerfile would mean
## that the dtk.path value should be set to "/opt/my-dtk-mzps".
#dtk:
## The container image containing the DTK mzps
#image: <container repo>:<container version>
## The path of the folder within the container image that contains the DTK mzps
#path: /opt/my-dtk-mzps
## Values related to desktop online
desktopOnline:
## Config for the desktop online container image
repository: 462803626708.dkr.ecr.eu-west-1.amazonaws.com/usage-engine-private-edition
tag: 4.1.0-ui
pullPolicy: IfNotPresent
## Add/override jvm arguments
jvmArgs:
- XX:MaxMetaspaceSize=512m
- Xms256m
- Xmx2g
## Add/override system properties
systemProperties:
# - someprop=somevalue
## Configure pod resources (limits and/or requests) here if needed
resources: {}
## Allows for the configuration of the liveness and readiness probes respectively
probes:
liveness:
initialDelaySeconds: 300
periodSeconds: 15
timeoutSeconds: 10
successThreshold: 1
failureThreshold: 3
readiness:
initialDelaySeconds: 10
periodSeconds: 15
timeoutSeconds: 10
successThreshold: 1
failureThreshold: 10
ecd:
## Allows for tying a given ECD to a specific node in the cluster.
## Enabling this will result in the necessary ClusterRoles being created.
## If ClusterRoles are not allowed, this feature must be disabled.
nodeHostSelectionEnabled: true
## Node, affinity, tolerations for pod assignment
## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector
## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#taints-and-tolerations-beta-feature
##
nodeSelector: {}
affinity: {}
tolerations: []
service:
## Uncomment to set an explicit node port
#nodePort: 30000
## The desktop online web server base url. This is used to locate the desktop online app.
## The $(domain) token can be used to have the domain automatically resolved based on the global.domain value.
## The default value is set based on the assumption that hostname ingress is used. If not using hostname ingress,
## the value should point to the ip address and port of the desktop app instead.
## Example: https://192.168.205.5:31327
baseUrl: https://desktop-online$(domain)
utils:
kubectl:
## The kubectl container image repo
## Should only be changed if deploying to an environment with an offline network
repository: "bitnami/kubectl"
## The tag of the kubectl container image
tag: "1.29.3-debian-12-r0"
## Optionally add extensions.
## An extension can be an arbitrary third party product (3pp) such as a jar file, a shared library or any file really.
## This is done via a custom container image that you need to build and maintain.
## The only requirement on this container image is that it contains two specific folders, namely:
##
## "/opt/uepe/3pp": This folder can be used for adding arbitrary 3pp(s).
## 3pp(s) in the form of jar files that are added to this folder will be added to the runtime classpath automatically.
##
## "/opt/uepe/jni": This folder must be used for adding shared libraries required for JNI, such as a .so file.
##
## This is a minimal Dockerfile example that can be used to build such a container image:
## ----------------------------------------
## FROM alpine:latest
## COPY 3pp /opt/uepe/3pp/
## COPY jni /opt/uepe/jni/
## ----------------------------------------
## Here the 3pp and jni folders are expected to be local folders holding the 3pp(s) and shared libraries respectively.
## Note that Usage Engine Private Edition supports both the amd64 and arm64 platform architectures.
## When building the extension image, you need to make sure to use the same architecture that Usage Engine Private Edition is running on.
## For more info about building multiple architecture images, please refer to https://docs.docker.com/build/building/multi-platform/.
extensions:
## Whether to enable extensions or not.
enabled: false
## The container image containing the extensions.
image: my-uepe-extensions:1.0.0
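## For reference, a sketch of building a multi-platform extension image with
## docker buildx (the image name and registry are placeholders):
## docker buildx build --platform linux/amd64,linux/arm64 \
##   -t my-uepe-extensions:1.0.0 --push .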