expose:
  # Set how to expose the service. Set the type as "ingress", "clusterIP", "nodePort" or "loadBalancer"
  # and fill the information in the corresponding section
  type: loadBalancer
  tls:
    # Enable TLS or not.
    # Delete the "ssl-redirect" annotations in "expose.ingress.annotations" when TLS is disabled and "expose.type" is "ingress"
    # Note: if the "expose.type" is "ingress" and TLS is disabled,
    # the port must be included in the command when pulling/pushing images.
    # Refer to https://github.com/goharbor/harbor/issues/5291 for details.
    enabled: false
    # The source of the tls certificate. Set as "auto", "secret"
    # or "none" and fill the information in the corresponding section
    # 1) auto: generate the tls certificate automatically
    # 2) secret: read the tls certificate from the specified secret.
    #    The tls certificate can be generated manually or by cert manager
    # 3) none: configure no tls certificate for the ingress. If the default
    #    tls certificate is configured in the ingress controller, choose this option
    certSource: auto
    auto:
      # The common name used to generate the certificate, it's necessary
      # when the type isn't "ingress"
      commonName: ""
    secret:
      # The name of secret which contains keys named:
      # "tls.crt" - the certificate
      # "tls.key" - the private key
      secretName: ""
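      # A hedged, minimal sketch (not a chart default) of a Secret satisfying the keys
      # above; the name "harbor-tls" is only an example and would be referenced through
      # expose.tls.secret.secretName:
      #
      #   apiVersion: v1
      #   kind: Secret
      #   type: kubernetes.io/tls
      #   metadata:
      #     name: harbor-tls
      #   data:
      #     tls.crt: <base64-encoded certificate>
      #     tls.key: <base64-encoded private key>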
  ingress:
    hosts:
      core: harbor.guaranteedstruggle.host
    # set to the type of ingress controller if it has specific requirements.
    # leave as `default` for most ingress controllers.
    # set to `gce` if using the GCE ingress controller
    # set to `ncp` if using the NCP (NSX-T Container Plugin) ingress controller
    # set to `alb` if using the ALB ingress controller
    # set to `f5-bigip` if using the F5 BIG-IP ingress controller
    controller: default
    ## Allow .Capabilities.KubeVersion.Version to be overridden while creating ingress
    kubeVersionOverride: ""
    className: ""
    annotations:
      # note different ingress controllers may require a different ssl-redirect annotation
      # for Envoy, use ingress.kubernetes.io/force-ssl-redirect: "true" and remove the nginx lines below
      ingress.kubernetes.io/ssl-redirect: "true"
      ingress.kubernetes.io/proxy-body-size: "0"
      nginx.ingress.kubernetes.io/ssl-redirect: "true"
      nginx.ingress.kubernetes.io/proxy-body-size: "0"
    harbor:
      # harbor ingress-specific annotations
      annotations: {}
      # harbor ingress-specific labels
      labels: {}
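    # A hedged example (an assumption, not a chart default): if cert-manager issues the
    # ingress certificate, an issuer annotation such as the following could be added to
    # expose.ingress.annotations above; "letsencrypt-prod" is a placeholder issuer name.
    #   cert-manager.io/cluster-issuer: "letsencrypt-prod"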
  clusterIP:
    # The name of ClusterIP service
    name: harbor
    # Annotations on the ClusterIP service
    annotations: {}
    ports:
      # The service port Harbor listens on when serving HTTP
      httpPort: 80
      # The service port Harbor listens on when serving HTTPS
      httpsPort: 443
  nodePort:
    # The name of NodePort service
    name: harbor
    ports:
      http:
        # The service port Harbor listens on when serving HTTP
        port: 80
        # The node port Harbor listens on when serving HTTP
        nodePort: 30002
      https:
        # The service port Harbor listens on when serving HTTPS
        port: 443
        # The node port Harbor listens on when serving HTTPS
        nodePort: 30003
  loadBalancer:
    # The name of LoadBalancer service
    name: harbor
    # Set the IP if the LoadBalancer supports assigning IP
    IP: ""
    ports:
      # The service port Harbor listens on when serving HTTP
      httpPort: 80
      # The service port Harbor listens on when serving HTTPS
      #httpsPort: 443
    annotations: {}
    sourceRanges: []

# The external URL for Harbor core service. It is used to
# 1) populate the docker/helm commands shown on the portal
# 2) populate the token service URL returned to the docker client
#
# Format: protocol://domain[:port]. Usually:
# 1) if "expose.type" is "ingress", the "domain" should be
# the value of "expose.ingress.hosts.core"
# 2) if "expose.type" is "clusterIP", the "domain" should be
# the value of "expose.clusterIP.name"
# 3) if "expose.type" is "nodePort", the "domain" should be
# the IP address of the k8s node
#
# If Harbor is deployed behind a proxy, set it as the URL of the proxy
externalURL: https://harbor.guaranteedstruggle.host

# Internal TLS is used to secure communication between Harbor components.
# To enable HTTPS in each component, the TLS cert files need to be provided in advance.
internalTLS:
  # If internal TLS enabled
  enabled: false
  # enable strong ssl ciphers (default: false)
  strong_ssl_ciphers: false
  # There are three ways to provide the TLS certificates:
  # 1) "auto" will generate the certs automatically
  # 2) "manual" requires the cert contents to be provided manually in the values below
  # 3) "secret" reads the internal certificates from secrets
  certSource: "auto"
  # The content of the trust CA, only available when `certSource` is "manual"
  trustCa: ""
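  # A hedged sketch of how the "manual" option is typically filled in (the contents below
  # are placeholders, not real certificates); the same block-scalar layout applies to the
  # per-component crt/key values further down:
  # trustCa: |
  #   -----BEGIN CERTIFICATE-----
  #   ...
  #   -----END CERTIFICATE-----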
  # core related cert configuration
  core:
    # secret name for core's tls certs
    secretName: ""
    # Content of core's TLS cert file, only available when `certSource` is "manual"
    crt: ""
    # Content of core's TLS key file, only available when `certSource` is "manual"
    key: ""
  # jobservice related cert configuration
  jobservice:
    # secret name for jobservice's tls certs
    secretName: ""
    # Content of jobservice's TLS cert file, only available when `certSource` is "manual"
    crt: ""
    # Content of jobservice's TLS key file, only available when `certSource` is "manual"
    key: ""
  # registry related cert configuration
  registry:
    # secret name for registry's tls certs
    secretName: ""
    # Content of registry's TLS cert file, only available when `certSource` is "manual"
    crt: ""
    # Content of registry's TLS key file, only available when `certSource` is "manual"
    key: ""
  # portal related cert configuration
  portal:
    # secret name for portal's tls certs
    secretName: ""
    # Content of portal's TLS cert file, only available when `certSource` is "manual"
    crt: ""
    # Content of portal's TLS key file, only available when `certSource` is "manual"
    key: ""
  # trivy related cert configuration
  trivy:
    # secret name for trivy's tls certs
    secretName: ""
    # Content of trivy's TLS cert file, only available when `certSource` is "manual"
    crt: ""
    # Content of trivy's TLS key file, only available when `certSource` is "manual"
    key: ""

ipFamily:
  # Set ipv6.enabled to true if IPv6 is enabled in the cluster; currently it only affects the nginx-related components
  ipv6:
    enabled: false
  # Set ipv4.enabled to true if IPv4 is enabled in the cluster; currently it only affects the nginx-related components
  ipv4:
    enabled: true

# Persistence is enabled by default and a default StorageClass
# is needed in the k8s cluster to provision volumes dynamically.
# Specify another StorageClass in the "storageClass" or set "existingClaim"
# if you already have existing persistent volumes to use
#
# For storing images and charts, you can also use "azure", "gcs", "s3",
# "swift" or "oss". Set it in the "imageChartStorage" section
persistence:
  enabled: true
  # Set it to "keep" to avoid removing PVCs during a helm delete
  # operation. Leaving it empty will delete PVCs after the chart is deleted
  # (this does not apply to PVCs that are created for the internal database
  # and redis components, i.e. they are never deleted automatically)
  resourcePolicy: "keep"
  persistentVolumeClaim:
    registry:
      # Use the existing PVC which must be created manually before being bound,
      # and specify the "subPath" if the PVC is shared with other components
      existingClaim: ""
      # Specify the "storageClass" used to provision the volume, otherwise the default
      # StorageClass will be used (the default).
      # Set it to "-" to disable dynamic provisioning
      storageClass: ""
      subPath: ""
      accessMode: ReadWriteOnce
      size: 5Gi
      annotations: {}
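    # A hedged example (names are placeholders, not chart defaults) of pointing the
    # registry at a pre-created PVC shared with other components via subPath:
    #   registry:
    #     existingClaim: "harbor-shared-data"
    #     subPath: "registry"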
    jobservice:
      jobLog:
        existingClaim: ""
        storageClass: ""
        subPath: ""
        accessMode: ReadWriteOnce
        size: 1Gi
        annotations: {}
    # If external database is used, the following settings for database will
    # be ignored
    database:
      existingClaim: ""
      storageClass: ""
      subPath: ""
      accessMode: ReadWriteOnce
      size: 1Gi
      annotations: {}
    # If external Redis is used, the following settings for Redis will
    # be ignored
    redis:
      existingClaim: ""
      storageClass: ""
      subPath: ""
      accessMode: ReadWriteOnce
      size: 1Gi
      annotations: {}
    trivy:
      existingClaim: ""
      storageClass: ""
      subPath: ""
      accessMode: ReadWriteOnce
      size: 5Gi
      annotations: {}
  # Define which storage backend is used for registry to store
  # images and charts. Refer to
  # https://github.com/distribution/distribution/blob/main/docs/configuration.md#storage
  # for details.
  imageChartStorage:
    # Specify whether to disable `redirect` for images and chart storage; for
    # backends that do not support it (such as MinIO used as the `s3` storage type),
    # disable it by setting `disableredirect` to `true`.
    # Refer to
    # https://github.com/distribution/distribution/blob/main/docs/configuration.md#redirect
    # for details.
    disableredirect: false
    # Specify the "caBundleSecretName" if the storage service uses a self-signed certificate.
    # The secret must contain a key named "ca.crt", which will be injected into the trust store
    # of the registry's containers.
    # caBundleSecretName:

    # Specify the type of storage: "filesystem", "azure", "gcs", "s3", "swift",
    # "oss" and fill the information needed in the corresponding section. The type
    # must be "filesystem" if you want to use persistent volumes for registry
    type: filesystem
    filesystem:
      rootdirectory: /storage
      #maxthreads: 100
    azure:
      accountname: accountname
      accountkey: base64encodedaccountkey
      container: containername
      #realm: core.windows.net
      # To use an existing secret, the key must be AZURE_STORAGE_ACCESS_KEY
      existingSecret: ""
    gcs:
      bucket: bucketname
      # The base64 encoded json file which contains the key
      encodedkey: base64-encoded-json-key-file
      #rootdirectory: /gcs/object/name/prefix
      #chunksize: "5242880"
      # To use an existing secret, the key must be GCS_KEY_DATA
      existingSecret: ""
      useWorkloadIdentity: false
    s3:
      # Set an existing secret for S3 accesskey and secretkey
      # keys in the secret should be REGISTRY_STORAGE_S3_ACCESSKEY and REGISTRY_STORAGE_S3_SECRETKEY for registry
      #existingSecret: ""
      region: us-west-1
      bucket: bucketname
      #accesskey: awsaccesskey
      #secretkey: awssecretkey
      #regionendpoint: http://myobjects.local
      #encrypt: false
      #keyid: mykeyid
      #secure: true
      #skipverify: false
      #v4auth: true
      #chunksize: "5242880"
      #rootdirectory: /s3/object/name/prefix
      #storageclass: STANDARD
      #multipartcopychunksize: "33554432"
      #multipartcopymaxconcurrency: 100
      #multipartcopythresholdsize: "33554432"
    swift:
      authurl: https://storage.myprovider.com/v3/auth
      username: username
      password: password
      container: containername
      #region: fr
      #tenant: tenantname
      #tenantid: tenantid
      #domain: domainname
      #domainid: domainid
      #trustid: trustid
      #insecureskipverify: false
      #chunksize: 5M
      #prefix:
      #secretkey: secretkey
      #accesskey: accesskey
      #authversion: 3
      #endpointtype: public
      #tempurlcontainerkey: false
      #tempurlmethods:
    oss:
      accesskeyid: accesskeyid
      accesskeysecret: accesskeysecret
      region: regionname
      bucket: bucketname
      #endpoint: endpoint
      #internal: false
      #encrypt: false
      #secure: true
      #chunksize: 10M
      #rootdirectory: rootdirectory
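    # A hedged sketch (service name and bucket are placeholders) of backing the registry
    # with an in-cluster MinIO through the s3 driver; note that disableredirect above
    # would typically be set to true for MinIO:
    #   type: s3
    #   s3:
    #     region: us-east-1
    #     bucket: harbor
    #     regionendpoint: http://minio.minio.svc.cluster.local:9000
    #     secure: false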

imagePullPolicy: IfNotPresent

# Use this to assign a list of default pullSecrets
imagePullSecrets:
# - name: docker-registry-secret
# - name: internal-registry-secret

# The update strategy for deployments with persistent volumes (jobservice, registry): "RollingUpdate" or "Recreate"
# Set it as "Recreate" when "RWM" (ReadWriteMany) for volumes isn't supported
updateStrategy:
  type: RollingUpdate

# debug, info, warning, error or fatal
logLevel: info

# The initial password of the Harbor admin. Change it from the portal after launching Harbor,
# or provide an existing secret for it.
# The key in the secret is given via existingSecretAdminPasswordKey (defaults to HARBOR_ADMIN_PASSWORD)
# existingSecretAdminPassword:
existingSecretAdminPasswordKey: HARBOR_ADMIN_PASSWORD
harborAdminPassword: "admin2ch"
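# A hedged example (the secret name is a placeholder) of sourcing the admin password from
# an existing Secret instead of harborAdminPassword above; the Secret would carry the
# password under the key named by existingSecretAdminPasswordKey:
# existingSecretAdminPassword: "harbor-admin-password"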

# The name of the secret which contains a key named "ca.crt". Setting this enables the
# download link on the portal to download the CA certificate when the certificate isn't
# generated automatically
caSecretName: ""

# The secret key used for encryption. Must be a string of 16 chars.
secretKey: "not-a-secure-key"
# If using existingSecretSecretKey, the key must be secretKey
existingSecretSecretKey: ""

# The proxy settings for updating trivy vulnerabilities from the Internet and replicating
# artifacts from/to the registries that cannot be reached directly
proxy:
  httpProxy:
  httpsProxy:
  noProxy: 127.0.0.1,localhost,.local,.internal
  components:
    - core
    - jobservice
    - trivy

# Run the migration job via helm hook
enableMigrateHelmHook: false

# The custom ca bundle secret; the secret must contain a key named "ca.crt",
# which will be injected into the trust store for the core, jobservice, registry and trivy components
# caBundleSecretName: ""

## UAA Authentication Options
# If you're using UAA for authentication behind a self-signed
# certificate you will need to provide the CA Cert.
# Set uaaSecretName below to provide a pre-created secret that
# contains a base64 encoded CA Certificate named `ca.crt`.
# uaaSecretName:

# If the service is exposed via "ingress", Nginx will not be used
nginx:
  image:
    repository: goharbor/nginx-photon
    tag: v2.9.1
  # set the service account to be used, default if left empty
  serviceAccountName: ""
  # mount the service account token
  automountServiceAccountToken: false
  replicas: 1
  revisionHistoryLimit: 10
  # resources:
  #   requests:
  #     memory: 256Mi
  #     cpu: 100m
  extraEnvVars: []
  nodeSelector: {}
  tolerations: []
  affinity: {}
  # Spread Pods across failure-domains like regions, availability zones or nodes
  topologySpreadConstraints: []
  # - maxSkew: 1
  #   topologyKey: topology.kubernetes.io/zone
  #   nodeTaintsPolicy: Honor
  #   whenUnsatisfiable: DoNotSchedule
  ## Additional deployment annotations
  podAnnotations: {}
  ## Additional deployment labels
  podLabels: {}
  ## The priority class to run the pod as
  priorityClassName:

portal:
  image:
    repository: goharbor/harbor-portal
    tag: v2.9.1
  # set the service account to be used, default if left empty
  serviceAccountName: ""
  # mount the service account token
  automountServiceAccountToken: false
  replicas: 1
  revisionHistoryLimit: 10
  # resources:
  #   requests:
  #     memory: 256Mi
  #     cpu: 100m
  extraEnvVars: []
  nodeSelector: {}
  tolerations: []
  affinity: {}
  # Spread Pods across failure-domains like regions, availability zones or nodes
  topologySpreadConstraints: []
  # - maxSkew: 1
  #   topologyKey: topology.kubernetes.io/zone
  #   nodeTaintsPolicy: Honor
  #   whenUnsatisfiable: DoNotSchedule
  ## Additional deployment annotations
  podAnnotations: {}
  ## Additional deployment labels
  podLabels: {}
  ## The priority class to run the pod as
  priorityClassName:

core:
  image:
    repository: goharbor/harbor-core
    tag: v2.9.1
  # set the service account to be used, default if left empty
  serviceAccountName: ""
  # mount the service account token
  automountServiceAccountToken: false
  replicas: 1
  revisionHistoryLimit: 10
  ## Startup probe values
  startupProbe:
    enabled: true
    initialDelaySeconds: 10
  # resources:
  #   requests:
  #     memory: 256Mi
  #     cpu: 100m
  extraEnvVars: []
  nodeSelector: {}
  tolerations: []
  affinity: {}
  # Spread Pods across failure-domains like regions, availability zones or nodes
  topologySpreadConstraints: []
  # - maxSkew: 1
  #   topologyKey: topology.kubernetes.io/zone
  #   nodeTaintsPolicy: Honor
  #   whenUnsatisfiable: DoNotSchedule
  ## Additional deployment annotations
  podAnnotations: {}
  ## Additional deployment labels
  podLabels: {}
  ## Additional service annotations
  serviceAnnotations: {}
  ## User settings configuration json string
  configureUserSettings:
  # The provider for updating project quota (usage); there are 2 options, redis or db.
  # By default it is implemented by db, but you can configure it to use redis, which
  # can improve the performance of highly concurrent pushes to the same project
  # and reduce database connection spikes.
  # Using redis adds some delay before the displayed quota usage is updated, so only
  # switch the provider to redis if you run into database connection spikes caused by
  # highly concurrent pushes to the same project; there is no improvement for other scenarios.
  quotaUpdateProvider: db # Or redis
  # Secret is used when core server communicates with other components.
  # If a secret key is not specified, Helm will generate one.
  # Must be a string of 16 chars.
  secret: ""
  # Fill in the name of a kubernetes secret if you want to use your own
  # TLS certificate and private key for token encryption/decryption.
  # The secret must contain keys named:
  # "tls.key" - the private key
  # "tls.crt" - the certificate
  secretName: ""
  # If not specifying a preexisting secret, a secret can be created from tokenKey and tokenCert and used instead.
  # If none of secretName, tokenKey, and tokenCert are specified, an ephemeral key and certificate will be autogenerated.
  # tokenKey and tokenCert must BOTH be set or BOTH unset.
  # The tokenKey value is formatted as a multiline string containing a PEM-encoded RSA key, indented one more than tokenKey on the following line.
  tokenKey: |
  # If tokenKey is set, the value of tokenCert must be set as a PEM-encoded certificate signed by tokenKey, and supplied as a multiline string, indented one more than tokenCert on the following line.
  tokenCert: |
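  # A hedged sketch of the expected multiline layout (the PEM bodies below are
  # placeholders, not real key material):
  # tokenKey: |
  #   -----BEGIN RSA PRIVATE KEY-----
  #   ...
  #   -----END RSA PRIVATE KEY-----
  # tokenCert: |
  #   -----BEGIN CERTIFICATE-----
  #   ...
  #   -----END CERTIFICATE-----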
  # The XSRF key. Will be generated automatically if it isn't specified
  xsrfKey: ""
  ## The priority class to run the pod as
  priorityClassName:
  # The time duration for asynchronously updating artifact pull_time and repository
  # pull_count; the unit is seconds. Defaults to 10 seconds if it isn't set.
  # eg. artifactPullAsyncFlushDuration: 10
  artifactPullAsyncFlushDuration:
  gdpr:
    deleteUser: false

jobservice:
  image:
    repository: goharbor/harbor-jobservice
    tag: v2.9.1
  replicas: 1
  revisionHistoryLimit: 10
  # set the service account to be used, default if left empty
  serviceAccountName: ""
  # mount the service account token
  automountServiceAccountToken: false
  maxJobWorkers: 10
  # The logger for jobs: "file", "database" or "stdout"
  jobLoggers:
    - file
    # - database
    # - stdout
  # The jobLogger sweeper duration (ignored if `jobLoggers` is set to `stdout`)
  loggerSweeperDuration: 14 #days
  notification:
    webhook_job_max_retry: 3
    webhook_job_http_client_timeout: 3 # in seconds
  reaper:
    # the max time to wait for a task to finish; if still unfinished after max_update_hours,
    # the task will be marked as error, but it will continue to run. Default value is 24
    max_update_hours: 24
    # the max time an execution can stay in running state without any new task being created
    max_dangling_hours: 168

  # resources:
  #   requests:
  #     memory: 256Mi
  #     cpu: 100m
  extraEnvVars: []
  nodeSelector: {}
  tolerations: []
  affinity: {}
  # Spread Pods across failure-domains like regions, availability zones or nodes
  topologySpreadConstraints:
  # - maxSkew: 1
  #   topologyKey: topology.kubernetes.io/zone
  #   nodeTaintsPolicy: Honor
  #   whenUnsatisfiable: DoNotSchedule
  ## Additional deployment annotations
  podAnnotations: {}
  ## Additional deployment labels
  podLabels: {}
  # Secret is used when job service communicates with other components.
  # If a secret key is not specified, Helm will generate one.
  # Must be a string of 16 chars.
  secret: ""
  ## The priority class to run the pod as
  priorityClassName:

registry:
  # set the service account to be used, default if left empty
  serviceAccountName: ""
  # mount the service account token
  automountServiceAccountToken: false
  registry:
    image:
      repository: goharbor/registry-photon
      tag: v2.9.1
    # resources:
    #   requests:
    #     memory: 256Mi
    #     cpu: 100m
    extraEnvVars: []
  controller:
    image:
      repository: goharbor/harbor-registryctl
      tag: v2.9.1

    # resources:
    #   requests:
    #     memory: 256Mi
    #     cpu: 100m
    extraEnvVars: []
  replicas: 1
  revisionHistoryLimit: 10
  nodeSelector: {}
  tolerations: []
  affinity: {}
  # Spread Pods across failure-domains like regions, availability zones or nodes
  topologySpreadConstraints: []
  # - maxSkew: 1
  #   topologyKey: topology.kubernetes.io/zone
  #   nodeTaintsPolicy: Honor
  #   whenUnsatisfiable: DoNotSchedule
  ## Additional deployment annotations
  podAnnotations: {}
  ## Additional deployment labels
  podLabels: {}
  ## The priority class to run the pod as
  priorityClassName:
  # Secret is used to secure the upload state from client
  # and registry storage backend.
  # See: https://github.com/distribution/distribution/blob/main/docs/configuration.md#http
  # If a secret key is not specified, Helm will generate one.
  # Must be a string of 16 chars.
  secret: ""
  # If true, the registry returns relative URLs in Location headers. The client is responsible for resolving the correct URL.
  relativeurls: true
  credentials:
    username: "harbor_registry_user"
    password: "harbor_registry_password"
    # If using existingSecret, the keys must be REGISTRY_PASSWD and REGISTRY_HTPASSWD
    existingSecret: ""
    # Login and password in htpasswd string format. Mutually exclusive with `registry.credentials.username`
    # and `registry.credentials.password`. May come in handy when integrating with tools like ArgoCD or Flux,
    # because the same string is rendered each time the template is evaluated, unlike Helm's `htpasswd`
    # function, which generates a different string on each render because of the salt.
    # htpasswdString: $apr1$XLefHzeG$Xl4.s00sMSCCcMyJljSZb0 # example string
  middleware:
    enabled: false
    type: cloudFront
    cloudFront:
      baseurl: example.cloudfront.net
      keypairid: KEYPAIRID
      duration: 3000s
      ipfilteredby: none
      # The secret key that should be present is CLOUDFRONT_KEY_DATA, which should be the encoded private key
      # that allows access to CloudFront
      privateKeySecret: "my-secret"
  # enable purging of _upload directories
  upload_purging:
    enabled: true
    # remove files in _upload directories that have existed for a period of time; the default is one week
    age: 168h
    # the interval of the purge operations
    interval: 24h
    dryrun: false

trivy:
  # enabled the flag to enable Trivy scanner
  enabled: true
  image:
    # repository the repository for Trivy adapter image
    repository: goharbor/trivy-adapter-photon
    # tag the tag for Trivy adapter image
    tag: v2.9.1
  # set the service account to be used, default if left empty
  serviceAccountName: ""
  # mount the service account token
  automountServiceAccountToken: false
  # replicas the number of Pod replicas
  replicas: 1
  # debugMode the flag to enable Trivy debug mode with more verbose scanning log
  debugMode: false
  # vulnType a comma-separated list of vulnerability types. Possible values are `os` and `library`.
  vulnType: "os,library"
  # severity a comma-separated list of severities to be checked
  severity: "UNKNOWN,LOW,MEDIUM,HIGH,CRITICAL"
  # ignoreUnfixed the flag to display only fixed vulnerabilities
  ignoreUnfixed: false
  # insecure the flag to skip verifying registry certificate
  insecure: false
  # gitHubToken the GitHub access token to download Trivy DB
  #
  # Trivy DB contains vulnerability information from NVD, Red Hat, and many other upstream vulnerability databases.
  # It is downloaded by Trivy from the GitHub release page https://github.com/aquasecurity/trivy-db/releases and cached
  # in the local file system (`/home/scanner/.cache/trivy/db/trivy.db`). In addition, the database contains the update
  # timestamp so Trivy can detect whether it should download a newer version from the Internet or use the cached one.
  # Currently, the database is updated every 12 hours and published as a new release to GitHub.
  #
  # Anonymous downloads from GitHub are subject to the limit of 60 requests per hour. Normally such rate limit is enough
  # for production operations. If, for any reason, it's not enough, you could increase the rate limit to 5000
  # requests per hour by specifying the GitHub access token. For more details on GitHub rate limiting please consult
  # https://developer.github.com/v3/#rate-limiting
  #
  # You can create a GitHub token by following the instructions in
  # https://help.github.com/en/github/authenticating-to-github/creating-a-personal-access-token-for-the-command-line
  gitHubToken: ""
  # skipUpdate the flag to disable Trivy DB downloads from GitHub
  #
  # You might want to set the value of this flag to `true` in test or CI/CD environments to avoid GitHub rate limiting issues.
  # If the value is set to `true` you have to manually download the `trivy.db` file and mount it in the
  # `/home/scanner/.cache/trivy/db/trivy.db` path.
  skipUpdate: false
  # The offlineScan option prevents Trivy from sending API requests to identify dependencies.
  #
  # Scanning JAR files and pom.xml may require Internet access for better detection, but this option tries to avoid it.
  # For example, the offline mode will not try to resolve transitive dependencies in pom.xml when the dependency doesn't
  # exist in the local repositories. This means the number of detected vulnerabilities might be lower in offline mode.
  # It works as expected if all the dependencies are available locally.
  # This option doesn't affect DB download. You need to specify skipUpdate as well as offlineScan in an air-gapped environment.
  offlineScan: false
  # Comma-separated list of what security issues to detect. Possible values are `vuln`, `config` and `secret`. Defaults to `vuln`.
  securityCheck: "vuln"
  # The duration to wait for scan completion
  timeout: 5m0s
  resources:
    requests:
      cpu: 200m
      memory: 512Mi
    limits:
      cpu: 1
      memory: 1Gi
  extraEnvVars: []
  nodeSelector: {}
  tolerations: []
  affinity: {}
  # Spread Pods across failure-domains like regions, availability zones or nodes
  topologySpreadConstraints: []
  # - maxSkew: 1
  #   topologyKey: topology.kubernetes.io/zone
  #   nodeTaintsPolicy: Honor
  #   whenUnsatisfiable: DoNotSchedule
  ## Additional deployment annotations
  podAnnotations: {}
  ## Additional deployment labels
  podLabels: {}
  ## The priority class to run the pod as
  priorityClassName:

database:
  # if external database is used, set "type" to "external"
  # and fill the connection information in "external" section
  type: internal
  internal:
    # set the service account to be used, default if left empty
    serviceAccountName: ""
    # mount the service account token
    automountServiceAccountToken: false
    image:
      repository: goharbor/harbor-db
      tag: v2.9.1
    # The initial superuser password for internal database
    password: "changeit"
    # The size limit for shared memory; PostgreSQL uses it for shared_buffer
    # More details see:
    # https://github.com/goharbor/harbor/issues/15034
    shmSizeLimit: 512Mi
    # resources:
    #   requests:
    #     memory: 256Mi
    #     cpu: 100m
    # The timeout used in livenessProbe; 1 to 5 seconds
    livenessProbe:
      timeoutSeconds: 1
    # The timeout used in readinessProbe; 1 to 5 seconds
    readinessProbe:
      timeoutSeconds: 1
    extraEnvVars: []
    nodeSelector: {}
    tolerations: []
    affinity: {}
    ## The priority class to run the pod as
    priorityClassName:
    initContainer:
      migrator: {}
      # resources:
      #   requests:
      #     memory: 128Mi
      #     cpu: 100m
      permissions: {}
      # resources:
      #   requests:
      #     memory: 128Mi
      #     cpu: 100m
  external:
    host: "192.168.0.1"
    port: "5432"
    username: "user"
    password: "password"
    coreDatabase: "registry"
    # if using existing secret, the key must be "password"
    existingSecret: ""
    # "disable" - No SSL
    # "require" - Always SSL (skip verification)
    # "verify-ca" - Always SSL (verify that the certificate presented by the
    # server was signed by a trusted CA)
    # "verify-full" - Always SSL (verify that the certificate presented by the
    # server was signed by a trusted CA and the server host name matches the one
    # in the certificate)
    sslmode: "disable"
  # The maximum number of connections in the idle connection pool per pod (core+exporter).
  # If it <= 0, no idle connections are retained.
  maxIdleConns: 100
  # The maximum number of open connections to the database per pod (core+exporter).
  # If it <= 0, then there is no limit on the number of open connections.
  # Note: the default number of connections is 1024 for Harbor's Postgres.
  maxOpenConns: 900
  ## Additional deployment annotations
  podAnnotations: {}
  ## Additional deployment labels
  podLabels: {}

redis:
  # if external Redis is used, set "type" to "external"
  # and fill the connection information in "external" section
  type: internal
  internal:
    # set the service account to be used, default if left empty
    serviceAccountName: ""
    # mount the service account token
    automountServiceAccountToken: false
    image:
      repository: goharbor/redis-photon
      tag: v2.9.1
    # resources:
    #   requests:
    #     memory: 256Mi
    #     cpu: 100m
    extraEnvVars: []
    nodeSelector: {}
    tolerations: []
    affinity: {}
    ## The priority class to run the pod as
    priorityClassName:
    # # jobserviceDatabaseIndex defaults to "1"
    # # registryDatabaseIndex defaults to "2"
    # # trivyAdapterIndex defaults to "5"
    # # harborDatabaseIndex defaults to "0", but it can be configured to "6", this config is optional
    # # cacheLayerDatabaseIndex defaults to "0", but it can be configured to "7", this config is optional
    jobserviceDatabaseIndex: "1"
    registryDatabaseIndex: "2"
    trivyAdapterIndex: "5"
    # harborDatabaseIndex: "6"
    # cacheLayerDatabaseIndex: "7"
  external:
    # support redis, redis+sentinel
    # addr for redis: <host_redis>:<port_redis>
    # addr for redis+sentinel: <host_sentinel1>:<port_sentinel1>,<host_sentinel2>:<port_sentinel2>,<host_sentinel3>:<port_sentinel3>
    addr: "192.168.0.2:6379"
    # The name of the set of Redis instances to monitor, it must be set to support redis+sentinel
    sentinelMasterSet: ""
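    # A hedged sketch of a Sentinel-backed configuration following the addr format above
    # (hostnames and the master set name are placeholders):
    #   addr: "sentinel-0.redis:26379,sentinel-1.redis:26379,sentinel-2.redis:26379"
    #   sentinelMasterSet: "mymaster"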
    # The "coreDatabaseIndex" must be "0" as the library Harbor
    # uses doesn't support configuring it
    # harborDatabaseIndex defaults to "0", but it can be configured to "6", this config is optional
    # cacheLayerDatabaseIndex defaults to "0", but it can be configured to "7", this config is optional
    coreDatabaseIndex: "0"
    jobserviceDatabaseIndex: "1"
    registryDatabaseIndex: "2"
    trivyAdapterIndex: "5"
    # harborDatabaseIndex: "6"
    # cacheLayerDatabaseIndex: "7"
    # username field can be an empty string, and it will be authenticated against the default user
    username: ""
    password: ""
    # If using existingSecret, the key must be REDIS_PASSWORD
    existingSecret: ""
  ## Additional deployment annotations
  podAnnotations: {}
  ## Additional deployment labels
  podLabels: {}

exporter:
  replicas: 1
  revisionHistoryLimit: 10
  # resources:
  #   requests:
  #     memory: 256Mi
  #     cpu: 100m
  extraEnvVars: []
  podAnnotations: {}
  ## Additional deployment labels
  podLabels: {}
  serviceAccountName: ""
  # mount the service account token
  automountServiceAccountToken: false
  image:
    repository: goharbor/harbor-exporter
    tag: v2.9.1
  nodeSelector: {}
  tolerations: []
  affinity: {}
  # Spread Pods across failure-domains like regions, availability zones or nodes
  topologySpreadConstraints: []
  # - maxSkew: 1
  #   topologyKey: topology.kubernetes.io/zone
  #   nodeTaintsPolicy: Honor
  #   whenUnsatisfiable: DoNotSchedule
  cacheDuration: 23
  cacheCleanInterval: 14400
  ## The priority class to run the pod as
  priorityClassName:

metrics:
  enabled: false
  core:
    path: /metrics
    port: 8001
  registry:
    path: /metrics
    port: 8001
  jobservice:
    path: /metrics
    port: 8001
  exporter:
    path: /metrics
    port: 8001
  ## Create prometheus serviceMonitor to scrape harbor metrics.
  ## This requires the monitoring.coreos.com/v1 CRD. Please see
  ## https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/user-guides/getting-started.md
  ##
  serviceMonitor:
    enabled: false
    additionalLabels: {}
    # Scrape interval. If not set, the Prometheus default scrape interval is used.
    interval: ""
    # Metric relabel configs to apply to samples before ingestion.
    metricRelabelings:
      []
      # - action: keep
      #   regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+'
      #   sourceLabels: [__name__]
    # Relabel configs to apply to samples before ingestion.
    relabelings:
      []
      # - sourceLabels: [__meta_kubernetes_pod_node_name]
      #   separator: ;
      #   regex: ^(.*)$
      #   targetLabel: nodename
      #   replacement: $1
      #   action: replace

trace:
  enabled: false
  # trace provider: jaeger or otel
  # jaeger should be version 1.26 or higher
  provider: jaeger
  # set sample_rate to 1 if you want to sample 100% of trace data; set it to 0.5 to sample 50% of trace data, and so forth
  sample_rate: 1
  # namespace used to differentiate different harbor services
  # namespace:
  # attributes is a key value dict containing user defined attributes used to initialize the trace provider
  # attributes:
  #   application: harbor
  jaeger:
    # jaeger supports two modes:
    # collector mode (uncomment endpoint and uncomment username, password if needed)
    # agent mode (uncomment agent_host and agent_port)
    endpoint: http://hostname:14268/api/traces
    # username:
    # password:
    # agent_host: hostname
    # export trace data by jaeger.thrift in compact mode
    # agent_port: 6831
  otel:
    endpoint: hostname:4318
    url_path: /v1/traces
    compression: false
    insecure: true
    # timeout is in seconds
    timeout: 10

# cache layer configurations
# If this feature is enabled, Harbor will cache the resources
# `project/project_metadata/repository/artifact/manifest` in Redis,
# which helps improve the performance of highly concurrent manifest pulls.
cache:
  # default is not enabled.
  enabled: false
  # default keep cache for one day.
  expireHours: 24