# Default configuration for pre-requisites to get you started
# Copy this file and update to the configuration of choice
elasticsearch:
  # Set this to false if you want to provide your own ES instance.
  enabled: true
  # If you're running in production, set replicas to 3 and comment out antiAffinity below,
  # or alternatively bring your own Elasticsearch.
  replicas: 1
  minimumMasterNodes: 1
  # With replicas set to 1, this soft antiAffinity allows the instance to be scheduled on
  # a master node when deploying on a single-node Minikube / Kind / etc. cluster.
  antiAffinity: "soft"
  # If you are running a multi-replica cluster, comment this out.
  clusterHealthCheckParams: "wait_for_status=yellow&timeout=1s"
  # Shrink the default JVM heap.
  esJavaOpts: "-Xmx512m -Xms512m"
  # Allocate smaller chunks of memory per pod.
  resources:
    requests:
      cpu: "100m"
      memory: "1024M"
    limits:
      cpu: "1000m"
      memory: "1024M"
  # Request smaller persistent volumes.
  # volumeClaimTemplate:
  #   accessModes: ["ReadWriteOnce"]
  #   storageClassName: "standard"
  #   resources:
  #     requests:
  #       storage: 100M
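
# For reference, a minimal sketch of the production Elasticsearch settings described
# above (illustrative values, not tested recommendations; adjust to your environment):
#
# elasticsearch:
#   enabled: true
#   replicas: 3
#   minimumMasterNodes: 2  # quorum for three master-eligible nodes
#   # antiAffinity removed so replicas are spread across nodes
#   # clusterHealthCheckParams commented out per the multi-replica note above
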
opensearch:
  enabled: false
  # If you're running in production, set singleNode to false, set replicas to 3,
  # and uncomment antiAffinity below; or alternatively bring your own OpenSearch.
  singleNode: true
  # replicas: 3
  # antiAffinity: "hard"
  # Security is enabled by default for OpenSearch; disable it here.
  config:
    opensearch.yml: |
      plugins:
        security:
          disabled: true
  extraEnvs:
    - name: DISABLE_INSTALL_DEMO_CONFIG
      value: "true"
  image:
    tag: "2.11.0"
  # opensearchJavaOpts: "-Xmx512M -Xms512M"
  # resources:
  #   requests:
  #     cpu: "1000m"
  #     memory: "100Mi"
  # Request smaller persistent volumes.
  # persistence:
  #   storageClass: "standard"
  #   accessModes:
  #     - ReadWriteOnce
  #   size: 100M
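
# A minimal sketch of running OpenSearch instead of Elasticsearch, assuming the two
# toggles above are meant to be mutually exclusive:
#
# elasticsearch:
#   enabled: false
# opensearch:
#   enabled: true
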
# Official neo4j chart, supports both community and enterprise editions
# see https://neo4j.com/docs/operations-manual/current/kubernetes/ for more information
# source: https://github.com/neo4j/helm-charts
neo4j:
  enabled: false
  nameOverride: neo4j
  neo4j:
    name: neo4j
    edition: "community"
    acceptLicenseAgreement: "yes"
    defaultDatabase: "graph.db"
    password: "datahub"
    # For better security, add the password to a neo4j-secrets k8s secret with the keys
    # neo4j-username, neo4j-password, and NEO4J_AUTH, then uncomment passwordFromSecret
    # below (see the example after this block). NEO4J_AUTH should be composed as
    # {Username}/{Password}.
    # passwordFromSecret: neo4j-secrets

  # Set security context for the pod.
  securityContext:
    runAsNonRoot: true
    runAsUser: 7474
    runAsGroup: 7474
    fsGroup: 7474
    fsGroupChangePolicy: "Always"

  # Disallow privilege escalation at the container level.
  containerSecurityContext:
    allowPrivilegeEscalation: false

  # Create a volume for neo4j; SSD storage is recommended.
  volumes:
    data:
      mode: "defaultStorageClass"
      # mode: "dynamic"
      # dynamic:
      #   storageClassName: managed-csi-premium

  env:
    NEO4J_PLUGINS: '["apoc"]'
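
# A hypothetical example of creating the neo4j-secrets secret referenced above;
# key names follow the comment, the password value is a placeholder:
#
# kubectl create secret generic neo4j-secrets \
#   --from-literal=neo4j-username=neo4j \
#   --from-literal=neo4j-password=<your-password> \
#   --from-literal=NEO4J_AUTH=neo4j/<your-password>
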
mysql:
  enabled: true
  image:
    tag: 8.0.32-debian-11-r26
  auth:
    # For better security, add a mysql-secrets k8s secret with the keys
    # mysql-root-password, mysql-replication-password, and mysql-password.
    existingSecret: mysql-secrets
  primary:
    extraFlags: "--character-set-server=utf8mb4 --collation-server=utf8mb4_bin"
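
# A hypothetical example of creating the mysql-secrets secret referenced above;
# key names follow the comment, values are placeholders:
#
# kubectl create secret generic mysql-secrets \
#   --from-literal=mysql-root-password=<root-password> \
#   --from-literal=mysql-replication-password=<replication-password> \
#   --from-literal=mysql-password=<password>
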
postgresql:
  enabled: false
  auth:
    # For better security, add a postgresql-secrets k8s secret with the keys
    # postgres-password, replication-password, and password.
    existingSecret: postgresql-secrets
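
# Likewise, a hypothetical example for the postgresql-secrets secret referenced above:
#
# kubectl create secret generic postgresql-secrets \
#   --from-literal=postgres-password=<postgres-password> \
#   --from-literal=replication-password=<replication-password> \
#   --from-literal=password=<password>
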
# Using gcloud-sqlproxy requires the node in a GKE cluster to have the Cloud SQL Admin scope;
# you will need to create a new node and migrate the workload if your current node does not
# have this scope.
gcloud-sqlproxy:
  enabled: false
  # Specify an existing secret holding the cloud-sql service account credentials. If not
  # specified, the default compute engine service account will be used; it needs to have
  # the Cloud SQL Client role.
  existingSecret: ""
  # The key in the existing secret that stores the credentials.
  existingSecretKey: ""
  ## The name of the ServiceAccount to be used.
  serviceAccountName: ""
  # SQL connection settings
  cloudsql:
    # MySQL instances:
    # update with your GCP project, the region of your Cloud SQL instance, and the id of
    # your Cloud SQL instance. Use port 3306 for MySQL, or whichever other port you set
    # for your SQL instance.
    instances:
      # GCP Cloud SQL instance id
      - instance: ""
        # GCP project where the instance exists.
        project: ""
        # GCP region where the instance exists.
        region: ""
        # Port number for the proxy to expose for this instance.
        port: 3306
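
# A filled-in sketch of a single Cloud SQL instance entry; the project, region, and
# instance id below are made-up placeholders:
#
# cloudsql:
#   instances:
#     - instance: "my-cloudsql-instance"
#       project: "my-gcp-project"
#       region: "us-central1"
#       port: 3306
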
cp-helm-charts:
  enabled: false
  # Schema registry is under the community license
  cp-schema-registry:
    enabled: false
    kafka:
      # <<release-name>>-kafka:9092
      bootstrapServers: "prerequisites-kafka:9092"
  cp-kafka:
    enabled: false
  cp-zookeeper:
    enabled: false
  cp-kafka-rest:
    enabled: false
  cp-kafka-connect:
    enabled: false
  cp-ksql-server:
    enabled: false
  cp-control-center:
    enabled: false
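
# bootstrapServers must match your Helm release name, per the <<release-name>>-kafka
# comment above. For example, with a release named my-prereqs (name assumed purely for
# illustration):
#
# cp-helm-charts:
#   cp-schema-registry:
#     kafka:
#       bootstrapServers: "my-prereqs-kafka:9092"
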
# Bitnami version of Kafka that deploys open source Kafka https://artifacthub.io/packages/helm/bitnami/kafka
kafka:
  enabled: true
  listeners:
    client:
      protocol: PLAINTEXT
    interbroker:
      protocol: PLAINTEXT
  controller:
    replicaCount: 0
  broker:
    replicaCount: 1
    # In newer chart versions the default minId for brokers is 100. Without this override
    # the broker would get id 100 and could not load the existing partitions, so we set
    # minId to 0 to stay backwards compatible.
    minId: 0
    # These server properties are no longer exposed as parameters in the Bitnami Kafka
    # chart since 24.0.0; they need to be passed in through extraConfig. See below for
    # reference:
    # https://github.com/bitnami/charts/tree/main/bitnami/kafka#to-2400
    extraConfig: |
      message.max.bytes=5242880
      default.replication.factor=1
      offsets.topic.replication.factor=1
      transaction.state.log.replication.factor=1
  kraft:
    enabled: false
  zookeeper:
    enabled: true
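
# A hedged sketch of switching from ZooKeeper to KRaft mode using the toggles above;
# exact requirements vary with the Bitnami chart version, so treat this as illustrative
# rather than a tested configuration:
#
# kafka:
#   controller:
#     replicaCount: 1  # combined controller+broker node in recent chart versions
#   broker:
#     replicaCount: 0
#   kraft:
#     enabled: true
#   zookeeper:
#     enabled: false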