# values.yaml — elastic/elasticsearch Helm chart overrides
# helm repo add elastic https://helm.elastic.co
# helm upgrade --install elasticsearch elastic/elasticsearch -n elasticsearch -f ~/server/elasticsearch/values.yaml --version 8.5.1
---
  4. clusterName: "elasticsearch"
  5. nodeGroup: "master"
  6. # The service that non master groups will try to connect to when joining the cluster
  7. # This should be set to clusterName + "-" + nodeGroup for your master group
  8. masterService: ""
  9. # Elasticsearch roles that will be applied to this nodeGroup
  10. # These will be set as environment variables. E.g. node.roles=master
  11. # https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-node.html#node-roles
  12. roles:
  13. - master
  14. - data
  15. - data_content
  16. - data_hot
  17. - data_warm
  18. - data_cold
  19. - ingest
  20. - ml
  21. - remote_cluster_client
  22. - transform
  23. replicas: 3
  24. minimumMasterNodes: 2
  25. esMajorVersion: ""
  26. # Allows you to add any config files in /usr/share/elasticsearch/config/
  27. # such as elasticsearch.yml and log4j2.properties
  28. esConfig:
  29. # SSL is disabled: internal-only cluster, & self-signed certs in mastodon are a pain
  30. elasticsearch.yml: |
  31. xpack.security.enabled: false
  32. xpack.security.http.ssl.enabled: false
  33. xpack.security.transport.ssl.enabled: false
  34. # key:
  35. # nestedkey: value
  36. # log4j2.properties: |
  37. # key = value
  38. createCert: false
  39. esJvmOptions: {}
  40. # processors.options: |
  41. # -XX:ActiveProcessorCount=3
  42. # Extra environment variables to append to this nodeGroup
  43. # This will be appended to the current 'env:' key. You can use any of the kubernetes env
  44. # syntax here
  45. extraEnvs: []
  46. # - name: MY_ENVIRONMENT_VAR
  47. # value: the_value_goes_here
  48. # Allows you to load environment variables from kubernetes secret or config map
  49. envFrom: []
  50. # - secretRef:
  51. # name: env-secret
  52. # - configMapRef:
  53. # name: config-map
  54. # Disable it to use your own elastic-credential Secret.
  55. secret:
  56. enabled: true
  57. password: "" # generated randomly if not defined
  58. # A list of secrets and their paths to mount inside the pod
  59. # This is useful for mounting certificates for security and for mounting
  60. # the X-Pack license
  61. secretMounts: []
  62. # - name: elastic-certificates
  63. # secretName: elastic-certificates
  64. # path: /usr/share/elasticsearch/config/certs
  65. # defaultMode: 0755
  66. hostAliases: []
  67. #- ip: "127.0.0.1"
  68. # hostnames:
  69. # - "foo.local"
  70. # - "bar.local"
  71. image: "docker.elastic.co/elasticsearch/elasticsearch"
  72. imageTag: "8.5.1"
  73. imagePullPolicy: "IfNotPresent"
  74. podAnnotations: {}
  75. # iam.amazonaws.com/role: es-cluster
  76. # additionals labels
  77. labels: {}
  78. esJavaOpts: "" # example: "-Xmx1g -Xms1g"
  79. resources:
  80. requests:
  81. cpu: "1000m"
  82. memory: "2Gi"
  83. limits:
  84. cpu: "1000m"
  85. memory: "2Gi"
  86. initResources: {}
  87. # limits:
  88. # cpu: "25m"
  89. # # memory: "128Mi"
  90. # requests:
  91. # cpu: "25m"
  92. # memory: "128Mi"
  93. networkHost: "0.0.0.0"
  94. volumeClaimTemplate:
  95. accessModes: ["ReadWriteOnce"]
  96. resources:
  97. requests:
  98. storage: 30Gi
  99. storageClassName: "ceph-block"
  100. rbac:
  101. create: false
  102. serviceAccountAnnotations: {}
  103. serviceAccountName: ""
  104. automountToken: true
  105. podSecurityPolicy:
  106. create: false
  107. name: ""
  108. spec:
  109. privileged: true
  110. fsGroup:
  111. rule: RunAsAny
  112. runAsUser:
  113. rule: RunAsAny
  114. seLinux:
  115. rule: RunAsAny
  116. supplementalGroups:
  117. rule: RunAsAny
  118. volumes:
  119. - secret
  120. - configMap
  121. - persistentVolumeClaim
  122. - emptyDir
  123. persistence:
  124. enabled: true
  125. labels:
  126. # Add default labels for the volumeClaimTemplate of the StatefulSet
  127. enabled: false
  128. annotations: {}
  129. extraVolumes: []
  130. # - name: extras
  131. # emptyDir: {}
  132. extraVolumeMounts: []
  133. # - name: extras
  134. # mountPath: /usr/share/extras
  135. # readOnly: true
  136. extraContainers: []
  137. # - name: do-something
  138. # image: busybox
  139. # command: ['do', 'something']
  140. extraInitContainers: []
  141. # - name: do-something
  142. # image: busybox
  143. # command: ['do', 'something']
  144. # This is the PriorityClass settings as defined in
  145. # https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/#priorityclass
  146. priorityClassName: ""
  147. # By default this will make sure two pods don't end up on the same node
  148. # Changing this to a region would allow you to spread pods across regions
  149. antiAffinityTopologyKey: "kubernetes.io/hostname"
  150. # Hard means that by default pods will only be scheduled if there are enough nodes for them
  151. # and that they will never end up on the same node. Setting this to soft will do this "best effort"
  152. antiAffinity: "hard"
  153. # This is the node affinity settings as defined in
  154. # https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#node-affinity-beta-feature
  155. nodeAffinity: {}
  156. # The default is to deploy all pods serially. By setting this to parallel all pods are started at
  157. # the same time when bootstrapping the cluster
  158. podManagementPolicy: "Parallel"
  159. # The environment variables injected by service links are not used, but can lead to slow Elasticsearch boot times when
  160. # there are many services in the current namespace.
  161. # If you experience slow pod startups you probably want to set this to `false`.
  162. enableServiceLinks: true
  163. protocol: http
  164. httpPort: 9200
  165. transportPort: 9300
  166. service:
  167. enabled: true
  168. labels: {}
  169. labelsHeadless: {}
  170. type: ClusterIP
  171. # Consider that all endpoints are considered "ready" even if the Pods themselves are not
  172. # https://kubernetes.io/docs/reference/kubernetes-api/service-resources/service-v1/#ServiceSpec
  173. publishNotReadyAddresses: false
  174. nodePort: ""
  175. annotations: {}
  176. httpPortName: http
  177. transportPortName: transport
  178. loadBalancerIP: ""
  179. loadBalancerSourceRanges: []
  180. externalTrafficPolicy: ""
  181. updateStrategy: RollingUpdate
  182. # This is the max unavailable setting for the pod disruption budget
  183. # The default value of 1 will make sure that kubernetes won't allow more than 1
  184. # of your pods to be unavailable during maintenance
  185. maxUnavailable: 1
  186. podSecurityContext:
  187. fsGroup: 1000
  188. runAsUser: 1000
  189. securityContext:
  190. capabilities:
  191. drop:
  192. - ALL
  193. # readOnlyRootFilesystem: true
  194. runAsNonRoot: true
  195. runAsUser: 1000
  196. # How long to wait for elasticsearch to stop gracefully
  197. terminationGracePeriod: 120
  198. sysctlVmMaxMapCount: 262144
  199. readinessProbe:
  200. failureThreshold: 3
  201. initialDelaySeconds: 10
  202. periodSeconds: 10
  203. successThreshold: 3
  204. timeoutSeconds: 5
  205. # https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-health.html#request-params wait_for_status
  206. clusterHealthCheckParams: "wait_for_status=green&timeout=1s"
  207. ## Use an alternate scheduler.
  208. ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/
  209. ##
  210. schedulerName: ""
  211. imagePullSecrets: []
  212. nodeSelector: {}
  213. tolerations: []
  214. # Enabling this will publicly expose your Elasticsearch instance.
  215. # Only enable this if you have security enabled on your cluster
  216. ingress:
  217. enabled: false
  218. annotations: {}
  219. # kubernetes.io/ingress.class: nginx
  220. # kubernetes.io/tls-acme: "true"
  221. className: "nginx"
  222. pathtype: ImplementationSpecific
  223. hosts:
  224. - host: chart-example.local
  225. paths:
  226. - path: /
  227. tls: []
  228. # - secretName: chart-example-tls
  229. # hosts:
  230. # - chart-example.local
  231. nameOverride: ""
  232. fullnameOverride: ""
  233. healthNameOverride: ""
  234. lifecycle: {}
  235. # preStop:
  236. # exec:
  237. # command: ["/bin/sh", "-c", "echo Hello from the postStart handler > /usr/share/message"]
  238. # postStart:
  239. # exec:
  240. # command:
  241. # - bash
  242. # - -c
  243. # - |
  244. # #!/bin/bash
  245. # # Add a template to adjust number of shards/replicas
  246. # TEMPLATE_NAME=my_template
  247. # INDEX_PATTERN="logstash-*"
  248. # SHARD_COUNT=8
  249. # REPLICA_COUNT=1
  250. # ES_URL=http://localhost:9200
  251. # while [[ "$(curl -s -o /dev/null -w '%{http_code}\n' $ES_URL)" != "200" ]]; do sleep 1; done
  252. # curl -XPUT "$ES_URL/_template/$TEMPLATE_NAME" -H 'Content-Type: application/json' -d'{"index_patterns":['\""$INDEX_PATTERN"\"'],"settings":{"number_of_shards":'$SHARD_COUNT',"number_of_replicas":'$REPLICA_COUNT'}}'
  253. sysctlInitContainer:
  254. enabled: true
  255. keystore: []
  256. networkPolicy:
  257. ## Enable creation of NetworkPolicy resources. Only Ingress traffic is filtered for now.
  258. ## In order for a Pod to access Elasticsearch, it needs to have the following label:
  259. ## {{ template "uname" . }}-client: "true"
  260. ## Example for default configuration to access HTTP port:
  261. ## elasticsearch-master-http-client: "true"
  262. ## Example for default configuration to access transport port:
  263. ## elasticsearch-master-transport-client: "true"
  264. http:
  265. enabled: false
  266. ## if explicitNamespacesSelector is not set or set to {}, only client Pods being in the networkPolicy's namespace
  267. ## and matching all criteria can reach the DB.
  268. ## But sometimes, we want the Pods to be accessible to clients from other namespaces, in this case, we can use this
  269. ## parameter to select these namespaces
  270. ##
  271. # explicitNamespacesSelector:
  272. # # Accept from namespaces with all those different rules (only from whitelisted Pods)
  273. # matchLabels:
  274. # role: frontend
  275. # matchExpressions:
  276. # - {key: role, operator: In, values: [frontend]}
  277. ## Additional NetworkPolicy Ingress "from" rules to set. Note that all rules are OR-ed.
  278. ##
  279. # additionalRules:
  280. # - podSelector:
  281. # matchLabels:
  282. # role: frontend
  283. # - podSelector:
  284. # matchExpressions:
  285. # - key: role
  286. # operator: In
  287. # values:
  288. # - frontend
  289. transport:
  290. ## Note that all Elasticsearch Pods can talk to themselves using transport port even if enabled.
  291. enabled: false
  292. # explicitNamespacesSelector:
  293. # matchLabels:
  294. # role: frontend
  295. # matchExpressions:
  296. # - {key: role, operator: In, values: [frontend]}
  297. # additionalRules:
  298. # - podSelector:
  299. # matchLabels:
  300. # role: frontend
  301. # - podSelector:
  302. # matchExpressions:
  303. # - key: role
  304. # operator: In
  305. # values:
  306. # - frontend
  307. tests:
  308. enabled: true