From 148713bd751c0c575a15d3156e28099de6c0e331 Mon Sep 17 00:00:00 2001
From: harshavardhanc <harshavardhanc95@gmail.com>
Date: Wed, 13 May 2020 12:56:40 +0530
Subject: [PATCH] Issue #00 fix: log-es clustering and helm chart for fluentd

---
 ansible/esupgradelatest.yml                   |   8 +-
 .../roles/es6/templates/elasticsearch.yml.j2  |   2 +-
 .../log-es6/templates/elasticsearch.yml.j2    |   2 +-
 .../templates/fluent-elasticsearch.yaml       |   2 +
 .../roles/logging/templates/kibana.yaml       |   8 +-
 .../logging/fluentd-elasticsearch/Chart.yaml  |  24 +
 .../logging/fluentd-elasticsearch/OWNERS      |   6 +
 .../logging/fluentd-elasticsearch/README.md   | 306 ++++++++++
 .../fluentd-elasticsearch/templates/NOTES.txt |  27 +
 .../templates/_helpers.tpl                    |  55 ++
 .../templates/clusterrole.yaml                |  22 +
 .../templates/clusterrolebinding.yaml         |  20 +
 .../templates/configmaps.yaml                 | 543 ++++++++++++++++++
 .../templates/daemonset.yaml                  | 218 +++++++
 .../templates/metrics-service.yaml            |  22 +
 .../templates/pod-security-policy.yaml        |  55 ++
 .../templates/prometheusrule.yaml             |  70 +++
 .../fluentd-elasticsearch/templates/role.yaml |  22 +
 .../templates/rolebinding.yaml                |  21 +
 .../templates/service-account.yaml            |  19 +
 .../templates/service.yaml                    |  31 +
 .../templates/servicemonitor.yaml             |  37 ++
 .../logging/fluentd-elasticsearch/values.yaml | 257 +++++++++
 .../logging/kibana/templates/deployment.yaml  |   3 +-
 .../helm_charts/logging/kibana/values.yaml    |   2 +-
 25 files changed, 1767 insertions(+), 15 deletions(-)
 create mode 100644 kubernetes/ansible/roles/logging/templates/fluent-elasticsearch.yaml
 create mode 100644 kubernetes/helm_charts/logging/fluentd-elasticsearch/Chart.yaml
 create mode 100644 kubernetes/helm_charts/logging/fluentd-elasticsearch/OWNERS
 create mode 100644 kubernetes/helm_charts/logging/fluentd-elasticsearch/README.md
 create mode 100644 kubernetes/helm_charts/logging/fluentd-elasticsearch/templates/NOTES.txt
 create mode 100644 kubernetes/helm_charts/logging/fluentd-elasticsearch/templates/_helpers.tpl
 create mode 100644 kubernetes/helm_charts/logging/fluentd-elasticsearch/templates/clusterrole.yaml
 create mode 100644 kubernetes/helm_charts/logging/fluentd-elasticsearch/templates/clusterrolebinding.yaml
 create mode 100755 kubernetes/helm_charts/logging/fluentd-elasticsearch/templates/configmaps.yaml
 create mode 100644 kubernetes/helm_charts/logging/fluentd-elasticsearch/templates/daemonset.yaml
 create mode 100644 kubernetes/helm_charts/logging/fluentd-elasticsearch/templates/metrics-service.yaml
 create mode 100644 kubernetes/helm_charts/logging/fluentd-elasticsearch/templates/pod-security-policy.yaml
 create mode 100644 kubernetes/helm_charts/logging/fluentd-elasticsearch/templates/prometheusrule.yaml
 create mode 100644 kubernetes/helm_charts/logging/fluentd-elasticsearch/templates/role.yaml
 create mode 100644 kubernetes/helm_charts/logging/fluentd-elasticsearch/templates/rolebinding.yaml
 create mode 100644 kubernetes/helm_charts/logging/fluentd-elasticsearch/templates/service-account.yaml
 create mode 100644 kubernetes/helm_charts/logging/fluentd-elasticsearch/templates/service.yaml
 create mode 100644 kubernetes/helm_charts/logging/fluentd-elasticsearch/templates/servicemonitor.yaml
 create mode 100644 kubernetes/helm_charts/logging/fluentd-elasticsearch/values.yaml

diff --git a/ansible/esupgradelatest.yml b/ansible/esupgradelatest.yml
index ab354efcb..b3d14a440 100644
--- a/ansible/esupgradelatest.yml
+++ b/ansible/esupgradelatest.yml
@@ -1,4 +1,4 @@
-- hosts: log-es-2
+- hosts: log-es
   become: yes
   vars_files:
     - ['{{inventory_dir}}/secrets.yml']
@@ -10,14 +10,14 @@
     - openjdk
     - { role: log-es6,
         es_config: {
-          cluster.name: "{{ node_name }}",
-          discovery.zen.ping.unicast.hosts: "{{ groups['log-es-2'] }}",
+          cluster.name: "{{ log_es_etc_cluster_name }}",
+          discovery.zen.ping.unicast.hosts: "{{ groups['log-es'] }}",
           http.port: 9200,
           transport.tcp.port: 9300,
           node.data: "{{ es_etc_node_data | default('true') }}",
           node.master: "{{ es_etc_node_master | default('true') }}",
           bootstrap.memory_lock: true,
         },
-        es_etc_discovery_zen_ping_unicast_hosts: "{{ groups['log-es-2'] }}",
+        es_etc_discovery_zen_ping_unicast_hosts: "{{ groups['log-es'] }}",
         es_etc_cluster_name: "{{ node_name }}"
       }
diff --git a/ansible/roles/es6/templates/elasticsearch.yml.j2 b/ansible/roles/es6/templates/elasticsearch.yml.j2
index a3744b081..030cc6e1a 100644
--- a/ansible/roles/es6/templates/elasticsearch.yml.j2
+++ b/ansible/roles/es6/templates/elasticsearch.yml.j2
@@ -10,7 +10,7 @@ cluster.name: elasticsearch
 {% if (groups['es']|length) <= 2 %}
 discovery.zen.minimum_master_nodes: 1
 {% else %}
-discovery.zen.minimum_master_nodes: "{{ ((groups['es']|length) / 2 +1) | round(0, 'ceil') | int}}"
+discovery.zen.minimum_master_nodes: "{{ ((groups['es']|length) / 2 +1) | round(0, 'floor') | int}}"
 {% endif %}
 
 {% if es_config['node.name'] is not defined %}
diff --git a/ansible/roles/log-es6/templates/elasticsearch.yml.j2 b/ansible/roles/log-es6/templates/elasticsearch.yml.j2
index e83c6741a..ced4c41a1 100644
--- a/ansible/roles/log-es6/templates/elasticsearch.yml.j2
+++ b/ansible/roles/log-es6/templates/elasticsearch.yml.j2
@@ -10,7 +10,7 @@ cluster.name: elasticsearch
 {% if (groups['log-es']|length) <= 2 %}
 discovery.zen.minimum_master_nodes: 1
 {% else %}
-discovery.zen.minimum_master_nodes: "{{ ((groups['log-es']|length) / 2 +1) | round(0, 'ceil') | int}}"
+discovery.zen.minimum_master_nodes: "{{ ((groups['log-es']|length) / 2 +1) | round(0, 'floor') | int}}"
 {% endif %}
 
 {% if es_config['node.name'] is not defined %}
diff --git a/kubernetes/ansible/roles/logging/templates/fluent-elasticsearch.yaml b/kubernetes/ansible/roles/logging/templates/fluent-elasticsearch.yaml
new file mode 100644
index 000000000..5c233d18a
--- /dev/null
+++ b/kubernetes/ansible/roles/logging/templates/fluent-elasticsearch.yaml
@@ -0,0 +1,2 @@
+elasticsearch:
+  hosts: {{groups['log-es']|join(':9200,')}}:9200
diff --git a/kubernetes/ansible/roles/logging/templates/kibana.yaml b/kubernetes/ansible/roles/logging/templates/kibana.yaml
index 96109b957..31063dedc 100644
--- a/kubernetes/ansible/roles/logging/templates/kibana.yaml
+++ b/kubernetes/ansible/roles/logging/templates/kibana.yaml
@@ -1,5 +1,3 @@
-elasticsearchHosts: "http://{{logger_es6_host}}:{{logger_es_port}}"
-
 replicas: {{ kibana_replicas | default(1) }}
 image: "docker.elastic.co/kibana/kibana"
 imageTag: "6.8.6"
@@ -18,15 +16,13 @@ healthCheckPath: "/dashboard"
 kibanaConfig:
    kibana.yml: |
      server.basePath: "/dashboard"
+     server.host: "0.0.0.0"
      server.rewriteBasePath: true
      elasticsearch.shardTimeout: 0
+     elasticsearch.hosts: ["http://{{groups['log-es'] | join(':9200","http://')}}:9200"]
      elasticsearch.requestTimeout: 300000
      elasticsearch.preserveHost: true
      elasticsearch.ssl.verificationMode: full
-     kibana.index: ".kibana"
-     kibana.defaultAppId: "discover"
-     xpack.ilm.enabled: false
-     xpack.security.enabled: false
 
 updateStrategy:
   type: "RollingUpdate"
diff --git a/kubernetes/helm_charts/logging/fluentd-elasticsearch/Chart.yaml b/kubernetes/helm_charts/logging/fluentd-elasticsearch/Chart.yaml
new file mode 100644
index 000000000..5d0e10154
--- /dev/null
+++ b/kubernetes/helm_charts/logging/fluentd-elasticsearch/Chart.yaml
@@ -0,0 +1,24 @@
+apiVersion: v1
+name: fluentd-elasticsearch
+version: 9.0.0
+appVersion: 3.0.1
+home: https://www.fluentd.org/
+description: A Fluentd Helm chart for Kubernetes with Elasticsearch output
+icon: https://raw.githubusercontent.com/fluent/fluentd-docs/master/public/logo/Fluentd_square.png
+keywords:
+- fluentd
+- elasticsearch
+- multiline
+- detect-exceptions
+- logging
+sources:
+- https://github.com/kiwigrid/helm-charts/tree/master/charts/fluentd-elasticsearch
+- https://github.com/fluent/fluentd-kubernetes-daemonset
+- https://github.com/GoogleCloudPlatform/fluent-plugin-detect-exceptions
+- https://github.com/kubernetes/kubernetes/tree/master/cluster/addons/fluentd-elasticsearch/fluentd-es-image
+maintainers:
+- name: monotek
+  email: andre.bauer@kiwigrid.com
+- name: axdotl
+  email: axel.koehler@kiwigrid.com
+engine: gotpl
diff --git a/kubernetes/helm_charts/logging/fluentd-elasticsearch/OWNERS b/kubernetes/helm_charts/logging/fluentd-elasticsearch/OWNERS
new file mode 100644
index 000000000..d2be9eee1
--- /dev/null
+++ b/kubernetes/helm_charts/logging/fluentd-elasticsearch/OWNERS
@@ -0,0 +1,6 @@
+approvers:
+- axdotl
+- monotek
+reviewers:
+- axdotl
+- monotek
diff --git a/kubernetes/helm_charts/logging/fluentd-elasticsearch/README.md b/kubernetes/helm_charts/logging/fluentd-elasticsearch/README.md
new file mode 100644
index 000000000..36e176dca
--- /dev/null
+++ b/kubernetes/helm_charts/logging/fluentd-elasticsearch/README.md
@@ -0,0 +1,306 @@
+# Fluentd Elasticsearch
+
+- Installs [Fluentd](https://www.fluentd.org/) log forwarder.
+
+## TL;DR
+
+```console
+helm install kiwigrid/fluentd-elasticsearch
+```
+
+## Introduction
+
+This chart bootstraps a [Fluentd](https://www.fluentd.org/) daemonset on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager.
+It's meant to be a drop-in replacement for fluentd-gcp on GKE, which sends logs to Google's Stackdriver service, but it can also be used in other places where logging to Elasticsearch is required.
+The Docker image used also contains Google's detect-exceptions plugin (for Java multiline stacktraces), a Prometheus exporter, a Kubernetes metadata filter, and systemd plugins.
+
+## Prerequisites
+
+- Kubernetes 1.8+ with Beta APIs enabled
+
+## Installing the Chart
+
+To install the chart with the release name `my-release`:
+
+```console
+helm install --name my-release kiwigrid/fluentd-elasticsearch
+```
+
+The command deploys fluentd-elasticsearch on the Kubernetes cluster in the default configuration. The [configuration](#configuration) section lists the parameters that can be configured during installation.
+
+## Uninstalling the Chart
+
+To uninstall/delete the `my-release` deployment:
+
+```console
+helm delete my-release
+```
+
+The command removes all the Kubernetes components associated with the chart and deletes the release.
+
+## Configuration
+
+The following table lists the configurable parameters of the Fluentd elasticsearch chart and their default values.
+
+| Parameter                                            | Description                                                                    | Default                                            |
+| ---------------------------------------------------- | ------------------------------------------------------------------------------ | -------------------------------------------------- |
+| `affinity`                                           | Optional daemonset affinity                                                    | `{}`                                               |
+| `annotations`                                        | Optional daemonset annotations                                                 | `NULL`                                             |
+| `podAnnotations`                                     | Optional daemonset's pods annotations                                          | `NULL`                                             |
+| `configMaps.useDefaults.systemConf`                  | Use default system.conf                                                        | true                                               |
+| `configMaps.useDefaults.containersInputConf`         | Use default containers.input.conf                                              | true                                               |
+| `configMaps.useDefaults.systemInputConf`             | Use default system.input.conf                                                  | true                                               |
+| `configMaps.useDefaults.forwardInputConf`            | Use default forward.input.conf                                                 | true                                               |
+| `configMaps.useDefaults.monitoringConf`              | Use default monitoring.conf                                                    | true                                               |
+| `configMaps.useDefaults.outputConf`                  | Use default output.conf                                                        | true                                               |
+| `extraConfigMaps`                                    | Add additional Configmap or overwrite disabled default                         | `{}`                                               |
+| `awsSigningSidecar.enabled`                          | Enable AWS request signing sidecar                                             | `false`                                            |
+| `awsSigningSidecar.resources`                        | AWS Sidecar resources                                                          | `{}`                                               |
+| `awsSigningSidecar.network.port`                     | AWS Sidecar exposure port                                                      | `8080`                                             |
+| `awsSigningSidecar.network.address`                  | AWS Sidecar listen address                                                     | `localhost`                                        |
+| `awsSigningSidecar.network.remoteReadTimeoutSeconds` | AWS Sidecar socket read timeout when talking to ElasticSearch                  | `15`                                               |
+| `awsSigningSidecar.image.repository`                 | AWS signing sidecar repository image                                           | `abutaha/aws-es-proxy`                             |
+| `awsSigningSidecar.image.tag`                        | AWS signing sidecar repository tag                                             | `v1.0`                                             |
+| `elasticsearch.auth.enabled`                         | Elasticsearch Auth enabled                                                     | `false`                                            |
+| `elasticsearch.auth.user`                            | Elasticsearch Auth User                                                        | `""`                                               |
+| `elasticsearch.auth.password`                        | Elasticsearch Auth Password                                                    | `""`                                               |
+| `elasticsearch.hosts`                                | Elasticsearch Hosts List (host and port)                                       | `["elasticsearch-client:9200"]`                    |
+| `elasticsearch.includeTagKey`                        | Elasticsearch Including of Tag key                                             | `true`                                             |
+| `elasticsearch.logstash.enabled`                     | Elasticsearch Logstash enabled                                                 | `true`                                             |
+| `elasticsearch.logstash.prefix`                      | Elasticsearch Logstash prefix                                                  | `logstash`                                         |
+| `elasticsearch.path`                                 | Elasticsearch Path                                                             | `""`                                               |
+| `elasticsearch.scheme`                               | Elasticsearch scheme setting                                                   | `http`                                             |
+| `elasticsearch.sslVerify`                            | Elasticsearch Auth SSL verify                                                  | `true`                                             |
+| `elasticsearch.sslVersion`                           | Elasticsearch tls version setting                                              | `TLSv1_2`                                          |
+| `elasticsearch.outputType`                           | Elasticsearch output type                                                      | `elasticsearch`                                    |
+| `elasticsearch.typeName`                             | Elasticsearch type name                                                        | `_doc`                                             |
+| `elasticsearch.logLevel`                             | Elasticsearch global log level                                                 | `info`                                             |
+| `elasticsearch.reconnectOnError`                     | Elasticsearch Reconnect on error                                               | `true`                                             |
+| `elasticsearch.reloadOnFailure`                      | Elasticsearch Reload on failure                                                | `false`                                            |
+| `elasticsearch.reloadConnections`                    | Elasticsearch reload connections                                               | `false`                                            |
+| `elasticsearch.buffer.enabled`                       | Elasticsearch Buffer enabled                                                   | `true`                                             |
+| `elasticsearch.buffer.type`                          | Elasticsearch Buffer type                                                      | `file`                                             |
+| `elasticsearch.buffer.path`                          | Elasticsearch Buffer path                                                      | `/var/log/fluentd-buffers/kubernetes.system.buffer`|
+| `elasticsearch.buffer.flushMode`                     | Elasticsearch Buffer flush mode                                                | `interval`                                         |
+| `elasticsearch.buffer.retryType`                     | Elasticsearch Buffer retry type                                                | `exponential_backoff`                              |
+| `elasticsearch.buffer.flushThreadCount`              | Elasticsearch Buffer flush thread count                                        | `2`                                                |
+| `elasticsearch.buffer.flushInterval`                 | Elasticsearch Buffer flush interval                                            | `5s`                                               |
+| `elasticsearch.buffer.retryForever`                  | Elasticsearch Buffer retry forever                                             | `true`                                             |
+| `elasticsearch.buffer.retryMaxInterval`              | Elasticsearch Buffer retry max interval                                        | `30`                                               |
+| `elasticsearch.buffer.chunkLimitSize`                | Elasticsearch Buffer chunk limit size                                          | `2M`                                               |
+| `elasticsearch.buffer.queueLimitLength`              | Elasticsearch Buffer queue limit size                                          | `8`                                                |
+| `elasticsearch.buffer.overflowAction`                | Elasticsearch Buffer overflow action                                           | `block`                                            |
+| `env`                                                | List of env vars that are added to the fluentd pods                            | `{}`                                               |
+| `fluentdArgs`                                        | Fluentd args                                                                   | `--no-supervisor -q`                               |
+| `secret`                                             | List of env vars that are set from secrets and added to the fluentd pods       | `[]`                                               |
+| `extraVolumeMounts`                                  | Mount extra volumes, required to mount SSL certificates when ES has TLS enabled | `[]`                                               |
+| `extraVolumes`                                       | Extra volumes                                                                  | `[]`                                               |
+| `hostLogDir.varLog`                                  | Specify where fluentd can find var log                                         | `/var/log`                                         |
+| `hostLogDir.dockerContainers`                        | Specify where fluentd can find logs for docker container                       | `/var/lib/docker/containers`                       |
+| `hostLogDir.libSystemdDir`                           | Specify where fluentd can find logs for lib Systemd                            | `/usr/lib64`                                       |
+| `image.repository`                                   | Image                                                                          | `quay.io/fluentd_elasticsearch/fluentd`            |
+| `image.tag`                                          | Image tag                                                                      | `v3.0.1`                                           |
+| `image.pullPolicy`                                   | Image pull policy                                                              | `IfNotPresent`                                     |
+| `image.pullSecrets`                                  | Image pull secrets                                                             | ``                                                 |
+| `livenessProbe.enabled`                              | Whether to enable livenessProbe                                                | `true`                                             |
+| `livenessProbe.initialDelaySeconds`                  | livenessProbe initial delay seconds                                            | `600`                                              |
+| `livenessProbe.periodSeconds`                        | livenessProbe period seconds                                                   | `60`                                               |
+| `livenessProbe.kind`                                 | livenessProbe kind                                                             | `Set to a Linux compatible command`                |
+| `nodeSelector`                                       | Optional daemonset nodeSelector                                                | `{}`                                               |
+| `podSecurityPolicy.annotations`                      | Specify pod annotations in the pod security policy                             | `{}`                                               |
+| `podSecurityPolicy.enabled`                          | Specify if a pod security policy must be created                               | `false`                                            |
+| `priorityClassName`                                  | Optional PriorityClass for pods                                                | `""`                                               |
+| `prometheusRule.enabled`                             | Whether to enable Prometheus prometheusRule                                    | `false`                                            |
+| `prometheusRule.prometheusNamespace`                 | Namespace for prometheusRule                                                   | `monitoring`                                       |
+| `prometheusRule.labels`                              | Optional labels for prometheusRule                                             | `{}`                                               |
+| `rbac.create`                                        | RBAC                                                                           | `true`                                             |
+| `resources.limits.cpu`                               | CPU limit                                                                      | `100m`                                             |
+| `resources.limits.memory`                            | Memory limit                                                                   | `500Mi`                                            |
+| `resources.requests.cpu`                             | CPU request                                                                    | `100m`                                             |
+| `resources.requests.memory`                          | Memory request                                                                 | `200Mi`                                            |
+| `service`                                            | Service definition                                                             | `{}`                                               |
+| `service.ports`                                      | List of service ports dict [{name:...}...]                                     | Not Set                                            |
+| `service.ports[].type`                               | Service type (ClusterIP/NodePort)                                              | `ClusterIP`                                        |
+| `service.ports[].name`                               | One of service ports name                                                      | Not Set                                            |
+| `service.ports[].port`                               | Service port                                                                   | Not Set                                            |
+| `service.ports[].nodePort`                           | NodePort port (when service.type is NodePort)                                  | Not Set                                            |
+| `service.ports[].protocol`                           | Service protocol (optional, can be TCP/UDP)                                    | Not Set                                            |
+| `serviceAccount.create`                              | Specifies whether a service account should be created.                         | `true`                                             |
+| `serviceAccount.name`                                | Name of the service account.                                                   | `""`                                               |
+| `serviceAccount.annotations`                         | Specify annotations in the pod service account                                 | `{}`                                               |
+| `serviceMetric.enabled`                              | Generate the metric service regardless of whether serviceMonitor is enabled.   | `false`                                            |
+| `serviceMonitor.enabled`                             | Whether to enable Prometheus serviceMonitor                                    | `false`                                            |
+| `serviceMonitor.port`                                | Define on which port the ServiceMonitor should scrape                          | `24231`                                            |
+| `serviceMonitor.interval`                            | Interval at which metrics should be scraped                                    | `10s`                                              |
+| `serviceMonitor.path`                                | Path for Metrics                                                               | `/metrics`                                         |
+| `serviceMonitor.labels`                              | Optional labels for serviceMonitor                                             | `{}`                                               |
+| `serviceMonitor.metricRelabelings`                   | Optional metric relabel configs to apply to samples before ingestion           | `[]`                                               |
+| `serviceMonitor.relabelings`                         | Optional relabel configs to apply to samples before scraping                   | `[]`                                               |
+| `serviceMonitor.jobLabel`                            | Label whose value will define the job name                                     | `app.kubernetes.io/instance`                       |
+| `serviceMonitor.type`                                | Optional type of the metrics service                                           | `ClusterIP`                                        |
+| `tolerations`                                        | Optional daemonset tolerations                                                 | `[]`                                               |
+| `updateStrategy`                                     | Optional daemonset update strategy                                             | `type: RollingUpdate`                              |
+
+Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example,
+
+```console
+helm install --name my-release kiwigrid/fluentd-elasticsearch --set elasticsearch.scheme=https
+```
+
+Alternatively, a YAML file that specifies the values for the above parameters can be provided while installing the chart. For example,
+
+```console
+helm install --name my-release -f values.yaml kiwigrid/fluentd-elasticsearch
+```
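+
+For example, a minimal `values.yaml` sketch pointing the chart at two Elasticsearch nodes (the hostnames are illustrative):
+
+```yaml
+elasticsearch:
+  hosts: ["elasticsearch-node-1:9200", "elasticsearch-node-2:9200"]
+  scheme: http
+```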
+
+## Installation
+
+### IBM IKS
+
+For IBM IKS, the path `/var/log/pods` must be mounted; otherwise only kubelet logs will be available:
+
+```yaml
+extraVolumeMounts: |
+    - name: pods
+      mountPath: /var/log/pods
+      readOnly: true
+
+extraVolumes: |
+    - name: pods
+      hostPath:
+        path: "/var/log/pods"
+        type: Directory
+```
+
+### AWS Elasticsearch Domains
+
+AWS Elasticsearch requires requests to upload data to be signed using [AWS Signature V4](https://docs.aws.amazon.com/general/latest/gr/signature-version-4.html). In order to support this, you can add `awsSigningSidecar: {enabled: true}` to your configuration. This results in a sidecar container being deployed that proxies all requests to your Elasticsearch domain and signs them appropriately.
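+
+A minimal values sketch enabling the sidecar (the values shown are the chart defaults from the configuration table above):
+
+```yaml
+awsSigningSidecar:
+  enabled: true
+  network:
+    address: localhost
+    port: 8080
+```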
+
+## Upgrading
+
+### From a version &lt; 2.0.0
+
+When you upgrade this chart, you have to add the `--force` flag to your `helm upgrade` command, as there have been changes to the labels which make a normal upgrade impossible.
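+
+For example, assuming the release is named `my-release` (the name is illustrative):
+
+```console
+helm upgrade --force my-release kiwigrid/fluentd-elasticsearch
+```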
+
+### From a version &ge; 4.9.3 to version &ge; 5.0.0
+
+When upgrading this chart you need to rename the `livenessProbe.command` parameter to `livenessProbe.kind.exec.command` (only applicable if the `livenessProbe.command` parameter was used).
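+
+A minimal sketch of the rename, assuming a probe that execs a command (the command itself is illustrative):
+
+```yaml
+# Before (< 5.0.0)
+livenessProbe:
+  command:
+    - cat
+    - /var/log/fluentd-buffers/liveness
+
+# After (>= 5.0.0)
+livenessProbe:
+  kind:
+    exec:
+      command:
+        - cat
+        - /var/log/fluentd-buffers/liveness
+```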
+
+### From a version &lt; 6.0.0 to version &ge; 6.0.0
+
+When upgrading this chart you have to perform updates for any system that
+uses fluentd output from systemd logs, because now:
+
+- field names have removed leading underscores (`_pid` becomes `pid`)
+- field names from systemd are now lowercase (`PROCESS` becomes `process`)
+
+This means any system that uses fluentd output needs to be updated,
+especially:
+
+- in Kibana, go to `Management > Index Patterns` and, for each index, click the
+  `Refresh field list` icon
+- fix renamed fields in other places, such as Kibana or Grafana, in items
+  such as dashboard queries/vars/annotations
+
+It is strongly suggested to temporarily set up a new fluentd instance with output
+to another Elasticsearch index prefix to see the differences and then apply the changes. The number of fields altered can be considerable and is hard to list exhaustively in this document.
+
+Some dashboards can be easily fixed with sed:
+
+```bash
+sed -e 's/_PID/pid/g' dashboard.json
+```
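+
+Assuming GNU sed (the `\b` word boundaries keep fields such as `SYSLOG_PID` untouched), several fields from the list below can be rewritten in one pass:
+
+```bash
+sed -e 's/\b_PID\b/pid/g' -e 's/\b_HOSTNAME\b/hostname/g' -e 's/\b_SYSTEMD_UNIT\b/systemd_unit/g' dashboard.json
+```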
+
+Below is a list of the most commonly used systemd fields:
+
+```text
+__MONOTONIC_TIMESTAMP
+__REALTIME_TIMESTAMP
+_BOOT_ID
+_CAP_EFFECTIVE
+_CMDLINE
+_COMM
+_EXE
+_GID
+_HOSTNAME
+_MACHINE_ID
+_PID
+_SOURCE_REALTIME_TIMESTAMP
+_SYSTEMD_CGROUP
+_SYSTEMD_SLICE
+_SYSTEMD_UNIT
+_TRANSPORT
+_UID
+CODE_FILE
+CODE_FUNC
+CODE_FUNCTION
+CODE_LINE
+MESSAGE
+MESSAGE_ID
+NM_LOG_DOMAINS
+NM_LOG_LEVEL
+PRIORITY
+SYSLOG_FACILITY
+SYSLOG_IDENTIFIER
+SYSLOG_PID
+TIMESTAMP_BOOTTIME
+TIMESTAMP_MONOTONIC
+UNIT
+```
+
+### From a version &le; 6.3.0 to version &ge; 7.0.0
+
+The additional plugins option has been removed, as the container image used no longer contains the build tools needed to build the plugins. Please use your own container image containing the plugins you want to use.
+
+### From a version &lt; 8.0.0 to version &ge; 8.0.0
+
+> Both `elasticsearch.host` and `elasticsearch.port` are removed in favor of `elasticsearch.hosts`
+
+You can now [configure multiple elasticsearch hosts](https://docs.fluentd.org/output/elasticsearch#hosts-optional) as target for fluentd.
+
+The following parameters are deprecated and have been replaced by `elasticsearch.hosts` with a default value of `["elasticsearch-client:9200"]`:
+
+```yaml
+elasticsearch:
+  host: elasticsearch-client
+  port: 9200
+```
+
+You can use any YAML array syntax:
+
+```yaml
+elasticsearch:
+  hosts: ["elasticsearch-node-1:9200", "elasticsearch-node-2:9200"]
+```
+
+```yaml
+elasticsearch:
+  hosts:
+    - "elasticsearch-node-1:9200"
+    - "elasticsearch-node-2:9200"
+```
+
+Note:
+> If you are using the AWS Sidecar, only the first host in the array is used. [Aws-es-proxy](https://github.com/abutaha/aws-es-proxy) is limited to one endpoint.
+
+### From a version &lt; 8.0.0 to version &ge; 9.0.0
+
+In this version the elasticsearch template in the `output.conf` configmap was expanded to be fully configurable from `values.yaml`:
+
+- decide whether to add a `logstash` block - toggle `logstash.enabled`
+- decide whether to add a `buffer` block - toggle `buffer.enabled`
+
+#### The following fields were removed from the elasticsearch block in values.yaml
+
+- `bufferChunkLimit` in favor of `buffer.chunkLimitSize`
+- `bufferQueueLimit` in favor of `buffer.queueLimitLength`
+- `logstashPrefix` in favor of `logstash.enabled` and `logstash.prefix`
+
+#### The following fields were added
+
+- `reconnectOnError`
+- `reloadOnFailure`
+- `reloadConnections`
+- `buffer.enabled`
+- `buffer.type`
+- `buffer.path`
+- `buffer.flushMode`
+- `buffer.retryType`
+- `buffer.flushThreadCount`
+- `buffer.flushInterval`
+- `buffer.retryForever`
+- `buffer.retryMaxInterval`
+- `buffer.chunkLimitSize`
+- `buffer.queueLimitLength`
+- `buffer.overflowAction`
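+
+A minimal sketch of the new structure in `values.yaml` (the values shown are the chart defaults from the configuration table above):
+
+```yaml
+elasticsearch:
+  logstash:
+    enabled: true
+    prefix: logstash
+  buffer:
+    enabled: true
+    type: file
+    path: /var/log/fluentd-buffers/kubernetes.system.buffer
+    flushMode: interval
+    chunkLimitSize: 2M
+    queueLimitLength: 8
+    overflowAction: block
+```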
\ No newline at end of file
diff --git a/kubernetes/helm_charts/logging/fluentd-elasticsearch/templates/NOTES.txt b/kubernetes/helm_charts/logging/fluentd-elasticsearch/templates/NOTES.txt
new file mode 100644
index 000000000..806d7d5b8
--- /dev/null
+++ b/kubernetes/helm_charts/logging/fluentd-elasticsearch/templates/NOTES.txt
@@ -0,0 +1,27 @@
+1. To verify that Fluentd has started, run:
+
+  kubectl --namespace={{ .Release.Namespace }} get pods -l "app.kubernetes.io/name={{ include "fluentd-elasticsearch.name" . }},app.kubernetes.io/instance={{ .Release.Name }}"
+
+THIS APPLICATION CAPTURES ALL CONSOLE OUTPUT AND FORWARDS IT TO ELASTICSEARCH. Anything that might be identifying,
+including things like IP addresses, container images, and object names, will NOT be anonymized.
+
+{{- if .Values.service }}
+2. Get the application URL by running these commands:
+{{- range $port := .Values.service.ports }}
+{{- $service_type := $port.type | default "ClusterIP" -}}
+{{- if contains "NodePort" $service_type }}
+  export NODE_PORT=$(kubectl get --namespace {{ $.Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "fluentd-elasticsearch.fullname" $ }})
+  export NODE_IP=$(kubectl get nodes --namespace {{ $.Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
+  echo http://$NODE_IP:$NODE_PORT
+{{- else if contains "LoadBalancer" $service_type }}
+     NOTE: It may take a few minutes for the LoadBalancer IP to be available.
+           You can watch its status by running 'kubectl get svc -w {{ include "fluentd-elasticsearch.fullname" $ }}'
+  export SERVICE_IP=$(kubectl get svc --namespace {{ $.Release.Namespace }} {{ include "fluentd-elasticsearch.fullname" $ }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
+  echo http://$SERVICE_IP:{{ $port.port }}
+{{- else if contains "ClusterIP" $service_type }}
+  export POD_NAME=$(kubectl get pods --namespace {{ $.Release.Namespace }} -l "app.kubernetes.io/name={{ include "fluentd-elasticsearch.name" $ }},app.kubernetes.io/instance={{ $.Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
+  echo "Visit http://127.0.0.1:8080 to use your application"
+  kubectl port-forward $POD_NAME 8080:80
+{{- end }}
+{{- end }}
+{{- end }}
diff --git a/kubernetes/helm_charts/logging/fluentd-elasticsearch/templates/_helpers.tpl b/kubernetes/helm_charts/logging/fluentd-elasticsearch/templates/_helpers.tpl
new file mode 100644
index 000000000..c46074df7
--- /dev/null
+++ b/kubernetes/helm_charts/logging/fluentd-elasticsearch/templates/_helpers.tpl
@@ -0,0 +1,55 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Expand the name of the chart.
+*/}}
+{{- define "fluentd-elasticsearch.name" -}}
+{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Create a default fully qualified app name.
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+If release name contains chart name it will be used as a full name.
+*/}}
+{{- define "fluentd-elasticsearch.fullname" -}}
+{{- if .Values.fullnameOverride -}}
+{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
+{{- else -}}
+{{- $name := default .Chart.Name .Values.nameOverride -}}
+{{- if contains $name .Release.Name -}}
+{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
+{{- else -}}
+{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+{{- end -}}
+{{- end -}}
+{{/*
+Create chart name and version as used by the chart label.
+*/}}
+{{- define "fluentd-elasticsearch.chart" -}}
+{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Create the name of the service account to use
+*/}}
+{{- define "fluentd-elasticsearch.serviceAccountName" -}}
+{{- if .Values.serviceAccount.create -}}
+    {{ default (include "fluentd-elasticsearch.fullname" .) .Values.serviceAccount.name }}
+{{- else -}}
+    {{ default "default" .Values.serviceAccount.name }}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Common labels
+*/}}
+{{- define "fluentd-elasticsearch.labels" -}}
+app.kubernetes.io/name: {{ include "fluentd-elasticsearch.name" . }}
+helm.sh/chart: {{ include "fluentd-elasticsearch.chart" . }}
+app.kubernetes.io/instance: {{ .Release.Name }}
+{{- if .Chart.AppVersion }}
+app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
+{{- end }}
+app.kubernetes.io/managed-by: {{ .Release.Service }}
+{{- end -}}
diff --git a/kubernetes/helm_charts/logging/fluentd-elasticsearch/templates/clusterrole.yaml b/kubernetes/helm_charts/logging/fluentd-elasticsearch/templates/clusterrole.yaml
new file mode 100644
index 000000000..1a107ab06
--- /dev/null
+++ b/kubernetes/helm_charts/logging/fluentd-elasticsearch/templates/clusterrole.yaml
@@ -0,0 +1,22 @@
+{{- if .Values.rbac.create -}}
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  name: {{ include "fluentd-elasticsearch.fullname" . }}
+  labels:
+{{ include "fluentd-elasticsearch.labels" . | indent 4 }}
+    {{- if semverCompare "> 1.6" .Capabilities.KubeVersion.GitVersion }}
+    kubernetes.io/cluster-service: "true"
+    {{- end }}
+    addonmanager.kubernetes.io/mode: Reconcile
+rules:
+- apiGroups:
+  - ""
+  resources:
+  - "namespaces"
+  - "pods"
+  verbs:
+  - "get"
+  - "watch"
+  - "list"
+{{- end -}}
diff --git a/kubernetes/helm_charts/logging/fluentd-elasticsearch/templates/clusterrolebinding.yaml b/kubernetes/helm_charts/logging/fluentd-elasticsearch/templates/clusterrolebinding.yaml
new file mode 100644
index 000000000..708e4ee4e
--- /dev/null
+++ b/kubernetes/helm_charts/logging/fluentd-elasticsearch/templates/clusterrolebinding.yaml
@@ -0,0 +1,20 @@
+{{- if .Values.rbac.create -}}
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  name: {{ include "fluentd-elasticsearch.fullname" . }}
+  labels:
+{{ include "fluentd-elasticsearch.labels" . | indent 4 }}
+    {{- if semverCompare "> 1.6" .Capabilities.KubeVersion.GitVersion }}
+    kubernetes.io/cluster-service: "true"
+    {{- end }}
+    addonmanager.kubernetes.io/mode: Reconcile
+subjects:
+- kind: ServiceAccount
+  name: {{ if .Values.serviceAccount.name }}{{ .Values.serviceAccount.name }}{{ else }}{{ include "fluentd-elasticsearch.fullname" . }}{{ end }}
+  namespace: {{ .Release.Namespace }}
+roleRef:
+  kind: ClusterRole
+  name: {{ include "fluentd-elasticsearch.fullname" . }}
+  apiGroup: rbac.authorization.k8s.io
+{{- end -}}
diff --git a/kubernetes/helm_charts/logging/fluentd-elasticsearch/templates/configmaps.yaml b/kubernetes/helm_charts/logging/fluentd-elasticsearch/templates/configmaps.yaml
new file mode 100755
index 000000000..171c1c4cb
--- /dev/null
+++ b/kubernetes/helm_charts/logging/fluentd-elasticsearch/templates/configmaps.yaml
@@ -0,0 +1,543 @@
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: {{ include "fluentd-elasticsearch.fullname" . }}
+  labels:
+{{ include "fluentd-elasticsearch.labels" . | indent 4 }}
+    {{- if semverCompare "> 1.6" .Capabilities.KubeVersion.GitVersion }}
+    kubernetes.io/cluster-service: "true"
+    {{- end }}
+    addonmanager.kubernetes.io/mode: Reconcile
+data:
+{{- if .Values.configMaps.useDefaults.systemConf }}
+  system.conf: |-
+    <system>
+      root_dir /tmp/fluentd-buffers/
+    </system>
+{{- end }}
+
+{{- if .Values.configMaps.useDefaults.containersInputConf }}
+  containers.input.conf: |-
+    # This configuration file for Fluentd / td-agent is used
+    # to watch changes to Docker log files. The kubelet creates symlinks that
+    # capture the pod name, namespace, container name & Docker container ID
+    # to the docker logs for pods in the /var/log/containers directory on the host.
+    # If running this fluentd configuration in a Docker container, the /var/log
+    # directory should be mounted in the container.
+    #
+    # These logs are then submitted to Elasticsearch which assumes the
+    # installation of the fluent-plugin-elasticsearch & the
+    # fluent-plugin-kubernetes_metadata_filter plugins.
+    # See https://github.com/uken/fluent-plugin-elasticsearch &
+    # https://github.com/fabric8io/fluent-plugin-kubernetes_metadata_filter for
+    # more information about the plugins.
+    #
+    # Example
+    # =======
+    # A line in the Docker log file might look like this JSON:
+    #
+    # {"log":"2014/09/25 21:15:03 Got request with path wombat\n",
+    #  "stream":"stderr",
+    #   "time":"2014-09-25T21:15:03.499185026Z"}
+    #
+    # The time_format specification below makes sure we properly
+    # parse the time format produced by Docker. This will be
+    # submitted to Elasticsearch and should appear like:
+    # $ curl 'http://elasticsearch-logging:9200/_search?pretty'
+    # ...
+    # {
+    #      "_index" : "logstash-2014.09.25",
+    #      "_type" : "fluentd",
+    #      "_id" : "VBrbor2QTuGpsQyTCdfzqA",
+    #      "_score" : 1.0,
+    #      "_source":{"log":"2014/09/25 22:45:50 Got request with path wombat\n",
+    #                 "stream":"stderr","tag":"docker.container.all",
+    #                 "@timestamp":"2014-09-25T22:45:50+00:00"}
+    #    },
+    # ...
+    #
+    # The Kubernetes fluentd plugin is used to write the Kubernetes metadata to the log
+    # record & add labels to the log record if properly configured. This enables users
+    # to filter & search logs on any metadata.
+    # For example a Docker container's logs might be in the directory:
+    #
+    #  /var/lib/docker/containers/997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b
+    #
+    # and in the file:
+    #
+    #  997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b-json.log
+    #
+    # where 997599971ee6... is the Docker ID of the running container.
+    # The Kubernetes kubelet makes a symbolic link to this file on the host machine
+    # in the /var/log/containers directory which includes the pod name and the Kubernetes
+    # container name:
+    #
+    #    synthetic-logger-0.25lps-pod_default_synth-lgr-997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b.log
+    #    ->
+    #    /var/lib/docker/containers/997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b/997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b-json.log
+    #
+    # The /var/log directory on the host is mapped to the /var/log directory in the container
+    # running this instance of Fluentd and we end up collecting the file:
+    #
+    #   /var/log/containers/synthetic-logger-0.25lps-pod_default_synth-lgr-997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b.log
+    #
+    # This results in the tag:
+    #
+    #  var.log.containers.synthetic-logger-0.25lps-pod_default_synth-lgr-997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b.log
+    #
+    # The Kubernetes fluentd plugin is used to extract the namespace, pod name & container name
+    # which are added to the log message as a kubernetes field object & the Docker container ID
+    # is also added under the docker field object.
+    # The final tag is:
+    #
+    #   kubernetes.var.log.containers.synthetic-logger-0.25lps-pod_default_synth-lgr-997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b.log
+    #
+    # And the final log record look like:
+    #
+    # {
+    #   "log":"2014/09/25 21:15:03 Got request with path wombat\n",
+    #   "stream":"stderr",
+    #   "time":"2014-09-25T21:15:03.499185026Z",
+    #   "kubernetes": {
+    #     "namespace": "default",
+    #     "pod_name": "synthetic-logger-0.25lps-pod",
+    #     "container_name": "synth-lgr"
+    #   },
+    #   "docker": {
+    #     "container_id": "997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b"
+    #   }
+    # }
+    #
+    # This makes it easier for users to search for logs by pod name or by
+    # the name of the Kubernetes container regardless of how many times the
+    # Kubernetes pod has been restarted (resulting in a several Docker container IDs).
+    # Json Log Example:
+    # {"log":"[info:2016-02-16T16:04:05.930-08:00] Some log text here\n","stream":"stdout","time":"2016-02-17T00:04:05.931087621Z"}
+    # CRI Log Example:
+    # 2016-02-17T00:04:05.931087621Z stdout F [info:2016-02-16T16:04:05.930-08:00] Some log text here
+    <source>
+      @id fluentd-containers.log
+      @type tail
+      path /var/log/containers/*.log
+      pos_file /var/log/containers.log.pos
+      tag raw.kubernetes.*
+      read_from_head true
+      <parse>
+        @type multi_format
+        <pattern>
+          format json
+          time_key time
+          time_format %Y-%m-%dT%H:%M:%S.%NZ
+        </pattern>
+        <pattern>
+          format /^(?<time>.+) (?<stream>stdout|stderr) [^ ]* (?<log>.*)$/
+          time_format %Y-%m-%dT%H:%M:%S.%N%:z
+        </pattern>
+      </parse>
+    </source>
+
+    # Detect exceptions in the log output and forward them as one log entry.
+    <match raw.kubernetes.**>
+      @id raw.kubernetes
+      @type detect_exceptions
+      remove_tag_prefix raw
+      message log
+      stream stream
+      multiline_flush_interval 5
+      max_bytes 500000
+      max_lines 1000
+    </match>
+
+    # Concatenate multi-line logs
+    <filter **>
+      @id filter_concat
+      @type concat
+      key message
+      multiline_end_regexp /\n$/
+      separator ""
+      timeout_label @NORMAL
+      flush_interval 5
+    </filter>
+
+    # Enriches records with Kubernetes metadata
+    <filter kubernetes.**>
+      @id filter_kubernetes_metadata
+      @type kubernetes_metadata
+    </filter>
+
+    # Fixes json fields in Elasticsearch
+    <filter kubernetes.**>
+      @id filter_parser
+      @type parser
+      key_name log
+      reserve_time true
+      reserve_data true
+      remove_key_name_field true
+      <parse>
+        @type multi_format
+        <pattern>
+          format json
+        </pattern>
+        <pattern>
+          format none
+        </pattern>
+      </parse>
+    </filter>
+{{- end }}
+
+{{- if .Values.configMaps.useDefaults.systemInputConf }}
+  system.input.conf: |-
+    # Example:
+    # 2015-12-21 23:17:22,066 [salt.state       ][INFO    ] Completed state [net.ipv4.ip_forward] at time 23:17:22.066081
+    <source>
+      @id minion
+      @type tail
+      format /^(?<time>[^ ]* [^ ,]*)[^\[]*\[[^\]]*\]\[(?<severity>[^ \]]*) *\] (?<message>.*)$/
+      time_format %Y-%m-%d %H:%M:%S
+      path /var/log/salt/minion
+      pos_file /var/log/salt.pos
+      tag salt
+    </source>
+
+    # Example:
+    # Dec 21 23:17:22 gke-foo-1-1-4b5cbd14-node-4eoj startupscript: Finished running startup script /var/run/google.startup.script
+    <source>
+      @id startupscript.log
+      @type tail
+      format syslog
+      path /var/log/startupscript.log
+      pos_file /var/log/startupscript.log.pos
+      tag startupscript
+    </source>
+
+    # Examples:
+    # time="2016-02-04T06:51:03.053580605Z" level=info msg="GET /containers/json"
+    # time="2016-02-04T07:53:57.505612354Z" level=error msg="HTTP Error" err="No such image: -f" statusCode=404
+    # TODO(random-liu): Remove this after cri container runtime rolls out.
+    <source>
+      @id docker.log
+      @type tail
+      format /time="(?<time>[^)]*)" level=(?<severity>[^ ]*) msg="(?<message>[^"]*)"( err="(?<error>[^"]*)")?( statusCode=(?<status_code>\d+))?/
+      path /var/log/docker.log
+      pos_file /var/log/docker.log.pos
+      tag docker
+    </source>
+
+    # Example:
+    # 2016/02/04 06:52:38 filePurge: successfully removed file /var/etcd/data/member/wal/00000000000006d0-00000000010a23d1.wal
+    <source>
+      @id etcd.log
+      @type tail
+      # Not parsing this, because it doesn't have anything particularly useful to
+      # parse out of it (like severities).
+      format none
+      path /var/log/etcd.log
+      pos_file /var/log/etcd.log.pos
+      tag etcd
+    </source>
+
+    # Multi-line parsing is required for all the kube logs because very large log
+    # statements, such as those that include entire object bodies, get split into
+    # multiple lines by glog.
+    # Example:
+    # I0204 07:32:30.020537    3368 server.go:1048] POST /stats/container/: (13.972191ms) 200 [[Go-http-client/1.1] 10.244.1.3:40537]
+    <source>
+      @id kubelet.log
+      @type tail
+      format multiline
+      multiline_flush_interval 5s
+      format_firstline /^\w\d{4}/
+      format1 /^(?<severity>\w)(?<time>\d{4} [^\s]*)\s+(?<pid>\d+)\s+(?<source>[^ \]]+)\] (?<message>.*)/
+      time_format %m%d %H:%M:%S.%N
+      path /var/log/kubelet.log
+      pos_file /var/log/kubelet.log.pos
+      tag kubelet
+    </source>
+
+    # Example:
+    # I1118 21:26:53.975789       6 proxier.go:1096] Port "nodePort for kube-system/default-http-backend:http" (:31429/tcp) was open before and is still needed
+    <source>
+      @id kube-proxy.log
+      @type tail
+      format multiline
+      multiline_flush_interval 5s
+      format_firstline /^\w\d{4}/
+      format1 /^(?<severity>\w)(?<time>\d{4} [^\s]*)\s+(?<pid>\d+)\s+(?<source>[^ \]]+)\] (?<message>.*)/
+      time_format %m%d %H:%M:%S.%N
+      path /var/log/kube-proxy.log
+      pos_file /var/log/kube-proxy.log.pos
+      tag kube-proxy
+    </source>
+
+    # Example:
+    # I0204 07:00:19.604280       5 handlers.go:131] GET /api/v1/nodes: (1.624207ms) 200 [[kube-controller-manager/v1.1.3 (linux/amd64) kubernetes/6a81b50] 127.0.0.1:38266]
+    <source>
+      @id kube-apiserver.log
+      @type tail
+      format multiline
+      multiline_flush_interval 5s
+      format_firstline /^\w\d{4}/
+      format1 /^(?<severity>\w)(?<time>\d{4} [^\s]*)\s+(?<pid>\d+)\s+(?<source>[^ \]]+)\] (?<message>.*)/
+      time_format %m%d %H:%M:%S.%N
+      path /var/log/kube-apiserver.log
+      pos_file /var/log/kube-apiserver.log.pos
+      tag kube-apiserver
+    </source>
+
+    # Example:
+    # I0204 06:55:31.872680       5 servicecontroller.go:277] LB already exists and doesn't need update for service kube-system/kube-ui
+    <source>
+      @id kube-controller-manager.log
+      @type tail
+      format multiline
+      multiline_flush_interval 5s
+      format_firstline /^\w\d{4}/
+      format1 /^(?<severity>\w)(?<time>\d{4} [^\s]*)\s+(?<pid>\d+)\s+(?<source>[^ \]]+)\] (?<message>.*)/
+      time_format %m%d %H:%M:%S.%N
+      path /var/log/kube-controller-manager.log
+      pos_file /var/log/kube-controller-manager.log.pos
+      tag kube-controller-manager
+    </source>
+
+    # Example:
+    # W0204 06:49:18.239674       7 reflector.go:245] pkg/scheduler/factory/factory.go:193: watch of *api.Service ended with: 401: The event in requested index is outdated and cleared (the requested history has been cleared [2578313/2577886]) [2579312]
+    <source>
+      @id kube-scheduler.log
+      @type tail
+      format multiline
+      multiline_flush_interval 5s
+      format_firstline /^\w\d{4}/
+      format1 /^(?<severity>\w)(?<time>\d{4} [^\s]*)\s+(?<pid>\d+)\s+(?<source>[^ \]]+)\] (?<message>.*)/
+      time_format %m%d %H:%M:%S.%N
+      path /var/log/kube-scheduler.log
+      pos_file /var/log/kube-scheduler.log.pos
+      tag kube-scheduler
+    </source>
+
+    # Example:
+    # I0603 15:31:05.793605       6 cluster_manager.go:230] Reading config from path /etc/gce.conf
+    <source>
+      @id glbc.log
+      @type tail
+      format multiline
+      multiline_flush_interval 5s
+      format_firstline /^\w\d{4}/
+      format1 /^(?<severity>\w)(?<time>\d{4} [^\s]*)\s+(?<pid>\d+)\s+(?<source>[^ \]]+)\] (?<message>.*)/
+      time_format %m%d %H:%M:%S.%N
+      path /var/log/glbc.log
+      pos_file /var/log/glbc.log.pos
+      tag glbc
+    </source>
+
+    # Example:
+    # TODO Add a proper example here.
+    <source>
+      @id cluster-autoscaler.log
+      @type tail
+      format multiline
+      multiline_flush_interval 5s
+      format_firstline /^\w\d{4}/
+      format1 /^(?<severity>\w)(?<time>\d{4} [^\s]*)\s+(?<pid>\d+)\s+(?<source>[^ \]]+)\] (?<message>.*)/
+      time_format %m%d %H:%M:%S.%N
+      path /var/log/cluster-autoscaler.log
+      pos_file /var/log/cluster-autoscaler.log.pos
+      tag cluster-autoscaler
+    </source>
+
+    # Logs from systemd-journal for interesting services.
+    # TODO(random-liu): Remove this after cri container runtime rolls out.
+    <source>
+      @id journald-docker
+      @type systemd
+      matches [{ "_SYSTEMD_UNIT": "docker.service" }]
+      <storage>
+        @type local
+        persistent true
+        path /var/log/journald-docker.pos
+      </storage>
+      <entry>
+        fields_strip_underscores true
+        fields_lowercase true
+      </entry>
+      read_from_head true
+      tag docker
+    </source>
+
+    <source>
+      @id journald-container-runtime
+      @type systemd
+      matches [{ "_SYSTEMD_UNIT": "{{"{{ fluentd_container_runtime_service }}"}}.service" }]
+      <storage>
+        @type local
+        persistent true
+        path /var/log/journald-container-runtime.pos
+      </storage>
+      <entry>
+        fields_strip_underscores true
+        fields_lowercase true
+      </entry>
+      read_from_head true
+      tag container-runtime
+    </source>
+
+    <source>
+      @id journald-kubelet
+      @type systemd
+      matches [{ "_SYSTEMD_UNIT": "kubelet.service" }]
+      <storage>
+        @type local
+        persistent true
+        path /var/log/journald-kubelet.pos
+      </storage>
+      <entry>
+        fields_strip_underscores true
+        fields_lowercase true
+      </entry>
+      read_from_head true
+      tag kubelet
+    </source>
+
+    <source>
+      @id journald-node-problem-detector
+      @type systemd
+      matches [{ "_SYSTEMD_UNIT": "node-problem-detector.service" }]
+      <storage>
+        @type local
+        persistent true
+        path /var/log/journald-node-problem-detector.pos
+      </storage>
+      <entry>
+        fields_strip_underscores true
+        fields_lowercase true
+      </entry>
+      read_from_head true
+      tag node-problem-detector
+    </source>
+
+    <source>
+      @id kernel
+      @type systemd
+      matches [{ "_TRANSPORT": "kernel" }]
+      <storage>
+        @type local
+        persistent true
+        path /var/log/kernel.pos
+      </storage>
+      <entry>
+        fields_strip_underscores true
+        fields_lowercase true
+      </entry>
+      read_from_head true
+      tag kernel
+    </source>
+{{- end }}
+
+{{- if .Values.configMaps.useDefaults.forwardInputConf }}
+  forward.input.conf: |-
+    # Takes the messages sent over TCP
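+    # (With no bind/port configured, in_forward listens on 0.0.0.0:24224 by
+    # default.)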
+    <source>
+      @id forward
+      @type forward
+    </source>
+{{- end }}
+
+{{- if .Values.configMaps.useDefaults.monitoringConf }}
+  monitoring.conf: |-
+    # Prometheus Exporter Plugin
+    # input plugin that exports metrics
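+    # (Serves metrics on port 24231 at /metrics by default, matching the
+    # serviceMonitor defaults in values.yaml.)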
+    <source>
+      @id prometheus
+      @type prometheus
+    </source>
+
+    <source>
+      @id monitor_agent
+      @type monitor_agent
+    </source>
+
+    # input plugin that collects metrics from MonitorAgent
+    <source>
+      @id prometheus_monitor
+      @type prometheus_monitor
+      <labels>
+        host ${hostname}
+      </labels>
+    </source>
+
+    # input plugin that collects metrics for output plugin
+    <source>
+      @id prometheus_output_monitor
+      @type prometheus_output_monitor
+      <labels>
+        host ${hostname}
+      </labels>
+    </source>
+
+    # input plugin that collects metrics for in_tail plugin
+    <source>
+      @id prometheus_tail_monitor
+      @type prometheus_tail_monitor
+      <labels>
+        host ${hostname}
+      </labels>
+    </source>
+{{- end }}
+
+{{- if .Values.configMaps.useDefaults.outputConf }}
+  output.conf: |-
+    # handle timeout log lines from concat plugin
+    <match **>
+      @type relabel
+      @label @NORMAL
+    </match>
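+    # The catch-all relabel above routes every event into @NORMAL, so records
+    # flushed by a concat filter's timeout_label (if one is configured upstream)
+    # share the same elasticsearch output.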
+
+    <label @NORMAL>
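+    # Values written as "#{ENV['...']}" are Ruby string interpolation, which
+    # fluentd evaluates at startup; the daemonset injects these variables.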
+    <match **>
+      @id elasticsearch
+      @type "#{ENV['OUTPUT_TYPE']}"
+      @log_level "#{ENV['OUTPUT_LOG_LEVEL']}"
+      include_tag_key "#{ENV['OUTPUT_INCLUDE_TAG_KEY']}"
+      hosts "#{ENV['OUTPUT_HOSTS']}"
+      path "#{ENV['OUTPUT_PATH']}"
+      scheme "#{ENV['OUTPUT_SCHEME']}"
+      ssl_verify "#{ENV['OUTPUT_SSL_VERIFY']}"
+      ssl_version "#{ENV['OUTPUT_SSL_VERSION']}"
+{{- if (ne .Values.elasticsearch.typeName "") }}
+      type_name "#{ENV['OUTPUT_TYPE_NAME']}"
+{{- end }}
+{{- if .Values.elasticsearch.auth.enabled }}
+      user "#{ENV['OUTPUT_USER']}"
+      password "#{ENV['OUTPUT_PASSWORD']}"
+{{- end }}
+{{- if .Values.elasticsearch.logstash.enabled }}
+      logstash_format "#{ENV['LOGSTASH_FORMAT']}"
+      logstash_prefix "#{ENV['LOGSTASH_PREFIX']}"
+{{- else }}
+      logstash_format "#{ENV['LOGSTASH_FORMAT']}"
+{{- end }}
+      reconnect_on_error "#{ENV['OUTPUT_RECONNECT_ON_ERROR']}"
+      reload_on_failure "#{ENV['OUTPUT_RELOAD_ON_FAILURE']}"
+      reload_connections "#{ENV['OUTPUT_RELOAD_CONNECTIONS']}"
+{{- if .Values.elasticsearch.buffer.enabled }}
+      <buffer>
+        @type "#{ENV['OUTPUT_BUFFER_TYPE']}"
+        path "#{ENV['OUTPUT_BUFFER_PATH']}"
+        flush_mode "#{ENV['OUTPUT_BUFFER_FLUSH_MODE']}"
+        retry_type "#{ENV['OUTPUT_BUFFER_RETRY_TYPE']}"
+        flush_thread_count "#{ENV['OUTPUT_BUFFER_FLUSH_THREAD_TYPE']}"
+        flush_interval "#{ENV['OUTPUT_BUFFER_FLUSH_INTERVAL']}"
+        retry_forever "#{ENV['OUTPUT_BUFFER_RETRY_FOREVER']}"
+        retry_max_interval "#{ENV['OUTPUT_BUFFER_RETRY_MAX_INTERVAL']}"
+        chunk_limit_size "#{ENV['OUTPUT_BUFFER_CHUNK_LIMIT']}"
+        queue_limit_length "#{ENV['OUTPUT_BUFFER_QUEUE_LIMIT']}"
+        overflow_action "#{ENV['OUTPUT_BUFFER_OVERFLOW_ACTION']}"
+      </buffer>
+{{- end }}
+    </match>
+    </label>
+{{- end }}
+
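+# Each extra entry below is rendered as a file under /etc/fluent/config.d via
+# the config-volume mount in the daemonset.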
+{{- range $key, $value := .Values.extraConfigMaps }}
+  {{ $key }}: |-
+{{ $value | indent 4 }}
+{{- end }}
diff --git a/kubernetes/helm_charts/logging/fluentd-elasticsearch/templates/daemonset.yaml b/kubernetes/helm_charts/logging/fluentd-elasticsearch/templates/daemonset.yaml
new file mode 100644
index 000000000..a9f47bc2a
--- /dev/null
+++ b/kubernetes/helm_charts/logging/fluentd-elasticsearch/templates/daemonset.yaml
@@ -0,0 +1,218 @@
+apiVersion: apps/v1
+kind: DaemonSet
+metadata:
+  name: {{ include "fluentd-elasticsearch.fullname" . }}
+  labels:
+{{ include "fluentd-elasticsearch.labels" . | indent 4 }}
+    {{- if semverCompare "> 1.6" .Capabilities.KubeVersion.GitVersion }}
+    kubernetes.io/cluster-service: "true"
+    {{- end }}
+    addonmanager.kubernetes.io/mode: Reconcile
+{{- if .Values.annotations }}
+  annotations:
+{{ toYaml .Values.annotations | indent 4 }}
+{{- end }}
+spec:
+  updateStrategy:
+{{ toYaml .Values.updateStrategy | indent 4 }}
+  selector:
+    matchLabels:
+      app.kubernetes.io/name: {{ include "fluentd-elasticsearch.name" . }}
+      app.kubernetes.io/instance: {{ .Release.Name }}
+  template:
+    metadata:
+      labels:
+{{ include "fluentd-elasticsearch.labels" . | indent 8 }}
+        {{- if semverCompare "> 1.6" .Capabilities.KubeVersion.GitVersion }}
+        kubernetes.io/cluster-service: "true"
+        {{- end }}
+      annotations:
+        {{- if semverCompare "< 1.13" .Capabilities.KubeVersion.GitVersion }}
+        # This annotation ensures that fluentd does not get evicted if the node
+        # supports critical pod annotation based priority scheme.
+        # Note that this does not guarantee admission on the nodes (#40573).
+        # NB! this annotation is deprecated as of version 1.13 and will be removed in 1.14.
+        # ref: https://kubernetes.io/docs/tasks/administer-cluster/guaranteed-scheduling-critical-addon-pods/
+        scheduler.alpha.kubernetes.io/critical-pod: ''
+        {{- end }}
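+        # Hashing the rendered configmaps forces a rolling restart of the pods
+        # whenever the fluentd configuration changes.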
+        checksum/config: {{ include (print $.Template.BasePath "/configmaps.yaml") . | sha256sum }}
+{{- if .Values.podAnnotations }}
+{{ toYaml .Values.podAnnotations | indent 8 }}
+{{- end }}
+    spec:
+      serviceAccountName: {{ include "fluentd-elasticsearch.fullname" . }}
+      {{- if .Values.priorityClassName }}
+      priorityClassName: {{ .Values.priorityClassName | quote }}
+      {{- end }}
+      {{- if .Values.image.pullSecrets }}
+      imagePullSecrets:
+      {{- range .Values.image.pullSecrets }}
+        - name: {{ . }}
+      {{- end }}
+      {{- end }}
+      containers:
+      - name: {{ include "fluentd-elasticsearch.fullname" . }}
+        image:  "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
+        imagePullPolicy: {{ .Values.image.pullPolicy | quote }}
+        env:
+        - name: FLUENTD_ARGS
+          value: {{ .Values.fluentdArgs | quote }}
+        - name: OUTPUT_HOSTS
+          {{- if .Values.awsSigningSidecar.enabled }}
+          value: "{{ .Values.awsSigningSidecar.network.address }}:{{ .Values.awsSigningSidecar.network.port }}"
+          {{- else }}
+          value: "{{- join "," .Values.elasticsearch.hosts }}"
+          {{- end }}
+        - name: OUTPUT_PATH
+          value: {{ .Values.elasticsearch.path | quote }}
+{{- if .Values.elasticsearch.auth.enabled }}
+        - name: OUTPUT_USER
+          value: {{ .Values.elasticsearch.auth.user | quote }}
+{{- if .Values.elasticsearch.auth.password }}
+        - name: OUTPUT_PASSWORD
+          value: {{ .Values.elasticsearch.auth.password | quote }}
+{{- end }}
+{{- end }}
+        - name: LOGSTASH_PREFIX
+          value: {{ .Values.elasticsearch.logstash.prefix | quote }}
+        - name: LOGSTASH_FORMAT
+          value: {{ .Values.elasticsearch.logstash.enabled | quote }}
+        - name: OUTPUT_SCHEME
+          {{- if .Values.awsSigningSidecar.enabled }}
+          value: 'http'
+          {{- else }}
+          value: {{ .Values.elasticsearch.scheme | quote }}
+          {{- end }}
+        - name: OUTPUT_TYPE
+          value: {{ .Values.elasticsearch.outputType | quote }}
+        - name: OUTPUT_SSL_VERIFY
+          value: {{ .Values.elasticsearch.sslVerify | quote }}
+        - name: OUTPUT_SSL_VERSION
+          value: {{ .Values.elasticsearch.sslVersion | quote }}
+        - name: OUTPUT_TYPE_NAME
+          value: {{ .Values.elasticsearch.typeName | quote }}
+        - name: OUTPUT_BUFFER_CHUNK_LIMIT
+          value: {{ .Values.elasticsearch.buffer.chunkLimitSize | quote }}
+        - name: OUTPUT_BUFFER_QUEUE_LIMIT
+          value: {{ .Values.elasticsearch.buffer.queueLimitLength | quote }}
+        - name: OUTPUT_BUFFER_TYPE
+          value: {{ .Values.elasticsearch.buffer.type | quote }}
+        - name: OUTPUT_BUFFER_PATH
+          value: {{ .Values.elasticsearch.buffer.path | quote }}
+        - name: OUTPUT_BUFFER_FLUSH_MODE
+          value: {{ .Values.elasticsearch.buffer.flushMode | quote }}
+        - name: OUTPUT_BUFFER_RETRY_TYPE
+          value: {{ .Values.elasticsearch.buffer.retryType | quote }}
+        - name: OUTPUT_BUFFER_FLUSH_THREAD_TYPE
+          value: {{ .Values.elasticsearch.buffer.flushThreadCount | quote }}
+        - name: OUTPUT_BUFFER_FLUSH_INTERVAL
+          value: {{ .Values.elasticsearch.buffer.flushInterval | quote }}
+        - name: OUTPUT_BUFFER_RETRY_FOREVER
+          value: {{ .Values.elasticsearch.buffer.retryForever | quote }}
+        - name: OUTPUT_BUFFER_RETRY_MAX_INTERVAL
+          value: {{ .Values.elasticsearch.buffer.retryMaxInterval | quote }}
+        - name: OUTPUT_BUFFER_OVERFLOW_ACTION
+          value: {{ .Values.elasticsearch.buffer.overflowAction | quote }}
+        - name: OUTPUT_LOG_LEVEL
+          value: {{ .Values.elasticsearch.logLevel | quote }}
+        - name: OUTPUT_INCLUDE_TAG_KEY
+          value: {{ .Values.elasticsearch.includeTagKey | quote }}
+        - name: OUTPUT_RECONNECT_ON_ERROR
+          value: {{ .Values.elasticsearch.reconnectOnError | quote }}
+        - name: OUTPUT_RELOAD_ON_FAILURE
+          value: {{ .Values.elasticsearch.reloadOnFailure | quote }}
+        - name: OUTPUT_RELOAD_CONNECTIONS
+          value: {{ .Values.elasticsearch.reloadConnections | quote }}
+        {{- if .Values.env }}
+        {{- range $key, $value := .Values.env }}
+        - name: {{ $key }}
+          value: {{ $value | quote }}
+        {{- end }}
+        {{- end }}
+        {{- if .Values.secret }}
+        {{- range $key, $value := .Values.secret }}
+        - name: {{ $value.name }}
+          valueFrom:
+            secretKeyRef:
+              name: {{ $value.secret_name }}
+              key: {{ $value.secret_key | quote }}
+        {{- end }}
+        {{- end }}
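+        # Downward API: expose the node name to fluentd (useful, for example,
+        # for node-scoped filtering or metadata).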
+        - name: K8S_NODE_NAME
+          valueFrom:
+            fieldRef:
+              fieldPath: spec.nodeName
+        resources:
+{{ toYaml .Values.resources | indent 10 }}
+        volumeMounts:
+        - name: varlog
+          mountPath: {{ .Values.hostLogDir.varLog }}
+        - name: varlibdockercontainers
+          mountPath: {{ .Values.hostLogDir.dockerContainers }}
+          readOnly: true
+        - name: libsystemddir
+          mountPath: {{ .Values.hostLogDir.libSystemdDir }}
+          readOnly: true
+        - name: config-volume
+          mountPath: /etc/fluent/config.d
+{{- if .Values.extraVolumeMounts }}
+{{ toYaml .Values.extraVolumeMounts | indent 8 }}
+{{- end }}
+      {{- if .Values.livenessProbe.enabled }}
+        # The probe body is taken from .Values.livenessProbe.kind (see values.yaml).
+        livenessProbe:
+          initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds }}
+          periodSeconds: {{ .Values.livenessProbe.periodSeconds }}
+{{ toYaml .Values.livenessProbe.kind | indent 10 }}
+{{- end }}
+        ports:
+{{- range $port := .Values.service.ports }}
+          - name: {{ $port.name }}
+            containerPort: {{ $port.port }}
+{{- if $port.protocol }}
+            protocol: {{ $port.protocol }}
+{{- end }}
+{{- end }}
+      {{- if .Values.awsSigningSidecar.enabled }}
+      - name: {{ include "fluentd-elasticsearch.fullname" . }}-aws-es-proxy
+        image: {{ .Values.awsSigningSidecar.image.repository }}:{{ .Values.awsSigningSidecar.image.tag }}
+        imagePullPolicy: {{ .Values.image.pullPolicy | quote }}
+        args: ["-endpoint", "{{ .Values.elasticsearch.scheme }}://{{ index .Values.elasticsearch.hosts 0 }}",
+               "-listen",   "{{ .Values.awsSigningSidecar.network.address }}:{{ .Values.awsSigningSidecar.network.port }}",
+               "-timeout",  "{{ .Values.awsSigningSidecar.network.remoteReadTimeoutSeconds }}"]
+        env:
+        - name: PORT_NUM
+          value: {{ .Values.awsSigningSidecar.network.port | quote }}
+        resources:
+{{ toYaml .Values.awsSigningSidecar.resources | indent 10 }}
+      {{- end }}
+      terminationGracePeriodSeconds: 30
+      volumes:
+      - name: varlog
+        hostPath:
+          path: {{ .Values.hostLogDir.varLog }}
+      - name: varlibdockercontainers
+        hostPath:
+          path: {{ .Values.hostLogDir.dockerContainers }}
+      # It is needed to copy systemd library to decompress journals
+      - name: libsystemddir
+        hostPath:
+          path: {{ .Values.hostLogDir.libSystemdDir }}
+      - name: config-volume
+        configMap:
+          name: {{ include "fluentd-elasticsearch.fullname" . }}
+{{- if .Values.extraVolumes }}
+{{ toYaml .Values.extraVolumes | indent 6 }}
+{{- end }}
+{{- if .Values.affinity }}
+      affinity:
+{{ toYaml .Values.affinity | indent 8 }}
+{{- end }}
+{{- if .Values.nodeSelector }}
+      nodeSelector:
+{{ toYaml .Values.nodeSelector | indent 8 }}
+{{- end }}
+{{- if .Values.tolerations }}
+      tolerations:
+{{ toYaml .Values.tolerations | indent 6 }}
+{{- end }}
diff --git a/kubernetes/helm_charts/logging/fluentd-elasticsearch/templates/metrics-service.yaml b/kubernetes/helm_charts/logging/fluentd-elasticsearch/templates/metrics-service.yaml
new file mode 100644
index 000000000..13233b17c
--- /dev/null
+++ b/kubernetes/helm_charts/logging/fluentd-elasticsearch/templates/metrics-service.yaml
@@ -0,0 +1,22 @@
+{{- if or (.Values.serviceMonitor.enabled) (.Values.serviceMetric.enabled) }}
+---
+apiVersion: v1
+kind: Service
+metadata:
+  name: {{ include "fluentd-elasticsearch.fullname" $ }}-metrics
+  labels:
+{{ include "fluentd-elasticsearch.labels" . | indent 4 }}
+    {{- if semverCompare "> 1.6" .Capabilities.KubeVersion.GitVersion }}
+    kubernetes.io/cluster-service: "true"
+    {{- end }}
+    addonmanager.kubernetes.io/mode: Reconcile
+spec:
+  type: {{ .Values.serviceMonitor.type }}
+  ports:
+    - name: metrics
+      port: {{ .Values.serviceMonitor.port }}
+      targetPort: {{ .Values.serviceMonitor.port }}
+  selector:
+    app.kubernetes.io/name: {{ include "fluentd-elasticsearch.name" . }}
+    app.kubernetes.io/instance: {{ .Release.Name }}
+{{- end }}
diff --git a/kubernetes/helm_charts/logging/fluentd-elasticsearch/templates/pod-security-policy.yaml b/kubernetes/helm_charts/logging/fluentd-elasticsearch/templates/pod-security-policy.yaml
new file mode 100644
index 000000000..36d17fae4
--- /dev/null
+++ b/kubernetes/helm_charts/logging/fluentd-elasticsearch/templates/pod-security-policy.yaml
@@ -0,0 +1,55 @@
+{{- if .Values.podSecurityPolicy.enabled }}
+{{- if semverCompare "> 1.15" .Capabilities.KubeVersion.GitVersion -}}
+apiVersion: policy/v1beta1
+{{- else -}}
+apiVersion: extensions/v1beta1
+{{- end }}
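+# PodSecurityPolicy graduated from extensions/v1beta1 to policy/v1beta1, hence
+# the apiVersion switch above.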
+kind: PodSecurityPolicy
+metadata:
+  name: {{ template "fluentd-elasticsearch.fullname" . }}
+  labels:
+{{ include "fluentd-elasticsearch.labels" . | indent 4 }}
+    {{- if semverCompare "> 1.6" .Capabilities.KubeVersion.GitVersion }}
+    kubernetes.io/cluster-service: "true"
+    {{- end }}
+    addonmanager.kubernetes.io/mode: Reconcile
+  annotations:
+{{- if .Values.podSecurityPolicy.annotations }}
+{{ toYaml .Values.podSecurityPolicy.annotations | indent 4 }}
+{{- end }}
+spec:
+  privileged: false
+  allowPrivilegeEscalation: true
+  volumes:
+    - 'configMap'
+    - 'emptyDir'
+    - 'hostPath'
+    - 'secret'
+  allowedHostPaths:
+    - pathPrefix: {{ .Values.hostLogDir.varLog }}
+      readOnly: false
+    - pathPrefix: {{ .Values.hostLogDir.dockerContainers }}
+      readOnly: true
+    - pathPrefix: {{ .Values.hostLogDir.libSystemdDir }}
+      readOnly: true
+  hostNetwork: false
+  hostPID: false
+  hostIPC: false
+  runAsUser:
+    rule: 'RunAsAny'
+  runAsGroup:
+    rule: 'RunAsAny'
+  seLinux:
+    rule: 'RunAsAny'
+  supplementalGroups:
+    rule: 'RunAsAny'
+  fsGroup:
+    rule: 'MustRunAs'
+    ranges:
+      - min: 1
+        max: 65535
+  readOnlyRootFilesystem: false
+  hostPorts:
+    - min: 1
+      max: 65535
+{{- end }}
diff --git a/kubernetes/helm_charts/logging/fluentd-elasticsearch/templates/prometheusrule.yaml b/kubernetes/helm_charts/logging/fluentd-elasticsearch/templates/prometheusrule.yaml
new file mode 100644
index 000000000..f35b6ab8f
--- /dev/null
+++ b/kubernetes/helm_charts/logging/fluentd-elasticsearch/templates/prometheusrule.yaml
@@ -0,0 +1,70 @@
+{{- if .Values.prometheusRule.enabled }}
+apiVersion: monitoring.coreos.com/v1
+kind: PrometheusRule
+metadata:
+  name: {{ template "fluentd-elasticsearch.fullname" . }}
+  labels:
+{{ include "fluentd-elasticsearch.labels" . | indent 4 }}
+    {{- if semverCompare "> 1.6" .Capabilities.KubeVersion.GitVersion }}
+    kubernetes.io/cluster-service: "true"
+    {{- end }}
+    addonmanager.kubernetes.io/mode: Reconcile
+    {{- if .Values.prometheusRule.labels }}
+    {{- toYaml .Values.prometheusRule.labels | nindent 4 }}
+    {{- end }}
+  namespace: {{ .Values.prometheusRule.prometheusNamespace }}
+spec:
+  groups:
+  - name: fluentd
+    rules:
+    - alert: FluentdNodeDown
+      expr: up{job="{{ include "fluentd-elasticsearch.fullname" . }}"} == 0
+      for: 10m
+      labels:
+        service: fluentd
+        severity: warning
+      annotations:
+        summary: fluentd cannot be scraped
+        description: Prometheus could not scrape {{ "{{ $labels.job }}" }} for more than 10 minutes
+
+    - alert: FluentdNodeDown
+      expr: up{job="{{ include "fluentd-elasticsearch.fullname" . }}"} == 0
+      for: 30m
+      labels:
+        service: fluentd
+        severity: critical
+      annotations:
+        summary: fluentd cannot be scraped
+        description: Prometheus could not scrape {{ "{{ $labels.job }}" }} for more than 30 minutes
+
+    - alert: FluentdQueueLength
+      expr: rate(fluentd_status_buffer_queue_length[5m]) > 0.3
+      for: 1m
+      labels:
+        service: fluentd
+        severity: warning
+      annotations:
+        summary: fluentd buffer queue is growing
+        description: In the last 5 minutes, the fluentd buffer queue length grew at a per-second rate above 0.3. Current value is {{ "{{ $value }}" }}
+
+    - alert: FluentdQueueLength
+      expr: rate(fluentd_status_buffer_queue_length[5m]) > 0.5
+      for: 1m
+      labels:
+        service: fluentd
+        severity: critical
+      annotations:
+        summary: fluentd buffer queue growth is critical
+        description: In the last 5 minutes, the fluentd buffer queue length grew at a per-second rate above 0.5. Current value is {{ "{{ $value }}" }}
+
+    - alert: FluentdRecordsCountsHigh
+      expr: sum(rate(fluentd_output_status_emit_records{job="{{ .Release.Name }}"}[5m])) BY (instance) > (3 * sum(rate(fluentd_output_status_emit_records{job="{{ .Release.Name }}"}[15m])) BY (instance))
+      for: 1m
+      labels:
+        service: fluentd
+        severity: critical
+      annotations:
+        summary: fluentd record emit rate is unusually high
+        description: In the last 5 minutes, the record emit rate more than tripled compared to the average over the last 15 minutes.
+
+{{- end }}
diff --git a/kubernetes/helm_charts/logging/fluentd-elasticsearch/templates/role.yaml b/kubernetes/helm_charts/logging/fluentd-elasticsearch/templates/role.yaml
new file mode 100644
index 000000000..4f0aab09a
--- /dev/null
+++ b/kubernetes/helm_charts/logging/fluentd-elasticsearch/templates/role.yaml
@@ -0,0 +1,22 @@
+{{- if .Values.podSecurityPolicy.enabled }}
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: Role
+metadata:
+  name: {{ template "fluentd-elasticsearch.fullname" . }}
+  labels:
+{{ include "fluentd-elasticsearch.labels" . | indent 4 }}
+    {{- if semverCompare "> 1.6" .Capabilities.KubeVersion.GitVersion }}
+    kubernetes.io/cluster-service: "true"
+    {{- end }}
+    addonmanager.kubernetes.io/mode: Reconcile
+rules:
+{{- if semverCompare "> 1.15" .Capabilities.KubeVersion.GitVersion }}
+- apiGroups: ['policy']
+{{- else }}
+- apiGroups: ['extensions']
+{{- end }}
+  resources: ['podsecuritypolicies']
+  verbs:     ['use']
+  resourceNames:
+  - {{ template "fluentd-elasticsearch.fullname" . }}
+{{- end }}
diff --git a/kubernetes/helm_charts/logging/fluentd-elasticsearch/templates/rolebinding.yaml b/kubernetes/helm_charts/logging/fluentd-elasticsearch/templates/rolebinding.yaml
new file mode 100644
index 000000000..af79f669e
--- /dev/null
+++ b/kubernetes/helm_charts/logging/fluentd-elasticsearch/templates/rolebinding.yaml
@@ -0,0 +1,21 @@
+{{- if .Values.podSecurityPolicy.enabled }}
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: RoleBinding
+metadata:
+  name: {{ template "fluentd-elasticsearch.fullname" . }}
+  labels:
+{{ include "fluentd-elasticsearch.labels" . | indent 4 }}
+    {{- if semverCompare "> 1.6" .Capabilities.KubeVersion.GitVersion }}
+    kubernetes.io/cluster-service: "true"
+    {{- end }}
+    addonmanager.kubernetes.io/mode: Reconcile
+roleRef:
+  kind: Role
+  name: {{ template "fluentd-elasticsearch.fullname" . }}
+  apiGroup: rbac.authorization.k8s.io
+subjects:
+- kind: ServiceAccount
+  name: {{ template "fluentd-elasticsearch.fullname" . }}
+  namespace: {{ .Release.Namespace }}
+{{- end }}
diff --git a/kubernetes/helm_charts/logging/fluentd-elasticsearch/templates/service-account.yaml b/kubernetes/helm_charts/logging/fluentd-elasticsearch/templates/service-account.yaml
new file mode 100644
index 000000000..615d14720
--- /dev/null
+++ b/kubernetes/helm_charts/logging/fluentd-elasticsearch/templates/service-account.yaml
@@ -0,0 +1,19 @@
+{{- if .Values.serviceAccount.create -}}
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: {{ if .Values.serviceAccount.name }}{{ .Values.serviceAccount.name }}{{ else }}{{ include "fluentd-elasticsearch.fullname" . }}{{ end }}
+  labels:
+    app.kubernetes.io/name: {{ include "fluentd-elasticsearch.name" . }}
+    helm.sh/chart: {{ include "fluentd-elasticsearch.chart" . }}
+    app.kubernetes.io/instance: {{ .Release.Name }}
+    app.kubernetes.io/managed-by: {{ .Release.Service }}
+    {{- if semverCompare "> 1.6" .Capabilities.KubeVersion.GitVersion }}
+    kubernetes.io/cluster-service: "true"
+    {{- end }}
+    addonmanager.kubernetes.io/mode: Reconcile
+  annotations:
+{{- if .Values.serviceAccount.annotations }}
+{{ toYaml .Values.serviceAccount.annotations | indent 4 }}
+{{- end }}
+{{- end -}}
diff --git a/kubernetes/helm_charts/logging/fluentd-elasticsearch/templates/service.yaml b/kubernetes/helm_charts/logging/fluentd-elasticsearch/templates/service.yaml
new file mode 100644
index 000000000..9bf2bae58
--- /dev/null
+++ b/kubernetes/helm_charts/logging/fluentd-elasticsearch/templates/service.yaml
@@ -0,0 +1,31 @@
+{{- if .Values.service }}
+{{- range $port := .Values.service.ports  }}
+{{- $service_type := $port.type | default "ClusterIP" }}
+---
+apiVersion: v1
+kind: Service
+metadata:
+  name: {{ include "fluentd-elasticsearch.fullname" $ | trunc 50 }}-{{ $port.name | trunc 12 }}
+  labels:
+{{ include "fluentd-elasticsearch.labels" $ | indent 4 }}
+    {{- if semverCompare "> 1.6" $.Capabilities.KubeVersion.GitVersion }}
+    kubernetes.io/cluster-service: "true"
+    {{- end }}
+    addonmanager.kubernetes.io/mode: Reconcile
+spec:
+  type: {{ $service_type }}
+  ports:
+    - name: {{ $port.name }}
+      port: {{ $port.port }}
+      targetPort: {{ $port.port }}
+      {{- if and ($port.nodePort) (eq $service_type "NodePort") }}
+      nodePort: {{ $port.nodePort }}
+      {{- end }}
+      {{- if $port.protocol }}
+      protocol: {{ $port.protocol }}
+      {{- end }}
+  selector:
+    app.kubernetes.io/name: {{ include "fluentd-elasticsearch.name" $ }}
+    app.kubernetes.io/instance: {{ $.Release.Name }}
+{{- end }}
+{{- end }}
diff --git a/kubernetes/helm_charts/logging/fluentd-elasticsearch/templates/servicemonitor.yaml b/kubernetes/helm_charts/logging/fluentd-elasticsearch/templates/servicemonitor.yaml
new file mode 100644
index 000000000..66f6d34ae
--- /dev/null
+++ b/kubernetes/helm_charts/logging/fluentd-elasticsearch/templates/servicemonitor.yaml
@@ -0,0 +1,37 @@
+{{- if .Values.serviceMonitor.enabled }}
+apiVersion: monitoring.coreos.com/v1
+kind: ServiceMonitor
+metadata:
+  name: {{ template "fluentd-elasticsearch.fullname" . }}
+  labels:
+{{ include "fluentd-elasticsearch.labels" . | indent 4 }}
+    {{- if semverCompare "> 1.6" .Capabilities.KubeVersion.GitVersion }}
+    kubernetes.io/cluster-service: "true"
+    {{- end }}
+    addonmanager.kubernetes.io/mode: Reconcile
+    {{- if .Values.serviceMonitor.labels }}
+    {{- toYaml .Values.serviceMonitor.labels | nindent 4 }}
+    {{- end }}
+spec:
+  endpoints:
+  - interval: {{ .Values.serviceMonitor.interval }}
+    honorLabels: true
+    port: metrics
+    path: {{ .Values.serviceMonitor.path }}
+    {{- if .Values.serviceMonitor.metricRelabelings }}
+    metricRelabelings:
+    {{- toYaml .Values.serviceMonitor.metricRelabelings | nindent 4 }}
+    {{- end }}
+    {{- if .Values.serviceMonitor.relabelings }}
+    relabelings:
+    {{- toYaml .Values.serviceMonitor.relabelings | nindent 4 }}
+    {{- end }}
+  jobLabel: {{ .Values.serviceMonitor.jobLabel }}
+  selector:
+    matchLabels:
+      app.kubernetes.io/name: {{ template "fluentd-elasticsearch.name" . }}
+      app.kubernetes.io/instance: "{{ .Release.Name }}"
+  namespaceSelector:
+    matchNames:
+      - {{ .Release.Namespace }}
+{{- end }}
diff --git a/kubernetes/helm_charts/logging/fluentd-elasticsearch/values.yaml b/kubernetes/helm_charts/logging/fluentd-elasticsearch/values.yaml
new file mode 100644
index 000000000..023baac88
--- /dev/null
+++ b/kubernetes/helm_charts/logging/fluentd-elasticsearch/values.yaml
@@ -0,0 +1,257 @@
+image:
+  repository: quay.io/fluentd_elasticsearch/fluentd
+## Specify an imagePullPolicy (Required)
+## It's recommended to change this to 'Always' if the image tag is 'latest'
+## ref: http://kubernetes.io/docs/user-guide/images/#updating-images
+  tag: v3.0.1
+  pullPolicy: IfNotPresent
+  ## Optionally specify an array of imagePullSecrets.
+  ## Secrets must be manually created in the namespace.
+  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
+  ##
+  # pullSecrets:
+  #   - myRegistryKeySecretName
+
+## If using AWS Elasticsearch, all requests to ES need to be signed regardless of whether
+## one is using Cognito or not. By setting this to true, this chart will install a sidecar
+## proxy that takes care of signing all requests being sent to the AWS ES Domain.
+awsSigningSidecar:
+  enabled: false
+  resources: {}
+  # limits:
+  #   cpu: 100m
+  #   memory: 500Mi
+  # requests:
+  #   cpu: 100m
+  #   memory: 200Mi
+  network:
+    port: 8080
+    address: localhost
+    remoteReadTimeoutSeconds: 15
+  image:
+    repository: abutaha/aws-es-proxy
+    tag: v1.0
+
+# Specify to use specific priorityClass for pods
+# ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/
+# If a Pod cannot be scheduled, the scheduler tries to preempt (evict) lower priority
+# Pods to make scheduling of the pending Pod possible.
+priorityClassName: ""
+
+# Specify where fluentd can find logs
+hostLogDir:
+  varLog: /var/log
+  dockerContainers: /var/lib/docker/containers
+  libSystemdDir: /usr/lib64
+
+## Configure resource requests and limits
+## ref: http://kubernetes.io/docs/user-guide/compute-resources/
+##
+resources: {}
+  # limits:
+  #   cpu: 100m
+  #   memory: 500Mi
+  # requests:
+  #   cpu: 100m
+  #   memory: 200Mi
+
+elasticsearch:
+  auth:
+    enabled: false
+    user: "yourUser"
+    password: "yourPass"
+  includeTagKey: true
+  hosts: []
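+  # e.g. hosts: ["elasticsearch-logging:9200"]  (illustrative endpoint; point
+  # this at your log-es client nodes)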
+  logstash:
+    enabled: true
+    prefix: "logstash"
+  path: ""
+  scheme: "http"
+  sslVerify: true
+  sslVersion: "TLSv1_2"
+  outputType: "elasticsearch"
+  typeName: "_doc"
+  logLevel: "info"
+  reconnectOnError: true
+  reloadOnFailure: false
+  reloadConnections: false
+  buffer:
+    enabled: true
+    type: "file"
+    path: "/var/log/fluentd-buffers/kubernetes.system.buffer"
+    flushMode: "interval"
+    retryType: "exponential_backoff"
+    flushThreadCount: 2
+    flushInterval: "5s"
+    retryForever: true
+    retryMaxInterval: 30
+    chunkLimitSize: "2M"
+    queueLimitLength: 8
+    overflowAction: "block"
+
+# If you want to change args of fluentd process
+# by example you can add -vv to launch with trace log
+fluentdArgs: "--no-supervisor -q"
+
+# If you want to add custom environment variables, use the env dict
+# You can then reference these in your config file e.g.:
+#     user "#{ENV['OUTPUT_USER']}"
+env: {}
+  # OUTPUT_USER: my_user
+  # LIVENESS_THRESHOLD_SECONDS: 300
+  # STUCK_THRESHOLD_SECONDS: 900
+
+# If you want to add custom environment variables from secrets, use the secret list
+secret: []
+# - name: ELASTICSEARCH_PASSWORD
+#   secret_name: elasticsearch
+#   secret_key: password
+
+rbac:
+  create: true
+
+serviceAccount:
+  # Specifies whether a ServiceAccount should be created
+  create: true
+  # The name of the ServiceAccount to use.
+  # If not set and create is true, a name is generated using the fullname template
+  name: ""
+  annotations: {}
+
+## Specify if a Pod Security Policy for node-exporter must be created
+## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/
+##
+podSecurityPolicy:
+  enabled: false
+  annotations: {}
+    ## Specify pod annotations
+    ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#apparmor
+    ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#seccomp
+    ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#sysctl
+    ##
+    # seccomp.security.alpha.kubernetes.io/allowedProfileNames: '*'
+    # seccomp.security.alpha.kubernetes.io/defaultProfileName: 'docker/default'
+    # apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default'
+
+livenessProbe:
+  enabled: true
+  initialDelaySeconds: 600
+  periodSeconds: 60
+  kind:
+    exec:
+      command:
+      # Liveness probe is aimed to help in situations where fluentd
+      # silently hangs for no apparent reasons until manual restart.
+      # The idea of this probe is that if fluentd is not queueing or
+      # flushing chunks for 5 minutes, something is not right. If
+      # you want to change the fluentd configuration, reducing amount of
+      # logs fluentd collects, consider changing the threshold or turning
+      # liveness probe off completely.
+      - '/bin/sh'
+      - '-c'
+      - >
+        LIVENESS_THRESHOLD_SECONDS=${LIVENESS_THRESHOLD_SECONDS:-300};
+        STUCK_THRESHOLD_SECONDS=${STUCK_THRESHOLD_SECONDS:-900};
+        if [ ! -e /var/log/fluentd-buffers ];
+        then
+          exit 1;
+        fi;
+        touch -d "${STUCK_THRESHOLD_SECONDS} seconds ago" /tmp/marker-stuck;
+        if [ -z "$(find /var/log/fluentd-buffers -type d -newer /tmp/marker-stuck -print -quit)" ];
+        then
+          rm -rf /var/log/fluentd-buffers;
+          exit 1;
+        fi;
+        touch -d "${LIVENESS_THRESHOLD_SECONDS} seconds ago" /tmp/marker-liveness;
+        if [ -z "$(find /var/log/fluentd-buffers -type d -newer /tmp/marker-liveness -print -quit)" ];
+        then
+          exit 1;
+        fi;
+
+annotations: {}
+
+podAnnotations: {}
+  # prometheus.io/scrape: "true"
+  # prometheus.io/port: "24231"
+
+## DaemonSet update strategy
+## Ref: https://kubernetes.io/docs/tasks/manage-daemon/update-daemon-set/
+updateStrategy:
+  type: RollingUpdate
+
+tolerations: []
+  # - key: node-role.kubernetes.io/master
+  #   operator: Exists
+  #   effect: NoSchedule
+
+affinity: {}
+  # nodeAffinity:
+  #   requiredDuringSchedulingIgnoredDuringExecution:
+  #     nodeSelectorTerms:
+  #     - matchExpressions:
+  #       - key: node-role.kubernetes.io/master
+  #         operator: DoesNotExist
+
+nodeSelector: {}
+
+service: {}
+  # ports:
+  #   - name: "monitor-agent"
+  #     type: ClusterIP
+  #     port: 24231
+
+serviceMonitor:
+  ## If true, a ServiceMonitor CRD is created for a prometheus operator
+  ## https://github.com/coreos/prometheus-operator
+  ##
+  enabled: false
+  interval: 10s
+  path: /metrics
+  port: 24231
+  labels: {}
+  metricRelabelings: []
+  relabelings: []
+  jobLabel: "app.kubernetes.io/instance"
+  type: ClusterIP
+
+serviceMetric:
+  ## If true, the metrics service will be created
+  ## Alternative to implicit creation through serviceMonitor.enabled
+  ##
+  enabled: false
+
+prometheusRule:
+  ## If true, a PrometheusRule CRD is created for a prometheus operator
+  ## https://github.com/coreos/prometheus-operator
+  ##
+  enabled: false
+  prometheusNamespace: monitoring
+  labels: {}
+  #  role: alert-rules
+
+configMaps:
+  useDefaults:
+    systemConf: true
+    containersInputConf: true
+    systemInputConf: true
+    forwardInputConf: true
+    monitoringConf: true
+    outputConf: true
+
+# Use this to add extra config files, or to replace the defaults entirely once
+# they have been disabled via configMaps.useDefaults above.
+extraConfigMaps: {}
+  # system.conf: |-
+  #   <system>
+  #     root_dir /tmp/fluentd-buffers/
+  #   </system>
+
+extraVolumes: []
+#   - name: es-certs
+#     secret:
+#       defaultMode: 420
+#       secretName: es-certs
+
+extraVolumeMounts: []
+#   - name: es-certs
+#     mountPath: /certs
+#     readOnly: true
diff --git a/kubernetes/helm_charts/logging/kibana/templates/deployment.yaml b/kubernetes/helm_charts/logging/kibana/templates/deployment.yaml
index b7a97758e..eec97658b 100644
--- a/kubernetes/helm_charts/logging/kibana/templates/deployment.yaml
+++ b/kubernetes/helm_charts/logging/kibana/templates/deployment.yaml
@@ -87,7 +87,6 @@ spec:
             value: "{{ .Values.elasticsearchHosts }}"
           {{- end }}
           - name: SERVER_HOST
-            value: "{{ .Values.serverHost }}"
 {{- if .Values.extraEnvs }}
 {{ toYaml .Values.extraEnvs | indent 10 }}
 {{- end }}
@@ -140,4 +139,4 @@ spec:
           {{- end -}}
       {{- if .Values.extraContainers }}
 {{ tpl .Values.extraContainers . | indent 6 }}
-      {{- end }}
\ No newline at end of file
+      {{- end }}
diff --git a/kubernetes/helm_charts/logging/kibana/values.yaml b/kubernetes/helm_charts/logging/kibana/values.yaml
index 468a98533..38261b7b1 100755
--- a/kubernetes/helm_charts/logging/kibana/values.yaml
+++ b/kubernetes/helm_charts/logging/kibana/values.yaml
@@ -1,7 +1,7 @@
 ---
 
 elasticsearchURL: "" # "http://elasticsearch-master:9200"
-elasticsearchHosts: "http://elasticsearch-master:9200"
+elasticsearchHosts: "" 
 
 replicas: 1
 
-- 
GitLab