From 023737711885f0ff7a0ba20e39dfb803c8284a77 Mon Sep 17 00:00:00 2001
From: Rani Mounika Kotakadi
 <43810286+RaniMounikaKotakadi@users.noreply.github.com>
Date: Fri, 28 Jun 2019 14:43:38 +0530
Subject: [PATCH] alert categories to loadtest (#575)

* alert categories to sunbird

* alert categories to sunbird

* Added alert categorisation variables
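
The WARNING / CRITICAL / FATAL values are attached as the "severity" label on every rule,
so Alertmanager routing can fan alerts out by category. A minimal sketch of such a route
(the receiver names slack-warnings, pagerduty-critical and pagerduty-fatal are placeholders,
not part of this change, and the receivers section is omitted):

    route:
      receiver: slack-warnings
      routes:
        - match:
            severity: CRITICAL
          receiver: pagerduty-critical
        - match:
            severity: FATAL
          receiver: pagerduty-fatal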
---
 .../stack-monitor-stateful/defaults/main.yml  |  33 ++++++
 .../templates/alertrules.backups.yml          |   8 +-
 .../templates/alertrules.docker.yml           |   4 +-
 .../templates/alertrules.es.yml               |  34 +++++-
 .../templates/alertrules.logs.yml             |   4 +-
 .../templates/alertrules.nodes.yml            | 106 +++++++++++++++---
 .../templates/alertrules.postgresql.yml       |  34 ++++--
 .../templates/alertrules.process.yml          |  44 +++++++-
 .../templates/alertrules.services.yml         |   6 +-
 .../templates/alertrules.task.yml             |  53 +++++++--
 ansible/roles/stack-monitor/defaults/main.yml |  33 ++++++
 .../templates/alertrules.docker.yml           |   4 +-
 .../templates/alertrules.kong.yml             |   4 +-
 .../templates/alertrules.nodes.yml            |  93 +++++++++++++--
 .../templates/alertrules.task.yml             |  52 +++++++--
 15 files changed, 435 insertions(+), 77 deletions(-)

diff --git a/ansible/roles/stack-monitor-stateful/defaults/main.yml b/ansible/roles/stack-monitor-stateful/defaults/main.yml
index 508a0c9e5..b965b2668 100644
--- a/ansible/roles/stack-monitor-stateful/defaults/main.yml
+++ b/ansible/roles/stack-monitor-stateful/defaults/main.yml
@@ -164,3 +164,36 @@ root_owner: root
 backup_storage_name: prometheus_backup
 prometheus_stateful_mount_point: "/root/dockerdata/prometheus_stateful/data/"
 docker_service_replicas_memory_limit: 512MB
+
+#################################################### Monitoring limits ################################################
+container_cpu_usage_percentage_threshold_Warning: 70
+container_cpu_usage_percentage_threshold_Critical: 85
+container_cpu_usage_percentage_threshold_Fatal: 95
+
+container_memory_usage_percentage_threshold_Warning: 70
+container_memory_usage_percentage_threshold_Critical: 85
+container_memory_usage_percentage_threshold_Fatal: 95
+
+node_cpu_usage_percentage_threshold_Warning: 70
+node_cpu_usage_percentage_threshold_Critical: 85
+node_cpu_usage_percentage_threshold_Fatal: 95
+
+node_memory_usage_percentage_threshold_Warning: 70
+node_memory_usage_percentage_threshold_Critical: 85
+node_memory_usage_percentage_threshold_Fatal: 95
+
+node_load_avg_threshold_Warning: 85
+node_load_avg_threshold_Critical: 95
+node_load_avg_threshold_Fatal: 120
+
+node_disk_usage_percentage_threshold_Warning: 70
+node_disk_usage_percentage_threshold_Critical: 85
+node_disk_usage_percentage_threshold_Fatal: 95
+
+postgres_number_of_connections_Warning: 100
+postgres_number_of_connections_Critical: 110
+postgres_number_of_connections_Fatal: 130
+
+elasticsearch_filesystem_data_remaining_threshold_Warning: 30
+elasticsearch_filesystem_data_remaining_threshold_Critical: 20
+elasticsearch_filesystem_data_remaining_threshold_Fatal: 10
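
These limits are plain role defaults, so an environment that needs different alerting bands
can override them from inventory without editing the templates. A minimal sketch, assuming a
hypothetical group_vars file for the monitored hosts (file name and values are illustrative only):

    # group_vars/all.yml (hypothetical location)
    node_cpu_usage_percentage_threshold_Warning: 60
    node_cpu_usage_percentage_threshold_Critical: 80
    node_cpu_usage_percentage_threshold_Fatal: 90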
diff --git a/ansible/roles/stack-monitor-stateful/templates/alertrules.backups.yml b/ansible/roles/stack-monitor-stateful/templates/alertrules.backups.yml
index 3f302fb6c..ee3260de2 100644
--- a/ansible/roles/stack-monitor-stateful/templates/alertrules.backups.yml
+++ b/ansible/roles/stack-monitor-stateful/templates/alertrules.backups.yml
@@ -1,15 +1,19 @@
 groups:
 - name: alertrules.backups
   rules:
-  - alert: backup_is_too_old
+  - alert: backup_is_too_old_CRITICAL
     expr: time() - azure_blob_latest_file_timestamp{job="data-backup-azure-blob-exporter"} / 1000 > {{ expected_data_backup_interval_in_minutes|int * 60 }}
     for: 5m
+    labels:
+      severity: CRITICAL
     annotations:
       description: '{% raw %}{{ $labels.container }}{% endraw %}: Latest backup file was created {% raw %}{{ humanizeDuration $value }}{% endraw %} ago. Threshold: {{ expected_data_backup_interval_in_minutes }} minutes'
       summary: Backup is too old
-  - alert: backup_size_is_too_small
+  - alert: backup_size_is_too_small_CRITICAL
     expr: azure_blob_latest_file_size{job="data-backup-azure-blob-exporter"} < {{ expected_data_backup_size_in_bytes }}
     for: 5m
+    labels:
+      severity: CRITICAL
     annotations:
       description: '{% raw %}{{ $labels.container }}{% endraw %}: Latest backup file is {% raw %}{{ $value }}{% endraw %} bytes, smaller than the threshold {{ expected_data_backup_size_in_bytes }} bytes'
       summary: Backup size is too small
diff --git a/ansible/roles/stack-monitor-stateful/templates/alertrules.docker.yml b/ansible/roles/stack-monitor-stateful/templates/alertrules.docker.yml
index f3ff6b89f..e222e7c07 100644
--- a/ansible/roles/stack-monitor-stateful/templates/alertrules.docker.yml
+++ b/ansible/roles/stack-monitor-stateful/templates/alertrules.docker.yml
@@ -1,11 +1,11 @@
 groups:
 - name: alertrules.docker
   rules:
-  - alert: docker_swarm_node_down
+  - alert: docker_swarm_node_down_FATAL
     expr: swarm_manager_nodes{state="down"} > 0
     for: 1m
     labels:
-      severity: critical
+      severity: FATAL
     annotations:
       description: 'Number nodes down : {% raw %}{{$value}}{% endraw %}'
       summary: 'Docker swarm node down'
diff --git a/ansible/roles/stack-monitor-stateful/templates/alertrules.es.yml b/ansible/roles/stack-monitor-stateful/templates/alertrules.es.yml
index 05ced8a09..6da75bf0e 100644
--- a/ansible/roles/stack-monitor-stateful/templates/alertrules.es.yml
+++ b/ansible/roles/stack-monitor-stateful/templates/alertrules.es.yml
@@ -3,13 +3,35 @@ groups:
   rules:
   - record: elasticsearch_filesystem_data_used_percent
     expr: 100 * (elasticsearch_filesystem_data_size_bytes - elasticsearch_filesystem_data_free_bytes) / elasticsearch_filesystem_data_size_bytes
-  - record: elasticsearch_filesystem_data_free_percent
-    expr: 100 - elasticsearch_filesystem_data_used_percent
+  - alert: elasticsearch_filesystem_data_free_percent_WARNING
+    expr: 100 - elasticsearch_filesystem_data_used_percent <= {{ elasticsearch_filesystem_data_remaining_threshold_Warning }} and 100 - elasticsearch_filesystem_data_used_percent > {{ elasticsearch_filesystem_data_remaining_threshold_Critical }}
+    for: 1m
+    labels:
+      severity: WARNING
+    annotations:
+      description: 'Elasticsearch free disk space is {% raw %}{{$value}}{% endraw %}%'
+      summary: 'Elasticsearch is running low on free disk space: {% raw %}{{$value}}{% endraw %}%'
+  - alert: elasticsearch_filesystem_data_free_percent_CRITICAL
+    expr: 100 - elasticsearch_filesystem_data_used_percent <= {{ elasticsearch_filesystem_data_remaining_threshold_Critical }} and 100 - elasticsearch_filesystem_data_used_percent > {{ elasticsearch_filesystem_data_remaining_threshold_Fatal }}
+    for: 1m
+    labels:
+      severity: CRITICAL
+    annotations:
+      description: 'Elasticsearch free disk space is {% raw %}{{$value}}{% endraw %}%'
+      summary: 'Elasticsearch is running low on free disk space: {% raw %}{{$value}}{% endraw %}%'
+  - alert: elasticsearch_filesystem_data_free_percent_FATAL
+    expr: 100 - elasticsearch_filesystem_data_used_percent <= {{ elasticsearch_filesystem_data_remaining_threshold_Fatal }}
+    for: 1m
+    labels:
+      severity: FATAL
+    annotations:
+      description: 'Elasticsearch free disk space is {% raw %}{{$value}}{% endraw %}%'
+      summary: 'Elasticsearch is running low on free disk space: {% raw %}{{$value}}{% endraw %}%'
   - alert: elasticsearch_too_few_nodes_running
     expr: elasticsearch_cluster_health_number_of_nodes{job="elasticsearch-exporter"} < {{ groups['es'] | length }}
     for: 1m
     labels:
-      severity: critical
+      severity: CRITICAL
     annotations:
       description: There are only {% raw %}{{$value}}{% endraw %} < {{ groups['es'] | length }} ElasticSearch nodes running
       summary: ElasticSearch running on less than {{ groups['es'] | length }} nodes
@@ -18,7 +40,7 @@ groups:
     expr: elasticsearch_cluster_health_number_of_nodes{job="log-elasticsearch-exporter"} < {{ groups['log-es'] | length }}
     for: 1m
     labels:
-      severity: critical
+      severity: CRITICAL
     annotations:
       description: There are only {% raw %}{{$value}}{% endraw %} < {{ groups['log-es'] | length }} ElasticSearch nodes running
       summary: ElasticSearch running on less than {{ groups['log-es'] | length }} nodes
@@ -27,7 +49,7 @@ groups:
     expr: elasticsearch_jvm_memory_used_bytes{area="heap"} / elasticsearch_jvm_memory_max_bytes{area="heap"} > 0.9
     for: 1m
     labels:
-      severity: critical
+      severity: CRITICAL
     annotations:
       description: The heap usage is over 90% for 15m
       summary: ElasticSearch node {% raw %}{{$labels.node}}{% endraw %} heap usage is high
@@ -35,7 +57,7 @@ groups:
     expr: time() - elasticsearch_snapshots_latest_successful_snapshot_timestamp{job="elasticsearch-snapshots-exporter"} / 1000 > {{ expected_elasticsearch_snapshot_interval_in_minutes|int * 60 }}
     for: 1m
     labels:
-      severity: critical
+      severity: CRITICAL
     annotations:
       description: Elasticsearch snapshot is too old
       summary: Latest elasticSearch snapshot was taken {% raw %}{{ humanizeDuration $value }}{% endraw %} ago. Threshold is {{ expected_elasticsearch_snapshot_interval_in_minutes }} minutes
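
With the default values above (30 / 20 / 10), each severity only covers its own band of
remaining disk space, so a node raises at most one of the three free-space alerts at a time.
Rendered with those defaults, the WARNING rule would look roughly like this:

    - alert: elasticsearch_filesystem_data_free_percent_WARNING
      expr: 100 - elasticsearch_filesystem_data_used_percent <= 30 and 100 - elasticsearch_filesystem_data_used_percent > 20
      for: 1m
      labels:
        severity: WARNING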
diff --git a/ansible/roles/stack-monitor-stateful/templates/alertrules.logs.yml b/ansible/roles/stack-monitor-stateful/templates/alertrules.logs.yml
index c577015a5..367c881ee 100644
--- a/ansible/roles/stack-monitor-stateful/templates/alertrules.logs.yml
+++ b/ansible/roles/stack-monitor-stateful/templates/alertrules.logs.yml
@@ -1,11 +1,11 @@
 groups:
 - name: alertrules.logs
   rules:
-  - alert: logs_ingestion_slow
+  - alert: logs_ingestion_slow_CRITICAL
     expr: increase(elasticsearch_indices_docs{job="log-elasticsearch-exporter"}[5m]) / 5 < {{ expected_minimum_logs_per_minute }}
     for: 1m
     labels:
-      severity: critical
+      severity: CRITICAL
     annotations:
       description: '{% raw %}{{ $labels.job }}{% endraw %}: Logs per minute is {% raw %}{{ $value }}{% endraw %}. It is below the threshold: {{ expected_minimum_logs_per_minute }}'
       summary: Logs are not flowing as expected
diff --git a/ansible/roles/stack-monitor-stateful/templates/alertrules.nodes.yml b/ansible/roles/stack-monitor-stateful/templates/alertrules.nodes.yml
index bc6db6774..40179e798 100644
--- a/ansible/roles/stack-monitor-stateful/templates/alertrules.nodes.yml
+++ b/ansible/roles/stack-monitor-stateful/templates/alertrules.nodes.yml
@@ -1,35 +1,107 @@
 groups:
 - name: alertrules.nodes
   rules:
-  - alert: high_cpu_usage_on_node
-    expr: 100 - (avg by (instance) (irate(node_cpu_seconds_total{job="vm-node-exporter",mode="idle"}[5m])) * 100) > 90
+  - alert: high_cpu_usage_on_node_WARNING
+    expr: 100 - (avg by (instance) (irate(node_cpu_seconds_total{job="vm-node-exporter",mode="idle"}[5m])) * 100) >= {{ node_cpu_usage_percentage_threshold_Warning }} and 100 - (avg by (instance) (irate(node_cpu_seconds_total{job="vm-node-exporter",mode="idle"}[5m])) * 100) < {{ node_cpu_usage_percentage_threshold_Critical }}
     for: 1m
+    labels:
+      severity: WARNING
     annotations:
       description: '{% raw %}{{ $labels.nodename }}{% endraw %} ({% raw %}{{ $labels.host }}{% endraw %}) is using a LOT of CPU. CPU usage is {% raw %}{{ humanize $value}}{% endraw %}%.'
-      summary: HIGH CPU USAGE WARNING ON '{% raw %}{{ $labels.nodename }}{% endraw %}'
-  - alert: high_memory_usage_on_node
-    expr: sum by(nodename) (((node_memory_MemTotal_bytes - node_memory_MemAvailable_bytes) / node_memory_MemTotal_bytes) * on(instance) group_left(nodename) node_uname_info * 100) > 95
+      summary: 'HIGH CPU USAGE WARNING ON {% raw %}{{ $labels.nodename }}{% endraw %}'
+  - alert: high_cpu_usage_on_node_CRITICAL
+    expr: 100 - (avg by (instance) (irate(node_cpu_seconds_total{job="vm-node-exporter",mode="idle"}[5m])) * 100) >= {{ node_cpu_usage_percentage_threshold_Critical }} and 100 - (avg by (instance) (irate(node_cpu_seconds_total{job="vm-node-exporter",mode="idle"}[5m])) * 100) < {{ node_cpu_usage_percentage_threshold_Fatal }}
     for: 1m
+    labels:
+      severity: CRITICAL
     annotations:
-      description: '{% raw %}{{ $labels.nodename }}{% endraw %} ({% raw %}{{ $labels.host }}{% endraw %}) is using a LOT of MEMORY. MEMORY usage is over {% raw %}{{ humanize $value}}{% endraw %}%.'
-      summary: HIGH MEMORY USAGE WARNING TASK ON '{% raw %}{{ $labels.nodename }}{% endraw %}'
-  - alert: high_load_on_node
-    expr: sum by(nodename) ((node_load1 / count without(cpu, mode) (node_cpu{mode="system"}))
-      * on(instance) group_left(nodename) node_uname_info * 100) > 200
+      description: '{% raw %}{{ $labels.nodename }}{% endraw %} ({% raw %}{{ $labels.host }}{% endraw %}) is using a LOT of CPU. CPU usage is {% raw %}{{ humanize $value}}{% endraw %}%.'
+      summary: 'HIGH CPU USAGE WARNING ON {% raw %}{{ $labels.nodename }}{% endraw %}'
+  - alert: high_cpu_usage_on_node_FATAL
+    expr: 100 - (avg by (instance) (irate(node_cpu_seconds_total{job="vm-node-exporter",mode="idle"}[5m])) * 100) >= {{ node_cpu_usage_percentage_threshold_Fatal }}
     for: 1m
+    labels:
+      severity: FATAL
+    annotations:
+      description: '{% raw %}{{ $labels.nodename }}{% endraw %} ({% raw %}{{ $labels.host }}{% endraw %}) is using a LOT of CPU. CPU usage is {% raw %}{{ humanize $value}}{% endraw %}%.'
+      summary: 'HIGH CPU USAGE WARNING ON {% raw %}{{ $labels.nodename }}{% endraw %}'
+  - alert: high_memory_usage_on_node_WARNING
+    expr: sum by(nodename) ((((node_memory_MemTotal_bytes - node_memory_MemAvailable_bytes) / node_memory_MemTotal_bytes) * on(instance) group_left(nodename) node_uname_info * 100) >= {{ node_memory_usage_percentage_threshold_Warning }} and (((node_memory_MemTotal_bytes - node_memory_MemAvailable_bytes) / node_memory_MemTotal_bytes) * on(instance) group_left(nodename) node_uname_info * 100) < {{ node_memory_usage_percentage_threshold_Critical }} )
+    for: 1m
+    labels:
+      severity: WARNING
+    annotations:
+      description: '{% raw %}{{ $labels.nodename }}{% endraw %} ({% raw %}{{ $labels.host }}{% endraw %}) is using a LOT of MEMORY. MEMORY usage is over {% raw %}{{ humanize $value}}{% endraw %}.'
+      summary: 'HIGH MEMORY USAGE WARNING TASK ON {% raw %}{{ $labels.nodename }}{% endraw %}'
+  - alert: high_memory_usage_on_node_CRITICAL
+    expr: sum by(nodename) ((((node_memory_MemTotal_bytes - node_memory_MemAvailable_bytes) / node_memory_MemTotal_bytes) * on(instance) group_left(nodename) node_uname_info * 100) >= {{ node_memory_usage_percentage_threshold_Critical }} and (((node_memory_MemTotal_bytes - node_memory_MemAvailable_bytes) / node_memory_MemTotal_bytes) * on(instance) group_left(nodename) node_uname_info * 100) < {{ node_memory_usage_percentage_threshold_Fatal }} )
+    for: 1m
+    labels:
+      severity: CRITICAL
+    annotations:
+      description: '{% raw %}{{ $labels.nodename }}{% endraw %} ({% raw %}{{ $labels.host }}{% endraw %}) is using a LOT of MEMORY. MEMORY usage is over {% raw %}{{ humanize $value}}{% endraw %}.'
+      summary: 'HIGH MEMORY USAGE WARNING TASK ON {% raw %}{{ $labels.nodename }}{% endraw %}'
+  - alert: high_memory_usage_on_node_FATAL
+    expr: sum by(nodename) (((node_memory_MemTotal_bytes - node_memory_MemAvailable_bytes) / node_memory_MemTotal_bytes) * on(instance) group_left(nodename) node_uname_info * 100) >= {{ node_memory_usage_percentage_threshold_Fatal }}
+    for: 1m
+    labels:
+      severity: FATAL
+    annotations:
+      description: '{% raw %}{{ $labels.nodename }}{% endraw %} ({% raw %}{{ $labels.host }}{% endraw %}) is using a LOT of MEMORY. MEMORY usage is over {% raw %}{{ humanize $value}}{% endraw %}.'
+      summary: 'HIGH MEMORY USAGE WARNING TASK ON {% raw %}{{ $labels.nodename }}{% endraw %}'
+  - alert: high_load_on_node_WARNING
+    expr: sum by(nodename) (((node_load1 / count without(cpu, mode) (node_cpu_seconds_total{mode="system"}))* on(instance) group_left(nodename) node_uname_info * 100) >= {{ node_load_avg_threshold_Warning }} and ((node_load1 / count without(cpu, mode) (node_cpu_seconds_total{mode="system"}))* on(instance) group_left(nodename) node_uname_info * 100) < {{ node_load_avg_threshold_Critical }} )
+    for: 5m
+    labels:
+      severity: WARNING
     annotations:
       description: '{% raw %}{{ $labels.nodename }}{% endraw %} ({% raw %}{{ $labels.host }}{% endraw %}) has a high load average. Load average is {% raw %}{{ $value }}{% endraw %}%.'
-      summary: HIGH LOAD AVERAGE WARNING ON '{% raw %}{{ $labels.nodename }}{% endraw %}'
-  - alert: node_exporter_down
+      summary: 'HIGH LOAD AVERAGE WARNING ON {% raw %}{{ $labels.nodename }}{% endraw %}'
+  - alert: high_load_on_node_CRITICAL
+    expr: sum by(nodename) (((node_load1 / count without(cpu, mode) (node_cpu_seconds_total{mode="system"}))* on(instance) group_left(nodename) node_uname_info * 100) >= {{ node_load_avg_threshold_Critical }} and ((node_load1 / count without(cpu, mode) (node_cpu_seconds_total{mode="system"}))* on(instance) group_left(nodename) node_uname_info * 100) < {{ node_load_avg_threshold_Fatal }} )
+    for: 5m
+    labels:
+      severity: CRITICAL
+    annotations:
+      description: '{% raw %}{{ $labels.nodename }}{% endraw %} ({% raw %}{{ $labels.host }}{% endraw %}) has a high load average. Load average is {% raw %}{{ $value }}{% endraw %}%.'
+      summary: 'HIGH LOAD AVERAGE WARNING ON {% raw %}{{ $labels.nodename }}{% endraw %}'
+  - alert: high_load_on_node_FATAL
+    expr: sum by(nodename) ((node_load1 / count without(cpu, mode) (node_cpu_seconds_total{mode="system"}))* on(instance) group_left(nodename) node_uname_info * 100) >= {{ node_load_avg_threshold_Fatal }}
+    for: 5m
+    labels:
+      severity: FATAL
+    annotations:
+      description: '{% raw %}{{ $labels.nodename }}{% endraw %} ({% raw %}{{ $labels.host }}{% endraw %}) has a high load average. Load average is {% raw %}{{ $value }}{% endraw %}%.'
+      summary: 'HIGH LOAD AVERAGE WARNING ON {% raw %}{{ $labels.nodename }}{% endraw %}'
+  - alert: node_exporter_down_CRITICAL
     expr: up == 0
     for: 1m
+    labels:
+      severity: CRITICAL
     annotations:
       description: The node exporter '{% raw %}{{ $labels.job }}{% endraw %}' is down.
       summary: 'NODE EXPORTER SERVICE CRITICAL: NODE ''{% raw %}{{ $labels.host }}{% endraw %}'''
-  - alert: node_running_out_of_disk_space
-    expr: sum by(nodename) ((node_filesystem_size{mountpoint="/"} - node_filesystem_free{mountpoint="/"})
-      * 100 / node_filesystem_size{mountpoint="/"} * on(instance) group_left(nodename) node_uname_info) > 80
+  - alert: node_running_out_of_disk_space_WARNING
+    expr: sum by(nodename) (((node_filesystem_size_bytes{mountpoint="/"} - node_filesystem_free_bytes{mountpoint="/"}) * 100 / node_filesystem_size_bytes{mountpoint="/"} * on(instance) group_left(nodename) node_uname_info) >= {{ node_disk_usage_percentage_threshold_Warning }} and ((node_filesystem_size_bytes{mountpoint="/"} - node_filesystem_free_bytes{mountpoint="/"}) * 100 / node_filesystem_size_bytes{mountpoint="/"} * on(instance) group_left(nodename) node_uname_info) < {{ node_disk_usage_percentage_threshold_Critical }} )
+    for: 1m
+    labels:
+      severity: WARNING
+    annotations:
+      description: 'More than {{ node_disk_usage_percentage_threshold_Warning }}% of disk used. Disk usage is {% raw %}{{ humanize $value }}{% endraw %}%'
+      summary: 'LOW DISK SPACE WARNING: NODE {% raw %}{{ $labels.nodename }}{% endraw %}'
+  - alert: node_running_out_of_disk_space_CRITICAL
+    expr: sum by(nodename) (((node_filesystem_size_bytes{mountpoint="/"} - node_filesystem_free_bytes{mountpoint="/"}) * 100 / node_filesystem_size_bytes{mountpoint="/"} * on(instance) group_left(nodename) node_uname_info) >= {{ node_disk_usage_percentage_threshold_Critical }} and ((node_filesystem_size_bytes{mountpoint="/"} - node_filesystem_free_bytes{mountpoint="/"}) * 100 / node_filesystem_size_bytes{mountpoint="/"} * on(instance) group_left(nodename) node_uname_info) < {{ node_disk_usage_percentage_threshold_Fatal }} )
+    for: 1m
+    labels:
+      severity: CRITICAL
+    annotations:
+      description: 'More than {{ node_disk_usage_percentage_threshold_Critical }}% of disk used. Disk usage is {% raw %}{{ humanize $value }}{% endraw %}%'
+      summary: 'LOW DISK SPACE WARNING: NODE {% raw %}{{ $labels.nodename }}{% endraw %}'
+  - alert: node_running_out_of_disk_space_FATAL
+    expr: sum by(nodename) ((node_filesystem_size_bytes{mountpoint="/"} - node_filesystem_free_bytes{mountpoint="/"}) * 100 / node_filesystem_size_bytes{mountpoint="/"} * on(instance) group_left(nodename) node_uname_info) >= {{ node_disk_usage_percentage_threshold_Fatal }}
     for: 1m
+    labels:
+      severity: FATAL
     annotations:
-      description: More than 80% of disk used. Disk usage is {% raw %}{{ humanize $value }}{% endraw %}%
-      summary: 'LOW DISK SPACE WARING: NODE ''{% raw %}{{ $labels.nodename }}{% endraw %}'' '
+      description: 'More than {{ node_disk_usage_percentage_threshold_Fatal }}% of disk used. Disk usage is {% raw %}{{ humanize $value }}{% endraw %}%'
+      summary: 'LOW DISK SPACE WARNING: NODE {% raw %}{{ $labels.nodename }}{% endraw %}'
diff --git a/ansible/roles/stack-monitor-stateful/templates/alertrules.postgresql.yml b/ansible/roles/stack-monitor-stateful/templates/alertrules.postgresql.yml
index 1a3250238..271454551 100644
--- a/ansible/roles/stack-monitor-stateful/templates/alertrules.postgresql.yml
+++ b/ansible/roles/stack-monitor-stateful/templates/alertrules.postgresql.yml
@@ -6,16 +6,32 @@ groups:
     expr: pg_exporter_last_scrape_error == 1
     for: 1m
     labels:
-      severity: critical
+      severity: CRITICAL
     annotations:
       description: 'PostgreSQL unavailable as per job: {% raw %}{{$labels.job}}{% endraw %}'
       summary: PostgreSQL unavailable
 {% endif %}
-  - alert: postgres_high_number_of_connections
-    expr: sum(pg_stat_database_numbackends) > 90
+  - alert: postgres_high_number_of_connections_WARNING
+    expr: sum(pg_stat_database_numbackends) > {{ postgres_number_of_connections_Warning }} 
     for: 1m
     labels:
-      severity: critical
+      severity: WARNING
+    annotations:
+      description: 'Number of connections is above the high water mark: {% raw %}{{$value}}{% endraw %}'
+      summary: PostgreSQL high number of connections
+  - alert: postgres_high_number_of_connections_CRITICAL
+    expr: sum(pg_stat_database_numbackends) > {{ postgres_number_of_connections_Critical }}
+    for: 1m
+    labels:
+      severity: CRITICAL
+    annotations:
+      description: 'Number of connections is above the high water mark: {% raw %}{{$value}}{% endraw %}'
+      summary: PostgreSQL high number of connections
+  - alert: postgres_high_number_of_connections_FATAL
+    expr: sum(pg_stat_database_numbackends) > {{ postgres_number_of_connections_Fatal }}
+    for: 1m
+    labels:
+      severity: FATAL
     annotations:
       description: 'Number of connections is above the high water mark: {% raw %}{{$value}}{% endraw %}'
       summary: PostgreSQL high number of connections
@@ -23,7 +39,7 @@ groups:
     expr: pg_server_standby_status_in_recovery{job="master-postgres-exporter"} == 1
     for: 1m
     labels:
-      severity: critical
+      severity: CRITICAL
     annotations:
       description: 'PostgreSQL master is in recovery. pg_server_standby_status_in_recovery: {% raw %}{{$value}}{% endraw %}'
       summary: PostgreSQL master is in recovery
@@ -31,15 +47,15 @@ groups:
     expr: pg_server_standby_status_in_recovery{job="slave-postgres-exporter"} == 0
     for: 1m
     labels:
-      severity: critical
+      severity: CRITICAL
     annotations:
       description: 'PostgreSQL slave is not in recovery. pg_server_standby_status_in_recovery: {% raw %}{{$value}}{% endraw %}'
       summary: PostgreSQL slave is not in recovery
-  - alert: postgres_high_peplication_byte_lag
+  - alert: postgres_high_replication_byte_lag
     expr: pg_stat_replication_byte_lag > 1e+06
     for: 1m
     labels:
-      severity: critical
+      severity: CRITICAL
     annotations:
       description: 'The replication byte lag for salve: {% raw %}{{$labels.slave_addr}}{% endraw %} is above the high water mark: {% raw %}{{$value}}{% endraw %}'
       summary: PostgreSQL replication byte lag is high
@@ -47,7 +63,7 @@ groups:
     expr: pg_replication_lag > 60
     for: 1m
     labels:
-      severity: critical
+      severity: CRITICAL
     annotations:
       description: 'The replication lag between the master and slave is above the
         high water mark: {% raw %}{{$value}}{% endraw %}'
diff --git a/ansible/roles/stack-monitor-stateful/templates/alertrules.process.yml b/ansible/roles/stack-monitor-stateful/templates/alertrules.process.yml
index e1127d06c..98a73686b 100644
--- a/ansible/roles/stack-monitor-stateful/templates/alertrules.process.yml
+++ b/ansible/roles/stack-monitor-stateful/templates/alertrules.process.yml
@@ -4,120 +4,152 @@ groups:
   - alert: tomcat_process_not_running
     expr: namedprocess_namegroup_states{groupname="tomcat",state="Sleeping"} < 1
     for: 1m
+    labels:
+      severity: FATAL
     annotations:
       description: 'Number of running processes are: {% raw %}{{$value}}{% endraw %}'
       summary: tomcat process is not running
   - alert: search_process_not_running
     expr: namedprocess_namegroup_states{groupname="search",state="Sleeping"} < 1
     for: 1m
+    labels:
+      severity: FATAL
     annotations:
       description: 'Number of running processes are: {% raw %}{{$value}}{% endraw %}'
       summary: search process is not running   
   - alert: neo4j_process_not_running
     expr: namedprocess_namegroup_states{groupname="neo4j",state="Sleeping"} < 1
     for: 1m
+    labels:
+      severity: FATAL
     annotations:
       description: 'Number of running processes are: {% raw %}{{$value}}{% endraw %}'
       summary: neo4j process is not running
   - alert: kafka_process_not_running
     expr: namedprocess_namegroup_states{groupname="kafka",state="Sleeping"} < 1
     for: 1m
+    labels:
+      severity: FATAL
     annotations:
       description: 'Number of running processes are: {% raw %}{{$value}}{% endraw %}'
       summary: Kafka process is not running
   - alert: kafka_more_than_one_process_running
     expr: namedprocess_namegroup_num_procs{groupname="kafka"} > 1
     for: 1m
+    labels:
+      severity: CRITICAL
     annotations:
       description: 'Number of running processes are: {% raw %}{{$value}}{% endraw %}'
       summary: More than one process running
-  - alert: search_process_not_running
-    expr: namedprocess_namegroup_states{groupname="search",state="Sleeping"} < 1
-    for: 1m
-    annotations:
-      description: 'Number of running processes are: {% raw %}{{$value}}{% endraw %}'
-      summary: search process is not running
   - alert: secor_process_not_running
     expr: namedprocess_namegroup_states{groupname="secor",state="Sleeping"} != 9
     for: 1m
+    labels:
+      severity: CRITICAL
     annotations:
       description: 'Number of running processes are: {% raw %}{{$value}}{% endraw %}'
       summary: Secor process is not running
   - alert: zookeeper_process_not_running
     expr: namedprocess_namegroup_states{groupname="zookeeper",state="Sleeping"} < 1
     for: 1m
+    labels:
+      severity: FATAL
     annotations:
       description: 'Number of running processes are: {% raw %}{{$value}}{% endraw %}'
       summary: Zookeeper process is not running
   - alert: yarn_process_not_running
     expr: namedprocess_namegroup_states{groupname="yarn",state="Sleeping"} < 1
     for: 1m
+    labels:
+      severity: FATAL
     annotations:
       description: 'Number of running processes are: {% raw %}{{$value}}{% endraw %}'
       summary: YARN process is not running
   - alert: cassandra_process_not_running
     expr: namedprocess_namegroup_states{groupname="cassandra",state="Sleeping"} < 1
     for: 1m
+    labels:
+      severity: FATAL
     annotations:
       description: 'Number of running processes are: {% raw %}{{$value}}{% endraw %}'
       summary: Cassandra process is not running
   - alert: elasticsearch_process_not_running
     expr: namedprocess_namegroup_states{groupname="elasticsearch",state="Sleeping"} < 1
     for: 1m
+    labels:
+      severity: FATAL
     annotations:
       description: 'Number of running processes are: {% raw %}{{$value}}{% endraw %}'
       summary: Elasticsearch process is not running
   - alert: logstash_process_not_running
     expr: namedprocess_namegroup_states{groupname="logstash",state="Sleeping"} < 1
     for: 1m
+    labels:
+      severity: CRITICAL
     annotations:
       description: 'Number of running processes are: {% raw %}{{$value}}{% endraw %}'
       summary: Logstash process is not running
   - alert: Analytics_api_process_not_running
     expr: namedprocess_namegroup_states{groupname="analyticsapi",state="Sleeping"} < 1
     for: 1m
+    labels:
+      severity: FATAL
     annotations:
       description: 'Number of running processes are: {% raw %}{{$value}}{% endraw %}'
       summary: Analytics API process is not running
   - alert: druid_zookeeper_process_not_running
     expr: namedprocess_namegroup_states{groupname="druidzookeeper",state="Sleeping"} < 1
     for: 1m
+    labels:
+      severity: FATAL
     annotations:
       description: 'Number of running processes are: {% raw %}{{$value}}{% endraw %}'
       summary: Druid zookeeper is not running
   - alert: druid_postgres_process_not_running
     expr: namedprocess_namegroup_states{groupname="druidpostgres",state="Sleeping"} < 1
     for: 1m
+    labels:
+      severity: FATAL
     annotations:
       description: 'Number of running processes are: {% raw %}{{$value}}{% endraw %}'
       summary: Druid postgres is not running
   - alert: druid_overlord_process_not_running
     expr: namedprocess_namegroup_states{groupname="overlord",state="Sleeping"} < 1
     for: 1m
+    labels:
+      severity: FATAL
     annotations:
       description: 'Number of running processes are: {% raw %}{{$value}}{% endraw %}'
       summary: Druid overlord process is not running
   - alert: druid_coordinator_process_not_running
     expr: namedprocess_namegroup_states{groupname="coordinator",state="Sleeping"} < 1
     for: 1m
+    labels:
+      severity: FATAL
     annotations:
       description: 'Number of running processes are: {% raw %}{{$value}}{% endraw %}'
       summary: Druid coordinator process is not running
   - alert: druid_historical_process_not_running
     expr: namedprocess_namegroup_states{groupname="historical",state="Sleeping"} < 1
     for: 1m
+    labels:
+      severity: FATAL
     annotations:
       description: 'Number of running processes are: {% raw %}{{$value}}{% endraw %}'
       summary: Druid historical process is not running
   - alert: druid_broker_process_not_running
     expr: namedprocess_namegroup_states{groupname="broker",state="Sleeping"} < 1
     for: 1m
+    labels:
+      severity: FATAL
     annotations:
       description: 'Number of running processes are: {% raw %}{{$value}}{% endraw %}'
       summary: Druid broker process is not running
   - alert: druid_middleManager_process_not_running
     expr: namedprocess_namegroup_states{groupname="middleManager",state="Sleeping"} < 1
     for: 1m
+    labels:
+      severity: FATAL
     annotations:
       description: 'Number of running processes are: {% raw %}{{$value}}{% endraw %}'
       summary: Druid middleManager process is not running
diff --git a/ansible/roles/stack-monitor-stateful/templates/alertrules.services.yml b/ansible/roles/stack-monitor-stateful/templates/alertrules.services.yml
index 1f3952a0c..037e5f252 100644
--- a/ansible/roles/stack-monitor-stateful/templates/alertrules.services.yml
+++ b/ansible/roles/stack-monitor-stateful/templates/alertrules.services.yml
@@ -1,15 +1,19 @@
 groups:
 - name: alertrules.services
   rules:
-  - alert: service_down
+  - alert: service_down_FATAL
     expr: probe_success == 0
     for: 1m
+    labels:
+      severity: FATAL
     annotations:
       description: '{% raw %}{{ $labels.job }}{% endraw %}: The service is down.'
       summary: Service down
   - alert: health_check
     expr: changes(probe_success[5m]) > 2
     for: 2m
+    labels:
+      severity: CRITICAL
     annotations:
       description: 'The service status has changed {% raw %}{{$value}}{% endraw %} times in last 2 minutes. Threshold is : 2'
       summary: Health check is failing 
diff --git a/ansible/roles/stack-monitor-stateful/templates/alertrules.task.yml b/ansible/roles/stack-monitor-stateful/templates/alertrules.task.yml
index 974c8cfcd..7aa87cf50 100644
--- a/ansible/roles/stack-monitor-stateful/templates/alertrules.task.yml
+++ b/ansible/roles/stack-monitor-stateful/templates/alertrules.task.yml
@@ -1,24 +1,59 @@
 groups:
 - name: alertrules.task
   rules:
-  - alert: high_cpu_usage_on_container
-    expr: sum by(container_label_com_docker_swarm_service_name, container_label_com_docker_swarm_task_name, instance) (rate(container_cpu_usage_seconds_total{container_label_com_docker_swarm_task_name=~".+"}[5m])) * 100 > {{ container_cpu_usage_percentage_theshold }}
+  - alert: high_cpu_usage_on_container_WARNING
+    expr: sum by(container_label_com_docker_swarm_service_name, container_label_com_docker_swarm_task_name, instance) (rate(container_cpu_usage_seconds_total{container_label_com_docker_swarm_task_name=~".+"}[5m])) * 100 >= {{ container_cpu_usage_percentage_threshold_Warning }} and sum by(container_label_com_docker_swarm_service_name, container_label_com_docker_swarm_task_name, instance) (rate(container_cpu_usage_seconds_total{container_label_com_docker_swarm_task_name=~".+"}[5m])) * 100 < {{ container_cpu_usage_percentage_threshold_Critical }}
     for: 1m
+    labels:
+      severity: WARNING
     annotations:
       description: '{% raw %}{{ $labels.container_label_com_docker_swarm_task_name }}{% endraw %} is using {% raw %}{{ $value }}{% endraw %}% CPU. Threshold is : {{ container_cpu_usage_percentage_theshold }}%'
       summary: 'HIGH CPU USAGE WARNING: TASK {% raw %}{{ $labels.container_label_com_docker_swarm_task_name }}{% endraw %} on {% raw %}{{ $labels.instance }}{% endraw %}'
-
-  - alert: high_memory_usage_on_container
-    expr: (container_memory_usage_bytes{container_label_com_docker_swarm_task_name=~".+"} / container_spec_memory_limit_bytes) * 100 > {{ container_memory_usage_percentage_theshold }} < Inf
+  - alert: high_cpu_usage_on_container_CRITICAL
+    expr: sum by(container_label_com_docker_swarm_service_name, container_label_com_docker_swarm_task_name, instance) (rate(container_cpu_usage_seconds_total{container_label_com_docker_swarm_task_name=~".+"}[5m])) * 100 >= {{ container_cpu_usage_percentage_threshold_Critical }} and sum by(container_label_com_docker_swarm_service_name, container_label_com_docker_swarm_task_name, instance) (rate(container_cpu_usage_seconds_total{container_label_com_docker_swarm_task_name=~".+"}[5m])) * 100 < {{ container_cpu_usage_percentage_threshold_Fatal }}
     for: 1m
+    labels:
+      severity: CRITICAL
+    annotations:
+      description: '{% raw %}{{ $labels.container_label_com_docker_swarm_task_name }}{% endraw %} is using {% raw %}{{ $value }}{% endraw %}% CPU. Threshold is : {{ container_cpu_usage_percentage_threshold_Critical }}%'
+      summary: 'HIGH CPU USAGE WARNING: TASK {% raw %}{{ $labels.container_label_com_docker_swarm_task_name }}{% endraw %} on {% raw %}{{ $labels.instance }}{% endraw %}'
+  - alert: high_cpu_usage_on_container_FATAL
+    expr: sum by(container_label_com_docker_swarm_service_name, container_label_com_docker_swarm_task_name, instance) (rate(container_cpu_usage_seconds_total{container_label_com_docker_swarm_task_name=~".+"}[5m])) * 100 >= {{ container_cpu_usage_percentage_threshold_Fatal }}
+    for: 1m
+    labels:
+      severity: FATAL
+    annotations:
+      description: '{% raw %}{{ $labels.container_label_com_docker_swarm_task_name }}{% endraw %} is using {% raw %}{{ $value }}{% endraw %}% CPU. Threshold is : {{ container_cpu_usage_percentage_threshold_Fatal }}%'
+      summary: 'HIGH CPU USAGE WARNING: TASK {% raw %}{{ $labels.container_label_com_docker_swarm_task_name }}{% endraw %} on {% raw %}{{ $labels.instance }}{% endraw %}'
+  - alert: high_memory_usage_on_container_WARNING
+    expr: (container_memory_usage_bytes{container_label_com_docker_swarm_task_name=~".+"} / container_spec_memory_limit_bytes) * 100 >= {{ container_memory_usage_percentage_threshold_Warning }} and (container_memory_usage_bytes{container_label_com_docker_swarm_task_name=~".+"} / container_spec_memory_limit_bytes) * 100 < {{ container_memory_usage_percentage_threshold_Critical }}
+    for: 1m
+    labels:
+      severity: WARNING
+    annotations:
+      description: '{% raw %}{{ $labels.container_label_com_docker_swarm_task_name }}{% endraw %} is using {% raw %}{{ $value }}{% endraw %}% memory. Threshold is : {{ container_memory_usage_percentage_threshold_Warning }} %'
+      summary: 'HIGH MEMORY USAGE WARNING: TASK {% raw %}{{ $labels.container_label_com_docker_swarm_task_name }}{% endraw %} on {% raw %}{{ $labels.instance }}{% endraw %}'
+  - alert: high_memory_usage_on_container_CRITICAL
+    expr: (container_memory_usage_bytes{container_label_com_docker_swarm_task_name=~".+"} / container_spec_memory_limit_bytes) * 100 >= {{ container_memory_usage_percentage_threshold_Critical }} and (container_memory_usage_bytes{container_label_com_docker_swarm_task_name=~".+"} / container_spec_memory_limit_bytes) * 100 < {{ container_memory_usage_percentage_threshold_Fatal }}
+    for: 1m
+    labels:
+      severity: CRITICAL
+    annotations:
+      description: '{% raw %}{{ $labels.container_label_com_docker_swarm_task_name }}{% endraw %} is using {% raw %}{{ $value }}{% endraw %}% memory. Threshold is : {{ container_memory_usage_percentage_threshold_Critical }} %'
+      summary: 'HIGH MEMORY USAGE WARNING: TASK {% raw %}{{ $labels.container_label_com_docker_swarm_task_name }}{% endraw %} on {% raw %}{{ $labels.instance }}{% endraw %}'
+  - alert: high_memory_usage_on_container_FATAL
+    expr: (container_memory_usage_bytes{container_label_com_docker_swarm_task_name=~".+"} / container_spec_memory_limit_bytes) * 100 >= {{ container_memory_usage_percentage_threshold_Fatal }}
+    for: 1m
+    labels:
+      severity: FATAL
     annotations:
       description: '{% raw %}{{ $labels.container_label_com_docker_swarm_task_name }}{% endraw %} is using {% raw %}{{ $value }}{% endraw %}% memory. Threshold is : {{ container_memory_usage_percentage_theshold }} %'
       summary: 'HIGH MEMORY USAGE WARNING: TASK {% raw %}{{ $labels.container_label_com_docker_swarm_task_name }}{% endraw %} on {% raw %}{{ $labels.instance }}{% endraw %}'
-
-  - alert: replicas_uneven
-    expr: sum by (service_name) (docker_service_replicas_expected != docker_service_replicas_running)
+  - alert: replicas_uneven_FATAL
+    expr: sum by (service_name) (docker_service_replicas_expected != docker_service_replicas_running) 
     for: 1m
+    labels:
+      severity: FATAL 
     annotations:
       description: 'UNEVEN REPLICAS COUNT FOR {% raw %}{{ $labels.service_name }}{% endraw %}'
       summary: 'UNEVEN REPLICAS COUNT: {% raw %}{{ $labels.service_name }}{% endraw %} is having uneven count'
-
diff --git a/ansible/roles/stack-monitor/defaults/main.yml b/ansible/roles/stack-monitor/defaults/main.yml
index f2eb9294d..f10d5c5b6 100644
--- a/ansible/roles/stack-monitor/defaults/main.yml
+++ b/ansible/roles/stack-monitor/defaults/main.yml
@@ -169,3 +169,36 @@ backup_storage_name: prometheus_backup
 
 docker_service_replicas_memory_limit: 256MB
 prometheus_mount_point: "/root/dockerdata/prometheus/data/"
+
+#################################################### Monitoring limits ################################################
+container_cpu_usage_percentage_threshold_Warning: 70
+container_cpu_usage_percentage_threshold_Critical: 85
+container_cpu_usage_percentage_threshold_Fatal: 95
+
+container_memory_usage_percentage_threshold_Warning: 70
+container_memory_usage_percentage_threshold_Critical: 85
+container_memory_usage_percentage_threshold_Fatal: 95
+
+node_cpu_usage_percentage_threshold_Warning: 70
+node_cpu_usage_percentage_threshold_Critical: 85
+node_cpu_usage_percentage_threshold_Fatal: 95
+
+node_memory_usage_percentage_threshold_Warning: 70
+node_memory_usage_percentage_threshold_Critical: 85
+node_memory_usage_percentage_threshold_Fatal: 95
+
+node_load_avg_threshold_Warning: 85
+node_load_avg_threshold_Critical: 95
+node_load_avg_threshold_Fatal: 120
+
+node_disk_usage_percentage_threshold_Warning: 70
+node_disk_usage_percentage_threshold_Critical: 85
+node_disk_usage_percentage_threshold_Fatal: 95
+
+postgres_number_of_connections_Warning: 100
+postgres_number_of_connections_Critical: 110
+postgres_number_of_connections_Fatal: 130
+
+elasticsearch_filesystem_data_remaining_threshold_Warning: 30
+elasticsearch_filesystem_data_remaining_threshold_Critical: 20
+elasticsearch_filesystem_data_remaining_threshold_Fatal: 10
diff --git a/ansible/roles/stack-monitor/templates/alertrules.docker.yml b/ansible/roles/stack-monitor/templates/alertrules.docker.yml
index f3ff6b89f..e222e7c07 100644
--- a/ansible/roles/stack-monitor/templates/alertrules.docker.yml
+++ b/ansible/roles/stack-monitor/templates/alertrules.docker.yml
@@ -1,11 +1,11 @@
 groups:
 - name: alertrules.docker
   rules:
-  - alert: docker_swarm_node_down
+  - alert: docker_swarm_node_down_FATAL
     expr: swarm_manager_nodes{state="down"} > 0
     for: 1m
     labels:
-      severity: critical
+      severity: FATAL
     annotations:
       description: 'Number nodes down : {% raw %}{{$value}}{% endraw %}'
       summary: 'Docker swarm node down'
diff --git a/ansible/roles/stack-monitor/templates/alertrules.kong.yml b/ansible/roles/stack-monitor/templates/alertrules.kong.yml
index 2b3bd5b81..9122ba64c 100644
--- a/ansible/roles/stack-monitor/templates/alertrules.kong.yml
+++ b/ansible/roles/stack-monitor/templates/alertrules.kong.yml
@@ -1,11 +1,11 @@
 groups:
 - name: alertrules.kong
   rules:
-  - alert: kong_cluster_unhealthy
+  - alert: kong_cluster_unhealthy_FATAL
     expr: kong_cluster_alive_nodes != {{ kong_cluster_expected_number_of_nodes }}
     for: 1m
     labels:
-      severity: critical
+      severity: FATAL
     annotations:
       description: 'Number of live nodes : {% raw %}{{$value}}{% endraw %} not equal to : {{ kong_cluster_expected_number_of_nodes }}'
       summary: 'Kong cluster is unhealthy'
diff --git a/ansible/roles/stack-monitor/templates/alertrules.nodes.yml b/ansible/roles/stack-monitor/templates/alertrules.nodes.yml
index e321c9edb..890fe123a 100644
--- a/ansible/roles/stack-monitor/templates/alertrules.nodes.yml
+++ b/ansible/roles/stack-monitor/templates/alertrules.nodes.yml
@@ -1,34 +1,107 @@
 groups:
 - name: alertrules.nodes
   rules:
-  - alert: high_cpu_usage_on_node
-    expr: 100 - (avg by (instance) (irate(node_cpu_seconds_total{job="vm-node-exporter",mode="idle"}[5m])) * 100) > 90
+  - alert: high_cpu_usage_on_node_WARNING
+    expr: 100 - (avg by (instance) (irate(node_cpu_seconds_total{job="vm-node-exporter",mode="idle"}[5m])) * 100) >= {{ node_cpu_usage_percentage_threshold_Warning }} and 100 - (avg by (instance) (irate(node_cpu_seconds_total{job="vm-node-exporter",mode="idle"}[5m])) * 100) < {{ node_cpu_usage_percentage_threshold_Critical }}
     for: 1m
+    labels:
+      severity: WARNING
     annotations:
       description: '{% raw %}{{ $labels.nodename }}{% endraw %} ({% raw %}{{ $labels.host }}{% endraw %}) is using a LOT of CPU. CPU usage is {% raw %}{{ humanize $value}}{% endraw %}%.'
       summary: 'HIGH CPU USAGE WARNING ON {% raw %}{{ $labels.nodename }}{% endraw %}'
-  - alert: high_memory_usage_on_node
-    expr: sum by(nodename) (((node_memory_MemTotal_bytes - node_memory_MemAvailable_bytes) / node_memory_MemTotal_bytes) * on(instance) group_left(nodename) node_uname_info * 100) > 90
+  - alert: high_cpu_usage_on_node_CRITICAL
+    expr: 100 - (avg by (instance) (irate(node_cpu_seconds_total{job="vm-node-exporter",mode="idle"}[5m])) * 100) >= {{ node_cpu_usage_percentage_threshold_Critical }} and 100 - (avg by (instance) (irate(node_cpu_seconds_total{job="vm-node-exporter",mode="idle"}[5m])) * 100) < {{ node_cpu_usage_percentage_threshold_Fatal }}
     for: 1m
+    labels:
+      severity: CRITICAL
+    annotations:
+      description: '{% raw %}{{ $labels.nodename }}{% endraw %} ({% raw %}{{ $labels.host }}{% endraw %}) is using a LOT of CPU. CPU usage is {% raw %}{{ humanize $value}}{% endraw %}%.'
+      summary: 'HIGH CPU USAGE WARNING ON {% raw %}{{ $labels.nodename }}{% endraw %}'
+  - alert: high_cpu_usage_on_node_FATAL
+    expr: 100 - (avg by (instance) (irate(node_cpu_seconds_total{job="vm-node-exporter",mode="idle"}[5m])) * 100) >= {{ node_cpu_usage_percentage_threshold_Fatal }}
+    for: 1m
+    labels:
+      severity: FATAL
+    annotations:
+      description: '{% raw %}{{ $labels.nodename }}{% endraw %} ({% raw %}{{ $labels.host }}{% endraw %}) is using a LOT of CPU. CPU usage is {% raw %}{{ humanize $value}}{% endraw %}%.'
+      summary: 'HIGH CPU USAGE WARNING ON {% raw %}{{ $labels.nodename }}{% endraw %}'
+  - alert: high_memory_usage_on_node_WARNING
+    expr: sum by(nodename) ((((node_memory_MemTotal_bytes - node_memory_MemAvailable_bytes) / node_memory_MemTotal_bytes) * on(instance) group_left(nodename) node_uname_info * 100) >= {{ node_memory_usage_percentage_threshold_Warning }} and (((node_memory_MemTotal_bytes - node_memory_MemAvailable_bytes) / node_memory_MemTotal_bytes) * on(instance) group_left(nodename) node_uname_info * 100) < {{ node_memory_usage_percentage_threshold_Critical }} )
+    for: 1m
+    labels:
+      severity: WARNING
     annotations:
       description: '{% raw %}{{ $labels.nodename }}{% endraw %} ({% raw %}{{ $labels.host }}{% endraw %}) is using a LOT of MEMORY. MEMORY usage is over {% raw %}{{ humanize $value}}{% endraw %}.'
       summary: 'HIGH MEMORY USAGE WARNING TASK ON {% raw %}{{ $labels.nodename }}{% endraw %}'
-  - alert: high_load_on_node
-    expr: sum by(nodename) ((node_load1 / count without(cpu, mode) (node_cpu_seconds_total{mode="system"}))
-      * on(instance) group_left(nodename) node_uname_info * 100) > 100
+  - alert: high_memory_usage_on_node_CRITICAL
+    expr: sum by(nodename) ((((node_memory_MemTotal_bytes - node_memory_MemAvailable_bytes) / node_memory_MemTotal_bytes) * on(instance) group_left(nodename) node_uname_info * 100) >= {{ node_memory_usage_percentage_threshold_Critical }} and (((node_memory_MemTotal_bytes - node_memory_MemAvailable_bytes) / node_memory_MemTotal_bytes) * on(instance) group_left(nodename) node_uname_info * 100) < {{ node_memory_usage_percentage_threshold_Fatal }} )
     for: 1m
+    labels:
+      severity: CRITICAL
+    annotations:
+      description: '{% raw %}{{ $labels.nodename }}{% endraw %} ({% raw %}{{ $labels.host }}{% endraw %}) is using a LOT of MEMORY. MEMORY usage is over {% raw %}{{ humanize $value}}{% endraw %}.'
+      summary: 'HIGH MEMORY USAGE WARNING TASK ON {% raw %}{{ $labels.nodename }}{% endraw %}'
+  - alert: high_memory_usage_on_node_FATAL
+    expr: sum by(nodename) (((node_memory_MemTotal_bytes - node_memory_MemAvailable_bytes) / node_memory_MemTotal_bytes) * on(instance) group_left(nodename) node_uname_info * 100) >= {{ node_memory_usage_percentage_threshold_Fatal }}
+    for: 1m
+    labels:
+      severity: FATAL
+    annotations:
+      description: '{% raw %}{{ $labels.nodename }}{% endraw %} ({% raw %}{{ $labels.host }}{% endraw %}) is using a LOT of MEMORY. MEMORY usage is over {% raw %}{{ humanize $value}}{% endraw %}.'
+      summary: 'HIGH MEMORY USAGE WARNING TASK ON {% raw %}{{ $labels.nodename }}{% endraw %}'
+  - alert: high_load_on_node_WARNING
+    expr: sum by(nodename) (((node_load1 / count without(cpu, mode) (node_cpu_seconds_total{mode="system"}))* on(instance) group_left(nodename) node_uname_info * 100) >= {{ node_load_avg_threshold_Warning }} and ((node_load1 / count without(cpu, mode) (node_cpu_seconds_total{mode="system"}))* on(instance) group_left(nodename) node_uname_info * 100) < {{ node_load_avg_threshold_Critical }} )
+    for: 5m
+    labels:
+      severity: WARNING
     annotations:
       description: '{% raw %}{{ $labels.nodename }}{% endraw %} ({% raw %}{{ $labels.host }}{% endraw %}) has a high load average. Load average is {% raw %}{{ $value }}{% endraw %}%.'
       summary: 'HIGH LOAD AVERAGE WARNING ON {% raw %}{{ $labels.nodename }}{% endraw %}'
-  - alert: monitoring_service_down
+  - alert: high_load_on_node_CRITICAL
+    expr: sum by(nodename) (((node_load1 / count without(cpu, mode) (node_cpu_seconds_total{mode="system"}))* on(instance) group_left(nodename) node_uname_info * 100) >= {{ node_load_avg_threshold_Critical }} and ((node_load1 / count without(cpu, mode) (node_cpu_seconds_total{mode="system"}))* on(instance) group_left(nodename) node_uname_info * 100) < {{ node_load_avg_threshold_Fatal }} )
+    for: 5m
+    labels:
+      severity: CRITICAL
+    annotations:
+      description: '{% raw %}{{ $labels.nodename }}{% endraw %} ({% raw %}{{ $labels.host }}{% endraw %}) has a high load average. Load average is {% raw %}{{ $value }}{% endraw %}%.'
+      summary: 'HIGH LOAD AVERAGE WARNING ON {% raw %}{{ $labels.nodename }}{% endraw %}'
+  - alert: high_load_on_node_FATAL
+    expr: sum by(nodename) ((node_load1 / count without(cpu, mode) (node_cpu_seconds_total{mode="system"}))* on(instance) group_left(nodename) node_uname_info * 100) >= {{ node_load_avg_threshold_Fatal }}
+    for: 5m
+    labels:
+      severity: FATAL
+    annotations:
+      description: '{% raw %}{{ $labels.nodename }}{% endraw %} ({% raw %}{{ $labels.host }}{% endraw %}) has a high load average. Load average is {% raw %}{{ $value }}{% endraw %}%.'
+      summary: 'HIGH LOAD AVERAGE WARNING ON {% raw %}{{ $labels.nodename }}{% endraw %}'
+  - alert: monitoring_service_down_FATAL
     expr: up == 0
     for: 1m
+    labels:
+      severity: FATAL
     annotations:
       description: 'The monitoring service {% raw %}{{ $labels.job }}{% endraw %} is down.'
       summary: 'MONITORING SERVICE DOWN WARNING: NODE {% raw %}{{ $labels.host }}{% endraw %}'
-  - alert: node_running_out_of_disk_space
-    expr: sum by(nodename) ((node_filesystem_size_bytes{mountpoint="/"} - node_filesystem_free_bytes{mountpoint="/"}) * 100 / node_filesystem_size_bytes{mountpoint="/"} * on(instance) group_left(nodename) node_uname_info) > 80
+  - alert: node_running_out_of_disk_space_WARNING
+    expr: sum by(nodename) (((node_filesystem_size_bytes{mountpoint="/"} - node_filesystem_free_bytes{mountpoint="/"}) * 100 / node_filesystem_size_bytes{mountpoint="/"} * on(instance) group_left(nodename) node_uname_info) >= {{ node_disk_usage_percentage_threshold_Warning }} and ((node_filesystem_size_bytes{mountpoint="/"} - node_filesystem_free_bytes{mountpoint="/"}) * 100 / node_filesystem_size_bytes{mountpoint="/"} * on(instance) group_left(nodename) node_uname_info) < {{ node_disk_usage_percentage_threshold_Critical }} )
+    for: 1m
+    labels:
+      severity: WARNING
+    annotations:
+      description: 'More than {{ node_disk_usage_percentage_threshold_Warning }}% of disk used. Disk usage is {% raw %}{{ humanize $value }}{% endraw %}%'
+      summary: 'LOW DISK SPACE WARNING: NODE {% raw %}{{ $labels.nodename }}{% endraw %}'
+  - alert: node_running_out_of_disk_space_CRITICAL
+    expr: sum by(nodename) (((node_filesystem_size_bytes{mountpoint="/"} - node_filesystem_free_bytes{mountpoint="/"}) * 100 / node_filesystem_size_bytes{mountpoint="/"} * on(instance) group_left(nodename) node_uname_info) >= {{ node_disk_usage_percentage_threshold_Critical }} and ((node_filesystem_size_bytes{mountpoint="/"} - node_filesystem_free_bytes{mountpoint="/"}) * 100 / node_filesystem_size_bytes{mountpoint="/"} * on(instance) group_left(nodename) node_uname_info) < {{ node_disk_usage_percentage_threshold_Fatal }} )
+    for: 1m
+    labels:
+      severity: CRITICAL
+    annotations:
+      description: 'More than {{ node_disk_usage_percentage_threshold_Critical }}% of disk used. Disk usage is {% raw %}{{ humanize $value }}{% endraw %}%'
+      summary: 'LOW DISK SPACE WARNING: NODE {% raw %}{{ $labels.nodename }}{% endraw %}'
+  - alert: node_running_out_of_disk_space_FATAL
+    expr: sum by(nodename) ((node_filesystem_size_bytes{mountpoint="/"} - node_filesystem_free_bytes{mountpoint="/"}) * 100 / node_filesystem_size_bytes{mountpoint="/"} * on(instance) group_left(nodename) node_uname_info) >= {{ node_disk_usage_percentage_threshold_Fatal }}
     for: 1m
+    labels:
+      severity: FATAL
     annotations:
+      description: 'More than {{ node_disk_usage_percentage_threshold_Fatal }}% of disk used. Disk usage is {% raw %}{{ humanize $value }}{% endraw %}%'
+      summary: 'LOW DISK SPACE WARNING: NODE {% raw %}{{ $labels.nodename }}{% endraw %}'
diff --git a/ansible/roles/stack-monitor/templates/alertrules.task.yml b/ansible/roles/stack-monitor/templates/alertrules.task.yml
index f30b3e135..34dbef54b 100644
--- a/ansible/roles/stack-monitor/templates/alertrules.task.yml
+++ b/ansible/roles/stack-monitor/templates/alertrules.task.yml
@@ -1,25 +1,59 @@
 groups:
 - name: alertrules.task
   rules:
-  - alert: high_cpu_usage_on_container
-    expr: sum by(container_label_com_docker_swarm_service_name, container_label_com_docker_swarm_task_name,
-      instance) (rate(container_cpu_usage_seconds_total{container_label_com_docker_swarm_task_name=~".+"}[5m]))
-      * 100 > {{ container_cpu_usage_percentage_theshold }}
+  - alert: high_cpu_usage_on_container_WARNING
+    expr: sum by(container_label_com_docker_swarm_service_name, container_label_com_docker_swarm_task_name, instance) (rate(container_cpu_usage_seconds_total{container_label_com_docker_swarm_task_name=~".+"}[5m])) * 100 >= {{ container_cpu_usage_percentage_threshold_Warning }} and sum by(container_label_com_docker_swarm_service_name, container_label_com_docker_swarm_task_name, instance) (rate(container_cpu_usage_seconds_total{container_label_com_docker_swarm_task_name=~".+"}[5m])) * 100 < {{ container_cpu_usage_percentage_threshold_Critical }}
     for: 1m
+    labels:
+      severity: WARNING
     annotations:
       description: '{% raw %}{{ $labels.container_label_com_docker_swarm_task_name }}{% endraw %} is using {% raw %}{{ $value }}{% endraw %}% CPU. Threshold is : {{ container_cpu_usage_percentage_theshold }}%'
       summary: 'HIGH CPU USAGE WARNING: TASK {% raw %}{{ $labels.container_label_com_docker_swarm_task_name }}{% endraw %} on {% raw %}{{ $labels.instance }}{% endraw %}'
-
-  - alert: high_memory_usage_on_container
-    expr: (container_memory_usage_bytes{container_label_com_docker_swarm_task_name=~".+"} / container_spec_memory_limit_bytes) * 100 > {{ container_memory_usage_percentage_theshold }} < Inf
+  - alert: high_cpu_usage_on_container_CRITICAL
+    expr: sum by(container_label_com_docker_swarm_service_name, container_label_com_docker_swarm_task_name, instance) (rate(container_cpu_usage_seconds_total{container_label_com_docker_swarm_task_name=~".+"}[5m])) * 100 >= {{ container_cpu_usage_percentage_threshold_Critical }} and sum by(container_label_com_docker_swarm_service_name, container_label_com_docker_swarm_task_name, instance) (rate(container_cpu_usage_seconds_total{container_label_com_docker_swarm_task_name=~".+"}[5m])) * 100 < {{ container_cpu_usage_percentage_threshold_Fatal }}
     for: 1m
+    labels:
+      severity: CRITICAL
+    annotations:
+      description: '{% raw %}{{ $labels.container_label_com_docker_swarm_task_name }}{% endraw %} is using {% raw %}{{ $value }}{% endraw %}% CPU. Threshold is : {{ container_cpu_usage_percentage_threshold_Critical }}%'
+      summary: 'HIGH CPU USAGE WARNING: TASK {% raw %}{{ $labels.container_label_com_docker_swarm_task_name }}{% endraw %} on {% raw %}{{ $labels.instance }}{% endraw %}'
+  - alert: high_cpu_usage_on_container_FATAL
+    expr: sum by(container_label_com_docker_swarm_service_name, container_label_com_docker_swarm_task_name, instance) (rate(container_cpu_usage_seconds_total{container_label_com_docker_swarm_task_name=~".+"}[5m])) * 100 >= {{ container_cpu_usage_percentage_threshold_Fatal }}
+    for: 1m
+    labels:
+      severity: FATAL
+    annotations:
+      description: '{% raw %}{{ $labels.container_label_com_docker_swarm_task_name }}{% endraw %} is using {% raw %}{{ $value }}{% endraw %}% CPU. Threshold is : {{ container_cpu_usage_percentage_threshold_Fatal }}%'
+      summary: 'HIGH CPU USAGE WARNING: TASK {% raw %}{{ $labels.container_label_com_docker_swarm_task_name }}{% endraw %} on {% raw %}{{ $labels.instance }}{% endraw %}'
+  - alert: high_memory_usage_on_container_WARNING
+    expr: (container_memory_usage_bytes{container_label_com_docker_swarm_task_name=~".+"} / container_spec_memory_limit_bytes) * 100 >= {{ container_memory_usage_percentage_threshold_Warning }} and (container_memory_usage_bytes{container_label_com_docker_swarm_task_name=~".+"} / container_spec_memory_limit_bytes) * 100 < {{ container_memory_usage_percentage_threshold_Critical }}
+    for: 1m
+    labels:
+      severity: WARNING
+    annotations:
+      description: '{% raw %}{{ $labels.container_label_com_docker_swarm_task_name }}{% endraw %} is using {% raw %}{{ $value }}{% endraw %}% memory. Threshold is : {{ container_memory_usage_percentage_threshold_Warning }} %'
+      summary: 'HIGH MEMORY USAGE WARNING: TASK {% raw %}{{ $labels.container_label_com_docker_swarm_task_name }}{% endraw %} on {% raw %}{{ $labels.instance }}{% endraw %}'
+  - alert: high_memory_usage_on_container_CRITICAL
+    expr: (container_memory_usage_bytes{container_label_com_docker_swarm_task_name=~".+"} / container_spec_memory_limit_bytes) * 100 >= {{ container_memory_usage_percentage_threshold_Critical }} and (container_memory_usage_bytes{container_label_com_docker_swarm_task_name=~".+"} / container_spec_memory_limit_bytes) * 100 < {{ container_memory_usage_percentage_threshold_Fatal }}
+    for: 1m
+    labels:
+      severity: CRITICAL
+    annotations:
+      description: '{% raw %}{{ $labels.container_label_com_docker_swarm_task_name }}{% endraw %} is using {% raw %}{{ $value }}{% endraw %}% memory. Threshold is : {{ container_memory_usage_percentage_threshold_Critical }} %'
+      summary: 'HIGH MEMORY USAGE WARNING: TASK {% raw %}{{ $labels.container_label_com_docker_swarm_task_name }}{% endraw %} on {% raw %}{{ $labels.instance }}{% endraw %}'
+  - alert: high_memory_usage_on_container_FATAL
+    expr: (container_memory_usage_bytes{container_label_com_docker_swarm_task_name=~".+"} / container_spec_memory_limit_bytes) * 100 >= {{ container_memory_usage_percentage_threshold_Fatal }}
+    for: 1m
+    labels:
+      severity: FATAL
     annotations:
       description: '{% raw %}{{ $labels.container_label_com_docker_swarm_task_name }}{% endraw %} is using {% raw %}{{ $value }}{% endraw %}% memory. Threshold is : {{ container_memory_usage_percentage_theshold }} %'
       summary: 'HIGH MEMORY USAGE WARNING: TASK {% raw %}{{ $labels.container_label_com_docker_swarm_task_name }}{% endraw %} on {% raw %}{{ $labels.instance }}{% endraw %}'
-
-  - alert: replicas_uneven
+  - alert: replicas_uneven_FATAL
     expr: sum by (service_name) (docker_service_replicas_expected != docker_service_replicas_running) 
     for: 1m
+    labels:
+      severity: FATAL
     annotations:
       description: 'UNEVEN REPLICAS COUNT FOR {% raw %}{{ $labels.service_name }}{% endraw %}'
       summary: 'UNEVEN REPLICAS COUNT: {% raw %}{{ $labels.service_name }}{% endraw %} is having uneven count'
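
Since every rule file above is a Jinja template, a rendering typo in a threshold variable only
surfaces when Prometheus reloads its rules. A small validation step can catch that earlier; a
hedged sketch of an Ansible task that runs promtool against the rendered files (the rules path
and the availability of the promtool binary on the target host are assumptions, not part of
this role today):

    - name: Validate rendered alert rules with promtool
      shell: promtool check rules /etc/prometheus/rules/alertrules.*.yml
      changed_when: false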
-- 
GitLab