diff --git a/ansible/inventory/env/group_vars/all.yml b/ansible/inventory/env/group_vars/all.yml
index 2b499566bf601312ffbd4604b093d0cfd3db35f9..615a971a74a4271c5a8e54b4f4c705e7b54c0a21 100644
--- a/ansible/inventory/env/group_vars/all.yml
+++ b/ansible/inventory/env/group_vars/all.yml
@@ -483,74 +483,6 @@ sunbird_otp_expiration: 1800
 sunbird_otp_length: 6
 sunbird_help_link_visibility: FALSE
 
-
-
-#Container Limits
-proxy_replicas: 1
-proxy_reservation_memory: 64M
-proxy_limit_memory: 128M
-kong_replicas: 1
-kong_reservation_memory: 64M
-kong_limit_memory: 256M
-echo_service_replicas: 1
-echo_service_reservation_memory: 8M
-echo_service_limit_memory: 16M
-adminutil_replicas: 1
-adminutil_reservation_memory: 300M
-adminutil_limit_memory: 300M
-learner_limit_cpu: 1
-learner_replicas: 1
-learner_reservation_memory: 1000M
-learner_limit_memory: 1000M
-logger_logstash_heap_size: 512M
-logger_logstash_replicas: 1
-logger_logstash_reservation_memory: 512M
-logger_logstash_limit_memory: 750M
-logger_kibana_reservation_memory: 750M
-logger_kibana_limit_memory: 750M
-logger_logspout_reservation_memory: 200M
-logger_logspout_limit_memory: 200M
-logger_oauth_reservation_memory: 32M
-logger_oauth_limit_memory: 32M
-prometheus_reservation_memory: 1G
-prometheus_limit_memory: 1G
-alertmanager_reservation_memory: 100M
-alertmanager_limit_memory: 100M
-node_exporter_reservation_memory: 16M
-node_exporter_limit_memory: 32M
-cadvisor_reservation_memory: 100M
-cadvisor_limit_memory: 100M
-elasticsearch_exporter_reservation_memory: 8M
-elasticsearch_exporter_limit_memory: 24M
-postgres_exporter_reservation_memory: 16M
-postgres_exporter_limit_memory: 32M
-statsd_exporter_reservation_memory: 8M
-statsd_exporter_limit_memory: 16M
-blackbox_exporter_reservation_memory: 16M
-blackbox_exporter_limit_memory: 32M
-jsonpath_exporter_reservation_memory: 32M
-jsonpath_exporter_limit_memory: 64M
-azure_blob_exporter_reservation_memory: 16M
-azure_blob_exporter_limit_memory: 64M
-grafana_reservation_memory: 100M
-grafana_limit_memory: 100M
-monitor_logstash_replicas: 1
-monitor_logstash_heap_size: 256m
-monitor_logstash_reservation_memory: 256M
-monitor_logstash_limit_memory: 350M
-monitor_logspout_reservation_memory: 150M
-monitor_logspout_limit_memory: 150M
-player_replicas: 1
-player_reservation_memory: 256M
-player_limit_memory: 512M
-content_replicas: 1
-content_reservation_memory: 1000M
-content_limit_memory: 1000M
-badger_replicas: 1
-badger_reservation_memory: 500MB
-badger_limit_memory: 500MB
-
-
 # not required
 sunbird_image_storage_url: "https://{{sunbird_public_storage_account_name}}.blob.core.windows.net/dial/"
 vault_auth_key: "{{core_vault_auth_key}}"
diff --git a/ansible/logging.yml b/ansible/logging.yml
index f3bbad7e199cdeb1b2d0564164e5bd6344489fbb..fd3dc74c9538edf0d972fa05fef9b8fe59caed6d 100644
--- a/ansible/logging.yml
+++ b/ansible/logging.yml
@@ -1,12 +1,12 @@
-- name: Spray cluster name to nodes
-  hosts: swarm-nodes
+---
+- hosts: swarm-nodes
   become: yes
   tasks:
-  - copy: dest=/home/deployer/cluster_name content="{{ cluster_name }}"
+  - name: Spray cluster name to nodes
+    copy: dest=/home/deployer/cluster_name content="{{ cluster_name }}"
     when: cluster_name is defined
   tags:
     - stack-logger
-  run_once: true
 
 - hosts: swarm-bootstrap-manager
   become: yes
diff --git a/ansible/roles/stack-adminutil/defaults/main.yml b/ansible/roles/stack-adminutil/defaults/main.yml
index a0f0d3126aee754da59e2b1ad533b03efed06b8e..6991c16cfe28eeb18a6354d94cb892be4c3d7ace 100644
--- a/ansible/roles/stack-adminutil/defaults/main.yml
+++ b/ansible/roles/stack-adminutil/defaults/main.yml
@@ -1,5 +1,7 @@
 ---
 adminutil_replicas: 1
+adminutil_reserve_cpu: 0.2
+adminutil_limit_cpu: 0.5
 adminutil_reservation_memory: 512M
 adminutil_limit_memory: 1024M
 
diff --git a/ansible/roles/stack-adminutil/templates/stack-adminutil.yml b/ansible/roles/stack-adminutil/templates/stack-adminutil.yml
index efc505215fde9cd8a8654489bcbd6f86a0bb09a0..8b38da7e77e7b988d00c7f11fa50795c6c8a2433 100644
--- a/ansible/roles/stack-adminutil/templates/stack-adminutil.yml
+++ b/ansible/roles/stack-adminutil/templates/stack-adminutil.yml
@@ -21,8 +21,10 @@ services:
       replicas: {{ adminutil_replicas }}
       resources:
         reservations:
+          cpus: "{{ adminutil_reserve_cpu }}"
           memory: "{{ adminutil_reservation_memory }}"
         limits:
+          cpus: "{{ adminutil_limit_cpu }}"
           memory: "{{ adminutil_limit_memory }}"
       update_config:
         parallelism: 1
diff --git a/ansible/roles/stack-api-manager/defaults/main.yml b/ansible/roles/stack-api-manager/defaults/main.yml
index 1cd336a2d4763b3072342c15ca34fb6a0d50a447..535c5f063f3b1fd2e6b6c5b12aaa572374449e3d 100644
--- a/ansible/roles/stack-api-manager/defaults/main.yml
+++ b/ansible/roles/stack-api-manager/defaults/main.yml
@@ -4,8 +4,12 @@ kong_ssl: true
 kong_replicas: 1
 kong_reservation_memory: 64M
 kong_limit_memory: 256M
+kong_reserve_cpu: 0.3
+kong_limit_cpu: 1
 
 echo_service_replicas: 1
 echo_service_reservation_memory: 8M
 echo_service_limit_memory: 16M
+echo_service_reserve_cpu: 0.1
+echo_service_limit_cpu: 0.2
 kong_version: "{{kong_version}}"
diff --git a/ansible/roles/stack-api-manager/templates/stack-api-manager.yml b/ansible/roles/stack-api-manager/templates/stack-api-manager.yml
index a3c8cea043440b8f0d46b6c77fafaab13dc4b407..301c7f50012c610d2ccd40b5644d92672684d134 100644
--- a/ansible/roles/stack-api-manager/templates/stack-api-manager.yml
+++ b/ansible/roles/stack-api-manager/templates/stack-api-manager.yml
@@ -14,10 +14,11 @@ services:
       replicas: {{ kong_replicas }}
       resources:
         reservations:
+          cpus: "{{ kong_reserve_cpu }}"
           memory: "{{ kong_reservation_memory }}"
         limits:
           memory: "{{ kong_limit_memory }}"
-          cpus: "{{ kong_limit_cpu | default('1') }}"
+          cpus: "{{ kong_limit_cpu }}"
       update_config:
         parallelism: 1
         delay: 30s
@@ -33,8 +34,10 @@ services:
       resources:
         reservations:
           memory: "{{ echo_service_reservation_memory }}"
+          cpus: "{{ echo_service_reserve_cpu }}"
         limits:
           memory: "{{ echo_service_limit_memory }}"
+          cpus: "{{ echo_service_limit_cpu }}"
       update_config:
         parallelism: 1
         delay: 5s
diff --git a/ansible/roles/stack-badger/defaults/main.yml b/ansible/roles/stack-badger/defaults/main.yml
index 1e5632417cf0b6d494eda2cf265d809140e4f91a..5f31d0e327f4a23764df4598439d2841db2dee2d 100644
--- a/ansible/roles/stack-badger/defaults/main.yml
+++ b/ansible/roles/stack-badger/defaults/main.yml
@@ -1 +1,6 @@
 badger_admin_user: admin
+badger_replicas: 1
+badger_reservation_memory: 300MB
+badger_limit_memory: 500MB
+badger_reserve_cpu: 0.1
+badger_limit_cpu: 0.5
diff --git a/ansible/roles/stack-badger/tasks/.user.yml.swp b/ansible/roles/stack-badger/tasks/.user.yml.swp
deleted file mode 100644
index a5900099174a499850ce5d17806a36032e0bdd3a..0000000000000000000000000000000000000000
Binary files a/ansible/roles/stack-badger/tasks/.user.yml.swp and /dev/null differ
diff --git a/ansible/roles/stack-badger/tasks/main.yml b/ansible/roles/stack-badger/tasks/main.yml
index 8050187369b39ab14e5e8fce5b0ecc3e15875a59..6ded090df013b5b600e44bab247d9aa86d5ca69d 100644
--- a/ansible/roles/stack-badger/tasks/main.yml
+++ b/ansible/roles/stack-badger/tasks/main.yml
@@ -28,7 +28,7 @@
 
 - name: Deploy badger service
   become: yes
-  shell: "docker service create --with-registry-auth --replicas {{ badger_replicas }} -p 8004:8004 --name badger-service --hostname badger-service --reserve-memory {{ badger_reservation_memory }} --limit-memory {{ badger_limit_memory }}  --network application_default --config source=settings_local.py,target=/badger/code/apps/mainsite/settings_local.py,mode=0644 {{hub_org}}/{{image_name}}:{{image_tag}}"
+  shell: "docker service create --with-registry-auth --replicas {{ badger_replicas }} -p 8004:8004 --name badger-service --hostname badger-service --reserve-cpu {{ badger_reserve_cpu  }} --limit-cpu {{ badger_limit_cpu }} --reserve-memory {{ badger_reservation_memory }} --limit-memory {{ badger_limit_memory }}  --network application_default --config source=settings_local.py,target=/badger/code/apps/mainsite/settings_local.py,mode=0644 {{hub_org}}/{{image_name}}:{{image_tag}}"
   args:
     chdir: /home/deployer/stack
 
diff --git a/ansible/roles/stack-logger/defaults/main.yml b/ansible/roles/stack-logger/defaults/main.yml
index f21a32040361c1ffb2f9d62465d696febfddf46b..d9bef3e8d72fda732e85f4cea269cb3997e53490 100644
--- a/ansible/roles/stack-logger/defaults/main.yml
+++ b/ansible/roles/stack-logger/defaults/main.yml
@@ -1,9 +1,11 @@
 # Please change memory requirements if heap_size is changed
 logger_logstash_heap_size: 512M
-logger_logstash_replicas: 2
 logger_logstash_reservation_memory: 512M
-logger_logstash_limit_memory: 750M
+logger_logstash_limit_memory: 512M
+logger_logstash_reservation_cpu: 0.2
+logger_logstash_limit_cpu: 0.5
+
 logger_logspout_reservation_memory: 75M
-logger_logspout_limit_memory: 75M
-logger_logstash_reservation_memory: 1024M
-logger_logstash_limit_memory: 1024M
+logger_logspout_limit_memory: 150M
+logger_logspout_reservation_cpu: 0.1
+logger_logspout_limit_cpu: 0.2
diff --git a/ansible/roles/stack-logger/templates/stack-logger.yml b/ansible/roles/stack-logger/templates/stack-logger.yml
index 29f1d77b859725b18345afb5ccb44b05fe4de065..1b3b5104803d828f818a46e0217170afdad2b026 100644
--- a/ansible/roles/stack-logger/templates/stack-logger.yml
+++ b/ansible/roles/stack-logger/templates/stack-logger.yml
@@ -6,12 +6,14 @@ services:
     image: sunbird/logstash:6.2.3
     command: /usr/share/logstash/bin/logstash -f /etc/logstash/conf.d/logstash.conf
     deploy:
-      replicas: {{ logger_logstash_replicas }}
+      mode: global
       resources:
         reservations:
           memory: "{{ logger_logstash_reservation_memory }}"
+          cpus: "{{ logger_logstash_reservation_cpu }}"
         limits:
           memory: "{{ logger_logstash_limit_memory }}"
+          cpus: "{{ logger_logstash_limit_cpu }}"
     environment:
       - LOGSPOUT=ignore
       - LS_HEAP_SIZE={{ logger_logstash_heap_size }}
@@ -27,12 +29,14 @@ services:
     image: sunbird/logstash:2.4.1
     command: logstash -f /conf/logstash.conf
     deploy:
-      replicas: {{ logger_logstash_replicas }}
+      mode: global
       resources:
         reservations:
           memory: "{{ logger_logstash_reservation_memory }}"
+          cpus: "{{ logger_logspout_reservation_cpu }}"
         limits:
           memory: "{{ logger_logstash_limit_memory }}"
+          cpus: "{{ logger_logspout_limit_cpu }}"
     environment:
       - LOGSPOUT=ignore
       - LS_HEAP_SIZE={{ logger_logstash_heap_size }}
@@ -54,8 +58,10 @@ services:
       resources:
         reservations:
           memory: "{{ logger_logspout_reservation_memory }}"
+          cpus: "{{ logger_logspout_reservation_cpu }}"
         limits:
           memory: "{{ logger_logspout_limit_memory }}"
+          cpus: "{{ logger_logspout_limit_cpu }}"
     environment:
       - SYSLOG_FORMAT=rfc3164
       - INACTIVITY_TIMEOUT=1m
diff --git a/ansible/roles/stack-oauth/defaults/main.yml b/ansible/roles/stack-oauth/defaults/main.yml
index cccc522e060145d9bd78844885e42e9b9324e663..f4a60743644cdeffda53f2e1de322629ca236373 100644
--- a/ansible/roles/stack-oauth/defaults/main.yml
+++ b/ansible/roles/stack-oauth/defaults/main.yml
@@ -7,4 +7,4 @@ logger_oauth_limit_memory: 32M
 kibana_oauth_authenticated_email_domains: []
 kibana_oauth_authenticated_email_ids: []
 
-swarm_agent_for_proxy: "{{groups['swarm-worker'][0]}}"
\ No newline at end of file
+swarm_agent_for_proxy: "{{groups['swarm-worker'][0]}}"
diff --git a/ansible/roles/stack-proxy-private/defaults/main.yml b/ansible/roles/stack-proxy-private/defaults/main.yml
index f71f4ddcb67084d794b6a009f40e3c8648ffe8d3..92107bf4752d0c217243a919ea5b24e9d6df887b 100644
--- a/ansible/roles/stack-proxy-private/defaults/main.yml
+++ b/ansible/roles/stack-proxy-private/defaults/main.yml
@@ -1,9 +1,11 @@
 ---
 hub_org: sunbird
 nginx_per_ip_connection_limit: 400
-proxy_replicas: 1
-proxy_reservation_memory: 32M
-proxy_limit_memory: 64M
+private_proxy_replicas: 1
+private_proxy_reservation_memory: 64M
+private_proxy_limit_memory: 128M
+private_proxy_reserve_cpu: 0.1
+private_proxy_limit_cpu: 0.3
 nginx_per_ip_connection_limit: 400
 merge_proxy_server_name: 
 proxy_prometheus: false
diff --git a/ansible/roles/stack-proxy-private/tasks/main.yml b/ansible/roles/stack-proxy-private/tasks/main.yml
index b29a861d58999586e9a98a120628da6cff400c81..37ac574a5f6bd9745052b3521ca42b4cbf380baa 100644
--- a/ansible/roles/stack-proxy-private/tasks/main.yml
+++ b/ansible/roles/stack-proxy-private/tasks/main.yml
@@ -16,13 +16,13 @@
 - name: Save stack file
   template: src=stack-proxy.yml dest=/home/deployer/stack/proxy-private.yml mode=0644
 
-- name: Save proxy-default.conf
-  template: src=proxy-default.conf dest=/home/deployer/config/proxy-default-private.conf mode=0644
-
 - name: Remove stack
-  shell: "docker stack rm proxy-private"
+  shell: "docker stack rm private"
   ignore_errors: yes
 
+- name: Save proxy-default.conf
+  template: src=proxy-default.conf dest=/home/deployer/config/proxy-default-private.conf mode=0644
+
 - name: Ensure network exists
   shell: "docker network create --driver overlay {{item.name}} --subnet {{item.subnet}}"
   with_items:
diff --git a/ansible/roles/stack-proxy-private/templates/stack-proxy.yml b/ansible/roles/stack-proxy-private/templates/stack-proxy.yml
index 722328713930e69d06fb36244a81a24d9b9dada9..15df509a8f4684acf942bc0581f8fdb35fb49c83 100644
--- a/ansible/roles/stack-proxy-private/templates/stack-proxy.yml
+++ b/ansible/roles/stack-proxy-private/templates/stack-proxy.yml
@@ -7,12 +7,14 @@ services:
     ports:
       - "31480:80"
     deploy:
-      replicas: 1
+      replicas: {{ private_proxy_replicas }}
       resources:
         reservations:
-          memory: "{{ proxy_reservation_memory }}"
+          memory: "{{ private_proxy_reservation_memory }}"
+          cpus: "{{ private_proxy_reserve_cpu }}"
         limits:
-          memory: "{{ proxy_limit_memory }}"
+          memory: "{{ private_proxy_limit_memory }}"
+          cpus: "{{ private_proxy_limit_cpu }}"
       update_config:
         parallelism: 1
         delay: 30s
diff --git a/ansible/roles/stack-proxy/defaults/main.yml b/ansible/roles/stack-proxy/defaults/main.yml
index 143ca9ddfc13a999ea0762d64b2d8847fa27d9c3..579709e41228342273d2b2dded13c7fb96facccb 100644
--- a/ansible/roles/stack-proxy/defaults/main.yml
+++ b/ansible/roles/stack-proxy/defaults/main.yml
@@ -2,8 +2,10 @@
 hub_org: sunbird
 nginx_per_ip_connection_limit: 400
 proxy_replicas: 1
-proxy_reservation_memory: 32M
-proxy_limit_memory: 64M
+proxy_reservation_memory: 64M
+proxy_limit_memory: 128M
+proxy_reserve_cpu: 0.1
+proxy_limit_cpu: 0.3
 nginx_per_ip_connection_limit: 400
 merge_proxy_server_name: 
 proxy_prometheus: false
diff --git a/ansible/roles/stack-proxy/templates/stack-proxy.yml b/ansible/roles/stack-proxy/templates/stack-proxy.yml
index d7c111d821c55a5e6f62c3a0b1c838dcf03a8ead..b13154fd0dfcb928e89c2b025dbf471e47d3ea7a 100644
--- a/ansible/roles/stack-proxy/templates/stack-proxy.yml
+++ b/ansible/roles/stack-proxy/templates/stack-proxy.yml
@@ -18,8 +18,10 @@ services:
       resources:
         reservations:
           memory: "{{ proxy_reservation_memory }}"
+          cpus: "{{ proxy_reserve_cpu  }}"
         limits:
           memory: "{{ proxy_limit_memory }}"
+          cpus: "{{ proxy_limit_cpu  }}"
       update_config:
         parallelism: 1
         delay: 30s
diff --git a/ansible/roles/stack-sunbird/defaults/main.yml b/ansible/roles/stack-sunbird/defaults/main.yml
index a0b8479c83bf347abedcc5ec666887fed22b52c9..ba67525b41c384ed1a2315a0449319b869688ab3 100644
--- a/ansible/roles/stack-sunbird/defaults/main.yml
+++ b/ansible/roles/stack-sunbird/defaults/main.yml
@@ -17,9 +17,10 @@ actor_reservation_memory: 768M
 actor_limit_memory: 1024M
 
 learner_replicas: 1
-learner_reservation_memory: 256M
-learner_limit_memory: 512M
-learner_reservation_cpu: 0
+learner_reservation_memory: 512M
+learner_limit_memory: 1024M
+learner_reservation_cpu: 0.2
+learner_limit_cpu: 1
 learner_java_mem_limit: '-Xmx600m'
 
 #Config Service Vars
@@ -33,13 +34,22 @@ config_reservation_cpu: 0
 notification_replicas: 1
 notification_limit_memory: 500MB
 notification_limit_cpu: 1
+notification_reserve_memory: 300MB
+notification_reserve_cpu: 0.1
 
 #Report service vars
 report_replicas: 1
 report_reservation_memory: 300M
+report_reservation_cpu: 0.1
 report_limit_memory: 500M
 report_limit_cpu: 1
 
+player_replicas: 1
+player_reservation_memory: 128M
+player_limit_memory: 750M
+player_reserve_cpu: 0.1
+player_limit_cpu: 1
+
 # This variable is not to access learner service but to call the api
 # learner-service:9000/org/v1/search
 sunbird_learner_service_base_url: http://kong:8000/
@@ -47,18 +57,12 @@ sunbird_learner_service_base_url: http://kong:8000/
 telemetry_replicas: 1
 telemetry_reservation_memory: 300M
 telemetry_limit_memory: 300M
+telemetry_reservation_cpu: 0.1
 telemetry_limit_cpu: 1
 sunbird_telemetry_kafka_servers: "{{groups['kafka']|join(':9092,')}}:9092"
 sunbird_data_pipeline_kafka_servers: "{{groups['processing-cluster-kafka']|join(':9092,')}}:9092"
 
-player_replicas: 1
-player_reservation_memory: 64M
-player_limit_memory: 256M
 sunbird_build_number:
-content_replicas: 1
-content_reservation_memory: 64M
-content_java_mem_limit: '-Xmx600m'
-content_limit_memory: 256M
 keycloak_auth_server_url: "{{proto}}://{{proxy_server_name}}/auth"
 keycloak_realm: sunbird
 sunbird_web_url: "{{proto}}://{{proxy_server_name}}"
@@ -87,8 +91,6 @@ telemetry_logstash_reservation_memory: 1g
 telemetry_logstash_limit_memory: 2g
 sunbird_telemetry_api:
 sunbird_enable_signup:
-content_limit_cpu: 1
-content_reservation_cpu: 0
 
 user_org_replicas: 1
 user_org_reservation_memory: 750MB
@@ -100,23 +102,24 @@ cert_replicas: 1
 cert_reservation_memory: 500MB
 cert_limit_memory: 550MB
 cert_limit_cpu: 1
-cert_reservation_cpu: 0
+cert_reservation_cpu: 0.1
 
 cert_registry_replicas: 1
-cert_registry_limit_memory: 500MB
-cert_registry_limit_cpu: 1
+cert_registry_limit_memory: 512MB
+cert_registry_limit_cpu: 0.5
+cert_registry_reserve_cpu: 0.1
+cert_registry_reserve_memory: 256MB
 
 # Encryption service vars
 enc_replicas: 1
-enc_reservation_memory: 750MB
-enc_limit_memory: 800MB
-enc_limit_cpu: 1
-enc_reservation_cpu: 0
+enc_reservation_memory: 300MB
+enc_limit_memory: 500MB
+enc_limit_cpu: 0.5
+enc_reservation_cpu: 0.1
 postgres_port: 5432
 enc_dialect: postgres
 enc_entry_passwod: password
 
-
 telemetry_service_threads:
 telemetry_local_storage_enabled:
 telemetry_local_storage_type:
@@ -147,16 +150,16 @@ itext_license_enabled: false
 
 # Knowledge MW Service Config
 knowledge_mw_service_replicas: 1
-knowledge_mw_service_reservation_memory: 64M
-knowledge_mw_service_limit_memory: 256M
-knowledge_mw_service_reservation_cpu: 0
+knowledge_mw_service_reservation_memory: 200M
+knowledge_mw_service_limit_memory: 1000M
+knowledge_mw_service_reservation_cpu: 0.1
 knowledge_mw_service_limit_cpu: 1
 
 # Content Service Config
 content_service_replicas: 1
-content_service_reservation_memory: 1000M
+content_service_reservation_memory: 600M
 content_service_limit_memory: 1000M
-content_service_reservation_cpu: 0
+content_service_reservation_cpu: 0.1
 content_service_limit_cpu: 1
 content_java_mem_limit: '-Xmx600m'
 
@@ -164,23 +167,23 @@ content_java_mem_limit: '-Xmx600m'
 assessment_service_replicas: 1
 assessment_service_reservation_memory: 256M
 assessment_service_limit_memory: 512M
-assessment_service_reservation_cpu: 0
+assessment_service_reservation_cpu: 0.1
 assessment_service_limit_cpu: 1
 
 # LMS Service Config
 lms_service_replicas: 1
 lms_service_reservation_memory: 1000M
 lms_service_limit_memory: 1000M
-lms_service_reservation_cpu: 0
+lms_service_reservation_cpu: 0.1
 lms_service_limit_cpu: 1
 lms_java_mem_limit: '-Xmx600m'
 
 # Print Service Config
 print_service_replicas: 1
-print_service_reservation_memory: 64M
-print_service_limit_memory: 256M
-print_service_reservation_cpu: 0
-print_service_limit_cpu: 1
+print_service_reservation_memory: 128M
+print_service_limit_memory: 512M
+print_service_reservation_cpu: 0.1
+print_service_limit_cpu: 0.5
 
 ############################ kube vars #######################
 hub_org: sunbird
@@ -288,18 +291,16 @@ lms_liveness_readiness:
 apimanager_liveness_readiness:
   healthcheck: true
   readinessProbe:
-    httpGet:
-      path: /status
-      port: 8001
-    initialDelaySeconds: 30
-    periodSeconds: 30
+    tcpSocket:
+      port: 8000
+    initialDelaySeconds: 120
+    periodSeconds: 90
     timeoutSeconds: 10
     failureThreshold: 5
     successThreshold: 2
   livenessProbe:
-    httpGet:
-      path: /
-      port: 8001
+    tcpSocket:
+      port: 8000
     initialDelaySeconds: 30
     periodSeconds: 30
     timeoutSeconds: 10
diff --git a/ansible/roles/stack-sunbird/tasks/assessment-service.yml b/ansible/roles/stack-sunbird/tasks/assessment-service.yml
index cff61bb1348c4d6d8e3e93e883bb44ce4c995e72..f83528a529d73b10269fa5dc63a7aadfd1a83cb0 100644
--- a/ansible/roles/stack-sunbird/tasks/assessment-service.yml
+++ b/ansible/roles/stack-sunbird/tasks/assessment-service.yml
@@ -17,6 +17,6 @@
   shell: "docker config create assessment-service.conf /home/deployer/config/assessment-service.conf"
 
 - name: Deploy assessment-service
-  shell: "docker service create --with-registry-auth --replicas {{ assessment_service_replicas }} -p 9003:9000  --name assessment-service --hostname assessment-service --reserve-memory {{ assessment_service_reservation_memory }} --limit-memory {{ assessment_service_limit_memory }} --limit-cpu {{ assessment_service_limit_cpu }} --reserve-cpu {{ assessment_service_reservation_cpu }} --health-cmd 'wget -qO- assessment-service:9000/health || exit 1' --health-timeout 3s --health-retries 3  --network application_default --config source=assessment-service.conf,target=/home/sunbird/assessment-service-1.0-SNAPSHOT/config/application.conf,mode=0644  {{hub_org}}/{{image_name}}:{{image_tag}}"
+  shell: "docker service create --with-registry-auth --replicas {{ assessment_service_replicas }} -p 9003:9000  --name assessment-service --hostname assessment-service --reserve-memory {{ assessment_service_reservation_memory }} --limit-memory {{ assessment_service_limit_memory }} --limit-cpu {{ assessment_service_limit_cpu }} --reserve-cpu {{ assessment_service_reservation_cpu }} --health-cmd 'wget -qO- assessment-service:9000/health || exit 1' --health-timeout 10s --health-retries 5  --network application_default --config source=assessment-service.conf,target=/home/sunbird/assessment-service-1.0-SNAPSHOT/config/application.conf,mode=0644  {{hub_org}}/{{image_name}}:{{image_tag}}"
   args:
     chdir: /home/deployer/stack
diff --git a/ansible/roles/stack-sunbird/tasks/cert_registry_service.yml b/ansible/roles/stack-sunbird/tasks/cert_registry_service.yml
index d3b772133d5b400e9b03fc35addfacee0fb111e9..d23170bf2d5fd240c94c930d33a3f75759351dab 100644
--- a/ansible/roles/stack-sunbird/tasks/cert_registry_service.yml
+++ b/ansible/roles/stack-sunbird/tasks/cert_registry_service.yml
@@ -4,4 +4,4 @@
   ignore_errors: yes
 
 - name: Deploy cert registry service
-  shell: "docker service create --with-registry-auth --replicas {{ cert_registry_replicas }} -p 9013:9000  --name cert-registry-service --hostname cert-registry-service --limit-memory {{ cert_registry_limit_memory }} --limit-cpu {{ cert_registry_limit_cpu }} --health-cmd 'wget -qO- cert-registry-service:9000/service/health || exit 1' --health-timeout 3s --health-retries 3 --network application_default --env-file /home/deployer/env/sunbird_cert-registry-service.env  {{hub_org}}/{{image_name}}:{{image_tag}}"
+  shell: "docker service create --with-registry-auth --replicas {{ cert_registry_replicas }} -p 9013:9000  --name cert-registry-service --hostname cert-registry-service --limit-memory {{ cert_registry_limit_memory }} --limit-cpu {{ cert_registry_limit_cpu }} --reserve-memory {{ cert_registry_reserve_memory  }} --reserve-cpu {{ cert_registry_reserve_cpu  }}  --health-cmd 'wget -qO- cert-registry-service:9000/service/health || exit 1' --health-timeout 10s --health-retries 5 --network application_default --env-file /home/deployer/env/sunbird_cert-registry-service.env  {{hub_org}}/{{image_name}}:{{image_tag}}"
diff --git a/ansible/roles/stack-sunbird/tasks/cert_service.yml b/ansible/roles/stack-sunbird/tasks/cert_service.yml
index f510b9ae65ca52186860c0291d485e3d9091b01a..14444046238bbf2d7a8e8bb5bf8edf47511484b3 100644
--- a/ansible/roles/stack-sunbird/tasks/cert_service.yml
+++ b/ansible/roles/stack-sunbird/tasks/cert_service.yml
@@ -4,5 +4,4 @@
   ignore_errors: yes
 
 - name: Deploy cert service
-  shell: "docker service create --with-registry-auth --replicas {{ cert_replicas }} -p 9011:9000  --name cert-service --hostname cert-service --limit-memory {{ cert_limit_memory }} --limit-cpu {{ cert_limit_cpu }} --health-cmd 'wget -qO- cert-service:9000/service/health || exit 1' --health-timeout 3s --health-retries 3 --network application_default --env-file /home/deployer/env/sunbird_cert-service.env  {{hub_org}}/{{image_name}}:{{image_tag}}"
-  
\ No newline at end of file
+  shell: "docker service create --with-registry-auth --replicas {{ cert_replicas }} -p 9011:9000  --name cert-service --hostname cert-service --limit-memory {{ cert_limit_memory }} --limit-cpu {{ cert_limit_cpu }} --reserve-memory {{ cert_reservation_memory }} --reserve-cpu {{ cert_reservation_cpu }} --health-cmd 'wget -qO- cert-service:9000/service/health || exit 1' --health-timeout 10s --health-retries 5 --network application_default --env-file /home/deployer/env/sunbird_cert-service.env  {{hub_org}}/{{image_name}}:{{image_tag}}"
diff --git a/ansible/roles/stack-sunbird/tasks/enc_service.yml b/ansible/roles/stack-sunbird/tasks/enc_service.yml
index 1ca81400142e4a1b3a2e912ff1772c21accd0f12..31e2c16349e7256b11d0d41ae494ed6506503731 100644
--- a/ansible/roles/stack-sunbird/tasks/enc_service.yml
+++ b/ansible/roles/stack-sunbird/tasks/enc_service.yml
@@ -4,4 +4,4 @@
   ignore_errors: yes
 
 - name: Deploy enc service
-  shell: "docker service create --with-registry-auth --replicas {{ enc_replicas }} -p 9010:8013  --name enc-service --hostname enc-service --limit-memory {{ enc_limit_memory }} --limit-cpu {{ enc_limit_cpu }} --health-cmd 'wget -qO- enc-service:8013/service/health || exit 1' --health-timeout 3s --health-retries 3  --network application_default --env-file /home/deployer/env/sunbird_enc-service.env  {{hub_org}}/{{image_name}}:{{image_tag}}"
+  shell: "docker service create --with-registry-auth --replicas {{ enc_replicas }} -p 9010:8013  --name enc-service --hostname enc-service --limit-memory {{ enc_limit_memory }} --limit-cpu {{ enc_limit_cpu }} --reserve-cpu {{ enc_reservation_cpu }} --reserve-memory {{ enc_reservation_memory  }} --health-cmd 'wget -qO- enc-service:8013/service/health || exit 1' --health-timeout 10s --health-retries 5  --network application_default --env-file /home/deployer/env/sunbird_enc-service.env  {{hub_org}}/{{image_name}}:{{image_tag}}"
diff --git a/ansible/roles/stack-sunbird/tasks/knowledge-mw-service.yml b/ansible/roles/stack-sunbird/tasks/knowledge-mw-service.yml
index 9fd3e8aa3bd6323f1dfeae8068e871e598daffe6..343de7b0f70b8eae8e58b87989637d948c3fcefd 100644
--- a/ansible/roles/stack-sunbird/tasks/knowledge-mw-service.yml
+++ b/ansible/roles/stack-sunbird/tasks/knowledge-mw-service.yml
@@ -4,6 +4,6 @@
   ignore_errors: yes
 
 - name: Deploy knowledge-mw service
-  shell: "docker service create --with-registry-auth --replicas {{ knowledge_mw_service_replicas }} -p 5000:5000  --name knowledge-mw-service --hostname knowledge-mw-service --reserve-memory {{ knowledge_mw_service_reservation_memory }} --limit-memory {{ knowledge_mw_service_limit_memory }} --limit-cpu {{ knowledge_mw_service_limit_cpu }} --reserve-cpu {{ knowledge_mw_service_reservation_cpu }} --health-cmd 'wget -qO- knowledge-mw-service:5000/service/health || exit 1' --health-timeout 3s --health-retries 3  --network application_default --env-file /home/deployer/env/sunbird_knowledge-mw-service.env  {{hub_org}}/{{image_name}}:{{image_tag}}"
+  shell: "docker service create --with-registry-auth --replicas {{ knowledge_mw_service_replicas }} -p 5000:5000  --name knowledge-mw-service --hostname knowledge-mw-service --reserve-memory {{ knowledge_mw_service_reservation_memory }} --limit-memory {{ knowledge_mw_service_limit_memory }} --limit-cpu {{ knowledge_mw_service_limit_cpu }} --reserve-cpu {{ knowledge_mw_service_reservation_cpu }} --health-cmd 'wget -qO- knowledge-mw-service:5000/service/health || exit 1' --health-timeout 10s --health-retries 5  --network application_default --env-file /home/deployer/env/sunbird_knowledge-mw-service.env  {{hub_org}}/{{image_name}}:{{image_tag}}"
   args:
     chdir: /home/deployer/stack
diff --git a/ansible/roles/stack-sunbird/tasks/learner_service.yml b/ansible/roles/stack-sunbird/tasks/learner_service.yml
index cd76cb5f9fcd499a7aa3b2fe9fa885d6645b3445..3320ee97ddcfa27e92d738b5e5fdda0cea831cd5 100644
--- a/ansible/roles/stack-sunbird/tasks/learner_service.yml
+++ b/ansible/roles/stack-sunbird/tasks/learner_service.yml
@@ -4,6 +4,6 @@
   ignore_errors: yes
 
 - name: Deploy learner service
-  shell: "docker service create --with-registry-auth --replicas {{ learner_replicas }} -p 9000:9000  --name learner-service --hostname learner-service --reserve-memory {{ learner_reservation_memory }} --limit-memory {{ learner_limit_memory }} --limit-cpu {{ learner_limit_cpu }} --reserve-cpu {{ learner_reservation_cpu }} --health-cmd 'wget -qO- learner-service:9000/service/health || exit 1' --health-timeout 3s --health-retries 3  --network application_default --env JAVA_OPTIONS={{ learner_java_mem_limit }} --env-file /home/deployer/env/sunbird_learner-service.env  {{hub_org}}/{{image_name}}:{{image_tag}}"
+  shell: "docker service create --with-registry-auth --replicas {{ learner_replicas }} -p 9000:9000  --name learner-service --hostname learner-service --reserve-memory {{ learner_reservation_memory }} --limit-memory {{ learner_limit_memory }} --limit-cpu {{ learner_limit_cpu }} --reserve-cpu {{ learner_reservation_cpu }} --health-cmd 'wget -qO- learner-service:9000/service/health || exit 1' --health-timeout 10s --health-retries 5  --network application_default --env JAVA_OPTIONS={{ learner_java_mem_limit }} --env-file /home/deployer/env/sunbird_learner-service.env  {{hub_org}}/{{image_name}}:{{image_tag}}"
   args:
     chdir: /home/deployer/stack
diff --git a/ansible/roles/stack-sunbird/tasks/lms_service.yml b/ansible/roles/stack-sunbird/tasks/lms_service.yml
index 17cce16c0f1a8c3ddd3cb4489001d9710702e6a0..1be9bba680854e9aa58956b8b36ea2cfb1da51b9 100644
--- a/ansible/roles/stack-sunbird/tasks/lms_service.yml
+++ b/ansible/roles/stack-sunbird/tasks/lms_service.yml
@@ -4,6 +4,6 @@
   ignore_errors: yes
 
 - name: Deploy lms service
-  shell: "docker service create --with-registry-auth --replicas {{ lms_service_replicas }} -p 9005:9000  --name lms-service --hostname lms-service --reserve-memory {{ lms_service_reservation_memory }} --limit-memory {{ lms_service_limit_memory }} --limit-cpu {{ lms_service_limit_cpu }} --reserve-cpu {{ lms_service_reservation_cpu }} --health-cmd 'wget -qO- lms-service:9000/service/health || exit 1' --health-timeout 3s --health-retries 3  --network application_default --env JAVA_OPTIONS={{ lms_java_mem_limit }} --env-file /home/deployer/env/sunbird_lms-service.env  {{hub_org}}/{{image_name}}:{{image_tag}}"
+  shell: "docker service create --with-registry-auth --replicas {{ lms_service_replicas }} -p 9005:9000  --name lms-service --hostname lms-service --reserve-memory {{ lms_service_reservation_memory }} --limit-memory {{ lms_service_limit_memory }} --limit-cpu {{ lms_service_limit_cpu }} --reserve-cpu {{ lms_service_reservation_cpu }} --health-cmd 'wget -qO- lms-service:9000/service/health || exit 1' --health-timeout 10s --health-retries 5  --network application_default --env JAVA_OPTIONS={{ lms_java_mem_limit }} --env-file /home/deployer/env/sunbird_lms-service.env  {{hub_org}}/{{image_name}}:{{image_tag}}"
   args:
     chdir: /home/deployer/stack
diff --git a/ansible/roles/stack-sunbird/tasks/notification_service.yml b/ansible/roles/stack-sunbird/tasks/notification_service.yml
index 22f3b42d5965fad26501874b6ea4f3afef469c75..fe69e6e41c8247bcf230dcc5af95caad957266af 100644
--- a/ansible/roles/stack-sunbird/tasks/notification_service.yml
+++ b/ansible/roles/stack-sunbird/tasks/notification_service.yml
@@ -4,4 +4,4 @@
   ignore_errors: yes
 
 - name: Deploy notification service
-  shell: "docker service create --with-registry-auth --replicas {{ notification_replicas }} -p 9012:9000  --name notification-service --hostname notification-service --limit-memory {{ notification_limit_memory }} --limit-cpu {{ notification_limit_cpu }} --network application_default --env-file /home/deployer/env/sunbird_notification-service.env  {{hub_org}}/{{image_name}}:{{image_tag}}"
+  shell: "docker service create --with-registry-auth --replicas {{ notification_replicas }} -p 9012:9000  --name notification-service --hostname notification-service --reserve-cpu {{ notification_reserve_cpu }} --reserve-memory {{ notification_reserve_memory }}  --limit-memory {{ notification_limit_memory }} --limit-cpu {{ notification_limit_cpu }} --network application_default --env-file /home/deployer/env/sunbird_notification-service.env  {{hub_org}}/{{image_name}}:{{image_tag}}"
diff --git a/ansible/roles/stack-sunbird/tasks/print-service.yml b/ansible/roles/stack-sunbird/tasks/print-service.yml
index 55f0cedb64fcce32e3c5049bc472f2ccbe4c3ade..2434863e51a2b42b0c71dc5176a7e326227b66f9 100644
--- a/ansible/roles/stack-sunbird/tasks/print-service.yml
+++ b/ansible/roles/stack-sunbird/tasks/print-service.yml
@@ -4,6 +4,6 @@
   ignore_errors: yes
 
 - name: Deploy print service
-  shell: "docker service create --with-registry-auth --replicas {{ print_service_replicas }} -p 5001:5000  --name print-service --hostname print-service --reserve-memory {{ print_service_reservation_memory }} --limit-memory {{ print_service_limit_memory }} --limit-cpu {{ print_service_limit_cpu }} --reserve-cpu {{ print_service_reservation_cpu }} --health-cmd 'wget -qO- print-service:5000/health || exit 1' --health-timeout 3s --health-retries 3  --network application_default --env-file /home/deployer/env/sunbird_print-service.env  {{hub_org}}/{{image_name}}:{{image_tag}}"
+  shell: "docker service create --with-registry-auth --replicas {{ print_service_replicas }} -p 5001:5000  --name print-service --hostname print-service --reserve-memory {{ print_service_reservation_memory }} --limit-memory {{ print_service_limit_memory }} --limit-cpu {{ print_service_limit_cpu }} --reserve-cpu {{ print_service_reservation_cpu }} --health-cmd 'wget -qO- print-service:5000/health || exit 1' --health-timeout 10s --health-retries 5  --network application_default --env-file /home/deployer/env/sunbird_print-service.env  {{hub_org}}/{{image_name}}:{{image_tag}}"
   args:
     chdir: /home/deployer/stack
diff --git a/ansible/roles/stack-sunbird/tasks/report-service.yml b/ansible/roles/stack-sunbird/tasks/report-service.yml
index 83762eb8b589f7c8efa8c8ba2a206019d9bf2da7..bd4397ae71ef218fb4e2590a01276ff7e75148ca 100644
--- a/ansible/roles/stack-sunbird/tasks/report-service.yml
+++ b/ansible/roles/stack-sunbird/tasks/report-service.yml
@@ -4,6 +4,6 @@
   ignore_errors: yes
 
 - name: Deploy report service
-  shell: "docker service create --replicas {{ report_replicas }} -p 3030:3030  --name report-service --hostname report-service --reserve-memory {{ report_reservation_memory }} --limit-memory {{ report_limit_memory }} --limit-cpu {{ report_limit_cpu }}  --network application_default --env-file /home/deployer/env/sunbird_report-service.env  --with-registry-auth {{hub_org}}/{{image_name}}:{{image_tag}}"
+  shell: "docker service create --replicas {{ report_replicas }} -p 3030:3030  --name report-service --hostname report-service --reserve-memory {{ report_reservation_memory }} --reserve-cpu {{ report_reservation_cpu }} --limit-memory {{ report_limit_memory }} --limit-cpu {{ report_limit_cpu }}  --network application_default --env-file /home/deployer/env/sunbird_report-service.env  --with-registry-auth {{hub_org}}/{{image_name}}:{{image_tag}}"
   args:
     chdir: /home/deployer/stack
diff --git a/ansible/roles/stack-sunbird/tasks/telemetry_service.yml b/ansible/roles/stack-sunbird/tasks/telemetry_service.yml
index 51c16d9d17d209c07a026507f395e64a077bd844..073b85f9457a9c84366db5783a788c6b759bbe20 100644
--- a/ansible/roles/stack-sunbird/tasks/telemetry_service.yml
+++ b/ansible/roles/stack-sunbird/tasks/telemetry_service.yml
@@ -4,6 +4,6 @@
   ignore_errors: yes
 
 - name: Deploy telemetry service
-  shell: "docker service create --replicas {{ telemetry_replicas }} -p 9001:9001  --name telemetry-service --hostname telemetry-service --reserve-memory {{ telemetry_reservation_memory }} --limit-memory {{ telemetry_limit_memory }} --limit-cpu {{ telemetry_limit_cpu }}  --network application_default --env-file /home/deployer/env/sunbird_telemetry-service.env  --with-registry-auth {{hub_org}}/{{image_name}}:{{image_tag}}"
+  shell: "docker service create --replicas {{ telemetry_replicas }} -p 9001:9001  --name telemetry-service --hostname telemetry-service --reserve-cpu {{ telemetry_reservation_cpu  }} --reserve-memory {{ telemetry_reservation_memory }} --limit-memory {{ telemetry_limit_memory }} --limit-cpu {{ telemetry_limit_cpu }}  --network application_default --env-file /home/deployer/env/sunbird_telemetry-service.env  --with-registry-auth {{hub_org}}/{{image_name}}:{{image_tag}}"
   args:
     chdir: /home/deployer/stack
diff --git a/ansible/roles/stack-sunbird/tasks/user_org_service.yml b/ansible/roles/stack-sunbird/tasks/user_org_service.yml
index 24ab18ebe902a6a6947ec5cc0d6a1b198a52278f..52accff451c3334c4212724fb5625ff65d7efa71 100644
--- a/ansible/roles/stack-sunbird/tasks/user_org_service.yml
+++ b/ansible/roles/stack-sunbird/tasks/user_org_service.yml
@@ -4,4 +4,4 @@
   ignore_errors: yes
 
 - name: Deploy user org service
-  shell: "docker service create --with-registry-auth --replicas {{ user_org_replicas }} -p 9009:9000  --name user-org-service --hostname user-org-service --limit-memory {{ user_org_limit_memory }} --limit-cpu {{ user_org_limit_cpu }}  --health-cmd 'wget -qO- user-org-service:9000/service/health || exit 1' --health-timeout 3s --health-retries 3 --network application_default --env-file /home/deployer/env/sunbird_user-org-service.env  {{hub_org}}/{{image_name}}:{{image_tag}}"
+  shell: "docker service create --with-registry-auth --replicas {{ user_org_replicas }} -p 9009:9000  --name user-org-service --hostname user-org-service --limit-memory {{ user_org_limit_memory }} --limit-cpu {{ user_org_limit_cpu }}  --health-cmd 'wget -qO- user-org-service:9000/service/health || exit 1' --health-timeout 10s --health-retries 5 --network application_default --env-file /home/deployer/env/sunbird_user-org-service.env  {{hub_org}}/{{image_name}}:{{image_tag}}"
diff --git a/ansible/roles/stack-sunbird/templates/stack_content_service.yml b/ansible/roles/stack-sunbird/templates/stack_content_service.yml
deleted file mode 100644
index dff2a9c348249431dfc395c50f7667e99f85e92c..0000000000000000000000000000000000000000
--- a/ansible/roles/stack-sunbird/templates/stack_content_service.yml
+++ /dev/null
@@ -1,27 +0,0 @@
-version: '3.1'
-
-services:
-  content_service:
-    image: "{{hub_org}}/{{image_name}}:{{image_tag}}"
-    deploy:
-      replicas: {{ content_replicas | default(1) }}
-      resources:
-        reservations:
-          memory: "{{ content_reservation_memory  | default('64M') }}"
-        limits:
-          memory: "{{ content_limit_memory | default('256M') }}"
-    healthcheck:
-      test: curl -f http://localhost:5000/health || exit 1
-      interval: 10s
-      timeout: 5s
-      retries: 5
-    env_file:
-      /home/deployer/env/sunbird_content_service.env
-    ports:
-     - "5000:5000"
-    networks:
-      - application_default
-
-networks:
-  application_default:
-    external: true
diff --git a/ansible/roles/stack-sunbird/templates/stack_player.yml b/ansible/roles/stack-sunbird/templates/stack_player.yml
index 5a091234fada689ebbc04dd946909638e96cf792..02722c0e4b491b0ebc7fb3fa4d8b137389f4c710 100644
--- a/ansible/roles/stack-sunbird/templates/stack_player.yml
+++ b/ansible/roles/stack-sunbird/templates/stack_player.yml
@@ -4,13 +4,14 @@ services:
   player:
     image: "{{hub_org}}/{{image_name}}:{{image_tag}}"
     deploy:
-      replicas: {{ player_replicas | default(1) }}
+      replicas: {{ player_replicas }}
       resources:
         reservations:
-          memory: "{{ player_reservation_memory  | default('64M') }}"
+          memory: "{{ player_reservation_memory }}"
+          cpus: "{{ player_reserve_cpu }}"
         limits:
-          memory: "{{ player_limit_memory  | default('256M') }}"
-          cpus: "{{ player_limit_cpu  | default('1') }}"
+          memory: "{{ player_limit_memory }}"
+          cpus: "{{ player_limit_cpu }}"
     volumes:
      - "{% if player_tenant_dir is defined and player_tenant_dir %}{{ player_tenant_dir }}:/home/sunbird/dist/tenant{% else %}/home/sunbird/dist/tenant{% endif %}"
     env_file:
diff --git a/kubernetes/ansible/roles/helm-deploy/defaults/main.yml b/kubernetes/ansible/roles/helm-deploy/defaults/main.yml
index 2bdd0800006ac7dbd6ea5babcd12464f7ee8f9d3..d4857120e21070c69de7005d13fdc9ab370ed5b0 100644
--- a/kubernetes/ansible/roles/helm-deploy/defaults/main.yml
+++ b/kubernetes/ansible/roles/helm-deploy/defaults/main.yml
@@ -55,3 +55,23 @@ adminutils_liveness_readiness:
     periodSeconds: 30
     timeoutSeconds: 10
     failureThreshold: 5
+
+api_managerecho_liveness_readiness:
+  healthcheck: true
+  readinessProbe:
+    httpGet:
+      path: /hello
+      port: 9595
+    initialDelaySeconds: 30
+    periodSeconds: 10
+    timeoutSeconds: 5
+    failureThreshold: 5
+    successThreshold: 1
+  livenessProbe:
+    httpGet:
+      path: /hello
+      port: 9595
+    initialDelaySeconds: 60
+    periodSeconds: 10
+    timeoutSeconds: 10
+    failureThreshold: 5
diff --git a/kubernetes/ansible/roles/sunbird-monitoring/tasks/main.yml b/kubernetes/ansible/roles/sunbird-monitoring/tasks/main.yml
index 6957ab31e341aeb90db18e58ba2f8cf70c92d6bc..83172fb9079514d8474665579dde5fae0ef06b85 100644
--- a/kubernetes/ansible/roles/sunbird-monitoring/tasks/main.yml
+++ b/kubernetes/ansible/roles/sunbird-monitoring/tasks/main.yml
@@ -16,6 +16,6 @@
     - dashboards
 
 - name: Install statsd-exporter
-  shell: "helm upgrade --install --force --cleanup-on-fail statsd-exporter {{chart_path}}/statsd-exporter --namespace {{ namespace }} -f /tmp/statsd-exporter.yaml"
+  shell: "helm upgrade --install --force --cleanup-on-fail statsd-exporter {{chart_path}}/statsd-exporter --namespace {{ namespace }}"
   tags:
     - statsd-exporter
diff --git a/kubernetes/ansible/roles/sunbird-monitoring/templates/statsd-exporter.yaml b/kubernetes/ansible/roles/sunbird-monitoring/templates/statsd-exporter.yaml
deleted file mode 100644
index 5e89387163ef5d4343c44f0576edf14c3d06a397..0000000000000000000000000000000000000000
--- a/kubernetes/ansible/roles/sunbird-monitoring/templates/statsd-exporter.yaml
+++ /dev/null
@@ -1 +0,0 @@
-namespace: {{ namespace }}
diff --git a/kubernetes/helm_charts/core/apimanagerecho/templates/apimanager_echo.yaml b/kubernetes/helm_charts/core/apimanagerecho/templates/deployment.yaml
similarity index 59%
rename from kubernetes/helm_charts/core/apimanagerecho/templates/apimanager_echo.yaml
rename to kubernetes/helm_charts/core/apimanagerecho/templates/deployment.yaml
index a25d987a5c32c47d8bd919d0477ffa72e9717ed7..b6bc5903c6f131401a6c39330b26eccc6c0c5783 100644
--- a/kubernetes/helm_charts/core/apimanagerecho/templates/apimanager_echo.yaml
+++ b/kubernetes/helm_charts/core/apimanagerecho/templates/deployment.yaml
@@ -4,17 +4,17 @@ kind: Deployment
 metadata:
   name: {{ .Chart.Name }}
   namespace: {{ .Values.namespace }}
+  annotations:
+    reloader.stakater.com/auto: "true"
 spec:
-  replicas: {{ .Values.apimanagerecho.replicaCount }}
+  replicas: {{ .Values.replicaCount }}
   strategy:
-     type: {{ .Values.apimanagerecho.strategy.type }}
+     type: {{ .Values.strategy.type }}
   selector:
     matchLabels:
       app: {{ .Chart.Name }}
   template:
     metadata:
-      annotations:
-        readiness.status.sidecar.istio.io/applicationPorts: ""
       labels:
         app: {{ .Chart.Name }}
     spec:
@@ -24,11 +24,17 @@ spec:
 {{- end }}
       containers:
       - name: {{ .Chart.Name }}
-        image: "{{ .Values.dockerhub }}/{{ .Values.apimanagerecho.repository }}:{{ .Values.apimanagerecho.image_tag }}"
+        image: "{{ .Values.dockerhub }}/{{ .Values.repository }}:{{ .Values.image_tag }}"
         resources:
 {{ toYaml .Values.resources | indent 10 }}
         ports:
-        - containerPort: {{ .Values.apimanagerecho.network.port }}
+        - containerPort: {{ .Values.network.port }}
+        {{- if .Values.healthcheck }}
+        livenessProbe:
+{{ toYaml .Values.livenessProbe | indent 10 }}
+        readinessProbe:
+{{ toYaml .Values.readinessProbe | indent 10 }}
+        {{- end }}
 
 ---
 apiVersion: v1
@@ -42,6 +48,6 @@ spec:
   ports:
   - name: http-{{ .Chart.Name }}
     protocol: TCP
-    port: {{ .Values.apimanagerecho.network.targetport }}
+    port: {{ .Values.network.targetport }}
   selector:
     app: {{ .Chart.Name }}
diff --git a/kubernetes/helm_charts/core/apimanagerecho/values.j2 b/kubernetes/helm_charts/core/apimanagerecho/values.j2
index 8fa19a3ddcf26b106610a030175a4e89c3d7bc58..7e1953927f10960974cd9db9627e0b6ade683a30 100755
--- a/kubernetes/helm_charts/core/apimanagerecho/values.j2
+++ b/kubernetes/helm_charts/core/apimanagerecho/values.j2
@@ -4,19 +4,20 @@ namespace: {{ namespace }}
 imagepullsecrets: {{ imagepullsecrets }}
 dockerhub: {{ dockerhub }}
 
-apimanagerecho:
-  replicaCount: {{apimanagerecho_replicacount|default(1)}}
-  repository: {{apimanagerecho_repository|default('echo-server')}}
-  image_tag: 1.5.0-gold
-  resources:
+replicaCount: {{apimanagerecho_replicacount|default(1)}}
+repository: {{apimanagerecho_repository|default('echo-server')}}
+image_tag: 1.5.0-gold
+resources:
   requests:
     cpu: {{api_managerecho_cpu_req|default('50m')}}
     memory: {{api_managerecho_mem_req|default('50Mi')}}
   limits:
     cpu: {{api_managerecho_cpu_limit|default('500m')}}
-    memory: {{api_managerecho_memory_limit|default('500Mi')}}  
-  network:
-    port: 9595
-    targetport: 9595
-  strategy:
-    type: RollingUpdate
+    memory: {{api_managerecho_mem_limit|default('500Mi')}}  
+network:
+  port: 9595
+  targetport: 9595
+strategy:
+  type: RollingUpdate
+
+{{ api_managerecho_liveness_readiness | to_nice_yaml }}
diff --git a/kubernetes/helm_charts/core/nginx-private-ingress/templates/configmap.yaml b/kubernetes/helm_charts/core/nginx-private-ingress/templates/configmap.yaml
index d5754d7f5bc598d1baeb62be85013798cb6c5c37..5d0455ec91bc1b144ff47908d4be0cc1f40749d9 100644
--- a/kubernetes/helm_charts/core/nginx-private-ingress/templates/configmap.yaml
+++ b/kubernetes/helm_charts/core/nginx-private-ingress/templates/configmap.yaml
@@ -86,7 +86,6 @@ data:
         rewrite ^/report/(.*) /$1 break;
         proxy_pass http://report-service:3030;
       }
-     }
     }
 kind: ConfigMap
 metadata:
diff --git a/kubernetes/helm_charts/core/nginx-private-ingress/templates/deployment.yaml b/kubernetes/helm_charts/core/nginx-private-ingress/templates/deployment.yaml
index 6fab3efa8c71419f8d012c09b13625e5350252d0..6b121a6f2e06981e9b292e2e1d2511ab696db362 100644
--- a/kubernetes/helm_charts/core/nginx-private-ingress/templates/deployment.yaml
+++ b/kubernetes/helm_charts/core/nginx-private-ingress/templates/deployment.yaml
@@ -4,7 +4,7 @@ metadata:
   name: nginx-private-ingress
   namespace: {{ .Values.namespace }}
 spec:
-  replicas: 1
+  replicas: {{ .Values.replicaCount }}
   selector:
     matchLabels:
       app: nginx-private-ingress
diff --git a/kubernetes/helm_charts/core/nginx-private-ingress/values.j2 b/kubernetes/helm_charts/core/nginx-private-ingress/values.j2
index 1b9071778e48e1bdcb36148c1b62e5043e191da8..628a79582619087f787ffd5f664007e0d40cbeea 100644
--- a/kubernetes/helm_charts/core/nginx-private-ingress/values.j2
+++ b/kubernetes/helm_charts/core/nginx-private-ingress/values.j2
@@ -3,6 +3,7 @@ imagepullsecrets: {{ imagepullsecrets }}
 dockerhub: {{ dockerhub }}
 repository: {{proxy_repository|default('proxy')}}
 image_tag: {{ image_tag }}
+replicaCount: {{nginx_private_ingress_replicacount|default(1)}}
 nginx_private_ingress_type: {{ nginx_private_ingress_type | default('LoadBalancer') }}
 private_ingress_custom_annotations: {{ private_ingress_custom_annotations | d('false') | lower }}
 kube_dns_ip: {{kube_dns_ip}}
@@ -11,3 +12,10 @@ private_ingress_annotation:
 {% if nginx_private_ingress_ip is defined %}
 nginx_private_ingress_ip: {{ nginx_private_ingress_ip }}
 {% endif %}
+resources:
+  requests:
+    cpu: {{nginx_private_ingress_cpu_req | default('100m')}}
+    memory: {{nginx_private_ingress_mem_req | default('100Mi')}}
+  limits:
+    cpu: {{nginx_private_ingress_cpu_limit | default('500m')}}
+    memory: {{nginx_private_ingress_mem_limit | default('500Mi')}}
diff --git a/kubernetes/helm_charts/core/telemetrydplogstash/values.j2 b/kubernetes/helm_charts/core/telemetrydplogstash/values.j2
new file mode 100644
index 0000000000000000000000000000000000000000..6a611c5efe64474420ea6de82ede10429614a2b1
--- /dev/null
+++ b/kubernetes/helm_charts/core/telemetrydplogstash/values.j2
@@ -0,0 +1,23 @@
+### Default variable file for telemetrydplogstash ###
+
+namespace: {{ namespace }}
+imagepullsecrets: {{ imagepullsecrets }}
+dockerhub: sunbird
+
+replicaCount: {{telemetry_logstash_replicacount|default(1)}}
+repository: 'telemetry_logstash'
+image_tag: 5.6-alpine
+resources:
+  requests:
+    cpu: {{telemetry_logstash_cpu_req|default('100m')}}
+    memory: {{telemetry_logstash_mem_req|default('100Mi')}}
+  limits:
+    cpu: {{telemetry_logstash_cpu_limit|default('1')}}
+    memory: {{telemetry_logstash_mem_limit|default('1024Mi')}}
+network:
+  port: 5044
+  targetport: 5044
+strategy:
+  type: RollingUpdate
+  maxsurge: 1 
+  maxunavailable: 1