diff --git a/.gitmodules b/.gitmodules
new file mode 100644
index 0000000000000000000000000000000000000000..e18365b30a2fa0acc075a4f76bee22f894e3778a
--- /dev/null
+++ b/.gitmodules
@@ -0,0 +1,3 @@
+[submodule "images/proxy/nginx-lua-prometheus"]
+	path = images/proxy/nginx-lua-prometheus
+	url = https://github.com/knyar/nginx-lua-prometheus.git
diff --git a/images/proxy/Dockerfile b/images/proxy/Dockerfile
index e2c441b0e31a244ba75ae42a2a0f0f100abce1ee..da74f9731c8a70e17d09948aa5d8d85c306131dc 100644
--- a/images/proxy/Dockerfile
+++ b/images/proxy/Dockerfile
@@ -157,7 +157,7 @@ RUN GPG_KEYS=B0F4253373F8F6F510D42178520A9993A1C052F8 \
     && ln -sf /dev/stderr /var/log/nginx/error.log
 
 COPY nginx.conf /etc/nginx/nginx.conf
-copy prometheus.lua /etc/nginx/lua_modules/
+COPY nginx-lua-prometheus/*.lua /etc/nginx/lua_modules/
 
 EXPOSE 80
 
diff --git a/images/proxy/Jenkinsfile b/images/proxy/Jenkinsfile
index 6e42f8551c3fbe5ec678ccd4e06d13f2d4529d5d..8b2839365013ecde8628f485f9386ebad7f87e43 100644
--- a/images/proxy/Jenkinsfile
+++ b/images/proxy/Jenkinsfile
@@ -17,6 +17,7 @@ node('build-slave') {
             cleanWs()
             if (params.github_release_tag == "") {
                 checkout scm
+                sh("git submodule update --init --recursive")
                 commit_hash = sh(script: 'git rev-parse --short HEAD', returnStdout: true).trim()
                 branch_name = sh(script: 'git name-rev --name-only HEAD | rev | cut -d "/" -f1| rev', returnStdout: true).trim()
                 build_tag = branch_name + "_" + commit_hash + "_" + env.BUILD_NUMBER
@@ -24,6 +25,7 @@ node('build-slave') {
             } else {
                 def scmVars = checkout scm
                 checkout scm: [$class: 'GitSCM', branches: [[name: "refs/tags/$params.github_release_tag"]], userRemoteConfigs: [[url: scmVars.GIT_URL]]]
+                sh("git submodule update --init --recursive")
                 build_tag = params.github_release_tag + "_" +  env.BUILD_NUMBER
                 println(ANSI_BOLD + ANSI_YELLOW + "Tag specified, building from tag: " + params.github_release_tag + ANSI_NORMAL)
             }
diff --git a/images/proxy/nginx-lua-prometheus b/images/proxy/nginx-lua-prometheus
new file mode 160000
index 0000000000000000000000000000000000000000..71ac9082637ff1269f195e4f4f5e61836782b8d0
--- /dev/null
+++ b/images/proxy/nginx-lua-prometheus
@@ -0,0 +1 @@
+Subproject commit 71ac9082637ff1269f195e4f4f5e61836782b8d0
diff --git a/images/proxy/nginx.conf b/images/proxy/nginx.conf
index bd1f1af3de28e3e1ea22cc807136f1d0dc2a4a64..fcf5621b7130375d1583265cfb2c021ecb3fa703 100644
--- a/images/proxy/nginx.conf
+++ b/images/proxy/nginx.conf
@@ -23,24 +23,37 @@ http {
     access_log  /var/log/nginx/access.log  main;
 
     # Shared dictionary to store metrics
-    lua_shared_dict prometheus_metrics 10M;
+    lua_shared_dict prometheus_metrics 100M;
     lua_package_path "/etc/nginx/lua_modules/?.lua";
+
+    # Defining upstream cache status for nginx metrics
+    map $upstream_cache_status $cache_status {
+      default  $upstream_cache_status;
+      ''       "NONE";
+    }
+
     # Defining metrics
-    init_by_lua '
+    init_worker_by_lua_block {
       prometheus = require("prometheus").init("prometheus_metrics")
       metric_requests = prometheus:counter(
-        "nginx_http_requests_total", "Number of HTTP requests", {"host", "status", "request_method"})
+          "nginx_http_requests_total", "Number of HTTP requests", {"host", "status", "request_method", "cache_status"})
       metric_latency = prometheus:histogram(
         "nginx_http_request_duration_seconds", "HTTP request latency", {"host"})
       metric_connections = prometheus:gauge(
         "nginx_http_connections", "Number of HTTP connections", {"state"})
-    ';
-
-    # Collecting metrics
-    log_by_lua '
-      metric_requests:inc(1, {ngx.var.server_name, ngx.var.status, ngx.var.request_method})
+    }
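+    # Collecting metrics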
+    log_by_lua_block {
+      metric_requests:inc(1, {ngx.var.server_name, ngx.var.status, ngx.var.request_method, ngx.var.cache_status})
       metric_latency:observe(tonumber(ngx.var.request_time), {ngx.var.server_name})
-    ';
+    }
+
+    # local caching for images and files
+    proxy_cache_path /tmp/proxy_cache levels=1:2 keys_zone=proxy_cache:5m max_size=10m inactive=60m use_temp_path=off;
+
+    # local caching for framework data
+    proxy_cache_path /tmp/framework_cache levels=1:2 keys_zone=framework_cache:5m max_size=700m inactive=60m use_temp_path=off;
+
 
     header_filter_by_lua_block {
      ngx.header["server"] = nil
@@ -60,12 +73,12 @@ http {
    server {
      listen 9145;
      location /metrics {
-       content_by_lua '
-         metric_connections:set(ngx.var.connections_reading, {"reading"})
-         metric_connections:set(ngx.var.connections_waiting, {"waiting"})
-         metric_connections:set(ngx.var.connections_writing, {"writing"})
-         prometheus:collect()
-       ';
+       content_by_lua_block {
+         metric_connections:set(ngx.var.connections_reading, {"reading"})
+         metric_connections:set(ngx.var.connections_waiting, {"waiting"})
+         metric_connections:set(ngx.var.connections_writing, {"writing"})
+         prometheus:collect()
+       }
      }
    }
 }
diff --git a/kubernetes/helm_charts/core/nginx-public-ingress/values.j2 b/kubernetes/helm_charts/core/nginx-public-ingress/values.j2
index 6defc4f523228ce98f5e0e4675b9989c2ac848ca..867ac7c2e6528c27cefc0f1a1f4f98f3854499a1 100644
--- a/kubernetes/helm_charts/core/nginx-public-ingress/values.j2
+++ b/kubernetes/helm_charts/core/nginx-public-ingress/values.j2
@@ -691,24 +691,30 @@ nginxconfig: |
       access_log  /var/log/nginx/access.log  main;
 
       # Shared dictionary to store metrics
-      lua_shared_dict prometheus_metrics 10M;
+      lua_shared_dict prometheus_metrics 100M;
       lua_package_path "/etc/nginx/lua_modules/?.lua";
+
+      # Defining upstream cache status for nginx metrics
+      map $upstream_cache_status $cache_status {
+        default  $upstream_cache_status;
+        ''       "NONE";
+      }
+
       # Defining metrics
-      init_by_lua '
+      init_worker_by_lua_block {
         prometheus = require("prometheus").init("prometheus_metrics")
         metric_requests = prometheus:counter(
-          "nginx_http_requests_total", "Number of HTTP requests", {"host", "status", "request_method"})
+          "nginx_http_requests_total", "Number of HTTP requests", {"host", "status", "request_method", "cache_status"})
         metric_latency = prometheus:histogram(
           "nginx_http_request_duration_seconds", "HTTP request latency", {"host"})
         metric_connections = prometheus:gauge(
           "nginx_http_connections", "Number of HTTP connections", {"state"})
-      ';
-
-      # Collecting metrics
-      log_by_lua '
-        metric_requests:inc(1, {ngx.var.server_name, ngx.var.status, ngx.var.request_method})
+      }
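+      # Collecting metrics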
+      log_by_lua_block {
+        metric_requests:inc(1, {ngx.var.server_name, ngx.var.status, ngx.var.request_method, ngx.var.cache_status})
         metric_latency:observe(tonumber(ngx.var.request_time), {ngx.var.server_name})
-      ';
+      }
 
       header_filter_by_lua_block {
        ngx.header["server"] = nil
@@ -752,12 +758,12 @@ nginxconfig: |
      server {
        listen 9145;
        location /metrics {
-         content_by_lua '
-           metric_connections:set(ngx.var.connections_reading, {"reading"})
-           metric_connections:set(ngx.var.connections_waiting, {"waiting"})
-           metric_connections:set(ngx.var.connections_writing, {"writing"})
-           prometheus:collect()
-         ';
+         content_by_lua_block {
+           metric_connections:set(ngx.var.connections_reading, {"reading"})
+           metric_connections:set(ngx.var.connections_waiting, {"waiting"})
+           metric_connections:set(ngx.var.connections_writing, {"writing"})
+           prometheus:collect()
+         }
        }
      }
   }