diff --git a/README.md b/README.md
index c65a4d520c25fdb3949cf9c43e4ae011afa5d465..e4818bc843172b631af77749c1c9b596e18ac00d 100644
--- a/README.md
+++ b/README.md
@@ -1,9 +1,11 @@
 
 
 
-# python-crawler-quickstart
 
-Python based Web crawler Quick Start Project. 
+# python-webscraping-quickstart
+
+Python-based web-scraping quick-start project.
+
 
 For Scraping the project uses Selenium & Scrapy framework.
 
@@ -35,7 +37,9 @@ python app.py
 Successful local deployment should show Server is up on port 5001.
 ## Documentation
 
-For Scripting and configuration documentation, refer `./docs` folder
+
+For scripting and configuration documentation, refer to the [Documentation](docs/README.md).
+
 
 ## API Reference
 
@@ -69,6 +73,19 @@ _The following are mandatory Request Body Parameters_
 | :-------- | :------- | :-------------------------------- |
 | `JobId`   | `string` | `(required) uuid of a job`        |
 
+
+### API Authorization
+
+Currently the project uses HTTP Basic Authentication.
+
+Set the following environment variables:
+| Variables             | Type     | Description                        |
+| :--------             | :------- | :--------------------------------  |
+| `BASIC_HTTP_USERNAME` | `string` |  username for server               |
+| `BASIC_HTTP_PASSWORD` | `string` |  password for server               |
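+
+As a minimal sketch of an authenticated request (assuming the server runs locally on port 5001 with the default `/general` URL prefix; the `requests` library is used here only for illustration):
+
+```
+import os
+
+import requests
+
+# credentials must match the server's BASIC_HTTP_USERNAME / BASIC_HTTP_PASSWORD
+auth = (os.environ["BASIC_HTTP_USERNAME"], os.environ["BASIC_HTTP_PASSWORD"])
+
+# list the configured agents
+resp = requests.get("http://localhost:5001/general/agents", auth=auth)
+print(resp.status_code, resp.json())
+```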
+
+
+
 ## Authors
 
 - [@dileep-gadiraju](https://github.com/dileep-gadiraju)
diff --git a/deploy/dev.env b/deploy/dev.env
new file mode 100644
index 0000000000000000000000000000000000000000..887c058d0e19d668ca9f5047f8a1e92334560b45
--- /dev/null
+++ b/deploy/dev.env
@@ -0,0 +1,7 @@
+BASIC_HTTP_USERNAME=test
+BASIC_HTTP_PASSWORD=generic@123#
+ELASTIC_DB_URL=https://localhost:9200
+BLOB_SAS_TOKEN=XXXXX
+BLOB_ACCOUNT_URL=YYYYY
+BLOB_CONTAINER_NAME=ZZZZZ
+MAX_RUNNING_JOBS=4
diff --git a/deploy/web-scraping.yml b/deploy/web-scraping.yml
new file mode 100644
index 0000000000000000000000000000000000000000..664dfd0ae9d7271ba3e60a66837ad76c6934aa6f
--- /dev/null
+++ b/deploy/web-scraping.yml
@@ -0,0 +1,22 @@
+version: '3.7'
+services:
+  web-scraping-project:
+    deploy:
+      replicas: 1
+      update_config:
+        parallelism: 3
+        delay: 10s
+      restart_policy:
+        condition: on-failure
+    ports:
+      - "5001:5001"
+    env_file:
+      - ./dev.env
+
+    networks:
+      - frontend
+
+networks:
+  frontend:
+    driver: overlay
+    external: true
diff --git a/docs/README.md b/docs/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..4ad44d54d3e53c4258a4c9dfe542a34c8db5b0b7
--- /dev/null
+++ b/docs/README.md
@@ -0,0 +1,15 @@
+# Configuration README
+
+[Configure config.py](config.md)
+
+[Configure agents](agents.md)
+
+[Configure azure](azure.md)
+
+[Configure Environment Variables](env-variables.md)
+
+[Configure ElasticSearch Log](eslog.md)
+
+[Configure scripts.py](scripts.md)
+
+[Docker deployment](docker.md)
\ No newline at end of file
diff --git a/docs/agents.md b/docs/agents.md
new file mode 100644
index 0000000000000000000000000000000000000000..05a1bf77e712254205e2c56734db6be149a2d8de
--- /dev/null
+++ b/docs/agents.md
@@ -0,0 +1,48 @@
+
+# Agent Configurations
+
+To include new agents, add the agent data to `./src/static/agent_configs/agents.json` (the location set by `AGENT_CONFIG_PATH` in `config.py`).
+
+format: 
+
+```
+    {
+        "agentId": "MY-AGENT-1",
+        "description": "Crawler For my_agent_1",
+        "provider": "AGENT-PROVIDER-X",
+        "URL": "https://www.my-agent.com",
+        "scripts": {
+            "scriptType1": "myAgentScript1",
+            "scriptType2": "myAgentScript2",
+            "scriptType3": "myAgentScript3",
+            ...
+        }
+    }
+```
+
+example: 
+
+```
+    [
+        {
+            "agentId": "APPLIED-SELENIUM",
+            "description": "Crawler For Applied",
+            "provider": "Applied",
+            "URL": "https://www.applied.com",
+            "scripts": {
+                "info": "AppliedSelenium",
+                "pdf": "AppliedSelenium"
+            }
+        },
+        {
+            "agentId": "GRAINGER-SELENIUM",
+            "description": "Crawler For Grainger",
+            "provider": "Grainger",
+            "URL": "https://www.grainger.com",
+            "scripts": {
+                "info": "GraingerSelenium",
+                "pdf": "GraingerSelenium"
+            }
+        }
+    ]
+```
\ No newline at end of file
diff --git a/docs/azure.md b/docs/azure.md
new file mode 100644
index 0000000000000000000000000000000000000000..e41b360044ef28ff7103c9402d73f9d7ecd4af60
--- /dev/null
+++ b/docs/azure.md
@@ -0,0 +1,31 @@
+# Azure
+
+1. Initialize the BlobStorage object.
+```
+blob_storage = BlobStorage(overwrite=False)
+```
+arguments:
+
+* overwrite : (boolean), whether an existing blob with the same name is overwritten on upload (defaults to `False`).
+
+2. Set the folder for storage.
+```
+blob_storage.set_agent_folder(folder_name)
+```
+arguments:
+    
+* folder_name : Name of the folder.
+
+
+3. Upload it to BlobStorage.
+
+```
+b_status, b_str = blob_storage.upload_file(file_name, data)
+```
+arguments:
+    
+* file_name : Name of the file.
+* data : data to be uploaded.
+
+return:
+
+* b_status : (boolean), whether the upload succeeded.
+* b_str : the exception message if the upload failed, otherwise `'true'`.
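+
+Putting the steps together, a minimal usage sketch (the folder and file names below are illustrative only):
+
+```
+from common import BlobStorage
+
+# overwrite behaviour is fixed at construction time
+blob_storage = BlobStorage(overwrite=True)
+blob_storage.set_agent_folder('MY-AGENT-1')
+
+b_status, b_str = blob_storage.upload_file('result.json', '{"A": "123"}')
+if not b_status:
+    print('upload failed:', b_str)
+```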
diff --git a/docs/config.md b/docs/config.md
new file mode 100644
index 0000000000000000000000000000000000000000..e954a4dc9ab291ee252ad9c601076743c9667556
--- /dev/null
+++ b/docs/config.md
@@ -0,0 +1,59 @@
+
+# Configure config.py
+
+* [Server configuration](#Server-configuration)
+* [Agent configuration](#Agent-configuration)
+* [AzureBlob configuration](#AzureBlob-configuration)
+* [ElasticSearch variables](#ElasticSearch-DB-variables)
+* [Logging configuration](#Logging-configuration)
+
+## Server configuration
+
+| Variables             | Type      | Description                       |
+| :--------             | :-------  | :-------------------------        |
+| `SERVER_HOST`         | `string`  |  host for Server                  |
+| `SERVER_PORT`         | `string`  |  port for Server                  |
+| `SERVER_DEBUG`        | `bool`    |  debugging for Server             |
+| `SERVER_CORS`         | `bool`    |  CORS policy for Server           |
+| `SERVER_STATIC_PATH`  | `string`  |  static folder path for Server    |
+| `API_URL_PREFIX`      | `string`  |  url prefix for Server            |
+| `API_MANDATORY_PARAMS`| `list`    |  mandatory parameters for request |
+| `BASIC_HTTP_USERNAME` | `string`  |  username to access Server        |
+| `BASIC_HTTP_PASSWORD` | `string`  |  password to access Server        |
+
+## Agent configuration
+
+| Variables              | Type      | Description                                     |
+| :--------              | :-------  | :-------------------------                      |
+| `AGENT_SCRIPT_TYPES`   | `dict`    |  types of scraping_scripts                      |
+| `AGENT_CONFIG_PATH`    | `string`  |  file_path for agent_configuration(json file)   |
+| `AGENT_CONFIG_PKL_PATH`| `string`  |  file_path for agent_configuration(pickle file) |
+
+## AzureBlob configuration
+
+| Variables             | Type     | Description                       |
+| :--------             | :------- | :-------------------------        |
+| `BLOB_INTIGRATION`    | `bool`   |  enable/disable AzureBlob Storage |
+| `BLOB_SAS_TOKEN`      | `string` |  SAS Token for AzureBlob Storage  |
+| `BLOB_ACCOUNT_URL`    | `string` |  Account URL for AzureBlob Storage|
+| `BLOB_CONTAINER_NAME` | `string` |  Container for AzureBlob Storage  |
+
+## ElasticSearch DB variables
+
+| Variables        | Type     | Description                          |
+| :--------        | :------- | :-------------------------           |
+| `ELASTIC_DB_URL` | `string` |  URL of ElasticSearch Server         |
+| `ES_LOG_INDEX`   | `string` |  Info Logging Index in ElasticSearch |
+| `ES_JOB_INDEX`   | `string` |  Job  Logging Index in ElasticSearch |
+| `ES_DATA_INDEX`  | `string` |  Data Logging Index in ElasticSearch |
+
+## Logging configuration
+
+| Variables                     | Type     | Description                    |
+| :--------                     | :------- | :-------------------------     |
+| `JOB_OUTPUT_PATH`             | `string` |  folder_path for JOB output    |
+| `MAX_RUNNING_JOBS`            | `int`    |  Max No. of Running Jobs       |
+| `MAX_WAITING_JOBS`            | `int`    |  Max No. of Waiting Jobs       |
+| `JOB_RUNNING_STATUS`          | `string` |  Status for Running Jobs       |
+| `JOB_COMPLETED_SUCCESS_STATUS`| `string` |  Status for Successful Jobs    |
+| `JOB_COMPLETED_FAILED_STATUS` | `string` |  Status for Failed Jobs        |
diff --git a/docs/contracts.yaml b/docs/contracts.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/docs/docker.md b/docs/docker.md
new file mode 100644
index 0000000000000000000000000000000000000000..8c8e51ea3fd4089bba2d010e17cc43545036f5bd
--- /dev/null
+++ b/docs/docker.md
@@ -0,0 +1,21 @@
+# Docker Deployment
+
+* Stop and remove existing containers with name `web-scraping-project`.
+```
+docker stop web-scraping-project 
+docker rm web-scraping-project
+```
+
+* Build Docker image: `web-scraping-project`
+```
+docker build -t web-scraping-project ./src/
+```
+_Note: `./src/` contains the Dockerfile_
+
+
+* Spawn: `web-scraping-project`.
+```
+docker run --name web-scraping-project -p 5001:5001 --env-file ./deploy/dev.env -it web-scraping-project
+```
+
+_Note: the environment file passed via `--env-file` is read from the local filesystem (`./deploy/dev.env` in the command above)_
\ No newline at end of file
diff --git a/docs/env-variables.md b/docs/env-variables.md
new file mode 100644
index 0000000000000000000000000000000000000000..d1892cc9f956f1b837fec25f07666d9f69a974e9
--- /dev/null
+++ b/docs/env-variables.md
@@ -0,0 +1,15 @@
+
+# Environment-Variables
+
+The following environment variables are supported:
+
+| Variables             | Type      | Description                       |
+| :--------             | :-------  | :-------------------------        |
+| `BASIC_HTTP_USERNAME` | `string`  |  username for server              |
+| `BASIC_HTTP_PASSWORD` | `string`  |  password for server              |
+| `ELASTIC_DB_URL`      | `string`  |  URL of elasticsearch_DB          |
+| `BLOB_SAS_TOKEN`      | `string`  |  azure blob_storage SAS token     |
+| `BLOB_ACCOUNT_URL`    | `string`  |  azure blob_storage account_URL   |
+| `BLOB_CONTAINER_NAME` | `string`  |  azure blob_storage container_name|
+| `MAX_RUNNING_JOBS`    | `int`     |  maximum jobs running at a time   |
+| `MAX_WAITING_JOBS`    | `int`     |  maximum jobs waiting at a time   |
diff --git a/docs/eslog.md b/docs/eslog.md
new file mode 100644
index 0000000000000000000000000000000000000000..edd783ed3d7bb6f6ca07793aa339e322e478bba7
--- /dev/null
+++ b/docs/eslog.md
@@ -0,0 +1,53 @@
+
+# ElasticSearch Log
+
+* Initialize Log object.
+```
+log = Log(agentRunContext)
+```
+
+* Types of logs:
+    
+    1. log.job : records the job status; entries are indexed to `config.ES_JOB_INDEX`.
+        
+        Syntax:
+        ```
+        log.job(status, message)
+        ```
+        
+        Examples:
+        ```
+        log.job(config.JOB_RUNNING_STATUS, 'job Started')
+        # your code goes here
+        try:
+            log.job(config.JOB_COMPLETED_SUCCESS_STATUS, 'Job Completed')
+        except:
+            log.job(config.JOB_COMPLETED_FAILED_STATUS, 'Job Failed')
+        ```
+
+    2. log.info : records informational messages; entries are indexed to `config.ES_LOG_INDEX`.
+
+        Syntax:
+        ```
+        log.info(info_type, message)
+        ```
+        Examples:
+        ```
+        log.info('info', 'This is generalization project')
+        log.info('warning', 'Script is taking more than usual time')
+        log.info('exception', 'No Products Available')
+        ```
+    3. log.data : records scraped data; entries are indexed to `config.ES_DATA_INDEX`.
+        
+        Syntax:
+        ```
+        log.data(data)
+        ```
+        Example:
+        ```
+        data = {
+            "A" : "123",
+            "B" : "Generic Project"
+        }
+        log.data(data)
+        ```
diff --git a/docs/scripts.md b/docs/scripts.md
new file mode 100644
index 0000000000000000000000000000000000000000..da8bbba8f565828b14b1f8de255b2ab4e60456b5
--- /dev/null
+++ b/docs/scripts.md
@@ -0,0 +1,32 @@
+
+# Scripts
+
+1. Create a Python file in the respective scriptType folder under `./src/scripts`.
+
+2. Format of the script `my_agent_script.py`:
+```
+# imports
+import traceback
+
+import config
+from common import Log
+
+
+# create a function
+def myAgentScript(agentRunContext):
+    log = Log(agentRunContext)
+    try:
+        log.job(config.JOB_RUNNING_STATUS, 'Job Started')
+
+        # Your script
+        # Goes here
+
+        log.job(config.JOB_COMPLETED_SUCCESS_STATUS, 'Successfully Scraped Data')
+
+    except Exception as e:
+        log.job(config.JOB_COMPLETED_FAILED_STATUS, str(e))
+        log.info('exception', traceback.format_exc())
+
+```
+3. Add the script to the scriptType folder's `__init__.py`:
+```
+from .my_agent_script import myAgentScript
+```
+
+
diff --git a/src/.gitignore b/src/.gitignore
new file mode 100644
index 0000000000000000000000000000000000000000..d9b46a2cb4b6bd04de8aabaa0f154263bd2fee00
--- /dev/null
+++ b/src/.gitignore
@@ -0,0 +1,8 @@
+**__pycache__
+*.vscode
+*.log
+/env
+exp_result.py
+**.DS_Store
+/upload/*
+#
\ No newline at end of file
diff --git a/src/Dockerfile b/src/Dockerfile
new file mode 100644
index 0000000000000000000000000000000000000000..89f1dc413c159e286e92ee5630689daf8cd1e433
--- /dev/null
+++ b/src/Dockerfile
@@ -0,0 +1,9 @@
+FROM python:3.9-slim
+COPY / /app
+WORKDIR /app
+RUN apt update
+
+RUN pip3 install -r requirements.txt
+COPY start.sh /usr/bin/start.sh
+RUN chmod +x /usr/bin/start.sh
+CMD ["/usr/bin/start.sh"]
diff --git a/src/app.py b/src/app.py
new file mode 100644
index 0000000000000000000000000000000000000000..59fb56575401520f6a579c16a1b9b759abdc6a07
--- /dev/null
+++ b/src/app.py
@@ -0,0 +1,75 @@
+import json
+import os
+import sys
+
+from flask import Flask
+from flask.blueprints import Blueprint
+from flask_basicauth import BasicAuth
+from flask_cors import CORS
+
+# local imports
+import config
+import routes
+from models import AgentUtils
+
+# flask server
+server = Flask(__name__)
+
+# server configuration
+config.SERVER_STATIC_PATH = server.static_folder
+server.config['BASIC_AUTH_USERNAME'] = config.BASIC_HTTP_USERNAME
+server.config['BASIC_AUTH_PASSWORD'] = config.BASIC_HTTP_PASSWORD
+# basic_auth for server
+basic_auth = BasicAuth(server)
+
+# load agents config
+with open(os.path.join(config.SERVER_STATIC_PATH, config.AGENT_CONFIG_PATH), 'r') as f:
+    agent_list = json.load(f)
+
+__import__("scripts")
+
+my_scripts = sys.modules["scripts"]
+
+agentUtils = AgentUtils()
+agentUtils.filepath = os.path.join(
+    config.SERVER_STATIC_PATH, config.AGENT_CONFIG_PKL_PATH)
+pkl_agent_list = agentUtils.listAgents()
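+# agents listed in agents.json but not yet in the pickle registry are assumed to
+# be appended at the end, so only the trailing len_diff entries need registering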
+len_diff = len(agent_list) - len(pkl_agent_list)
+for i in range(len(agent_list)-1, len(agent_list)-len_diff-1, -1):
+    agent = agent_list[i]
+    agent_script = dict()
+    for type in config.AGENT_SCRIPT_TYPES.values():
+        agent_script[type] = my_scripts.__dict__[
+            type].__dict__[agent['scripts'][type]]
+    agentUtils.addAgent(agent['agentId'],
+                        agent['description'],
+                        agent['provider'],
+                        agent_script,
+                        agent['URL'])
+
+
+# server CORS policy
+if config.SERVER_CORS:
+    cors = CORS(server, resources={r"/api/*": {"origins": "*"}})
+
+# add blueprint routes to server
+for blueprint in vars(routes).values():
+    if isinstance(blueprint, Blueprint):
+        server.register_blueprint(blueprint, url_prefix=config.API_URL_PREFIX)
+
+# sample route
+
+
+@server.route('/')
+def home():
+    return "<h1>HI</h1>"
+
+
+# start server
+if __name__ == "__main__":
+    print('starting server at {} at port {}'.format(
+        config.SERVER_HOST, config.SERVER_PORT))
+    server.run(host=config.SERVER_HOST,
+               port=config.SERVER_PORT,
+               debug=config.SERVER_DEBUG,
+               threaded=True)
diff --git a/src/common/__init__.py b/src/common/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..cb508f7e76e247e9fe8903c52b5e7989a0c11d26
--- /dev/null
+++ b/src/common/__init__.py
@@ -0,0 +1,4 @@
+from .scraping_utils import get_driver
+from .elastic_wrapper import Log
+from .errors import ValueMissing, FormatError, BadRequestError
+from .blob_storage import BlobStorage
diff --git a/src/common/blob_storage.py b/src/common/blob_storage.py
new file mode 100644
index 0000000000000000000000000000000000000000..0811d3de8e9b3dd9e005d797f5448d265e171f51
--- /dev/null
+++ b/src/common/blob_storage.py
@@ -0,0 +1,40 @@
+import os
+
+import config
+from azure.storage.blob import BlobServiceClient
+
+
+class BlobStorage(object):
+    def __init__(self, overwrite=False):
+        self.blob_service_client = BlobServiceClient(
+            account_url=config.BLOB_ACCOUNT_URL, credential=config.BLOB_SAS_TOKEN)
+        self.root_folder = None
+        self.overwrite = overwrite
+
+    @property
+    def root_folder(self):
+        return self._root_folder
+
+    @root_folder.setter
+    def root_folder(self, rf):
+        self._root_folder = rf
+
+    @property
+    def blob_service_client(self):
+        return self._blob_service_client
+
+    @blob_service_client.setter
+    def blob_service_client(self, bsc):
+        self._blob_service_client = bsc
+
+    def set_agent_folder(self, agent_folder):
+        self.root_folder = agent_folder
+
+    def upload_file(self, file_name, file_contents):
+        upload_file_path = os.path.join(self.root_folder, file_name)
+        blob_client = self.blob_service_client.get_blob_client(
+            container=config.BLOB_CONTAINER_NAME, blob=upload_file_path)
+        try:
+            blob_client.upload_blob(file_contents, overwrite=self.overwrite)
+        except Exception as e:
+            return False, str(e)
+        return True, 'true'
diff --git a/src/common/elastic_wrapper.py b/src/common/elastic_wrapper.py
new file mode 100644
index 0000000000000000000000000000000000000000..7c31ab28ae1c32be81644d55aaa58d6b94fa00d8
--- /dev/null
+++ b/src/common/elastic_wrapper.py
@@ -0,0 +1,80 @@
+import config
+from elasticsearch import Elasticsearch
+import json
+import time
+
+
+class Log(object):
+
+    @classmethod
+    def from_default(cls):
+        return cls(None)
+
+    def __init__(self, agentRunContext):
+        self.agentRunContext = agentRunContext
+        self.es_client = Elasticsearch([config.ELASTIC_DB_URL])
+
+    def __populate_context(self):
+        data = {
+            'agentId': self.agentRunContext.requestBody['agentId'],
+            'jobId': self.agentRunContext.jobId,
+            'jobType': self.agentRunContext.jobType,
+            'timestamp': int(time.time()*1000),
+            'buildNumber': config.BUILD_NUMBER
+        }
+        return data
+
+    def __index_data_to_es(self, index, data):
+        if self.es_client.ping():
+            self.es_client.index(index=index, body=json.dumps(data))
+        else:
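+            # ES is unreachable: append the entry to a local file so it is not lost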
+            with open('logger.txt', 'a+') as f:
+                f.write(json.dumps(data)+'\n')
+
+    def info(self, info_type, message):
+        info_data = self.__populate_context()
+        info_data['type'] = info_type
+        info_data['message'] = message
+        self.__index_data_to_es(config.ES_LOG_INDEX, info_data)
+
+    def data(self, data):
+        data.update(self.__populate_context())
+        self.__index_data_to_es(config.ES_DATA_INDEX, data)
+
+    def job(self, status, message):
+        job_data = self.__populate_context()
+        job_data['status'] = status
+        job_data['message'] = message
+        self.__index_data_to_es(config.ES_JOB_INDEX, job_data)
+
+    def get_status(self, jobId):
+        print(jobId)
+        if not self.es_client.ping():
+            return {'status': 'ES_CONNECTION_FAILED', 'message': "Not able to connect to ES DB"}
+        else:
+            search_param = {
+                "sort": [
+                    {
+                        "timestamp": {
+                            "order": "desc"
+                        }
+                    }
+                ],
+                "query": {
+                    "bool": {
+                        "must": [
+                            {"match": {
+                                "jobId.keyword": jobId
+                            }}
+                        ]
+                    }
+                }
+            }
+            res = self.es_client.search(
+                index=config.ES_JOB_INDEX, body=search_param)
+
+            if len(res['hits']['hits']) > 0:
+                source = res['hits']['hits'][0]['_source']
+                return {'status': source['status'], 'message': source['message']}
+            else:
+                return {'status': 'JOBID_NOT_FOUND', 'message': "Please check the given jobId"}
diff --git a/src/common/errors.py b/src/common/errors.py
new file mode 100644
index 0000000000000000000000000000000000000000..783f3f4fb3dc24c1981a21e37965476959036694
--- /dev/null
+++ b/src/common/errors.py
@@ -0,0 +1,124 @@
+from flask import jsonify
+
+
+class RestAPIError(Exception):
+    def __init__(self, status_code=500, payload=None):
+        self.status_code = status_code
+        self.payload = payload
+
+    def to_response(self):
+        return jsonify({'error': self.payload}), self.status_code
+
+
+class BadRequestError(RestAPIError):
+    def __init__(self, payload=None):
+        super().__init__(400, payload)
+
+
+class InternalServerErrorError(RestAPIError):
+    def __init__(self, payload=None):
+        super().__init__(500, payload)
+
+
+class FormatError(Exception):
+    def __init__(self, code, message):
+        self._code = code
+        self._message = message
+
+    @property
+    def code(self):
+        return self._code
+
+    @property
+    def message(self):
+        return self._message
+
+    def __str__(self):
+        return self.__class__.__name__ + ': ' + self.message
+
+
+class WorkflowkeyError(Exception):
+    def __init__(self, code, message):
+        self._code = code
+        self._message = message
+
+    @property
+    def code(self):
+        return self._code
+
+    @property
+    def message(self):
+        return self._message
+
+    def __str__(self):
+        return self.__class__.__name__ + ': ' + self.message
+
+
+class FileErrors(Exception):
+    def __init__(self, code, message):
+        self._code = code
+        self._message = message
+
+    @property
+    def code(self):
+        return self._code
+
+    @property
+    def message(self):
+        return self._message
+
+    def __repr__(self):
+        return {"code": self.code, "message": self.__class__.__name__ + ': ' + self.message}
+
+
+class FileEncodingError(Exception):
+    def __init__(self, code, message):
+        self._code = code
+        self._message = message
+
+    @property
+    def code(self):
+        return self._code
+
+    @property
+    def message(self):
+        return self._message
+
+    def __str__(self):
+        return self.__class__.__name__ + ': ' + self.message
+
+
+class ServiceError(Exception):
+    def __init__(self, code, message):
+        self._code = code
+        self._message = message
+
+    @property
+    def code(self):
+        return self._code
+
+    @property
+    def message(self):
+        return self._message
+
+    def __str__(self):
+        return self.__class__.__name__ + ': ' + self.message
+
+
+class ValueMissing(Exception):
+    def __init__(self, message):
+        self.message = message
+
+    @property
+    def message(self):
+        return self._message
+
+    @message.setter
+    def message(self, value):
+        self._message = value
+
+    def __str__(self):
+        return self.message
+
+    def __repr__(self):
+        return self.message
diff --git a/src/common/scraping_utils.py b/src/common/scraping_utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..803ff765f48d41fd8870f3ca752e36260face0f6
--- /dev/null
+++ b/src/common/scraping_utils.py
@@ -0,0 +1,50 @@
+import os
+from pathlib import Path
+
+import config
+from selenium import webdriver
+from selenium.webdriver.chrome.options import Options
+from selenium.webdriver.chrome.service import Service
+from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
+
+chrome_path = Service(config.CHROMEDRIVER_PATH)
+
+
+def enable_download_headless(browser, download_dir):
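+    # register the Chromium "send_command" endpoint and allow downloads into
+    # download_dir, which headless Chrome blocks by default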
+    browser.command_executor._commands["send_command"] = (
+        "POST", '/session/$sessionId/chromium/send_command')
+    params = {'cmd': 'Page.setDownloadBehavior', 'params': {
+        'behavior': 'allow', 'downloadPath': download_dir}}
+    browser.execute("send_command", params)
+
+
+def get_driver(temp_directory):
+    Path(temp_directory).mkdir(parents=True, exist_ok=True)
+    download_dir = os.path.join(temp_directory)
+    chrome_options = Options()
+    d = DesiredCapabilities.CHROME
+    d['goog:loggingPrefs'] = {'browser': 'ALL'}
+    chrome_options.add_argument("--headless")
+    chrome_options.add_argument("--window-size=1920x1080")
+    chrome_options.add_argument("--disable-notifications")
+    chrome_options.add_argument('--no-sandbox')
+    chrome_options.add_argument('--verbose')
+    chrome_options.add_argument('--log-level=3')
+    chrome_options.add_argument('--disable-gpu')
+    chrome_options.add_argument('--disable-dev-shm-usage')
+    chrome_options.page_load_strategy = 'normal'
+    chrome_options.add_argument(
+        '--user-agent=Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/88.0.4324.190 Safari/537.36')
+    chrome_options.add_argument('--disable-software-rasterizer')
+    chrome_options.add_experimental_option("prefs", {
+        "download.default_directory": str(download_dir),
+        "download.prompt_for_download": False,
+        "download.directory_upgrade": True,
+        "safebrowsing_for_trusted_sources_enabled": False,
+        "safebrowsing.enabled": False,
+        "plugins.always_open_pdf_externally": True
+    })
+    driver = webdriver.Chrome(
+        service=chrome_path, options=chrome_options, desired_capabilities=d)
+    enable_download_headless(driver, download_dir)
+    return driver
diff --git a/src/common/scrapy_utils.py b/src/common/scrapy_utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..b86268b0ba0f625ae49ade0938f546cf801c690a
--- /dev/null
+++ b/src/common/scrapy_utils.py
@@ -0,0 +1 @@
+ # scrapy config goes here !
\ No newline at end of file
diff --git a/src/config.py b/src/config.py
new file mode 100644
index 0000000000000000000000000000000000000000..9a908b2a126f8cd60664696d431bbae146c5493b
--- /dev/null
+++ b/src/config.py
@@ -0,0 +1,67 @@
+import os
+
+# ------------------server configuration--------------------------
+
+SERVER_HOST = '0.0.0.0'
+SERVER_PORT = 5001
+SERVER_DEBUG = True
+SERVER_CORS = False
+SERVER_STATIC_PATH = ''
+
+# API configuration
+API_URL_PREFIX = "/general"
+BUILD_NUMBER = 'BUILD_NUMBER_001'
+API_MANDATORY_PARAMS = ['agentId', 'search', 'type']
+
+# Application configuration
+BASIC_HTTP_USERNAME = os.environ.get('BASIC_HTTP_USERNAME')
+BASIC_HTTP_PASSWORD = os.environ.get('BASIC_HTTP_PASSWORD')
+
+
+# ------------------agent configuration---------------------------
+# AGENT_SCRIPT_TYPES = { 'JOB_TYPE_1' : 'JOB_TYPE_1_FOLDER', 'JOB_TYPE_2' : 'JOB_TYPE_2_FOLDER' }
+AGENT_SCRIPT_TYPES = {
+    'INFORMATION': 'info',
+    'PDF': 'pdf'
+}
+# agent configuration file
+AGENT_CONFIG_PATH = 'agent_configs/agents.json'
+AGENT_CONFIG_PKL_PATH = 'agent_configs/agents.pkl'
+
+# ------------------AzureBlob Variable----------------------------
+
+# AzureBlob variable
+BLOB_INTIGRATION = False
+BLOB_SAS_TOKEN = os.environ.get('BLOB_SAS_TOKEN')
+BLOB_ACCOUNT_URL = os.environ.get('BLOB_ACCOUNT_URL')
+BLOB_CONTAINER_NAME = os.environ.get('BLOB_CONTAINER_NAME')
+
+# ------------------Queuing variables-----------------------------
+
+# Queuing variables
+MAX_RUNNING_JOBS = int(os.environ.get('MAX_RUNNING_JOBS', 4))
+MAX_WAITING_JOBS = int(os.environ.get('MAX_WAITING_JOBS', 10))
+
+# ------------------ElasticSearch DB variables--------------------
+
+ELASTIC_DB_URL = os.environ.get('ELASTIC_DB_URL')
+
+# ES index variables
+ES_LOG_INDEX = 'general-app-logs'
+ES_JOB_INDEX = 'general-job-stats'
+ES_DATA_INDEX = 'general-crawled-data'
+
+# ------------------Logging variables-----------------------------
+
+JOB_OUTPUT_PATH = "output"
+
+# JobStatus variables
+JOB_RUNNING_STATUS = 'RUNNING'
+JOB_COMPLETED_SUCCESS_STATUS = 'COMPLETED_SUCCESS'
+JOB_COMPLETED_FAILED_STATUS = 'COMPLETED_FAILED'
+
+# ------------------Driver Variables-------------------------------
+
+CHROMEDRIVER_PATH = 'C:\\Drivers\\chromedriver_win32\\chromedriver.exe'
+
+# -----------------------------------------------------------------
diff --git a/src/models/__init__.py b/src/models/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..b5c62125bdb5a28eace87ba2694883e76a165e52
--- /dev/null
+++ b/src/models/__init__.py
@@ -0,0 +1,5 @@
+from .response import CustomResponse
+from .status import Status
+
+from .job import JobModel
+from .agent_utils import AgentUtils
diff --git a/src/models/agent_class.py b/src/models/agent_class.py
new file mode 100644
index 0000000000000000000000000000000000000000..3fd140f2a8f458b6cf65723bb845b5781cfb55af
--- /dev/null
+++ b/src/models/agent_class.py
@@ -0,0 +1,53 @@
+class Agent(object):
+    def __init__(self, agentId, description, provider, scripts, URL):
+        self.provider = provider
+        self.description = description
+        self.agentId = agentId
+        self.scripts = scripts
+        self.URL = URL
+
+    @property
+    def agentId(self):
+        return self._agentId
+
+    @agentId.setter
+    def agentId(self, value):
+        self._agentId = value
+
+    @property
+    def description(self):
+        return self._description
+
+    @description.setter
+    def description(self, value):
+        self._description = value
+
+    @property
+    def provider(self):
+        return self._provider
+
+    @provider.setter
+    def provider(self, value):
+        self._provider = value
+
+    @property
+    def scripts(self):
+        return self._scripts
+
+    @scripts.setter
+    def scripts(self, value):
+        self._scripts = value
+
+    @property
+    def URL(self):
+        return self._URL
+
+    @URL.setter
+    def URL(self, value):
+        self._URL = value
+
+    def __str__(self):
+        str_1 = 'id: {0} , description: {1} , provider: {2} , scripts: {3} , URL: {4}'
+        str_1 = str_1.format(self.agentId, self.description,
+                             self.provider, self.scripts, self.URL)
+        return str_1
diff --git a/src/models/agent_utils.py b/src/models/agent_utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..fcbd6c4cb40861c3fe8e2c36c36d4198cbe63044
--- /dev/null
+++ b/src/models/agent_utils.py
@@ -0,0 +1,53 @@
+import os
+import pickle
+
+from .agent_class import Agent
+
+
+class AgentUtils:
+
+    def __init__(self):
+        self.filepath = None
+
+    @property
+    def filepath(self):
+        return self._filepath
+
+    @filepath.setter
+    def filepath(self, value):
+        self._filepath = value
+
+    def __readPklFile(self):
+        if os.path.exists(self.filepath):
+            file_pi = open(self.filepath, 'rb')
+            agent_list = pickle.load(file_pi)
+            return agent_list
+        else:
+            return []
+
+    def __writePklFile(self, agent_list):
+        file_pi = open(self.filepath, 'wb')
+        pickle.dump(agent_list, file_pi)
+
+    def addAgent(self, agentId, description, provider, scripts, URL):
+        agent = Agent(agentId, description, provider, scripts, URL)
+        agent_list = self.__readPklFile()
+        for old_agent in agent_list:
+            if old_agent.agentId == agent.agentId:
+                print('The agent already exists', agent)
+                return
+        agent_list.append(agent)
+        self.__writePklFile(agent_list)
+
+    def listAgents(self):
+        return_list = []
+        agent_list = self.__readPklFile()
+        for old_agent in agent_list:
+            agent = {}
+            agent['agentId'] = old_agent.agentId
+            agent['description'] = old_agent.description
+            agent['provider'] = old_agent.provider
+            agent['scripts'] = old_agent.scripts
+            agent['URL'] = old_agent.URL
+            return_list.append(agent)
+        return return_list
diff --git a/src/models/job.py b/src/models/job.py
new file mode 100644
index 0000000000000000000000000000000000000000..dc9b7d5b8adeb0a21ebcc13aa904c3005448776e
--- /dev/null
+++ b/src/models/job.py
@@ -0,0 +1,11 @@
+from common import Log
+
+
+class JobModel(object):
+
+    def status(self, jobId):
+        '''
+            connect to ES DB and get the status of jobId
+        '''
+        log = Log.from_default()
+        return log.get_status(jobId)
diff --git a/src/models/response.py b/src/models/response.py
new file mode 100644
index 0000000000000000000000000000000000000000..cf53ddd3b26e4929693cf9a826e314f2a931b757
--- /dev/null
+++ b/src/models/response.py
@@ -0,0 +1,13 @@
+from flask import jsonify
+
+
+class CustomResponse:
+    def __init__(self, statuscode, data):
+        # copy so the shared Status enum value dict is not mutated across responses
+        self.statuscode = dict(statuscode)
+        self.statuscode['data'] = data
+
+    def getres(self):
+        return jsonify(self.statuscode)
+
+    def getresjson(self):
+        return self.statuscode
diff --git a/src/models/status.py b/src/models/status.py
new file mode 100644
index 0000000000000000000000000000000000000000..f6f4ffeb6f2385a05a43c751ea756a5b878a3c4e
--- /dev/null
+++ b/src/models/status.py
@@ -0,0 +1,59 @@
+import enum
+
+
+class Status(enum.Enum):
+    SUCCESS = {
+        'ok': True,
+        'http': {'status': 200},
+        'why': "request successful"
+    }
+    FAILURE = {
+        'ok': False,
+        'http': {'status': 500},
+        'why': 'request failed'
+    }
+    ERR_SYSTEM = {
+        'ok': False,
+        'http': {'status': 500},
+        'why': "Internal Server Error"
+    }
+    ERR_INVALID_DATA = {
+        'ok': False,
+        'http': {'status': 400},
+        'why': "Invalid Data"
+    }
+    ERR_MISSING_PARAMETERS = {
+        'ok': False,
+        'http': {'status': 400},
+        'why': "Data Missing"
+    }
+    CORRUPT_FILE = {
+        'ok': False,
+        'http': {'status': 500},
+        'why': 'uploaded file is corrupt'
+    }
+    DATA_NOT_FOUND = {
+        'ok': False,
+        'http': {'status': 404},
+        'why': 'data not found'
+    }
+    OPERATION_NOT_PERMITTED = {
+        'ok': False,
+        'http': {'status': 400},
+        'why': 'operation not permitted'
+    }
+    ERR_GATEWAY = {
+        'ok': False,
+        'http': {'status': 400},
+        'why': 'gateway error'
+    }
+    ERR_NOTFOUND_FILE = {
+        'ok': False,
+        'http': {'status': 400},
+        'why': 'file not found'
+    }
+    ERR_SCHEMA_VALIDATION = {
+        'ok': False,
+        'http': {'status': 400},
+        'why': 'please refer api contract to check your request structure'
+    }
diff --git a/src/repositories/__init__.py b/src/repositories/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..26a8811c7fb167df28a2ca55d8ec2f386b4f5edb
--- /dev/null
+++ b/src/repositories/__init__.py
@@ -0,0 +1,2 @@
+from .agent import AgentRepo
+from .job import JobRepo
diff --git a/src/repositories/agent.py b/src/repositories/agent.py
new file mode 100644
index 0000000000000000000000000000000000000000..7d5953e8cd31e6abf41080d09c73dcad6d73b99b
--- /dev/null
+++ b/src/repositories/agent.py
@@ -0,0 +1,42 @@
+import uuid
+from concurrent.futures import ThreadPoolExecutor
+
+import config
+from common.elastic_wrapper import Log
+from models import AgentUtils
+
+
+class AgentRepo:
+    def __init__(self):
+        self.agentUtils = AgentUtils()
+        self.executor = ThreadPoolExecutor(max_workers=config.MAX_RUNNING_JOBS)
+
+    def list(self, filepath):
+        self.agentUtils.filepath = filepath
+        result = self.agentUtils.listAgents()
+        for agent in result:
+            agent.pop('scripts')
+        return result
+
+    def run(self, agentRunContext, filepath):
+        threadStarted = False
+        agentRunContext.jobId = str(uuid.uuid4())
+        self.agentUtils.filepath = filepath
+        agents_list = self.agentUtils.listAgents()
+        threadStarted = False
+        for agent in agents_list:
+            if agent['agentId'] == agentRunContext.requestBody['agentId']:
+                agentRunContext.URL = agent['URL']
+                threadStarted = True
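+                # submit the job only if the executor backlog is below MAX_WAITING_JOBS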
+                if self.executor._work_queue.qsize() < config.MAX_WAITING_JOBS:
+                    log = Log(agentRunContext)
+                    log.job(config.JOB_RUNNING_STATUS, "JOB in waiting state.")
+                    del log
+                    self.executor.submit(
+                        agent['scripts'][config.AGENT_SCRIPT_TYPES[agentRunContext.jobType]], agentRunContext)
+                else:
+                    return {'message': 'Already many jobs are in Waiting ... Please retry after some time.'}
+        if threadStarted:
+            return {'jobId': agentRunContext.jobId}
+        else:
+            return None
diff --git a/src/repositories/job.py b/src/repositories/job.py
new file mode 100644
index 0000000000000000000000000000000000000000..96cec74306167de947343ecd0d7f300de92c9ce3
--- /dev/null
+++ b/src/repositories/job.py
@@ -0,0 +1,9 @@
+from models import JobModel
+
+
+class JobRepo:
+    def __init__(self):
+        self.jobModel = JobModel()
+
+    def status(self, jobId):
+        return self.jobModel.status(jobId)
diff --git a/src/requirements.txt b/src/requirements.txt
new file mode 100644
index 0000000000000000000000000000000000000000..35a2525c0f44eba2536c917e83c8f1a391c0e47f
--- /dev/null
+++ b/src/requirements.txt
@@ -0,0 +1,17 @@
+elasticsearch==8.0.0
+Flask==1.1.2
+Jinja2==2.11.3
+MarkupSafe==1.1.1
+Werkzeug==1.0.1
+itsdangerous==1.1.0
+Flask-Cors==3.0.10
+Flask-RESTful==0.3.9
+uuid==1.30
+selenium==4.2.0
+Flask-BasicAuth==0.2.0
+Flask-HTTPBasicAuth==1.0.1
+pandas==1.4.2
+python-dateutil==2.8.1
+beautifulsoup4==4.9.3
+azure-storage-blob==12.10.0b1
+scrapy==2.6.1
diff --git a/src/resources/__init__.py b/src/resources/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e3f134692d1ed4d76fe63db898ca9dbf90b6f5d5
--- /dev/null
+++ b/src/resources/__init__.py
@@ -0,0 +1,2 @@
+from .agent import AgentListResource, AgentRunResource
+from .job import JobStatusResource
diff --git a/src/resources/agent.py b/src/resources/agent.py
new file mode 100644
index 0000000000000000000000000000000000000000..67eaba00821a333589b3cd5bf25929e7c984ef51
--- /dev/null
+++ b/src/resources/agent.py
@@ -0,0 +1,83 @@
+import os
+import traceback
+
+import config
+from app import basic_auth
+from common import ValueMissing
+from flask import request
+from flask_restful import Resource
+from models import CustomResponse, Status
+from repositories import AgentRepo
+from utilities import AgentRunContext
+
+agentRepo = AgentRepo()
+
+
+def mandatory_param(req):
+    e_value = Status.ERR_MISSING_PARAMETERS
+    param_list = list()
+    for param in config.API_MANDATORY_PARAMS:
+        if req.get(param) is None:
+            param_list.append(param)
+    if len(param_list) > 0:
+        return ",".join(param_list), e_value
+    else:
+        return None, e_value
+
+
+def check_job_type(req):
+    e_value = Status.ERR_INVALID_DATA
+    if req.get('type') in config.AGENT_SCRIPT_TYPES.keys():
+        return req.get('type'), e_value
+    else:
+        return None, e_value
+
+
+class AgentListResource(Resource):
+    @basic_auth.required
+    def get(self):
+        try:
+            result = agentRepo.list(os.path.join(
+                config.SERVER_STATIC_PATH, config.AGENT_CONFIG_PKL_PATH))
+            if result != None:
+                res = CustomResponse(Status.SUCCESS.value, result)
+                return res.getres()
+            else:
+                res = CustomResponse(
+                    Status.ERR_MISSING_PARAMETERS.value, None)
+                return res.getresjson(), 400
+        except Exception:
+            res = CustomResponse(
+                Status.ERR_MISSING_PARAMETERS.value, None)
+            return res.getresjson(), 400
+
+
+class AgentRunResource(Resource):
+    @basic_auth.required
+    def post(self):
+        try:
+            # default error status, in case an exception is raised before validation sets it
+            e_value = Status.ERR_SYSTEM
+            req = request.get_json()
+            # check mandatory params
+            miss, e_value = mandatory_param(req)
+            if miss is not None:
+                raise ValueMissing(miss+' - mandatory')
+
+            # check if valid JOB_TYPE
+            miss, e_value = check_job_type(req)
+            if miss is None:
+                raise ValueMissing('invalid type')
+
+            agentRunContext = AgentRunContext(req, miss)
+            result = agentRepo.run(agentRunContext, os.path.join(
+                config.SERVER_STATIC_PATH, config.AGENT_CONFIG_PKL_PATH))
+            if result != None:
+                res = CustomResponse(Status.SUCCESS.value, result)
+                return res.getres()
+            else:
+                res = CustomResponse(
+                    Status.ERR_INVALID_DATA.value, "Invalid Agent ID")
+                return res.getresjson(), 400
+        except Exception as e:
+            print(traceback.format_exc())
+            res = CustomResponse(e_value.value, str(e))
+            return res.getresjson(), 400
diff --git a/src/resources/job.py b/src/resources/job.py
new file mode 100644
index 0000000000000000000000000000000000000000..5260dcd84c1658a99b49eeea100580d67cd1dafc
--- /dev/null
+++ b/src/resources/job.py
@@ -0,0 +1,26 @@
+from app import basic_auth
+from flask import request
+from flask_restful import Resource
+from models import CustomResponse, Status
+from repositories import JobRepo
+
+jobRepo = JobRepo()
+
+
+class JobStatusResource(Resource):
+    @basic_auth.required
+    def get(self):
+        try:
+            result = jobRepo.status(request.args.get('jobId'))
+            if result != None:
+                res = CustomResponse(Status.SUCCESS.value, result)
+                return res.getres()
+            else:
+                res = CustomResponse(
+                    Status.ERR_MISSING_PARAMETERS.value, None)
+                return res.getresjson(), 400
+        except Exception as e:
+            print(e)
+            res = CustomResponse(
+                Status.ERR_MISSING_PARAMETERS.value, None)
+            return res.getresjson(), 400
diff --git a/src/routes/__init__.py b/src/routes/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..1c06e589840d6dac7a758e6260580d9a0cf83226
--- /dev/null
+++ b/src/routes/__init__.py
@@ -0,0 +1,2 @@
+from .agent import AGENT_BLUEPRINT
+from .job import JOB_BLUEPRINT
diff --git a/src/routes/agent.py b/src/routes/agent.py
new file mode 100644
index 0000000000000000000000000000000000000000..607fff825f2ce5b123d45f38f9335ee92e0dbbe2
--- /dev/null
+++ b/src/routes/agent.py
@@ -0,0 +1,13 @@
+from flask import Blueprint
+from flask_restful import Api
+from resources import AgentListResource, AgentRunResource
+
+AGENT_BLUEPRINT = Blueprint("agent", __name__)
+
+Api(AGENT_BLUEPRINT).add_resource(
+    AgentListResource, "/agents"
+)
+
+Api(AGENT_BLUEPRINT).add_resource(
+    AgentRunResource, "/run"
+)
diff --git a/src/routes/job.py b/src/routes/job.py
new file mode 100644
index 0000000000000000000000000000000000000000..374cb4003a9ee5308c2bfd9f1d8fa5128c4788ca
--- /dev/null
+++ b/src/routes/job.py
@@ -0,0 +1,9 @@
+from flask import Blueprint
+from flask_restful import Api
+from resources import JobStatusResource
+
+JOB_BLUEPRINT = Blueprint("job", __name__)
+
+Api(JOB_BLUEPRINT).add_resource(
+    JobStatusResource, "/status"
+)
diff --git a/src/scripts/__init__.py b/src/scripts/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..2f9b7f597d12f55f18f167c660b79956e5efe9f7
--- /dev/null
+++ b/src/scripts/__init__.py
@@ -0,0 +1,2 @@
+from .info import *
+from .pdf import *
diff --git a/src/scripts/info/__init__.py b/src/scripts/info/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e5324f1636493f61b476fa793ebb019abf38ee34
--- /dev/null
+++ b/src/scripts/info/__init__.py
@@ -0,0 +1,7 @@
+# Scrapy
+from .applied_scrapy import AppliedScrapy
+from .grainger_scrapy import GraingerScrapy
+
+# Selenium
+from .applied_selenium import AppliedSelenium
+from .grainger_selenium import GraingerSelenium
\ No newline at end of file
diff --git a/src/scripts/info/applied_scrapy.py b/src/scripts/info/applied_scrapy.py
new file mode 100644
index 0000000000000000000000000000000000000000..59a0958b194dbc178a964b0089b222e69f45ef52
--- /dev/null
+++ b/src/scripts/info/applied_scrapy.py
@@ -0,0 +1,67 @@
+import config
+import scrapy
+from common import Log
+from scrapy.crawler import CrawlerRunner
+from twisted.internet import reactor
+
+
+def AppliedScrapy(agentRunContext):
+    log = Log(agentRunContext)
+
+    log.job(config.JOB_RUNNING_STATUS, 'Job Started')
+
+    class AppliedSpider(scrapy.Spider):
+        name = 'applied'
+        custom_settings = {
+            "LOG_ENABLED": False
+        }
+        user_agent = 'Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/44.0.2403.157 Safari/537.36'
+
+        def __init__(self, search_param=''):
+            self.api_url = 'https://www.applied.com'
+            self.start_urls = [
+                'https://www.applied.com/search?page=0&search-category=all&override=true&isLevelUp=false&q='+search_param]
+            super().__init__()
+
+        def parse(self, response):
+            # search url parsing
+            for scrape_url in response.xpath('//a[@class="hide-for-print more-detail"]/@href').extract():
+                # extract product url
+                yield scrapy.Request(self.api_url+scrape_url, self.collect_data)
+
+            # extract next page url and re-run function
+            next_page = response.xpath('//a[@class="next"]/@href').get()
+            if next_page is not None:
+                yield scrapy.Request(self.api_url+next_page, self.parse)
+
+        # product url parsing
+        def collect_data(self, response):
+
+            # specification data
+            spec = dict()
+            for trs in response.xpath('//*[@id="specifications"]//table//tr'):
+                key = trs.xpath('./td[1]/text()').get().strip()
+                value = trs.xpath('./td[2]/text()').get().strip()
+                spec[key] = value
+
+            # final data
+            data = {
+                'company': response.xpath('//h1[@itemprop="brand"]/a/text()').get().strip(),
+                'product': response.xpath('//span[@itemprop="mpn name"]/text()').get().strip(),
+                'details': response.xpath('//div[@class="details"]//text()').get().strip(),
+                'item': response.xpath('//div[@class="customer-part-number"]/text()').get().strip(),
+                'description': [x.strip() for x in response.xpath('//div[@class="short-description"]/ul/li/text()').extract()],
+                'specification': spec,
+                'url': response.url.strip(),
+            }
+            log.data(data)
+
+    runner = CrawlerRunner()
+
+    d = runner.crawl(
+        AppliedSpider, search_param=agentRunContext.requestBody.get('search'))
+    d.addBoth(lambda _: reactor.stop())
+    reactor.run()
+
+    log.job(config.JOB_COMPLETED_SUCCESS_STATUS,
+            'Successfully scraped all data')
diff --git a/src/scripts/info/applied_selenium.py b/src/scripts/info/applied_selenium.py
new file mode 100644
index 0000000000000000000000000000000000000000..e86978f82c77949d5fa637e766e4145f46f44288
--- /dev/null
+++ b/src/scripts/info/applied_selenium.py
@@ -0,0 +1,92 @@
+import os
+import time
+import traceback
+import config
+
+from common import Log, get_driver
+from selenium.webdriver.common.by import By
+from selenium.webdriver.support import expected_conditions as EC
+from selenium.webdriver.support.ui import WebDriverWait
+
+
+def AppliedSelenium(agentRunContext):
+    log = Log(agentRunContext)
+    try:
+
+        url = 'https://www.applied.com/search?q=:relevance:FTS:' + \
+            agentRunContext.requestBody['search'] + \
+            '&page=<page>&search-category=all&override=true&isLevelUp=false'
+        download_dir_id = str(agentRunContext.jobId)
+        download_dir = os.path.join(
+            os.getcwd(), 'temp', 'temp-' + download_dir_id)
+
+        driver = get_driver(download_dir)
+        driver.maximize_window()
+
+        driver.get(url)
+        wait = WebDriverWait(driver, 20)
+        log.job(config.JOB_RUNNING_STATUS, 'Job Started')
+
+        try:
+            wait.until(EC.element_to_be_clickable(
+                (By.ID, "CybotCookiebotDialogBodyButtonAccept")))
+            driver.find_element_by_id(
+                "CybotCookiebotDialogBodyButtonAccept").click()
+        except:
+            pass
+        for page_no in range(1, 1000):
+            driver.get(url.replace('<page>', str(page_no)))
+            time.sleep(2)
+            if 'page' not in driver.current_url:
+                break
+
+            wait.until(EC.presence_of_element_located(
+                (By.CLASS_NAME, 'product-list')))
+
+            for item in driver.find_elements_by_xpath('//a[@itemprop="url"][.="View more details"]'):
+                href = item.get_attribute('href')
+                driver.switch_to.new_window()
+                driver.get(href)
+                time.sleep(2)
+                wait.until(EC.presence_of_element_located((By.TAG_NAME, 'h1')))
+
+                item_dict = {
+                    'brand': driver.find_element_by_tag_name('h1').text.strip(),
+                    'name': driver.find_element_by_xpath('//*[@itemprop= "mpn name"]').text.strip(),
+                    'details': driver.find_element_by_class_name('details').text.strip(),
+                    'item_no': driver.find_element_by_class_name('customer-part-number').text.strip(),
+                    'company': driver.find_element_by_xpath('//h1[@itemprop="brand"]/a').text.strip(),
+                    'product': driver.find_element_by_xpath('//span[@itemprop="mpn name"]').text.strip(),
+                    'item': driver.find_element_by_xpath('//div[@class="customer-part-number"]').text.strip()
+                }
+
+                item_dict['short_description'] = list()
+                des = driver.find_element_by_class_name('short-description')
+                for ele in des.find_elements_by_xpath('.//li'):
+                    item_dict['short_description'].append(ele.text.strip())
+
+                item_dict['specification'] = dict()
+                spe = driver.find_element_by_id('specifications')
+                for table in spe.find_elements_by_xpath('.//table'):
+                    for tr_ele in table.find_elements_by_xpath('./tbody/tr'):
+                        key = str(tr_ele.find_element_by_xpath(
+                            './td[1]').text).strip()
+                        value = str(tr_ele.find_element_by_xpath(
+                            './td[2]').text).strip()
+                        item_dict['specification'][key] = value
+
+                print(item_dict['specification'])
+                try:
+                    log.data(item_dict)
+                except:
+                    pass
+                driver.close()
+                driver.switch_to.window(driver.window_handles[0])
+        log.job(config.JOB_COMPLETED_SUCCESS_STATUS,
+                'Successfully scraped all data')
+    except Exception as e:
+        log.job(config.JOB_COMPLETED_FAILED_STATUS, str(e))
+        log.info('exception', traceback.format_exc())
+
+    driver.quit()
diff --git a/src/scripts/info/grainger_scrapy.py b/src/scripts/info/grainger_scrapy.py
new file mode 100644
index 0000000000000000000000000000000000000000..cc54a6282a8f3999c51a9ff0cc4369fd49f0e509
--- /dev/null
+++ b/src/scripts/info/grainger_scrapy.py
@@ -0,0 +1,108 @@
+import config
+import scrapy
+from common import Log
+from scrapy.crawler import CrawlerRunner
+from twisted.internet import reactor
+
+# search_param=do630 voltage regulator (via category list)
+# search_param=do 360 voltage (via product list)
+# search_param=61HH68 (via direct product page)
+
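+# define the JavaScript literals null/true/false so that eval() of the embedded
+# window.__PRELOADED_STATE__ object does not raise NameError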
+null = 'null'
+true = 'true'
+false = 'false'
+
+
+
+def GraingerScrapy(agentRunContext):
+    
+    log = Log(agentRunContext)
+
+    class GraingerScrapy(scrapy.Spider):
+        name = 'GraingerScrapy'
+        user_agent = 'Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/44.0.2403.157 Safari/537.36'
+        main_url = 'https://www.grainger.com/'
+
+        def __init__(self, search_param):
+            self.start_urls = [
+                "https://www.grainger.com/search?searchQuery="+search_param]
+            super().__init__()
+
+        def parse(self, response):
+            if 'search?' not in response.url:
+                yield scrapy.Request(url=response.url, callback=self.collect_data)
+            else:
+                if len(response.css('section[aria-label="Category products"]')) > 0:
+                    script = [i.strip() for i in response.css('script::text').extract(
+                    ) if i.strip().startswith('window.__PRELOADED_STATE__')][0]
+                    script = eval(script.split(
+                        '=', 1)[-1].split('window.__UI_CONFIG__')[0].strip()[:-1])
+                    products = list(script['category']['category']
+                                    ['skuToProductMap'].keys())
+                    href = '/product/info?productArray='+','.join(products)
+                    yield scrapy.Request(url=self.main_url+href, callback=self.get_products)
+                else:
+                    # iterate every categories
+                    for href in response.css('a.route::attr(href)').extract():
+                        yield scrapy.Request(url=self.main_url+href, callback=self.parse_category_page)
+
+        def parse_category_page(self, response):
+            script = [i.strip() for i in response.css('script::text').extract(
+            ) if i.strip().startswith('window.__PRELOADED_STATE__')][0]
+            script = eval(script.split('=', 1)
+                        [-1].split('window.__UI_CONFIG__')[0].strip()[:-1])
+            cat_id = script['category']['category']['id']
+            for i in script['category']['collections']:
+                coll_id = i['id']
+                url1 = self.main_url + \
+                    '/experience/pub/api/products/collection/{0}?categoryId={1}'
+                yield scrapy.Request(url=url1.format(coll_id, cat_id), callback=self.get_products)
+
+        def get_products(self, response):
+            data = response.json()
+            if 'products' in data.keys():
+                for i in data['products']:
+                    yield scrapy.Request(url=self.main_url+i['productDetailUrl'], callback=self.collect_data)
+            else:
+                for i in data.values():
+                    if type(i) == dict and 'productDetailUrl' in i.keys():
+                        yield scrapy.Request(url=self.main_url+i['productDetailUrl'], callback=self.collect_data)
+
+        def collect_data(self, response):
+            data = dict()
+            main_content = response.css('.product-detail__content--large')
+            spec = response.css('.specifications')
+            data = {
+                'brand': main_content.css('.product-detail__brand--link::text').get().strip(),
+                'product-heading': main_content.css('.product-detail__heading::text').get().strip(),
+                'url': response.url
+            }
+            for li in main_content.css('.product-detail__product-identifiers-content'):
+                key = li.css(
+                    '.product-detail__product-identifiers-label::text').get().strip()
+                value = li.css(
+                    '.product-detail__product-identifiers-description::text').extract()
+                value = [str(i).strip() for i in value] if len(
+                    value) > 1 else str(value[0]).strip()
+                data[key] = value
+
+            for li in spec.css('.specifications__item'):
+                key = li.css('.specifications__description::text').get()
+                value = li.css('.specifications__value::text').extract()
+                value = [str(i).strip() for i in value] if len(
+                    value) > 1 else str(value[0]).strip()
+                data[key] = value
+
+            log.data(data)
+
+    log.job(config.JOB_RUNNING_STATUS, 'Job Started')
+
+    runner = CrawlerRunner()
+
+    d = runner.crawl(
+        GraingerScrapy, search_param=agentRunContext.requestBody.get('search'))
+    d.addBoth(lambda _: reactor.stop())
+    reactor.run()
+
+    log.job(config.JOB_COMPLETED_SUCCESS_STATUS,
+            'Successfully scraped all data')
\ No newline at end of file
diff --git a/src/scripts/info/grainger_selenium.py b/src/scripts/info/grainger_selenium.py
new file mode 100644
index 0000000000000000000000000000000000000000..150eadaed7b316d507523a22784b4312df76cd11
--- /dev/null
+++ b/src/scripts/info/grainger_selenium.py
@@ -0,0 +1,19 @@
+import os
+import time
+import traceback
+import config
+
+from common import Log, get_driver
+from selenium.webdriver.common.by import By
+from selenium.webdriver.support import expected_conditions as EC
+from selenium.webdriver.support.ui import WebDriverWait
+
+def GraingerSelenium(agentRunContext):
+    log = Log(agentRunContext)
+
+    log.job(config.JOB_RUNNING_STATUS, 'Job Started')
+
+    log.job(config.JOB_RUNNING_STATUS, 'Script Under Development')
+
+    log.job(config.JOB_COMPLETED_SUCCESS_STATUS,
+            'Successfully scraped all data')
diff --git a/src/scripts/pdf/__init__.py b/src/scripts/pdf/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e5324f1636493f61b476fa793ebb019abf38ee34
--- /dev/null
+++ b/src/scripts/pdf/__init__.py
@@ -0,0 +1,7 @@
+# Scrapy
+from .applied_scrapy import AppliedScrapy
+from .grainger_scrapy import GraingerScrapy
+
+# Selenium
+from .applied_selenium import AppliedSelenium
+from .grainger_selenium import GraingerSelenium
\ No newline at end of file
diff --git a/src/scripts/pdf/applied_scrapy.py b/src/scripts/pdf/applied_scrapy.py
new file mode 100644
index 0000000000000000000000000000000000000000..02bcfe66623dd305eafe0d17b5a0e429c176b38a
--- /dev/null
+++ b/src/scripts/pdf/applied_scrapy.py
@@ -0,0 +1,16 @@
+import config
+import scrapy
+from common import Log
+from scrapy.crawler import CrawlerRunner
+from twisted.internet import reactor
+
+
+def AppliedScrapy(agentRunContext):
+    log = Log(agentRunContext)
+
+    log.job(config.JOB_RUNNING_STATUS, 'Job Started')
+
+    log.job(config.JOB_RUNNING_STATUS, 'Script Under Development')
+
+    log.job(config.JOB_COMPLETED_SUCCESS_STATUS,
+            'Successfully scraped all data')
diff --git a/src/scripts/pdf/applied_selenium.py b/src/scripts/pdf/applied_selenium.py
new file mode 100644
index 0000000000000000000000000000000000000000..54fbbf2c4604c1b2987d59eca1fba1399c0fc657
--- /dev/null
+++ b/src/scripts/pdf/applied_selenium.py
@@ -0,0 +1,20 @@
+import os
+import time
+import traceback
+import config
+
+from common import Log, get_driver
+from selenium.webdriver.common.by import By
+from selenium.webdriver.support import expected_conditions as EC
+from selenium.webdriver.support.ui import WebDriverWait
+
+
+def AppliedSelenium(agentRunContext):
+    log = Log(agentRunContext)
+
+    log.job(config.JOB_RUNNING_STATUS, 'Job Started')
+
+    log.job(config.JOB_RUNNING_STATUS, 'Script Under Development')
+
+    log.job(config.JOB_COMPLETED_SUCCESS_STATUS,
+            'Successfully scraped all data')
diff --git a/src/scripts/pdf/grainger_scrapy.py b/src/scripts/pdf/grainger_scrapy.py
new file mode 100644
index 0000000000000000000000000000000000000000..5f293fad1ff6415e136885bc9b0e047c5b4e2bc3
--- /dev/null
+++ b/src/scripts/pdf/grainger_scrapy.py
@@ -0,0 +1,105 @@
+
+import scrapy
+from scrapy.pipelines.files import FilesPipeline
+
+# search_param=do630 voltage regulator (via category list)
+# search_param=do 360 voltage (via product list)
+# search_param=61HH68 (via direct product page)
+
+# variables for eval() to parse
+null = 'null'
+true = 'true'
+false = 'false'
+
+def GraingerScrapy(agentRunContext):
+
+    class GeneralFilesItem(scrapy.Item):
+        file_name = scrapy.Field()
+        file_urls = scrapy.Field()
+        files = scrapy.Field()
+
+
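+    # Minimal FilesPipeline subclass: for every URL in item['file_urls'] it issues a
+    # download request carrying the desired file name in request.meta, and
+    # file_path() then stores the downloaded file under exactly that name.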
+    class GeneralFilesPipeline(FilesPipeline):
+        def get_media_requests(self, item, info):
+            for my_url in item.get('file_urls', []):
+                yield scrapy.Request(my_url, meta={'file_name': item.get('file_name')})
+
+        def file_path(self, request, response=None, info=None):
+            return request.meta['file_name']
+
+
+    class GraingerPDFScrapy(scrapy.Spider):
+        name = 'GraingerPDFScrapy'
+        user_agent = 'Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/44.0.2403.157 Safari/537.36'
+        main_url = 'https://www.grainger.com/'
+        custom_settings = {
+            # the pipeline class is defined inside GraingerScrapy(), so it has no
+            # importable path; recent Scrapy versions accept the class object itself
+            'ITEM_PIPELINES': {GeneralFilesPipeline: 1},
+            # local download directory; adjust to a path that exists on your machine
+            'FILES_STORE': '/home/test/Music/down/'
+        }
+
+        def __init__(self, agentRunContext):
+            self.start_urls = [
+                "https://www.grainger.com/search?searchQuery="+agentRunContext.requestBody['search']]
+            super().__init__()
+
+        def parse(self, response):
+            if 'search?' not in response.url:
+                yield scrapy.Request(url=response.url, callback=self.collect_data)
+            else:
+                if len(response.css('section[aria-label="Category products"]')) > 0:
+                    script = [i.strip() for i in response.css('script::text').extract(
+                    ) if i.strip().startswith('window.__PRELOADED_STATE__')][0]
+                    script = eval(script.split(
+                        '=', 1)[-1].split('window.__UI_CONFIG__')[0].strip()[:-1])
+                    products = list(script['category']['category']
+                                    ['skuToProductMap'].keys())
+                    href = '/product/info?productArray='+','.join(products)
+                    yield scrapy.Request(url=self.main_url+href, callback=self.get_products)
+                else:
+                    # iterate over every category link
+                    for href in response.css('a.route::attr(href)').extract():
+                        yield scrapy.Request(url=self.main_url+href, callback=self.parse_category_page)
+
+        def parse_category_page(self, response):
+            script = [i.strip() for i in response.css('script::text').extract(
+            ) if i.strip().startswith('window.__PRELOADED_STATE__')][0]
+            script = eval(script.split('=', 1)
+                        [-1].split('window.__UI_CONFIG__')[0].strip()[:-1])
+            cat_id = script['category']['category']['id']
+            for i in script['category']['collections']:
+                coll_id = i['id']
+                url1 = self.main_url + \
+                    '/experience/pub/api/products/collection/{0}?categoryId={1}'
+                yield scrapy.Request(url=url1.format(coll_id, cat_id), callback=self.get_products)
+
+        def get_products(self, response):
+            data = response.json()
+            if 'products' in data.keys():
+                for i in data['products']:
+                    yield scrapy.Request(url=self.main_url+i['productDetailUrl'], callback=self.collect_data)
+            else:
+                for i in data.values():
+                    if isinstance(i, dict) and 'productDetailUrl' in i:
+                        yield scrapy.Request(url=self.main_url+i['productDetailUrl'], callback=self.collect_data)
+
+        def collect_data(self, response):
+            data = dict()
+            main_content = response.css('.product-detail__content--large')
+            for li in main_content.css('.product-detail__product-identifiers-content'):
+                key = li.css(
+                    '.product-detail__product-identifiers-label::text').get().strip()
+                value = li.css(
+                    '.product-detail__product-identifiers-description::text').extract()
+                value = [str(i).strip() for i in value] if len(
+                    value) > 1 else str(value[0]).strip()
+                data[key] = value
+
+            for a_tag in response.css('a.documentation__link'):
+                a_href = a_tag.xpath('./@href').get()
+                a_name = a_tag.xpath('./@title').get().strip()
+                filename = data['Item #']+'-'+a_name+'.'+a_href.split('.')[-1]
+                item = GeneralFilesItem()
+                item['file_name'] = filename
+                item['file_urls'] = ['https:'+a_href]
+                yield item
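+
+    # Note: unlike the info variant of this script, the spider above is only defined
+    # here and never scheduled. A minimal sketch of wiring it up, assuming the same
+    # CrawlerRunner/reactor pattern used in src/scripts/info/grainger_scrapy.py:
+    #
+    #   from scrapy.crawler import CrawlerRunner
+    #   from twisted.internet import reactor
+    #
+    #   runner = CrawlerRunner()
+    #   d = runner.crawl(GraingerPDFScrapy, agentRunContext=agentRunContext)
+    #   d.addBoth(lambda _: reactor.stop())
+    #   reactor.run()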
diff --git a/src/scripts/pdf/grainger_selenium.py b/src/scripts/pdf/grainger_selenium.py
new file mode 100644
index 0000000000000000000000000000000000000000..0b49b373c0893ee1a08e484cc7e0498cf0a8c375
--- /dev/null
+++ b/src/scripts/pdf/grainger_selenium.py
@@ -0,0 +1,134 @@
+import os
+import shutil
+import time
+import traceback
+
+
+import config
+from common import Log, get_driver
+from selenium.webdriver.common.by import By
+from selenium.webdriver.support import expected_conditions as EC
+from selenium.webdriver.support.ui import WebDriverWait
+
+
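+# Downloads every file linked from the documentation section of the product page
+# currently open in `driver`: each link is opened in a new tab, the downloaded file
+# is renamed with the product identifier and moved into new_output_dir.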
+def single_product(log, driver, download_dir, new_output_dir, win_handle=2):
+    try:
+        doc_section = driver.find_elements(
+            By.XPATH, '//ul[@class="documentation__content"]//li')
+        for link in doc_section:
+            download_link = link.find_element(
+                By.TAG_NAME, 'a').get_attribute('href')
+            product_name = str(driver.current_url).split('-')[-1].strip()
+            try:
+                product_name = product_name.split('-')[-1].split('?')[:1][0]
+            except:
+                pass
+            driver.switch_to.new_window()
+            driver.get(download_link)
+            time.sleep(5)
+
+            file_name = os.listdir(download_dir)[0]
+            new_file_name = product_name + "-" + file_name
+            os.rename(os.path.join(download_dir, file_name),
+                      os.path.join(download_dir, new_file_name))
+
+            shutil.move(os.path.join(download_dir, new_file_name),
+                        os.path.join(new_output_dir, new_file_name))
+
+            log.info('info', '{0} Downloaded'.format(new_file_name))
+
+            time.sleep(2)
+            driver.close()
+            driver.switch_to.window(driver.window_handles[win_handle])
+    except Exception as e:
+        log.info('exception', traceback.format_exc())
+
+
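+# Handles search results that land on a multi-tiered category page: walks every
+# category link, closes the feedback modal if it appears, then visits each product
+# row and delegates the actual PDF downloads to single_product().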
+def multi_product(log, wait, driver, download_dir, new_output_dir):
+    # Collecting details for all products available
+    wait.until(EC.visibility_of_element_located(
+        (By.XPATH, '//div[@class = "multi-tiered-category"]')))
+    all_product = driver.find_elements(
+        By.XPATH, '//div[@class = "multi-tiered-category"]//ul//li/a')
+
+    all_product = [i.get_attribute('href') for i in all_product]
+
+    c_url = driver.current_url
+
+    for p_url in all_product:
+        driver.switch_to.new_window()
+        driver.get(p_url)
+        time.sleep(2)
+
+        try:
+            wait.until(EC.element_to_be_clickable(
+                (By.XPATH, '//div[@id="feedbackBrowseModal"]//div[@class="modal-footer"]//a[@class = "close"]')))
+            driver.find_element(
+                By.XPATH, '//div[@id="feedbackBrowseModal"]//div[@class="modal-footer"]//a[@class = "close"]').click()
+            time.sleep(2)
+        except:
+            pass
+
+        for a_tag in driver.find_elements(By.XPATH, "//tbody//a"):
+            product_url = str(a_tag.get_attribute('href'))
+            driver.switch_to.new_window()
+            driver.get(product_url)
+            time.sleep(2)
+            single_product(log, driver, download_dir, new_output_dir)
+            driver.close()
+            driver.switch_to.window(driver.window_handles[1])
+
+        driver.close()
+        driver.switch_to.window(driver.window_handles[0])
+        driver.get(c_url)
+        time.sleep(5)
+
+
+def GraingerSelenium(agentRunContext):
+    log = Log(agentRunContext)
+    try:
+        download_dir_id = str(agentRunContext.jobId)
+        download_dir = os.path.join(
+            os.getcwd(), 'temp', 'temp-' + download_dir_id)
+
+        # Creating an output directory for storing PDFs (use os.path.join so the
+        # path is valid on both Windows and Linux instead of hard-coded '\\')
+        output_dir = os.path.join(os.path.dirname(os.getcwd()), 'output')
+        os.makedirs(output_dir, exist_ok=True)
+        new_output_dir = os.path.join(output_dir, download_dir_id)
+        os.mkdir(new_output_dir)
+
+        driver = get_driver(download_dir)
+        driver.maximize_window()
+        driver.get(agentRunContext.URL)
+
+        wait = WebDriverWait(driver, 20)
+
+        log.job(config.JOB_RUNNING_STATUS, 'Job Started')
+
+        # Entering the search parameter
+        driver.find_element(
+            By.XPATH, '//input[@aria-label="Search Query"]').send_keys(agentRunContext.requestBody['search'])
+        time.sleep(2)
+        driver.find_element(
+            By.XPATH, '//button[@aria-label="Submit Search Query"]').click()
+        time.sleep(5)
+
+        # If the search landed on a multi-tiered category page
+        if len(driver.find_elements(By.XPATH, '//div[@class = "multi-tiered-category"]')) > 0:
+            multi_product(log, wait, driver, download_dir, new_output_dir)
+        # Otherwise the search landed directly on a single product page
+        else:
+            single_product(log, driver, download_dir, new_output_dir, 0)
+
+        log.job(config.JOB_RUNNING_STATUS, 'Downloaded all product documents')
+
+    except Exception as e:
+        log.job(config.JOB_COMPLETED_FAILED_STATUS, str(e))
+        log.info('exception', traceback.format_exc())
+
+    driver.quit()
diff --git a/src/start.sh b/src/start.sh
new file mode 100644
index 0000000000000000000000000000000000000000..4cb9bdcaa8050439f61fae4327318d12dc1ec92f
--- /dev/null
+++ b/src/start.sh
@@ -0,0 +1,3 @@
+#!/bin/bash
+python3 app.py
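+# alternative: serve the app through uWSGI instead (uses uwsgi.ini / wsgi.py):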
+# uwsgi --ini uwsgi.ini
diff --git a/src/static/agent_configs/agents.json b/src/static/agent_configs/agents.json
new file mode 100644
index 0000000000000000000000000000000000000000..277c3fe57283ca399b272bd51ef8a7c52d1fb26c
--- /dev/null
+++ b/src/static/agent_configs/agents.json
@@ -0,0 +1,42 @@
+[
+    {
+        "agentId": "APPLIED-SELENIUM",
+        "description": "Crawler For Applied",
+        "provider": "Applied",
+        "URL": "https://www.applied.com",
+        "scripts": {
+            "info": "AppliedSelenium",
+            "pdf": "AppliedSelenium"
+        }
+    },
+    {
+        "agentId": "APPLIED-SCRAPY",
+        "description": "Crawler For Applied",
+        "provider": "Applied",
+        "URL": "https://www.applied.com",
+        "scripts": {
+            "info": "AppliedScrapy",
+            "pdf": "AppliedScrapy"
+        }
+    },
+    {
+        "agentId": "GRAINGER-SELENIUM",
+        "description": "Crawler For Grainger",
+        "provider": "Grainger",
+        "URL": "https://www.grainger.com",
+        "scripts": {
+            "info": "GraingerSelenium",
+            "pdf": "GraingerSelenium"
+        }
+    },
+    {
+        "agentId": "GRAINGER-SCRAPY",
+        "description": "Crawler For Grainger",
+        "provider": "Grainger",
+        "URL": "https://www.grainger.com",
+        "scripts": {
+            "info": "GraingerScrapy",
+            "pdf": "GraingerScrapy"
+        }
+    }
+]
\ No newline at end of file
diff --git a/src/utilities/__init__.py b/src/utilities/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..47d3a32b4f0052f73b5a2fde71c7850f8a4e20c4
--- /dev/null
+++ b/src/utilities/__init__.py
@@ -0,0 +1 @@
+from .agent_run_context import AgentRunContext
\ No newline at end of file
diff --git a/src/utilities/agent_run_context.py b/src/utilities/agent_run_context.py
new file mode 100644
index 0000000000000000000000000000000000000000..a92eb814f304786cc11ac30d73a75dc016ffd045
--- /dev/null
+++ b/src/utilities/agent_run_context.py
@@ -0,0 +1,40 @@
+
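+# Plain value object describing a single scraping run: it carries the raw request
+# body plus the jobId, agent URL and jobType, and is handed to the agent scripts
+# as `agentRunContext`.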
+class AgentRunContext(object):
+
+    def __init__(self, req, jobType):
+        self.requestBody = req
+        self.jobId = None
+        self.URL = None
+        self.jobType = jobType
+
+    @property
+    def jobId(self):
+        return self._jobId
+
+    @jobId.setter
+    def jobId(self, value):
+        self._jobId = value
+
+    @property
+    def requestBody(self):
+        return self._requestBody
+
+    @requestBody.setter
+    def requestBody(self, value):
+        self._requestBody = value
+
+    @property
+    def URL(self):
+        return self._URL
+
+    @URL.setter
+    def URL(self, value):
+        self._URL = value
+
+    @property
+    def jobType(self):
+        return self._jobType
+
+    @jobType.setter
+    def jobType(self, value):
+        self._jobType = value
diff --git a/src/uwsgi.ini b/src/uwsgi.ini
new file mode 100644
index 0000000000000000000000000000000000000000..e5c0fbdb07ac216fd34fc9017c23b3e44cee4d5e
--- /dev/null
+++ b/src/uwsgi.ini
@@ -0,0 +1,8 @@
+[uwsgi]
+module = wsgi:wsgi_app
+master = true
+processes = 5
+threads = 2
+http-socket = :5001
+socket = ../vfs.wsgi_app.sock
+chmod-socket = 660
\ No newline at end of file
diff --git a/src/wsgi.py b/src/wsgi.py
new file mode 100644
index 0000000000000000000000000000000000000000..1b31ea07fe4bb1c3b3ff598b933af6e06034a4c4
--- /dev/null
+++ b/src/wsgi.py
@@ -0,0 +1,3 @@
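+# Entry point referenced by uwsgi.ini (`module = wsgi:wsgi_app`); it simply
+# re-exports the server object defined in app.py.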
+from app import server as wsgi_app
+if __name__ == "__main__":
+    wsgi_app.run()
diff --git a/test/scraping-api-collection.json b/test/scraping-api-collection.json
new file mode 100644
index 0000000000000000000000000000000000000000..13d8d9a60d8641708acb4ad379a295ed20db305b
--- /dev/null
+++ b/test/scraping-api-collection.json
@@ -0,0 +1,192 @@
+{
+	"info": {
+		"_postman_id": "9a1bcfd6-80ac-49a6-ad43-da29f9f6c9d0",
+		"name": "scraping-api-collections",
+		"schema": "https://schema.getpostman.com/json/collection/v2.1.0/collection.json",
+		"_exporter_id": "14608642"
+	},
+	"item": [
+		{
+			"name": "agent-list",
+			"request": {
+				"auth": {
+					"type": "basic",
+					"basic": [
+						{
+							"key": "password",
+							"value": "YYYY",
+							"type": "string"
+						},
+						{
+							"key": "username",
+							"value": "XXXX",
+							"type": "string"
+						},
+						{
+							"key": "showPassword",
+							"value": false,
+							"type": "boolean"
+						}
+					]
+				},
+				"method": "GET",
+				"header": [],
+				"url": {
+					"raw": "http://0.0.0.0:5001/general/agents",
+					"protocol": "http",
+					"host": [
+						"0",
+						"0",
+						"0",
+						"0"
+					],
+					"port": "5001",
+					"path": [
+						"general",
+						"agents"
+					]
+				},
+				"description": "Retrieves the list of available agents."
+			},
+			"response": []
+		},
+		{
+			"name": "job-status",
+			"request": {
+				"auth": {
+					"type": "basic",
+					"basic": [
+						{
+							"key": "password",
+							"value": "YYYY",
+							"type": "string"
+						},
+						{
+							"key": "username",
+							"value": "XXXX",
+							"type": "string"
+						},
+						{
+							"key": "showPassword",
+							"value": false,
+							"type": "boolean"
+						}
+					]
+				},
+				"method": "GET",
+				"header": [],
+				"url": {
+					"raw": "http://0.0.0.0:5001/general/status?jobId",
+					"protocol": "http",
+					"host": [
+						"0",
+						"0",
+						"0",
+						"0"
+					],
+					"port": "5001",
+					"path": [
+						"general",
+						"status"
+					],
+					"query": [
+						{
+							"key": "jobId",
+							"value": null
+						}
+					]
+				},
+				"description": "Retrieves the status of the given jobId."
+			},
+			"response": []
+		},
+		{
+			"name": "agent-run",
+			"request": {
+				"auth": {
+					"type": "basic",
+					"basic": [
+						{
+							"key": "password",
+							"value": "YYYY",
+							"type": "string"
+						},
+						{
+							"key": "username",
+							"value": "XXXX",
+							"type": "string"
+						},
+						{
+							"key": "showPassword",
+							"value": false,
+							"type": "boolean"
+						}
+					]
+				},
+				"method": "POST",
+				"header": [],
+				"body": {
+					"mode": "raw",
+					"raw": "{\r\n    \"agentId\": \"AGENT-ID\",\r\n    \"type\": \"TYPE\",\r\n    \"search\": \"MY_SEARCH_PARAM\"\r\n}",
+					"options": {
+						"raw": {
+							"language": "json"
+						}
+					}
+				},
+				"url": {
+					"raw": "http://0.0.0.0:5001/general/run",
+					"protocol": "http",
+					"host": [
+						"0",
+						"0",
+						"0",
+						"0"
+					],
+					"port": "5001",
+					"path": [
+						"general",
+						"run"
+					]
+				},
+				"description": "Initiates a job for the given parameters."
+			},
+			"response": []
+		}
+	],
+	"auth": {
+		"type": "basic",
+		"basic": [
+			{
+				"key": "password",
+				"value": "generic@123#",
+				"type": "string"
+			},
+			{
+				"key": "username",
+				"value": "test",
+				"type": "string"
+			}
+		]
+	},
+	"event": [
+		{
+			"listen": "prerequest",
+			"script": {
+				"type": "text/javascript",
+				"exec": [
+					""
+				]
+			}
+		},
+		{
+			"listen": "test",
+			"script": {
+				"type": "text/javascript",
+				"exec": [
+					""
+				]
+			}
+		}
+	]
+}
\ No newline at end of file
diff --git a/training/Web Scraping.pptx b/training/Web Scraping.pptx
new file mode 100644
index 0000000000000000000000000000000000000000..4109746a421bb25c0e5975cfe8deaeeea8ca0298
Binary files /dev/null and b/training/Web Scraping.pptx differ
diff --git a/training/scrapy/scrapy_basics.ipynb b/training/scrapy/scrapy_basics.ipynb
new file mode 100644
index 0000000000000000000000000000000000000000..69596e98e4381d67e85d29dbce6eeb2de719aec1
--- /dev/null
+++ b/training/scrapy/scrapy_basics.ipynb
@@ -0,0 +1,221 @@
+{
+ "cells": [
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "# Scrapy documentation\n",
+    "\n",
+    "Scrapy is a fast high-level web crawling and web scraping framework, used to crawl websites and extract structured data from their pages.\n",
+    "\n",
+    "It can be used for a wide range of purposes, from data mining to monitoring and automated testing."
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "---"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## INSTALLATION\n",
+    "\n",
+    "you can install Scrapy and its dependencies from PyPI with:\n",
+    "\n",
+    "> pip install Scrapy\n",
+    "\n",
+    "For more information see [Installation documentation](https://docs.scrapy.org/en/latest/intro/install.html)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "----"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "### SAMPLE SPIDER CODE\n",
+    "\n",
+    "\n",
+    "```\n",
+    "# file_name = quotes_spider.py\n",
+    "import scrapy\n",
+    "\n",
+    "\n",
+    "class QuotesSpider(scrapy.Spider):\n",
+    "    name = 'quotes'\n",
+    "    start_urls = [\n",
+    "        'https://quotes.toscrape.com/tag/humor/',\n",
+    "    ]\n",
+    "\n",
+    "    def parse(self, response):\n",
+    "        for quote in response.css('div.quote'):\n",
+    "            yield {\n",
+    "                'author': quote.xpath('span/small/text()').get(),\n",
+    "                'text': quote.css('span.text::text').get(),\n",
+    "            }\n",
+    "\n",
+    "        next_page = response.css('li.next a::attr(\"href\")').get()\n",
+    "        if next_page is not None:\n",
+    "            yield response.follow(next_page, self.parse)\n",
+    "```"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "To run your Scrapy spider:\n",
+    "> scrapy runspider quotes_spider.py -o quotes.json"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## What just happened?\n",
+    "\n",
+    "When you ran the command `scrapy runspider quotes_spider.py`, Scrapy looked for a Spider definition inside it and ran it through its crawler engine.\n",
+    "\n",
+    "The crawl started by making requests to the URLs defined in the start_urls attribute (in this case, only the URL for quotes in humor category) and called the default callback method parse, passing the response object as an argument. In the parse callback, we loop through the quote elements using a CSS Selector, yield a Python dict with the extracted quote text and author, look for a link to the next page and schedule another request using the same parse method as callback.\n",
+    "\n",
+    "Here you notice one of the main advantages about Scrapy: requests are scheduled and processed asynchronously. This means that Scrapy doesn’t need to wait for a request to be finished and processed, it can send another request or do other things in the meantime. This also means that other requests can keep going even if some request fails or an error happens while handling it."
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "---"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "### Simplest way to dump all my scraped items into a JSON/CSV/XML file?\n",
+    "\n",
+    "To dump into a JSON file:\n",
+    "\n",
+    "> scrapy crawl myspider -O items.json\n",
+    "\n",
+    "To dump into a CSV file:\n",
+    "\n",
+    "> scrapy crawl myspider -O items.csv\n",
+    "\n",
+    "To dump into a XML file:\n",
+    "\n",
+    "> scrapy crawl myspider -O items.xml\n",
+    "\n",
+    "For more information see [Feed exports](https://docs.scrapy.org/en/latest/topics/feed-exports.html)\n",
+    "\n",
+    "---"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "scrapy project example : [quotesbot](https://github.com/scrapy/quotesbot)\n",
+    "\n",
+    "---"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "### Learn to Extract data\n",
+    "\n",
+    "The best way to learn how to extract data with Scrapy is trying selectors using the Scrapy shell. \n",
+    "\n",
+    "Run:\n",
+    "\n",
+    "> scrapy shell 'https://quotes.toscrape.com/page/1/'\n",
+    "\n",
+    "Using the shell, you can try selecting elements using CSS with the response object:\n",
+    "\n",
+    "> >>> response.css('title')\n",
+    "\n",
+    "> [< Selector xpath='descendant-or-self::title' data='< title >Quotes to Scrape</ title>'>]\n",
+    "\n",
+    "The result of running response.css('title') is a list-like object called SelectorList, which represents a list of Selector objects that wrap around XML/HTML elements and allow you to run further queries to fine-grain the selection or extract the data.\n",
+    "\n",
+    "To extract the text from the title above, you can do:\n",
+    "\n",
+    "> >>> response.css('title::text').getall()\n",
+    "\n",
+    "> ['Quotes to Scrape']\n",
+    "\n",
+    "There are two things to note here: one is that we’ve added ::text to the CSS query, to mean we want to select only the text elements directly inside < title> element. \n",
+    "\n",
+    "The other thing is that the result of calling .getall() is a list: it is possible that a selector returns more than one result, so we extract them all. When you know you just want the first result, as in this case, you can do:\n",
+    "\n",
+    "> >>> response.css('title::text').get()\n",
+    "\n",
+    "> 'Quotes to Scrape'\n",
+    "\n",
+    "As an alternative, you could’ve written:\n",
+    "\n",
+    "> >>> response.css('title::text')[0].get()\n",
+    "\n",
+    "> 'Quotes to Scrape'\n",
+    "\n",
+    "---\n"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Run Scrapy from a script\n",
+    "\n",
+    "You can use the API to run Scrapy from a script, instead of the typical way of running Scrapy via `scrapy crawl`.\n",
+    "\n",
+    "Remember that Scrapy is built on top of the Twisted asynchronous networking library, so you need to run it inside the Twisted reactor.\n",
+    "\n",
+    "The simplest utility for running a spider from a script is `scrapy.crawler.CrawlerProcess`: it starts the Twisted reactor for you, configures logging and sets shutdown handlers, and is the class used by all Scrapy commands.\n",
+    "\n",
+    "The example below uses `scrapy.crawler.CrawlerRunner` instead, which gives you more control: you must start the reactor yourself and shut it down once the spider has finished, typically by adding a callback to the deferred returned by the `CrawlerRunner.crawl` method.\n",
+    "\n",
+    "Here’s an example of its usage, along with a callback to manually stop the reactor after MySpider has finished running.\n",
+    "\n",
+    "```\n",
+    "from twisted.internet import reactor\n",
+    "import scrapy\n",
+    "from scrapy.crawler import CrawlerRunner\n",
+    "from scrapy.utils.log import configure_logging\n",
+    "\n",
+    "class MySpider(scrapy.Spider):\n",
+    "    # Your spider definition\n",
+    "    ...\n",
+    "\n",
+    "configure_logging({'LOG_FORMAT': '%(levelname)s: %(message)s'})\n",
+    "runner = CrawlerRunner()\n",
+    "\n",
+    "d = runner.crawl(MySpider)\n",
+    "d.addBoth(lambda _: reactor.stop())\n",
+    "reactor.run() # the script will block here until the crawling is finished\n",
+    "```"
+   ]
+  }
+ ],
+ "metadata": {
+  "language_info": {
+   "name": "python"
+  },
+  "orig_nbformat": 4
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+}
diff --git a/training/scrapy/scripts/AppliedScrapy.py b/training/scrapy/scripts/AppliedScrapy.py
new file mode 100644
index 0000000000000000000000000000000000000000..1903147aa8b4bcc2e273432f0a174378b09900c8
--- /dev/null
+++ b/training/scrapy/scripts/AppliedScrapy.py
@@ -0,0 +1,52 @@
+
+import time
+
+import scrapy
+from scrapy import Request
+
+class AppliedSpider(scrapy.Spider):
+    name = 'applied'
+    user_agent = 'Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/44.0.2403.157 Safari/537.36'
+
+    def __init__(self, search_param=''):
+        self.api_url = 'https://www.applied.com'
+        self.start_urls = [
+            'https://www.applied.com/search?page=0&search-category=all&override=true&isLevelUp=false&q='+search_param]
+        super().__init__()
+
+    def collect_data(self, response):
+        # product url parsing
+
+        # specification data
+        spec = dict()
+        for trs in response.xpath('//*[@id="specifications"]//table//tr'):
+            key = trs.xpath('./td[1]/text()').get().strip()
+            value = trs.xpath('./td[2]/text()').get().strip()
+            spec[key] = value
+
+        # final data
+        data = {
+            'company': response.xpath('//h1[@itemprop="brand"]/a/text()').get().strip(),
+            'product': response.xpath('//span[@itemprop="mpn name"]/text()').get().strip(),
+            'details': response.xpath('//div[@class="details"]//text()').get().strip(),
+            'item': response.xpath('//div[@class="customer-part-number"]/text()').get().strip(),
+            'description': [x.strip() for x in response.xpath('//div[@class="short-description"]/ul/li/text()').extract()],
+            'specification': spec,
+            'url': response.url.strip(),
+            'timestamp': int(time.time()*1000)
+        }
+        yield data
+
+    def parse(self, response):
+        # search url parsing
+        for scrape_url in response.xpath('//a[@class="hide-for-print more-detail"]/@href').extract():
+            # extract product url
+            yield Request(self.api_url+scrape_url, self.collect_data)
+
+        # extract next page url and re-run function
+        next_page = response.xpath('//a[@class="next"]/@href').get()
+        if next_page is not None:
+            yield Request(self.api_url+next_page, self.parse)
diff --git a/training/scrapy/scripts/RSSpiderScrapy.py b/training/scrapy/scripts/RSSpiderScrapy.py
new file mode 100644
index 0000000000000000000000000000000000000000..1ec5c848dc18f4857aac843b27bdaaacaa26550b
--- /dev/null
+++ b/training/scrapy/scripts/RSSpiderScrapy.py
@@ -0,0 +1,24 @@
+import scrapy
+
+class RSSpider(scrapy.Spider):
+    crawler = 'RSSpider'
+    name = 'RSSpider'
+    main_domain = 'https://in.rsdelivers.com'
+    start_urls = ['https://in.rsdelivers.com/productlist/search?query=749']
+
+    def parse(self,response):
+        for ele in response.css('a.snippet'):
+            my_href = ele.xpath('./@href').get()
+            yield scrapy.Request(url=self.main_domain+my_href,callback=self.collect_data)
+
+    def collect_data(self,response):
+        data = dict()
+        meta_data = response.css('div.row-inline::text').extract()
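+        # Assumption about the page layout: the 'row-inline' text nodes appear to
+        # come in (label, separator, value) triples, hence the step of 3; the bare
+        # except simply stops once the extracted list runs out.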
+        for i in range(0,100,3):
+            try:
+                data[meta_data[i]] = meta_data[i+2]
+            except:
+                break
+        data['title'] = str(response.css('h1.title::text').get()).strip()
+        data['url'] = response.url
+        yield data
diff --git a/training/selenium/selenium_automation_selenium.ipynb b/training/selenium/selenium_automation_selenium.ipynb
new file mode 100644
index 0000000000000000000000000000000000000000..fad1f08818d469d7fc3de9757149f09cdb5fad78
--- /dev/null
+++ b/training/selenium/selenium_automation_selenium.ipynb
@@ -0,0 +1,213 @@
+{
+ "cells": [
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## SELENIUM AUTOMATION AND WEB SCRAPING"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Load the Driver "
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 11,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "from selenium import webdriver\n",
+    "from selenium.webdriver.chrome.service import Service\n",
+    "\n",
+    "my_service = Service('/home/amruth/Music/chromedriver')\n",
+    "driver = webdriver.Chrome(service=my_service)\n"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Extra imports"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 4,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# to set for supported locators\n",
+    "from selenium.webdriver.common.by import By\n",
+    "\n",
+    "# to handle time related tasks\n",
+    "import time\n",
+    "\n",
+    "# create creds.py with USERNAME,PASSWORD variables\n",
+    "import creds\n"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "To fetch the home URL"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 5,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "driver.get(\"https://kronos.tarento.com/login\")\n",
+    "driver.maximize_window()"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Login Content"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 6,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "time.sleep(1)\n",
+    "driver.find_element(By.XPATH, '//*[@type=\"email\"]').send_keys(creds.USERNAME)\n",
+    "\n",
+    "time.sleep(1)\n",
+    "driver.find_element(By.XPATH, '//*[@type=\"password\"]').send_keys(creds.PASSWORD)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "To tick the checkbox (clicked via execute_script)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 7,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "time.sleep(2)\n",
+    "driver.execute_script('arguments[0].click();',driver.find_element(By.XPATH, '//*[@type=\"checkbox\"]'))"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "To click on the login button"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 8,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "driver.find_element(By.XPATH, '//*[@type=\"submit\"]').click()"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Scraping data from the page to check whether the login succeeded"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 9,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Login failed\n"
+     ]
+    }
+   ],
+   "source": [
+    "time.sleep(2)\n",
+    "try:\n",
+    "    my_username = driver.find_element(By.XPATH, '//a[@role=\"button\"]').text.strip()\n",
+    "    output = 'logged in as:' + my_username\n",
+    "except:\n",
+    "    output = 'Login failed'    \n",
+    "print(output)    \n"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "To close the window"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 12,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "driver.close()\n",
+    "driver.quit()"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": []
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "TODO:\n",
+    "- selenium scripts\n",
+    "- generalise pdf_scripts\n",
+    "- scrapy docs\n",
+    "- general refactor"
+   ]
+  }
+ ],
+ "metadata": {
+  "interpreter": {
+   "hash": "916dbcbb3f70747c44a77c7bcd40155683ae19c65e1c03b4aa3499c5328201f1"
+  },
+  "kernelspec": {
+   "display_name": "Python 3.10.4 64-bit",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.10.4"
+  },
+  "orig_nbformat": 4
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+}
diff --git a/training/selenium/selenium_basics.ipynb b/training/selenium/selenium_basics.ipynb
new file mode 100644
index 0000000000000000000000000000000000000000..40dc4172c57691142af456919df09f075b86b9d1
--- /dev/null
+++ b/training/selenium/selenium_basics.ipynb
@@ -0,0 +1,365 @@
+{
+ "cells": [
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "# SELENIUM-WEBDRIVER-BASICS"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "### To install Selenium\n",
+    "\n",
+    "pip install selenium\n",
+    "\n",
+    "for more details refer this link - https://selenium-python.readthedocs.io/ "
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "#### NOTES\n",
+    "1. different versions of chrome and chromedriver will not work\n",
+    "1. Mismatched versions of Chrome and chromedriver will not work together.\n",
+    "2. For Firefox, a profile path is mandatory (a minimal sketch follows in the cell below)."
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "--------------------------------------------------------------------"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "To Initialize the driver"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 57,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "from selenium import webdriver\n",
+    "from selenium.webdriver.chrome.service import Service\n",
+    "\n",
+    "my_service = Service('/home/amruth/Music/chromedriver')\n",
+    "driver = webdriver.Chrome(service=my_service)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "To fetch a URL\n",
+    "\n",
+    "syntax: driver.get('my_url')"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 58,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "driver.get('https://www.google.com/')"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "To get the current URL"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 59,
+   "metadata": {},
+   "outputs": [
+    {
+     "data": {
+      "text/plain": [
+       "'https://www.google.com/'"
+      ]
+     },
+     "execution_count": 59,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
+   "source": [
+    "driver.current_url"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "To maximize the window"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 35,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "driver.maximize_window()"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "To go back to the previous page"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 60,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "#get a new page\n",
+    "driver.get(\"https://www.cricbuzz.com/\")"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 61,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "#back to previous page with back()\n",
+    "driver.back()"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "To go forward to the next page"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 62,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "driver.forward()"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "To refresh the page"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 63,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "driver.refresh()"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "To take the screenshot"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 64,
+   "metadata": {},
+   "outputs": [
+    {
+     "data": {
+      "text/plain": [
+       "True"
+      ]
+     },
+     "execution_count": 64,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
+   "source": [
+    "driver.save_screenshot(filename='/home/amruth/Pictures/2.png')"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "To get the sessionID"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 65,
+   "metadata": {},
+   "outputs": [
+    {
+     "data": {
+      "text/plain": [
+       "'52cb5dafe613edf285132391b58ed44a'"
+      ]
+     },
+     "execution_count": 65,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
+   "source": [
+    "driver.session_id"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "To view page source"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "driver.page_source"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "To create and switch to a new tab"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 67,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "driver.switch_to.new_window()"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "To get list of tabs"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 68,
+   "metadata": {},
+   "outputs": [
+    {
+     "data": {
+      "text/plain": [
+       "['CDwindow-7A88A3B7E81EE88473EFA8F5FB49CD5D',\n",
+       " 'CDwindow-585DC24AC56D0BC0A12B3FA2796921EF']"
+      ]
+     },
+     "execution_count": 68,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
+   "source": [
+    "driver.window_handles"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "To close the tab"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 69,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "driver.close()"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "To switch back to an existing tab"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 70,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "driver.switch_to.window(driver.window_handles[0])"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "To quit the browser"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 71,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "driver.quit()"
+   ]
+  }
+ ],
+ "metadata": {
+  "interpreter": {
+   "hash": "916dbcbb3f70747c44a77c7bcd40155683ae19c65e1c03b4aa3499c5328201f1"
+  },
+  "kernelspec": {
+   "display_name": "Python 3.10.4 64-bit",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.10.4"
+  },
+  "orig_nbformat": 4
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+}
diff --git a/training/selenium/selenium_locators.ipynb b/training/selenium/selenium_locators.ipynb
new file mode 100644
index 0000000000000000000000000000000000000000..97b4e0a9f7283288371a6e74c32388dbda95c700
--- /dev/null
+++ b/training/selenium/selenium_locators.ipynb
@@ -0,0 +1,540 @@
+{
+ "cells": [
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "# *What are Locators?*\n",
+    "    \n",
+    "* A locator tells Selenium which GUI element it needs to operate on (say a text box, button, check box, etc.).  \n",
+    "* Identification of correct GUI elements is a prerequisite to creating an automation script."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "from selenium import webdriver\n",
+    "from selenium.webdriver.common.by import By\n",
+    "from selenium.webdriver.chrome.service import Service"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "my_service = Service(r\"C:\\Drivers\\chromedriver_win32\\chromedriver.exe\")\n",
+    "driver = webdriver.Chrome(service=my_service)\n",
+    "driver.get(\"https://www.amazon.in/\")"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "# Types of Locators"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "# 1. By Tag Name"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "      \n",
+    "        Syntax: driver.find_element(By.TAG_NAME, 'tag_name')"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "driver.find_elements(By.TAG_NAME, 'input')"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "driver.find_element(By.TAG_NAME, 'input')"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "driver.find_elements(By.TAG_NAME, 'a')"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# no <button> tag is available on this page, so this will throw NoSuchElementException.\n",
+    "driver.find_element(By.TAG_NAME, 'button')"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "# 2. By Name"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "        Syntax: driver.find_element(By.NAME, 'my_name')"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# <input type=\"text\" id=\"twotabsearchtextbox\" value=\"\" name=\"field-keywords\" \n",
+    "# autocomplete=\"off\" placeholder=\"\" class=\"nav-input nav-progressive-attribute\" dir=\"auto\" tabindex=\"0\" aria-label=\"Search\">\n",
+    "driver.find_element(By.NAME, 'field-keywords')"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# <input data-addnewaddress=\"add-new\" id=\"unifiedLocation1ClickAddress\" name=\"dropdown-selection\" \n",
+    "# type=\"hidden\" value=\"add-new\" class=\"nav-progressive-attribute\">\n",
+    "driver.find_element(By.NAME, 'dropdown-selection')"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "# 3. By ID"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "* The ids are generally unique for an element.\n",
+    "\n",
+    "        Syntax: driver.find_element(By.ID, 'my_id')"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# <input type=\"text\" id=\"twotabsearchtextbox\" value=\"\" name=\"field-keywords\" \n",
+    "# autocomplete=\"off\" placeholder=\"\" class=\"nav-input nav-progressive-attribute\" dir=\"auto\" tabindex=\"0\" aria-label=\"Search\">\n",
+    "driver.find_element(By.ID, 'twotabsearchtextbox')"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# <div id=\"nav-cart-count-container\">\n",
+    "driver.find_element(By.ID, 'nav-cart-count-container')"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "# 4. By Class Name"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "        Syntax: driver.find_element(By.CLASS_NAME, 'class_name')"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# single word class_name\n",
+    "\n",
+    "# <div class=\"nav-search-field \">\n",
+    "driver.find_elements(By.CLASS_NAME, 'nav-search-field ')"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# <div class=\"nav-left\">\n",
+    "driver.find_elements(By.CLASS_NAME, 'nav-left')"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# multi word class_name"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# If the class name contains spaces, passing it verbatim will not work.\n",
+    "driver.find_element(By.CLASS_NAME, 'nav-search-submit nav-sprite')"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# replace the spaces with dots \".\" (this will work).\n",
+    "driver.find_element(By.CLASS_NAME, 'nav-search-submit.nav-sprite')"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "# 5. By Link Text"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "* The text enclosed within an anchor tag is used to identify a link or hyperlink. \n",
+    "\n",
+    "        Syntax: driver.find_element(By.LINK_TEXT, 'text')"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "driver.find_elements(By.LINK_TEXT, 'Best')"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "driver.find_elements(By.LINK_TEXT, 'Best Sellers')\n"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "# 6. By Partial Link Text"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "* The partial text enclosed within an anchor tag is used to identify a link or hyperlink.\n",
+    "\n",
+    "        Syntax: driver.find_element(By.PARTIAL_LINK_TEXT, 'text')"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "driver.find_element(By.PARTIAL_LINK_TEXT, 'Best')"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "driver.find_elements(By.PARTIAL_LINK_TEXT, 'Best Sellers')"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "# 7. By XPATH"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "* The element is identified with the XPath created with the help of HTML attribute, value, and tagName. \n",
+    "* Xpath is of two types absolute and relative. \n",
+    "    * For absolute XPath, we have to traverse from root to the element.\n",
+    "    * For relative XPath, we can start from any position in DOM. \n",
+    "    \n",
+    "* An XPath expression generally follows the pattern //tagname[@attribute=’value’]. The tag name is optional; if it is omitted, the expression becomes //*[@attribute=’value’].\n",
+    "\n",
+    "        Syntax: driver.find_element(By.XPATH, '//XPATH')"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# //tag_name\n",
+    "driver.find_elements(By.XPATH, '//input')"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "#  //tag_name[@attribute = \"value\"]\n",
+    "driver.find_elements(By.XPATH, '//input[@type=\"text\"]')"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# //*[@attribute = \"value\"] \n",
+    "# * matches any tag, so this selects every element whose attribute has the given value\n",
+    "driver.find_elements(By.XPATH, '//*[@id=\"nav-xshop\"]')"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# //*[@attribute = \"value\"]/tag_name\n",
+    "# / next child\n",
+    "driver.find_elements(By.XPATH, '//div[@class=\"nav-fill\"]/div')"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# //*[@attribute = \"value\"]//tag_name\n",
+    "# // consider all child\n",
+    "driver.find_elements(By.XPATH, '//div[@class=\"nav-fill\"]//div')"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# //tagname[. = \"text\"]\n",
+    "driver.find_elements(By.XPATH, '//a[. = \"Best Sellers\"]')"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# //tag_name/..\n",
+    "# .. means parent of the tag_name\n",
+    "driver.find_elements(By.XPATH, '//input/..')"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "driver.find_elements(By.XPATH, '//*[@id=\"nav-tools\"]/a')"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "driver.find_elements(By.XPATH, '//*[@id=\"nav-tools\"]/a[1]')"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "driver.find_elements(By.XPATH, '//*[@id=\"nav-tools\"]/a[last()]')"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "# 8. By CSS Locator"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "* The element is identified with the CSS created with the help of HTML attribute, value, or tagName.\n",
+    "\n",
+    "        Syntax: driver.find_elements(By.CSS_SELECTOR, 'input#txt')"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# tag_name\n",
+    "driver.find_elements(By.CSS_SELECTOR, 'input')"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# tag_name.class1.class2\n",
+    "driver.find_elements(By.CSS_SELECTOR, 'input.nav-input.nav-progressive-attribute')"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# tag_name#id\n",
+    "driver.find_elements(By.CSS_SELECTOR, 'input#twotabsearchtextbox')"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# parent_tag_name > child_tag_name\n",
+    "driver.find_elements(By.CSS_SELECTOR, 'div > input')"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# #id\n",
+    "driver.find_elements(By.CSS_SELECTOR, '#twotabsearchtextbox')"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# #id > parent_tag_name > child_tag_name\n",
+    "driver.find_elements(By.CSS_SELECTOR, '#CardInstanceQNqkNMgnYMdkg9dk0pUzTQ > div > div')"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# tag_name[attribute = \"value\"]\n",
+    "driver.find_elements(By.CSS_SELECTOR, 'input[aria-label=\"Search\"]')"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": []
+  }
+ ],
+ "metadata": {
+  "interpreter": {
+   "hash": "14026295373d426cc26cb234867bcbb453b58a52d594499ebe3dcd2adfc69f37"
+  },
+  "kernelspec": {
+   "display_name": "Python 3",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.9.4"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+}