Unverified commit 59a59270 authored by Anil Gupta, committed by GitHub

Merge pull request #1870 from santhosh-tg/release-5.2.0

Release 5.2.0 - CSP vars change
Showing with 216 additions and 59 deletions
......@@ -6,11 +6,14 @@
tasks:
- name: download artifact from azure storage
include_role:
name: artifacts-download-azure
apply:
environment:
AZURE_STORAGE_ACCOUNT: "{{sunbird_artifact_storage_account_name}}"
AZURE_STORAGE_SAS_TOKEN: "{{sunbird_artifact_storage_account_sas}}"
name: azure-cloud-storage
tasks_from: blob-download.yml
vars:
blob_container_name: "{{ cloud_storage_artifacts_bucketname }}"
blob_file_name: "{{ artifact }}"
local_file_or_folder_path: "{{ artifact_path }}"
storage_account_name: "{{ cloud_artifact_storage_accountname }}"
storage_account_key: "{{ cloud_artifact_storage_secret }}"
when: cloud_service_provider == "azure"
- name: download artifact from gcloud storage
......@@ -18,8 +21,20 @@
name: gcp-cloud-storage
tasks_from: download.yml
vars:
gcp_bucket_name: "{{ gcloud_artifact_bucket_name }}"
dest_folder_name: "{{ artifacts_container }}"
dest_file_name: "{{ artifact }}"
gcp_bucket_name: "{{ cloud_storage_artifacts_bucketname }}"
gcp_path: "{{ artifact }}"
local_file_or_folder_path: "{{ artifact_path }}"
when: cloud_service_provider == "gcloud"
- name: download artifact from aws s3
include_role:
name: aws-cloud-storage
tasks_from: download.yml
vars:
local_file_or_folder_path: "{{ artifact_path }}"
s3_bucket_name: "{{ cloud_storage_artifacts_bucketname }}"
s3_path: "{{ artifact }}"
aws_default_region: "{{ cloud_public_storage_region }}"
aws_access_key_id: "{{ cloud_artifact_storage_accountname }}"
aws_secret_access_key: "{{ cloud_artifact_storage_secret }}"
when: cloud_service_provider == "aws"
......@@ -6,11 +6,15 @@
tasks:
- name: upload artifact to azure storage
include_role:
name: artifacts-upload-azure
apply:
environment:
AZURE_STORAGE_ACCOUNT: "{{sunbird_artifact_storage_account_name}}"
AZURE_STORAGE_SAS_TOKEN: "{{sunbird_artifact_storage_account_sas}}"
name: azure-cloud-storage
tasks_from: blob-upload.yml
vars:
blob_container_name: "{{ cloud_storage_artifacts_bucketname }}"
container_public_access: "off"
blob_file_name: "{{ artifact }}"
local_file_or_folder_path: "{{ artifact_path }}"
storage_account_name: "{{ cloud_artifact_storage_accountname }}"
storage_account_key: "{{ cloud_artifact_storage_secret }}"
when: cloud_service_provider == "azure"
- name: upload artifact to gcloud storage
......@@ -18,8 +22,20 @@
name: gcp-cloud-storage
tasks_from: upload.yml
vars:
gcp_bucket_name: "{{ gcloud_artifact_bucket_name }}"
dest_folder_name: "{{ artifacts_container }}"
dest_file_name: "{{ artifact }}"
gcp_bucket_name: "{{ cloud_storage_artifacts_bucketname }}"
gcp_path: "{{ artifact }}"
local_file_or_folder_path: "{{ artifact_path }}"
when: cloud_service_provider == "gcloud"
- name: upload artifact to aws s3
include_role:
name: aws-cloud-storage
tasks_from: upload.yml
vars:
local_file_or_folder_path: "{{ artifact_path }}"
s3_bucket_name: "{{ cloud_storage_artifacts_bucketname }}"
s3_path: "{{ artifact }}"
aws_default_region: "{{ cloud_public_storage_region }}"
aws_access_key_id: "{{ cloud_artifact_storage_accountname }}"
aws_secret_access_key: "{{ cloud_artifact_storage_secret }}"
when: cloud_service_provider == "aws"
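A hedged sketch of the common CSP variables that the refactored upload/download tasks above consume; the variable names are taken from the tasks themselves, while the values below are placeholders rather than defaults from the repo:

# Illustrative environment values only; real values belong in the private inventory.
# cloud_service_provider selects which of the three provider-specific tasks runs.
cloud_service_provider: "aws"                        # "azure" | "gcloud" | "aws"
cloud_storage_artifacts_bucketname: "my-artifacts"   # container (Azure) or bucket (AWS / GCP)
cloud_artifact_storage_accountname: "my-account"     # storage account name or AWS access key id
cloud_artifact_storage_secret: "my-secret"           # storage account key or AWS secret access key
cloud_public_storage_region: "ap-south-1"            # consumed only by the AWS task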
......@@ -11,8 +11,8 @@
vars:
blob_container_name: "elasticsearch-snapshots"
container_public_access: "off"
storage_account_name: "{{ azure_management_storage_account_name }}"
storage_account_sas_token: "{{ azure_management_storage_account_sas }}"
storage_account_name: "{{ cloud_management_storage_accountname }}"
storage_account_key: "{{ cloud_management_storage_secret }}"
when: cloud_service_provider == "azure"
- hosts: composite-search-cluster
......
aws_cli_url: https://awscli.amazonaws.com/awscli-exe-linux-x86_64.zip
\ No newline at end of file
---
- name: Download the installation file
get_url:
url: "{{ aws_cli_url }}"
dest: /tmp/awscliv2.zip
- name: install zip and unzip
apt:
name: "{{item}}"
state: latest
with_items:
- zip
- unzip
- name: Unzip the installer
unarchive:
src: /tmp/awscliv2.zip
dest: /tmp/
remote_src: yes
- name: install aws cli
shell: ./aws/install
args:
chdir: /tmp/
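The shell-based s3 tasks that follow assume the aws binary is already present on the host, which is what the installation tasks above provide. A minimal wiring sketch, assuming those tasks form a role named aws-cli (that role name is not visible in this diff and is only a hypothetical label):

# Hypothetical play: install the CLI once before any aws-cloud-storage task runs.
- hosts: localhost
  become: true
  roles:
    - aws-cli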
s3_bucket_name: ""
s3_path: ""
local_file_or_folder_path: ""
---
- name: delete files and folders recursively
environment:
AWS_DEFAULT_REGION: "{{ aws_default_region }}"
AWS_ACCESS_KEY_ID: "{{ aws_access_key_id }}"
AWS_SECRET_ACCESS_KEY: "{{ aws_secret_access_key }}"
shell: "aws s3 rm s3://{{ s3_bucket_name }}/{{ s3_path }} --recursive"
async: 3600
poll: 10
---
- name: delete files from s3
environment:
AWS_DEFAULT_REGION: "{{ aws_default_region }}"
AWS_ACCESS_KEY_ID: "{{ aws_access_key_id }}"
AWS_SECRET_ACCESS_KEY: "{{ aws_secret_access_key }}"
shell: "aws s3 rm s3://{{ s3_bucket_name }}/{{ s3_path }}"
async: 3600
poll: 10
---
- name: download files from s3
environment:
AWS_DEFAULT_REGION: "{{ aws_default_region }}"
AWS_ACCESS_KEY_ID: "{{ aws_access_key_id }}"
AWS_SECRET_ACCESS_KEY: "{{ aws_secret_access_key }}"
shell: "aws s3 cp s3://{{ s3_bucket_name }}/{{ s3_path }} {{ local_file_or_folder_path }}"
async: 3600
poll: 10
---
- name: delete files from aws S3 bucket
include: delete.yml
- name: delete folders from aws S3 bucket recursively
include: delete-folder.yml
- name: download file from S3
include: download.yml
- name: upload files from local to aws S3
include: upload.yml
- name: upload files and folders from a local directory to aws S3
include: upload-folder.yml
---
- name: upload folder to s3
environment:
AWS_DEFAULT_REGION: "{{ aws_default_region }}"
AWS_ACCESS_KEY_ID: "{{ aws_access_key_id }}"
AWS_SECRET_ACCESS_KEY: "{{ aws_secret_access_key }}"
shell: "aws s3 cp {{ local_file_or_folder_path }} s3://{{ s3_bucket_name }}/{{ s3_path }} --recursive"
async: 3600
poll: 10
---
- name: upload files to s3
environment:
AWS_DEFAULT_REGION: "{{ aws_default_region }}"
AWS_ACCESS_KEY_ID: "{{ aws_access_key_id }}"
AWS_SECRET_ACCESS_KEY: "{{ aws_secret_access_key }}"
shell: "aws s3 cp {{ local_file_or_folder_path }} s3://{{ s3_bucket_name }}/{{ s3_path }}"
async: 3600
poll: 10
......@@ -64,4 +64,9 @@ blob_container_folder_path: ""
# This variable affects only new containers and has no effect on a container that already exists
# If the container already exists, its access level will not be changed
# You will need to change the access level from the Azure portal or by using the az storage container set-permission command
container_public_access: ""
\ No newline at end of file
container_public_access: ""
# Creates the container by default before running the specific azure blob tasks
# If you would like to skip container creation (in case of a looped execution),
# you can set this value to False in order to skip the container creation task for every iteration
create_container: True
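A minimal sketch of the looped case the comment above describes, using the upload-using-azcopy.yml task file from this role; the container name and paths are placeholders, and create_container is set to False on the assumption that the container already exists:

# Illustrative loop; disabling create_container avoids re-running the
# container-create task on every iteration.
- name: upload multiple backup folders with azcopy
  include_role:
    name: azure-cloud-storage
    tasks_from: upload-using-azcopy.yml
  vars:
    blob_container_name: "my-existing-container"
    blob_container_folder_path: "/{{ item }}"
    local_file_or_folder_path: "/data/{{ item }}"
    storage_account_name: "{{ cloud_management_storage_accountname }}"
    storage_account_key: "{{ cloud_management_storage_secret }}"
    create_container: False
  loop:
    - cassandra
    - elasticsearch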
---
- name: generate SAS token for azcopy
shell: |
sas_expiry=`date -u -d "1 hour" '+%Y-%m-%dT%H:%MZ'`
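# the leading '?' below lets the generated token be appended directly to the blob URL in the delete task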
sas_token=?`az storage container generate-sas -n {{ blob_container_name }} --account-name {{ storage_account_name }} --account-key {{ storage_account_key }} --https-only --permissions dlrw --expiry $sas_expiry -o tsv`
echo $sas_token
register: sas_token
- set_fact:
container_sas_token: "{{ sas_token.stdout}}"
- name: delete files and folders from azure storage using azcopy
shell: "azcopy rm 'https://{{ storage_account_name }}.blob.core.windows.net/{{ blob_container_name }}{{ blob_container_folder_path }}{{ storage_account_sas_token }}' --recursive"
shell: "azcopy rm 'https://{{ storage_account_name }}.blob.core.windows.net/{{ blob_container_name }}{{ blob_container_folder_path }}{{ container_sas_token }}' --recursive"
environment:
AZCOPY_CONCURRENT_FILES: "10"
async: 10800
......
---
- name: generate SAS token for azcopy
shell: |
sas_expiry=`date -u -d "1 hour" '+%Y-%m-%dT%H:%MZ'`
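# the leading '?' below lets the generated token be appended directly to the blob URL in the copy task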
sas_token=?`az storage container generate-sas -n {{ blob_container_name }} --account-name {{ storage_account_name }} --account-key {{ storage_account_key }} --https-only --permissions dlrw --expiry $sas_expiry -o tsv`
echo $sas_token
register: sas_token
- set_fact:
container_sas_token: "{{ sas_token.stdout}}"
- name: create container in azure storage if it doesn't exist
include_role:
name: azure-cloud-storage
tasks_from: container-create.yml
when: create_container == True
- name: upload files and folders to azure storage using azcopy
shell: "azcopy copy {{ local_file_or_folder_path }} 'https://{{ storage_account_name }}.blob.core.windows.net/{{ blob_container_name }}{{ blob_container_folder_path }}{{ storage_account_sas_token }}' --recursive"
shell: "azcopy copy {{ local_file_or_folder_path }} 'https://{{ storage_account_name }}.blob.core.windows.net/{{ blob_container_name }}{{ blob_container_folder_path }}{{ container_sas_token }}' --recursive"
environment:
AZCOPY_CONCURRENT_FILES: "10"
async: 10800
poll: 10
\ No newline at end of file
poll: 10
cassandra_root_dir: /etc/cassandra
cassandra_backup_dir: /data/cassandra/backup
data_dir: '/var/lib/cassandra/data'
cassandra_backup_azure_container_name: lp-cassandra-backup
# This variable is added for the reasons below -
# 1. Introduce a common variable for the various clouds. For Azure it refers to the container name; for AWS / GCP it refers to the folder name
# 2. We want to avoid introducing or replacing too many variables in the first phase, so we will reuse the existing variable defined in the private repo
# or other default files and just assign its value to the newly introduced common variable
# 3. After a few releases, we will remove the older variables and use only the new variables across the repos
cassandra_backup_storage: "{{ cassandra_backup_azure_container_name }}"
cloud_storage_cassandrabackup_bucketname: "{{cloud_storage_management_bucketname}}"
cloud_storage_cassandrabackup_foldername: lp-cassandra-backup
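A hedged illustration of how the two new common variables above are meant to be consumed: an environment only has to set the management bucket, and the cassandra backup bucket inherits it through its default (the value below is a placeholder):

# Hypothetical inventory / group_vars override.
cloud_storage_management_bucketname: "mycompany-sunbird-management"
# cloud_storage_cassandrabackup_bucketname then resolves to that bucket, while
# cloud_storage_cassandrabackup_foldername keeps its default of lp-cassandra-backup.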
......@@ -37,22 +37,34 @@
name: azure-cloud-storage
tasks_from: upload-using-azcopy.yml
vars:
blob_container_name: "{{ cassandra_backup_storage }}"
blob_container_name: "{{ cloud_storage_cassandrabackup_foldername }}"
container_public_access: "off"
blob_container_folder_path: ""
local_file_or_folder_path: "/data/cassandra/backup/{{ cassandra_backup_folder_name }}"
storage_account_name: "{{ azure_management_storage_account_name }}"
storage_account_sas_token: "{{ azure_management_storage_account_sas }}"
storage_account_name: "{{ cloud_management_storage_accountname }}"
storage_account_key: "{{ cloud_management_storage_secret }}"
when: cloud_service_provider == "azure"
- name: upload backup to S3
include_role:
name: aws-cloud-storage
tasks_from: upload-folder.yml
vars:
local_file_or_folder_path: "/data/cassandra/backup/{{ cassandra_backup_folder_name }}"
s3_bucket_name: "{{ cloud_storage_cassandrabackup_bucketname }}"
s3_path: "{{ cloud_storage_cassandrabackup_foldername }}"
aws_default_region: "{{ cloud_public_storage_region }}"
aws_access_key_id: "{{ cloud_management_storage_accountname }}"
aws_secret_access_key: "{{ cloud_management_storage_secret }}"
when: cloud_service_provider == "aws"
- name: upload file to gcloud storage
include_role:
name: gcp-cloud-storage
tasks_from: upload-batch.yml
vars:
gcp_bucket_name: "{{ gcloud_management_bucket_name }}"
dest_folder_name: "{{ cassandra_backup_storage }}"
dest_folder_path: ""
gcp_bucket_name: "{{ cloud_storage_cassandrabackup_bucketname }}"
gcp_path: "{{ cloud_storage_cassandrabackup_foldername }}"
local_file_or_folder_path: "/data/cassandra/backup/{{ cassandra_backup_folder_name }}"
when: cloud_service_provider == "gcloud"
......
cassandra_backup_azure_container_name: lp-cassandra-backup
user: "{{ ansible_ssh_user }}"
restore_path: /home/{{user}}
backup_folder_name: cassandra_backup
backup_dir: "{{restore_path}}/cassandra_backup"
# This variable is added for the reasons below -
# 1. Introduce a common variable for the various clouds. For Azure it refers to the container name; for AWS / GCP it refers to the folder name
# 2. We want to avoid introducing or replacing too many variables in the first phase, so we will reuse the existing variable defined in the private repo
# or other default files and just assign its value to the newly introduced common variable
# 3. After a few releases, we will remove the older variables and use only the new variables across the repos
cassandra_backup_storage: "{{ cassandra_backup_azure_container_name }}"
cloud_storage_cassandrabackup_bucketname: "{{cloud_storage_management_bucketname}}"
cloud_storage_cassandrabackup_foldername: lp-cassandra-backup
......@@ -11,22 +11,35 @@
name: azure-cloud-storage
tasks_from: blob-download.yml
vars:
blob_container_name: "{{ cassandra_backup_storage }}"
blob_container_name: "{{ cloud_storage_cassandrabackup_foldername }}"
blob_file_name: "{{ cassandra_restore_file_name }}"
local_file_or_folder_path: "{{restore_path}}/{{ cassandra_restore_file_name }}"
storage_account_name: "{{ azure_management_storage_account_name }}"
storage_account_key: "{{ azure_management_storage_account_key }}"
storage_account_name: "{{ cloud_management_storage_accountname }}"
storage_account_key: "{{ cloud_management_storage_secret }}"
when: cloud_service_provider == "azure"
- name: download a file from aws s3
become: true
include_role:
name: aws-cloud-storage
tasks_from: download.yml
vars:
s3_bucket_name: "{{ cloud_storage_cassandrabackup_bucketname }}"
aws_access_key_id: "{{ cloud_management_storage_accountname }}"
aws_secret_access_key: "{{ cloud_management_storage_secret }}"
aws_default_region: "{{ cloud_public_storage_region }}"
local_file_or_folder_path: "{{restore_path}}/{{ cassandra_restore_file_name }}"
s3_path: "{{ cloud_storage_cassandrabackup_foldername }}/{{ cassandra_restore_file_name }}"
when: cloud_service_provider == "aws"
- name: download file from gcloud storage
include_role:
name: gcp-cloud-storage
tasks_from: download.yml
vars:
gcp_bucket_name: "{{ gcloud_management_bucket_name }}"
dest_folder_name: "{{ cassandra_backup_storage }}"
dest_file_name: "{{ cassandra_restore_file_name }}"
local_file_or_folder_path: "{{ cassandra_restore_gzip_file_path }}"
gcp_bucket_name: "{{ cloud_storage_cassandrabackup_bucketname }}"
s3_path: "{{ cloud_storage_cassandrabackup_foldername }}/{{ cassandra_restore_file_name }}"
local_file_or_folder_path: "{{restore_path}}/{{ cassandra_restore_file_name }}"
when: cloud_service_provider == "gcloud"
- name: unarchive backup file
......
snapshot_create_request_body: {
type: azure,
settings: {
container: "{{ es_backup_storage }}",
container: "{{ cloud_storage_esbackup_foldername }}",
base_path: "{{ snapshot_base_path }}_{{ base_path_date }}"
}
}
......@@ -10,11 +10,5 @@ snapshot_create_request_body: {
es_snapshot_host: "localhost"
snapshot_base_path: "default"
es_azure_backup_container_name: "elasticsearch-snapshots"
# This variable is added for the reasons below -
# 1. Introduce a common variable for the various clouds. For Azure it refers to the container name; for AWS / GCP it refers to the folder name
# 2. We want to avoid introducing or replacing too many variables in the first phase, so we will reuse the existing variable defined in the private repo
# or other default files and just assign its value to the newly introduced common variable
# 3. After a few releases, we will remove the older variables and use only the new variables across the repos
es_backup_storage: "{{ es_azure_backup_container_name }}"
cloud_storage_esbackup_bucketname: "{{ cloud_storage_management_bucketname }}"
cloud_storage_esbackup_foldername: "elasticsearch-snapshots"