diff --git a/ansible/api-manager.yml b/ansible/api-manager.yml
new file mode 100644
index 0000000000000000000000000000000000000000..e71115e2ad5927be5d3c05a0661d960e5041c419
--- /dev/null
+++ b/ansible/api-manager.yml
@@ -0,0 +1,6 @@
+---
+- name: Kong API
+  hosts: kong-api
+  roles:
+    - {role: kong-api, tags: ['kong-api']}
+    - {role: kong-consumer, tags: ['kong-consumer']}
\ No newline at end of file
diff --git a/ansible/bootstrap.yml b/ansible/bootstrap.yml
new file mode 100644
index 0000000000000000000000000000000000000000..783da972cdbb62b9cf211f07a1512aa884959a67
--- /dev/null
+++ b/ansible/bootstrap.yml
@@ -0,0 +1,12 @@
+- hosts: "{{hosts}}"
+  roles:
+    - role: bootstrap_any
+      tags:
+        - bootstrap_any
+
+- hosts: "{{hosts}}"
+  roles:
+    - role: bootstrap_swarm
+      when: swarm_master
+      tags:
+        - bootstrap_swarm
diff --git a/ansible/cassandra-backup.yml b/ansible/cassandra-backup.yml
new file mode 100644
index 0000000000000000000000000000000000000000..aa2b6a4fb7ba5548c16b45d9bdab008098163174
--- /dev/null
+++ b/ansible/cassandra-backup.yml
@@ -0,0 +1,6 @@
+- hosts: cassandra
+  become: yes
+  vars_files:
+    - 'secrets/{{env}}.yml'
+  roles:
+    - cassandra-backup
\ No newline at end of file
diff --git a/ansible/cassandra-data.yml b/ansible/cassandra-data.yml
new file mode 100644
index 0000000000000000000000000000000000000000..5e1443305bee754566901df20f695f147abe5f22
--- /dev/null
+++ b/ansible/cassandra-data.yml
@@ -0,0 +1,6 @@
+- hosts: cassandra
+  become: yes
+  vars_files:
+    - 'secrets/{{env}}.yml'
+  roles:
+    - cassandra-cql-update
\ No newline at end of file
diff --git a/ansible/cassandra-restore.yml b/ansible/cassandra-restore.yml
new file mode 100644
index 0000000000000000000000000000000000000000..767c77eeaee2c2b56c19149a9cdee7c4e7ef6b36
--- /dev/null
+++ b/ansible/cassandra-restore.yml
@@ -0,0 +1,6 @@
+- hosts: cassandra
+  become: yes
+  vars_files:
+    - 'secrets/{{env}}.yml'
+  roles:
+    - cassandra-restore
\ No newline at end of file
diff --git a/ansible/deploy.yml b/ansible/deploy.yml
new file mode 100644
index 0000000000000000000000000000000000000000..b96b4cb17c9a4013bc0a456cfc6ab31247be85a0
--- /dev/null
+++ b/ansible/deploy.yml
@@ -0,0 +1,71 @@
+- hosts: swarm-bootstrap-manager
+  become: yes
+  vars_files:
+    - 'secrets/{{env}}.yml'
+  roles:
+    - stack-proxy
+  tags:
+    - stack-proxy
+
+- hosts: swarm-bootstrap-manager
+  become: yes
+  vars_files:
+    - 'secrets/{{env}}.yml'
+  roles:
+    - stack-api-manager
+  tags:
+    - stack-api-manager
+
+- hosts: swarm-bootstrap-manager
+  become: yes
+  vars_files:
+    - 'secrets/{{env}}.yml'
+  roles:
+    - stack-sunbird
+  tags:
+    - stack-sunbird
+
+- hosts: swarm-bootstrap-manager
+  become: yes
+  vars_files:
+    - 'secrets/{{env}}.yml'
+  roles:
+    - stack-logger
+  tags:
+    - stack-logger
+
+- hosts: swarm-bootstrap-manager
+  become: yes
+  vars_files:
+    - 'secrets/{{env}}.yml'
+  roles:
+    - stack-adminutil
+  tags:
+    - stack-adminutil
+
+- hosts: swarm-bootstrap-manager
+  become: yes
+  vars_files:
+    - 'secrets/{{env}}.yml'
+  roles:
+    - stack-monitor
+  tags:
+    - stack-monitor
+
+- hosts: swarm-bootstrap-manager
+  become: yes
+  vars_files:
+    - 'secrets/{{env}}.yml'
+  roles:
+    - stack-keycloak
+  tags:
+    - stack-keycloak1
+
+- hosts: swarm-bootstrap-manager
+  become: yes
+  vars_files:
+    - 'secrets/{{env}}.yml'
+  roles:
+    - stack-keycloak
+  tags:
+    - stack-keycloak2
\ No newline at end of file
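All of the plays above are driven by an `env` extra-var that selects `secrets/{{env}}.yml`, and each stack carries its own tag so it can be deployed in isolation. A minimal invocation sketch; the inventory path and environment name are assumptions, not part of this diff:

```sh
# Deploy only the proxy stack against a hypothetical "staging" environment;
# --tags restricts the run to the play tagged stack-proxy.
ansible-playbook -i inventories/staging deploy.yml \
  -e env=staging --tags stack-proxy
```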
diff --git a/ansible/es.yml b/ansible/es.yml
new file mode 100644
index 0000000000000000000000000000000000000000..0ca3cce2082cde1afb10584af9b7c6b3ca5a851d
--- /dev/null
+++ b/ansible/es.yml
@@ -0,0 +1,35 @@
+- hosts: es-backup
+  become: yes
+  vars_files:
+    - 'secrets/{{env}}.yml'
+  roles:
+    - es-azure-snapshot
+  tags:
+    - es_backup
+
+- hosts: es-backup
+  become: yes
+  vars_files:
+    - 'secrets/{{env}}.yml'
+  roles:
+    - es-azure-restore
+  tags:
+    - es_restore
+
+- hosts: log-es-backup
+  become: yes
+  vars_files:
+    - 'secrets/{{env}}.yml'
+  roles:
+    - es-azure-snapshot
+  tags:
+    - log_es_backup
+
+- hosts: log-es-backup
+  become: yes
+  vars_files:
+    - 'secrets/{{env}}.yml'
+  roles:
+    - es-azure-restore
+  tags:
+    - log_es_restore
diff --git a/ansible/installDeps.sh b/ansible/installDeps.sh
new file mode 100644
index 0000000000000000000000000000000000000000..7ea971e00931c9d5d2e3795f508418e90750c2e1
--- /dev/null
+++ b/ansible/installDeps.sh
@@ -0,0 +1,2 @@
+apk -v --update --no-cache add jq
+apk -v add ansible=2.3.0.0-r1
diff --git a/ansible/jenkins-backup.yml b/ansible/jenkins-backup.yml
new file mode 100644
index 0000000000000000000000000000000000000000..9d37cfebb1b1a12387d4d6e25337ba7789a35a7c
--- /dev/null
+++ b/ansible/jenkins-backup.yml
@@ -0,0 +1,9 @@
+---
+- name: Jenkins backup
+  hosts: jenkins-backup
+  vars_files:
+    - 'secrets/{{env}}.yml'
+  roles:
+    - jenkins-backup-upload
+  tags:
+    - jenkins-backup
\ No newline at end of file
diff --git a/ansible/jenkins.yml b/ansible/jenkins.yml
new file mode 100644
index 0000000000000000000000000000000000000000..5e641be0ee4f65a051c47e9e0a0797e220282fc2
--- /dev/null
+++ b/ansible/jenkins.yml
@@ -0,0 +1,57 @@
+---
+- name: Setup Jenkins Master
+  hosts: jenkins-master
+  vars_files:
+    - 'secrets/{{env}}.yml'
+  become: yes
+  pre_tasks:
+    - name: Ensure backup directory exists
+      file: dest=/jenkins-backup state=directory owner=jenkins group=jenkins
+    - name: Ensure zip is installed
+      apt: name=zip
+    - name: Ensure secrets directory exists
+      file: dest=/run/secrets state=directory owner=jenkins group=jenkins
+    - name: Install vault password
+      copy: dest=/run/secrets/vault-pass content={{ vault_pass }} owner=jenkins group=jenkins
+  roles:
+    - openjdk
+    - git
+    - ansible
+    - azure-cli
+    - jenkins
+  tags:
+    - jenkins-master
+
+- name: Setup CI proxy
+  hosts: ci-proxy
+  vars_files:
+    - 'secrets/{{env}}.yml'
+  become: yes
+  pre_tasks:
+    - name: Ensure {{ ci_proxy_nginx_ssl_dir }} exists
+      file: path={{ ci_proxy_nginx_ssl_dir }} state=directory
+      notify: restart nginx
+
+    - name: Ensure {{ ci_proxy_nginx_ssl_cert_file }} exists
+      copy: dest={{ ci_proxy_nginx_ssl_cert_file }} content={{ vault_ci_proxy_nginx_ssl_cert }} mode=0600
+      notify: restart nginx
+
+    - name: Ensure {{ ci_proxy_nginx_ssl_key_file }} exists
+      copy: dest={{ ci_proxy_nginx_ssl_key_file }} content={{ vault_ci_proxy_nginx_ssl_key }} mode=0600
+      notify: restart nginx
+  roles:
+    - role: nginx
+      nginx_configs:
+        ssl:
+          - ssl_certificate_key {{ ci_proxy_nginx_ssl_key_file }}
+          - ssl_certificate {{ ci_proxy_nginx_ssl_cert_file }}
+      nginx_sites:
+        jenkins:
+          - listen {{ ci_proxy_ssl_port }} ssl
+          - location /jenkins/ { proxy_pass http://{{ groups['jenkins-master'][0] }}:{{ jenkins_http_port }}{{ jenkins_url_prefix }}/; }
+          - location / { return 301 https://$host/jenkins; }
+        jenkins_http_redirect:
+          - listen 80
+          - return 301 https://$host$request_uri
+  tags:
+    - ci-proxy
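jenkins.yml carries one play per concern (`jenkins-master`, `ci-proxy`), so either half can be re-run on its own. A sketch under the same hypothetical inventory/env assumptions as above:

```sh
# Re-provision only the nginx CI proxy; the vault_* values are read from
# secrets/<env>.yml by the play itself.
ansible-playbook -i inventories/staging jenkins.yml -e env=staging --tags ci-proxy
```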
diff --git a/ansible/keycloak.yml b/ansible/keycloak.yml
new file mode 100644
index 0000000000000000000000000000000000000000..fd0eec1b56a41a896bc1e8758b7c23d41d2c09a0
--- /dev/null
+++ b/ansible/keycloak.yml
@@ -0,0 +1,15 @@
+---
+- hosts: keycloak
+  become: true
+  pre_tasks:
+    - name: Add group
+      group: name=keycloak state=present
+      become: true
+    - name: Create user
+      user: name=keycloak group=keycloak shell=/bin/bash createhome=yes update_password=always
+      become: true
+  vars_files:
+    - 'secrets/{{env}}.yml'
+  roles:
+    - java8
+    - keycloak
diff --git a/ansible/mongo.yml b/ansible/mongo.yml
new file mode 100644
index 0000000000000000000000000000000000000000..9eca766f1c08bafb8bca8d504b58ad9e9eacf66b
--- /dev/null
+++ b/ansible/mongo.yml
@@ -0,0 +1,9 @@
+---
+# This playbook deploys the whole MongoDB cluster with replication and sharding.
+- hosts: mongodb
+  become: yes
+  vars_files:
+    - 'secrets/{{env}}.yml'
+  roles:
+    - mongo
+
diff --git a/ansible/mongobackup.yml b/ansible/mongobackup.yml
new file mode 100644
index 0000000000000000000000000000000000000000..dc0625375387a5d89198353ce1dbe13455076986
--- /dev/null
+++ b/ansible/mongobackup.yml
@@ -0,0 +1,8 @@
+- hosts: mongodb
+  vars_files:
+    - 'secrets/{{env}}.yml'
+  become: yes
+  roles:
+    - mongo-backup
+
+
diff --git a/ansible/ops.yml b/ansible/ops.yml
new file mode 100644
index 0000000000000000000000000000000000000000..4e8dac055bffe57844ca0fdc1ac523183e2ed806
--- /dev/null
+++ b/ansible/ops.yml
@@ -0,0 +1,17 @@
+- hosts: all
+  become: yes
+  vars_files:
+    - 'secrets/{{env}}.yml'
+  roles:
+    - vm-agents-filebeat
+  tags:
+    - log-forwarder
+
+- hosts: all
+  become: yes
+  vars_files:
+    - 'secrets/{{env}}.yml'
+  roles:
+    - vm-agents-nodeexporter
+  tags:
+    - metrics-exporter
\ No newline at end of file
diff --git a/ansible/postgresql-backup.yml b/ansible/postgresql-backup.yml
new file mode 100644
index 0000000000000000000000000000000000000000..c9b2e9065e4d9693b53fc9ea23b6ee72ea9d2762
--- /dev/null
+++ b/ansible/postgresql-backup.yml
@@ -0,0 +1,8 @@
+- hosts: postgresql-backup
+  become: yes
+  vars_files:
+    - 'secrets/{{env}}.yml'
+  roles:
+    - postgresql-backup
+  tags:
+    - postgresql-backup
diff --git a/ansible/postgresql-data-update.yml b/ansible/postgresql-data-update.yml
new file mode 100644
index 0000000000000000000000000000000000000000..c4972f45378808ca0155b8df57bd9061071eeebf
--- /dev/null
+++ b/ansible/postgresql-data-update.yml
@@ -0,0 +1,6 @@
+- hosts: postgresql-backup
+  become: yes
+  vars_files:
+    - 'secrets/{{env}}.yml'
+  roles:
+    - postgresql-data-update
\ No newline at end of file
diff --git a/ansible/postgresql-restore.yml b/ansible/postgresql-restore.yml
new file mode 100644
index 0000000000000000000000000000000000000000..3791f29016dffea0230af6f9137e2cd657c92bb0
--- /dev/null
+++ b/ansible/postgresql-restore.yml
@@ -0,0 +1,8 @@
+- hosts: postgresql-restore
+  become: yes
+  vars_files:
+    - 'secrets/{{env}}.yml'
+  roles:
+    - postgresql-restore
+  tags:
+    - postgresql-restore
diff --git a/ansible/postgresql-slave-to-master-promotion.yml b/ansible/postgresql-slave-to-master-promotion.yml
new file mode 100644
index 0000000000000000000000000000000000000000..a49864c299c8ef63489938b98c764ee93d181765
--- /dev/null
+++ b/ansible/postgresql-slave-to-master-promotion.yml
@@ -0,0 +1,19 @@
+- hosts: postgresql-master
+  become: yes
+  gather_facts: false
+  vars_files:
+    - 'secrets/{{env}}.yml'
+  tasks:
+    - name: Ensure postgresql service is stopped on master
+      service: name=postgresql state=stopped
+      tags:
+        - ensure-postgresql-master-stopped
+
+- hosts: postgresql-slave
+  become: yes
+  vars_files:
+    - 'secrets/{{env}}.yml'
+  roles:
+    - postgresql-slave-to-master-promotion
+  tags:
+    - postgresql-slave-to-master-promotion
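The promotion playbook above is order-sensitive: the first play stops PostgreSQL on the old master, and only then does the second play promote the slave. A hedged run sketch (inventory and env name are assumptions):

```sh
# Stop the old master and promote the slave in one run; --tags can re-run just
# the promotion play if the stop step already succeeded.
ansible-playbook -i inventories/prod postgresql-slave-to-master-promotion.yml -e env=prod
```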
diff --git a/ansible/provision.yml b/ansible/provision.yml
new file mode 100644
index 0000000000000000000000000000000000000000..c68e4cdabf491256291231bd9fb456da46af18ba
--- /dev/null
+++ b/ansible/provision.yml
@@ -0,0 +1,56 @@
+- hosts: "cassandra"
+  become: yes
+  vars_files:
+    - 'secrets/{{env}}.yml'
+  roles:
+    - java
+    - cassandra
+  tags:
+    - cassandra
+
+- hosts: es
+  become: yes
+  vars_files:
+    - 'secrets/{{env}}.yml'
+  roles:
+    - openjdk
+    - elasticsearch
+  tags:
+    - es
+
+- hosts: log-es
+  become: yes
+  vars_files:
+    - 'secrets/{{env}}.yml'
+  roles:
+    - openjdk
+    - elasticsearch_old
+  tags:
+    - log-es
+
+- hosts: postgresql-master
+  become: yes
+  vars_files:
+    - 'secrets/{{env}}.yml'
+  pre_tasks:
+    - name: install setfacl support
+      become: yes
+      apt: pkg=acl
+  roles:
+    - postgresql-master
+  tags:
+    - postgresql-master
+
+- hosts: postgresql-slave
+  become: yes
+  vars_files:
+    - 'secrets/{{env}}.yml'
+  pre_tasks:
+    - name: install setfacl support
+      become: yes
+      apt: pkg=acl
+  roles:
+    - postgresql-slave
+  tags:
+    - postgresql-slave
+
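provision.yml groups datastore provisioning by tag (`cassandra`, `es`, `log-es`, `postgresql-master`, `postgresql-slave`), so a single datastore can be rebuilt without touching the rest. A sketch under the same inventory/env assumptions:

```sh
# Provision only the Cassandra hosts.
ansible-playbook -i inventories/staging provision.yml -e env=staging --tags cassandra
```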
diff --git a/ansible/reporting.yml b/ansible/reporting.yml
new file mode 100644
index 0000000000000000000000000000000000000000..a92f4187ff78bd40948a87a262ad98c5f82c881b
--- /dev/null
+++ b/ansible/reporting.yml
@@ -0,0 +1,6 @@
+- hosts: "cassandra"
+  become: yes
+  vars_files:
+    - 'secrets/{{env}}.yml'
+  roles:
+    - db-dumps
\ No newline at end of file
diff --git a/ansible/roles/.DS_Store b/ansible/roles/.DS_Store
new file mode 100644
index 0000000000000000000000000000000000000000..650567a4bdafec1b531acac49bcffc68a3813af6
Binary files /dev/null and b/ansible/roles/.DS_Store differ
diff --git a/ansible/roles/ansible/tasks/main.yml b/ansible/roles/ansible/tasks/main.yml
new file mode 100644
index 0000000000000000000000000000000000000000..7e2dc8d82e6a72d8cdcadd8bde6d71059cfd078f
--- /dev/null
+++ b/ansible/roles/ansible/tasks/main.yml
@@ -0,0 +1,5 @@
+- name: Add ansible repo
+  apt_repository: repo=ppa:ansible/ansible update_cache=yes
+
+- name: Install ansible
+  apt: name=ansible
diff --git a/ansible/roles/azure-cli/tasks/main.yml b/ansible/roles/azure-cli/tasks/main.yml
new file mode 100644
index 0000000000000000000000000000000000000000..552766437bee6d9d5fff67b5a7761f489407e5de
--- /dev/null
+++ b/ansible/roles/azure-cli/tasks/main.yml
@@ -0,0 +1,11 @@
+- name: Add Azure apt repository
+  apt_repository: repo='deb [arch=amd64] https://packages.microsoft.com/repos/azure-cli/ wheezy main' state=present
+
+- name: Import Azure signing key
+  apt_key: keyserver=packages.microsoft.com id=417A0893
+
+- name: ensure apt-transport-https is installed
+  apt: name=apt-transport-https
+
+- name: ensure azure-cli is installed
+  apt: name=azure-cli update_cache=yes
\ No newline at end of file
diff --git a/ansible/roles/bootstrap/main.yml b/ansible/roles/bootstrap/main.yml
new file mode 100644
index 0000000000000000000000000000000000000000..78180c1137bb77928f9966fbd0a1d68f6169b3a8
--- /dev/null
+++ b/ansible/roles/bootstrap/main.yml
@@ -0,0 +1,11 @@
+
+- name: Cassandra | Install required python packages for cassandra snapshotter via apt-get
+  become: yes
+  action: apt pkg={{ item }} state=present update_cache=yes
+  with_items:
+    - libffi-dev
+    - libssl-dev
+    - build-essential
+    - python-dev
+    - lzop
+    - curl
diff --git a/ansible/roles/bootstrap_any/tasks/main.yml b/ansible/roles/bootstrap_any/tasks/main.yml
new file mode 100644
index 0000000000000000000000000000000000000000..a2378cca8559622c2403a54e03e166936d9edacd
--- /dev/null
+++ b/ansible/roles/bootstrap_any/tasks/main.yml
@@ -0,0 +1,66 @@
+- include_vars: "secrets/{{bootstrap_secret_file}}.yml"
+
+# - name: APT update
+#   become: yes
+#   apt: update_cache=yes
+
+- name: Create group deployer
+  group: name=deployer state=present
+  become: yes
+
+- name: Create user deployer
+  user: name=deployer comment="deployer" group=deployer groups=sudo shell=/bin/bash password='{{ sudo_crypt_deployer }}' update_password=always
+  become: yes
+
+- name: Add ssh public key deployer
+  authorized_key: user=deployer key='{{ssh_public_key_deployer}}'
+  become: yes
+
+# - name: SSH configuration file change
+#   become: yes
+#   template:
+#     src: sshd.j2
+#     dest: /etc/ssh/sshd_config
+#     group: root
+#     owner: root
+#     mode: 0644
+
+# - name: Forward ssh-agent
+#   become: yes
+#   template:
+#     src: ssh.j2
+#     dest: /etc/ssh/ssh_config
+#     group: root
+#     owner: root
+#     mode: 0644
+
+# - name: Creating banner
+#   become: yes
+#   template:
+#     src: banner.j2
+#     dest: /etc/issue.net
+#     group: root
+#     owner: root
+#     mode: 0644
+
+# - name: Install python-dev
+#   apt: name=python-dev state=present
+#   become: yes
+
+# - name: Install pip
+#   apt: name=python-pip state=present
+#   become: yes
+
+# - name: Install zip
+#   apt: name=zip state=present
+#   become: yes
+
+# - name: Install unzip
+#   apt: name=unzip state=present
+#   become: yes
+
+- name: Create ulimit configuration
+  template:
+    src: limits.conf.j2
+    dest: "{{ ulimit_config_location }}"
+  become: yes
diff --git a/ansible/roles/bootstrap_any/templates/banner.j2 b/ansible/roles/bootstrap_any/templates/banner.j2
new file mode 100644
index 0000000000000000000000000000000000000000..3410c9e922ded6e488b6c4a5992db29d0a2a95ac
--- /dev/null
+++ b/ansible/roles/bootstrap_any/templates/banner.j2
@@ -0,0 +1,24 @@
+
+
+###############################################################################
+                      .__                             .__
+  ____   ____    ____ |__| ____   ____   ___________|__| ____    ____
+_/ __ \ /    \  / ___\|  |/    \_/ __ \_/ __ \_  __ \  |/    \  / ___\
+\  ___/|   |  \/ /_/  >  |   |  \  ___/\  ___/|  | \/  |   |  \/ /_/  >
+ \___  >___|  /\___  /|__|___|  /\___  >\___  >__|  |__|___|  /\___  /
+     \/     \//_____/         \/     \/     \/              \//_____/
+
+###############################################################################
+                            Sunbird Secure Login
+###############################################################################
+
+ALERT! You are entering into a secured area!
+
+All activities on this system are logged.
+This service is restricted to authorized users only.
+
+Unauthorized access will be fully investigated and reported.
+
+###############################################################################
+
+
diff --git a/ansible/roles/bootstrap_any/templates/limits.conf.j2 b/ansible/roles/bootstrap_any/templates/limits.conf.j2
new file mode 100644
index 0000000000000000000000000000000000000000..771512870ed38253604cc004432c8e49c1eefc29
--- /dev/null
+++ b/ansible/roles/bootstrap_any/templates/limits.conf.j2
@@ -0,0 +1,3 @@
+{% for item in ulimit_config -%}
+  {{ item.domain }} {{ item.type }} {{ item.item }} {{ item.value }}
+{% endfor %}
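bootstrap.yml takes the target group itself as a `hosts` extra-var, and bootstrap_any loads `secrets/{{bootstrap_secret_file}}.yml` for the deployer password and SSH key. A sketch; the group name and secret file name here are assumptions:

```sh
# Bootstrap an arbitrary host group; the bootstrap_swarm play only fires on
# hosts where swarm_master is set in inventory.
ansible-playbook -i inventories/staging bootstrap.yml \
  -e hosts=swarm-nodes -e bootstrap_secret_file=staging
```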
diff --git a/ansible/roles/bootstrap_any/templates/ssh.j2 b/ansible/roles/bootstrap_any/templates/ssh.j2
new file mode 100644
index 0000000000000000000000000000000000000000..7349aefc32ffdc5fd934a80239ef63f95a27c988
--- /dev/null
+++ b/ansible/roles/bootstrap_any/templates/ssh.j2
@@ -0,0 +1,54 @@
+
+# This is the ssh client system-wide configuration file.  See
+# ssh_config(5) for more information.  This file provides defaults for
+# users, and the values can be changed in per-user configuration files
+# or on the command line.
+
+# Configuration data is parsed as follows:
+#  1. command line options
+#  2. user-specific file
+#  3. system-wide file
+# Any configuration value is only changed the first time it is set.
+# Thus, host-specific definitions should be at the beginning of the
+# configuration file, and defaults at the end.
+
+# Site-wide defaults for some commonly used options.  For a comprehensive
+# list of available options, their meanings and defaults, please see the
+# ssh_config(5) man page.
+
+Host *
+    ForwardAgent yes
+#   ForwardX11 no
+#   ForwardX11Trusted yes
+#   RhostsRSAAuthentication no
+#   RSAAuthentication yes
+#   PasswordAuthentication yes
+#   HostbasedAuthentication no
+#   GSSAPIAuthentication no
+#   GSSAPIDelegateCredentials no
+#   GSSAPIKeyExchange no
+#   GSSAPITrustDNS no
+#   BatchMode no
+#   CheckHostIP yes
+#   AddressFamily any
+#   ConnectTimeout 0
+#   StrictHostKeyChecking ask
+#   IdentityFile ~/.ssh/identity
+#   IdentityFile ~/.ssh/id_rsa
+#   IdentityFile ~/.ssh/id_dsa
+#   Port 22
+#   Protocol 2,1
+#   Cipher 3des
+#   Ciphers aes128-ctr,aes192-ctr,aes256-ctr,arcfour256,arcfour128,aes128-cbc,3des-cbc
+#   MACs hmac-md5,hmac-sha1,umac-64@openssh.com,hmac-ripemd160
+#   EscapeChar ~
+#   Tunnel no
+#   TunnelDevice any:any
+#   PermitLocalCommand no
+#   VisualHostKey no
+#   ProxyCommand ssh -q -W %h:%p gateway.example.com
+#   RekeyLimit 1G 1h
+    SendEnv LANG LC_*
+    HashKnownHosts yes
+    GSSAPIAuthentication yes
+    GSSAPIDelegateCredentials no
+
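Since ssh.j2 replaces the system-wide client configuration, it is worth checking the values OpenSSH actually resolves before rolling it out; `ssh -G` prints the merged configuration for a host. A hedged check, with a placeholder host name:

```sh
# Confirm agent forwarding and known-hosts hashing took effect for an example host.
ssh -G example-host | grep -Ei 'forwardagent|hashknownhosts'
```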
diff --git a/ansible/roles/bootstrap_any/templates/sshd.j2 b/ansible/roles/bootstrap_any/templates/sshd.j2
new file mode 100644
index 0000000000000000000000000000000000000000..a07d5ae7c2e3221c820588fe21e7e05dbcdcf3a0
--- /dev/null
+++ b/ansible/roles/bootstrap_any/templates/sshd.j2
@@ -0,0 +1,100 @@
+# Package generated configuration file
+# See the sshd_config(5) manpage for details
+
+# What ports, IPs and protocols we listen for
+Port 22
+# Use these options to restrict which interfaces/protocols sshd will bind to
+#ListenAddress ::
+#ListenAddress 0.0.0.0
+Protocol 2
+# HostKeys for protocol version 2
+HostKey /etc/ssh/ssh_host_rsa_key
+HostKey /etc/ssh/ssh_host_dsa_key
+HostKey /etc/ssh/ssh_host_ecdsa_key
+HostKey /etc/ssh/ssh_host_ed25519_key
+#Privilege Separation is turned on for security
+UsePrivilegeSeparation yes
+
+#Configure Idle Logout Timeout Interval
+ClientAliveInterval 300
+ClientAliveCountMax 0
+
+# Lifetime and size of ephemeral version 1 server key
+KeyRegenerationInterval 3600
+ServerKeyBits 1024
+
+# Logging
+SyslogFacility AUTH
+LogLevel INFO
+
+# Authentication:
+LoginGraceTime 120
+PermitRootLogin no
+StrictModes yes
+
+AddressFamily inet
+
+RSAAuthentication yes
+PubkeyAuthentication yes
+#AuthorizedKeysFile %h/.ssh/authorized_keys
+
+# Don't read the user's ~/.rhosts and ~/.shosts files
+IgnoreRhosts yes
+# For this to work you will also need host keys in /etc/ssh_known_hosts
+RhostsRSAAuthentication no
+# similar for protocol version 2
+HostbasedAuthentication no
+# Uncomment if you don't trust ~/.ssh/known_hosts for RhostsRSAAuthentication
+#IgnoreUserKnownHosts yes
+
+# To enable empty passwords, change to yes (NOT RECOMMENDED)
+PermitEmptyPasswords no
+
+# Change to yes to enable challenge-response passwords (beware issues with
+# some PAM modules and threads)
+ChallengeResponseAuthentication no
+
+#AuthenticationMethods publickey,keyboard-interactive
+
+# Change to no to disable tunnelled clear text passwords
+PasswordAuthentication no
+
+# Kerberos options
+#KerberosAuthentication no
+#KerberosGetAFSToken no
+#KerberosOrLocalPasswd yes
+#KerberosTicketCleanup yes
+
+# GSSAPI options
+#GSSAPIAuthentication no
+#GSSAPICleanupCredentials yes
+
+X11Forwarding yes
+X11DisplayOffset 10
+PrintMotd no
+PrintLastLog yes
+TCPKeepAlive yes
+#UseLogin no
+
+#MaxStartups 10:30:60
+Banner /etc/issue.net
+
+# Allow client to pass locale environment variables
+AcceptEnv LANG LC_*
+
+Subsystem sftp /usr/lib/openssh/sftp-server
+
+# Set this to 'yes' to enable PAM authentication, account processing,
+# and session processing. If this is enabled, PAM authentication will
+# be allowed through the ChallengeResponseAuthentication and
+# PasswordAuthentication.  Depending on your PAM configuration,
+# PAM authentication via ChallengeResponseAuthentication may bypass
+# the setting of "PermitRootLogin without-password".
+# If you just want the PAM account and session checks to run without
+# PAM authentication, then enable this but set PasswordAuthentication
+# and ChallengeResponseAuthentication to 'no'.
+
+UsePAM yes
+
+Ciphers aes128-ctr,aes192-ctr,aes256-ctr
+MACs hmac-sha1,umac-64@openssh.com,hmac-ripemd160
diff --git a/ansible/roles/bootstrap_swarm/defaults/main.yml b/ansible/roles/bootstrap_swarm/defaults/main.yml
new file mode 100644
index 0000000000000000000000000000000000000000..91a78b02417dbea8038f8509eeca053c71466546
--- /dev/null
+++ b/ansible/roles/bootstrap_swarm/defaults/main.yml
@@ -0,0 +1,8 @@
+---
+networks:
+  - logger
+  - "{{ sunbird_network }}"
+  - api-manager_default
+  - monitoring
+  - monitor
+  - jenkins_default
diff --git a/ansible/roles/bootstrap_swarm/tasks/main.yml b/ansible/roles/bootstrap_swarm/tasks/main.yml
new file mode 100644
index 0000000000000000000000000000000000000000..b352d7f09b63260e170602fe91ffaf1a9b3f7a4f
--- /dev/null
+++ b/ansible/roles/bootstrap_swarm/tasks/main.yml
@@ -0,0 +1,24 @@
+- include_vars: secrets/{{env}}.yml
+
+- name: Install jq
+  apt: name=jq state=present
+  become: yes
+
+- name: Create secrets
+  shell: "echo '{{item.value}}' | docker secret create {{item.name}} -"
+  with_items: "{{swarm_secrets}}"
+  ignore_errors: true
+  no_log: true
+
+- name: Create networks
+  shell: "docker network create --driver overlay {{item}}"
+  with_items:
+    - "{{networks}}"
+  ignore_errors: true
+  tags:
+    - create_network
+
+# - name: Create networks
+
+# AGENT
+# - name: Create /workspace
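Both the sshd.j2 template and the swarm bootstrap tasks above are easy to sanity-check on a target node: `sshd -t` validates a rendered server config, and the standard docker CLI confirms that the secrets and overlay networks exist. A hedged sketch:

```sh
# Validate the rendered sshd config before restarting the daemon.
sudo sshd -t -f /etc/ssh/sshd_config

# List swarm secrets and the overlay networks created by bootstrap_swarm.
docker secret ls
docker network ls --filter driver=overlay
```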
diff --git a/ansible/roles/cassandra-backup/README.md b/ansible/roles/cassandra-backup/README.md
new file mode 100755
index 0000000000000000000000000000000000000000..fac9c2e53a165eda3adf4988729d87f576527005
--- /dev/null
+++ b/ansible/roles/cassandra-backup/README.md
@@ -0,0 +1,78 @@
+cassandra-backup
+================
+
+An [Ansible] role to install [Cassandra]
+
+Requirements
+------------
+
+- Oracle Java 8
+
+Install [Ansible] requirements `ansible-galaxy install -r requirements.yml`
+
+Role Variables
+--------------
+
+```
+---
+# defaults file for ansible-cassandra
+cassandra_cluster_group: 'cassandra-cluster-nodes'
+cassandra_cluster_name: 'Test Cluster'
+cassandra_cluster_setup: false
+cassandra_commitlog_directory: '/var/lib/cassandra/commitlog'
+cassandra_config: false
+cassandra_debian_repo_info:
+  repo: 'deb http://www.apache.org/dist/cassandra/debian 36x main'
+  repo_key: 'https://www.apache.org/dist/cassandra/KEYS'
+cassandra_data_file_directories:
+  - '/var/lib/cassandra/data'
+cassandra_hints_directory: '/var/lib/cassandra/hints'
+cassandra_listen_address: "{{ hostvars[inventory_hostname]['ansible_' + cassandra_listen_interface]['ipv4']['address'] }}"
+cassandra_listen_interface: 'eth1'
+cassandra_log_dir: '/var/log/cassandra'
+cassandra_root_dir: '/etc/cassandra'
+cassandra_saved_caches_directory: '/var/lib/cassandra/saved_caches'
+cassandra_seeds: '127.0.0.1' # Only used if not setting up a cluster
+cassandra_version: '3.6'
+```
+
+Dependencies
+------------
+
+Reference requirements
+
+Example Playbook
+----------------
+
+```
+---
+- hosts: cassandra-cluster-nodes
+  become: true
+  vars:
+    cassandra_cluster_setup: true
+    cassandra_config: true
+    pri_domain_name: 'test.vagrant.local'
+  roles:
+    - role: ansible-oracle-java8
+    - role: ansible-cassandra
+  tasks:
+```
+
+License
+-------
+
+BSD
+
+Author Information
+------------------
+
+
+Larry Smith Jr.
+- [@mrlesmithjr]
+- [EveryThingShouldBeVirtual]
+- mrlesmithjr [at] gmail.com
+
+[@mrlesmithjr]: <https://twitter.com/mrlesmithjr>
+[EveryThingShouldBeVirtual]: <http://everythingshouldbevirtual.com>
+[Ansible]: <https://www.ansible.com>
+[Cassandra]: <http://cassandra.apache.org/>
diff --git a/ansible/roles/cassandra-backup/handlers/main.yml b/ansible/roles/cassandra-backup/handlers/main.yml
new file mode 100755
index 0000000000000000000000000000000000000000..c6f499a8fc6d5a18e2560479c66061851cd4d93f
--- /dev/null
+++ b/ansible/roles/cassandra-backup/handlers/main.yml
@@ -0,0 +1,6 @@
+---
+# handlers file for ansible-cassandra
+- name: "restart cassandra"
+  service:
+    name: "cassandra"
+    state: "restarted"
diff --git a/ansible/roles/cassandra-backup/meta/main.yml b/ansible/roles/cassandra-backup/meta/main.yml
new file mode 100644
index 0000000000000000000000000000000000000000..23b18a800a4645387a83ac0873b6f893d62c081d
--- /dev/null
+++ b/ansible/roles/cassandra-backup/meta/main.yml
@@ -0,0 +1,2 @@
+dependencies:
+  - azure-cli
\ No newline at end of file
diff --git a/ansible/roles/cassandra-backup/tasks/main.yml b/ansible/roles/cassandra-backup/tasks/main.yml
new file mode 100755
index 0000000000000000000000000000000000000000..51ba7f64248133a6ac508972db00b0b035a22698
--- /dev/null
+++ b/ansible/roles/cassandra-backup/tasks/main.yml
@@ -0,0 +1,45 @@
+- name: Create the directory
+  become: yes
+  file: path=/data/cassandra/ state=directory recurse=yes
+
+- name: copy the backup script
+  become: yes
+  template: src=cassandra_backup.j2 dest=/home/deployer/cassandra_backup.sh mode=0755
+
+- name: run the backup script
+  become: yes
+  shell: sh /home/deployer/cassandra_backup.sh
+
+- name: Check doc_root path
+  shell: ls -all /data/cassandra/backup/
+  register: doc_data
+
+- name: print doc_root to console
+  debug:
+    var: doc_data
+
+- set_fact:
+    cassandra_backup_gzip_file_name: "cassandra_backup_{{ lookup('pipe', 'date +%Y%m%d') }}.zip"
+
+
+- set_fact:
+    cassandra_backup_gzip_file_path: "{{ cassandra.backup_dir }}/{{ cassandra_backup_gzip_file_name }}"
+
+- name: Ensure azure blob storage container exists
+  command: az storage container create --name {{ cassandra.backup_azure_container_name }}
+  ignore_errors: true
+  environment:
+    AZURE_STORAGE_ACCOUNT: "{{ cassandra.backup_azure_storage_account_name }}"
+    AZURE_STORAGE_KEY: "{{ cassandra.backup_azure_storage_access_key }}"
+
+- name: Upload to azure blob storage
+  command: az storage blob upload --name {{ cassandra_backup_gzip_file_name }} --file {{ cassandra_backup_gzip_file_path }} --container-name {{ cassandra.backup_azure_container_name }}
+  environment:
+    AZURE_STORAGE_ACCOUNT: "{{ cassandra.backup_azure_storage_account_name }}"
+    AZURE_STORAGE_KEY: "{{ cassandra.backup_azure_storage_access_key }}"
+  async: 3600
+  poll: 10
+
+- name: clean up backup dir after upload
+  file: path="{{ cassandra.backup_dir }}" state=absent
+
\ No newline at end of file
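The backup tasks above authenticate to blob storage purely through the `AZURE_STORAGE_ACCOUNT` / `AZURE_STORAGE_KEY` environment variables, which makes a manual spot-check straightforward. A sketch with placeholder credentials and container name:

```sh
# Confirm the dated zip landed in the container used by cassandra-backup.
export AZURE_STORAGE_ACCOUNT=<account-name>
export AZURE_STORAGE_KEY=<access-key>
az storage blob list --container-name <container> --output table
```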
diff --git a/ansible/roles/cassandra-backup/templates/cassandra_backup.j2 b/ansible/roles/cassandra-backup/templates/cassandra_backup.j2
new file mode 100644
index 0000000000000000000000000000000000000000..191c3b0bd34d2de09d9962ff32e252fc73152ece
--- /dev/null
+++ b/ansible/roles/cassandra-backup/templates/cassandra_backup.j2
@@ -0,0 +1,63 @@
+#!/bin/sh
+
+export PATH=/sbin:/bin:/usr/sbin:/usr/bin:/opt/java/bin
+
+DATE=`date +%Y%m%d`
+
+SNAME="snapshot-$DATE"
+
+BACKUPDIRECTORY="/data/cassandra/backup/"
+
+if [ ! -d "$BACKUPDIRECTORY" ]; then
+    echo "Directory $BACKUPDIRECTORY not found, creating..."
+    mkdir $BACKUPDIRECTORY
+fi
+
+if [ ! -d "$BACKUPDIRECTORY" ]; then
+    echo "Directory $BACKUPDIRECTORY not found, exit..."
+    exit
+fi
+
+echo
+echo "Snapshot name: $SNAME"
+echo "Clear all snapshots"
+nodetool -h 127.0.0.1 clearsnapshot
+
+cd $BACKUPDIRECTORY
+pwd
+rm -rf *
+
+echo "Taking snapshot"
+nodetool -h 127.0.0.1 snapshot -t $SNAME
+SFILES=`ls -1 -d /var/lib/cassandra/data/*/*/snapshots/$SNAME`
+for f in $SFILES
+do
+    echo "Process snapshot $f"
+    TABLE=`echo $f | awk -F/ '{print $(NF-2)}'`
+    KEYSPACE=`echo $f | awk -F/ '{print $(NF-3)}'`
+
+    if [ ! -d "$BACKUPDIRECTORY/$SNAME" ]; then
+        mkdir $BACKUPDIRECTORY/$SNAME
+    fi
+
+    if [ ! -d "$BACKUPDIRECTORY/$SNAME/$KEYSPACE" ]; then
+        mkdir $BACKUPDIRECTORY/$SNAME/$KEYSPACE
+    fi
+
+    mkdir $BACKUPDIRECTORY/$SNAME/$KEYSPACE/$TABLE
+    find $f -maxdepth 1 -type f -exec mv -t $BACKUPDIRECTORY/$SNAME/$KEYSPACE/$TABLE/ {} +
+done
+cd /var/lib/cassandra/
+zip -r cassandra_backup_`date +%Y%m%d`.zip data
+cp -r cassandra_backup_`date +%Y%m%d`.zip $BACKUPDIRECTORY
+cd -
+# cd $BACKUPDIRECTORY
+# zip -r cassandra_backup_`date +%Y%m%d`.zip snap*
+echo "Clear Incremental Backups"
+
+SFILES=`ls -1 -d /var/lib/cassandra/data/*/*/backups/`
+for f in $SFILES
+do
+    echo "Clear $f"
+    rm -f $f*
+done
\ No newline at end of file
diff --git a/ansible/roles/cassandra-backup/vars/main.yml b/ansible/roles/cassandra-backup/vars/main.yml
new file mode 100644
index 0000000000000000000000000000000000000000..569e432c18babb4622e5d578ae44deb5173fe4fc
--- /dev/null
+++ b/ansible/roles/cassandra-backup/vars/main.yml
@@ -0,0 +1 @@
+cassandra_root_dir: '/etc/cassandra'
\ No newline at end of file
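cassandra_backup.j2 clears old snapshots, takes a fresh `snapshot-YYYYMMDD`, and zips the data directory before upload. Between the snapshot and the cleanup, the node state can be inspected with a standard nodetool command. A hedged check:

```sh
# Show snapshots currently held by this node (the script names them snapshot-<date>).
nodetool -h 127.0.0.1 listsnapshots
```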
diff --git a/ansible/roles/cassandra-cql-update/tasks/main.yml b/ansible/roles/cassandra-cql-update/tasks/main.yml
new file mode 100644
index 0000000000000000000000000000000000000000..59b1c82545640624c02e3ffff2d1b5944d55dca3
--- /dev/null
+++ b/ansible/roles/cassandra-cql-update/tasks/main.yml
@@ -0,0 +1,30 @@
+- name: Copy the templates
+  become: yes
+  template: src={{item}} dest=/tmp/{{item}}
+  with_items:
+    - cassandra.cql
+    - pageMgmt.csv
+    - pageSection.csv
+  tags:
+    - always
+
+- name: Run the cassandra.cql command
+  become: yes
+  command: "cqlsh -f /tmp/cassandra.cql"
+  ignore_errors: yes
+  tags:
+    - cql
+
+- name: Run the pageMgmt.csv command
+  become: yes
+  command: cqlsh -e "COPY sunbird.page_management(id, appmap,createdby ,createddate ,name ,organisationid ,portalmap ,updatedby ,updateddate ) FROM '/tmp/pageMgmt.csv'"
+  ignore_errors: yes
+  tags:
+    - pagemanagement
+
+- name: Run the pageSection.csv command
+  become: yes
+  command: cqlsh -e "COPY sunbird.page_section(id, alt,createdby ,createddate ,description ,display ,imgurl ,name,searchquery , sectiondatatype ,status , updatedby ,updateddate) FROM '/tmp/pageSection.csv'"
+  ignore_errors: yes
+  tags:
+    - pagesection
\ No newline at end of file
diff --git a/ansible/roles/cassandra-cql-update/templates/cassandra.cql b/ansible/roles/cassandra-cql-update/templates/cassandra.cql
new file mode 100644
index 0000000000000000000000000000000000000000..6940d80cf7e8b05cfaa70b1c92787b01cb3e3c54
--- /dev/null
+++ b/ansible/roles/cassandra-cql-update/templates/cassandra.cql
@@ -0,0 +1,510 @@
+CREATE KEYSPACE IF NOT EXISTS sunbird WITH replication = {'class':'SimpleStrategy','replication_factor':1};
+
+//to change cluster name
+//UPDATE system.local SET cluster_name = 'sunbird' where key='local';
+//ALTER USER cassandra WITH PASSWORD 'password';
+USE sunbird;
+
+/*
+creation of id= one way hash of (userId##courseId) here courseId is identifier of course mgmt table
+toc url we have to generate through json of content id from ekStep
+here status is (default(0),inProgress(1),completed(2))
+progress is no of content completed
+*/
+CREATE TABLE IF NOT EXISTS sunbird.course_enrollment(id text, courseId text, courseName text,userId text,enrolledDate text,
+description text,tocUrl text,status int,active boolean,delta text,grade text,progress int,lastReadContentId text,
+lastReadContentStatus int,addedBy text,courseLogoUrl text,dateTime timestamp,contentId text,PRIMARY KEY (id));
+
+CREATE INDEX inx_ce_userId ON sunbird.course_enrollment (userId);
+CREATE INDEX inx_ce_courseId ON sunbird.course_enrollment (courseId);
+CREATE INDEX inx_ce_course_name ON sunbird.course_enrollment (courseName);
+CREATE INDEX inx_ce_status ON sunbird.course_enrollment (status);
+
+/*
+creation of id = one way hash of (userId##contentId##courseId##batchId)
+status is (default(0),inProgress(1),completed(2))
+*/
+CREATE TABLE IF NOT EXISTS sunbird.content_consumption(id text, contentId text, courseId text, userId text,viewPosition text,viewCount int,lastAccessTime text,
+contentVersion text,completedCount int,status int,result text,score text,grade text,lastUpdatedTime text,lastCompletedTime text,dateTime timestamp,PRIMARY KEY (id));
+
+CREATE INDEX inx_cc_userId ON sunbird.content_consumption (userId);
+CREATE INDEX inx_cc_contentId ON sunbird.content_consumption (contentId);
+CREATE INDEX inx_cc_status ON sunbird.content_consumption (status);
+CREATE INDEX inx_cc_courseId ON sunbird.content_consumption (courseId);
+
+/*
+creation of id = using timestamp and env
+ id and courseId both are same
+content id is from ekstep
+status DRAFT("draft"), LIVE("live"), RETIRED("retired")
+contentType (pdf,video,word doc etc)
+tutor map<id,name>
+*/
+CREATE TABLE IF NOT EXISTS sunbird.course_management(id text, courseId text, contentId text, courseName text,courseType text,
+facultyId text,facultyName text,organisationId text,organisationName text,enrollementStartDate text,enrollementEndDate text,
+courseDuration text,description text,status text,addedBy text,addedByName text,publishedBy text,publishedByName text,createdDate text,
+publishedDate text,updatedDate text,updatedBy text,updatedByName text,contentType text,createdfor list<text>,noOfLectures int,tocUrl text,
+tutor map<text,text>,courseLogoUrl text,courseRating text,userCount int,PRIMARY KEY (id));
+
+CREATE INDEX inx_cm_facultyId ON sunbird.course_management (facultyId);
+CREATE INDEX inx_cm_organisationId ON sunbird.course_management (organisationId);
+CREATE INDEX inx_cm_courseId ON sunbird.course_management (courseId);
+CREATE INDEX inx_cm_course_name ON sunbird.course_management (courseName);
+CREATE INDEX inx_cm_status ON sunbird.course_management (status);
+CREATE INDEX inx_cm_contentId ON sunbird.course_management (contentId);
+
+/*
+creation of id = one way hash of userName
+here id and userId both are same
+ currently username and email is same
+email and username is unique
+*/
+CREATE TABLE IF NOT EXISTS sunbird.user(id text,userId text,userName text, email text,phone text,aadhaarNo text,createdDate text,updatedDate text,updatedBy text,
+lastLoginTime text,status int,firstName text,lastName text,password text,avatar text,gender text,language text,state text,city text,zipcode text,PRIMARY KEY (id));
+
+CREATE INDEX inx_u_email ON sunbird.user (email);
+CREATE INDEX inx_u_phone ON sunbird.user (phone);
+CREATE INDEX inx_u_status ON sunbird.user (status);
+CREATE INDEX inx_u_userId ON sunbird.user (userId);
+CREATE INDEX inx_u_userName ON sunbird.user (userName);
+
+//user_auth
+//id is auth token
+CREATE TABLE IF NOT EXISTS sunbird.user_auth(id text, userId text,createdDate text,updatedDate text,source text,PRIMARY KEY (id));
+CREATE INDEX inx_ua_userId ON sunbird.user_auth (userId);
+CREATE INDEX inx_ua_source ON sunbird.user_auth (source);
+
+//organisation
+CREATE TABLE IF NOT EXISTS sunbird.organisation(id text, orgName text, description text,communityId text,createdBy text,createdByName text,createdDate text,
+updatedDate text,updatedBy text,status int,relation text,parentOrgId text,orgType text,state text,city text,zipcode text,orgCode text,dateTime timestamp,PRIMARY KEY (id));
+
+CREATE INDEX inx_org_orgName ON sunbird.organisation (orgName);
+CREATE INDEX inx_org_status ON sunbird.organisation (status);
+//page_management
+//id= using timestamp and env
+CREATE TABLE IF NOT EXISTS sunbird.page_management(id text, name text, appMap text,portalMap text,createdDate text,createdBy text,
+updatedDate text,updatedBy text,organisationId text,PRIMARY KEY (id));
+
+CREATE INDEX inx_pm_pageName ON sunbird.page_management (name);
+CREATE INDEX inx_vm_organisationId ON sunbird.page_management (organisationId);
+
+//page_section
+//id= using timestamp and env
+CREATE TABLE IF NOT EXISTS sunbird.page_section(id text, name text, sectionDataType text,description text,display text,
+searchQuery text,createdDate text,createdBy text,updatedDate text,updatedBy text,imgUrl text,alt text,status int,PRIMARY KEY (id));
+CREATE INDEX inx_ps_sectionDataType ON sunbird.page_section (sectionDataType);
+CREATE INDEX inx_ps_sectionName ON sunbird.page_section (name);
+
+//Assessment Eval
+//id= using timestamp and env
+CREATE TABLE IF NOT EXISTS sunbird.assessment_eval(id text, contentId text, courseId text, userId text,assessmentItemId text,
+createdDate text,result text,score text,attemptId text,attemptedCount int,PRIMARY KEY (id));
+
+CREATE INDEX inx_ae_userId ON sunbird.assessment_eval (userId);
+CREATE INDEX inx_ae_contentId ON sunbird.assessment_eval (contentId);
+CREATE INDEX inx_ae_assessmentItemId ON sunbird.assessment_eval (assessmentItemId);
+CREATE INDEX inx_ae_courseId ON sunbird.assessment_eval (courseId);
+
+//Assessment item
+//id= using timestamp and userId
+CREATE TABLE IF NOT EXISTS sunbird.assessment_item(id text, contentId text, courseId text, userId text,assessmentItemId text,
+assessmentType text,attemptedDate text,createdDate text,timeTaken int,result text,score text,maxScore text,answers text,
+evaluationStatus boolean,processingStatus boolean,attemptId text,PRIMARY KEY (id));
+
+CREATE INDEX inx_ai_userId ON sunbird.assessment_item (userId);
+CREATE INDEX inx_ai_contentId ON sunbird.assessment_item (contentId);
+CREATE INDEX inx_ai_assessmentItemId ON sunbird.assessment_item (assessmentItemId);
+CREATE INDEX inx_ai_courseId ON sunbird.assessment_item (courseId);
+CREATE INDEX inx_ai_processingStatus ON sunbird.assessment_item (processingStatus);
+
+ALTER TABLE sunbird.course_management DROP noOfLectures;
+ALTER TABLE sunbird.course_management ADD noOfLectures int;
+ALTER TABLE sunbird.assessment_item DROP evaluationStatus;
+ALTER TABLE sunbird.assessment_item DROP processingStatus;
+ALTER TABLE sunbird.assessment_item ADD evaluationStatus boolean;
+ALTER TABLE sunbird.assessment_item ADD processingStatus boolean;
+ALTER TABLE sunbird.assessment_eval DROP assessmentItemId;
+ALTER TABLE sunbird.assessment_eval DROP maxScore;
+ALTER TABLE sunbird.page_management ADD organisationId text;
+ALTER TABLE sunbird.organisation ADD orgCode text;
+
+//2017-06-30 changes for user and organisation
+ALTER TABLE sunbird.user DROP zipcode;
+ALTER TABLE sunbird.user DROP city;
+ALTER TABLE sunbird.user DROP state;
+ALTER TABLE sunbird.user DROP language;
+ALTER TABLE sunbird.user ADD thumbnail text;
+ ALTER TABLE sunbird.user ADD dob text;
+ ALTER TABLE sunbird.user ADD regOrgId text;
+ ALTER TABLE sunbird.user ADD subject list<text>;
+ ALTER TABLE sunbird.user ADD language list<text>;
+ ALTER TABLE sunbird.user ADD grade list<text>;
+
+
+ CREATE TABLE IF NOT EXISTS sunbird.user_external_identity(id text, userId text, externalId text,source text,isVerified boolean,PRIMARY KEY (id));
+CREATE INDEX inx_uei_userid ON sunbird.user_external_identity (userId);
+CREATE INDEX inx_uei_externalId ON sunbird.user_external_identity (externalId);
+CREATE INDEX inx_uei_source ON sunbird.user_external_identity (source);
+
+//Address Type values(permanent, current, office, home)
+CREATE TABLE IF NOT EXISTS sunbird.address(id text, userId text, country text,state text,city text,zipCode text,addType text,createdDate text,createdBy text,updatedDate text,updatedBy text, PRIMARY KEY (id));
+CREATE INDEX inx_add_userid ON sunbird.address (userId);
+CREATE INDEX inx_add_addType ON sunbird.address (addType);
+
+CREATE TABLE IF NOT EXISTS sunbird.user_education(id text, userId text, courseName text,duration int,yearOfPassing int,percentage double,grade text,name text,boardOrUniversity text,addressId text,createdDate text,createdBy text,updatedDate text,updatedBy text, PRIMARY KEY (id));
+CREATE INDEX inx_ueu_userid ON sunbird.user_education (userId);
+
+CREATE TABLE IF NOT EXISTS sunbird.user_job_profile(id text, userId text, jobName text,role text,joiningDate text,endDate text,orgName text,orgId text,subject list<text>,addressId text,boardName text,isVerified boolean,isRejected boolean,verifiedDate text,verifiedBy text,createdDate text,createdBy text,updatedDate text,updatedBy text, PRIMARY KEY (id));
+CREATE INDEX inx_ujp_userid ON sunbird.user_job_profile (userId);
+
+CREATE TABLE IF NOT EXISTS sunbird.user_org(id text, userId text, role text,orgId text,orgJoinDate text,orgLeftDate text,isApproved boolean,
+isRejected boolean,approvedBy text,approvalDate text,updatedDate text,updatedBy text, PRIMARY KEY (id));
+CREATE INDEX inx_uorg_userid ON sunbird.user_org(userId);
+CREATE INDEX inx_uorg_orgId ON sunbird.user_org(orgId);
+
+CREATE TABLE IF NOT EXISTS sunbird.subject(id text, name text, PRIMARY KEY (id));
+CREATE INDEX inx_sb_name ON sunbird.subject(name);
+
+CREATE TABLE IF NOT EXISTS sunbird.role(id text, name text, PRIMARY KEY (id));
+CREATE INDEX inx_role_name ON sunbird.role(name);
+
+ALTER TABLE sunbird.organisation DROP city;
+ALTER TABLE sunbird.organisation DROP state;
+ALTER TABLE sunbird.organisation DROP zipcode;
+ALTER TABLE sunbird.organisation DROP relation;
+ALTER TABLE sunbird.organisation DROP createdbyname;
+
+ALTER TABLE sunbird.organisation ADD imgUrl text;
+ALTER TABLE sunbird.organisation ADD thumbnail text;
+ALTER TABLE sunbird.organisation ADD channel text;
+ALTER TABLE sunbird.organisation ADD preferredLanguage text;
+ALTER TABLE sunbird.organisation ADD homeUrl text;
+ALTER TABLE sunbird.organisation ADD isRootOrg boolean;
+ALTER TABLE sunbird.organisation ADD addId text;
+ALTER TABLE sunbird.organisation ADD noOfmembers int;
+ALTER TABLE sunbird.organisation ADD orgCode text;
+ALTER TABLE sunbird.organisation ADD isApproved boolean;
+ALTER TABLE sunbird.organisation ADD approvedBy text;
+ALTER TABLE sunbird.organisation ADD approvedDate text;
+//ALTER TABLE sunbird.organisation ADD isRejected boolean;
+
+CREATE INDEX inx_org_channel ON sunbird.organisation(channel);
+CREATE INDEX inx_org_orgType ON sunbird.organisation(orgType);
+CREATE INDEX inx_org_orgCode ON sunbird.organisation(orgCode);
+
+CREATE TABLE IF NOT EXISTS sunbird.org_type(id text, name text, PRIMARY KEY (id));
+CREATE INDEX inx_ot_name ON sunbird.org_type(name);
+
+CREATE TABLE IF NOT EXISTS sunbird.org_mapping(id text, orgIdOne text,relation text,orgIdTwo text, PRIMARY KEY (id));
+CREATE INDEX inx_om_orgIdOne ON sunbird.org_mapping(orgIdOne);
+CREATE INDEX inx_om_orgIdTwo ON sunbird.org_mapping(orgIdTwo);
+
+CREATE TABLE IF NOT EXISTS sunbird.role(id text, name text,status int, PRIMARY KEY (id));
+CREATE INDEX inx_ro_master_name ON sunbird.role(name);
+
+insert into role (id,name,status) values ('r_101','ADMIN',1);
+insert into role (id,name,status) values ('r_102','ORG_ADMIN',1);
+insert into role (id,name,status) values ('r_103','ORG_MODERATOR',1);
+insert into role (id,name,status) values ('r_104','CONTENT_CREATOR',1);
+insert into role (id,name,status) values ('r_105','CONTENT_REVIEWER',1);
+insert into role (id,name,status) values ('r_106','ORG_MEMBER',1);
+
+
+ALTER TABLE sunbird.user ADD rootOrgId text;
+ALTER TABLE sunbird.address ADD addressLine1 text;
+ALTER TABLE sunbird.address ADD addressLine2 text;
+ALTER TABLE sunbird.user_education ADD degree text;
+
+insert into sunbird.role (id,name,status) values ('r_101','SYSTEM_ADMINISTRATION',1);
+insert into sunbird.role (id,name,status) values ('r_102','ORG_MANAGEMENT',1);
+insert into sunbird.role (id,name,status) values ('r_103','MEMBERSHIP_MANAGEMENT',1);
+insert into sunbird.role (id,name,status) values ('r_104','CONTENT_CREATION',1);
+insert into sunbird.role (id,name,status) values ('r_105','CONTENT_REVIEW',1);
+insert into sunbird.role (id,name,status) values ('r_106','CONTENT_CURATION',1);
+insert into sunbird.role (id,name,status) values ('r_107','PUBLIC',1);
+
+
+CREATE TABLE IF NOT EXISTS sunbird.master_action(id text, name text, PRIMARY KEY (id));
+CREATE INDEX inx_ma_name ON sunbird.master_action(name);
+
+CREATE TABLE IF NOT EXISTS sunbird.url_action(id text, url text,name text, PRIMARY KEY (id));
+CREATE INDEX inx_ua_name ON sunbird.url_action(name);
+CREATE INDEX inx_ua_url ON sunbird.url_action(url);
+
+CREATE TABLE IF NOT EXISTS sunbird.action_group(id text, actionId list<text>,groupName text, PRIMARY KEY (id));
+CREATE INDEX inx_uacg_groupName ON sunbird.action_group(groupName);
+
+CREATE TABLE IF NOT EXISTS sunbird.user_action_role(id text, actionGroupId list<text>,roleId text, PRIMARY KEY (id));
+CREATE INDEX inx_uactr_roleId ON sunbird.user_action_role(roleId);
+
+insert into sunbird.url_action(id,url,name) values ('1','','suspendOrg');
+insert into sunbird.url_action(id,url,name) values ('2','','suspendUser');
+insert into sunbird.url_action(id,url,name) values ('3','','createOrg');
+insert into sunbird.url_action(id,url,name) values ('4','','updateOrg');
+insert into sunbird.url_action(id,url,name) values ('5','','updateUser');
+insert into sunbird.url_action(id,url,name) values ('6','','addMember');
+insert into sunbird.url_action(id,url,name) values ('7','','removeOrg');
+insert into sunbird.url_action(id,url,name) values ('8','','createUser');
+
+insert into sunbird.url_action(id,url,name) values ('9','','removeMember');
+insert into sunbird.url_action(id,url,name) values ('10','','suspendMember');
+insert into sunbird.url_action(id,url,name) values ('11','','createCourse');
+insert into sunbird.url_action(id,url,name) values ('12','','updateCourse');
+insert into sunbird.url_action(id,url,name) values ('13','','createContent');
+insert into sunbird.url_action(id,url,name) values ('14','','updateContent');
+insert into sunbird.url_action(id,url,name) values ('15','','publishCourse');
+insert into sunbird.url_action(id,url,name) values ('16','','publishContent');
+
+insert into sunbird.url_action(id,url,name) values ('17','','flagCourse');
+insert into sunbird.url_action(id,url,name) values ('18','','flagContent');
+insert into sunbird.url_action(id,url,name) values ('19','','getProfile');
+insert into sunbird.url_action(id,url,name) values ('20','','updateProfile');
+insert into sunbird.url_action(id,url,name) values ('21','','readCourse');
+insert into sunbird.url_action(id,url,name) values ('22','','readContent');
+insert into sunbird.url_action(id,url,name) values ('23','','rateCourse');
+insert into sunbird.url_action(id,url,name) values ('24','','rateContent');
+insert into sunbird.url_action(id,url,name) values ('25','','searchCourse');
+insert into sunbird.url_action(id,url,name) values ('26','','searchContent');
+
+insert into sunbird.action_group(id,actionId,groupName) values ('ag_12',['1','2'],'SYSTEM_ADMINISTRATION');
+insert into sunbird.action_group(id,actionId,groupName) values ('ag_13',['3','4','7','8','5'],'ORG_MANAGEMENT');
+insert into sunbird.action_group(id,actionId,groupName) values ('ag_14',['6','9','10'],'MEMBERSHIP_MANAGEMENT');
+
+insert into sunbird.action_group(id,actionId,groupName) values ('ag_15',['11','12','13','14'],'CONTENT_CREATION');
+insert into sunbird.action_group(id,actionId,groupName) values ('ag_16',['15','16'],'CONTENT_REVIEW');
+insert into sunbird.action_group(id,actionId,groupName) values ('ag_17',['17','18','10'],'CONTENT_CURATION');
+insert into sunbird.action_group(id,actionId,groupName) values ('ag_17',['19','20','21','22','23','24','25','26'],'PUBLIC');
+
+ALTER TABLE sunbird.user ADD loginId text;
+ALTER TABLE sunbird.user ADD provider text;
+ALTER TABLE sunbird.user_external_identity ADD idType text;
+
+insert into sunbird.user_action_role(id,actiongroupid,roleid) values ('uar_1',['ag_17'],'r_107');
+insert into sunbird.user_action_role(id,actiongroupid,roleid) values ('uar_2',['ag_13'],'r_102');
+insert into sunbird.user_action_role(id,actiongroupid,roleid) values ('uar_3',['ag_14'],'r_103');
+ insert into sunbird.user_action_role(id,actiongroupid,roleid) values ('uar_3',['ag_15'],'r_104');
+ insert into sunbird.user_action_role(id,actiongroupid,roleid) values ('uar_3',['ag_16'],'r_105');
+ insert into sunbird.user_action_role(id,actiongroupid,roleid) values ('uar_3',['ag_12'],'r_101');
+
+ALTER TABLE sunbird.organisation DROP addId;
+ALTER TABLE sunbird.organisation ADD addressId text;
+ALTER TABLE sunbird.user ADD roles List<text>;
+
+
+ CREATE TABLE IF NOT EXISTS sunbird.role_group(id text, name text, PRIMARY KEY (id));
+ insert into sunbird.role_group (id,name) values ('SYSTEM_ADMINISTRATION','System Administration');
+ insert into sunbird.role_group (id,name) values ('ORG_MANAGEMENT','Org Management');
+ insert into sunbird.role_group (id,name) values ('MEMBERSHIP_MANAGEMENT','Membership Management');
+ insert into sunbird.role_group (id,name) values ('CONTENT_CREATION','Content Creation');
+ insert into sunbird.role_group (id,name) values ('CONTENT_CURATION','Content Curation');
+ insert into sunbird.role_group (id,name) values ('CONTENT_REVIEW','Content Review');
+ drop table sunbird.role;
+CREATE TABLE IF NOT EXISTS sunbird.role(id text, name text,roleGroupId List<text>,status int, PRIMARY KEY (id));
+CREATE INDEX inx_ro_master_name ON sunbird.role(name);
+ insert into sunbird.role (id,name,rolegroupid,status) values ('ADMIN','Admin',['SYSTEM_ADMINISTRATION','ORG_MANAGEMENT'],1);
+ insert into sunbird.role (id,name,rolegroupid,status) values ('ORG_ADMIN','Org Admin',['ORG_MANAGEMENT','MEMBERSHIP_MANAGEMENT'],1);
+ insert into sunbird.role (id,name,rolegroupid,status) values ('ORG_MODERATOR','Org Moderator',['MEMBERSHIP_MANAGEMENT'],1);
+ insert into sunbird.role (id,name,rolegroupid,status) values ('CONTENT_CREATOR','Content Creator',['CONTENT_CREATION'],1);
+ insert into sunbird.role (id,name,rolegroupid,status) values ('CONTENT_REVIEWER','Content Reviewer',['CONTENT_CREATION','CONTENT_CURATION','CONTENT_REVIEW'],1);
+ drop table sunbird.url_action;
+ CREATE TABLE IF NOT EXISTS sunbird.url_action(id text, url list<text>,name text, PRIMARY KEY (id));
+ CREATE INDEX inx_ua_name ON sunbird.url_action(name);
+CREATE INDEX inx_ua_url ON sunbird.url_action(url);
+
+insert into sunbird.url_action (id,name) values ('suspendOrg','suspendOrg');
+ insert into sunbird.url_action (id,name) values ('suspendUser','suspendUser');
+ insert into sunbird.url_action (id,name) values ('createOrg','createOrg');
+ insert into sunbird.url_action (id,name) values ('updateOrg','updateOrg');
+ insert into sunbird.url_action (id,name) values ('removeOrg','removeOrg');
+ insert into sunbird.url_action (id,name) values ('createUser','createUser');
+ insert into sunbird.url_action (id,name) values ('updateUser','updateUser');
+ insert into sunbird.url_action (id,name) values ('ORG_MANAGEMENT','Org Management');
+ insert into sunbird.url_action (id,name) values ('createOrg','createOrg');
+ insert into sunbird.url_action (id,name) values ('addMember','addMember');
+ insert into sunbird.url_action (id,name) values ('removeMember','removeMember');
+ insert into sunbird.url_action (id,name) values ('suspendMember','suspendMember');
+ insert into sunbird.url_action (id,name) values ('createCourse','createCourse');
+ insert into sunbird.url_action (id,name) values ('updateCourse','updateCourse');
+ insert into sunbird.url_action (id,name) values ('createContent','createContent');
+ insert into sunbird.url_action (id,name) values ('updateContent','updateContent');
+ insert into sunbird.url_action (id,name) values ('flagCourse','flagCourse');
+ insert into sunbird.url_action (id,name) values ('flagContent','flagContent');
+ insert into sunbird.url_action (id,name) values ('publishCourse','publishCourse');
+ insert into sunbird.url_action (id,name) values ('publishContent','publishContent');
+ ALTER table sunbird.role_group add url_action_ids list<text>;
+
+ update sunbird.role_group set url_action_ids=['addMember','removeMember','suspendMember'] where id='MEMBERSHIP_MANAGEMENT';
+ update sunbird.role_group set url_action_ids=['createCourse','updateCourse','createContent','updateContent'] where id='CONTENT_CREATION';
+ update sunbird.role_group set url_action_ids=['suspendOrg','suspendUser'] where id='SYSTEM_ADMINISTRATION';
+ update sunbird.role_group set url_action_ids=['publishCourse','publishContent'] where id='CONTENT_REVIEW';
+ update sunbird.role_group set url_action_ids=['createOrg','updateOrg','removeOrg','createUser','updateUser'] where id='ORG_MANAGEMENT';
+ update sunbird.role_group set url_action_ids=['flagCourse','flagContent'] where id='CONTENT_CURATION';
+
+ update sunbird.url_action set url=['/v1/course/publish'] where id='publishContent';
+update sunbird.url_action set url=['/v1/user/create'] where id='addMember';
+ update sunbird.url_action set url=['v1/course/create'] where id='createCourse';
+update sunbird.url_action set url=['/v1/user/create'] where id='createUser';
+ update sunbird.url_action set url=['/v1/course/publish'] where id='publishCourse';
+update sunbird.url_action set url=['/v1/organisation/update'] where id='updateOrg';
+
+drop index inx_uorg_orgid;
+ALTER TABLE sunbird.user_org DROP orgid;
+ALTER TABLE sunbird.user_org ADD organisationid text;
+ALTER TABLE sunbird.user_org ADD addedby text;
+ALTER TABLE sunbird.user_org ADD addedbyname text;
+CREATE INDEX inx_uorg_orgid ON sunbird.user_org (organisationid);
+
+
+/*
+creation of id= one way hash of (userId##courseId##batchId) here courseId is identifier of EkStep course
+toc url is generated from ekStep
+here status is (default(0),inProgress(1),completed(2))
+progress is no of content completed
+*/
+CREATE TABLE IF NOT EXISTS sunbird.user_courses(id text, courseId text, courseName text, userId text, batchId text, enrolledDate text,
+description text,tocUrl text,status int,active boolean,delta text,grade text,progress int,lastReadContentId text,
+lastReadContentStatus int,addedBy text,courseLogoUrl text, dateTime timestamp, contentId text, PRIMARY KEY (id));
+
+CREATE INDEX inx_ucs_userId ON sunbird.user_courses (userId);
+CREATE INDEX inx_ucs_courseId ON sunbird.user_courses (courseId);
+CREATE INDEX inx_ucs_batchId ON sunbird.user_courses (batchId);
+CREATE INDEX inx_ucs_course_name ON sunbird.user_courses (courseName);
+CREATE INDEX inx_ucs_status ON sunbird.user_courses (status);
+
+ALTER TABLE sunbird.user_external_identity DROP source;
+ALTER TABLE sunbird.user_external_identity ADD provider text;
+ALTER TABLE sunbird.user_external_identity ADD externalIdValue text;
+DROP INDEX inx_uei_source;
+CREATE INDEX inx_uei_provider ON sunbird.user_external_identity (provider);
+
+//changes 7 July 2017 updated organization table
+ALTER TABLE sunbird.organisation ADD rootOrgID text;
+ALTER TABLE sunbird.org_mapping ADD rootOrgID text;
+CREATE TABLE IF NOT EXISTS sunbird.org_type(id text, name text, PRIMARY KEY (id));
+DROP INDEX sunbird.inx_org_status;
+ALTER TABLE sunbird.organisation DROP status ;
+ALTER TABLE sunbird.organisation ADD status text;
+
+CREATE INDEX inx_org_status ON sunbird.organisation (status);
+
+CREATE INDEX inx_u_loginId ON sunbird.user(loginId);
+
+ALTER TABLE sunbird.user_job_profile ADD isCurrentJob boolean;
+ALTER TABLE sunbird.content_consumption ADD progress int;
+ALTER TABLE sunbird.content_consumption DROP viewPosition;
+
+//changes on 12th july 2017
+ALTER TABLE sunbird.user_job_profile ADD isDeleted boolean;
+ALTER TABLE sunbird.user_education ADD isDeleted boolean;
+ALTER TABLE sunbird.address ADD isDeleted boolean;
+ALTER TABLE sunbird.user_org ADD isDeleted boolean;
+ALTER TABLE sunbird.user ADD profileSummary text;
+
+ALTER TABLE sunbird.organisation ADD source text;
+ALTER TABLE sunbird.organisation ADD externalId text;
+
+ALTER TABLE sunbird.user_org drop role;
+ALTER TABLE sunbird.user_org ADD roles list<text>;
+
+//to export data from csv to cassandra table run below command(for page_section and page_management table)
+// change the path of csv file
+//COPY sunbird.page_management(id, appmap,createdby ,createddate ,name ,organisationid ,portalmap ,updatedby ,updateddate ) FROM '/tmp/cql/pageMgmt.csv';
+
+//COPY sunbird.page_section(id, alt,createdby ,createddate ,description ,display ,imgurl ,name,searchquery , sectiondatatype ,status , updatedby ,updateddate) FROM '/tmp/cql/pageSection.csv';
+
+//changes on 14th july 2017
+
+ALTER TABLE sunbird.user_org DROP role;
+ALTER TABLE sunbird.user_org ADD roles List<text>;
+
+// insert default root organisation -- July 15,2017
+ALTER TABLE sunbird.organisation ADD isDefault boolean;
+
+INSERT INTO sunbird.organisation(id , isRootOrg , isDefault, status) values('ORG_001' , true , true, 1);
+
+ALTER TABLE sunbird.user_courses ADD leafNodesCount int;
+// change organisation status field from text to int -- July 19,2017
+drop index inx_org_status;
+ALTER TABLE sunbird.organisation DROP status ;
+ALTER TABLE sunbird.organisation ADD status int;
+CREATE INDEX inx_org_status ON sunbird.organisation (status);
+
+// add isdeleted field to user table -- July 31,2017
+ALTER TABLE sunbird.user ADD isDeleted boolean;
+
+//added for course batch
+CREATE TABLE IF NOT EXISTS sunbird.course_batch(id text, courseId text,courseCreator text,createdBy text,createdOn text,enrollmentType text,startDate text,endDate text,name text,description text,status int,lastUpdatedOn text,mentors List<text>,participants List<text>,createdFor List<text>,PRIMARY KEY (id));
+CREATE INDEX inx_cou_bat_status ON sunbird.course_batch (status);
+CREATE INDEX inx_cou_bat_courseId ON sunbird.course_batch (courseId);
+CREATE INDEX inx_cou_bat_courseCreator ON sunbird.course_batch (courseCreator);
+CREATE INDEX inx_cou_bat_createdBy ON sunbird.course_batch (createdBy);
+CREATE INDEX inx_cou_bat_enrolmentType ON sunbird.course_batch (enrolmentType);
+ALTER TABLE sunbird.course_batch DROP createdon;
+ALTER TABLE sunbird.course_batch ADD createdDate text;
+ALTER TABLE sunbird.course_batch DROP lastupdatedon;
+ALTER TABLE sunbird.course_batch ADD updatedDate text;
+
+ALTER TABLE sunbird.organisation add provider text;
+COPY sunbird.organisation (id,source) TO '/tmp/cql/source.csv';
+COPY sunbird.organisation (id,provider) FROM '/tmp/cql/source.csv';
+ALTER TABLE sunbird.organisation DROP source;
+//Id is courseId
+CREATE TABLE IF NOT EXISTS sunbird.course_publish_status(id text,submitDate text,status int,PRIMARY KEY (id));
+
+// convert data type of participants from List to map -- Aug 7,2017
+ALTER TABLE sunbird.course_batch DROP participants;
+ALTER TABLE sunbird.course_batch ADD participant map<text,boolean>;
+
+ALTER TABLE sunbird.course_batch ADD courseAdditionalInfo map<text,text>;
+DROP INDEX inx_cou_bat_enrolmentType;
+ALTER TABLE sunbird.course_batch DROP enrolmentType;
+ALTER TABLE sunbird.course_batch ADD enrollmentType text;
+CREATE INDEX inx_cou_bat_enrollmentType ON sunbird.course_batch (enrollmentType);
+
+// adding the batchId column to content consumption table -- Aug 8,2017
batchId text; + +CREATE TABLE IF NOT EXISTS sunbird.bulk_upload_process(id text, status int,data text,successResult text,failureResult text,uploadedBy text,uploadedDate text,processStartTime text,processEndTime text,ObjectType text,organisationId text, PRIMARY KEY (id)); +CREATE INDEX inx_status ON sunbird.bulk_upload_process(status); +insert into sunbird.role_group (id,name) values ('COURSE_MENTOR','Course Mentor'); +update sunbird.role_group set url_action_ids=['courseMentor'] where id='COURSE_MENTOR'; + +ALTER TABLE sunbird.course_batch ADD countIncrementStatus boolean; +ALTER TABLE sunbird.course_batch ADD countIncrementDate text; +ALTER TABLE sunbird.course_batch ADD countDecrementStatus boolean; +ALTER TABLE sunbird.course_batch ADD countDecrementDate text; + +CREATE INDEX inx_org_provider ON sunbird.organisation (provider); +CREATE INDEX inx_org_externalId ON sunbird.organisation (externalId); + +insert into sunbird.url_action (id,name) values ('orgupload','orgupload'); +update sunbird.role_group set url_action_ids=['suspendOrg','suspendUser','orgupload'] where id='SYSTEM_ADMINISTRATION'; +update sunbird.url_action set url=['/v1/org/suspend'] where id='suspendOrg'; +update sunbird.url_action set url=['/v1/user/block'] where id='suspendUser'; +update sunbird.url_action set url=['/v1/org/upload'] where id='orgupload'; +insert into sunbird.role (id,name,rolegroupid,status) values ('COURSE_MENTOR','Course Mentor',['COURSE_MENTOR'],1); + +// changes on Aug 16,2017 +ALTER TABLE sunbird.organisation ADD slug text; +ALTER TABLE sunbird.organisation ADD hashTagId text; +ALTER TABLE sunbird.organisation ADD theme text; +ALTER TABLE sunbird.user_org ADD position text; +ALTER TABLE sunbird.course_batch ADD hashTagId text; +CREATE INDEX inx_cps_status ON sunbird.course_publish_status (status); +ALTER TABLE sunbird.user ADD location text; +ALTER TABLE sunbird.organisation ADD contactDetails map<text,text>; +insert into sunbird.role_group (id,name) values ('FLAG_REVIEWER ','Flag Reviewer'); +insert into sunbird.url_action (id,name) values ('FLAG_REVIEWER','flag Review'); +insert into sunbird.role (id,name,rolegroupid,status) values ('FLAG_REVIEWER','Flag Reviewer',['FLAG_REVIEWER'],1); +update sunbird.role_group set url_action_ids=['flagReview'] where id='FLAG_REVIEWER'; +ALTER TABLE sunbird.organisation DROP contactdetails; +ALTER TABLE sunbird.organisation ADD contactdetail text; + +update course_batch set countdecrementstatus=false ,countincrementstatus=false where id in ('01231295903433523233', '01231300152093900880', '01231362610791219226', '012311485318971392184', '012311507396763648187', '01231446621414195215', '012311399697334272167', '012310182931898368117', '01231437156529766411', '0123143731552501769', '012315729890295808125', '01231515693522124824', '012315105801404416176', '012315113121030144181', '0123156336446914560', '0123152537517178880', '012311452395159552176', '012310182079651840116', '01231365904918118428', '012315115229880320201', '01231297804052070441', '01231295967369625632', '01231437649992908813', '01231298538289561658', '01231290682958643227', '012310176900218880115', '012311394333097984166', '012311400451145728168', '01231513667683942421', '012315115088601088183', '0123153123053158404', '01231363201368064027', '01231290412171264026', '0123151248760258560', '01231514986607411223', '012311580166266880203', '01231217800957952058', '01231304911540224098', '0123152769465384961', '012310190924496896119', '01231569596413542494', '01231299360593510461', '01231437480674099212', 
'012310195742662656126', '012311489586135040185', '01231298390404300857', '012311415973486592169', '01231298670022656060', '01231569429976678493', '01231294824787148831', '012310173385113600114', '01231385960518451255', '012315785629794304137', '01231296418316288037', '012310193544601600118', '012311447751262208177', '012311507213688832188', '01231437595463680010', '0123156778024960005', '012311505273479168186', '01231300602153369682', '01231298422152396859', '0123152820520222722', '012311511755972608189', '012315179912159232103', '012315751464787968136', '012315736241045504130', '01231288911727001615', '01231514371529113622', '01231366031184691229', '01231383864920473645', '012315107363119104182', '012311574182944768202', '01231516784369664088', '01231443633637785614', '01231300661683814481' ); +insert into sunbird.role_group (id,name) values ('COURSE_ADMIN','Course Admin'); +update sunbird.role_group set url_action_ids=['courseAdmin'] where id='COURSE_ADMIN'; +insert into sunbird.role (id,name,rolegroupid,status) values ('COURSE_ADMIN','Course Admin',['COURSE_ADMIN'],1); +insert into sunbird.role_group (id,name) values ('COURSE_CREATOR','Course Creator'); +update sunbird.role_group set url_action_ids=['courseCreator'] where id='COURSE_CREATOR'; +insert into sunbird.role (id,name,rolegroupid,status) values ('COURSE_CREATOR','Course Creator',['COURSE_CREATOR'],1); +update sunbird.role_group set name='Flag Reviewer' where id='FLAG_REVIEWER'; \ No newline at end of file diff --git a/ansible/roles/cassandra-cql-update/templates/pageMgmt.csv b/ansible/roles/cassandra-cql-update/templates/pageMgmt.csv new file mode 100644 index 0000000000000000000000000000000000000000..e010f0fb409461f47f554da6b0e1bca2072fcce4 --- /dev/null +++ b/ansible/roles/cassandra-cql-update/templates/pageMgmt.csv @@ -0,0 +1,6 @@ +0122838911932661768,"[{\"id\":\"01228383082462412826\",\"index\":1,\"group\":1},{\"id\":\"01228383384379392023\",\"index\":1,\"group\":2}]",ad54e968-d52f-30a0-bdba-de182aab43b1,2017-07-08 07:17:36:831+0000,Resource,,"[{\"id\":\"01228383082462412826\",\"index\":1,\"group\":1},{\"id\":\"01228383384379392023\",\"index\":1,\"group\":2}]",, +01228382486252748821,"[{\"id\":\"01228382278062080019\",\"index\":1,\"group\":1},{\"id\":\"01228382243946496017\",\"index\":1,\"group\":2}]",490ed1d2-c69d-4cf0-a50c-f37e658c128d,2017-07-08 05:03:29:672+0000,Course,,"[{\"id\":\"01228382278062080019\",\"index\":1,\"group\":1},{\"id\":\"01228382243946496017\",\"index\":1,\"group\":2}]",, +0122838909618585607,"[{\"id\":\"01228383082462412826\",\"index\":1,\"group\":1},{\"id\":\"01228383384379392023\",\"index\":1,\"group\":2}]",ad54e968-d52f-30a0-bdba-de182aab43b1,2017-07-08 07:17:36:827+0000,Resource,,"[{\"id\":\"01228383082462412826\",\"index\":1,\"group\":1},{\"id\":\"01228383384379392023\",\"index\":1,\"group\":2}]",, +01228394137835929612,"[{\"id\":\"01228383082462412826\",\"index\":1,\"group\":1},{\"id\":\"01228383384379392023\",\"index\":1,\"group\":2}]",ad54e968-d52f-30a0-bdba-de182aab43b1,2017-07-08 08:54:56:574+0000,Resourcessss,,"[{\"id\":\"01228383082462412826\",\"index\":1,\"group\":1},{\"id\":\"01228383384379392023\",\"index\":1,\"group\":2}]",ad54e968-d52f-30a0-bdba-de182aab43b1,2017-07-08 09:01:26:990+0000 +01228382478150860822,"[{\"id\":\"01228382278062080019\",\"index\":1,\"group\":1},{\"id\":\"01228382243946496017\",\"index\":1,\"group\":2}]",490ed1d2-c69d-4cf0-a50c-f37e658c128d,2017-07-08 
05:03:29:679+0000,Course,,"[{\"id\":\"01228382278062080019\",\"index\":1,\"group\":1},{\"id\":\"01228382243946496017\",\"index\":1,\"group\":2}]",, +01228393775303884811,"[{\"id\":\"01228383082462412826\",\"index\":1,\"group\":1},{\"id\":\"01228383384379392023\",\"index\":1,\"group\":2}]",ad54e968-d52f-30a0-bdba-de182aab43b1,2017-07-08 08:54:56:572+0000,Resources,,"[{\"id\":\"01228383082462412826\",\"index\":1,\"group\":1},{\"id\":\"01228383384379392023\",\"index\":1,\"group\":2}]",, \ No newline at end of file diff --git a/ansible/roles/cassandra-cql-update/templates/pageSection.csv b/ansible/roles/cassandra-cql-update/templates/pageSection.csv new file mode 100644 index 0000000000000000000000000000000000000000..b10991659d0499cc81c5c01cfdb5b7913127476f --- /dev/null +++ b/ansible/roles/cassandra-cql-update/templates/pageSection.csv @@ -0,0 +1,12 @@ +01228383133972889627,,490ed1d2-c69d-4cf0-a50c-f37e658c128d,2017-07-08 05:10:20:043+0000,,"{\"name\":{\"en\":\"Popular Template\",\"hi\":\"????????\"}}",,Popular Template,"{\"request\":{\"query\":\"\",\"filters\":{\"language\":[\"English\"],\"contentType\":[\"Template\"]},\"limit\":10,\"sort_by\":{\"lastUpdatedOn\":\"desc\"}}}",content,1,, +01228382966064742425,,490ed1d2-c69d-4cf0-a50c-f37e658c128d,2017-07-08 05:09:40:595+0000,,"{\"name\":{\"en\":\"Popular Worksheet\",\"hi\":\"????????\"}}",,Popular Worksheet,"{\"request\":{\"query\":\"\",\"filters\":{\"language\":[\"English\"],\"contentType\":[\"Worksheet\"]},\"limit\":10,\"sort_by\":{\"lastUpdatedOn\":\"desc\"}}}",content,1,, +01228382278062080019,,490ed1d2-c69d-4cf0-a50c-f37e658c128d,2017-07-08 05:00:55:600+0000,,"{\"name\":{\"en\":\"Latest Courses\",\"hi\":\"????????\"}}",,Latest Courses,"{\"request\":{\"filters\":{\"contentType\":[\"Course\"],\"objectType\":[\"Content\"],\"status\":[\"Live\"]},\"sort_by\":{\"lastPublishedOn\":\"desc\"},\"limit\":10}}",course,1,, +01228383082462412826,,490ed1d2-c69d-4cf0-a50c-f37e658c128d,2017-07-08 05:09:40:595+0000,,"{\"name\":{\"en\":\"Popular Worksheet\",\"hi\":\"????????\"}}",,Popular Worksheet,"{\"request\":{\"query\":\"\",\"filters\":{\"language\":[\"English\"],\"contentType\":[\"Worksheet\"]},\"limit\":10,\"sort_by\":{\"lastUpdatedOn\":\"desc\"}}}",content,1,, +01228382897002905629,,490ed1d2-c69d-4cf0-a50c-f37e658c128d,2017-07-08 05:11:14:511+0000,,"{\"name\":{\"en\":\"Popular Simulation\",\"hi\":\"????????\"}}",,Popular Simulation,"{\"request\":{\"query\":\"\",\"filters\":{\"language\":[\"English\"],\"contentType\":[\"Simulation\"]},\"limit\":10,\"sort_by\":{\"lastUpdatedOn\":\"desc\"}}}",content,1,, +01228382681137152020,,490ed1d2-c69d-4cf0-a50c-f37e658c128d,2017-07-08 05:00:55:601+0000,,"{\"name\":{\"en\":\"Latest Courses\",\"hi\":\"????????\"}}",,Latest Courses,"{\"request\":{\"filters\":{\"contentType\":[\"Course\"],\"objectType\":[\"Content\"],\"status\":[\"Live\"]},\"sort_by\":{\"lastPublishedOn\":\"desc\"},\"limit\":10}}",course,1,, +01228382662997606424,,490ed1d2-c69d-4cf0-a50c-f37e658c128d,2017-07-08 05:08:56:272+0000,,"{\"name\":{\"en\":\"Popular Story\",\"hi\":\"????????\"}}",,Popular Story,"{\"request\":{\"query\":\"\",\"filters\":{\"language\":[\"English\"],\"contentType\":[\"Story\"]},\"limit\":10,\"sort_by\":{\"lastUpdatedOn\":\"desc\"}}}",content,1,, +01228382337862041618,,490ed1d2-c69d-4cf0-a50c-f37e658c128d,2017-07-08 05:00:16:949+0000,,"{\"name\":{\"en\":\"Popular Courses\",\"hi\":\"????????\"}}",,Popular 
Courses,"{\"request\":{\"filters\":{\"contentType\":[\"Course\"],\"objectType\":[\"Content\"],\"status\":[\"Live\"]},\"sort_by\":{\"name\":\"asc\"},\"limit\":10}}",course,1,, +01228383171081011228,,490ed1d2-c69d-4cf0-a50c-f37e658c128d,2017-07-08 05:10:20:049+0000,,"{\"name\":{\"en\":\"Popular Template\",\"hi\":\"????????\"}}",,Popular Template,"{\"request\":{\"query\":\"\",\"filters\":{\"language\":[\"English\"],\"contentType\":[\"Template\"]},\"limit\":10,\"sort_by\":{\"lastUpdatedOn\":\"desc\"}}}",content,1,, +01228382243946496017,,490ed1d2-c69d-4cf0-a50c-f37e658c128d,2017-07-08 05:00:16:949+0000,,"{\"name\":{\"en\":\"Popular Courses\",\"hi\":\"????????\"}}",,Popular Courses,"{\"request\":{\"filters\":{\"contentType\":[\"Course\"],\"objectType\":[\"Content\"],\"status\":[\"Live\"]},\"sort_by\":{\"name\":\"asc\"},\"limit\":10}}",course,1,, +01228383384379392023,,490ed1d2-c69d-4cf0-a50c-f37e658c128d,2017-07-08 05:08:56:272+0000,,"{\"name\":{\"en\":\"Popular Story\",\"hi\":\"????????\"}}",,Popular Story,"{\"request\":{\"query\":\"\",\"filters\":{\"language\":[\"English\"],\"contentType\":[\"Story\"]},\"limit\":10,\"sort_by\":{\"lastUpdatedOn\":\"desc\"}}}",content,1,, +01228383260541747230,,490ed1d2-c69d-4cf0-a50c-f37e658c128d,2017-07-08 05:11:14:512+0000,,"{\"name\":{\"en\":\"Popular Simulation\",\"hi\":\"????????\"}}",,Popular Simulation,"{\"request\":{\"query\":\"\",\"filters\":{\"language\":[\"English\"],\"contentType\":[\"Simulation\"]},\"limit\":10,\"sort_by\":{\"lastUpdatedOn\":\"desc\"}}}",content,1,, \ No newline at end of file diff --git a/ansible/roles/cassandra-restore/README.md b/ansible/roles/cassandra-restore/README.md new file mode 100755 index 0000000000000000000000000000000000000000..fac9c2e53a165eda3adf4988729d87f576527005 --- /dev/null +++ b/ansible/roles/cassandra-restore/README.md @@ -0,0 +1,78 @@ +Role Name +========= + +An [Ansible] role to install [Cassandra] + +Requirements +------------ + +- Oracle Java 8 + +Install [Ansible] requirements `ansible-galaxy install -r requirements.yml` + +Role Variables +-------------- + +``` +--- +# defaults file for ansible-cassandra +cassandra_cluster_group: 'cassandra-cluster-nodes' +cassandra_cluster_name: 'Test Cluster' +cassandra_cluster_setup: false +cassandra_commitlog_directory: '/var/lib/cassandra/commitlog' +cassandra_config: false +cassandra_debian_repo_info: + repo: 'deb http://www.apache.org/dist/cassandra/debian 36x main' + repo_key: 'https://www.apache.org/dist/cassandra/KEYS' +cassandra_data_file_directories: + - '/var/lib/cassandra/data' +cassandra_hints_directory: '/var/lib/cassandra/hints' +cassandra_listen_address: "{{ hostvars[inventory_hostname]['ansible_' + cassandra_listen_interface]['ipv4']['address'] }}" +cassandra_listen_interface: 'eth1' +cassandra_log_dir: '/var/log/cassandra' +cassandra_root_dir: '/etc/cassandra' +cassandra_saved_caches_directory: '/var/lib/cassandra/saved_caches' +cassandra_seeds: '127.0.0.1' # Only used if not setting up a cluster +cassandra_version: '3.6' +``` + +Dependencies +------------ + +Reference requirements + +Example Playbook +---------------- + +``` +--- +- hosts: cassandra-cluster-nodes + become: true + vars: + cassandra_cluster_setup: true + cassandra_config: true + pri_domain_name: 'test.vagrant.local' + roles: + - role: ansible-oracle-java8 + - role: ansible-cassandra + tasks: +``` + +License +------- + +BSD + +Author Information +------------------ + + +Larry Smith Jr. 
+- [@mrlesmithjr]
+- [EveryThingShouldBeVirtual]
+- mrlesmithjr [at] gmail.com
+
+[@mrlesmithjr]: <https://twitter.com/mrlesmithjr>
+[EveryThingShouldBeVirtual]: <http://everythingshouldbevirtual.com>
+[Ansible]: <https://www.ansible.com>
+[Cassandra]: <http://cassandra.apache.org/>
diff --git a/ansible/roles/cassandra-restore/handlers/main.yml b/ansible/roles/cassandra-restore/handlers/main.yml
new file mode 100755
index 0000000000000000000000000000000000000000..c6f499a8fc6d5a18e2560479c66061851cd4d93f
--- /dev/null
+++ b/ansible/roles/cassandra-restore/handlers/main.yml
@@ -0,0 +1,6 @@
+---
+# handlers file for ansible-cassandra
+- name: "restart cassandra"
+  service:
+    name: "cassandra"
+    state: "restarted"
diff --git a/ansible/roles/cassandra-restore/meta/main.yml b/ansible/roles/cassandra-restore/meta/main.yml
new file mode 100644
index 0000000000000000000000000000000000000000..23b18a800a4645387a83ac0873b6f893d62c081d
--- /dev/null
+++ b/ansible/roles/cassandra-restore/meta/main.yml
@@ -0,0 +1,2 @@
+dependencies:
+  - azure-cli
\ No newline at end of file
diff --git a/ansible/roles/cassandra-restore/tasks/main.yml b/ansible/roles/cassandra-restore/tasks/main.yml
new file mode 100755
index 0000000000000000000000000000000000000000..6aac8383fda300dc4f295aad8b9ef474fb94f540
--- /dev/null
+++ b/ansible/roles/cassandra-restore/tasks/main.yml
@@ -0,0 +1,50 @@
+- name: Stop cassandra before restoring data
+  become: yes
+  service: name=cassandra state=stopped
+
+- set_fact:
+    cassandra_restore_gzip_file_path: "{{ cassandra.restore_dir }}/{{ cassandra_restore_gzip_file_name }}"
+
+- name: Download the backup from azure blob storage
+  command: az storage blob download --name {{ cassandra_restore_gzip_file_name }} --file {{ cassandra_restore_gzip_file_path }} --container-name {{ cassandra.backup_azure_container_name }}
+  environment:
+    AZURE_STORAGE_ACCOUNT: "{{ cassandra.backup_azure_storage_account_name }}"
+    AZURE_STORAGE_KEY: "{{ cassandra.backup_azure_storage_access_key }}"
+  async: 3600
+  poll: 10
+
+- name: Unarchive the restore artifact
+  become: yes
+  unarchive: src={{user_home}}/{{ cassandra_restore_gzip_file_name }} dest={{user_home}}/ copy=no
+
+- name: Remove the restore artifact
+  become: yes
+  file: path={{user_home}}/cassandra* state=absent
+
+- name: Remove the old data
+  become: yes
+  file: path=/var/lib/cassandra/data/sunbird state=absent
+
+- name: Move the restored data into place
+  become: yes
+  command: mv {{user_home}}/data/sunbird /var/lib/cassandra/data/
+
+- name: Remove the leftover extracted data
+  become: yes
+  file: path=/home/deployer/data state=absent
+
+- name: Change ownership of the data directory
+  become: yes
+  file: path=/var/lib/cassandra/data owner=cassandra group=cassandra recurse=yes
+
+- name: Copy the nodetool refresh script
+  become: yes
+  template: src=nodetool.j2 dest={{user_home}}/nodetool.sh mode=0755
+
+- name: Start cassandra after the restore
+  become: yes
+  service: name=cassandra state=started
\ No newline at end of file
diff --git a/ansible/roles/cassandra-restore/templates/nodetool.j2 b/ansible/roles/cassandra-restore/templates/nodetool.j2
new file mode 100644
index 0000000000000000000000000000000000000000..e9e89d27a5d25163c2201e3d0e3ee2e376cb0f92
--- /dev/null
+++ b/ansible/roles/cassandra-restore/templates/nodetool.j2
@@ -0,0 +1,13 @@
+#!/bin/sh
+# Refresh each restored table so cassandra picks up the SSTables copied from the snapshot
+snapshot={{snapshot}}
+for keyspace in sunbird
+do
+  echo $keyspace
+  cd {{user_home}}/snapshot-$snapshot/$keyspace
+  for table in *
+  do
+    echo $table
+    table_name=`echo $table | cut -d "-" -f1`
+    nodetool refresh -- $keyspace $table_name
+  done
+done
\ No newline at end of file
diff --git
a/ansible/roles/cassandra-restore/vars/main.yml b/ansible/roles/cassandra-restore/vars/main.yml new file mode 100644 index 0000000000000000000000000000000000000000..569e432c18babb4622e5d578ae44deb5173fe4fc --- /dev/null +++ b/ansible/roles/cassandra-restore/vars/main.yml @@ -0,0 +1 @@ +cassandra_root_dir: '/etc/cassandra' \ No newline at end of file diff --git a/ansible/roles/cassandra/README.md b/ansible/roles/cassandra/README.md new file mode 100755 index 0000000000000000000000000000000000000000..fac9c2e53a165eda3adf4988729d87f576527005 --- /dev/null +++ b/ansible/roles/cassandra/README.md @@ -0,0 +1,78 @@ +Role Name +========= + +An [Ansible] role to install [Cassandra] + +Requirements +------------ + +- Oracle Java 8 + +Install [Ansible] requirements `ansible-galaxy install -r requirements.yml` + +Role Variables +-------------- + +``` +--- +# defaults file for ansible-cassandra +cassandra_cluster_group: 'cassandra-cluster-nodes' +cassandra_cluster_name: 'Test Cluster' +cassandra_cluster_setup: false +cassandra_commitlog_directory: '/var/lib/cassandra/commitlog' +cassandra_config: false +cassandra_debian_repo_info: + repo: 'deb http://www.apache.org/dist/cassandra/debian 36x main' + repo_key: 'https://www.apache.org/dist/cassandra/KEYS' +cassandra_data_file_directories: + - '/var/lib/cassandra/data' +cassandra_hints_directory: '/var/lib/cassandra/hints' +cassandra_listen_address: "{{ hostvars[inventory_hostname]['ansible_' + cassandra_listen_interface]['ipv4']['address'] }}" +cassandra_listen_interface: 'eth1' +cassandra_log_dir: '/var/log/cassandra' +cassandra_root_dir: '/etc/cassandra' +cassandra_saved_caches_directory: '/var/lib/cassandra/saved_caches' +cassandra_seeds: '127.0.0.1' # Only used if not setting up a cluster +cassandra_version: '3.6' +``` + +Dependencies +------------ + +Reference requirements + +Example Playbook +---------------- + +``` +--- +- hosts: cassandra-cluster-nodes + become: true + vars: + cassandra_cluster_setup: true + cassandra_config: true + pri_domain_name: 'test.vagrant.local' + roles: + - role: ansible-oracle-java8 + - role: ansible-cassandra + tasks: +``` + +License +------- + +BSD + +Author Information +------------------ + + +Larry Smith Jr. 
+- [@mrlesmithjr]
+- [EveryThingShouldBeVirtual]
+- mrlesmithjr [at] gmail.com
+
+[@mrlesmithjr]: <https://twitter.com/mrlesmithjr>
+[EveryThingShouldBeVirtual]: <http://everythingshouldbevirtual.com>
+[Ansible]: <https://www.ansible.com>
+[Cassandra]: <http://cassandra.apache.org/>
diff --git a/ansible/roles/cassandra/files/cassandra-prometheus-2.0.0-jar-with-dependencies.jar b/ansible/roles/cassandra/files/cassandra-prometheus-2.0.0-jar-with-dependencies.jar
new file mode 100644
index 0000000000000000000000000000000000000000..e801fa6d1b3006ee8c93487b3d49ac5eb82a107a
Binary files /dev/null and b/ansible/roles/cassandra/files/cassandra-prometheus-2.0.0-jar-with-dependencies.jar differ
diff --git a/ansible/roles/cassandra/files/jmx_prometheus_httpserver-0.11.jar b/ansible/roles/cassandra/files/jmx_prometheus_httpserver-0.11.jar
new file mode 100644
index 0000000000000000000000000000000000000000..d0dfe48b72ba2ff966026ad8fd8d05ef0b27a94d
Binary files /dev/null and b/ansible/roles/cassandra/files/jmx_prometheus_httpserver-0.11.jar differ
diff --git a/ansible/roles/cassandra/files/jmx_prometheus_javaagent-0.10.jar b/ansible/roles/cassandra/files/jmx_prometheus_javaagent-0.10.jar
new file mode 100644
index 0000000000000000000000000000000000000000..3fae7755fcf78534ab8cc156acaa08c90e05c301
Binary files /dev/null and b/ansible/roles/cassandra/files/jmx_prometheus_javaagent-0.10.jar differ
diff --git a/ansible/roles/cassandra/handlers/main.yml b/ansible/roles/cassandra/handlers/main.yml
new file mode 100755
index 0000000000000000000000000000000000000000..c6f499a8fc6d5a18e2560479c66061851cd4d93f
--- /dev/null
+++ b/ansible/roles/cassandra/handlers/main.yml
@@ -0,0 +1,6 @@
+---
+# handlers file for ansible-cassandra
+- name: "restart cassandra"
+  service:
+    name: "cassandra"
+    state: "restarted"
diff --git a/ansible/roles/cassandra/tasks/main.yml b/ansible/roles/cassandra/tasks/main.yml
new file mode 100755
index 0000000000000000000000000000000000000000..50ab4d9c6a5cfdd59da0c9533b0d33c49274ee8f
--- /dev/null
+++ b/ansible/roles/cassandra/tasks/main.yml
@@ -0,0 +1,211 @@
+- name: Cassandra | Copy cassandra-prometheus exporter jar to lib directory
+  become: yes
+  copy: src=cassandra-prometheus-2.0.0-jar-with-dependencies.jar dest=/usr/share/cassandra/lib
+  tags:
+    - provision
+  when: env == 'spike'
+
+- name: Cassandra | Copy JMX Prometheus javaagent jar to lib directory
+  become: yes
+  copy: src=jmx_prometheus_javaagent-0.10.jar dest=/usr/share/cassandra/lib
+  tags:
+    - provision
+  when: env == 'spike'
+
+- name: Cassandra | Copy JMX agent config to /etc/cassandra directory
+  become: yes
+  template: src=cassandra_jmx.yaml dest=/etc/cassandra
+  tags:
+    - provision
+  when: env == 'spike'
+
+- name: Cassandra | Update the cassandra env configuration
+  become: yes
+  template: src=cassandra-env.sh dest=/etc/cassandra
+  tags:
+    - provision
+  when: env == 'spike'
+
+- name: Cassandra | Copy jmx_httpserver.yml
+  become: yes
+  template: src=jmx_httpserver.yml dest=/etc/cassandra
+  tags:
+    - provision
+  when: env == 'spike'
+
+- name: Cassandra | Copy run_jmx_httpserver.sh
+  become: yes
+  template: src=run_jmx_httpserver.sh dest=/etc/cassandra
+  tags:
+    - provision
+  when: env == 'spike'
+
+- name: Cassandra | Copy jmx_prometheus_httpserver-0.11.jar
+  become: yes
+  copy: src=jmx_prometheus_httpserver-0.11.jar dest=/usr/share/cassandra/lib
+  tags:
+    - provision
+  when: env == 'spike'
+
+- name: Cassandra | Update the cassandra host configuration
+  become: yes
+  template: src=hosts.j2 dest=/etc/hosts
+
+- name: debian | Adding Cassandra
Debian Repo Key + apt_key: + url: "{{ cassandra_repo_key }}" + state: "present" + +- name: debian | Adding Cassandra Debian Repo + apt_repository: + repo: "{{ cassandra_repo }}" + state: "present" + +- name: debian | Installing Cassandra + apt: + name: "cassandra" + state: "present" + +- name: config | Configuring Cassandra + template: + src: "cassandra.yaml.j2" + dest: "{{ cassandra.root_dir }}/cassandra.yaml" + owner: "root" + group: "root" + mode: 0644 + notify: "restart cassandra" +# - name: Create the directory +# become: yes +# file: path=/data/cassandra/ state=directory recurse=yes +# tags: +# - backup + +# - name: copy the backup script +# become: yes +# template: src=cassandra_backup.j2 dest=/home/deployer/cassandra_backup.sh mode=0755 +# tags: +# - backup + +# - name: run the backup script +# become: yes +# shell: sh /home/deployer/cassandra_backup.sh +# tags: +# - backup + +# - name: Add Azure apt repository +# apt_repository: repo='deb [arch=amd64] https://packages.microsoft.com/repos/azure-cli/ wheezy main' state=present +# tags: +# - backup + +# - name: Import Azure signing key +# apt_key: keyserver=packages.microsoft.com id=417A0893 +# tags: +# - backup + +# - name: ensure apt-transport-https is installed +# apt: name=apt-transport-https +# tags: +# - backup + +# - name: ensure azure-cli is installed +# apt: name=azure-cli +# tags: +# - backup + +# - set_fact: +# cassandra_backup_gzip_file_name: "cassandra_backup_{{ lookup('pipe', 'date +%Y%m%d') }}.zip" +# tags: +# - backup + +# - set_fact: +# cassandra_backup_gzip_file_path: "{{ cassandra.backup_dir }}/{{ cassandra_backup_gzip_file_name }}" +# tags: +# - backup + +# - name: Ensure azure blob storage container exists +# command: az storage container create --name {{ cassandra.backup_azure_container_name }} +# ignore_errors: true +# environment: +# AZURE_STORAGE_ACCOUNT: "{{ cassandra.backup_azure_storage_account_name }}" +# AZURE_STORAGE_KEY: "{{ cassandra.backup_azure_storage_access_key }}" +# tags: +# - backup + +# - name: Upload to azure blob storage +# command: az storage blob upload --name {{ cassandra_backup_gzip_file_name }} --file {{ cassandra_backup_gzip_file_path }} --container-name {{ cassandra.backup_azure_container_name }} +# environment: +# AZURE_STORAGE_ACCOUNT: "{{ cassandra.backup_azure_storage_account_name }}" +# AZURE_STORAGE_KEY: "{{ cassandra.backup_azure_storage_access_key }}" +# async: 3600 +# poll: 10 +# tags: +# - backup + +# - name: clean up backup dir after upload +# file: path="{{ cassandra.backup_dir }}" state=absent +# tags: +# - backup + +# - name: Stop the cassandra +# become: yes +# service: name=cassandra state=stopped +# tags: +# - restore + +# - set_fact: +# cassandra_restore_gzip_file_path: "{{ cassandra.restore_dir }}/{{ cassandra_restore_gzip_file_name }}" +# tags: +# - restore + +# - name: Download to azure blob storage +# command: az storage blob download --name {{ cassandra_restore_gzip_file_name }} --file {{ cassandra_restore_gzip_file_path }} --container-name {{ cassandra.backup_azure_container_name }} +# environment: +# AZURE_STORAGE_ACCOUNT: "{{ cassandra.backup_azure_storage_account_name }}" +# AZURE_STORAGE_KEY: "{{ cassandra.backup_azure_storage_access_key }}" +# async: 3600 +# poll: 10 +# tags: +# - restore + +# - name: unarchieve restore artifact +# become: yes +# unarchive: src={{user_home}}/{{ cassandra_restore_gzip_file_name }} dest={{user_home}}/ copy=no +# tags: +# - restore + +# - name: Remove the restroe artefact +# become: yes +# file: path={{user_home}}/cassandra* 
state=absent +# tags: +# - restore + +# - name: Remove the old data +# become: yes +# file: path=/var/lib/cassandra/data/* state=absent +# tags: +# - restore + +# - name: Replace the new data +# become: yes +# copy: src={{user_home}}/snapshot-{{snapshot}}/* dest=/var/lib/cassandra/data/ +# tags: +# - restore + +# - name: copy the backup script +# become: yes +# template: src=nodetool.j2 dest={{user_home}}/nodetool.sh mode=0755 +# tags: +# - restore + + +# - name: Start the cassandra +# become: yes +# service: name=cassandra state=started +# tags: +# - restore + +# - name: run the backup script +# become: yes +# shell: sh {{user_home}}/nodetool.sh +# tags: +# - restore diff --git a/ansible/roles/cassandra/templates/cassandra-env.sh b/ansible/roles/cassandra/templates/cassandra-env.sh new file mode 100644 index 0000000000000000000000000000000000000000..b3c9eb7f1f8f8c7d87d69ed8009dccab8c36c38b --- /dev/null +++ b/ansible/roles/cassandra/templates/cassandra-env.sh @@ -0,0 +1,297 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +calculate_heap_sizes() +{ + case "`uname`" in + Linux) + system_memory_in_mb=`free -m | awk '/:/ {print $2;exit}'` + system_cpu_cores=`egrep -c 'processor([[:space:]]+):.*' /proc/cpuinfo` + ;; + FreeBSD) + system_memory_in_bytes=`sysctl hw.physmem | awk '{print $2}'` + system_memory_in_mb=`expr $system_memory_in_bytes / 1024 / 1024` + system_cpu_cores=`sysctl hw.ncpu | awk '{print $2}'` + ;; + SunOS) + system_memory_in_mb=`prtconf | awk '/Memory size:/ {print $3}'` + system_cpu_cores=`psrinfo | wc -l` + ;; + Darwin) + system_memory_in_bytes=`sysctl hw.memsize | awk '{print $2}'` + system_memory_in_mb=`expr $system_memory_in_bytes / 1024 / 1024` + system_cpu_cores=`sysctl hw.ncpu | awk '{print $2}'` + ;; + *) + # assume reasonable defaults for e.g. 
a modern desktop or + # cheap server + system_memory_in_mb="2048" + system_cpu_cores="2" + ;; + esac + + # some systems like the raspberry pi don't report cores, use at least 1 + if [ "$system_cpu_cores" -lt "1" ] + then + system_cpu_cores="1" + fi + + # set max heap size based on the following + # max(min(1/2 ram, 1024MB), min(1/4 ram, 8GB)) + # calculate 1/2 ram and cap to 1024MB + # calculate 1/4 ram and cap to 8192MB + # pick the max + half_system_memory_in_mb=`expr $system_memory_in_mb / 2` + quarter_system_memory_in_mb=`expr $half_system_memory_in_mb / 2` + if [ "$half_system_memory_in_mb" -gt "1024" ] + then + half_system_memory_in_mb="1024" + fi + if [ "$quarter_system_memory_in_mb" -gt "8192" ] + then + quarter_system_memory_in_mb="8192" + fi + if [ "$half_system_memory_in_mb" -gt "$quarter_system_memory_in_mb" ] + then + max_heap_size_in_mb="$half_system_memory_in_mb" + else + max_heap_size_in_mb="$quarter_system_memory_in_mb" + fi + MAX_HEAP_SIZE="${max_heap_size_in_mb}M" + + # Young gen: min(max_sensible_per_modern_cpu_core * num_cores, 1/4 * heap size) + max_sensible_yg_per_core_in_mb="100" + max_sensible_yg_in_mb=`expr $max_sensible_yg_per_core_in_mb "*" $system_cpu_cores` + + desired_yg_in_mb=`expr $max_heap_size_in_mb / 4` + + if [ "$desired_yg_in_mb" -gt "$max_sensible_yg_in_mb" ] + then + HEAP_NEWSIZE="${max_sensible_yg_in_mb}M" + else + HEAP_NEWSIZE="${desired_yg_in_mb}M" + fi +} + +# Determine the sort of JVM we'll be running on. +java_ver_output=`"${JAVA:-java}" -version 2>&1` +jvmver=`echo "$java_ver_output" | grep '[openjdk|java] version' | awk -F'"' 'NR==1 {print $2}' | cut -d\- -f1` +JVM_VERSION=${jvmver%_*} +JVM_PATCH_VERSION=${jvmver#*_} + +if [ "$JVM_VERSION" \< "1.8" ] ; then + echo "Cassandra 3.0 and later require Java 8u40 or later." + exit 1; +fi + +if [ "$JVM_VERSION" \< "1.8" ] && [ "$JVM_PATCH_VERSION" -lt 40 ] ; then + echo "Cassandra 3.0 and later require Java 8u40 or later." + exit 1; +fi + +jvm=`echo "$java_ver_output" | grep -A 1 'java version' | awk 'NR==2 {print $1}'` +case "$jvm" in + OpenJDK) + JVM_VENDOR=OpenJDK + # this will be "64-Bit" or "32-Bit" + JVM_ARCH=`echo "$java_ver_output" | awk 'NR==3 {print $2}'` + ;; + "Java(TM)") + JVM_VENDOR=Oracle + # this will be "64-Bit" or "32-Bit" + JVM_ARCH=`echo "$java_ver_output" | awk 'NR==3 {print $3}'` + ;; + *) + # Help fill in other JVM values + JVM_VENDOR=other + JVM_ARCH=unknown + ;; +esac + +#GC log path has to be defined here because it needs to access CASSANDRA_HOME +JVM_OPTS="$JVM_OPTS -Xloggc:/var/log/cassandra/gc.log" + +# Here we create the arguments that will get passed to the jvm when +# starting cassandra. + +# Read user-defined JVM options from jvm.options file +JVM_OPTS_FILE=$CASSANDRA_CONF/jvm.options +for opt in `grep "^-" $JVM_OPTS_FILE` +do + JVM_OPTS="$JVM_OPTS $opt" +done + +# Check what parameters were defined on jvm.options file to avoid conflicts +echo $JVM_OPTS | grep -q Xmn +DEFINED_XMN=$? +echo $JVM_OPTS | grep -q Xmx +DEFINED_XMX=$? +echo $JVM_OPTS | grep -q Xms +DEFINED_XMS=$? +echo $JVM_OPTS | grep -q UseConcMarkSweepGC +USING_CMS=$? +echo $JVM_OPTS | grep -q UseG1GC +USING_G1=$? + +# Override these to set the amount of memory to allocate to the JVM at +# start-up. For production use you may wish to adjust this for your +# environment. MAX_HEAP_SIZE is the total amount of memory dedicated +# to the Java heap. HEAP_NEWSIZE refers to the size of the young +# generation. Both MAX_HEAP_SIZE and HEAP_NEWSIZE should be either set +# or not (if you set one, set the other). 
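+#
+# For example (an illustrative walk-through of calculate_heap_sizes above,
+# assuming a 16384 MB, 8-core host): half of RAM is 8192 MB, capped to 1024 MB;
+# a quarter of RAM is 4096 MB, under the 8192 MB cap; so MAX_HEAP_SIZE =
+# max(1024, 4096) = 4096M, and HEAP_NEWSIZE = min(100 MB * 8 cores, 4096 / 4)
+# = 800M.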
+# +# The main trade-off for the young generation is that the larger it +# is, the longer GC pause times will be. The shorter it is, the more +# expensive GC will be (usually). +# +# The example HEAP_NEWSIZE assumes a modern 8-core+ machine for decent pause +# times. If in doubt, and if you do not particularly want to tweak, go with +# 100 MB per physical CPU core. + +#MAX_HEAP_SIZE="4G" +#HEAP_NEWSIZE="800M" + +# Set this to control the amount of arenas per-thread in glibc +#export MALLOC_ARENA_MAX=4 + +# only calculate the size if it's not set manually +if [ "x$MAX_HEAP_SIZE" = "x" ] && [ "x$HEAP_NEWSIZE" = "x" -o $USING_G1 -eq 0 ]; then + calculate_heap_sizes +elif [ "x$MAX_HEAP_SIZE" = "x" ] || [ "x$HEAP_NEWSIZE" = "x" -a $USING_G1 -ne 0 ]; then + echo "please set or unset MAX_HEAP_SIZE and HEAP_NEWSIZE in pairs when using CMS GC (see cassandra-env.sh)" + exit 1 +fi + +if [ "x$MALLOC_ARENA_MAX" = "x" ] ; then + export MALLOC_ARENA_MAX=4 +fi + +# We only set -Xms and -Xmx if they were not defined on jvm.options file +# If defined, both Xmx and Xms should be defined together. +if [ $DEFINED_XMX -ne 0 ] && [ $DEFINED_XMS -ne 0 ]; then + JVM_OPTS="$JVM_OPTS -Xms${MAX_HEAP_SIZE}" + JVM_OPTS="$JVM_OPTS -Xmx${MAX_HEAP_SIZE}" +elif [ $DEFINED_XMX -ne 0 ] || [ $DEFINED_XMS -ne 0 ]; then + echo "Please set or unset -Xmx and -Xms flags in pairs on jvm.options file." + exit 1 +fi + +# We only set -Xmn flag if it was not defined in jvm.options file +# and if the CMS GC is being used +# If defined, both Xmn and Xmx should be defined together. +if [ $DEFINED_XMN -eq 0 ] && [ $DEFINED_XMX -ne 0 ]; then + echo "Please set or unset -Xmx and -Xmn flags in pairs on jvm.options file." + exit 1 +elif [ $DEFINED_XMN -ne 0 ] && [ $USING_CMS -eq 0 ]; then + JVM_OPTS="$JVM_OPTS -Xmn${HEAP_NEWSIZE}" +fi + +if [ "$JVM_ARCH" = "64-Bit" ] && [ $USING_CMS -eq 0 ]; then + JVM_OPTS="$JVM_OPTS -XX:+UseCondCardMark" +fi + +# provides hints to the JIT compiler +JVM_OPTS="$JVM_OPTS -XX:CompileCommandFile=$CASSANDRA_CONF/hotspot_compiler" + +# add the jamm javaagent +JVM_OPTS="$JVM_OPTS -javaagent:$CASSANDRA_HOME/lib/jamm-0.3.0.jar" + +# set jvm HeapDumpPath with CASSANDRA_HEAPDUMP_DIR +if [ "x$CASSANDRA_HEAPDUMP_DIR" != "x" ]; then + JVM_OPTS="$JVM_OPTS -XX:HeapDumpPath=$CASSANDRA_HEAPDUMP_DIR/cassandra-`date +%s`-pid$$.hprof" +fi + +# jmx: metrics and administration interface +# +# add this if you're having trouble connecting: +# JVM_OPTS="$JVM_OPTS -Djava.rmi.server.hostname=<public name>" +# +# see +# https://blogs.oracle.com/jmxetc/entry/troubleshooting_connection_problems_in_jconsole +# for more on configuring JMX through firewalls, etc. (Short version: +# get it working with no firewall first.) +# +# Cassandra ships with JMX accessible *only* from localhost. +# To enable remote JMX connections, uncomment lines below +# with authentication and/or ssl enabled. See https://wiki.apache.org/cassandra/JmxSecurity +# +if [ "x$LOCAL_JMX" = "x" ]; then + LOCAL_JMX=yes +fi + +# Specifies the default port over which Cassandra will be available for +# JMX connections. +# For security reasons, you should not expose this port to the internet. Firewall it if needed. 
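+#
+# For example, with the local-only default below you can sanity-check the
+# JMX endpoint from the node itself (an illustrative invocation, using
+# nodetool's standard -h/-p flags):
+#   nodetool -h 127.0.0.1 -p 7199 status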
+ +JMX_PORT="7199" + +if [ "$LOCAL_JMX" = "yes" ]; then + JVM_OPTS="$JVM_OPTS -Dcassandra.jmx.local.port=$JMX_PORT" + JVM_OPTS="$JVM_OPTS -Dcom.sun.management.jmxremote.authenticate=false" +else + JVM_OPTS="$JVM_OPTS -Dcassandra.jmx.remote.port=$JMX_PORT" + # if ssl is enabled the same port cannot be used for both jmx and rmi so either + # pick another value for this property or comment out to use a random port (though see CASSANDRA-7087 for origins) + JVM_OPTS="$JVM_OPTS -Dcom.sun.management.jmxremote.rmi.port=$JMX_PORT" + + # turn on JMX authentication. See below for further options + JVM_OPTS="$JVM_OPTS -Dcom.sun.management.jmxremote.authenticate=true" + + # jmx ssl options + #JVM_OPTS="$JVM_OPTS -Dcom.sun.management.jmxremote.ssl=true" + #JVM_OPTS="$JVM_OPTS -Dcom.sun.management.jmxremote.ssl.need.client.auth=true" + #JVM_OPTS="$JVM_OPTS -Dcom.sun.management.jmxremote.ssl.enabled.protocols=<enabled-protocols>" + #JVM_OPTS="$JVM_OPTS -Dcom.sun.management.jmxremote.ssl.enabled.cipher.suites=<enabled-cipher-suites>" + #JVM_OPTS="$JVM_OPTS -Djavax.net.ssl.keyStore=/path/to/keystore" + #JVM_OPTS="$JVM_OPTS -Djavax.net.ssl.keyStorePassword=<keystore-password>" + #JVM_OPTS="$JVM_OPTS -Djavax.net.ssl.trustStore=/path/to/truststore" + #JVM_OPTS="$JVM_OPTS -Djavax.net.ssl.trustStorePassword=<truststore-password>" +fi + +# jmx authentication and authorization options. By default, auth is only +# activated for remote connections but they can also be enabled for local only JMX +## Basic file based authn & authz +JVM_OPTS="$JVM_OPTS -Dcom.sun.management.jmxremote.password.file=/etc/cassandra/jmxremote.password" +#JVM_OPTS="$JVM_OPTS -Dcom.sun.management.jmxremote.access.file=/etc/cassandra/jmxremote.access" +## Custom auth settings which can be used as alternatives to JMX's out of the box auth utilities. +## JAAS login modules can be used for authentication by uncommenting these two properties. +## Cassandra ships with a LoginModule implementation - org.apache.cassandra.auth.CassandraLoginModule - +## which delegates to the IAuthenticator configured in cassandra.yaml. See the sample JAAS configuration +## file cassandra-jaas.config +#JVM_OPTS="$JVM_OPTS -Dcassandra.jmx.remote.login.config=CassandraLogin" +#JVM_OPTS="$JVM_OPTS -Djava.security.auth.login.config=$CASSANDRA_HOME/conf/cassandra-jaas.config" + +## Cassandra also ships with a helper for delegating JMX authz calls to the configured IAuthorizer, +## uncomment this to use it. Requires one of the two authentication options to be enabled +#JVM_OPTS="$JVM_OPTS -Dcassandra.jmx.authorizer=org.apache.cassandra.auth.jmx.AuthorizationProxy" + +# To use mx4j, an HTML interface for JMX, add mx4j-tools.jar to the lib/ +# directory. +# See http://wiki.apache.org/cassandra/Operations#Monitoring_with_MX4J +# By default mx4j listens on 0.0.0.0:8081. Uncomment the following lines +# to control its listen address and port. +#MX4J_ADDRESS="-Dmx4jaddress=127.0.0.1" +#MX4J_PORT="-Dmx4jport=8081" + +# Cassandra uses SIGAR to capture OS metrics CASSANDRA-7838 +# for SIGAR we have to set the java.library.path +# to the location of the native libraries. 
+JVM_OPTS="$JVM_OPTS -Djava.library.path=$CASSANDRA_HOME/lib/sigar-bin" + +JVM_OPTS="$JVM_OPTS $MX4J_ADDRESS" +JVM_OPTS="$JVM_OPTS $MX4J_PORT" +JVM_OPTS="$JVM_OPTS $JVM_EXTRA_OPTS" +#JVM_OPTS="$JVM_OPTS -javaagent:$CASSANDRA_HOME/lib/cassandra-prometheus-2.0.0-jar-with-dependencies.jar=7400" diff --git a/ansible/roles/cassandra/templates/cassandra.cql b/ansible/roles/cassandra/templates/cassandra.cql new file mode 100644 index 0000000000000000000000000000000000000000..5861b5bfd164bb73c779ffa5f1ade3baee9301f6 --- /dev/null +++ b/ansible/roles/cassandra/templates/cassandra.cql @@ -0,0 +1,413 @@ +CREATE KEYSPACE IF NOT EXISTS sunbird WITH replication = {'class':'SimpleStrategy','replication_factor':1}; + +//to change cluster name +//UPDATE system.local SET cluster_name = 'sunbird' where key='local'; +//ALTER USER cassandra WITH PASSWORD 'password'; +USE sunbird; + +/* +creation of id= one way hash of (userId##courseId) here courseId is identifier of course mgmt table +toc url we have to generate through json of content id from ekStep +here status is (default(0),inProgress(1),completed(2)) +progress is no of content completed +*/ +CREATE TABLE IF NOT EXISTS sunbird.course_enrollment(id text, courseId text, courseName text,userId text,enrolledDate text, +description text,tocUrl text,status int,active boolean,delta text,grade text,progress int,lastReadContentId text, +lastReadContentStatus int,addedBy text,courseLogoUrl text,dateTime timestamp,contentId text,PRIMARY KEY (id)); + +CREATE INDEX inx_ce_userId ON sunbird.course_enrollment (userId); +CREATE INDEX inx_ce_courseId ON sunbird.course_enrollment (courseId); +CREATE INDEX inx_ce_course_name ON sunbird.course_enrollment (courseName); +CREATE INDEX inx_ce_status ON sunbird.course_enrollment (status); + +/* +creation of id = one way hash of (userId##contentId##courseId) +status is (default(0),inProgress(1),completed(2)) +*/ +CREATE TABLE IF NOT EXISTS sunbird.content_consumption(id text, contentId text, courseId text, userId text,viewPosition text,viewCount int,lastAccessTime text, +contentVersion text,completedCount int,status int,result text,score text,grade text,lastUpdatedTime text,lastCompletedTime text,dateTime timestamp,PRIMARY KEY (id)); + +CREATE INDEX inx_cc_userId ON sunbird.content_consumption (userId); +CREATE INDEX inx_cc_contentId ON sunbird.content_consumption (contentId); +CREATE INDEX inx_cc_status ON sunbird.content_consumption (status); +CREATE INDEX inx_cc_courseId ON sunbird.content_consumption (courseId); + +/* +creation of id = using timestamp and env + id and courseId both are same +content id is from ekstep +status DRAFT("draft"), LIVE("live"), RETIRED("retired") +contentType (pdf,video,word doc etc) +tutor map<id,name> +*/ +CREATE TABLE IF NOT EXISTS sunbird.course_management(id text, courseId text, contentId text, courseName text,courseType text, +facultyId text,facultyName text,organisationId text,organisationName text,enrollementStartDate text,enrollementEndDate text, +courseDuration text,description text,status text,addedBy text,addedByName text,publishedBy text,publishedByName text,createdDate text, +publishedDate text,updatedDate text,updatedBy text,updatedByName text,contentType text,createdfor list<text>,noOfLectures int,tocUrl text, +tutor map<text,text>,courseLogoUrl text,courseRating text,userCount int,PRIMARY KEY (id)); + +CREATE INDEX inx_cm_facultyId ON sunbird.course_management (facultyId); +CREATE INDEX inx_cm_organisationId ON sunbird.course_management (organisationId); +CREATE INDEX inx_cm_courseId 
ON sunbird.course_management (courseId); +CREATE INDEX inx_cm_course_name ON sunbird.course_management (courseName); +CREATE INDEX inx_cm_status ON sunbird.course_management (status); +CREATE INDEX inx_cm_contentId ON sunbird.course_management (contentId); + +/* +creation of id = one way hash of userName +here id and userId both are same + currently username and email is same +email and username is unique +*/ +CREATE TABLE IF NOT EXISTS sunbird.user(id text,userId text,userName text, email text,phone text,aadhaarNo text,createdDate text,updatedDate text,updatedBy text, +lastLoginTime text,status int,firstName text,lastName text,password text,avatar text,gender text,language text,state text,city text,zipcode text,PRIMARY KEY (id)); + +CREATE INDEX inx_u_email ON sunbird.user (email); +CREATE INDEX inx_u_phone ON sunbird.user (phone); +CREATE INDEX inx_u_status ON sunbird.user (status); +CREATE INDEX inx_u_userId ON sunbird.user (userId); +CREATE INDEX inx_u_userName ON sunbird.user (userName); + +//user_auth +//id is auth token +CREATE TABLE IF NOT EXISTS sunbird.user_auth(id text, userId text,createdDate text,updatedDate text,source text,PRIMARY KEY (id)); +CREATE INDEX inx_ua_userId ON sunbird.user_auth (userId); +CREATE INDEX inx_ua_source ON sunbird.user_auth (source); + +//organisation +CREATE TABLE IF NOT EXISTS sunbird.organisation(id text, orgName text, description text,communityId text,createdBy text,createdByName text,createdDate text, +updatedDate text,updatedBy text,status int,relation text,parentOrgId text,orgType text,state text,city text,zipcode text,orgCode text,dateTime timestamp,PRIMARY KEY (id)); + +CREATE INDEX inx_org_orgName ON sunbird.organisation (orgName); +CREATE INDEX inx_org_status ON sunbird.organisation (status); +//page_management +//id= using timestamp and env +CREATE TABLE sunbird.page_management(id text, name text, appMap text,portalMap text,createdDate text,createdBy text, +updatedDate text,updatedBy text,organisationId text,PRIMARY KEY (id)); + +CREATE INDEX inx_pm_pageName ON sunbird.page_management (name); +CREATE INDEX inx_vm_organisationId ON sunbird.page_management (organisationId); + +//page_section +//id= using timestamp and env +CREATE TABLE IF NOT EXISTS sunbird.page_section(id text, name text, sectionDataType text,description text,display text, +searchQuery text,createdDate text,createdBy text,updatedDate text,updatedBy text,imgUrl text,alt text,status int,PRIMARY KEY (id)); +CREATE INDEX inx_ps_sectionDataType ON sunbird.page_section (sectionDataType); +CREATE INDEX inx_ps_sectionName ON sunbird.page_section (name); + +//Assessment Eval +//id= using timestamp and env +CREATE TABLE IF NOT EXISTS sunbird.assessment_eval(id text, contentId text, courseId text, userId text,assessmentItemId text, +createdDate text,result text,score text,attemptId text,attemptedCount int,PRIMARY KEY (id)); + +CREATE INDEX inx_ae_userId ON sunbird.assessment_eval (userId); +CREATE INDEX inx_ae_contentId ON sunbird.assessment_eval (contentId); +CREATE INDEX inx_ae_assessmentItemId ON sunbird.assessment_eval (assessmentItemId); +CREATE INDEX inx_ae_courseId ON sunbird.assessment_eval (courseId); + +//Assessment item +//id= using timestamp and userId +CREATE TABLE IF NOT EXISTS sunbird.assessment_item(id text, contentId text, courseId text, userId text,assessmentItemId text, +assessmentType text,attemptedDate text,createdDate text,timeTaken int,result text,score text,maxScore text,answers text, +evaluationStatus boolean,processingStatus boolean,attemptId text,PRIMARY KEY 
(id)); + +CREATE INDEX inx_ai_userId ON sunbird.assessment_item (userId); +CREATE INDEX inx_ai_contentId ON sunbird.assessment_item (contentId); +CREATE INDEX inx_ai_assessmentItemId ON sunbird.assessment_item (assessmentItemId); +CREATE INDEX inx_ai_courseId ON sunbird.assessment_item (courseId); +CREATE INDEX inx_ai_processingStatus ON sunbird.assessment_item (processingStatus); + +ALTER TABLE sunbird.course_management DROP noOfLectures; +ALTER TABLE sunbird.course_management ADD noOfLectures int; +ALTER TABLE sunbird.assessment_item DROP evaluationStatus; +ALTER TABLE sunbird.assessment_item DROP processingStatus; +ALTER TABLE sunbird.assessment_item ADD evaluationStatus boolean; +ALTER TABLE sunbird.assessment_item ADD processingStatus boolean; +ALTER TABLE sunbird.assessment_eval DROP assessmentItemId; +ALTER TABLE sunbird.assessment_eval DROP maxScore; +ALTER TABLE sunbird.page_management ADD organisationId text; +ALTER TABLE sunbird.page_management DROP appMap; +ALTER TABLE sunbird.page_management DROP portalMap; +ALTER TABLE sunbird.page_management ADD appMap text; +ALTER TABLE sunbird.page_management ADD portalMap text; +ALTER TABLE sunbird.organisation ADD orgCode text; + +//2017-06-30 changes for user and organisation +ALTER TABLE sunbird.user DROP zipcode; +ALTER TABLE sunbird.user DROP city; +ALTER TABLE sunbird.user DROP state; +ALTER TABLE sunbird.user DROP language; +ALTER TABLE sunbird.user ADD thumbnail text; + ALTER TABLE sunbird.user ADD dob text; + ALTER TABLE sunbird.user ADD regOrgId text; + ALTER TABLE sunbird.user ADD subject list<text>; + ALTER TABLE sunbird.user ADD language list<text>; + ALTER TABLE sunbird.user ADD grade list<text>; + + + CREATE TABLE IF NOT EXISTS sunbird.user_external_identity(id text, userId text, externalId text,source text,isVerified boolean,PRIMARY KEY (id)); +CREATE INDEX inx_uei_userid ON sunbird.user_external_identity (userId); +CREATE INDEX inx_uei_externalId ON sunbird.user_external_identity (externalId); +CREATE INDEX inx_uei_source ON sunbird.user_external_identity (source); + +CREATE TABLE IF NOT EXISTS sunbird.address(id text, userId text, country text,state text,city text,zipCode text,addType text,createdDate text,createdBy text,updatedDate text,updatedBy text, PRIMARY KEY (id)); +CREATE INDEX inx_add_userid ON sunbird.address (userId); +CREATE INDEX inx_add_addType ON sunbird.address (addType); + +CREATE TABLE IF NOT EXISTS sunbird.user_education(id text, userId text, courseName text,duration int,yearOfPassing int,percentage double,grade text,name text,boardOrUniversity text,addressId text,createdDate text,createdBy text,updatedDate text,updatedBy text, PRIMARY KEY (id)); +CREATE INDEX inx_ueu_userid ON sunbird.user_education (userId); + +CREATE TABLE IF NOT EXISTS sunbird.user_job_profile(id text, userId text, jobName text,role text,joiningDate text,endDate text,orgName text,orgId text,subject list<text>,addressId text,boardName text,isVerified boolean,isRejected boolean,verifiedDate text,verifiedBy text,createdDate text,createdBy text,updatedDate text,updatedBy text, PRIMARY KEY (id)); +CREATE INDEX inx_ujp_userid ON sunbird.user_job_profile (userId); + +CREATE TABLE IF NOT EXISTS sunbird.user_org(id text, userId text, role text,orgId text,orgJoinDate text,orgLeftDate text,isApproved boolean, +isRejected boolean,approvedBy text,approvalDate text,updatedDate text,updatedBy text, PRIMARY KEY (id)); +CREATE INDEX inx_uorg_userid ON sunbird.user_org(userId); +CREATE INDEX inx_uorg_orgId ON sunbird.user_org(orgId); + +CREATE TABLE 
IF NOT EXISTS sunbird.subject(id text, name text, PRIMARY KEY (id)); +CREATE INDEX inx_sb_name ON sunbird.subject(name); + +CREATE TABLE IF NOT EXISTS sunbird.role(id text, name text, PRIMARY KEY (id)); +CREATE INDEX inx_role_name ON sunbird.role(name); + +ALTER TABLE sunbird.organisation DROP city; +ALTER TABLE sunbird.organisation DROP state; +ALTER TABLE sunbird.organisation DROP zipcode; +ALTER TABLE sunbird.organisation DROP relation; +ALTER TABLE sunbird.organisation DROP createdbyname; + +ALTER TABLE sunbird.organisation ADD imgUrl text; +ALTER TABLE sunbird.organisation ADD thumbnail text; +ALTER TABLE sunbird.organisation ADD channel text; +ALTER TABLE sunbird.organisation ADD preferredLanguage text; +ALTER TABLE sunbird.organisation ADD homeUrl text; +ALTER TABLE sunbird.organisation ADD isRootOrg boolean; +ALTER TABLE sunbird.organisation ADD addId text; +ALTER TABLE sunbird.organisation ADD noOfmembers int; +ALTER TABLE sunbird.organisation ADD orgCode text; +ALTER TABLE sunbird.organisation ADD isApproved boolean; +ALTER TABLE sunbird.organisation ADD approvedBy text; +ALTER TABLE sunbird.organisation ADD approvedDate text; +//ALTER TABLE sunbird.organisation ADD isRejected boolean; + +CREATE INDEX inx_org_channel ON sunbird.organisation(channel); +CREATE INDEX inx_org_orgType ON sunbird.organisation(orgType); +CREATE INDEX inx_org_orgCode ON sunbird.organisation(orgCode); + +CREATE TABLE IF NOT EXISTS sunbird.org_type(id text, name text, PRIMARY KEY (id)); +CREATE INDEX inx_ot_name ON sunbird.org_type(name); + +CREATE TABLE IF NOT EXISTS sunbird.org_mapping(id text, orgIdOne text,relation text,orgIdTwo text, PRIMARY KEY (id)); +CREATE INDEX inx_om_orgIdOne ON sunbird.org_mapping(orgIdOne); +CREATE INDEX inx_om_orgIdTwo ON sunbird.org_mapping(orgIdTwo); + +CREATE TABLE IF NOT EXISTS sunbird.role(id text, name text,status int, PRIMARY KEY (id)); +CREATE INDEX inx_ro_master_name ON sunbird.role(name); + +insert into role (id,name,status) values ('r_101','ADMIN',1); +insert into role (id,name,status) values ('r_102','ORG_ADMIN',1); +insert into role (id,name,status) values ('r_103','ORG_MODERATOR',1); +insert into role (id,name,status) values ('r_104','CONTENT_CREATOR',1); +insert into role (id,name,status) values ('r_105','CONTENT_REVIEWER',1); +insert into role (id,name,status) values ('r_106','ORG_MEMBER',1); + + +ALTER TABLE sunbird.user ADD rootOrgId text; +ALTER TABLE sunbird.address ADD addressLine1 text; +ALTER TABLE sunbird.address ADD addressLine2 text; +ALTER TABLE sunbird.user_education ADD degree text; + +insert into sunbird.role (id,name,status) values ('r_101','SYSTEM_ADMINISTRATION',1); +insert into sunbird.role (id,name,status) values ('r_102','ORG_MANAGEMENT',1); +insert into sunbird.role (id,name,status) values ('r_103','MEMBERSHIP_MANAGEMENT',1); +insert into sunbird.role (id,name,status) values ('r_104','CONTENT_CREATION',1); +insert into sunbird.role (id,name,status) values ('r_105','CONTENT_REVIEW',1); +insert into sunbird.role (id,name,status) values ('r_106','CONTENT_CURATION',1); +insert into sunbird.role (id,name,status) values ('r_107','PUBLIC',1); + + +CREATE TABLE IF NOT EXISTS sunbird.master_action(id text, name text, PRIMARY KEY (id)); +CREATE INDEX inx_ma_name ON sunbird.master_action(name); + +CREATE TABLE IF NOT EXISTS sunbird.url_action(id text, url text,name text, PRIMARY KEY (id)); +CREATE INDEX inx_ua_name ON sunbird.url_action(name); +CREATE INDEX inx_ua_url ON sunbird.url_action(url); + +CREATE TABLE IF NOT EXISTS sunbird.action_group(id 
text, actionId list<text>,groupName text, PRIMARY KEY (id));
+CREATE INDEX inx_uacg_groupName ON sunbird.action_group(groupName);
+
+CREATE TABLE IF NOT EXISTS sunbird.user_action_role(id text, actionGroupId list<text>,roleId text, PRIMARY KEY (id));
+CREATE INDEX inx_uactr_roleId ON sunbird.user_action_role(roleId);
+
+insert into sunbird.url_action(id,url,name) values ('1','','suspendOrg');
+insert into sunbird.url_action(id,url,name) values ('2','','suspendUser');
+insert into sunbird.url_action(id,url,name) values ('3','','createOrg');
+insert into sunbird.url_action(id,url,name) values ('4','','updateOrg');
+insert into sunbird.url_action(id,url,name) values ('5','','updateUser');
+insert into sunbird.url_action(id,url,name) values ('6','','addMember');
+insert into sunbird.url_action(id,url,name) values ('7','','removeOrg');
+insert into sunbird.url_action(id,url,name) values ('8','','createUser');
+
+insert into sunbird.url_action(id,url,name) values ('9','','removeMember');
+insert into sunbird.url_action(id,url,name) values ('10','','suspendMember');
+insert into sunbird.url_action(id,url,name) values ('11','','createCourse');
+insert into sunbird.url_action(id,url,name) values ('12','','updateCourse');
+insert into sunbird.url_action(id,url,name) values ('13','','createContent');
+insert into sunbird.url_action(id,url,name) values ('14','','updateContent');
+insert into sunbird.url_action(id,url,name) values ('15','','publishCourse');
+insert into sunbird.url_action(id,url,name) values ('16','','publishContent');
+
+insert into sunbird.url_action(id,url,name) values ('17','','flagCourse');
+insert into sunbird.url_action(id,url,name) values ('18','','flagContent');
+insert into sunbird.url_action(id,url,name) values ('19','','getProfile');
+insert into sunbird.url_action(id,url,name) values ('20','','updateProfile');
+insert into sunbird.url_action(id,url,name) values ('21','','readCourse');
+insert into sunbird.url_action(id,url,name) values ('22','','readContent');
+insert into sunbird.url_action(id,url,name) values ('23','','rateCourse');
+insert into sunbird.url_action(id,url,name) values ('24','','rateContent');
+insert into sunbird.url_action(id,url,name) values ('25','','searchCourse');
+insert into sunbird.url_action(id,url,name) values ('26','','searchContent');
+
+insert into sunbird.action_group(id,actionId,groupName) values ('ag_12',['1','2'],'SYSTEM_ADMINISTRATION');
+insert into sunbird.action_group(id,actionId,groupName) values ('ag_13',['3','4','7','8','5'],'ORG_MANAGEMENT');
+insert into sunbird.action_group(id,actionId,groupName) values ('ag_14',['6','9','10'],'MEMBERSHIP_MANAGEMENT');
+
+insert into sunbird.action_group(id,actionId,groupName) values ('ag_15',['11','12','13','14'],'CONTENT_CREATION');
+insert into sunbird.action_group(id,actionId,groupName) values ('ag_16',['15','16'],'CONTENT_REVIEW');
+insert into sunbird.action_group(id,actionId,groupName) values ('ag_17',['17','18','10'],'CONTENT_CURATION');
+insert into sunbird.action_group(id,actionId,groupName) values ('ag_18',['19','20','21','22','23','24','25','26'],'PUBLIC');
+
+ALTER TABLE sunbird.user ADD loginId text;
+ALTER TABLE sunbird.user ADD provider text;
+ALTER TABLE sunbird.user_external_identity ADD idType text;
+
+insert into sunbird.user_action_role(id,actiongroupid,roleid) values ('uar_1',['ag_18'],'r_107');
+insert into sunbird.user_action_role(id,actiongroupid,roleid) values ('uar_2',['ag_13'],'r_102');
+insert into sunbird.user_action_role(id,actiongroupid,roleid) values
+ +ALTER TABLE sunbird.organisation DROP addId; +ALTER TABLE sunbird.organisation ADD addressId text; +ALTER TABLE sunbird.user ADD roles List<text>; + + +CREATE TABLE IF NOT EXISTS sunbird.role_group(id text, name text, PRIMARY KEY (id)); +insert into sunbird.role_group (id,name) values ('SYSTEM_ADMINISTRATION','SYSTEM_ADMINISTRATION'); +insert into sunbird.role_group (id,name) values ('ORG_MANAGEMENT','ORG_MANAGEMENT'); +insert into sunbird.role_group (id,name) values ('MEMBERSHIP_MANAGEMENT','MEMBERSHIP_MANAGEMENT'); +insert into sunbird.role_group (id,name) values ('CONTENT_CREATION','CONTENT_CREATION'); +insert into sunbird.role_group (id,name) values ('CONTENT_CURATION','CONTENT_CURATION'); +insert into sunbird.role_group (id,name) values ('CONTENT_REVIEW','CONTENT_REVIEW'); +drop table sunbird.role; +CREATE TABLE IF NOT EXISTS sunbird.role(id text, name text,roleGroupId List<text>,status int, PRIMARY KEY (id)); +CREATE INDEX inx_ro_master_name ON sunbird.role(name); +insert into sunbird.role (id,name,rolegroupid,status) values ('ADMIN','ADMIN',['SYSTEM_ADMINISTRATION','ORG_MANAGEMENT'],1); +insert into sunbird.role (id,name,rolegroupid,status) values ('ORG_ADMIN','ORG_ADMIN',['ORG_MANAGEMENT','MEMBERSHIP_MANAGEMENT'],1); +insert into sunbird.role (id,name,rolegroupid,status) values ('ORG_MODERATOR','ORG_MODERATOR',['MEMBERSHIP_MANAGEMENT'],1); +insert into sunbird.role (id,name,rolegroupid,status) values ('CONTENT_CREATOR','CONTENT_CREATOR',['CONTENT_CREATION'],1); +insert into sunbird.role (id,name,rolegroupid,status) values ('CONTENT_REVIEWER','CONTENT_REVIEWER',['CONTENT_CREATION','CONTENT_CURATION','CONTENT_REVIEW'],1); +drop table sunbird.url_action; +CREATE TABLE IF NOT EXISTS sunbird.url_action(id text, url list<text>,name text, PRIMARY KEY (id)); +CREATE INDEX inx_ua_name ON sunbird.url_action(name); +CREATE INDEX inx_ua_url ON sunbird.url_action(url); + +insert into sunbird.url_action (id,name) values ('suspendOrg','suspendOrg'); +insert into sunbird.url_action (id,name) values ('suspendUser','suspendUser'); +insert into sunbird.url_action (id,name) values ('createOrg','createOrg'); +insert into sunbird.url_action (id,name) values ('updateOrg','updateOrg'); +insert into sunbird.url_action (id,name) values ('removeOrg','removeOrg'); +insert into sunbird.url_action (id,name) values ('createUser','createUser'); +insert into sunbird.url_action (id,name) values ('updateUser','updateUser'); +insert into sunbird.url_action (id,name) values ('addMember','addMember'); +insert into sunbird.url_action (id,name) values ('removeMember','removeMember'); +insert into sunbird.url_action (id,name) values ('suspendMember','suspendMember'); +insert into sunbird.url_action (id,name) values ('createCourse','createCourse'); +insert into sunbird.url_action (id,name) values ('updateCourse','updateCourse'); +insert into sunbird.url_action (id,name) values ('createContent','createContent'); +insert into sunbird.url_action (id,name) values ('updateContent','updateContent'); +insert into sunbird.url_action (id,name) values ('flagCourse','flagCourse'); +insert into sunbird.url_action (id,name) values ('flagContent','flagContent'); +insert into sunbird.url_action (id,name) values ('publishCourse','publishCourse'); +insert into sunbird.url_action (id,name) values ('publishContent','publishContent');
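+ +// Editorial note: after the drop/re-create above, url_action rows are keyed by the action name +// itself and url is a list<text>; the urls are attached afterwards with targeted updates, e.g.: +// update sunbird.url_action set url=['/v1/organisation/update'] where id='updateOrg';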
+ALTER table sunbird.role_group add url_action_ids list<text>; + +update sunbird.role_group set url_action_ids=['addMember','removeMember','suspendMember'] where id='MEMBERSHIP_MANAGEMENT'; +update sunbird.role_group set url_action_ids=['createCourse','updateCourse','createContent','updateContent'] where id='CONTENT_CREATION'; +update sunbird.role_group set url_action_ids=['suspendOrg','suspendUser'] where id='SYSTEM_ADMINISTRATION'; +update sunbird.role_group set url_action_ids=['publishCourse','publishContent'] where id='CONTENT_REVIEW'; +update sunbird.role_group set url_action_ids=['createOrg','updateOrg','removeOrg','createUser','updateUser'] where id='ORG_MANAGEMENT'; +update sunbird.role_group set url_action_ids=['flagCourse','flagContent'] where id='CONTENT_CURATION'; + +update sunbird.url_action set url=['/v1/course/publish'] where id='publishContent'; +update sunbird.url_action set url=['/v1/user/create'] where id='addMember'; +update sunbird.url_action set url=['/v1/course/create'] where id='createCourse'; +update sunbird.url_action set url=['/v1/user/create'] where id='createUser'; +update sunbird.url_action set url=['/v1/course/publish'] where id='publishCourse'; +update sunbird.url_action set url=['/v1/organisation/update'] where id='updateOrg'; + +drop index sunbird.inx_uorg_orgid; +ALTER TABLE sunbird.user_org DROP orgid; +ALTER TABLE sunbird.user_org ADD organisationid text; +ALTER TABLE sunbird.user_org ADD addedby text; +ALTER TABLE sunbird.user_org ADD addedbyname text; +CREATE INDEX inx_uorg_orgid ON sunbird.user_org (organisationid); + + +/* +id is a one-way hash of (userId##courseId), where courseId is the identifier of the EkStep course +tocUrl is generated from EkStep +status is one of default(0), inProgress(1), completed(2) +progress is the number of contents completed +*/ +CREATE TABLE IF NOT EXISTS sunbird.user_courses(id text, courseId text, courseName text, userId text, batchId text, enrolledDate text, +description text,tocUrl text,status int,active boolean,delta text,grade text,progress int,lastReadContentId text, +lastReadContentStatus int,addedBy text,courseLogoUrl text, dateTime timestamp, contentId text, PRIMARY KEY (id)); + +CREATE INDEX inx_ucs_userId ON sunbird.user_courses (userId); +CREATE INDEX inx_ucs_courseId ON sunbird.user_courses (courseId); +CREATE INDEX inx_ucs_batchId ON sunbird.user_courses (batchId); +CREATE INDEX inx_ucs_course_name ON sunbird.user_courses (courseName); +CREATE INDEX inx_ucs_status ON sunbird.user_courses (status);
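+ +// Editorial sketch (hypothetical values, commented out so the migration is unchanged): +// insert into sunbird.user_courses (id,userid,courseid,coursename,batchid,enrolleddate,status,progress,active) +// values ('<one-way-hash-of userId##courseId>','user_001','course_001','Sample Course','batch_001','2017-07-07',0,0,true);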
+ +ALTER TABLE sunbird.user_external_identity DROP source; +ALTER TABLE sunbird.user_external_identity ADD provider text; +ALTER TABLE sunbird.user_external_identity ADD externalIdValue text; +DROP INDEX sunbird.inx_uei_source; +CREATE INDEX inx_uei_provider ON sunbird.user_external_identity (provider); + +//changes 7 July 2017 updated organisation table +ALTER TABLE sunbird.organisation ADD rootOrgID text; +ALTER TABLE sunbird.org_mapping ADD rootOrgID text; +CREATE TABLE IF NOT EXISTS sunbird.org_type(id text, name text, PRIMARY KEY (id)); +DROP INDEX sunbird.inx_org_status; +ALTER TABLE sunbird.organisation DROP status; +ALTER TABLE sunbird.organisation ADD status text; + +CREATE INDEX inx_org_status ON sunbird.organisation (status); + +CREATE INDEX inx_u_loginId ON sunbird.user(loginId); + +ALTER TABLE sunbird.user_job_profile ADD isCurrentJob boolean; +ALTER TABLE sunbird.content_consumption ADD progress int; +ALTER TABLE sunbird.content_consumption DROP viewPosition; + +//changes on 12th july 2017 +ALTER TABLE sunbird.user_job_profile ADD isDeleted boolean; +ALTER TABLE sunbird.user_education ADD isDeleted boolean; +ALTER TABLE sunbird.address ADD isDeleted boolean; +ALTER TABLE sunbird.user_org ADD isDeleted boolean; +ALTER TABLE sunbird.user ADD profileSummary text; + +ALTER TABLE sunbird.organisation ADD source text; +ALTER TABLE sunbird.organisation ADD externalId text; + +//to import data from a csv file into a cassandra table, run the commands below (for the page_management and page_section tables) +//change the path of the csv file as needed +//COPY sunbird.page_management(id, appmap,createdby ,createddate ,name ,organisationid ,portalmap ,updatedby ,updateddate ) FROM '/tmp/cql/pageMgmt.csv'; + +//COPY sunbird.page_section(id, alt,createdby ,createddate ,description ,display ,imgurl ,name,searchquery , sectiondatatype ,status , updatedby ,updateddate) FROM '/tmp/cql/pageSection.csv'; diff --git a/ansible/roles/cassandra/templates/cassandra.yaml b/ansible/roles/cassandra/templates/cassandra.yaml new file mode 100755 index 0000000000000000000000000000000000000000..a51f993535553c87ce1859b7d30c7fa45aeb00a7 --- /dev/null +++ b/ansible/roles/cassandra/templates/cassandra.yaml @@ -0,0 +1,1041 @@ +# Cassandra storage config YAML + +# NOTE: +# See http://wiki.apache.org/cassandra/StorageConfiguration for +# full explanations of configuration directives +# /NOTE + +# The name of the cluster. This is mainly used to prevent machines in +# one logical cluster from joining another. +cluster_name: 'Test Cluster' + +# This defines the number of tokens randomly assigned to this node on the ring +# The more tokens, relative to other nodes, the larger the proportion of data +# that this node will store. You probably want all nodes to have the same number +# of tokens assuming they have equal hardware capability. +# +# If you leave this unspecified, Cassandra will use the default of 1 token for legacy compatibility, +# and will use the initial_token as described below. +# +# Specifying initial_token will override this setting on the node's initial start; +# on subsequent starts, this setting will apply even if initial_token is set. +# +# If you already have a cluster with 1 token per node, and wish to migrate to +# multiple tokens per node, see http://wiki.apache.org/cassandra/Operations +num_tokens: 256 + +# Triggers automatic allocation of num_tokens tokens for this node. The allocation +# algorithm attempts to choose tokens in a way that optimizes replicated load over +# the nodes in the datacenter for the replication strategy used by the specified +# keyspace. +# +# The load assigned to each node will be close to proportional to its number of +# vnodes. +# +# Only supported with the Murmur3Partitioner. +# allocate_tokens_for_keyspace: KEYSPACE + +# initial_token allows you to specify tokens manually. While you can use it with +# vnodes (num_tokens > 1, above) -- in which case you should provide a +# comma-separated list -- it's primarily used when adding nodes to legacy clusters +# that do not have vnodes enabled.
+# initial_token: + +# See http://wiki.apache.org/cassandra/HintedHandoff +# May either be "true" or "false" to enable globally +hinted_handoff_enabled: true +# When hinted_handoff_enabled is true, a black list of data centers that will not +# perform hinted handoff +#hinted_handoff_disabled_datacenters: +# - DC1 +# - DC2 +# this defines the maximum amount of time a dead host will have hints +# generated. After it has been dead this long, new hints for it will not be +# created until it has been seen alive and gone down again. +max_hint_window_in_ms: 10800000 # 3 hours + +# Maximum throttle in KBs per second, per delivery thread. This will be +# reduced proportionally to the number of nodes in the cluster. (If there +# are two nodes in the cluster, each delivery thread will use the maximum +# rate; if there are three, each will throttle to half of the maximum, +# since we expect two nodes to be delivering hints simultaneously.) +hinted_handoff_throttle_in_kb: 1024 + +# Number of threads with which to deliver hints; +# Consider increasing this number when you have multi-dc deployments, since +# cross-dc handoff tends to be slower +max_hints_delivery_threads: 2 + +# Directory where Cassandra should store hints. +# If not set, the default directory is $CASSANDRA_HOME/data/hints. +# hints_directory: /var/lib/cassandra/hints + +# How often hints should be flushed from the internal buffers to disk. +# Will *not* trigger fsync. +hints_flush_period_in_ms: 10000 + +# Maximum size for a single hints file, in megabytes. +max_hints_file_size_in_mb: 128 + +# Compression to apply to the hint files. If omitted, hints files +# will be written uncompressed. LZ4, Snappy, and Deflate compressors +# are supported. +#hints_compression: +# - class_name: LZ4Compressor +# parameters: +# - + +# Maximum throttle in KBs per second, total. This will be +# reduced proportionally to the number of nodes in the cluster. +batchlog_replay_throttle_in_kb: 1024 + +# Authentication backend, implementing IAuthenticator; used to identify users +# Out of the box, Cassandra provides org.apache.cassandra.auth.{AllowAllAuthenticator, +# PasswordAuthenticator}. +# +# - AllowAllAuthenticator performs no checks - set it to disable authentication. +# - PasswordAuthenticator relies on username/password pairs to authenticate +# users. It keeps usernames and hashed passwords in system_auth.credentials table. +# Please increase system_auth keyspace replication factor if you use this authenticator. +# If using PasswordAuthenticator, CassandraRoleManager must also be used (see below) +authenticator: AllowAllAuthenticator + +# Authorization backend, implementing IAuthorizer; used to limit access/provide permissions +# Out of the box, Cassandra provides org.apache.cassandra.auth.{AllowAllAuthorizer, +# CassandraAuthorizer}. +# +# - AllowAllAuthorizer allows any action to any user - set it to disable authorization. +# - CassandraAuthorizer stores permissions in system_auth.permissions table. Please +# increase system_auth keyspace replication factor if you use this authorizer. +authorizer: AllowAllAuthorizer + +# Part of the Authentication & Authorization backend, implementing IRoleManager; used +# to maintain grants and memberships between roles. +# Out of the box, Cassandra provides org.apache.cassandra.auth.CassandraRoleManager, +# which stores role information in the system_auth keyspace. 
Most functions of the +# IRoleManager require an authenticated login, so unless the configured IAuthenticator +# actually implements authentication, most of this functionality will be unavailable. +# +# - CassandraRoleManager stores role data in the system_auth keyspace. Please +# increase system_auth keyspace replication factor if you use this role manager. +role_manager: CassandraRoleManager + +# Validity period for roles cache (fetching granted roles can be an expensive +# operation depending on the role manager, CassandraRoleManager is one example) +# Granted roles are cached for authenticated sessions in AuthenticatedUser and +# after the period specified here, become eligible for (async) reload. +# Defaults to 2000, set to 0 to disable caching entirely. +# Will be disabled automatically for AllowAllAuthenticator. +roles_validity_in_ms: 2000 + +# Refresh interval for roles cache (if enabled). +# After this interval, cache entries become eligible for refresh. Upon next +# access, an async reload is scheduled and the old value returned until it +# completes. If roles_validity_in_ms is non-zero, then this must be +# non-zero as well. +# Defaults to the same value as roles_validity_in_ms. +# roles_update_interval_in_ms: 2000 + +# Validity period for permissions cache (fetching permissions can be an +# expensive operation depending on the authorizer, CassandraAuthorizer is +# one example). Defaults to 2000, set to 0 to disable. +# Will be disabled automatically for AllowAllAuthorizer. +permissions_validity_in_ms: 2000 + +# Refresh interval for permissions cache (if enabled). +# After this interval, cache entries become eligible for refresh. Upon next +# access, an async reload is scheduled and the old value returned until it +# completes. If permissions_validity_in_ms is non-zero, then this must be +# non-zero as well. +# Defaults to the same value as permissions_validity_in_ms. +# permissions_update_interval_in_ms: 2000 + +# Validity period for credentials cache. This cache is tightly coupled to +# the provided PasswordAuthenticator implementation of IAuthenticator. If +# another IAuthenticator implementation is configured, this cache will not +# be automatically used and so the following settings will have no effect. +# Please note, credentials are cached in their encrypted form, so while +# activating this cache may reduce the number of queries made to the +# underlying table, it may not bring a significant reduction in the +# latency of individual authentication attempts. +# Defaults to 2000, set to 0 to disable credentials caching. +credentials_validity_in_ms: 2000 + +# Refresh interval for credentials cache (if enabled). +# After this interval, cache entries become eligible for refresh. Upon next +# access, an async reload is scheduled and the old value returned until it +# completes. If credentials_validity_in_ms is non-zero, then this must be +# non-zero as well. +# Defaults to the same value as credentials_validity_in_ms. +# credentials_update_interval_in_ms: 2000 + +# The partitioner is responsible for distributing groups of rows (by +# partition key) across nodes in the cluster. You should leave this +# alone for new clusters. The partitioner can NOT be changed without +# reloading all data, so when upgrading you should set this to the +# same partitioner you were already using. +# +# Besides Murmur3Partitioner, partitioners included for backwards +# compatibility include RandomPartitioner, ByteOrderedPartitioner, and +# OrderPreservingPartitioner.
+# +partitioner: org.apache.cassandra.dht.Murmur3Partitioner + +# Directories where Cassandra should store data on disk. Cassandra +# will spread data evenly across them, subject to the granularity of +# the configured compaction strategy. +# If not set, the default directory is $CASSANDRA_HOME/data/data. +data_file_directories: + - /var/lib/cassandra/data + +# commit log. when running on magnetic HDD, this should be a +# separate spindle from the data directories. +# If not set, the default directory is $CASSANDRA_HOME/data/commitlog. +commitlog_directory: /var/lib/cassandra/commitlog + +# policy for data disk failures: +# die: shut down gossip and client transports and kill the JVM for any fs errors or +# single-sstable errors, so the node can be replaced. +# stop_paranoid: shut down gossip and client transports even for single-sstable errors, +# kill the JVM for errors during startup. +# stop: shut down gossip and client transports, leaving the node effectively dead, but +# can still be inspected via JMX, kill the JVM for errors during startup. +# best_effort: stop using the failed disk and respond to requests based on +# remaining available sstables. This means you WILL see obsolete +# data at CL.ONE! +# ignore: ignore fatal errors and let requests fail, as in pre-1.2 Cassandra +disk_failure_policy: stop + +# policy for commit disk failures: +# die: shut down gossip and Thrift and kill the JVM, so the node can be replaced. +# stop: shut down gossip and Thrift, leaving the node effectively dead, but +# can still be inspected via JMX. +# stop_commit: shutdown the commit log, letting writes collect but +# continuing to service reads, as in pre-2.0.5 Cassandra +# ignore: ignore fatal errors and let the batches fail +commit_failure_policy: stop + +# Maximum size of the native protocol prepared statement cache +# +# Valid values are either "auto" (omitting the value) or a value greater 0. +# +# Note that specifying too large a value will result in long running GCs and possibly +# out-of-memory errors. Keep the value at a small fraction of the heap. +# +# If you constantly see "prepared statements discarded in the last minute because +# cache limit reached" messages, the first step is to investigate the root cause +# of these messages and check whether prepared statements are used correctly - +# i.e. use bind markers for variable parts. +# +# Only change the default value if you really have more prepared statements than +# fit in the cache. In most cases it is not necessary to change this value. +# Constantly re-preparing statements is a performance penalty. +# +# Default value ("auto") is 1/256th of the heap or 10MB, whichever is greater +prepared_statements_cache_size_mb: + +# Maximum size of the Thrift prepared statement cache +# +# If you do not use Thrift at all, it is safe to leave this value at "auto". +# +# See description of 'prepared_statements_cache_size_mb' above for more information. +# +# Default value ("auto") is 1/256th of the heap or 10MB, whichever is greater +thrift_prepared_statements_cache_size_mb: + +# Maximum size of the key cache in memory. +# +# Each key cache hit saves 1 seek and each row cache hit saves 2 seeks at the +# minimum, sometimes more. The key cache is fairly tiny for the amount of +# time it saves, so it's worthwhile to use it at large numbers. +# The row cache saves even more time, but must contain the entire row, +# so it is extremely space-intensive. It's best to only use the +# row cache if you have hot rows or static rows.
+# +# NOTE: if you reduce the size, you may not get your hottest keys loaded on startup. +# +# Default value is empty to make it "auto" (min(5% of Heap (in MB), 100MB)). Set to 0 to disable key cache. +key_cache_size_in_mb: + +# Duration in seconds after which Cassandra should +# save the key cache. Caches are saved to saved_caches_directory as +# specified in this configuration file. +# +# Saved caches greatly improve cold-start speeds, and are relatively cheap in +# terms of I/O for the key cache. Row cache saving is much more expensive and +# has limited use. +# +# Default is 14400 or 4 hours. +key_cache_save_period: 14400 + +# Number of keys from the key cache to save +# Disabled by default, meaning all keys are going to be saved +# key_cache_keys_to_save: 100 + +# Row cache implementation class name. +# Available implementations: +# org.apache.cassandra.cache.OHCProvider Fully off-heap row cache implementation (default). +# org.apache.cassandra.cache.SerializingCacheProvider This is the row cache implementation available +# in previous releases of Cassandra. +# row_cache_class_name: org.apache.cassandra.cache.OHCProvider + +# Maximum size of the row cache in memory. +# Please note that OHC cache implementation requires some additional off-heap memory to manage +# the map structures and some in-flight memory during operations before/after cache entries can be +# accounted against the cache capacity. This overhead is usually small compared to the whole capacity. +# Do not specify more memory than the system can afford in the worst usual situation and leave some +# headroom for OS block level cache. Never allow your system to swap. +# +# Default value is 0, to disable row caching. +row_cache_size_in_mb: 0 + +# Duration in seconds after which Cassandra should save the row cache. +# Caches are saved to saved_caches_directory as specified in this configuration file. +# +# Saved caches greatly improve cold-start speeds, and are relatively cheap in +# terms of I/O for the key cache. Row cache saving is much more expensive and +# has limited use. +# +# Default is 0 to disable saving the row cache. +row_cache_save_period: 0 + +# Number of keys from the row cache to save. +# Specify 0 (which is the default), meaning all keys are going to be saved +# row_cache_keys_to_save: 100 + +# Maximum size of the counter cache in memory. +# +# Counter cache helps to reduce counter locks' contention for hot counter cells. +# In case of RF = 1 a counter cache hit will cause Cassandra to skip the read before +# write entirely. With RF > 1 a counter cache hit will still help to reduce the duration +# of the lock hold, helping with hot counter cell updates, but will not allow skipping +# the read entirely. Only the local (clock, count) tuple of a counter cell is kept +# in memory, not the whole counter, so it's relatively cheap. +# +# NOTE: if you reduce the size, you may not get your hottest keys loaded on startup. +# +# Default value is empty to make it "auto" (min(2.5% of Heap (in MB), 50MB)). Set to 0 to disable counter cache. +# NOTE: if you perform counter deletes and rely on low gcgs, you should disable the counter cache. +counter_cache_size_in_mb: + +# Duration in seconds after which Cassandra should +# save the counter cache (keys only). Caches are saved to saved_caches_directory as +# specified in this configuration file. +# +# Default is 7200 or 2 hours.
+counter_cache_save_period: 7200 + +# Number of keys from the counter cache to save +# Disabled by default, meaning all keys are going to be saved +# counter_cache_keys_to_save: 100 + +# saved caches +# If not set, the default directory is $CASSANDRA_HOME/data/saved_caches. +saved_caches_directory: /var/lib/cassandra/saved_caches + +# commitlog_sync may be either "periodic" or "batch." +# +# When in batch mode, Cassandra won't ack writes until the commit log +# has been fsynced to disk. It will wait +# commitlog_sync_batch_window_in_ms milliseconds between fsyncs. +# This window should be kept short because the writer threads will +# be unable to do extra work while waiting. (You may need to increase +# concurrent_writes for the same reason.) +# +# commitlog_sync: batch +# commitlog_sync_batch_window_in_ms: 2 +# +# the other option is "periodic" where writes may be acked immediately +# and the CommitLog is simply synced every commitlog_sync_period_in_ms +# milliseconds. +commitlog_sync: periodic +commitlog_sync_period_in_ms: 10000 + +# The size of the individual commitlog file segments. A commitlog +# segment may be archived, deleted, or recycled once all the data +# in it (potentially from each columnfamily in the system) has been +# flushed to sstables. +# +# The default size is 32, which is almost always fine, but if you are +# archiving commitlog segments (see commitlog_archiving.properties), +# then you probably want a finer granularity of archiving; 8 or 16 MB +# is reasonable. +# Max mutation size is also configurable via max_mutation_size_in_kb setting in +# cassandra.yaml. The default is half of commitlog_segment_size_in_mb * 1024. +# +# NOTE: If max_mutation_size_in_kb is set explicitly then commitlog_segment_size_in_mb must +# be set to at least twice the size of max_mutation_size_in_kb / 1024 +# +commitlog_segment_size_in_mb: 32 + +# Compression to apply to the commit log. If omitted, the commit log +# will be written uncompressed. LZ4, Snappy, and Deflate compressors +# are supported. +#commitlog_compression: +# - class_name: LZ4Compressor +# parameters: +# - + +# any class that implements the SeedProvider interface and has a +# constructor that takes a Map<String, String> of parameters will do. +seed_provider: + # Addresses of hosts that are deemed contact points. + # Cassandra nodes use this list of hosts to find each other and learn + # the topology of the ring. You must change this if you are running + # multiple nodes! + - class_name: org.apache.cassandra.locator.SimpleSeedProvider + parameters: + # seeds is actually a comma-delimited list of addresses. + # Ex: "<ip1>,<ip2>,<ip3>" + - seeds: "127.0.0.1" + +# For workloads with more data than can fit in memory, Cassandra's +# bottleneck will be reads that need to fetch data from +# disk. "concurrent_reads" should be set to (16 * number_of_drives) in +# order to allow the operations to enqueue low enough in the stack +# that the OS and drives can reorder them. Same applies to +# "concurrent_counter_writes", since counter writes read the current +# values before incrementing and writing them back. +# +# On the other hand, since writes are almost never IO bound, the ideal +# number of "concurrent_writes" is dependent on the number of cores in +# your system; (8 * number_of_cores) is a good rule of thumb.
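+# Editorial example (not from the upstream defaults): with 2 data drives and 8 cores, the +# guidance above works out to concurrent_reads: 32 (16 * 2) and concurrent_writes: 64 (8 * 8); +# the stock values below simply keep all three settings at 32.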
+concurrent_reads: 32 +concurrent_writes: 32 +concurrent_counter_writes: 32 + +# For materialized view writes, as there is a read involved, this should +# be limited by the lesser of concurrent reads or concurrent writes. +concurrent_materialized_view_writes: 32 + +# Maximum memory to use for sstable chunk cache and buffer pooling. +# 32MB of this are reserved for pooling buffers, the rest is used as a +# cache that holds uncompressed sstable chunks. +# Defaults to the smaller of 1/4 of heap or 512MB. This pool is allocated off-heap, +# so is in addition to the memory allocated for heap. The cache also has on-heap +# overhead which is roughly 128 bytes per chunk (i.e. 0.2% of the reserved size +# if the default 64k chunk size is used). +# Memory is only allocated when needed. +# file_cache_size_in_mb: 512 + +# Flag indicating whether to allocate on or off heap when the sstable buffer +# pool is exhausted, that is when it has exceeded the maximum memory +# file_cache_size_in_mb, beyond which it will not cache buffers but allocate on request. + +# buffer_pool_use_heap_if_exhausted: true + +# The strategy for optimizing disk read +# Possible values are: +# ssd (for solid state disks, the default) +# spinning (for spinning disks) +# disk_optimization_strategy: ssd + +# Total permitted memory to use for memtables. Cassandra will stop +# accepting writes when the limit is exceeded until a flush completes, +# and will trigger a flush based on memtable_cleanup_threshold +# If omitted, Cassandra will set both to 1/4 the size of the heap. +# memtable_heap_space_in_mb: 2048 +# memtable_offheap_space_in_mb: 2048 + +# Ratio of occupied non-flushing memtable size to total permitted size +# that will trigger a flush of the largest memtable. Larger mct will +# mean larger flushes and hence less compaction, but also less concurrent +# flush activity which can make it difficult to keep your disks fed +# under heavy write load. +# +# memtable_cleanup_threshold defaults to 1 / (memtable_flush_writers + 1) +# memtable_cleanup_threshold: 0.11 + +# Specify the way Cassandra allocates and manages memtable memory. +# Options are: +# heap_buffers: on heap nio buffers +# offheap_buffers: off heap (direct) nio buffers +# offheap_objects: off heap objects +memtable_allocation_type: heap_buffers + +# Total space to use for commit logs on disk. +# +# If space gets above this value, Cassandra will flush every dirty CF +# in the oldest segment and remove it. So a small total commitlog space +# will tend to cause more flush activity on less-active columnfamilies. +# +# The default value is the smaller of 8192, and 1/4 of the total space +# of the commitlog volume. +# +# commitlog_total_space_in_mb: 8192 + +# This sets the number of memtable flush writer threads. These will +# be blocked by disk io, and each one will hold a memtable in memory +# while blocked. +# +# memtable_flush_writers defaults to one per data_file_directory. +# +# If your data directories are backed by SSD, you can increase this, but +# avoid having memtable_flush_writers * data_file_directories > number of cores +#memtable_flush_writers: 1 + +# A fixed memory pool size in MB for SSTable index summaries. If left +# empty, this will default to 5% of the heap size. If the memory usage of +# all index summaries exceeds this limit, SSTables with low read rates will +# shrink their index summaries in order to meet this limit. However, this +# is a best-effort process. In extreme conditions Cassandra may need to use +# more than this amount of memory.
+index_summary_capacity_in_mb: + +# How frequently index summaries should be resampled. This is done +# periodically to redistribute memory from the fixed-size pool to sstables +# proportional to their recent read rates. Setting to -1 will disable this +# process, leaving existing index summaries at their current sampling level. +index_summary_resize_interval_in_minutes: 60 + +# Whether to, when doing sequential writing, fsync() at intervals in +# order to force the operating system to flush the dirty +# buffers. Enable this to avoid sudden dirty buffer flushing from +# impacting read latencies. Almost always a good idea on SSDs; not +# necessarily on platters. +trickle_fsync: false +trickle_fsync_interval_in_kb: 10240 + +# TCP port, for commands and data +# For security reasons, you should not expose this port to the internet. Firewall it if needed. +storage_port: 7000 + +# SSL port, for encrypted communication. Unused unless enabled in +# encryption_options +# For security reasons, you should not expose this port to the internet. Firewall it if needed. +ssl_storage_port: 7001 + +# Address or interface to bind to and tell other Cassandra nodes to connect to. +# You _must_ change this if you want multiple nodes to be able to communicate! +# +# Set listen_address OR listen_interface, not both. Interfaces must correspond +# to a single address, IP aliasing is not supported. +# +# Leaving it blank leaves it up to InetAddress.getLocalHost(). This +# will always do the Right Thing _if_ the node is properly configured +# (hostname, name resolution, etc), and the Right Thing is to use the +# address associated with the hostname (it might not be). +# +# Setting listen_address to 0.0.0.0 is always wrong. +# +# If you choose to specify the interface by name and the interface has an ipv4 and an ipv6 address +# you can specify which should be chosen using listen_interface_prefer_ipv6. If false the first ipv4 +# address will be used. If true the first ipv6 address will be used. Defaults to false preferring +# ipv4. If there is only one address it will be selected regardless of ipv4/ipv6. +listen_address: localhost +# listen_interface: eth0 +# listen_interface_prefer_ipv6: false + +# Address to broadcast to other Cassandra nodes +# Leaving this blank will set it to the same value as listen_address +# broadcast_address: 1.2.3.4 + +# When using multiple physical network interfaces, set this +# to true to listen on broadcast_address in addition to +# the listen_address, allowing nodes to communicate in both +# interfaces. +# Ignore this property if the network configuration automatically +# routes between the public and private networks such as EC2. +# listen_on_broadcast_address: false + +# Internode authentication backend, implementing IInternodeAuthenticator; +# used to allow/disallow connections from peer nodes. +# internode_authenticator: org.apache.cassandra.auth.AllowAllInternodeAuthenticator + +# Whether to start the native transport server. +# Please note that the address on which the native transport is bound is the +# same as the rpc_address. The port however is different and specified below. +start_native_transport: true +# port for the CQL native transport to listen for clients on +# For security reasons, you should not expose this port to the internet. Firewall it if needed.
+native_transport_port: 9042 +# Enabling native transport encryption in client_encryption_options allows you to either use +# encryption for the standard port or to use a dedicated, additional port along with the unencrypted +# standard native_transport_port. +# Enabling client encryption and keeping native_transport_port_ssl disabled will use encryption +# for native_transport_port. Setting native_transport_port_ssl to a different value +# from native_transport_port will use encryption for native_transport_port_ssl while +# keeping native_transport_port unencrypted. +# native_transport_port_ssl: 9142 +# The maximum threads for handling requests when the native transport is used. +# This is similar to rpc_max_threads though the default differs slightly (and +# there is no native_transport_min_threads, idle threads will always be stopped +# after 30 seconds). +# native_transport_max_threads: 128 +# +# The maximum size of allowed frame. Frame (requests) larger than this will +# be rejected as invalid. The default is 256MB. +# native_transport_max_frame_size_in_mb: 256 + +# The maximum number of concurrent client connections. +# The default is -1, which means unlimited. +# native_transport_max_concurrent_connections: -1 + +# The maximum number of concurrent client connections per source ip. +# The default is -1, which means unlimited. +# native_transport_max_concurrent_connections_per_ip: -1 + +# Whether to start the thrift rpc server. +start_rpc: false + +# The address or interface to bind the Thrift RPC service and native transport +# server to. +# +# Set rpc_address OR rpc_interface, not both. Interfaces must correspond +# to a single address, IP aliasing is not supported. +# +# Leaving rpc_address blank has the same effect as on listen_address +# (i.e. it will be based on the configured hostname of the node). +# +# Note that unlike listen_address, you can specify 0.0.0.0, but you must also +# set broadcast_rpc_address to a value other than 0.0.0.0. +# +# For security reasons, you should not expose this port to the internet. Firewall it if needed. +# +# If you choose to specify the interface by name and the interface has an ipv4 and an ipv6 address +# you can specify which should be chosen using rpc_interface_prefer_ipv6. If false the first ipv4 +# address will be used. If true the first ipv6 address will be used. Defaults to false preferring +# ipv4. If there is only one address it will be selected regardless of ipv4/ipv6. +rpc_address: localhost +# rpc_interface: eth1 +# rpc_interface_prefer_ipv6: false + +# port for Thrift to listen for clients on +rpc_port: 9160 + +# RPC address to broadcast to drivers and other Cassandra nodes. This cannot +# be set to 0.0.0.0. If left blank, this will be set to the value of +# rpc_address. If rpc_address is set to 0.0.0.0, broadcast_rpc_address must +# be set. +# broadcast_rpc_address: 1.2.3.4 + +# enable or disable keepalive on rpc/native connections +rpc_keepalive: true + +# Cassandra provides two out-of-the-box options for the RPC Server: +# +# sync -> One thread per thrift connection. For a very large number of clients, memory +# will be your limiting factor. On a 64 bit JVM, 180KB is the minimum stack size +# per thread, and that will correspond to your use of virtual memory (but physical memory +# may be limited depending on use of stack space). +# +# hsha -> Stands for "half synchronous, half asynchronous." 
All thrift clients are handled +# asynchronously using a small number of threads that does not vary with the number +# of thrift clients (and thus scales well to many clients). The rpc requests are still +# synchronous (one thread per active request). If hsha is selected then it is essential +# that rpc_max_threads is changed from the default value of unlimited. +# +# The default is sync because on Windows hsha is about 30% slower. On Linux, +# sync/hsha performance is about the same, with hsha of course using less memory. +# +# Alternatively, you can provide your own RPC server by providing the fully-qualified class name +# of an o.a.c.t.TServerFactory that can create an instance of it. +rpc_server_type: sync + +# Uncomment rpc_min|max_thread to set request pool size limits. +# +# Regardless of your choice of RPC server (see above), the number of maximum requests in the +# RPC thread pool dictates how many concurrent requests are possible (but if you are using the sync +# RPC server, it also dictates the number of clients that can be connected at all). +# +# The default is unlimited and thus provides no protection against clients overwhelming the server. You are +# encouraged to set a maximum that makes sense for you in production, but do keep in mind that +# rpc_max_threads represents the maximum number of client requests this server may execute concurrently. +# +# rpc_min_threads: 16 +# rpc_max_threads: 2048 + +# uncomment to set socket buffer sizes on rpc connections +# rpc_send_buff_size_in_bytes: +# rpc_recv_buff_size_in_bytes: + +# Uncomment to set socket buffer size for internode communication +# Note that when setting this, the buffer size is limited by net.core.wmem_max +# and when not setting it, it is defined by net.ipv4.tcp_wmem +# See: +# /proc/sys/net/core/wmem_max +# /proc/sys/net/core/rmem_max +# /proc/sys/net/ipv4/tcp_wmem +# /proc/sys/net/ipv4/tcp_rmem +# and: man tcp +# internode_send_buff_size_in_bytes: +# internode_recv_buff_size_in_bytes: + +# Frame size for thrift (maximum message length). +thrift_framed_transport_size_in_mb: 15 + +# Set to true to have Cassandra create a hard link to each sstable +# flushed or streamed locally in a backups/ subdirectory of the +# keyspace data. Removing these links is the operator's +# responsibility. +incremental_backups: false + +# Whether or not to take a snapshot before each compaction. Be +# careful using this option, since Cassandra won't clean up the +# snapshots for you. Mostly useful if you're paranoid when there +# is a data format change. +snapshot_before_compaction: false + +# Whether or not a snapshot is taken of the data before keyspace truncation +# or dropping of column families. The STRONGLY advised default of true +# should be used to provide data safety. If you set this flag to false, you will +# lose data on truncation or drop. +auto_snapshot: true + +# Granularity of the collation index of rows within a partition. +# Increase if your rows are large, or if you have a very large +# number of rows per partition. The competing goals are these: +# 1) a smaller granularity means more index entries are generated +# and looking up rows within the partition by collation column +# is faster +# 2) but, Cassandra will keep the collation index in memory for hot +# rows (as part of the key cache), so a larger granularity means +# you can cache more hot rows +column_index_size_in_kb: 64 +# Per sstable indexed key cache entries (the collation index in memory +# mentioned above) exceeding this size will not be held on heap.
+# This means that only partition information is held on heap and the +# index entries are read from disk. +# +# Note that this size refers to the size of the +# serialized index information and not the size of the partition. +column_index_cache_size_in_kb: 2 + +# Number of simultaneous compactions to allow, NOT including +# validation "compactions" for anti-entropy repair. Simultaneous +# compactions can help preserve read performance in a mixed read/write +# workload, by mitigating the tendency of small sstables to accumulate +# during a single long-running compaction. The default is usually +# fine and if you experience problems with compaction running too +# slowly or too fast, you should look at +# compaction_throughput_mb_per_sec first. +# +# concurrent_compactors defaults to the smaller of (number of disks, +# number of cores), with a minimum of 2 and a maximum of 8. +# +# If your data directories are backed by SSD, you should increase this +# to the number of cores. +#concurrent_compactors: 1 + +# Throttles compaction to the given total throughput across the entire +# system. The faster you insert data, the faster you need to compact in +# order to keep the sstable count down, but in general, setting this to +# 16 to 32 times the rate you are inserting data is more than sufficient. +# Setting this to 0 disables throttling. Note that this accounts for all types +# of compaction, including validation compaction. +compaction_throughput_mb_per_sec: 16 + +# When compacting, the replacement sstable(s) can be opened before they +# are completely written, and used in place of the prior sstables for +# any range that has been written. This helps to smoothly transfer reads +# between the sstables, reducing page cache churn and keeping hot rows hot +sstable_preemptive_open_interval_in_mb: 50 + +# Throttles all outbound streaming file transfers on this node to the +# given total throughput in Mbps. This is necessary because Cassandra does +# mostly sequential IO when streaming data during bootstrap or repair, which +# can lead to saturating the network connection and degrading rpc performance. +# When unset, the default is 200 Mbps or 25 MB/s. +# stream_throughput_outbound_megabits_per_sec: 200 + +# Throttles all streaming file transfer between the datacenters, +# this setting allows users to throttle inter dc stream throughput in addition +# to throttling all network stream traffic as configured with +# stream_throughput_outbound_megabits_per_sec +# When unset, the default is 200 Mbps or 25 MB/s +# inter_dc_stream_throughput_outbound_megabits_per_sec: 200 + +# How long the coordinator should wait for read operations to complete +read_request_timeout_in_ms: 5000 +# How long the coordinator should wait for seq or index scans to complete +range_request_timeout_in_ms: 10000 +# How long the coordinator should wait for writes to complete +write_request_timeout_in_ms: 2000 +# How long the coordinator should wait for counter writes to complete +counter_write_request_timeout_in_ms: 5000 +# How long a coordinator should continue to retry a CAS operation +# that contends with other proposals for the same row +cas_contention_timeout_in_ms: 1000 +# How long the coordinator should wait for truncates to complete +# (This can be much longer, because unless auto_snapshot is disabled +# we need to flush first so we can snapshot before removing the data.)
+truncate_request_timeout_in_ms: 60000 +# The default timeout for other, miscellaneous operations +request_timeout_in_ms: 10000 + +# Enable operation timeout information exchange between nodes to accurately +# measure request timeouts. If disabled, replicas will assume that requests +# were forwarded to them instantly by the coordinator, which means that +# under overload conditions we will waste that much extra time processing +# already-timed-out requests. +# +# Warning: before enabling this property make sure ntp is installed +# and the times are synchronized between the nodes. +cross_node_timeout: false + +# Set socket timeout for streaming operation. +# The stream session is failed if no data is received by any of the +# participants within that period. +# Default value is 3600000, which means streams timeout after an hour. +# streaming_socket_timeout_in_ms: 3600000 + +# phi value that must be reached for a host to be marked down. +# most users should never need to adjust this. +# phi_convict_threshold: 8 + +# endpoint_snitch -- Set this to a class that implements +# IEndpointSnitch. The snitch has two functions: +# - it teaches Cassandra enough about your network topology to route +# requests efficiently +# - it allows Cassandra to spread replicas around your cluster to avoid +# correlated failures. It does this by grouping machines into +# "datacenters" and "racks." Cassandra will do its best not to have +# more than one replica on the same "rack" (which may not actually +# be a physical location) +# +# IF YOU CHANGE THE SNITCH AFTER DATA IS INSERTED INTO THE CLUSTER, +# YOU MUST RUN A FULL REPAIR, SINCE THE SNITCH AFFECTS WHERE REPLICAS +# ARE PLACED. +# +# IF THE RACK A REPLICA IS PLACED IN CHANGES AFTER THE REPLICA HAS BEEN +# ADDED TO A RING, THE NODE MUST BE DECOMMISSIONED AND REBOOTSTRAPPED. +# +# Out of the box, Cassandra provides +# - SimpleSnitch: +# Treats Strategy order as proximity. This can improve cache +# locality when disabling read repair. Only appropriate for +# single-datacenter deployments. +# - GossipingPropertyFileSnitch +# This should be your go-to snitch for production use. The rack +# and datacenter for the local node are defined in +# cassandra-rackdc.properties and propagated to other nodes via +# gossip. If cassandra-topology.properties exists, it is used as a +# fallback, allowing migration from the PropertyFileSnitch. +# - PropertyFileSnitch: +# Proximity is determined by rack and data center, which are +# explicitly configured in cassandra-topology.properties. +# - Ec2Snitch: +# Appropriate for EC2 deployments in a single Region. Loads Region +# and Availability Zone information from the EC2 API. The Region is +# treated as the datacenter, and the Availability Zone as the rack. +# Only private IPs are used, so this will not work across multiple +# Regions. +# - Ec2MultiRegionSnitch: +# Uses public IPs as broadcast_address to allow cross-region +# connectivity. (Thus, you should set seed addresses to the public +# IP as well.) You will need to open the storage_port or +# ssl_storage_port on the public IP firewall. (For intra-Region +# traffic, Cassandra will switch to the private IP after +# establishing a connection.) +# - RackInferringSnitch: +# Proximity is determined by rack and data center, which are +# assumed to correspond to the 3rd and 2nd octet of each node's IP +# address, respectively.
Unless this happens to match your +# deployment conventions, this is best used as an example of +# writing a custom Snitch class and is provided in that spirit. +# +# You can use a custom Snitch by setting this to the full class name +# of the snitch, which will be assumed to be on your classpath. +endpoint_snitch: SimpleSnitch + +# controls how often to perform the more expensive part of host score +# calculation +dynamic_snitch_update_interval_in_ms: 100 +# controls how often to reset all host scores, allowing a bad host to +# possibly recover +dynamic_snitch_reset_interval_in_ms: 600000 +# if set greater than zero and read_repair_chance is < 1.0, this will allow +# 'pinning' of replicas to hosts in order to increase cache capacity. +# The badness threshold will control how much worse the pinned host has to be +# before the dynamic snitch will prefer other replicas over it. This is +# expressed as a double which represents a percentage. Thus, a value of +# 0.2 means Cassandra would continue to prefer the static snitch values +# until the pinned host was 20% worse than the fastest. +dynamic_snitch_badness_threshold: 0.1 + +# request_scheduler -- Set this to a class that implements +# RequestScheduler, which will schedule incoming client requests +# according to the specific policy. This is useful for multi-tenancy +# with a single Cassandra cluster. +# NOTE: This is specifically for requests from the client and does +# not affect inter node communication. +# org.apache.cassandra.scheduler.NoScheduler - No scheduling takes place +# org.apache.cassandra.scheduler.RoundRobinScheduler - Round robin of +# client requests to a node with a separate queue for each +# request_scheduler_id. The scheduler is further customized by +# request_scheduler_options as described below. +request_scheduler: org.apache.cassandra.scheduler.NoScheduler + +# Scheduler Options vary based on the type of scheduler +# NoScheduler - Has no options +# RoundRobin +# - throttle_limit -- The throttle_limit is the number of in-flight +# requests per client. Requests beyond +# that limit are queued up until +# running requests can complete. +# The value of 80 here is twice the number of +# concurrent_reads + concurrent_writes. +# - default_weight -- default_weight is optional and allows for +# overriding the default which is 1. +# - weights -- Weights are optional and will default to 1 or the +# overridden default_weight. The weight translates into how +# many requests are handled during each turn of the +# RoundRobin, based on the scheduler id. +# +# request_scheduler_options: +# throttle_limit: 80 +# default_weight: 5 +# weights: +# Keyspace1: 1 +# Keyspace2: 5 + +# request_scheduler_id -- An identifier based on which to perform +# the request scheduling. Currently the only valid option is keyspace. +# request_scheduler_id: keyspace + +# Enable or disable inter-node encryption +# JVM defaults for supported SSL socket protocols and cipher suites can +# be replaced using custom encryption options. This is not recommended +# unless you have policies in place that dictate certain settings, or +# need to disable vulnerable ciphers or protocols in case the JVM cannot +# be updated. 
+# FIPS compliant settings can be configured at JVM level and should not +# involve changing encryption settings here: +# https://docs.oracle.com/javase/8/docs/technotes/guides/security/jsse/FIPS.html +# NOTE: No custom encryption options are enabled at the moment +# The available internode options are: all, none, dc, rack +# +# If set to dc cassandra will encrypt the traffic between the DCs +# If set to rack cassandra will encrypt the traffic between the racks +# +# The passwords used in these options must match the passwords used when generating +# the keystore and truststore. For instructions on generating these files, see: +# http://download.oracle.com/javase/6/docs/technotes/guides/security/jsse/JSSERefGuide.html#CreateKeystore +# +server_encryption_options: + internode_encryption: none + keystore: conf/.keystore + keystore_password: cassandra + truststore: conf/.truststore + truststore_password: cassandra + # More advanced defaults below: + # protocol: TLS + # algorithm: SunX509 + # store_type: JKS + # cipher_suites: [TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_256_CBC_SHA,TLS_DHE_RSA_WITH_AES_128_CBC_SHA,TLS_DHE_RSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA] + # require_client_auth: false + # require_endpoint_verification: false + +# enable or disable client/server encryption. +client_encryption_options: + enabled: false + # If enabled and optional is set to true encrypted and unencrypted connections are handled. + optional: false + keystore: conf/.keystore + keystore_password: cassandra + # require_client_auth: false + # Set truststore and truststore_password if require_client_auth is true + # truststore: conf/.truststore + # truststore_password: cassandra + # More advanced defaults below: + # protocol: TLS + # algorithm: SunX509 + # store_type: JKS + # cipher_suites: [TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_256_CBC_SHA,TLS_DHE_RSA_WITH_AES_128_CBC_SHA,TLS_DHE_RSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA] + +# internode_compression controls whether traffic between nodes is +# compressed. +# can be: all - all traffic is compressed +# dc - traffic between different datacenters is compressed +# none - nothing is compressed. +internode_compression: dc + +# Enable or disable tcp_nodelay for inter-dc communication. +# Disabling it will result in larger (but fewer) network packets being sent, +# reducing overhead from the TCP protocol itself, at the cost of increasing +# latency if you block for cross-datacenter responses. +inter_dc_tcp_nodelay: false + +# TTL for different trace types used during logging of the repair process. +tracetype_query_ttl: 86400 +tracetype_repair_ttl: 604800 + +# UDFs (user defined functions) are disabled by default. +# As of Cassandra 3.0 there is a sandbox in place that should prevent execution of evil code. +enable_user_defined_functions: false + +# Enables scripted UDFs (JavaScript UDFs). +# Java UDFs are always enabled, if enable_user_defined_functions is true. +# Enable this option to be able to use UDFs with "language javascript" or any custom JSR-223 provider. +# This option has no effect, if enable_user_defined_functions is false. +enable_scripted_user_defined_functions: false + +# The default Windows kernel timer and scheduling resolution is 15.6ms for power conservation.
+# Lowering this value on Windows can provide much tighter latency and better throughput, however +# some virtualized environments may see a negative performance impact from changing this setting +# below their system default. The sysinternals 'clockres' tool can confirm your system's default +# setting. +windows_timer_interval: 1 + + +# Enables encrypting data at-rest (on disk). Different key providers can be plugged in, but the default reads from +# a JCE-style keystore. A single keystore can hold multiple keys, but the one referenced by +# the "key_alias" is the only key that will be used for encrypt operations; previously used keys +# can still (and should!) be in the keystore and will be used on decrypt operations +# (to handle the case of key rotation). +# +# It is strongly recommended to download and install Java Cryptography Extension (JCE) +# Unlimited Strength Jurisdiction Policy Files for your version of the JDK. +# (current link: http://www.oracle.com/technetwork/java/javase/downloads/jce8-download-2133166.html) +# +# Currently, only the following file types are supported for transparent data encryption, although +# more are coming in future cassandra releases: commitlog, hints +transparent_data_encryption_options: + enabled: false + chunk_length_kb: 64 + cipher: AES/CBC/PKCS5Padding + key_alias: testing:1 + # CBC IV length for AES needs to be 16 bytes (which is also the default size) + # iv_length: 16 + key_provider: + - class_name: org.apache.cassandra.security.JKSKeyProvider + parameters: + - keystore: conf/.keystore + keystore_password: cassandra + store_type: JCEKS + key_password: cassandra + + +##################### +# SAFETY THRESHOLDS # +##################### + +# When executing a scan, within or across a partition, we need to keep the +# tombstones seen in memory so we can return them to the coordinator, which +# will use them to make sure other replicas also know about the deleted rows. +# With workloads that generate a lot of tombstones, this can cause performance +# problems and even exhaust the server heap. +# (http://www.datastax.com/dev/blog/cassandra-anti-patterns-queues-and-queue-like-datasets) +# Adjust the thresholds here if you understand the dangers and want to +# scan more tombstones anyway. These thresholds may also be adjusted at runtime +# using the StorageService mbean. +tombstone_warn_threshold: 1000 +tombstone_failure_threshold: 100000 + +# Log WARN on any batch size exceeding this value. 5kb per batch by default. +# Caution should be taken on increasing the size of this threshold as it can lead to node instability. +batch_size_warn_threshold_in_kb: 5 + +# Fail any batch exceeding this value. 50kb (10x warn threshold) by default.
+
+
+#####################
+# SAFETY THRESHOLDS #
+#####################
+
+# When executing a scan, within or across a partition, we need to keep the
+# tombstones seen in memory so we can return them to the coordinator, which
+# will use them to make sure other replicas also know about the deleted rows.
+# With workloads that generate a lot of tombstones, this can cause performance
+# problems and even exhaust the server heap.
+# (http://www.datastax.com/dev/blog/cassandra-anti-patterns-queues-and-queue-like-datasets)
+# Adjust the thresholds here if you understand the dangers and want to
+# scan more tombstones anyway. These thresholds may also be adjusted at runtime
+# using the StorageService mbean.
+tombstone_warn_threshold: 1000
+tombstone_failure_threshold: 100000
+
+# Log WARN on any batch size exceeding this value. 5kb per batch by default.
+# Caution should be taken on increasing the size of this threshold as it can lead to node instability.
+batch_size_warn_threshold_in_kb: 5
+
+# Fail any batch exceeding this value. 50kb (10x warn threshold) by default.
+batch_size_fail_threshold_in_kb: 50
+
+# Log WARN on any batches not of type LOGGED that span across more partitions than this limit
+unlogged_batch_across_partitions_warn_threshold: 10
+
+# Log a warning when compacting partitions larger than this value
+compaction_large_partition_warning_threshold_mb: 100
+
+# GC Pauses greater than gc_warn_threshold_in_ms will be logged at WARN level
+# Adjust the threshold based on your application throughput requirement
+# By default, Cassandra logs GC Pauses greater than 200 ms at INFO level
+gc_warn_threshold_in_ms: 1000
diff --git a/ansible/roles/cassandra/templates/cassandra.yaml.j2 b/ansible/roles/cassandra/templates/cassandra.yaml.j2
new file mode 100755
index 0000000000000000000000000000000000000000..9308e9cf22e33fced1626890f3bd9dd94923d825
--- /dev/null
+++ b/ansible/roles/cassandra/templates/cassandra.yaml.j2
@@ -0,0 +1,162 @@
+cluster_name: 'Test Cluster'
+num_tokens: 256
+max_hint_window_in_ms: 10800000 # 3 hours
+hinted_handoff_throttle_in_kb: 1024
+max_hints_delivery_threads: 2
+hints_flush_period_in_ms: 10000
+hints_directory: /var/lib/cassandra/hints
+max_hints_file_size_in_mb: 128
+batchlog_replay_throttle_in_kb: 1024
+authenticator: AllowAllAuthenticator
+authorizer: AllowAllAuthorizer
+role_manager: CassandraRoleManager
+roles_validity_in_ms: 2000
+permissions_validity_in_ms: 2000
+credentials_validity_in_ms: 2000
+partitioner: org.apache.cassandra.dht.Murmur3Partitioner
+data_file_directories:
+    - /var/lib/cassandra/data
+commitlog_directory: /var/lib/cassandra/commitlog
+disk_failure_policy: stop
+commit_failure_policy: stop
+prepared_statements_cache_size_mb:
+thrift_prepared_statements_cache_size_mb:
+key_cache_size_in_mb:
+key_cache_save_period: 14400
+row_cache_size_in_mb: 0
+row_cache_save_period: 0
+counter_cache_size_in_mb:
+counter_cache_save_period: 7200
+saved_caches_directory: /var/lib/cassandra/saved_caches
+commitlog_sync: periodic
+commitlog_sync_period_in_ms: 10000
+commitlog_segment_size_in_mb: 32
+seed_provider:
+    # Addresses of hosts that are deemed contact points.
+    # Cassandra nodes use this list of hosts to find each other and learn
+    # the topology of the ring. You must change this if you are running
+    # multiple nodes!
+    - class_name: org.apache.cassandra.locator.SimpleSeedProvider
+      parameters:
+          # seeds is actually a comma-delimited list of addresses.
+          # Ex: "<ip1>,<ip2>,<ip3>"
+          - seeds: {{cassandra.seeds}}
+concurrent_reads: 32
+concurrent_writes: 32
+concurrent_counter_writes: 32
+concurrent_materialized_view_writes: 32
+memtable_allocation_type: heap_buffers
+index_summary_capacity_in_mb:
+index_summary_resize_interval_in_minutes: 60
+trickle_fsync: false
+trickle_fsync_interval_in_kb: 10240
+storage_port: 7000
+ssl_storage_port: 7001
+listen_address: {{cassandra.listen_address}}
+listen_interface: {{cassandra.listen_interface}}
+start_native_transport: true
+native_transport_port: 9042
+start_rpc: false
+rpc_address: {{cassandra.rpc_address}}
+broadcast_rpc_address: {{cassandra.broadcast_rpc_address}}
+broadcast_address: {{cassandra.broadcast_address}}
+rpc_port: 9160
+rpc_keepalive: true
+rpc_server_type: sync
+thrift_framed_transport_size_in_mb: 15
+incremental_backups: false
+snapshot_before_compaction: false
+auto_snapshot: true
+column_index_size_in_kb: 64
+column_index_cache_size_in_kb: 2
+compaction_throughput_mb_per_sec: 16
+sstable_preemptive_open_interval_in_mb: 50
+read_request_timeout_in_ms: 5000
+range_request_timeout_in_ms: 10000
+write_request_timeout_in_ms: 2000
+counter_write_request_timeout_in_ms: 5000
+cas_contention_timeout_in_ms: 1000
+truncate_request_timeout_in_ms: 60000
+request_timeout_in_ms: 10000
+cross_node_timeout: false
+endpoint_snitch: SimpleSnitch
+dynamic_snitch_update_interval_in_ms: 100
+dynamic_snitch_reset_interval_in_ms: 600000
+dynamic_snitch_badness_threshold: 0.1
+request_scheduler: org.apache.cassandra.scheduler.NoScheduler
+server_encryption_options:
+    internode_encryption: none
+    keystore: conf/.keystore
+    keystore_password: cassandra
+    truststore: conf/.truststore
+    truststore_password: cassandra
+    # More advanced defaults below:
+    # protocol: TLS
+    # algorithm: SunX509
+    # store_type: JKS
+    # cipher_suites: [TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_256_CBC_SHA,TLS_DHE_RSA_WITH_AES_128_CBC_SHA,TLS_DHE_RSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA]
+    # require_client_auth: false
+    # require_endpoint_verification: false
+
+# Enable or disable client/server encryption.
+client_encryption_options:
+    enabled: false
+    # If enabled and optional is set to true, both encrypted and unencrypted connections are handled.
+    optional: false
+    keystore: conf/.keystore
+    keystore_password: cassandra
+    # require_client_auth: false
+    # Set truststore and truststore_password if require_client_auth is true
+    # truststore: conf/.truststore
+    # truststore_password: cassandra
+    # More advanced defaults below:
+    # protocol: TLS
+    # algorithm: SunX509
+    # store_type: JKS
+    # cipher_suites: [TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_256_CBC_SHA,TLS_DHE_RSA_WITH_AES_128_CBC_SHA,TLS_DHE_RSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA]
+
+# internode_compression controls whether traffic between nodes is
+# compressed.
+# can be:  all  - all traffic is compressed
+#          dc   - traffic between different datacenters is compressed
+#          none - nothing is compressed.
+internode_compression: dc
+
+# Enable or disable tcp_nodelay for inter-dc communication.
+# Disabling it will result in larger (but fewer) network packets being sent,
+# reducing overhead from the TCP protocol itself, at the cost of increasing
+# latency if you block for cross-datacenter responses.
+inter_dc_tcp_nodelay: false
+
+tracetype_query_ttl: 86400
+tracetype_repair_ttl: 604800
+enable_user_defined_functions: false
+enable_scripted_user_defined_functions: false
+windows_timer_interval: 1
+transparent_data_encryption_options:
+    enabled: false
+    chunk_length_kb: 64
+    cipher: AES/CBC/PKCS5Padding
+    key_alias: testing:1
+    # CBC IV length for AES needs to be 16 bytes (which is also the default size)
+    # iv_length: 16
+    key_provider:
+      - class_name: org.apache.cassandra.security.JKSKeyProvider
+        parameters:
+          - keystore: conf/.keystore
+            keystore_password: cassandra
+            store_type: JCEKS
+            key_password: cassandra
+
+
+#####################
+# SAFETY THRESHOLDS #
+#####################
+
+tombstone_warn_threshold: 1000
+tombstone_failure_threshold: 100000
+batch_size_warn_threshold_in_kb: 5
+batch_size_fail_threshold_in_kb: 50
+unlogged_batch_across_partitions_warn_threshold: 10
+compaction_large_partition_warning_threshold_mb: 100
+gc_warn_threshold_in_ms: 1000
\ No newline at end of file
diff --git a/ansible/roles/cassandra/templates/cassandra_backup.j2 b/ansible/roles/cassandra/templates/cassandra_backup.j2
new file mode 100644
index 0000000000000000000000000000000000000000..6069ddb21d83ae34f37fab642cabdbf05c9327a7
--- /dev/null
+++ b/ansible/roles/cassandra/templates/cassandra_backup.j2
@@ -0,0 +1,59 @@
+#!/bin/sh
+
+export PATH=/sbin:/bin:/usr/sbin:/usr/bin:/opt/java/bin
+
+DATE=`date +%Y%m%d`
+
+SNAME="snapshot-$DATE"
+
+BACKUPDIRECTORY="/data/cassandra/backup/"
+
+if [ ! -d "$BACKUPDIRECTORY" ]; then
+  echo "Directory $BACKUPDIRECTORY not found, creating..."
+  mkdir "$BACKUPDIRECTORY"
+fi
+
+if [ ! -d "$BACKUPDIRECTORY" ]; then
+  echo "Directory $BACKUPDIRECTORY not found, exit..."
+  exit 1
+fi
+
+echo
+echo "Snapshot name: $SNAME"
+echo "Clear all snapshots"
+nodetool -h 127.0.0.1 clearsnapshot
+
+# Guard the cd: running rm -rf * in the wrong directory would be destructive.
+cd "$BACKUPDIRECTORY" || exit 1
+pwd
+rm -rf *
+
+echo "Taking snapshot"
+nodetool -h 127.0.0.1 snapshot -t $SNAME
+SFILES=`ls -1 -d /data/cassandra/data/*/*/snapshots/$SNAME`
+for f in $SFILES
+do
+  echo "Process snapshot $f"
+  TABLE=`echo $f | awk -F/ '{print $(NF-2)}'`
+  KEYSPACE=`echo $f | awk -F/ '{print $(NF-3)}'`
+
+  if [ ! -d "$BACKUPDIRECTORY/$SNAME" ]; then
+    mkdir "$BACKUPDIRECTORY/$SNAME"
+  fi
+
+  if [ ! -d "$BACKUPDIRECTORY/$SNAME/$KEYSPACE" ]; then
+    mkdir "$BACKUPDIRECTORY/$SNAME/$KEYSPACE"
+  fi
+
+  mkdir "$BACKUPDIRECTORY/$SNAME/$KEYSPACE/$TABLE"
+  find $f -maxdepth 1 -type f -exec mv -t "$BACKUPDIRECTORY/$SNAME/$KEYSPACE/$TABLE/" {} +
+done
+cd "$BACKUPDIRECTORY"
+zip -r cassandra_backup_`date +%Y%m%d`.zip .
+cd -
+echo "Clear Incremental Backups"
+SFILES=`ls -1 -d /data/cassandra/data/*/*/backups/`
+for f in $SFILES
+do
+  echo "Clear $f"
+  rm -f "$f"*
+done
\ No newline at end of file
diff --git a/ansible/roles/cassandra/templates/cassandra_jmx.yaml b/ansible/roles/cassandra/templates/cassandra_jmx.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..96ff64555eb5ede3a66a7bad5611c26fd1af00a8
--- /dev/null
+++ b/ansible/roles/cassandra/templates/cassandra_jmx.yaml
@@ -0,0 +1,18 @@
+---
+startDelaySeconds: 0
+hostPort: 127.0.0.1:1234
+jmxUrl: service:jmx:rmi:///jndi/rmi://127.0.0.1:1234/jmxrmi
+ssl: false
+lowercaseOutputName: false
+lowercaseOutputLabelNames: false
+whitelistObjectNames: ["org.apache.cassandra.metrics:*"]
+blacklistObjectNames: ["org.apache.cassandra.metrics:type=ColumnFamily,*"]
+rules:
+  # Single-quoted so YAML does not reject the regex escapes (\w, \d).
+  - pattern: '^org.apache.cassandra.metrics<type=(\w+), name=(\w+)><>Value: (\d+)'
+    name: cassandra_$1_$2
+    value: $3
+    valueFactor: 0.001
+    labels: {}
+    help: "Cassandra metric $1 $2"
+    type: GAUGE
+    attrNameSnakeCase: false
diff --git a/ansible/roles/cassandra/templates/hosts.j2 b/ansible/roles/cassandra/templates/hosts.j2
new file mode 100644
index 0000000000000000000000000000000000000000..aa67e316e1906f9b6f0513969784b437bac4bd59
--- /dev/null
+++ b/ansible/roles/cassandra/templates/hosts.j2
@@ -0,0 +1,10 @@
+127.0.0.1 localhost localhost.localdomain
+127.0.0.1 localhost
+
+# The following lines are desirable for IPv6 capable hosts
+::1 ip6-localhost ip6-loopback
+fe00::0 ip6-localnet
+ff00::0 ip6-mcastprefix
+ff02::1 ip6-allnodes
+ff02::2 ip6-allrouters
+ff02::3 ip6-allhosts
\ No newline at end of file
diff --git a/ansible/roles/cassandra/templates/jmx_httpserver.yml b/ansible/roles/cassandra/templates/jmx_httpserver.yml
new file mode 100644
index 0000000000000000000000000000000000000000..53107ad84d5f06966a7afd8e6a5f9b2a85dba207
--- /dev/null
+++ b/ansible/roles/cassandra/templates/jmx_httpserver.yml
@@ -0,0 +1,7 @@
+---
+hostPort: localhost:7199
+username:
+password:
+
+whitelistObjectNames: ["org.apache.cassandra.metrics:*"]
+blacklistObjectNames: ["org.apache.cassandra.metrics:type=ColumnFamily,*"]
diff --git a/ansible/roles/cassandra/templates/nodetool.j2 b/ansible/roles/cassandra/templates/nodetool.j2
new file mode 100644
index 0000000000000000000000000000000000000000..e9e89d27a5d25163c2201e3d0e3ee2e376cb0f92
--- /dev/null
+++ b/ansible/roles/cassandra/templates/nodetool.j2
@@ -0,0 +1,13 @@
+#!/bin/sh
+snapshot={{snapshot}}
+for keyspace in sunbird
+do
+  echo $keyspace
+  cd {{user_home}}/snapshot-$snapshot/$keyspace
+  for table in *
+  do
+    echo $table
+    table_name=`echo $table | cut -d "-" -f1`
+    nodetool refresh -- $keyspace $table_name
+  done
+done
\ No newline at end of file
diff --git a/ansible/roles/cassandra/templates/pageMgmt.csv b/ansible/roles/cassandra/templates/pageMgmt.csv
new file mode 100644
index 0000000000000000000000000000000000000000..e010f0fb409461f47f554da6b0e1bca2072fcce4
--- /dev/null
+++ b/ansible/roles/cassandra/templates/pageMgmt.csv
@@ -0,0 +1,6 @@
+0122838911932661768,"[{\"id\":\"01228383082462412826\",\"index\":1,\"group\":1},{\"id\":\"01228383384379392023\",\"index\":1,\"group\":2}]",ad54e968-d52f-30a0-bdba-de182aab43b1,2017-07-08 07:17:36:831+0000,Resource,,"[{\"id\":\"01228383082462412826\",\"index\":1,\"group\":1},{\"id\":\"01228383384379392023\",\"index\":1,\"group\":2}]",,
+01228382486252748821,"[{\"id\":\"01228382278062080019\",\"index\":1,\"group\":1},{\"id\":\"01228382243946496017\",\"index\":1,\"group\":2}]",490ed1d2-c69d-4cf0-a50c-f37e658c128d,2017-07-08 05:03:29:672+0000,Course,,"[{\"id\":\"01228382278062080019\",\"index\":1,\"group\":1},{\"id\":\"01228382243946496017\",\"index\":1,\"group\":2}]",,
+0122838909618585607,"[{\"id\":\"01228383082462412826\",\"index\":1,\"group\":1},{\"id\":\"01228383384379392023\",\"index\":1,\"group\":2}]",ad54e968-d52f-30a0-bdba-de182aab43b1,2017-07-08 07:17:36:827+0000,Resource,,"[{\"id\":\"01228383082462412826\",\"index\":1,\"group\":1},{\"id\":\"01228383384379392023\",\"index\":1,\"group\":2}]",,
+01228394137835929612,"[{\"id\":\"01228383082462412826\",\"index\":1,\"group\":1},{\"id\":\"01228383384379392023\",\"index\":1,\"group\":2}]",ad54e968-d52f-30a0-bdba-de182aab43b1,2017-07-08 08:54:56:574+0000,Resourcessss,,"[{\"id\":\"01228383082462412826\",\"index\":1,\"group\":1},{\"id\":\"01228383384379392023\",\"index\":1,\"group\":2}]",ad54e968-d52f-30a0-bdba-de182aab43b1,2017-07-08 09:01:26:990+0000
+01228382478150860822,"[{\"id\":\"01228382278062080019\",\"index\":1,\"group\":1},{\"id\":\"01228382243946496017\",\"index\":1,\"group\":2}]",490ed1d2-c69d-4cf0-a50c-f37e658c128d,2017-07-08 05:03:29:679+0000,Course,,"[{\"id\":\"01228382278062080019\",\"index\":1,\"group\":1},{\"id\":\"01228382243946496017\",\"index\":1,\"group\":2}]",,
+01228393775303884811,"[{\"id\":\"01228383082462412826\",\"index\":1,\"group\":1},{\"id\":\"01228383384379392023\",\"index\":1,\"group\":2}]",ad54e968-d52f-30a0-bdba-de182aab43b1,2017-07-08 08:54:56:572+0000,Resources,,"[{\"id\":\"01228383082462412826\",\"index\":1,\"group\":1},{\"id\":\"01228383384379392023\",\"index\":1,\"group\":2}]",,
\ No newline at end of file
diff --git a/ansible/roles/cassandra/templates/pageSection.csv b/ansible/roles/cassandra/templates/pageSection.csv
new file mode 100644
index 0000000000000000000000000000000000000000..b10991659d0499cc81c5c01cfdb5b7913127476f
--- /dev/null
+++ b/ansible/roles/cassandra/templates/pageSection.csv
@@ -0,0 +1,12 @@
+01228383133972889627,,490ed1d2-c69d-4cf0-a50c-f37e658c128d,2017-07-08 05:10:20:043+0000,,"{\"name\":{\"en\":\"Popular Template\",\"hi\":\"????????\"}}",,Popular Template,"{\"request\":{\"query\":\"\",\"filters\":{\"language\":[\"English\"],\"contentType\":[\"Template\"]},\"limit\":10,\"sort_by\":{\"lastUpdatedOn\":\"desc\"}}}",content,1,,
+01228382966064742425,,490ed1d2-c69d-4cf0-a50c-f37e658c128d,2017-07-08 05:09:40:595+0000,,"{\"name\":{\"en\":\"Popular Worksheet\",\"hi\":\"????????\"}}",,Popular Worksheet,"{\"request\":{\"query\":\"\",\"filters\":{\"language\":[\"English\"],\"contentType\":[\"Worksheet\"]},\"limit\":10,\"sort_by\":{\"lastUpdatedOn\":\"desc\"}}}",content,1,,
+01228382278062080019,,490ed1d2-c69d-4cf0-a50c-f37e658c128d,2017-07-08 05:00:55:600+0000,,"{\"name\":{\"en\":\"Latest Courses\",\"hi\":\"????????\"}}",,Latest Courses,"{\"request\":{\"filters\":{\"contentType\":[\"Course\"],\"objectType\":[\"Content\"],\"status\":[\"Live\"]},\"sort_by\":{\"lastPublishedOn\":\"desc\"},\"limit\":10}}",course,1,,
+01228383082462412826,,490ed1d2-c69d-4cf0-a50c-f37e658c128d,2017-07-08 05:09:40:595+0000,,"{\"name\":{\"en\":\"Popular Worksheet\",\"hi\":\"????????\"}}",,Popular Worksheet,"{\"request\":{\"query\":\"\",\"filters\":{\"language\":[\"English\"],\"contentType\":[\"Worksheet\"]},\"limit\":10,\"sort_by\":{\"lastUpdatedOn\":\"desc\"}}}",content,1,,
+01228382897002905629,,490ed1d2-c69d-4cf0-a50c-f37e658c128d,2017-07-08 05:11:14:511+0000,,"{\"name\":{\"en\":\"Popular Simulation\",\"hi\":\"????????\"}}",,Popular Simulation,"{\"request\":{\"query\":\"\",\"filters\":{\"language\":[\"English\"],\"contentType\":[\"Simulation\"]},\"limit\":10,\"sort_by\":{\"lastUpdatedOn\":\"desc\"}}}",content,1,,
+01228382681137152020,,490ed1d2-c69d-4cf0-a50c-f37e658c128d,2017-07-08 05:00:55:601+0000,,"{\"name\":{\"en\":\"Latest Courses\",\"hi\":\"????????\"}}",,Latest Courses,"{\"request\":{\"filters\":{\"contentType\":[\"Course\"],\"objectType\":[\"Content\"],\"status\":[\"Live\"]},\"sort_by\":{\"lastPublishedOn\":\"desc\"},\"limit\":10}}",course,1,,
+01228382662997606424,,490ed1d2-c69d-4cf0-a50c-f37e658c128d,2017-07-08 05:08:56:272+0000,,"{\"name\":{\"en\":\"Popular Story\",\"hi\":\"????????\"}}",,Popular Story,"{\"request\":{\"query\":\"\",\"filters\":{\"language\":[\"English\"],\"contentType\":[\"Story\"]},\"limit\":10,\"sort_by\":{\"lastUpdatedOn\":\"desc\"}}}",content,1,,
+01228382337862041618,,490ed1d2-c69d-4cf0-a50c-f37e658c128d,2017-07-08 05:00:16:949+0000,,"{\"name\":{\"en\":\"Popular Courses\",\"hi\":\"????????\"}}",,Popular Courses,"{\"request\":{\"filters\":{\"contentType\":[\"Course\"],\"objectType\":[\"Content\"],\"status\":[\"Live\"]},\"sort_by\":{\"name\":\"asc\"},\"limit\":10}}",course,1,,
+01228383171081011228,,490ed1d2-c69d-4cf0-a50c-f37e658c128d,2017-07-08 05:10:20:049+0000,,"{\"name\":{\"en\":\"Popular Template\",\"hi\":\"????????\"}}",,Popular Template,"{\"request\":{\"query\":\"\",\"filters\":{\"language\":[\"English\"],\"contentType\":[\"Template\"]},\"limit\":10,\"sort_by\":{\"lastUpdatedOn\":\"desc\"}}}",content,1,,
+01228382243946496017,,490ed1d2-c69d-4cf0-a50c-f37e658c128d,2017-07-08 05:00:16:949+0000,,"{\"name\":{\"en\":\"Popular Courses\",\"hi\":\"????????\"}}",,Popular Courses,"{\"request\":{\"filters\":{\"contentType\":[\"Course\"],\"objectType\":[\"Content\"],\"status\":[\"Live\"]},\"sort_by\":{\"name\":\"asc\"},\"limit\":10}}",course,1,,
+01228383384379392023,,490ed1d2-c69d-4cf0-a50c-f37e658c128d,2017-07-08 05:08:56:272+0000,,"{\"name\":{\"en\":\"Popular Story\",\"hi\":\"????????\"}}",,Popular Story,"{\"request\":{\"query\":\"\",\"filters\":{\"language\":[\"English\"],\"contentType\":[\"Story\"]},\"limit\":10,\"sort_by\":{\"lastUpdatedOn\":\"desc\"}}}",content,1,,
+01228383260541747230,,490ed1d2-c69d-4cf0-a50c-f37e658c128d,2017-07-08 05:11:14:512+0000,,"{\"name\":{\"en\":\"Popular Simulation\",\"hi\":\"????????\"}}",,Popular Simulation,"{\"request\":{\"query\":\"\",\"filters\":{\"language\":[\"English\"],\"contentType\":[\"Simulation\"]},\"limit\":10,\"sort_by\":{\"lastUpdatedOn\":\"desc\"}}}",content,1,,
\ No newline at end of file
diff --git a/ansible/roles/cassandra/templates/run_jmx_httpserver.sh b/ansible/roles/cassandra/templates/run_jmx_httpserver.sh
new file mode 100755
index 0000000000000000000000000000000000000000..4c8feccf4c16e6ddf40aa96f333b1a63d846c7dd
--- /dev/null
+++ b/ansible/roles/cassandra/templates/run_jmx_httpserver.sh
@@ -0,0 +1,6 @@
+#!/usr/bin/env bash
+# Script to run a java application for testing jmx4prometheus.
+
+# Note: You can use localhost:5556 instead of 5556 for configuring socket hostname.
+
+java -Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.port=5555 -jar /usr/share/cassandra/lib/jmx_prometheus_httpserver-0.11.jar 5556 /etc/cassandra/jmx_httpserver.yml
diff --git a/ansible/roles/cassandra/vars/main.yml b/ansible/roles/cassandra/vars/main.yml
new file mode 100644
index 0000000000000000000000000000000000000000..569e432c18babb4622e5d578ae44deb5173fe4fc
--- /dev/null
+++ b/ansible/roles/cassandra/vars/main.yml
@@ -0,0 +1 @@
+cassandra_root_dir: '/etc/cassandra'
\ No newline at end of file
diff --git a/ansible/roles/db-dumps/defaults/main.yml b/ansible/roles/db-dumps/defaults/main.yml
new file mode 100644
index 0000000000000000000000000000000000000000..cf4f99a4355bb4960168e7739ef87f17dd8a31bc
--- /dev/null
+++ b/ansible/roles/db-dumps/defaults/main.yml
@@ -0,0 +1,6 @@
+cql_output_root_dir: /tmp/cql
+csv_output_root_dir: "{{cql_output_root_dir}}/csv"
+cqls:
+  - sunbird_orgs
+  - sunbird_user_orgs
+  - sunbird_users
\ No newline at end of file
diff --git a/ansible/roles/db-dumps/tasks/main.yml b/ansible/roles/db-dumps/tasks/main.yml
new file mode 100644
index 0000000000000000000000000000000000000000..509c8af9e5840b1a43c976d301b0bb63543bc301
--- /dev/null
+++ b/ansible/roles/db-dumps/tasks/main.yml
@@ -0,0 +1,33 @@
+- name: Clean CQL artifact path
+  file:
+    state: absent
+    path: "{{ cql_output_root_dir }}"
+
+- name: Clean CSV artifact path
+  file:
+    state: absent
+    path: "{{ csv_output_root_dir }}"
+
+- name: Ensure CQL directory exists
+  file:
+    path: "{{ cql_output_root_dir }}"
+    state: directory
+
+- name: Ensure CSV directory exists
+  file:
+    path: "{{ csv_output_root_dir }}"
+    state: directory
+
+- name: Save CQL
+  template: src="{{ item }}.cql" dest="{{ cql_output_root_dir }}/{{ item }}.cql"
+  with_items: "{{ cqls }}"
+
+- name: Run CQL exports
+  shell: 'cqlsh --debug -f {{ cql_output_root_dir }}/{{ item }}.cql'
+  with_items: "{{ cqls }}"
+
+- name: Fetch generated CSV files
+  fetch:
+    src: "{{ csv_output_root_dir }}/{{ item }}.csv"
+    dest: ./
+    fail_on_missing: yes
+    flat: yes
+  with_items: "{{ cqls }}"
\ No newline at end of file
diff --git a/ansible/roles/db-dumps/templates/sunbird_orgs.cql b/ansible/roles/db-dumps/templates/sunbird_orgs.cql
new file mode 100644
index 0000000000000000000000000000000000000000..71807e721cf464ee6bb47bccc5c0b5f004f50d8a
--- /dev/null
+++ b/ansible/roles/db-dumps/templates/sunbird_orgs.cql
@@ -0,0 +1 @@
+copy sunbird.organisation(id,orgname,provider,createddate) to '{{csv_output_root_dir}}/{{item}}.csv' with HEADER=true;
diff --git a/ansible/roles/db-dumps/templates/sunbird_user_orgs.cql b/ansible/roles/db-dumps/templates/sunbird_user_orgs.cql
new file mode 100644
index 0000000000000000000000000000000000000000..f8a313d8fd15f2fa4a7c18fe60562c1a60eae103
--- /dev/null
+++ b/ansible/roles/db-dumps/templates/sunbird_user_orgs.cql
@@ -0,0 +1 @@
+copy sunbird.user_org(organisationid,userid,orgjoindate,roles) to '{{csv_output_root_dir}}/{{item}}.csv' with HEADER=true;
\ No newline at end of file
diff --git a/ansible/roles/db-dumps/templates/sunbird_users.cql b/ansible/roles/db-dumps/templates/sunbird_users.cql
new file mode 100644
index 0000000000000000000000000000000000000000..24522e583e99c1d83e650d1e5c9e1f72592e35b0
--- /dev/null
+++ b/ansible/roles/db-dumps/templates/sunbird_users.cql
@@ -0,0 +1 @@
+copy sunbird.user(id,provider,createddate) to '{{csv_output_root_dir}}/{{item}}.csv' with HEADER=true;
\ No newline at end of file
diff --git a/ansible/roles/elasticsearch/README.md b/ansible/roles/elasticsearch/README.md
new file mode 100755
index 0000000000000000000000000000000000000000..472777e38d1132f232a0c5c35526cfa684950668
--- /dev/null
+++ b/ansible/roles/elasticsearch/README.md
@@ -0,0 +1,413 @@
+# ansible-elasticsearch
+[Ansible Galaxy](https://galaxy.ansible.com/elastic/elasticsearch/)
+
+**THIS ROLE IS FOR 5.x. FOR 2.x SUPPORT PLEASE USE THE 2.x BRANCH.**
+
+Ansible role for 5.x Elasticsearch. Currently this works on Debian and RedHat based Linux systems. Tested platforms are:
+
+* Ubuntu 14.04/16.04
+* Debian 8
+* Centos 7
+
+The latest Elasticsearch versions of 5.x are actively tested. **Only Ansible versions > 2.2.0 are supported.**
+
+## Usage
+
+Create your Ansible playbook with your own tasks, and include the role elasticsearch. You will have to have this repository accessible within the context of the playbook, e.g.
+
+```
+cd /my/repos/
+git clone git@github.com:elastic/ansible-elasticsearch.git
+cd /my/ansible/playbook
+mkdir -p roles
+ln -s /my/repos/ansible-elasticsearch ./roles/elasticsearch
+```
+
+Then create your playbook yaml adding the role elasticsearch. By default, the user is only required to specify an es_instance_name per role application; this should be unique per node.
+The application of the elasticsearch role results in the installation of a node on a host.
+
+The simplest configuration therefore consists of:
+
+```
+---
+- name: Simple Example
+  hosts: localhost
+  roles:
+    - { role: elasticsearch, es_instance_name: "node1" }
+  vars:
+```
+
+The above installs a single node 'node1' on the host 'localhost'.
+
+This role also uses [Ansible tags](http://docs.ansible.com/ansible/playbooks_tags.html). Run your playbook with the `--list-tasks` flag for more information.
+
+### Basic Elasticsearch Configuration
+
+All Elasticsearch configuration parameters are supported. This is achieved using a configuration map parameter 'es_config' which is serialized into the elasticsearch.yml file.
+The use of a map ensures the Ansible playbook does not need to be updated to reflect new/deprecated/plugin configuration parameters.
+
+In addition to the es_config map, several other parameters are supported for additional functions e.g. script installation. These can be found in the role's defaults/main.yml file.
+
+The following illustrates applying configuration parameters to an Elasticsearch instance. By default, Elasticsearch 5.1.2 is installed.
+
+```
+- name: Elasticsearch with custom configuration
+  hosts: localhost
+  roles:
+    #expand to all available parameters
+    - { role: elasticsearch, es_instance_name: "node1", es_data_dirs: "/opt/elasticsearch/data", es_log_dir: "/opt/elasticsearch/logs",
+    es_config: {
+      node.name: "node1",
+      cluster.name: "custom-cluster",
+      discovery.zen.ping.unicast.hosts: "localhost:9301",
+      http.port: 9201,
+      transport.tcp.port: 9301,
+      node.data: false,
+      node.master: true,
+      bootstrap.memory_lock: true,
+    }
+  }
+  vars:
+    es_scripts: false
+    es_templates: false
+    es_version_lock: false
+    es_heap_size: 1g
+    es_api_port: 9201
+```
+
+The role utilises Elasticsearch version defaults. The following should be set to ensure a successful cluster forms.
+
+* ```es_config['http.port']``` - the http port for the node
+* ```es_config['transport.tcp.port']``` - the transport port for the node
+* ```es_config['discovery.zen.ping.unicast.hosts']``` - the unicast discovery list, in the comma separated format ```"<host>:<port>,<host>:<port>"``` (typically the cluster's dedicated masters)
+* ```es_config['network.host']``` - sets both network.bind_host and network.publish_host to the same host value. The network.bind_host setting controls which host the different network components bind on; the network.publish_host setting controls the host the node publishes to other nodes in the cluster so that they are able to connect to it.
+
+See https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-network.html for further details on default binding behaviour and available options.
+The role makes no attempt to enforce the setting of these and requires users to specify them appropriately. It is recommended that master nodes are listed, and thus deployed, first where possible.
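+
+For instance, a minimal sketch of just these cluster-forming settings (the host names and ports below are illustrative, not defaults):
+
+```
+es_config: {
+  network.host: "192.168.1.10",
+  http.port: 9200,
+  transport.tcp.port: 9300,
+  discovery.zen.ping.unicast.hosts: "192.168.1.10:9300,192.168.1.11:9300",
+}
+```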
+
+A more complex example:
+
+```
+---
+- name: Elasticsearch with custom configuration
+  hosts: localhost
+  roles:
+    #expand to all available parameters
+    - { role: elasticsearch, es_instance_name: "node1", es_data_dirs: "/opt/elasticsearch/data", es_log_dir: "/opt/elasticsearch/logs",
+    es_config: {
+      node.name: "node1",
+      cluster.name: "custom-cluster",
+      discovery.zen.ping.unicast.hosts: "localhost:9301",
+      http.port: 9201,
+      transport.tcp.port: 9301,
+      node.data: false,
+      node.master: true,
+      bootstrap.memory_lock: true,
+    }
+  }
+  vars:
+    es_scripts: false
+    es_templates: false
+    es_version_lock: false
+    es_heap_size: 1g
+    es_start_service: false
+    es_plugins_reinstall: false
+    es_api_port: 9201
+    es_plugins:
+      - plugin: ingest-geoip
+        proxy_host: proxy.example.com
+        proxy_port: 8080
+```
+
+#### Important Note
+
+**The role uses es_api_host and es_api_port to communicate with the node for actions only achievable via http, e.g. to install templates and to check that the node is active. These default to "localhost" and 9200 respectively.
+If the node is deployed to bind on either a different host or port, these must be changed.**
+
+### Multi Node Server Installations
+
+The application of the elasticsearch role results in the installation of a node on a host. Specifying the role multiple times for a host therefore results in the installation of multiple nodes for the host.
+
+An example of a two server deployment is shown below. The first server holds the master and is thus declared first. Whilst not mandatory, this is recommended in any multi node cluster configuration. The second server hosts two data nodes.
+
+**Note the structure of the below playbook for the data nodes. Whilst more succinct structures are possible which allow the same role to be applied to a host multiple times, we have found the below structure to be the most reliable with respect to var behaviour. This is the tested approach.**
+
+```
+- hosts: master_nodes
+  roles:
+    - { role: elasticsearch, es_instance_name: "node1", es_heap_size: "1g",
+    es_config: {
+      cluster.name: "test-cluster",
+      discovery.zen.ping.unicast.hosts: "elastic02:9300",
+      http.port: 9200,
+      transport.tcp.port: 9300,
+      node.data: false,
+      node.master: true,
+      bootstrap.memory_lock: false,
+    }
+  }
+  vars:
+    es_scripts: false
+    es_templates: false
+    es_version_lock: false
+    ansible_user: ansible
+    es_plugins:
+      - plugin: ingest-geoip
+
+
+- hosts: data_nodes
+  roles:
+    - { role: elasticsearch, es_instance_name: "node1", es_data_dirs: "/opt/elasticsearch",
+    es_config: {
+      discovery.zen.ping.unicast.hosts: "elastic02:9300",
+      http.port: 9200,
+      transport.tcp.port: 9300,
+      node.data: true,
+      node.master: false,
+      bootstrap.memory_lock: false,
+      cluster.name: "test-cluster"
+    }
+  }
+  vars:
+    es_scripts: false
+    es_templates: false
+    es_version_lock: false
+    ansible_user: ansible
+    es_api_port: 9200
+    es_plugins:
+      - plugin: ingest-geoip
+
+
+- hosts: data_nodes
+  roles:
+    - { role: elasticsearch, es_instance_name: "node2", es_api_port: 9201,
+    es_config: {
+      discovery.zen.ping.unicast.hosts: "elastic02:9300",
+      http.port: 9201,
+      transport.tcp.port: 9301,
+      node.data: true,
+      node.master: false,
+      bootstrap.memory_lock: false,
+      cluster.name: "test-cluster",
+    }
+  }
+  vars:
+    es_scripts: false
+    es_templates: false
+    es_version_lock: false
+    es_api_port: 9201
+    ansible_user: ansible
+    es_plugins:
+      - plugin: ingest-geoip
+```
+
+Parameters can additionally be assigned to hosts using the inventory file if desired.
+
+Make sure your hosts are defined in your ```inventory``` file with the appropriate ```ansible_ssh_host```, ```ansible_ssh_user``` and ```ansible_ssh_private_key_file``` values.
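+
+For example, a minimal ```inventory``` sketch matching the playbook above (the IPs, the ```elastic03``` host name and the key path are illustrative):
+
+```
+[master_nodes]
+elastic02 ansible_ssh_host=192.168.1.20 ansible_ssh_user=ansible ansible_ssh_private_key_file=~/.ssh/id_rsa
+
+[data_nodes]
+elastic03 ansible_ssh_host=192.168.1.21 ansible_ssh_user=ansible ansible_ssh_private_key_file=~/.ssh/id_rsa
+```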
+
+Then run it:
+
+```
+ansible-playbook -i hosts ./your-playbook.yml
+```
+
+### Installing X-Pack Features
+
+X-Pack features, such as Security, are supported. This feature is currently experimental. To enable X-Pack set the parameter `es_enable_xpack` to true and list the required features in the parameter `es_xpack_features`.
+
+The parameter `es_xpack_features` by default enables all features, i.e. it defaults to ["alerting","monitoring","graph","security"]
+
+The following additional parameters allow X-Pack to be configured:
+
+* ```es_message_auth_file``` System Key field to allow message authentication. This file should be placed in the 'files' directory.
+* ```es_role_mapping``` Role mappings file declared as yml as described [here](https://www.elastic.co/guide/en/x-pack/current/mapping-roles.html)
+
+```
+es_role_mapping:
+  power_user:
+    - "cn=admins,dc=example,dc=com"
+  user:
+    - "cn=users,dc=example,dc=com"
+    - "cn=admins,dc=example,dc=com"
+```
+
+* ```es_users``` - Users can be declared here as yml. Two sub keys, 'native' and 'file', determine the realm under which the user is created. Beneath each of these keys users should be declared as yml entries. e.g.
+
+```
+es_users:
+  native:
+    kibana4_server:
+      password: changeMe
+      roles:
+        - kibana4_server
+  file:
+    es_admin:
+      password: changeMe
+      roles:
+        - admin
+    testUser:
+      password: changeMeAlso!
+      roles:
+        - power_user
+        - user
+```
+
+* ```es_roles``` - Elasticsearch roles can be declared here as yml. Two sub keys, 'native' and 'file', determine how the role is created, i.e. either through a file or an http (native) call. Beneath each key list the roles with appropriate permissions, using the file based format described [here](https://www.elastic.co/guide/en/x-pack/current/file-realm.html) e.g.
+
+```
+es_roles:
+  file:
+    admin:
+      cluster:
+        - all
+      indices:
+        - names: '*'
+          privileges:
+            - all
+    power_user:
+      cluster:
+        - monitor
+      indices:
+        - names: '*'
+          privileges:
+            - all
+    user:
+      indices:
+        - names: '*'
+          privileges:
+            - read
+    kibana4_server:
+      cluster:
+        - monitor
+      indices:
+        - names: '.kibana'
+          privileges:
+            - all
+  native:
+    logstash:
+      cluster:
+        - manage_index_templates
+      indices:
+        - names: 'logstash-*'
+          privileges:
+            - write
+            - delete
+            - create_index
+```
+
+* ```es_xpack_license``` - X-Pack license. The license should be declared as a json blob. Alternatively, use Ansible vault or copy the license to the target machine as part of a playbook and access it via a lookup e.g.
+
+```
+es_xpack_license: "{{ lookup('file', '/tmp/license.json') }}"
+```
+
+X-Pack configuration parameters can be added to the elasticsearch.yml file using the normal `es_config` parameter.
+
+For a full example see [here](https://github.com/elastic/ansible-elasticsearch/blob/master/test/integration/xpack.yml)
+
+#### Important Note for Native Realm Configuration
+
+In order for native users and roles to be configured, the role calls the Elasticsearch API. If security is installed this requires the definition of two parameters:
+
+* ```es_api_basic_auth_username``` - admin username
+* ```es_api_basic_auth_password``` - admin password
+
+These can either be set to a user declared in the file based realm, with admin permissions, or the default "elastic" superuser (default password is changeme).
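+
+For example, reusing the hypothetical file realm user declared in the ```es_users``` example above:
+
+```
+es_api_basic_auth_username: es_admin
+es_api_basic_auth_password: changeMe
+```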
+
+### Additional Configuration
+
+In addition to es_config, the following parameters allow the customization of the Java and Elasticsearch versions, as well as the role behaviour. Options include:
+
+* ```es_major_version``` (e.g. "5.1" ). Should be consistent with es_version. For versions >= 5.0 this must be "5.x".
+* ```es_version``` (e.g. "5.1.2").
+* ```es_api_host``` The host name used for actions requiring HTTP e.g. installing templates. Defaults to "localhost".
+* ```es_api_port``` The port used for actions requiring HTTP e.g. installing templates. Defaults to 9200. **CHANGE IF THE HTTP PORT IS NOT 9200**
+* ```es_api_basic_auth_username``` The Elasticsearch username for admin actions. Used if Security is enabled. Ensure this user is an admin.
+* ```es_api_basic_auth_password``` The password associated with the user declared in `es_api_basic_auth_username`
+* ```es_start_service``` (true (default) or false)
+* ```es_plugins_reinstall``` (true or false (default))
+* ```es_plugins``` an array of plugin definitions e.g.:
+```yml
+  es_plugins:
+    - plugin: elasticsearch-cloud-aws
+```
+* ```es_allow_downgrades``` For development purposes only. (true or false (default))
+* ```es_java_install``` If set to false, Java will not be installed. (true (default) or false)
+* ```update_java``` Updates Java to the latest version. (true or false (default))
+* ```es_max_map_count``` maximum number of VMA (Virtual Memory Areas) a process can own. Defaults to 262144.
+* ```es_max_open_files``` the maximum file descriptor number that can be opened by this process. Defaults to 65536.
+
+Earlier examples illustrate the installation of plugins using `es_plugins`. For officially supported plugins no version or source delimiter is required; the plugin script will determine the appropriate plugin version based on the target Elasticsearch version. For community based plugins include the full url. This approach should NOT be used for the X-Pack plugin; see the X-Pack section above for details.
+
+If installing Monitoring or Alerting, ensure the license plugin is also specified. Security configuration is currently not supported but planned for later versions.
+
+* ```es_user``` - defaults to elasticsearch.
+* ```es_group``` - defaults to elasticsearch.
+* ```es_user_id``` - default is undefined.
+* ```es_group_id``` - default is undefined.
+
+Both ```es_user_id``` and ```es_group_id``` must be set for the user and group ids to be set.
+
+By default, each node on a host will be installed to use unique pid, plugin, work, data and log directories. These directories are created, using the instance and host name, beneath default locations
+controlled by the following parameters:
+
+* ```es_pid_dir``` - defaults to "/var/run/elasticsearch".
+* ```es_data_dirs``` - defaults to "/var/lib/elasticsearch". This can be a list or comma separated string e.g. ["/opt/elasticsearch/data-1","/opt/elasticsearch/data-2"] or "/opt/elasticsearch/data-1,/opt/elasticsearch/data-2"
+* ```es_log_dir``` - defaults to "/var/log/elasticsearch".
+* ```es_restart_on_change``` - defaults to true. If false, changes will not result in Elasticsearch being restarted.
+* ```es_plugins_reinstall``` - defaults to false. If true, all currently installed plugins will be removed from a node. Listed plugins will then be re-installed.
+
+This role ships with sample scripts and templates located in the [files/scripts/](files/scripts) and [files/templates/](files/templates) directories, respectively. These variables are used with the Ansible [with_fileglob](http://docs.ansible.com/ansible/playbooks_loops.html#id4) loop. When setting the globs, be sure to use an absolute path.
+* ```es_scripts_fileglob``` - defaults to `<role>/files/scripts/`.
+* ```es_templates_fileglob``` - defaults to `<role>/files/templates/`.
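+
+For example (the absolute path below is illustrative, not a default):
+
+```
+es_templates_fileglob: "/my/repos/ansible-elasticsearch/files/templates/*.json"
+```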
Set the environment variable `ES_XPACK_LICENSE_FILE` to the full path of the license file prior to running tests. + +## IMPORTANT NOTES RE PLUGIN MANAGEMENT + +* If the ES version is changed, all plugins will be removed. Those listed in the playbook will be re-installed. This is behaviour is required in ES 5.x. +* If no plugins are listed in the playbook for a node, all currently installed plugins will be removed. +* The role does not currently support automatic detection of differences between installed and listed plugins (other than if none are listed). Should users wish to change installed plugins should set es_plugins_reinstall to true. This will cause all currently installed plugins to be removed and those listed to be installed. Change detection will be implemented in future releases. + +## Questions on Usage + +We welcome questions on how to use the role. However, in order to keep the github issues list focused on "issues" we ask the community to raise questions at https://discuss.elastic.co/c/elasticsearch. This is monitored by the maintainers. diff --git a/ansible/roles/elasticsearch/defaults/main.yml b/ansible/roles/elasticsearch/defaults/main.yml new file mode 100755 index 0000000000000000000000000000000000000000..59f01afd19560cb0c33c5b15376dc4bed88013f9 --- /dev/null +++ b/ansible/roles/elasticsearch/defaults/main.yml @@ -0,0 +1,40 @@ +--- +es_major_version: "5.x" +es_version: "5.2.2" +es_version_lock: false +es_use_repository: true +es_apt_key: "https://artifacts.elastic.co/GPG-KEY-elasticsearch" +es_apt_url: "deb https://artifacts.elastic.co/packages/{{ es_major_version }}/apt stable main" +es_apt_url_old: "deb http://packages.elastic.co/elasticsearch/{{ es_major_version }}/debian stable main" +es_start_service: true +es_java_install: true +update_java: false +es_restart_on_change: true +es_plugins_reinstall: false +es_scripts: false +es_templates: false +es_user: elasticsearch +es_group: elasticsearch +es_config: {} +#Need to provide default directories +es_pid_dir: "/var/run/elasticsearch" +es_data_dirs: "/var/lib/elasticsearch" +es_log_dir: "/var/log/elasticsearch" +es_max_open_files: 65536 +es_max_map_count: 262144 +es_allow_downgrades: false +es_enable_xpack: false +es_xpack_features: ["alerting","monitoring","graph","security"] +#These are used for internal operations performed by ansible. +#They do not effect the current configuration +es_api_host: "localhost" +es_api_port: 9200 + +# Since ansible 2.2 the following variables need to be defined +# to allow the role to be conditionally played with a when condition. 
+pid_dir: ''
+log_dir: ''
+conf_dir: ''
+data_dirs: ''
+# JVM custom parameters
+es_jvm_custom_parameters: ''
diff --git a/ansible/roles/elasticsearch/files/scripts/calculate-score.groovy b/ansible/roles/elasticsearch/files/scripts/calculate-score.groovy
new file mode 100755
index 0000000000000000000000000000000000000000..442c25ca6cf1cf960b4dd65bea9ce83978456a7e
--- /dev/null
+++ b/ansible/roles/elasticsearch/files/scripts/calculate-score.groovy
@@ -0,0 +1 @@
+log(_score * 2) + my_modifier
\ No newline at end of file
diff --git a/ansible/roles/elasticsearch/files/system_key b/ansible/roles/elasticsearch/files/system_key
new file mode 100755
index 0000000000000000000000000000000000000000..91962910d2ac82a5dd768c0d6077bddf45a03aad
Binary files /dev/null and b/ansible/roles/elasticsearch/files/system_key differ
diff --git a/ansible/roles/elasticsearch/files/templates/basic.json b/ansible/roles/elasticsearch/files/templates/basic.json
new file mode 100755
index 0000000000000000000000000000000000000000..3a3871d95e6b595a556c58eb79cc8cedf057a7e9
--- /dev/null
+++ b/ansible/roles/elasticsearch/files/templates/basic.json
@@ -0,0 +1,11 @@
+{
+  "template" : "te*",
+  "settings" : {
+    "number_of_shards" : 1
+  },
+  "mappings" : {
+    "type1" : {
+      "_source" : { "enabled" : false }
+    }
+  }
+}
\ No newline at end of file
diff --git a/ansible/roles/elasticsearch/filter_plugins/custom.py b/ansible/roles/elasticsearch/filter_plugins/custom.py
new file mode 100755
index 0000000000000000000000000000000000000000..82b0efa05cd142bf8c560738b889a654436c8ed4
--- /dev/null
+++ b/ansible/roles/elasticsearch/filter_plugins/custom.py
@@ -0,0 +1,50 @@
+__author__ = 'dale mcdiarmid'
+
+import re
+import os.path
+
+def modify_list(values=[], pattern='', replacement='', ignorecase=False):
+    ''' Perform a `re.sub` on every item in the list'''
+    if ignorecase:
+        flags = re.I
+    else:
+        flags = 0
+    _re = re.compile(pattern, flags=flags)
+    return [_re.sub(replacement, value) for value in values]
+
+def append_to_list(values=[], suffix=''):
+    # Accepts a list or a comma separated string (Python 2: basestring).
+    if isinstance(values, basestring):
+        values = values.split(',')
+    return [str(value+suffix) for value in values]
+
+def array_to_str(values=[],separator=','):
+    return separator.join(values)
+
+def extract_role_users(users={}):
+    # Flatten the users map into a list of "role:user" strings.
+    role_users=[]
+    for user,details in users.iteritems():
+        if "roles" in details:
+            for role in details["roles"]:
+                role_users.append(role+":"+user)
+    return role_users
+
+def filename(filename=''):
+    return os.path.splitext(os.path.basename(filename))[0]
+
+def filter_reserved(user_roles={}):
+    # Return only those users/roles not flagged as _reserved in their metadata.
+    not_reserved = []
+    for user_role,details in user_roles.items():
+        if not "metadata" in details or not "_reserved" in details["metadata"] or not details["metadata"]["_reserved"]:
+            not_reserved.append(user_role)
+    return not_reserved
+
+
+class FilterModule(object):
+    def filters(self):
+        return {'modify_list': modify_list,
+                'append_to_list':append_to_list,
+                'array_to_str':array_to_str,
+                'extract_role_users':extract_role_users,
+                'filter_reserved':filter_reserved,
+                'filename':filename}
+
diff --git a/ansible/roles/elasticsearch/handlers/elasticsearch-templates.yml b/ansible/roles/elasticsearch/handlers/elasticsearch-templates.yml
new file mode 100755
index 0000000000000000000000000000000000000000..b1ff63a0f3be62b22172a441bf2be790727b6b59
--- /dev/null
+++ b/ansible/roles/elasticsearch/handlers/elasticsearch-templates.yml
@@ -0,0 +1,34 @@
+---
+
+- name: Ensure elasticsearch is started
+  service: name={{instance_init_script | basename}} state=started enabled=yes
+
+- name: Wait for elasticsearch to start up
+  wait_for: host={{es_api_host}} port={{es_api_port}} delay=10
+
+- name: Get template files
+  find: paths="/etc/elasticsearch/templates" patterns="*.json"
+  register: templates
+
+- name: Install templates without auth
+  uri:
+    url: "http://{{es_api_host}}:{{es_api_port}}/_template/{{item.path | filename}}"
+    method: PUT
+    status_code: 200
+    body_format: json
+    body: "{{ lookup('file', item.path) }}"
+  when: not es_enable_xpack or not es_xpack_features is defined or "security" not in es_xpack_features
+  with_items: "{{ templates.files }}"
+
+- name: Install templates with auth
+  uri:
+    url: "http://{{es_api_host}}:{{es_api_port}}/_template/{{item.path | filename}}"
+    method: PUT
+    status_code: 200
+    user: "{{es_api_basic_auth_username}}"
+    password: "{{es_api_basic_auth_password}}"
+    force_basic_auth: yes
+    body_format: json
+    body: "{{ lookup('file', item.path) }}"
+  when: es_enable_xpack and es_xpack_features is defined and "security" in es_xpack_features
+  with_items: "{{ templates.files }}"
diff --git a/ansible/roles/elasticsearch/handlers/main.yml b/ansible/roles/elasticsearch/handlers/main.yml
new file mode 100755
index 0000000000000000000000000000000000000000..55b95af13f7dc14ecfa950c16c411189332485fc
--- /dev/null
+++ b/ansible/roles/elasticsearch/handlers/main.yml
@@ -0,0 +1,15 @@
+- name: reload systemd configuration
+  command: systemctl daemon-reload
+
+# Restart service and ensure it is enabled
+- name: restart elasticsearch
+  service: name={{instance_init_script | basename}} state=restarted enabled=yes
+  when: es_restart_on_change and es_start_service and ((plugin_installed is defined and plugin_installed.changed) or (config_updated is defined and config_updated.changed) or (xpack_state.changed) or (debian_elasticsearch_install_from_repo.changed or redhat_elasticsearch_install_from_repo.changed or elasticsearch_install_from_package.changed))
+
+#Templates are a handler as they need to come after a restart, e.g. suppose a user removes security on a running node and doesn't
+#specify es_api_basic_auth_username and es_api_basic_auth_password. The templates would subsequently not be removed if we didn't wait for the node to restart.
+#Templates are therefore done after the restart - as a handler.
+
+- name: load-templates
+  include: ./handlers/elasticsearch-templates.yml
+  when: es_templates
diff --git a/ansible/roles/elasticsearch/meta/main.yml b/ansible/roles/elasticsearch/meta/main.yml
new file mode 100755
index 0000000000000000000000000000000000000000..edd7295329636e2b5445db6c26c0137ceaa5999a
--- /dev/null
+++ b/ansible/roles/elasticsearch/meta/main.yml
@@ -0,0 +1,26 @@
+---
+
+allow_duplicates: yes
+
+galaxy_info:
+  author: Robin Clarke, Jakob Reiter, Dale McDiarmid
+  description: Elasticsearch for Linux
+  company: "Elastic.co"
+  license: "license (Apache)"
+  # Require 1.6 for apt deb install
+  min_ansible_version: 2.2.0
+  platforms:
+    - name: EL
+      versions:
+        - 6
+        - 7
+    - name: Debian
+      versions:
+        - all
+    - name: Ubuntu
+      versions:
+        - all
+  categories:
+    - system
+
+dependencies: []
diff --git a/ansible/roles/elasticsearch/tasks/elasticsearch-Debian-version-lock.yml b/ansible/roles/elasticsearch/tasks/elasticsearch-Debian-version-lock.yml
new file mode 100755
index 0000000000000000000000000000000000000000..a932af77fb45c4b28fb26b435d44f7559d2be67c
--- /dev/null
+++ b/ansible/roles/elasticsearch/tasks/elasticsearch-Debian-version-lock.yml
@@ -0,0 +1,5 @@
+---
+- name: Debian - hold elasticsearch version
+  command: apt-mark hold elasticsearch
+  register: hold_elasticsearch_result
+  changed_when: "hold_elasticsearch_result.stdout != 'elasticsearch was already set on hold.'"
\ No newline at end of file
diff --git a/ansible/roles/elasticsearch/tasks/elasticsearch-Debian.yml b/ansible/roles/elasticsearch/tasks/elasticsearch-Debian.yml
new file mode 100755
index 0000000000000000000000000000000000000000..83748b04e05ddc52e3831092fb362cba8b1596f6
--- /dev/null
+++ b/ansible/roles/elasticsearch/tasks/elasticsearch-Debian.yml
@@ -0,0 +1,39 @@
+---
+
+- set_fact: force_install=no
+
+- set_fact: force_install=yes
+  when: es_allow_downgrades
+
+- name: Debian - Install apt-transport-https to support https APT downloads
+  apt: name=apt-transport-https state=present
+  when: es_use_repository
+
+- name: Debian - Add Elasticsearch repository key
+  apt_key: url="{{ es_apt_key }}" state=present
+  when: es_use_repository and es_apt_key
+
+- name: Debian - Add elasticsearch repository
+  apt_repository: repo={{ item.repo }} state={{ item.state}}
+  with_items:
+    - { repo: "{{ es_apt_url_old }}", state: "absent" }
+    - { repo: "{{ es_apt_url }}", state: "present" }
+  when: es_use_repository
+
+- name: Debian - Include versionlock
+  include: elasticsearch-Debian-version-lock.yml
+  when: es_version_lock
+
+- name: Debian - Ensure elasticsearch is installed
+  apt: name=elasticsearch{% if es_version is defined and es_version != "" %}={{ es_version }}{% endif %} state=present force={{force_install}} allow_unauthenticated={{ 'no' if es_apt_key else 'yes' }} cache_valid_time=86400
+  when: es_use_repository
+  register: debian_elasticsearch_install_from_repo
+
+- name: Debian - Download elasticsearch from url
+  get_url: url={% if es_custom_package_url is defined %}{{ es_custom_package_url }}{% else %}{{ es_package_url }}-{{ es_version }}.deb{% endif %} dest=/tmp/elasticsearch-{{ es_version }}.deb validate_certs=no
+  when: not es_use_repository
+
+- name: Debian - Ensure elasticsearch is installed from downloaded package
+  apt: deb=/tmp/elasticsearch-{{ es_version }}.deb
+  when: not es_use_repository
+  register: elasticsearch_install_from_package
diff --git a/ansible/roles/elasticsearch/tasks/elasticsearch-RedHat-version-lock.yml b/ansible/roles/elasticsearch/tasks/elasticsearch-RedHat-version-lock.yml
new file mode 100755
index 0000000000000000000000000000000000000000..e6fd8381ff2075f4ccd45c8084d01289eef704ad
--- /dev/null
+++ b/ansible/roles/elasticsearch/tasks/elasticsearch-RedHat-version-lock.yml
@@ -0,0 +1,6 @@
+---
+- name: RedHat - install yum-version-lock
+  yum: name=yum-plugin-versionlock state=present update_cache=yes
+- name: RedHat - lock elasticsearch version
+  shell: yum versionlock delete 0:elasticsearch* ; yum versionlock add elasticsearch{% if es_version is defined and es_version != "" %}-{{ es_version }}{% endif %}
+
diff --git a/ansible/roles/elasticsearch/tasks/elasticsearch-RedHat.yml b/ansible/roles/elasticsearch/tasks/elasticsearch-RedHat.yml
new file mode 100755
index 0000000000000000000000000000000000000000..18cfe4e79c0098890f236a93aa1c2775f6398a2f
--- /dev/null
+++ b/ansible/roles/elasticsearch/tasks/elasticsearch-RedHat.yml
@@ -0,0 +1,25 @@
+---
+- name: Ensure libselinux-python on CentOS 6.x
+  yum: name=libselinux-python state=present update_cache=yes
+  when: ( ansible_distribution == "CentOS" ) and ( ansible_distribution_major_version == "6" )
+
+- name: RedHat - add Elasticsearch repo
+  template: src=elasticsearch.repo dest=/etc/yum.repos.d/elasticsearch-{{ es_major_version }}.repo
+  when: es_use_repository
+
+- name: RedHat - include versionlock
+  include: elasticsearch-RedHat-version-lock.yml
+  when: es_version_lock
+
+- name: RedHat - Install Elasticsearch
+  yum: name=elasticsearch{% if es_version is defined and es_version != "" %}-{{ es_version }}{% endif %} state=present update_cache=yes
+  when: es_use_repository
+  register: redhat_elasticsearch_install_from_repo
+  until: '"failed" not in redhat_elasticsearch_install_from_repo'
+  retries: 5
+  delay: 10
+
+- name: RedHat - Install Elasticsearch from url
+  yum: name={% if es_custom_package_url is defined %}{{ es_custom_package_url }}{% else %}{{ es_package_url }}-{{ es_version }}.noarch.rpm{% endif %} state=present
+  when: not es_use_repository
+  register: elasticsearch_install_from_package
diff --git a/ansible/roles/elasticsearch/tasks/elasticsearch-config.yml b/ansible/roles/elasticsearch/tasks/elasticsearch-config.yml
new file mode 100755
index 0000000000000000000000000000000000000000..6debfaf54a7ac25e5f6ec0cb9b06d538e5f7f5a0
--- /dev/null
+++ b/ansible/roles/elasticsearch/tasks/elasticsearch-config.yml
@@ -0,0 +1,84 @@
+---
+# Configure Elasticsearch Node
+
+#Create required directories
+- name: Create Directories
+  file: path={{ item }} state=directory owner={{ es_user }} group={{ es_group }}
+  with_items:
+    - "{{pid_dir}}"
+    - "{{log_dir}}"
+    - "{{conf_dir}}"
+
+- name: Create Data Directories
+  file: path={{ item }} state=directory owner={{ es_user }} group={{ es_group }}
+  with_items:
+    - "{{data_dirs}}"
+
+
+#Copy the config template
+- name: Copy Configuration File
+  template: src=elasticsearch.yml.j2 dest={{conf_dir}}/elasticsearch.yml owner={{ es_user }} group={{ es_group }} mode=0644 force=yes
+  register: config_updated
+  notify: restart elasticsearch
+
+#Copy the instance specific default file
+- name: Copy Default File for Instance
+  template: src=elasticsearch.j2 dest={{instance_default_file}} mode=0644 force=yes
+  notify: restart elasticsearch
+
+#Copy the instance specific init file
+- name: Copy Debian Init File for Instance
+  template: src=init/debian/elasticsearch.j2 dest={{instance_init_script}} mode=0755 force=yes
+  when: ansible_os_family == 'Debian' and not use_system_d
+  notify: restart elasticsearch
+
+#Copy the instance specific init file
+- name: Copy Redhat Init File for Instance
+  template: src=init/redhat/elasticsearch.j2 dest={{instance_init_script}} mode=0755 force=yes
+  when: ansible_os_family == 'RedHat' and not use_system_d
+  notify: restart elasticsearch
+
+#Copy the systemd specific file if systemd is installed
+- name: Copy Systemd File for Instance
+  template: src=systemd/elasticsearch.j2 dest={{instance_sysd_script}} mode=0644 force=yes
+  when: use_system_d
+  notify:
+    - reload systemd configuration
+    - restart elasticsearch
+
+#Copy the log4j2.properties file
+- name: Copy log4j2.properties File for Instance
+  template: src=log4j2.properties.j2 dest={{conf_dir}}/log4j2.properties owner={{ es_user }} group={{ es_group }} mode=0644 force=yes
+  notify: restart elasticsearch
+
+- name: Copy jvm.options File for Instance
+  template: src=jvm.options.j2 dest={{conf_dir}}/jvm.options owner={{ es_user }} group={{ es_group }} mode=0644 force=yes
+  notify: restart elasticsearch
+
+#Clean up unwanted package scripts to avoid confusion
+
+- name: Delete Default Init
+  file: dest=/etc/init.d/elasticsearch state=absent
+
+- name: Delete Default Environment File (Debian)
+  file: dest=/etc/default/elasticsearch state=absent
+  when: ansible_os_family == 'Debian'
+
+- name: Delete Default Environment File (RedHat)
+  file: dest=/etc/sysconfig/elasticsearch state=absent
+  when: ansible_os_family == 'RedHat'
+
+- name: Delete Default Sysconfig File
+  file: dest="{{ sysd_script }}" state=absent
+
+- name: Delete Default Configuration File
+  file: dest=/etc/elasticsearch/elasticsearch.yml state=absent
+
+- name: Delete Default Logging File
+  file: dest=/etc/elasticsearch/logging.yml state=absent
+
+- name: Delete Default log4j2 File
+  file: dest=/etc/elasticsearch/log4j2.properties state=absent
+
+- name: Delete Default JVM Options File
+  file: dest=/etc/elasticsearch/jvm.options state=absent
diff --git a/ansible/roles/elasticsearch/tasks/elasticsearch-optional-user.yml b/ansible/roles/elasticsearch/tasks/elasticsearch-optional-user.yml
new file mode 100755
index 0000000000000000000000000000000000000000..5cf1d38752771c40570b82d2e5e61c1534a236d7
--- /dev/null
+++ b/ansible/roles/elasticsearch/tasks/elasticsearch-optional-user.yml
@@ -0,0 +1,18 @@
+---
+#Add the elasticsearch user before installing from packages.
+- name: Ensure optional elasticsearch group is created with the correct id.
+  group:
+    state: present
+    name: "{{ es_group }}"
+    system: yes
+    gid: "{{ es_group_id }}"
+
+- name: Ensure optional elasticsearch user is created with the correct id.
+  user:
+    state: present
+    name: "{{ es_user }}"
+    comment: elasticsearch system user
+    system: yes
+    createhome: no
+    uid: "{{ es_user_id }}"
+    group: "{{ es_group }}"
diff --git a/ansible/roles/elasticsearch/tasks/elasticsearch-parameters.yml b/ansible/roles/elasticsearch/tasks/elasticsearch-parameters.yml
new file mode 100755
index 0000000000000000000000000000000000000000..0a6dce037865f07c15ef46015f7da3eb180cb95b
--- /dev/null
+++ b/ansible/roles/elasticsearch/tasks/elasticsearch-parameters.yml
@@ -0,0 +1,49 @@
+# Check for mandatory parameters
+
+- fail: msg="es_instance_name must be specified and cannot be blank"
+  when: es_instance_name is not defined or es_instance_name == ''
+
+- fail: msg="es_proxy_port must be specified and cannot be blank when es_proxy_host is defined"
+  when: (es_proxy_port is not defined or es_proxy_port == '') and (es_proxy_host is defined and es_proxy_host != '')
+
+- debug: msg="WARNING - It is recommended you specify the parameter 'http.port'"
+  when: es_config['http.port'] is not defined
+
+- debug: msg="WARNING - It is recommended you specify the parameter 'transport.tcp.port'"
+  when: es_config['transport.tcp.port'] is not defined
+
+- debug: msg="WARNING - It is recommended you specify the parameter 'discovery.zen.ping.unicast.hosts'"
+  when: es_config['discovery.zen.ping.unicast.hosts'] is not defined
+
+#If the user attempts to lock memory they must specify a heap size
+- fail: msg="If locking memory with bootstrap.memory_lock a heap size must be specified"
+  when: es_config['bootstrap.memory_lock'] is defined and es_config['bootstrap.memory_lock'] == True and es_heap_size is not defined
+
+#Check that, when security is enabled, we have both es_api_basic_auth_username and es_api_basic_auth_password - otherwise any http calls won't work
+- fail: msg="Enabling security requires an es_api_basic_auth_username and es_api_basic_auth_password to be provided to allow cluster operations"
+  when: es_enable_xpack and ("security" in es_xpack_features) and (es_api_basic_auth_username is not defined or es_api_basic_auth_password is not defined)
+
+- set_fact: instance_default_file={{default_file | dirname}}/{{es_instance_name}}_{{default_file | basename}}
+- set_fact: instance_init_script={{init_script | dirname }}/{{es_instance_name}}_{{init_script | basename}}
+- set_fact: conf_dir={{ es_conf_dir }}/{{es_instance_name}}
+- set_fact: m_lock_enabled={{ es_config['bootstrap.memory_lock'] is defined and es_config['bootstrap.memory_lock'] == True }}
+
+#TODO - if transport.host is not local maybe error on bootstrap checks
+
+
+#Use systemd for the following distributions:
+#Ubuntu 15 and up
+#Debian 8 and up
+#Centos 7 and up
+#Relies on the elasticsearch distribution installing a systemd service script to determine whether one should be copied.
+
+- set_fact: use_system_d={{(ansible_distribution == 'Debian' and ansible_distribution_version | version_compare('8', '>=')) or (ansible_distribution in ['RedHat','CentOS'] and ansible_distribution_version | version_compare('7', '>=')) or (ansible_distribution == 'Ubuntu' and ansible_distribution_version | version_compare('15', '>=')) }}
+
+- set_fact: instance_sysd_script={{sysd_script | dirname }}/{{es_instance_name}}_{{sysd_script | basename}}
+  when: use_system_d
+#For directories we also use the {{inventory_hostname}}-{{ es_instance_name }} - this helps if we have a shared SAN.
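+# As an illustrative example (hypothetical values): with inventory_hostname "es1" and
+# es_instance_name "node1", instance_suffix becomes "es1-node1", so with the role defaults
+# pid_dir resolves to /var/run/elasticsearch/es1-node1, log_dir to
+# /var/log/elasticsearch/es1-node1, and data_dirs to ["/var/lib/elasticsearch/es1-node1"].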
+
+- set_fact: instance_suffix={{inventory_hostname}}-{{ es_instance_name }}
+- set_fact: pid_dir={{ es_pid_dir }}/{{instance_suffix}}
+- set_fact: log_dir={{ es_log_dir }}/{{instance_suffix}}
+- set_fact: data_dirs={{ es_data_dirs | append_to_list('/'+instance_suffix) }}
diff --git a/ansible/roles/elasticsearch/tasks/elasticsearch-plugins.yml b/ansible/roles/elasticsearch/tasks/elasticsearch-plugins.yml
new file mode 100755
index 0000000000000000000000000000000000000000..8ec9e25b987e940751861bdeaccaac1d505dde78
--- /dev/null
+++ b/ansible/roles/elasticsearch/tasks/elasticsearch-plugins.yml
@@ -0,0 +1,60 @@
+---
+
+# es_plugins_reinstall will be set to true if elasticsearch_install_from_repo.changed or elasticsearch_install_from_package.changed,
+# i.e. we have changed the ES version (or have a clean installation of ES), or if no plugins are listed. Otherwise it is false and must be set explicitly.
+- set_fact: es_plugins_reinstall=true
+  when: (((debian_elasticsearch_install_from_repo is defined and debian_elasticsearch_install_from_repo.changed) or (redhat_elasticsearch_install_from_repo is defined and redhat_elasticsearch_install_from_repo.changed)) or (elasticsearch_install_from_package is defined and elasticsearch_install_from_package.changed)) or es_plugins is not defined or es_plugins is none
+
+- set_fact: list_command=""
+#If we are reinstalling all plugins, e.g. due to a version change, we need to remove all plugins (inc. x-pack) before installing any plugins. Otherwise we don't consider x-pack, so the role stays idempotent.
+- set_fact: list_command="| grep -vE 'x-pack'"
+  when: not es_plugins_reinstall
+
+#List currently installed plugins. We have to list the directories, as the list command fails if the ES version is different from the plugin version.
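+#(For reference, a minimal es_plugins definition matching what the tasks below
+# consume - the plugin name and proxy values are illustrative only:
+#  es_plugins:
+#    - plugin: ingest-geoip
+#      proxy_host: proxy.example.com
+#      proxy_port: 8080
+# proxy_host/proxy_port are optional per item and fall back to es_proxy_host/es_proxy_port.)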
+- name: Check installed elasticsearch plugins + shell: "ls {{es_home}}/plugins {{list_command}}" + register: installed_plugins + changed_when: False + ignore_errors: yes + environment: + CONF_DIR: "{{ conf_dir }}" + ES_INCLUDE: "{{ instance_default_file }}" + +#if es_plugins_reinstall is set to true we remove ALL plugins +- set_fact: plugins_to_remove="{{ installed_plugins.stdout_lines | default([]) }}" + when: es_plugins_reinstall + +#if the plugins listed are different than those requested, we remove those installed but not listed in the config +- set_fact: plugins_to_remove="{{ installed_plugins.stdout_lines | difference(es_plugins | json_query('[*].plugin')) | default([]) }}" + when: not es_plugins_reinstall + +# This removes any currently installed plugins (to prevent errors when reinstalling) +- name: Remove elasticsearch plugins + command: "{{es_home}}/bin/elasticsearch-plugin remove {{item}} --silent" + ignore_errors: yes + with_items: "{{ plugins_to_remove | default([]) }}" + when: es_plugins_reinstall and plugins_to_remove | length > 0 + notify: restart elasticsearch + register: plugin_removed + environment: + CONF_DIR: "{{ conf_dir }}" + ES_INCLUDE: "{{ instance_default_file }}" + +- name: Install elasticsearch plugins + command: "{{es_home}}/bin/elasticsearch-plugin install {{ item.plugin }} --batch --silent {% if item.proxy_host is defined and item.proxy_host != '' and item.proxy_port is defined and item.proxy_port != ''%} -DproxyHost={{ item.proxy_host }} -DproxyPort={{ item.proxy_port }} {% elif es_proxy_host is defined and es_proxy_host != '' %} -DproxyHost={{ es_proxy_host }} -DproxyPort={{ es_proxy_port }} {% endif %}" + register: plugin_installed + failed_when: "'ERROR' in plugin_installed.stdout" + changed_when: plugin_installed.rc == 0 + with_items: "{{ es_plugins | default([]) }}" + when: not es_plugins is none and es_plugins_reinstall + notify: restart elasticsearch + environment: + CONF_DIR: "{{ conf_dir }}" + ES_INCLUDE: "{{ instance_default_file }}" + until: plugin_installed.rc == 0 + retries: 5 + delay: 5 + +#Set permissions on plugins directory +- name: Set Plugin Directory Permissions + file: state=directory path={{ es_home }}/plugins owner={{ es_user }} group={{ es_group }} recurse=yes diff --git a/ansible/roles/elasticsearch/tasks/elasticsearch-scripts.yml b/ansible/roles/elasticsearch/tasks/elasticsearch-scripts.yml new file mode 100755 index 0000000000000000000000000000000000000000..30867f5b214e9cbced1b5d04c0db46f7abb4b666 --- /dev/null +++ b/ansible/roles/elasticsearch/tasks/elasticsearch-scripts.yml @@ -0,0 +1,21 @@ +--- + +- set_fact: es_script_dir={{ es_conf_dir }}/{{es_instance_name}} + tags: + - always + +- set_fact: es_script_dir={{es_config['path.scripts']}} + when: es_config['path.scripts'] is defined + tags: + - always + +- name: Create script dir + file: state=directory path={{ es_script_dir }} owner={{ es_user }} group={{ es_group }} recurse=yes + +- name: Copy default scripts to elasticsearch + copy: src=scripts dest={{ es_script_dir }} owner={{ es_user }} group={{ es_group }} + when: es_scripts_fileglob is not defined + +- name: Copy scripts to elasticsearch + copy: src={{ item }} dest={{ es_script_dir }} owner={{ es_user }} group={{ es_group }} + with_fileglob: "{{ es_scripts_fileglob | default('') }}" diff --git a/ansible/roles/elasticsearch/tasks/elasticsearch-templates.yml b/ansible/roles/elasticsearch/tasks/elasticsearch-templates.yml new file mode 100755 index 
0000000000000000000000000000000000000000..27c0c42bb3b905ad18bcc2c1fdf3849d337d94f0
--- /dev/null
+++ b/ansible/roles/elasticsearch/tasks/elasticsearch-templates.yml
@@ -0,0 +1,14 @@
+---
+
+- file: path=/etc/elasticsearch/templates state=directory owner={{ es_user }} group={{ es_group }}
+
+- name: Copy default templates to elasticsearch
+  copy: src=templates dest=/etc/elasticsearch/ owner={{ es_user }} group={{ es_group }}
+  notify: load-templates
+  when: es_templates_fileglob is not defined
+
+- name: Copy templates to elasticsearch
+  copy: src={{ item }} dest=/etc/elasticsearch/templates owner={{ es_user }} group={{ es_group }}
+  notify: load-templates
+  with_fileglob:
+    - "{{ es_templates_fileglob | default('') }}"
\ No newline at end of file
diff --git a/ansible/roles/elasticsearch/tasks/elasticsearch.yml b/ansible/roles/elasticsearch/tasks/elasticsearch.yml
new file mode 100755
index 0000000000000000000000000000000000000000..e2361d49bf1049d252ed748a30d828c8514b2234
--- /dev/null
+++ b/ansible/roles/elasticsearch/tasks/elasticsearch.yml
@@ -0,0 +1,13 @@
+---
+
+- name: Include optional user and group creation.
+  when: (es_user_id is defined) and (es_group_id is defined)
+  include: elasticsearch-optional-user.yml
+
+- name: Include Debian specific Elasticsearch install tasks
+  include: elasticsearch-Debian.yml
+  when: ansible_os_family == 'Debian'
+
+- name: Include RedHat specific Elasticsearch install tasks
+  include: elasticsearch-RedHat.yml
+  when: ansible_os_family == 'RedHat'
diff --git a/ansible/roles/elasticsearch/tasks/java.yml b/ansible/roles/elasticsearch/tasks/java.yml
new file mode 100755
index 0000000000000000000000000000000000000000..c0c63b87bf1ece71c4b5daea0f98f40ec837aa58
--- /dev/null
+++ b/ansible/roles/elasticsearch/tasks/java.yml
@@ -0,0 +1,30 @@
+---
+
+- set_fact: java_state="present"
+
+- set_fact: java_state="latest"
+  when: update_java == true
+
+- name: RedHat - Ensure Java is installed
+  yum: name={{ java }} state={{java_state}}
+  when: ansible_os_family == 'RedHat'
+
+- name: Refresh java repo
+  apt: update_cache=yes
+  changed_when: false
+  when: ansible_os_family == 'Debian'
+
+- name: Debian - Ensure Java is installed
+  apt: name={{ java }} state={{java_state}}
+  when: ansible_os_family == 'Debian'
+
+- name: Check whether the installed Java is OpenJDK
+  shell: java -version 2>&1 | grep OpenJDK
+  register: open_jdk
+  ignore_errors: yes
+  changed_when: false
+
+#https://github.com/docker-library/openjdk/issues/19 - ensures tests pass despite broken Java 8 certs
+- name: refresh the java ca-certificates
+  command: /var/lib/dpkg/info/ca-certificates-java.postinst configure
+  when: ansible_distribution == 'Ubuntu' and open_jdk.rc == 0
+  changed_when: false
diff --git a/ansible/roles/elasticsearch/tasks/main.yml b/ansible/roles/elasticsearch/tasks/main.yml
new file mode 100755
index 0000000000000000000000000000000000000000..4648f8d0c315f74433706f6ca20f6892c584ca30
--- /dev/null
+++ b/ansible/roles/elasticsearch/tasks/main.yml
@@ -0,0 +1,56 @@
+---
+- name: os-specific vars
+  include_vars: "{{ansible_os_family}}.yml"
+  tags:
+    - always
+
+- name: check-set-parameters
+  include: elasticsearch-parameters.yml
+  tags:
+    - always
+
+- include: java.yml
+  when: es_java_install
+  tags:
+    - java
+
+- include: elasticsearch.yml
+  tags:
+    - install
+
+- include: elasticsearch-config.yml
+  tags:
+    - config
+
+- include: elasticsearch-scripts.yml
+  when: es_scripts
+  tags:
+    - scripts
+
+- include: elasticsearch-plugins.yml
+  when: es_plugins is defined or es_plugins_reinstall
+  tags:
+    - plugins
+
+#We always execute xpack as we may need to remove features
+- include: xpack/elasticsearch-xpack.yml
+  tags:
+    - xpack
+
+- include: elasticsearch-templates.yml
+  when: es_templates
+  tags:
+    - templates
+
+- meta: flush_handlers
+
+- name: Wait for elasticsearch to start up
+  wait_for: host={{es_api_host}} port={{es_api_port}} delay=5 connect_timeout=1
+
+- name: activate-license
+  include: ./xpack/security/elasticsearch-xpack-activation.yml
+  when: es_enable_xpack and es_xpack_license is defined and es_xpack_license != ''
+
+#perform security actions here now elasticsearch is started
+- include: ./xpack/security/elasticsearch-security-native.yml
+  when: (es_enable_xpack and ("security" in es_xpack_features)) and ((es_users is defined and es_users.native is defined) or (es_roles is defined and es_roles.native is defined))
diff --git a/ansible/roles/elasticsearch/tasks/xpack/elasticsearch-xpack-install.yml b/ansible/roles/elasticsearch/tasks/xpack/elasticsearch-xpack-install.yml
new file mode 100755
index 0000000000000000000000000000000000000000..16bbc5ae6744a8c5202270052f1053231a3fec45
--- /dev/null
+++ b/ansible/roles/elasticsearch/tasks/xpack/elasticsearch-xpack-install.yml
@@ -0,0 +1,39 @@
+---
+
+#Test if the feature is installed
+- shell: "{{es_home}}/bin/elasticsearch-plugin list | grep x-pack"
+  register: x_pack_installed
+  changed_when: False
+  failed_when: "'ERROR' in x_pack_installed.stdout"
+  check_mode: no
+  ignore_errors: yes
+  environment:
+    CONF_DIR: "{{ conf_dir }}"
+    ES_INCLUDE: "{{ instance_default_file }}"
+
+
+#Remove X-Pack if it is installed and has not been requested, or the ES version has changed
+- name: Remove x-pack plugin
+  command: "{{es_home}}/bin/elasticsearch-plugin remove x-pack"
+  register: xpack_state
+  failed_when: "'ERROR' in xpack_state.stdout"
+  changed_when: xpack_state.rc == 0
+  when: x_pack_installed.rc == 0 and (not es_enable_xpack or es_version_changed)
+  notify: restart elasticsearch
+  environment:
+    CONF_DIR: "{{ conf_dir }}"
+    ES_INCLUDE: "{{ instance_default_file }}"
+
+
+#Install the plugin if it has been requested and is not installed, or the ES version has changed (so it was removed above)
+- name: Install x-pack plugin
+  command: >
+    {{es_home}}/bin/elasticsearch-plugin install --silent --batch x-pack {% if es_proxy_host is defined and es_proxy_host != '' %} -Dhttp.proxyHost={{ es_proxy_host }} -Dhttp.proxyPort={{ es_proxy_port }} {% endif %}
+  register: xpack_state
+  failed_when: "'ERROR' in xpack_state.stdout"
+  changed_when: xpack_state.rc == 0
+  when: (x_pack_installed.rc == 1 or es_version_changed) and es_enable_xpack
+  notify: restart elasticsearch
+  environment:
+    CONF_DIR: "{{ conf_dir }}"
+    ES_INCLUDE: "{{ instance_default_file }}"
diff --git a/ansible/roles/elasticsearch/tasks/xpack/elasticsearch-xpack.yml b/ansible/roles/elasticsearch/tasks/xpack/elasticsearch-xpack.yml
new file mode 100755
index 0000000000000000000000000000000000000000..1e5478230e1550785210dbb313fddeb9a5bf18f1
--- /dev/null
+++ b/ansible/roles/elasticsearch/tasks/xpack/elasticsearch-xpack.yml
@@ -0,0 +1,12 @@
+---
+
+- set_fact: es_version_changed={{ (debian_elasticsearch_install_from_repo is defined and debian_elasticsearch_install_from_repo.changed) or (redhat_elasticsearch_install_from_repo is defined and redhat_elasticsearch_install_from_repo.changed) or (elasticsearch_install_from_package is defined and elasticsearch_install_from_package.changed) }}
+
+- include: elasticsearch-xpack-install.yml
+
+#Security configuration
+- include: security/elasticsearch-security.yml
+
+#Add any feature specific configuration here
+- name: Set Plugin Directory Permissions
+  file: state=directory path={{ es_home
}}/plugins owner={{ es_user }} group={{ es_group }} recurse=yes diff --git a/ansible/roles/elasticsearch/tasks/xpack/security/elasticsearch-security-file.yml b/ansible/roles/elasticsearch/tasks/xpack/security/elasticsearch-security-file.yml new file mode 100755 index 0000000000000000000000000000000000000000..b09237c065108f5f8964bf6e807bfe97c0ba13c7 --- /dev/null +++ b/ansible/roles/elasticsearch/tasks/xpack/security/elasticsearch-security-file.yml @@ -0,0 +1,73 @@ +--- +- set_fact: manage_file_users=es_users is defined and es_users.file is defined + +#Ensure x-pack conf directory is created +- name: Ensure x-pack conf directory exists (file) + file: path={{ conf_dir }}/x-pack state=directory owner={{ es_user }} group={{ es_group }} + changed_when: False + when: es_enable_xpack and '"security" in es_xpack_features' + +#List current users +- name: List Users + shell: cat {{conf_dir}}/x-pack/users | awk -F':' '{print $1}' + register: current_file_users + when: manage_file_users + changed_when: False + +- set_fact: users_to_remove={{ current_file_users.stdout_lines | difference (es_users.file.keys()) }} + when: manage_file_users + +#Remove users +- name: Remove Users + command: > + {{es_home}}/bin/x-pack/users userdel {{item}} + with_items: "{{users_to_remove | default([])}}" + when: manage_file_users and (users_to_remove | length > 0) + environment: + CONF_DIR: "{{ conf_dir }}" + ES_HOME: "{{es_home}}" + + +- set_fact: users_to_add={{ es_users.file.keys() | difference (current_file_users.stdout_lines) }} + when: manage_file_users + +#Add users +- name: Add Users + command: > + {{es_home}}/bin/x-pack/users useradd {{item}} -p {{es_users.file[item].password}} + with_items: "{{users_to_add | default([])}}" + when: manage_file_users and users_to_add | length > 0 + no_log: True + environment: + CONF_DIR: "{{ conf_dir }}" + ES_HOME: "{{es_home}}" + +#Set passwords for all users declared - Required as the useradd will not change existing user passwords +- name: Set User Passwords + command: > + {{es_home}}/bin/x-pack/users passwd {{item.key}} -p {{item.value.password}} + with_dict: "{{(es_users | default({'file':{}})).file}}" + when: manage_file_users and es_users.file.keys() | length > 0 + #Currently no easy way to figure out if the password has changed or to know what it currently is so we can skip. + changed_when: False + no_log: True + environment: + CONF_DIR: "{{ conf_dir }}" + ES_HOME: "{{es_home}}" + +- set_fact: users_roles={{es_users.file | extract_role_users}} + when: manage_file_users + +#Copy Roles files +- name: Copy roles.yml File for Instance + template: src=security/roles.yml.j2 dest={{conf_dir}}/x-pack/roles.yml owner={{ es_user }} group={{ es_group }} mode=0644 force=yes + when: es_roles is defined and es_roles.file is defined + +#Overwrite users_roles file +- name: Copy User Roles + template: src=security/users_roles.j2 dest={{conf_dir}}/x-pack/users_roles mode=0644 force=yes + when: manage_file_users and users_roles | length > 0 + +#Set permission on security directory. E.g. if 2 nodes are installed on the same machine, the second node will not get the users file created at install, causing the files being created at es_users call and then having the wrong Permissions. 
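+#(For reference, the file realm variables consumed above take roughly this shape -
+# names and values are illustrative, and the 'roles' key is an assumption about the
+# input expected by the custom extract_role_users filter:
+#  es_users:
+#    file:
+#      es_admin:
+#        password: changeMe
+#        roles: [admin]
+#  es_roles:
+#    file:
+#      admin:
+#        cluster: [all]
+#)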
+- name: Set Security Directory Permissions Recursive
+  file: state=directory path={{conf_dir}}/x-pack/ owner={{ es_user }} group={{ es_group }} recurse=yes
\ No newline at end of file
diff --git a/ansible/roles/elasticsearch/tasks/xpack/security/elasticsearch-security-native.yml b/ansible/roles/elasticsearch/tasks/xpack/security/elasticsearch-security-native.yml
new file mode 100755
index 0000000000000000000000000000000000000000..96bedfaff4265dbb69d74597eeb4d45738debbb7
--- /dev/null
+++ b/ansible/roles/elasticsearch/tasks/xpack/security/elasticsearch-security-native.yml
@@ -0,0 +1,118 @@
+---
+
+- set_fact: manage_native_users=false
+
+- set_fact: manage_native_users=true
+  when: es_users is defined and es_users.native is defined
+
+- set_fact: manage_native_roles=false
+
+- set_fact: manage_native_roles=true
+  when: es_roles is defined and es_roles.native is defined
+
+# If the playbook runs too fast, native commands could fail as the Native Realm is not yet up
+- name: Wait 15 seconds for the Native Realm to come up
+  pause: seconds=15
+
+#If security has only just been installed, the node may be either stopped or started: 1. if stopped, it must be started to load the native realm 2. if started, it must be restarted to load it
+
+#List current users
+- name: List Native Users
+  uri:
+    url: http://{{es_api_host}}:{{es_api_port}}/_xpack/security/user
+    method: GET
+    user: "{{es_api_basic_auth_username}}"
+    password: "{{es_api_basic_auth_password}}"
+    force_basic_auth: yes
+    status_code: 200
+  register: user_list_response
+  when: manage_native_users
+
+#Current users, not including those reserved
+- set_fact: current_users={{ user_list_response.json | filter_reserved }}
+  when: manage_native_users
+
+#Identify users that are not declared
+- set_fact: users_to_remove={{ current_users | difference ( es_users.native.keys() ) }}
+  when: manage_native_users
+
+#Delete all users that are no longer required
+- name: Delete Native Users
+  uri:
+    url: http://{{es_api_host}}:{{es_api_port}}/_xpack/security/user/{{item}}
+    method: DELETE
+    status_code: 200
+    user: "{{es_api_basic_auth_username}}"
+    password: "{{es_api_basic_auth_password}}"
+    force_basic_auth: yes
+  when: manage_native_users and users_to_remove | length > 0
+  with_items: "{{users_to_remove | default([]) }}"
+
+- set_fact: native_users={{ es_users.native }}
+  when: manage_native_users and es_users.native.keys() | length > 0
+
+#Overwrite all other users
+- name: Update Native Users
+  uri:
+    url: http://{{es_api_host}}:{{es_api_port}}/_xpack/security/user/{{item.key}}
+    method: POST
+    body_format: json
+    body: "{{item.value | to_json}}"
+    status_code: 200
+    user: "{{es_api_basic_auth_username}}"
+    password: "{{es_api_basic_auth_password}}"
+    force_basic_auth: yes
+  when: manage_native_users and native_users.keys() | length > 0
+  no_log: True
+  with_dict: "{{native_users | default({}) }}"
+
+#List current roles, not including those reserved
+- name: List Native Roles
+  uri:
+    url: http://{{es_api_host}}:{{es_api_port}}/_xpack/security/role
+    method: GET
+    body_format: json
+    user: "{{es_api_basic_auth_username}}"
+    password: "{{es_api_basic_auth_password}}"
+    force_basic_auth: yes
+    status_code: 200
+  register: role_list_response
+  when: manage_native_roles
+
+- set_fact: current_roles={{ role_list_response.json | filter_reserved }}
+  when: manage_native_roles
+- debug: msg="{{current_roles}}"
+
+- set_fact: roles_to_remove={{ current_roles | difference ( es_roles.native.keys() ) }}
+  when: manage_native_roles
+
+
+#Delete all roles that are no longer required
+- name: Delete Native Roles
+  uri:
+    url: http://{{es_api_host}}:{{es_api_port}}/_xpack/security/role/{{item}}
+    method: DELETE
+    status_code: 200
+    user: "{{es_api_basic_auth_username}}"
+    password: "{{es_api_basic_auth_password}}"
+    force_basic_auth: yes
+  when: manage_native_roles and roles_to_remove | length > 0
+  with_items: "{{roles_to_remove | default([]) }}"
+
+
+- set_fact: native_roles={{ es_roles.native }}
+  when: manage_native_roles and es_roles.native.keys() | length > 0
+
+#Update other roles
+- name: Update Native Roles
+  uri:
+    url: http://{{es_api_host}}:{{es_api_port}}/_xpack/security/role/{{item.key}}
+    method: POST
+    body_format: json
+    body: "{{item.value | to_json}}"
+    status_code: 200
+    user: "{{es_api_basic_auth_username}}"
+    password: "{{es_api_basic_auth_password}}"
+    force_basic_auth: yes
+  when: manage_native_roles and native_roles.keys() | length > 0
+  with_dict: "{{ native_roles | default({})}}"
diff --git a/ansible/roles/elasticsearch/tasks/xpack/security/elasticsearch-security.yml b/ansible/roles/elasticsearch/tasks/xpack/security/elasticsearch-security.yml
new file mode 100755
index 0000000000000000000000000000000000000000..820c7e3f39cd603b1b893f907552d1eb8bb7c1cf
--- /dev/null
+++ b/ansible/roles/elasticsearch/tasks/xpack/security/elasticsearch-security.yml
@@ -0,0 +1,30 @@
+---
+#Security specific configuration done here
+
+#TODO: 1. Skip users with no password defined or error 2.
Passwords | length > 6 + +#-----------------------------FILE BASED REALM---------------------------------------- + +- include: elasticsearch-security-file.yml + when: (es_enable_xpack and '"security" in es_xpack_features') and ((es_users is defined and es_users.file) or (es_roles is defined and es_roles.file is defined)) + +#-----------------------------ROLE MAPPING ---------------------------------------- + +#Copy Roles files +- name: Copy role_mapping.yml File for Instance + template: src=security/role_mapping.yml.j2 dest={{conf_dir}}/x-pack/role_mapping.yml owner={{ es_user }} group={{ es_group }} mode=0644 force=yes + when: es_role_mapping is defined + +#-----------------------------AUTH FILE---------------------------------------- + +- name: Copy message auth key to elasticsearch + copy: src={{ es_message_auth_file }} dest={{conf_dir}}/x-pack/system_key owner={{ es_user }} group={{ es_group }} mode=0600 force=yes + when: es_message_auth_file is defined + +#------------------------------------------------------------------------------------ + +#Ensure security conf directory is created +- name: Ensure security conf directory exists + file: path={{ conf_dir }}/security state=directory owner={{ es_user }} group={{ es_group }} + changed_when: False + when: es_enable_xpack and '"security" in es_xpack_features' diff --git a/ansible/roles/elasticsearch/tasks/xpack/security/elasticsearch-xpack-activation.yml b/ansible/roles/elasticsearch/tasks/xpack/security/elasticsearch-xpack-activation.yml new file mode 100755 index 0000000000000000000000000000000000000000..4b1fd3f368fa62633a94f4f71e103cd5cd5bf4ab --- /dev/null +++ b/ansible/roles/elasticsearch/tasks/xpack/security/elasticsearch-xpack-activation.yml @@ -0,0 +1,37 @@ +--- + +- name: Activate ES license (without security authentication) + uri: + method: PUT + url: "http://{{es_api_host}}:{{es_api_port}}/_license?acknowledge=true" + body_format: json + body: "{{ es_xpack_license }}" + return_content: yes + register: license_activated + no_log: True + when: 'not "security" in es_xpack_features' + failed_when: > + license_activated.status != 200 or + license_activated.json.license_status is not defined or + license_activated.json.license_status != 'valid' + +- name: Activate ES license (with security authentication) + uri: + method: PUT + url: "http://{{es_api_host}}:{{es_api_port}}/_license?acknowledge=true" + user: "{{es_api_basic_auth_username}}" + password: "{{es_api_basic_auth_password}}" + body_format: json + force_basic_auth: yes + body: "{{ es_xpack_license }}" + return_content: yes + register: license_activated + no_log: True + when: '"security" in es_xpack_features' + failed_when: > + license_activated.status != 200 or + license_activated.json.license_status is not defined or + license_activated.json.license_status != 'valid' + +- debug: + msg: "License: {{ license_activated.content }}" diff --git a/ansible/roles/elasticsearch/templates/elasticsearch.j2 b/ansible/roles/elasticsearch/templates/elasticsearch.j2 new file mode 100755 index 0000000000000000000000000000000000000000..0c7f4a6da102065e0afe2bbd3f51b13807f6ef7f --- /dev/null +++ b/ansible/roles/elasticsearch/templates/elasticsearch.j2 @@ -0,0 +1,73 @@ +################################ +# Elasticsearch +################################ + +# Elasticsearch home directory +ES_HOME={{es_home}} + +# Elasticsearch configuration directory +CONF_DIR={{conf_dir}} + +# Elasticsearch data directory +DATA_DIR={{ data_dirs | array_to_str }} + +# Elasticsearch logs directory 
+LOG_DIR={{log_dir}} + +# Elasticsearch PID directory +PID_DIR={{pid_dir}} + +ES_JVM_OPTIONS={{conf_dir}}/jvm.options + +# Configure restart on package upgrade (true, every other setting will lead to not restarting) +#ES_RESTART_ON_UPGRADE=true + +# Path to the GC log file +#ES_GC_LOG_FILE=/var/log/elasticsearch/gc.log + +################################ +# Elasticsearch service +################################ + +# SysV init.d +# +# When executing the init script, this user will be used to run the elasticsearch service. +# The default value is 'elasticsearch' and is declared in the init.d file. +# Note that this setting is only used by the init script. If changed, make sure that +# the configured user can read and write into the data, work, plugins and log directories. +# For systemd service, the user is usually configured in file /usr/lib/systemd/system/elasticsearch.service +ES_USER={{es_user}} +ES_GROUP={{es_group}} + +# The number of seconds to wait before checking if Elasticsearch started successfully as a daemon process +ES_STARTUP_SLEEP_TIME=5 + +################################ +# System properties +################################ + +# Specifies the maximum file descriptor number that can be opened by this process +# When using Systemd, this setting is ignored and the LimitNOFILE defined in +# /usr/lib/systemd/system/elasticsearch.service takes precedence +{% if es_max_open_files is defined %} +#MAX_OPEN_FILES +MAX_OPEN_FILES={{es_max_open_files}} +{% endif %} + +# The maximum number of bytes of memory that may be locked into RAM +# Set to "unlimited" if you use the 'bootstrap.memory_lock: true' option +# in elasticsearch.yml (ES_HEAP_SIZE must also be set). +# When using Systemd, the LimitMEMLOCK property must be set +# in /usr/lib/systemd/system/elasticsearch.service +#MAX_LOCKED_MEMORY= +{% if m_lock_enabled %} +MAX_LOCKED_MEMORY=unlimited +{% endif %} + +# Maximum number of VMA (Virtual Memory Areas) a process can own +# When using Systemd, this setting is ignored and the 'vm.max_map_count' +# property is set at boot time in /usr/lib/sysctl.d/elasticsearch.conf +#MAX_MAP_COUNT=262144 +{% if es_max_map_count is defined %} +MAX_MAP_COUNT={{es_max_map_count}} +{% endif %} \ No newline at end of file diff --git a/ansible/roles/elasticsearch/templates/elasticsearch.repo b/ansible/roles/elasticsearch/templates/elasticsearch.repo new file mode 100755 index 0000000000000000000000000000000000000000..fdf04fa0ad6a8c8a54eabb4bac537a561ab816b6 --- /dev/null +++ b/ansible/roles/elasticsearch/templates/elasticsearch.repo @@ -0,0 +1,10 @@ +[elasticsearch-{{ es_major_version }}] +name=Elasticsearch repository for {{ es_major_version }} packages +baseurl=https://artifacts.elastic.co/packages/{{ es_major_version }}/yum +gpgcheck=1 +gpgkey=https://artifacts.elastic.co/GPG-KEY-elasticsearch +enabled=1 +{% if es_proxy_host is defined and es_proxy_port is defined %} +proxy=http://{{ es_proxy_host }}:{{es_proxy_port}} +{% endif %} + diff --git a/ansible/roles/elasticsearch/templates/elasticsearch.yml.j2 b/ansible/roles/elasticsearch/templates/elasticsearch.yml.j2 new file mode 100755 index 0000000000000000000000000000000000000000..6d5190c5b93dbcd3f26206606018edc50307dd37 --- /dev/null +++ b/ansible/roles/elasticsearch/templates/elasticsearch.yml.j2 @@ -0,0 +1,33 @@ + +{% if es_config %} +{{ es_config | to_nice_yaml }} +{% endif %} + +{% if es_config['cluster.name'] is not defined %} +cluster.name: elasticsearch +{% endif %} + +{% if es_config['node.name'] is not defined %} +node.name: 
{{inventory_hostname}}-{{es_instance_name}} +{% endif %} + +#################################### Paths #################################### + +# Path to directory containing configuration (this file and logging.yml): +path.conf: {{ conf_dir }} + +path.data: {{ data_dirs | array_to_str }} + +path.logs: {{ log_dir }} + +{% if not "security" in es_xpack_features %} +xpack.security.enabled: false +{% endif %} + +{% if not "monitoring" in es_xpack_features %} +xpack.monitoring.enabled: false +{% endif %} + +{% if not "alerting" in es_xpack_features %} +xpack.watcher.enabled: false +{% endif %} diff --git a/ansible/roles/elasticsearch/templates/init/debian/elasticsearch.j2 b/ansible/roles/elasticsearch/templates/init/debian/elasticsearch.j2 new file mode 100755 index 0000000000000000000000000000000000000000..5a21e4765fe7b24a3263cf9209082d98152c76b7 --- /dev/null +++ b/ansible/roles/elasticsearch/templates/init/debian/elasticsearch.j2 @@ -0,0 +1,216 @@ +#!/bin/bash +# +# /etc/init.d/elasticsearch -- startup script for Elasticsearch +# +### BEGIN INIT INFO +# Provides: elasticsearch +# Required-Start: $network $remote_fs $named +# Required-Stop: $network $remote_fs $named +# Default-Start: 2 3 4 5 +# Default-Stop: 0 1 6 +# Short-Description: Starts elasticsearch +# Description: Starts elasticsearch using start-stop-daemon +### END INIT INFO + +PATH=/bin:/usr/bin:/sbin:/usr/sbin +NAME={{es_instance_name}}_{{default_file | basename}} +{% if es_config['node.name'] is defined %} +DESC="Elasticsearch Server - {{es_config['node.name']}}" +{% else %} +DESC="Elasticsearch Server - {{es_instance_name}}" +{% endif %} + +DEFAULT=/etc/default/$NAME + +if [ `id -u` -ne 0 ]; then + echo "You need root privileges to run this script" + exit 1 +fi + +. /lib/lsb/init-functions +if [ -r /etc/default/rcS ]; then + . /etc/default/rcS +fi + +# The following variables can be overwritten in $DEFAULT + +# Run Elasticsearch as this user ID and group ID +ES_USER={{es_user}} +ES_GROUP={{es_group}} + +# Directory where the Elasticsearch binary distribution resides +ES_HOME={{es_home}} + +# Maximum number of open files +{% if es_max_open_files is defined %} +MAX_OPEN_FILES={{es_max_open_files}} +{% endif %} + +# Maximum amount of locked memory +#MAX_LOCKED_MEMORY= +{% if m_lock_enabled %} +MAX_LOCKED_MEMORY=unlimited +{% endif %} + +# Elasticsearch log directory +LOG_DIR={{log_dir}} + +# Elasticsearch data directory +DATA_DIR={{ data_dirs | array_to_str }} + +# Elasticsearch configuration directory +CONF_DIR={{conf_dir}} + +# Maximum number of VMA (Virtual Memory Areas) a process can own +{% if es_max_map_count is defined %} +MAX_MAP_COUNT={{es_max_map_count}} +{% endif %} + +# Elasticsearch PID file directory +PID_DIR={{pid_dir}} + +ES_JVM_OPTIONS="{{conf_dir}}/jvm.options" + +# End of variables that can be overwritten in $DEFAULT + +# overwrite settings from default file +if [ -f "$DEFAULT" ]; then + . "$DEFAULT" +fi + +# CONF_FILE setting was removed +if [ ! -z "$CONF_FILE" ]; then + echo "CONF_FILE setting is no longer supported. elasticsearch.yml must be placed in the config directory and cannot be renamed." + exit 1 +fi + +# Define other required variables +PID_FILE="$PID_DIR/$NAME.pid" +DAEMON={{es_home}}/bin/elasticsearch +DAEMON_OPTS="-d -p $PID_FILE -Edefault.path.home=$ES_HOME -Edefault.path.logs=$LOG_DIR -Edefault.path.data=$DATA_DIR -Edefault.path.conf=$CONF_DIR" + +export ES_JAVA_OPTS +export JAVA_HOME +export ES_INCLUDE +export ES_JVM_OPTIONS + +# Check DAEMON exists +if [ ! 
-x "$DAEMON" ]; then + echo "The elasticsearch startup script does not exists or it is not executable, tried: $DAEMON" + exit 1 +fi + +checkJava() { + if [ -x "$JAVA_HOME/bin/java" ]; then + JAVA="$JAVA_HOME/bin/java" + else + JAVA=`which java` + fi + + if [ ! -x "$JAVA" ]; then + echo "Could not find any executable java binary. Please install java in your PATH or set JAVA_HOME" + exit 1 + fi +} + +case "$1" in + start) + checkJava + +{% if es_version | version_compare('5.0', '<') %} + if [ -n "$MAX_LOCKED_MEMORY" -a -z "$ES_HEAP_SIZE" ]; then + log_failure_msg "MAX_LOCKED_MEMORY is set - ES_HEAP_SIZE must also be set" + exit 1 + fi +{% endif %} + + log_daemon_msg "Starting $DESC" + + pid=`pidofproc -p $PID_FILE elasticsearch` + if [ -n "$pid" ] ; then + log_begin_msg "Already running." + log_end_msg 0 + exit 0 + fi + + # Prepare environment + mkdir -p "$LOG_DIR" "$DATA_DIR" && chown "$ES_USER":"$ES_GROUP" "$LOG_DIR" "$DATA_DIR" + + # Ensure that the PID_DIR exists (it is cleaned at OS startup time) + if [ -n "$PID_DIR" ] && [ ! -e "$PID_DIR" ]; then + mkdir -p "$PID_DIR" && chown "$ES_USER":"$ES_GROUP" "$PID_DIR" + fi + if [ -n "$PID_FILE" ] && [ ! -e "$PID_FILE" ]; then + touch "$PID_FILE" && chown "$ES_USER":"$ES_GROUP" "$PID_FILE" + fi + + if [ -n "$MAX_OPEN_FILES" ]; then + ulimit -n $MAX_OPEN_FILES + fi + + if [ -n "$MAX_LOCKED_MEMORY" ]; then + ulimit -l $MAX_LOCKED_MEMORY + fi + + if [ -n "$MAX_MAP_COUNT" -a -f /proc/sys/vm/max_map_count ]; then + sysctl -q -w vm.max_map_count=$MAX_MAP_COUNT + fi + + # Start Daemon + start-stop-daemon -d $ES_HOME --start -b --user "$ES_USER" -c "$ES_USER" --pidfile "$PID_FILE" --exec $DAEMON -- $DAEMON_OPTS + return=$? + if [ $return -eq 0 ]; then + i=0 + timeout=10 + # Wait for the process to be properly started before exiting + until { kill -0 `cat "$PID_FILE"`; } >/dev/null 2>&1 + do + sleep 1 + i=$(($i + 1)) + if [ $i -gt $timeout ]; then + log_end_msg 1 + exit 1 + fi + done + fi + log_end_msg $return + exit $return + ;; + stop) + log_daemon_msg "Stopping $DESC" + + if [ -f "$PID_FILE" ]; then + start-stop-daemon --stop --pidfile "$PID_FILE" \ + --user "$ES_USER" \ + --quiet \ + --retry forever/TERM/20 > /dev/null + if [ $? -eq 1 ]; then + log_progress_msg "$DESC is not running but pid file exists, cleaning up" + elif [ $? -eq 3 ]; then + PID="`cat $PID_FILE`" + log_failure_msg "Failed to stop $DESC (pid $PID)" + exit 1 + fi + rm -f "$PID_FILE" + else + log_progress_msg "(not running)" + fi + log_end_msg 0 + ;; + status) + status_of_proc -p $PID_FILE elasticsearch elasticsearch && exit 0 || exit $? 
+ ;; + restart|force-reload) + if [ -f "$PID_FILE" ]; then + $0 stop + sleep 1 + fi + $0 start + ;; + *) + log_success_msg "Usage: $0 {start|stop|restart|force-reload|status}" + exit 1 + ;; +esac + +exit 0 diff --git a/ansible/roles/elasticsearch/templates/init/redhat/elasticsearch.j2 b/ansible/roles/elasticsearch/templates/init/redhat/elasticsearch.j2 new file mode 100755 index 0000000000000000000000000000000000000000..f9060746a194f3acd0e9769bc393fcdae8b12d34 --- /dev/null +++ b/ansible/roles/elasticsearch/templates/init/redhat/elasticsearch.j2 @@ -0,0 +1,205 @@ +#!/bin/sh +# +# elasticsearch <summary> +# +# chkconfig: 2345 80 20 +# description: Starts and stops a single elasticsearch instance on this system +# + +### BEGIN INIT INFO +# Provides: Elasticsearch +# Required-Start: $network $named +# Required-Stop: $network $named +# Default-Start: 2 3 4 5 +# Default-Stop: 0 1 6 +# Short-Description: This service manages the elasticsearch daemon +# Description: Elasticsearch is a very scalable, schema-free and high-performance search solution supporting multi-tenancy and near realtime search. +### END INIT INFO + +# +# init.d / servicectl compatibility (openSUSE) +# +if [ -f /etc/rc.status ]; then + . /etc/rc.status + rc_reset +fi + +# +# Source function library. +# +if [ -f /etc/rc.d/init.d/functions ]; then + . /etc/rc.d/init.d/functions +fi + +# Sets the default values for elasticsearch variables used in this script +ES_USER="{{es_user}}" +ES_GROUP="{{es_group}}" +ES_HOME="{{es_home}}" +{% if es_max_open_files is defined %} +MAX_OPEN_FILES={{es_max_open_files}} +{% endif %} +# Maximum number of VMA (Virtual Memory Areas) a process can own +{% if es_max_map_count is defined %} +MAX_MAP_COUNT={{es_max_map_count}} +{% endif %} + +LOG_DIR="{{log_dir}}" +DATA_DIR={{ data_dirs | array_to_str }} +CONF_DIR="{{conf_dir}}" + +PID_DIR="{{pid_dir}}" +ES_JVM_OPTIONS="{{conf_dir}}/jvm.options" + +# Source the default env file +ES_ENV_FILE="{{instance_default_file}}" +if [ -f "$ES_ENV_FILE" ]; then + . "$ES_ENV_FILE" +fi + +# CONF_FILE setting was removed +if [ ! -z "$CONF_FILE" ]; then + echo "CONF_FILE setting is no longer supported. elasticsearch.yml must be placed in the config directory and cannot be renamed." + exit 1 +fi + +exec="$ES_HOME/bin/elasticsearch" +prog="{{es_instance_name}}_{{default_file | basename}}" +pidfile="$PID_DIR/${prog}.pid" + +export ES_JAVA_OPTS +export JAVA_HOME +export ES_INCLUDE +export ES_JVM_OPTIONS +export ES_STARTUP_SLEEP_TIME + +# export unsupported variables so bin/elasticsearch can reject them and inform the user these are unsupported +if test -n "$ES_MIN_MEM"; then export ES_MIN_MEM; fi +if test -n "$ES_MAX_MEM"; then export ES_MAX_MEM; fi +if test -n "$ES_HEAP_SIZE"; then export ES_HEAP_SIZE; fi +if test -n "$ES_HEAP_NEWSIZE"; then export ES_HEAP_NEWSIZE; fi +if test -n "$ES_DIRECT_SIZE"; then export ES_DIRECT_SIZE; fi +if test -n "$ES_USE_IPV4"; then export ES_USE_IPV4; fi +if test -n "$ES_GC_OPTS"; then export ES_GC_OPTS; fi +if test -n "$ES_GC_LOG_FILE"; then export ES_GC_LOG_FILE; fi + +lockfile=/var/lock/subsys/$prog + +# backwards compatibility for old config sysconfig files, pre 0.90.1 +if [ -n $USER ] && [ -z $ES_USER ] ; then + ES_USER=$USER +fi + +if [ ! -x "$exec" ]; then + echo "The elasticsearch startup script does not exists or it is not executable, tried: $exec" + exit 1 +fi + +checkJava() { + if [ -x "$JAVA_HOME/bin/java" ]; then + JAVA="$JAVA_HOME/bin/java" + else + JAVA=`which java` + fi + + if [ ! 
-x "$JAVA" ]; then + echo "Could not find any executable java binary. Please install java in your PATH or set JAVA_HOME" + exit 1 + fi +} + +start() { + checkJava + [ -x $exec ] || exit 5 + + if [ -n "$MAX_OPEN_FILES" ]; then + ulimit -n $MAX_OPEN_FILES + fi + if [ -n "$MAX_LOCKED_MEMORY" ]; then + ulimit -l $MAX_LOCKED_MEMORY + fi + if [ -n "$MAX_MAP_COUNT" -a -f /proc/sys/vm/max_map_count ]; then + sysctl -q -w vm.max_map_count=$MAX_MAP_COUNT + fi + + # Ensure that the PID_DIR exists (it is cleaned at OS startup time) + if [ -n "$PID_DIR" ] && [ ! -e "$PID_DIR" ]; then + mkdir -p "$PID_DIR" && chown "$ES_USER":"$ES_GROUP" "$PID_DIR" + fi + if [ -n "$pidfile" ] && [ ! -e "$pidfile" ]; then + touch "$pidfile" && chown "$ES_USER":"$ES_GROUP" "$pidfile" + fi + + cd $ES_HOME + echo -n $"Starting $prog: " + # if not running, start it up here, usually something like "daemon $exec" + daemon --user $ES_USER --pidfile $pidfile $exec -p $pidfile -d -Edefault.path.logs=$LOG_DIR -Edefault.path.data=$DATA_DIR -Edefault.path.conf=$CONF_DIR + retval=$? + echo + [ $retval -eq 0 ] && touch $lockfile + return $retval +} + +stop() { + echo -n $"Stopping $prog: " + # stop it here, often "killproc $prog" + killproc -p $pidfile -d 86400 $prog + retval=$? + echo + [ $retval -eq 0 ] && rm -f $lockfile + return $retval +} + +restart() { + stop + start +} + +reload() { + restart +} + +force_reload() { + restart +} + +rh_status() { + # run checks to determine if the service is running or use generic status + status -p $pidfile $prog +} + +rh_status_q() { + rh_status >/dev/null 2>&1 +} + + +case "$1" in + start) + rh_status_q && exit 0 + $1 + ;; + stop) + rh_status_q || exit 0 + $1 + ;; + restart) + $1 + ;; + reload) + rh_status_q || exit 7 + $1 + ;; + force-reload) + force_reload + ;; + status) + rh_status + ;; + condrestart|try-restart) + rh_status_q || exit 0 + restart + ;; + *) + echo $"Usage: $0 {start|stop|status|restart|condrestart|try-restart|reload|force-reload}" + exit 2 +esac +exit $? diff --git a/ansible/roles/elasticsearch/templates/jvm.options.j2 b/ansible/roles/elasticsearch/templates/jvm.options.j2 new file mode 100755 index 0000000000000000000000000000000000000000..0cf7394891b49b803b88eb7ea71a135c81fe9023 --- /dev/null +++ b/ansible/roles/elasticsearch/templates/jvm.options.j2 @@ -0,0 +1,114 @@ +## JVM configuration + +################################################################ +## IMPORTANT: JVM heap size +################################################################ +## +## You should always set the min and max JVM heap +## size to the same value. For example, to set +## the heap to 4 GB, set: +## +## -Xms4g +## -Xmx4g +## +## See https://www.elastic.co/guide/en/elasticsearch/reference/current/heap-size.html +## for more information +## +################################################################ + +# Xms represents the initial size of total heap space +# Xmx represents the maximum size of total heap space +{% if es_heap_size is defined %} +-Xms{{ es_heap_size }} +{% else %} +-Xms2g +{% endif %} + +{% if es_heap_size is defined %} +-Xmx{{ es_heap_size }} +{% else %} +-Xmx2g +{% endif %} + +################################################################ +## Expert settings +################################################################ +## +## All settings below this section are considered +## expert settings. 
Don't tamper with them unless +## you understand what you are doing +## +################################################################ + +## GC configuration +-XX:+UseConcMarkSweepGC +-XX:CMSInitiatingOccupancyFraction=75 +-XX:+UseCMSInitiatingOccupancyOnly + +## optimizations + +# disable calls to System#gc +-XX:+DisableExplicitGC + +# pre-touch memory pages used by the JVM during initialization +-XX:+AlwaysPreTouch + +## basic + +# force the server VM +-server + +# set to headless, just in case +-Djava.awt.headless=true + +# ensure UTF-8 encoding by default (e.g. filenames) +-Dfile.encoding=UTF-8 + +# use our provided JNA always versus the system one +-Djna.nosys=true + +# flags to keep Netty from being unsafe +-Dio.netty.noUnsafe=true +-Dio.netty.noKeySetOptimization=true +-Dio.netty.recycler.maxCapacityPerThread=0 + +# log4j 2 +-Dlog4j.shutdownHookEnabled=false +-Dlog4j2.disable.jmx=true +-Dlog4j.skipJansi=true + +## heap dumps + +# generate a heap dump when an allocation from the Java heap fails +# heap dumps are created in the working directory of the JVM +-XX:+HeapDumpOnOutOfMemoryError + +# specify an alternative path for heap dumps +# ensure the directory exists and has sufficient space +#-XX:HeapDumpPath=${heap.dump.path} + +## GC logging + +#-XX:+PrintGCDetails +#-XX:+PrintGCTimeStamps +#-XX:+PrintGCDateStamps +#-XX:+PrintClassHistogram +#-XX:+PrintTenuringDistribution +#-XX:+PrintGCApplicationStoppedTime + +# log GC status to a file with time stamps +# ensure the directory exists +#-Xloggc:${loggc} + +# Elasticsearch 5.0.0 will throw an exception on unquoted field names in JSON. +# If documents were already indexed with unquoted fields in a previous version +# of Elasticsearch, some operations may throw errors. +# +# WARNING: This option will be removed in Elasticsearch 6.0.0 and is provided +# only for migration purposes. 
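+#
+# (Illustrative use of the es_jvm_custom_parameters hook rendered at the end of
+# this template - the flag is an example, not a recommendation: setting
+#   es_jvm_custom_parameters:
+#     - "-XX:+PrintFlagsFinal"
+# in your vars appends each entry verbatim below.)
+#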
+#-Delasticsearch.json.allow_unquoted_field_names=true +{% if es_jvm_custom_parameters !='' %} +{% for item in es_jvm_custom_parameters %} +{{ item }} +{% endfor %} +{% endif %} diff --git a/ansible/roles/elasticsearch/templates/log4j2.properties.j2 b/ansible/roles/elasticsearch/templates/log4j2.properties.j2 new file mode 100755 index 0000000000000000000000000000000000000000..3702afff9f328c49e6939b4f6cfcacdb8dd87ebf --- /dev/null +++ b/ansible/roles/elasticsearch/templates/log4j2.properties.j2 @@ -0,0 +1,74 @@ +status = error + +# log action execution errors for easier debugging +logger.action.name = org.elasticsearch.action +logger.action.level = debug + +appender.console.type = Console +appender.console.name = console +appender.console.layout.type = PatternLayout +appender.console.layout.pattern = [%d{ISO8601}][%-5p][%-25c{1.}] %marker%m%n + +appender.rolling.type = RollingFile +appender.rolling.name = rolling +appender.rolling.fileName = ${sys:es.logs}.log +appender.rolling.layout.type = PatternLayout +appender.rolling.layout.pattern = [%d{ISO8601}][%-5p][%-25c{1.}] %marker%.-10000m%n +appender.rolling.filePattern = ${sys:es.logs}-%d{yyyy-MM-dd}.log +appender.rolling.policies.type = Policies +appender.rolling.policies.time.type = TimeBasedTriggeringPolicy +appender.rolling.policies.time.interval = 1 +appender.rolling.policies.time.modulate = true + +rootLogger.level = info +rootLogger.appenderRef.console.ref = console +rootLogger.appenderRef.rolling.ref = rolling + +appender.deprecation_rolling.type = RollingFile +appender.deprecation_rolling.name = deprecation_rolling +appender.deprecation_rolling.fileName = ${sys:es.logs}_deprecation.log +appender.deprecation_rolling.layout.type = PatternLayout +appender.deprecation_rolling.layout.pattern = [%d{ISO8601}][%-5p][%-25c{1.}] %marker%.-10000m%n +appender.deprecation_rolling.filePattern = ${sys:es.logs}_deprecation-%i.log.gz +appender.deprecation_rolling.policies.type = Policies +appender.deprecation_rolling.policies.size.type = SizeBasedTriggeringPolicy +appender.deprecation_rolling.policies.size.size = 1GB +appender.deprecation_rolling.strategy.type = DefaultRolloverStrategy +appender.deprecation_rolling.strategy.max = 4 + +logger.deprecation.name = org.elasticsearch.deprecation +logger.deprecation.level = warn +logger.deprecation.appenderRef.deprecation_rolling.ref = deprecation_rolling +logger.deprecation.additivity = false + +appender.index_search_slowlog_rolling.type = RollingFile +appender.index_search_slowlog_rolling.name = index_search_slowlog_rolling +appender.index_search_slowlog_rolling.fileName = ${sys:es.logs}_index_search_slowlog.log +appender.index_search_slowlog_rolling.layout.type = PatternLayout +appender.index_search_slowlog_rolling.layout.pattern = [%d{ISO8601}][%-5p][%-25c] %marker%.-10000m%n +appender.index_search_slowlog_rolling.filePattern = ${sys:es.logs}_index_search_slowlog-%d{yyyy-MM-dd}.log +appender.index_search_slowlog_rolling.policies.type = Policies +appender.index_search_slowlog_rolling.policies.time.type = TimeBasedTriggeringPolicy +appender.index_search_slowlog_rolling.policies.time.interval = 1 +appender.index_search_slowlog_rolling.policies.time.modulate = true + +logger.index_search_slowlog_rolling.name = index.search.slowlog +logger.index_search_slowlog_rolling.level = trace +logger.index_search_slowlog_rolling.appenderRef.index_search_slowlog_rolling.ref = index_search_slowlog_rolling +logger.index_search_slowlog_rolling.additivity = false + +appender.index_indexing_slowlog_rolling.type = 
RollingFile +appender.index_indexing_slowlog_rolling.name = index_indexing_slowlog_rolling +appender.index_indexing_slowlog_rolling.fileName = ${sys:es.logs}_index_indexing_slowlog.log +appender.index_indexing_slowlog_rolling.layout.type = PatternLayout +appender.index_indexing_slowlog_rolling.layout.pattern = [%d{ISO8601}][%-5p][%-25c] %marker%.-10000m%n +appender.index_indexing_slowlog_rolling.filePattern = ${sys:es.logs}_index_indexing_slowlog-%d{yyyy-MM-dd}.log +appender.index_indexing_slowlog_rolling.policies.type = Policies +appender.index_indexing_slowlog_rolling.policies.time.type = TimeBasedTriggeringPolicy +appender.index_indexing_slowlog_rolling.policies.time.interval = 1 +appender.index_indexing_slowlog_rolling.policies.time.modulate = true + +logger.index_indexing_slowlog.name = index.indexing.slowlog.index +logger.index_indexing_slowlog.level = trace +logger.index_indexing_slowlog.appenderRef.index_indexing_slowlog_rolling.ref = index_indexing_slowlog_rolling +logger.index_indexing_slowlog.additivity = false diff --git a/ansible/roles/elasticsearch/templates/security/role_mapping.yml.j2 b/ansible/roles/elasticsearch/templates/security/role_mapping.yml.j2 new file mode 100755 index 0000000000000000000000000000000000000000..2584375226a3707c7fe3f6ef34f53e7f057edc68 --- /dev/null +++ b/ansible/roles/elasticsearch/templates/security/role_mapping.yml.j2 @@ -0,0 +1 @@ +{{ es_role_mapping | to_nice_yaml }} \ No newline at end of file diff --git a/ansible/roles/elasticsearch/templates/security/roles.yml.j2 b/ansible/roles/elasticsearch/templates/security/roles.yml.j2 new file mode 100755 index 0000000000000000000000000000000000000000..9f211f2b07b434fcf5619ff982532b044f7ff269 --- /dev/null +++ b/ansible/roles/elasticsearch/templates/security/roles.yml.j2 @@ -0,0 +1 @@ +{{ es_roles.file | to_nice_yaml }} \ No newline at end of file diff --git a/ansible/roles/elasticsearch/templates/security/users_roles.j2 b/ansible/roles/elasticsearch/templates/security/users_roles.j2 new file mode 100755 index 0000000000000000000000000000000000000000..1c0acfa1d7fefd29b6282a6ad59b5b0a1444495a --- /dev/null +++ b/ansible/roles/elasticsearch/templates/security/users_roles.j2 @@ -0,0 +1 @@ +{{users_roles | join("\n") }} \ No newline at end of file diff --git a/ansible/roles/elasticsearch/templates/systemd/elasticsearch.j2 b/ansible/roles/elasticsearch/templates/systemd/elasticsearch.j2 new file mode 100755 index 0000000000000000000000000000000000000000..dafae587f0c06709a815ff44fc8a1081de3731a0 --- /dev/null +++ b/ansible/roles/elasticsearch/templates/systemd/elasticsearch.j2 @@ -0,0 +1,64 @@ +[Unit] +Description=Elasticsearch-{{es_instance_name}} +Documentation=http://www.elastic.co +Wants=network-online.target +After=network-online.target + +[Service] +Environment=ES_HOME={{es_home}} +Environment=CONF_DIR={{conf_dir}} +Environment=DATA_DIR={{ data_dirs | array_to_str }} +Environment=LOG_DIR={{log_dir}} +Environment=PID_DIR={{pid_dir}} +EnvironmentFile=-{{instance_default_file}} + +WorkingDirectory={{es_home}} + +User={{es_user}} +Group={{es_group}} + +ExecStartPre=/usr/share/elasticsearch/bin/elasticsearch-systemd-pre-exec + +ExecStart={{es_home}}/bin/elasticsearch \ + -p ${PID_DIR}/elasticsearch.pid \ + --quiet \ + -Edefault.path.logs=${LOG_DIR} \ + -Edefault.path.data=${DATA_DIR} \ + -Edefault.path.conf=${CONF_DIR} + + +# StandardOutput is configured to redirect to journalctl since +# some error messages may be logged in standard output before +# elasticsearch logging system is initialized. 
Elasticsearch +# stores its logs in /var/log/elasticsearch and does not use +# journalctl by default. If you also want to enable journalctl +# logging, you can simply remove the "quiet" option from ExecStart. +StandardOutput=journal +StandardError=inherit + +# Specifies the maximum file descriptor number that can be opened by this process +{% if es_max_open_files is defined %} +LimitNOFILE={{es_max_open_files}} +{% endif %} + +# Specifies the maximum number of bytes of memory that may be locked into RAM +# Set to "infinity" if you use the 'bootstrap.memory_lock: true' option +# in elasticsearch.yml and 'MAX_LOCKED_MEMORY=unlimited' in {{instance_default_file}} +{% if m_lock_enabled %} +LimitMEMLOCK=infinity +{% endif %} + +# Disable timeout logic and wait until process is stopped +TimeoutStopSec=0 + +# SIGTERM signal is used to stop the Java process +KillSignal=SIGTERM + +# Java process is never killed +SendSIGKILL=no + +# When a JVM receives a SIGTERM signal it exits with code 143 +SuccessExitStatus=143 + +[Install] +WantedBy=multi-user.target diff --git a/ansible/roles/elasticsearch/vars/Debian.yml b/ansible/roles/elasticsearch/vars/Debian.yml new file mode 100755 index 0000000000000000000000000000000000000000..071736ef70e9162b00f5b07b838e6d10dd27db2d --- /dev/null +++ b/ansible/roles/elasticsearch/vars/Debian.yml @@ -0,0 +1,4 @@ +--- +java: "{% if es_java is defined %}{{es_java}}{% else %}openjdk-8-jre-headless{% endif %}" +default_file: "/etc/default/elasticsearch" +es_home: "/usr/share/elasticsearch" diff --git a/ansible/roles/elasticsearch/vars/RedHat.yml b/ansible/roles/elasticsearch/vars/RedHat.yml new file mode 100755 index 0000000000000000000000000000000000000000..b0aa42b2bb3f60548b6308c8c5859aee83ee40e8 --- /dev/null +++ b/ansible/roles/elasticsearch/vars/RedHat.yml @@ -0,0 +1,4 @@ +--- +java: "{{ es_java | default('java-1.8.0-openjdk.x86_64') }}" +default_file: "/etc/sysconfig/elasticsearch" +es_home: "/usr/share/elasticsearch" \ No newline at end of file diff --git a/ansible/roles/elasticsearch/vars/main.yml b/ansible/roles/elasticsearch/vars/main.yml new file mode 100755 index 0000000000000000000000000000000000000000..3d50db2eb14105151ecc67f18d83fd1109fa6010 --- /dev/null +++ b/ansible/roles/elasticsearch/vars/main.yml @@ -0,0 +1,7 @@ +--- +es_package_url: "https://download.elastic.co/elasticsearch/elasticsearch/elasticsearch" +es_conf_dir: "/etc/elasticsearch" +sysd_script: "/usr/lib/systemd/system/elasticsearch.service" +init_script: "/etc/init.d/elasticsearch" +#add supported features here +supported_xpack_features: ["alerting","monitoring","graph","security"] \ No newline at end of file diff --git a/ansible/roles/elasticsearch_dependencies/main.yml b/ansible/roles/elasticsearch_dependencies/main.yml new file mode 100644 index 0000000000000000000000000000000000000000..b4752a62af76d2f4637c5fb3147a8bc422be002e --- /dev/null +++ b/ansible/roles/elasticsearch_dependencies/main.yml @@ -0,0 +1,6 @@ + +- debug: msg="Installing jmespath" + +- name: Install jmespath + apt: name=jmespath state=present + become: yes diff --git a/ansible/roles/elasticsearch_old/defaults/main.yml b/ansible/roles/elasticsearch_old/defaults/main.yml new file mode 100644 index 0000000000000000000000000000000000000000..49db0923b4d0fe74295b66b24598cb83e89269b2 --- /dev/null +++ b/ansible/roles/elasticsearch_old/defaults/main.yml @@ -0,0 +1,57 @@ +--- +# Elasticsearch release and version to install +es_release: "2.4" +es_minor_release: "5" +es_user: elasticsearch +es_logging_level: INFO +es_etc_path_data: 
/var/lib/elasticsearch +es_default_max_open_files: 65535 +es_etc_index_number_of_shards: 1 +es_default_es_heap_size: 512m +es_version: "{{ es_release }}.{{es_minor_release}}" + +# Wait for elasticsearch to be listening for connections before proceeding +# (e.g. after install / restart) +es_wait_for_listen: yes +es_etc_network_host: 0.0.0.0 + +# Plugins to install, specified as: +# - name: <plugin name, including optional version string> +# url: <optional url to download the plugin from> +# plugin_file: <optional plugin file to check if the plugin is installed> +es_plugins: [] + + +# Other settings can be set here + +# Settings in /etc/defaults/elasticsearch, for example: +# es_default_es_user: elasticsearch +# es_default_es_group: elasticsearch +# +# See templates/elasticsearch for a full list and description of the settings +# Additional settings not on the list can also be added like this: +# +# es_default: +# CUSTOM_VAR_ONE: 1 +# CUSTOM_VAR_TWO: two +# +# This will become: +# +# CUSTOM_VAR_ONE=1 +# CUSTOM_VAR_TWO=two + +# Settings in /etc/elasticsearch/elasticsearch.yml, for example: +# es_etc_cluster_name: elasticsearch +# es_etc_index_number_of_shards: 3 +# See templates/elasticsearch.yml for a full list and description of the settings +# +# Additional settings not on the list can also be added like this: +# +# es_etc: +# http.max_header_size: 16kB +# transport.tcp.connect_timeout: 20s +# +# This will become: +# +# http.max_header_size: 16kB +# transport.tcp.connect_timeout: 20s diff --git a/ansible/roles/elasticsearch_old/handlers/main.yml b/ansible/roles/elasticsearch_old/handlers/main.yml new file mode 100644 index 0000000000000000000000000000000000000000..db49efaf7f5c98a416bebbf02bc4333c68ca557f --- /dev/null +++ b/ansible/roles/elasticsearch_old/handlers/main.yml @@ -0,0 +1,8 @@ +--- +- name: Restart elasticsearch + service: name=elasticsearch state=restarted + notify: Wait for elasticsearch to be listening for connections + +- name: Wait for elasticsearch to be listening for connections + wait_for: port={{ es_etc_http_port | default(9200) }} + when: es_wait_for_listen diff --git a/ansible/roles/elasticsearch_old/tasks/debian.yml b/ansible/roles/elasticsearch_old/tasks/debian.yml new file mode 100644 index 0000000000000000000000000000000000000000..7b0d97890d451e4f7b637845c9cd88d37e720406 --- /dev/null +++ b/ansible/roles/elasticsearch_old/tasks/debian.yml @@ -0,0 +1,17 @@ +--- +- name: Add ElasticSearch repo public signing key + apt_key: id=46095ACC8548582C1A2699A9D27D666CD88E42B4 url=https://packages.elastic.co/GPG-KEY-elasticsearch state=present + +- name: Add ElasticSearch repository + apt_repository: + repo: 'deb http://packages.elasticsearch.org/elasticsearch/2.x/debian stable main' + state: present + +- name: Copy /etc/default/elasticsearch + template: src=elasticsearch dest=/etc/default/elasticsearch + tags: update_es_config + notify: Restart elasticsearch + +- name: Install ElasticSearch + apt: name=elasticsearch={{ es_version }} state=present + notify: Restart elasticsearch diff --git a/ansible/roles/elasticsearch_old/tasks/main.yml b/ansible/roles/elasticsearch_old/tasks/main.yml new file mode 100644 index 0000000000000000000000000000000000000000..3e3b9179d9cfafd7773e6ed712c8c5a860f92a63 --- /dev/null +++ b/ansible/roles/elasticsearch_old/tasks/main.yml @@ -0,0 +1,69 @@ +--- +- include: debian.yml + when: ansible_os_family == "Debian" + +- include: redhat.yml + when: ansible_os_family == "RedHat" + +- name: Copy /etc/elasticsearch/elasticsearch.yml + template: 
src=elasticsearch.yml dest=/etc/elasticsearch/elasticsearch.yml + tags: update_es_config + notify: Restart elasticsearch + +- name: Creates data directory + file: + path={{ es_etc_path_data }} + state=directory + mode=755 + owner={{ es_user }} + group={{ es_user }} + +- name: Creates scripts directory + file: + path={{ elasticsearch_config }}/scripts + state=directory + mode=755 + owner={{ es_user }} + group={{ es_user }} + +- name: Copy logger file + template: src=logging.yml dest={{ elasticsearch_config }}/logging.yml + sudo: yes + tags: update_es_config + notify: Restart elasticsearch + +- name: Set elasticsearch service to start on boot + service: name=elasticsearch enabled=yes + +- name: Remove plugins + command: bin/plugin remove {{item.name}} + args: + chdir: "{{ es_home }}" + with_items: "{{ es_plugins }}" + ignore_errors: yes + notify: Restart elasticsearch + +- name: Install plugins + command: bin/plugin install {{item.name}} {%if item.url is defined %} url {{item.url}}{% endif %} + args: + chdir: "{{ es_home }}" + creates: "{{ es_home }}/plugins/{{ item.plugin_file | default(item.name) }}" + with_items: "{{ es_plugins }}" + ignore_errors: yes + notify: Restart elasticsearch + +- name: Configure /etc/security/limits.conf + lineinfile: > + dest=/etc/security/limits.conf + line="{{ item.line }}" + state=present + with_items: + - { line: 'elasticsearch - nofile {{ es_default_max_open_files }}' } + - { line: 'elasticsearch - memlock unlimited' } + - { line: 'root - memlock unlimited' } + sudo: yes + notify: Restart elasticsearch + +- name: Start Elasticsearch + service: name=elasticsearch state=started enabled=yes + become: yes diff --git a/ansible/roles/elasticsearch_old/tasks/redhat.yml b/ansible/roles/elasticsearch_old/tasks/redhat.yml new file mode 100644 index 0000000000000000000000000000000000000000..45d095032e4edca356cc213d1142e6f9a1d7f20e --- /dev/null +++ b/ansible/roles/elasticsearch_old/tasks/redhat.yml @@ -0,0 +1,14 @@ +--- +- name: Add ElasticSearch repo public signing key + rpm_key: key=https://packages.elastic.co/GPG-KEY-elasticsearch state=present + +- name: Add ElasticSearch repository + template: src=elasticsearch.repo dest=/etc/yum.repos.d/elasticsearch.repo + +- name: Copy /etc/sysconfig/elasticsearch + template: src=elasticsearch dest=/etc/sysconfig/elasticsearch + notify: Restart elasticsearch + +- name: Install ElasticSearch + yum: name=elasticsearch-{{ es_version }} state=present + notify: Restart elasticsearch diff --git a/ansible/roles/elasticsearch_old/templates/elasticsearch b/ansible/roles/elasticsearch_old/templates/elasticsearch new file mode 100644 index 0000000000000000000000000000000000000000..508debb6c7d7ced99d6c9a69287c91cc7d2df700 --- /dev/null +++ b/ansible/roles/elasticsearch_old/templates/elasticsearch @@ -0,0 +1,67 @@ +# Run Elasticsearch as this user ID and group ID +#ES_USER=elasticsearch +{% if es_default_es_user is defined %}ES_USER={{ es_default_es_user }}{% endif %} + +#ES_GROUP=elasticsearch +{% if es_default_es_group is defined %}ES_GROUP={{ es_default_es_group }}{% endif %} + +# Heap Size (defaults to 256m min, 1g max) +#ES_HEAP_SIZE=2g +{% if es_default_es_heap_size is defined %}ES_HEAP_SIZE={{ es_default_es_heap_size }}{% endif %} + +# Heap new generation +#ES_HEAP_NEWSIZE= +{% if es_default_es_heap_newsize is defined %}ES_HEAP_NEWSIZE={{ es_default_es_heap_newsize }}{% endif %} + +# max direct memory +#ES_DIRECT_SIZE= +{% if es_default_es_direct_size is defined %}ES_DIRECT_SIZE={{ es_default_es_direct_size }}{% endif %} + +# 
Maximum number of open files, defaults to 65535. +#MAX_OPEN_FILES=65535 +{% if es_default_max_open_files is defined %}MAX_OPEN_FILES={{ es_default_max_open_files }}{% endif %} + +# Maximum locked memory size. Set to "unlimited" if you use the +# bootstrap.mlockall option in es_yml. You must also set +# ES_HEAP_SIZE. +#MAX_LOCKED_MEMORY=unlimited +{% if es_default_max_locked_memory is defined %}MAX_LOCKED_MEMORY={{ es_default_max_locked_memory }}{% endif %} + +# Maximum number of VMA (Virtual Memory Areas) a process can own +#MAX_MAP_COUNT=262144 +{% if es_default_max_map_count is defined %}MAX_MAP_COUNT={{ es_default_max_map_count }}{% endif %} + +# Elasticsearch log directory +#LOG_DIR=/var/log/elasticsearch +{% if es_default_log_dir is defined %}LOG_DIR={{ es_default_log_dir }}{% endif %} + +# Elasticsearch data directory +#DATA_DIR=/var/lib/elasticsearch +{% if es_default_data_dir is defined %}DATA_DIR={{ es_default_data_dir }}{% endif %} + +# Elasticsearch work directory +#WORK_DIR=/tmp/elasticsearch +{% if es_default_work_dir is defined %}WORK_DIR={{ es_default_work_dir }}{% endif %} + +# Elasticsearch configuration directory +#CONF_DIR=/etc/elasticsearch +{% if es_default_conf_dir is defined %}CONF_DIR={{ es_default_conf_dir }}{% endif %} + +# Elasticsearch configuration file (es_yml) +#CONF_FILE=/etc/elasticsearch/elasticsearch.yml +{% if es_default_conf_file is defined %}CONF_FILE={{ es_default_conf_file }}{% endif %} + +# Additional Java OPTS +#ES_JAVA_OPTS= +{% if es_default_es_java_opts is defined %}ES_JAVA_OPTS={{ es_default_es_java_opts }}{% endif %} + +# Configure restart on package upgrade (true, every other setting will lead to not restarting) +#RESTART_ON_UPGRADE=true +{% if es_default_restart_on_upgrade is defined %}RESTART_ON_UPGRADE={{ es_default_restart_on_upgrade }}{% endif %} + +# Additional variables +{% if es_default is defined %} +{% for key, value in es_default.iteritems() %} +{{ key }}={{ value }} +{% endfor %} +{% endif %} diff --git a/ansible/roles/elasticsearch_old/templates/elasticsearch.yml b/ansible/roles/elasticsearch_old/templates/elasticsearch.yml new file mode 100644 index 0000000000000000000000000000000000000000..832f067332b5418049c503a047b8bb4cfad9b8b5 --- /dev/null +++ b/ansible/roles/elasticsearch_old/templates/elasticsearch.yml @@ -0,0 +1,493 @@ +##################### Elasticsearch Configuration Example ##################### + +# This file contains an overview of various configuration settings, +# targeted at operations staff. Application developers should +# consult the guide at <http://elasticsearch.org/guide>. +# +# The installation procedure is covered at +# <http://elasticsearch.org/guide/en/elasticsearch/reference/current/setup.html>. +# +# Elasticsearch comes with reasonable defaults for most settings, +# so you can try it out without bothering with configuration. +# +# Most of the time, these defaults are just fine for running a production +# cluster. If you're fine-tuning your cluster, or wondering about the +# effect of certain configuration option, please _do ask_ on the +# mailing list or IRC channel [http://elasticsearch.org/community]. + +# Any element in the configuration can be replaced with environment variables +# by placing them in ${...} notation. 
For example: +# +#node.rack: ${RACK_ENV_VAR} + +# For information on supported formats and syntax for the config file, see +# <http://elasticsearch.org/guide/en/elasticsearch/reference/current/setup-configuration.html> + + +################################### Cluster ################################### + +# Cluster name identifies your cluster for auto-discovery. If you're running +# multiple clusters on the same network, make sure you're using unique names. +# +#cluster.name: elasticsearch +{% if es_etc_cluster_name is defined %}cluster.name: {{ es_etc_cluster_name }}{% endif %} + + +#################################### Node ##################################### + +# Node names are generated dynamically on startup, so you're relieved +# from configuring them manually. You can tie this node to a specific name: +# +#node.name: "Franz Kafka" +{% if es_etc_node_name is defined %}node.name: {{ es_etc_node_name }}{% endif %} + +# Every node can be configured to allow or deny being eligible as the master, +# and to allow or deny to store the data. +# +# Allow this node to be eligible as a master node (enabled by default): +# +#node.master: true +# +# Allow this node to store data (enabled by default): +# +#node.data: true +{% if es_etc_node_master is defined %}node.master: {{ es_etc_node_master }}{% endif %} + +{% if es_etc_node_data is defined %}node.data: {{ es_etc_node_data }}{% endif %} + +# You can exploit these settings to design advanced cluster topologies. +# +# 1. You want this node to never become a master node, only to hold data. +# This will be the "workhorse" of your cluster. +# +#node.master: false +#node.data: true +# +# 2. You want this node to only serve as a master: to not store any data and +# to have free resources. This will be the "coordinator" of your cluster. +# +#node.master: true +#node.data: false +# +# 3. You want this node to be neither master nor data node, but +# to act as a "search load balancer" (fetching data from nodes, +# aggregating results, etc.) +# +#node.master: false +#node.data: false + +# Use the Cluster Health API [http://localhost:9200/_cluster/health], the +# Node Info API [http://localhost:9200/_nodes] or GUI tools +# such as <http://www.elasticsearch.org/overview/marvel/>, +# <http://github.com/karmi/elasticsearch-paramedic>, +# <http://github.com/lukas-vlcek/bigdesk> and +# <http://mobz.github.com/elasticsearch-head> to inspect the cluster state. + +# A node can have generic attributes associated with it, which can later be used +# for customized shard allocation filtering, or allocation awareness. An attribute +# is a simple key value pair, similar to node.key: value, here is an example: +# +#node.rack: rack314 +{% if es_etc_node_rack is defined %}node.rack: {{ es_etc_node_rack }}{% endif %} + +# By default, multiple nodes are allowed to start from the same installation location +# to disable it, set the following: +#node.max_local_storage_nodes: 1 +{% if es_etc_node_max_local_storage_nodes is defined %}node.max_local_storage_nodes: {{ es_etc_node_max_local_storage_nodes }}{% endif %} + + +#################################### Index #################################### + +# You can set a number of options (such as shard/replica options, mapping +# or analyzer definitions, translog settings, ...) for indices globally, +# in this file. +# +# Note, that it makes more sense to configure index settings specifically for +# a certain index, either when creating it or by using the index templates API. 
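+#
+# As an illustrative sketch (values hypothetical), the shard/replica settings
+# documented below can also be supplied through this role's es_etc_* variables
+# (see defaults/main.yml) instead of hand-editing this file:
+#
+# es_etc_index_number_of_shards: 5
+# es_etc_index_number_of_replicas: 1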
+# +# See <http://elasticsearch.org/guide/en/elasticsearch/reference/current/index-modules.html> and +# <http://elasticsearch.org/guide/en/elasticsearch/reference/current/indices-create-index.html> +# for more information. + +# Set the number of shards (splits) of an index (5 by default): +# +#index.number_of_shards: 5 +{% if es_etc_index_number_of_shards is defined %}index.number_of_shards: {{ es_etc_index_number_of_shards }}{% endif %} + +# Set the number of replicas (additional copies) of an index (1 by default): +# +#index.number_of_replicas: 1 +{% if es_etc_index_number_of_replicas is defined %}index.number_of_replicas: {{ es_etc_index_number_of_replicas }}{% endif %} + +# Note, that for development on a local machine, with small indices, it usually +# makes sense to "disable" the distributed features: +# +#index.number_of_shards: 1 +#index.number_of_replicas: 0 + +# These settings directly affect the performance of index and search operations +# in your cluster. Assuming you have enough machines to hold shards and +# replicas, the rule of thumb is: +# +# 1. Having more *shards* enhances the _indexing_ performance and allows to +# _distribute_ a big index across machines. +# 2. Having more *replicas* enhances the _search_ performance and improves the +# cluster _availability_. +# +# The "number_of_shards" is a one-time setting for an index. +# +# The "number_of_replicas" can be increased or decreased anytime, +# by using the Index Update Settings API. +# +# Elasticsearch takes care about load balancing, relocating, gathering the +# results from nodes, etc. Experiment with different settings to fine-tune +# your setup. + +# Use the Index Status API (<http://localhost:9200/A/_status>) to inspect +# the index status. + + +#################################### Paths #################################### + +# Path to directory containing configuration (this file and logging.yml): +# +#path.conf: /path/to/conf +{% if es_etc_path_conf is defined %}path.conf: {{ es_etc_path_conf }}{% endif %} + +# Path to directory where to store index data allocated for this node. +# +#path.data: /path/to/data +# +# Can optionally include more than one location, causing data to be striped across +# the locations (a la RAID 0) on a file level, favouring locations with most free +# space on creation. For example: +# +#path.data: /path/to/data1,/path/to/data2 +{% if es_etc_path_data is defined %}path.data: {{ es_etc_path_data }}{% endif %} + +# Path to temporary files: +# +#path.work: /path/to/work +{% if es_etc_path_work is defined %}path.work: {{ es_etc_path_work }}{% endif %} + +# Path to log files: +# +#path.logs: /path/to/logs +{% if es_etc_path_logs is defined %}path.logs: {{ es_etc_path_logs }}{% endif %} + +# Path to where plugins are installed: +# +#path.plugins: /path/to/plugins +{% if es_etc_path_plugins is defined %}path.plugins: {{ es_etc_path_plugins }}{% endif %} + + +#################################### Plugin ################################### + +# If a plugin listed here is not installed for current node, the node will not start. +# +#plugin.mandatory: mapper-attachments,lang-groovy +{% if es_etc_plugin_mandatory is defined %}plugin.mandatory: {{ es_etc_plugin_mandatory }}{% endif %} + + +################################### Memory #################################### + +# Elasticsearch performs poorly when JVM starts swapping: you should ensure that +# it _never_ swaps. 
+# +# Set this property to true to lock the memory: +# +#bootstrap.mlockall: true +{% if es_etc_bootstrap_mlockall is defined %}bootstrap.mlockall: {{ es_etc_bootstrap_mlockall }}{% endif %} + +# Make sure that the ES_MIN_MEM and ES_MAX_MEM environment variables are set +# to the same value, and that the machine has enough memory to allocate +# for Elasticsearch, leaving enough memory for the operating system itself. +# +# You should also make sure that the Elasticsearch process is allowed to lock +# the memory, eg. by using `ulimit -l unlimited`. + + +############################## Network And HTTP ############################### + +# Elasticsearch, by default, binds itself to the 0.0.0.0 address, and listens +# on port [9200-9300] for HTTP traffic and on port [9300-9400] for node-to-node +# communication. (the range means that if the port is busy, it will automatically +# try the next port). + +# Set the bind address specifically (IPv4 or IPv6): +# +#network.bind_host: 192.168.0.1 +{% if es_etc_network_bind_host is defined %}network.bind_host: {{ es_etc_network_bind_host }}{% endif %} + +# Set the address other nodes will use to communicate with this node. If not +# set, it is automatically derived. It must point to an actual IP address. +# +#network.publish_host: 192.168.0.1 +{% if es_etc_network_publish_host is defined %}network.publish_host: {{ es_etc_network_publish_host }}{% endif %} + +# Set both 'bind_host' and 'publish_host': +# +#network.host: 192.168.0.1 +{% if es_etc_network_host is defined %}network.host: {{ es_etc_network_host }}{% endif %} + +# Set a custom port for the node to node communication (9300 by default): +# +#transport.tcp.port: 9300 +{% if es_etc_transport_tcp_port is defined %}transport.tcp.port: {{ es_etc_transport_tcp_port }}{% endif %} + +# Enable compression for all communication between nodes (disabled by default): +# +#transport.tcp.compress: true +{% if es_etc_transport_tcp_compress is defined %}transport.tcp.compress: {{ es_etc_transport_tcp_compress }}{% endif %} + +# Set a custom port to listen for HTTP traffic: +# +#http.port: 9200 +{% if es_etc_http_port is defined %}http.port: {{ es_etc_http_port }}{% endif %} + +# Set a custom allowed content length: +# +#http.max_content_length: 100mb +{% if es_etc_http_max_content_length is defined %}http.max_content_length: {{ es_etc_http_max_content_length }}{% endif %} + +# Disable HTTP completely: +# +#http.enabled: false +{% if es_etc_http_enabled is defined %}http.enabled: {{ es_etc_http_enabled }}{% endif %} + + +################################### Gateway ################################### + +# The gateway allows for persisting the cluster state between full cluster +# restarts. Every change to the state (such as adding an index) will be stored +# in the gateway, and when the cluster starts up for the first time, +# it will read its state from the gateway. + +# There are several types of gateway implementations. For more information, see +# <http://elasticsearch.org/guide/en/elasticsearch/reference/current/modules-gateway.html>. + +# The default gateway type is the "local" gateway (recommended): +# +#gateway.type: local +{% if es_etc_gateway_type is defined %}gateway.type: {{ es_etc_gateway_type }}{% endif %} + +# Settings below control how and when to start the initial recovery process on +# a full cluster restart (to reuse as much local data as possible when using shared +# gateway). 
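+#
+# Sketch (assuming a hypothetical three-node cluster; values illustrative): the
+# gateway settings below map to this role's es_etc_* variables, e.g. in group_vars:
+#
+# es_etc_gateway_recover_after_nodes: 2
+# es_etc_gateway_recover_after_time: 5m
+# es_etc_gateway_expected_nodes: 3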
+ +# Allow recovery process after N nodes in a cluster are up: +# +#gateway.recover_after_nodes: 1 +{% if es_etc_gateway_recover_after_nodes is defined %}gateway.recover_after_nodes: {{ es_etc_gateway_recover_after_nodes }}{% endif %} + +# Set the timeout to initiate the recovery process, once the N nodes +# from previous setting are up (accepts time value): +# +#gateway.recover_after_time: 5m +{% if es_etc_gateway_recover_after_time is defined %}gateway.recover_after_time: {{ es_etc_gateway_recover_after_time }}{% endif %} + +# Set how many nodes are expected in this cluster. Once these N nodes +# are up (and recover_after_nodes is met), begin recovery process immediately +# (without waiting for recover_after_time to expire): +# +#gateway.expected_nodes: 2 +{% if es_etc_gateway_expected_nodes is defined %}gateway.expected_nodes: {{ es_etc_gateway_expected_nodes }}{% endif %} + + +############################# Recovery Throttling ############################# + +# These settings allow to control the process of shards allocation between +# nodes during initial recovery, replica allocation, rebalancing, +# or when adding and removing nodes. + +# Set the number of concurrent recoveries happening on a node: +# +# 1. During the initial recovery +# +#cluster.routing.allocation.node_initial_primaries_recoveries: 4 +{% if es_etc_cluster_routing_allocation_node_initial_primaries_recoveries is defined %}cluster.routing.allocation.node_initial_primaries_recoveries: {{ es_etc_cluster_routing_allocation_node_initial_primaries_recoveries }}{% endif %} + +# +# 2. During adding/removing nodes, rebalancing, etc +# +#cluster.routing.allocation.node_concurrent_recoveries: 2 +{% if es_etc_cluster_routing_allocation_node_concurrent_recoveries is defined %}cluster.routing.allocation.node_concurrent_recoveries: {{ es_etc_cluster_routing_allocation_node_concurrent_recoveries }}{% endif %} + +# Set to throttle throughput when recovering (eg. 100mb, by default 20mb): +# +#indices.recovery.max_bytes_per_sec: 20mb +{% if es_etc_indices_recovery_max_bytes_per_sec is defined %}indices.recovery.max_bytes_per_sec: {{ es_etc_indices_recovery_max_bytes_per_sec }}{% endif %} + +# Set to limit the number of open concurrent streams when +# recovering a shard from a peer: +# +#indices.recovery.concurrent_streams: 5 +{% if es_etc_indices_recovery_concurrent_streams is defined %}indices.recovery.concurrent_streams: {{ es_etc_indices_recovery_concurrent_streams }}{% endif %} + + +################################## Discovery ################################## + +# Discovery infrastructure ensures nodes can be found within a cluster +# and master node is elected. Multicast discovery is the default. + +# Set to ensure a node sees N other master eligible nodes to be considered +# operational within the cluster. This should be set to a quorum/majority of +# the master-eligible nodes in the cluster. +# +#discovery.zen.minimum_master_nodes: 1 +{% if es_etc_discovery_zen_minimum_master_nodes is defined %}discovery.zen.minimum_master_nodes: {{ es_etc_discovery_zen_minimum_master_nodes }}{% endif %} + +# Set the time to wait for ping responses from other nodes when discovering. 
+# Set this option to a higher value on a slow or congested network +# to minimize discovery failures: +# +#discovery.zen.ping.timeout: 3s +{% if es_etc_discovery_zen_ping_timeout is defined %}discovery.zen.ping.timeout: {{ es_etc_discovery_zen_ping_timeout }}{% endif %} + +# For more information, see +# <http://elasticsearch.org/guide/en/elasticsearch/reference/current/modules-discovery-zen.html> + +# Unicast discovery allows to explicitly control which nodes will be used +# to discover the cluster. It can be used when multicast is not present, +# or to restrict the cluster communication-wise. +# +# 1. Disable multicast discovery (enabled by default): +# +#discovery.zen.ping.multicast.enabled: false +{% if es_etc_discovery_zen_ping_multicast_enabled is defined %}discovery.zen.ping.multicast.enabled: {{ es_etc_discovery_zen_ping_multicast_enabled }}{% endif %} + +# +# 2. Configure an initial list of master nodes in the cluster +# to perform discovery when new nodes (master or data) are started: +# +#discovery.zen.ping.unicast.hosts: ["host1", "host2:port"] +{% if es_etc_discovery_zen_ping_unicast_hosts is defined %}discovery.zen.ping.unicast.hosts: {{ es_etc_discovery_zen_ping_unicast_hosts }}{% endif %} + +# EC2 discovery allows to use AWS EC2 API in order to perform discovery. +# +# You have to install the cloud-aws plugin for enabling the EC2 discovery. +# +# For more information, see +# <http://elasticsearch.org/guide/en/elasticsearch/reference/current/modules-discovery-ec2.html> +# +# See <http://elasticsearch.org/tutorials/elasticsearch-on-ec2/> +# for a step-by-step tutorial. + +# GCE discovery allows to use Google Compute Engine API in order to perform discovery. +# +# You have to install the cloud-gce plugin for enabling the GCE discovery. +# +# For more information, see <https://github.com/elasticsearch/elasticsearch-cloud-gce>. + +# Azure discovery allows to use Azure API in order to perform discovery. +# +# You have to install the cloud-azure plugin for enabling the Azure discovery. +# +# For more information, see <https://github.com/elasticsearch/elasticsearch-cloud-azure>. + +################################## Slow Log ################################## + +# Shard level query and fetch threshold logging. 
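+#
+# Sketch (illustrative values): the warn-level thresholds below can likewise be
+# set through the role's variables instead of uncommenting lines here, e.g.:
+#
+# es_etc_index_search_slowlog_threshold_query_warn: 10s
+# es_etc_index_search_slowlog_threshold_fetch_warn: 1s
+# es_etc_index_indexing_slowlog_threshold_index_warn: 10s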
+ +#index.search.slowlog.threshold.query.warn: 10s +#index.search.slowlog.threshold.query.info: 5s +#index.search.slowlog.threshold.query.debug: 2s +#index.search.slowlog.threshold.query.trace: 500ms +{% if es_etc_index_search_slowlog_threshold_query_warn is defined %}index.search.slowlog.threshold.query.warn: {{ es_etc_index_search_slowlog_threshold_query_warn }}{% endif %} + +{% if es_etc_index_search_slowlog_threshold_query_info is defined %}index.search.slowlog.threshold.query.info: {{ es_etc_index_search_slowlog_threshold_query_info }}{% endif %} + +{% if es_etc_index_search_slowlog_threshold_query_debug is defined %}index.search.slowlog.threshold.query.debug: {{ es_etc_index_search_slowlog_threshold_query_debug }}{% endif %} + +{% if es_etc_index_search_slowlog_threshold_query_trace is defined %}index.search.slowlog.threshold.query.trace: {{ es_etc_index_search_slowlog_threshold_query_trace }}{% endif %} + +#index.search.slowlog.threshold.fetch.warn: 1s +#index.search.slowlog.threshold.fetch.info: 800ms +#index.search.slowlog.threshold.fetch.debug: 500ms +#index.search.slowlog.threshold.fetch.trace: 200ms +{% if es_etc_index_search_slowlog_threshold_fetch_warn is defined %}index.search.slowlog.threshold.fetch.warn: {{ es_etc_index_search_slowlog_threshold_fetch_warn }}{% endif %} + +{% if es_etc_index_search_slowlog_threshold_fetch_info is defined %}index.search.slowlog.threshold.fetch.info: {{ es_etc_index_search_slowlog_threshold_fetch_info }}{% endif %} + +{% if es_etc_index_search_slowlog_threshold_fetch_debug is defined %}index.search.slowlog.threshold.fetch.debug: {{ es_etc_index_search_slowlog_threshold_fetch_debug }}{% endif %} + +{% if es_etc_index_search_slowlog_threshold_fetch_trace is defined %}index.search.slowlog.threshold.fetch.trace: {{ es_etc_index_search_slowlog_threshold_fetch_trace }}{% endif %} + +#index.indexing.slowlog.threshold.index.warn: 10s +#index.indexing.slowlog.threshold.index.info: 5s +#index.indexing.slowlog.threshold.index.debug: 2s +#index.indexing.slowlog.threshold.index.trace: 500ms +{% if es_etc_index_indexing_slowlog_threshold_index_warn is defined %}index.indexing.slowlog.threshold.index.warn: {{ es_etc_index_indexing_slowlog_threshold_index_warn }}{% endif %} + +{% if es_etc_index_indexing_slowlog_threshold_index_info is defined %}index.indexing.slowlog.threshold.index.info: {{ es_etc_index_indexing_slowlog_threshold_index_info }}{% endif %} + +{% if es_etc_index_indexing_slowlog_threshold_index_debug is defined %}index.indexing.slowlog.threshold.index.debug: {{ es_etc_index_indexing_slowlog_threshold_index_debug }}{% endif %} + +{% if es_etc_index_indexing_slowlog_threshold_index_trace is defined %}index.indexing.slowlog.threshold.index.trace: {{ es_etc_index_indexing_slowlog_threshold_index_trace }}{% endif %} + +################################## GC Logging ################################ + +#monitor.jvm.gc.young.warn: 1000ms +#monitor.jvm.gc.young.info: 700ms +#monitor.jvm.gc.young.debug: 400ms +{% if es_etc_monitor_jvm_gc_young_warn is defined %}monitor.jvm.gc.young.warn: {{ es_etc_monitor_jvm_gc_young_warn }}{% endif %} + +{% if es_etc_monitor_jvm_gc_young_info is defined %}monitor.jvm.gc.young.info: {{ es_etc_monitor_jvm_gc_young_info }}{% endif %} + +{% if es_etc_monitor_jvm_gc_young_debug is defined %}monitor.jvm.gc.young.debug: {{ es_etc_monitor_jvm_gc_young_debug }}{% endif %} + +#monitor.jvm.gc.old.warn: 10s +#monitor.jvm.gc.old.info: 5s +#monitor.jvm.gc.old.debug: 2s +{% if es_etc_monitor_jvm_gc_old_warn is defined 
%}monitor.jvm.gc.old.warn: {{ es_etc_monitor_jvm_gc_old_warn }}{% endif %} + +{% if es_etc_monitor_jvm_gc_old_info is defined %}monitor.jvm.gc.old.info: {{ es_etc_monitor_jvm_gc_old_info }}{% endif %} + +{% if es_etc_monitor_jvm_gc_old_debug is defined %}monitor.jvm.gc.old.debug: {{ es_etc_monitor_jvm_gc_old_debug }}{% endif %} + +################################## Security ################################ + +# Uncomment if you want to enable JSONP as a valid return transport on the +# http server. With this enabled, it may pose a security risk, so disabling +# it unless you need it is recommended (it is disabled by default). +# +#http.jsonp.enable: true +{% if es_etc_http_jsonp_enable is defined %}http.jsonp.enable: {{ es_etc_http_jsonp_enable }}{% endif %} + +############################## Additional Variables ############################ +script.inline: true +script.indexed: true + +{% if es_etc is defined %} +{% for key, value in es_etc.iteritems() %} +{{ key }}: {{ value }} +{% endfor %} +{% endif %} + +threadpool.search.queue_size: 2000 + +{% if es_group is defined %} +cloud: + aws: + region: {{awsregion}} + {% if awskey is defined %} + access_key: {{awskey}} + {% endif %} + {% if awssecret is defined %} + secret_key: {{awssecret}} + {% endif %} + protocol: {{awsprotocol}} + +discovery: + type: ec2 + ec2: + groups: {{es_group}} + host_type: private_ip + any_group: false + availability_zones: {{awsavailabilityzones}} +{% endif %} + +cloud.azure.storage.my_account: { + account: {{ backup_storage_name }}, + key: {{ backup_storage_key }} +} diff --git a/ansible/roles/elasticsearch_old/templates/logging.yml b/ansible/roles/elasticsearch_old/templates/logging.yml new file mode 100644 index 0000000000000000000000000000000000000000..660287c6bb77b6999ee1429f1287c716a072d913 --- /dev/null +++ b/ansible/roles/elasticsearch_old/templates/logging.yml @@ -0,0 +1,67 @@ +# you can override this using by setting a system property, for example -Des.logger.level=DEBUG +es.logger.level: {{ es_logging_level }} +rootLogger: ${es.logger.level}, console, file +logger: + # log action execution errors for easier debugging + action: INFO + # reduce the logging for aws, too much is logged under the default INFO + com.amazonaws: WARN + + # gateway + #gateway: DEBUG + #index.gateway: DEBUG + + # peer shard recovery + #indices.recovery: DEBUG + + # discovery + discovery: INFO + + index.search.slowlog: INFO, index_search_slow_log_file + index.indexing.slowlog: INFO, index_indexing_slow_log_file + +additivity: + index.search.slowlog: false + index.indexing.slowlog: false + +appender: + console: + type: console + layout: + type: consolePattern + conversionPattern: "[%d{ISO8601}][%-5p][%-25c] %m%n" + + file: + type: dailyRollingFile + file: ${path.logs}/${cluster.name}.log + datePattern: "'.'yyyy-MM-dd" + layout: + type: pattern + conversionPattern: "[%d{ISO8601}][%-5p][%-25c] %m%n" + + # Use the following log4j-extras RollingFileAppender to enable gzip compression of log files. 
+ # For more information see https://logging.apache.org/log4j/extras/apidocs/org/apache/log4j/rolling/RollingFileAppender.html + #file: + #type: extrasRollingFile + #file: ${path.logs}/${cluster.name}.log + #rollingPolicy: timeBased + #rollingPolicy.FileNamePattern: ${path.logs}/${cluster.name}.log.%d{yyyy-MM-dd}.gz + #layout: + #type: pattern + #conversionPattern: "[%d{ISO8601}][%-5p][%-25c] %m%n" + + index_search_slow_log_file: + type: dailyRollingFile + file: ${path.logs}/${cluster.name}_index_search_slowlog.log + datePattern: "'.'yyyy-MM-dd" + layout: + type: pattern + conversionPattern: "[%d{ISO8601}][%-5p][%-25c] %m%n" + + index_indexing_slow_log_file: + type: dailyRollingFile + file: ${path.logs}/${cluster.name}_index_indexing_slowlog.log + datePattern: "'.'yyyy-MM-dd" + layout: + type: pattern + conversionPattern: "[%d{ISO8601}][%-5p][%-25c] %m%n" diff --git a/ansible/roles/elasticsearch_old/vars/main.yml b/ansible/roles/elasticsearch_old/vars/main.yml new file mode 100644 index 0000000000000000000000000000000000000000..ea80dc94cb09fdaf5f7813a84776f272dd5da672 --- /dev/null +++ b/ansible/roles/elasticsearch_old/vars/main.yml @@ -0,0 +1,7 @@ +--- +es_home: /usr/share/elasticsearch +es_etc_node_name: "{{ node_name }}" +es_etc_path_logs: /var/log/elasticsearch +elasticsearch_config: /etc/elasticsearch + + diff --git a/ansible/roles/es-azure-restore/tasks/main.yml b/ansible/roles/es-azure-restore/tasks/main.yml new file mode 100644 index 0000000000000000000000000000000000000000..1b1f6200c0de9ac089f254aeca2e1d5153cd4975 --- /dev/null +++ b/ansible/roles/es-azure-restore/tasks/main.yml @@ -0,0 +1,18 @@ +--- + +- name: Restore ES from Azure backup + uri: + url: "http://{{ es_restore_host }}:9200/_snapshot/azurebackup/{{snapshot_number}}/_restore" + method: POST + +- name: "Wait for restore to be completed" + uri: + url: "http://{{ es_restore_host }}:9200/_snapshot/azurebackup/{{snapshot_number}}/_status" + method: GET + return_content: yes + status_code: 200 + body_format: json + register: result + until: result.json.snapshots[0].state == 'SUCCESS' + retries: 120 + delay: 10 diff --git a/ansible/roles/es-azure-snapshot/defaults/main.yml b/ansible/roles/es-azure-snapshot/defaults/main.yml new file mode 100644 index 0000000000000000000000000000000000000000..14b39c8539f6e05dbdc6dfe1db713aabdf0ff1ce --- /dev/null +++ b/ansible/roles/es-azure-snapshot/defaults/main.yml @@ -0,0 +1,7 @@ +snapshot_create_request_body: { + type: azure, + settings: { + container: "elasticsearch-snapshots", + base_path: "{{ snapshot_base_path }}" + } +} diff --git a/ansible/roles/es-azure-snapshot/tasks/main.yml b/ansible/roles/es-azure-snapshot/tasks/main.yml new file mode 100644 index 0000000000000000000000000000000000000000..322b0cdeb85d9abd7688bf217421285665c59426 --- /dev/null +++ b/ansible/roles/es-azure-snapshot/tasks/main.yml @@ -0,0 +1,42 @@ +--- + +- name: Create azure snapshot + uri: + url: "http://{{ es_snapshot_host }}:9200/_snapshot/azurebackup" + method: PUT + body: "{{ snapshot_create_request_body | to_json }}" + headers: + Content-Type: "application/json" + +- set_fact: snapshot_number="snapshot_{{ansible_date_time.epoch}}" + +- name: Take new snapshot + uri: + url: "http://{{ es_snapshot_host }}:9200/_snapshot/azurebackup/{{snapshot_number}}" + method: PUT + body: > + {"indices":"*","include_global_state":false} + headers: + Content-Type: "application/json" + +- name: Print all snapshots + uri: + url: "http://{{ es_snapshot_host }}:9200/_snapshot/azurebackup/_all" + method: GET + +- name: Print 
status of current snapshot
+  uri:
+    url: "http://{{ es_snapshot_host }}:9200/_snapshot/azurebackup/{{snapshot_number}}"
+    method: GET
+
+- name: "Wait for backup to be completed"
+  uri:
+    url: "http://{{ es_snapshot_host }}:9200/_snapshot/azurebackup/{{snapshot_number}}"
+    method: GET
+    return_content: yes
+    status_code: 200
+    body_format: json
+  register: result
+  until: result.json.snapshots[0].state == 'SUCCESS'
+  retries: 120
+  delay: 10
diff --git a/ansible/roles/git/tasks/main.yml b/ansible/roles/git/tasks/main.yml
new file mode 100755
index 0000000000000000000000000000000000000000..677936a6e6a3fc78c4a4a686944600246e3bdbf9
--- /dev/null
+++ b/ansible/roles/git/tasks/main.yml
@@ -0,0 +1,11 @@
+---
+- name: Update apt cache
+  apt: update_cache=yes cache_valid_time=36000
+  become: yes
+
+- name: Install required utils
+  apt: name={{ item }} state=present
+  become: yes
+  with_items:
+    - git
+    - git-core
diff --git a/ansible/roles/java/tasks/main.yml b/ansible/roles/java/tasks/main.yml
new file mode 100755
index 0000000000000000000000000000000000000000..715fccf1d69e8ea0c80f515445787c1cbbc3fe81
--- /dev/null
+++ b/ansible/roles/java/tasks/main.yml
@@ -0,0 +1,17 @@
+- name: Add Java repository to sources
+  action: apt_repository repo='ppa:webupd8team/java'
+
+- name: Autoaccept license for Java
+  action: shell echo oracle-java8-installer shared/accepted-oracle-license-v1-1 select true | sudo /usr/bin/debconf-set-selections
+
+- name: Update APT package cache
+  action: apt update_cache=yes
+
+- name: Install Java 8
+  action: apt pkg=oracle-java8-installer state=latest install_recommends=yes
+
+- name: Set Java 8 Env
+  action: apt pkg=oracle-java8-set-default state=latest install_recommends=yes
+
+- name: setup envkeep
+  lineinfile: dest=/etc/sudoers line='Defaults env_keep += "JAVA_HOME"' state=present insertafter=EOF create=yes
diff --git a/ansible/roles/java8/tasks/main.yml b/ansible/roles/java8/tasks/main.yml
new file mode 100644
index 0000000000000000000000000000000000000000..46c6e7b2c150919866dbb291450d0015c6397f6f
--- /dev/null
+++ b/ansible/roles/java8/tasks/main.yml
@@ -0,0 +1,31 @@
+- name: Add Java repository to sources
+  action: apt_repository repo='ppa:webupd8team/java'
+  become: yes
+  tags:
+    - provision
+
+- name: Autoaccept license for Java
+  action: shell echo oracle-java8-installer shared/accepted-oracle-license-v1-1 select true | sudo /usr/bin/debconf-set-selections
+  become: yes
+  tags:
+    - provision
+- name: Update APT package cache
+  action: apt update_cache=yes cache_valid_time=3600
+  become: yes
+  tags:
+    - provision
+- name: Install Java 8
+  action: apt pkg=oracle-java8-installer state=latest
+  become: yes
+  tags:
+    - provision
+- name: Set Java 8 Env
+  action: apt pkg=oracle-java8-set-default state=latest install_recommends=yes
+  become: yes
+  tags:
+    - provision
+- name: setup envkeep
+  become: yes
+  lineinfile: dest=/etc/sudoers line='Defaults env_keep += "JAVA_HOME"' state=present insertafter=EOF create=yes
+  tags:
+    - provision
diff --git a/ansible/roles/jenkins-backup-upload/README.md b/ansible/roles/jenkins-backup-upload/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..e1217a9d9ecb2e7d6385923ec1e8b42d96b58279
--- /dev/null
+++ b/ansible/roles/jenkins-backup-upload/README.md
@@ -0,0 +1,12 @@
+### Jenkins backup upload
+
+This role uploads backups taken by the [ThinBackup plugin](https://plugins.jenkins.io/thinBackup).
+
+
+### Prerequisites
+
+* Jenkins should have the [ThinBackup plugin](https://plugins.jenkins.io/thinBackup) installed
+* Configure [ThinBackup plugin settings](https://ci.server/jenkins/thinBackup/backupsettings)
+  * Set the backup dir as `/jenkins-backup`
+  * Keep the backup minimal by excluding job artifacts, etc.
+  * Ensure a periodic backup runs before the upload. For example, if the upload runs `@midnight`, schedule the backup at 11 PM with the cron expression `0 23 * * *`
\ No newline at end of file
diff --git a/ansible/roles/jenkins-backup-upload/defaults/main.yml b/ansible/roles/jenkins-backup-upload/defaults/main.yml
new file mode 100644
index 0000000000000000000000000000000000000000..a1326d770fa5b90fadd3a7c90dc439c506a48922
--- /dev/null
+++ b/ansible/roles/jenkins-backup-upload/defaults/main.yml
@@ -0,0 +1,5 @@
+jenkins_user: jenkins
+jenkins_group: jenkins
+jenkins_backup_base_dir: /jenkins-backup
+jenkins_backup_azure_container_name: jenkins-backup
+jenkins_backup_max_delay_in_days: 1
\ No newline at end of file
diff --git a/ansible/roles/jenkins-backup-upload/meta/main.yml b/ansible/roles/jenkins-backup-upload/meta/main.yml
new file mode 100644
index 0000000000000000000000000000000000000000..95a74828f2216aa9c85e0023af089ddaa6e3bf7a
--- /dev/null
+++ b/ansible/roles/jenkins-backup-upload/meta/main.yml
@@ -0,0 +1,3 @@
+dependencies: []
+  # azure-cli can't be installed as a dependency (no sudo access during upload); moved to the jenkins playbook
+  # - azure-cli
diff --git a/ansible/roles/jenkins-backup-upload/tasks/main.yml b/ansible/roles/jenkins-backup-upload/tasks/main.yml
new file mode 100644
index 0000000000000000000000000000000000000000..bfeb341a7ae2ce48fc396b68692362dd08c5cb8d
--- /dev/null
+++ b/ansible/roles/jenkins-backup-upload/tasks/main.yml
@@ -0,0 +1,26 @@
+- name: ensure backup base directory exists
+  file: path={{ jenkins_backup_base_dir }} state=directory owner={{ jenkins_user }} group={{ jenkins_group }}
+
+- name: Run script to find latest backup and upload
+  shell: |
+    LATEST_BACKUP_DIR=$(ls -dt */ | head -n 1 | cut -d'/' -f1)
+
+    if (( $(date -r "$LATEST_BACKUP_DIR" +%s) <= $(date -d 'now - {{ jenkins_backup_max_delay_in_days }} days' +%s) )); then
+      echo "ERROR: Backup $LATEST_BACKUP_DIR is older than {{ jenkins_backup_max_delay_in_days }} days"
+      exit 1
+    fi
+
+    LATEST_BACKUP_DIR_ZIP_FILE="$LATEST_BACKUP_DIR.zip"
+    zip -r $LATEST_BACKUP_DIR_ZIP_FILE $LATEST_BACKUP_DIR
+
+    az storage container create --name {{ jenkins_backup_azure_container_name }}
+    az storage blob upload --name $LATEST_BACKUP_DIR_ZIP_FILE --file $LATEST_BACKUP_DIR_ZIP_FILE --container-name {{ jenkins_backup_azure_container_name }}
+  args:
+    chdir: "{{ jenkins_backup_base_dir }}"
+    # the (( ... )) age check above is a bashism, so run the script with bash rather than /bin/sh
+    executable: /bin/bash
+  environment:
+    AZURE_STORAGE_ACCOUNT: "{{ jenkins_backup_azure_storage_account_name }}"
+    AZURE_STORAGE_KEY: "{{ jenkins_backup_azure_storage_access_key }}"
+  async: 3600
+  poll: 10
diff --git a/ansible/roles/jenkins/README.md b/ansible/roles/jenkins/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..f919cbb670951f2ac9cad5222f73fca827b2da87
--- /dev/null
+++ b/ansible/roles/jenkins/README.md
@@ -0,0 +1,104 @@
+# Ansible Role: Jenkins CI
+
+[![Build Status](https://travis-ci.org/geerlingguy/ansible-role-jenkins.svg?branch=master)](https://travis-ci.org/geerlingguy/ansible-role-jenkins)
+
+Installs Jenkins CI on RHEL/CentOS and Debian/Ubuntu servers.
+
+## Requirements
+
+Requires `curl` to be installed on the server. Also, newer versions of Jenkins require Java 8+ (see the test playbooks inside the `tests/` directory for an example of how to use newer versions of Java for your OS).
+
+## Role Variables
+
+Available variables are listed below, along with default values (see `defaults/main.yml`):
+
+    jenkins_hostname: localhost
+
+The system hostname; usually `localhost` works fine. This will be used during setup to communicate with the running Jenkins instance via HTTP requests.
+
+    jenkins_home: /var/lib/jenkins
+
+The Jenkins home directory, which is used for storing artifacts, workspaces and plugins, among other things. This variable allows you to override the default `/var/lib/jenkins` location.
+
+    jenkins_http_port: 8080
+
+The HTTP port for Jenkins' web interface.
+
+    jenkins_admin_username: admin
+    jenkins_admin_password: admin
+
+Default admin account credentials which will be created the first time Jenkins is installed.
+
+    jenkins_admin_password_file: ""
+
+Default admin password file; the first time Jenkins is installed it is created as `/var/lib/jenkins/secrets/initialAdminPassword`.
+
+    jenkins_jar_location: /opt/jenkins-cli.jar
+
+The location at which the `jenkins-cli.jar` jarfile will be kept. This is used for communicating with Jenkins via the CLI.
+
+    jenkins_plugins: []
+
+Jenkins plugins to be installed automatically during provisioning. (_Note_: This feature is currently undergoing some changes due to the `jenkins-cli` authentication changes in Jenkins 2.0, and may not work as expected.)
+
+    jenkins_version: "1.644"
+    jenkins_pkg_url: "http://www.example.com"
+
+(Optional) The Jenkins version can be pinned to any version available on `http://pkg.jenkins-ci.org/debian/` (Debian/Ubuntu) or `http://pkg.jenkins-ci.org/redhat/` (RHEL/CentOS). If the Jenkins version you need is not available in the default package URLs, you can override the URL with your own by setting `jenkins_pkg_url` (_Note_: the role depends on the same naming convention that `http://pkg.jenkins-ci.org/` uses).
+
+    jenkins_url_prefix: ""
+
+Used for setting a URL prefix for your Jenkins installation. The option is added as `--prefix={{ jenkins_url_prefix }}` to the Jenkins initialization `java` invocation, so you can access the installation at a path like `http://www.example.com{{ jenkins_url_prefix }}`. Make sure you start the prefix with a `/` (e.g. `/jenkins`).
+
+    jenkins_connection_delay: 5
+    jenkins_connection_retries: 60
+
+Amount of time and number of times to wait when connecting to Jenkins after initial startup, to verify that Jenkins is running. Total time to wait = `delay` * `retries`, so by default this role will wait up to 300 seconds before timing out.
+
+    # For RedHat/CentOS (role default):
+    jenkins_repo_url: http://pkg.jenkins-ci.org/redhat/jenkins.repo
+    jenkins_repo_key_url: http://pkg.jenkins-ci.org/redhat/jenkins-ci.org.key
+    # For Debian (role default):
+    jenkins_repo_url: deb http://pkg.jenkins-ci.org/debian binary/
+    jenkins_repo_key_url: http://pkg.jenkins-ci.org/debian/jenkins-ci.org.key
+
+This role will install the latest version of Jenkins by default (using the official repositories as listed above).
You can override these variables (use the correct set for your platform) to install the current LTS version instead: + + # For RedHat/CentOS LTS: + jenkins_repo_url: http://pkg.jenkins-ci.org/redhat-stable/jenkins.repo + jenkins_repo_key_url: http://pkg.jenkins-ci.org/redhat-stable/jenkins-ci.org.key + # For Debian/Ubuntu LTS: + jenkins_repo_url: deb http://pkg.jenkins-ci.org/debian-stable binary/ + jenkins_repo_key_url: http://pkg.jenkins-ci.org/debian-stable/jenkins-ci.org.key + + jenkins_java_options: "-Djenkins.install.runSetupWizard=false" + +Extra Java options for the Jenkins launch command configured in the init file can be set with the var `jenkins_java_options`. By default the option to disable the Jenkins 2.0 setup wizard is added. + + jenkins_init_changes: + - option: "JENKINS_ARGS" + value: "--prefix={{ jenkins_url_prefix }}" + - option: "JENKINS_JAVA_OPTIONS" + value: "{{ jenkins_java_options }}" + +Changes made to the Jenkins init script; the default set of changes set the configured URL prefix and add in configured Java options for Jenkins' startup. You can add other option/value pairs if you need to set other options for the Jenkins init file. + +## Dependencies + + - geerlingguy.java + +## Example Playbook + + - hosts: ci-server + vars: + jenkins_hostname: jenkins.example.com + roles: + - geerlingguy.jenkins + +## License + +MIT (Expat) / BSD + +## Author Information + +This role was created in 2014 by [Jeff Geerling](https://www.jeffgeerling.com/), author of [Ansible for DevOps](https://www.ansiblefordevops.com/). diff --git a/ansible/roles/jenkins/defaults/main.yml b/ansible/roles/jenkins/defaults/main.yml new file mode 100644 index 0000000000000000000000000000000000000000..55d708ba8af4054aa007f189d1839d2ac49da398 --- /dev/null +++ b/ansible/roles/jenkins/defaults/main.yml @@ -0,0 +1,25 @@ +--- +# Optional method of pinning a specific version of Jenkins and/or overriding the +# default Jenkins packaging URL. +# jenkins_version: "1.644" +# jenkins_pkg_url: "https://www.example.com" + +jenkins_connection_delay: 5 +jenkins_connection_retries: 60 +jenkins_home: /var/lib/jenkins +jenkins_hostname: localhost +jenkins_http_port: 8080 +jenkins_jar_location: /opt/jenkins-cli.jar +jenkins_plugins: [] +jenkins_url_prefix: "" +jenkins_java_options: "-Djenkins.install.runSetupWizard=false" + +jenkins_admin_username: admin +jenkins_admin_password: admin +jenkins_admin_password_file: "" + +jenkins_init_changes: + - option: "JENKINS_ARGS" + value: "--prefix={{ jenkins_url_prefix }}" + - option: "{{ jenkins_java_options_env_var }}" + value: "{{ jenkins_java_options }}" \ No newline at end of file diff --git a/ansible/roles/jenkins/handlers/main.yml b/ansible/roles/jenkins/handlers/main.yml new file mode 100644 index 0000000000000000000000000000000000000000..4704d240e761e62d24bec518934bb4a3fc5e6609 --- /dev/null +++ b/ansible/roles/jenkins/handlers/main.yml @@ -0,0 +1,9 @@ +--- +- name: restart jenkins + service: name=jenkins state=restarted + +- name: configure default users + template: + src: basic-security.groovy + dest: "{{ jenkins_home }}/init.groovy.d/basic-security.groovy" + register: jenkins_users_config diff --git a/ansible/roles/jenkins/tasks/main.yml b/ansible/roles/jenkins/tasks/main.yml new file mode 100644 index 0000000000000000000000000000000000000000..a42fd1925e0002fbcf6aeb7ffc4596ab88723a73 --- /dev/null +++ b/ansible/roles/jenkins/tasks/main.yml @@ -0,0 +1,58 @@ +--- +# Variable setup. 
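+# The role resolves repo settings per OS family: vars/Debian.yml and vars/RedHat.yml
+# ship distro defaults under double-underscore names (e.g. __jenkins_repo_url), and
+# the set_fact tasks below promote them only when the caller has not overridden the
+# public jenkins_repo_url / jenkins_repo_key_url / jenkins_pkg_url variables.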
+- name: Include OS-Specific variables + include_vars: "{{ ansible_os_family }}.yml" + +- name: Define jenkins_repo_url + set_fact: + jenkins_repo_url: "{{ __jenkins_repo_url }}" + when: jenkins_repo_url is not defined + +- name: Define jenkins_repo_key_url + set_fact: + jenkins_repo_key_url: "{{ __jenkins_repo_key_url }}" + when: jenkins_repo_key_url is not defined + +- name: Define jenkins_pkg_url + set_fact: + jenkins_pkg_url: "{{ __jenkins_pkg_url }}" + when: jenkins_pkg_url is not defined + +# Setup/install tasks. +- include: setup-RedHat.yml + when: ansible_os_family == 'RedHat' + +- include: setup-Debian.yml + when: ansible_os_family == 'Debian' + +# Configure Jenkins init settings. +- include: settings.yml + +# Make sure Jenkins starts, then configure Jenkins. +- name: Ensure Jenkins is started and runs on startup. + service: name=jenkins state=started enabled=yes + +- name: Wait for Jenkins to start up before proceeding. + shell: "curl -D - --silent --max-time 5 http://{{ jenkins_hostname }}:{{ jenkins_http_port }}{{ jenkins_url_prefix }}/cli/" + register: result + until: (result.stdout.find("403 Forbidden") != -1) or (result.stdout.find("200 OK") != -1) and (result.stdout.find("Please wait while") == -1) + retries: "{{ jenkins_connection_retries }}" + delay: "{{ jenkins_connection_delay }}" + changed_when: false + +- name: Get the jenkins-cli jarfile from the Jenkins server. + get_url: + url: "http://{{ jenkins_hostname }}:{{ jenkins_http_port }}{{ jenkins_url_prefix }}/jnlpJars/jenkins-cli.jar" + dest: "{{ jenkins_jar_location }}" + register: jarfile_get + until: "'OK' in jarfile_get.msg or 'file already exists' in jarfile_get.msg" + retries: 5 + delay: 10 + +- name: Remove Jenkins security init scripts after first startup. + file: + path: "{{ jenkins_home }}/init.groovy.d/basic-security.groovy" + state: absent + +# Update Jenkins and install configured plugins. +- include: plugins.yml diff --git a/ansible/roles/jenkins/tasks/plugins.yml b/ansible/roles/jenkins/tasks/plugins.yml new file mode 100644 index 0000000000000000000000000000000000000000..9c0788d10b70244d2d0d8661d72f3118edd20741 --- /dev/null +++ b/ansible/roles/jenkins/tasks/plugins.yml @@ -0,0 +1,51 @@ +--- +# Jenkins doesn't allow updates via CLI, though that is required before plugins +# can be installed via CLI. See: https://gist.github.com/rowan-m/1026918 +- name: Create Jenkins updates folder. + file: + path: "{{ jenkins_home }}/updates" + owner: jenkins + group: jenkins + mode: 0755 + state: directory + register: jenkins_plugins_folder_create + +- name: Update Jenkins plugin data. + shell: curl -L https://updates.jenkins-ci.org/update-center.json | sed '1d;$d' > "{{ jenkins_home }}/updates/default.json" + args: + creates: "{{ jenkins_home }}/updates/default.json" + +- name: Permissions for default.json updates info. + file: + path: "{{ jenkins_home }}/updates/default.json" + owner: jenkins + group: jenkins + mode: 0755 + when: jenkins_plugins_folder_create.changed + +- name: Check if we're using a password file for authentication + stat: + path: "{{ jenkins_admin_password_file }}" + register: adminpasswordfile + +- name: Install Jenkins plugins using password. 
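+  # Installs each plugin through the jenkins-cli.jar fetched earlier; the creates=
+  # guard below keeps the task idempotent (skipped once the plugin's .jpi exists).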
+ command: > + java -jar {{ jenkins_jar_location }} -s http://{{ jenkins_hostname }}:{{ jenkins_http_port }}{{ jenkins_url_prefix | default('') }}/ + install-plugin {{ item }} + --username {{ jenkins_admin_username }} + --password {{ jenkins_admin_password }} + creates="{{ jenkins_home }}/plugins/{{ item }}.jpi" + with_items: "{{ jenkins_plugins }}" + when: jenkins_admin_password != "" + notify: restart jenkins + +- name: Install Jenkins plugins using password-file. + command: > + java -jar {{ jenkins_jar_location }} -s http://{{ jenkins_hostname }}:{{ jenkins_http_port }}{{ jenkins_url_prefix | default('') }}/ + install-plugin {{ item }} + --username {{ jenkins_admin_username }} + --password-file {{ jenkins_admin_password_file }} + creates={{ jenkins_home }}/plugins/{{ item }}.jpi + with_items: "{{ jenkins_plugins }}" + when: adminpasswordfile.stat.exists == True + notify: restart jenkins diff --git a/ansible/roles/jenkins/tasks/settings.yml b/ansible/roles/jenkins/tasks/settings.yml new file mode 100755 index 0000000000000000000000000000000000000000..79b3694a9a90308c9afb3d39ee385148e353159d --- /dev/null +++ b/ansible/roles/jenkins/tasks/settings.yml @@ -0,0 +1,47 @@ +--- +- name: Modify variables in init file + lineinfile: + dest: "{{ jenkins_init_file }}" + insertafter: '^{{ item.option }}=' + regexp: '^{{ item.option}}=\"\${{ item.option }} ' + line: '{{ item.option }}="${{ item.option }} {{ item.value }}"' + state: present + with_items: + "{{ jenkins_init_changes }}" + register: jenkins_init_prefix + +- name: Set the Jenkins home directory + lineinfile: + dest: "{{ jenkins_init_file }}" + regexp: '^JENKINS_HOME=.*' + line: 'JENKINS_HOME={{ jenkins_home }}' + register: jenkins_home_config + +- name: Immediately restart Jenkins on init config changes. + service: name=jenkins state=restarted + when: jenkins_init_prefix.changed + +- name: Set HTTP port in Jenkins config. + lineinfile: + backrefs: yes + dest: "{{ jenkins_init_file }}" + regexp: '^{{ jenkins_http_port_param }}=' + line: '{{ jenkins_http_port_param }}={{ jenkins_http_port }}' + register: jenkins_http_config + +- name: Create custom init scripts directory. + file: + path: "{{ jenkins_home }}/init.groovy.d" + state: directory + owner: jenkins + group: jenkins + mode: 0775 + +- name: Trigger handlers immediately in case Jenkins was installed + meta: flush_handlers + +- name: Immediately restart Jenkins on http or user changes. + service: name=jenkins state=restarted + when: (jenkins_users_config is defined and jenkins_users_config.changed) or + (jenkins_http_config is defined and jenkins_http_config.changed) or + (jenkins_home_config is defined and jenkins_home_config.changed) diff --git a/ansible/roles/jenkins/tasks/setup-Debian.yml b/ansible/roles/jenkins/tasks/setup-Debian.yml new file mode 100644 index 0000000000000000000000000000000000000000..2095d08a5c4928f9ca512f667f19a19c6fbc219e --- /dev/null +++ b/ansible/roles/jenkins/tasks/setup-Debian.yml @@ -0,0 +1,50 @@ +--- +- name: Ensure dependencies are installed. + apt: + name: + - curl + - apt-transport-https + state: installed + +- name: Add Jenkins apt repository key. + apt_key: + url: "{{ jenkins_repo_key_url }}" + state: present + when: jenkins_version is undefined + +- name: Add Jenkins apt repository. + apt_repository: + repo: "{{ jenkins_repo_url }}" + state: present + update_cache: yes + when: jenkins_version is undefined + +- name: Download specific Jenkins version. 
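+  # Only runs when jenkins_version is pinned; the .deb then comes straight from
+  # jenkins_pkg_url instead of the apt repository added above.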
+ get_url: + url: "{{ jenkins_pkg_url }}/jenkins_{{ jenkins_version }}_all.deb" + dest: "/tmp/jenkins_{{ jenkins_version }}_all.deb" + when: jenkins_version is defined + +- name: Check if we downloaded a specific version of Jenkins. + stat: + path: "/tmp/jenkins_{{ jenkins_version }}_all.deb" + register: specific_version + +- name: Install our specific version of Jenkins. + apt: + deb: "/tmp/jenkins_{{ jenkins_version }}_all.deb" + state: installed + when: specific_version.stat.exists + notify: configure default users + +- name: Validate Jenkins is installed and register package name. + apt: + name: jenkins + state: present + notify: configure default users + +- name: Install Jenkins from repository. + apt: + name: jenkins + state: installed + when: jenkins_version is undefined diff --git a/ansible/roles/jenkins/tasks/setup-RedHat.yml b/ansible/roles/jenkins/tasks/setup-RedHat.yml new file mode 100644 index 0000000000000000000000000000000000000000..9bd739d3d773a9464e575c976e7f361da54d6c77 --- /dev/null +++ b/ansible/roles/jenkins/tasks/setup-RedHat.yml @@ -0,0 +1,49 @@ +--- +- name: Ensure dependencies are installed. + package: + name: + - curl + - libselinux-python + - initscripts + state: installed + +- name: Ensure Jenkins repo is installed. + get_url: + url: "{{ jenkins_repo_url }}" + dest: /etc/yum.repos.d/jenkins.repo + +- name: Add Jenkins repo GPG key. + rpm_key: + state: present + key: "{{ jenkins_repo_key_url }}" + +- name: Download specific Jenkins version. + get_url: + url: "{{ jenkins_pkg_url }}/jenkins-{{ jenkins_version }}-1.1.noarch.rpm" + dest: "/tmp/jenkins.rpm" + when: jenkins_version is defined + +- name: Check if we downloaded a specific version of Jenkins. + stat: + path: "/tmp/jenkins.rpm" + register: specific_version + +- name: Install our specific version of Jenkins. + package: + name: "/tmp/jenkins.rpm" + state: installed + when: specific_version.stat.exists + notify: configure default users + +- name: Validate Jenkins is installed and register package name. + package: + name: jenkins + state: present + when: not specific_version.stat.exists + notify: configure default users + +- name: Install Jenkins from repository. 
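+  # Fallback path: with no pinned jenkins_version, install whatever the Jenkins
+  # yum repository configured above provides.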
+ package: + name: jenkins + state: installed + when: jenkins_version is undefined diff --git a/ansible/roles/jenkins/templates/basic-security.groovy b/ansible/roles/jenkins/templates/basic-security.groovy new file mode 100755 index 0000000000000000000000000000000000000000..847aa5a7c9ca42671ebc9fc8598ae2f1e5dacb13 --- /dev/null +++ b/ansible/roles/jenkins/templates/basic-security.groovy @@ -0,0 +1,19 @@ +#!groovy +import hudson.security.* +import jenkins.model.* + +def instance = Jenkins.getInstance() + +println "--> Checking if security has been set already" + +if (!instance.isUseSecurity()) { + println "--> creating local user 'admin'" + + def hudsonRealm = new HudsonPrivateSecurityRealm(false) + hudsonRealm.createAccount('{{ jenkins_admin_username }}', '{{ jenkins_admin_password }}') + instance.setSecurityRealm(hudsonRealm) + + def strategy = new FullControlOnceLoggedInAuthorizationStrategy() + instance.setAuthorizationStrategy(strategy) + instance.save() +} diff --git a/ansible/roles/jenkins/vars/Debian.yml b/ansible/roles/jenkins/vars/Debian.yml new file mode 100644 index 0000000000000000000000000000000000000000..697aeac2a1c8093d4f31290fb89192044edf8795 --- /dev/null +++ b/ansible/roles/jenkins/vars/Debian.yml @@ -0,0 +1,7 @@ +--- +__jenkins_repo_url: deb http://pkg.jenkins.io/debian binary/ +__jenkins_repo_key_url: http://pkg.jenkins.io/debian/jenkins.io.key +__jenkins_pkg_url: http://pkg.jenkins.io/debian/binary +jenkins_init_file: /etc/default/jenkins +jenkins_http_port_param: HTTP_PORT +jenkins_java_options_env_var: JAVA_ARGS diff --git a/ansible/roles/jenkins/vars/RedHat.yml b/ansible/roles/jenkins/vars/RedHat.yml new file mode 100644 index 0000000000000000000000000000000000000000..82d884551e012fb9918d7715e9d93d6f0af3c0ad --- /dev/null +++ b/ansible/roles/jenkins/vars/RedHat.yml @@ -0,0 +1,7 @@ +--- +__jenkins_repo_url: https://pkg.jenkins.io/redhat/jenkins.repo +__jenkins_repo_key_url: https://pkg.jenkins.io/redhat/jenkins.io.key +__jenkins_pkg_url: https://pkg.jenkins.io/redhat +jenkins_init_file: /etc/sysconfig/jenkins +jenkins_http_port_param: JENKINS_PORT +jenkins_java_options_env_var: JENKINS_JAVA_OPTIONS diff --git a/ansible/roles/kong-api/defaults/main.yml b/ansible/roles/kong-api/defaults/main.yml new file mode 100644 index 0000000000000000000000000000000000000000..df68310728cc7450de942c0987f7c8c75d2d49e5 --- /dev/null +++ b/ansible/roles/kong-api/defaults/main.yml @@ -0,0 +1,17 @@ +--- +kong_admin_api_url: "http://localhost:8001" + +kong_apis: [] +# Example: +# kong_apis: +# - name: "readContent" +# request_path: "/v3/public/content/read" +# upstream_url: "{{ learning_service_url }}/v3/public/content/read" +# strip_request_path: true +# plugins: +# - {name: 'jwt'} +# - {name: 'cors'} +# - {name: 'statsd', config.metrics: "{{ statsd_metrics }}" } +# - {name: 'acl', config.whitelist: 'contentUser'} +# - {name: 'rate-limiting', config.hour: "{{ medium_rate_limit_per_hour }}"} +# - {name: 'request-size-limiting', config.allowed_payload_size: "{{ medium_request_size_limit }}" } \ No newline at end of file diff --git a/ansible/roles/kong-api/tasks/main.yml b/ansible/roles/kong-api/tasks/main.yml new file mode 100644 index 0000000000000000000000000000000000000000..2059390e2f8e5d5a16c116cf31a3cb19377ca72a --- /dev/null +++ b/ansible/roles/kong-api/tasks/main.yml @@ -0,0 +1,9 @@ +--- +- name: Save api details to json file + copy: dest=/tmp/kong_apis.json content="{{ kong_apis | to_nice_json}}" mode=0644 + +- name: Copy kong api scripts + copy: src=static-files/kong-api-scripts 
dest=/tmp mode=0755 + +- name: Run script to save apis + shell: "python /tmp/kong-api-scripts/kong_apis.py /tmp/kong_apis.json --kong-admin-api-url={{ kong_admin_api_url }}" diff --git a/ansible/roles/kong-consumer/defaults/main.yml b/ansible/roles/kong-consumer/defaults/main.yml new file mode 100644 index 0000000000000000000000000000000000000000..fc37d27dfac42480dfe28692b745fe1181f405ef --- /dev/null +++ b/ansible/roles/kong-consumer/defaults/main.yml @@ -0,0 +1,15 @@ +--- +kong_admin_api_url: "http://localhost:8001" + +# List all consumer groups in group_vars +kong_all_consumer_groups: [] + +# List the consumers need to be present as state: present +# List the consumers need to be absent as state: absent +# This role will not update / delete consumers not listed here +kong_consumers: + - username: api-management-test-user + groups: "{{ kong_all_consumer_groups }}" + state: present + - username: add-any-consumer-to-be-deleted + state: absent diff --git a/ansible/roles/kong-consumer/tasks/main.yml b/ansible/roles/kong-consumer/tasks/main.yml new file mode 100644 index 0000000000000000000000000000000000000000..2d96c19ef93d7a09e9ee3acd9b4b043dd2e956e4 --- /dev/null +++ b/ansible/roles/kong-consumer/tasks/main.yml @@ -0,0 +1,9 @@ +--- +- name: Save kong_consumers to json file + copy: dest=/tmp/kong_consumers.json content="{{ kong_consumers | to_nice_json}}" mode=0644 + +- name: Copy kong api scripts + copy: src=static-files/kong-api-scripts dest=/tmp mode=0755 + +- name: Run script to save consumers + shell: "python /tmp/kong-api-scripts/kong_consumers.py /tmp/kong_consumers.json --kong-admin-api-url={{ kong_admin_api_url }}" diff --git a/ansible/roles/mongo-backup/meta/main.yml b/ansible/roles/mongo-backup/meta/main.yml new file mode 100644 index 0000000000000000000000000000000000000000..23b18a800a4645387a83ac0873b6f893d62c081d --- /dev/null +++ b/ansible/roles/mongo-backup/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - azure-cli \ No newline at end of file diff --git a/ansible/roles/mongo-backup/tasks/backup_config.yml b/ansible/roles/mongo-backup/tasks/backup_config.yml new file mode 100644 index 0000000000000000000000000000000000000000..f64392bb1cd718cd483490cb458c7129b61d1b2f --- /dev/null +++ b/ansible/roles/mongo-backup/tasks/backup_config.yml @@ -0,0 +1,55 @@ +- name: copy the backup_config template + template: src=backup_config.j2 dest=/home/deployer/backup_config.js mode=0777 + + +- name: create backup directory for config + become: yes + file: path=/home/deployer/mongo-backups state=directory + +- name: create backup directory for config + become: yes + file: path=/home/deployer/mongo-backups/dump state=directory + +- name: backup backup_config for config + become: yes + shell: mongodump -o /home/deployer/mongo-backups/dump + ignore_errors: yes + +- name: zip the dump + become: yes + shell: zip -r "mongo_backup_{{type}}_`date +%Y%m%d`.zip" dump/ + args: + chdir: /home/deployer/mongo-backups + +- set_fact: + mongo_backup_gzip_file_name: "mongo_backup_{{type}}_{{ lookup('pipe', 'date +%Y%m%d') }}.zip" + + +- set_fact: + mongo_backup_gzip_file_path: "{{ mongo.backup_dir }}/{{ mongo_backup_gzip_file_name }}" + +- name: Ensure azure blob storage container exists + command: az storage container create --name {{ mongo.backup_azure_container_name }} + ignore_errors: true + environment: + AZURE_STORAGE_ACCOUNT: "{{ mongo.backup_azure_storage_account_name }}" + AZURE_STORAGE_KEY: "{{ mongo.backup_azure_storage_access_key }}" + +- name: Upload to azure blob storage + command: az storage 
diff --git a/ansible/roles/mongo-backup/meta/main.yml b/ansible/roles/mongo-backup/meta/main.yml new file mode 100644 index 0000000000000000000000000000000000000000..23b18a800a4645387a83ac0873b6f893d62c081d --- /dev/null +++ b/ansible/roles/mongo-backup/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - azure-cli \ No newline at end of file diff --git a/ansible/roles/mongo-backup/tasks/backup_config.yml b/ansible/roles/mongo-backup/tasks/backup_config.yml new file mode 100644 index 0000000000000000000000000000000000000000..f64392bb1cd718cd483490cb458c7129b61d1b2f --- /dev/null +++ b/ansible/roles/mongo-backup/tasks/backup_config.yml @@ -0,0 +1,55 @@ +- name: copy the backup_config template + template: src=backup_config.j2 dest=/home/deployer/backup_config.js mode=0777 + + +- name: create backup directory for config + become: yes + file: path=/home/deployer/mongo-backups state=directory + +- name: create dump directory for config + become: yes + file: path=/home/deployer/mongo-backups/dump state=directory + +- name: run mongodump for config + become: yes + shell: mongodump -o /home/deployer/mongo-backups/dump + ignore_errors: yes + +- name: zip the dump + become: yes + shell: zip -r "mongo_backup_{{type}}_`date +%Y%m%d`.zip" dump/ + args: + chdir: /home/deployer/mongo-backups + +- set_fact: + mongo_backup_gzip_file_name: "mongo_backup_{{type}}_{{ lookup('pipe', 'date +%Y%m%d') }}.zip" + + +- set_fact: + mongo_backup_gzip_file_path: "{{ mongo.backup_dir }}/{{ mongo_backup_gzip_file_name }}" + +- name: Ensure azure blob storage container exists + command: az storage container create --name {{ mongo.backup_azure_container_name }} + ignore_errors: true + environment: + AZURE_STORAGE_ACCOUNT: "{{ mongo.backup_azure_storage_account_name }}" + AZURE_STORAGE_KEY: "{{ mongo.backup_azure_storage_access_key }}" + +- name: Upload to azure blob storage + command: az storage blob upload --name {{ mongo_backup_gzip_file_name }} --file {{ mongo_backup_gzip_file_path }} --container-name {{ mongo.backup_azure_container_name }} + environment: + AZURE_STORAGE_ACCOUNT: "{{ mongo.backup_azure_storage_account_name }}" + AZURE_STORAGE_KEY: "{{ mongo.backup_azure_storage_access_key }}" + async: 3600 + poll: 10 + +- name: clean up backup dir after upload + file: path="{{ mongo.backup_dir }}" state=absent + + +- name: run the template for backup config + become: yes + shell: mongo < /home/deployer/backup_config.js + async: 15 + poll: 0 + diff --git a/ansible/roles/mongo-backup/tasks/backup_replica.yml b/ansible/roles/mongo-backup/tasks/backup_replica.yml new file mode 100644 index 0000000000000000000000000000000000000000..f72898953e5c8343ca46559e67e382637e323d08 --- /dev/null +++ b/ansible/roles/mongo-backup/tasks/backup_replica.yml @@ -0,0 +1,55 @@ +- name: copy the template backup_replica + template: src=backup_replica.j2 dest=/home/deployer/backup_replica.js mode=0777 + +- name: create backup directory + become: yes + file: path=/home/deployer/mongo-backups state=directory + +- name: create dump directory + become: yes + file: path=/home/deployer/mongo-backups/dump state=directory + +- name: run mongodump for the replica + become: yes + shell: mongodump -o /home/deployer/mongo-backups/dump + ignore_errors: yes + +- name: zip the dump + become: yes + shell: zip -r "mongo_backup_{{type}}_`date +%Y%m%d`.zip" dump/ + args: + chdir: /home/deployer/mongo-backups + +- set_fact: + mongo_backup_gzip_file_name: "mongo_backup_{{type}}_{{ lookup('pipe', 'date +%Y%m%d') }}.zip" + + +- set_fact: + mongo_backup_gzip_file_path: "{{ mongo.backup_dir }}/{{ mongo_backup_gzip_file_name }}" + +- name: Ensure azure blob storage container exists + command: az storage container create --name {{ mongo.backup_azure_container_name }} + ignore_errors: true + environment: + AZURE_STORAGE_ACCOUNT: "{{ mongo.backup_azure_storage_account_name }}" + AZURE_STORAGE_KEY: "{{ mongo.backup_azure_storage_access_key }}" + +- name: Upload to azure blob storage + command: az storage blob upload --name {{ mongo_backup_gzip_file_name }} --file {{ mongo_backup_gzip_file_path }} --container-name {{ mongo.backup_azure_container_name }} + environment: + AZURE_STORAGE_ACCOUNT: "{{ mongo.backup_azure_storage_account_name }}" + AZURE_STORAGE_KEY: "{{ mongo.backup_azure_storage_access_key }}" + async: 3600 + poll: 10 + +- name: clean up backup dir after upload + file: path="{{ mongo.backup_dir }}" state=absent + + +- name: run the template backup_replica + become: yes + shell: mongo < /home/deployer/backup_replica.js + async: 15 + poll: 0 + + diff --git a/ansible/roles/mongo-backup/tasks/lock_config.yml b/ansible/roles/mongo-backup/tasks/lock_config.yml new file mode 100644 index 0000000000000000000000000000000000000000..e050ce799354292b41b68897baebeb2649653396 --- /dev/null +++ b/ansible/roles/mongo-backup/tasks/lock_config.yml @@ -0,0 +1,10 @@ +- name: copy the template lock_config + template: src=lock_config.j2 dest=/home/deployer/lock_config.js mode=0777 + +- name: run the template lock_config + become: yes + shell: mongo < /home/deployer/lock_config.js + async: 15 + poll: 0 + + diff --git a/ansible/roles/mongo-backup/tasks/lock_replica.yml b/ansible/roles/mongo-backup/tasks/lock_replica.yml new file mode 100644 index 0000000000000000000000000000000000000000..7beed1f801ef089eeb5bf18322374d627dd05869 --- /dev/null +++ b/ansible/roles/mongo-backup/tasks/lock_replica.yml @@ -0,0 +1,10 @@ +- name: copy the template lock_shard
+ template: src=lock_shard.j2 dest=/home/deployer/lock_shard.js mode=0777 + +- name: run the template lock_shard + become: yes + shell: "mongo < /home/deployer/lock_shard.js" + async: 15 + poll: 0 + + diff --git a/ansible/roles/mongo-backup/tasks/main.yml b/ansible/roles/mongo-backup/tasks/main.yml new file mode 100644 index 0000000000000000000000000000000000000000..594ccefb499de73b579a26cf7ddfbbf0b6a1bdd9 --- /dev/null +++ b/ansible/roles/mongo-backup/tasks/main.yml @@ -0,0 +1,21 @@ +- name: install zip + become: yes + apt: name=zip state=present + +- include: stop_query.yml + when: query is defined + +- include: lock_replica.yml + when: shard is defined + +- include: lock_config.yml + when: config is defined + +- include: backup_config.yml + when: config is defined + +- include: backup_replica.yml + when: shard is defined + +- include: start_query.yml + when: query is defined diff --git a/ansible/roles/mongo-backup/tasks/start_query.yml b/ansible/roles/mongo-backup/tasks/start_query.yml new file mode 100644 index 0000000000000000000000000000000000000000..4686f91eeeaeaeae3386ba62b4d670fd3c25eff3 --- /dev/null +++ b/ansible/roles/mongo-backup/tasks/start_query.yml @@ -0,0 +1,9 @@ +- name: copy the template start_balancer + template: src=start_balancer.j2 dest=/home/deployer/start_balancer.js mode=0777 + +- name: run the template start_balancer + become: yes + shell: mongo < /home/deployer/start_balancer.js + async: 15 + poll: 0 + diff --git a/ansible/roles/mongo-backup/tasks/stop_query.yml b/ansible/roles/mongo-backup/tasks/stop_query.yml new file mode 100644 index 0000000000000000000000000000000000000000..409fdf5ad6d3818f44b2dfda91144f107833f56c --- /dev/null +++ b/ansible/roles/mongo-backup/tasks/stop_query.yml @@ -0,0 +1,9 @@ +- name: copy the template stop_balancer + template: src=stop_balancer.j2 dest=/home/deployer/stop_balancer.js mode=0777 + +- name: run the template stop_balancer + become: yes + shell: mongo < /home/deployer/stop_balancer.js + async: 15 + poll: 0 + diff --git a/ansible/roles/mongo-backup/templates/backup_config.j2 b/ansible/roles/mongo-backup/templates/backup_config.j2 new file mode 100644 index 0000000000000000000000000000000000000000..9e6e3fe38b2ffc192110aa5eb51da1281c2b48dd --- /dev/null +++ b/ansible/roles/mongo-backup/templates/backup_config.j2 @@ -0,0 +1,2 @@ +use config; +db.fsyncUnlock(); \ No newline at end of file diff --git a/ansible/roles/mongo-backup/templates/backup_replica.j2 b/ansible/roles/mongo-backup/templates/backup_replica.j2 new file mode 100644 index 0000000000000000000000000000000000000000..78986821dd14925a60d1ae5bee6f14bc3a5c8b27 --- /dev/null +++ b/ansible/roles/mongo-backup/templates/backup_replica.j2 @@ -0,0 +1 @@ +db.fsyncUnlock(); diff --git a/ansible/roles/mongo-backup/templates/lock_config.j2 b/ansible/roles/mongo-backup/templates/lock_config.j2 new file mode 100644 index 0000000000000000000000000000000000000000..445d306311766e88935b03c9cc7e3f69f850356e --- /dev/null +++ b/ansible/roles/mongo-backup/templates/lock_config.j2 @@ -0,0 +1,2 @@ +use config; +db.fsyncLock(); diff --git a/ansible/roles/mongo-backup/templates/lock_shard.j2 b/ansible/roles/mongo-backup/templates/lock_shard.j2 new file mode 100644 index 0000000000000000000000000000000000000000..6e5a03609e2836353dcc87aa41f60c5197557f91 --- /dev/null +++ b/ansible/roles/mongo-backup/templates/lock_shard.j2 @@ -0,0 +1 @@ +db.fsyncLock(); diff --git a/ansible/roles/mongo-backup/templates/start_balancer.j2 b/ansible/roles/mongo-backup/templates/start_balancer.j2 new file mode 
100644 index 0000000000000000000000000000000000000000..25abdd283962ee7fb62b9324ae17a1a3bbcdd33a --- /dev/null +++ b/ansible/roles/mongo-backup/templates/start_balancer.j2 @@ -0,0 +1,2 @@ +use config; +sh.setBalancerState(true); diff --git a/ansible/roles/mongo-backup/templates/stop_balancer.j2 b/ansible/roles/mongo-backup/templates/stop_balancer.j2 new file mode 100644 index 0000000000000000000000000000000000000000..f26faa4c0a3de6d9919862011f878a8586fdfdf9 --- /dev/null +++ b/ansible/roles/mongo-backup/templates/stop_balancer.j2 @@ -0,0 +1,2 @@ +use config; +sh.stopBalancer(); \ No newline at end of file diff --git a/ansible/roles/mongo/files/Centos-ali.repo b/ansible/roles/mongo/files/Centos-ali.repo new file mode 100755 index 0000000000000000000000000000000000000000..5976ed04ea11f06ed45712fdc11e775b7fd10311 --- /dev/null +++ b/ansible/roles/mongo/files/Centos-ali.repo @@ -0,0 +1,57 @@ +# CentOS-Base.repo +# +# The mirror system uses the connecting IP address of the client and the +# update status of each mirror to pick mirrors that are updated to and +# geographically close to the client. You should use this for CentOS updates +# unless you are manually picking other mirrors. +# +# If the mirrorlist= does not work for you, as a fall back you can try the +# remarked out baseurl= line instead. +# +# + +[base] +name=CentOS-$releasever - Base - mirrors.aliyun.com +failovermethod=priority +baseurl=http://mirrors.aliyun.com/centos/$releasever/os/$basearch/ +#mirrorlist=http://mirrorlist.centos.org/?release=$releasever&arch=$basearch&repo=os +gpgcheck=1 +gpgkey=http://mirrors.aliyun.com/centos/RPM-GPG-KEY-CentOS-7 + +#released updates +[updates] +name=CentOS-$releasever - Updates - mirrors.aliyun.com +failovermethod=priority +baseurl=http://mirrors.aliyun.com/centos/$releasever/updates/$basearch/ +#mirrorlist=http://mirrorlist.centos.org/?release=$releasever&arch=$basearch&repo=updates +gpgcheck=1 +gpgkey=http://mirrors.aliyun.com/centos/RPM-GPG-KEY-CentOS-7 + +#additional packages that may be useful +[extras] +name=CentOS-$releasever - Extras - mirrors.aliyun.com +failovermethod=priority +baseurl=http://mirrors.aliyun.com/centos/$releasever/extras/$basearch/ +#mirrorlist=http://mirrorlist.centos.org/?release=$releasever&arch=$basearch&repo=extras +gpgcheck=1 +gpgkey=http://mirrors.aliyun.com/centos/RPM-GPG-KEY-CentOS-7 + +#additional packages that extend functionality of existing packages +[centosplus] +name=CentOS-$releasever - Plus - mirrors.aliyun.com +failovermethod=priority +baseurl=http://mirrors.aliyun.com/centos/$releasever/centosplus/$basearch/ +#mirrorlist=http://mirrorlist.centos.org/?release=$releasever&arch=$basearch&repo=centosplus +gpgcheck=1 +enabled=0 +gpgkey=http://mirrors.aliyun.com/centos/RPM-GPG-KEY-CentOS-7 + +#contrib - packages by Centos Users +[contrib] +name=CentOS-$releasever - Contrib - mirrors.aliyun.com +failovermethod=priority +baseurl=http://mirrors.aliyun.com/centos/$releasever/contrib/$basearch/ +#mirrorlist=http://mirrorlist.centos.org/?release=$releasever&arch=$basearch&repo=contrib +gpgcheck=1 +enabled=0 +gpgkey=http://mirrors.aliyun.com/centos/RPM-GPG-KEY-CentOS-7 diff --git a/ansible/roles/mongo/files/RPM-GPG-KEY-EPEL-6 b/ansible/roles/mongo/files/RPM-GPG-KEY-EPEL-6 new file mode 100755 index 0000000000000000000000000000000000000000..7a2030489d2e2985192e8fc883becdf50a80bdd3 --- /dev/null +++ b/ansible/roles/mongo/files/RPM-GPG-KEY-EPEL-6 @@ -0,0 +1,29 @@ +-----BEGIN PGP PUBLIC KEY BLOCK----- +Version: GnuPG v1.4.5 (GNU/Linux) + 
+mQINBEvSKUIBEADLGnUj24ZVKW7liFN/JA5CgtzlNnKs7sBg7fVbNWryiE3URbn1 +JXvrdwHtkKyY96/ifZ1Ld3lE2gOF61bGZ2CWwJNee76Sp9Z+isP8RQXbG5jwj/4B +M9HK7phktqFVJ8VbY2jfTjcfxRvGM8YBwXF8hx0CDZURAjvf1xRSQJ7iAo58qcHn +XtxOAvQmAbR9z6Q/h/D+Y/PhoIJp1OV4VNHCbCs9M7HUVBpgC53PDcTUQuwcgeY6 +pQgo9eT1eLNSZVrJ5Bctivl1UcD6P6CIGkkeT2gNhqindRPngUXGXW7Qzoefe+fV +QqJSm7Tq2q9oqVZ46J964waCRItRySpuW5dxZO34WM6wsw2BP2MlACbH4l3luqtp +Xo3Bvfnk+HAFH3HcMuwdaulxv7zYKXCfNoSfgrpEfo2Ex4Im/I3WdtwME/Gbnwdq +3VJzgAxLVFhczDHwNkjmIdPAlNJ9/ixRjip4dgZtW8VcBCrNoL+LhDrIfjvnLdRu +vBHy9P3sCF7FZycaHlMWP6RiLtHnEMGcbZ8QpQHi2dReU1wyr9QgguGU+jqSXYar +1yEcsdRGasppNIZ8+Qawbm/a4doT10TEtPArhSoHlwbvqTDYjtfV92lC/2iwgO6g +YgG9XrO4V8dV39Ffm7oLFfvTbg5mv4Q/E6AWo/gkjmtxkculbyAvjFtYAQARAQAB +tCFFUEVMICg2KSA8ZXBlbEBmZWRvcmFwcm9qZWN0Lm9yZz6JAjYEEwECACAFAkvS +KUICGw8GCwkIBwMCBBUCCAMEFgIDAQIeAQIXgAAKCRA7Sd8qBgi4lR/GD/wLGPv9 +qO39eyb9NlrwfKdUEo1tHxKdrhNz+XYrO4yVDTBZRPSuvL2yaoeSIhQOKhNPfEgT +9mdsbsgcfmoHxmGVcn+lbheWsSvcgrXuz0gLt8TGGKGGROAoLXpuUsb1HNtKEOwP +Q4z1uQ2nOz5hLRyDOV0I2LwYV8BjGIjBKUMFEUxFTsL7XOZkrAg/WbTH2PW3hrfS +WtcRA7EYonI3B80d39ffws7SmyKbS5PmZjqOPuTvV2F0tMhKIhncBwoojWZPExft +HpKhzKVh8fdDO/3P1y1Fk3Cin8UbCO9MWMFNR27fVzCANlEPljsHA+3Ez4F7uboF +p0OOEov4Yyi4BEbgqZnthTG4ub9nyiupIZ3ckPHr3nVcDUGcL6lQD/nkmNVIeLYP +x1uHPOSlWfuojAYgzRH6LL7Idg4FHHBA0to7FW8dQXFIOyNiJFAOT2j8P5+tVdq8 +wB0PDSH8yRpn4HdJ9RYquau4OkjluxOWf0uRaS//SUcCZh+1/KBEOmcvBHYRZA5J +l/nakCgxGb2paQOzqqpOcHKvlyLuzO5uybMXaipLExTGJXBlXrbbASfXa/yGYSAG +iVrGz9CE6676dMlm8F+s3XXE13QZrXmjloc6jwOljnfAkjTGXjiB7OULESed96MR +XtfLk0W5Ab9pd7tKDR6QHI7rgHXfCopRnZ2VVQ== +=V/6I +-----END PGP PUBLIC KEY BLOCK----- diff --git a/ansible/roles/mongo/files/epel.repo.j2 b/ansible/roles/mongo/files/epel.repo.j2 new file mode 100755 index 0000000000000000000000000000000000000000..8c5cb932d0b39b126b24e5fc86f81b12e7f07741 --- /dev/null +++ b/ansible/roles/mongo/files/epel.repo.j2 @@ -0,0 +1,26 @@ +[epel] +name=Extra Packages for Enterprise Linux 6 - $basearch +baseurl=http://download.fedoraproject.org/pub/epel/6/$basearch +#mirrorlist=https://mirrors.fedoraproject.org/metalink?repo=epel-6&arch=$basearch +failovermethod=priority +enabled=1 +gpgcheck=1 +gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-EPEL-6 + +[epel-debuginfo] +name=Extra Packages for Enterprise Linux 6 - $basearch - Debug +#baseurl=http://download.fedoraproject.org/pub/epel/6/$basearch/debug +mirrorlist=https://mirrors.fedoraproject.org/metalink?repo=epel-debug-6&arch=$basearch +failovermethod=priority +enabled=0 +gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-EPEL-6 +gpgcheck=1 + +[epel-source] +name=Extra Packages for Enterprise Linux 6 - $basearch - Source +#baseurl=http://download.fedoraproject.org/pub/epel/6/SRPMS +mirrorlist=https://mirrors.fedoraproject.org/metalink?repo=epel-source-6&arch=$basearch +failovermethod=priority +enabled=0 +gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-EPEL-6 +gpgcheck=1 diff --git a/ansible/roles/mongo/files/mongodb-org-3.2.repo b/ansible/roles/mongo/files/mongodb-org-3.2.repo new file mode 100755 index 0000000000000000000000000000000000000000..c91b6a06ffd61d2d06de7a5a6362450fd9eb65a7 --- /dev/null +++ b/ansible/roles/mongo/files/mongodb-org-3.2.repo @@ -0,0 +1,6 @@ +[mongodb-org-3.2] +name=MongoDB Repository +baseurl=https://repo.mongodb.org/yum/redhat/$releasever/mongodb-org/3.2/x86_64/ +gpgcheck=1 +enabled=1 +gpgkey=https://www.mongodb.org/static/pgp/server-3.2.asc \ No newline at end of file diff --git a/ansible/roles/mongo/files/pip.conf b/ansible/roles/mongo/files/pip.conf new file mode 100755 index 
0000000000000000000000000000000000000000..883b7826625281dd678ec4dcc2b7ee69165bdc85 --- /dev/null +++ b/ansible/roles/mongo/files/pip.conf @@ -0,0 +1,2 @@ +[global] +index-url = https://pypi.tuna.tsinghua.edu.cn/simple \ No newline at end of file diff --git a/ansible/roles/mongo/handlers/main.yml b/ansible/roles/mongo/handlers/main.yml new file mode 100755 index 0000000000000000000000000000000000000000..51fd5e65acc718784e181dbafb6eadc3e3b80900 --- /dev/null +++ b/ansible/roles/mongo/handlers/main.yml @@ -0,0 +1,8 @@ +--- +# Handler for mongod +- name: restart iptables + service: name=iptables state=restarted + +- name: update_cache + become: yes + apt: update_cache=yes \ No newline at end of file diff --git a/ansible/roles/mongo/tasks/common.yml b/ansible/roles/mongo/tasks/common.yml new file mode 100644 index 0000000000000000000000000000000000000000..7041903d44b352d8e295f88f313c595da4669743 --- /dev/null +++ b/ansible/roles/mongo/tasks/common.yml @@ -0,0 +1,48 @@ +- name: Create the mongod user + user: name=mongod comment="MongoD" + +- name: Create the data directory for mongod + file: path={{ mongodb_datadir_prefix }} owner=mongod group=mongod state=directory setype=mongod_var_lib_t recurse=true + +- name: create log directory for mongodb + file: path=/var/log/mongo state=directory owner=mongod group=mongod setype=mongod_log_t recurse=true + +- name: create run directory for mongodb + file: path=/var/run/mongo state=directory owner=mongod group=mongod setype=mongod_var_run_t seuser=system_u recurse=true + +- name: Install the python prerequisite packages + apt: name={{ item }} + with_items: + - python-dev + - python-pip + +- name: Install the latest pymongo package + pip: name=pymongo state=latest use_mirrors=no + + +- name: Install Repository keys + apt_key: keyserver=keyserver.ubuntu.com id=EA312927 + notify: update_cache + +- name: Install MongoDB repository + apt_repository: repo='deb http://repo.mongodb.org/apt/ubuntu xenial/mongodb-org/3.2 multiverse' state=present + notify: update_cache + +- name: Install MongoDB + apt: name={{item}} + with_items: + - mongodb-org=3.2.15 + - mongodb-org-server=3.2.15 + - mongodb-org-shell=3.2.15 + - mongodb-org-mongos=3.2.15 + - mongodb-org-tools=3.2.15 + - daemon + +- name: copy the mongo config file + template: src=mongod.j2 dest=/etc/mongod.conf + +- name: start MongoDB + service: name=mongod state=started + + + diff --git a/ansible/roles/mongo/tasks/config.yml b/ansible/roles/mongo/tasks/config.yml new file mode 100644 index 0000000000000000000000000000000000000000..7db932d9d4616deff1981f211cfbb7ebc8baebaf --- /dev/null +++ b/ansible/roles/mongo/tasks/config.yml @@ -0,0 +1,9 @@ +- name: Create the mongo-metadata directory + file: path=/mongo-metadata state=directory + +- name: Run the mongod command for config + shell: mongod --configsvr --dbpath /mongo-metadata --port 27019 & + async: 15 + poll: 0 + + diff --git a/ansible/roles/mongo/tasks/main.yml b/ansible/roles/mongo/tasks/main.yml new file mode 100755 index 0000000000000000000000000000000000000000..5f5132747b70de81e33764cd5bdf9c3284de213f --- /dev/null +++ b/ansible/roles/mongo/tasks/main.yml @@ -0,0 +1,16 @@ +- name: install zip + become: yes + apt: name=zip state=present + +- include: common.yml + +- include: config.yml + when: config is defined + +- include: query.yml + when: query is defined + +- include: shard.yml + when: shard is defined + +
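The includes above key off whether `config`, `query`, or `shard` is defined for the host; a sketch of applying the role once per cluster function, with illustrative inventory group names not taken from this repo:

```yaml
# Illustrative: one play per MongoDB node type.
- hosts: mongo-config   # hypothetical group of config servers
  become: yes
  roles:
    - {role: mongo, config: true}

- hosts: mongo-query    # hypothetical group of mongos routers
  become: yes
  roles:
    - {role: mongo, query: true}

- hosts: mongo-shard    # hypothetical group of shard members
  become: yes
  roles:
    - {role: mongo, shard: true}
```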
diff --git a/ansible/roles/mongo/tasks/query.yml b/ansible/roles/mongo/tasks/query.yml new file mode 100644 index 0000000000000000000000000000000000000000..7412fa33a0fe543bc380ced92d1af41cb5ffd82b --- /dev/null +++ b/ansible/roles/mongo/tasks/query.yml @@ -0,0 +1,7 @@ +- name: stop mongod on the query server + service: name=mongod state=stopped + +- name: run the query mongodb command + shell: mongos --configdb {{mongo.config_ip}}:27019 & + async: 15 + poll: 0 diff --git a/ansible/roles/mongo/tasks/shard.yml b/ansible/roles/mongo/tasks/shard.yml new file mode 100644 index 0000000000000000000000000000000000000000..e60be876e6f347d477b68d44b8adeffc65faf2dc --- /dev/null +++ b/ansible/roles/mongo/tasks/shard.yml @@ -0,0 +1,10 @@ +- name: copy the template add_shards.j2 + template: src=add_shards.j2 dest=/home/deployer/add_shards.js mode=0777 + +- name: run the template add_shards + become: yes + shell: mongo --host {{mongo.query_ip}} --port 27017 < /home/deployer/add_shards.js + async: 15 + poll: 0 + + diff --git a/ansible/roles/mongo/templates/add_shards.j2 b/ansible/roles/mongo/templates/add_shards.j2 new file mode 100644 index 0000000000000000000000000000000000000000..f387e04b71df3f7fb6ea2c5859752876497ac4de --- /dev/null +++ b/ansible/roles/mongo/templates/add_shards.j2 @@ -0,0 +1,3 @@ +sh.addShard( "{{mongo.shard_ip1}}:27017" ); +use portal; +sh.enableSharding("portal"); \ No newline at end of file diff --git a/ansible/roles/mongo/templates/hosts.j2 b/ansible/roles/mongo/templates/hosts.j2 new file mode 100755 index 0000000000000000000000000000000000000000..8cf742cf74bfe3f3f6f1bc2746fed467cddf345c --- /dev/null +++ b/ansible/roles/mongo/templates/hosts.j2 @@ -0,0 +1,4 @@ +127.0.0.1 localhost +{% for host in groups['all'] %} +{{ hostvars[host]['ansible_' + iface].ipv4.address }} {{ host }} +{% endfor %} diff --git a/ansible/roles/mongo/templates/iptables.j2 b/ansible/roles/mongo/templates/iptables.j2 new file mode 100755 index 0000000000000000000000000000000000000000..d0c6528df96cea2675d18fcbab156c6320f3781b --- /dev/null +++ b/ansible/roles/mongo/templates/iptables.j2 @@ -0,0 +1,27 @@ +# Firewall configuration written by system-config-firewall +# Manual customization of this file is not recommended. +*filter +:INPUT ACCEPT [0:0] +:FORWARD ACCEPT [0:0] +:OUTPUT ACCEPT [0:0] +{% if 'mongoc_servers' in group_names %} +-A INPUT -p tcp --dport 7777 -j ACCEPT +{% endif %} +{% if 'mongos_servers' in group_names %} +-A INPUT -p tcp --dport 8888 -j ACCEPT +{% endif %} +{% if 'mongo_servers' in group_names %} +{% for host in groups['mongo_servers'] %} +-A INPUT -p tcp --dport {{ hostvars[host]['mongod_port'] }} -j ACCEPT +{% endfor %} +{% endif %} +-A INPUT -m state --state ESTABLISHED,RELATED -j ACCEPT +-A INPUT -p icmp -j ACCEPT +-A INPUT -i lo -j ACCEPT +-A INPUT -m state --state NEW -m tcp -p tcp --dport 22 -j ACCEPT +-A INPUT -j REJECT --reject-with icmp-host-prohibited +-A FORWARD -j REJECT --reject-with icmp-host-prohibited +COMMIT + + +
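Note that iptables.j2 reads a per-host `mongod_port` for every member of `mongo_servers`, and hosts.j2 expects an `iface` variable naming the interface to publish; a hypothetical host_vars fragment that would satisfy both templates:

```yaml
# host_vars/mongo-shard-1.yml (illustrative host name and values)
mongod_port: 27017   # port the iptables template opens for this host
iface: eth0          # interface whose IPv4 address hosts.j2 publishes
```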
diff --git a/ansible/roles/mongo/templates/mongod.j2 b/ansible/roles/mongo/templates/mongod.j2 new file mode 100644 index 0000000000000000000000000000000000000000..7632d016ec0e54626d8e39325de1923c5c88baf2 --- /dev/null +++ b/ansible/roles/mongo/templates/mongod.j2 @@ -0,0 +1,41 @@ +# mongod.conf + +# for documentation of all options, see: +# http://docs.mongodb.org/manual/reference/configuration-options/ + +# Where and how to store data. +storage: + dbPath: /var/lib/mongodb + journal: + enabled: true +# engine: +# mmapv1: +# wiredTiger: + +# where to write logging data. +systemLog: + destination: file + logAppend: true + path: /var/log/mongodb/mongod.log + +# network interfaces +net: + port: 27017 + bindIp: 0.0.0.0 + + +#processManagement: + +#security: + +#operationProfiling: + +#replication: + +#sharding: + +## Enterprise-Only Options: + +#auditLog: + +#snmp: \ No newline at end of file diff --git a/ansible/roles/nginx/README.md b/ansible/roles/nginx/README.md new file mode 100755 index 0000000000000000000000000000000000000000..b7e0676ac3cd493358378d8a688c82b8b4e21633 --- /dev/null +++ b/ansible/roles/nginx/README.md @@ -0,0 +1,367 @@ +nginx +===== + +This role installs and configures the nginx web server. The user can specify +any http configuration parameters they wish to apply to their site. Any number of +sites can be added with configurations of your choice. + +[](https://travis-ci.org/jdauphant/ansible-role-nginx) +[](https://galaxy.ansible.com/jdauphant/nginx/) + +Requirements +------------ + +This role requires Ansible 2.0 or higher and platform requirements are listed +in the metadata file. (Some older versions of the role support Ansible 1.4.) +For FreeBSD a working pkgng setup is required (see: https://www.freebsd.org/doc/handbook/pkgng-intro.html ) + +Install +------- + +```sh +ansible-galaxy install jdauphant.nginx +``` + +Role Variables +-------------- + +The variables that can be passed to this role and a brief description about +them are as follows. (For all variables, take a look at [defaults/main.yml](defaults/main.yml)) + +```yaml +# The user to run nginx +nginx_user: "www-data" + +# A list of directives for the events section. +nginx_events_params: + - worker_connections 512 + - debug_connection 127.0.0.1 + - use epoll + - multi_accept on + +# A list of hashes that define the servers for nginx, +# as with http parameters. Any valid server parameters +# can be defined here. +nginx_sites: + default: + - listen 80 + - server_name _ + - root "/usr/share/nginx/html" + - index index.html + foo: + - listen 8080 + - server_name localhost + - root "/tmp/site1" + - location / { try_files $uri $uri/ /index.html; } + - location /images/ { try_files $uri $uri/ /index.html; } + bar: + - listen 9090 + - server_name ansible + - root "/tmp/site2" + - location / { try_files $uri $uri/ /index.html; } + - location /images/ { + try_files $uri $uri/ /index.html; + allow 127.0.0.1; + deny all; + } + +# A list of hashes that define additional configuration +nginx_configs: + proxy: + - proxy_set_header X-Real-IP $remote_addr + - proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for + upstream: + - upstream foo { server 127.0.0.1:8080 weight=10; } + geo: + - geo $local { + default 0; + 127.0.0.1 1; + } + gzip: + - gzip on + - gzip_disable msie6 + +# A list of hashes that define configuration snippets +nginx_snippets: + error_pages: + - error_page 500 /http_errors/500.html + - error_page 502 /http_errors/502.html + - error_page 503 /http_errors/503.html + - error_page 504 /http_errors/504.html + +# A list of hashes that define user/password files +nginx_auth_basic_files: + demo: + - foo:$apr1$mEJqnFmy$zioG2q1iDWvRxbHuNepIh0 # foo:demo , generated by : htpasswd -nb foo demo + - bar:$apr1$H2GihkSo$PwBeV8cVWFFQlnAJtvVCQ.
# bar:demo , generated by : htpasswd -nb bar demo + +``` + +Examples +======== + +## 1) Install nginx with HTTP directives of choice, but with no sites configured and no additional configuration: + +```yaml +- hosts: all + roles: + - {role: nginx, + nginx_http_params: ["sendfile on", "access_log /var/log/nginx/access.log"] + } +``` + +## 2) Install nginx with different HTTP directives than in the previous example, but no +sites configured and no additional configuration. + +```yaml +- hosts: all + roles: + - {role: nginx, + nginx_http_params: ["tcp_nodelay on", "error_log /var/log/nginx/error.log"]} +``` + +Note: Please make sure the HTTP directives passed are valid, as this role +won't check for the validity of the directives. See the nginx documentation +for details. + +## 3) Install nginx and add a site to the configuration. + +```yaml +- hosts: all + + roles: + - role: nginx + nginx_http_params: + - sendfile "on" + - access_log "/var/log/nginx/access.log" + nginx_sites: + bar: + - listen 8080 + - location / { try_files $uri $uri/ /index.html; } + - location /images/ { try_files $uri $uri/ /index.html; } + nginx_configs: + proxy: + - proxy_set_header X-Real-IP $remote_addr + - proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for +``` + +## 4) Install nginx and add extra variables to default config + +```yaml +- hosts: all + vars: + - my_extra_params: + - client_max_body_size 200M +# retain defaults and add additional `client_max_body_size` param + roles: + - role: jdauphant.nginx + nginx_http_params: "{{ nginx_http_default_params + my_extra_params }}" +``` + +Note: Each site added is represented by a list of hashes, and the configurations +generated are populated in /etc/nginx/sites-available/ and linked from /etc/nginx/sites-enabled/ to /etc/nginx/sites-available/. + +The file name for the specific site configuration is specified in the hash +with the key "file_name"; any valid server directives can be added to the hash. +Additional configurations are created in /etc/nginx/conf.d/
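The defaults (see defaults/main.yml later in this diff) also expose `nginx_remove_sites` and `nginx_remove_configs` for retiring entries the role previously created; a small sketch with illustrative names:

```yaml
- hosts: all
  roles:
    - role: nginx
      nginx_remove_sites:
        - legacy_site   # deletes sites-available/legacy_site.conf and its sites-enabled link
      nginx_remove_configs:
        - old_proxy     # deletes conf.d/old_proxy.conf
```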
+## 5) Install Nginx, add 2 sites (different method) and add additional configuration + +```yaml +--- +- hosts: all + roles: + - role: nginx + nginx_http_params: + - sendfile on + - access_log /var/log/nginx/access.log + nginx_sites: + foo: + - listen 8080 + - server_name localhost + - root /tmp/site1 + - location / { try_files $uri $uri/ /index.html; } + - location /images/ { try_files $uri $uri/ /index.html; } + bar: + - listen 9090 + - server_name ansible + - root /tmp/site2 + - location / { try_files $uri $uri/ /index.html; } + - location /images/ { try_files $uri $uri/ /index.html; } + nginx_configs: + proxy: + - proxy_set_header X-Real-IP $remote_addr + - proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for +``` + +## 6) Install Nginx, add 2 sites, add additional configuration and an upstream configuration block + +```yaml +--- +- hosts: all + roles: + - role: nginx + nginx_error_log_level: info + nginx_http_params: + - sendfile on + - access_log /var/log/nginx/access.log + nginx_sites: + foo: + - listen 8080 + - server_name localhost + - root /tmp/site1 + - location / { try_files $uri $uri/ /index.html; } + - location /images/ { try_files $uri $uri/ /index.html; } + bar: + - listen 9090 + - server_name ansible + - root /tmp/site2 + - if ( $host = example.com ) { rewrite ^(.*)$ http://www.example.com$1 permanent; } + - location / { + try_files $uri $uri/ /index.html; + auth_basic "Restricted"; + auth_basic_user_file auth_basic/demo; + } + - location /images/ { try_files $uri $uri/ /index.html; } + nginx_configs: + proxy: + - proxy_set_header X-Real-IP $remote_addr + - proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for + upstream: + # Results in: + # upstream foo_backend { + # server 127.0.0.1:8080 weight=10; + # } + - upstream foo_backend { server 127.0.0.1:8080 weight=10; } + nginx_auth_basic_files: + demo: + - foo:$apr1$mEJqnFmy$zioG2q1iDWvRxbHuNepIh0 # foo:demo , generated by : htpasswd -nb foo demo + - bar:$apr1$H2GihkSo$PwBeV8cVWFFQlnAJtvVCQ. # bar:demo , generated by : htpasswd -nb bar demo +``` + +## 7) Install Nginx, add a site and use special yaml syntax to make the location blocks multiline for clarity + +```yaml +--- +- hosts: all + roles: + - role: nginx + nginx_http_params: + - sendfile on + - access_log /var/log/nginx/access.log + nginx_sites: + foo: + - listen 443 ssl + - server_name foo.example.com + - set $myhost foo.example.com + - | + location / { + proxy_set_header Host foo.example.com; + } + - | + location ~ /v2/users/.+?/organizations { + if ($request_method = PUT) { + set $myhost bar.example.com; + } + if ($request_method = DELETE) { + set $myhost bar.example.com; + } + proxy_set_header Host $myhost; + } +``` +## 8) Example to use this role with my ssl-certs role to generate or copy SSL certificates ( https://galaxy.ansible.com/list#/roles/3115 ) +```yaml + - hosts: all + roles: + - jdauphant.ssl-certs + - role: jdauphant.nginx + nginx_configs: + ssl: + - ssl_certificate_key {{ssl_certs_privkey_path}} + - ssl_certificate {{ssl_certs_cert_path}} + nginx_sites: + default: + - listen 443 ssl + - server_name _ + - root "/usr/share/nginx/html" + - index index.html +```
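Root- and events-level directives from the role defaults can be overridden the same way; a brief sketch, with values chosen purely for illustration:

```yaml
- hosts: all
  roles:
    - role: nginx
      nginx_worker_rlimit_nofile: 4096   # default is 1024
      nginx_events_params:
        - worker_connections 1024
        - multi_accept on
```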
## 9) Site configuration using a custom template. +Instead of defining a site config file using a list of attributes, +you may use a hash/dictionary that includes the filename of an alternate template. +Additional values are accessible within the template via the `item.value` variable. +```yaml +- hosts: all + + roles: + - role: nginx + nginx_sites: + custom_bar: + template: custom_bar.conf.j2 + server_name: custom_bar.example.com +``` +Custom template: custom_bar.conf.j2: +```handlebars +# {{ ansible_managed }} +upstream backend { + server 10.0.0.101; +} +server { + server_name {{ item.value.server_name }}; + location / { + proxy_pass http://backend; + } +} +``` +Using a custom template allows for unlimited flexibility in configuring the site config file. +This example demonstrates the common practice of configuring a site server block +in the same file as its complementary upstream block. +If you use this option: +* _The hash **must** include a `template:` value, or the configuration task will fail._ +* _This role cannot check the validity of your custom template. +If you use this method, the conf file formatting provided by this role is unavailable, +and it is up to you to provide a template with valid content and formatting for NGINX._ + +## 10) Install Nginx, add 2 sites, use snippets to configure access controls +```yaml +--- +- hosts: all + roles: + - role: nginx + nginx_http_params: + - sendfile on + - access_log /var/log/nginx/access.log + nginx_snippets: + accesslist_devel: + - allow 192.168.0.0/24 + - deny all + nginx_sites: + foo: + - listen 8080 + - server_name localhost + - root /tmp/site1 + - include snippets/accesslist_devel.conf + - location / { try_files $uri $uri/ /index.html; } + - location /images/ { try_files $uri $uri/ /index.html; } + bar: + - listen 9090 + - server_name ansible + - root /tmp/site2 + - location / { try_files $uri $uri/ /index.html; } + - location /images/ { try_files $uri $uri/ /index.html; } +``` + +Dependencies +------------ + +None + +License +------- +BSD + +Author Information +------------------ + +- Original : Benno Joy +- Modified by : DAUPHANT Julien diff --git a/ansible/roles/nginx/defaults/main.yml b/ansible/roles/nginx/defaults/main.yml new file mode 100755 index 0000000000000000000000000000000000000000..28bb8a0296cf2a441ab207b659a8688319f78b62 --- /dev/null +++ b/ansible/roles/nginx/defaults/main.yml @@ -0,0 +1,52 @@ +--- +nginx_pkgs: + - nginx + +nginx_install_epel_repo: True + +nginx_official_repo: False +nginx_official_repo_mainline: False + +keep_only_specified: False + +nginx_installation_type: "packages" +nginx_binary_name: "nginx" +nginx_service_name: "{{nginx_binary_name}}" +nginx_conf_dir: "{% if ansible_os_family == 'FreeBSD' %}/usr/local/etc/nginx{% else %}/etc/nginx{% endif %}" + +nginx_user: "{% if ansible_os_family == 'RedHat' or ansible_os_family == 'Suse' %}nginx{% elif ansible_os_family == 'Debian' %}www-data{% elif ansible_os_family == 'FreeBSD' %}www{% endif %}" +nginx_group: "{{nginx_user}}" + +nginx_pid_file: '/var/run/{{nginx_service_name}}.pid' + +nginx_worker_processes: "{% if ansible_processor_vcpus is defined %}{{ ansible_processor_vcpus }}{% else %}auto{% endif %}" +nginx_worker_rlimit_nofile: 1024 +nginx_log_dir: "/var/log/nginx" +nginx_error_log_level: "error" + +nginx_extra_root_params: [] +nginx_events_params: + - worker_connections {% if nginx_max_clients is defined %}{{nginx_max_clients}}{% else %}512{% endif %} + +nginx_http_params: "{{ nginx_http_default_params }}" + +nginx_stream_params: [] + +nginx_sites: + default: + - listen 80 default_server + - server_name _ + - root "{% if ansible_os_family == 'FreeBSD' %}/usr/local/www/nginx-dist{% else %}/usr/share/nginx/html{% endif %}" + - index index.html +nginx_remove_sites: [] + +nginx_configs: {} +nginx_snippets: {}
+nginx_stream_configs: {} +nginx_remove_configs: [] +nginx_remove_snippets: [] + +nginx_auth_basic_files: {} +nginx_remove_auth_basic_files: [] + +nginx_daemon_mode: "on" diff --git a/ansible/roles/nginx/handlers/main.yml b/ansible/roles/nginx/handlers/main.yml new file mode 100755 index 0000000000000000000000000000000000000000..af8aa2bef1871d977193d5aef1bdfce24bd1d368 --- /dev/null +++ b/ansible/roles/nginx/handlers/main.yml @@ -0,0 +1,30 @@ +--- + +- name: restart nginx + debug: msg="checking config first" + changed_when: True + notify: + - check nginx configuration + - restart nginx - after config check + +- name: reload nginx + debug: msg="checking config first" + changed_when: True + notify: + - check nginx configuration + - reload nginx - after config check + +- name: check nginx configuration + command: "{{ nginx_binary_name }} -t" + register: result + changed_when: "result.rc != 0" + check_mode: no + when: nginx_installation_type in nginx_installation_types_using_service + +- name: restart nginx - after config check + service: name={{ nginx_service_name }} state=restarted + when: nginx_installation_type in nginx_installation_types_using_service and nginx_daemon_mode == "on" + +- name: reload nginx - after config check + service: name={{ nginx_service_name }} state=reloaded + when: nginx_installation_type in nginx_installation_types_using_service and nginx_daemon_mode == "on" diff --git a/ansible/roles/nginx/meta/main.yml b/ansible/roles/nginx/meta/main.yml new file mode 100755 index 0000000000000000000000000000000000000000..a8a829a3531377a6eed0b1ac2fecfec93e9b169d --- /dev/null +++ b/ansible/roles/nginx/meta/main.yml @@ -0,0 +1,32 @@ +--- +galaxy_info: + author: "DAUPHANT Julien" + license: BSD + min_ansible_version: 2.2 + platforms: + - name: EL + versions: + - all + - name: Fedora + versions: + - all + - name: opensuse + versions: + - all + - name: Ubuntu + versions: + - all + - name: Debian + versions: + - all + - name: FreeBSD + versions: + - 10.0 + - 10.1 + - 10.2 + - 10.3 + - 11.0 + galaxy_tags: + - web +allow_duplicates: yes +dependencies: [] diff --git a/ansible/roles/nginx/tasks/configuration.yml b/ansible/roles/nginx/tasks/configuration.yml new file mode 100755 index 0000000000000000000000000000000000000000..7067329ce7aab521bdc37778b8d371413f3204c2 --- /dev/null +++ b/ansible/roles/nginx/tasks/configuration.yml @@ -0,0 +1,60 @@ +--- +- name: Copy the nginx configuration file + template: + src: nginx.conf.j2 + dest: "{{ nginx_conf_dir }}/nginx.conf" + notify: + - restart nginx + +- name: Ensure auth_basic files created + template: + src: auth_basic.j2 + dest: "{{ nginx_conf_dir }}/auth_basic/{{ item.key }}" + owner: root + group: "{{ nginx_group }}" + mode: 0750 + with_dict: "{{ nginx_auth_basic_files }}" + +- name: Create the configurations for sites + template: + src: "{{ item.value.template | default('site.conf.j2') }}" + dest: "{{ nginx_conf_dir }}/sites-available/{{ item.key }}.conf" + with_dict: "{{ nginx_sites }}" + when: item.key not in nginx_remove_sites + notify: + - reload nginx + +- name: Create links for sites-enabled + file: + state: link + src: "{{ nginx_conf_dir }}/sites-available/{{ item.key }}.conf" + dest: "{{ nginx_conf_dir }}/sites-enabled/{{ item.key }}.conf" + with_dict: "{{ nginx_sites }}" + when: item.key not in nginx_remove_sites + notify: + - reload nginx + +- name: Create the configurations for independent config file + template: + src: config.conf.j2 + dest: "{{ nginx_conf_dir }}/conf.d/{{ item.key }}.conf" + with_dict: "{{ nginx_configs }}" + 
notify: + - reload nginx + +- name: Create configuration snippets + template: + src: config.conf.j2 + dest: "{{ nginx_conf_dir }}/snippets/{{ item.key }}.conf" + with_dict: "{{ nginx_snippets }}" + notify: + - reload nginx + +- name: Create the configurations for independent config file for streams + template: + src: config_stream.conf.j2 + dest: "{{ nginx_conf_dir }}/conf.d/stream/{{ item.key }}.conf" + with_dict: "{{ nginx_stream_configs }}" + notify: + - reload nginx + when: nginx_stream_params or nginx_stream_configs diff --git a/ansible/roles/nginx/tasks/ensure-dirs.yml b/ansible/roles/nginx/tasks/ensure-dirs.yml new file mode 100755 index 0000000000000000000000000000000000000000..0c5baca8a9fcd98a48a178b4b56dc01e417dca0a --- /dev/null +++ b/ansible/roles/nginx/tasks/ensure-dirs.yml @@ -0,0 +1,25 @@ +--- +- name: Create the directories for site specific configurations + file: + path: "{{nginx_conf_dir}}/{{ item }}" + state: directory + owner: root + group: "{{nginx_group}}" + mode: 0755 + with_items: + - "sites-available" + - "sites-enabled" + - "auth_basic" + - "conf.d" + - "conf.d/stream" + - "snippets" + - "modules-available" + - "modules-enabled" + +- name: Ensure log directory exist + file: + path: "{{ nginx_log_dir }}" + state: directory + owner: "{{nginx_user}}" + group: "{{nginx_group}}" + mode: 0755 diff --git a/ansible/roles/nginx/tasks/installation.packages.yml b/ansible/roles/nginx/tasks/installation.packages.yml new file mode 100755 index 0000000000000000000000000000000000000000..73367388c74016be6989c05231618607a7862174 --- /dev/null +++ b/ansible/roles/nginx/tasks/installation.packages.yml @@ -0,0 +1,15 @@ +--- +- name: Install the epel packages for EL distributions + package: name=epel-release state=present + when: nginx_is_el|bool and nginx_install_epel_repo|bool + +- name: Install the nginx packages from official repo for EL distributions + yum: name={{ item }} state=present enablerepo="nginx" + with_items: "{{ nginx_pkgs }}" + when: nginx_is_el|bool and nginx_official_repo + +- name: Install the nginx packages for all other distributions + package: name={{ item }} state=present + with_items: "{{ nginx_pkgs }}" + environment: "{{ nginx_env }}" + when: not nginx_is_el|bool or not nginx_official_repo diff --git a/ansible/roles/nginx/tasks/main.yml b/ansible/roles/nginx/tasks/main.yml new file mode 100755 index 0000000000000000000000000000000000000000..870b6388babfa6844bf9e2143955dd9bd38e5e5d --- /dev/null +++ b/ansible/roles/nginx/tasks/main.yml @@ -0,0 +1,34 @@ +--- +- include: selinux.yml + when: ansible_selinux and ansible_selinux.status == "enabled" + tags: [packages, selinux, nginx] + +- include: nginx-official-repo.yml + when: nginx_official_repo == True + tags: [packages, nginx] + +- include: installation.packages.yml + when: nginx_installation_type == "packages" + tags: [packages, nginx] + +- include: ensure-dirs.yml + tags: [configuration, nginx] + +- include: remove-defaults.yml + when: not keep_only_specified + tags: [configuration, nginx] + +- include: remove-extras.yml + when: keep_only_specified + tags: [configuration, nginx] + +- include: remove-unwanted.yml + tags: [configuration, nginx] + +- include: configuration.yml + tags: [configuration, nginx] + +- name: Start the nginx service + service: name={{ nginx_service_name }} state=started enabled=yes + when: nginx_installation_type in nginx_installation_types_using_service and nginx_daemon_mode == "on" + tags: [service, nginx] diff --git a/ansible/roles/nginx/tasks/nginx-official-repo.yml 
b/ansible/roles/nginx/tasks/nginx-official-repo.yml new file mode 100755 index 0000000000000000000000000000000000000000..8876c77b234f6fc44feebc6908a38f24e8115f22 --- /dev/null +++ b/ansible/roles/nginx/tasks/nginx-official-repo.yml @@ -0,0 +1,34 @@ +--- +- name: Ensure APT official nginx key + apt_key: url=http://nginx.org/keys/nginx_signing.key + environment: "{{ nginx_env }}" + when: ansible_os_family == 'Debian' + +- name: Ensure APT official nginx repository + apt_repository: repo="deb http://nginx.org/packages/{{ ansible_distribution|lower }}/ {{ ansible_distribution_release }} nginx" + environment: "{{ nginx_env }}" + when: ansible_os_family == 'Debian' and not nginx_official_repo_mainline + +- name: Ensure APT official nginx repository (mainline) + apt_repository: repo="deb http://nginx.org/packages/mainline/{{ ansible_distribution|lower }}/ {{ ansible_distribution_release }} nginx" + environment: "{{ nginx_env }}" + when: ansible_os_family == 'Debian' and nginx_official_repo_mainline + +- name: Ensure RPM official nginx key + rpm_key: key=http://nginx.org/keys/nginx_signing.key + environment: "{{ nginx_env }}" + when: ansible_os_family == 'RedHat' + +- name: Ensure YUM official nginx repository + template: src=nginx.repo.j2 dest=/etc/yum.repos.d/nginx.repo + when: ansible_os_family == 'RedHat' + +- name: Ensure zypper official nginx repository + zypper_repository: repo="http://nginx.org/packages/sles/12" name="nginx" disable_gpg_check=yes + environment: "{{ nginx_env }}" + when: ansible_distribution == 'SLES' and ansible_distribution_version == '12' and not nginx_official_repo_mainline + +- name: Ensure zypper official nginx repository (mainline) + zypper_repository: repo="http://nginx.org/packages/mainline/sles/12" name="nginx" disable_gpg_check=yes + environment: "{{ nginx_env }}" + when: ansible_distribution == 'SLES' and ansible_distribution_version == '12' and nginx_official_repo_mainline diff --git a/ansible/roles/nginx/tasks/remove-defaults.yml b/ansible/roles/nginx/tasks/remove-defaults.yml new file mode 100755 index 0000000000000000000000000000000000000000..860e2c4a1d1308a1122988136d805607afb4da9e --- /dev/null +++ b/ansible/roles/nginx/tasks/remove-defaults.yml @@ -0,0 +1,16 @@ +--- +- name: Disable the default site + file: + path: "{{nginx_conf_dir}}/sites-enabled/default" + state: absent + notify: + - reload nginx + +- name: Remove the default configuration + file: + path: "{{nginx_conf_dir}}/conf.d/default.conf" + state: absent + when: > + 'default' not in nginx_configs.keys() + notify: + - reload nginx diff --git a/ansible/roles/nginx/tasks/remove-extras.yml b/ansible/roles/nginx/tasks/remove-extras.yml new file mode 100755 index 0000000000000000000000000000000000000000..f971111deb3f5cdb9b018face51c34bd4f617295 --- /dev/null +++ b/ansible/roles/nginx/tasks/remove-extras.yml @@ -0,0 +1,30 @@ +--- +- name: Find enabled sites + shell: ls -1 {{nginx_conf_dir}}/sites-enabled || true + register: enabled_sites + changed_when: False + +- name: Disable unmanaged sites + file: + path: "{{nginx_conf_dir}}/sites-enabled/{{ item }}" + state: absent + with_items: "{{ enabled_sites.stdout_lines | default([]) }}" + # 'item.conf' => 'item' + when: item[:-5] not in nginx_sites.keys() + notify: + - reload nginx + +- name: Find config files + shell: find {{nginx_conf_dir}}/conf.d -maxdepth 1 -type f -name '*.conf' -exec basename {} \; + register: config_files + changed_when: False + +- name: Remove unmanaged config files + file: + name: "{{nginx_conf_dir}}/conf.d/{{ item }}" + state: 
absent + with_items: "{{ config_files.stdout_lines | default([]) }}" + # 'item.conf' => 'item' + when: item[:-5] not in nginx_configs.keys() + notify: + - reload nginx diff --git a/ansible/roles/nginx/tasks/remove-unwanted.yml b/ansible/roles/nginx/tasks/remove-unwanted.yml new file mode 100755 index 0000000000000000000000000000000000000000..686472275668216bbe2de67460e8b4da2753d7f5 --- /dev/null +++ b/ansible/roles/nginx/tasks/remove-unwanted.yml @@ -0,0 +1,34 @@ +--- +- name: Remove unwanted sites + file: + path: "{{nginx_conf_dir}}/{{ item[0] }}/{{ item[1] }}.conf" + state: absent + with_nested: + - ['sites-enabled', 'sites-available'] + - "{{ nginx_remove_sites }}" + notify: + - reload nginx + +- name: Remove unwanted conf + file: + path: "{{nginx_conf_dir}}/conf.d/{{ item }}.conf" + state: absent + with_items: "{{ nginx_remove_configs }}" + notify: + - reload nginx + +- name: Remove unwanted snippets + file: + path: "{{ nginx_conf_dir }}/snippets/{{ item }}.conf" + state: absent + with_items: "{{ nginx_remove_snippets }}" + notify: + - reload nginx + +- name: Remove unwanted auth_basic_files + file: + path: "{{nginx_conf_dir}}/auth_basic/{{ item }}" + state: absent + with_items: "{{ nginx_remove_auth_basic_files }}" + notify: + - reload nginx diff --git a/ansible/roles/nginx/tasks/selinux.yml b/ansible/roles/nginx/tasks/selinux.yml new file mode 100755 index 0000000000000000000000000000000000000000..7f385d7b5e01137863e316166aaf75d1666decf3 --- /dev/null +++ b/ansible/roles/nginx/tasks/selinux.yml @@ -0,0 +1,17 @@ +--- +- name: Install the selinux python module + package: name={{ item }} state=present + with_items: + - libselinux-python + - libsemanage-python + when: ansible_os_family == "RedHat" + +- name: Install the selinux python module + package: name={{ item }} state=present + with_items: + - python-selinux + - python-semanage + when: ansible_os_family == "Debian" + +- name: Set SELinux boolean to allow nginx to set rlimit + seboolean: name=httpd_setrlimit state=yes persistent=yes diff --git a/ansible/roles/nginx/templates/auth_basic.j2 b/ansible/roles/nginx/templates/auth_basic.j2 new file mode 100755 index 0000000000000000000000000000000000000000..912117466a130fefebf482ac40279295ab0595c9 --- /dev/null +++ b/ansible/roles/nginx/templates/auth_basic.j2 @@ -0,0 +1,5 @@ +#{{ ansible_managed }} + +{% for v in item.value %} +{{ v }} +{% endfor %} diff --git a/ansible/roles/nginx/templates/config.conf.j2 b/ansible/roles/nginx/templates/config.conf.j2 new file mode 100755 index 0000000000000000000000000000000000000000..bcf36419d8077a6b747f1f2b864e22959aa7defd --- /dev/null +++ b/ansible/roles/nginx/templates/config.conf.j2 @@ -0,0 +1,9 @@ +#{{ ansible_managed }} + +{% for v in item.value %} +{% if v.find('\n') != -1 %} +{{v}} +{% else %} +{% if v != "" %}{{ v.replace(";",";\n ").replace(" {"," {\n ").replace(" }"," \n}\n") }}{% if v.find('{') == -1%}; +{% endif %}{% endif %}{% endif %} +{% endfor %} diff --git a/ansible/roles/nginx/templates/config_stream.conf.j2 b/ansible/roles/nginx/templates/config_stream.conf.j2 new file mode 100755 index 0000000000000000000000000000000000000000..bcf36419d8077a6b747f1f2b864e22959aa7defd --- /dev/null +++ b/ansible/roles/nginx/templates/config_stream.conf.j2 @@ -0,0 +1,9 @@ +#{{ ansible_managed }} + +{% for v in item.value %} +{% if v.find('\n') != -1 %} +{{v}} +{% else %} +{% if v != "" %}{{ v.replace(";",";\n ").replace(" {"," {\n ").replace(" }"," \n}\n") }}{% if v.find('{') == -1%}; +{% endif %}{% endif %}{% endif %} +{% endfor %} diff
--git a/ansible/roles/nginx/templates/nginx.conf.j2 b/ansible/roles/nginx/templates/nginx.conf.j2 new file mode 100755 index 0000000000000000000000000000000000000000..368f840432664805a1beb3558c17f4ce71c0ea70 --- /dev/null +++ b/ansible/roles/nginx/templates/nginx.conf.j2 @@ -0,0 +1,52 @@ +#{{ ansible_managed }} +user {{ nginx_user }} {{ nginx_group }}; + +worker_processes {{ nginx_worker_processes }}; + +{% if nginx_pid_file %} +pid {{ nginx_pid_file }}; +{% endif %} + +worker_rlimit_nofile {{ nginx_worker_rlimit_nofile }}; + +include {{ nginx_conf_dir }}/modules-enabled/*.conf; + +{% if nginx_extra_root_params is defined and nginx_extra_root_params is iterable %} +{% for line in nginx_extra_root_params %} +{{ line }}; +{% endfor %} +{% endif %} + +events { +{% for v in nginx_events_params %} + {{ v }}; +{% endfor %} +} + + +http { + + include {{ nginx_conf_dir }}/mime.types; + default_type application/octet-stream; +{% for v in nginx_http_params %} + {{ v }}; +{% endfor %} + + include {{ nginx_conf_dir }}/conf.d/*.conf; + include {{ nginx_conf_dir }}/sites-enabled/*; +} + +{% if nginx_stream_params or nginx_stream_configs %} +stream { + +{% for v in nginx_stream_params %} + {{ v }}; +{% endfor %} + + include {{ nginx_conf_dir }}/conf.d/stream/*.conf; +} +{% endif %} + +{% if nginx_daemon_mode == "off" %} +daemon off; +{% endif %} diff --git a/ansible/roles/nginx/templates/nginx.repo.j2 b/ansible/roles/nginx/templates/nginx.repo.j2 new file mode 100755 index 0000000000000000000000000000000000000000..ff02a4bef2318c30b51daa09e717c19db1206dc4 --- /dev/null +++ b/ansible/roles/nginx/templates/nginx.repo.j2 @@ -0,0 +1,8 @@ +[nginx] +name=nginx repo +{% if nginx_official_repo_mainline %} +baseurl=http://nginx.org/packages/mainline/{{"rhel" if ansible_distribution == "RedHat" else "centos"}}/{{ansible_distribution_version.split('.')[0]}}/{{ansible_architecture}}/ +{% else %} +baseurl=http://nginx.org/packages/{{"rhel" if ansible_distribution == "RedHat" else "centos"}}/{{ansible_distribution_version.split('.')[0]}}/{{ansible_architecture}}/ +{% endif %} +enabled=1 diff --git a/ansible/roles/nginx/templates/site.conf.j2 b/ansible/roles/nginx/templates/site.conf.j2 new file mode 100755 index 0000000000000000000000000000000000000000..74611d03bfb9f4abae1fb8d1bee32dff4ec88c1c --- /dev/null +++ b/ansible/roles/nginx/templates/site.conf.j2 @@ -0,0 +1,11 @@ +#{{ ansible_managed }} + +server { +{% for v in item.value %} +{% if v.find('\n') != -1 %} + {{v.replace("\n","\n ")}} +{% else %} + {% if v != "" %}{{ v.replace(";",";\n ").replace(" {"," {\n ").replace(" }"," \n }\n") }}{% if v.find('{') == -1%}; +{% endif %}{% endif %}{% endif %} +{% endfor %} +} diff --git a/ansible/roles/nginx/test/custom_bar.conf.j2 b/ansible/roles/nginx/test/custom_bar.conf.j2 new file mode 100755 index 0000000000000000000000000000000000000000..d6b05ff7afd5c73d12dd4fbea9971710392cd89c --- /dev/null +++ b/ansible/roles/nginx/test/custom_bar.conf.j2 @@ -0,0 +1,10 @@ +# {{ ansible_managed }} +upstream backend { + server 10.0.0.101; +} +server { + server_name {{ item.value.server_name }}; + location / { + proxy_pass http://backend; + } +} diff --git a/ansible/roles/nginx/test/example-vars.yml b/ansible/roles/nginx/test/example-vars.yml new file mode 100755 index 0000000000000000000000000000000000000000..640a651988fb43273698e441422efe4790be51f7 --- /dev/null +++ b/ansible/roles/nginx/test/example-vars.yml @@ -0,0 +1,86 @@ +--- +# The user to run nginx +nginx_user: "www-data" + +nginx_hhvm: | + add_header X-backend hhvm; + try_files 
$uri $uri/ /index.php?$args; + location ~ \.(hh|php)$ { + try_files $uri =404; + fastcgi_pass unix:/var/run/hhvm/sock; + fastcgi_index index.php; + fastcgi_param SCRIPT_FILENAME $document_root$fastcgi_script_name; + include fastcgi_params; + } + +# A list of directives for the events section. +nginx_events_params: + - worker_connections 512 + +# A list of hashes that define the servers for nginx, +# as with http parameters. Any valid server parameters +# can be defined here. + +nginx_sites: + default: + - listen 80 + - server_name _ + - root "/usr/share/nginx/html" + - index index.html + foo: + - listen 8080 + - server_name localhost + - root "/tmp/site1" + - location / { try_files $uri $uri/ /index.html; } + - location /images/ { try_files $uri $uri/ /index.html; } + bar: + - listen 9090 + - server_name ansible + - root "/tmp/site2" + - location / { try_files $uri $uri/ /index.html; } + - location /images/ { + try_files $uri $uri/ /index.html; + allow 127.0.0.1; + deny all; + } + - auth_basic "Restricted" + - auth_basic_user_file auth_basic/demo + hhvm_test: + - | + listen 80; + server_name test_hhvm; + root "/tmp/hhvm"; + {{nginx_hhvm}} + custom_bar: + template: custom_bar.conf.j2 + server_name: bar.example.com + +# A list of hashes that define additional configuration +nginx_configs: + proxy: + - proxy_set_header X-Real-IP $remote_addr + - proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for + upstream: + - upstream foo { server 127.0.0.1:8080 weight=10; } + geo: + - geo $local { + default 0; + 127.0.0.1 1; + } + gzip: + - gzip on + - gzip_disable msie6 + +# A list of hashes that define configuration snippets +nginx_snippets: + error_pages: + - error_page 500 /http_errors/500.html + - error_page 502 /http_errors/502.html + - error_page 503 /http_errors/503.html + - error_page 504 /http_errors/504.html + +# A list of hashes that define user/password files +nginx_auth_basic_files: + demo: + - foo:$apr1$mEJqnFmy$zioG2q1iDWvRxbHuNepIh0 # foo:demo , generated by : htpasswd -nb foo demo + - bar:$apr1$H2GihkSo$PwBeV8cVWFFQlnAJtvVCQ.
# bar:demo , generated by : htpasswd -nb bar demo diff --git a/ansible/roles/nginx/test/test.yml b/ansible/roles/nginx/test/test.yml new file mode 100755 index 0000000000000000000000000000000000000000..47b15ee789feb6b84bb43f893b5bf40e1f2da038 --- /dev/null +++ b/ansible/roles/nginx/test/test.yml @@ -0,0 +1,6 @@ +--- +- hosts: "{{hosts_group|default('all')}}" + vars_files: + - 'example-vars.yml' + roles: + - "{{role_name|default('nginx')}}" diff --git a/ansible/roles/nginx/vars/main.yml b/ansible/roles/nginx/vars/main.yml new file mode 100755 index 0000000000000000000000000000000000000000..6644d64bc9e4024e5d3fb12b7984d58ff5ce2da5 --- /dev/null +++ b/ansible/roles/nginx/vars/main.yml @@ -0,0 +1,17 @@ +--- +nginx_env: + RUNLEVEL: 1 + +nginx_installation_types_using_service: ["packages", "configuration-only"] + +nginx_is_el: "{{ ansible_distribution in ['RedHat', 'CentOS'] }}" + +nginx_http_default_params: + - sendfile "on" + - tcp_nopush "on" + - tcp_nodelay "on" + - keepalive_timeout "65" + - access_log "{{nginx_log_dir}}/access.log" + - "error_log {{nginx_log_dir}}/error.log {{nginx_error_log_level}}" + - server_tokens off + - types_hash_max_size 2048 diff --git a/ansible/roles/openjdk/tasks/main.yml b/ansible/roles/openjdk/tasks/main.yml new file mode 100644 index 0000000000000000000000000000000000000000..48d79457e2935e3a72cfdc4dc2d4ab4262185169 --- /dev/null +++ b/ansible/roles/openjdk/tasks/main.yml @@ -0,0 +1,11 @@ +- name: install the repo for OpenJDK in Ubuntu + apt_repository: repo='ppa:openjdk-r/ppa' + +- name: ensure oracle jdk 8 is absent + apt: name='oracle-java8-installer' state=absent + +- name: ensure oracle jdk 8 set default is absent + apt: name='oracle-java8-set-default' state=absent + +- name: ensure openjdk 8 is present + apt: name='openjdk-8-jdk' state=present \ No newline at end of file diff --git a/ansible/roles/ops-docker-gc/tasks/main.yml b/ansible/roles/ops-docker-gc/tasks/main.yml new file mode 100644 index 0000000000000000000000000000000000000000..9b90c5e9343a0a731fc8bbdb9cba2bb3d821a1fe --- /dev/null +++ b/ansible/roles/ops-docker-gc/tasks/main.yml @@ -0,0 +1,2 @@ +- name: Docker GC + shell: docker run --rm -e GRACE_PERIOD_SECONDS=3600 -v /var/run/docker.sock:/var/run/docker.sock -v /etc:/etc:ro spotify/docker-gc \ No newline at end of file
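The task above runs spotify/docker-gc once per play; if a schedule is wanted instead, the same command could be wrapped in a cron task — a hypothetical sketch, not part of this change:

```yaml
- name: Schedule a nightly docker-gc run (illustrative)
  become: yes
  cron:
    name: docker-gc
    minute: "0"
    hour: "3"
    job: docker run --rm -e GRACE_PERIOD_SECONDS=3600 -v /var/run/docker.sock:/var/run/docker.sock -v /etc:/etc:ro spotify/docker-gc
```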
diff --git a/ansible/roles/ops-docker-gc/tasks/monitor-config/files/alertrules.nodes b/ansible/roles/ops-docker-gc/tasks/monitor-config/files/alertrules.nodes new file mode 100644 index 0000000000000000000000000000000000000000..832b2151569e847ee6333fd7b940d66f15bf3072 --- /dev/null +++ b/ansible/roles/ops-docker-gc/tasks/monitor-config/files/alertrules.nodes @@ -0,0 +1,39 @@ +ALERT high_cpu_usage_on_node + IF sum(rate(process_cpu_seconds_total[5m])) by (instance) * 100 > 70 + FOR 5m + ANNOTATIONS { + summary = "HIGH CPU USAGE WARNING ON '{{ $labels.instance }}'", + description = "{{ $labels.instance }} ({{ $labels.host }}) is using a LOT of CPU. CPU usage is {{ humanize $value}}%.", + } + +ALERT high_memory_usage_on_node + IF ((node_memory_MemTotal-node_memory_MemAvailable)/node_memory_MemTotal)*100 > 80 + FOR 5m + ANNOTATIONS { + summary = "HIGH MEMORY USAGE WARNING ON '{{ $labels.instance }}'", + description = "{{ $labels.instance }} ({{ $labels.host }}) is using a LOT of MEMORY. MEMORY usage is over {{ humanize $value}}%.", + } + +ALERT high_la_usage_on_node + IF node_load5 > 5 + FOR 5m + ANNOTATIONS { + summary = "HIGH LOAD AVERAGE WARNING ON '{{ $labels.instance }}'", + description = "{{ $labels.instance }} ({{ $labels.host }}) has a high load average. The 5 minute load average is {{ humanize $value}}.", + } + +ALERT monitoring_service_down + IF up == 0 + FOR 5m + ANNOTATIONS { + summary = "MONITORING SERVICE DOWN WARNING: NODE '{{ $labels.host }}'", + description = "The monitoring service '{{ $labels.job }}' is down.", + } + +ALERT node_running_out_of_disk_space + IF (node_filesystem_size{fstype="aufs", mountpoint="/"} - node_filesystem_free{fstype="aufs", mountpoint="/"}) * 100/ node_filesystem_size{fstype="aufs", mountpoint="/"} > 80 + FOR 5m + ANNOTATIONS { + summary = "LOW DISK SPACE WARNING: NODE '{{ $labels.instance }}' ", + description = "More than 80% of disk used. Disk usage is {{ humanize $value }}%.", + } diff --git a/ansible/roles/ops-docker-gc/tasks/monitor-config/files/alertrules.task b/ansible/roles/ops-docker-gc/tasks/monitor-config/files/alertrules.task new file mode 100644 index 0000000000000000000000000000000000000000..7bcf9009b67c65ac1ec33c2a913ddf227f49ede5 --- /dev/null +++ b/ansible/roles/ops-docker-gc/tasks/monitor-config/files/alertrules.task @@ -0,0 +1,15 @@ +ALERT high_cpu_usage_on_container + IF sum(rate(container_cpu_usage_seconds_total{container_label_com_docker_swarm_task_name=~".+"}[1m])) by (container_label_com_docker_swarm_task_name,instance) * 100 > 10 + FOR 5m + ANNOTATIONS { + summary = "HIGH CPU USAGE WARNING: TASK '{{ $labels.container_label_com_docker_swarm_task_name }}' on '{{ $labels.instance }}'", + description = "{{ $labels.container_label_com_docker_swarm_task_name }} is using a LOT of CPU. CPU usage is {{ humanize $value}}%.", + } + +ALERT container_eating_memory + IF sum(container_memory_usage_bytes{container_label_com_docker_swarm_task_name=~".+"}) by (container_label_com_docker_swarm_task_name,instance,container_label_com_docker_swarm_service_name) > 2800000000 + FOR 5m + ANNOTATIONS { + summary = "HIGH MEMORY USAGE WARNING: TASK '{{ $labels.container_label_com_docker_swarm_task_name }}' on '{{ $labels.instance }}'", + description = "{{ $labels.container_label_com_docker_swarm_service_name }} is eating up a LOT of memory.
Memory consumption of {{ $labels.container_label_com_docker_swarm_service_name }} is at {{ humanize $value}}.", + } diff --git a/ansible/roles/ops-docker-gc/tasks/monitor-config/tasks/main.yml b/ansible/roles/ops-docker-gc/tasks/monitor-config/tasks/main.yml new file mode 100644 index 0000000000000000000000000000000000000000..306a09e42d63a7887205da59aac25d192c9521f1 --- /dev/null +++ b/ansible/roles/ops-docker-gc/tasks/monitor-config/tasks/main.yml @@ -0,0 +1,60 @@ +--- +- include_vars: secrets/{{env}}.yml + +- name: Ensure dockerdata directory exists + file: + path: /var/dockerdata + state: directory + owner: root + group: root + +- name: Ensure alertmanager directory exists + file: + path: /var/dockerdata/alertmanager + state: directory + owner: root + group: root + +- name: Ensure alertmanager data directory exists + file: + path: /var/dockerdata/alertmanager/data + state: directory + owner: root + group: root + +- name: Save alertmanager config file + template: src=alertmanagerconfig.yml dest=/var/dockerdata/alertmanager/alertmanagerconfig.yml mode=0644 + +- name: Ensure grafana directory exists + file: + path: /var/dockerdata/grafana + state: directory + owner: root + group: root + +- name: Ensure prometheus directory exists + file: + path: /var/dockerdata/prometheus + state: directory + owner: root + group: root + +- name: Ensure prometheus rules directory exists + file: + path: /var/dockerdata/prometheus/rules + state: directory + owner: root + group: root + +- name: Save node config file + copy: src=alertrules.nodes dest=/var/dockerdata/prometheus/rules/alertrules.nodes mode=0644 + +- name: Save container config file + copy: src=alertrules.task dest=/var/dockerdata/prometheus/rules/alertrules.task mode=0644 + +- name: Ensure prometheus data directory exists + file: + path: /var/dockerdata/prometheus/data + state: directory + owner: root + group: root \ No newline at end of file diff --git a/ansible/roles/ops-docker-gc/tasks/monitor-config/templates/alertmanagerconfig.yml b/ansible/roles/ops-docker-gc/tasks/monitor-config/templates/alertmanagerconfig.yml new file mode 100644 index 0000000000000000000000000000000000000000..507f1a2fc572f31332710dcb3a498fdfa39fa630 --- /dev/null +++ b/ansible/roles/ops-docker-gc/tasks/monitor-config/templates/alertmanagerconfig.yml @@ -0,0 +1,20 @@ +global: + resolve_timeout: 5m + +route: + receiver: 'slack' + repeat_interval: 15m + group_interval: 5m + group_wait: 1m + routes: + - receiver: 'slack' + +receivers: + - name: 'slack' + slack_configs: + - send_resolved: true + api_url: "{{slack_url}}" + username: 'Prometheus - Alerter' + channel: 'monitor_alert' + text: 'Instances: {{ '{{' }} range .Alerts {{ '}}' }}{{ '{{' }} .Labels.instance {{ '}}' }}{{ '{{' }} end {{ '}}' }}' + icon_emoji: ':dart:' diff --git a/ansible/roles/ops-docker-gc/tasks/monitor-config/templates/prometheus.yml b/ansible/roles/ops-docker-gc/tasks/monitor-config/templates/prometheus.yml new file mode 100644 index 0000000000000000000000000000000000000000..58dc612871462f0da009bf8453f41a34f82946cd --- /dev/null +++ b/ansible/roles/ops-docker-gc/tasks/monitor-config/templates/prometheus.yml @@ -0,0 +1,31 @@ +global: + scrape_interval: 5s + evaluation_interval: 15s + external_labels: + monitor: 'Prometheus-Monitor' + +rule_files: + - '/etc/prometheus-rules/alertrules.nodes' + - '/etc/prometheus-rules/alertrules.task' + +scrape_configs: + - job_name: 'cadvisor' + dns_sd_configs: + - names: + - 'tasks.cadvisor' + type: 'A' + port: 8080 + + - job_name: 'node-exporter' + dns_sd_configs: + 
- names: + - 'tasks.node-exporter' + type: 'A' + port: 9100 + + - job_name: 'alertmanager' + dns_sd_configs: + - names: + - 'tasks.alertmanager' + type: 'A' + port: 9093 diff --git a/ansible/roles/postgresql-backup/defaults/main.yml b/ansible/roles/postgresql-backup/defaults/main.yml new file mode 100644 index 0000000000000000000000000000000000000000..1c2b49a0a92cc5cc2bf52d092e450c69296f5dc2 --- /dev/null +++ b/ansible/roles/postgresql-backup/defaults/main.yml @@ -0,0 +1,6 @@ +postgresql_backup_dir: /tmp/postgresql-backup +postgresql_user: postgres +postgresql_backup_azure_container_name: postgresql-backup + +# Set these vars per environment as shown in the example below +# postgresql_backup_azure_storage_account_name: ntpbackupsstaging diff --git a/ansible/roles/postgresql-backup/meta/main.yml b/ansible/roles/postgresql-backup/meta/main.yml new file mode 100644 index 0000000000000000000000000000000000000000..23b18a800a4645387a83ac0873b6f893d62c081d --- /dev/null +++ b/ansible/roles/postgresql-backup/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - azure-cli \ No newline at end of file diff --git a/ansible/roles/postgresql-backup/tasks/main.yml b/ansible/roles/postgresql-backup/tasks/main.yml new file mode 100644 index 0000000000000000000000000000000000000000..9b11fd4514594d8e3ef7db87263ff3a089402cfb --- /dev/null +++ b/ansible/roles/postgresql-backup/tasks/main.yml @@ -0,0 +1,32 @@ +- name: ensure backup dir exists + file: path="{{ postgresql_backup_dir }}" state=directory owner={{ postgresql_user }} group={{ postgresql_user }} + +- set_fact: + postgresql_backup_gzip_file_name: "postgresql_backup_{{ lookup('pipe', 'date +%Z-%Y-%m-%d-%H-%M-%S') }}.sql.gz" + +- set_fact: + postgresql_backup_gzip_file_path: "{{ postgresql_backup_dir }}/{{ postgresql_backup_gzip_file_name }}" + +- name: Save backup + command: bash -lc "pg_dumpall | gzip > {{ postgresql_backup_gzip_file_path }}" + become_user: "{{ postgresql_user }}" + async: 3600 + poll: 10 + +- name: Ensure azure blob storage container exists + command: az storage container create --name {{ postgresql_backup_azure_container_name }} + ignore_errors: true + environment: + AZURE_STORAGE_ACCOUNT: "{{ postgresql_backup_azure_storage_account_name }}" + AZURE_STORAGE_KEY: "{{ postgresql_backup_azure_storage_access_key }}" + +- name: Upload to azure blob storage + command: az storage blob upload --name {{ postgresql_backup_gzip_file_name }} --file {{ postgresql_backup_gzip_file_path }} --container-name {{ postgresql_backup_azure_container_name }} + environment: + AZURE_STORAGE_ACCOUNT: "{{ postgresql_backup_azure_storage_account_name }}" + AZURE_STORAGE_KEY: "{{ postgresql_backup_azure_storage_access_key }}" + async: 3600 + poll: 10 + +- name: clean up backup dir after upload + file: path="{{ postgresql_backup_dir }}" state=absent \ No newline at end of file diff --git a/ansible/roles/postgresql-data-update/tasks/main.yml b/ansible/roles/postgresql-data-update/tasks/main.yml new file mode 100644 index 0000000000000000000000000000000000000000..df6548f80d5e49e201ec9bfee1fc73b08e940d21 --- /dev/null +++ b/ansible/roles/postgresql-data-update/tasks/main.yml @@ -0,0 +1,11 @@ +- name: Copy the templates + become: yes + template: src={{item}} dest=/tmp/{{item}} + with_items: + - tables_postgres.sql + +- name: Run the postgresql command + become: yes + environment: + PGPASSWORD: "{{ application_postgres_password }}" + command: "psql -h 127.0.0.1 -U {{application_postgres_user}} -d {{application_postgres_database}} -a -f /tmp/tables_postgres.sql" \ No newline at
end of file diff --git a/ansible/roles/postgresql-data-update/templates/tables_postgres.sql b/ansible/roles/postgresql-data-update/templates/tables_postgres.sql new file mode 100644 index 0000000000000000000000000000000000000000..b72eaf5d2453ec6814f74b40f9980626d3f61755 --- /dev/null +++ b/ansible/roles/postgresql-data-update/templates/tables_postgres.sql @@ -0,0 +1,188 @@ +-- Thanks to Patrick Lightbody for submitting this... +-- +-- In your Quartz properties file, you'll need to set +-- org.quartz.jobStore.driverDelegateClass = org.quartz.impl.jdbcjobstore.PostgreSQLDelegate + +drop table qrtz_fired_triggers; +DROP TABLE QRTZ_PAUSED_TRIGGER_GRPS; +DROP TABLE QRTZ_SCHEDULER_STATE; +DROP TABLE QRTZ_LOCKS; +drop table qrtz_simple_triggers; +drop table qrtz_cron_triggers; +drop table qrtz_simprop_triggers; +DROP TABLE QRTZ_BLOB_TRIGGERS; +drop table qrtz_triggers; +drop table qrtz_job_details; +drop table qrtz_calendars; + +CREATE TABLE qrtz_job_details + ( + SCHED_NAME VARCHAR(120) NOT NULL, + JOB_NAME VARCHAR(200) NOT NULL, + JOB_GROUP VARCHAR(200) NOT NULL, + DESCRIPTION VARCHAR(250) NULL, + JOB_CLASS_NAME VARCHAR(250) NOT NULL, + IS_DURABLE BOOL NOT NULL, + IS_NONCONCURRENT BOOL NOT NULL, + IS_UPDATE_DATA BOOL NOT NULL, + REQUESTS_RECOVERY BOOL NOT NULL, + JOB_DATA BYTEA NULL, + PRIMARY KEY (SCHED_NAME,JOB_NAME,JOB_GROUP) +); + +CREATE TABLE qrtz_triggers + ( + SCHED_NAME VARCHAR(120) NOT NULL, + TRIGGER_NAME VARCHAR(200) NOT NULL, + TRIGGER_GROUP VARCHAR(200) NOT NULL, + JOB_NAME VARCHAR(200) NOT NULL, + JOB_GROUP VARCHAR(200) NOT NULL, + DESCRIPTION VARCHAR(250) NULL, + NEXT_FIRE_TIME BIGINT NULL, + PREV_FIRE_TIME BIGINT NULL, + PRIORITY INTEGER NULL, + TRIGGER_STATE VARCHAR(16) NOT NULL, + TRIGGER_TYPE VARCHAR(8) NOT NULL, + START_TIME BIGINT NOT NULL, + END_TIME BIGINT NULL, + CALENDAR_NAME VARCHAR(200) NULL, + MISFIRE_INSTR SMALLINT NULL, + JOB_DATA BYTEA NULL, + PRIMARY KEY (SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP), + FOREIGN KEY (SCHED_NAME,JOB_NAME,JOB_GROUP) + REFERENCES QRTZ_JOB_DETAILS(SCHED_NAME,JOB_NAME,JOB_GROUP) +); + +CREATE TABLE qrtz_simple_triggers + ( + SCHED_NAME VARCHAR(120) NOT NULL, + TRIGGER_NAME VARCHAR(200) NOT NULL, + TRIGGER_GROUP VARCHAR(200) NOT NULL, + REPEAT_COUNT BIGINT NOT NULL, + REPEAT_INTERVAL BIGINT NOT NULL, + TIMES_TRIGGERED BIGINT NOT NULL, + PRIMARY KEY (SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP), + FOREIGN KEY (SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP) + REFERENCES QRTZ_TRIGGERS(SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP) +); + +CREATE TABLE qrtz_cron_triggers + ( + SCHED_NAME VARCHAR(120) NOT NULL, + TRIGGER_NAME VARCHAR(200) NOT NULL, + TRIGGER_GROUP VARCHAR(200) NOT NULL, + CRON_EXPRESSION VARCHAR(120) NOT NULL, + TIME_ZONE_ID VARCHAR(80), + PRIMARY KEY (SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP), + FOREIGN KEY (SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP) + REFERENCES QRTZ_TRIGGERS(SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP) +); + +CREATE TABLE qrtz_simprop_triggers + ( + SCHED_NAME VARCHAR(120) NOT NULL, + TRIGGER_NAME VARCHAR(200) NOT NULL, + TRIGGER_GROUP VARCHAR(200) NOT NULL, + STR_PROP_1 VARCHAR(512) NULL, + STR_PROP_2 VARCHAR(512) NULL, + STR_PROP_3 VARCHAR(512) NULL, + INT_PROP_1 INT NULL, + INT_PROP_2 INT NULL, + LONG_PROP_1 BIGINT NULL, + LONG_PROP_2 BIGINT NULL, + DEC_PROP_1 NUMERIC(13,4) NULL, + DEC_PROP_2 NUMERIC(13,4) NULL, + BOOL_PROP_1 BOOL NULL, + BOOL_PROP_2 BOOL NULL, + PRIMARY KEY
(SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP), + FOREIGN KEY (SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP) + REFERENCES QRTZ_TRIGGERS(SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP) +); + +CREATE TABLE qrtz_blob_triggers + ( + SCHED_NAME VARCHAR(120) NOT NULL, + TRIGGER_NAME VARCHAR(200) NOT NULL, + TRIGGER_GROUP VARCHAR(200) NOT NULL, + BLOB_DATA BYTEA NULL, + PRIMARY KEY (SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP), + FOREIGN KEY (SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP) + REFERENCES QRTZ_TRIGGERS(SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP) +); + +CREATE TABLE qrtz_calendars + ( + SCHED_NAME VARCHAR(120) NOT NULL, + CALENDAR_NAME VARCHAR(200) NOT NULL, + CALENDAR BYTEA NOT NULL, + PRIMARY KEY (SCHED_NAME,CALENDAR_NAME) +); + + +CREATE TABLE qrtz_paused_trigger_grps + ( + SCHED_NAME VARCHAR(120) NOT NULL, + TRIGGER_GROUP VARCHAR(200) NOT NULL, + PRIMARY KEY (SCHED_NAME,TRIGGER_GROUP) +); + +CREATE TABLE qrtz_fired_triggers + ( + SCHED_NAME VARCHAR(120) NOT NULL, + ENTRY_ID VARCHAR(95) NOT NULL, + TRIGGER_NAME VARCHAR(200) NOT NULL, + TRIGGER_GROUP VARCHAR(200) NOT NULL, + INSTANCE_NAME VARCHAR(200) NOT NULL, + FIRED_TIME BIGINT NOT NULL, + SCHED_TIME BIGINT NOT NULL, + PRIORITY INTEGER NOT NULL, + STATE VARCHAR(16) NOT NULL, + JOB_NAME VARCHAR(200) NULL, + JOB_GROUP VARCHAR(200) NULL, + IS_NONCONCURRENT BOOL NULL, + REQUESTS_RECOVERY BOOL NULL, + PRIMARY KEY (SCHED_NAME,ENTRY_ID) +); + +CREATE TABLE qrtz_scheduler_state + ( + SCHED_NAME VARCHAR(120) NOT NULL, + INSTANCE_NAME VARCHAR(200) NOT NULL, + LAST_CHECKIN_TIME BIGINT NOT NULL, + CHECKIN_INTERVAL BIGINT NOT NULL, + PRIMARY KEY (SCHED_NAME,INSTANCE_NAME) +); + +CREATE TABLE qrtz_locks + ( + SCHED_NAME VARCHAR(120) NOT NULL, + LOCK_NAME VARCHAR(40) NOT NULL, + PRIMARY KEY (SCHED_NAME,LOCK_NAME) +); + +create index idx_qrtz_j_req_recovery on qrtz_job_details(SCHED_NAME,REQUESTS_RECOVERY); +create index idx_qrtz_j_grp on qrtz_job_details(SCHED_NAME,JOB_GROUP); + +create index idx_qrtz_t_j on qrtz_triggers(SCHED_NAME,JOB_NAME,JOB_GROUP); +create index idx_qrtz_t_jg on qrtz_triggers(SCHED_NAME,JOB_GROUP); +create index idx_qrtz_t_c on qrtz_triggers(SCHED_NAME,CALENDAR_NAME); +create index idx_qrtz_t_g on qrtz_triggers(SCHED_NAME,TRIGGER_GROUP); +create index idx_qrtz_t_state on qrtz_triggers(SCHED_NAME,TRIGGER_STATE); +create index idx_qrtz_t_n_state on qrtz_triggers(SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP,TRIGGER_STATE); +create index idx_qrtz_t_n_g_state on qrtz_triggers(SCHED_NAME,TRIGGER_GROUP,TRIGGER_STATE); +create index idx_qrtz_t_next_fire_time on qrtz_triggers(SCHED_NAME,NEXT_FIRE_TIME); +create index idx_qrtz_t_nft_st on qrtz_triggers(SCHED_NAME,TRIGGER_STATE,NEXT_FIRE_TIME); +create index idx_qrtz_t_nft_misfire on qrtz_triggers(SCHED_NAME,MISFIRE_INSTR,NEXT_FIRE_TIME); +create index idx_qrtz_t_nft_st_misfire on qrtz_triggers(SCHED_NAME,MISFIRE_INSTR,NEXT_FIRE_TIME,TRIGGER_STATE); +create index idx_qrtz_t_nft_st_misfire_grp on qrtz_triggers(SCHED_NAME,MISFIRE_INSTR,NEXT_FIRE_TIME,TRIGGER_GROUP,TRIGGER_STATE); + +create index idx_qrtz_ft_trig_inst_name on qrtz_fired_triggers(SCHED_NAME,INSTANCE_NAME); +create index idx_qrtz_ft_inst_job_req_rcvry on qrtz_fired_triggers(SCHED_NAME,INSTANCE_NAME,REQUESTS_RECOVERY); +create index idx_qrtz_ft_j_g on qrtz_fired_triggers(SCHED_NAME,JOB_NAME,JOB_GROUP); +create index idx_qrtz_ft_jg on qrtz_fired_triggers(SCHED_NAME,JOB_GROUP); +create index idx_qrtz_ft_t_g on qrtz_fired_triggers(SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP); +create index idx_qrtz_ft_tg on qrtz_fired_triggers(SCHED_NAME,TRIGGER_GROUP); + + +commit; diff --git 
a/ansible/roles/postgresql-master/defaults/main.yml b/ansible/roles/postgresql-master/defaults/main.yml new file mode 100644 index 0000000000000000000000000000000000000000..8fe5e719ebc23618de9af5929c46b4783f15364e --- /dev/null +++ b/ansible/roles/postgresql-master/defaults/main.yml @@ -0,0 +1 @@ +postgresql_archive_dir: "{{ postgresql_data_dir }}/archive" diff --git a/ansible/roles/postgresql-master/meta/main.yml b/ansible/roles/postgresql-master/meta/main.yml new file mode 100644 index 0000000000000000000000000000000000000000..37464275447e599ccd30d0323ca3beab1c815be3 --- /dev/null +++ b/ansible/roles/postgresql-master/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - postgresql \ No newline at end of file diff --git a/ansible/roles/postgresql-master/tasks/main.yml b/ansible/roles/postgresql-master/tasks/main.yml new file mode 100644 index 0000000000000000000000000000000000000000..74bb60b13839bc385d46383f32b9a04e29c1c51f --- /dev/null +++ b/ansible/roles/postgresql-master/tasks/main.yml @@ -0,0 +1,2 @@ +- name: Ensure archive directory exists + file: path={{ postgresql_archive_dir }} state=directory owner={{ postgresql_user }} group={{ postgresql_group }} \ No newline at end of file diff --git a/ansible/roles/postgresql-restore/defaults/main.yml b/ansible/roles/postgresql-restore/defaults/main.yml new file mode 100644 index 0000000000000000000000000000000000000000..0c6b17f851ee1adbabbf7dd4010c6d1486b945a9 --- /dev/null +++ b/ansible/roles/postgresql-restore/defaults/main.yml @@ -0,0 +1,12 @@ +postgresql_restore_dir: /tmp/postgresql-restore +postgresql_user: postgres +postgresql_port: 5432 +postgresql_cluster_version: 9.5 +postgresql_cluster_name: main +postgresql_restore_azure_container_name: postgresql-backup + +# Set these vars per environment as shown in the example below +# postgresql_restore_azure_storage_account_name: ntpbackupsstaging + +# Pass this parameter when running the playbook +# postgresql_restore_gzip_file_name: <postgresql_restore_gzip_file_name> diff --git a/ansible/roles/postgresql-restore/meta/main.yml b/ansible/roles/postgresql-restore/meta/main.yml new file mode 100644 index 0000000000000000000000000000000000000000..23b18a800a4645387a83ac0873b6f893d62c081d --- /dev/null +++ b/ansible/roles/postgresql-restore/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - azure-cli \ No newline at end of file diff --git a/ansible/roles/postgresql-restore/tasks/main.yml b/ansible/roles/postgresql-restore/tasks/main.yml new file mode 100644 index 0000000000000000000000000000000000000000..47f9aa0f053c25f0dd8b3d0b6bc9f68e2a545476 --- /dev/null +++ b/ansible/roles/postgresql-restore/tasks/main.yml @@ -0,0 +1,40 @@ +- name: ensure restore dir exists + file: path="{{ postgresql_restore_dir }}" state=directory owner={{ postgresql_user }} group={{ postgresql_user }} + +- set_fact: + postgresql_restore_gzip_file_path: "{{ postgresql_restore_dir }}/{{ postgresql_restore_gzip_file_name }}" + +- name: Download restore file from azure + command: az storage blob download --container-name {{ postgresql_restore_azure_container_name }} --name {{ postgresql_restore_gzip_file_name }} --file {{ postgresql_restore_gzip_file_path }} + environment: + AZURE_STORAGE_ACCOUNT: "{{ postgresql_restore_azure_storage_account_name }}" + AZURE_STORAGE_KEY: "{{ postgresql_restore_azure_storage_access_key }}" + async: 3600 + poll: 10 + +- name: ensure postgresql service is stopped + service: name=postgresql state=stopped + +- name: wait for postgresql to be stopped + wait_for: port={{ postgresql_port }} state=stopped + +- name: drop cluster +
command: pg_dropcluster {{ postgresql_cluster_version }} {{ postgresql_cluster_name }} + become_user: "{{ postgresql_user }}" + ignore_errors: true + +- name: create cluster + command: pg_createcluster {{ postgresql_cluster_version }} {{ postgresql_cluster_name }} + become_user: "{{ postgresql_user }}" + +- name: ensure postgresql service is started + service: name=postgresql state=started + +- name: wait for postgresql to be started + wait_for: port={{ postgresql_port }} state=started + +- name: restore backup file + command: bash -lc "gunzip < {{ postgresql_restore_gzip_file_path }} | psql" + async: 3600 + poll: 10 + become_user: "{{ postgresql_user }}" diff --git a/ansible/roles/postgresql-slave-to-master-promotion/defaults/main.yml b/ansible/roles/postgresql-slave-to-master-promotion/defaults/main.yml new file mode 100644 index 0000000000000000000000000000000000000000..ee87fc003a7947d730f1db2ebbbabd994a082a47 --- /dev/null +++ b/ansible/roles/postgresql-slave-to-master-promotion/defaults/main.yml @@ -0,0 +1,3 @@ +postgresql_user: postgres +postgresql_port: 5432 +postgres_slave_to_master_trigger_file: /tmp/postgresql.trigger.{{ postgresql_port }} \ No newline at end of file diff --git a/ansible/roles/postgresql-slave-to-master-promotion/tasks/main.yml b/ansible/roles/postgresql-slave-to-master-promotion/tasks/main.yml new file mode 100644 index 0000000000000000000000000000000000000000..b3cd77a83b4ae09dc1d3896b19515b898301ecf0 --- /dev/null +++ b/ansible/roles/postgresql-slave-to-master-promotion/tasks/main.yml @@ -0,0 +1,2 @@ +- name: ensure trigger file exists + file: path="{{ postgres_slave_to_master_trigger_file }}" state=touch owner={{ postgresql_user }} group={{ postgresql_user }} diff --git a/ansible/roles/postgresql-slave/defaults/main.yml b/ansible/roles/postgresql-slave/defaults/main.yml new file mode 100644 index 0000000000000000000000000000000000000000..61b33903c2f77898d3a6cdaac1817118e74db30e --- /dev/null +++ b/ansible/roles/postgresql-slave/defaults/main.yml @@ -0,0 +1,6 @@ +postgresql_master_host: "{{ groups['postgresql-master'][0] }}" +postgresql_port: 5432 +postgres_replication_user_name: replication +postgres_replication_user_password: "{{ vault_postgres_replication_user_password }}" +postgres_slave_to_master_trigger_file: /tmp/postgresql.trigger.{{ postgresql_port }} +postgresql_archive_dir: "{{ postgresql_data_dir }}/archive" diff --git a/ansible/roles/postgresql-slave/meta/main.yml b/ansible/roles/postgresql-slave/meta/main.yml new file mode 100644 index 0000000000000000000000000000000000000000..37464275447e599ccd30d0323ca3beab1c815be3 --- /dev/null +++ b/ansible/roles/postgresql-slave/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - postgresql \ No newline at end of file diff --git a/ansible/roles/postgresql-slave/tasks/main.yml b/ansible/roles/postgresql-slave/tasks/main.yml new file mode 100644 index 0000000000000000000000000000000000000000..a9463c90916590c8c4f271ead1ab40cd420438f9 --- /dev/null +++ b/ansible/roles/postgresql-slave/tasks/main.yml @@ -0,0 +1,25 @@ +- name: Stop postgresql service + service: name={{ postgresql_daemon }} state=stopped + +- set_fact: + postgresql_data_dir_backup_path: "{{ postgresql_data_dir }}-old-{{ lookup('pipe', 'date +%Z-%Y-%m-%d-%H-%M-%S') }}" + +- name: Move the old data directory to a backup location + command: mv {{ postgresql_data_dir }} {{ postgresql_data_dir_backup_path }} + +- name: Backup data from master + command: pg_basebackup -h {{ postgresql_master_host }} -D {{ postgresql_data_dir }} -U {{ 
postgres_replication_user_name }} -P --xlog-method=stream + environment: + PGPASSWORD: "{{ postgres_replication_user_password }}" + become_user: "{{ postgresql_user }}" + async: 3600 + poll: 10 + +- name: ensure trigger file does not exist + file: path="{{ postgres_slave_to_master_trigger_file }}" state=absent + +- name: Add recovery.conf + template: src=recovery.conf dest={{ postgresql_data_dir }}/recovery.conf owner={{ postgresql_user }} mode=0644 + +- name: Start postgresql service + service: name={{ postgresql_daemon }} state=started \ No newline at end of file diff --git a/ansible/roles/postgresql-slave/templates/recovery.conf b/ansible/roles/postgresql-slave/templates/recovery.conf new file mode 100644 index 0000000000000000000000000000000000000000..39be7e21572b8bb7c814c2f9c901e47928f70cde --- /dev/null +++ b/ansible/roles/postgresql-slave/templates/recovery.conf @@ -0,0 +1,18 @@ +# Specifies whether to start the server as a standby. In streaming replication, +# this parameter must be set to on. +standby_mode = 'on' + +# Specifies a connection string which is used for the standby server to connect +# with the primary. +primary_conninfo = 'host={{ postgresql_master_host }} port={{ postgresql_port }} user={{ postgres_replication_user_name }} password={{ postgres_replication_user_password }}' + +# Specifies a trigger file whose presence should cause streaming replication to +# end (i.e., failover). +trigger_file = '{{ postgres_slave_to_master_trigger_file }}' + +# Specifies a command to load archive segments from the WAL archive. If +# wal_keep_segments is a high enough number to retain the WAL segments +# required for the standby server, this may not be necessary. But +# a large workload can cause segments to be recycled before the standby +# is fully synchronized, requiring you to start again from a new base backup. +restore_command = 'cp {{ postgresql_archive_dir }}/%f "%p"' \ No newline at end of file diff --git a/ansible/roles/postgresql/.gitignore b/ansible/roles/postgresql/.gitignore new file mode 100755 index 0000000000000000000000000000000000000000..c9b2377e32b550d3275298fa99782276b3f61f94 --- /dev/null +++ b/ansible/roles/postgresql/.gitignore @@ -0,0 +1,2 @@ +*.retry +tests/test.sh diff --git a/ansible/roles/postgresql/.travis.yml b/ansible/roles/postgresql/.travis.yml new file mode 100755 index 0000000000000000000000000000000000000000..b1978d1986193b9e9f054b18ed16179141efc3a1 --- /dev/null +++ b/ansible/roles/postgresql/.travis.yml @@ -0,0 +1,45 @@ +--- +services: docker + +env: + - distro: centos7 + postgresql_bin_dir: /usr/bin + postgresql_data_dir: /var/lib/pgsql/data + - distro: centos6 + postgresql_bin_dir: /usr/bin + postgresql_data_dir: /var/lib/pgsql/data + - distro: ubuntu1604 + postgresql_bin_dir: /usr/lib/postgresql/9.5/bin + postgresql_data_dir: /var/lib/postgresql/9.5/main + - distro: ubuntu1404 + postgresql_bin_dir: /usr/lib/postgresql/9.3/bin + postgresql_data_dir: /var/lib/postgresql/9.3/main + - distro: debian8 + postgresql_bin_dir: /usr/lib/postgresql/9.4/bin + postgresql_data_dir: /var/lib/postgresql/9.4/main + - distro: debian9 + postgresql_bin_dir: /usr/lib/postgresql/9.6/bin + postgresql_data_dir: /var/lib/postgresql/9.6/main + +script: + # Configure test script so we can run extra tests after playbook is run. + - export container_id=$(date +%s) + - export cleanup=false + + # Download test shim.
+ - wget -O ${PWD}/tests/test.sh https://gist.githubusercontent.com/geerlingguy/73ef1e5ee45d8694570f334be385e181/raw/ + - chmod +x ${PWD}/tests/test.sh + + # Run tests. + - ${PWD}/tests/test.sh + + # Check PostgreSQL status. + - 'docker exec -u postgres ${container_id} ${postgresql_bin_dir}/pg_ctl -D ${postgresql_data_dir} status' + +after_failure: + # Check what happened on systemd systems. + - 'docker exec --tty ${container_id} env TERM=xterm systemctl -l status postgresql.service' + - 'docker exec --tty ${container_id} env TERM=xterm journalctl -xe --no-pager' + +notifications: + webhooks: https://galaxy.ansible.com/api/v1/notifications/ diff --git a/ansible/roles/postgresql/LICENSE b/ansible/roles/postgresql/LICENSE new file mode 100755 index 0000000000000000000000000000000000000000..4275cf3c10aae9c3992998fbf54f90bae9615960 --- /dev/null +++ b/ansible/roles/postgresql/LICENSE @@ -0,0 +1,20 @@ +The MIT License (MIT) + +Copyright (c) 2017 Jeff Geerling + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/ansible/roles/postgresql/README.md b/ansible/roles/postgresql/README.md new file mode 100755 index 0000000000000000000000000000000000000000..04580d70811c0083ca353071abd2a3251446c022 --- /dev/null +++ b/ansible/roles/postgresql/README.md @@ -0,0 +1,134 @@ +# Ansible Role: PostgreSQL + +[Build Status](https://travis-ci.org/geerlingguy/ansible-role-postgresql) + +Installs and configures PostgreSQL server on RHEL/CentOS or Debian/Ubuntu servers. + +## Requirements + +No special requirements; note that this role requires root access, so either run it in a playbook with a global `become: yes`, or invoke the role in your playbook like: + + - hosts: database + roles: + - role: geerlingguy.postgresql + become: yes + +## Role Variables + +Available variables are listed below, along with default values (see `defaults/main.yml`): + + postgresql_enablerepo: "" + +(RHEL/CentOS only) You can set a repo to use for the PostgreSQL installation by passing it in here. + + postgresql_python_library: python-psycopg2 + +Library used by Ansible to communicate with PostgreSQL. If you are using Python 3 (e.g. set via `ansible_python_interpreter`), you should change this to `python3-psycopg2` (see the short sketch below). + + postgresql_user: postgres + postgresql_group: postgres + +The user and group under which PostgreSQL will run. + + postgresql_unix_socket_directories: + - /var/run/postgresql + +The directories (usually one, but can be multiple) where PostgreSQL's socket will be created.
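For instance, a minimal vars sketch of the Python 3 override described above; the `/usr/bin/python3` interpreter path and the file name are illustrative assumptions, not part of this diff:

    # group_vars/database.yml (hypothetical file)
    # Assumption: the managed hosts run Ansible modules under Python 3.
    ansible_python_interpreter: /usr/bin/python3
    postgresql_python_library: python3-psycopg2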
+ + postgresql_global_config_options: + - option: unix_socket_directories + value: '{{ postgresql_unix_socket_directories | join(",") }}' + +Global configuration options that will be set in `postgresql.conf`. Note that for RHEL/CentOS 6 (or very old versions of PostgreSQL), you need to at least override this variable and set the `option` to `unix_socket_directory`. + + postgresql_hba_entries: + - { type: local, database: all, user: postgres, auth_method: peer } + - { type: local, database: all, user: all, auth_method: peer } + - { type: host, database: all, user: all, address: '127.0.0.1/32', auth_method: md5 } + - { type: host, database: all, user: all, address: '::1/128', auth_method: md5 } + +Configure [host based authentication](https://www.postgresql.org/docs/current/static/auth-pg-hba-conf.html) entries to be set in the `pg_hba.conf`. Options for entries include: + + - `type` (required) + - `database` (required) + - `user` (required) + - `address` (either this or the following two are required) + - `ip_address` + - `ip_mask` + - `auth_method` (required) + - `auth_options` (optional) + +If overriding, make sure you copy all of the existing entries from `defaults/main.yml` that you need to preserve (a sketch of such an override follows this README). + + postgresql_locales: + - 'en_US.UTF-8' + +(Debian/Ubuntu only) Used to generate the locales used by PostgreSQL databases. + + postgresql_databases: + - name: exampledb # required; the rest are optional + lc_collate: # defaults to 'en_US.UTF-8' + lc_ctype: # defaults to 'en_US.UTF-8' + encoding: # defaults to 'UTF-8' + template: # defaults to 'template0' + login_host: # defaults to 'localhost' + login_password: # defaults to not set + login_user: # defaults to 'postgresql_user' + login_unix_socket: # defaults to 1st of postgresql_unix_socket_directories + port: # defaults to not set + state: # defaults to 'present' + +A list of databases to ensure exist on the server. Only the `name` is required; all other properties are optional. + + postgresql_users: + - name: jdoe #required; the rest are optional + password: # defaults to not set + priv: # defaults to not set + role_attr_flags: # defaults to not set + db: # defaults to not set + login_host: # defaults to 'localhost' + login_password: # defaults to not set + login_user: # defaults to '{{ postgresql_user }}' + login_unix_socket: # defaults to 1st of postgresql_unix_socket_directories + port: # defaults to not set + state: # defaults to 'present' + +A list of users to ensure exist on the server. Only the `name` is required; all other properties are optional. + + postgresql_version: [OS-specific] + postgresql_data_dir: [OS-specific] + postgresql_bin_path: [OS-specific] + postgresql_config_path: [OS-specific] + postgresql_daemon: [OS-specific] + postgresql_packages: [OS-specific] + +OS-specific variables that are set by include files in this role's `vars` directory. These shouldn't be overridden unless you're using a version of PostgreSQL that wasn't installed using system packages. + +## Dependencies + +None. + +## Example Playbook + + - hosts: database + become: yes + vars_files: + - vars/main.yml + roles: + - geerlingguy.postgresql + +*Inside `vars/main.yml`*: + + postgresql_databases: + - name: example_db + postgresql_users: + - name: example_user + password: supersecure + +## License + +MIT / BSD + +## Author Information + +This role was created in 2016 by [Jeff Geerling](https://www.jeffgeerling.com/), author of [Ansible for DevOps](https://www.ansiblefordevops.com/).
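To make the `postgresql_hba_entries` warning concrete, here is a hedged override sketch: it re-states the role defaults verbatim and appends one remote entry, then declares a database and user. The `10.0.0.0/8` network and the `example_db`/`example_user` values are illustrative assumptions, not values taken from this diff:

    postgresql_hba_entries:
      # Copied from defaults/main.yml so the stock entries are preserved.
      - { type: local, database: all, user: postgres, auth_method: peer }
      - { type: local, database: all, user: all, auth_method: peer }
      - { type: host, database: all, user: all, address: '127.0.0.1/32', auth_method: md5 }
      - { type: host, database: all, user: all, address: '::1/128', auth_method: md5 }
      # Appended entry (assumption: application hosts connect from 10.0.0.0/8).
      - { type: host, database: all, user: all, address: '10.0.0.0/8', auth_method: md5 }

    postgresql_databases:
      - name: example_db
    postgresql_users:
      - name: example_user
        password: supersecure
        db: example_db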
diff --git a/ansible/roles/postgresql/defaults/main.yml b/ansible/roles/postgresql/defaults/main.yml new file mode 100755 index 0000000000000000000000000000000000000000..78e61b22af26c0b2da247773cb8cbb1b1baf626a --- /dev/null +++ b/ansible/roles/postgresql/defaults/main.yml @@ -0,0 +1,57 @@ +--- +# RHEL/CentOS only. Set a repository to use for PostgreSQL installation. +postgresql_enablerepo: "" + +postgresql_python_library: python-psycopg2 +postgresql_user: postgres +postgresql_group: postgres + +postgresql_unix_socket_directories: + - /var/run/postgresql + +# Global configuration options that will be set in postgresql.conf. +postgresql_global_config_options: + - option: unix_socket_directories + value: '{{ postgresql_unix_socket_directories | join(",") }}' + - option: log_destination + value: 'syslog' + +# Host based authentication (hba) entries to be added to the pg_hba.conf. This +# variable's defaults reflect the defaults that come with a fresh installation. +postgresql_hba_entries: + - { type: local, database: all, user: postgres, auth_method: peer } + - { type: local, database: all, user: all, auth_method: peer } + - { type: host, database: all, user: all, address: '127.0.0.1/32', auth_method: md5 } + - { type: host, database: all, user: all, address: '::1/128', auth_method: md5 } + +# Debian only. Used to generate the locales used by PostgreSQL databases. +postgresql_locales: + - 'en_US.UTF-8' + +# Databases to ensure exist. +postgresql_databases: [] + # - name: exampledb # required; the rest are optional + # lc_collate: # defaults to 'en_US.UTF-8' + # lc_ctype: # defaults to 'en_US.UTF-8' + # encoding: # defaults to 'UTF-8' + # template: # defaults to 'template0' + # login_host: # defaults to 'localhost' + # login_password: # defaults to not set + # login_user: # defaults to '{{ postgresql_user }}' + # login_unix_socket: # defaults to 1st of postgresql_unix_socket_directories + # port: # defaults to not set + # state: # defaults to 'present' + +# Users to ensure exist. +postgresql_users: [] + # - name: jdoe #required; the rest are optional + # password: # defaults to not set + # priv: # defaults to not set + # role_attr_flags: # defaults to not set + # db: # defaults to not set + # login_host: # defaults to 'localhost' + # login_password: # defaults to not set + # login_user: # defaults to '{{ postgresql_user }}' + # login_unix_socket: # defaults to 1st of postgresql_unix_socket_directories + # port: # defaults to not set + # state: # defaults to 'present' diff --git a/ansible/roles/postgresql/handlers/main.yml b/ansible/roles/postgresql/handlers/main.yml new file mode 100755 index 0000000000000000000000000000000000000000..523ad1b17572ec80dd87a62264781bb8b53e92fd --- /dev/null +++ b/ansible/roles/postgresql/handlers/main.yml @@ -0,0 +1,3 @@ +--- +- name: restart postgresql + service: "name={{ postgresql_daemon }} state=restarted sleep=5" diff --git a/ansible/roles/postgresql/meta/main.yml b/ansible/roles/postgresql/meta/main.yml new file mode 100755 index 0000000000000000000000000000000000000000..db0b759d63f50425eff5f62a412df7d21efd6d2a --- /dev/null +++ b/ansible/roles/postgresql/meta/main.yml @@ -0,0 +1,25 @@ +--- +dependencies: [] + +galaxy_info: + author: geerlingguy + description: PostgreSQL server for Linux. 
+ company: "Midwestern Mac, LLC" + license: "license (BSD, MIT)" + min_ansible_version: 2.0 + platforms: + - name: EL + versions: + - 6 + - 7 + - name: Ubuntu + versions: + - all + - name: Debian + versions: + - all + galaxy_tags: + - database + - postgresql + - postgres + - rdbms diff --git a/ansible/roles/postgresql/tasks/configure.yml b/ansible/roles/postgresql/tasks/configure.yml new file mode 100755 index 0000000000000000000000000000000000000000..d69965b7bea173a0c6ee8bb876bd0d2a1eee8128 --- /dev/null +++ b/ansible/roles/postgresql/tasks/configure.yml @@ -0,0 +1,28 @@ +--- +- name: Configure global settings. + lineinfile: + dest: "{{ postgresql_config_path }}/postgresql.conf" + regexp: "^#?{{ item.option }}.+$" + line: "{{ item.option }} = '{{ item.value }}'" + state: "{{ item.state | default('present') }}" + with_items: "{{ postgresql_global_config_options }}" + notify: restart postgresql + +- name: Configure host based authentication (if entries are configured). + template: + src: "templates/pg_hba.conf.j2" + dest: "{{ postgresql_config_path }}/pg_hba.conf" + owner: "{{ postgresql_user }}" + group: "{{ postgresql_group }}" + mode: 0600 + notify: restart postgresql + when: postgresql_hba_entries + +- name: Ensure PostgreSQL unix socket dirs exist. + file: + path: "{{ item }}" + state: directory + owner: "{{ postgresql_user }}" + group: "{{ postgresql_group }}" + mode: 02775 + with_items: "{{ postgresql_unix_socket_directories }}" diff --git a/ansible/roles/postgresql/tasks/databases.yml b/ansible/roles/postgresql/tasks/databases.yml new file mode 100755 index 0000000000000000000000000000000000000000..fa782ea9f602de806abac6e6672b8f90356708b2 --- /dev/null +++ b/ansible/roles/postgresql/tasks/databases.yml @@ -0,0 +1,21 @@ +--- +- name: Ensure PostgreSQL databases are present. + postgresql_db: + name: "{{ item.name }}" + lc_collate: "{{ item.lc_collate | default('en_US.UTF-8') }}" + lc_ctype: "{{ item.lc_ctype | default('en_US.UTF-8') }}" + encoding: "{{ item.encoding | default('UTF-8') }}" + template: "{{ item.template | default('template0') }}" + login_host: "{{ item.login_host | default('localhost') }}" + login_password: "{{ item.login_password | default(omit) }}" + login_user: "{{ item.login_user | default(postgresql_user) }}" + login_unix_socket: "{{ item.login_unix_socket | default(postgresql_unix_socket_directories[0]) }}" + port: "{{ item.port | default(omit) }}" + owner: "{{ item.owner | default(postgresql_user) }}" + state: "{{ item.state | default('present') }}" + with_items: "{{ postgresql_databases }}" + become: yes + become_user: "{{ postgresql_user }}" + # See: https://github.com/ansible/ansible/issues/16048#issuecomment-229012509 + vars: + ansible_ssh_pipelining: true diff --git a/ansible/roles/postgresql/tasks/initialize.yml b/ansible/roles/postgresql/tasks/initialize.yml new file mode 100755 index 0000000000000000000000000000000000000000..44ffd9b310487d000e5527c1a235ae3f4a7dc4f5 --- /dev/null +++ b/ansible/roles/postgresql/tasks/initialize.yml @@ -0,0 +1,29 @@ +--- +- name: Set PostgreSQL environment variables. + template: + src: postgres.sh.j2 + dest: /etc/profile.d/postgres.sh + mode: 0644 + notify: restart postgresql + +- name: Ensure PostgreSQL data directory exists. + file: + path: "{{ postgresql_data_dir }}" + owner: "{{ postgresql_user }}" + group: "{{ postgresql_group }}" + state: directory + mode: 0700 + +- name: Check if PostgreSQL database is initialized. 
+ stat: + path: "{{ postgresql_data_dir }}/PG_VERSION" + register: pgdata_dir_version + +- name: Ensure PostgreSQL database is initialized. + command: "{{ postgresql_bin_path }}/initdb -D {{ postgresql_data_dir }}" + when: not pgdata_dir_version.stat.exists + become: yes + become_user: "{{ postgresql_user }}" + # See: https://github.com/ansible/ansible/issues/16048#issuecomment-229012509 + vars: + ansible_ssh_pipelining: true diff --git a/ansible/roles/postgresql/tasks/main.yml b/ansible/roles/postgresql/tasks/main.yml new file mode 100755 index 0000000000000000000000000000000000000000..3e51114781d7530def42532acd1488f93f5f5374 --- /dev/null +++ b/ansible/roles/postgresql/tasks/main.yml @@ -0,0 +1,26 @@ +--- +# Variable configuration. +- include: variables.yml + static: no + +# Setup/install tasks. +- include: setup-RedHat.yml + when: ansible_os_family == 'RedHat' + static: no + +- include: setup-Debian.yml + when: ansible_os_family == 'Debian' + static: no + +- include: initialize.yml +- include: configure.yml + +- name: Ensure PostgreSQL is started and enabled on boot. + service: + name: "{{ postgresql_daemon }}" + state: started + enabled: yes + +# Configure PostgreSQL. +- include: databases.yml +- include: users.yml diff --git a/ansible/roles/postgresql/tasks/setup-Debian.yml b/ansible/roles/postgresql/tasks/setup-Debian.yml new file mode 100755 index 0000000000000000000000000000000000000000..fdfcc44f0c6eb49311cbee90be59442bb914976e --- /dev/null +++ b/ansible/roles/postgresql/tasks/setup-Debian.yml @@ -0,0 +1,22 @@ +--- +- name: Ensure PostgreSQL Python libraries are installed. + apt: + name: "{{ postgresql_python_library }}" + state: installed + +- name: Ensure PostgreSQL packages are installed. + apt: + name: "{{ item }}" + state: installed + with_items: "{{ postgresql_packages }}" + +- name: Ensure all configured locales are present. + locale_gen: "name={{ item }} state=present" + with_items: "{{ postgresql_locales }}" + register: locale_gen_result + +- name: Force-restart PostgreSQL after new locales are generated. + service: + name: "{{ postgresql_daemon }}" + state: restarted + when: locale_gen_result.changed diff --git a/ansible/roles/postgresql/tasks/setup-RedHat.yml b/ansible/roles/postgresql/tasks/setup-RedHat.yml new file mode 100755 index 0000000000000000000000000000000000000000..d0ce7c7de2eb082ef9f8ea778fdeda2557f73e43 --- /dev/null +++ b/ansible/roles/postgresql/tasks/setup-RedHat.yml @@ -0,0 +1,13 @@ +--- +- name: Ensure PostgreSQL packages are installed. + package: + name: "{{ item }}" + state: installed + enablerepo: "{{ postgresql_enablerepo }}" + with_items: "{{ postgresql_packages }}" + +- name: Ensure PostgreSQL Python libraries are installed. + package: + name: "{{ postgresql_python_library }}" + state: installed + enablerepo: "{{ postgresql_enablerepo }}" diff --git a/ansible/roles/postgresql/tasks/users.yml b/ansible/roles/postgresql/tasks/users.yml new file mode 100755 index 0000000000000000000000000000000000000000..33e5ebdbba049e54d20d48c12695b279608d01a3 --- /dev/null +++ b/ansible/roles/postgresql/tasks/users.yml @@ -0,0 +1,21 @@ +--- +- name: Ensure PostgreSQL users are present. 
+ postgresql_user: + name: "{{ item.name }}" + password: "{{ item.password | default(omit) }}" + priv: "{{ item.priv | default(omit) }}" + role_attr_flags: "{{ item.role_attr_flags | default(omit) }}" + db: "{{ item.db | default(omit) }}" + login_host: "{{ item.login_host | default('localhost') }}" + login_password: "{{ item.login_password | default(omit) }}" + login_user: "{{ item.login_user | default(postgresql_user) }}" + login_unix_socket: "{{ item.login_unix_socket | default(postgresql_unix_socket_directories[0]) }}" + port: "{{ item.port | default(omit) }}" + state: "{{ item.state | default('present') }}" + with_items: "{{ postgresql_users }}" + no_log: true + become: yes + become_user: "{{ postgresql_user }}" + # See: https://github.com/ansible/ansible/issues/16048#issuecomment-229012509 + vars: + ansible_ssh_pipelining: true diff --git a/ansible/roles/postgresql/tasks/variables.yml b/ansible/roles/postgresql/tasks/variables.yml new file mode 100755 index 0000000000000000000000000000000000000000..530990d7ea5e9f224afa357e3a673a2dfac41a45 --- /dev/null +++ b/ansible/roles/postgresql/tasks/variables.yml @@ -0,0 +1,39 @@ +--- +# Variable configuration. +- name: Include OS-specific variables (Debian). + include_vars: "{{ ansible_distribution }}-{{ ansible_distribution_version.split('.')[0] }}.yml" + when: ansible_os_family == 'Debian' + +- name: Include OS-specific variables (RedHat). + include_vars: "{{ ansible_os_family }}-{{ ansible_distribution_version.split('.')[0] }}.yml" + when: ansible_os_family == 'RedHat' + +- name: Define postgresql_packages. + set_fact: + postgresql_packages: "{{ __postgresql_packages | list }}" + when: postgresql_packages is not defined + +- name: Define postgresql_daemon. + set_fact: + postgresql_daemon: "{{ __postgresql_daemon }}" + when: postgresql_daemon is not defined + +- name: Define postgresql_version. + set_fact: + postgresql_version: "{{ __postgresql_version }}" + when: postgresql_version is not defined + +- name: Define postgresql_data_dir. + set_fact: + postgresql_data_dir: "{{ __postgresql_data_dir }}" + when: postgresql_data_dir is not defined + +- name: Define postgresql_bin_path. + set_fact: + postgresql_bin_path: "{{ __postgresql_bin_path }}" + when: postgresql_bin_path is not defined + +- name: Define postgresql_config_path. 
+ set_fact: + postgresql_config_path: "{{ __postgresql_config_path }}" + when: postgresql_config_path is not defined diff --git a/ansible/roles/postgresql/templates/pg_hba.conf.j2 b/ansible/roles/postgresql/templates/pg_hba.conf.j2 new file mode 100755 index 0000000000000000000000000000000000000000..05cc8a0abe28e6e4e5ca56c182303687df763c85 --- /dev/null +++ b/ansible/roles/postgresql/templates/pg_hba.conf.j2 @@ -0,0 +1,9 @@ +{{ ansible_managed | comment }} +# PostgreSQL Client Authentication Configuration File +# =================================================== +# +# See: https://www.postgresql.org/docs/current/static/auth-pg-hba-conf.html + +{% for client in postgresql_hba_entries %} +{{ client.type }} {{ client.database }} {{ client.user }} {{ client.address|default('') }} {{ client.ip_address|default('') }} {{ client.ip_mask|default('') }} {{ client.auth_method }} {{ client.auth_options|default("") }} +{% endfor %} diff --git a/ansible/roles/postgresql/templates/postgres.sh.j2 b/ansible/roles/postgresql/templates/postgres.sh.j2 new file mode 100755 index 0000000000000000000000000000000000000000..72640647defd01d06dfd0c6ff48d2edf8053febf --- /dev/null +++ b/ansible/roles/postgresql/templates/postgres.sh.j2 @@ -0,0 +1,2 @@ +export PGDATA={{ postgresql_data_dir }} +export PATH=$PATH:{{ postgresql_bin_path }} diff --git a/ansible/roles/postgresql/tests/README.md b/ansible/roles/postgresql/tests/README.md new file mode 100755 index 0000000000000000000000000000000000000000..6fb211721f9bee8f01ecbc7b0609dbfbbdda48cb --- /dev/null +++ b/ansible/roles/postgresql/tests/README.md @@ -0,0 +1,11 @@ +# Ansible Role tests + +To run the test playbook(s) in this directory: + + 1. Install and start Docker. + 1. Download the test shim (see .travis.yml file for the URL) into `tests/test.sh`: + - `wget -O tests/test.sh https://gist.githubusercontent.com/geerlingguy/73ef1e5ee45d8694570f334be385e181/raw/` + 1. Make the test shim executable: `chmod +x tests/test.sh`. + 1. Run (from the role root directory) `distro=[distro] playbook=[playbook] ./tests/test.sh` + +If you don't want the container to be automatically deleted after the test playbook is run, add the following environment variables: `cleanup=false container_id=$(date +%s)` diff --git a/ansible/roles/postgresql/tests/test.yml b/ansible/roles/postgresql/tests/test.yml new file mode 100755 index 0000000000000000000000000000000000000000..45dd9b554f2a95da5b29bd1aeba27e8ad4864cbf --- /dev/null +++ b/ansible/roles/postgresql/tests/test.yml @@ -0,0 +1,24 @@ +--- +- hosts: all + + vars: + postgresql_databases: + - name: example + postgresql_users: + - name: jdoe + + pre_tasks: + - name: Update apt cache. + apt: update_cache=yes cache_valid_time=600 + when: ansible_os_family == 'Debian' + + - name: Set custom variables for old CentOS 6 PostgreSQL install. 
+ set_fact: + postgresql_hba_entries: [] + postgresql_global_config_options: + - option: unix_socket_directory + value: '{{ postgresql_unix_socket_directories[0] }}' + when: ansible_os_family == 'RedHat' and ansible_distribution_version.split('.')[0] == '6' + + roles: + - role_under_test diff --git a/ansible/roles/postgresql/vars/Debian-7.yml b/ansible/roles/postgresql/vars/Debian-7.yml new file mode 100755 index 0000000000000000000000000000000000000000..4d3a8ead0b2d886546a5ad159d078cd8a9800548 --- /dev/null +++ b/ansible/roles/postgresql/vars/Debian-7.yml @@ -0,0 +1,10 @@ +--- +__postgresql_version: "9.1" +__postgresql_data_dir: "/var/lib/postgresql/{{ __postgresql_version }}/main" +__postgresql_bin_path: "/usr/lib/postgresql/{{ __postgresql_version }}/bin" +__postgresql_config_path: "/etc/postgresql/{{ __postgresql_version }}/main" +__postgresql_daemon: postgresql +__postgresql_packages: + - postgresql + - postgresql-contrib + - libpq-dev \ No newline at end of file diff --git a/ansible/roles/postgresql/vars/Debian-8.yml b/ansible/roles/postgresql/vars/Debian-8.yml new file mode 100755 index 0000000000000000000000000000000000000000..f6ad232b51f6eced9f52b7f777940f8c67fffc04 --- /dev/null +++ b/ansible/roles/postgresql/vars/Debian-8.yml @@ -0,0 +1,10 @@ +--- +__postgresql_version: "9.4" +__postgresql_data_dir: "/var/lib/postgresql/{{ __postgresql_version }}/main" +__postgresql_bin_path: "/usr/lib/postgresql/{{ __postgresql_version }}/bin" +__postgresql_config_path: "/etc/postgresql/{{ __postgresql_version }}/main" +__postgresql_daemon: postgresql +__postgresql_packages: + - postgresql + - postgresql-contrib + - libpq-dev diff --git a/ansible/roles/postgresql/vars/Debian-9.yml b/ansible/roles/postgresql/vars/Debian-9.yml new file mode 100755 index 0000000000000000000000000000000000000000..7b57d53241143a4cc1e8aa46a4c64c0e8e450179 --- /dev/null +++ b/ansible/roles/postgresql/vars/Debian-9.yml @@ -0,0 +1,10 @@ +--- +__postgresql_version: "9.6" +__postgresql_data_dir: "/var/lib/postgresql/{{ __postgresql_version }}/main" +__postgresql_bin_path: "/usr/lib/postgresql/{{ __postgresql_version }}/bin" +__postgresql_config_path: "/etc/postgresql/{{ __postgresql_version }}/main" +__postgresql_daemon: postgresql +__postgresql_packages: + - postgresql + - postgresql-contrib + - libpq-dev diff --git a/ansible/roles/postgresql/vars/RedHat-6.yml b/ansible/roles/postgresql/vars/RedHat-6.yml new file mode 100755 index 0000000000000000000000000000000000000000..8923c50dd952a19db4ac7686954b300777c36406 --- /dev/null +++ b/ansible/roles/postgresql/vars/RedHat-6.yml @@ -0,0 +1,11 @@ +--- +__postgresql_version: "8.4" +__postgresql_data_dir: "/var/lib/pgsql/data" +__postgresql_bin_path: "/usr/bin" +__postgresql_config_path: "/var/lib/pgsql/data" +__postgresql_daemon: postgresql +__postgresql_packages: + - postgresql + - postgresql-server + - postgresql-contrib + - postgresql-libs diff --git a/ansible/roles/postgresql/vars/RedHat-7.yml b/ansible/roles/postgresql/vars/RedHat-7.yml new file mode 100755 index 0000000000000000000000000000000000000000..1d5c517dff348e2a2e0402c99c2eb8c4594205a6 --- /dev/null +++ b/ansible/roles/postgresql/vars/RedHat-7.yml @@ -0,0 +1,11 @@ +--- +__postgresql_version: "9.2" +__postgresql_data_dir: "/var/lib/pgsql/data" +__postgresql_bin_path: "/usr/bin" +__postgresql_config_path: "/var/lib/pgsql/data" +__postgresql_daemon: postgresql +__postgresql_packages: + - postgresql + - postgresql-server + - postgresql-contrib + - postgresql-libs diff --git 
a/ansible/roles/postgresql/vars/Ubuntu-14.yml b/ansible/roles/postgresql/vars/Ubuntu-14.yml new file mode 100755 index 0000000000000000000000000000000000000000..bd6c174757c4871325c65b49845b00bf5399fd04 --- /dev/null +++ b/ansible/roles/postgresql/vars/Ubuntu-14.yml @@ -0,0 +1,10 @@ +--- +__postgresql_version: "9.3" +__postgresql_data_dir: "/var/lib/postgresql/{{ __postgresql_version }}/main" +__postgresql_bin_path: "/usr/lib/postgresql/{{ __postgresql_version }}/bin" +__postgresql_config_path: "/etc/postgresql/{{ __postgresql_version }}/main" +__postgresql_daemon: postgresql +__postgresql_packages: + - postgresql + - postgresql-contrib + - libpq-dev diff --git a/ansible/roles/postgresql/vars/Ubuntu-16.yml b/ansible/roles/postgresql/vars/Ubuntu-16.yml new file mode 100755 index 0000000000000000000000000000000000000000..cf2ebb8dd983fbc00a7107f5586dc074ffbecc74 --- /dev/null +++ b/ansible/roles/postgresql/vars/Ubuntu-16.yml @@ -0,0 +1,10 @@ +--- +__postgresql_version: "9.5" +__postgresql_data_dir: "/var/lib/postgresql/{{ __postgresql_version }}/main" +__postgresql_bin_path: "/usr/lib/postgresql/{{ __postgresql_version }}/bin" +__postgresql_config_path: "/etc/postgresql/{{ __postgresql_version }}/main" +__postgresql_daemon: postgresql +__postgresql_packages: + - postgresql + - postgresql-contrib + - libpq-dev diff --git a/ansible/roles/remote-postgresql-db/defaults/main.yml b/ansible/roles/remote-postgresql-db/defaults/main.yml new file mode 100644 index 0000000000000000000000000000000000000000..6b03ddba2812ac814990e1fca0b37460602a3282 --- /dev/null +++ b/ansible/roles/remote-postgresql-db/defaults/main.yml @@ -0,0 +1,7 @@ +db_host: localhost +db_port: 5432 +db_admin_user: postgres +db_admin_password: postgres +db_encoding: UTF-8 +db_lc_collate: en_US.UTF-8 +db_lc_ctype: en_US.UTF-8 diff --git a/ansible/roles/remote-postgresql-db/tasks/main.yml b/ansible/roles/remote-postgresql-db/tasks/main.yml new file mode 100644 index 0000000000000000000000000000000000000000..e26b25c7c1703245df9a555d23d4d06b9d32036e --- /dev/null +++ b/ansible/roles/remote-postgresql-db/tasks/main.yml @@ -0,0 +1,23 @@ +--- +- name: Ensure database is created + postgresql_db: name={{ db_name }} + login_host={{ db_host }} + port={{ db_port }} + login_user={{ db_admin_user }} + login_password={{ db_admin_password }} + encoding='{{ db_encoding }}' + lc_collate='{{ db_lc_collate }}' + lc_ctype='{{ db_lc_ctype }}' + state=present + +- name: Ensure user has access to the database + postgresql_user: name={{ db_user }} + password={{ db_password }} + no_password_changes=true + priv=ALL + state=present + login_host={{ db_host }} + port={{ db_port }} + login_user={{ db_admin_user }} + login_password={{ db_admin_password }} + db={{ db_name }} \ No newline at end of file diff --git a/ansible/roles/stack-adminutil/tasks/main.yml b/ansible/roles/stack-adminutil/tasks/main.yml new file mode 100644 index 0000000000000000000000000000000000000000..21e989b75ca6b096a585d7171b30cd28255015d0 --- /dev/null +++ b/ansible/roles/stack-adminutil/tasks/main.yml @@ -0,0 +1,22 @@ +--- +- name: Ensure env directory exists + file: + path: /home/deployer/env + state: directory + owner: root + group: root + +- name: Ensure stack directory exists + file: + path: /home/deployer/stack + state: directory + owner: root + group: root + +- name: Save stack file + template: src=stack-adminutil.yml dest=/home/deployer/stack/stack-adminutil.yml mode=0644 + +- name: Deploy stack + shell: "docker stack deploy -c stack-adminutil.yml adminutil" + args: + chdir: 
/home/deployer/stack diff --git a/ansible/roles/stack-adminutil/templates/stack-adminutil.yml b/ansible/roles/stack-adminutil/templates/stack-adminutil.yml new file mode 100644 index 0000000000000000000000000000000000000000..1fea83406bcbb40db7ee77b86041b186cdc85c45 --- /dev/null +++ b/ansible/roles/stack-adminutil/templates/stack-adminutil.yml @@ -0,0 +1,38 @@ +version: '3.1' + +services: + adminutil: + image: sunbird/adminutil:{{image_tag}} + environment: + JAVA_OPTS: -Xms{{adminutil__initial_heap_size}} -Xmx{{adminutil__max_heap_size}} + SERVER_PORT: {{ adminutil__port }} + AM_ADMIN_API_ENDPOINT: {{ kong_url }} + SPRING_PROFILES_ACTIVE: {{ adminutil__spring_profile }} + DEFAULT_CONSUMER_GROUP: {{ adminutil__default_consumer_group }} + ENDPOINTS_HEALTH_ID: {{ adminutil__health_id }} + ENDPOINTS_HEALTH_SENSITIVE: "{{ adminutil__is_health_sensitive }}" + ENDPOINTS_METRICS_ID: {{ adminutil__metrics_id }} + ENDPOINTS_METRICS_SENSITIVE: "{{ adminutil__is_metrics_sensitive }}" + ports: + - {{ adminutil__port }}:4000 + networks: + - api-manager_default + deploy: + replicas: {{ adminutil.replicas | default(1) }} + resources: + reservations: + memory: "{{ adminutil.reservation_memory | default('512M') }}" + limits: + memory: "{{ adminutil.limit_memory | default('512M') }}" + update_config: + parallelism: 1 + delay: 5s + healthcheck: + test: wget -q --spider http://localhost:4000/health || exit 1 + interval: 10s + timeout: 5s + retries: 5 + +networks: + api-manager_default: + external: true diff --git a/ansible/roles/stack-api-manager/defaults/main.yml b/ansible/roles/stack-api-manager/defaults/main.yml new file mode 100644 index 0000000000000000000000000000000000000000..a3f62e99f7b7e9e1b5c7593b87e5be5e4cfcb1c2 --- /dev/null +++ b/ansible/roles/stack-api-manager/defaults/main.yml @@ -0,0 +1,2 @@ +--- +kong_database: postgres \ No newline at end of file diff --git a/ansible/roles/stack-api-manager/tasks/main.yml b/ansible/roles/stack-api-manager/tasks/main.yml new file mode 100644 index 0000000000000000000000000000000000000000..436beb920c2d42fbd51558987458f501a23a7515 --- /dev/null +++ b/ansible/roles/stack-api-manager/tasks/main.yml @@ -0,0 +1,25 @@ +--- +- name: Ensure env directory exists + file: + path: /home/deployer/env + state: directory + owner: root + group: root + +- name: Ensure stack directory exists + file: + path: /home/deployer/stack + state: directory + owner: root + group: root + +- name: Save configurations into an env file + template: src=api-manager.env dest=/home/deployer/env/api-manager.env mode=0644 + +- name: Save stack file + template: src=stack-api-manager.yml dest=/home/deployer/stack/api-manager.yml mode=0644 + +- name: Deploy stack + shell: "docker stack deploy -c api-manager.yml api-manager" + args: + chdir: /home/deployer/stack diff --git a/ansible/roles/stack-api-manager/templates/api-manager.env b/ansible/roles/stack-api-manager/templates/api-manager.env new file mode 100644 index 0000000000000000000000000000000000000000..f8f7a6debd94aea070815f3146a3453d883c4ecc --- /dev/null +++ b/ansible/roles/stack-api-manager/templates/api-manager.env @@ -0,0 +1,7 @@ +KONG_LOG_LEVEL=info +KONG_DATABASE={{ kong_database }} +KONG_PG_HOST={{ kong_postgres_host }} +KONG_PG_SSL=true +KONG_PG_USER={{ kong_postgres_user }} +KONG_PG_PASSWORD={{ kong_postgres_password }} +KONG_PG_DATABASE={{ kong_postgres_database }} diff --git a/ansible/roles/stack-api-manager/templates/stack-api-manager.yml b/ansible/roles/stack-api-manager/templates/stack-api-manager.yml new file mode 100644 index 
0000000000000000000000000000000000000000..07ada52c2d4b40f239af767e797a92e18dea7690 --- /dev/null +++ b/ansible/roles/stack-api-manager/templates/stack-api-manager.yml @@ -0,0 +1,56 @@ +version: '3' + +services: + kong: + image: sunbird/kong:0.9.9 + env_file: + /home/deployer/env/api-manager.env + ports: + - 8000:8000 + - 8001:8001 + networks: + - api-manager_default + - "{{ sunbird_network }}" + deploy: + replicas: {{ kong.replicas | default(1) }} + resources: + reservations: + memory: "{{ kong.reservation_memory | default('64M') }}" + limits: + memory: "{{ kong.limit_memory | default('256M') }}" + update_config: + parallelism: 1 + delay: 30s + healthcheck: + test: curl -f http://localhost:8001/status || exit 1 + interval: 10s + timeout: 5s + retries: 10 + + echo: + image: "{{hub_org}}/{{echo_server_image_name}}:{{echo_server_image_tag}}" + networks: + - api-manager_default + ports: + - 9595:9595 + deploy: + replicas: {{ echo_service.replicas | default(1) }} + resources: + reservations: + memory: "{{ echo_service.reservation_memory | default('8M') }}" + limits: + memory: "{{ echo_service.limit_memory | default('16M') }}" + update_config: + parallelism: 1 + delay: 5s + healthcheck: + test: wget -q --spider http://localhost:9595/hello || exit 1 + interval: 5s + timeout: 5s + retries: 3 + +networks: + api-manager_default: + external: true + {{ sunbird_network }}: + external: true diff --git a/ansible/roles/stack-keycloak/tasks/common.yml b/ansible/roles/stack-keycloak/tasks/common.yml new file mode 100644 index 0000000000000000000000000000000000000000..14291ed1f3fc2de79fa660dc9fafba4302b82c76 --- /dev/null +++ b/ansible/roles/stack-keycloak/tasks/common.yml @@ -0,0 +1,29 @@ +--- +- name: Ensure env directory exists + file: + path: /home/deployer/env + state: directory + owner: root + group: root + +- name: Ensure stack directory exists + file: + path: /home/deployer/stack + state: directory + owner: root + group: root + +# - name: Save actor configurations into an env file +# template: src=sunbird_actor.env dest=/home/deployer/env/sunbird_actor.env mode=0644 + +# - name: Save content configurations into an env file +# template: src=sunbird_content.env dest=/home/deployer/env/sunbird_content.env mode=0644 + +# - name: Save learner configurations into an env file +# template: src=sunbird_learner.env dest=/home/deployer/env/sunbird_learner.env mode=0644 + +# - name: Save player configurations into an env file +# template: src=sunbird_player.env dest=/home/deployer/env/sunbird_player.env mode=0644 + +- name: Save service configurations into an env file + template: src="sunbird_{{service_name}}.env" dest="/home/deployer/env/sunbird_{{service_name}}.env" mode=0644 diff --git a/ansible/roles/stack-keycloak/tasks/keycloak1_service.yml b/ansible/roles/stack-keycloak/tasks/keycloak1_service.yml new file mode 100644 index 0000000000000000000000000000000000000000..278b1d1b4e584842dc2adb7cff8364bb99aae980 --- /dev/null +++ b/ansible/roles/stack-keycloak/tasks/keycloak1_service.yml @@ -0,0 +1,9 @@ +--- +- name: Remove keycloak1 service + shell: "docker service rm keycloak1" + ignore_errors: yes + +- name: Deploy keycloak1 service + shell: "docker service create --replicas {{ keycloak1.replicas | default(1) }} -p 8080:8080 --name keycloak1 --hostname keycloak1 --reserve-memory {{ keycloak1.reservation_memory | default('768M') }} --limit-memory {{ keycloak1.limit_memory | default('1024M') }} --network {{ sunbird_network }} --env-file /home/deployer/env/sunbird_keycloak1.env 
{{hub_org}}/{{image_name}}:{{image_tag}}" + args: + chdir: /home/deployer/stack diff --git a/ansible/roles/stack-keycloak/tasks/keycloak2_service.yml b/ansible/roles/stack-keycloak/tasks/keycloak2_service.yml new file mode 100644 index 0000000000000000000000000000000000000000..130368099e293679824578feafcb0fda90993763 --- /dev/null +++ b/ansible/roles/stack-keycloak/tasks/keycloak2_service.yml @@ -0,0 +1,9 @@ +--- +- name: Remove keycloak2 service + shell: "docker service rm keycloak2" + ignore_errors: yes + +- name: Deploy keycloak2 service + shell: "docker service create --replicas {{ keycloak2.replicas | default(1) }} -p 8082:8080 --name keycloak2 --hostname keycloak2 --reserve-memory {{ keycloak2.reservation_memory | default('768M') }} --limit-memory {{ keycloak2.limit_memory | default('1024M') }} --network {{ sunbird_network }} --env-file /home/deployer/env/sunbird_keycloak2.env {{hub_org}}/{{image_name}}:{{image_tag}}" + args: + chdir: /home/deployer/stack diff --git a/ansible/roles/stack-keycloak/tasks/main.yml b/ansible/roles/stack-keycloak/tasks/main.yml new file mode 100644 index 0000000000000000000000000000000000000000..3001b89de9a15290f5a71e09a3c6150b8bafae72 --- /dev/null +++ b/ansible/roles/stack-keycloak/tasks/main.yml @@ -0,0 +1,7 @@ +- include: common.yml + +- include: keycloak1_service.yml + when: deploy_keycloak1 is defined + +- include: keycloak2_service.yml + when: deploy_keycloak2 is defined diff --git a/ansible/roles/stack-keycloak/templates/sunbird_keycloak1.env b/ansible/roles/stack-keycloak/templates/sunbird_keycloak1.env new file mode 100644 index 0000000000000000000000000000000000000000..f0b9b5b7395e20a03d907644cb91d3f1736c5852 --- /dev/null +++ b/ansible/roles/stack-keycloak/templates/sunbird_keycloak1.env @@ -0,0 +1,6 @@ +KEYCLOAK_LOGLEVEL=DEBUG +POSTGRES_PORT_5432_TCP_ADDR={{keycloak_postgres_host}} +POSTGRES_PORT_5432_TCP_PORT=5432 +POSTGRES_USER={{keycloak_postgres_user}} +POSTGRES_PASSWORD={{keycloak_postgres_password}} +TCPPING_INITIAL_HOSTS=keycloak1 \ No newline at end of file diff --git a/ansible/roles/stack-keycloak/templates/sunbird_keycloak2.env b/ansible/roles/stack-keycloak/templates/sunbird_keycloak2.env new file mode 100644 index 0000000000000000000000000000000000000000..bab8522c01c213eb28563d410198fef3e3333030 --- /dev/null +++ b/ansible/roles/stack-keycloak/templates/sunbird_keycloak2.env @@ -0,0 +1,6 @@ +KEYCLOAK_LOGLEVEL=DEBUG +POSTGRES_PORT_5432_TCP_ADDR={{keycloak_postgres_host}} +POSTGRES_PORT_5432_TCP_PORT=5432 +POSTGRES_USER={{keycloak_postgres_user}} +POSTGRES_PASSWORD={{keycloak_postgres_password}} +TCPPING_INITIAL_HOSTS=keycloak2 \ No newline at end of file diff --git a/ansible/roles/stack-logger/tasks/main.yml b/ansible/roles/stack-logger/tasks/main.yml new file mode 100644 index 0000000000000000000000000000000000000000..6120b30b81a74e5d490fd28db57f02e40726848f --- /dev/null +++ b/ansible/roles/stack-logger/tasks/main.yml @@ -0,0 +1,57 @@ +--- + +- name: Ensure stack directory exists + file: + path: /home/deployer/stack + state: directory + owner: root + group: root + +- name: Ensure config directory exists + file: + path: /home/deployer/config + state: directory + owner: root + group: root + +- name: Save stack file + template: src=stack-logger.yml dest=/home/deployer/stack/logger.yml mode=0644 + +- name: Save logstash config + template: src=logstash.conf dest=/home/deployer/config/logstash.conf mode=0644 + +- name: Save kibana config + template: src=kibana.yml dest=/home/deployer/config/kibana.yml mode=0644 + +- name: Save 
authenticated email file + template: src=authenticated_email.txt dest=/home/deployer/config/authenticated_email.txt mode=0644 + +- name: Remove logger stack + shell: "docker stack rm logger" + ignore_errors: yes + +- name: Remove old logstash docker config + shell: "docker config rm logstash.conf" + ignore_errors: yes + +- name: Remove old kibana docker config + shell: "docker config rm kibana.yml" + ignore_errors: yes + +- name: Remove old authenticated email docker config + shell: "docker config rm authenticated_email_kibana_oauth" + ignore_errors: yes + +- name: Save logstash config as docker config + shell: "docker config create logstash.conf /home/deployer/config/logstash.conf" + +- name: Save kibana config as docker config + shell: "docker config create kibana.yml /home/deployer/config/kibana.yml" + +- name: Save authenticated emails as docker config + shell: "docker config create authenticated_email_kibana_oauth /home/deployer/config/authenticated_email.txt" + +- name: Deploy stack + shell: "docker stack deploy -c logger.yml logger" + args: + chdir: /home/deployer/stack diff --git a/ansible/roles/stack-logger/templates/authenticated_email.txt b/ansible/roles/stack-logger/templates/authenticated_email.txt new file mode 100644 index 0000000000000000000000000000000000000000..e3338cb32a3c5339ea54e73aa1e30ca7901898ad --- /dev/null +++ b/ansible/roles/stack-logger/templates/authenticated_email.txt @@ -0,0 +1,2 @@ +shailesh.kochhar@gmail.com +vijetha.nayak@riflexions.com \ No newline at end of file diff --git a/ansible/roles/stack-logger/templates/kibana.yml b/ansible/roles/stack-logger/templates/kibana.yml new file mode 100644 index 0000000000000000000000000000000000000000..b4e789aa55a95b9ef89b13549039424fff5d4a22 --- /dev/null +++ b/ansible/roles/stack-logger/templates/kibana.yml @@ -0,0 +1,67 @@ +# Kibana is served by a back end server. This controls which port to use. +port: 5601 + +server.basePath: "/dashboard" + +# The host to bind the server to. +host: "0.0.0.0" + +# The Elasticsearch instance to use for all your queries. +# This would use local elasticsearch as recommended in +# https://www.elastic.co/guide/en/kibana/current/production.html#load-balancing +elasticsearch_url: "http://{{logger_es_host}}:{{logger_es_port}}" + +# preserve_elasticsearch_host true will send the hostname specified in `elasticsearch`. If you set it to false, +# then the host you use to connect to *this* Kibana instance will be sent. +elasticsearch_preserve_host: true + +# Kibana uses an index in Elasticsearch to store saved searches, visualizations +# and dashboards. It will create a new index if it doesn't already exist. +kibana_index: ".kibana" + +# If your Elasticsearch is protected with basic auth, this is the user credentials +# used by the Kibana server to perform maintenance on the kibana_index at startup. Your Kibana +# users will still need to authenticate with Elasticsearch (which is proxied through +# the Kibana server) +# kibana_elasticsearch_username: user +# kibana_elasticsearch_password: pass + + +# The default application to load. +default_app_id: "dashboard" + +# Time in milliseconds to wait for responses from the back end or elasticsearch. +# This must be > 0 +request_timeout: 300000 + +# Time in milliseconds for Elasticsearch to wait for responses from shards. +# Set to 0 to disable. +shard_timeout: 0 + +# Set to false to have a complete disregard for the validity of the SSL +# certificate. 
+verify_ssl: true + +# If you need to provide a CA certificate for your Elasticsearch instance, put +# the path of the pem file here. +# ca: /path/to/your/CA.pem + +# SSL for outgoing requests from the Kibana Server (PEM formatted) +# ssl_key_file: /path/to/your/server.key +# ssl_cert_file: /path/to/your/server.crt + +# Set the path to where you would like the process id file to be created. +# pid_file: /var/run/kibana.pid + +# Plugins that are included in the build, and no longer found in the plugins/ folder +bundled_plugin_ids: + - plugins/dashboard/index + - plugins/discover/index + - plugins/doc/index + - plugins/kibana/index + - plugins/markdown_vis/index + - plugins/metric_vis/index + - plugins/settings/index + - plugins/table_vis/index + - plugins/vis_types/index + - plugins/visualize/index diff --git a/ansible/roles/stack-logger/templates/logstash.conf b/ansible/roles/stack-logger/templates/logstash.conf new file mode 100644 index 0000000000000000000000000000000000000000..f3b7d188c20ee7adb59b3b604146c4cb78547cc6 --- /dev/null +++ b/ansible/roles/stack-logger/templates/logstash.conf @@ -0,0 +1,35 @@ +input { + syslog { port => 51415 } + beats { + port => 5044 + codec => "json" + } +} + +filter { + if [message] =~ /(Time has been changed|.timer)/ { drop{} } + if [type] == "log" { + grok { + match => [ "message", "%{SYSLOGLINE}" ] + overwrite => [ "message" ] + } + grok { + match => [ "message", "%{GREEDYDATA:program}\[%{DATA:pid}\] %{GREEDYDATA:message}"] + overwrite => ["program","pid","message"] + } + date { + match => ["timestamp", "MMM d HH:mm:ss", "MMM dd HH:mm:ss", "ISO8601"] + remove_field => "timestamp" + } + } +} + +output { + elasticsearch { + hosts => ["{{logger_es_host}}:{{logger_es_port}}"] + } + # Remove in production + stdout { + codec => rubydebug + } +} diff --git a/ansible/roles/stack-logger/templates/stack-logger.yml b/ansible/roles/stack-logger/templates/stack-logger.yml new file mode 100644 index 0000000000000000000000000000000000000000..1eb7a7aa53ed489a6e624e6f71c735fe64585b1e --- /dev/null +++ b/ansible/roles/stack-logger/templates/stack-logger.yml @@ -0,0 +1,96 @@ +version: '3.3' + +services: + kibana: + image: kibana:4.6 + environment: + - LOGSPOUT=ignore + depends_on: + - logger_elasticsearch + ports: + - '5601:5601' + configs: + - source: kibana.yml + target: /opt/kibana/config/kibana.yml + networks: + - logger + + logstash: + image: logstash:2.4.1 + command: logstash -f /conf/logstash.conf + deploy: + replicas: 2 + environment: + - LOGSPOUT=ignore + configs: + - source: logstash.conf + target: /conf/logstash.conf + depends_on: + - logger_elasticsearch + ports: + - '51415:51415' + - '5044:5044' + networks: + - logger + + logspout: + image: gliderlabs/logspout + command: syslog+tcp://logger_logstash:51415 + deploy: + mode: global + environment: + - SYSLOG_FORMAT=rfc3164 + depends_on: + - logger_logstash + volumes: + - '/var/run/docker.sock:/tmp/docker.sock' + networks: + - logger + - api-manager_default + - jenkins_default + - monitor + - "{{ sunbird_network }}" + + oauth: + image: mryu/oauth2-proxy + command: | + -cookie-secure=false + -upstream=http://kibana:5601/ + -redirect-url={{kibana_oauth_redirect_url}} + -http-address=0.0.0.0:4111 + -email-domain=sahajsoft.com + -email-domain=ilimi.in + -email-domain=ekstep.org + -email-domain=tarento.com + -authenticated-emails-file=/authenticated_email.txt + environment: + - OAUTH2_PROXY_CLIENT_ID={{google_client_id}} + - OAUTH2_PROXY_CLIENT_SECRET={{google_client_secret}} + - 
OAUTH2_PROXY_COOKIE_SECRET={{cookie_secret}} + ports: + - "4111:4111" + configs: + - source: authenticated_email_kibana_oauth + target: /authenticated_email.txt + networks: + - logger + +configs: + authenticated_email_kibana_oauth: + external: true + kibana.yml: + external: true + logstash.conf: + external: true + +networks: + api-manager_default: + external: true + jenkins_default: + external: true + monitor: + external: true + {{ sunbird_network }}: + external: true + logger: + external: true diff --git a/ansible/roles/stack-monitor/defaults/main.yml b/ansible/roles/stack-monitor/defaults/main.yml new file mode 100644 index 0000000000000000000000000000000000000000..6c72d61d4223394651300b6cbd4633885def1377 --- /dev/null +++ b/ansible/roles/stack-monitor/defaults/main.yml @@ -0,0 +1,54 @@ +node_exporter: + reservation_memory: 16M + limit_memory: 32M +cadvisor: + reservation_memory: 64M + limit_memory: 128M +elasticsearch_exporter: + reservation_memory: 8M + limit_memory: 16M +postgres_exporter: + reservation_memory: 8M + limit_memory: 16M +statsd_exporter: + reservation_memory: 8M + limit_memory: 16M +blackbox_exporter: + reservation_memory: 16M + limit_memory: 32M +jsonpath_exporter: + reservation_memory: 8M + limit_memory: 16M + +enable_postgres_availability_check: true +kong_cluster_expected_number_of_nodes: "{{ kong.replicas | default(1) }}" + +monitor_config_files_dest_dir: /opt/docker/stacks/monitor/config + +monitor_config_templates: +- file_name: prometheus.yml + config_name: prometheus.yml +- file_name: alertmanagerconfig.yml + config_name: alertmanagerconfig.yml +- file_name: blackboxconfig.yml + config_name: blackboxconfig.yml +- file_name: postgresmasterqueries.yml + config_name: postgresmasterqueries.yml +- file_name: postgresslavequeries.yml + config_name: postgresslavequeries.yml +- file_name: statsd_mapping.yml + config_name: statsd_mapping.yml +- file_name: kong_cluster_exporter_config.yml + config_name: kong_cluster_exporter_config.yml +- file_name: alertrules.nodes + config_name: prom_node_rules +- file_name: alertrules.task + config_name: prom_container_rules +- file_name: alertrules.es + config_name: prom_es_rules +- file_name: alertrules.kong + config_name: prom_kong_rules +- file_name: alertrules.postgresql + config_name: prom_postgresql_rules +- file_name: jmx_httpserver.yml + config_name: jmx_httpserver.yml diff --git a/ansible/roles/stack-monitor/tasks/main.yml b/ansible/roles/stack-monitor/tasks/main.yml new file mode 100644 index 0000000000000000000000000000000000000000..73516acdd39fe264a7e4add2c9aca04d79e5e7b5 --- /dev/null +++ b/ansible/roles/stack-monitor/tasks/main.yml @@ -0,0 +1,43 @@ +--- +- name: Ensure stack directory exists + file: + path: /opt/docker/stacks/monitor/stack + state: directory + owner: '{{ root_owner }}' + group: '{{ root_group }}' + +- name: Ensure config directory exists + file: + path: "{{ monitor_config_files_dest_dir }}" + state: directory + owner: '{{ root_owner }}' + group: '{{ root_group }}' + +- name: Save stack file + template: src=stack-monitor.yml dest=/opt/docker/stacks/monitor/stack/monitor.yml mode=0644 + +- name: Display stack file + shell: "cat /opt/docker/stacks/monitor/stack/monitor.yml" + +- name: Save prometheus config {{ item.file_name }} + template: src={{ item.file_name }} dest="{{ monitor_config_files_dest_dir }}/{{ item.file_name }}" mode=0644 + with_items: "{{ monitor_config_templates }}" + +- name: Remove monitor stack + shell: "docker stack rm monitor" + ignore_errors: yes + +- name: Remove old docker config {{ 
item.config_name }} + shell: "docker config rm {{ item.config_name }}" + with_items: "{{ monitor_config_templates }}" + ignore_errors: yes + +- name: Save docker config {{ item.config_name }} + shell: "docker config create {{ item.config_name }} {{ monitor_config_files_dest_dir }}/{{ item.file_name }}" + with_items: "{{ monitor_config_templates }}" + ignore_errors: yes + +- name: Deploy stack + shell: "docker stack deploy -c monitor.yml monitor" + args: + chdir: /opt/docker/stacks/monitor/stack \ No newline at end of file diff --git a/ansible/roles/stack-monitor/templates/alertmanagerconfig.yml b/ansible/roles/stack-monitor/templates/alertmanagerconfig.yml new file mode 100644 index 0000000000000000000000000000000000000000..4bc290c16202aa4ae2c0770f8d4a77d3a40d49cc --- /dev/null +++ b/ansible/roles/stack-monitor/templates/alertmanagerconfig.yml @@ -0,0 +1,20 @@ +global: + resolve_timeout: 5m + +route: + receiver: 'slack' + repeat_interval: 15m + group_interval: 5m + group_wait: 1m + routes: + - receiver: 'slack' + +receivers: + - name: 'slack' + slack_configs: + - send_resolved: true + api_url: "{{slack_url}}" + username: 'Monitor - Alerter' + channel: "{{slack_channel}}" + text: "Instances: {% raw %}{{range .Alerts}}\n{{.Labels.instance}} : {{.Annotations.description}}{{end}}{% endraw %}" + icon_emoji: ':dart:' diff --git a/ansible/roles/stack-monitor/templates/alertrules.es b/ansible/roles/stack-monitor/templates/alertrules.es new file mode 100644 index 0000000000000000000000000000000000000000..dda7a96915ebd7d08f71d472fa77355c7f6b8521 --- /dev/null +++ b/ansible/roles/stack-monitor/templates/alertrules.es @@ -0,0 +1,17 @@ +# calculate filesystem used and free percent +elasticsearch_filesystem_data_used_percent = 100 * (elasticsearch_filesystem_data_size_bytes - elasticsearch_filesystem_data_free_bytes) / elasticsearch_filesystem_data_size_bytes +elasticsearch_filesystem_data_free_percent = 100 - elasticsearch_filesystem_data_used_percent + +# alert if too few nodes are running +ALERT ElasticsearchTooFewNodesRunning + IF elasticsearch_cluster_health_number_of_nodes < 3 + FOR 5m + LABELS {severity="critical"} + ANNOTATIONS {description="There are only {% raw %}{{$value}}{% endraw %} < 3 ElasticSearch nodes running", summary="ElasticSearch running on less than 3 nodes"} + +# alert if heap usage is over 90% +ALERT ElasticsearchHeapTooHigh + IF elasticsearch_jvm_memory_used_bytes{area="heap"} / elasticsearch_jvm_memory_max_bytes{area="heap"} > 0.9 + FOR 15m + LABELS {severity="critical"} + ANNOTATIONS {description="The heap usage is over 90% for 15m", summary="ElasticSearch node {% raw %}{{$labels.node}}{% endraw %} heap usage is high"} diff --git a/ansible/roles/stack-monitor/templates/alertrules.kong b/ansible/roles/stack-monitor/templates/alertrules.kong new file mode 100644 index 0000000000000000000000000000000000000000..628664112c964c95c7f01158a14413ef35dcffd7 --- /dev/null +++ b/ansible/roles/stack-monitor/templates/alertrules.kong @@ -0,0 +1,10 @@ +ALERT KongClusterUnhealthy +IF kong_cluster_alive_nodes != {{ kong_cluster_expected_number_of_nodes }} +FOR 5m +LABELS { + severity = "critical", +} +ANNOTATIONS { + summary = "Kong cluster is unhealthy", + description = "Number of live nodes : {% raw %}{{$value}}{% endraw %} not equal to : {{ kong_cluster_expected_number_of_nodes }}", +} diff --git a/ansible/roles/stack-monitor/templates/alertrules.nodes b/ansible/roles/stack-monitor/templates/alertrules.nodes new file mode 100644 index 
0000000000000000000000000000000000000000..88b94309a290788e6deded1b43a475c01420e516 --- /dev/null +++ b/ansible/roles/stack-monitor/templates/alertrules.nodes @@ -0,0 +1,39 @@ +ALERT high_cpu_usage_on_node + IF sum(rate(process_cpu_seconds_total[5m])) by (instance) * 100 > 70 + FOR 5m + ANNOTATIONS { + summary = "HIGH CPU USAGE WARNING ON '{% raw %}{{ $labels.instance }}{% endraw %}'", + description = "{% raw %}{{ $labels.instance }}{% endraw %} ({% raw %}{{ $labels.host }}{% endraw %}) is using a LOT of CPU. CPU usage is {% raw %}{{ humanize $value}}{% endraw %}%.", + } + +ALERT high_memory_usage_on_node + IF ((node_memory_MemTotal-node_memory_MemAvailable)/node_memory_MemTotal)*100 > 80 + FOR 5m + ANNOTATIONS { + summary = "HIGH MEMORY USAGE WARNING ON '{% raw %}{{ $labels.instance }}{% endraw %}'", + description = "{% raw %}{{ $labels.instance }}{% endraw %} ({% raw %}{{ $labels.host }}{% endraw %}) is using a LOT of MEMORY. MEMORY usage is over {% raw %}{{ humanize $value}}{% endraw %}%.", + } + +ALERT high_la_usage_on_node + IF node_load5 > 5 + FOR 5m + ANNOTATIONS { + summary = "HIGH LOAD AVERAGE WARNING ON '{% raw %}{{ $labels.instance }}{% endraw %}'", + description = "{% raw %}{{ $labels.instance }}{% endraw %} ({% raw %}{{ $labels.host }}{% endraw %}) has a high load average. The 5-minute load average is {% raw %}{{ humanize $value}}{% endraw %}.", + } + +ALERT monitoring_service_down + IF up == 0 + FOR 5m + ANNOTATIONS { + summary = "MONITORING SERVICE DOWN WARNING: NODE '{% raw %}{{ $labels.host }}{% endraw %}'", + description = "The monitoring service '{% raw %}{{ $labels.job }}{% endraw %}' is down.", + } + +ALERT node_running_out_of_disk_space + IF (node_filesystem_size{fstype="aufs", mountpoint="/"} - node_filesystem_free{fstype="aufs", mountpoint="/"}) * 100/ node_filesystem_size{fstype="aufs", mountpoint="/"} > 80 + FOR 5m + ANNOTATIONS { + summary = "LOW DISK SPACE WARNING: NODE '{% raw %}{{ $labels.instance }}{% endraw %}' ", + description = "More than 80% of disk used. Disk usage is {% raw %}{{ humanize $value }}{% endraw %}%.", + } diff --git a/ansible/roles/stack-monitor/templates/alertrules.postgresql b/ansible/roles/stack-monitor/templates/alertrules.postgresql new file mode 100644 index 0000000000000000000000000000000000000000..49841412d3df26ad531315000665414a95dd9400 --- /dev/null +++ b/ansible/roles/stack-monitor/templates/alertrules.postgresql @@ -0,0 +1,67 @@ +{% if enable_postgres_availability_check %} +ALERT PostgresUnavailable +IF pg_exporter_last_scrape_error == 1 +FOR 5m +LABELS { + severity = "critical", +} +ANNOTATIONS { + summary = "PostgreSQL unavailable", + description = "PostgreSQL unavailable as per job: {% raw %}{{$labels.job}}{% endraw %}", +} +{% endif %} + +ALERT PostgresHighNumberOfConnections +IF sum(pg_stat_database_numbackends) > 90 +FOR 5m +LABELS { + severity = "critical", +} +ANNOTATIONS { + summary = "PostgreSQL high number of connections", + description = "Number of connections is above the high water mark: {% raw %}{{$value}}{% endraw %}", +} + +ALERT PostgresMasterInRecovery +IF pg_server_standby_status_in_recovery{job="master-postgres-exporter"} == 1 +FOR 5m +LABELS { + severity = "critical", +} +ANNOTATIONS { + summary = "PostgreSQL master is in recovery", + description = "PostgreSQL master is in recovery. 
pg_server_standby_status_in_recovery: {% raw %}{{$value}}{% endraw %}", +} + +ALERT PostgresSlaveNotInRecovery +IF pg_server_standby_status_in_recovery{job="slave-postgres-exporter"} == 0 +FOR 5m +LABELS { + severity = "critical", +} +ANNOTATIONS { + summary = "PostgreSQL slave is not in recovery", + description = "PostgreSQL slave is not in recovery. pg_server_standby_status_in_recovery: {% raw %}{{$value}}{% endraw %}", +} + +ALERT PostgresHighReplicationByteLag +IF pg_stat_replication_byte_lag > 1000000 +FOR 5m +LABELS { + severity = "critical", +} +ANNOTATIONS { + summary = "PostgreSQL replication byte lag is high", + description = "The replication byte lag for slave: {% raw %}{{$labels.slave_addr}}{% endraw %} is above the high water mark: {% raw %}{{$value}}{% endraw %}", +} + +ALERT PostgresHighReplicationLagSeconds +IF pg_replication_lag > 60 +FOR 5m +LABELS { + severity = "critical", +} +ANNOTATIONS { + summary = "PostgreSQL replication lag time is high", + description = "The replication lag between the master and slave is above the high water mark: {% raw %}{{$value}}{% endraw %}", +} \ No newline at end of file diff --git a/ansible/roles/stack-monitor/templates/alertrules.task b/ansible/roles/stack-monitor/templates/alertrules.task new file mode 100644 index 0000000000000000000000000000000000000000..d7469c89d9be4ae33972a2765e1cfb288f1f0bf1 --- /dev/null +++ b/ansible/roles/stack-monitor/templates/alertrules.task @@ -0,0 +1,16 @@ +ALERT high_cpu_usage_on_container + IF sum(rate(container_cpu_usage_seconds_total{container_label_com_docker_swarm_task_name=~".+"}[1m])) by (container_label_com_docker_swarm_task_name,instance) * 100 > 70 + FOR 5m + ANNOTATIONS { + summary = "HIGH CPU USAGE WARNING: TASK '{% raw %}{{ $labels.container_label_com_docker_swarm_task_name }}{% endraw %}' on '{% raw %}{{ $labels.instance }}{% endraw %}'", + description = "{% raw %}{{ $labels.container_label_com_docker_swarm_task_name }}{% endraw %} is using a LOT of CPU. CPU usage is {% raw %}{{ humanize $value}}{% endraw %}%.", + } + +## Disabled memory alerts till prometheus is moved out into a VM +# ALERT container_eating_memory +# IF sum(# container_memory_usage_bytes{container_label_com_docker_swarm_task_name=~".+"}) by (# container_label_com_docker_swarm_task_name,instance,container_label_com_docker_swarm# _service_name) > 2800000000 +# FOR 5m +# ANNOTATIONS { +# summary = "HIGH MEMORY USAGE WARNING: TASK '{% raw %}{{ # $labels.container_label_com_docker_swarm_task_name }}{% endraw %}' on '{% raw %}{{ $labels.instance # }}{% endraw %}'", +# description = "{% raw %}{{ $labels.container_label_com_docker_swarm_service_name }}{% endraw %} is # eating up a LOT of memory. 
Memory consumption of {% raw %}{{ # $labels.container_label_com_docker_swarm_service_name }}{% endraw %} is at {% raw %}{{ humanize # $value}}{% endraw %}.", +# } diff --git a/ansible/roles/stack-monitor/templates/blackboxconfig.yml b/ansible/roles/stack-monitor/templates/blackboxconfig.yml new file mode 100644 index 0000000000000000000000000000000000000000..d611a6e82ffe83cc647a0765bd303a0124eddff9 --- /dev/null +++ b/ansible/roles/stack-monitor/templates/blackboxconfig.yml @@ -0,0 +1,36 @@ +modules: + http_2xx: + prober: http + timeout: 5s + http: + valid_http_versions: ["HTTP/1.1", "HTTP/2"] + valid_status_codes: [] # Defaults to 2xx + method: GET + composite_search: + prober: http + timeout: 5s + http: + method: POST + headers: + Content-Type: application/json + Authorization: Bearer {{ kong__test_jwt }} + body: '{ + "request": { + "filters":{ + "contentType": "Course", + "objectType": ["Content"], + "status": ["Live"] + }, + "limit": 1, + "fields": ["name", "contentType"] + } + }' + page_assemble: + prober: http + timeout: 5s + http: + method: POST + headers: + Content-Type: application/json + Authorization: Bearer {{ kong__test_jwt }} + body: '{"request": {"source": "web", "name": "Resource", "filters": {}}}' diff --git a/ansible/roles/stack-monitor/templates/jmx_httpserver.yml b/ansible/roles/stack-monitor/templates/jmx_httpserver.yml new file mode 100644 index 0000000000000000000000000000000000000000..376a5ab67c04093f76e0d36cdf20bc19147ad89b --- /dev/null +++ b/ansible/roles/stack-monitor/templates/jmx_httpserver.yml @@ -0,0 +1,15 @@ +--- +hostPort: {{ cassandra.listen_address }}:7199 +username: +password: + +whitelistObjectNames: ["org.apache.cassandra.metrics:type=ColumnFamily,*"] +rules: + - pattern: '^org.apache.cassandra.metrics<type=(\w+), name=(\w+)><>Value: (\d+)' + name: cassandra_$1_$2 + value: $3 + valueFactor: 0.001 + labels: {} + help: "Cassandra metric $1 $2" + type: GAUGE + attrNameSnakeCase: false diff --git a/ansible/roles/stack-monitor/templates/kong_cluster_exporter_config.yml b/ansible/roles/stack-monitor/templates/kong_cluster_exporter_config.yml new file mode 100644 index 0000000000000000000000000000000000000000..a3d6de17aa914b1915a8b1c1ea5c826fbbe14eb1 --- /dev/null +++ b/ansible/roles/stack-monitor/templates/kong_cluster_exporter_config.yml @@ -0,0 +1,14 @@ +exporter_port: 9158 +log_level: info +json_data_url: http://api-manager_kong:8001/cluster +metric_name_prefix: kong_cluster +metrics: +- name: total_nodes + description: Total number of nodes in kong cluster + path: $.total +- name: alive_nodes + description: Number of live nodes in kong cluster + path: count($.data[@.status is "alive"]) +- name: failed_nodes + description: Number of failed nodes in kong cluster + path: count($.data[@.status is "failed"]) diff --git a/ansible/roles/stack-monitor/templates/postgresmasterqueries.yml b/ansible/roles/stack-monitor/templates/postgresmasterqueries.yml new file mode 100644 index 0000000000000000000000000000000000000000..2bfe29fe7f667e76fad8c339cdb6b4c9720ddd06 --- /dev/null +++ b/ansible/roles/stack-monitor/templates/postgresmasterqueries.yml @@ -0,0 +1,33 @@ +pg_server_standby_status: + query: "select CASE WHEN pg_is_in_recovery() = 't' THEN 1 ELSE 0 END AS in_recovery" + metrics: + - in_recovery: + usage: "GAUGE" + description: "Server is in recovery" + +pg_database: + query: "SELECT pg_database.datname, pg_database_size(pg_database.datname) as size FROM pg_database" + metrics: + - datname: + usage: "LABEL" + description: "Name of the database" + - size: + 
usage: "GAUGE" + description: "Disk space used by the database" + +pg_stat_replication: + query: | + SELECT client_hostname AS slave_hostname + , client_addr AS slave_addr + , pg_xlog_location_diff(pg_stat_replication.sent_location, pg_stat_replication.replay_location) AS byte_lag + FROM pg_stat_replication; + metrics: + - slave_hostname: + usage: "LABEL" + description: "Host name of slave" + - slave_addr: + usage: "LABEL" + description: "Address of slave" + - byte_lag: + usage: "GAUGE" + description: "Number of bytes slave lagging behind master" \ No newline at end of file diff --git a/ansible/roles/stack-monitor/templates/postgresslavequeries.yml b/ansible/roles/stack-monitor/templates/postgresslavequeries.yml new file mode 100644 index 0000000000000000000000000000000000000000..8ee131b72176015599d7b36f10e17b76202b4d28 --- /dev/null +++ b/ansible/roles/stack-monitor/templates/postgresslavequeries.yml @@ -0,0 +1,13 @@ +pg_server_standby_status: + query: "select CASE WHEN pg_is_in_recovery() = 't' THEN 1 ELSE 0 END AS in_recovery" + metrics: + - in_recovery: + usage: "GAUGE" + description: "Server is in recovery" + +pg_replication: + query: "SELECT EXTRACT(EPOCH FROM (now() - pg_last_xact_replay_timestamp()))::INT as lag" + metrics: + - lag: + usage: "GAUGE" + description: "Replication lag behind master in seconds" diff --git a/ansible/roles/stack-monitor/templates/prometheus.yml b/ansible/roles/stack-monitor/templates/prometheus.yml new file mode 100644 index 0000000000000000000000000000000000000000..133dd9e69a2ece45d319220d890e4a6667b9fe37 --- /dev/null +++ b/ansible/roles/stack-monitor/templates/prometheus.yml @@ -0,0 +1,117 @@ +global: + scrape_interval: 15s + evaluation_interval: 15s + external_labels: + monitor: 'Prometheus-Monitor' + +rule_files: + - '/etc/prometheus-rules/alertrules.nodes' + - '/etc/prometheus-rules/alertrules.task' + - '/etc/prometheus-rules/alertrules.es' + - '/etc/prometheus-rules/alertrules.postgresql' + - '/etc/prometheus-rules/alertrules.kong' + +scrape_configs: + - job_name: 'cadvisor' + dns_sd_configs: + - names: + - 'tasks.cadvisor' + type: 'A' + port: 8080 + + - job_name: 'node-exporter' + dns_sd_configs: + - names: + - 'tasks.node-exporter' + type: 'A' + port: 9100 + + - job_name: 'alertmanager' + dns_sd_configs: + - names: + - 'tasks.alertmanager' + type: 'A' + port: 9093 + + - job_name: 'elasticsearch-exporter' + static_configs: + - targets: ['monitor_elasticsearch_exporter:9108'] + + - job_name: 'master-postgres-exporter' + static_configs: + - targets: ['monitor_master_postgres_exporter:9187'] + + {% if groups['postgresql-slave'][0] is defined %} + # This empty line ensures indentation is correct after ansible jinja2 template is materialized + - job_name: 'slave-postgres-exporter' + static_configs: + - targets: ['monitor_slave_postgres_exporter:9187'] + {% endif %} + + - job_name: 'statsd-exporter' + static_configs: + - targets: ['monitor_statsd_exporter:9102'] + + - job_name: 'kong-cluster-exporter' + static_configs: + - targets: ['monitor_kong_cluster_exporter:9158'] + + - job_name: 'availability_composite_search' + metrics_path: /probe + params: + module: [composite_search] # Look for a HTTP 200 response. + static_configs: + - targets: + - https://{{api__host}}/api/composite/v1/search + relabel_configs: + - source_labels: [__address__] + target_label: __param_target + - source_labels: [__param_target] + target_label: instance + - target_label: __address__ + replacement: monitor_blackbox-exporter:9115 # Blackbox exporter. 
+ + - job_name: 'availability_page_assemble' + metrics_path: /probe + params: + module: [page_assemble] # Look for a HTTP 200 response. + static_configs: + - targets: + - https://{{api__host}}/api/data/v1/page/assemble + relabel_configs: + - source_labels: [__address__] + target_label: __param_target + - source_labels: [__param_target] + target_label: instance + - target_label: __address__ + replacement: monitor_blackbox-exporter:9115 # Blackbox exporter. + + - job_name: 'availability_general' + metrics_path: /probe + params: + module: [http_2xx] # Look for a HTTP 200 response. + static_configs: + - targets: + - https://{{api__host}} + - https://{{api__host}}/grafana + - https://{{api__host}}/dashboard + - https://{{api__host}}/auth/realms/sunbird/protocol/openid-connect/auth?client_id=portal&state=foo&redirect_uri=https%3A%2F%2F{{api__host}}%2Fprivate%2Findex%3Fauth_callback%3D1&scope=openid&response_type=code + relabel_configs: + - source_labels: [__address__] + target_label: __param_target + - source_labels: [__param_target] + target_label: instance + - target_label: __address__ + replacement: monitor_blackbox-exporter:9115 # Blackbox exporter. + + - job_name: 'prometheus' + static_configs: + - targets: ['localhost:9090'] + + - job_name: 'cassandra-exporter' + static_configs: + - targets: ['cassandra_jmx_exporter:5556'] + + - job_name: 'vm-node-exporter' + static_configs: + - targets: {{ groups[env] | difference(["localhost"]) | map('regex_replace', '^(.*)$', '\\1:9101' ) | list | to_yaml }} diff --git a/ansible/roles/stack-monitor/templates/stack-monitor.yml b/ansible/roles/stack-monitor/templates/stack-monitor.yml new file mode 100644 index 0000000000000000000000000000000000000000..32786b03badb49927229e47b54baaf42d80b2ab4 --- /dev/null +++ b/ansible/roles/stack-monitor/templates/stack-monitor.yml @@ -0,0 +1,286 @@ +version: "3.3" + + +services: + prometheus: + image: quay.io/prometheus/prometheus:v1.7.1 + ports: + - "9090:9090" + networks: + - monitoring + volumes: + - /var/dockerdata/prometheus/data:/prometheus + command: "-config.file=/etc/prometheus/prometheus.yml -alertmanager.url http://alertmanager:9093 -storage.local.path=/prometheus -web.console.libraries=/etc/prometheus/console_libraries -storage.local.target-heap-size=157286400 -storage.local.retention=168h0m0s -web.console.templates=/etc/prometheus/consoles -web.external-url=https://{{ api__host }}/prometheus" + labels: + com.docker.stack.namespace: "monitoring" + com.docker.service.name: "prometheus" + configs: + - source: prometheus.yml + target: /etc/prometheus/prometheus.yml + - source: prom_node_rules + target: /etc/prometheus-rules/alertrules.nodes + - source: prom_container_rules + target: /etc/prometheus-rules/alertrules.task + - source: prom_es_rules + target: /etc/prometheus-rules/alertrules.es + - source: prom_postgresql_rules + target: /etc/prometheus-rules/alertrules.postgresql + - source: prom_kong_rules + target: /etc/prometheus-rules/alertrules.kong + deploy: + replicas: 1 + placement: + constraints: + - "node.labels.prometheus==1" + resources: + reservations: + memory: "{{ prometheus.reservation_memory | default('2G') }}" + limits: + memory: "{{ prometheus.limit_memory | default('4G') }}" + + blackbox-exporter: + image: quay.io/prometheus/blackbox-exporter:v0.8.1 + ports: + - "9115:9115" + command: "--config.file=/etc/blackbox-exporter/blackboxconfig.yml" + networks: + - monitoring + configs: + - source: blackboxconfig.yml + target: /etc/blackbox-exporter/blackboxconfig.yml + deploy: + resources: + 
reservations: + memory: "{{ blackbox_exporter.reservation_memory }}" + limits: + memory: "{{ blackbox_exporter.limit_memory }}" + + cadvisor: + image: blep/cadvisor_bugfix1556:v0.26.1 + ports: + - "8081:8080" + networks: + - monitoring + volumes: + - /:/rootfs:ro + - /var/run:/var/run:rw + - /sys:/sys:ro + - /var/lib/docker/:/var/lib/docker:ro + deploy: + mode: global + resources: + reservations: + memory: "{{ cadvisor.reservation_memory }}" + limits: + memory: "{{ cadvisor.limit_memory }}" + + node-exporter: + image: basi/node-exporter:v0.1.1 + ports: + - "9100:9100" + networks: + - monitoring + environment: + - HOST_HOSTNAME=/etc/hostname + volumes: + - /proc:/host/proc + - /sys:/host/sys + - /:/rootfs + - /etc/hostname:/etc/hostname + command: [ -collector.procfs=/host/proc,-collector.sysfs=/host/sys,-collector.filesystem.ignored-mount-points="^/(sys|proc|dev|host|etc)($$|/)",-collector.textfile.directory=/etc/node-exporter/] + deploy: + mode: global + resources: + reservations: + memory: "{{ node_exporter.reservation_memory }}" + limits: + memory: "{{ node_exporter.limit_memory }}" + + alertmanager: + image: prom/alertmanager:v0.8.0 + ports: + - "9093:9093" + networks: + - monitoring + volumes: + - /var/dockerdata/alertmanager/data:/etc/alertmanager/data + command: [-config.file=/etc/alertmanager/alertmanagerconfig.yml,-storage.path=/alertmanager/data] + configs: + - source: alertmanagerconfig.yml + target: /etc/alertmanager/alertmanagerconfig.yml + deploy: + replicas: 1 + placement: + constraints: + - "node.labels.monitor==1" + + elasticsearch_exporter: + image: justwatch/elasticsearch_exporter:1.0.1 + ports: + - "9108:9108" + networks: + - monitoring + command: + - '-es.uri=http://{{ monitor.es_host }}:9200' + - '-es.all=true' + labels: + com.docker.stack.namespace: "monitoring" + com.docker.service.name: "elasticsearch_exporter" + deploy: + replicas: 1 + resources: + reservations: + memory: "{{ elasticsearch_exporter.reservation_memory }}" + limits: + memory: "{{ elasticsearch_exporter.limit_memory }}" + + master_postgres_exporter: + image: wrouesnel/postgres_exporter:v0.2.2 + ports: + - 9187:9187 + networks: + - monitoring + command: + - '-extend.query-path=/etc/postgres_exporter/postgresmasterqueries.yml' + environment: + - DATA_SOURCE_NAME=postgresql://{{ postgres_exporter_user }}:{{ postgres_exporter_password }}@{{ groups['postgresql-master'][0]}}:{{ postgres_exporter_postgres_port }}/postgres?sslmode=disable + configs: + - source: postgresmasterqueries.yml + target: /etc/postgres_exporter/postgresmasterqueries.yml + deploy: + resources: + reservations: + memory: "{{ postgres_exporter.reservation_memory }}" + limits: + memory: "{{ postgres_exporter.limit_memory }}" + + + {% if groups['postgresql-slave'][0] is defined %} + # This empty line ensures indentation is correct after ansible jinja2 template is materialized + slave_postgres_exporter: + image: wrouesnel/postgres_exporter:v0.2.2 + ports: + - 9188:9187 + networks: + - monitoring + command: + - '-extend.query-path=/etc/postgres_exporter/postgresslavequeries.yml' + environment: + - DATA_SOURCE_NAME=postgresql://{{ postgres_exporter_user }}:{{ postgres_exporter_password }}@{{ groups['postgresql-slave'][0]}}:{{ postgres_exporter_postgres_port }}/postgres?sslmode=disable + configs: + - source: postgresslavequeries.yml + target: /etc/postgres_exporter/postgresslavequeries.yml + deploy: + resources: + reservations: + memory: "{{ postgres_exporter.reservation_memory }}" + limits: + memory: "{{ postgres_exporter.limit_memory }}" + 
{% endif %} + + statsd_exporter: + image: prom/statsd-exporter:master + ports: + - 9102:9102 + - 9125:9125 + - 9125:9125/udp + networks: + - monitoring + - api-manager_default + command: + - '-statsd.mapping-config=/etc/statsd_exporter/statsd_mapping.yml' + configs: + - source: statsd_mapping.yml + target: /etc/statsd_exporter/statsd_mapping.yml + deploy: + resources: + reservations: + memory: "{{ statsd_exporter.reservation_memory }}" + limits: + memory: "{{ statsd_exporter.limit_memory }}" + + kong_cluster_exporter: + image: sunbird/prometheus-jsonpath-exporter:v0.0.1 + ports: + - 9158:9158 + networks: + - monitoring + - api-manager_default + command: /etc/kong_cluster_exporter/config.yml + configs: + - source: kong_cluster_exporter_config.yml + target: /etc/kong_cluster_exporter/config.yml + deploy: + resources: + reservations: + memory: "{{ jsonpath_exporter.reservation_memory }}" + limits: + memory: "{{ jsonpath_exporter.limit_memory }}" + + grafana: + image: grafana/grafana:4.4.3 + ports: + - "3001:3000" + networks: + - monitoring + volumes: + - /var/dockerdata/grafana:/var/lib/grafana + command: -e "PROMETHEUS_ENDPOINT=http://prometheus:9090" + environment: + - GF_SERVER_ROOT_URL=http://grafana.local.com/grafana + - GF_SECURITY_ADMIN_PASSWORD={{grafana_admin_password}} + labels: + com.docker.stack.namespace: "monitoring" + com.docker.service.name: "grafana" + deploy: + replicas: 1 + placement: + constraints: + - "node.labels.grafana==1" + + cassandra_jmx_exporter: + image: sunbird/cassandra_jmx_exporter:0.11 + ports: + - "5556:5556" + environment: + - JAVA_OPTS=-Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.port=5555 + -Djava.util.logging.config.file=/opt/app/logging.properties + configs: + - source: jmx_httpserver.yml + target: /opt/app/jmx_httpserver.yml + networks: + - monitoring + +configs: + prometheus.yml: + external: true + prom_node_rules: + external: true + prom_container_rules: + external: true + prom_es_rules: + external: true + prom_postgresql_rules: + external: true + prom_kong_rules: + external: true + alertmanagerconfig.yml: + external: true + blackboxconfig.yml: + external: true + postgresmasterqueries.yml: + external: true + postgresslavequeries.yml: + external: true + statsd_mapping.yml: + external: true + jmx_httpserver.yml: + external: true + kong_cluster_exporter_config.yml: + external: true + +networks: + monitoring: + external: true + api-manager_default: + external: true diff --git a/ansible/roles/stack-monitor/templates/statsd_mapping.yml b/ansible/roles/stack-monitor/templates/statsd_mapping.yml new file mode 100644 index 0000000000000000000000000000000000000000..88855fb3daeb1154a899d5554995be1ea673910b --- /dev/null +++ b/ansible/roles/stack-monitor/templates/statsd_mapping.yml @@ -0,0 +1,26 @@ +mappings: +- match: kong.*.request.count + labels: + name: "kong_request" + api: "$1" +- match: kong.*.request.status.* + labels: + name: "kong_request_status" + api: "$1" + status: "$2" +- match: kong.*.latency + labels: + name: "kong_latency" + api: "$1" +- match: kong.*.upstream_latency + labels: + name: "kong_upstream_latency" + api: "$1" +- match: kong.*.request.size + labels: + name: "kong_request_size" + api: "$1" +- match: kong.*.response.size + labels: + name: "kong_response_size" + api: "$1" diff --git a/ansible/roles/stack-proxy/defaults/main.yml b/ansible/roles/stack-proxy/defaults/main.yml new file mode 100644 index 
0000000000000000000000000000000000000000..25b292f03dbad05797898939065917c0c4006cd6 --- /dev/null +++ b/ansible/roles/stack-proxy/defaults/main.yml @@ -0,0 +1,2 @@ +--- +hub_org: sunbird diff --git a/ansible/roles/stack-proxy/tasks/main.yml b/ansible/roles/stack-proxy/tasks/main.yml new file mode 100644 index 0000000000000000000000000000000000000000..fed5ca6bd45fb22faff1047b0db7a3f3081afdaf --- /dev/null +++ b/ansible/roles/stack-proxy/tasks/main.yml @@ -0,0 +1,50 @@ +--- +- name: Ensure env directory exists + file: + path: /home/deployer/env + state: directory + owner: root + group: root + +- name: Ensure stack directory exists + file: + path: /home/deployer/stack + state: directory + owner: root + group: root + +- name: Ensure config directory exists + file: + path: /home/deployer/config + state: directory + owner: root + group: root + +- name: Save configurations into an env file + template: src=proxy.env dest=/home/deployer/env/proxy.env mode=0644 + +- name: Save stack file + template: src=stack-proxy.yml dest=/home/deployer/stack/proxy.yml mode=0644 + +- name: Save proxy-default.conf + template: src=proxy-default.conf dest=/home/deployer/config/proxy-default.conf mode=0644 + +- name: Remove proxy stack + shell: "docker stack rm {{image_name}}-{{env}}" + ignore_errors: yes + +- name: Remove old proxy-default.conf docker config + shell: "docker config rm proxy-default.conf" + ignore_errors: yes + +- name: Save proxy-default.conf as docker config + shell: "docker config create proxy-default.conf /home/deployer/config/proxy-default.conf" + +- debug: msg="Image details= {{hub_org}}/{{image_name}}:{{image_tag}}" + +- debug: msg="server name= {{proxy.server_name}}" + +- name: Deploy stack + shell: "docker stack deploy -c proxy.yml {{image_name}}-{{env}}" + args: + chdir: /home/deployer/stack diff --git a/ansible/roles/stack-proxy/templates/proxy-default.conf b/ansible/roles/stack-proxy/templates/proxy-default.conf new file mode 100644 index 0000000000000000000000000000000000000000..66d21a5a8a67b20165238d0483edd1c631a6011f --- /dev/null +++ b/ansible/roles/stack-proxy/templates/proxy-default.conf @@ -0,0 +1,124 @@ +server { + listen 80; + listen [::]:80; + server_name {{ proxy.server_name }}; + + return 301 https://{{ proxy.server_name }}$request_uri; +} + +server { + client_max_body_size 50M; + listen 443 ssl; + server_name {{ proxy.server_name }}; + ssl_certificate /run/secrets/site.crt; + ssl_certificate_key /run/secrets/site.key; + + proxy_set_header Host $host; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header X-Forwarded-SSL on; + proxy_set_header X-Forwarded-Proto $scheme; + + ignore_invalid_headers off; #pass through headers from Jenkins which are considered invalid by Nginx server. 
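+ # The resolver below is Docker's embedded DNS (127.0.0.11); valid=5s makes
+ # nginx re-resolve service names every five seconds. Combined with the
+ # "set $target ...; proxy_pass $target;" pattern used in the location blocks
+ # further down, this defers upstream lookup to request time, so the proxy can
+ # start and keep serving even while a swarm service such as api-manager_kong
+ # is still coming up.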
+ resolver 127.0.0.11 valid=5s; + +{% if keycloak | default(False) %} + + location ~* ^/auth/realms/(.+)/token/introspect/ { + return 301 https://$host/api/auth/v1/realms/$1/token/introspect; + } + location ~* ^/auth/realms/(.+)/token/ { + return 301 https://$host/api/auth/v1/realms/$1/token/; + } + location ~* ^/auth/realms/(.+)/userinfo/ { + return 301 https://$host/api/auth/v1/realms/$1/userinfo/; + } + location ~* ^/auth/realms/(.+)/logout/ { + return 301 https://$host/api/auth/v1/realms/$1/logout/; + } + location ~* ^/auth/realms/(.+)/certs/ { + return 301 https://$host/api/auth/v1/realms/$1/certs/; + } + location ~* ^/auth/realms/(.+)/clients-registrations/ { + return 301 https://$host/api/auth/v1/realms/$1/clients-registrations/; + } + + location /auth/ { + proxy_pass {{ keycloak_url }}/auth/; + proxy_set_header Host $host; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Scheme $scheme; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header X-Forwarded-Proto $scheme; + } +{% endif %} + + location /api/ { + set $target http://api-manager_kong:8000; + rewrite ^/api/(.*) /$1 break; + proxy_pass $target; + + proxy_set_header Host $host; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Scheme $scheme; + proxy_connect_timeout 1; + proxy_send_timeout 30; + proxy_read_timeout 30; + } + + location /dashboard/ { + set $target http://logger_oauth:4111; + rewrite ^/dashboard/(.*) /$1 break; + proxy_pass $target; + proxy_set_header Host $host; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Scheme $scheme; + proxy_connect_timeout 1; + proxy_send_timeout 30; + proxy_read_timeout 30; + + sub_filter '</footer>' '</footer><script>document.getElementsByName("rd")[0] && document.getElementsByName("rd")[0].value === "/" && (document.getElementsByName("rd")[0].value = "/dashboard")</script>'; + sub_filter_once on; + } + + location /grafana/ { + set $target http://monitor_grafana:3000; + rewrite ^/grafana/(.*) /$1 break; + proxy_pass $target; + } + + location /oauth2/ { + set $target http://logger_oauth:4111; + proxy_pass $target; + } + + location /prometheus/ { + set $target http://monitor_prometheus:9090/prometheus; + #rewrite ^/prometheus/(.*) /$1 break; + proxy_pass $target; + + proxy_set_header Host $host; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Scheme $scheme; + proxy_connect_timeout 1; + proxy_send_timeout 30; + proxy_read_timeout 30; + + auth_basic "Prometheus Administration Console"; + auth_basic_user_file /run/secrets/prom_admin_creds; + } + + location / { + set $target http://player_player:3000; + rewrite ^/(.*) /$1 break; + proxy_pass $target; + + proxy_set_header Host $host; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Scheme $scheme; + proxy_connect_timeout 1; + proxy_send_timeout 30; + proxy_read_timeout 30; + proxy_set_header X-Forwarded-Proto $scheme; + } +} diff --git a/ansible/roles/stack-proxy/templates/proxy.env b/ansible/roles/stack-proxy/templates/proxy.env new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/ansible/roles/stack-proxy/templates/stack-proxy.yml b/ansible/roles/stack-proxy/templates/stack-proxy.yml new file mode 100644 index 0000000000000000000000000000000000000000..5426e09509b586e638ed56024fe9ee4940d4b4f7 --- /dev/null +++ b/ansible/roles/stack-proxy/templates/stack-proxy.yml @@ -0,0 +1,61 @@ +version: '3.3' + +services: + proxy: + image: "{{hub_org}}/{{image_name}}:{{image_tag}}" + 
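+ # The site.crt/site.key secrets listed below are surfaced by swarm inside
+ # the container at /run/secrets/<name>, which is where proxy-default.conf
+ # expects its ssl_certificate and ssl_certificate_key; prom_admin_creds is
+ # consumed the same way by the auth_basic_user_file directive guarding
+ # /prometheus/.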
ports: + - "443:443" + - "80:80" + deploy: + replicas: {{ proxy.replicas | default(1) }} + resources: + reservations: + memory: "{{ proxy.reservation_memory | default('32M') }}" + limits: + memory: "{{ proxy.limit_memory | default('128M') }}" + update_config: + parallelism: 1 + delay: 30s + secrets: + - site.key + - site.crt + - prom_admin_creds + configs: + - source: proxy-default.conf + target: /etc/nginx/conf.d/default.conf + networks: + - default + - api-manager_default + - jenkins_default + - monitor + - {{ sunbird_network }} + - logger + - monitoring + +secrets: + site.key: + external: true + site.crt: + external: true + prom_admin_creds: + external: true + +configs: + proxy-default.conf: + external: true + +networks: + api-manager_default: + external: true + jenkins_default: + external: true + monitor: + external: true + {{ sunbird_network }}: + external: true + logger: + external: true + monitoring: + external: true + default: + driver: overlay diff --git a/ansible/roles/stack-sunbird/defaults/main.yml b/ansible/roles/stack-sunbird/defaults/main.yml new file mode 100644 index 0000000000000000000000000000000000000000..ed97d539c095cf1413af30cc23dea272095b97dd --- /dev/null +++ b/ansible/roles/stack-sunbird/defaults/main.yml @@ -0,0 +1 @@ +--- diff --git a/ansible/roles/stack-sunbird/tasks/actor_service.yml b/ansible/roles/stack-sunbird/tasks/actor_service.yml new file mode 100644 index 0000000000000000000000000000000000000000..86579ce9d1bb114a4916ba42d61d84d9d15a8797 --- /dev/null +++ b/ansible/roles/stack-sunbird/tasks/actor_service.yml @@ -0,0 +1,9 @@ +--- +- name: Remove actor service + shell: "docker service rm actor-service" + ignore_errors: yes + +- name: Deploy actor service + shell: "docker service create --replicas {{ actor.replicas | default(1) }} -p 8088:8088 --name actor-service --hostname actor-service --reserve-memory {{ actor.reservation_memory | default('768M') }} --limit-memory {{ actor.limit_memory | default('1024M') }} --network {{ sunbird_network }} --env-file /home/deployer/env/sunbird_actor-service.env {{hub_org}}/{{image_name}}:{{image_tag}}" + args: + chdir: /home/deployer/stack diff --git a/ansible/roles/stack-sunbird/tasks/common.yml b/ansible/roles/stack-sunbird/tasks/common.yml new file mode 100644 index 0000000000000000000000000000000000000000..14291ed1f3fc2de79fa660dc9fafba4302b82c76 --- /dev/null +++ b/ansible/roles/stack-sunbird/tasks/common.yml @@ -0,0 +1,29 @@ +--- +- name: Ensure env directory exists + file: + path: /home/deployer/env + state: directory + owner: root + group: root + +- name: Ensure stack directory exists + file: + path: /home/deployer/stack + state: directory + owner: root + group: root + +# - name: Save actor configurations into an env file +# template: src=sunbird_actor.env dest=/home/deployer/env/sunbird_actor.env mode=0644 + +# - name: Save content configurations into an env file +# template: src=sunbird_content.env dest=/home/deployer/env/sunbird_content.env mode=0644 + +# - name: Save learner configurations into an env file +# template: src=sunbird_learner.env dest=/home/deployer/env/sunbird_learner.env mode=0644 + +# - name: Save player configurations into an env file +# template: src=sunbird_player.env dest=/home/deployer/env/sunbird_player.env mode=0644 + +- name: Save service configurations into an env file + template: src="sunbird_{{service_name}}.env" dest="/home/deployer/env/sunbird_{{service_name}}.env" mode=0644 diff --git a/ansible/roles/stack-sunbird/tasks/learner_service.yml 
b/ansible/roles/stack-sunbird/tasks/learner_service.yml new file mode 100644 index 0000000000000000000000000000000000000000..cd160df823ac19ed5e5330eda5c77b765d6fa1b8 --- /dev/null +++ b/ansible/roles/stack-sunbird/tasks/learner_service.yml @@ -0,0 +1,13 @@ +--- +- name: Remove learner service from stack (previous mode of deploy) + shell: "docker service rm learner_service_learner_service" + ignore_errors: yes + +- name: Remove learner service + shell: "docker service rm learner-service" + ignore_errors: yes + +- name: Deploy learner service + shell: "docker service create --replicas {{ learner.replicas | default(1) }} -p 9000:9000 --name learner-service --hostname learner-service --reserve-memory {{ learner.reservation_memory | default('768M') }} --limit-memory {{ learner.limit_memory | default('1024M') }} --network {{ sunbird_network }} --env-file /home/deployer/env/sunbird_learner-service.env --health-cmd \"curl -f http://localhost:9000/health || exit 1\" --health-interval 10s --health-timeout 5s --health-retries 5 {{hub_org}}/{{image_name}}:{{image_tag}}" + args: + chdir: /home/deployer/stack diff --git a/ansible/roles/stack-sunbird/tasks/main.yml b/ansible/roles/stack-sunbird/tasks/main.yml new file mode 100644 index 0000000000000000000000000000000000000000..ce29fa62cfc4441a317a9a3871b914288ad45315 --- /dev/null +++ b/ansible/roles/stack-sunbird/tasks/main.yml @@ -0,0 +1,12 @@ +--- + - include: common.yml + + - include: service_stack.yml + when: deploy_stack is defined + + + - include: actor_service.yml + when: deploy_actor is defined + + - include: learner_service.yml + when: deploy_learner is defined diff --git a/ansible/roles/stack-sunbird/tasks/service_stack.yml b/ansible/roles/stack-sunbird/tasks/service_stack.yml new file mode 100644 index 0000000000000000000000000000000000000000..c56095d1ff352c592f6c53c32ab0d9179471fbc2 --- /dev/null +++ b/ansible/roles/stack-sunbird/tasks/service_stack.yml @@ -0,0 +1,8 @@ +--- +- name: Save stack file + template: src="stack_{{service_name}}.yml" dest="/home/deployer/stack/stack_{{service_name}}.yml" mode=0644 + +- name: Deploy stack + shell: "docker stack deploy -c stack_{{service_name}}.yml {{service_name}}" + args: + chdir: /home/deployer/stack diff --git a/ansible/roles/stack-sunbird/templates/stack_content_service.yml b/ansible/roles/stack-sunbird/templates/stack_content_service.yml new file mode 100644 index 0000000000000000000000000000000000000000..5b2637d79b26249916d1525e71fa7d74b408f9eb --- /dev/null +++ b/ansible/roles/stack-sunbird/templates/stack_content_service.yml @@ -0,0 +1,27 @@ +version: '3.1' + +services: + content_service: + image: "{{hub_org}}/{{image_name}}:{{image_tag}}" + deploy: + replicas: {{ content.replicas | default(1) }} + resources: + reservations: + memory: "{{ content.reservation_memory | default('64M') }}" + limits: + memory: "{{ content.limit_memory | default('256M') }}" + healthcheck: + test: curl -f http://localhost:5000/health || exit 1 + interval: 10s + timeout: 5s + retries: 5 + env_file: + /home/deployer/env/sunbird_content_service.env + ports: + - "5000:5000" + networks: + - "{{ sunbird_network }}" + +networks: + {{ sunbird_network }}: + external: true diff --git a/ansible/roles/stack-sunbird/templates/stack_learner_service.yml b/ansible/roles/stack-sunbird/templates/stack_learner_service.yml new file mode 100644 index 0000000000000000000000000000000000000000..fa49d850e4994bd601ed7c8fff43427cc67344b5 --- /dev/null +++ b/ansible/roles/stack-sunbird/templates/stack_learner_service.yml @@ -0,0 +1,26 @@ 
+version: '3.1' +services: + learner_service: + image: "{{hub_org}}/{{image_name}}:{{image_tag}}" + env_file: + /home/deployer/env/sunbird_learner_service.env + deploy: + replicas: {{ learner.replicas | default(1) }} + resources: + reservations: + memory: "{{ learner.reservation_memory | default('256M') }}" + limits: + memory: "{{ learner.limit_memory | default('512M') }}" + healthcheck: + test: curl -f http://localhost:9000/health || exit 1 + interval: 10s + timeout: 5s + retries: 5 + ports: + - "9000:9000" + - "8099:8099" + networks: + - "{{ sunbird_network }}" +networks: + {{ sunbird_network }}: + external: true diff --git a/ansible/roles/stack-sunbird/templates/stack_player.yml b/ansible/roles/stack-sunbird/templates/stack_player.yml new file mode 100644 index 0000000000000000000000000000000000000000..a37e2236afca7b6c3802980b118a11f10b8d90af --- /dev/null +++ b/ansible/roles/stack-sunbird/templates/stack_player.yml @@ -0,0 +1,27 @@ +version: '3.1' + +services: + player: + image: "{{hub_org}}/{{image_name}}:{{image_tag}}" + deploy: + replicas: {{ player.replicas | default(1) }} + resources: + reservations: + memory: "{{ player.reservation_memory | default('64M') }}" + limits: + memory: "{{ player.limit_memory | default('256M') }}" + healthcheck: + test: wget -q -s http://localhost:3000 || exit 1 + interval: 10s + timeout: 5s + retries: 5 + env_file: + /home/deployer/env/sunbird_player.env + ports: + - "3000:3000" + networks: + - "{{ sunbird_network }}" + +networks: + {{ sunbird_network }}: + external: true diff --git a/ansible/roles/stack-sunbird/templates/sunbird_actor-service.env b/ansible/roles/stack-sunbird/templates/sunbird_actor-service.env new file mode 100644 index 0000000000000000000000000000000000000000..684e489f56eae3a6680ce011a71e2ae947c4f891 --- /dev/null +++ b/ansible/roles/stack-sunbird/templates/sunbird_actor-service.env @@ -0,0 +1,29 @@ +sunbird_cassandra_host={{sunbird.cassandra_host}} +sunbird_cassandra_port=9042 +sunbird_es_host={{sunbird.es_host}} +sunbird_es_port=9300,9300,9300 +sunbird_cassandra_username=cassandra +sunbird_cassandra_password=password +sunbird_learner_actor_host=actor-service +sunbird_learner_actor_port=8088 +actor_hostname=actor-service +bind_hostname=0.0.0.0 +sunbird_sso_url={{sunbird.sso_url}} +sunbird_sso_realm={{sunbird.sso_realm}} +sunbird_sso_username={{sunbird_sso_username}} +sunbird_sso_password={{sunbird_sso_password}} +sunbird_sso_client_id={{sunbird.sso_client_id}} +ekstep_content_search_base_url={{sunbird.ekstep_content_search_base_url}} +ekstep_authorization={{sunbird_ekstep_authorization}} +sunbird_pg_host={{sunbird.pg_host}} +sunbird_pg_port={{sunbird.pg_port}} +sunbird_pg_db={{sunbird.pg_db}} +sunbird_pg_user={{sunbird.pg_user}} +sunbird_pg_password={{sunbird.pg_password}} +sunbird_installation={{sunbird.installation}} +ekstep_api_base_url={{sunbird.ekstep_api_base_url}} +sunbird_mail_server_host={{sunbird.mail_server_host}} +sunbird_mail_server_port={{sunbird.mail_server_port}} +sunbird_mail_server_username={{sunbird.mail_server_username}} +sunbird_mail_server_password={{sunbird.mail_server_password}} +sunbird_mail_server_from_email={{sunbird.mail_server_from_email}} \ No newline at end of file diff --git a/ansible/roles/stack-sunbird/templates/sunbird_content_service.env b/ansible/roles/stack-sunbird/templates/sunbird_content_service.env new file mode 100644 index 0000000000000000000000000000000000000000..006795721e2848c73499356fb0ea23fbe2abb5dc --- /dev/null +++ b/ansible/roles/stack-sunbird/templates/sunbird_content_service.env 
@@ -0,0 +1,5 @@ +sunbird_mongo_ip={{sunbird.mongo_ip}} +sunbird_mongo_port={{sunbird.mongo_port}} +ekstep_api_base_url={{sunbird.ekstep_api_base_url}} +ekstep_api_key={{sunbird_ekstep_api_key}} +ekstep_proxy_base_url={{sunbird.ekstep_proxy_base_url}} \ No newline at end of file diff --git a/ansible/roles/stack-sunbird/templates/sunbird_learner-service.env b/ansible/roles/stack-sunbird/templates/sunbird_learner-service.env new file mode 100644 index 0000000000000000000000000000000000000000..b18e06ca2d92fd710a2d4da556f44a1c1cea0309 --- /dev/null +++ b/ansible/roles/stack-sunbird/templates/sunbird_learner-service.env @@ -0,0 +1,2 @@ +sunbird_learnerstate_actor_host=actor-service +sunbird_learnerstate_actor_port=8088 diff --git a/ansible/roles/stack-sunbird/templates/sunbird_player.env b/ansible/roles/stack-sunbird/templates/sunbird_player.env new file mode 100644 index 0000000000000000000000000000000000000000..2ba695341ca905ad7514a6e56fd5e56b56c0ca95 --- /dev/null +++ b/ansible/roles/stack-sunbird/templates/sunbird_player.env @@ -0,0 +1,19 @@ +sunbird_port=3000 +sunbird_content_player_url={{sunbird.content_player_url}} +sunbird_learner_player_url={{sunbird.learner_player_url}} +sunbird_content_proxy_url=http://content_service_content_service:5000 +sunbird_echo_api_url={{sunbird.echo_api_url}} +sunbird_autocreate_trampoline_user=false +sunbird_telemetry_packet_size=20 +sunbird_mongodb_ip={{sunbird.mongo_ip}} +sunbird_mongodb_port={{sunbird.mongo_port}} +sunbird_mongodb_ttl=1 +sunbird_portal_realm={{sunbird.portal_realm}} +sunbird_portal_auth_server_url={{sunbird.portal_auth_server_url}} +sunbird_portal_auth_server_client={{sunbird.portal_auth_server_client}} +sunbird_trampoline_client_id={{sunbird.trampoline_client_id}} +sunbird_trampoline_secret={{sunbird_trampoline_secret}} +sunbird_appid={{sunbird.appid}} +ekstep_env={{sunbird.env}} +sunbird_default_tenant={{sunbird.default_tenant}} +sunbird_api_auth_token={{sunbird_api_auth_token}} diff --git a/ansible/roles/swarm-agent-docker-prune/files/swarm-agent-docker-prune.sh b/ansible/roles/swarm-agent-docker-prune/files/swarm-agent-docker-prune.sh new file mode 100644 index 0000000000000000000000000000000000000000..d6c40352644d8774e83bc869ee618bf359a39fad --- /dev/null +++ b/ansible/roles/swarm-agent-docker-prune/files/swarm-agent-docker-prune.sh @@ -0,0 +1,9 @@ +#!/bin/sh +set -e +master_node_ip=$1 +agent_nodes=$(ssh -i /run/secrets/ops-private-key ops@$master_node_ip "docker node ls -f role=worker --format {{.Hostname}}") +for agent_node in $agent_nodes; do + echo "" + echo "Cleaning node: $agent_node" + eval `ssh-agent -s` && ssh-add /run/secrets/ops-private-key && ssh -A -o StrictHostKeyChecking=no ops@$master_node_ip "ssh -o StrictHostKeyChecking=no $agent_node 'docker container prune -f && docker image prune -f'" +done; \ No newline at end of file diff --git a/ansible/roles/swarm-agent-docker-prune/tasks/main.yml b/ansible/roles/swarm-agent-docker-prune/tasks/main.yml new file mode 100644 index 0000000000000000000000000000000000000000..5f201cead32f6c6204cb6758e1fd25b260a1ade8 --- /dev/null +++ b/ansible/roles/swarm-agent-docker-prune/tasks/main.yml @@ -0,0 +1,7 @@ +- name: copy script swarm-agent-docker-prune.sh + copy: src=swarm-agent-docker-prune.sh dest=/tmp mode=0755 + +- name: run script swarm-agent-docker-prune.sh + command: "/tmp/swarm-agent-docker-prune.sh {{ groups['swarm-bootstrap-manager'][0] }}" + async: 3600 + poll: 10 diff --git a/ansible/roles/vm-agents-filebeat/README.md b/ansible/roles/vm-agents-filebeat/README.md new file 
mode 100644 index 0000000000000000000000000000000000000000..4c9d226b5753c060686204d709084107945efa20 --- /dev/null +++ b/ansible/roles/vm-agents-filebeat/README.md @@ -0,0 +1,83 @@ +Ansible Filebeat role +========= + +[Ansible Galaxy](https://galaxy.ansible.com/detail#/role/6293) [Build Status](https://travis-ci.org/DavidWittman/ansible-filebeat) + +Installs Elastic's Filebeat for forwarding logs. + +Role Variables +-------------- + + - `filebeat_version` - The version of filebeat to install. Defaults to `1.3.1`. + - `filebeat_config` - YAML representation of your filebeat config. This is templated directly into the configuration file as YAML. See the [example configuration](https://github.com/elastic/beats/blob/master/filebeat/filebeat.full.yml) for an exhaustive list of configuration options. Defaults to:
+ + ``` yaml + filebeat_config: + filebeat: + prospectors: + - paths: + - /var/log/messages + - /var/log/*.log + input_type: log + output: + file: + path: /tmp/filebeat + filename: filebeat + logging: + to_syslog: true + level: error + ```
+ - `filebeat_ca_cert` - If provided, the contents of this variable will be placed into the file identified by `filebeat_ca_path` on the target host. You can then reference `filebeat_ca_path` within your configuration to properly authenticate your TLS connections to Logstash/Elasticsearch/etc. + + If you wish to load your CA certificate from a file, use the `file` lookup plugin, e.g.: + ``` yaml + filebeat_ca_cert: "{{ lookup('file', '/path/to/ca.crt') }}" + ``` + - `filebeat_ca_path` - If a CA certificate is provided in `filebeat_ca_cert`, it will be created at this path. + +Similar to the CA variables, you can upload SSL certificates and keys for filebeat using these variables: + + - `filebeat_ssl_cert` - Contents of the SSL certificate + - `filebeat_ssl_cert_path` - Destination of the certificate on the Ansible-controlled host + - `filebeat_ssl_key` - Contents of the SSL key + - `filebeat_ssl_key_path` - Destination of the SSL key on the Ansible-controlled host + +You can also store the config in a separate `filebeat.yml` file and include it using a [lookup](http://docs.ansible.com/ansible/playbooks_lookups.html#intro-to-lookups-getting-file-contents): + +``` yaml +filebeat_config: "{{ lookup('file', './filebeat.yml')|from_yaml }}" +```
+ +Common Configurations +--------------------- + +Connecting to Elasticsearch: + + ``` yaml + filebeat_config: + filebeat: + prospectors: + - paths: + - /var/log/messages + - /var/log/*.log + input_type: log + output: + elasticsearch: + hosts: + - "http://localhost:9200" + username: "bob" + password: "12345" + logging: + to_syslog: true + level: error + ``` + +License +------- + +BSD + +Author Information +------------------ + +David Wittman
diff --git a/ansible/roles/vm-agents-filebeat/defaults/main.yml b/ansible/roles/vm-agents-filebeat/defaults/main.yml new file mode 100644 index 0000000000000000000000000000000000000000..828c33ec0efb6230390cff64319423236baa8a9b --- /dev/null +++ b/ansible/roles/vm-agents-filebeat/defaults/main.yml @@ -0,0 +1,49 @@ +--- +# The version of filebeat to install +filebeat_version: 1.3.1 + +# `filebeat_config` is templated directly into filebeat.yml for the config. +# You are expected to override this variable, as these configurations are +# only suited for development purposes. +# See https://github.com/elastic/beats/blob/master/filebeat/filebeat.yml for +# an exhaustive list of configurations.
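+# The `file` output configured below is a no-op sink: events are simply
+# spooled to /tmp/filebeat, which lets the role be smoke-tested without a
+# reachable Logstash or Elasticsearch. A real deployment would normally
+# override filebeat_config with an `output.logstash` or
+# `output.elasticsearch` section instead (see the README above).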
+filebeat_config: + filebeat: + prospectors: + - paths: + - /var/log/messages + - /var/log/*.log + input_type: log + output: + file: + path: /tmp/filebeat + filename: filebeat + logging: + to_syslog: true + level: error + +# The contents of this variable will be placed into the `filebeat_ca_path` +# This should either be set to a string containing your CA certificate or +# use a lookup plugin to retrieve it. +# ex: +# filebeat_ca_cert: "{{ lookup('file', '/path/to/ca.crt') }}" +filebeat_ca_cert: null +# Path to which the above certificate will be uploaded +filebeat_ca_path: /etc/filebeat/ca.crt + +# Similar to the above but for ssl cert and ssl key +filebeat_ssl_cert: null +filebeat_ssl_cert_path: /etc/filebeat/ssl.crt +filebeat_ssl_key: null +filebeat_ssl_key_path: /etc/filebeat/ssl.key + +# Repository settings +filebeat_gpg_url: https://packages.elastic.co/GPG-KEY-elasticsearch +## Debian +filebeat_apt_repo_v1: "deb https://packages.elastic.co/beats/apt stable main" +filebeat_apt_repo_v5: "deb https://artifacts.elastic.co/packages/5.x/apt stable main" +filebeat_apt_repo: "{{ filebeat_version|version_compare('5', '<')|ternary(filebeat_apt_repo_v1, filebeat_apt_repo_v5) }}" +## Redhat +filebeat_repo_url_v1: https://packages.elastic.co/beats/yum/el/$basearch +filebeat_repo_url_v5: https://artifacts.elastic.co/packages/5.x/yum +filebeat_repo_url: "{{ filebeat_version|version_compare('5', '<')|ternary(filebeat_repo_url_v1, filebeat_repo_url_v5) }}" diff --git a/ansible/roles/vm-agents-filebeat/handlers/main.yml b/ansible/roles/vm-agents-filebeat/handlers/main.yml new file mode 100644 index 0000000000000000000000000000000000000000..8c75d6516ee92affd6938a5884d94f77b108151f --- /dev/null +++ b/ansible/roles/vm-agents-filebeat/handlers/main.yml @@ -0,0 +1,12 @@ +--- +- name: restart filebeat + service: + name: filebeat + state: restarted + when: ansible_os_family == 'RedHat' or ansible_os_family == 'Debian' + +- name: restart filebeat + command: ./filebeat -c filebeat.yml -d "publish" + args: + chdir: "{{filebeat.base_path}}" + when: ansible_os_family == 'Darwin' \ No newline at end of file diff --git a/ansible/roles/vm-agents-filebeat/library/yumrepo b/ansible/roles/vm-agents-filebeat/library/yumrepo new file mode 100755 index 0000000000000000000000000000000000000000..e2052cea1911645766d53061cb53baf1beb08cbd --- /dev/null +++ b/ansible/roles/vm-agents-filebeat/library/yumrepo @@ -0,0 +1,560 @@ +#!/usr/bin/python +# encoding: utf-8 + +# (c) 2015, Jiri Tyr <jiri.tyr@gmail.com> +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see <http://www.gnu.org/licenses/>. + + +import ConfigParser +import os + + +DOCUMENTATION = ''' +--- +module: yumrepo +author: Jiri Tyr (@jtyr) +version_added: '2.0' +short_description: Add and remove YUM repositories +description: + - Add or remove YUM repositories in RPM-based Linux distributions. 
+ +options: + bandwidth: + required: false + default: 0 + description: + - Maximum available network bandwidth in bytes/second. Used with the + I(throttle) option. + - If I(throttle) is a percentage and bandwidth is C(0) then bandwidth + throttling will be disabled. If I(throttle) is expressed as a data rate + (bytes/sec) then this option is ignored. Default is C(0) (no bandwidth + throttling).
+ baseurl: + required: false + default: None + description: + - URL to the directory where the yum repository's 'repodata' directory + lives. + - This or the I(mirrorlist) parameter is required.
+ cost: + required: false + default: 1000 + description: + - Relative cost of accessing this repository. Useful for weighing one + repo's packages as greater/less than any other.
+ description: + required: false + default: None + description: + - A human-readable string describing the repository.
+ enabled: + required: false + choices: ['yes', 'no'] + default: 'yes' + description: + - This tells yum whether or not to use this repository.
+ enablegroups: + required: false + choices: ['yes', 'no'] + default: 'yes' + description: + - Determines whether yum will allow the use of package groups for this + repository.
+ exclude: + required: false + default: None + description: + - List of packages to exclude from updates or installs. This should be a + space-separated list. Shell globs using wildcards (e.g. C(*) and C(?)) + are allowed. + - The list can also be a regular YAML array.
+ failovermethod: + required: false + choices: [roundrobin, priority] + default: roundrobin + description: + - C(roundrobin) randomly selects a URL out of the list of URLs to start + with and proceeds through each of them as it encounters a failure + contacting the host. + - C(priority) starts from the first baseurl listed and reads through them + sequentially.
+ file: + required: false + default: None + description: + - File to use to save the repo in. Defaults to the value of I(name).
+ gpgcakey: + required: false + default: None + description: + - A URL pointing to the ASCII-armored CA key file for the repository.
+ gpgcheck: + required: false + choices: ['yes', 'no'] + default: 'no' + description: + - Tells yum whether or not it should perform a GPG signature check on + packages.
+ gpgkey: + required: false + default: None + description: + - A URL pointing to the ASCII-armored GPG key file for the repository.
+ http_caching: + required: false + choices: [all, packages, none] + default: all + description: + - Determines how upstream HTTP caches are instructed to handle any HTTP + downloads that Yum does. + - C(all) means that all HTTP downloads should be cached. + - C(packages) means that only RPM package downloads should be cached (but + not repository metadata downloads). + - C(none) means that no HTTP downloads should be cached.
+ includepkgs: + required: false + default: None + description: + - List of packages you want to only use from a repository. This should be + a space-separated list. Shell globs using wildcards (e.g. C(*) and C(?)) + are allowed. Substitution variables (e.g. C($releasever)) are honored + here. + - The list can also be a regular YAML array.
+ keepalive: + required: false + choices: ['yes', 'no'] + default: 'no' + description: + - This tells yum whether or not HTTP/1.1 keepalive should be used with + this repository. This can improve transfer speeds by using one + connection when downloading multiple files from a repository.
+ metadata_expire: + required: false + default: 21600 + description: + - Time (in seconds) after which the metadata will expire. + - Default value is 6 hours.
+ metalink: + required: false + default: None + description: + - Specifies a URL to a metalink file for the repomd.xml; a list of + mirrors for the entire repository is generated by converting the + mirrors for the repomd.xml file to a baseurl.
+ mirrorlist: + required: false + default: None + description: + - Specifies a URL to a file containing a list of baseurls. + - This or the I(baseurl) parameter is required.
+ mirrorlist_expire: + required: false + default: 21600 + description: + - Time (in seconds) after which the locally cached mirrorlist will + expire. + - Default value is 6 hours.
+ name: + required: true + description: + - Unique repository ID.
+ password: + required: false + default: None + description: + - Password to use with the username for basic authentication.
+ protect: + required: false + choices: ['yes', 'no'] + default: 'no' + description: + - Protect packages from updates from other repositories.
+ proxy: + required: false + default: None + description: + - URL to the proxy server that yum should use.
+ proxy_password: + required: false + default: None + description: + - Password to use for the proxy.
+ proxy_username: + required: false + default: None + description: + - Username to use for the proxy.
+ repo_gpgcheck: + required: false + choices: ['yes', 'no'] + default: 'no' + description: + - This tells yum whether or not it should perform a GPG signature check + on the repodata from this repository.
+ reposdir: + required: false + default: /etc/yum.repos.d + description: + - Directory where the C(.repo) files will be stored.
+ retries: + required: false + default: 10 + description: + - Set the number of times any attempt to retrieve a file should retry + before returning an error. Setting this to C(0) makes yum try forever.
+ skip_if_unavailable: + required: false + choices: ['yes', 'no'] + default: 'no' + description: + - If set to C(yes) yum will continue running if this repository cannot be + contacted for any reason. This should be set carefully as all repos are + consulted for any given command.
+ sslcacert: + required: false + default: None + description: + - Path to the directory containing the databases of the certificate + authorities yum should use to verify SSL certificates.
+ ssl_check_cert_permissions: + required: false + choices: ['yes', 'no'] + default: 'no' + description: + - Whether yum should check the permissions on the paths for the + certificates on the repository (both remote and local). + - If we can't read any of the files then yum will force + I(skip_if_unavailable) to be true. This is most useful for non-root + processes which use yum on repos that have client cert files which are + readable only by root.
+ sslclientcert: + required: false + default: None + description: + - Path to the SSL client certificate yum should use to connect to + repos/remote sites.
+ sslclientkey: + required: false + default: None + description: + - Path to the SSL client key yum should use to connect to repos/remote + sites.
+ sslverify: + required: false + choices: ['yes', 'no'] + default: 'yes' + description: + - Defines whether yum should verify SSL certificates/hosts at all.
+ state: + required: false + choices: [absent, present] + default: present + description: + - Whether the repository entry should be C(present) in or C(absent) from + the repo file.
+ throttle: + required: false + default: None + description: + - Enable bandwidth throttling for downloads. + - This option can be expressed as an absolute data rate in bytes/sec. An + SI prefix (k, M or G) may be appended to the bandwidth value.
+ timeout: + required: false + default: 30 + description: + - Number of seconds to wait for a connection before timing out.
+ username: + required: false + default: None + description: + - Username to use for basic authentication to a repo or, really, any URL.
+ +extends_documentation_fragment: files + +notes: + - All comments will be removed if modifying an existing repo file. + - Section order is preserved in an existing repo file. + - Parameters in a section are ordered alphabetically in an existing repo + file. + - The repo file will be automatically deleted if it contains no repository. +'''
+ +EXAMPLES = ''' +- name: Add repository + yumrepo: + name: epel + description: EPEL YUM repo + baseurl: http://download.fedoraproject.org/pub/epel/$releasever/$basearch/ + +- name: Add multiple repositories into the same file (1/2) + yumrepo: + name: epel + description: EPEL YUM repo + file: external_repos + baseurl: http://download.fedoraproject.org/pub/epel/$releasever/$basearch/ + gpgcheck: no + +- name: Add multiple repositories into the same file (2/2) + yumrepo: + name: rpmforge + description: RPMforge YUM repo + file: external_repos + baseurl: http://apt.sw.be/redhat/el7/en/$basearch/rpmforge + mirrorlist: http://mirrorlist.repoforge.org/el7/mirrors-rpmforge + enabled: no + +- name: Remove repository + yumrepo: + name: epel + state: absent + +- name: Remove repository from a specific repo file + yumrepo: + name: epel + file: external_repos + state: absent +'''
+ +RETURN = ''' +repo: + description: repository name + returned: success + type: string + sample: "epel" +state: + description: state of the target, after execution + returned: success + type: string + sample: "present" +'''
+ + +class YumRepo(object): + # Class global variables + module = None + params = None + section = None + repofile = ConfigParser.RawConfigParser() + + # List of parameters which will be allowed in the repo file output + allowed_params = [ + 'bandwidth', 'baseurl', 'cost', 'enabled', 'enablegroups', 'exclude', + 'failovermethod', 'gpgcakey', 'gpgcheck', 'gpgkey', 'http_caching', + 'includepkgs', 'keepalive', 'metadata_expire', 'metalink', + 'mirrorlist', 'mirrorlist_expire', 'name', 'password', 'protect', + 'proxy', 'proxy_password', 'proxy_username', 'repo_gpgcheck', + 'retries', 'skip_if_unavailable', 'sslcacert', + 'ssl_check_cert_permissions', 'sslclientcert', 'sslclientkey', + 'sslverify', 'throttle', 'timeout', 'username'] + + # List of parameters which can be a list + list_params = ['exclude', 'includepkgs'] + + def __init__(self, module): + # To be able to use fail_json + self.module = module + # Shortcut for the params + self.params = self.module.params + # Section is always the repoid + self.section = self.params['repoid'] + + # Check if repo directory exists + repos_dir = self.params['reposdir'] + if not os.path.isdir(repos_dir): + self.module.fail_json( + msg='Repo directory "%s" does not exist.' % repos_dir) + + # Get the given or the default repo file name + repo_file = self.params['repoid'] + if self.params['file'] is not None: + repo_file = self.params['file'] + + # Set dest; also used to set dest parameter for the FS attributes + self.params['dest'] = os.path.join(repos_dir, "%s.repo" % repo_file) + + # Read the repo file if it exists + if os.path.isfile(self.params['dest']): + self.repofile.read(self.params['dest'])
+ + def add(self): + # Remove already existing repo and create a new one + if self.repofile.has_section(self.section): + self.repofile.remove_section(self.section) + + # Add section + self.repofile.add_section(self.section) + + # Baseurl/mirrorlist is not required because for removal we need only + # the repo name. This is why we check if the baseurl/mirrorlist is + # defined. + if (self.params['baseurl'], self.params['mirrorlist']) == (None, None): + self.module.fail_json( + msg='Parameter "baseurl" or "mirrorlist" is required for ' + 'adding a new repo.') + + # Set options + for key, value in sorted(self.params.items()): + if key in self.list_params and isinstance(value, list): + # Join items into one string for specific parameters + value = ' '.join(value) + elif isinstance(value, bool): + # Convert boolean value to integer + value = int(value) + + # Set the value only if it was defined (default is None) + if value is not None and key in self.allowed_params: + self.repofile.set(self.section, key, value)
+ + def save(self): + if len(self.repofile.sections()): + # Write data into the file + try: + fd = open(self.params['dest'], 'wb') + except IOError: + self.module.fail_json( + msg='Cannot open repo file %s.' % + self.params['dest']) + + try: + try: + self.repofile.write(fd) + except ConfigParser.Error: + self.module.fail_json( + msg='Cannot write repo file %s.' % + self.params['dest']) + finally: + fd.close() + else: + # Remove the file if there are no repos + try: + os.remove(self.params['dest']) + except OSError: + self.module.fail_json( + msg='Cannot remove empty repo file %s.'
% + self.params['dest']) + + def remove(self): + # Remove section if exists + if self.repofile.has_section(self.section): + self.repofile.remove_section(self.section) + + def dump(self): + repo_string = "" + + # Compose the repo file + for section in sorted(self.repofile.sections()): + repo_string += "[%s]\n" % section + + for key, value in sorted(self.repofile.items(section)): + repo_string += "%s = %s\n" % (key, value) + + repo_string += "\n" + + return repo_string + + +def main(): + # Module settings + module = AnsibleModule( + argument_spec=dict( + bandwidth=dict(), + baseurl=dict(), + cost=dict(), + description=dict(), + enabled=dict(type='bool'), + enablegroups=dict(type='bool'), + exclude=dict(), + failovermethod=dict(choices=['roundrobin', 'priority']), + file=dict(), + gpgcakey=dict(), + gpgcheck=dict(type='bool'), + gpgkey=dict(), + http_caching=dict(choices=['all', 'packages', 'none']), + includepkgs=dict(), + keepalive=dict(type='bool'), + metadata_expire=dict(), + metalink=dict(), + mirrorlist=dict(), + mirrorlist_expire=dict(), + name=dict(required=True), + password=dict(no_log=True), + protect=dict(type='bool'), + proxy=dict(), + proxy_password=dict(no_log=True), + proxy_username=dict(), + repo_gpgcheck=dict(type='bool'), + reposdir=dict(default='/etc/yum.repos.d'), + retries=dict(), + skip_if_unavailable=dict(type='bool'), + sslcacert=dict(), + ssl_check_cert_permissions=dict(type='bool'), + sslclientcert=dict(), + sslclientkey=dict(), + sslverify=dict(type='bool'), + state=dict(choices=['present', 'absent'], default='present'), + throttle=dict(), + timeout=dict(), + username=dict(), + ), + add_file_common_args=True, + supports_check_mode=True, + ) + + name = module.params['name'] + state = module.params['state'] + + # Rename "name" and "description" to ensure correct key sorting + module.params['repoid'] = module.params['name'] + module.params['name'] = module.params['description'] + del module.params['description'] + + # Instantiate the YumRepo object + yumrepo = YumRepo(module) + + # Get repo status before change + yumrepo_before = yumrepo.dump() + + # Perform action depending on the state + if state == 'present': + yumrepo.add() + elif state == 'absent': + yumrepo.remove() + + # Get repo status after change + yumrepo_after = yumrepo.dump() + + # Compare repo states + changed = yumrepo_before != yumrepo_after + + # Save the file only if not in check mode and if there was a change + if not module.check_mode and changed: + yumrepo.save() + + # Change file attributes if needed + if os.path.isfile(module.params['dest']): + file_args = module.load_file_common_arguments(module.params) + changed = module.set_fs_attributes_if_different(file_args, changed) + + # Print status of the change + module.exit_json(changed=changed, repo=name, state=state) + + +# Import module snippets +from ansible.module_utils.basic import * + + +if __name__ == '__main__': + main() diff --git a/ansible/roles/vm-agents-filebeat/meta/main.yml b/ansible/roles/vm-agents-filebeat/meta/main.yml new file mode 100644 index 0000000000000000000000000000000000000000..6343b79654b96be5c9397cd9ae7d7229c635d1ef --- /dev/null +++ b/ansible/roles/vm-agents-filebeat/meta/main.yml @@ -0,0 +1,17 @@ +--- +galaxy_info: + author: David Wittman + description: Installs Elastic's Filebeat for forwarding logs. 
+ license: BSD + min_ansible_version: 1.2 + platforms: + - name: EL + versions: + - all + - name: Ubuntu + versions: + - all + categories: + - monitoring + - system +dependencies: [] diff --git a/ansible/roles/vm-agents-filebeat/tasks/darwin.yml b/ansible/roles/vm-agents-filebeat/tasks/darwin.yml new file mode 100644 index 0000000000000000000000000000000000000000..dfccae57edaa0f2b4be22eaa76053340e3f0150a --- /dev/null +++ b/ansible/roles/vm-agents-filebeat/tasks/darwin.yml @@ -0,0 +1,17 @@ +--- +- name: download filebeat + get_url: + url: https://artifacts.elastic.co/downloads/beats/filebeat/filebeat-5.5.1-darwin-x86_64.tar.gz + checksum: md5:6297ad27d6ddea291a4594b199d2fa21 + dest: /tmp/filebeat.tar.gz + +- unarchive: + src: /tmp/filebeat.tar.gz + dest: /tmp/ + +- file: + path: /tmp/filebeat + state: absent + +- name: copying over + command: mv /tmp/filebeat-5.5.1-darwin-x86_64 /tmp/filebeat \ No newline at end of file diff --git a/ansible/roles/vm-agents-filebeat/tasks/debian.yml b/ansible/roles/vm-agents-filebeat/tasks/debian.yml new file mode 100644 index 0000000000000000000000000000000000000000..c48d58e4eac9a46719e1d97a40e8fc0d74219b3a --- /dev/null +++ b/ansible/roles/vm-agents-filebeat/tasks/debian.yml @@ -0,0 +1,17 @@ +--- +- name: add elastic gpg key + apt_key: + url: "{{ filebeat_gpg_url }}" + +- name: add beats repository + apt_repository: + repo: "{{ filebeat_apt_repo }}" + state: present + update_cache: yes + +- name: install filebeat + apt: + name: filebeat={{ filebeat_version }} + state: present + notify: + - restart filebeat diff --git a/ansible/roles/vm-agents-filebeat/tasks/main.yml b/ansible/roles/vm-agents-filebeat/tasks/main.yml new file mode 100644 index 0000000000000000000000000000000000000000..f935f9ce1ec79588eca850e45428f15b90d458ce --- /dev/null +++ b/ansible/roles/vm-agents-filebeat/tasks/main.yml @@ -0,0 +1,62 @@ +--- +- debug: + msg: "Installing on {{ansible_os_family}}" + +- include: redhat.yml + when: ansible_os_family == 'RedHat' + +- include: debian.yml + when: ansible_os_family == 'Debian' + +- include: darwin.yml + when: ansible_os_family == 'Darwin' + +- name: create filebeat.yml + template: + src: filebeat.yml.j2 + dest: "{{filebeat.base_path}}/filebeat.yml" + notify: + - restart filebeat + +- name: copy ca certificate if required + copy: + content: "{{ filebeat_ca_cert }}" + dest: "{{ filebeat_ca_path }}" + owner: root + group: root + mode: 0400 + when: filebeat_ca_cert != None + +- name: copy ssl certificate if required + copy: + content: "{{ filebeat_ssl_cert }}" + dest: "{{ filebeat_ssl_cert_path }}" + owner: root + group: root + mode: 0400 + when: filebeat_ssl_cert != None + +- name: copy ssl key if required + copy: + content: "{{ filebeat_ssl_key }}" + dest: "{{ filebeat_ssl_key_path }}" + owner: root + group: root + mode: 0400 + when: filebeat_ssl_key != None + +- name: flush handlers to prevent start then restart + meta: flush_handlers + +- name: start and enable filebeat + service: + name: filebeat + state: started + enabled: true + when: ansible_os_family == 'RedHat' or ansible_os_family == 'Debian' + +- name: start and enable filebeat + command: ./filebeat -c filebeat.yml -d "publish" + args: + chdir: "{{filebeat.base_path}}" + when: ansible_os_family == 'Darwin' diff --git a/ansible/roles/vm-agents-filebeat/tasks/redhat.yml b/ansible/roles/vm-agents-filebeat/tasks/redhat.yml new file mode 100644 index 0000000000000000000000000000000000000000..f125b785dddce0b2da488123ceb9073dfeb1710f --- /dev/null +++ 
b/ansible/roles/vm-agents-filebeat/tasks/redhat.yml @@ -0,0 +1,19 @@ +--- +- name: add rpm key for elastic.co + rpm_key: + key: "{{ filebeat_gpg_url }}" + +- name: create beats yum repo + yumrepo: + name: beats + baseurl: "{{ filebeat_repo_url }}" + enabled: yes + gpgcheck: yes + gpgkey: "{{ filebeat_gpg_url }}" + +- name: install filebeat + yum: + name: "filebeat-{{ filebeat_version }}" + state: present + notify: + - restart filebeat diff --git a/ansible/roles/vm-agents-filebeat/templates/filebeat.yml.j2 b/ansible/roles/vm-agents-filebeat/templates/filebeat.yml.j2 new file mode 100644 index 0000000000000000000000000000000000000000..6085929e9232036d85bfd8615db784b43eba08d1 --- /dev/null +++ b/ansible/roles/vm-agents-filebeat/templates/filebeat.yml.j2 @@ -0,0 +1,2 @@ +# {{ ansible_managed }} +{{ filebeat.config|to_yaml }} diff --git a/ansible/roles/vm-agents-logstash/LICENSE b/ansible/roles/vm-agents-logstash/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..4275cf3c10aae9c3992998fbf54f90bae9615960 --- /dev/null +++ b/ansible/roles/vm-agents-logstash/LICENSE @@ -0,0 +1,20 @@ +The MIT License (MIT) + +Copyright (c) 2017 Jeff Geerling + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
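The vm-agents-filebeat and vm-agents-logstash roles pair up into a log-shipping pipeline: filebeat tails files on each VM and forwards events to the beats input the logstash role opens (port 5044, per `logstash_listen_port_beats` below). A minimal wiring sketch, assuming hypothetical group names `log-agents` and `log-server`; everything else uses variables the two roles actually define:

``` yaml
# Illustrative playbook snippet, not part of this changeset.
- hosts: log-agents
  become: yes
  roles:
    - role: vm-agents-filebeat
      filebeat_config:
        filebeat:
          prospectors:
            - paths:
                - /var/log/syslog
              input_type: log
        output:
          # Filebeat 1.x "logstash" output, matching the beats input
          # that vm-agents-logstash configures on port 5044.
          logstash:
            hosts:
              - "{{ groups['log-server'][0] }}:5044"
```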
diff --git a/ansible/roles/vm-agents-logstash/defaults/main.yml b/ansible/roles/vm-agents-logstash/defaults/main.yml new file mode 100644 index 0000000000000000000000000000000000000000..532d627c4458517d05825a428c6c92c039cb7bbc --- /dev/null +++ b/ansible/roles/vm-agents-logstash/defaults/main.yml @@ -0,0 +1,17 @@ +--- +logstash_listen_port_beats: 5044 + +logstash_elasticsearch_hosts: + - http://localhost:9200 + +logstash_local_syslog_path: /var/log/syslog +logstash_monitor_local_syslog: true + +logstash_ssl_dir: /etc/pki/logstash +logstash_ssl_certificate_file: "" +logstash_ssl_key_file: "" + +logstash_enabled_on_boot: yes + +logstash_install_plugins: + - logstash-input-beats diff --git a/ansible/roles/vm-agents-logstash/files/filters/10-syslog.conf b/ansible/roles/vm-agents-logstash/files/filters/10-syslog.conf new file mode 100644 index 0000000000000000000000000000000000000000..7d5e419054bf51bedbbe0727e45471d40568432a --- /dev/null +++ b/ansible/roles/vm-agents-logstash/files/filters/10-syslog.conf @@ -0,0 +1,16 @@ +filter { + if [type] == "syslog" { + if [message] =~ /last message repeated [0-9]+ times/ { + drop { } + } + grok { + match => { "message" => "%{SYSLOGTIMESTAMP:syslog_timestamp} %{SYSLOGHOST:syslog_hostname} %{DATA:syslog_program}(?:\[%{POSINT:syslog_pid}\])?: %{GREEDYDATA:syslog_message}" } + add_field => [ "received_at", "%{@timestamp}" ] + add_field => [ "received_from", "%{host}" ] + } + syslog_pri { } + date { + match => [ "syslog_timestamp", "MMM d HH:mm:ss", "MMM dd HH:mm:ss" ] + } + } +} diff --git a/ansible/roles/vm-agents-logstash/files/filters/11-nginx.conf b/ansible/roles/vm-agents-logstash/files/filters/11-nginx.conf new file mode 100644 index 0000000000000000000000000000000000000000..dc206e5c27b2baf730bbfb2dba8917a1a3da56db --- /dev/null +++ b/ansible/roles/vm-agents-logstash/files/filters/11-nginx.conf @@ -0,0 +1,7 @@ +filter { + if [type] == "nginx" { + grok { + match => { "message" => "%{COMBINEDAPACHELOG}" } + } + } +} diff --git a/ansible/roles/vm-agents-logstash/files/filters/12-apache.conf b/ansible/roles/vm-agents-logstash/files/filters/12-apache.conf new file mode 100644 index 0000000000000000000000000000000000000000..c804bab8a718f6803e5ee5d35349a23312c40577 --- /dev/null +++ b/ansible/roles/vm-agents-logstash/files/filters/12-apache.conf @@ -0,0 +1,10 @@ +filter { + if [type] == "apache" { + grok { + match => { "message" => "%{COMBINEDAPACHELOG}"} + } + date { + match => [ "timestamp", "dd/MMM/yyyy:HH:mm:ss Z" ] + } + } +} diff --git a/ansible/roles/vm-agents-logstash/files/filters/14-solr.conf b/ansible/roles/vm-agents-logstash/files/filters/14-solr.conf new file mode 100644 index 0000000000000000000000000000000000000000..43cb28badeb2e256b09c4e2d56f7ebdac027ce2e --- /dev/null +++ b/ansible/roles/vm-agents-logstash/files/filters/14-solr.conf @@ -0,0 +1,15 @@ +filter { + if [type] == "solr" { + if [message] =~ /org\.apache\.solr\.core\.SolrCore execute/ { + drop { } + } + grok { + match => { "message" => "<%{POSINT:priority}>%{SYSLOGLINE}"} + } + multiline { + pattern => "(([^\s]+)Exception.+)|(at:.+)" + stream_identity => "%{logsource}.%{@type}" + what => "previous" + } + } +} diff --git a/ansible/roles/vm-agents-logstash/files/filters/15-drupal.conf b/ansible/roles/vm-agents-logstash/files/filters/15-drupal.conf new file mode 100644 index 0000000000000000000000000000000000000000..03a762720f95fc9fc2fd782854a94624e5e7496d --- /dev/null +++ b/ansible/roles/vm-agents-logstash/files/filters/15-drupal.conf @@ -0,0 +1,7 @@ +filter { + if [type] == "drupal" 
{ + grok { + match => ["message", "%{SYSLOGTIMESTAMP} %{HOSTNAME} %{WORD}: %{URI:drupal_vhost}\|%{NUMBER:drupal_timestamp}\|(?<drupal_action>[^\|]*)\|%{IP:drupal_ip}\|(?<drupal_request_uri>[^\|]*)\|(?<drupal_referer>[^\|]*)\|(?<drupal_uid>[^\|]*)\|(?<drupal_link>[^\|]*)\|%{GREEDYDATA:drupal_message}" ] + } + } +} diff --git a/ansible/roles/vm-agents-logstash/files/logstash.repo b/ansible/roles/vm-agents-logstash/files/logstash.repo new file mode 100644 index 0000000000000000000000000000000000000000..c37547ba111e0d264c2e883bab2fdb1c25a2ef9c --- /dev/null +++ b/ansible/roles/vm-agents-logstash/files/logstash.repo @@ -0,0 +1,6 @@ +[logstash-2.3] +name=Logstash repository for 2.3.x packages +baseurl=http://packages.elastic.co/logstash/2.3/centos +gpgcheck=1 +gpgkey=http://packages.elastic.co/GPG-KEY-elasticsearch +enabled=1 \ No newline at end of file diff --git a/ansible/roles/vm-agents-logstash/handlers/main.yml b/ansible/roles/vm-agents-logstash/handlers/main.yml new file mode 100644 index 0000000000000000000000000000000000000000..56f376c8e7d7c517db895519d15328f2619e5510 --- /dev/null +++ b/ansible/roles/vm-agents-logstash/handlers/main.yml @@ -0,0 +1,3 @@ +--- +- name: restart logstash + service: name=logstash state=restarted diff --git a/ansible/roles/vm-agents-logstash/meta/main.yml b/ansible/roles/vm-agents-logstash/meta/main.yml new file mode 100644 index 0000000000000000000000000000000000000000..7c150f874e01b5eba4caecda9825ff1655df1930 --- /dev/null +++ b/ansible/roles/vm-agents-logstash/meta/main.yml @@ -0,0 +1,24 @@ +--- +dependencies: [] + +galaxy_info: + author: geerlingguy + description: Logstash for Linux. + company: "Midwestern Mac, LLC" + license: "license (BSD, MIT)" + min_ansible_version: 1.8 + platforms: + - name: EL + versions: + - 6 + - 7 + - name: Debian + versions: + - all + - name: Ubuntu + versions: + - all + galaxy_tags: + - web + - system + - monitoring diff --git a/ansible/roles/vm-agents-logstash/tasks/config.yml b/ansible/roles/vm-agents-logstash/tasks/config.yml new file mode 100644 index 0000000000000000000000000000000000000000..9574891a32f3b7f8fe5b37089da4da4f688f95dd --- /dev/null +++ b/ansible/roles/vm-agents-logstash/tasks/config.yml @@ -0,0 +1,44 @@ +--- +- name: Create Logstash configuration files. + template: + src: "{{ item }}.j2" + dest: "/etc/logstash/conf.d/{{ item }}" + owner: root + group: root + mode: 0644 + with_items: + - 01-beats-input.conf + - 30-elasticsearch-output.conf + notify: restart logstash + +- name: Create Logstash filters. + copy: + src: "filters/{{ item }}" + dest: "/etc/logstash/conf.d/{{ item }}" + owner: root + group: root + mode: 0644 + with_items: + - 10-syslog.conf + - 11-nginx.conf + - 12-apache.conf + - 14-solr.conf + - 15-drupal.conf + notify: restart logstash + +- name: Create Logstash configuration file for local syslog. + template: + src: 02-local-syslog-input.conf.j2 + dest: /etc/logstash/conf.d/02-local-syslog-input.conf + owner: root + group: root + mode: 0644 + when: logstash_monitor_local_syslog + notify: restart logstash + +- name: Ensure configuration for local syslog is absent if disabled. 
+ file: + path: /etc/logstash/conf.d/02-local-syslog-input.conf + state: absent + when: not logstash_monitor_local_syslog + notify: restart logstash diff --git a/ansible/roles/vm-agents-logstash/tasks/main.yml b/ansible/roles/vm-agents-logstash/tasks/main.yml new file mode 100644 index 0000000000000000000000000000000000000000..5da843cf155ffb2077bb592ab1887be640bded1f --- /dev/null +++ b/ansible/roles/vm-agents-logstash/tasks/main.yml @@ -0,0 +1,13 @@ +--- +- name: Include OS Specific setup tasks + include: setup-{{ ansible_os_family }}.yml + +- include: config.yml +- include: ssl.yml +- include: plugins.yml + +- name: Ensure Logstash is started and enabled on boot. + service: + name: logstash + state: started + enabled: "{{ logstash_enabled_on_boot }}" diff --git a/ansible/roles/vm-agents-logstash/tasks/plugins.yml b/ansible/roles/vm-agents-logstash/tasks/plugins.yml new file mode 100644 index 0000000000000000000000000000000000000000..8a90e62591489a9682caee90816f9639f03b1dc7 --- /dev/null +++ b/ansible/roles/vm-agents-logstash/tasks/plugins.yml @@ -0,0 +1,15 @@ +--- +- name: Get list of installed plugins. + command: > + ./bin/logstash-plugin list + chdir=/opt/logstash + register: logstash_plugins_list + changed_when: false + +- name: Install configured plugins. + command: > + ./bin/logstash-plugin install {{ item }} + chdir=/opt/logstash + with_items: "{{ logstash_install_plugins }}" + when: "item not in logstash_plugins_list.stdout" + notify: restart logstash diff --git a/ansible/roles/vm-agents-logstash/tasks/setup-Debian.yml b/ansible/roles/vm-agents-logstash/tasks/setup-Debian.yml new file mode 100644 index 0000000000000000000000000000000000000000..5f52c62aa1e5d01ddc1016f39bc4664baef21d05 --- /dev/null +++ b/ansible/roles/vm-agents-logstash/tasks/setup-Debian.yml @@ -0,0 +1,29 @@ +--- +- name: Add Elasticsearch apt key. + apt_key: + url: http://packages.elasticsearch.org/GPG-KEY-elasticsearch + state: present + +- name: Add Logstash repository. + apt_repository: + repo: 'deb http://packages.elasticsearch.org/logstash/2.3/debian stable main' + state: present + +- name: Check if Logstash is already installed. + stat: path=/etc/init.d/logstash + register: logstash_installed + +- name: Update apt cache if repository just added. + apt: update_cache=yes + when: logstash_installed.stat.exists == false + +- name: Install Logstash. + apt: pkg=logstash state=present + +- name: Add Logstash user to adm group (Debian). + user: + name: logstash + group: logstash + groups: adm + when: ansible_os_family == "Debian" + notify: restart logstash diff --git a/ansible/roles/vm-agents-logstash/tasks/setup-RedHat.yml b/ansible/roles/vm-agents-logstash/tasks/setup-RedHat.yml new file mode 100644 index 0000000000000000000000000000000000000000..57d1ae5bb02853367967b0cc747b2870977c9448 --- /dev/null +++ b/ansible/roles/vm-agents-logstash/tasks/setup-RedHat.yml @@ -0,0 +1,14 @@ +--- +- name: Add Elasticsearch GPG key. + rpm_key: + key: http://packages.elasticsearch.org/GPG-KEY-elasticsearch + state: present + +- name: Add Logstash repository. + copy: + src: logstash.repo + dest: /etc/yum.repos.d/logstash.repo + mode: 0644 + +- name: Install Logstash. 
+ yum: pkg=logstash state=installed diff --git a/ansible/roles/vm-agents-logstash/tasks/ssl.yml b/ansible/roles/vm-agents-logstash/tasks/ssl.yml new file mode 100644 index 0000000000000000000000000000000000000000..d8fc8b91912fabe2bb8d3d3fd2120879e9df712b --- /dev/null +++ b/ansible/roles/vm-agents-logstash/tasks/ssl.yml @@ -0,0 +1,17 @@ +--- +- name: Ensure Logstash SSL key pair directory exists. + file: + path: "{{ logstash_ssl_dir }}" + state: directory + when: logstash_ssl_key_file + +- name: Copy SSL key and cert for logstash-forwarder. + copy: + src: "{{ item }}" + dest: "{{ logstash_ssl_dir }}/{{ item | basename }}" + mode: 0644 + with_items: + - "{{ logstash_ssl_key_file }}" + - "{{ logstash_ssl_certificate_file }}" + notify: restart logstash + when: logstash_ssl_key_file diff --git a/ansible/roles/vm-agents-logstash/templates/01-beats-input.conf.j2 b/ansible/roles/vm-agents-logstash/templates/01-beats-input.conf.j2 new file mode 100644 index 0000000000000000000000000000000000000000..433ab1bf0051e54af963f5de7b084b8c32a4fa36 --- /dev/null +++ b/ansible/roles/vm-agents-logstash/templates/01-beats-input.conf.j2 @@ -0,0 +1,11 @@ +input { + beats { + port => {{ logstash_listen_port_beats }} +{% if logstash_ssl_certificate_file and logstash_ssl_key_file %} + ssl => true + ssl_certificate => "{{ logstash_ssl_dir }}/{{ logstash_ssl_certificate_file | basename }}" + ssl_key => "{{ logstash_ssl_dir }}/{{ logstash_ssl_key_file | basename }}" + ssl_verify_mode => "force_peer" +{% endif %} + } +} diff --git a/ansible/roles/vm-agents-logstash/templates/02-local-syslog-input.conf.j2 b/ansible/roles/vm-agents-logstash/templates/02-local-syslog-input.conf.j2 new file mode 100644 index 0000000000000000000000000000000000000000..54723728d1d42c34bc549f9572e44161bd402ba4 --- /dev/null +++ b/ansible/roles/vm-agents-logstash/templates/02-local-syslog-input.conf.j2 @@ -0,0 +1,5 @@ +input { + file { + path => "{{ logstash_local_syslog_path }}" + } +} diff --git a/ansible/roles/vm-agents-logstash/templates/30-elasticsearch-output.conf.j2 b/ansible/roles/vm-agents-logstash/templates/30-elasticsearch-output.conf.j2 new file mode 100644 index 0000000000000000000000000000000000000000..ce7fc14337e0fd5b8fccbf4b145ff4fd6df78cdd --- /dev/null +++ b/ansible/roles/vm-agents-logstash/templates/30-elasticsearch-output.conf.j2 @@ -0,0 +1,7 @@ +output { + elasticsearch { + hosts => {{ logstash_elasticsearch_hosts | to_json }} + index => "%{[@metadata][beat]}-%{+YYYY.MM.dd}" + document_type => "%{[@metadata][type]}" + } +} diff --git a/ansible/roles/vm-agents-nodeexporter/LICENSE b/ansible/roles/vm-agents-nodeexporter/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..8dada3edaf50dbc082c9a125058f25def75e625a --- /dev/null +++ b/ansible/roles/vm-agents-nodeexporter/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
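The vm-agents-nodeexporter role that follows exposes metrics on port 9101 (`prometheus_node_exporter_web_listen_port` in its defaults). A minimal Prometheus scrape-config sketch for collecting those targets; the job name and host names are illustrative, not part of this changeset:

``` yaml
scrape_configs:
  - job_name: 'node'
    # Static targets shown for brevity; the role can alternatively push
    # file_sd entries to the Prometheus host when
    # prometheus_node_exporter_file_sd is enabled.
    static_configs:
      - targets:
          - 'swarm-agent-01:9101'
          - 'swarm-agent-02:9101'
```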
diff --git a/ansible/roles/vm-agents-nodeexporter/README.md b/ansible/roles/vm-agents-nodeexporter/README.md new file mode 100644 index 0000000000000000000000000000000000000000..ff133ce855bede2ab8281b4420f57955956ff17a --- /dev/null +++ b/ansible/roles/vm-agents-nodeexporter/README.md @@ -0,0 +1,29 @@ +Prometheus node_exporter +======================== + +Ansible [Prometheus](https://prometheus.io) [node_exporter](https://github.com/prometheus/node_exporter) role + +Requirements +------------ + +* Debian Jessie or newer + +Role Variables +-------------- + +see `defaults/main.yml` + +Example Playbook +---------------- + + - hosts: all + roles: + - role: SphericalElephant.prometheus-node-exporter + prometheus_node_exporter_file_sd: True + prometheus_node_exporter_file_sd_locations: + - { host: prometheus01.in.example.com, path: "/etc/prometheus/endpoints/node-{{ inventory_hostname }}.yml" } + +License +------- + +Apache 2.0 diff --git a/ansible/roles/vm-agents-nodeexporter/defaults/main.yml b/ansible/roles/vm-agents-nodeexporter/defaults/main.yml new file mode 100644 index 0000000000000000000000000000000000000000..117b63c92058276778413bf08d74741e9a172364 --- /dev/null +++ b/ansible/roles/vm-agents-nodeexporter/defaults/main.yml @@ -0,0 +1,38 @@ +--- +prometheus_node_exporter: True +prometheus_node_exporter_collectors_enable: + # - conntrack + - diskstats + # - entropy + # - filefd + - filesystem + - loadavg + # - mdadm + - meminfo + - netdev + - netstat + # - sockstat + - stat + - textfile + - time + # - uname + # - vmstat +prometheus_node_exporter_web_listen_port: "9101" +prometheus_node_exporter_web_listen_address: ":{{ prometheus_node_exporter_web_listen_port }}" +prometheus_node_exporter_collector_netdev_ignored_devices: "^$" + +prometheus_node_exporter_parameters: + - "-collectors.enabled={{ prometheus_node_exporter_collectors_enable | join(',') }}" + - "-web.listen-address={{ prometheus_node_exporter_web_listen_address }}" + - '-log.level=info' + - '-collector.diskstats.ignored-devices=^(ram|loop|fd)\d+$' + - '-collector.filesystem.ignored-mount-points=^/(sys|proc|dev|run)($|/)' + - '-collector.netdev.ignored-devices="{{ prometheus_node_exporter_collector_netdev_ignored_devices }}"' + - '-collector.textfile.directory=/var/lib/prometheus/node-exporter' + +prometheus_node_exporter_file_sd: False +# prometheus_node_exporter_file_sd_locations: +# - { host: prometheus01.in.example.com, path: "/etc/prometheus/endpoints/node-{{ inventory_hostname }}.yml" } +prometheus_node_exporter_file_sd_locations: [] +prometheus_node_exporter_file_sd_labels: + "job": "node" diff --git a/ansible/roles/vm-agents-nodeexporter/handlers/main.yml b/ansible/roles/vm-agents-nodeexporter/handlers/main.yml new file mode 100644 index 0000000000000000000000000000000000000000..9c62baf6a093e64feebe1faf8dd3a053fcf47f9c --- /dev/null +++ b/ansible/roles/vm-agents-nodeexporter/handlers/main.yml @@ -0,0 +1,5 @@ +--- +- name: restart prometheus-node-exporter + service: + name: prometheus-node-exporter + state: restarted diff --git a/ansible/roles/vm-agents-nodeexporter/meta/.galaxy_install_info b/ansible/roles/vm-agents-nodeexporter/meta/.galaxy_install_info new file mode 100644 index 0000000000000000000000000000000000000000..92ba7b898e622a0d365f5b1415c1476c72d6cfd7 --- /dev/null +++ b/ansible/roles/vm-agents-nodeexporter/meta/.galaxy_install_info @@ -0,0 +1 @@ +{install_date: 'Fri Jul 22 14:49:27 2016', version: master} diff --git a/ansible/roles/vm-agents-nodeexporter/meta/main.yml 
b/ansible/roles/vm-agents-nodeexporter/meta/main.yml new file mode 100644 index 0000000000000000000000000000000000000000..50b06968ac2888ed791a3a3f89f698a5806e0b83 --- /dev/null +++ b/ansible/roles/vm-agents-nodeexporter/meta/main.yml @@ -0,0 +1,20 @@ +galaxy_info: + author: Farhad Shahbazi + description: Prometheus node_exporter + company: Spherical Elephant GmbH + license: Apache + min_ansible_version: 2.0 + platforms: + - name: Debian + versions: + - jessie + - sid + - stretch + + galaxy_tags: + - prometheus + - monitoring + - metrics + - node_exporter + +dependencies: [] diff --git a/ansible/roles/vm-agents-nodeexporter/tasks/main.yml b/ansible/roles/vm-agents-nodeexporter/tasks/main.yml new file mode 100644 index 0000000000000000000000000000000000000000..6c08b14ab39b9142c2ca943f0bf1c948dcce4796 --- /dev/null +++ b/ansible/roles/vm-agents-nodeexporter/tasks/main.yml @@ -0,0 +1,5 @@ +--- + +- include: prometheus-node-exporter.yml + when: prometheus_node_exporter + tags: prometheus-node-exporter diff --git a/ansible/roles/vm-agents-nodeexporter/tasks/prometheus-node-exporter.yml b/ansible/roles/vm-agents-nodeexporter/tasks/prometheus-node-exporter.yml new file mode 100644 index 0000000000000000000000000000000000000000..509b68ab55437a8dca0e7d3879883baf691a0281 --- /dev/null +++ b/ansible/roles/vm-agents-nodeexporter/tasks/prometheus-node-exporter.yml @@ -0,0 +1,38 @@ +--- +- name: install dependencies + apt: + name: "{{item}}" + state: present + with_items: + - conntrack + +- name: install prometheus-node-exporter + apt: + name: prometheus-node-exporter + state: present + +- name: /etc/default/prometheus-node-exporter + template: + dest: /etc/default/prometheus-node-exporter + src: prometheus-node-exporter.j2 + owner: root + group: root + mode: 0644 + notify: restart prometheus-node-exporter + +- name: start and enable the prometheus-node-exporter service + service: + name: prometheus-node-exporter + state: started + enabled: yes + +# - name: copy file_sd_config to prometheus hosts +# template: +# dest: "{{ item.path }}" +# src: file_sd_config.yml.j2 +# owner: root +# group: root +# mode: 0644 +# delegate_to: "{{ item.host }}" +# with_items: "{{ prometheus_node_exporter_file_sd_locations }}" +# when: prometheus_node_exporter_file_sd diff --git a/ansible/roles/vm-agents-nodeexporter/templates/file_sd_config.yml.j2 b/ansible/roles/vm-agents-nodeexporter/templates/file_sd_config.yml.j2 new file mode 100644 index 0000000000000000000000000000000000000000..b841780ff1a920e9233017c9b6ba216713555f1e --- /dev/null +++ b/ansible/roles/vm-agents-nodeexporter/templates/file_sd_config.yml.j2 @@ -0,0 +1,7 @@ +# {{ ansible_managed }} + +- targets: ['{{ inventory_hostname }}:{{ prometheus_node_exporter_web_listen_port }}'] + labels: +{% for label, value in prometheus_node_exporter_file_sd_labels.iteritems() %} + "{{ label }}": "{{ value }}" +{% endfor %} diff --git a/ansible/roles/vm-agents-nodeexporter/templates/prometheus-node-exporter.j2 b/ansible/roles/vm-agents-nodeexporter/templates/prometheus-node-exporter.j2 new file mode 100644 index 0000000000000000000000000000000000000000..e2dad81d1608462c936938c42f6ab94328405836 --- /dev/null +++ b/ansible/roles/vm-agents-nodeexporter/templates/prometheus-node-exporter.j2 @@ -0,0 +1,58 @@ +# {{ ansible_managed }} +# Set the command-line arguments to pass to the server. 
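+# With this role's defaults in defaults/main.yml, the line below should render to roughly: +# ARGS='-collectors.enabled=diskstats,filesystem,loadavg,meminfo,netdev,netstat,stat,textfile,time -web.listen-address=:9101 -log.level=info ...' +# (the remaining -collector.* flags follow, in the order listed in prometheus_node_exporter_parameters).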
+ARGS='{{ prometheus_node_exporter_parameters | join(" ") }}' + +# Prometheus-node-exporter supports the following options: +# +# -collector.diskstats.ignored-devices string +# Regexp of devices to ignore for diskstats. +# (default "^(ram|loop|fd|(h|s|v|xv)d[a-z]|nvme\\d+n\\d+p)\\d+$") +# -collector.filesystem.ignored-fs-types string +# Regexp of filesystem types to ignore for filesystem collector. +# (default "^(sys|proc)fs$") +# -collector.filesystem.ignored-mount-points string +# Regexp of mount points to ignore for filesystem collector. +# (default "^/(sys|proc|dev)($|/)") +# -collector.megacli.command string +# Command to run megacli. (default "megacli") +# -collector.netdev.ignored-devices string +# Regexp of net devices to ignore for netdev collector. (default "^$") +# -collector.ntp.protocol-version int +# NTP protocol version (default 4) +# -collector.ntp.server string +# NTP server to use for ntp collector. +# -collector.procfs string +# procfs mountpoint. (default "/proc") +# -collector.supervisord.url string +# XML RPC endpoint (default "http://localhost:9001/RPC2") +# -collector.sysfs string +# sysfs mountpoint. (default "/sys") +# -collector.systemd.private +# Establish a private, direct connection to systemd without dbus. +# -collector.systemd.unit-blacklist string +# Regexp of systemd units to blacklist. Units must both match whitelist +# and not match blacklist to be included. +# -collector.systemd.unit-whitelist string +# Regexp of systemd units to whitelist. Units must both match whitelist +# and not match blacklist to be included. (default ".+") +# -collector.textfile.directory string +# Directory to read text files with metrics from. +# -collectors.enabled string +# Comma-separated list of collectors to use. +# (default "conntrack,diskstats,entropy,filefd,filesystem,hwmon,\ +# loadavg,mdadm,meminfo,netdev,netstat,sockstat,stat,textfile,time,\ +# uname,vmstat") +# -collectors.print +# If true, print available collectors and exit. +# -log.format value +# Set the log target and format. +# Example: "logger:syslog?appname=bob&local=7" or +# "logger:stdout?json=true" (default "logger:stderr") +# -log.level value +# Only log messages with the given severity or above. +# Valid levels: [debug, info, warn, error, fatal] (default "info") +# -web.listen-address string +# Address on which to expose metrics and web interface. +# (default ":9101") +# -web.telemetry-path string +# Path under which to expose metrics. (default "/metrics") diff --git a/ansible/run_command.yml b/ansible/run_command.yml new file mode 100644 index 0000000000000000000000000000000000000000..c396342de6cfe47eb2f0e3985c7872d8f4da958d --- /dev/null +++ b/ansible/run_command.yml @@ -0,0 +1,17 @@ +# This playbook uses all as host. 
Run this playbook with a --limit pattern to restrict the target hosts +- hosts: all + vars_files: + - 'secrets/{{env}}.yml' + tasks: + - name: Create file with the command to run + copy: content="{{ command }}" dest="/tmp/ansible-run-command.sh" mode="755" + - name: Run command + command: bash -lc "/tmp/ansible-run-command.sh" + async: 1800 + poll: 10 + register: log + ignore_errors: true + - debug: + var: log + verbosity: 4 + become: yes \ No newline at end of file diff --git a/ansible/static-files/kong-api-scripts/__init__.py b/ansible/static-files/kong-api-scripts/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/ansible/static-files/kong-api-scripts/common.py b/ansible/static-files/kong-api-scripts/common.py new file mode 100644 index 0000000000000000000000000000000000000000..855fa4094ee9fa10e7cb192a5bb8ec344bced70d --- /dev/null +++ b/ansible/static-files/kong-api-scripts/common.py @@ -0,0 +1,26 @@ +import urllib2, json + +# Due to https://github.com/Mashape/kong/issues/1912, +# we can't loop through all APIs page by page. +# Hence this workaround, which fetches APIs with the page size limited to max_page_size. +# max_page_size ensures we don't bring down the DB by fetching a lot of rows. +# If we ever end up with more APIs than max_page_size, +# increase the value of max_page_size judiciously. +def get_apis(kong_admin_api_url): + max_page_size = 1000 + apis_url_with_size_limit = "{}/apis?size={}".format(kong_admin_api_url, max_page_size) + apis_response = json.loads(urllib2.urlopen(apis_url_with_size_limit).read()) + total_apis = apis_response["total"] + if(total_apis > max_page_size): + raise Exception("There are {} APIs in the system, which is more than max_page_size={}. Please increase max_page_size in ansible/static-files/kong-api-scripts/common.py if this is expected".format(total_apis, max_page_size)) + else: + return apis_response["data"] + +def json_request(method, url, data=None): + request_body = json.dumps(data) if data is not None else None + request = urllib2.Request(url, request_body) + if data: + request.add_header('Content-Type', 'application/json') + request.get_method = lambda: method + response = urllib2.urlopen(request) + return response diff --git a/ansible/static-files/kong-api-scripts/kong_api_csv_to_yaml.py b/ansible/static-files/kong-api-scripts/kong_api_csv_to_yaml.py new file mode 100644 index 0000000000000000000000000000000000000000..5b886ef1de7d7ea3a49bc4efb597b9e404c63202 --- /dev/null +++ b/ansible/static-files/kong-api-scripts/kong_api_csv_to_yaml.py @@ -0,0 +1,37 @@ +import argparse, sys +from collections import OrderedDict +import csv +import yaml + +def setup_yaml(): + """ https://stackoverflow.com/a/31609484/69362 """ + represent_dict_order = lambda self, data: self.represent_mapping('tag:yaml.org,2002:map', data.items()) + yaml.add_representer(OrderedDict, represent_dict_order) + +def convert_csv_to_yaml(apis_csv_file): + reader = csv.DictReader(apis_csv_file, delimiter=',') + apis = [] + for row in reader: + apis.append(OrderedDict([ + ('name', row['NAME']), + ('request_path', row['REQUEST PATH']), + ('upstream_url', row['UPSTREAM PATH']), + ('strip_request_path', True), + ('plugins', [ + OrderedDict([('name', 'jwt')]), + OrderedDict([('name', 'cors')]), + "{{ statsd_pulgin }}", + OrderedDict([('name', 'acl'), ('config.whitelist', row["WHITELIST GROUP"])]), + OrderedDict([('name', 'rate-limiting'), ('config.hour', row["RATE LIMIT"]), ('config.limit_by', row["LIMIT BY"])]), + OrderedDict([('name', 'request-size-limiting'), 
('config.allowed_payload_size', row["REQUEST SIZE LIMIT"])]), + ]) + ])) + yaml.dump(apis, sys.stdout, default_flow_style=False) + +if __name__ == "__main__": + parser = argparse.ArgumentParser(description='Converts APIs CSV to yaml that can be used in ansible') + parser.add_argument('apis_csv_file_path', help='Path of the csv file containing apis data') + args = parser.parse_args() + setup_yaml() + with open(args.apis_csv_file_path) as apis_csv_file: + convert_csv_to_yaml(apis_csv_file) diff --git a/ansible/static-files/kong-api-scripts/kong_apis.py b/ansible/static-files/kong-api-scripts/kong_apis.py new file mode 100644 index 0000000000000000000000000000000000000000..6d56fbec816839450357e9e0532a98e43f630512 --- /dev/null +++ b/ansible/static-files/kong-api-scripts/kong_apis.py @@ -0,0 +1,83 @@ +import urllib2, argparse, json + +from common import get_apis, json_request + +def save_apis(kong_admin_api_url, input_apis): + apis_url = "{}/apis".format(kong_admin_api_url) + saved_apis = get_apis(kong_admin_api_url) + + print("Number of input APIs : {}".format(len(input_apis))) + print("Number of existing APIs : {}".format(len(saved_apis))) + + input_api_names = [api["name"] for api in input_apis] + saved_api_names = [api["name"] for api in saved_apis] + + print("Input APIs : {}".format(input_api_names)) + print("Existing APIs : {}".format(saved_api_names)) + + input_apis_to_be_created = [input_api for input_api in input_apis if input_api["name"] not in saved_api_names] + input_apis_to_be_updated = [input_api for input_api in input_apis if input_api["name"] in saved_api_names] + saved_api_to_be_deleted = [saved_api for saved_api in saved_apis if saved_api["name"] not in input_api_names] + + for input_api in input_apis_to_be_created: + print("Adding API {}".format(input_api["name"])) + json_request("POST", apis_url, _sanitized_api_data(input_api)) + + for input_api in input_apis_to_be_updated: + print("Updating API {}".format(input_api["name"])) + saved_api_id = [saved_api["id"] for saved_api in saved_apis if saved_api["name"] == input_api["name"]][0] + input_api["id"] = saved_api_id + json_request("PATCH", apis_url + "/" + saved_api_id, _sanitized_api_data(input_api)) + + for saved_api in saved_api_to_be_deleted: + print("Deleting API {}".format(saved_api["name"])); + json_request("DELETE", apis_url + "/" + saved_api["id"], "") + + for input_api in input_apis: + _save_plugins_for_api(kong_admin_api_url, input_api) + +def _save_plugins_for_api(kong_admin_api_url, input_api_details): + api_name = input_api_details["name"] + input_plugins = input_api_details["plugins"] + api_pugins_url = kong_admin_api_url + "/apis/" + api_name + "/plugins" + saved_api_details = json.loads(urllib2.urlopen(api_pugins_url).read()) + saved_plugins = saved_api_details["data"] + input_plugin_names = [input_plugin["name"] for input_plugin in input_plugins] + saved_plugin_names = [saved_plugin["name"] for saved_plugin in saved_plugins] + + input_plugins_to_be_created = [input_plugin for input_plugin in input_plugins if input_plugin["name"] not in saved_plugin_names] + input_plugins_to_be_updated = [input_plugin for input_plugin in input_plugins if input_plugin["name"] in saved_plugin_names] + saved_plugins_to_be_deleted = [saved_plugin for saved_plugin in saved_plugins if saved_plugin["name"] not in input_plugin_names] + + for input_plugin in input_plugins_to_be_created: + print("Adding plugin {} for API {}".format(input_plugin["name"], api_name)); + json_request("POST", api_pugins_url, input_plugin) + + for 
input_plugin in input_plugins_to_be_updated: + print("Updating plugin {} for API {}".format(input_plugin["name"], api_name)); + saved_plugin_id = [saved_plugin["id"] for saved_plugin in saved_plugins if saved_plugin["name"] == input_plugin["name"]][0] + input_plugin["id"] = saved_plugin_id + json_request("PATCH", api_pugins_url + "/" + saved_plugin_id, input_plugin) + + for saved_plugin in saved_plugins_to_be_deleted: + print("Deleting plugin {} for API {}".format(saved_plugin["name"], api_name)); + json_request("DELETE", api_pugins_url + "/" + saved_plugin["id"], "") + +def _sanitized_api_data(input_api): + keys_to_ignore = ['plugins'] + sanitized_api_data = dict((key, input_api[key]) for key in input_api if key not in keys_to_ignore) + return sanitized_api_data + +if __name__ == "__main__": + parser = argparse.ArgumentParser(description='Configure Kong APIs') + parser.add_argument('apis_file_path', help='Path of the JSON file containing APIs data') + parser.add_argument('--kong-admin-api-url', help='Admin URL for Kong', default='http://localhost:8001') + args = parser.parse_args() + with open(args.apis_file_path) as apis_file: + input_apis = json.load(apis_file) + try: + save_apis(args.kong_admin_api_url, input_apis) + except urllib2.HTTPError as e: + error_message = e.read() + print(error_message) + raise \ No newline at end of file diff --git a/ansible/static-files/kong-api-scripts/kong_apis_report.py b/ansible/static-files/kong-api-scripts/kong_apis_report.py new file mode 100644 index 0000000000000000000000000000000000000000..6306ae920e9c0cb245acf26022c7eaec5174f201 --- /dev/null +++ b/ansible/static-files/kong-api-scripts/kong_apis_report.py @@ -0,0 +1,20 @@ +import urllib2, argparse, json, csv + +from common import get_apis, json_request + +def create_api_report_csv(kong_admin_api_url, report_file_path): + saved_apis = get_apis(kong_admin_api_url) + with open(report_file_path, 'w') as csvfile: + fieldnames = ['Name', 'Path'] + writer = csv.DictWriter(csvfile, fieldnames=fieldnames) + writer.writeheader() + for api in saved_apis: + writer.writerow({'Name': api['name'], 'Path': api['request_path']}) + +if __name__ == "__main__": + parser = argparse.ArgumentParser(description='Generate a report of the on-boarded APIs') + parser.add_argument('report_file_path', help='Report file path') + parser.add_argument('--kong-admin-api-url', help='Admin URL for Kong', default='http://localhost:8001') + args = parser.parse_args() + + create_api_report_csv(args.kong_admin_api_url, args.report_file_path) diff --git a/ansible/static-files/kong-api-scripts/kong_consumers.py b/ansible/static-files/kong-api-scripts/kong_consumers.py new file mode 100644 index 0000000000000000000000000000000000000000..013ebf7661df304d37ec9cec9d5d808972ea0794 --- /dev/null +++ b/ansible/static-files/kong-api-scripts/kong_consumers.py @@ -0,0 +1,112 @@ +import urllib2, argparse, json +import jwt + +from common import json_request + +def _consumer_exists(kong_admin_api_url, username): + consumers_url = "{}/consumers".format(kong_admin_api_url) + try: + urllib2.urlopen(consumers_url + "/" + username) + return True + except urllib2.HTTPError as e: + if(e.code == 404): + return False + else: + raise + +def _ensure_consumer_exists(kong_admin_api_url, consumer): + username = consumer['username'] + consumers_url = "{}/consumers".format(kong_admin_api_url) + if(not _consumer_exists(kong_admin_api_url, username)): + print("Adding consumer {}".format(username)); + json_request("POST", consumers_url, {'username': username}) + + +def 
save_consumers(kong_admin_api_url, consumers): + consumers_url = "{}/consumers".format(kong_admin_api_url) + consumers_to_be_present = [consumer for consumer in consumers if consumer['state'] == 'present'] + consumers_to_be_absent = [consumer for consumer in consumers if consumer['state'] == 'absent'] + + for consumer in consumers_to_be_absent: + username = consumer['username'] + if(_consumer_exists(kong_admin_api_url, username)): + print("Deleting consumer {}".format(username)); + json_request("DELETE", consumers_url + "/" + username, "") + + for consumer in consumers_to_be_present: + username = consumer['username'] + _ensure_consumer_exists(kong_admin_api_url, consumer) + _save_groups_for_consumer(kong_admin_api_url, consumer) + jwt_credential = _get_first_or_create_jwt_credential(kong_admin_api_url, consumer) + credential_algorithm = jwt_credential['algorithm'] + if credential_algorithm == 'HS256': + jwt_token = jwt.encode({'iss': jwt_credential['key']}, jwt_credential['secret'], algorithm=credential_algorithm) + print("JWT token for {} is : {}".format(username, jwt_token)) + if 'print_credentials' in consumer: + print("Credentials for consumer {}, key: {}, secret: {}".format(username, jwt_credential['key'], jwt_credential['secret'])) + + +def _get_first_or_create_jwt_credential(kong_admin_api_url, consumer): + username = consumer["username"] + credential_algorithm = consumer.get('credential_algorithm', 'HS256') + consumer_jwt_credentials_url = kong_admin_api_url + "/consumers/" + username + "/jwt" + saved_credentials_details = json.loads(urllib2.urlopen(consumer_jwt_credentials_url).read()) + saved_credentials = saved_credentials_details["data"] + saved_credentials_for_algorithm = [saved_credential for saved_credential in saved_credentials if saved_credential['algorithm'] == credential_algorithm] + if(len(saved_credentials_for_algorithm) > 0): + print("Updating credentials for consumer {} for algorithm {}".format(username, credential_algorithm)); + this_credential = saved_credentials_for_algorithm[0] + credential_data = { + "rsa_public_key": consumer.get('credential_rsa_public_key', this_credential.get("rsa_public_key", '')), + "key": consumer.get('credential_iss', this_credential['key']) + } + this_credential_url = "{}/{}".format(consumer_jwt_credentials_url, this_credential["id"]) + response = json_request("PATCH", this_credential_url, credential_data) + jwt_credential = json.loads(response.read()) + return jwt_credential + else: + print("Creating jwt credentials for consumer {}".format(username)); + credential_data = { + "algorithm": credential_algorithm, + } + if 'credential_rsa_public_key' in consumer: + credential_data["rsa_public_key"] = consumer['credential_rsa_public_key'] + if 'credential_iss' in consumer: + credential_data["key"] = consumer['credential_iss'] + response = json_request("POST", consumer_jwt_credentials_url, credential_data) + jwt_credential = json.loads(response.read()) + return jwt_credential + +def _save_groups_for_consumer(kong_admin_api_url, consumer): + username = consumer["username"] + input_groups = consumer["groups"] + consumer_acls_url = kong_admin_api_url + "/consumers/" + username + "/acls" + saved_acls_details = json.loads(urllib2.urlopen(consumer_acls_url).read()) + saved_acls = saved_acls_details["data"] + saved_groups = [acl["group"] for acl in saved_acls] + print("Existing groups for consumer {} : {}".format(username, saved_groups)) + print("Required groups for consumer {} : {}".format(username, input_groups)) + input_groups_to_be_created = 
[input_group for input_group in input_groups if input_group not in saved_groups] + saved_groups_to_be_deleted = [saved_group for saved_group in saved_groups if saved_group not in input_groups] + + for input_group in input_groups_to_be_created: + print("Adding group {} for consumer {}".format(input_group, username)); + json_request("POST", consumer_acls_url, {'group': input_group}) + + for saved_group in saved_groups_to_be_deleted: + print("Deleting group {} for consumer {}".format(saved_group, username)); + json_request("DELETE", consumer_acls_url + "/" + saved_group, "") + +if __name__ == "__main__": + parser = argparse.ArgumentParser(description='Configure Kong consumers') + parser.add_argument('consumers_file_path', help='Path of the JSON file containing consumer data') + parser.add_argument('--kong-admin-api-url', help='Admin URL for Kong', default='http://localhost:8001') + args = parser.parse_args() + with open(args.consumers_file_path) as consumers_file: + input_consumers = json.load(consumers_file) + try: + save_consumers(args.kong_admin_api_url, input_consumers) + except urllib2.HTTPError as e: + error_message = e.read() + print(error_message) + raise diff --git a/ansible/swarm-agent-docker-prune.yml b/ansible/swarm-agent-docker-prune.yml new file mode 100644 index 0000000000000000000000000000000000000000..cc558c64f99a2a4a422c0539ab80708ba8c2ef60 --- /dev/null +++ b/ansible/swarm-agent-docker-prune.yml @@ -0,0 +1,3 @@ +- hosts: swarm-agent-docker-prune + roles: + - swarm-agent-docker-prune \ No newline at end of file diff --git a/cloud/monitoring/grafana/Agent Details-1500628974657.json b/cloud/monitoring/grafana/Agent Details-1500628974657.json new file mode 100644 index 0000000000000000000000000000000000000000..1341eb4564e4ad7cd54aab19a43bdd76441250b4 --- /dev/null +++ b/cloud/monitoring/grafana/Agent Details-1500628974657.json @@ -0,0 +1,1582 @@ +{ + "__inputs": [ + { + "name": "DS_TEST", + "label": "test", + "description": "", + "type": "datasource", + "pluginId": "prometheus", + "pluginName": "Prometheus" + } + ], + "__requires": [ + { + "type": "grafana", + "id": "grafana", + "name": "Grafana", + "version": "4.4.1" + }, + { + "type": "panel", + "id": "graph", + "name": "Graph", + "version": "" + }, + { + "type": "datasource", + "id": "prometheus", + "name": "Prometheus", + "version": "1.0.0" + }, + { + "type": "panel", + "id": "singlestat", + "name": "Singlestat", + "version": "" + } + ], + "annotations": { + "list": [] + }, + "description": "A simple overview of the most important Docker metrics exported by node-exporter in Docker swarm mode.
", + "editable": true, + "gnetId": 2666, + "graphTooltip": 0, + "hideControls": false, + "id": null, + "links": [], + "refresh": false, + "rows": [ + { + "collapse": false, + "height": "100px", + "panels": [ + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": false, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "datasource": "${DS_TEST}", + "decimals": null, + "editable": true, + "error": false, + "format": "none", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "id": 15, + "interval": null, + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "span": 2, + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": false + }, + "tableColumn": "host", + "targets": [ + { + "expr": "host{instance=~\"$server\"}", + "format": "table", + "intervalFactor": 1, + "refId": "A", + "step": 10 + } + ], + "thresholds": "", + "title": "Host", + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "current" + }, + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": false, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "datasource": "${DS_TEST}", + "editable": true, + "error": false, + "format": "percent", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "hideTimeOverride": true, + "id": 23, + "interval": null, + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "span": 2, + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": false + }, + "tableColumn": "", + "targets": [ + { + "expr": "sum(irate(node_cpu{mode=\"idle\", instance=~\"$server\"}[1m])) * 100 / count_scalar(node_cpu{mode=\"user\", instance=~\"$server\"})", + "interval": "10s", + "intervalFactor": 2, + "legendFormat": "", + "refId": "A", + "step": 20 + } + ], + "thresholds": "", + "timeFrom": "10s", + "title": "CPU Idle", + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "avg" + }, + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": true, + "colors": [ + "rgba(50, 172, 45, 0.97)", + "rgba(237, 129, 40, 0.89)", + "rgba(245, 54, 54, 0.9)" + ], + "datasource": "${DS_TEST}", + "editable": true, + "error": false, + "format": "percentunit", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "id": 11, + "interval": null, + "links": [], + "mappingType": 
1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "span": 2, + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": true + }, + "tableColumn": "", + "targets": [ + { + "expr": "node_load1{instance=~\"$server\"} / count by(job, instance)(count by(job, instance, cpu)(node_cpu{instance=~\"$server\"}))", + "intervalFactor": 2, + "legendFormat": "", + "refId": "A", + "step": 20 + } + ], + "thresholds": "0.8,0.9", + "title": "Load", + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "current" + }, + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": false, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "datasource": "${DS_TEST}", + "editable": true, + "error": false, + "format": "bytes", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "hideTimeOverride": true, + "id": 14, + "interval": null, + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "span": 2, + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": false + }, + "tableColumn": "", + "targets": [ + { + "expr": "node_memory_MemAvailable{instance=~\"$server\"}", + "interval": "30s", + "intervalFactor": 2, + "legendFormat": "", + "refId": "A", + "step": 60 + } + ], + "thresholds": "", + "timeFrom": "10s", + "title": "Available Memory", + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "avg" + }, + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": false, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "datasource": "${DS_TEST}", + "editable": true, + "error": false, + "format": "bytes", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "hideTimeOverride": true, + "id": 17, + "interval": null, + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "span": 2, + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": false + }, + "tableColumn": "", + "targets": [ + { + "expr": "sum(node_filesystem_free{fstype=\"aufs\",instance=~\"$server\"})", + "interval": 
"30s", + "intervalFactor": 1, + "legendFormat": "", + "refId": "A", + "step": 30 + } + ], + "thresholds": "", + "timeFrom": "10s", + "title": "Free Storage", + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "avg" + }, + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": false, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "datasource": "${DS_TEST}", + "decimals": 1, + "editable": true, + "error": false, + "format": "s", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "id": 27, + "interval": null, + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "span": 2, + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": false + }, + "tableColumn": "", + "targets": [ + { + "expr": "time() - node_boot_time{instance=~\"$server\"}", + "format": "time_series", + "intervalFactor": 1, + "refId": "A", + "step": 10 + } + ], + "thresholds": "", + "title": "Uptime", + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "avg" + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": false, + "title": "New row", + "titleSize": "h6" + }, + { + "collapse": false, + "height": "200px", + "panels": [ + { + "alerting": {}, + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_TEST}", + "editable": true, + "error": false, + "fill": 3, + "grid": {}, + "id": 9, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "hideZero": true, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "connected", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 5, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": "avg without (cpu) (irate(node_cpu{instance=~\"$server\", mode!=\"idle\"}[5m]))", + "intervalFactor": 2, + "legendFormat": "{{mode}}", + "refId": "A", + "step": 4, + "target": "" + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "CPU Usage", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 2, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "percentunit", + "label": null, + "logBase": 1, + "max": 1, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_TEST}", + "decimals": 2, + "editable": true, + "error": false, + "fill": 4, + "grid": {}, + "id": 20, + "legend": { + 
"alignAsTable": true, + "avg": true, + "current": true, + "max": true, + "min": false, + "rightSide": true, + "show": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 2, + "links": [], + "nullPointMode": "connected", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 7, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": "irate(node_network_receive_bytes{device!=\"lo\",instance=~\"$server\"}[1m])", + "intervalFactor": 1, + "legendFormat": "In: {{ device }}", + "metric": "node_network_receive_bytes", + "refId": "A", + "step": 1 + }, + { + "expr": "irate(node_network_transmit_bytes{device!=\"lo\",instance=~\"$server\"}[1m])", + "intervalFactor": 1, + "legendFormat": "Out: {{ device }}", + "metric": "node_network_transmit_bytes", + "refId": "B", + "step": 1 + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Network Usage", + "tooltip": { + "msResolution": true, + "shared": true, + "sort": 2, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "Bps", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": false, + "title": "New row", + "titleSize": "h6" + }, + { + "collapse": false, + "height": "200px", + "panels": [ + { + "alerting": {}, + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_TEST}", + "editable": true, + "error": false, + "fill": 1, + "grid": {}, + "id": 4, + "legend": { + "alignAsTable": true, + "avg": true, + "current": true, + "max": true, + "min": false, + "rightSide": true, + "show": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 2, + "links": [], + "nullPointMode": "connected", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + { + "alias": "Free", + "zindex": 3 + } + ], + "spaceLength": 10, + "span": 10, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": "node_memory_MemTotal{instance=~\"$server\"} - node_memory_MemFree{instance=~\"$server\"} - node_memory_Cached{instance=~\"$server\"} - node_memory_Buffers{instance=~\"$server\"} - node_memory_Slab{instance=~\"$server\"}", + "intervalFactor": 2, + "legendFormat": "Used", + "metric": "memo", + "refId": "A", + "step": 2, + "target": "" + }, + { + "expr": "node_memory_Buffers{instance=~\"$server\"}", + "intervalFactor": 2, + "legendFormat": "Buffers", + "refId": "C", + "step": 2 + }, + { + "expr": "node_memory_Cached{instance=~\"$server\"} + node_memory_Slab{instance=~\"$server\"}", + "intervalFactor": 2, + "legendFormat": "Cached", + "refId": "D", + "step": 2 + }, + { + "expr": "node_memory_MemFree{instance=~\"$server\"}", + "hide": false, + "intervalFactor": 2, + "legendFormat": "Free", + "refId": "B", + "step": 2 + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Memory usage", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + 
"format": "bytes", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + }, + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": false, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "datasource": "${DS_TEST}", + "editable": true, + "error": false, + "format": "percent", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": true, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "id": 5, + "interval": null, + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "span": 2, + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": false + }, + "tableColumn": "", + "targets": [ + { + "expr": "(node_memory_MemAvailable{instance=~\"$server\"} / node_memory_MemTotal{instance=~\"$server\"}) * 100", + "intervalFactor": 2, + "refId": "A", + "step": 20, + "target": "" + } + ], + "thresholds": "10, 20", + "title": "Available memory", + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "avg" + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": false, + "title": "New row", + "titleSize": "h6" + }, + { + "collapse": false, + "height": "200px", + "panels": [ + { + "alerting": {}, + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_TEST}", + "editable": true, + "error": false, + "fill": 1, + "grid": {}, + "id": 6, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 2, + "links": [], + "nullPointMode": "connected", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + { + "alias": "read", + "yaxis": 1 + }, + { + "alias": "{instance=\"172.17.0.1:9100\"}", + "yaxis": 2 + }, + { + "alias": "io time", + "yaxis": 2 + } + ], + "spaceLength": 10, + "span": 5, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum by (instance) (irate(node_disk_reads_completed{instance=~\"$server\"}[5m]))", + "hide": false, + "intervalFactor": 4, + "legendFormat": "reads per second", + "refId": "A", + "step": 8, + "target": "" + }, + { + "expr": "sum by (instance) (irate(node_disk_writes_completed{instance=~\"$server\"}[5m]))", + "intervalFactor": 4, + "legendFormat": "writes per second", + "refId": "B", + "step": 8 + }, + { + "expr": "sum by (instance) (irate(node_disk_io_time_ms{instance=~\"$server\"}[5m]))", + "intervalFactor": 4, + "legendFormat": "io time", + "refId": "C", + "step": 8 + }, + { + "expr": "sum by (instance) (irate(node_disk_reads_completed{instance=~\"$server\"}[5m])) + sum by (instance) (irate(node_disk_writes_completed{instance=~\"$server\"}[5m]))", + "intervalFactor": 2, + "legendFormat": "IOPS", + "refId": "D", + "step": 4 + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": 
"IOPs", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 0, + "value_type": "cumulative" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "none", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "ms", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_TEST}", + "decimals": 2, + "editable": true, + "error": false, + "fill": 1, + "grid": {}, + "id": 22, + "legend": { + "alignAsTable": false, + "avg": true, + "current": false, + "max": true, + "min": false, + "rightSide": false, + "show": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 2, + "links": [], + "nullPointMode": "connected", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + { + "alias": "read", + "yaxis": 1 + }, + { + "alias": "written", + "yaxis": 1 + }, + { + "alias": "io time", + "yaxis": 2 + } + ], + "spaceLength": 10, + "span": 5, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(irate(node_disk_bytes_read{instance=~\"$server\"}[1m]))", + "interval": "", + "intervalFactor": 1, + "legendFormat": "read", + "metric": "node_disk_bytes_read", + "refId": "A", + "step": 2 + }, + { + "expr": "sum(irate(node_disk_bytes_written{instance=~\"$server\"}[1m]))", + "intervalFactor": 1, + "legendFormat": "written", + "metric": "node_disk_bytes_written", + "refId": "B", + "step": 2 + }, + { + "expr": "sum(irate(node_disk_io_time_ms{instance=~\"$server\"}[1m]))", + "intervalFactor": 1, + "legendFormat": "io time", + "metric": "node_disk_io_time_ms", + "refId": "C", + "step": 2 + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "I/O Usage", + "tooltip": { + "msResolution": true, + "shared": true, + "sort": 0, + "value_type": "cumulative" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "Bps", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "ms", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + }, + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": false, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "datasource": "${DS_TEST}", + "editable": true, + "error": false, + "format": "percentunit", + "gauge": { + "maxValue": 1, + "minValue": 0, + "show": true, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "id": 7, + "interval": null, + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "span": 2, + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": false + }, + "tableColumn": "", + "targets": [ + { + "expr": "min(node_filesystem_free{fstype=~\"xfs|ext4\",instance=~\"$server\"} / 
node_filesystem_size{fstype=~\"xfs|ext4\",instance=~\"$server\"})", + "intervalFactor": 2, + "legendFormat": "", + "refId": "A", + "step": 20, + "target": "" + } + ], + "thresholds": "0.10, 0.25", + "title": "Free Filesystem Space (Lowest)", + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "current" + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_TEST}", + "decimals": 2, + "editable": true, + "error": false, + "fill": 4, + "grid": {}, + "id": 24, + "legend": { + "alignAsTable": true, + "avg": true, + "current": false, + "max": true, + "min": true, + "rightSide": false, + "show": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 2, + "links": [], + "nullPointMode": "connected", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + { + "alias": "Used", + "color": "#890F02" + }, + { + "alias": "Free", + "color": "#7EB26D" + } + ], + "spaceLength": 10, + "span": 5, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": "node_memory_SwapTotal{instance=~\"$server\"} - node_memory_SwapFree{instance=~\"$server\"}", + "interval": "10s", + "intervalFactor": 1, + "legendFormat": "Used", + "refId": "A", + "step": 10 + }, + { + "expr": "node_memory_SwapFree{instance=~\"$server\"}", + "interval": "10s", + "intervalFactor": 1, + "legendFormat": "Free", + "refId": "B", + "step": 10 + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Swap Usage", + "tooltip": { + "msResolution": true, + "shared": true, + "sort": 2, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "bytes", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_TEST}", + "decimals": 2, + "editable": true, + "error": false, + "fill": 1, + "grid": {}, + "id": 25, + "legend": { + "alignAsTable": true, + "avg": true, + "current": false, + "max": true, + "min": true, + "show": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 2, + "links": [], + "nullPointMode": "connected", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 5, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "rate(node_vmstat_pswpin{instance=~\"$server\"}[1m]) * 4096 or irate(node_vmstat_pswpin{instance=~\"$server\"}[5m]) * 4096", + "interval": "10s", + "intervalFactor": 1, + "legendFormat": "In", + "refId": "A", + "step": 10 + }, + { + "expr": "rate(node_vmstat_pswpout{instance=~\"$server\"}[1m]) * 4096 or irate(node_vmstat_pswpout{instance=~\"$server\"}[5m]) * 4096", + "interval": "10s", + "intervalFactor": 1, + "legendFormat": "Out", + "refId": "B", + "step": 10 + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Swap I/O", + "tooltip": { + "msResolution": true, + "shared": true, + "sort": 0, + "value_type": "cumulative" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { 
+ "format": "Bps", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + }, + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": false, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "datasource": "${DS_TEST}", + "decimals": null, + "editable": true, + "error": false, + "format": "percent", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": true, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "hideTimeOverride": true, + "id": 26, + "interval": null, + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "span": 2, + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": false + }, + "tableColumn": "", + "targets": [ + { + "expr": "(node_memory_SwapFree{instance=~\"$server\"} /node_memory_SwapTotal{instance=~\"$server\"}) * 100", + "interval": "30s", + "intervalFactor": 2, + "refId": "A", + "step": 60 + } + ], + "thresholds": "", + "timeFrom": "10s", + "title": "Free Swap", + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "avg" + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": false, + "title": "New row", + "titleSize": "h6" + } + ], + "schemaVersion": 14, + "style": "dark", + "tags": [ + "prometheus" + ], + "templating": { + "list": [ + { + "allValue": null, + "current": {}, + "datasource": "${DS_TEST}", + "hide": 0, + "includeAll": false, + "label": "", + "multi": false, + "name": "server", + "options": [], + "query": "label_values(node_boot_time, instance)", + "refresh": 1, + "regex": "", + "sort": 0, + "tagValuesQuery": "", + "tags": [], + "tagsQuery": "", + "type": "query", + "useTags": false + } + ] + }, + "time": { + "from": "now-15m", + "to": "now" + }, + "timepicker": { + "refresh_intervals": [ + "5s", + "10s", + "30s", + "1m", + "5m", + "15m", + "30m", + "1h", + "2h", + "1d" + ], + "time_options": [ + "5m", + "15m", + "1h", + "6h", + "12h", + "24h", + "2d", + "7d", + "30d" + ] + }, + "timezone": "browser", + "title": "Agent Details", + "version": 2 +} \ No newline at end of file diff --git a/cloud/monitoring/grafana/Availability-1500625971529.json b/cloud/monitoring/grafana/Availability-1500625971529.json new file mode 100644 index 0000000000000000000000000000000000000000..2bddff9a6fdc9b6c07a1e4ae14202f1fb3887948 --- /dev/null +++ b/cloud/monitoring/grafana/Availability-1500625971529.json @@ -0,0 +1,174 @@ +{ + "__inputs": [ + { + "name": "DS_TEST", + "label": "test", + "description": "", + "type": "datasource", + "pluginId": "prometheus", + "pluginName": "Prometheus" + } + ], + "__requires": [ + { + "type": "grafana", + "id": "grafana", + "name": "Grafana", + "version": "4.4.1" + }, + { + "type": "panel", + "id": "graph", + "name": "Graph", + "version": "" + }, + { + "type": "datasource", + "id": "prometheus", + "name": "Prometheus", + "version": "1.0.0" + } + ], + "annotations": { + "list": [] + }, + 
"editMode": false, + "editable": true, + "gnetId": null, + "graphTooltip": 0, + "hideControls": false, + "id": null, + "links": [], + "rows": [ + { + "collapse": false, + "height": 426, + "panels": [ + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_TEST}", + "fill": 1, + "id": 1, + "legend": { + "alignAsTable": true, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 12, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "up{job=~\"availability.*\"}", + "format": "time_series", + "hide": false, + "intervalFactor": 2, + "legendFormat": "", + "refId": "A", + "step": 30 + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Availability", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "transparent": false, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": "", + "logBase": 1, + "max": "1", + "min": "0", + "show": false + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": false, + "title": "Dashboard Row", + "titleSize": "h6" + } + ], + "schemaVersion": 14, + "style": "dark", + "tags": [], + "templating": { + "list": [] + }, + "time": { + "from": "now-6h", + "to": "now" + }, + "timepicker": { + "refresh_intervals": [ + "5s", + "10s", + "30s", + "1m", + "5m", + "15m", + "30m", + "1h", + "2h", + "1d" + ], + "time_options": [ + "5m", + "15m", + "1h", + "6h", + "12h", + "24h", + "2d", + "7d", + "30d" + ] + }, + "timezone": "", + "title": "Availability", + "version": 2 +} \ No newline at end of file diff --git a/cloud/monitoring/grafana/Container Details-1500629042962.json b/cloud/monitoring/grafana/Container Details-1500629042962.json new file mode 100644 index 0000000000000000000000000000000000000000..f0f1eba091260ad722943f582399d5b33327fd31 --- /dev/null +++ b/cloud/monitoring/grafana/Container Details-1500629042962.json @@ -0,0 +1,710 @@ +{ + "__inputs": [ + { + "name": "DS_TEST", + "label": "test", + "description": "", + "type": "datasource", + "pluginId": "prometheus", + "pluginName": "Prometheus" + } + ], + "__requires": [ + { + "type": "grafana", + "id": "grafana", + "name": "Grafana", + "version": "4.4.1" + }, + { + "type": "panel", + "id": "graph", + "name": "Graph", + "version": "" + }, + { + "type": "datasource", + "id": "prometheus", + "name": "Prometheus", + "version": "1.0.0" + }, + { + "type": "panel", + "id": "singlestat", + "name": "Singlestat", + "version": "" + } + ], + "annotations": { + "list": [] + }, + "description": "Docker monitoring with Prometheus and cAdvisor", + "editable": true, + "gnetId": 193, + "graphTooltip": 1, + "hideControls": false, + "id": null, + "links": [], + "refresh": "10s", + "rows": [ + { + "collapse": false, + "height": "50", + "panels": [ + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": false, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "datasource": 
"${DS_TEST}", + "editable": true, + "error": false, + "format": "none", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "height": "20", + "id": 7, + "interval": null, + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "span": 4, + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": false + }, + "tableColumn": "", + "targets": [ + { + "expr": "count(container_last_seen{image!=\"\"})", + "intervalFactor": 2, + "legendFormat": "", + "metric": "container_last_seen", + "refId": "A", + "step": 4 + } + ], + "thresholds": "", + "title": "Running containers", + "transparent": true, + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "avg" + }, + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": false, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "datasource": "${DS_TEST}", + "editable": true, + "error": false, + "format": "mbytes", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "height": "20", + "id": 5, + "interval": null, + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "span": 4, + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": false + }, + "tableColumn": "", + "targets": [ + { + "expr": "sum(container_memory_usage_bytes{image!=\"\"})/1024/1024", + "intervalFactor": 2, + "legendFormat": "", + "metric": "container_memory_usage_bytes", + "refId": "A", + "step": 4 + } + ], + "thresholds": "", + "title": "Total Memory Usage", + "transparent": true, + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "current" + }, + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": false, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "datasource": "${DS_TEST}", + "editable": true, + "error": false, + "format": "percent", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "height": "20", + "id": 6, + "interval": null, + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + 
"span": 4, + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": false + }, + "tableColumn": "", + "targets": [ + { + "expr": "sum(rate(container_cpu_user_seconds_total{image!=\"\"}[5m]) * 100)", + "intervalFactor": 2, + "legendFormat": "", + "metric": "container_memory_usage_bytes", + "refId": "A", + "step": 4 + } + ], + "thresholds": "", + "title": "Total CPU Usage", + "transparent": true, + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "current" + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": false, + "title": "New row", + "titleSize": "h6" + }, + { + "collapse": false, + "height": "250px", + "panels": [ + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_TEST}", + "decimals": 2, + "editable": true, + "error": false, + "fill": 1, + "grid": {}, + "id": 2, + "legend": { + "alignAsTable": true, + "avg": true, + "current": true, + "max": false, + "min": false, + "rightSide": true, + "show": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 2, + "links": [], + "nullPointMode": "connected", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 12, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "rate(container_cpu_user_seconds_total{image!=\"\"}[5m]) * 100", + "intervalFactor": 2, + "legendFormat": "{{name}}", + "metric": "cpu", + "refId": "A", + "step": 2 + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "CPU Usage", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 0, + "value_type": "cumulative" + }, + "transparent": false, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "percent", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": false, + "title": "Row", + "titleSize": "h6" + }, + { + "collapse": false, + "height": "250px", + "panels": [ + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_TEST}", + "decimals": 2, + "editable": true, + "error": false, + "fill": 1, + "grid": {}, + "id": 1, + "legend": { + "alignAsTable": true, + "avg": true, + "current": true, + "max": false, + "min": false, + "rightSide": true, + "show": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 2, + "links": [], + "nullPointMode": "connected", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 12, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "container_memory_usage_bytes{image!=\"\"}", + "hide": false, + "intervalFactor": 2, + "legendFormat": "{{name}}", + "metric": "container_memory_usage_bytes", + "refId": "A", + "step": 2 + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Memory Usage", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 0, + "value_type": "cumulative" + }, + "transparent": false, + "type": 
"graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "bytes", + "label": "", + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": false, + "title": "New row", + "titleSize": "h6" + }, + { + "collapse": false, + "height": "250px", + "panels": [ + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_TEST}", + "editable": true, + "error": false, + "fill": 1, + "grid": {}, + "id": 3, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 2, + "links": [], + "nullPointMode": "connected", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 6, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "irate(container_network_receive_bytes_total{image!=\"\"}[5m])", + "intervalFactor": 2, + "legendFormat": "{{name}}", + "metric": "container_network_receive_bytes_total", + "refId": "A", + "step": 2 + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Network Rx", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 0, + "value_type": "cumulative" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "Bps", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_TEST}", + "editable": true, + "error": false, + "fill": 1, + "grid": {}, + "id": 4, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 2, + "links": [], + "nullPointMode": "connected", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 6, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "irate(container_network_transmit_bytes_total{image!=\"\"}[5m])", + "intervalFactor": 2, + "legendFormat": "{{name}}", + "refId": "A", + "step": 2 + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Network Tx", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 0, + "value_type": "cumulative" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "Bps", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": false, + "title": "New row", + "titleSize": "h6" + } + ], + "schemaVersion": 14, + "style": "dark", + "tags": [ + "docker" + ], + "templating": { + "list": [] + }, + "time": { + "from": "now-5m", + "to": "now" + }, + 
"timepicker": { + "refresh_intervals": [ + "5s", + "10s", + "30s", + "1m", + "5m", + "15m", + "30m", + "1h", + "2h", + "1d" + ], + "time_options": [ + "5m", + "15m", + "1h", + "6h", + "12h", + "24h", + "2d", + "7d", + "30d" + ] + }, + "timezone": "browser", + "title": "Container Details", + "version": 2 +} \ No newline at end of file diff --git a/cloud/monitoring/grafana/Docker-Swarm-Monitor-1500629206547.json b/cloud/monitoring/grafana/Docker-Swarm-Monitor-1500629206547.json new file mode 100644 index 0000000000000000000000000000000000000000..cad0f3e0a668ba942afd9f549b4748844f80418d --- /dev/null +++ b/cloud/monitoring/grafana/Docker-Swarm-Monitor-1500629206547.json @@ -0,0 +1,1144 @@ +{ + "__inputs": [ + { + "name": "DS_TEST", + "label": "test", + "description": "", + "type": "datasource", + "pluginId": "prometheus", + "pluginName": "Prometheus" + } + ], + "__requires": [ + { + "type": "grafana", + "id": "grafana", + "name": "Grafana", + "version": "4.4.1" + }, + { + "type": "panel", + "id": "graph", + "name": "Graph", + "version": "" + }, + { + "type": "datasource", + "id": "prometheus", + "name": "Prometheus", + "version": "1.0.0" + }, + { + "type": "panel", + "id": "singlestat", + "name": "Singlestat", + "version": "" + } + ], + "annotations": { + "list": [] + }, + "description": "Docker swarm monitor with Prometheus, cAdvisor, Node Exporter and Grafana", + "editable": true, + "gnetId": 2603, + "graphTooltip": 1, + "hideControls": false, + "id": null, + "links": [], + "rows": [ + { + "collapse": false, + "height": 195, + "panels": [ + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": false, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "datasource": "${DS_TEST}", + "decimals": 0, + "format": "s", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "id": 2, + "interval": null, + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "span": 2, + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": false + }, + "tableColumn": "", + "targets": [ + { + "expr": "time() - node_boot_time{instance=~\"$server:.*\"}", + "intervalFactor": 2, + "refId": "A", + "step": 240 + } + ], + "thresholds": "", + "title": "Up Time", + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "current" + }, + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": false, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "datasource": "${DS_TEST}", + "format": "none", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "id": 3, + "interval": null, + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": "", + "postfixFontSize": "50%", + 
"prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "span": 2, + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": false + }, + "tableColumn": "", + "targets": [ + { + "expr": "count(rate(container_last_seen{name=~\".+\"}[$interval]))", + "intervalFactor": 2, + "refId": "B", + "step": 240 + } + ], + "thresholds": "", + "title": "Containers", + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "current" + }, + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": false, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "datasource": "${DS_TEST}", + "format": "percentunit", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": true, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "id": 4, + "interval": null, + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "span": 2, + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": true + }, + "tableColumn": "", + "targets": [ + { + "expr": "((node_memory_MemAvailable{instance=~\"$server:.*\"}) / node_memory_MemTotal{instance=~\"$server:.*\"})", + "format": "time_series", + "intervalFactor": 1, + "refId": "A", + "step": 120 + } + ], + "thresholds": "10,20", + "title": "Available Memory (in %)", + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "current" + }, + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": false, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "datasource": "${DS_TEST}", + "format": "bytes", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": false + }, + "id": 11, + "interval": null, + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "span": 2, + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": false + }, + "tableColumn": "", + "targets": [ + { + "expr": "node_memory_MemAvailable{instance=~\"$server:.*\"}", + "interval": "30s", + "intervalFactor": 1, + "refId": "A", + "step": 120 + } + ], + "thresholds": "", + "title": "Available Memory (in GB)", + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "avg" + }, + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": false, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 122, 40, 0.89)", + 
"rgba(14, 211, 40, 0.97)" + ], + "datasource": "${DS_TEST}", + "format": "percentunit", + "gauge": { + "maxValue": 1, + "minValue": 0, + "show": true, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "id": 9, + "interval": null, + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "span": 2, + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": true + }, + "tableColumn": "", + "targets": [ + { + "expr": "min(( node_filesystem_free{fstype=~\"xfs|ext4\",instance=~\"$server:.*\"} )/ node_filesystem_size{fstype=~\"xfs|ext4\",instance=~\"$server:.*\"})", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "", + "refId": "A", + "step": 240 + } + ], + "thresholds": "0.10, 0.25", + "title": "Available Disk Space (in %)", + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "current" + }, + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": false, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 122, 40, 0.89)", + "rgba(14, 211, 40, 0.97)" + ], + "datasource": "${DS_TEST}", + "format": "bytes", + "gauge": { + "maxValue": 1, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": false + }, + "id": 10, + "interval": null, + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "span": 2, + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": false + }, + "tableColumn": "", + "targets": [ + { + "expr": "sum(node_filesystem_free{fstype=\"aufs\",instance=~\"$server:.*\"})", + "interval": "30s", + "intervalFactor": 1, + "legendFormat": "", + "refId": "A", + "step": 120 + } + ], + "thresholds": "", + "title": "Available Disk Space (in GB)", + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "avg" + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": false, + "title": "R1", + "titleSize": "h6" + }, + { + "collapse": false, + "height": 239, + "panels": [ + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_TEST}", + "decimals": null, + "fill": 5, + "height": "270px", + "id": 1, + "legend": { + "alignAsTable": true, + "avg": false, + "current": true, + "hideEmpty": false, + "hideZero": true, + "max": true, + "min": true, + "rightSide": true, + "show": true, + "sort": "current", + "sortDesc": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 
10, + "span": 12, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": "sum(rate(container_cpu_usage_seconds_total{name=~\".+\"}[$interval])) by (name)", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{name}}", + "refId": "A", + "step": 20 + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "CPU Usage per Container", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "percentunit", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": false, + "title": "R2", + "titleSize": "h6" + }, + { + "collapse": false, + "height": 300, + "panels": [ + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_TEST}", + "fill": 3, + "height": "270px", + "id": 5, + "legend": { + "alignAsTable": true, + "avg": false, + "current": true, + "hideZero": true, + "max": true, + "min": true, + "rightSide": true, + "show": true, + "sort": "current", + "sortDesc": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 2, + "links": [], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 12, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": "sum(container_memory_rss{name=~\".+\"}) by (name)", + "format": "time_series", + "hide": false, + "intervalFactor": 2, + "legendFormat": "{{name}}", + "metric": "container_memory_rss", + "refId": "A", + "step": 20 + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Memory Usage per Container", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "bytes", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": false, + "title": "R3", + "titleSize": "h6" + }, + { + "collapse": false, + "height": 285, + "panels": [ + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_TEST}", + "fill": 1, + "height": "270px", + "id": 6, + "legend": { + "alignAsTable": true, + "avg": false, + "current": true, + "hideEmpty": false, + "hideZero": true, + "max": true, + "min": true, + "rightSide": true, + "show": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 2, + "links": [], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 12, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(rate(container_network_receive_bytes_total{name=~\".+\"}[$interval])) by (name)", + "intervalFactor": 2, + "legendFormat": "{{name}}", + "refId": "A", + "step": 20 + } + ], + "thresholds": [], + 
"timeFrom": null, + "timeShift": null, + "title": "Incoming Network Traffic per Container", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "Bps", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": false, + "title": "R4", + "titleSize": "h6" + }, + { + "collapse": false, + "height": 226, + "panels": [ + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_TEST}", + "fill": 1, + "height": "270px", + "id": 8, + "legend": { + "alignAsTable": true, + "avg": false, + "current": true, + "hideEmpty": false, + "hideZero": true, + "max": true, + "min": true, + "rightSide": true, + "show": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 12, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(rate(container_network_transmit_bytes_total{name=~\".+\"}[$interval])) by (name)", + "intervalFactor": 2, + "legendFormat": "{{name}}", + "refId": "A", + "step": 20 + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Outgoing Network Traffic per Container", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "Bps", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": false, + "title": "R5", + "titleSize": "h6" + }, + { + "collapse": false, + "height": 250, + "panels": [ + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_TEST}", + "fill": 1, + "id": 12, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 12, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(rate(node_filesystem_free{fstype=~\"xfs|ext4\"}[$interval])) by (instance)", + "format": "time_series", + "hide": false, + "intervalFactor": 2, + "refId": "A", + "step": 20 + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Panel Title", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + 
"min": null, + "show": true + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": false, + "title": "Dashboard Row", + "titleSize": "h6" + } + ], + "schemaVersion": 14, + "style": "dark", + "tags": [ + "prometheus", + "cAdvisor", + "node-exporter", + "alertmanager" + ], + "templating": { + "list": [ + { + "auto": true, + "auto_count": 50, + "auto_min": "50s", + "current": { + "text": "auto", + "value": "$__auto_interval" + }, + "hide": 0, + "label": "Interval", + "name": "interval", + "options": [ + { + "selected": true, + "text": "auto", + "value": "$__auto_interval" + }, + { + "selected": false, + "text": "30s", + "value": "30s" + }, + { + "selected": false, + "text": "1m", + "value": "1m" + }, + { + "selected": false, + "text": "2m", + "value": "2m" + }, + { + "selected": false, + "text": "3m", + "value": "3m" + }, + { + "selected": false, + "text": "5m", + "value": "5m" + }, + { + "selected": false, + "text": "7m", + "value": "7m" + }, + { + "selected": false, + "text": "10m", + "value": "10m" + }, + { + "selected": false, + "text": "30m", + "value": "30m" + }, + { + "selected": false, + "text": "1h", + "value": "1h" + }, + { + "selected": false, + "text": "6h", + "value": "6h" + }, + { + "selected": false, + "text": "12h", + "value": "12h" + }, + { + "selected": false, + "text": "1d", + "value": "1d" + }, + { + "selected": false, + "text": "7d", + "value": "7d" + }, + { + "selected": false, + "text": "14d", + "value": "14d" + }, + { + "selected": false, + "text": "30d", + "value": "30d" + } + ], + "query": "30s,1m,2m,3m,5m,7m,10m,30m,1h,6h,12h,1d,7d,14d,30d", + "refresh": 2, + "type": "interval" + }, + { + "allValue": null, + "current": {}, + "datasource": "${DS_TEST}", + "hide": 0, + "includeAll": false, + "label": "Node", + "multi": true, + "name": "server", + "options": [], + "query": "label_values(node_boot_time, instance)", + "refresh": 1, + "regex": "/([^:]+):.*/", + "sort": 0, + "tagValuesQuery": "", + "tags": [], + "tagsQuery": "", + "type": "query", + "useTags": false + } + ] + }, + "time": { + "from": "now-3h", + "to": "now" + }, + "timepicker": { + "refresh_intervals": [ + "5s", + "10s", + "30s", + "1m", + "5m", + "15m", + "30m", + "1h", + "2h", + "1d" + ], + "time_options": [ + "5m", + "15m", + "1h", + "6h", + "12h", + "24h", + "2d", + "7d", + "30d" + ] + }, + "timezone": "browser", + "title": "Docker-Swarm-Monitor", + "version": 4 +} \ No newline at end of file diff --git a/cloud/monitoring/grafana/dashboards/swarm.json b/cloud/monitoring/grafana/dashboards/swarm.json new file mode 100644 index 0000000000000000000000000000000000000000..9991c0c6f8553ce415eb65ae8f0ad6add429d515 --- /dev/null +++ b/cloud/monitoring/grafana/dashboards/swarm.json @@ -0,0 +1,693 @@ +{ + "__inputs": [ + { + "name": "DS_INFLUX", + "label": "influx", + "description": "", + "type": "datasource", + "pluginId": "influxdb", + "pluginName": "InfluxDB" + } + ], + "__requires": [ + { + "type": "grafana", + "id": "grafana", + "name": "Grafana", + "version": "4.2.0" + }, + { + "type": "panel", + "id": "graph", + "name": "Graph", + "version": "" + }, + { + "type": "datasource", + "id": "influxdb", + "name": "InfluxDB", + "version": "1.0.0" + } + ], + "annotations": { + "list": [] + }, + "editable": true, + "gnetId": null, + "graphTooltip": 0, + "hideControls": false, + "id": null, + "links": [], + "refresh": false, + "rows": [ + { + "collapse": false, + "height": 250, + "panels": [ + { + "aliasColors": {}, + "bars": false, + "datasource": "${DS_INFLUX}", + 
"fill": 1, + "id": 1, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "span": 6, + "stack": false, + "steppedLine": false, + "targets": [ + { + "alias": "Memory {host: $tag_machine, container: $tag_container_name}", + "dsType": "influxdb", + "groupBy": [ + { + "params": [ + "machine" + ], + "type": "tag" + }, + { + "params": [ + "container_name" + ], + "type": "tag" + } + ], + "measurement": "memory_usage", + "policy": "default", + "query": "SELECT \"value\" FROM \"memory_usage\" WHERE \"container_name\" =~ /^$container$/ AND \"machine\" =~ /^$host$/ AND $timeFilter", + "rawQuery": false, + "refId": "A", + "resultFormat": "time_series", + "select": [ + [ + { + "params": [ + "value" + ], + "type": "field" + } + ] + ], + "tags": [ + { + "key": "container_name", + "operator": "=~", + "value": "/^$container$*/" + }, + { + "condition": "AND", + "key": "machine", + "operator": "=~", + "value": "/^$host$/" + } + ] + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Memory", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "decbytes", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + }, + { + "aliasColors": {}, + "bars": false, + "datasource": "${DS_INFLUX}", + "fill": 1, + "id": 2, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "span": 6, + "stack": false, + "steppedLine": false, + "targets": [ + { + "alias": "CPU {host: $tag_machine, container: $tag_container_name}", + "dsType": "influxdb", + "groupBy": [ + { + "params": [ + "machine" + ], + "type": "tag" + }, + { + "params": [ + "container_name" + ], + "type": "tag" + } + ], + "measurement": "cpu_usage_total", + "policy": "default", + "refId": "A", + "resultFormat": "time_series", + "select": [ + [ + { + "params": [ + "value" + ], + "type": "field" + }, + { + "params": [ + "10s" + ], + "type": "derivative" + } + ] + ], + "tags": [ + { + "key": "container_name", + "operator": "=~", + "value": "/^$container$*/" + }, + { + "condition": "AND", + "key": "machine", + "operator": "=~", + "value": "/^$host$/" + } + ] + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "CPU", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "hertz", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": false, + "title": "Dashboard Row", + "titleSize": "h6" + }, + { + 
"collapse": false, + "height": 250, + "panels": [ + { + "aliasColors": {}, + "bars": false, + "datasource": "${DS_INFLUX}", + "fill": 1, + "id": 3, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "span": 6, + "stack": false, + "steppedLine": false, + "targets": [ + { + "alias": "Usage {host: $tag_machine, container: $tag_container_name}", + "dsType": "influxdb", + "groupBy": [ + { + "params": [ + "container_name" + ], + "type": "tag" + }, + { + "params": [ + "machine" + ], + "type": "tag" + } + ], + "measurement": "fs_usage", + "policy": "default", + "refId": "A", + "resultFormat": "time_series", + "select": [ + [ + { + "params": [ + "value" + ], + "type": "field" + } + ] + ], + "tags": [ + { + "key": "machine", + "operator": "=~", + "value": "/^$host$/" + }, + { + "condition": "AND", + "key": "container_name", + "operator": "=~", + "value": "/^$container$*/" + } + ] + }, + { + "alias": "Limit {host: $tag_machine, container: $tag_container_name}", + "dsType": "influxdb", + "groupBy": [ + { + "params": [ + "container_name" + ], + "type": "tag" + }, + { + "params": [ + "machine" + ], + "type": "tag" + } + ], + "measurement": "fs_limit", + "policy": "default", + "refId": "B", + "resultFormat": "time_series", + "select": [ + [ + { + "params": [ + "value" + ], + "type": "field" + } + ] + ], + "tags": [ + { + "key": "machine", + "operator": "=~", + "value": "/^$host$/" + }, + { + "condition": "AND", + "key": "container_name", + "operator": "=~", + "value": "/^$container$*/" + } + ] + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "File System", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "decbytes", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + }, + { + "aliasColors": {}, + "bars": false, + "datasource": "${DS_INFLUX}", + "fill": 1, + "id": 4, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "span": 6, + "stack": false, + "steppedLine": false, + "targets": [ + { + "alias": "RX {host: $tag_machine, container: $tag_container_name}", + "dsType": "influxdb", + "groupBy": [ + { + "params": [ + "container_name" + ], + "type": "tag" + }, + { + "params": [ + "machine" + ], + "type": "tag" + } + ], + "measurement": "rx_bytes", + "policy": "default", + "refId": "A", + "resultFormat": "time_series", + "select": [ + [ + { + "params": [ + "value" + ], + "type": "field" + }, + { + "params": [ + "10s" + ], + "type": "derivative" + } + ] + ], + "tags": [ + { + "key": "machine", + "operator": "=~", + "value": "/^$host$/" + }, + { + "condition": "AND", + "key": "container_name", + "operator": "=~", + "value": "/^$container$*/" + } + ] + }, + { + "alias": "TX {host: $tag_machine, container: $tag_container_name}", + "dsType": 
"influxdb", + "groupBy": [ + { + "params": [ + "container_name" + ], + "type": "tag" + }, + { + "params": [ + "machine" + ], + "type": "tag" + } + ], + "measurement": "tx_bytes", + "policy": "default", + "refId": "B", + "resultFormat": "time_series", + "select": [ + [ + { + "params": [ + "value" + ], + "type": "field" + }, + { + "params": [ + "10s" + ], + "type": "derivative" + } + ] + ], + "tags": [ + { + "key": "machine", + "operator": "=~", + "value": "/^$host$/" + }, + { + "condition": "AND", + "key": "container_name", + "operator": "=~", + "value": "/^$container$*/" + } + ] + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Network", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "Bps", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": false, + "title": "Dashboard Row", + "titleSize": "h6" + } + ], + "schemaVersion": 14, + "style": "dark", + "tags": [], + "templating": { + "list": [ + { + "allValue": "", + "current": {}, + "datasource": "${DS_INFLUX}", + "hide": 0, + "includeAll": true, + "label": "Host", + "multi": false, + "name": "host", + "options": [], + "query": "show tag values with key = \"machine\"", + "refresh": 1, + "regex": "", + "sort": 0, + "tagValuesQuery": "", + "tags": [], + "tagsQuery": "", + "type": "query", + "useTags": false + }, + { + "allValue": null, + "current": {}, + "datasource": "${DS_INFLUX}", + "hide": 0, + "includeAll": false, + "label": "Container", + "multi": false, + "name": "container", + "options": [], + "query": "show tag values with key = \"container_name\" WHERE machine =~ /^$host$/", + "refresh": 1, + "regex": "/([^.]+)/", + "sort": 0, + "tagValuesQuery": "", + "tags": [], + "tagsQuery": "", + "type": "query", + "useTags": false + } + ] + }, + "time": { + "from": "now-1h", + "to": "now" + }, + "timepicker": { + "refresh_intervals": [ + "5s", + "10s", + "30s", + "1m", + "5m", + "15m", + "30m", + "1h", + "2h", + "1d" + ], + "time_options": [ + "5m", + "15m", + "1h", + "6h", + "12h", + "24h", + "2d", + "7d", + "30d" + ] + }, + "timezone": "browser", + "title": "cAdvisor", + "version": 3 +} \ No newline at end of file diff --git a/cloud/stacks/jenkins/jenkins-documentation-slave.yml b/cloud/stacks/jenkins/jenkins-documentation-slave.yml new file mode 100644 index 0000000000000000000000000000000000000000..8bef6a66105e56440fcbaceb93557656d75ace02 --- /dev/null +++ b/cloud/stacks/jenkins/jenkins-documentation-slave.yml @@ -0,0 +1,29 @@ +version: '3.1' + +services: + + documentation-jenkins-slave: + image: sunbird/documentation-jenkins-swarm-agent:0.0.1 + environment: + - USER_NAME_SECRET=/run/secrets/${JENKINS_USER_SECRET:-jenkins-user} + - PASSWORD_SECRET=/run/secrets/${JENKINS_PASS_SECRET:-jenkins-pass} + - COMMAND_OPTIONS=-master ${JENKINS_URL} -labels ${DOCUMENTATION_SLAVE_LABEL} -executors 1 -name documentation-jenkins-slave + volumes: + - /var/run/docker.sock:/var/run/docker.sock + secrets: + - jenkins-user + - jenkins-pass + deploy: + replicas: 1 + networks: + - jenkins_default + +secrets: + jenkins-user: + external: true + jenkins-pass: + external: true + +networks: + jenkins_default: + external: true diff --git 
a/cloud/stacks/jenkins/jenkins-reporter-slave.yml b/cloud/stacks/jenkins/jenkins-reporter-slave.yml new file mode 100644 index 0000000000000000000000000000000000000000..a4e67d759e5b539e6bb32f96065ab7b812f2b621 --- /dev/null +++ b/cloud/stacks/jenkins/jenkins-reporter-slave.yml @@ -0,0 +1,29 @@ +version: '3.1' + +services: + + reporter-jenkins-slave: + image: sunbird/reporter-jenkins-swarm-agent + environment: + - USER_NAME_SECRET=/run/secrets/${JENKINS_USER_SECRET:-jenkins-user} + - PASSWORD_SECRET=/run/secrets/${JENKINS_PASS_SECRET:-jenkins-pass} + - COMMAND_OPTIONS=-master ${JENKINS_URL} -labels ${REPORT_SLAVE_LABEL} -executors 1 -name reporter-jenkins-slave + volumes: + - /var/run/docker.sock:/var/run/docker.sock + secrets: + - jenkins-user + - jenkins-pass + deploy: + replicas: 1 + networks: + - jenkins_default + +secrets: + jenkins-user: + external: true + jenkins-pass: + external: true + +networks: + jenkins_default: + external: true diff --git a/cloud/stacks/jenkins/jenkins-slave.yml b/cloud/stacks/jenkins/jenkins-slave.yml new file mode 100644 index 0000000000000000000000000000000000000000..e4244fbe8e20a20d6d3105d43a7bcfcbf649fa95 --- /dev/null +++ b/cloud/stacks/jenkins/jenkins-slave.yml @@ -0,0 +1,78 @@ +version: '3.1' + +services: + + jenkins-slave: + image: vfarcic/jenkins-swarm-agent + environment: + - USER_NAME_SECRET=/run/secrets/${JENKINS_USER_SECRET:-jenkins-user} + - PASSWORD_SECRET=/run/secrets/${JENKINS_PASS_SECRET:-jenkins-pass} + - COMMAND_OPTIONS=-master ${JENKINS_URL} -labels ${GENERAL_SLAVE_LABEL} -executors 1 + volumes: + - /var/run/docker.sock:/var/run/docker.sock + secrets: + - jenkins-user + - jenkins-pass + - hub-pass + - source: deployer-ssh-key + target: deployer-ssh-key + mode: 0600 + - source: ops-private-key + target: ops-private-key + mode: 0600 + - source: vault-pass + target: vault-pass + mode: 0600 + deploy: + replicas: 3 + resources: + reservations: + memory: 256M + limits: + memory: 512M + + networks: + - jenkins_default + + api-manager-jenkins-slave: + image: sunbird/api-manager-jenkins-swarm-agent + environment: + - USER_NAME_SECRET=/run/secrets/${JENKINS_USER_SECRET:-jenkins-user} + - PASSWORD_SECRET=/run/secrets/${JENKINS_PASS_SECRET:-jenkins-pass} + - COMMAND_OPTIONS=-master ${JENKINS_URL} -labels ${API_SLAVE_LABEL} -executors 1 -name api-manager-jenkins-slave + volumes: + - /var/run/docker.sock:/var/run/docker.sock + secrets: + - jenkins-user + - jenkins-pass + - vault-pass + deploy: + replicas: 1 + resources: + reservations: + memory: 256M + limits: + memory: 512M + networks: + - jenkins_default + - api-manager_default + +secrets: + jenkins-user: + external: true + jenkins-pass: + external: true + hub-pass: + external: true + deployer-ssh-key: + external: true + ops-private-key: + external: true + vault-pass: + external: true + +networks: + jenkins_default: + external: true + api-manager_default: + external: true diff --git a/cloud/stacks/jenkins/jenkins.yml b/cloud/stacks/jenkins/jenkins.yml new file mode 100644 index 0000000000000000000000000000000000000000..6ae793c6015c8dc98cb27ae8398864b135d30d44 --- /dev/null +++ b/cloud/stacks/jenkins/jenkins.yml @@ -0,0 +1,36 @@ +version: '3' + +services: + + jenkins: + image: jenkinsci/jenkins:2.69-alpine + ports: + - "50000:50000" + - "8080:8080" + environment: + - JENKINS_OPTS="--prefix=/jenkins" + volumes: + - jenkins:/var/jenkins_home + deploy: + labels: + - com.df.notify=true + - com.df.distribute=true + - com.df.servicePath=/jenkins + - com.df.port=8080 + placement: + constraints: + - 
"node.labels.jenkins==1" + + +volumes: + jenkins: + driver: local + +# docker service create --name jenkins \ +# -p 8082:8080 \ +# -p 50000:51000 \ +# -e JENKINS_OPTS="--prefix=/jenkins" \ +# --reserve-memory 300m \ +# --constraint "node.labels.build.role==master" \ +# --mount type=volume,volume-driver=local,source={{.Service.Name}}-{{.Task.Slot}}-vol,destination=/var/jenkins_home \ +# jenkinsci/jenkins:lts-alpine diff --git a/cloud/stacks/mongo/mongo.yml b/cloud/stacks/mongo/mongo.yml new file mode 100644 index 0000000000000000000000000000000000000000..e78f7762ca6c96e7672274374e6783546c3d8ed4 --- /dev/null +++ b/cloud/stacks/mongo/mongo.yml @@ -0,0 +1,5 @@ +version: '3' + +services: + test_mongo: + image: mongoclient/mongoclient diff --git a/cloud/stacks/monitoring/monitor.yml b/cloud/stacks/monitoring/monitor.yml new file mode 100644 index 0000000000000000000000000000000000000000..11dbb1d663fb2f64372c791426b77e23425e0577 --- /dev/null +++ b/cloud/stacks/monitoring/monitor.yml @@ -0,0 +1,58 @@ +version: '3' + +services: + + influx: + image: influxdb + volumes: + - influx:/var/lib/influxdb + deploy: + replicas: 1 + placement: + constraints: + - node.labels.influx==1 + networks: + - monitor + + grafana: + image: grafana/grafana + ports: + - 3013:3000 + volumes: + - grafana:/var/lib/grafana + depends_on: + - influx + deploy: + replicas: 1 + placement: + constraints: + - node.labels.grafana==1 + networks: + - monitor + + cadvisor: + image: google/cadvisor + command: -logtostderr -docker_only -storage_driver=influxdb -storage_driver_db=cadvisor -storage_driver_host=influx:8086 + volumes: + - /:/rootfs:ro + - /var/run:/var/run:rw + - /sys:/sys:ro + - /var/lib/docker/:/var/lib/docker:ro + depends_on: + - influx + deploy: + mode: global + networks: + - monitor + +volumes: + influx: + driver: local + grafana: + driver: local + +networks: + monitor: + external: true + viz: + external: true \ No newline at end of file diff --git a/cloud/stacks/monitoring/vizualiser.yml b/cloud/stacks/monitoring/vizualiser.yml new file mode 100644 index 0000000000000000000000000000000000000000..9c5c916d49621707ebcdd2793ce86fc7d179a68d --- /dev/null +++ b/cloud/stacks/monitoring/vizualiser.yml @@ -0,0 +1,18 @@ +# docker service create --name=viz --publish=8080:8080/tcp --constraint=node.role==manager --mount=type=bind,src=/var/run/docker.sock,dst=/var/run/docker.sock dockersamples/visualizer + +version: '3.1' + +services: + viz: + image: dockersamples/visualizer + ports: + - 3014:8080 + volumes: + - "/var/run/docker.sock:/var/run/docker.sock" + deploy: + replicas: 1 + placement: + constraints: + - node.role==manager + + diff --git a/cloud/stacks/sunbird/sunbird.yml b/cloud/stacks/sunbird/sunbird.yml new file mode 100644 index 0000000000000000000000000000000000000000..41a8d953b5102dede82befd3d5d4d890d0b36ee3 --- /dev/null +++ b/cloud/stacks/sunbird/sunbird.yml @@ -0,0 +1,39 @@ +version: '3.1' +services: + player: + image : "manojrpms/sunbird_player_image:latest" + deploy: + replicas: 1 + environment: + sunbird_port: 3000 + sunbird_content_player_url: "http://content_service_content_service:5000/api/sb/v1/" + sunbird_learner_player_url: "http://learner_service_learner_service:9000/v1/" + ports: + - "3000:3000" + networks: + - default + + content: + image : "manojrpms/sunbird_content_service_image:latest" + environment: + sunbird_mongo_ip: 10.10.2.5 + deploy: + replicas: 1 + ports: + - "5000:5000" + networks: + - default + learner: + image : "manojrpms/sunbird_learner_service_image:latest" + environment: + 
sunbird_learnerstate_actor_host: actor-service + sunbird_learnerstate_actor_port: 8088 + deploy: + replicas: 1 + ports: + - "9000:9000" + networks: + - default +networks: + default: + driver: overlay diff --git a/images/api-manager-jenkins-swarm-agent/Dockerfile b/images/api-manager-jenkins-swarm-agent/Dockerfile new file mode 100644 index 0000000000000000000000000000000000000000..f2162c4dec3092bec22b4b3841db050d78e08093 --- /dev/null +++ b/images/api-manager-jenkins-swarm-agent/Dockerfile @@ -0,0 +1,11 @@ +FROM vfarcic/jenkins-swarm-agent + +USER root +RUN apk --update add sudo && \ + apk --update add python py-pip openssl ca-certificates && \ + apk --update add --virtual build-dependencies python-dev libffi-dev openssl-dev build-base && \ + pip install --upgrade pip cffi && \ + pip install "ansible==2.1.3" +RUN pip install PyJWT +RUN apk add --virtual build-deps gcc python-dev musl-dev && \ + apk add py2-psycopg2 diff --git a/images/cassandra_jmx_exporter/Dockerfile b/images/cassandra_jmx_exporter/Dockerfile new file mode 100644 index 0000000000000000000000000000000000000000..6d7e474059c825cc80dfbb29d0d51c967a45b440 --- /dev/null +++ b/images/cassandra_jmx_exporter/Dockerfile @@ -0,0 +1,19 @@ +FROM openjdk:8-jre-alpine + +ENV APP_HOME=/opt/app +RUN mkdir -p $APP_HOME +WORKDIR $APP_HOME + +COPY /images/cassandra_jmx_exporter/jmx_prometheus_httpserver-0.11.jar jmx_prometheus_httpserver-0.11.jar +COPY /images/cassandra_jmx_exporter/logging.properties logging.properties + +EXPOSE 5556 + +ENTRYPOINT /usr/bin/java ${JAVA_OPTS} -jar jmx_prometheus_httpserver-0.11.jar 5556 $APP_HOME/jmx_httpserver.yml + +#!/usr/bin/env bash +# Script to run a java application for testing jmx4prometheus. + +# Note: You can use localhost:5556 instead of 5556 for configuring socket hostname. + +#java -Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.port=5555 -jar /usr/share/cassandra/lib/jmx_prometheus_httpserver-0.11.jar 5556 /etc/cassandra/jmx_httpserver.yml diff --git a/images/cassandra_jmx_exporter/build.sh b/images/cassandra_jmx_exporter/build.sh new file mode 100755 index 0000000000000000000000000000000000000000..cb40a6f9d48114d66ff9e3e724372daccec71c65 --- /dev/null +++ b/images/cassandra_jmx_exporter/build.sh @@ -0,0 +1,13 @@ +#!/bin/sh +# Build script +# set -o errexit +e () { + echo $( echo ${1} | jq ".${2}" | sed 's/\"//g') +} +m=$(./images/cassandra_jmx_exporter/metadata.sh) + +org=$(e "${m}" "org") +name=$(e "${m}" "name") +version=$(e "${m}" "version") + +docker build -f ./images/cassandra_jmx_exporter/Dockerfile -t ${org}/${name}:${version} . 
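
The `e()` helper that these build scripts share pipes the JSON emitted by `metadata.sh` through `jq` and then strips the quotes with `sed`. A minimal equivalent sketch using `jq`'s raw-output flag (`-r`), which drops the quotes itself and avoids the extra `sed` pass — the field names are the ones `metadata.sh` prints above:

```sh
#!/bin/sh
# Sketch: extract metadata fields with `jq -r` (raw string output, no sed).
# Assumes metadata.sh prints the JSON object shown above.
m=$(./images/cassandra_jmx_exporter/metadata.sh)

org=$(echo "${m}" | jq -r '.org')         # -> sunbird
name=$(echo "${m}" | jq -r '.name')       # -> cassandra_jmx_exporter
version=$(echo "${m}" | jq -r '.version') # -> 0.11

docker build -f ./images/cassandra_jmx_exporter/Dockerfile \
  -t "${org}/${name}:${version}" .
```
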
diff --git a/images/cassandra_jmx_exporter/dockerPushToRepo.sh b/images/cassandra_jmx_exporter/dockerPushToRepo.sh new file mode 100755 index 0000000000000000000000000000000000000000..2e1d5c8b30cd7121de0b81eaccb2d7aad3a42e83 --- /dev/null +++ b/images/cassandra_jmx_exporter/dockerPushToRepo.sh @@ -0,0 +1,18 @@ +#!/bin/sh +# Build script +# set -o errexit +e () { + echo $( echo ${1} | jq ".${2}" | sed 's/\"//g') +} +m=$(./images/cassandra_jmx_exporter/metadata.sh) + +org=$(e "${m}" "org") +hubuser=$(e "${m}" "hubuser") +name=$(e "${m}" "name") +version=$(e "${m}" "version") + +artifactLabel=${ARTIFACT_LABEL:-bronze} + +docker login -u "${hubuser}" -p`cat /run/secrets/hub-pass` +docker push ${org}/${name}:${version} +docker logout diff --git a/images/cassandra_jmx_exporter/jmx_prometheus_httpserver-0.11.jar b/images/cassandra_jmx_exporter/jmx_prometheus_httpserver-0.11.jar new file mode 100644 index 0000000000000000000000000000000000000000..d0dfe48b72ba2ff966026ad8fd8d05ef0b27a94d Binary files /dev/null and b/images/cassandra_jmx_exporter/jmx_prometheus_httpserver-0.11.jar differ diff --git a/images/cassandra_jmx_exporter/logging.properties b/images/cassandra_jmx_exporter/logging.properties new file mode 100644 index 0000000000000000000000000000000000000000..8cc767d5850e6cd913b37e9976f73b6b522ec837 --- /dev/null +++ b/images/cassandra_jmx_exporter/logging.properties @@ -0,0 +1,3 @@ +java.util.logging.ConsoleHandler.level=INFO +io.prometheus.jmx.level=INFO +io.prometheus.jmx.shaded.io.prometheus.jmx.level=INFO diff --git a/images/cassandra_jmx_exporter/metadata.sh b/images/cassandra_jmx_exporter/metadata.sh new file mode 100755 index 0000000000000000000000000000000000000000..4c474b3c0cbc9d22b7665983053284b871bd39ce --- /dev/null +++ b/images/cassandra_jmx_exporter/metadata.sh @@ -0,0 +1,3 @@ +#!/bin/sh +# return version +echo '{"name":"cassandra_jmx_exporter","version":"0.11","org":"sunbird","hubuser":"purplesunbird"}' diff --git a/images/documentation-jenkins-swarm-agent/Dockerfile b/images/documentation-jenkins-swarm-agent/Dockerfile new file mode 100644 index 0000000000000000000000000000000000000000..5f7a1b733e1880b612794a7c2d486620e648f62d --- /dev/null +++ b/images/documentation-jenkins-swarm-agent/Dockerfile @@ -0,0 +1,4 @@ +FROM vfarcic/jenkins-swarm-agent + +USER root +RUN apk -v --update add ruby=2.4.1-r3 nodejs=6.10.3-r1 ruby-bundler=1.15.0-r0 diff --git a/images/documentation-jenkins-swarm-agent/build.sh b/images/documentation-jenkins-swarm-agent/build.sh new file mode 100755 index 0000000000000000000000000000000000000000..f24bd2a7673330ed28d3c431906fa2c10d2f66a2 --- /dev/null +++ b/images/documentation-jenkins-swarm-agent/build.sh @@ -0,0 +1,13 @@ +#!/bin/sh +# Build script +# set -o errexit +e () { + echo $( echo ${1} | jq ".${2}" | sed 's/\"//g') +} +m=$(./images/documentation-jenkins-swarm-agent/metadata.sh) + +org=$(e "${m}" "org") +name=$(e "${m}" "name") +version=$(e "${m}" "version") + +docker build -f ./images/documentation-jenkins-swarm-agent/Dockerfile -t ${org}/${name}:${version} . 
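
The `dockerPushToRepo.sh` scripts in this tree log in by passing the hub password as a `-p` argument read from the `hub-pass` secret, which briefly exposes it in the process list. A hedged alternative sketch that pipes the secret on stdin instead — this assumes a Docker CLI recent enough (17.07+) to support `--password-stdin`; secret path and metadata fields are the same ones used above:

```sh
#!/bin/sh
# Sketch: same push flow, but the password is fed on stdin rather than
# passed via -p, so it never appears in the process arguments.
m=$(./images/documentation-jenkins-swarm-agent/metadata.sh)
org=$(echo "${m}" | jq -r '.org')
hubuser=$(echo "${m}" | jq -r '.hubuser')
name=$(echo "${m}" | jq -r '.name')
version=$(echo "${m}" | jq -r '.version')

docker login -u "${hubuser}" --password-stdin < /run/secrets/hub-pass
docker push "${org}/${name}:${version}"
docker logout
```
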
diff --git a/images/documentation-jenkins-swarm-agent/dockerPushToRepo.sh b/images/documentation-jenkins-swarm-agent/dockerPushToRepo.sh new file mode 100755 index 0000000000000000000000000000000000000000..e9043f794b37b45ab1144a43d9e259a9001b501a --- /dev/null +++ b/images/documentation-jenkins-swarm-agent/dockerPushToRepo.sh @@ -0,0 +1,16 @@ +#!/bin/sh +# Build script +# set -o errexit +e () { + echo $( echo ${1} | jq ".${2}" | sed 's/\"//g') +} +m=$(./images/documentation-jenkins-swarm-agent/metadata.sh) + +org=$(e "${m}" "org") +hubuser=$(e "${m}" "hubuser") +name=$(e "${m}" "name") +version=$(e "${m}" "version") + +docker login -u "${hubuser}" -p`cat /run/secrets/hub-pass` +docker push ${org}/${name}:${version} +docker logout diff --git a/images/documentation-jenkins-swarm-agent/metadata.sh b/images/documentation-jenkins-swarm-agent/metadata.sh new file mode 100755 index 0000000000000000000000000000000000000000..94d66f75bc14a46e62b50a34a7ecd9c96b9756c3 --- /dev/null +++ b/images/documentation-jenkins-swarm-agent/metadata.sh @@ -0,0 +1,3 @@ +#!/bin/sh +# return version +echo '{"name":"documentation-jenkins-swarm-agent","version":"0.0.1","org":"sunbird","hubuser":"purplesunbird"}' diff --git a/images/echo-server/Dockerfile b/images/echo-server/Dockerfile new file mode 100644 index 0000000000000000000000000000000000000000..8e42060285daba94e1276ad9c03879748ff8f361 --- /dev/null +++ b/images/echo-server/Dockerfile @@ -0,0 +1,8 @@ +FROM python:2.7.13-alpine + +COPY server.py server.py + +ENV ECHO_SERVER_PORT 9595 +EXPOSE 9595 + +CMD ["python", "server.py"] diff --git a/images/echo-server/Jenkinsfile.sample b/images/echo-server/Jenkinsfile.sample new file mode 100644 index 0000000000000000000000000000000000000000..1e227222310e8efb48de0eef85e40a75f9ec4665 --- /dev/null +++ b/images/echo-server/Jenkinsfile.sample @@ -0,0 +1,34 @@ +#!groovy + +node('general-staging') { + + currentBuild.result = "SUCCESS" + + try { + + stage('Checkout'){ + + checkout scm + } + + stage('Pre-Build'){ + + sh('./images/echo-server/installDeps.sh') + + } + + stage('Deploy to Staging'){ + step ([$class: 'CopyArtifact', + projectName: 'Tag_As_Silver/Echo_Server_Tag_Silver', + filter: 'metadata.json']); + sh 'METADATA_FILE=metadata.json ARTIFACT_LABEL=silver ENV=staging ./images/echo-server/deploy.sh' + archive includes: "metadata.json" + + } + } + catch (err) { + currentBuild.result = "FAILURE" + throw err + } + +} diff --git a/images/echo-server/README.md b/images/echo-server/README.md new file mode 100644 index 0000000000000000000000000000000000000000..cb308bfa0553bee26510557d308b088f9eb4b7dd --- /dev/null +++ b/images/echo-server/README.md @@ -0,0 +1,15 @@ +# Echo server + +A simple echo server using python alpine image to echo the request path + +### How to run + +``` +docker run -p 9595:9595 sunbird/echo-server:latest +``` + +### Test + +``` +curl localhost:9595/hello +``` \ No newline at end of file diff --git a/images/echo-server/build.sh b/images/echo-server/build.sh new file mode 100755 index 0000000000000000000000000000000000000000..305af822bccbc2d2d80169d7d4c05559f09e8bea --- /dev/null +++ b/images/echo-server/build.sh @@ -0,0 +1,14 @@ +#!/bin/sh +# Build script +# set -o errexit +e () { + echo $( echo ${1} | jq ".${2}" | sed 's/\"//g') +} +m=$(./images/echo-server/metadata.sh) + +org=$(e "${m}" "org") +name=$(e "${m}" "name") +version=$(e "${m}" "version") + +cd images/echo-server +docker build -f Dockerfile -t ${org}/${name}:${version}-bronze . 
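
Per its README and `metadata.sh` above, the echo server listens on port 9595 and `build.sh` tags the image `sunbird/echo-server:0.0.1` with a `-bronze` artifact label. A quick smoke test after running `build.sh` (the `/hello` path is an arbitrary example; the server simply echoes back whatever request path it receives):

```sh
#!/bin/sh
# Smoke-test the freshly built echo-server image (tag as produced by build.sh).
docker run -d --name echo-test -p 9595:9595 sunbird/echo-server:0.0.1-bronze
sleep 2                       # give the container a moment to start listening
curl -s localhost:9595/hello  # expected output: /hello
docker rm -f echo-test
```
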
diff --git a/images/echo-server/dockerPushToRepo.sh b/images/echo-server/dockerPushToRepo.sh new file mode 100755 index 0000000000000000000000000000000000000000..6f3a52e88f52af838f2d79ae220f31f83c8f4d5d --- /dev/null +++ b/images/echo-server/dockerPushToRepo.sh @@ -0,0 +1,18 @@ +#!/bin/sh +# Build script +# set -o errexit +e () { + echo $( echo ${1} | jq ".${2}" | sed 's/\"//g') +} +m=$(./images/echo-server/metadata.sh) + +org=$(e "${m}" "org") +hubuser=$(e "${m}" "hubuser") +name=$(e "${m}" "name") +version=$(e "${m}" "version") + +artifactLabel=${ARTIFACT_LABEL:-bronze} + +docker login -u "${hubuser}" -p`cat /run/secrets/hub-pass` +docker push ${org}/${name}:${version}-${artifactLabel} +docker logout diff --git a/images/echo-server/installDeps.sh b/images/echo-server/installDeps.sh new file mode 100755 index 0000000000000000000000000000000000000000..acc98d5f3a7c36eae50fec5ee92cf7d0a002cb10 --- /dev/null +++ b/images/echo-server/installDeps.sh @@ -0,0 +1,5 @@ +#!/bin/sh +# Build script +# set -o errexit +apk -v --update --no-cache add jq +apk -v add ansible=2.3.0.0-r1 diff --git a/images/echo-server/metadata.sh b/images/echo-server/metadata.sh new file mode 100755 index 0000000000000000000000000000000000000000..48ab307ecb63faaa23deaea538ece21a97653f7f --- /dev/null +++ b/images/echo-server/metadata.sh @@ -0,0 +1,3 @@ +#!/bin/sh +# return version +echo '{"name":"echo-server","version":"0.0.1","org":"sunbird","hubuser":"purplesunbird"}' diff --git a/images/echo-server/server.py b/images/echo-server/server.py new file mode 100644 index 0000000000000000000000000000000000000000..5f71543a33b848cf83c4fb3451d17e8fa8505581 --- /dev/null +++ b/images/echo-server/server.py @@ -0,0 +1,22 @@ +#!/usr/bin/python +import os +from BaseHTTPServer import BaseHTTPRequestHandler,HTTPServer + +PORT_NUMBER = int(os.environ['ECHO_SERVER_PORT']) + +class EchoRequestHandler(BaseHTTPRequestHandler): + + def do_GET(self): + self.send_response(200) + self.send_header("Content-type", "text/plain") + self.end_headers() + self.wfile.write(self.path) + return + +try: + server = HTTPServer(('', PORT_NUMBER), EchoRequestHandler) + print 'Started httpserver on port ' , PORT_NUMBER + server.serve_forever() +except KeyboardInterrupt: + print '^C received, shutting down the web server' + server.socket.close() \ No newline at end of file diff --git a/images/jekyll-jenkins-swarm-agent/Dockerfile b/images/jekyll-jenkins-swarm-agent/Dockerfile new file mode 100644 index 0000000000000000000000000000000000000000..3c43132a543d833cd4b8161a914a4d1c13a95dbe --- /dev/null +++ b/images/jekyll-jenkins-swarm-agent/Dockerfile @@ -0,0 +1,33 @@ +FROM vfarcic/jenkins-swarm-agent +MAINTAINER Shashank Teotia <shashankteotia@gmail.com> + +ENV LANGUAGE=en_US +ENV LANG=en_US.UTF-8 +ENV JEKYLL_VERSION=2.5.3 +ENV JEKYLL_ENV=production +ENV TZ=Asia/Calcutta +ENV LC_ALL=en_US +COPY copy / + +RUN apk update +RUN apk add zlib-dev build-base libxml2-dev \ + libxslt-dev readline-dev libffi-dev ruby-dev \ + yaml-dev zlib-dev libffi-dev less + +RUN apk add zlib libxml2 ruby-io-console \ + readline libxslt ruby yaml libffi ruby-irb \ + ruby-json ruby-rake ruby-rdoc git openssl \ + nodejs tzdata python nodejs-npm + +RUN yes | gem install --force --no-ri --no-rdoc jekyll -v ${JEKYLL_VERSION} + +RUN mkdir -p /usr/share/ruby +RUN gem install bundler --no-ri --no-rdoc +RUN gem clean +# RUN cleanup + +# WORKDIR /srv/jekyll +# CMD ["jekyll", "--help"] +# VOLUME /srv/jekyll +# EXPOSE 35729 4000 + diff --git a/images/jekyll-jenkins-swarm-agent/Dockerfile.ubuntu 
b/images/jekyll-jenkins-swarm-agent/Dockerfile.ubuntu new file mode 100644 index 0000000000000000000000000000000000000000..cc6f95866e6a5be2d078dedc9fbb2fd7b53373be --- /dev/null +++ b/images/jekyll-jenkins-swarm-agent/Dockerfile.ubuntu @@ -0,0 +1,25 @@ +FROM ubuntu:16.04 +MAINTAINER Lakhan Mandloi <lakhan_m@tekditechnologies.com> + +USER root +# Update everything +RUN apt update && \ + apt install -y build-essential \ + zlib1g-dev \ + git \ + locales \ + curl \ + ruby-full && \ + curl -sL https://deb.nodesource.com/setup_8.x | bash - && apt install -y nodejs + +RUN gem install bundle \ + liquid-rails \ + documentation \ + branch_io \ + rubygems-update && \ + gem update --system + +RUN locale-gen en_US.UTF-8 && \ + export LANG=en_US.UTF-8 && \ + export LANGUAGE=en_US:en && \ + export LC_ALL=en_US.UTF-8 diff --git a/images/jekyll-jenkins-swarm-agent/copy/all/usr/local/bin/connected b/images/jekyll-jenkins-swarm-agent/copy/all/usr/local/bin/connected new file mode 100755 index 0000000000000000000000000000000000000000..2f5a4d6a8324ebfb9e94f056d35e95c504df3aec --- /dev/null +++ b/images/jekyll-jenkins-swarm-agent/copy/all/usr/local/bin/connected @@ -0,0 +1,22 @@ +#!/bin/bash -l +set -e + +[ "$CONNECTED" = "true" ] && exit 0 +[ "$CONNECTED" = "false" ] && exit 1 +[ -f "/connected" ] && [ -f "/not-connected" ] && rm -rf /{,not-}connected +[ -f "/not-connected" ] && exit 1 +[ -f "/connected" ] && exit 0 + +# -- +# If we aren't connected, or forced as connected, or not +# connected then we should check with WGet (because of Proxies) +# whether we are connected to the internet. +# -- + +if wget -qs https://google.com >/dev/null; then + touch /connected + exit 0 +else + touch /not-connected + exit 1 +fi diff --git a/images/jekyll-jenkins-swarm-agent/copy/all/usr/local/bin/default-args b/images/jekyll-jenkins-swarm-agent/copy/all/usr/local/bin/default-args new file mode 100755 index 0000000000000000000000000000000000000000..958f727cf7e8f1fac70684c58dd6db389f3c206d --- /dev/null +++ b/images/jekyll-jenkins-swarm-agent/copy/all/usr/local/bin/default-args @@ -0,0 +1,16 @@ +#!/usr/bin/env ruby +require "shellwords" + +# -- + +def build?; %W(b build).include?(ARGV[0]); end +def debug?; ENV["JEKYLL_DEBUG"] == "true" || ENV["VERBOSE"] == "true"; end +def serve?; %W(s serve server).include?(ARGV[0]); end + +ARGV.shift and ARGV.unshift("serve") if "s" == ARGV[0] +ARGV.push("--force_polling") if (ENV["FORCE_POLLING"] || ENV["POLLING"]) && (build? || serve?) +ARGV.push("--drafts") if ENV["DRAFTS"] || ENV["BUILD_DRAFTS"] +ARGV.unshift(ARGV.shift, "-H", "0.0.0.0") if serve? +ARGV.push("--verbose") if debug? + +$stdout.puts Shellwords.shelljoin(ARGV) diff --git a/images/jekyll-jenkins-swarm-agent/copy/all/usr/local/bin/depends b/images/jekyll-jenkins-swarm-agent/copy/all/usr/local/bin/depends new file mode 100755 index 0000000000000000000000000000000000000000..a32b15161e1fc82b6032cbdf47d4f531fb32755c --- /dev/null +++ b/images/jekyll-jenkins-swarm-agent/copy/all/usr/local/bin/depends @@ -0,0 +1,34 @@ +#!/bin/bash +set -e + +if [ ! -f "/updated" ]; then + if connected; then + if [ -f ".apk" ]; then + apk add --no-progress $(cat .apk) + fi + fi +fi + +chown -R jekyll:jekyll $PWD +if [ -f "Gemfile" ]; then + if [ "$BUNDLE_CACHE" = "true" ]; then + sudo -EHu jekyll bundle config --local path vendor/bundle + sudo -EHu jekyll bundle config --local disable_shared_gems true + sudo -EHu jekyll bundle config --local jobs 2 + fi + + # -- + + if [ "$1" = "install" ]; then + if ! 
sudo -EHu jekyll bundle check; then + sudo -EHu jekyll bundle config ignore_messages true + # Prevent some weird Nokogiri errors that happen lately. + sudo -EHu jekyll bundle config build.nokogiri --use-system-libraries + sudo -EHu jekyll bundle config disable_version_check true + sudo -EHu jekyll bundle install + fi + + else + sudo -EHu jekyll bundle "$@" + fi +fi diff --git a/images/jekyll-jenkins-swarm-agent/copy/all/usr/local/bin/jekyll b/images/jekyll-jenkins-swarm-agent/copy/all/usr/local/bin/jekyll new file mode 100755 index 0000000000000000000000000000000000000000..651ded2ec531e31908fc5a2c01cffe1f4a6e7e8a --- /dev/null +++ b/images/jekyll-jenkins-swarm-agent/copy/all/usr/local/bin/jekyll @@ -0,0 +1,24 @@ +#!/bin/bash +set -e + +: ${JEKYLL_UID:=$UID} +ARGS=$(default-args $@) +export JEKYLL_UID + +if [ "$JEKYLL_UID" != "0" ]; then + if [ "$JEKYLL_UID" != "$(id -u jekyll)" ]; then + reset-user jekyll:"$JEKYLL_UID" + fi +fi + +chown -R jekyll:jekyll $PWD +if connected + then depends install +fi + +if [ -f Gemfile ]; then + sudo -EHu jekyll bundle exec ruby \ + /usr/bin/jekyll $ARGS +else + sudo -EHu jekyll /usr/bin/jekyll $ARGS +fi diff --git a/images/jekyll-jenkins-swarm-agent/docker-compose.yml b/images/jekyll-jenkins-swarm-agent/docker-compose.yml new file mode 100644 index 0000000000000000000000000000000000000000..cf8827398f9ee8811828ae3018e68e1706590f72 --- /dev/null +++ b/images/jekyll-jenkins-swarm-agent/docker-compose.yml @@ -0,0 +1,34 @@ +version: '3.1' + +services: + + jekyll-jenkins-slave: + image: sunbird/jekyll-jenkins-swarm-agent:2.5.3 + environment: + - USER_NAME_SECRET=/run/secrets/${JENKINS_USER_SECRET:-jenkins-user} + - PASSWORD_SECRET=/run/secrets/${JENKINS_PASS_SECRET:-jenkins-pass} + - COMMAND_OPTIONS=-master ${JENKINS_URL} -labels ${SITE_SLAVE_LABEL} -executors 1 -name site-jenkins-slave + volumes: + - /var/run/docker.sock:/var/run/docker.sock + secrets: + - jenkins-user + - jenkins-pass + deploy: + replicas: 1 + resources: + reservations: + memory: 512M + limits: + memory: 1024M + networks: + - jenkins_default + +secrets: + jenkins-user: + external: true + jenkins-pass: + external: true + +networks: + jenkins_default: + external: true diff --git a/images/keycloak/Dockerfile b/images/keycloak/Dockerfile new file mode 100644 index 0000000000000000000000000000000000000000..283a51ac4f57cae1fb5ce7648008a730f51f078d --- /dev/null +++ b/images/keycloak/Dockerfile @@ -0,0 +1,11 @@ +FROM jboss/keycloak-postgres:latest + +RUN rm /opt/jboss/keycloak/standalone/configuration/standalone-ha.xml +COPY ./images/keycloak/standalone-ha.xml /opt/jboss/keycloak/standalone/configuration/ +COPY ./images/keycloak/postgresql-9.4.1212.jar /opt/jboss/keycloak/modules/system/layers/keycloak/org/postgresql/main/ +COPY ./images/keycloak/module.xml /opt/jboss/keycloak/modules/system/layers/keycloak/org/postgresql/main/ +COPY ./images/keycloak/docker-entrypoint.sh /opt/jboss/ + +ENTRYPOINT ["/opt/jboss/docker-entrypoint.sh"] + +CMD ["-b", "0.0.0.0", "--server-config", "standalone-ha.xml"] \ No newline at end of file diff --git a/images/keycloak/build.sh b/images/keycloak/build.sh new file mode 100644 index 0000000000000000000000000000000000000000..bdea21eb0b90463abb09a4c0aec637dd93596b06 --- /dev/null +++ b/images/keycloak/build.sh @@ -0,0 +1,14 @@ +#!/bin/sh +# Build script +# set -o errexit +set -e +e () { + echo $( echo ${1} | jq ".${2}" | sed 's/\"//g') +} +m=$(./images/keycloak/metadata.sh) + +org=$(e "${m}" "org") +name=$(e "${m}" "name") +version=$(e "${m}" "version") + +docker build -f 
./images/keycloak/Dockerfile -t ${org}/${name}:${version}-bronze . diff --git a/images/keycloak/docker-entrypoint.sh b/images/keycloak/docker-entrypoint.sh new file mode 100755 index 0000000000000000000000000000000000000000..4d93dc98a85ca24e1269e89530161986dcf8e5ba --- /dev/null +++ b/images/keycloak/docker-entrypoint.sh @@ -0,0 +1,7 @@ +#!/bin/bash + +export HOSTNAME_IP=$(hostname -i) +echo "hostname -i returned: $HOSTNAME_IP" + +exec /opt/jboss/keycloak/bin/standalone.sh -Djboss.bind.address.private=$HOSTNAME_IP -Djboss.jgroups.tcpping.initial_hosts=$TCPPING_INITIAL_HOSTS $@ +exit $? \ No newline at end of file diff --git a/images/keycloak/dockerPushToRepo.sh b/images/keycloak/dockerPushToRepo.sh new file mode 100644 index 0000000000000000000000000000000000000000..fda2f01beafd7564b8a856f71db71e93c3124210 --- /dev/null +++ b/images/keycloak/dockerPushToRepo.sh @@ -0,0 +1,20 @@ +#!/bin/sh +# Build script +# set -o errexit +set -e +e () { + echo $( echo ${1} | jq ".${2}" | sed 's/\"//g') +} +m=$(./images/keycloak/metadata.sh) + +org=$(e "${m}" "org") +hubuser=$(e "${m}" "hubuser") +name=$(e "${m}" "name") +version=$(e "${m}" "version") +artifactLabel=${ARTIFACT_LABEL:-bronze} + + +docker login -u "${hubuser}" -p`cat /run/secrets/hub-pass` +docker push ${org}/${name}:${version}-${artifactLabel} +docker logout + diff --git a/images/keycloak/installDeps.sh b/images/keycloak/installDeps.sh new file mode 100644 index 0000000000000000000000000000000000000000..acc98d5f3a7c36eae50fec5ee92cf7d0a002cb10 --- /dev/null +++ b/images/keycloak/installDeps.sh @@ -0,0 +1,5 @@ +#!/bin/sh +# Build script +# set -o errexit +apk -v --update --no-cache add jq +apk -v add ansible=2.3.0.0-r1 diff --git a/images/keycloak/metadata.sh b/images/keycloak/metadata.sh new file mode 100644 index 0000000000000000000000000000000000000000..4c9273a7f4b98f020b69acccac67e44c9ca530a2 --- /dev/null +++ b/images/keycloak/metadata.sh @@ -0,0 +1,4 @@ +#!/bin/sh +# return version +echo '{"name":"keycloak_image","version":"3.2.1.Final","org":"sunbird","hubuser":"purplesunbird"}' + diff --git a/images/keycloak/module.xml b/images/keycloak/module.xml new file mode 100755 index 0000000000000000000000000000000000000000..bcfe36a40660252723eb6ce3ca98e01307a26fbc --- /dev/null +++ b/images/keycloak/module.xml @@ -0,0 +1,10 @@ +<?xml version="1.0" encoding="UTF-8"?> +<module xmlns="urn:jboss:module:1.1" name="org.postgresql"> + <resources> + <resource-root path="postgresql-9.4.1212.jar"/> + </resources> + <dependencies> + <module name="javax.api"/> + <module name="javax.transaction.api"/> + </dependencies> +</module> diff --git a/images/keycloak/postgresql-9.4.1212.jar b/images/keycloak/postgresql-9.4.1212.jar new file mode 100755 index 0000000000000000000000000000000000000000..b0de752d8802b2133da7ad75f10807e094231bfb Binary files /dev/null and b/images/keycloak/postgresql-9.4.1212.jar differ diff --git a/images/keycloak/standalone-ha.xml b/images/keycloak/standalone-ha.xml new file mode 100755 index 0000000000000000000000000000000000000000..36b5cf62479c8b274cc31cb55269cecc6ed0349f --- /dev/null +++ b/images/keycloak/standalone-ha.xml @@ -0,0 +1,570 @@ +<?xml version='1.0' encoding='UTF-8'?> + +<server xmlns="urn:jboss:domain:4.0"> + <extensions> + <extension module="org.jboss.as.clustering.infinispan"/> + <extension module="org.jboss.as.clustering.jgroups"/> + <extension module="org.jboss.as.connector"/> + <extension module="org.jboss.as.deployment-scanner"/> + <extension module="org.jboss.as.ee"/> + <extension module="org.jboss.as.ejb3"/> + 
<extension module="org.jboss.as.jaxrs"/> + <extension module="org.jboss.as.jdr"/> + <extension module="org.jboss.as.jmx"/> + <extension module="org.jboss.as.jpa"/> + <extension module="org.jboss.as.jsf"/> + <extension module="org.jboss.as.logging"/> + <extension module="org.jboss.as.mail"/> + <extension module="org.jboss.as.modcluster"/> + <extension module="org.jboss.as.naming"/> + <extension module="org.jboss.as.remoting"/> + <extension module="org.jboss.as.security"/> + <extension module="org.jboss.as.transactions"/> + <extension module="org.keycloak.keycloak-server-subsystem"/> + <extension module="org.wildfly.extension.bean-validation"/> + <extension module="org.wildfly.extension.io"/> + <extension module="org.wildfly.extension.request-controller"/> + <extension module="org.wildfly.extension.security.manager"/> + <extension module="org.wildfly.extension.undertow"/> + </extensions> + <management> + <security-realms> + <security-realm name="ManagementRealm"> + <authentication> + <local default-user="$local" skip-group-loading="true"/> + <properties path="mgmt-users.properties" relative-to="jboss.server.config.dir"/> + </authentication> + <authorization map-groups-to-roles="false"> + <properties path="mgmt-groups.properties" relative-to="jboss.server.config.dir"/> + </authorization> + </security-realm> + <security-realm name="ApplicationRealm"> + <authentication> + <local default-user="$local" allowed-users="*" skip-group-loading="true"/> + <properties path="application-users.properties" relative-to="jboss.server.config.dir"/> + </authentication> + <authorization> + <properties path="application-roles.properties" relative-to="jboss.server.config.dir"/> + </authorization> + </security-realm> + </security-realms> + <audit-log> + <formatters> + <json-formatter name="json-formatter"/> + </formatters> + <handlers> + <file-handler name="file" formatter="json-formatter" relative-to="jboss.server.data.dir" path="audit-log.log"/> + </handlers> + <logger log-boot="true" log-read-only="false" enabled="false"> + <handlers> + <handler name="file"/> + </handlers> + </logger> + </audit-log> + <management-interfaces> + <http-interface security-realm="ManagementRealm" http-upgrade-enabled="true"> + <socket-binding http="management-http"/> + </http-interface> + </management-interfaces> + <access-control provider="simple"> + <role-mapping> + <role name="SuperUser"> + <include> + <user name="$local"/> + </include> + </role> + </role-mapping> + </access-control> + </management> + <profile> + <subsystem xmlns="urn:jboss:domain:logging:3.0"> + <console-handler name="CONSOLE"> + <level name="INFO"/> + <formatter> + <named-formatter name="COLOR-PATTERN"/> + </formatter> + </console-handler> + <periodic-rotating-file-handler name="FILE" autoflush="true"> + <formatter> + <named-formatter name="PATTERN"/> + </formatter> + <file relative-to="jboss.server.log.dir" path="server.log"/> + <suffix value=".yyyy-MM-dd"/> + <append value="true"/> + </periodic-rotating-file-handler> + <syslog-handler name="SYSLOG" enabled="true"> + <app-name value="keycloak"/> + <facility value="local-use-7"/> + <formatter> + <syslog-format syslog-type="RFC5424"/> + </formatter> + <hostname value="${jboss.host.name}"/> + <level name="INFO"/> + <server-address value="0.0.0.0"/> + <port value="514"/> + </syslog-handler> + <logger category="com.arjuna"> + <level name="WARN"/> + </logger> + <logger category="org.jboss.as.config"> + <level name="DEBUG"/> + </logger> + <logger category="sun.rmi"> + <level name="WARN"/> + </logger> + <root-logger> 
+ <level name="INFO"/> + <handlers> + <handler name="CONSOLE"/> + <handler name="FILE"/> + <handler name="SYSLOG"/> + </handlers> + </root-logger> + <formatter name="PATTERN"> + <pattern-formatter pattern="%d{yyyy-MM-dd HH:mm:ss,SSS} %-5p [%c] (%t) %s%e%n"/> + </formatter> + <formatter name="COLOR-PATTERN"> + <pattern-formatter pattern="%K{level}%d{HH:mm:ss,SSS} %-5p [%c] (%t) %s%e%n"/> + </formatter> + </subsystem> + <subsystem xmlns="urn:jboss:domain:bean-validation:1.0"/> + <subsystem xmlns="urn:jboss:domain:datasources:4.0"> + <datasources> + <datasource jndi-name="java:jboss/datasources/ExampleDS" pool-name="ExampleDS" enabled="true" use-java-context="true"> + <connection-url>jdbc:h2:mem:test;DB_CLOSE_DELAY=-1;DB_CLOSE_ON_EXIT=FALSE</connection-url> + <driver>h2</driver> + <security> + <user-name>sa</user-name> + <password>sa</password> + </security> + </datasource> + <datasource jndi-name="java:jboss/datasources/KeycloakOrigDS" pool-name="KeycloakOrigDS" enabled="false" use-java-context="true"> + <connection-url>jdbc:h2:${jboss.server.data.dir}/keycloak;AUTO_SERVER=TRUE</connection-url> + <driver>h2</driver> + <security> + <user-name>sa</user-name> + <password>sa</password> + </security> + </datasource> + <datasource jndi-name="java:jboss/datasources/KeycloakDS" pool-name="KeycloakDS" enabled="true" use-java-context="true"> + <!-- Require both connection-url and connection-property[name="url"] + This is because of a Wildfly issue. Since the postgres driver is using a + datasource-class the connection parameters have to be set via + <connection-property>. However, deleting <connection-url> causes a Wildfly + configuration parsing error + + See this issue for detail: https://issues.jboss.org/browse/WFLY-6157 + It is fixed for Wildfly 11.0.0.Alpha1 + --> + <connection-url>jdbc:postgresql://thisnotwork:5432/keycloak</connection-url> + <connection-property name="url">jdbc:postgresql://${env.POSTGRES_PORT_5432_TCP_ADDR}:${env.POSTGRES_PORT_5432_TCP_PORT:5432}/${env.POSTGRES_DATABASE:keycloak}</connection-property> + <driver>postgresql</driver> + <pool> + <max-pool-size>20</max-pool-size> + </pool> + <security> + <user-name>${env.POSTGRES_USER:keycloak}</user-name> + <password>${env.POSTGRES_PASSWORD:password}</password> + </security> + <validation> + <background-validation>true</background-validation> + <background-validation-millis>10000</background-validation-millis> + <valid-connection-checker class-name="org.jboss.jca.adapters.jdbc.extensions.postgres.PostgreSQLValidConnectionChecker"/> + <exception-sorter class-name="org.jboss.jca.adapters.jdbc.extensions.postgres.PostgreSQLExceptionSorter"/> + </validation> + </datasource> + <drivers> + <driver name="h2" module="com.h2database.h2"> + <xa-datasource-class>org.h2.jdbcx.JdbcDataSource</xa-datasource-class> + </driver> + <driver name="postgresql" module="org.postgresql"> + <xa-datasource-class>org.postgresql.xa.PGXADataSource</xa-datasource-class> + <datasource-class>org.postgresql.ds.PGPoolingDataSource</datasource-class> + </driver> + </drivers> + </datasources> + </subsystem> + <subsystem xmlns="urn:jboss:domain:deployment-scanner:2.0"> + <deployment-scanner path="deployments" relative-to="jboss.server.base.dir" scan-interval="5000" runtime-failure-causes-rollback="${jboss.deployment.scanner.rollback.on.failure:false}"/> + </subsystem> + <subsystem xmlns="urn:jboss:domain:ee:4.0"> + <spec-descriptor-property-replacement>false</spec-descriptor-property-replacement> + <concurrent> + <context-services> + <context-service name="default" 
jndi-name="java:jboss/ee/concurrency/context/default" use-transaction-setup-provider="true"/> + </context-services> + <managed-thread-factories> + <managed-thread-factory name="default" jndi-name="java:jboss/ee/concurrency/factory/default" context-service="default"/> + </managed-thread-factories> + <managed-executor-services> + <managed-executor-service name="default" jndi-name="java:jboss/ee/concurrency/executor/default" context-service="default" hung-task-threshold="60000" keepalive-time="5000"/> + </managed-executor-services> + <managed-scheduled-executor-services> + <managed-scheduled-executor-service name="default" jndi-name="java:jboss/ee/concurrency/scheduler/default" context-service="default" hung-task-threshold="60000" keepalive-time="3000"/> + </managed-scheduled-executor-services> + </concurrent> + <default-bindings context-service="java:jboss/ee/concurrency/context/default" datasource="java:jboss/datasources/ExampleDS" managed-executor-service="java:jboss/ee/concurrency/executor/default" managed-scheduled-executor-service="java:jboss/ee/concurrency/scheduler/default" managed-thread-factory="java:jboss/ee/concurrency/factory/default"/> + </subsystem> + <subsystem xmlns="urn:jboss:domain:ejb3:4.0"> + <session-bean> + <stateless> + <bean-instance-pool-ref pool-name="slsb-strict-max-pool"/> + </stateless> + <stateful default-access-timeout="5000" cache-ref="distributable" passivation-disabled-cache-ref="simple"/> + <singleton default-access-timeout="5000"/> + </session-bean> + <pools> + <bean-instance-pools> + <!-- Automatically configure pools. Alternatively, max-pool-size can be set to a specific value --> + <strict-max-pool name="slsb-strict-max-pool" derive-size="from-worker-pools" instance-acquisition-timeout="5" instance-acquisition-timeout-unit="MINUTES"/> + <strict-max-pool name="mdb-strict-max-pool" derive-size="from-cpu-count" instance-acquisition-timeout="5" instance-acquisition-timeout-unit="MINUTES"/> + </bean-instance-pools> + </pools> + <caches> + <cache name="simple"/> + <cache name="distributable" passivation-store-ref="infinispan" aliases="passivating clustered"/> + </caches> + <passivation-stores> + <passivation-store name="infinispan" cache-container="ejb" max-size="10000"/> + </passivation-stores> + <async thread-pool-name="default"/> + <timer-service thread-pool-name="default" default-data-store="default-file-store"> + <data-stores> + <file-data-store name="default-file-store" path="timer-service-data" relative-to="jboss.server.data.dir"/> + </data-stores> + </timer-service> + <remote connector-ref="http-remoting-connector" thread-pool-name="default"/> + <thread-pools> + <thread-pool name="default"> + <max-threads count="10"/> + <keepalive-time time="100" unit="milliseconds"/> + </thread-pool> + </thread-pools> + <default-security-domain value="other"/> + <default-missing-method-permissions-deny-access value="true"/> + <log-system-exceptions value="true"/> + </subsystem> + <subsystem xmlns="urn:jboss:domain:io:1.1"> + <worker name="default"/> + <buffer-pool name="default"/> + </subsystem> + <subsystem xmlns="urn:jboss:domain:infinispan:4.0"> + <cache-container name="keycloak" jndi-name="infinispan/Keycloak"> + <transport lock-timeout="60000"/> + <local-cache name="realms"> + <eviction max-entries="10000" strategy="LRU"/> + </local-cache> + <local-cache name="users"> + <eviction max-entries="10000" strategy="LRU"/> + </local-cache> + <distributed-cache name="sessions" mode="SYNC" owners="1"/> + <distributed-cache name="authenticationSessions" mode="SYNC" 
owners="1"/> + <distributed-cache name="offlineSessions" mode="SYNC" owners="1"/> + <distributed-cache name="loginFailures" mode="SYNC" owners="1"/> + <local-cache name="authorization"> + <eviction max-entries="10000" strategy="LRU"/> + </local-cache> + <replicated-cache name="work" mode="SYNC"/> + <local-cache name="keys"> + <eviction max-entries="1000" strategy="LRU"/> + <expiration max-idle="3600000"/> + </local-cache> + <distributed-cache name="actionTokens" mode="SYNC" owners="2"> + <eviction max-entries="-1" strategy="NONE"/> + <expiration max-idle="-1" interval="300000"/> + </distributed-cache> + </cache-container> + <cache-container name="server" aliases="singleton cluster" default-cache="default" module="org.wildfly.clustering.server"> + <transport lock-timeout="60000"/> + <replicated-cache name="default" mode="SYNC"> + <transaction mode="BATCH"/> + </replicated-cache> + </cache-container> + <cache-container name="web" default-cache="dist" module="org.wildfly.clustering.web.infinispan"> + <transport lock-timeout="60000"/> + <distributed-cache name="dist" mode="ASYNC" l1-lifespan="0" owners="2"> + <locking isolation="REPEATABLE_READ"/> + <transaction mode="BATCH"/> + <file-store/> + </distributed-cache> + </cache-container> + <cache-container name="ejb" aliases="sfsb" default-cache="dist" module="org.wildfly.clustering.ejb.infinispan"> + <transport lock-timeout="60000"/> + <distributed-cache name="dist" mode="ASYNC" l1-lifespan="0" owners="2"> + <locking isolation="REPEATABLE_READ"/> + <transaction mode="BATCH"/> + <file-store/> + </distributed-cache> + </cache-container> + <cache-container name="hibernate" default-cache="local-query" module="org.hibernate.infinispan"> + <transport lock-timeout="60000"/> + <local-cache name="local-query"> + <eviction strategy="LRU" max-entries="10000"/> + <expiration max-idle="100000"/> + </local-cache> + <invalidation-cache name="entity" mode="SYNC"> + <transaction mode="NON_XA"/> + <eviction strategy="LRU" max-entries="10000"/> + <expiration max-idle="100000"/> + </invalidation-cache> + <replicated-cache name="timestamps" mode="ASYNC"/> + </cache-container> + </subsystem> + <subsystem xmlns="urn:jboss:domain:jaxrs:1.0"/> + <subsystem xmlns="urn:jboss:domain:jca:4.0"> + <archive-validation enabled="true" fail-on-error="true" fail-on-warn="false"/> + <bean-validation enabled="true"/> + <default-workmanager> + <short-running-threads> + <core-threads count="50"/> + <queue-length count="50"/> + <max-threads count="50"/> + <keepalive-time time="10" unit="seconds"/> + </short-running-threads> + <long-running-threads> + <core-threads count="50"/> + <queue-length count="50"/> + <max-threads count="50"/> + <keepalive-time time="10" unit="seconds"/> + </long-running-threads> + </default-workmanager> + <cached-connection-manager/> + </subsystem> + <subsystem xmlns="urn:jboss:domain:jdr:1.0"/> + <subsystem xmlns="urn:jboss:domain:jgroups:4.0"> + <channels default="ee"> + <channel name="ee" stack="tcp"/> + </channels> + <stacks default="tcp"> + <stack name="udp"> + <transport type="UDP" socket-binding="jgroups-udp"/> + <protocol type="PING"/> + <protocol type="MERGE3"/> + <protocol type="FD_SOCK" socket-binding="jgroups-udp-fd"/> + <protocol type="FD_ALL"/> + <protocol type="VERIFY_SUSPECT"/> + <protocol type="pbcast.NAKACK2"/> + <protocol type="UNICAST3"/> + <protocol type="pbcast.STABLE"/> + <protocol type="pbcast.GMS"/> + <protocol type="UFC"/> + <protocol type="MFC"/> + <protocol type="FRAG2"/> + </stack> + <stack name="tcp"> + <transport type="TCP" 
socket-binding="jgroups-tcp"/> + <protocol type="TCPPING"> + <property name="initial_hosts">${jboss.jgroups.tcpping.initial_hosts}</property> + <property name="port_range">10</property> + <property name="timeout">3000</property> + <property name="num_initial_members">2</property> + </protocol> +<!-- <protocol type="MPING" socket-binding="jgroups-mping"/> --> + <protocol type="MERGE3"/> + <protocol type="FD_SOCK" socket-binding="jgroups-tcp-fd"/> + <protocol type="FD"/> + <protocol type="VERIFY_SUSPECT"/> + <protocol type="pbcast.NAKACK2"/> + <protocol type="UNICAST3"/> + <protocol type="pbcast.STABLE"/> + <protocol type="pbcast.GMS"/> + <protocol type="MFC"/> + <protocol type="FRAG2"/> + </stack> + </stacks> + </subsystem> + <subsystem xmlns="urn:jboss:domain:jmx:1.3"> + <expose-resolved-model/> + <expose-expression-model/> + <remoting-connector/> + </subsystem> + <subsystem xmlns="urn:jboss:domain:jpa:1.1"> + <jpa default-datasource="" default-extended-persistence-inheritance="DEEP"/> + </subsystem> + <subsystem xmlns="urn:jboss:domain:jsf:1.0"/> + <subsystem xmlns="urn:jboss:domain:mail:2.0"> + <mail-session name="default" jndi-name="java:jboss/mail/Default"> + <smtp-server outbound-socket-binding-ref="mail-smtp"/> + </mail-session> + </subsystem> + <subsystem xmlns="urn:jboss:domain:modcluster:2.0"> + <mod-cluster-config advertise-socket="modcluster" connector="ajp"> + <dynamic-load-provider> + <load-metric type="cpu"/> + </dynamic-load-provider> + </mod-cluster-config> + </subsystem> + <subsystem xmlns="urn:jboss:domain:naming:2.0"> + <remote-naming/> + </subsystem> + <subsystem xmlns="urn:jboss:domain:remoting:3.0"> + <endpoint/> + <http-connector name="http-remoting-connector" connector-ref="default" security-realm="ApplicationRealm"/> + </subsystem> + <subsystem xmlns="urn:jboss:domain:request-controller:1.0"/> + <subsystem xmlns="urn:jboss:domain:security-manager:1.0"> + <deployment-permissions> + <maximum-set> + <permission class="java.security.AllPermission"/> + </maximum-set> + </deployment-permissions> + </subsystem> + <subsystem xmlns="urn:jboss:domain:security:1.2"> + <security-domains> + <security-domain name="other" cache-type="default"> + <authentication> + <login-module code="Remoting" flag="optional"> + <module-option name="password-stacking" value="useFirstPass"/> + </login-module> + <login-module code="RealmDirect" flag="required"> + <module-option name="password-stacking" value="useFirstPass"/> + </login-module> + </authentication> + </security-domain> + <security-domain name="jboss-web-policy" cache-type="default"> + <authorization> + <policy-module code="Delegating" flag="required"/> + </authorization> + </security-domain> + <security-domain name="jboss-ejb-policy" cache-type="default"> + <authorization> + <policy-module code="Delegating" flag="required"/> + </authorization> + </security-domain> + <security-domain name="jaspitest" cache-type="default"> + <authentication-jaspi> + <login-module-stack name="dummy"> + <login-module code="Dummy" flag="optional"/> + </login-module-stack> + <auth-module code="Dummy"/> + </authentication-jaspi> + </security-domain> + </security-domains> + </subsystem> + <subsystem xmlns="urn:jboss:domain:transactions:3.0"> + <core-environment> + <process-id> + <uuid/> + </process-id> + </core-environment> + <recovery-environment socket-binding="txn-recovery-environment" status-socket-binding="txn-status-manager"/> + </subsystem> + <subsystem xmlns="urn:jboss:domain:undertow:3.0"> + <buffer-cache name="default"/> + <server 
name="default-server"> + <ajp-listener name="ajp" socket-binding="ajp"/> + <http-listener name="default" socket-binding="http" redirect-socket="https" proxy-address-forwarding="true"/> + <host name="default-host" alias="localhost"> + <location name="/" handler="welcome-content"/> + <filter-ref name="server-header"/> + <filter-ref name="x-powered-by-header"/> + </host> + </server> + <servlet-container name="default"> + <jsp-config/> + <websockets/> + </servlet-container> + <handlers> + <file name="welcome-content" path="${jboss.home.dir}/welcome-content"/> + </handlers> + <filters> + <response-header name="server-header" header-name="Server" header-value="WildFly/10"/> + <response-header name="x-powered-by-header" header-name="X-Powered-By" header-value="Undertow/1"/> + </filters> + </subsystem> + <subsystem xmlns="urn:jboss:domain:keycloak-server:1.1"> + <web-context>auth</web-context> + <providers> + <provider>classpath:${jboss.home.dir}/providers/*</provider> + </providers> + <master-realm-name>master</master-realm-name> + <scheduled-task-interval>900</scheduled-task-interval> + <theme> + <staticMaxAge>2592000</staticMaxAge> + <cacheThemes>true</cacheThemes> + <cacheTemplates>true</cacheTemplates> + <dir>${jboss.home.dir}/themes</dir> + </theme> + <spi name="eventsStore"> + <provider name="jpa" enabled="true"> + <properties> + <property name="exclude-events" value="["REFRESH_TOKEN"]"/> + </properties> + </provider> + </spi> + <spi name="userCache"> + <provider name="default" enabled="true"/> + </spi> + <spi name="userSessionPersister"> + <default-provider>jpa</default-provider> + </spi> + <spi name="timer"> + <default-provider>basic</default-provider> + </spi> + <spi name="connectionsHttpClient"> + <provider name="default" enabled="true"/> + </spi> + <spi name="connectionsJpa"> + <provider name="default" enabled="true"> + <properties> + <property name="dataSource" value="java:jboss/datasources/KeycloakDS"/> + <property name="initializeEmpty" value="true"/> + <property name="migrationStrategy" value="update"/> + <property name="migrationExport" value="${jboss.home.dir}/keycloak-database-update.sql"/> + </properties> + </provider> + </spi> + <spi name="realmCache"> + <provider name="default" enabled="true"/> + </spi> + <spi name="connectionsInfinispan"> + <default-provider>default</default-provider> + <provider name="default" enabled="true"> + <properties> + <property name="cacheContainer" value="java:comp/env/infinispan/Keycloak"/> + </properties> + </provider> + </spi> + <spi name="jta-lookup"> + <default-provider>${keycloak.jta.lookup.provider:jboss}</default-provider> + <provider name="jboss" enabled="true"/> + </spi> + <spi name="publicKeyStorage"> + <provider name="infinispan" enabled="true"> + <properties> + <property name="minTimeBetweenRequests" value="10"/> + </properties> + </provider> + </spi> + </subsystem> + </profile> + <interfaces> + <interface name="management"> + <inet-address value="${jboss.bind.address.management:127.0.0.1}"/> + </interface> + <interface name="public"> + <inet-address value="${jboss.bind.address:127.0.0.1}"/> + </interface> + <interface name="private"> + <inet-address value="${jboss.bind.address.private:127.0.0.1}"/> + </interface> + </interfaces> + <socket-binding-group name="standard-sockets" default-interface="public" port-offset="${jboss.socket.binding.port-offset:0}"> + <socket-binding name="management-http" interface="management" port="${jboss.management.http.port:9990}"/> + <socket-binding name="management-https" interface="management" 
port="${jboss.management.https.port:9993}"/> + <socket-binding name="ajp" port="${jboss.ajp.port:8009}"/> + <socket-binding name="http" port="${jboss.http.port:8080}"/> + <socket-binding name="https" port="${jboss.https.port:8443}"/> + <socket-binding name="jgroups-mping" interface="private" port="0" multicast-address="${jboss.default.multicast.address:230.0.0.4}" multicast-port="45700"/> + <socket-binding name="jgroups-tcp" interface="private" port="7600"/> + <socket-binding name="jgroups-tcp-fd" interface="private" port="57600"/> + <socket-binding name="jgroups-udp" interface="private" port="55200" multicast-address="${jboss.default.multicast.address:230.0.0.4}" multicast-port="45688"/> + <socket-binding name="jgroups-udp-fd" interface="private" port="54200"/> + <socket-binding name="modcluster" port="0" multicast-address="224.0.1.105" multicast-port="23364"/> + <socket-binding name="txn-recovery-environment" port="4712"/> + <socket-binding name="txn-status-manager" port="4713"/> + <outbound-socket-binding name="mail-smtp"> + <remote-destination host="localhost" port="25"/> + </outbound-socket-binding> + </socket-binding-group> +</server> diff --git a/images/kong/Dockerfile b/images/kong/Dockerfile new file mode 100644 index 0000000000000000000000000000000000000000..3e578e79fe570d294d74f5358515844593fefd38 --- /dev/null +++ b/images/kong/Dockerfile @@ -0,0 +1,35 @@ +#FROM kong:0.9.9 + +# ensure Kong logs go to the log pipe from our entrypoint and so to docker logging +#RUN mkdir -p /usr/local/kong/logs \ +# && ln -sf /dev/stdout /usr/local/kong/logs/serf.log \ +# && ln -sf /dev/stderr /usr/local/kong/logs/error.log \ +# && ln -sf /dev/stdout /usr/local/kong/logs/access.log + + +#========================================================================= + + +FROM centos:7 + +ENV KONG_VERSION 0.9.9 + +RUN yum install -y wget https://github.com/Mashape/kong/releases/download/$KONG_VERSION/kong-$KONG_VERSION.el7.noarch.rpm && \ + yum clean all + +RUN wget -O /usr/local/bin/dumb-init https://github.com/Yelp/dumb-init/releases/download/v1.1.3/dumb-init_1.1.3_amd64 && \ + chmod +x /usr/local/bin/dumb-init + +RUN yum install -y net-tools + +COPY ./images/kong/docker-entrypoint.sh /docker-entrypoint.sh +ENTRYPOINT ["/docker-entrypoint.sh"] + +# ensure Kong logs go to the log pipe from our entrypoint and so to docker logging +RUN mkdir -p /usr/local/kong/logs \ + && ln -sf /tmp/logpipe /usr/local/kong/logs/access.log \ + && ln -sf /tmp/logpipe /usr/local/kong/logs/serf.log \ + && ln -sf /tmp/logpipe /usr/local/kong/logs/error.log + +EXPOSE 8000 8443 8001 7946 +CMD ["kong", "start"] diff --git a/images/kong/build.sh b/images/kong/build.sh new file mode 100755 index 0000000000000000000000000000000000000000..2cbf1a95c0a6261386afedf43bbf89abebf7af0e --- /dev/null +++ b/images/kong/build.sh @@ -0,0 +1,14 @@ +#!/bin/sh +# Build script +# set -o errexit +set -e +e () { + echo $( echo ${1} | jq ".${2}" | sed 's/\"//g') +} +m=$(./images/kong/metadata.sh) + +org=$(e "${m}" "org") +name=$(e "${m}" "name") +version=$(e "${m}" "version") + +docker build -f ./images/kong/Dockerfile -t ${org}/${name}:${version} . 
diff --git a/images/kong/docker-entrypoint.sh b/images/kong/docker-entrypoint.sh new file mode 100755 index 0000000000000000000000000000000000000000..6d9395d1841442d153eaa92c13d9e33c23cc15c5 --- /dev/null +++ b/images/kong/docker-entrypoint.sh @@ -0,0 +1,23 @@ +#!/usr/local/bin/dumb-init /bin/bash +set -e + +# Make a pipe for the logs so we can ensure Kong logs get directed to docker logging +# see https://github.com/docker/docker/issues/6880 +# also, https://github.com/docker/docker/issues/31106, https://github.com/docker/docker/issues/31243 +# https://github.com/docker/docker/pull/16468, https://github.com/behance/docker-nginx/pull/51 +rm -f /tmp/logpipe +mkfifo -m 666 /tmp/logpipe +# This child process will still receive signals as per https://github.com/Yelp/dumb-init#session-behavior +cat <> /tmp/logpipe 1>&2 & + +# NOTE, to configure the `File Log` plugin to route logs to Docker logging, direct `config.path` at `/tmp/logpipe` + +# Disabling nginx daemon mode +export KONG_NGINX_DAEMON="off" +# Ensure kong listens on correct ip address https://github.com/Mashape/docker-kong/issues/93 +IP_ADDR=`ifconfig eth0 | awk '$1 == "inet" {gsub(/\/.*$/, "", $2); print $2}'` +export KONG_CLUSTER_LISTEN="$IP_ADDR:7946" + +echo "KONG_CLUSTER_LISTEN: $KONG_CLUSTER_LISTEN" + +exec "$@" diff --git a/images/kong/dockerPushToRepo.sh b/images/kong/dockerPushToRepo.sh new file mode 100755 index 0000000000000000000000000000000000000000..9a8d6a4572e87cf2a7509329e634ec8d332652e9 --- /dev/null +++ b/images/kong/dockerPushToRepo.sh @@ -0,0 +1,17 @@ +#!/bin/sh +# Build script +# set -o errexit +set -e +e () { + echo $( echo ${1} | jq ".${2}" | sed 's/\"//g') +} +m=$(./images/kong/metadata.sh) + +org=$(e "${m}" "org") +hubuser=$(e "${m}" "hubuser") +name=$(e "${m}" "name") +version=$(e "${m}" "version") + +docker login -u "${hubuser}" -p`cat /run/secrets/hub-pass` +docker push ${org}/${name}:${version} +docker logout diff --git a/images/kong/installDeps.sh b/images/kong/installDeps.sh new file mode 100755 index 0000000000000000000000000000000000000000..acc98d5f3a7c36eae50fec5ee92cf7d0a002cb10 --- /dev/null +++ b/images/kong/installDeps.sh @@ -0,0 +1,5 @@ +#!/bin/sh +# Build script +# set -o errexit +apk -v --update --no-cache add jq +apk -v add ansible=2.3.0.0-r1 diff --git a/images/kong/metadata.sh b/images/kong/metadata.sh new file mode 100755 index 0000000000000000000000000000000000000000..103abaac7faddbb9609c83f0142e954dafc50713 --- /dev/null +++ b/images/kong/metadata.sh @@ -0,0 +1,3 @@ +#!/bin/sh +# return version +echo '{"name":"kong","version":"0.9.9","org":"sunbird","hubuser":"purplesunbird"}' diff --git a/images/logger/Jenkinsfile b/images/logger/Jenkinsfile new file mode 100644 index 0000000000000000000000000000000000000000..a4540c355db973714a6431d796b10b7f98b40840 --- /dev/null +++ b/images/logger/Jenkinsfile @@ -0,0 +1,31 @@ +#!groovy + +node('general-dev') { + + currentBuild.result = "SUCCESS" + + try { + + stage('Checkout'){ + + checkout scm + } + + stage('Pre-Build'){ + + sh('./images/logger/installDeps.sh') + + } + + stage('Deploy'){ + + sh 'ARTIFACT_LABEL=bronze ENV=dev ./images/logger/deploy.sh' + + } + } + catch (err) { + currentBuild.result = "FAILURE" + throw err + } + +} diff --git a/images/logger/Jenkinsfile.sample b/images/logger/Jenkinsfile.sample new file mode 100644 index 0000000000000000000000000000000000000000..0eac89f7a0b041938ce45708a4a2b23a7e6ec9d4 --- /dev/null +++ b/images/logger/Jenkinsfile.sample @@ -0,0 +1,31 @@ +#!groovy + +node('general-dev') { + + currentBuild.result = 
"SUCCESS" + + try { + + stage('Checkout'){ + + checkout scm + } + + stage('Pre-Build'){ + + sh('./images/logger/installDeps.sh') + + } + + stage('Deploy'){ + + sh 'ARTIFACT_LABEL=bronze ENV=staging ./images/logger/deploy.sh' + + } + } + catch (err) { + currentBuild.result = "FAILURE" + throw err + } + +} diff --git a/images/logger/deploy.sh b/images/logger/deploy.sh new file mode 100755 index 0000000000000000000000000000000000000000..6e99a80173c6a4e6be104bd8fe60303c70f49a95 --- /dev/null +++ b/images/logger/deploy.sh @@ -0,0 +1,7 @@ +#!/bin/sh +# Build script +# set -o errexit + +ansible-playbook --version + +ansible-playbook -i ansible/inventory/${ENV} ansible/deploy.yml --tags "stack-logger" --vault-password-file /run/secrets/vault-pass diff --git a/images/logger/docker-compose.yml b/images/logger/docker-compose.yml new file mode 100644 index 0000000000000000000000000000000000000000..ffbc632492bc88b16b03881ad64c54a061b2c51c --- /dev/null +++ b/images/logger/docker-compose.yml @@ -0,0 +1 @@ +#moved to ansible \ No newline at end of file diff --git a/images/logger/installDeps.sh b/images/logger/installDeps.sh new file mode 100755 index 0000000000000000000000000000000000000000..acc98d5f3a7c36eae50fec5ee92cf7d0a002cb10 --- /dev/null +++ b/images/logger/installDeps.sh @@ -0,0 +1,5 @@ +#!/bin/sh +# Build script +# set -o errexit +apk -v --update --no-cache add jq +apk -v add ansible=2.3.0.0-r1 diff --git a/images/postgres_exporter/Dockerfile b/images/postgres_exporter/Dockerfile new file mode 100644 index 0000000000000000000000000000000000000000..955261bd1ba98c92393516c8e004d171fee7d597 --- /dev/null +++ b/images/postgres_exporter/Dockerfile @@ -0,0 +1,12 @@ +FROM alpine:3.6 + +ARG POSTGRES_EXPORTER_VERSION=v0.2.2 + +RUN apk --no-cache add openssl + +RUN wget -O /usr/local/bin/postgres_exporter https://github.com/wrouesnel/postgres_exporter/releases/download/$POSTGRES_EXPORTER_VERSION/postgres_exporter && \ + chmod +x /usr/local/bin/postgres_exporter + +EXPOSE 9187 + +ENTRYPOINT ["/usr/local/bin/postgres_exporter"] \ No newline at end of file diff --git a/images/proxy/Dockerfile b/images/proxy/Dockerfile new file mode 100644 index 0000000000000000000000000000000000000000..4afc4a8395a52215f9f3c35361ce4fca5cbacd77 --- /dev/null +++ b/images/proxy/Dockerfile @@ -0,0 +1,5 @@ +FROM nginx:alpine + +RUN rm -rf /etc/nginx/conf.d + +RUN rm -rf /usr/share/nginx/html diff --git a/images/proxy/build.sh b/images/proxy/build.sh new file mode 100755 index 0000000000000000000000000000000000000000..a5529a4b5086cc7826910b5eff0537c5d46ff8e5 --- /dev/null +++ b/images/proxy/build.sh @@ -0,0 +1,13 @@ +#!/bin/sh +# Build script +# set -o errexit +e () { + echo $( echo ${1} | jq ".${2}" | sed 's/\"//g') +} +m=$(./images/proxy/metadata.sh) + +org=$(e "${m}" "org") +name=$(e "${m}" "name") +version=$(e "${m}" "version") + +docker build -f ./images/proxy/Dockerfile -t ${org}/${name}:${version}-bronze . 
diff --git a/images/proxy/dockerPushToRepo.sh b/images/proxy/dockerPushToRepo.sh new file mode 100755 index 0000000000000000000000000000000000000000..1521138e90512bc0c29e9d677029b4557b97f222 --- /dev/null +++ b/images/proxy/dockerPushToRepo.sh @@ -0,0 +1,18 @@ +#!/bin/sh +# Build script +# set -o errexit +e () { + echo $( echo ${1} | jq ".${2}" | sed 's/\"//g') +} +m=$(./images/proxy/metadata.sh) + +org=$(e "${m}" "org") +hubuser=$(e "${m}" "hubuser") +name=$(e "${m}" "name") +version=$(e "${m}" "version") + +artifactLabel=${ARTIFACT_LABEL:-bronze} + +docker login -u "${hubuser}" -p`cat /run/secrets/hub-pass` +docker push ${org}/${name}:${version}-${artifactLabel} +docker logout diff --git a/images/proxy/metadata.sh b/images/proxy/metadata.sh new file mode 100755 index 0000000000000000000000000000000000000000..bdaa91c48177c9694cf2d9cc8cc1f2ad44c8b860 --- /dev/null +++ b/images/proxy/metadata.sh @@ -0,0 +1,3 @@ +#!/bin/sh +# return version +echo '{"name":"proxy","version":"0.0.1","org":"sunbird","hubuser":"purplesunbird"}' diff --git a/images/reporter-jenkins-swarm-agent/Dockerfile b/images/reporter-jenkins-swarm-agent/Dockerfile new file mode 100644 index 0000000000000000000000000000000000000000..3edfcfd8ebd8509a1746eb8d5bbe1dc7e3512f2d --- /dev/null +++ b/images/reporter-jenkins-swarm-agent/Dockerfile @@ -0,0 +1,10 @@ +FROM vfarcic/jenkins-swarm-agent +ENV CQLSH_VERSION=5.0.3 + +RUN apk --update add tar curl && \ + pip install cassandra-driver + +RUN mkdir -p /usr/var/cqlsh && \ + curl -SL https://pypi.python.org/packages/12/a7/13aff4ad358ff4abef6823d872154d0955ff6796739fcaaa2c80a6940aa6/cqlsh-${CQLSH_VERSION}.tar.gz \ + | tar xzvC /usr/var && \ + /usr/var/cqlsh-${CQLSH_VERSION}/cqlsh --version \ No newline at end of file diff --git a/pipelines/adminutils/Jenkinsfile.sample b/pipelines/adminutils/Jenkinsfile.sample new file mode 100644 index 0000000000000000000000000000000000000000..6684dc3d8ea279a91dd556f5f4ee6a74398aea5c --- /dev/null +++ b/pipelines/adminutils/Jenkinsfile.sample @@ -0,0 +1,27 @@ +#!groovy + +node('general-staging') { + + currentBuild.result = "SUCCESS" + + try { + stage('Checkout'){ + checkout scm + } + stage('Pre-Build'){ + sh('./pipelines/adminutils/installDeps.sh') + } + stage('Deploy'){ + step ([$class: 'CopyArtifact', + projectName: 'Tag_As_Silver/AM_AdminUtils_Tag_Silver', + filter: 'metadata.json']); + sh 'METADATA_FILE=metadata.json ARTIFACT_LABEL=silver ENV=staging ./pipelines/adminutils/deploy.sh' + archive includes: "metadata.json" + } + } + catch (err) { + currentBuild.result = "FAILURE" + throw err + } + +} diff --git a/pipelines/adminutils/deploy.sh b/pipelines/adminutils/deploy.sh new file mode 100755 index 0000000000000000000000000000000000000000..0dcb3cf5e4220e96ac00eb3ed06ff13e0df056d6 --- /dev/null +++ b/pipelines/adminutils/deploy.sh @@ -0,0 +1,28 @@ +#!/bin/sh +# Build script +# set -o errexit +set -e +e () { + echo $( echo ${1} | jq ".${2}" | sed 's/\"//g') +} +m=$(cat $METADATA_FILE) + +org=$(e "${m}" "org") +name=$(e "${m}" "name") +version=$(e "${m}" "version") + +artifactLabel=${ARTIFACT_LABEL:-bronze} +env=${ENV:-null} + +echo "artifactLabel: ${artifactLabel}" +echo "env: ${env}" +echo "org: ${org}" +echo "name: ${name}" +echo "version: ${version}" +echo "ANSIBLE_PATH: $ANSIBLE_PATH" + +ansible-playbook --version +ansible-playbook -i ansible/inventory/${env} ansible/deploy.yml --tags "stack-adminutil" --extra-vars "hub_org=${org} image_name=${name} image_tag=${version}-${artifactLabel}" --vault-password-file /run/secrets/vault-pass +# 
ENV=${env} ORG=${org} NAME=${name} \ +# TAG=${version}-${artifactLabel} \ +# docker stack deploy -c ./pipelines/proxy/docker-compose.yml ${name}-${env} diff --git a/pipelines/adminutils/installDeps.sh b/pipelines/adminutils/installDeps.sh new file mode 100755 index 0000000000000000000000000000000000000000..4bd4763793309f59824979313bcb8b4fb004b1da --- /dev/null +++ b/pipelines/adminutils/installDeps.sh @@ -0,0 +1,5 @@ +#!/bin/sh +# Build script +# set -o errexit +apk -v --update --no-cache add jq bash +apk -v add ansible=2.3.0.0-r1 diff --git a/pipelines/adminutils/metadata.json b/pipelines/adminutils/metadata.json new file mode 100644 index 0000000000000000000000000000000000000000..d66f241143c7ef0eb5e718da632a0e9e30a6fcfd --- /dev/null +++ b/pipelines/adminutils/metadata.json @@ -0,0 +1,6 @@ +{ + "name": "adminutil", + "version": "0.0.1-SNAPSHOT", + "org": "sunbird", + "hubuser": "purplesunbird" +} diff --git a/pipelines/am-onboard-apis/Jenkinsfile.sample b/pipelines/am-onboard-apis/Jenkinsfile.sample new file mode 100644 index 0000000000000000000000000000000000000000..c148704f90a7995ff13779f8d462adcddf6fa044 --- /dev/null +++ b/pipelines/am-onboard-apis/Jenkinsfile.sample @@ -0,0 +1,15 @@ +pipeline { + agent { + label 'kong-staging' + } + stages { + stage('Deploy') { + steps { + sh ''' + cd ansible + ansible-playbook -i inventory/staging api-manager.yml --tags kong-api --vault-password-file /run/secrets/vault-pass + ''' + } + } + } +} \ No newline at end of file diff --git a/pipelines/am-onboard-consumers/Jenkinsfile.sample b/pipelines/am-onboard-consumers/Jenkinsfile.sample new file mode 100644 index 0000000000000000000000000000000000000000..4a789dc9812571107002030dca29be0b6775407d --- /dev/null +++ b/pipelines/am-onboard-consumers/Jenkinsfile.sample @@ -0,0 +1,15 @@ +pipeline { + agent { + label 'kong-staging' + } + stages { + stage('Deploy') { + steps { + sh ''' + cd ansible + ansible-playbook -i inventory/staging api-manager.yml --tags kong-consumer --vault-password-file /run/secrets/vault-pass -v + ''' + } + } + } +} \ No newline at end of file diff --git a/pipelines/api-manager/Jenkinsfile.sample b/pipelines/api-manager/Jenkinsfile.sample new file mode 100644 index 0000000000000000000000000000000000000000..b7541c660dfc3823d73edcfdecf6b77b2f32b214 --- /dev/null +++ b/pipelines/api-manager/Jenkinsfile.sample @@ -0,0 +1,34 @@ +#!groovy + +node('general-staging') { + + currentBuild.result = "SUCCESS" + + try { + + stage('Checkout'){ + + checkout scm + } + + stage('Pre-Build'){ + + sh('./pipelines/api-manager/installDeps.sh') + + } + + stage('Deploy'){ + + step ([$class: 'CopyArtifact', + projectName: 'Tag_As_Silver/Echo_Server_Tag_Silver', + filter: 'metadata.json']); + sh 'METADATA_FILE=metadata.json ENV=staging ./pipelines/api-manager/deploy.sh' + archive includes: "metadata.json" + } + } + catch (err) { + currentBuild.result = "FAILURE" + throw err + } + +} diff --git a/pipelines/api-manager/deploy.sh b/pipelines/api-manager/deploy.sh new file mode 100755 index 0000000000000000000000000000000000000000..194c29df3aa274522365b2ef3d703456154153eb --- /dev/null +++ b/pipelines/api-manager/deploy.sh @@ -0,0 +1,23 @@ +#!/bin/sh +# Build script +# set -o errexit +set -e +e () { + echo $( echo ${1} | jq ".${2}" | sed 's/\"//g') +} +m=$(cat $METADATA_FILE) + +org=$(e "${m}" "org") +name=$(e "${m}" "name") +version=$(e "${m}" "version") + +artifactLabel=${ARTIFACT_LABEL:-bronze} +env=${ENV:-null} + +echo "artifactLabel: ${artifactLabel}" +echo "env: ${env}" +echo "org: ${org}" +echo "name: 
${name}" +echo "version: ${version}" +ansible-playbook --version +ansible-playbook -i ansible/inventory/$ENV ansible/deploy.yml --tags "stack-api-manager" --extra-vars "hub_org=${org} echo_server_image_name=${name} echo_server_image_tag=${version}-${artifactLabel}" --vault-password-file /run/secrets/vault-pass diff --git a/pipelines/api-manager/installDeps.sh b/pipelines/api-manager/installDeps.sh new file mode 100755 index 0000000000000000000000000000000000000000..f99345239c97eb8bfa2e97d498c5200477ee72b2 --- /dev/null +++ b/pipelines/api-manager/installDeps.sh @@ -0,0 +1,5 @@ +#!/bin/sh +# Build script +# set -o errexit +apk -v --update --no-cache add ansible=2.3.0.0-r1 +apk -v add jq diff --git a/pipelines/api-manager/metadata.json b/pipelines/api-manager/metadata.json new file mode 100644 index 0000000000000000000000000000000000000000..0103cf310f1885fe025d2dd33488582129ec9404 --- /dev/null +++ b/pipelines/api-manager/metadata.json @@ -0,0 +1,6 @@ +{ + "name": "echo-server", + "version": "0.0.1", + "org": "sunbird", + "hubuser": "purplesunbird" +} \ No newline at end of file diff --git a/pipelines/application-elasticsearch/Jenkinsfile.sample b/pipelines/application-elasticsearch/Jenkinsfile.sample new file mode 100644 index 0000000000000000000000000000000000000000..fdd5dfe9235b4880336b622478b81aca0cc1769e --- /dev/null +++ b/pipelines/application-elasticsearch/Jenkinsfile.sample @@ -0,0 +1,22 @@ +pipeline { + agent { + label 'general-staging' + } + stages { + stage('Pre-Build'){ + steps { + sh('chmod a+x ansible/installDeps.sh') + sh('./ansible/installDeps.sh') + } + } + + stage('Deploy') { + steps { + sh ''' + cd ansible + ansible-playbook -i inventory/staging provision.yml --tags es --vault-password-file /run/secrets/vault-pass + ''' + } + } + } +} \ No newline at end of file diff --git a/pipelines/cassandra-backup/Jenkinsfile.sample b/pipelines/cassandra-backup/Jenkinsfile.sample new file mode 100644 index 0000000000000000000000000000000000000000..fee231896422b1a7a5d870aec267c43e4d3d612d --- /dev/null +++ b/pipelines/cassandra-backup/Jenkinsfile.sample @@ -0,0 +1,32 @@ +#!groovy + +node('general-staging') { + + currentBuild.result = "SUCCESS" + + try { + + stage('Checkout'){ + + checkout scm + } + + + + stage('Pre-Build'){ + sh('chmod a+x ansible/installDeps.sh') + sh('./ansible/installDeps.sh') + } + + stage('Build'){ + + sh('ansible-playbook -i ansible/inventory/staging ansible/cassandra-backup.yml --vault-password-file /run/secrets/vault-pass ') + } + + } + catch (err) { + currentBuild.result = "FAILURE" + throw err + } + +} \ No newline at end of file diff --git a/pipelines/cassandra-restore/Jenkinsfile.sample b/pipelines/cassandra-restore/Jenkinsfile.sample new file mode 100644 index 0000000000000000000000000000000000000000..0203cae04c054167608ba635707d0c031bebda1f --- /dev/null +++ b/pipelines/cassandra-restore/Jenkinsfile.sample @@ -0,0 +1,34 @@ +#!groovy + +node('general-staging') { + + properties([parameters([string(defaultValue: '', description: 'time ', name: 'snapshot')]), pipelineTriggers([])]) + + currentBuild.result = "SUCCESS" + + try { + + stage('Checkout'){ + + checkout scm + } + + stage('Pre-Build'){ + sh('chmod a+x ansible/installDeps.sh') + sh('./ansible/installDeps.sh') + } + + stage('Build'){ + + sh('ansible-playbook -i ansible/inventory/staging ansible/cassandra-restore.yml --vault-password-file /run/secrets/vault-pass --extra-vars "cassandra_restore_gzip_file_name=cassandra_backup_${snapshot}.zip snapshot=${snapshot}"') + } + + } + catch (err) { + 
currentBuild.result = "FAILURE" + throw err + } + +} + + diff --git a/pipelines/cassandra/Jenkinsfile.sample b/pipelines/cassandra/Jenkinsfile.sample new file mode 100644 index 0000000000000000000000000000000000000000..72543ff348df555f5579b89d242eb7cfac08bdf2 --- /dev/null +++ b/pipelines/cassandra/Jenkinsfile.sample @@ -0,0 +1,32 @@ +#!groovy + +node('general-staging') { + + currentBuild.result = "SUCCESS" + + try { + + stage('Checkout'){ + + checkout scm + } + + + + stage('Pre-Build'){ + sh('chmod a+x ansible/installDeps.sh') + sh('./ansible/installDeps.sh') + } + + stage('Build'){ + + sh('ansible-playbook -i ansible/inventory/staging ansible/provision.yml --tags cassandra --vault-password-file /run/secrets/vault-pass ') + } + + } + catch (err) { + currentBuild.result = "FAILURE" + throw err + } + +} \ No newline at end of file diff --git a/pipelines/cassandra/installDeps.sh b/pipelines/cassandra/installDeps.sh new file mode 100644 index 0000000000000000000000000000000000000000..49a927e6e5a655126d92a0baf5954f006b0893fc --- /dev/null +++ b/pipelines/cassandra/installDeps.sh @@ -0,0 +1 @@ +apk -v add ansible=2.3.0.0-r1 diff --git a/pipelines/dockerTag.sh b/pipelines/dockerTag.sh new file mode 100755 index 0000000000000000000000000000000000000000..cb006feba568e93a3bf5c7a014217fa2992d5783 --- /dev/null +++ b/pipelines/dockerTag.sh @@ -0,0 +1,23 @@ +#!/bin/sh +# Build script +# set -o errexit +set -e +e () { + echo $( echo ${1} | jq ".${2}" | sed 's/\"//g') +} +m=$(cat $METADATA_FILE) + +org=$(e "${m}" "org") +hubuser=$(e "${m}" "hubuser") +name=$(e "${m}" "name") +version=$(e "${m}" "version") + +artifactLabel=${ARTIFACT_LABEL:-gold} +PREVIOUS_LABEL=silver + +docker pull ${org}/${name}:${version}-${PREVIOUS_LABEL} +docker image tag ${org}/${name}:${version}-${PREVIOUS_LABEL} ${org}/${name}:${version}-${artifactLabel} + +docker login -u "${hubuser}" -p`cat /run/secrets/hub-pass` +docker push ${org}/${name}:${version}-${artifactLabel} +docker logout diff --git a/pipelines/documentation-jenkins-slave/Jenkinsfile.deploy-slave b/pipelines/documentation-jenkins-slave/Jenkinsfile.deploy-slave new file mode 100644 index 0000000000000000000000000000000000000000..902988b8efb971dcffd397f72cbf48e1efb69883 --- /dev/null +++ b/pipelines/documentation-jenkins-slave/Jenkinsfile.deploy-slave @@ -0,0 +1,19 @@ +pipeline { + agent { + label 'general-dev' + } + stages { + stage('Deploy') { + steps { + step ([$class: 'CopyArtifact', + projectName: 'Build/Documentation_Jenkins_Slave', + filter: 'metadata.json']); + + sh 'METADATA_FILE=metadata.json ENV=dev ./pipelines/documentation-jenkins-slave/deploy.sh' + + archive includes: "metadata.json" + } + } + } +} + diff --git a/pipelines/documentation-jenkins-slave/deploy.sh b/pipelines/documentation-jenkins-slave/deploy.sh new file mode 100755 index 0000000000000000000000000000000000000000..625c46c1d6bbd093ed89a18e4b38e8a1da55e7b9 --- /dev/null +++ b/pipelines/documentation-jenkins-slave/deploy.sh @@ -0,0 +1,11 @@ +#!/bin/sh +set -e +e () { + echo $( echo ${1} | jq ".${2}" | sed 's/\"//g') +} + +env=${ENV:-null} + +echo "env: ${env}" + +ansible-playbook -i ansible/inventory/$ENV ansible/jenkins-slave.yml --tags "documentation-jenkins-slave" --vault-password-file /run/secrets/vault-pass diff --git a/pipelines/es-backup/Jenkinsfile.sample b/pipelines/es-backup/Jenkinsfile.sample new file mode 100644 index 0000000000000000000000000000000000000000..b17f44dcc1a3bf305835ae6f49c5d41b887daa8f --- /dev/null +++ b/pipelines/es-backup/Jenkinsfile.sample @@ -0,0 +1,20 @@ 
+pipeline { + agent { + label 'general-staging' + } + triggers { + cron('H 0 * * *') + } + stages { + stage('Pre-Build'){ + steps { + sh('./pipelines/es-backup/installDeps.sh') + } + } + stage('Deploy') { + steps { + sh 'ENV=staging ./pipelines/es-backup/deploy.sh' + } + } + } +} diff --git a/pipelines/es-backup/deploy.sh b/pipelines/es-backup/deploy.sh new file mode 100755 index 0000000000000000000000000000000000000000..663c26c73f9b07610e36ff9b6acb5aa9835ad13b --- /dev/null +++ b/pipelines/es-backup/deploy.sh @@ -0,0 +1,6 @@ +#!/bin/sh +# Build script +# set -o errexit +set -e +ansible-playbook --version +ANSIBLE_FORCE_COLOR=true ansible-playbook -i ansible/inventory/$ENV ansible/es.yml --tags "es_backup" -v --vault-password-file /run/secrets/vault-pass diff --git a/pipelines/es-backup/installDeps.sh b/pipelines/es-backup/installDeps.sh new file mode 100755 index 0000000000000000000000000000000000000000..acc98d5f3a7c36eae50fec5ee92cf7d0a002cb10 --- /dev/null +++ b/pipelines/es-backup/installDeps.sh @@ -0,0 +1,5 @@ +#!/bin/sh +# Build script +# set -o errexit +apk -v --update --no-cache add jq +apk -v add ansible=2.3.0.0-r1 diff --git a/pipelines/es-restore/Jenkinsfile.sample b/pipelines/es-restore/Jenkinsfile.sample new file mode 100644 index 0000000000000000000000000000000000000000..ca04227546ffce8b08f1c48998273eab07c44ce5 --- /dev/null +++ b/pipelines/es-restore/Jenkinsfile.sample @@ -0,0 +1,17 @@ +pipeline { + agent { + label 'general-staging' + } + stages { + stage('Pre-Build'){ + steps { + sh('./pipelines/es-restore/installDeps.sh') + } + } + stage('Deploy') { + steps { + sh 'ENV=staging ./pipelines/es-restore/deploy.sh' + } + } + } +} diff --git a/pipelines/es-restore/deploy.sh b/pipelines/es-restore/deploy.sh new file mode 100755 index 0000000000000000000000000000000000000000..a0eed000b0e0bb21c6c290170413799cfe2bd4a2 --- /dev/null +++ b/pipelines/es-restore/deploy.sh @@ -0,0 +1,6 @@ +#!/bin/sh +# Build script +# set -o errexit +set -e +ansible-playbook --version +ANSIBLE_FORCE_COLOR=true ansible-playbook -i ansible/inventory/$ENV ansible/es.yml --tags "es_restore" --extra-vars "snapshot_number=$SNAPSHOT_NUMBER" -v --vault-password-file /run/secrets/vault-pass diff --git a/pipelines/es-restore/installDeps.sh b/pipelines/es-restore/installDeps.sh new file mode 100755 index 0000000000000000000000000000000000000000..acc98d5f3a7c36eae50fec5ee92cf7d0a002cb10 --- /dev/null +++ b/pipelines/es-restore/installDeps.sh @@ -0,0 +1,5 @@ +#!/bin/sh +# Build script +# set -o errexit +apk -v --update --no-cache add jq +apk -v add ansible=2.3.0.0-r1 diff --git a/pipelines/jenkins-backup-upload/Jenkinsfile.admin b/pipelines/jenkins-backup-upload/Jenkinsfile.admin new file mode 100644 index 0000000000000000000000000000000000000000..e9554f46f902a9ce3fb28ce2503669d4b3309f92 --- /dev/null +++ b/pipelines/jenkins-backup-upload/Jenkinsfile.admin @@ -0,0 +1,18 @@ +pipeline { + agent { + label 'master' + } + triggers { + cron('@midnight') + } + stages { + stage('Deploy') { + steps { + sh ''' + cd ansible + ansible-playbook -i inventory/admin jenkins-backup.yml --vault-password-file /run/secrets/vault-pass + ''' + } + } + } +} \ No newline at end of file diff --git a/pipelines/keycloak1/deploy.sh b/pipelines/keycloak1/deploy.sh new file mode 100755 index 0000000000000000000000000000000000000000..2677a35b36a984627ca134679e7a2f92316676d2 --- /dev/null +++ b/pipelines/keycloak1/deploy.sh @@ -0,0 +1,25 @@ +#!/bin/sh +# Build script +# set -o errexit +set -e +e () { + 
echo $( echo ${1} | jq ".${2}" | sed 's/\"//g') +} +m=$(cat $METADATA_FILE) + +org=$(e "${m}" "org") +name=$(e "${m}" "name") +version=$(e "${m}" "version") + +artifactLabel=${ARTIFACT_LABEL:-bronze} +env=${ENV:-null} + +echo "artifactLabel: ${artifactLabel}" +echo "env: ${env}" +echo "org: ${org}" +echo "name: ${name}" +echo "version: ${version}" +echo "ANSIBLE_PATH: $ANSIBLE_PATH" + +ansible-playbook --version +ansible-playbook -i ansible/inventory/$ENV ansible/deploy.yml --tags "stack-keycloak1" --extra-vars "hub_org=${org} image_name=${name} image_tag=${version}-${artifactLabel} service_name=keycloak1 deploy_keycloak1=True" --vault-password-file /run/secrets/vault-pass diff --git a/pipelines/keycloak1/installDeps.sh b/pipelines/keycloak1/installDeps.sh new file mode 100755 index 0000000000000000000000000000000000000000..edee05c04554e798b0c1b504891b1c01b84ac05d --- /dev/null +++ b/pipelines/keycloak1/installDeps.sh @@ -0,0 +1,5 @@ +#!/bin/sh +# Build script +# set -o errexit +apk -v --update --no-cache add jq +apk -v --no-cache add ansible=2.3.0.0-r1 diff --git a/pipelines/keycloak1/metadata.json b/pipelines/keycloak1/metadata.json new file mode 100644 index 0000000000000000000000000000000000000000..f947f2f66940b4530548f6b4e11df8c4b967341c --- /dev/null +++ b/pipelines/keycloak1/metadata.json @@ -0,0 +1,6 @@ +{ + "name": "keycloak_image", + "version": "3.2.1.Final", + "org": "sunbird", + "hubuser": "purplesunbird" +} \ No newline at end of file diff --git a/pipelines/keycloak2/deploy.sh b/pipelines/keycloak2/deploy.sh new file mode 100755 index 0000000000000000000000000000000000000000..b1df8b1f969dc3433948599c225b5cc02943dba9 --- /dev/null +++ b/pipelines/keycloak2/deploy.sh @@ -0,0 +1,25 @@ +#!/bin/sh +# Build script +# set -o errexit +set -e +e () { + echo $( echo ${1} | jq ".${2}" | sed 's/\"//g') +} +m=$(cat $METADATA_FILE) + +org=$(e "${m}" "org") +name=$(e "${m}" "name") +version=$(e "${m}" "version") + +artifactLabel=${ARTIFACT_LABEL:-bronze} +env=${ENV:-null} + +echo "artifactLabel: ${artifactLabel}" +echo "env: ${env}" +echo "org: ${org}" +echo "name: ${name}" +echo "version: ${version}" +echo "ANSIBLE_PATH: $ANSIBLE_PATH" + +ansible-playbook --version +ansible-playbook -i ansible/inventory/$ENV ansible/deploy.yml --tags "stack-keycloak2" --extra-vars "hub_org=${org} image_name=${name} image_tag=${version}-${artifactLabel} service_name=keycloak2 deploy_keycloak2=True" --vault-password-file /run/secrets/vault-pass diff --git a/pipelines/keycloak2/installDeps.sh b/pipelines/keycloak2/installDeps.sh new file mode 100755 index 0000000000000000000000000000000000000000..edee05c04554e798b0c1b504891b1c01b84ac05d --- /dev/null +++ b/pipelines/keycloak2/installDeps.sh @@ -0,0 +1,5 @@ +#!/bin/sh +# Build script +# set -o errexit +apk -v --update --no-cache add jq +apk -v --no-cache add ansible=2.3.0.0-r1 diff --git a/pipelines/keycloak2/metadata.json b/pipelines/keycloak2/metadata.json new file mode 100644 index 0000000000000000000000000000000000000000..f947f2f66940b4530548f6b4e11df8c4b967341c --- /dev/null +++ b/pipelines/keycloak2/metadata.json @@ -0,0 +1,6 @@ +{ + "name": "keycloak_image", + "version": "3.2.1.Final", + "org": "sunbird", + "hubuser": "purplesunbird" +} \ No newline at end of file diff --git a/pipelines/log-es-backup/Jenkinsfile.sample b/pipelines/log-es-backup/Jenkinsfile.sample new file mode 100644 index 0000000000000000000000000000000000000000..b7763f2bd999b414f0b537097bf0af81a57bac2c --- /dev/null +++ 
b/pipelines/log-es-backup/Jenkinsfile.sample @@ -0,0 +1,20 @@ +pipeline { + agent { + label 'general-staging' + } + triggers { + cron('H 0 * * *') + } + stages { + stage('Pre-Build'){ + steps { + sh('./pipelines/log-es-backup/installDeps.sh') + } + } + stage('Deploy') { + steps { + sh 'ENV=staging ./pipelines/log-es-backup/deploy.sh' + } + } + } +} diff --git a/pipelines/log-es-backup/deploy.sh b/pipelines/log-es-backup/deploy.sh new file mode 100755 index 0000000000000000000000000000000000000000..868dccaca241cbe685e408ae327b2ae8987d0163 --- /dev/null +++ b/pipelines/log-es-backup/deploy.sh @@ -0,0 +1,6 @@ +#!/bin/sh +# Build script +# set -o errexit +set -e +ansible-playbook --version +ANSIBLE_FORCE_COLOR=true ansible-playbook -i ansible/inventory/$ENV ansible/es.yml --tags "log_es_backup" -v --vault-password-file /run/secrets/vault-pass diff --git a/pipelines/log-es-backup/installDeps.sh b/pipelines/log-es-backup/installDeps.sh new file mode 100755 index 0000000000000000000000000000000000000000..acc98d5f3a7c36eae50fec5ee92cf7d0a002cb10 --- /dev/null +++ b/pipelines/log-es-backup/installDeps.sh @@ -0,0 +1,5 @@ +#!/bin/sh +# Build script +# set -o errexit +apk -v --update --no-cache add jq +apk -v add ansible=2.3.0.0-r1 diff --git a/pipelines/log-es-restore/deploy.sh b/pipelines/log-es-restore/deploy.sh new file mode 100755 index 0000000000000000000000000000000000000000..2f4de577eb51b519ff471ec38e41127f3087a7bc --- /dev/null +++ b/pipelines/log-es-restore/deploy.sh @@ -0,0 +1,6 @@ +#!/bin/sh +# Build script +# set -o errexit + +ansible-playbook --version +ANSIBLE_FORCE_COLOR=true ansible-playbook -i ansible/inventory/$ENV ansible/es.yml --tags "log_es_restore" --extra-vars "snapshot_number=$SNAPSHOT_NUMBER" -v --vault-password-file /run/secrets/vault-pass diff --git a/pipelines/log-es-restore/installDeps.sh b/pipelines/log-es-restore/installDeps.sh new file mode 100755 index 0000000000000000000000000000000000000000..acc98d5f3a7c36eae50fec5ee92cf7d0a002cb10 --- /dev/null +++ b/pipelines/log-es-restore/installDeps.sh @@ -0,0 +1,5 @@ +#!/bin/sh +# Build script +# set -o errexit +apk -v --update --no-cache add jq +apk -v add ansible=2.3.0.0-r1 diff --git a/pipelines/log-forwarder/Jenkinsfile.sample b/pipelines/log-forwarder/Jenkinsfile.sample new file mode 100644 index 0000000000000000000000000000000000000000..7b9143790de5ef8ea66f34e7701529507a4da189 --- /dev/null +++ b/pipelines/log-forwarder/Jenkinsfile.sample @@ -0,0 +1,20 @@ +pipeline { + agent { + label 'general-staging' + } + triggers { + cron('H 0 * * *') + } + stages { + stage('Pre-Build'){ + steps { + sh('./pipelines/log-forwarder/installDeps.sh') + } + } + stage('Deploy') { + steps { + sh 'ENV=staging ./pipelines/log-forwarder/deploy.sh' + } + } + } +} diff --git a/pipelines/log-forwarder/deploy.sh b/pipelines/log-forwarder/deploy.sh new file mode 100755 index 0000000000000000000000000000000000000000..c4959e24497dc505ba4b8f3d6499021eb3c6c58a --- /dev/null +++ b/pipelines/log-forwarder/deploy.sh @@ -0,0 +1,6 @@ +#!/bin/sh +# Build script +# set -o errexit + +ansible-playbook --version +ANSIBLE_FORCE_COLOR=true ansible-playbook -i ansible/inventory/$ENV ansible/ops.yml --limit '!localhost' --tags "log-forwarder" -v --vault-password-file /run/secrets/vault-pass diff --git a/pipelines/log-forwarder/installDeps.sh b/pipelines/log-forwarder/installDeps.sh new file mode 100755 index 0000000000000000000000000000000000000000..ccc9677a01ce3a259ac4fbdd036aa03371300b89 --- /dev/null +++ b/pipelines/log-forwarder/installDeps.sh @@ -0,0 +1,4 @@ 
+#!/bin/sh +# Build script +# set -o errexit +apk -v --update --no-cache add ansible=2.3.0.0-r1 diff --git a/pipelines/logger-elasticsearch/Jenkinsfile.sample b/pipelines/logger-elasticsearch/Jenkinsfile.sample new file mode 100644 index 0000000000000000000000000000000000000000..474ae6faae1403f22310ec68173b3edf9049f5f9 --- /dev/null +++ b/pipelines/logger-elasticsearch/Jenkinsfile.sample @@ -0,0 +1,22 @@ +pipeline { + agent { + label 'general-staging' + } + stages { + stage('Pre-Build'){ + steps { + sh('chmod a+x ansible/installDeps.sh') + sh('./ansible/installDeps.sh') + } + } + + stage('Deploy') { + steps { + sh ''' + cd ansible + ansible-playbook -i inventory/staging provision.yml --tags log-es --vault-password-file /run/secrets/vault-pass + ''' + } + } + } +} \ No newline at end of file diff --git a/pipelines/logger/deploy.sh b/pipelines/logger/deploy.sh new file mode 100755 index 0000000000000000000000000000000000000000..6e99a80173c6a4e6be104bd8fe60303c70f49a95 --- /dev/null +++ b/pipelines/logger/deploy.sh @@ -0,0 +1,7 @@ +#!/bin/sh +# Build script +# set -o errexit + +ansible-playbook --version + +ansible-playbook -i ansible/inventory/${ENV} ansible/deploy.yml --tags "stack-logger" --vault-password-file /run/secrets/vault-pass diff --git a/pipelines/logger/installDeps.sh b/pipelines/logger/installDeps.sh new file mode 100755 index 0000000000000000000000000000000000000000..acc98d5f3a7c36eae50fec5ee92cf7d0a002cb10 --- /dev/null +++ b/pipelines/logger/installDeps.sh @@ -0,0 +1,5 @@ +#!/bin/sh +# Build script +# set -o errexit +apk -v --update --no-cache add jq +apk -v add ansible=2.3.0.0-r1 diff --git a/pipelines/metrics-exporter/Jenkinsfile.sample b/pipelines/metrics-exporter/Jenkinsfile.sample new file mode 100644 index 0000000000000000000000000000000000000000..5f71cc86e1a8448840ef794c9275205ef5e59d0e --- /dev/null +++ b/pipelines/metrics-exporter/Jenkinsfile.sample @@ -0,0 +1,20 @@ +pipeline { + agent { + label 'general-staging' + } + triggers { + cron('H 0 * * *') + } + stages { + stage('Pre-Build'){ + steps { + sh('./pipelines/metrics-exporter/installDeps.sh') + } + } + stage('Deploy') { + steps { + sh 'ENV=staging ./pipelines/metrics-exporter/deploy.sh' + } + } + } +} diff --git a/pipelines/metrics-exporter/deploy.sh b/pipelines/metrics-exporter/deploy.sh new file mode 100755 index 0000000000000000000000000000000000000000..de48d365ca3c0dc8de1ce463e3d1e5708a6b7961 --- /dev/null +++ b/pipelines/metrics-exporter/deploy.sh @@ -0,0 +1,6 @@ +#!/bin/sh +# Build script +# set -o errexit + +ansible-playbook --version +ANSIBLE_FORCE_COLOR=true ansible-playbook -i ansible/inventory/$ENV ansible/ops.yml --limit '!localhost' --tags "metrics-exporter" -v --vault-password-file /run/secrets/vault-pass diff --git a/pipelines/metrics-exporter/installDeps.sh b/pipelines/metrics-exporter/installDeps.sh new file mode 100755 index 0000000000000000000000000000000000000000..ccc9677a01ce3a259ac4fbdd036aa03371300b89 --- /dev/null +++ b/pipelines/metrics-exporter/installDeps.sh @@ -0,0 +1,4 @@ +#!/bin/sh +# Build script +# set -o errexit +apk -v --update --no-cache add ansible=2.3.0.0-r1 diff --git a/pipelines/mongodb-backup/Jenkinsfile.sample b/pipelines/mongodb-backup/Jenkinsfile.sample new file mode 100644 index 0000000000000000000000000000000000000000..3098103c8f57dbd3b9f2e81671808b5924f2b633 --- /dev/null +++ b/pipelines/mongodb-backup/Jenkinsfile.sample @@ -0,0 +1,32 @@ +#!groovy + +node('general-staging') { + + currentBuild.result = "SUCCESS" + + try { + + stage('Checkout'){ + + checkout scm + } + + 
+ + stage('Pre-Build'){ + sh('chmod a+x ansible/installDeps.sh') + sh('./ansible/installDeps.sh') + } + + stage('Build'){ + + sh('ansible-playbook -i ansible/inventory/staging ansible/mongobackup.yml --vault-password-file /run/secrets/vault-pass ') + } + + } + catch (err) { + currentBuild.result = "FAILURE" + throw err + } + +} \ No newline at end of file diff --git a/pipelines/mongodb-backup/installDeps.sh b/pipelines/mongodb-backup/installDeps.sh new file mode 100644 index 0000000000000000000000000000000000000000..49a927e6e5a655126d92a0baf5954f006b0893fc --- /dev/null +++ b/pipelines/mongodb-backup/installDeps.sh @@ -0,0 +1 @@ +apk -v add ansible=2.3.0.0-r1 diff --git a/pipelines/mongodb/installDeps.sh b/pipelines/mongodb/installDeps.sh new file mode 100644 index 0000000000000000000000000000000000000000..49a927e6e5a655126d92a0baf5954f006b0893fc --- /dev/null +++ b/pipelines/mongodb/installDeps.sh @@ -0,0 +1 @@ +apk -v add ansible=2.3.0.0-r1 diff --git a/pipelines/monitor/Jenkinsfile.sample b/pipelines/monitor/Jenkinsfile.sample new file mode 100644 index 0000000000000000000000000000000000000000..93c80c93abd9aa01161e5970770de1f88615abe5 --- /dev/null +++ b/pipelines/monitor/Jenkinsfile.sample @@ -0,0 +1,31 @@ +#!groovy + +node('general-staging') { + + currentBuild.result = "SUCCESS" + + try { + + stage('Checkout'){ + + checkout scm + } + + stage('Pre-Build'){ + + sh('./pipelines/monitor/installDeps.sh') + + } + + stage('Deploy'){ + + sh 'TARGET_ENV=staging ./pipelines/monitor/deploy.sh' + + } + } + catch (err) { + currentBuild.result = "FAILURE" + throw err + } + +} diff --git a/pipelines/monitor/deploy.sh b/pipelines/monitor/deploy.sh new file mode 100755 index 0000000000000000000000000000000000000000..8068046b0681334f680ff9a09de5b29b55bdbb73 --- /dev/null +++ b/pipelines/monitor/deploy.sh @@ -0,0 +1,6 @@ +#!/bin/sh +# Build script +# set -o errexit + +ansible-playbook --version +ansible-playbook -i ansible/inventory/${TARGET_ENV} ansible/deploy.yml --tags "stack-monitor" --vault-password-file /run/secrets/vault-pass diff --git a/pipelines/monitor/installDeps.sh b/pipelines/monitor/installDeps.sh new file mode 100755 index 0000000000000000000000000000000000000000..ccc9677a01ce3a259ac4fbdd036aa03371300b89 --- /dev/null +++ b/pipelines/monitor/installDeps.sh @@ -0,0 +1,4 @@ +#!/bin/sh +# Build script +# set -o errexit +apk -v --update --no-cache add ansible=2.3.0.0-r1 diff --git a/pipelines/postgresql-backup/Jenkinsfile.sample b/pipelines/postgresql-backup/Jenkinsfile.sample new file mode 100644 index 0000000000000000000000000000000000000000..497b0ea9e96f45b2a23158d6da2806fe08bebaf9 --- /dev/null +++ b/pipelines/postgresql-backup/Jenkinsfile.sample @@ -0,0 +1,24 @@ +pipeline { + agent { + label 'general-staging' + } + triggers { + cron('@midnight') + } + stages { + stage('Pre-Build'){ + steps { + sh('chmod a+x ansible/installDeps.sh') + sh('./ansible/installDeps.sh') + } + } + stage('Deploy') { + steps { + sh ''' + cd ansible + ansible-playbook -i inventory/staging postgresql-backup.yml --vault-password-file /run/secrets/vault-pass + ''' + } + } + } +} \ No newline at end of file diff --git a/pipelines/postgresql-data-update/Jenkinsfile.sample b/pipelines/postgresql-data-update/Jenkinsfile.sample new file mode 100644 index 0000000000000000000000000000000000000000..7cff5cd9b9deb08bc19d454d40c516c7bbea4105 --- /dev/null +++ b/pipelines/postgresql-data-update/Jenkinsfile.sample @@ -0,0 +1,21 @@ +pipeline { + agent { + label 'general-staging' + } + stages { + stage('Pre-Build'){ + steps { + 
sh('chmod a+x ansible/installDeps.sh') + sh('./ansible/installDeps.sh') + } + } + stage('Deploy') { + steps { + sh ''' + cd ansible + ansible-playbook -i inventory/staging postgresql-data-update.yml --vault-password-file /run/secrets/vault-pass + ''' + } + } + } +} \ No newline at end of file diff --git a/pipelines/postgresql-master/Jenkinsfile.sample b/pipelines/postgresql-master/Jenkinsfile.sample new file mode 100644 index 0000000000000000000000000000000000000000..05d072e1f2d1a0d8cd84394317f47a9bff96ba07 --- /dev/null +++ b/pipelines/postgresql-master/Jenkinsfile.sample @@ -0,0 +1,21 @@ +pipeline { + agent { + label 'general-staging' + } + stages { + stage('Pre-Build'){ + steps { + sh('chmod a+x ansible/installDeps.sh') + sh('./ansible/installDeps.sh') + } + } + stage('Deploy') { + steps { + sh ''' + cd ansible + ansible-playbook -i inventory/staging provision.yml --tags postgresql-master --vault-password-file /run/secrets/vault-pass + ''' + } + } + } +} \ No newline at end of file diff --git a/pipelines/postgresql-restore/Jenkinsfile.sample b/pipelines/postgresql-restore/Jenkinsfile.sample new file mode 100644 index 0000000000000000000000000000000000000000..baea33c38904ebab2e49f00c345b299e3c2ca9c3 --- /dev/null +++ b/pipelines/postgresql-restore/Jenkinsfile.sample @@ -0,0 +1,37 @@ +pipeline { + agent { + label 'general-staging' + } + parameters { + string(name: 'postgresql_restore_gzip_file_name', defaultValue: '', description: 'Name of the backup file in Azure which should be restored') + } + stages { + stage('Pre-Build'){ + steps { + sh('chmod a+x ansible/installDeps.sh') + sh('./ansible/installDeps.sh') + } + } + stage('Deploy') { + steps { + sh ''' + cd ansible + ansible-playbook -i inventory/staging postgresql-restore.yml --extra-vars "postgresql_restore_gzip_file_name=${postgresql_restore_gzip_file_name}" --vault-password-file /run/secrets/vault-pass + ''' + } + } + // The postgresql config is reset to defaults by the restore task, which recreates the pg cluster. + // We build the master job to ensure the configuration is as expected, + // and the slave job to ensure it picks up the latest restored data. + stage('Configure Postgresql Master') { + steps { + build 'Postgresql_Master' + } + } + stage('Configure Postgresql Slave') { + steps { + build 'Postgresql_Slave' + } + } + } +} \ No newline at end of file diff --git a/pipelines/postgresql-slave-to-master-promotion/Jenkinsfile.sample b/pipelines/postgresql-slave-to-master-promotion/Jenkinsfile.sample new file mode 100644 index 0000000000000000000000000000000000000000..c267f47f47e98f661cbdbe5c294e60e1f766a0ad --- /dev/null +++ b/pipelines/postgresql-slave-to-master-promotion/Jenkinsfile.sample @@ -0,0 +1,22 @@ +pipeline { + agent { + label 'general-staging' + } + stages { + stage('Pre-Build'){ + steps { + sh('chmod a+x ansible/installDeps.sh') + sh('./ansible/installDeps.sh') + } + } + stage('Deploy') { + steps { + sh ''' + cd ansible + ansible-playbook -i inventory/staging postgresql-slave-to-master-promotion.yml --tags ensure-postgresql-master-stopped --vault-password-file /run/secrets/vault-pass || echo "Ignoring error while stopping master" + ansible-playbook -i inventory/staging postgresql-slave-to-master-promotion.yml --tags postgresql-slave-to-master-promotion --vault-password-file /run/secrets/vault-pass + ''' + } + } + } +} \ No newline at end of file diff --git a/pipelines/postgresql-slave/Jenkinsfile.sample b/pipelines/postgresql-slave/Jenkinsfile.sample new file mode 100644 index 
0000000000000000000000000000000000000000..2aae125290bcce6ed02a7f87a2a554de4234c4b5 --- /dev/null +++ b/pipelines/postgresql-slave/Jenkinsfile.sample @@ -0,0 +1,21 @@ +pipeline { + agent { + label 'general-staging' + } + stages { + stage('Pre-Build'){ + steps { + sh('chmod a+x ansible/installDeps.sh') + sh('./ansible/installDeps.sh') + } + } + stage('Deploy') { + steps { + sh ''' + cd ansible + ansible-playbook -i inventory/staging provision.yml --tags postgresql-slave --vault-password-file /run/secrets/vault-pass + ''' + } + } + } +} \ No newline at end of file diff --git a/pipelines/proxy/Jenkinsfile.sample b/pipelines/proxy/Jenkinsfile.sample new file mode 100644 index 0000000000000000000000000000000000000000..cf39631134f1e9b79eb35c521ab42fbb9c1076bb --- /dev/null +++ b/pipelines/proxy/Jenkinsfile.sample @@ -0,0 +1,25 @@ +pipeline { + agent { + label 'general-staging' + } + stages { + stage('Pre-Build'){ + steps { + sh('chmod a+x ansible/installDeps.sh') + sh('./ansible/installDeps.sh') + } + } + + stage('Deploy') { + steps { + step ([$class: 'CopyArtifact', + projectName: 'Dev/Proxy', + filter: 'metadata.json']); + + sh 'METADATA_FILE=metadata.json ARTIFACT_LABEL=bronze ENV=staging ./pipelines/proxy/deploy.sh' + + archive includes: "metadata.json" + } + } + } +} \ No newline at end of file diff --git a/pipelines/proxy/deploy.sh b/pipelines/proxy/deploy.sh new file mode 100755 index 0000000000000000000000000000000000000000..be0a7ed992d984153498afc07693db8b25e064cc --- /dev/null +++ b/pipelines/proxy/deploy.sh @@ -0,0 +1,23 @@ +#!/bin/sh +set -e +e () { + echo $( echo ${1} | jq ".${2}" | sed 's/\"//g') +} +m=$(cat $METADATA_FILE) + +org=$(e "${m}" "org") +name=$(e "${m}" "name") +version=$(e "${m}" "version") + +artifactLabel=${ARTIFACT_LABEL:-bronze} +env=${ENV:-null} + +echo "artifactLabel: ${artifactLabel}" +echo "env: ${env}" +echo "org: ${org}" +echo "name: ${name}" +echo "version: ${version}" + + +ansible-playbook --version +ansible-playbook -i ansible/inventory/$ENV ansible/deploy.yml --tags "stack-proxy" --extra-vars "hub_org=${org} image_name=${name} image_tag=${version}-${artifactLabel}" --vault-password-file /run/secrets/vault-pass diff --git a/pipelines/proxy/metadata.json b/pipelines/proxy/metadata.json new file mode 100644 index 0000000000000000000000000000000000000000..28d3fc61c0b14571a2f40977131df4ef6e01ef50 --- /dev/null +++ b/pipelines/proxy/metadata.json @@ -0,0 +1,6 @@ +{ + "name": "proxy", + "version": "0.0.1", + "org": "sunbird", + "hubuser": "purplesunbird" +} \ No newline at end of file diff --git a/pipelines/sunbird-actor-service/Jenkinsfile.sample b/pipelines/sunbird-actor-service/Jenkinsfile.sample new file mode 100644 index 0000000000000000000000000000000000000000..34da7fa335adf4c759cdec33884ca2742c65c952 --- /dev/null +++ b/pipelines/sunbird-actor-service/Jenkinsfile.sample @@ -0,0 +1,34 @@ +#!groovy + +node('general-staging') { + + currentBuild.result = "SUCCESS" + + try { + + stage('Checkout'){ + + checkout scm + } + + stage('Pre-Build'){ + + sh('./pipelines/sunbird-actor-service/installDeps.sh') + + } + + stage('Deploy'){ + step ([$class: 'CopyArtifact', + projectName: 'Dev/Sunbird_ActorService_Deploy_Dev', + filter: 'metadata.json']); + sh 'METADATA_FILE=metadata.json ARTIFACT_LABEL=silver ENV=staging ./pipelines/sunbird-actor-service/deploy.sh' + archive includes: "metadata.json" + } + + } + catch (err) { + currentBuild.result = "FAILURE" + throw err + } + +} diff --git a/pipelines/sunbird-actor-service/deploy.sh 
b/pipelines/sunbird-actor-service/deploy.sh new file mode 100755 index 0000000000000000000000000000000000000000..71fb4c1c4b5dd4b583b48d84e88b3470238796db --- /dev/null +++ b/pipelines/sunbird-actor-service/deploy.sh @@ -0,0 +1,25 @@ +#!/bin/sh +# Build script +# set -o errexit +set -e +e () { + echo $( echo ${1} | jq ".${2}" | sed 's/\"//g') +} +m=$(cat $METADATA_FILE) + +org=$(e "${m}" "org") +name=$(e "${m}" "name") +version=$(e "${m}" "version") + +artifactLabel=${ARTIFACT_LABEL:-bronze} +env=${ENV:-null} + +echo "artifactLabel: ${artifactLabel}" +echo "env: ${env}" +echo "org: ${org}" +echo "name: ${name}" +echo "version: ${version}" +echo "ANSIBLE_PATH: $ANSIBLE_PATH" + +ansible-playbook --version +ansible-playbook -i ansible/inventory/$ENV ansible/deploy.yml --tags "stack-sunbird" --extra-vars "hub_org=${org} image_name=${name} image_tag=${version}-${artifactLabel} service_name=actor-service deploy_actor=True" --vault-password-file /run/secrets/vault-pass diff --git a/pipelines/sunbird-actor-service/installDeps.sh b/pipelines/sunbird-actor-service/installDeps.sh new file mode 100755 index 0000000000000000000000000000000000000000..edee05c04554e798b0c1b504891b1c01b84ac05d --- /dev/null +++ b/pipelines/sunbird-actor-service/installDeps.sh @@ -0,0 +1,5 @@ +#!/bin/sh +# Build script +# set -o errexit +apk -v --update --no-cache add jq +apk -v --no-cache add ansible=2.3.0.0-r1 diff --git a/pipelines/sunbird-actor-service/metadata.json b/pipelines/sunbird-actor-service/metadata.json new file mode 100644 index 0000000000000000000000000000000000000000..950e154eadadfd1c5d86ca9a8e17c00804f71ce7 --- /dev/null +++ b/pipelines/sunbird-actor-service/metadata.json @@ -0,0 +1,6 @@ +{ + "name": "actor-service", + "version": "0.0.1", + "org": "sunbird", + "hubuser": "purplesunbird" +} \ No newline at end of file diff --git a/pipelines/sunbird-bootstrap/Jenkinsfile.db.sample b/pipelines/sunbird-bootstrap/Jenkinsfile.db.sample new file mode 100644 index 0000000000000000000000000000000000000000..a1fe617a3b4af3ddf60fcc2d19fdefecdaf850de --- /dev/null +++ b/pipelines/sunbird-bootstrap/Jenkinsfile.db.sample @@ -0,0 +1,25 @@ +#!groovy + +node('general-staging') { + currentBuild.result = "SUCCESS" + + try { + + stage('Checkout'){ + checkout scm + } + + stage('Pre-Build'){ + sh('chmod a+x ansible/installDeps.sh') + sh('./ansible/installDeps.sh') + } + + stage('Build'){ + sh('ansible-playbook -i ansible/inventory/db-bootstrap ansible/bootstrap.yml --extra-vars "hosts=staging bootstrap_secret_file=bootstrap" --vault-password-file /run/secrets/vault-pass ') + } + } + catch (err) { + currentBuild.result = "FAILURE" + throw err + } +} diff --git a/pipelines/sunbird-bootstrap/Jenkinsfile.sample b/pipelines/sunbird-bootstrap/Jenkinsfile.sample new file mode 100644 index 0000000000000000000000000000000000000000..b200f1fa9222ef1d4b09b28b40e7d0ca9a8b0725 --- /dev/null +++ b/pipelines/sunbird-bootstrap/Jenkinsfile.sample @@ -0,0 +1,32 @@ +#!groovy + +node('general-staging') { + + currentBuild.result = "SUCCESS" + + try { + + stage('Checkout'){ + + checkout scm + } + + + + stage('Pre-Build'){ + sh('chmod a+x ansible/installDeps.sh') + sh('./ansible/installDeps.sh') + } + + stage('Build'){ + + sh('ansible-playbook -i ansible/inventory/admin ansible/bootstrap.yml --extra-vars "hosts=staging" --vault-password-file /run/secrets/vault-pass ') + } + + } + catch (err) { + currentBuild.result = "FAILURE" + throw err + } + +} diff --git a/pipelines/sunbird-bootstrap/installDeps.sh 
b/pipelines/sunbird-bootstrap/installDeps.sh new file mode 100644 index 0000000000000000000000000000000000000000..49a927e6e5a655126d92a0baf5954f006b0893fc --- /dev/null +++ b/pipelines/sunbird-bootstrap/installDeps.sh @@ -0,0 +1 @@ +apk -v add ansible=2.3.0.0-r1 diff --git a/pipelines/sunbird-content-service/Jenkinsfile.sample b/pipelines/sunbird-content-service/Jenkinsfile.sample new file mode 100644 index 0000000000000000000000000000000000000000..f4cece38a3e520cafff51d24c4e5b7bcee5929d1 --- /dev/null +++ b/pipelines/sunbird-content-service/Jenkinsfile.sample @@ -0,0 +1,34 @@ +#!groovy + +node('general-staging') { + + currentBuild.result = "SUCCESS" + + try { + + stage('Checkout'){ + + checkout scm + } + + stage('Pre-Build'){ + + sh('./pipelines/sunbird-content-service/installDeps.sh') + + } + + stage('Deploy'){ + step ([$class: 'CopyArtifact', + projectName: 'Dev/Sunbird_ContentService_Deploy_Dev', + filter: 'metadata.json']); + sh 'METADATA_FILE=metadata.json ARTIFACT_LABEL=silver ENV=staging ./pipelines/sunbird-content-service/deploy.sh' + archive includes: "metadata.json" + } + + } + catch (err) { + currentBuild.result = "FAILURE" + throw err + } + +} diff --git a/pipelines/sunbird-content-service/deploy.sh b/pipelines/sunbird-content-service/deploy.sh new file mode 100755 index 0000000000000000000000000000000000000000..d76111e223cff5d17f9ac803c5e618b120c58d9b --- /dev/null +++ b/pipelines/sunbird-content-service/deploy.sh @@ -0,0 +1,25 @@ +#!/bin/sh +# Build script +# set -o errexit +set -e +e () { + echo $( echo ${1} | jq ".${2}" | sed 's/\"//g') +} +m=$(cat $METADATA_FILE) + +org=$(e "${m}" "org") +name=$(e "${m}" "name") +version=$(e "${m}" "version") + +artifactLabel=${ARTIFACT_LABEL:-bronze} +env=${ENV:-null} + +echo "artifactLabel: ${artifactLabel}" +echo "env: ${env}" +echo "org: ${org}" +echo "name: ${name}" +echo "version: ${version}" +echo "ANSIBLE_PATH: $ANSIBLE_PATH" + +ansible-playbook --version +ansible-playbook -i ansible/inventory/$ENV ansible/deploy.yml --tags "stack-sunbird" --extra-vars "hub_org=${org} image_name=${name} image_tag=${version}-${artifactLabel} service_name=content_service deploy_stack=True" --vault-password-file /run/secrets/vault-pass diff --git a/pipelines/sunbird-content-service/installDeps.sh b/pipelines/sunbird-content-service/installDeps.sh new file mode 100755 index 0000000000000000000000000000000000000000..edee05c04554e798b0c1b504891b1c01b84ac05d --- /dev/null +++ b/pipelines/sunbird-content-service/installDeps.sh @@ -0,0 +1,5 @@ +#!/bin/sh +# Build script +# set -o errexit +apk -v --update --no-cache add jq +apk -v --no-cache add ansible=2.3.0.0-r1 diff --git a/pipelines/sunbird-content-service/metadata.json b/pipelines/sunbird-content-service/metadata.json new file mode 100644 index 0000000000000000000000000000000000000000..c5b9f7f9f6c17dc86b5cfadcdf1bd811c23b8fbe --- /dev/null +++ b/pipelines/sunbird-content-service/metadata.json @@ -0,0 +1,6 @@ +{ + "name": "content_service", + "version": "0.0.1", + "org": "sunbird", + "hubuser": "purplesunbird" +} \ No newline at end of file diff --git a/pipelines/sunbird-learner-service/Jenkinsfile.sample b/pipelines/sunbird-learner-service/Jenkinsfile.sample new file mode 100644 index 0000000000000000000000000000000000000000..aa21c6bccceb2114c4f5e8c079be6e93b127cb05 --- /dev/null +++ b/pipelines/sunbird-learner-service/Jenkinsfile.sample @@ -0,0 +1,33 @@ +#!groovy + +node('general-staging') { + + currentBuild.result = "SUCCESS" + + try { + + 
stage('Checkout'){ + + checkout scm + } + + stage('Pre-Build'){ + + sh('./pipelines/sunbird-learner-service/installDeps.sh') + + } + + stage('Deploy'){ + step ([$class: 'CopyArtifact', + projectName: 'Dev/Sunbird_LearnerService_Deploy_Dev', + filter: 'metadata.json']); + sh 'METADATA_FILE=metadata.json ARTIFACT_LABEL=silver ENV=staging ./pipelines/sunbird-learner-service/deploy.sh' + archive includes: "metadata.json" + } + } + catch (err) { + currentBuild.result = "FAILURE" + throw err + } + +} diff --git a/pipelines/sunbird-learner-service/deploy.sh b/pipelines/sunbird-learner-service/deploy.sh new file mode 100755 index 0000000000000000000000000000000000000000..6b1a2e3953ed6adcf5b44813f7fc6cac472f9e1b --- /dev/null +++ b/pipelines/sunbird-learner-service/deploy.sh @@ -0,0 +1,25 @@ +#!/bin/sh +# Build script +# set -o errexit +set -e +e () { + echo $( echo ${1} | jq ".${2}" | sed 's/\"//g') +} +m=$(cat $METADATA_FILE) + +org=$(e "${m}" "org") +name=$(e "${m}" "name") +version=$(e "${m}" "version") + +artifactLabel=${ARTIFACT_LABEL:-bronze} +env=${ENV:-null} + +echo "artifactLabel: ${artifactLabel}" +echo "env: ${env}" +echo "org: ${org}" +echo "name: ${name}" +echo "version: ${version}" +echo "ANSIBLE_PATH: $ANSIBLE_PATH" + +ansible-playbook --version +ansible-playbook -i ansible/inventory/$ENV ansible/deploy.yml --tags "stack-sunbird" --extra-vars "hub_org=${org} image_name=${name} image_tag=${version}-${artifactLabel} service_name=learner-service deploy_learner=True" --vault-password-file /run/secrets/vault-pass diff --git a/pipelines/sunbird-learner-service/installDeps.sh b/pipelines/sunbird-learner-service/installDeps.sh new file mode 100755 index 0000000000000000000000000000000000000000..edee05c04554e798b0c1b504891b1c01b84ac05d --- /dev/null +++ b/pipelines/sunbird-learner-service/installDeps.sh @@ -0,0 +1,5 @@ +#!/bin/sh +# Build script +# set -o errexit +apk -v --update --no-cache add jq +apk -v --no-cache add ansible=2.3.0.0-r1 diff --git a/pipelines/sunbird-learner-service/metadata.json b/pipelines/sunbird-learner-service/metadata.json new file mode 100644 index 0000000000000000000000000000000000000000..4c1ea900718a2bbe3cfe17f8d31a748daf0e9213 --- /dev/null +++ b/pipelines/sunbird-learner-service/metadata.json @@ -0,0 +1,6 @@ +{ + "name": "learner_service", + "version": "0.0.1", + "org": "sunbird", + "hubuser": "purplesunbird" +} \ No newline at end of file diff --git a/pipelines/sunbird-player/Jenkinsfile.sample b/pipelines/sunbird-player/Jenkinsfile.sample new file mode 100644 index 0000000000000000000000000000000000000000..0ad38e805977fcb2aabae89e903a424bf4174a94 --- /dev/null +++ b/pipelines/sunbird-player/Jenkinsfile.sample @@ -0,0 +1,34 @@ +#!groovy + +node('general-staging') { + + currentBuild.result = "SUCCESS" + + try { + + stage('Checkout'){ + + checkout scm + } + + stage('Pre-Build'){ + + sh('./pipelines/sunbird-player/installDeps.sh') + + } + + stage('Deploy'){ + step ([$class: 'CopyArtifact', + projectName: 'Dev/Sunbird_Player_Deploy_Dev', + filter: 'metadata.json']); + sh 'METADATA_FILE=metadata.json ARTIFACT_LABEL=silver ENV=staging ./pipelines/sunbird-player/deploy.sh' + archive includes: "metadata.json" + } + + } + catch (err) { + currentBuild.result = "FAILURE" + throw err + } + +} diff --git a/pipelines/sunbird-player/deploy.sh b/pipelines/sunbird-player/deploy.sh new file mode 100755 index 0000000000000000000000000000000000000000..ee9df8f02ec1e3c984b0be170cc44d9a9509fe3e --- /dev/null +++ 
b/pipelines/sunbird-player/deploy.sh @@ -0,0 +1,25 @@ +#!/bin/sh +# Build script +# set -o errexit +set -e +e () { + echo $( echo ${1} | jq ".${2}" | sed 's/\"//g') +} +m=$(cat $METADATA_FILE) + +org=$(e "${m}" "org") +name=$(e "${m}" "name") +version=$(e "${m}" "version") + +artifactLabel=${ARTIFACT_LABEL:-bronze} +env=${ENV:-null} + +echo "artifactLabel: ${artifactLabel}" +echo "env: ${env}" +echo "org: ${org}" +echo "name: ${name}" +echo "version: ${version}" +echo "ANSIBLE_PATH: $ANSIBLE_PATH" + +ansible-playbook --version +ansible-playbook -i ansible/inventory/$ENV ansible/deploy.yml --tags "stack-sunbird" --extra-vars "hub_org=${org} image_name=${name} image_tag=${version}-${artifactLabel} service_name=player deploy_stack=True" --vault-password-file /run/secrets/vault-pass diff --git a/pipelines/sunbird-player/installDeps.sh b/pipelines/sunbird-player/installDeps.sh new file mode 100755 index 0000000000000000000000000000000000000000..7e4676ee49bf3d85eddd98a5284402a2f12d7a36 --- /dev/null +++ b/pipelines/sunbird-player/installDeps.sh @@ -0,0 +1,5 @@ +#!/bin/sh +# Build script +# set -o errexit +apk -v --update --no-cache add jq +apk -v --update --no-cache add ansible=2.3.0.0-r1 diff --git a/pipelines/sunbird-player/metadata.json b/pipelines/sunbird-player/metadata.json new file mode 100644 index 0000000000000000000000000000000000000000..657eb7dc36cd835d3d2ff550d1643515d21daeb7 --- /dev/null +++ b/pipelines/sunbird-player/metadata.json @@ -0,0 +1,6 @@ +{ + "name": "player", + "version": "0.0.6", + "org": "sunbird", + "hubuser": "purplesunbird" +} \ No newline at end of file diff --git a/pipelines/swarm-agent-docker-prune/Jenkinsfile.sample b/pipelines/swarm-agent-docker-prune/Jenkinsfile.sample new file mode 100644 index 0000000000000000000000000000000000000000..67ae6ebf103fea576f829526cc434d711ee56ed5 --- /dev/null +++ b/pipelines/swarm-agent-docker-prune/Jenkinsfile.sample @@ -0,0 +1,18 @@ +pipeline { + agent { + label 'general-staging' + } + triggers { + cron('@midnight') + } + stages { + stage('Deploy') { + steps { + sh ''' + cd ansible + ansible-playbook -v -i inventory/staging swarm-agent-docker-prune.yml --vault-password-file /run/secrets/vault-pass + ''' + } + } + } +} \ No newline at end of file
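All of the service deploy.sh scripts above share one convention: read metadata.json (copied from the upstream build job), extract org/name/version with a small jq helper, and pass them to ansible-playbook as --extra-vars. A minimal, self-contained sketch of that convention, assuming jq is installed (see the installDeps.sh scripts); the inline metadata values stand in for $(cat $METADATA_FILE) and are illustrative only:

#!/bin/sh
set -e

# Same helper as in the deploy scripts: extract a top-level field from a JSON blob.
# (jq -r would make the sed unnecessary; this mirrors the original form.)
e () {
  echo $( echo ${1} | jq ".${2}" | sed 's/\"//g')
}

# Illustrative metadata; real pipelines read it from $METADATA_FILE.
m='{"name": "player", "version": "0.0.6", "org": "sunbird"}'

org=$(e "${m}" "org")          # -> sunbird
name=$(e "${m}" "name")        # -> player
version=$(e "${m}" "version")  # -> 0.0.6

# Images are tagged <version>-<label>; ARTIFACT_LABEL defaults to bronze.
artifactLabel=${ARTIFACT_LABEL:-bronze}
echo "${org}/${name}:${version}-${artifactLabel}"   # prints sunbird/player:0.0.6-bronze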