From f709cc31a61f579b9c43219a7cef868a3322bfc7 Mon Sep 17 00:00:00 2001 From: Riku Rouvila Date: Fri, 24 Nov 2023 15:44:17 +0200 Subject: [PATCH 1/6] split playbooks to different task modules, use only one playbook for all deployment sizes --- infrastructure/server-setup/example-1.ini | 9 +- infrastructure/server-setup/example-3.ini | 17 +- infrastructure/server-setup/example-5.ini | 21 +- infrastructure/server-setup/playbook-1.yml | 538 ----------------- infrastructure/server-setup/playbook-3.yml | 555 ------------------ infrastructure/server-setup/playbook-5.yml | 542 ----------------- infrastructure/server-setup/playbook.yml | 169 ++++++ infrastructure/server-setup/tasks/checks.yml | 13 + infrastructure/server-setup/tasks/crontab.yml | 7 + .../server-setup/tasks/data-partition.yml | 212 +++++++ .../server-setup/tasks/decrypt-on-boot.yml | 29 + .../server-setup/tasks/elasticsearch.yml | 5 + .../server-setup/tasks/fail2ban.yml | 14 + infrastructure/server-setup/tasks/mongodb.yml | 9 + infrastructure/server-setup/tasks/swap.yml | 44 ++ infrastructure/server-setup/tasks/swarm.yml | 17 + infrastructure/server-setup/tasks/traefik.yml | 5 + infrastructure/server-setup/tasks/ufw.yml | 40 ++ 18 files changed, 575 insertions(+), 1671 deletions(-) delete mode 100644 infrastructure/server-setup/playbook-1.yml delete mode 100644 infrastructure/server-setup/playbook-3.yml delete mode 100644 infrastructure/server-setup/playbook-5.yml create mode 100644 infrastructure/server-setup/playbook.yml create mode 100644 infrastructure/server-setup/tasks/checks.yml create mode 100644 infrastructure/server-setup/tasks/crontab.yml create mode 100644 infrastructure/server-setup/tasks/data-partition.yml create mode 100644 infrastructure/server-setup/tasks/decrypt-on-boot.yml create mode 100644 infrastructure/server-setup/tasks/elasticsearch.yml create mode 100644 infrastructure/server-setup/tasks/fail2ban.yml create mode 100644 infrastructure/server-setup/tasks/mongodb.yml create mode 100644 infrastructure/server-setup/tasks/swap.yml create mode 100644 infrastructure/server-setup/tasks/swarm.yml create mode 100644 infrastructure/server-setup/tasks/traefik.yml create mode 100644 infrastructure/server-setup/tasks/ufw.yml diff --git a/infrastructure/server-setup/example-1.ini b/infrastructure/server-setup/example-1.ini index 3f2777aa1..97970d27e 100644 --- a/infrastructure/server-setup/example-1.ini +++ b/infrastructure/server-setup/example-1.ini @@ -8,11 +8,6 @@ ; Copyright (C) The OpenCRVS Authors located at https://github.com/opencrvs/opencrvs-core/blob/master/AUTHORS. [docker-manager-first] ; Uncomment the line below -; manager1 ansible_host="ENTER YOUR MANAGER HOST IP" +ENTER_HOSTNAME_1 ansible_host="ENTER YOUR MANAGER HOST IP" data_label=data1 -; Below you can assign 1 node to be the data node, use the node's HOSTNAME in these variables. -; These node will be used by databases to permanently store data. -; Used for Mongo replica sets -[all:vars] -; Uncomment the line below -; data1_hostname=ENTER_HOSTNAME_1 +[docker-workers] diff --git a/infrastructure/server-setup/example-3.ini b/infrastructure/server-setup/example-3.ini index abee8efe2..be642c038 100644 --- a/infrastructure/server-setup/example-3.ini +++ b/infrastructure/server-setup/example-3.ini @@ -8,19 +8,10 @@ ; Copyright (C) The OpenCRVS Authors located at https://github.com/opencrvs/opencrvs-core/blob/master/AUTHORS. 
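; Illustrative note on the new inventory format (a sketch based on this patch; the host name below is the placeholder from the example files): each host now declares its data label inline through the `data_label` host variable instead of the former `[all:vars] dataN_hostname=...` block. The unified playbook.yml reads the label via hostvars and labels the matching swarm node, so a worker defined as
;
;   ENTER_HOSTNAME_2 ansible_host="ENTER YOUR WORKER HOST IP" data_label=data2
;
; results in the manager running, roughly:
;
;   docker node update --label-add data2=true ENTER_HOSTNAME_2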
[docker-manager-first]
; Uncomment the line below
-; manager1 ansible_host="ENTER YOUR MANAGER HOST IP"
+ENTER_HOSTNAME_1 ansible_host="ENTER YOUR MANAGER HOST IP" data_label=data1
[docker-workers]
-; We recommend you add 2 workers for a usual production deployment
+; We recommend you add 2 workers for a scaled production deployment
; Uncomment the lines below
-; worker1 ansible_host="ENTER YOUR WORKER 1 HOST IP"
-; worker2 ansible_host="ENTER YOUR WORKER 2 HOST IP"
-
-; Below you can assign 3 node to be data nodes, use the node's HOSTNAME in these variables.
-; These node will be used by databases to permanently store data.
-; Used for Mongo replica sets
-[all:vars]
-; Uncomment the lines below
-; data1_hostname=ENTER_HOSTNAME_1
-; data2_hostname=ENTER_HOSTNAME_2
-; data3_hostname=ENTER_HOSTNAME_3
+ENTER_HOSTNAME_2 ansible_host="ENTER YOUR WORKER HOST IP" data_label=data2
+ENTER_HOSTNAME_3 ansible_host="ENTER YOUR WORKER HOST IP" data_label=data3
diff --git a/infrastructure/server-setup/example-5.ini b/infrastructure/server-setup/example-5.ini
index f1fed1688..27199d630 100644
--- a/infrastructure/server-setup/example-5.ini
+++ b/infrastructure/server-setup/example-5.ini
@@ -8,23 +8,12 @@
; Copyright (C) The OpenCRVS Authors located at https://github.com/opencrvs/opencrvs-core/blob/master/AUTHORS.
[docker-manager-first]
; Uncomment the line below
-; manager1 ansible_host="ENTER YOUR MANAGER HOST IP"
+ENTER_HOSTNAME_1 ansible_host="ENTER YOUR MANAGER HOST IP" data_label=data1
[docker-workers]
; We recommend you add 4 workers for a scaled production deployment
; Uncomment the lines below
-; worker1 ansible_host="ENTER YOUR WORKER 1 HOST IP"
-; worker2 ansible_host="ENTER YOUR WORKER 2 HOST IP"
-; worker3 ansible_host="ENTER YOUR WORKER 3 HOST IP"
-; worker4 ansible_host="ENTER YOUR WORKER 4 HOST IP"
-
-; Below you can assign 5 node to be data nodes, use the node's HOSTNAME in these variables.
-; These node will be used by databases to permanently store data.
-; Used for Mongo replica sets
-[all:vars]
-; Uncomment the lines below
-; data1_hostname=ENTER_HOSTNAME_1
-; data2_hostname=ENTER_HOSTNAME_2
-; data3_hostname=ENTER_HOSTNAME_3
-; data4_hostname=ENTER_HOSTNAME_4
-; data5_hostname=ENTER_HOSTNAME_5
+ENTER_HOSTNAME_2 ansible_host="ENTER YOUR WORKER 1 HOST IP" data_label=data2
+ENTER_HOSTNAME_3 ansible_host="ENTER YOUR WORKER 2 HOST IP" data_label=data3
+ENTER_HOSTNAME_4 ansible_host="ENTER YOUR WORKER 3 HOST IP" data_label=data4
+ENTER_HOSTNAME_5 ansible_host="ENTER YOUR WORKER 4 HOST IP" data_label=data5
diff --git a/infrastructure/server-setup/playbook-1.yml b/infrastructure/server-setup/playbook-1.yml
deleted file mode 100644
index 144f7355f..000000000
--- a/infrastructure/server-setup/playbook-1.yml
+++ /dev/null
@@ -1,538 +0,0 @@
-# This Source Code Form is subject to the terms of the Mozilla Public
-# License, v. 2.0. If a copy of the MPL was not distributed with this
-# file, You can obtain one at https://mozilla.org/MPL/2.0/.
-#
-# OpenCRVS is also distributed under the terms of the Civil Registration
-# & Healthcare Disclaimer located at http://opencrvs.org/license.
-#
-# Copyright (C) The OpenCRVS Authors located at https://github.com/opencrvs/opencrvs-core/blob/master/AUTHORS.
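# Usage sketch (an assumed invocation, not part of this patch): the size-specific playbooks deleted below are replaced by the single playbook.yml, so each deployment size is now driven by its inventory file plus the mandatory variables asserted in tasks/checks.yml, e.g.:
#
#   ansible-playbook -i example-3.ini playbook.yml \
#     -e mongodb_admin_username=... \
#     -e mongodb_admin_password=... \
#     -e elasticsearch_superuser_password=... \
#     -e disk_encryption_key=... \
#     -e encrypted_disk_size=...
#
# Because every include_tasks call in playbook.yml is tagged, an individual task module can be re-run in isolation, e.g.:
#
#   ansible-playbook -i example-3.ini playbook.yml --tags swap,ufw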
---- -- hosts: localhost - tasks: - - name: Create MongoDB replicate key file locally - local_action: shell openssl rand -base64 755 > /tmp/mongodb-keyfile -- hosts: all - become: yes - become_method: sudo - tasks: - - name: 'Check mandatory variables are defined' - assert: - that: - - mongodb_admin_username is defined - - mongodb_admin_password is defined - - elasticsearch_superuser_password is defined - - disk_encryption_key is defined - - encrypted_disk_size is defined - - - name: Update apt-get - apt: - update_cache: yes - cache_valid_time: 3600 - - - include_tasks: - file: tasks/application.yml - apply: - tags: - - application - tags: - - application - - - name: Setting global variables accessed by country configuration playbook - ansible.builtin.set_fact: - mongodb_admin_password={{ mongodb_admin_password }} - mongodb_admin_username={{ mongodb_admin_username }} - elasticsearch_superuser_password={{elasticsearch_superuser_password}} - disk_encryption_key={{ disk_encryption_key }} - - - include_tasks: - file: tasks/tools.yml - apply: - tags: - - tools - tags: - - tools - - - include_tasks: - file: tasks/docker.yml - apply: - tags: - - docker - tags: - - docker - - - include_tasks: - file: tasks/deployment-user.yml - apply: - tags: - - deployment-user - - users - tags: - - deployment-user - - users - - - name: 'Setup crontab to backup the opencrvs data' - cron: - name: 'backup opencrvs' - minute: '0' - hour: '0' - job: 'cd / && bash /opt/opencrvs/infrastructure/emergency-backup-metadata.sh --ssh_user={{ external_backup_server_user }} --ssh_host={{ external_backup_server_ip }} --ssh_port={{ external_backup_server_ssh_port }} --production_ip={{ manager_production_server_ip }} --remote_dir={{ external_backup_server_remote_directory }} --replicas=1 >> /var/log/opencrvs-backup.log 2>&1' - when: external_backup_server_ip is defined - - - name: Copy MongoDB replication security key file to nodes - copy: src=/tmp/mongodb-keyfile dest=/mongodb-keyfile mode=0400 force=no - - - name: Change access right of key file - file: - path: /mongodb-keyfile - state: file - owner: 1000 - group: 1000 - - - name: 'Precheck if encrypted file system exists so we dont try to bootstrap' - stat: - path: /cryptfs_file_sparse.img - get_checksum: False - get_md5: False - register: encryptedFileSystemPreCheck - - - name: 'Bootstrap encrypted data folder' - script: ../cryptfs/bootstrap.sh -s {{encrypted_disk_size}} -p {{disk_encryption_key}} - when: (not encryptedFileSystemPreCheck.stat.exists) - - - name: Wait for encrypted file system - ansible.builtin.wait_for: - path: /cryptfs_file_sparse.img - state: present - - - name: 'Register encrypted file system' - stat: - path: /cryptfs_file_sparse.img - get_checksum: False - get_md5: False - register: encryptedFileSystemPostCheck - - - name: 'Mount encrypted data folder' - script: ../cryptfs/mount.sh -p {{disk_encryption_key}} - when: encryptedFileSystemPostCheck.stat.exists - - # https://stackoverflow.com/a/24765946 - - name: Create swap file - command: - dd if=/dev/zero of={{ swap_file_path }} bs=1024 count={{ swap_file_size_mb }}k - creates="{{ swap_file_path }}" - tags: - - swap.file.create - - - name: Change swap file permissions - file: path="{{ swap_file_path }}" - owner=root - group=root - mode=0600 - tags: - - swap.file.permissions - - - name: 'Check swap file type' - command: file {{ swap_file_path }} - register: swapfile - tags: - - swap.file.mkswap - - - name: Make swap file - command: 'sudo mkswap {{ swap_file_path }}' - when: swapfile.stdout.find('swap file') 
== -1 - tags: - - swap.file.mkswap - - - name: Write swap entry in fstab - mount: name=none - src={{ swap_file_path }} - fstype=swap - opts=sw - passno=0 - dump=0 - state=present - tags: - - swap.fstab - - - name: Mount swap - command: 'swapon {{ swap_file_path }}' - when: ansible_swaptotal_mb < 1 - tags: - - swap.file.swapon - - - name: Check mongo data directory - stat: - path: /data/mongo - register: mongo_data - - - name: 'Create mongo data directory' - file: - path: /data/mongo - state: directory - when: not mongo_data.stat.exists - - - name: Check mongo data backup directory - stat: - path: /data/backups/mongo - register: mongo_data_backup - - - name: 'Create mongo backup directory' - file: - path: /data/backups/mongo - state: directory - group: 1000 - owner: 1000 - mode: g+rwx - when: not mongo_data_backup.stat.exists - - - name: 'Create traefik data directory' - file: - path: /data/traefik - state: directory - - - name: Check elasticsearch data directory - stat: - path: /data/elasticsearch - register: elasticsearch_data - - - name: 'Create elasticsearch data directory' - file: - path: /data/elasticsearch - state: directory - group: 1000 - owner: 1000 - mode: g+rwx - when: not elasticsearch_data.stat.exists - - - name: Check elasticsearch data backup directory - stat: - path: /data/backups/elasticsearch - register: elasticsearch_data_backup - - - name: 'Create elasticsearch backup directory' - file: - path: /data/backups/elasticsearch - state: directory - group: 1000 - owner: 1000 - mode: ugo+rwx - when: not elasticsearch_data_backup.stat.exists - - - name: Check metabase data directory - stat: - path: /data/metabase - register: metabase_data - - - name: 'Create metabase data directory' - file: - path: /data/metabase - state: directory - group: 1000 - owner: 1000 - mode: g+rwx - when: not metabase_data.stat.exists - - - name: Check metabase data backup directory - stat: - path: /data/backups/metabase - register: metabase_data_backup - - - name: 'Create metabase backup directory' - file: - path: /data/backups/metabase - state: directory - group: 1000 - owner: 1000 - mode: ugo+rwx - when: not metabase_data_backup.stat.exists - - - name: Check influxdb data directory - stat: - path: /data/influxdb - register: influxdb_data - - - name: 'Create influxdb data directory' - file: - path: /data/influxdb - state: directory - when: not influxdb_data.stat.exists - - - name: Check influxdb data backup directory - stat: - path: /data/backups/influxdb - register: influxdb_data_backup - - - name: 'Create influxdb backup directory' - file: - path: /data/backups/influxdb - state: directory - group: 1000 - owner: 1000 - mode: g+rwx - when: not influxdb_data_backup.stat.exists - - - name: Check minio data directory - stat: - path: /data/minio - register: minio_data - - - name: 'Create minio data directory' - file: - path: /data/minio - state: directory - group: 1000 - owner: 1000 - mode: ugo+rwx - when: not minio_data.stat.exists - - - name: Check minio data backup directory - stat: - path: /data/backups/minio - register: minio_data_backup - - - name: 'Create minio backup directory' - file: - path: /data/backups/minio - state: directory - group: 1000 - owner: 1000 - mode: ugo+rwx - when: not minio_data_backup.stat.exists - - - name: Check vsexport data directory - stat: - path: /data/vsexport - register: vsexport_data - - - name: 'Create vsexport data directory' - file: - path: /data/vsexport - state: directory - group: 1000 - owner: 1000 - mode: ugo+rwx - when: not vsexport_data.stat.exists - - - 
name: Check vsexport data backup directory - stat: - path: /data/backups/vsexport - register: vsexport_data_backup - - - name: 'Create vsexport backup directory' - file: - path: /data/backups/vsexport - state: directory - group: 1000 - owner: 1000 - mode: ugo+rwx - when: not vsexport_data_backup.stat.exists - - - name: 'Install UFW' - apt: - name: ufw - state: present - - - name: 'Allow OpenSSH through UFW' - ufw: - rule: allow - name: OpenSSH - - - name: 'Install Fail2Ban' - apt: - name: fail2ban - state: present - - - name: 'Copy fail2ban jail.local' - copy: - src: ../jail.local - dest: /etc/fail2ban/ - - - name: 'Start fail2ban and reload jail.local' - service: - name: fail2ban - state: restarted - - - name: 'Copy logrotate script' - copy: - src: ../logrotate.conf - dest: /etc/ - - - name: 'Save system logs to Papertrail' - register: papaertrailSystemLogs - shell: ' cd / && wget -qO - --header="X-Papertrail-Token: {{ papertrail_token }}" \ https://papertrailapp.com/destinations/16712142/setup.sh | sudo bash >> /var/log/papertrail.log 2>&1' - when: papertrail_token is defined - # Docker swarm ports - Note: all published docker container port will override UFW rules! - - name: 'Allow secure docker client communication' - ufw: - rule: allow - port: 2376 - proto: tcp - - name: 'Allow docker swarm communication among nodes - TCP' - ufw: - rule: allow - port: 7946 - proto: tcp - - name: 'Allow docker swarm communication among nodes - UDP' - ufw: - rule: allow - port: 7946 - proto: udp - - name: 'Allow docker overlay network traffic' - ufw: - rule: allow - port: 4789 - proto: udp - - - name: 'Deny everything else and enable UFW' - ufw: - state: enabled - default: deny - direction: incoming - - - name: 'Create secrets directory' - file: - path: /data/secrets - state: directory - group: 1000 - owner: 1000 - mode: g+rwx - - - name: Save secrets into encrypted folder for access by scripts - ansible.builtin.copy: - dest: /data/secrets/opencrvs.secrets - group: 1000 - owner: 1000 - mode: g+rwx - content: | - MONGODB_ADMIN_PASSWORD={{ mongodb_admin_password }} - MONGODB_ADMIN_USER={{ mongodb_admin_username }} - ELASTICSEARCH_ADMIN_PASSWORD={{elasticsearch_superuser_password}} - ELASTICSEARCH_ADMIN_USER=elastic - - - name: Save disk encryption key into a file as an example (in production use a hardware security module) - ansible.builtin.copy: - dest: /root/disk-encryption-key.txt - group: 1000 - owner: 1000 - mode: g+rwx - content: | - DISK_ENCRYPTION_KEY={{ disk_encryption_key }} - - - name: Copy reboot.service systemd file. 
Must decrypt disk on reboot - ansible.builtin.copy: - dest: /etc/systemd/system/reboot.service - group: 1000 - owner: 1000 - mode: g+rwx - content: | - [Unit] - Description=Mount encrypted dir - - [Service] - ExecStart=bash /opt/opencrvs/infrastructure/cryptfs/decrypt.sh -key /root/disk-encryption-key.txt >> /var/log/cryptfs-reboot.log 2>&1 - - [Install] - WantedBy=multi-user.target - when: encryptedFileSystemPostCheck.stat.exists - - - name: 'Setup systemd to mount encrypted folder' - shell: systemctl daemon-reload && systemctl enable reboot.service - when: encryptedFileSystemPostCheck.stat.exists - - # MOSIP integration requires wireguard and some secrets to be installed - # Here is an example configuration that we use on our 3 node demo production environment - # Ensure that the MOSIP supplied peer conf file containing your keys already exists on your servers in /etc/wireguard/ - # - name: Install wireguard package - # apt: - # name: wireguard - # state: present - # update_cache: yes - - # - name: Copy mosip wireguard peer 1 file - # ansible.builtin.copy: - # src: '{{ mosip_wireguard_peer_1_path }}' - # dest: /etc/wireguard/mosip-peer1.conf - # when: ansible_hostname == data1_hostname - - # - name: 'Run wireguard peer 1' - # shell: systemctl enable wg-quick@mosip-peer1 && systemctl start wg-quick@mosip-peer1 - # when: ansible_hostname == data1_hostname - - # - name: Copy mosip wireguard peer 2 file - # ansible.builtin.copy: - # src: '{{ mosip_wireguard_peer_2_path }}' - # dest: /etc/wireguard/mosip-peer2.conf - # when: ansible_hostname == data2_hostname - - # - name: 'Run wireguard peer 2' - # shell: systemctl enable wg-quick@mosip-peer2 && systemctl start wg-quick@mosip-peer2 - # when: ansible_hostname == data2_hostname - - # - name: Copy mosip wireguard peer 3 file - # ansible.builtin.copy: - # src: '{{ mosip_wireguard_peer_3_path }}' - # dest: /etc/wireguard/mosip-peer3.conf - # when: ansible_hostname == data3_hostname - - # - name: 'Run wireguard peer 3' - # shell: systemctl enable wg-quick@mosip-peer3 && systemctl start wg-quick@mosip-peer3 - # when: ansible_hostname == data3_hostname - - # - name: 'Create mosip secrets directory' - # file: - # path: /data/secrets/mosip - # state: directory - # group: 1000 - # owner: 1000 - # mode: g+rwx - - # - name: Copy mosip encrypt cert file - # ansible.builtin.copy: - # src: '{{ mosip_seeder_encrypt_cert_path }}' - # dest: /data/secrets/mosip/{{ mosip_seeder_encrypt_cert_filename }} - - # - name: Copy mosip encrypt sig file - # ansible.builtin.copy: - # src: '{{ mosip_seeder_encrypt_sig_path }}' - # dest: /data/secrets/mosip/{{ mosip_seeder_encrypt_sig_filename }} - -- hosts: docker-manager-first - become: yes - become_method: sudo - tasks: - - name: 'Allow secure docker swarm node communication (managers only)' - ufw: - rule: allow - port: 2377 - proto: tcp - - - name: 'Create primary swarm manager' - shell: docker swarm init --advertise-addr {{ ansible_default_ipv4.address }} - when: "docker_info.stdout.find('Swarm: inactive') != -1" - - - name: 'Get docker swarm manager token' - shell: docker swarm join-token -q manager - register: manager_token - - - name: 'Get docker swarm worker token' - shell: docker swarm join-token -q worker - register: worker_token - - - name: 'Set higher max map count for elastic search' - sysctl: - name: vm.max_map_count - value: 262144 - state: present - - - name: 'Create acme file for traefik' - file: - path: /data/traefik/acme.json - state: touch - mode: '600' - -- hosts: docker-manager-first - become: yes - 
become_method: sudo - tasks: - - name: 'Label node as data1' - shell: docker node update --label-add data1=true {{ data1_hostname }} diff --git a/infrastructure/server-setup/playbook-3.yml b/infrastructure/server-setup/playbook-3.yml deleted file mode 100644 index 2c04767f6..000000000 --- a/infrastructure/server-setup/playbook-3.yml +++ /dev/null @@ -1,555 +0,0 @@ -# This Source Code Form is subject to the terms of the Mozilla Public -# License, v. 2.0. If a copy of the MPL was not distributed with this -# file, You can obtain one at https://mozilla.org/MPL/2.0/. -# -# OpenCRVS is also distributed under the terms of the Civil Registration -# & Healthcare Disclaimer located at http://opencrvs.org/license. -# -# Copyright (C) The OpenCRVS Authors located at https://github.com/opencrvs/opencrvs-core/blob/master/AUTHORS. ---- -- hosts: localhost - tasks: - - name: Create MongoDB replicate key file locally - local_action: shell openssl rand -base64 755 > /tmp/mongodb-keyfile -- hosts: all - become: yes - become_method: sudo - tasks: - - name: 'Create app directory' - file: - path: /opt/opencrvs - state: directory - - name: 'Check mandatory variables are defined' - assert: - that: - - mongodb_admin_username is defined - - mongodb_admin_password is defined - - elasticsearch_superuser_password is defined - - disk_encryption_key is defined - - encrypted_disk_size is defined - - - name: Setting global variables accessed by country configuration playbook - ansible.builtin.set_fact: - mongodb_admin_password={{ mongodb_admin_password }} - mongodb_admin_username={{ mongodb_admin_username }} - elasticsearch_superuser_password={{elasticsearch_superuser_password}} - disk_encryption_key={{ disk_encryption_key }} - # MOSIP integration requires wireguard and some secrets to be installed - # Here is an example configuration that we use on our 3 node demo production environment - # mosip_wireguard_peer_1_path={{ mosip_wireguard_peer_1_path }} - # mosip_wireguard_peer_2_path={{ mosip_wireguard_peer_2_path }} - # mosip_wireguard_peer_3_path={{ mosip_wireguard_peer_3_path }} - # mosip_seeder_encrypt_cert_path={{ mosip_seeder_encrypt_cert_path }} - # mosip_seeder_encrypt_cert_filename={{ mosip_seeder_encrypt_cert_filename }} - # mosip_seeder_encrypt_sig_path={{ mosip_seeder_encrypt_sig_path }} - # mosip_seeder_encrypt_sig_filename={{ mosip_seeder_encrypt_sig_filename }} - - include_tasks: - file: tasks/tools.yml - apply: - tags: - - tools - tags: - - tools - - - include_tasks: - file: tasks/docker.yml - apply: - tags: - - docker - tags: - - docker - - - name: 'Setup crontab to backup the opencrvs data' - cron: - name: 'backup opencrvs' - minute: '0' - hour: '0' - job: 'cd / && bash /opt/opencrvs/infrastructure/emergency-backup-metadata.sh --ssh_user={{ external_backup_server_user }} --ssh_host={{ external_backup_server_ip }} --ssh_port={{ external_backup_server_ssh_port }} --production_ip={{ manager_production_server_ip }} --remote_dir={{ external_backup_server_remote_directory }} --replicas=3 >> /var/log/opencrvs-backup.log 2>&1' - when: external_backup_server_ip is defined - - - name: Copy MongoDB replication security key file to nodes - copy: src=/tmp/mongodb-keyfile dest=/mongodb-keyfile mode=0400 force=no - - - name: Change access right of key file - file: - path: /mongodb-keyfile - state: file - owner: 1000 - group: 1000 - mode: 0400 - - - name: 'Precheck if encrypted file system exists so we dont try to bootstrap' - stat: - path: /cryptfs_file_sparse.img - get_checksum: False - get_md5: False - register: 
encryptedFileSystemPreCheck - - - name: 'Bootstrap encrypted data folder' - script: ../cryptfs/bootstrap.sh -s {{encrypted_disk_size}} -p {{disk_encryption_key}} - when: (not encryptedFileSystemPreCheck.stat.exists) - - - name: Wait for encrypted file system - ansible.builtin.wait_for: - path: /cryptfs_file_sparse.img - state: present - - - name: 'Register encrypted file system' - stat: - path: /cryptfs_file_sparse.img - get_checksum: False - get_md5: False - register: encryptedFileSystemPostCheck - - - name: 'Mount encrypted data folder' - script: ../cryptfs/mount.sh -p {{disk_encryption_key}} - when: encryptedFileSystemPostCheck.stat.exists - - # https://stackoverflow.com/a/24765946 - - name: Create swap file - command: - dd if=/dev/zero of={{ swap_file_path }} bs=1024 count={{ swap_file_size_mb }}k - creates="{{ swap_file_path }}" - tags: - - swap.file.create - - - name: Change swap file permissions - file: path="{{ swap_file_path }}" - owner=root - group=root - mode=0600 - tags: - - swap.file.permissions - - - name: 'Check swap file type' - command: file {{ swap_file_path }} - register: swapfile - tags: - - swap.file.mkswap - - - name: Make swap file - command: 'sudo mkswap {{ swap_file_path }}' - when: swapfile.stdout.find('swap file') == -1 - tags: - - swap.file.mkswap - - - name: Write swap entry in fstab - mount: name=none - src={{ swap_file_path }} - fstype=swap - opts=sw - passno=0 - dump=0 - state=present - tags: - - swap.fstab - - - name: Mount swap - command: 'swapon {{ swap_file_path }}' - when: ansible_swaptotal_mb < 1 - tags: - - swap.file.swapon - - - name: Check mongo data directory - stat: - path: /data/mongo - register: mongo_data - - - name: 'Create mongo data directory' - file: - path: /data/mongo - state: directory - when: not mongo_data.stat.exists - - - name: Check mongo data backup directory - stat: - path: /data/backups/mongo - register: mongo_data_backup - - - name: 'Create mongo backup directory' - file: - path: /data/backups/mongo - state: directory - group: 1000 - owner: 1000 - mode: g+rwx - when: not mongo_data_backup.stat.exists - - - name: 'Create traefik data directory' - file: - path: /data/traefik - state: directory - - - name: Check elasticsearch data directory - stat: - path: /data/elasticsearch - register: elasticsearch_data - - - name: 'Create elasticsearch data directory' - file: - path: /data/elasticsearch - state: directory - group: 1000 - owner: 1000 - mode: g+rwx - when: not elasticsearch_data.stat.exists - - - name: Check elasticsearch data backup directory - stat: - path: /data/backups/elasticsearch - register: elasticsearch_data_backup - - - name: 'Create elasticsearch backup directory' - file: - path: /data/backups/elasticsearch - state: directory - group: 1000 - owner: 1000 - mode: g+rwx - when: not elasticsearch_data_backup.stat.exists - - - name: Check metabase data directory - stat: - path: /data/metabase - register: metabase_data - - - name: 'Create metabase data directory' - file: - path: /data/metabase - state: directory - group: 1000 - owner: 1000 - mode: g+rwx - when: not metabase_data.stat.exists - - - name: Check metabase data backup directory - stat: - path: /data/backups/metabase - register: metabase_data_backup - - - name: 'Create metabase backup directory' - file: - path: /data/backups/metabase - state: directory - group: 1000 - owner: 1000 - mode: ugo+rwx - when: not metabase_data_backup.stat.exists - - - name: Check influxdb data directory - stat: - path: /data/influxdb - register: influxdb_data - - - name: 'Create influxdb 
data directory' - file: - path: /data/influxdb - state: directory - when: not influxdb_data.stat.exists - - - name: Check influxdb data backup directory - stat: - path: /data/backups/influxdb - register: influxdb_data_backup - - - name: 'Create influxdb backup directory' - file: - path: /data/backups/influxdb - state: directory - group: 1000 - owner: 1000 - mode: g+rwx - when: not influxdb_data_backup.stat.exists - - - name: Check minio data directory - stat: - path: /data/minio - register: minio_data - - - name: 'Create minio data directory' - file: - path: /data/minio - state: directory - group: 1000 - owner: 1000 - mode: ugo+rwx - when: not minio_data.stat.exists - - - name: Check minio data backup directory - stat: - path: /data/backups/minio - register: minio_data_backup - - - name: 'Create minio backup directory' - file: - path: /data/backups/minio - state: directory - group: 1000 - owner: 1000 - mode: ugo+rwx - when: not minio_data_backup.stat.exists - - - name: Check vsexport data directory - stat: - path: /data/vsexport - register: vsexport_data - - - name: 'Create vsexport data directory' - file: - path: /data/vsexport - state: directory - group: 1000 - owner: 1000 - mode: ugo+rwx - when: not vsexport_data.stat.exists - - - name: Check vsexport data backup directory - stat: - path: /data/backups/vsexport - register: vsexport_data_backup - - - name: 'Create vsexport backup directory' - file: - path: /data/backups/vsexport - state: directory - group: 1000 - owner: 1000 - mode: ugo+rwx - when: not vsexport_data_backup.stat.exists - - - name: 'Install UFW' - apt: - name: ufw - state: present - - - name: 'Allow OpenSSH through UFW' - ufw: - rule: allow - name: OpenSSH - - - name: 'Install Fail2Ban' - apt: - name: fail2ban - state: present - - - name: 'Copy fail2ban jail.local' - copy: - src: ../jail.local - dest: /etc/fail2ban/ - - - name: 'Start fail2ban and reload jail.local' - service: - name: fail2ban - state: restarted - - - name: 'Copy logrotate script' - copy: - src: ../logrotate.conf - dest: /etc/ - - - name: Create deploy logfile - ansible.builtin.file: - path: /var/log/setup-deploy-config.log - owner: '{{ ansible_user }}' - group: '{{ ansible_user }}' - state: touch - mode: 'u+rwX,g+rwX,o-rwx' - - - name: 'Save system logs to Papertrail' - register: papaertrailSystemLogs - shell: ' cd / && wget -qO - --header="X-Papertrail-Token: {{ papertrail_token }}" \ https://papertrailapp.com/destinations/16712142/setup.sh | sudo bash >> /var/log/papertrail.log 2>&1' - when: papertrail_token is defined - # Docker swarm ports - Note: all published docker container port will override UFW rules! 
- - name: 'Allow secure docker client communication' - ufw: - rule: allow - port: 2376 - proto: tcp - - name: 'Allow docker swarm communication among nodes - TCP' - ufw: - rule: allow - port: 7946 - proto: tcp - - name: 'Allow docker swarm communication among nodes - UDP' - ufw: - rule: allow - port: 7946 - proto: udp - - name: 'Allow docker overlay network traffic' - ufw: - rule: allow - port: 4789 - proto: udp - - name: Allow all access to tcp port 443 - ufw: - rule: allow - port: '443' - proto: tcp - - - name: 'Deny everything else and enable UFW' - ufw: - state: enabled - default: deny - direction: incoming - - - name: 'Create secrets directory' - file: - path: /data/secrets - state: directory - group: 1000 - owner: 1000 - mode: g+rwx - - - name: Save secrets into encrypted folder for access by scripts - ansible.builtin.copy: - dest: /data/secrets/opencrvs.secrets - group: 1000 - owner: 1000 - mode: g+rwx - content: | - MONGODB_ADMIN_PASSWORD={{ mongodb_admin_password }} - MONGODB_ADMIN_USER={{ mongodb_admin_username }} - ELASTICSEARCH_ADMIN_PASSWORD={{elasticsearch_superuser_password}} - ELASTICSEARCH_ADMIN_USER=elastic - - - name: Save disk encryption key into a file as an example (in production use a hardware security module) - ansible.builtin.copy: - dest: /root/disk-encryption-key.txt - group: 1000 - owner: 1000 - mode: g+rwx - content: | - DISK_ENCRYPTION_KEY={{ disk_encryption_key }} - - - name: Copy reboot.service systemd file. Must decrypt disk on reboot - ansible.builtin.copy: - dest: /etc/systemd/system/reboot.service - group: 1000 - owner: 1000 - mode: g+rwx - content: | - [Unit] - Description=Mount encrypted dir - - [Service] - ExecStart=bash /opt/opencrvs/infrastructure/cryptfs/decrypt.sh -key /root/disk-encryption-key.txt >> /var/log/cryptfs-reboot.log 2>&1 - - [Install] - WantedBy=multi-user.target - when: encryptedFileSystemPostCheck.stat.exists - - - name: 'Setup systemd to mount encrypted folder' - shell: systemctl daemon-reload && systemctl enable reboot.service - when: encryptedFileSystemPostCheck.stat.exists - - # MOSIP integration requires wireguard and some secrets to be installed - # Here is an example configuration that we use on our 3 node demo production environment - # Ensure that the MOSIP supplied peer conf file containing your keys already exists on your servers in /etc/wireguard/ - # - name: Install wireguard package - # apt: - # name: wireguard - # state: present - # update_cache: yes - - # - name: Copy mosip wireguard peer 1 file - # ansible.builtin.copy: - # src: '{{ mosip_wireguard_peer_1_path }}' - # dest: /etc/wireguard/mosip-peer1.conf - # when: ansible_hostname == data1_hostname - - # - name: 'Run wireguard peer 1' - # shell: systemctl enable wg-quick@mosip-peer1 && systemctl start wg-quick@mosip-peer1 - # when: ansible_hostname == data1_hostname - - # - name: Copy mosip wireguard peer 2 file - # ansible.builtin.copy: - # src: '{{ mosip_wireguard_peer_2_path }}' - # dest: /etc/wireguard/mosip-peer2.conf - # when: ansible_hostname == data2_hostname - - # - name: 'Run wireguard peer 2' - # shell: systemctl enable wg-quick@mosip-peer2 && systemctl start wg-quick@mosip-peer2 - # when: ansible_hostname == data2_hostname - - # - name: Copy mosip wireguard peer 3 file - # ansible.builtin.copy: - # src: '{{ mosip_wireguard_peer_3_path }}' - # dest: /etc/wireguard/mosip-peer3.conf - # when: ansible_hostname == data3_hostname - - # - name: 'Run wireguard peer 3' - # shell: systemctl enable wg-quick@mosip-peer3 && systemctl start wg-quick@mosip-peer3 - # 
when: ansible_hostname == data3_hostname - - # - name: 'Create mosip secrets directory' - # file: - # path: /data/secrets/mosip - # state: directory - # group: 1000 - # owner: 1000 - # mode: g+rwx - - # - name: Copy mosip encrypt cert file - # ansible.builtin.copy: - # src: '{{ mosip_seeder_encrypt_cert_path }}' - # dest: /data/secrets/mosip/{{ mosip_seeder_encrypt_cert_filename }} - - # - name: Copy mosip encrypt sig file - # ansible.builtin.copy: - # src: '{{ mosip_seeder_encrypt_sig_path }}' - # dest: /data/secrets/mosip/{{ mosip_seeder_encrypt_sig_filename }} - -- hosts: docker-manager-first - become: yes - become_method: sudo - tasks: - - name: 'Allow secure docker swarm node communication (managers only)' - ufw: - rule: allow - port: 2377 - proto: tcp - - - name: 'Create primary swarm manager' - shell: docker swarm init --advertise-addr {{ ansible_default_ipv4.address }} - when: "docker_info.stdout.find('Swarm: inactive') != -1" - - - name: 'Get docker swarm manager token' - shell: docker swarm join-token -q manager - register: manager_token - - - name: 'Get docker swarm worker token' - shell: docker swarm join-token -q worker - register: worker_token - - - name: 'Set higher max map count for elastic search' - sysctl: - name: vm.max_map_count - value: 262144 - state: present - - - name: 'Create acme file for traefik' - file: - path: /data/traefik/acme.json - state: touch - mode: '600' - -- hosts: docker-workers - become: yes - become_method: sudo - tasks: - - name: 'Join as a worker' - shell: "docker swarm join --token {{ hostvars['manager1']['worker_token']['stdout'] }} {{ hostvars['manager1']['ansible_default_ipv4']['address'] }}:2377" - when: "docker_info.stdout.find('Swarm: inactive') != -1" - retries: 3 - delay: 20 - -- hosts: docker-manager-first - become: yes - become_method: sudo - tasks: - - name: 'Label node as data1' - shell: docker node update --label-add data1=true {{ data1_hostname }} - - name: 'Label node as data2' - shell: docker node update --label-add data2=true {{ data2_hostname }} - - name: 'Label node as data3' - shell: docker node update --label-add data3=true {{ data3_hostname }} diff --git a/infrastructure/server-setup/playbook-5.yml b/infrastructure/server-setup/playbook-5.yml deleted file mode 100644 index a17242c4f..000000000 --- a/infrastructure/server-setup/playbook-5.yml +++ /dev/null @@ -1,542 +0,0 @@ -# This Source Code Form is subject to the terms of the Mozilla Public -# License, v. 2.0. If a copy of the MPL was not distributed with this -# file, You can obtain one at https://mozilla.org/MPL/2.0/. -# -# OpenCRVS is also distributed under the terms of the Civil Registration -# & Healthcare Disclaimer located at http://opencrvs.org/license. -# -# Copyright (C) The OpenCRVS Authors located at https://github.com/opencrvs/opencrvs-core/blob/master/AUTHORS. 
---- -- hosts: localhost - tasks: - - name: Create MongoDB replicate key file locally - local_action: shell openssl rand -base64 755 > /tmp/mongodb-keyfile -- hosts: all - become: yes - become_method: sudo - tasks: - - name: 'Create app directory' - file: - path: /opt/opencrvs - state: directory - - name: 'Check mandatory variables are defined' - assert: - that: - - mongodb_admin_username is defined - - mongodb_admin_password is defined - - elasticsearch_superuser_password is defined - - disk_encryption_key is defined - - encrypted_disk_size is defined - - - name: Setting global variables accessed by country configuration playbook - ansible.builtin.set_fact: - mongodb_admin_password={{ mongodb_admin_password }} - mongodb_admin_username={{ mongodb_admin_username }} - elasticsearch_superuser_password={{elasticsearch_superuser_password}} - disk_encryption_key={{ disk_encryption_key }} - - - include_tasks: - file: tasks/tools.yml - apply: - tags: - - tools - tags: - - tools - - - include_tasks: - file: tasks/docker.yml - apply: - tags: - - docker - tags: - - docker - - - name: 'Setup crontab to backup the opencrvs data' - cron: - name: 'backup opencrvs' - minute: '0' - hour: '0' - job: 'cd / && bash /opt/opencrvs/infrastructure/emergency-backup-metadata.sh --ssh_user={{ external_backup_server_user }} --ssh_host={{ external_backup_server_ip }} --ssh_port={{ external_backup_server_ssh_port }} --production_ip={{ manager_production_server_ip }} --remote_dir={{ external_backup_server_remote_directory }} --replicas=5 >> /var/log/opencrvs-backup.log 2>&1' - when: external_backup_server_ip is defined - - - name: Copy MongoDB replication security key file to nodes - copy: src=/tmp/mongodb-keyfile dest=/mongodb-keyfile mode=0400 force=no - - - name: Change access right of key file - file: - path: /mongodb-keyfile - state: file - owner: 1000 - group: 1000 - - - name: 'Precheck if encrypted file system exists so we dont try to bootstrap' - stat: - path: /cryptfs_file_sparse.img - get_checksum: False - get_md5: False - register: encryptedFileSystemPreCheck - - - name: 'Bootstrap encrypted data folder' - script: ../cryptfs/bootstrap.sh -s {{encrypted_disk_size}} -p {{disk_encryption_key}} - when: (not encryptedFileSystemPreCheck.stat.exists) - - - name: Wait for encrypted file system - ansible.builtin.wait_for: - path: /cryptfs_file_sparse.img - state: present - - - name: 'Register encrypted file system' - stat: - path: /cryptfs_file_sparse.img - get_checksum: False - get_md5: False - register: encryptedFileSystemPostCheck - - - name: 'Mount encrypted data folder' - script: ../cryptfs/mount.sh -p {{disk_encryption_key}} - when: encryptedFileSystemPostCheck.stat.exists - - # https://stackoverflow.com/a/24765946 - - name: Create swap file - command: - dd if=/dev/zero of={{ swap_file_path }} bs=1024 count={{ swap_file_size_mb }}k - creates="{{ swap_file_path }}" - tags: - - swap.file.create - - - name: Change swap file permissions - file: path="{{ swap_file_path }}" - owner=root - group=root - mode=0600 - tags: - - swap.file.permissions - - - name: 'Check swap file type' - command: file {{ swap_file_path }} - register: swapfile - tags: - - swap.file.mkswap - - - name: Make swap file - command: 'sudo mkswap {{ swap_file_path }}' - when: swapfile.stdout.find('swap file') == -1 - tags: - - swap.file.mkswap - - - name: Write swap entry in fstab - mount: name=none - src={{ swap_file_path }} - fstype=swap - opts=sw - passno=0 - dump=0 - state=present - tags: - - swap.fstab - - - name: Mount swap - command: 'swapon 
{{ swap_file_path }}' - when: ansible_swaptotal_mb < 1 - tags: - - swap.file.swapon - - - name: Check mongo data directory - stat: - path: /data/mongo - register: mongo_data - - - name: 'Create mongo data directory' - file: - path: /data/mongo - state: directory - when: not mongo_data.stat.exists - - - name: Check mongo data backup directory - stat: - path: /data/backups/mongo - register: mongo_data_backup - - - name: 'Create mongo backup directory' - file: - path: /data/backups/mongo - state: directory - group: 1000 - owner: 1000 - mode: g+rwx - when: not mongo_data_backup.stat.exists - - - name: 'Create traefik data directory' - file: - path: /data/traefik - state: directory - - - name: Check elasticsearch data directory - stat: - path: /data/elasticsearch - register: elasticsearch_data - - - name: 'Create elasticsearch data directory' - file: - path: /data/elasticsearch - state: directory - group: 1000 - owner: 1000 - mode: g+rwx - when: not elasticsearch_data.stat.exists - - - name: Check elasticsearch data backup directory - stat: - path: /data/backups/elasticsearch - register: elasticsearch_data_backup - - - name: 'Create elasticsearch backup directory' - file: - path: /data/backups/elasticsearch - state: directory - group: 1000 - owner: 1000 - mode: g+rwx - when: not elasticsearch_data_backup.stat.exists - - - name: Check metabase data directory - stat: - path: /data/metabase - register: metabase_data - - - name: 'Create metabase data directory' - file: - path: /data/metabase - state: directory - group: 1000 - owner: 1000 - mode: g+rwx - when: not metabase_data.stat.exists - - - name: Check metabase data backup directory - stat: - path: /data/backups/metabase - register: metabase_data_backup - - - name: 'Create metabase backup directory' - file: - path: /data/backups/metabase - state: directory - group: 1000 - owner: 1000 - mode: ugo+rwx - when: not metabase_data_backup.stat.exists - - - name: Check influxdb data directory - stat: - path: /data/influxdb - register: influxdb_data - - - name: 'Create influxdb data directory' - file: - path: /data/influxdb - state: directory - when: not influxdb_data.stat.exists - - - name: Check influxdb data backup directory - stat: - path: /data/backups/influxdb - register: influxdb_data_backup - - - name: 'Create influxdb backup directory' - file: - path: /data/backups/influxdb - state: directory - group: 1000 - owner: 1000 - mode: g+rwx - when: not influxdb_data_backup.stat.exists - - - name: Check minio data directory - stat: - path: /data/minio - register: minio_data - - - name: 'Create minio data directory' - file: - path: /data/minio - state: directory - group: 1000 - owner: 1000 - mode: ugo+rwx - when: not minio_data.stat.exists - - - name: Check minio data backup directory - stat: - path: /data/backups/minio - register: minio_data_backup - - - name: 'Create minio backup directory' - file: - path: /data/backups/minio - state: directory - group: 1000 - owner: 1000 - mode: ugo+rwx - when: not minio_data_backup.stat.exists - - - name: Check vsexport data directory - stat: - path: /data/vsexport - register: vsexport_data - - - name: 'Create vsexport data directory' - file: - path: /data/vsexport - state: directory - group: 1000 - owner: 1000 - mode: ugo+rwx - when: not vsexport_data.stat.exists - - - name: Check vsexport data backup directory - stat: - path: /data/backups/vsexport - register: vsexport_data_backup - - - name: 'Create vsexport backup directory' - file: - path: /data/backups/vsexport - state: directory - group: 1000 - owner: 1000 - 
mode: ugo+rwx - when: not vsexport_data_backup.stat.exists - - - name: 'Install UFW' - apt: - name: ufw - state: present - - - name: 'Allow OpenSSH through UFW' - ufw: - rule: allow - name: OpenSSH - - - name: 'Install Fail2Ban' - apt: - name: fail2ban - state: present - - - name: 'Copy fail2ban jail.local' - copy: - src: ../jail.local - dest: /etc/fail2ban/ - - - name: 'Start fail2ban and reload jail.local' - service: - name: fail2ban - state: restarted - - - name: 'Copy logrotate script' - copy: - src: ../logrotate.conf - dest: /etc/ - - - name: 'Save system logs to Papertrail' - register: papaertrailSystemLogs - shell: ' cd / && wget -qO - --header="X-Papertrail-Token: {{ papertrail_token }}" \ https://papertrailapp.com/destinations/16712142/setup.sh | sudo bash >> /var/log/papertrail.log 2>&1' - when: papertrail_token is defined - # Docker swarm ports - Note: all published docker container port will override UFW rules! - - name: 'Allow secure docker client communication' - ufw: - rule: allow - port: 2376 - proto: tcp - - name: 'Allow docker swarm communication among nodes - TCP' - ufw: - rule: allow - port: 7946 - proto: tcp - - name: 'Allow docker swarm communication among nodes - UDP' - ufw: - rule: allow - port: 7946 - proto: udp - - name: 'Allow docker overlay network traffic' - ufw: - rule: allow - port: 4789 - proto: udp - - name: Allow all access to tcp port 443 - ufw: - rule: allow - port: '443' - proto: tcp - - - name: 'Deny everything else and enable UFW' - ufw: - state: enabled - default: deny - direction: incoming - - - name: 'Create secrets directory' - file: - path: /data/secrets - state: directory - group: 1000 - owner: 1000 - mode: g+rwx - - - name: Save secrets into encrypted folder for access by scripts - ansible.builtin.copy: - dest: /data/secrets/opencrvs.secrets - group: 1000 - owner: 1000 - mode: g+rwx - content: | - MONGODB_ADMIN_PASSWORD={{ mongodb_admin_password }} - MONGODB_ADMIN_USER={{ mongodb_admin_username }} - ELASTICSEARCH_ADMIN_PASSWORD={{elasticsearch_superuser_password}} - ELASTICSEARCH_ADMIN_USER=elastic - - - name: Save disk encryption key into a file as an example (in production use a hardware security module) - ansible.builtin.copy: - dest: /root/disk-encryption-key.txt - group: 1000 - owner: 1000 - mode: g+rwx - content: | - DISK_ENCRYPTION_KEY={{ disk_encryption_key }} - - - name: Copy reboot.service systemd file. 
Must decrypt disk on reboot - ansible.builtin.copy: - dest: /etc/systemd/system/reboot.service - group: 1000 - owner: 1000 - mode: g+rwx - content: | - [Unit] - Description=Mount encrypted dir - - [Service] - ExecStart=bash /opt/opencrvs/infrastructure/cryptfs/decrypt.sh -key /root/disk-encryption-key.txt >> /var/log/cryptfs-reboot.log 2>&1 - - [Install] - WantedBy=multi-user.target - when: encryptedFileSystemPostCheck.stat.exists - - - name: 'Setup systemd to mount encrypted folder' - shell: systemctl daemon-reload && systemctl enable reboot.service - when: encryptedFileSystemPostCheck.stat.exists - - # MOSIP integration requires wireguard and some secrets to be installed - # Here is an example configuration that we use on our 3 node demo production environment - # Ensure that the MOSIP supplied peer conf file containing your keys already exists on your servers in /etc/wireguard/ - # - name: Install wireguard package - # apt: - # name: wireguard - # state: present - # update_cache: yes - - # - name: Copy mosip wireguard peer 1 file - # ansible.builtin.copy: - # src: '{{ mosip_wireguard_peer_1_path }}' - # dest: /etc/wireguard/mosip-peer1.conf - # when: ansible_hostname == data1_hostname - - # - name: 'Run wireguard peer 1' - # shell: systemctl enable wg-quick@mosip-peer1 && systemctl start wg-quick@mosip-peer1 - # when: ansible_hostname == data1_hostname - - # - name: Copy mosip wireguard peer 2 file - # ansible.builtin.copy: - # src: '{{ mosip_wireguard_peer_2_path }}' - # dest: /etc/wireguard/mosip-peer2.conf - # when: ansible_hostname == data2_hostname - - # - name: 'Run wireguard peer 2' - # shell: systemctl enable wg-quick@mosip-peer2 && systemctl start wg-quick@mosip-peer2 - # when: ansible_hostname == data2_hostname - - # - name: Copy mosip wireguard peer 3 file - # ansible.builtin.copy: - # src: '{{ mosip_wireguard_peer_3_path }}' - # dest: /etc/wireguard/mosip-peer3.conf - # when: ansible_hostname == data3_hostname - - # - name: 'Run wireguard peer 3' - # shell: systemctl enable wg-quick@mosip-peer3 && systemctl start wg-quick@mosip-peer3 - # when: ansible_hostname == data3_hostname - - # - name: 'Create mosip secrets directory' - # file: - # path: /data/secrets/mosip - # state: directory - # group: 1000 - # owner: 1000 - # mode: g+rwx - - # - name: Copy mosip encrypt cert file - # ansible.builtin.copy: - # src: '{{ mosip_seeder_encrypt_cert_path }}' - # dest: /data/secrets/mosip/{{ mosip_seeder_encrypt_cert_filename }} - - # - name: Copy mosip encrypt sig file - # ansible.builtin.copy: - # src: '{{ mosip_seeder_encrypt_sig_path }}' - # dest: /data/secrets/mosip/{{ mosip_seeder_encrypt_sig_filename }} - -- hosts: docker-manager-first - become: yes - become_method: sudo - tasks: - - name: 'Allow secure docker swarm node communication (managers only)' - ufw: - rule: allow - port: 2377 - proto: tcp - - - name: 'Create primary swarm manager' - shell: docker swarm init --advertise-addr {{ ansible_default_ipv4.address }} - when: "docker_info.stdout.find('Swarm: inactive') != -1" - - - name: 'Get docker swarm manager token' - shell: docker swarm join-token -q manager - register: manager_token - - - name: 'Get docker swarm worker token' - shell: docker swarm join-token -q worker - register: worker_token - - - name: 'Set higher max map count for elastic search' - sysctl: - name: vm.max_map_count - value: 262144 - state: present - - - name: 'Create acme file for traefik' - file: - path: /data/traefik/acme.json - state: touch - mode: '600' - -- hosts: docker-workers - become: yes - 
become_method: sudo - tasks: - - name: 'Join as a worker' - shell: "docker swarm join --token {{ hostvars['manager1']['worker_token']['stdout'] }} {{ hostvars['manager1']['ansible_default_ipv4']['address'] }}:2377" - when: "docker_info.stdout.find('Swarm: inactive') != -1" - retries: 3 - delay: 20 - -- hosts: docker-manager-first - become: yes - become_method: sudo - tasks: - - name: 'Label node as data1' - shell: docker node update --label-add data1=true {{ data1_hostname }} - - name: 'Label node as data2' - shell: docker node update --label-add data2=true {{ data2_hostname }} - - name: 'Label node as data3' - shell: docker node update --label-add data3=true {{ data3_hostname }} - - name: 'Label node as data4' - shell: docker node update --label-add data4=true {{ data4_hostname }} - - name: 'Label node as data5' - shell: docker node update --label-add data5=true {{ data5_hostname }} diff --git a/infrastructure/server-setup/playbook.yml b/infrastructure/server-setup/playbook.yml new file mode 100644 index 000000000..fc08876b8 --- /dev/null +++ b/infrastructure/server-setup/playbook.yml @@ -0,0 +1,169 @@ +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at https://mozilla.org/MPL/2.0/. +# +# OpenCRVS is also distributed under the terms of the Civil Registration +# & Healthcare Disclaimer located at http://opencrvs.org/license. +# +# Copyright (C) The OpenCRVS Authors located at https://github.com/opencrvs/opencrvs-core/blob/master/AUTHORS. +--- +- hosts: localhost + tasks: + - name: Create MongoDB replicate key file locally + local_action: shell openssl rand -base64 755 > /tmp/mongodb-keyfile +- hosts: all + become: yes + become_method: sudo + tasks: + - include_tasks: + file: tasks/checks.yml + apply: + tags: + - checks + tags: + - checks + + - include_tasks: + file: tasks/application.yml + apply: + tags: + - application + tags: + - application + + - include_tasks: + file: tasks/tools.yml + apply: + tags: + - tools + tags: + - tools + + - include_tasks: + file: tasks/docker.yml + apply: + tags: + - docker + tags: + - docker + + - include_tasks: + file: tasks/deployment-user.yml + apply: + tags: + - deployment-user + - users + tags: + - deployment-user + - users + + - include_tasks: + file: tasks/crontab.yml + apply: + tags: + - crontab + tags: + - crontab + + - include_tasks: + file: tasks/mongodb.yml + apply: + tags: + - mongodb + tags: + - mongodb + + - include_tasks: + file: tasks/data-partition.yml + apply: + tags: + - data-partition + tags: + - data-partition + + - include_tasks: + file: tasks/swap.yml + apply: + tags: + - swap + tags: + - swap + + - include_tasks: + file: tasks/ufw.yml + apply: + tags: + - ufw + tags: + - ufw + + - include_tasks: + file: tasks/fail2ban.yml + apply: + tags: + - fail2ban + tags: + - fail2ban + + - name: 'Copy logrotate script' + copy: + src: ../logrotate.conf + dest: /etc/ + + - name: 'Save system logs to Papertrail' + register: papaertrailSystemLogs + shell: ' cd / && wget -qO - --header="X-Papertrail-Token: {{ papertrail_token }}" \ https://papertrailapp.com/destinations/16712142/setup.sh | sudo bash >> /var/log/papertrail.log 2>&1' + when: papertrail_token is defined + + - include_tasks: + file: tasks/decrypt-on-boot.yml + apply: + tags: + - decrypt-on-boot + tags: + - decrypt-on-boot + +- hosts: docker-manager-first + become: yes + become_method: sudo + tasks: + - include_tasks: + file: tasks/swarm.yml + apply: + tags: + - swarm + 
tags: + - swarm + + - include_tasks: + file: tasks/elasticsearch.yml + apply: + tags: + - elasticsearch + tags: + - elasticsearch + + - include_tasks: + file: tasks/traefik.yml + apply: + tags: + - traefik + tags: + - traefik + + - name: Label nodes + shell: docker node update --label-add {{ hostvars[hostname]['data_label'] }}=true {{ hostname }} + loop: "{{ groups['docker-manager-first'] + groups['docker-workers'] }}" + loop_control: + loop_var: hostname + when: hostvars[hostname]['data_label'] is defined + +- hosts: docker-workers + become: yes + become_method: sudo + tasks: + - name: 'Join as a worker' + shell: "docker swarm join --token {{ hostvars['manager1']['worker_token']['stdout'] }} {{ hostvars['manager1']['ansible_default_ipv4']['address'] }}:2377" + when: "docker_info.stdout.find('Swarm: inactive') != -1" + retries: 3 + delay: 20 diff --git a/infrastructure/server-setup/tasks/checks.yml b/infrastructure/server-setup/tasks/checks.yml new file mode 100644 index 000000000..54126a44f --- /dev/null +++ b/infrastructure/server-setup/tasks/checks.yml @@ -0,0 +1,13 @@ +- name: 'Check mandatory variables are defined' + assert: + that: + - mongodb_admin_username is defined + - mongodb_admin_password is defined + - elasticsearch_superuser_password is defined + - disk_encryption_key is defined + - encrypted_disk_size is defined + +- name: Update apt-get + apt: + update_cache: yes + cache_valid_time: 3600 diff --git a/infrastructure/server-setup/tasks/crontab.yml b/infrastructure/server-setup/tasks/crontab.yml new file mode 100644 index 000000000..3fbb64058 --- /dev/null +++ b/infrastructure/server-setup/tasks/crontab.yml @@ -0,0 +1,7 @@ +- name: 'Setup crontab to backup the opencrvs data' + cron: + name: 'backup opencrvs' + minute: '0' + hour: '0' + job: 'cd / && bash /opt/opencrvs/infrastructure/emergency-backup-metadata.sh --ssh_user={{ external_backup_server_user }} --ssh_host={{ external_backup_server_ip }} --ssh_port={{ external_backup_server_ssh_port }} --production_ip={{ manager_production_server_ip }} --remote_dir={{ external_backup_server_remote_directory }} --replicas=1 >> /var/log/opencrvs-backup.log 2>&1' + when: external_backup_server_ip is defined diff --git a/infrastructure/server-setup/tasks/data-partition.yml b/infrastructure/server-setup/tasks/data-partition.yml new file mode 100644 index 000000000..7b4b96fa6 --- /dev/null +++ b/infrastructure/server-setup/tasks/data-partition.yml @@ -0,0 +1,212 @@ +- name: 'Precheck if encrypted file system exists so we dont try to bootstrap' + stat: + path: /cryptfs_file_sparse.img + get_checksum: False + register: encryptedFileSystemPreCheck + +- name: 'Bootstrap encrypted data folder' + script: ../cryptfs/bootstrap.sh -s {{encrypted_disk_size}} -p {{disk_encryption_key}} + when: (not encryptedFileSystemPreCheck.stat.exists) + +- name: Wait for encrypted file system + ansible.builtin.wait_for: + path: /cryptfs_file_sparse.img + state: present + +- name: 'Register encrypted file system' + stat: + path: /cryptfs_file_sparse.img + get_checksum: False + register: encryptedFileSystemPostCheck + +- name: 'Mount encrypted data folder' + script: ../cryptfs/mount.sh -p {{disk_encryption_key}} + when: encryptedFileSystemPostCheck.stat.exists + + # Create data directories +- name: Check mongo data directory + stat: + path: /data/mongo + register: mongo_data + +- name: 'Create mongo data directory' + file: + path: /data/mongo + state: directory + when: not mongo_data.stat.exists + +- name: Check mongo data backup directory + stat: + path: 
/data/backups/mongo + register: mongo_data_backup + +- name: 'Create mongo backup directory' + file: + path: /data/backups/mongo + state: directory + group: 1000 + owner: 1000 + mode: g+rwx + when: not mongo_data_backup.stat.exists + +- name: 'Create traefik data directory' + file: + path: /data/traefik + state: directory + +- name: Check elasticsearch data directory + stat: + path: /data/elasticsearch + register: elasticsearch_data + +- name: 'Create elasticsearch data directory' + file: + path: /data/elasticsearch + state: directory + group: 1000 + owner: 1000 + mode: g+rwx + when: not elasticsearch_data.stat.exists + +- name: Check elasticsearch data backup directory + stat: + path: /data/backups/elasticsearch + register: elasticsearch_data_backup + +- name: 'Create elasticsearch backup directory' + file: + path: /data/backups/elasticsearch + state: directory + group: 1000 + owner: 1000 + mode: ugo+rwx + when: not elasticsearch_data_backup.stat.exists + +- name: Check metabase data directory + stat: + path: /data/metabase + register: metabase_data + +- name: 'Create metabase data directory' + file: + path: /data/metabase + state: directory + group: 1000 + owner: 1000 + mode: g+rwx + when: not metabase_data.stat.exists + +- name: Check metabase data backup directory + stat: + path: /data/backups/metabase + register: metabase_data_backup + +- name: 'Create metabase backup directory' + file: + path: /data/backups/metabase + state: directory + group: 1000 + owner: 1000 + mode: ugo+rwx + when: not metabase_data_backup.stat.exists + +- name: Check influxdb data directory + stat: + path: /data/influxdb + register: influxdb_data + +- name: 'Create influxdb data directory' + file: + path: /data/influxdb + state: directory + when: not influxdb_data.stat.exists + +- name: Check influxdb data backup directory + stat: + path: /data/backups/influxdb + register: influxdb_data_backup + +- name: 'Create influxdb backup directory' + file: + path: /data/backups/influxdb + state: directory + group: 1000 + owner: 1000 + mode: g+rwx + when: not influxdb_data_backup.stat.exists + +- name: Check minio data directory + stat: + path: /data/minio + register: minio_data + +- name: 'Create minio data directory' + file: + path: /data/minio + state: directory + group: 1000 + owner: 1000 + mode: ugo+rwx + when: not minio_data.stat.exists + +- name: Check minio data backup directory + stat: + path: /data/backups/minio + register: minio_data_backup + +- name: 'Create minio backup directory' + file: + path: /data/backups/minio + state: directory + group: 1000 + owner: 1000 + mode: ugo+rwx + when: not minio_data_backup.stat.exists + +- name: Check vsexport data directory + stat: + path: /data/vsexport + register: vsexport_data + +- name: 'Create vsexport data directory' + file: + path: /data/vsexport + state: directory + group: 1000 + owner: 1000 + mode: ugo+rwx + when: not vsexport_data.stat.exists + +- name: Check vsexport data backup directory + stat: + path: /data/backups/vsexport + register: vsexport_data_backup + +- name: 'Create vsexport backup directory' + file: + path: /data/backups/vsexport + state: directory + group: 1000 + owner: 1000 + mode: ugo+rwx + when: not vsexport_data_backup.stat.exists + +- name: 'Create secrets directory' + file: + path: /data/secrets + state: directory + group: 1000 + owner: 1000 + mode: g+rwx + +- name: Save secrets into encrypted folder for access by scripts + ansible.builtin.copy: + dest: /data/secrets/opencrvs.secrets + group: 1000 + owner: 1000 + mode: g+rwx + content: | + 
MONGODB_ADMIN_PASSWORD={{ mongodb_admin_password }}
+      MONGODB_ADMIN_USER={{ mongodb_admin_username }}
+      ELASTICSEARCH_ADMIN_PASSWORD={{elasticsearch_superuser_password}}
+      ELASTICSEARCH_ADMIN_USER=elastic
diff --git a/infrastructure/server-setup/tasks/decrypt-on-boot.yml b/infrastructure/server-setup/tasks/decrypt-on-boot.yml
new file mode 100644
index 000000000..bb2813e84
--- /dev/null
+++ b/infrastructure/server-setup/tasks/decrypt-on-boot.yml
@@ -0,0 +1,29 @@
+- name: Save disk encryption key into a file as an example (in production use a hardware security module)
+  ansible.builtin.copy:
+    dest: /root/disk-encryption-key.txt
+    group: 1000
+    owner: 1000
+    mode: g+rwx
+    content: |
+      DISK_ENCRYPTION_KEY={{ disk_encryption_key }}
+
+- name: Copy reboot.service systemd file. Must decrypt disk on reboot
+  ansible.builtin.copy:
+    dest: /etc/systemd/system/reboot.service
+    group: 1000
+    owner: 1000
+    mode: g+rwx
+    content: |
+      [Unit]
+      Description=Mount encrypted dir
+
+      [Service]
+      ExecStart=bash /opt/opencrvs/infrastructure/cryptfs/decrypt.sh -key /root/disk-encryption-key.txt >> /var/log/cryptfs-reboot.log 2>&1
+
+      [Install]
+      WantedBy=multi-user.target
+  when: encryptedFileSystemPostCheck.stat.exists
+
+- name: 'Setup systemd to mount encrypted folder'
+  shell: systemctl daemon-reload && systemctl enable reboot.service
+  when: encryptedFileSystemPostCheck.stat.exists
diff --git a/infrastructure/server-setup/tasks/elasticsearch.yml b/infrastructure/server-setup/tasks/elasticsearch.yml
new file mode 100644
index 000000000..8c46548a6
--- /dev/null
+++ b/infrastructure/server-setup/tasks/elasticsearch.yml
@@ -0,0 +1,5 @@
+- name: 'Set higher max map count for Elasticsearch'
+  sysctl:
+    name: vm.max_map_count
+    value: 262144
+    state: present
diff --git a/infrastructure/server-setup/tasks/fail2ban.yml b/infrastructure/server-setup/tasks/fail2ban.yml
new file mode 100644
index 000000000..47e6e33e5
--- /dev/null
+++ b/infrastructure/server-setup/tasks/fail2ban.yml
@@ -0,0 +1,14 @@
+- name: 'Install Fail2Ban'
+  apt:
+    name: fail2ban
+    state: present
+
+- name: 'Copy fail2ban jail.local'
+  copy:
+    src: ../jail.local
+    dest: /etc/fail2ban/
+
+- name: 'Start fail2ban and reload jail.local'
+  service:
+    name: fail2ban
+    state: restarted
diff --git a/infrastructure/server-setup/tasks/mongodb.yml b/infrastructure/server-setup/tasks/mongodb.yml
new file mode 100644
index 000000000..71248e8a5
--- /dev/null
+++ b/infrastructure/server-setup/tasks/mongodb.yml
@@ -0,0 +1,9 @@
+- name: Copy MongoDB replication security key file to nodes
+  copy: src=/tmp/mongodb-keyfile dest=/mongodb-keyfile mode=0400 force=no
+
+- name: Change access rights of key file
+  file:
+    path: /mongodb-keyfile
+    state: file
+    owner: 1000
+    group: 1000
diff --git a/infrastructure/server-setup/tasks/swap.yml b/infrastructure/server-setup/tasks/swap.yml
new file mode 100644
index 000000000..9c732e3e7
--- /dev/null
+++ b/infrastructure/server-setup/tasks/swap.yml
@@ -0,0 +1,44 @@
+# https://stackoverflow.com/a/24765946
+- name: Create swap file
+  command:
+    dd if=/dev/zero of={{ swap_file_path }} bs=1024 count={{ swap_file_size_mb }}k
+    creates="{{ swap_file_path }}"
+  tags:
+    - swap.file.create
+
+- name: Change swap file permissions
+  file: path="{{ swap_file_path }}"
+    owner=root
+    group=root
+    mode=0600
+  tags:
+    - swap.file.permissions
+
+- name: 'Check swap file type'
+  command: file {{ swap_file_path }}
+  register: swapfile
+  tags:
+    - swap.file.mkswap
+
+- name: Make swap file
+  command: 'sudo mkswap {{ swap_file_path }}'
+  when: 
swapfile.stdout.find('swap file') == -1
+  tags:
+    - swap.file.mkswap
+
+- name: Write swap entry in fstab
+  mount: name=none
+    src={{ swap_file_path }}
+    fstype=swap
+    opts=sw
+    passno=0
+    dump=0
+    state=present
+  tags:
+    - swap.fstab
+
+- name: Mount swap
+  command: 'swapon {{ swap_file_path }}'
+  when: ansible_swaptotal_mb < 1
+  tags:
+    - swap.file.swapon
diff --git a/infrastructure/server-setup/tasks/swarm.yml b/infrastructure/server-setup/tasks/swarm.yml
new file mode 100644
index 000000000..d2b1039fa
--- /dev/null
+++ b/infrastructure/server-setup/tasks/swarm.yml
@@ -0,0 +1,17 @@
+- name: 'Allow secure docker swarm node communication (managers only)'
+  ufw:
+    rule: allow
+    port: 2377
+    proto: tcp
+
+- name: 'Create primary swarm manager'
+  shell: docker swarm init --advertise-addr {{ ansible_default_ipv4.address }}
+  when: "docker_info.stdout.find('Swarm: inactive') != -1"
+
+- name: 'Get docker swarm manager token'
+  shell: docker swarm join-token -q manager
+  register: manager_token
+
+- name: 'Get docker swarm worker token'
+  shell: docker swarm join-token -q worker
+  register: worker_token
diff --git a/infrastructure/server-setup/tasks/traefik.yml b/infrastructure/server-setup/tasks/traefik.yml
new file mode 100644
index 000000000..a6f533c82
--- /dev/null
+++ b/infrastructure/server-setup/tasks/traefik.yml
@@ -0,0 +1,5 @@
+- name: 'Create acme file for traefik'
+  file:
+    path: /data/traefik/acme.json
+    state: touch
+    mode: '600'
diff --git a/infrastructure/server-setup/tasks/ufw.yml b/infrastructure/server-setup/tasks/ufw.yml
new file mode 100644
index 000000000..f91f5b962
--- /dev/null
+++ b/infrastructure/server-setup/tasks/ufw.yml
@@ -0,0 +1,40 @@
+- name: 'Install UFW'
+  apt:
+    name: ufw
+    state: present
+
+- name: 'Allow OpenSSH through UFW'
+  ufw:
+    rule: allow
+    name: OpenSSH
+
+# Docker swarm ports - Note: all published docker container ports will override UFW rules!
+- name: 'Allow secure docker client communication' + ufw: + rule: allow + port: 2376 + proto: tcp + +- name: 'Allow docker swarm communication among nodes - TCP' + ufw: + rule: allow + port: 7946 + proto: tcp + +- name: 'Allow docker swarm communication among nodes - UDP' + ufw: + rule: allow + port: 7946 + proto: udp + +- name: 'Allow docker overlay network traffic' + ufw: + rule: allow + port: 4789 + proto: udp + +- name: 'Deny everything else and enable UFW' + ufw: + state: enabled + default: deny + direction: incoming From 5cc5f7b790cf6b66cc3c86b50636b08930d872e3 Mon Sep 17 00:00:00 2001 From: Riku Rouvila Date: Fri, 24 Nov 2023 15:49:55 +0200 Subject: [PATCH 2/6] update provisioning pipeline --- .github/workflows/provision.yml | 31 +++++++++-------------- infrastructure/server-setup/example-3.ini | 2 +- 2 files changed, 13 insertions(+), 20 deletions(-) diff --git a/.github/workflows/provision.yml b/.github/workflows/provision.yml index 09ac6f3bf..de923ef56 100644 --- a/.github/workflows/provision.yml +++ b/.github/workflows/provision.yml @@ -112,21 +112,6 @@ jobs: elasticsearch_superuser_password: ${{ secrets.ELASTICSEARCH_SUPERUSER_PASSWORD }} # ansible_sudo_pass: ${{ secrets.SUDO_PASSWORD }} in case your user is not root - # TODO: Iterate for 3 or 5 replicas - - name: Create ini file for 1 replica - id: ini-file - run: | - touch ${{ github.event.repository.name }}/infrastructure/server-setup/replicas-1.ini - echo "[docker-manager-first]" > ${{ github.event.repository.name }}/infrastructure/server-setup/replicas-1.ini - echo "manager1 ansible_host=\"${{ secrets.SSH_HOST }}\" ansible_user=${{ secrets.SSH_USER }} ansible_ssh_private_key_file=/tmp/server.pem" >> ${{ github.event.repository.name }}/infrastructure/server-setup/replicas-1.ini - echo "" >> ${{ github.event.repository.name }}/infrastructure/server-setup/replicas-1.ini - echo "[all:vars]" >> ${{ github.event.repository.name }}/infrastructure/server-setup/replicas-1.ini - echo "data1_hostname=${{ vars.HOSTNAME }}" >> ${{ github.event.repository.name }}/infrastructure/server-setup/replicas-1.ini - - - name: Check ini content - run: | - cat ${{ github.event.repository.name }}/infrastructure/server-setup/replicas-1.ini - - name: Run playbook on 1 replica in qa uses: dawidd6/action-ansible-playbook@v2 if: vars.REPLICAS == 1 && env.ENV_TYPE == 'qa' @@ -135,11 +120,15 @@ jobs: ANSIBLE_SSH_TIMEOUT: 30 ANSIBLE_SSH_RETRIES: 20 with: - playbook: playbook-1.yml + playbook: playbook.yml directory: ${{ github.event.repository.name }}/infrastructure/server-setup + inventory: | + [docker-manager-first] + ${{ vars.HOSTNAME }} ansible_host="${{ secrets.SSH_HOST }}" data_label=data1 + + [docker-workers] options: | --verbose - --inventory replicas-1.ini --extra-vars ""${{ steps.ansible-variables.outputs.EXTRA_VARS }}"" - name: Run playbook on 1 replica in production @@ -150,9 +139,13 @@ jobs: ANSIBLE_SSH_TIMEOUT: 30 ANSIBLE_SSH_RETRIES: 20 with: - playbook: playbook-1.yml + playbook: playbook.yml directory: ${{ github.event.repository.name }}/infrastructure/server-setup + inventory: | + [docker-manager-first] + ${{ vars.HOSTNAME }} ansible_host="${{ secrets.SSH_HOST }}" data_label=data1 + + [docker-workers] options: | --verbose - --inventory replicas-1.ini --extra-vars ""${{ steps.ansible-production-variables.outputs.EXTRA_VARS }}"" diff --git a/infrastructure/server-setup/example-3.ini b/infrastructure/server-setup/example-3.ini index be642c038..43ee0ce6b 100644 --- a/infrastructure/server-setup/example-3.ini +++ 
b/infrastructure/server-setup/example-3.ini
@@ -11,7 +11,7 @@
 ENTER_HOSTNAME_1 ansible_host="ENTER YOUR MANAGER HOST IP" data_label=data1
 
 [docker-workers]
-; We recommend you add 4 workers for a scaled production deployment
+; We recommend you add 2 workers for a usual production deployment
 ; Uncomment the lines below
 ENTER_HOSTNAME_2 ansible_host="ENTER YOUR WORKER HOST IP" data_label=data2
 ENTER_HOSTNAME_3 ansible_host="ENTER YOUR WORKER HOST IP" data_label=data3
From 1ea4484f9a72645043a7d694a17f580e2f188388 Mon Sep 17 00:00:00 2001
From: Riku Rouvila
Date: Fri, 24 Nov 2023 16:16:52 +0200
Subject: [PATCH 3/6] try initialising the provision pipeline by adding a temporary push trigger

---
 .github/workflows/provision.yml | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/.github/workflows/provision.yml b/.github/workflows/provision.yml
index de923ef56..0ebb6ceaf 100644
--- a/.github/workflows/provision.yml
+++ b/.github/workflows/provision.yml
@@ -1,6 +1,9 @@
 name: Provision environment
 run-name: Provision ${{ github.event.inputs.environment }}
 on:
+  push:
+    branches:
+      - playbook-cleanup
   workflow_dispatch:
     inputs:
       environment:
From 0c66f5d6cc2b084fdb9a2ab6db25dc83d2b84fc5 Mon Sep 17 00:00:00 2001
From: Riku Rouvila
Date: Mon, 27 Nov 2023 09:39:15 +0200
Subject: [PATCH 4/6] setup ssh key before trying to provision

---
 .github/workflows/provision.yml | 14 ++++++++++++++
 1 file changed, 14 insertions(+)

diff --git a/.github/workflows/provision.yml b/.github/workflows/provision.yml
index 0ebb6ceaf..bfd739b3c 100644
--- a/.github/workflows/provision.yml
+++ b/.github/workflows/provision.yml
@@ -115,6 +115,20 @@ jobs:
           elasticsearch_superuser_password: ${{ secrets.ELASTICSEARCH_SUPERUSER_PASSWORD }}
           # ansible_sudo_pass: ${{ secrets.SUDO_PASSWORD }} in case your user is not root
 
+      - name: Read known hosts
+        run: |
+          cd ${{ github.event.repository.name }}
+          echo "KNOWN_HOSTS<<EOF" >> $GITHUB_ENV
+          sed -i -e '$a\' ./infrastructure/.known-hosts
+          cat ./infrastructure/.known-hosts >> $GITHUB_ENV
+          echo "EOF" >> $GITHUB_ENV
+
+      - name: Install SSH Key
+        uses: shimataro/ssh-key-action@v2
+        with:
+          key: ${{ secrets.SSH_KEY }}
+          known_hosts: ${{ env.KNOWN_HOSTS }}
+
       - name: Run playbook on 1 replica in qa
         uses: dawidd6/action-ansible-playbook@v2
         if: vars.REPLICAS == 1 && env.ENV_TYPE == 'qa'
From 8529d2894da62d748ab9a14995a18d407b3d4a10 Mon Sep 17 00:00:00 2001
From: Riku Rouvila
Date: Mon, 27 Nov 2023 09:44:20 +0200
Subject: [PATCH 5/6] add known hosts file

---
 infrastructure/.known-hosts | 12 ++++++++++++
 1 file changed, 12 insertions(+)
 create mode 100644 infrastructure/.known-hosts

diff --git a/infrastructure/.known-hosts b/infrastructure/.known-hosts
new file mode 100644
index 000000000..e1b0c4477
--- /dev/null
+++ b/infrastructure/.known-hosts
@@ -0,0 +1,12 @@
+# Farajaland staging
+|1|QaneIg/kW2nT73307HQ/9Y9Bz5A=|RIaMnvGPGkJFWdEJFxWc8RLFs5E= ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBOKbJ1oRhgHaRxj4G8k9rkqIla59c4yWUkbfxX7yHPdWXmpwShOEaypF7SG9oXVP3+gWJG9aCLzv0F8GSFecB+w=
+|1|56R2lbUeZ1Ljt37oregbUT9t3Kg=|S3zskisluF1Z1OllInJr6P+x4/o= ssh-rsa 
AAAAB3NzaC1yc2EAAAADAQABAAABgQCw3/MlAt3ENmyKxkRF2vuz5Kl+6BT/1XpyUwJTNt4TzD0WIJb+HNbxOdmLmrc3gQQ/17pshYWWjm3H08eEELHNpehbG8yUpYjbplIGvDKz+cmwII2Snz0heBnosD9FWsHOiuioUTtUslUM8HK9Qk/ysRXMvQB6VKbsymAj7PN0rZEdQJYDvKzQ8nY8VvfrMRW8EoglFB3arMFdkNbTja25OaMWSSJcpcAeUW6EREM/N6y35G8CwGba1yx0fW2kPGiUXPmYXefa333NI+6+yE7id5RZepPEV2qvcSyE6yhY0GWvBpMmJlmHPvYn4HRRA8ucAN8iMssb+hH9i1GqY8XHjJ7TTb0T4Qy3/RDtohXLUa6j2efw3iToYj88OKcx7kafiiTZigFhzj1cNZQf+jaWm/tfQZbietr+LUCjFToxEsa4uhvXOvoempkBXFw0pBdfv0fYkulyb+L+9Teu5BcMbjULbWNgvbm394J86/YpLZzyhAwxNBKYIVq8wA2geBc= +|1|8qdU8nm5coaY6NvvJ9Hbg4fxmJw=|IJQksH0MG9lAsqn4eR9tolBwy5Q= ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAINtPEM0nhqrb+BC/mMF85xDodd0RnDZKyyCpqoePhUj5 +# Farajaland QA +|1|z9A2rBv8YTjgWJ+0SFI5Mp2Lp+I=|MVFxVGBHPBUtJuQobkgxzzIBqhE= ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQDH9lVfT82JUM4YnJ0Le7mThJee+MTizFE6qfmPIxbd3WjN7Ak7O8UpViaNI77avUd2CKsO8gboKzFcU1eUCqm3qgVIEGmjaLl9V8MCIRPDHHdpRHIRuv8NFQxs9K8/W9KWKNVHLKwZmSvnre4X12QweX6ZUUmJXc+ECibHGJBrQQ9dgPETU+HnN1xw111zSY66Nbc0z8GA2FrnanUhtiKWz0r7/dOdXR05a0XOoi2KDDcoiiLBE2YYtLA4xgDBHTgeKJtV8W3DMVRg9KJ9bKfgPhwlrcVlUX4PPAYG3yIatvW8izoB51HLEeWBCxboZrzBtGJOPfy9AnTGZ8ROhDr+LpCxEHNZYnTXJti64C7pYKjn/qVYpev0CW+iqqjSR/Aksf+4j2rW/2tFAuICO3caYWlqjyySvXixwLe2ihOI3qlpt/uxjfdT07YTplzVvcu7z2I7AWe+u73Eo8STzLqzKmMSVIisN0boIXy2KkHWnIgW3/P53eVqxwng0tajI1M= +|1|ypxkbArSjikXdEscQUXWyDMFlkM=|HV68rCR+h/IG9A3NsEhg1IqFt8Y= ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBDiEWSrjOQdi+r/L0W56994Zw+MtqRHgO1hVR5jkV3ayzJ0+m3auVWsk6Xs+HP2RauVRiO3idO86s8XGfwz/vxo= +|1|8xUi4LJT0ArBoZMNZBzqdKna+X0=|ofos+azs78yCYodbGqkxPcvrzoc= ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIGSIBuxNbsqj+NYkQQcBxTQZP6hkrI5jSK79rHIajRlN +# Farajaland production +|1|mmWxyVhdNt+9vCZY8YSu/b5T6mI=|oWUySmyU/yK3gMAgrMpcfutjats= ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBEp49NpGjmoCmYAHnNbZF6dpo0G3L3Z3m++B4Pq3sVUTLwMuNv4WfoebiSJH20tcTq92XbFV5NfCmdnfoksr/1Y= +|1|EWKHZMaMYdiCWDSqV8DsOmqFJ3c=|GQ0ApYEAoubxL2n9VFMY5cnN8Yc= ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQDJkWafUYUyF1eOzj1WqhjwJc+TvMjKmz0bCqtci5BMNUsn+R+Z+CIOPvI8eqYEzXiJ7VGAZq1twRYGSnRTJyCja3eJfvQfxb8hGVz2fkf9rthgYABdoHSJoGMTt4EP2LtTduzvLPCBctiID6bXUFMkM6j2pmLQK/gZGEKNaJb86D7xt1HXQsqV5bAAKCaehgn6LAQ8zee9YZtoP8fwhoorTJEJ3kpsvsneEpV3kiuAPdyfB8zI7E3HHqXgD+ij0eFogK+NSUIMUexFyRZgAtKDBjnNRQLGMEzY1UzM7pojRr8Bb9vT4tGJGBpzfWvWn6WavIpBa3Ht6sXmXHGexGn2X8gyG5rHif2FAmTV7O4M+sBlpxqr7G906BJ8JwOl8qp8T9BnesWiExFdeDwzsRPS49KQOBpxOqfK5OC1ZQlEzVIR1SQpOAGjDgGM1XtR4jsBX6OBlR7hcYSw9F6wCWjrWFrmv6HVNAXZgYsnzXQmJPpDbRQAyHTvIHK9/DobQI8= +|1|FuRTlvDs4p17HjsAIGQ7wQml0TM=|qDsBKG5gV6TDM2dw0lykGLS+11M= ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIO/ZvINySxP0MXtHBbaJ6FqDBaFUl/YVAyUItqfiyeez From ffd3e4c3128f72722659a68d7ee0a2e5a2747311 Mon Sep 17 00:00:00 2001 From: Riku Rouvila Date: Mon, 27 Nov 2023 09:54:43 +0200 Subject: [PATCH 6/6] do not try to mount cryptfs partition to /data if it's already mounted --- .github/workflows/provision.yml | 4 ---- .../server-setup/tasks/data-partition.yml | 13 +++++++++++-- 2 files changed, 11 insertions(+), 6 deletions(-) diff --git a/.github/workflows/provision.yml b/.github/workflows/provision.yml index bfd739b3c..59748f68e 100644 --- a/.github/workflows/provision.yml +++ b/.github/workflows/provision.yml @@ -15,10 +15,6 @@ on: - staging - qa - production - branch_name: - description: Branch to provision from - default: develop - required: true jobs: provision: diff --git a/infrastructure/server-setup/tasks/data-partition.yml b/infrastructure/server-setup/tasks/data-partition.yml index 
7b4b96fa6..e3f60c3e4 100644 --- a/infrastructure/server-setup/tasks/data-partition.yml +++ b/infrastructure/server-setup/tasks/data-partition.yml @@ -19,9 +19,18 @@ get_checksum: False register: encryptedFileSystemPostCheck +- name: Check if cryptfs_file_sparse.img is already mounted + shell: mount | grep -q 'cryptfs on /data' + register: is_mounted + ignore_errors: true + changed_when: false + - name: 'Mount encrypted data folder' - script: ../cryptfs/mount.sh -p {{disk_encryption_key}} - when: encryptedFileSystemPostCheck.stat.exists + script: ../cryptfs/mount.sh -p {{ disk_encryption_key }} + when: + - encryptedFileSystemPostCheck.stat.exists + # Check if cryptfs_file_sparse.img is already mounted + - is_mounted.rc != 0 # Create data directories - name: Check mongo data directory