diff --git a/.gitignore b/.gitignore index c8b241f..e420ee4 100644 --- a/.gitignore +++ b/.gitignore @@ -1 +1 @@ -target/* \ No newline at end of file +target/* diff --git a/ansible/.gitignore b/ansible/.gitignore index 99ed0d4..89364e8 100644 --- a/ansible/.gitignore +++ b/ansible/.gitignore @@ -1 +1,7 @@ notes.txt +ansible.cfg +.yamlfmt +.run.sh + +roles/deploy/files/resources +roles/deploy/files/lard_ingestion diff --git a/ansible/bigip.yml b/ansible/bigip.yml index 5fc6dcf..a92217d 100644 --- a/ansible/bigip.yml +++ b/ansible/bigip.yml @@ -1,52 +1,28 @@ -- name: Copy schema for bigip +--- +- name: Create what is needed for the bigip load balancers + hosts: servers + remote_user: ubuntu vars: ostack_cloud: lard ostack_region: Ostack2-EXT - hosts: localhost # need to seperate this since done from localhost gather_facts: false pre_tasks: # copy file, so we have an .sql file to apply locally - - name: Create a directory if it does not exist + - name: Create a directory if it does not exist ansible.builtin.file: path: /etc/postgresql/16/db/bigip state: directory mode: '0755' become: true - delegate_to: '{{ hostvars[groups["servers"][0]].ansible_host }}' - remote_user: ubuntu + - name: Copy the schema to the remote 1 ansible.builtin.copy: src: ./roles/bigip/vars/bigip.sql dest: /etc/postgresql/16/db/bigip/bigip.sql mode: '0755' become: true - delegate_to: '{{ hostvars[groups["servers"][0]].ansible_host }}' - remote_user: ubuntu - - name: Create a directory if it does not exist - ansible.builtin.file: - path: /etc/postgresql/16/db/bigip - state: directory - mode: '0755' - become: true - delegate_to: '{{ hostvars[groups["servers"][1]].ansible_host }}' - remote_user: ubuntu - - name: Copy the schema to the remote 2 - ansible.builtin.copy: - src: ./roles/bigip/vars/bigip.sql - dest: /etc/postgresql/16/db/bigip/bigip.sql - mode: '0755' - become: true - delegate_to: '{{ hostvars[groups["servers"][1]].ansible_host }}' - remote_user: ubuntu -- name: Create what is needed for 
the bigip load balancers - hosts: servers - remote_user: ubuntu - vars: - ostack_cloud: lard - ostack_region: Ostack2-EXT - gather_facts: false # loops over both servers roles: - - role: bigip - # will fail to create table in the standby (since read only) \ No newline at end of file + - role: bigip + # will fail to create table in the standby (since read only) diff --git a/ansible/configure.yml b/ansible/configure.yml index ff586fd..ea369e6 100644 --- a/ansible/configure.yml +++ b/ansible/configure.yml @@ -1,3 +1,4 @@ +--- - name: Mount disks and install stuff on the VMs hosts: servers remote_user: ubuntu @@ -5,19 +6,18 @@ ostack_cloud: lard ipalias_network_name: ipalias ostack_region: Ostack2-EXT - # loops over both servers pre_tasks: - name: List ansible_hosts_all difference from ansible_host (aka the vm not currently being iterated on) ansible.builtin.debug: - msg: "{{ (ansible_play_hosts_all | difference([inventory_hostname])) | first }}" + msg: "{{ (ansible_play_hosts_all | difference([inventory_hostname])) | first }}" roles: - - role: addsshkeys + - role: addsshkeys - role: vm_format vars: - name_stuff: '{{ inventory_hostname }}' # name of current vm for finding ipalias port + name_stuff: "{{ inventory_hostname }}" # name of current vm for finding ipalias port - role: ssh vars: - vm_ip: '{{ ansible_host }}' # the current vm's ip + vm_ip: "{{ ansible_host }}" # the current vm's ip - name: Setup primary and standby vars: @@ -26,10 +26,14 @@ hosts: localhost gather_facts: false - roles: + roles: - role: primarystandbysetup - vars: - primary_name: lard-a - primary_ip: '{{ hostvars[groups["servers"][0]].ansible_host }}' # the first one is a - standby_name: lard-b - standby_ip: '{{ hostvars[groups["servers"][1]].ansible_host }}' # the second one is b \ No newline at end of file + when: inventory_hostname == "lard-a" + + - role: standbysetup + when: inventory_hostname == "lard-b" + # vars: + # primary_name: lard-a + # primary_ip: '{{ ansible_host }}' # the first one 
is a + # standby_name: lard-b + # standby_ip: '{{ hostvars[groups["servers"][1]].ansible_host }}' # the second one is b diff --git a/ansible/deploy.yml b/ansible/deploy.yml index e17750a..3863db5 100644 --- a/ansible/deploy.yml +++ b/ansible/deploy.yml @@ -3,5 +3,8 @@ # Deploy on both VMs, only the primary is "active" hosts: servers remote_user: ubuntu + gather_facts: false + # All role tasks require root user + become: true roles: - role: deploy diff --git a/ansible/host_vars/lard-a.yml b/ansible/host_vars/lard-a.yml new file mode 100644 index 0000000..508207e --- /dev/null +++ b/ansible/host_vars/lard-a.yml @@ -0,0 +1,2 @@ +--- +ansible_host: 123.123.123.123 diff --git a/ansible/host_vars/lard-b.yaml b/ansible/host_vars/lard-b.yaml new file mode 100644 index 0000000..508207e --- /dev/null +++ b/ansible/host_vars/lard-b.yaml @@ -0,0 +1,2 @@ +--- +ansible_host: 123.123.123.123 diff --git a/ansible/inventory.yml b/ansible/inventory.yml index a0a62a0..a57d71e 100644 --- a/ansible/inventory.yml +++ b/ansible/inventory.yml @@ -1,6 +1,5 @@ +--- servers: hosts: lard-a: - ansible_host: 157.249.*.* lard-b: - ansible_host: 157.249.*.* \ No newline at end of file diff --git a/ansible/provision.yml b/ansible/provision.yml index 11bd242..22f6ca2 100644 --- a/ansible/provision.yml +++ b/ansible/provision.yml @@ -1,20 +1,16 @@ -- name: setup networks and 2 vms - vars: - ostack_cloud: lard - ipalias_network_name: ipalias - ostack_region: Ostack2-EXT - hosts: localhost +--- +- name: Setup networks and 2 vms + hosts: servers gather_facts: false - roles: - - role: networks - - role: vm # in A - vars: - name_stuff: lard-a - availability_zone: ext-a - vm_ip: '{{ hostvars[groups["servers"][0]].ansible_host }}' - - role: vm # in B - vars: - name_stuff: lard-b - availability_zone: ext-b - vm_ip: '{{ hostvars[groups["servers"][1]].ansible_host }}' + tasks: + - name: Setup networks # noqa: run-once[task] + ansible.builtin.include_role: + name: networks + delegate_to: localhost + run_once: 
true + + - name: Setup VMs + ansible.builtin.include_role: + name: vm + delegate_to: localhost diff --git a/ansible/readme.md b/ansible/readme.md index f2f0a5f..4ca809d 100644 --- a/ansible/readme.md +++ b/ansible/readme.md @@ -1,74 +1,80 @@ ## README for LARD setup on openstack(2) #### Useful ansible commands: -``` + +```terminal ansible-inventory -i inventory.yml --graph ansible servers -m ping -u ubuntu -i inventory.yml ``` #### Dependencies to install -``` -pip3 install wheel # so you can allow downloading of binary python packages - -pip install -r requirements.txt - -ansible-galaxy collection install openstack.cloud - -ansible-galaxy collection install community.postgresql - -ansible-galaxy collection install community.general -ansible-galaxy collection install ansible.posix +```terminal +python3 -m venv {your_dir} +source {your_dir}/bin/activate -ansible-galaxy collection install ansible.utils - -``` +pip install -r requirements.txt +ansible-galaxy collection install -fr requirements.yml +``` ### Get access to OpenStack -You need to create application credentials in the project you are going to create the instances in, so that the ansible scripts can connect to the right ostack_cloud which in our case needs to be called lard. - -The file should exist here: -~/.config/openstack/clouds.yml -If have MET access see what is written at the start of the readme here: -https://gitlab.met.no/it/infra/ostack-ansible21x-examples +You need to create application credentials in the project you are going to +create the instances in, so that the ansible scripts can connect to the right +ostack_cloud which in our case needs to be called lard. -Or in the authentication section here: -https://gitlab.met.no/it/infra/ostack-doc/-/blob/master/ansible-os.md?ref_type=heads +The file should exist in `~/.config/openstack/clouds.yml`. 
+If have MET access see what is written at the start of the readme [here](https://gitlab.met.no/it/infra/ostack-ansible21x-examples) +or in the authentication section [here](https://gitlab.met.no/it/infra/ostack-doc/-/blob/master/ansible-os.md?ref_type=heads). ### Add your public key to the Ostack GUI -Go to "Compute" then "Key Pairs" and import your public key for use in the provisioning step. + +Go to "Compute" then "Key Pairs" and import your public key for use in the provisioning step. ### Provision! -The IPs in inventory.yml should correspond to floating ips you have requested in the network section of the open stack GUI. If you need to delete the old VMs (compute -> instances) and Volumes (volumes -> volumes) you can do so in the ostack GUI. *For some reason when deleting things to build up again one of the IPs did not get disassociated properly, and I had to do this manually (network -> floating IPs).* -The vars for the network and addssh tasks are encrypted with ansible-vault (ansible-vault decrypt roles/networks/vars/main.yml, ansible-vault decrypt roles/addshhkeys/vars/main.yml, ansible-vault decrypt roles/vm_format/vars/main.yml). -But if this has been setup before in the ostack project, these have likely already been run and therefore already exits so you could comment out this role from provision.yml. -Passwords are in ci_cd variables https://gitlab.met.no/met/obsklim/bakkeobservasjoner/lagring-og-distribusjon/db-products/poda/-/settings/ci_cd +The IPs in `inventory.yml` should correspond to floating ips you have requested +in the network section of the open stack GUI. If you need to delete the old VMs +(compute -> instances) and Volumes (volumes -> volumes) you can do so in the +ostack GUI. -``` +> \[!CAUTION\] For some reason when deleting things to build up again one of the IPs +> did not get disassociated properly, and I had to do this manually (network -> +> floating IPs). 
+ +The vars for the network and addssh tasks are encrypted with ansible-vault +(ansible-vault decrypt roles/networks/vars/main.yml, ansible-vault decrypt +roles/addshhkeys/vars/main.yml, ansible-vault decrypt +roles/vm_format/vars/main.yml). But if this has been setup before in the ostack +project, these have likely already been run and therefore already exits so you +could comment out this role from provision.yml. Passwords are in [ci_cd variables](https://gitlab.met.no/met/obsklim/bakkeobservasjoner/lagring-og-distribusjon/db-products/poda/-/settings/ci_cd). + +```terminal ansible-playbook -i inventory.yml -e ostack_key_name=xxx provision.yml ``` -After provisioning the next steps may need to ssh into the hosts, and thus you need to add them to your known hosts. Ansible appears to be crap at this, so its best to do it before running the next step by going: -`ssh ubuntu@157.249.*.*` +After provisioning the next steps may need to ssh into the hosts, and thus you need to add them to your known hosts. +Ansible appears to be crap at this, so its best to do it before running the next step by going: +`ssh ubuntu@157.249.*.*` For all the VMs. If cleaning up from tearing down a previous set of VMs you may also need to remove them first: `ssh-keygen -f "/home/louiseo/.ssh/known_hosts" -R "157.249.*.*"` ### Configure! -The third IP being passed in here is the one that gets associated with the primary, and moved when doing a switchover. -*NOTE:* The floating IP association times out, but this is ignored as it is a known bug. -``` +The third IP being passed in here is the one that gets associated with the primary, and moved when doing a switchover. +*NOTE:* The floating IP association times out, but this is ignored as it is a known bug. 
+ +```term ansible-playbook -i inventory.yml -e primary_floating_ip='157.249.*.*' -e db_password=xxx -e repmgr_password=xxx configure.yml ``` -The parts to do with the floating ip that belongs to the primary (ipalias) are based on: +The parts to do with the floating ip that belongs to the primary (ipalias) are based on: https://gitlab.met.no/ansible-roles/ipalias/-/tree/master?ref_type=heads ### Connect to database + ``` PGPASSWORD=xxx psql -h 157.249.*.* -p 5432 -U lard_user -d lard ``` @@ -76,6 +82,7 @@ PGPASSWORD=xxx psql -h 157.249.*.* -p 5432 -U lard_user -d lard ### Checking the cluster Become postgres user: sudo su postgres + ``` postgres@lard-b:/home/ubuntu$ repmgr -f /etc/repmgr.conf node check Node "lard-b": @@ -86,8 +93,9 @@ Node "lard-b": Downstream servers: OK (1 of 1 downstream nodes attached) Replication slots: OK (node has no physical replication slots) Missing physical replication slots: OK (node has no missing physical replication slots) - Configured data directory: OK (configured "data_directory" is "/mnt/ssd-b/16/main") + Configured data directory: OK (configured "data_directory" is "/mnt/ssd-data/16/main") ``` + ``` postgres@lard-a:/home/ubuntu$ repmgr -f /etc/repmgr.conf node check Node "lard-a": @@ -98,15 +106,16 @@ Node "lard-a": Downstream servers: OK (this node has no downstream nodes) Replication slots: OK (node has no physical replication slots) Missing physical replication slots: OK (node has no missing physical replication slots) - Configured data directory: OK (configured "data_directory" is "/mnt/ssd-b/16/main") + Configured data directory: OK (configured "data_directory" is "/mnt/ssd-data/16/main") ``` -While a few of the configurations are found in /etc/postgresql/16/main/postgresql.conf (particularly in the ansible block at the end), many of them -can only be seen in /mnt/ssd-b/16/main/postgresql.auto.conf (need sudo to see contents). 
+While a few of the configurations are found in /etc/postgresql/16/main/postgresql.conf (particularly in the ansible block at the end), many of them +can only be seen in /mnt/ssd-data/16/main/postgresql.auto.conf (need sudo to see contents). ### Perform switchover -This should only be used when both VMs are up and running, like in the case of planned maintenance on one datarom. -Then we would use this script to switch the primary to the datarom that will stay available ahead of time. + +This should only be used when both VMs are up and running, like in the case of planned maintenance on one datarom. +Then we would use this script to switch the primary to the datarom that will stay available ahead of time. *Make sure you are aware which one is the master, and put the names the right way around in this call.* @@ -118,8 +127,9 @@ This should also be possible to do manually, but might need to follow what is do `repmgr standby switchover -f /etc/repmgr.conf --siblings-follow` (need to be postgres user) ### Promote standby (assuming the primary is down) -Make sure you are know which one you want to promote! -This is used in the case where the primary has gone down (e.g. unplanned downtime of a datarom). + +Make sure you are know which one you want to promote!\ +This is used in the case where the primary has gone down (e.g. unplanned downtime of a datarom). **Manually:** SSH into the standby @@ -132,25 +142,28 @@ You can the check the status again (and now the old primary will say failed) Then move the ip in the ostack gui (see in network -> floating ips, dissasociate it then associated it with the ipalias port on the other VM) #### Later, when the old primary comes back up -The cluster will be in a slightly confused state, because this VM still thinks its a primary (although repmgr tells it the other one is running as a primary as well). 
If the setup is running as asynchronous we could lose data that wasn't copied over before the crash, if running synchronously then there should be no data loss. + +The cluster will be in a slightly confused state, because this VM still thinks its a primary (although repmgr tells it the other one is running as a primary as well). If the setup is running as asynchronous we could lose data that wasn't copied over before the crash, if running synchronously then there should be no data loss. SSH into the new primary `repmgr -f /etc/repmgr.conf cluster show` says: + - node "lard-a" (ID: 1) is running but the repmgr node record is inactive SSH into the old primary `repmgr -f /etc/repmgr.conf cluster show` says: -- node "lard-b" (ID: 2) is registered as standby but running as primary +- node "lard-b" (ID: 2) is registered as standby but running as primary With a **playbook** (rejoin_ip is the ip of the node that has been down and should now be a standby not a primary): + ``` ansible-playbook -i inventory.yml -e rejoin_ip=157.249.*.* -e primary_ip=157.249.*.* rejoin.yml ``` -Or **manually**: +Or **manually**: Make sure the pg process is stopped (see fast stop command) if it isn't already Become postgres user: @@ -161,15 +174,17 @@ Perform a rejoin `repmgr node rejoin -f /etc/repmgr.conf -d 'host=157.249.*.* user=repmgr dbname=repmgr connect_timeout=2' --force-rewind=/usr/lib/postgresql/16/bin/pg_rewind --verbose` ### for testing: -Take out one of the replicas (or can shut off instance in the openstack GUI): + +Take out one of the replicas (or can shut off instance in the openstack GUI): `sudo pg_ctlcluster 16 main -m fast stop` For bringing it back up (or turn it back on): `sudo pg_ctlcluster 16 main start` ### for load balancing at MET -This role creates a user and basic db for the loadbalancer to test the health of the db. 
Part of the role is allowed to fail on the secondary ("cannot execute ___ in a read-only transaction"), as it should pass on the primary and be replicated over. The hba conf change needs to be run on both. -The vars are encrypted, so run: ansible-vault decrypt roles/bigip/vars/main.yml +This role creates a user and basic db for the loadbalancer to test the health of the db. Part of the role is allowed to fail on the secondary ("cannot execute \_\_\_ in a read-only transaction"), as it should pass on the primary and be replicated over. The hba conf change needs to be run on both. + +The vars are encrypted, so run: ansible-vault decrypt roles/bigip/vars/main.yml Then run the bigip role on the VMs: @@ -177,6 +192,6 @@ Then run the bigip role on the VMs: ansible-playbook -i inventory.yml -e bigip_password=xxx bigip.yml ``` -### Links: +### Links: -https://www.enterprisedb.com/postgres-tutorials/postgresql-replication-and-automatic-failover-tutorial#replication \ No newline at end of file +https://www.enterprisedb.com/postgres-tutorials/postgresql-replication-and-automatic-failover-tutorial#replication diff --git a/ansible/rejoin.yml b/ansible/rejoin.yml index 701d1cb..d8964f7 100644 --- a/ansible/rejoin.yml +++ b/ansible/rejoin.yml @@ -1,9 +1,9 @@ +--- - name: Rejoin hosts: servers remote_user: ubuntu - # loops over both servers roles: - role: rejoin vars: - vm_ip: '{{ ansible_host }}' # the current vm's ip - when: ansible_host == rejoin_ip # only run on the one that needs to be rejoined \ No newline at end of file + rejoin_vm_ip: "{{ ansible_host }}" # the current vm's ip + when: ansible_host == rejoin_ip # only run on the one that needs to be rejoined diff --git a/ansible/requirements.txt b/ansible/requirements.txt index 29772cb..b5450ad 100644 --- a/ansible/requirements.txt +++ b/ansible/requirements.txt @@ -1,8 +1,11 @@ ansible-core~=2.15.0 ansible-lint~=6.17.0 +# ansible-core~=2.17.4 +# ansible-lint~=24.9.2 powerline-status powerline-gitstatus netaddr~=0.7.19 
openstacksdk~=1.3.0 python-openstackclient~=6.2.0 -psycopg2-binary \ No newline at end of file +psycopg2-binary +wheel diff --git a/ansible/requirements.yml b/ansible/requirements.yml new file mode 100644 index 0000000..45323db --- /dev/null +++ b/ansible/requirements.yml @@ -0,0 +1,7 @@ +--- +collections: + - ansible.posix + - ansible.utils + - community.general + - community.postgresql + - openstack.cloud diff --git a/ansible/roles/addsshkeys/defaults/main.yml b/ansible/roles/addsshkeys/defaults/main.yml new file mode 100644 index 0000000..2b64084 --- /dev/null +++ b/ansible/roles/addsshkeys/defaults/main.yml @@ -0,0 +1,4 @@ +--- +addsshkeys_list: + - name: + key: diff --git a/ansible/roles/addsshkeys/tasks/main.yml b/ansible/roles/addsshkeys/tasks/main.yml index 5881bf2..5232bcc 100644 --- a/ansible/roles/addsshkeys/tasks/main.yml +++ b/ansible/roles/addsshkeys/tasks/main.yml @@ -1,9 +1,9 @@ ---- +--- - name: Add users keys to authorized_keys - ansible.builtin.authorized_key: + ansible.posix.authorized_key: user: ubuntu # this is the username on the remotehost whose authorized keys are being modified state: present key: "{{ item.key }}" - loop: '{{ authorized_keys_list }}' + loop: "{{ addsshkeys_list }}" loop_control: - label: "adding {{ item.name }} key to authorized_keys" \ No newline at end of file + label: "adding {{ item.name }} key to authorized_keys" diff --git a/ansible/roles/bigip/defaults/main.yml b/ansible/roles/bigip/defaults/main.yml new file mode 100644 index 0000000..660c97c --- /dev/null +++ b/ansible/roles/bigip/defaults/main.yml @@ -0,0 +1,4 @@ +--- +bigip_password: +bigip_load_balancer_ips: + - address: diff --git a/ansible/roles/bigip/tasks/main.yml b/ansible/roles/bigip/tasks/main.yml index a0813d7..a705ced 100644 --- a/ansible/roles/bigip/tasks/main.yml +++ b/ansible/roles/bigip/tasks/main.yml @@ -1,41 +1,41 @@ ---- +--- - name: Create bigip user and basic database - block: - # create user - - name: Create bigip user - 
community.postgresql.postgresql_user: - name: bigip - #db: bigip - password: '{{ bigip_password }}' - become: true - become_user: postgres - # create database - - name: Create a bigip database, with owner bigip - community.postgresql.postgresql_db: - name: bigip - owner: bigip - become: true - become_user: postgres - # create the schema - - name: Create the schema in bigip - community.postgresql.postgresql_script: - db: bigip - path: /etc/postgresql/16/db/bigip/bigip.sql - become: true - become_user: postgres - - name: Grant bigip priveleges on bigip database for table test - community.postgresql.postgresql_privs: - database: bigip - objs: test # only have rights on table test - privs: SELECT - role: bigip - grant_option: true - become: true - become_user: postgres # this is allowed to fail on the secondary, should work on the primary and be replicated over ignore_errors: true + block: + - name: Create bigip user + community.postgresql.postgresql_user: + name: bigip + # db: bigip + password: "{{ bigip_password }}" + become: true + become_user: postgres -# loop over the two ips of the load balancers, to add to hba conf + - name: Create a bigip database, with owner bigip + community.postgresql.postgresql_db: + name: bigip + owner: bigip + become: true + become_user: postgres + + - name: Create the schema in bigip + community.postgresql.postgresql_script: + db: bigip + path: /etc/postgresql/16/db/bigip/bigip.sql + become: true + become_user: postgres + + - name: Grant bigip priveleges on bigip database for table test + community.postgresql.postgresql_privs: + database: bigip + objs: test # only have rights on table test + privs: SELECT + role: bigip + grant_option: true + become: true + become_user: postgres + +# loop over the two ips of the load balancers, to add to hba conf - name: Change hba conf to allow connections from bigip (load balancer) without an encrypted password community.postgresql.postgresql_pg_hba: dest: /etc/postgresql/16/main/pg_hba.conf @@ -45,6 
+45,6 @@ databases: bigip users: bigip become: true - loop: '{{ load_balancer_ips }}' + loop: "{{ bigip_load_balancer_ips }}" loop_control: - label: "adding {{ item.address }} to hba conf" \ No newline at end of file + label: "adding {{ item.address }} to hba conf" diff --git a/ansible/roles/deploy/defaults/main.yml b/ansible/roles/deploy/defaults/main.yml index 6d4b19a..98f6f21 100644 --- a/ansible/roles/deploy/defaults/main.yml +++ b/ansible/roles/deploy/defaults/main.yml @@ -1,12 +1,11 @@ --- -deploy_envars: - - LARD_CONN_STRING: - - STINFO_CONN_STRING: - deploy_files: - src: lard_ingestion.service dest: /etc/systemd/system mode: "0664" + - src: var_file + dest: /etc/systemd/lard_ingestion.var + mode: "0664" - src: "{{ playbook_dir }}/../target/release/lard_ingestion" dest: /usr/local/bin mode: "0755" diff --git a/ansible/roles/deploy/files/lard_ingestion.service b/ansible/roles/deploy/files/lard_ingestion.service index 7048c36..1540766 100644 --- a/ansible/roles/deploy/files/lard_ingestion.service +++ b/ansible/roles/deploy/files/lard_ingestion.service @@ -5,6 +5,7 @@ Description=lard ingestion service User=lard Group=lard WorkingDirectory=/usr/local/bin +EnvironmentFile=/etc/systemd/lard_ingestion.var ExecStart=/usr/local/bin/lard_ingestion lard Restart=on-failure diff --git a/ansible/roles/deploy/files/var_file b/ansible/roles/deploy/files/var_file new file mode 100644 index 0000000..e78a1c7 --- /dev/null +++ b/ansible/roles/deploy/files/var_file @@ -0,0 +1,14 @@ +$ANSIBLE_VAULT;1.1;AES256 +38313465366433613266313236363863393931333631613262636162666263376533343239333930 +6665366132356337303739303432323865616630623334310a656438393735393563353634306332 +30303436363866346564656336333331303334643065373736386665666465633834396334636566 +3637323936363239340a623664356532626661633766663166303264353533663232646635383632 +61353334326130373361323132393832613163306438393466313631313636623730666661363335 
+39313161633434373464616636343334363136303833383634343133313463663830373331616364 +35323030343862323265653964316161393162616463343636656562396361663664306434616632 +61636330353565653131333632326534393765313365633930316234646236346434646233313838 +34323264316635336239353765636665316233363665386533663130653063343266333936616536 +66373137373436356532623339336365303937323632646265623339643935643735303263316666 +66346662333061623138323562303164363931356630316162323935646537663661396339353032 +61656433653239313935653330623937303334356262333338303239363330313330636538663136 +3564 diff --git a/ansible/roles/deploy/tasks/main.yml b/ansible/roles/deploy/tasks/main.yml index 222dfa5..fabd544 100644 --- a/ansible/roles/deploy/tasks/main.yml +++ b/ansible/roles/deploy/tasks/main.yml @@ -1,4 +1,5 @@ --- +# TODO: do we need separate user/groups? Can't we simply use ubuntu? - name: Create lard group ansible.builtin.group: name: lard @@ -21,25 +22,11 @@ mode: "{{ item.mode }}" owner: root group: root - become: true loop: "{{ deploy_files }}" -- name: Import environment variables # noqa: command-instead-of-module - ansible.builtin.command: systemctl import-environment LARD_CONN_STRING STINFO_CONN_STRING - # TODO: ansible docs say that 'environment:' is "not a recommended way to pass in confidential data." 
- environment: "{{ deploy_envars }}" - become: true - changed_when: false - - name: Start LARD ingestion service ansible.builtin.systemd: daemon_reload: true name: lard_ingestion state: restarted enabled: true - become: true - -- name: Unset environment variables # noqa: command-instead-of-module - ansible.builtin.command: systemctl unset-environment LARD_CONN_STRING STINFO_CONN_STRING - become: true - changed_when: false diff --git a/ansible/roles/movefloatingip/tasks/main.yml b/ansible/roles/movefloatingip/tasks/main.yml index a627098..eb4a9cc 100644 --- a/ansible/roles/movefloatingip/tasks/main.yml +++ b/ansible/roles/movefloatingip/tasks/main.yml @@ -1,3 +1,59 @@ -# roles/movefloatingip/tasks/main.yml -- name: Movefloatingip - import_tasks: movefloatingip.yml \ No newline at end of file +--- +# Switch over the primary's particular floating ip +# this makes sense to do after successfully switching over, +# however it means that the stuff writing to the primary needs to be +# robust enough to handle getting told the db is in a read only state for a short period. 
+- name: Move primary floating ip + # unfortunately it seems that attaching the floating ip results in a timeout + # even though it actually succeeds + ignore_errors: true + block: + # remove from old primary + - name: Detach floating ip address that we keep connected to the primary + openstack.cloud.floating_ip: + cloud: "{{ ostack_cloud }}" + region_name: "{{ ostack_region }}" + server: "{{ name_primary }}" + state: absent + network: public + floating_ip_address: "{{ primary_floating_ip }}" + + - name: Gather information about new primary server + openstack.cloud.server_info: + cloud: "{{ ostack_cloud }}" + region_name: "{{ ostack_region }}" + name: "{{ name_standby }}" + register: new_primary_server + + - name: Print out the ipalias port information for the server + ansible.builtin.debug: + msg: "Server {{ new_primary_server.servers[0].addresses.ipalias }}" + + # add to what is now primary (used to be standby) + - name: Attach floating ip address that we keep connected to the primary + openstack.cloud.floating_ip: + cloud: "{{ ostack_cloud }}" + region_name: "{{ ostack_region }}" + server: "{{ new_primary_server.servers[0].id }}" + state: present + reuse: true + network: public + fixed_address: "{{ new_primary_server.servers[0].addresses.ipalias[0].addr }}" + floating_ip_address: "{{ primary_floating_ip }}" + wait: true + timeout: 60 + when: new_primary_server.servers[0].addresses.ipalias | length <=1 + +- name: Check floating ip is attached + openstack.cloud.floating_ip_info: + cloud: "{{ ostack_cloud }}" + region_name: "{{ ostack_region }}" + floating_ip_address: "{{ primary_floating_ip }}" + register: fip + +# this will not run if the ip is not now on the right vm +- name: Print out the floating ip information to confirm its ok + ansible.builtin.debug: + msg: "Floating ip {{ fip }}" + become: true + when: fip.floating_ips[0].port_details.device_id == new_primary_server.servers[0].id diff --git a/ansible/roles/movefloatingip/tasks/movefloatingip.yml 
b/ansible/roles/movefloatingip/tasks/movefloatingip.yml deleted file mode 100644 index 26ab05c..0000000 --- a/ansible/roles/movefloatingip/tasks/movefloatingip.yml +++ /dev/null @@ -1,59 +0,0 @@ ---- -# Switch over the primary's particular floating ip -# this makes sense to do after successfully switching over, -# however it means that the stuff writing to the primary needs to be -# robust enough to handle getting told the db is in a read only state for a short period. -- name: Move primary floating ip - block: - # remove from old primary - - name: Detach floating ip address that we keep connected to the primary - openstack.cloud.floating_ip: - cloud: '{{ ostack_cloud }}' - region_name: '{{ ostack_region }}' - server: '{{ name_primary }}' - state: absent - network: public - floating_ip_address: '{{ primary_floating_ip }}' - - - name: Gather information about new primary server - openstack.cloud.server_info: - cloud: '{{ ostack_cloud }}' - region_name: '{{ ostack_region }}' - name: '{{ name_standby }}' - register: new_primary_server - - - name: Print out the ipalias port information for the server - ansible.builtin.debug: - msg: "Server {{ new_primary_server.servers[0].addresses.ipalias }}" - - # add to what is now primary (used to be standby) - - name: Attach floating ip address that we keep connected to the primary - openstack.cloud.floating_ip: - cloud: '{{ ostack_cloud }}' - region_name: '{{ ostack_region }}' - server: '{{ new_primary_server.servers[0].id }}' - state: present - reuse: true - network: public - fixed_address: '{{ new_primary_server.servers[0].addresses.ipalias[0].addr }}' - floating_ip_address: '{{ primary_floating_ip }}' - wait: true - timeout: 60 - when: new_primary_server.servers[0].addresses.ipalias | length <=1 - # unfortunately it seems that attaching the floating ip results in a timeout - # even though it actually succeeds - ignore_errors: true - -- name: Check floating ip is attached - openstack.cloud.floating_ip_info: - cloud: '{{ 
ostack_cloud }}' - region_name: '{{ ostack_region }}' - floating_ip_address: '{{ primary_floating_ip }}' - register: fip - -# this will not run if the ip is not now on the right vm -- name: Print out the floating ip information to confirm its ok - ansible.builtin.debug: - msg: "Floating ip {{ fip }}" - become: true - when: fip.floating_ips[0].port_details.device_id == new_primary_server.servers[0].id \ No newline at end of file diff --git a/ansible/roles/networks/defaults/main.yml b/ansible/roles/networks/defaults/main.yml new file mode 100644 index 0000000..ba0655c --- /dev/null +++ b/ansible/roles/networks/defaults/main.yml @@ -0,0 +1,5 @@ +--- +# dictionary that maps dns to list of IPs +networks_dns: + localhost: + - 0.0.0.0 diff --git a/ansible/roles/networks/meta/main.yml b/ansible/roles/networks/meta/main.yml new file mode 100644 index 0000000..97d8be7 --- /dev/null +++ b/ansible/roles/networks/meta/main.yml @@ -0,0 +1,4 @@ +--- +dependencies: + # All the 'ostack_*' variables are defined in this role + - role: ostack diff --git a/ansible/roles/networks/tasks/create-ipalias-network.yml b/ansible/roles/networks/tasks/create-ipalias-network.yml index 7b8ee6d..7ccfcc0 100644 --- a/ansible/roles/networks/tasks/create-ipalias-network.yml +++ b/ansible/roles/networks/tasks/create-ipalias-network.yml @@ -1,42 +1,42 @@ --- - - name: Create ipalias network - openstack.cloud.network: - cloud: '{{ ostack_cloud }}' - region_name: '{{ ostack_region }}' - name: '{{ ipalias_network_name }}' - external: false - state: present - run_once: true +- name: Create ipalias network + openstack.cloud.network: + cloud: "{{ ostack_cloud }}" + region_name: "{{ ostack_region }}" + name: "{{ ostack_ipalias_network_name }}" + external: false + state: present + run_once: true - - name: Create ipalias network subnet - openstack.cloud.subnet: - cloud: '{{ ostack_cloud }}' - region_name: '{{ ostack_region }}' - network_name: '{{ ipalias_network_name }}' - name: '{{ ipalias_network_name 
}}-subnet' - cidr: 192.168.20.0/24 - state: present - dns_nameservers: '{{ met_dns[ostack_region] }}' - run_once: true +- name: Create ipalias network subnet + openstack.cloud.subnet: + cloud: "{{ ostack_cloud }}" + region_name: "{{ ostack_region }}" + network_name: "{{ ostack_ipalias_network_name }}" + name: "{{ ostack_ipalias_network_name }}-subnet" + cidr: "{{ ostack_ipalias_cidr }}" + state: present + dns_nameservers: "{{ networks_dns[ostack_region] }}" + run_once: true - - name: Connect ipalias network to public network - openstack.cloud.router: - cloud: '{{ ostack_cloud }}' - region_name: '{{ ostack_region }}' - state: present - name: ipalias-router - network: public - interfaces: - - '{{ ipalias_network_name }}-subnet' - run_once: true +- name: Connect ipalias network to public network + openstack.cloud.router: + cloud: "{{ ostack_cloud }}" + region_name: "{{ ostack_region }}" + state: present + name: ipalias-router + network: public + interfaces: + - ipalias-subnet + run_once: true - - name: Remove default gateway for subnet - openstack.cloud.subnet: - cloud: '{{ ostack_cloud }}' - region_name: '{{ ostack_region }}' - network_name: '{{ ipalias_network_name }}' - name: '{{ ipalias_network_name }}-subnet' - cidr: '{{ ipalias_ostack_network_cidr }}' - no_gateway_ip: true - state: present - run_once: true +- name: Remove default gateway for subnet + openstack.cloud.subnet: + cloud: "{{ ostack_cloud }}" + region_name: "{{ ostack_region }}" + network_name: ipalias + name: ipalias-subnet + cidr: "{{ ostack_ipalias_cidr }}" + no_gateway_ip: true + state: present + run_once: true diff --git a/ansible/roles/networks/tasks/create-project-network.yml b/ansible/roles/networks/tasks/create-project-network.yml index 1eff31c..ee8abe8 100644 --- a/ansible/roles/networks/tasks/create-project-network.yml +++ b/ansible/roles/networks/tasks/create-project-network.yml @@ -1,28 +1,28 @@ --- - - name: Create private network - openstack.cloud.network: - cloud: '{{
ostack_cloud }}' - region_name: '{{ ostack_region }}' - state: present - name: '{{ ostack_network_name }}' - external: false +- name: Create private network + openstack.cloud.network: + cloud: "{{ ostack_cloud }}" + region_name: "{{ ostack_region }}" + state: present + name: "{{ ostack_network_name }}" + external: false - - name: Create private network subnet - openstack.cloud.subnet: - state: present - cloud: '{{ ostack_cloud }}' - region_name: '{{ ostack_region }}' - network_name: '{{ ostack_network_name }}' - name: '{{ ostack_network_name }}-subnet' - cidr: '{{ ostack_network_cidr }}' - dns_nameservers: '{{ met_dns[ostack_region] }}' +- name: Create private network subnet + openstack.cloud.subnet: + state: present + cloud: "{{ ostack_cloud }}" + region_name: "{{ ostack_region }}" + network_name: "{{ ostack_network_name }}" + name: "{{ ostack_network_name }}-subnet" + cidr: "{{ ostack_cidr }}" + dns_nameservers: "{{ networks_dns[ostack_region] }}" - - name: Connect private network to public network - openstack.cloud.router: - cloud: '{{ ostack_cloud }}' - region_name: '{{ ostack_region }}' - state: present - name: public-router - network: public - interfaces: - - '{{ ostack_network_name }}-subnet' +- name: Connect private network to public network + openstack.cloud.router: + cloud: "{{ ostack_cloud }}" + region_name: "{{ ostack_region }}" + state: present + name: public-router + network: public + interfaces: + - "{{ ostack_network_name }}-subnet" diff --git a/ansible/roles/networks/tasks/create-project-security-group.yml b/ansible/roles/networks/tasks/create-project-security-group.yml index e4ebe62..99b3ad0 100644 --- a/ansible/roles/networks/tasks/create-project-security-group.yml +++ b/ansible/roles/networks/tasks/create-project-security-group.yml @@ -1,21 +1,21 @@ --- - - name: Create security groups - openstack.cloud.security_group: - cloud: '{{ ostack_cloud }}' - region_name: '{{ ostack_region }}' - name: '{{ item }}' - description: Created with Ansible - 
loop: '{{ security_groups | map(attribute="name") | list | unique }}' +- name: Create security groups + openstack.cloud.security_group: + cloud: "{{ ostack_cloud }}" + region_name: "{{ ostack_region }}" + name: "{{ item }}" + description: Created with Ansible + loop: '{{ ostack_security_groups | map(attribute="name") | list | unique }}' - - name: Populate security groups - openstack.cloud.security_group_rule: - cloud: '{{ ostack_cloud }}' - region_name: '{{ ostack_region }}' - security_group: '{{ item.name }}' - protocol: tcp - port_range_max: "{{ item.rule.port }}" - port_range_min: "{{ item.rule.port }}" - remote_ip_prefix: "{{ item.rule.subnet }}" - loop: '{{ security_groups }}' - loop_control: - label: "updating security group {{ item.name }} with rule {{ item.rule }}" +- name: Populate security groups + openstack.cloud.security_group_rule: + cloud: "{{ ostack_cloud }}" + region_name: "{{ ostack_region }}" + security_group: "{{ item.name }}" + protocol: tcp + port_range_max: "{{ item.rule.port }}" + port_range_min: "{{ item.rule.port }}" + remote_ip_prefix: "{{ item.rule.subnet }}" + loop: "{{ ostack_security_groups }}" + loop_control: + label: "updating security group {{ item.name }} with rule {{ item.rule }}" diff --git a/ansible/roles/networks/tasks/main.yml b/ansible/roles/networks/tasks/main.yml index 10a5623..f9417f2 100644 --- a/ansible/roles/networks/tasks/main.yml +++ b/ansible/roles/networks/tasks/main.yml @@ -1,12 +1,13 @@ +--- # roles/networks/tasks/main.yml - - name: Create the project network (if it doesn't exist) - import_tasks: create-project-network.yml +- name: Create the project network (if it doesn't exist) + ansible.builtin.import_tasks: create-project-network.yml - - name: Create the project security group (if it doesn't exist) - import_tasks: create-project-security-group.yml +- name: Create the project security group (if it doesn't exist) + ansible.builtin.import_tasks: create-project-security-group.yml - - name: Create the ipalias
network (if it doesn't exist) - import_tasks: create-ipalias-network.yml +- name: Create the ipalias network (if it doesn't exist) + ansible.builtin.import_tasks: create-ipalias-network.yml - - name: Create ping security group - import_tasks: open-for-ping.yml \ No newline at end of file +- name: Create ping security group + ansible.builtin.import_tasks: open-for-ping.yml diff --git a/ansible/roles/networks/tasks/open-for-ping.yml b/ansible/roles/networks/tasks/open-for-ping.yml index 0e383f1..88de7ae 100644 --- a/ansible/roles/networks/tasks/open-for-ping.yml +++ b/ansible/roles/networks/tasks/open-for-ping.yml @@ -1,15 +1,15 @@ -### stuff needed for ping +--- - name: Create ping security group openstack.cloud.security_group: - cloud: '{{ ostack_cloud }}' - region_name: '{{ ostack_region }}' + cloud: "{{ ostack_cloud }}" + region_name: "{{ ostack_region }}" name: ping description: Created with Ansible - name: Populate ping security group openstack.cloud.security_group_rule: - cloud: '{{ ostack_cloud }}' - region_name: '{{ ostack_region }}' + cloud: "{{ ostack_cloud }}" + region_name: "{{ ostack_region }}" security_group: ping protocol: icmp - remote_ip_prefix: "157.249.0.0/16" \ No newline at end of file + remote_ip_prefix: "157.249.0.0/16" diff --git a/ansible/roles/networks/vars/main.yml b/ansible/roles/networks/vars/main.yml index 4071b6b..2ec8050 100644 --- a/ansible/roles/networks/vars/main.yml +++ b/ansible/roles/networks/vars/main.yml @@ -1,102 +1,105 @@ $ANSIBLE_VAULT;1.1;AES256 -63316462303232663161396533306631623963643536613865363931316530333935323339343165 -3334393663303564313730656263323461313133336263350a663637386337626430306430353138 -64326339663966643130363865373431656663333234666465363630616366376235346662373366 -3365333964303362660a646139336135343265613032353636616561323435366530633865323635 -63626239636465633662666262356665333162653563376266373530323562396561626535363366
-37333131353737353033313461613839366161623936656666396131646239303464623165616630 -66333866316231356161333464383062383634383530636162366464353361316532393033373963 -61646134623938633761303536646432616237316364323434646634393465363438616532313236 -61613639663835313635306265663263373639333062383937633932636166333437626461663932 -36643666613763626261316333386332623166333433306661623531623232323662396364643133 -31356665663935343436646136393935653439306332656332303834363730623937363930353737 -36323564396333326433653463313136663331623862663239663234613135333162316466383136 -39376638633063643732343764366535323166663063323034306637376665336632653264613763 -36613139376237373035393236336331653235656435303631323730323163373938666635383038 -66323662316137393137613235373261626639623331376632653935343066303034653534316236 -35653339313864313835316462383164323633323332366133343839376463623966353635363934 -35616636326432336631613736363638663439336232623064643631663830313330623736366632 -36643031383032353738653131643262383936396166353331336137333265393561626163633464 -37616662646363663933656530366633626338386530613835626533306164396336383561616533 -39636539653134366232346330386239396133313132363437313238306333623232356462386434 -37343662646562353031616535346336613131343838616532623366613136386639363763323734 -33646138313162393763653363633435623965376332633463313264333636663238366666376161 -36626365333937323430363035353439653338633838326331303830666261653866323634633434 -32343738636636356539643762393534333739623539326234613639633435636165626433616337 -35666564636463623765393232336432386636343133383537363061343064613336663665316666 -33386535376165303966306530653161383735323761363534326335653732346633333865366135 -63303464303138653937646264316164613265353934316334366335626231363832613365363532 -65343636643937376136386235366235643363343166353462663730346161393362623730643965 -39303062666266376431326333663933356465373233653835363866636237363565376662643430 
-31656236623131646633643632366233633066653762323438383538366232363634353331313366 -66396331326434343362663931353866373234306663623631383330333533656139623565313336 -36303136333535613537393231613135613935323436303037376134653831353530666266376130 -32353834343461393133646134333065663239326535313831366630303361333566376532346462 -37363635366634323531616536393431656365613436666433616530356538376531656130366531 -37656130303132356432363930626632356336653235396362643062363662336530646333356538 -30373738353836363137306363613433376232366239623134643035343066653863623766653837 -62313039663666313033636331346131386632303430623034326664396663356262336363366265 -31393937373261353963623064653737343137376461353231656365373934326263376464643964 -33336566643131643163636162343862646665623139653639643439613261323366333634326438 -63633932333866346164616166343063386234383732333863333034346436356637653665626463 -34366234643339343162373663623136303236313266356164373362636237393631303866383034 -62616630663132613566336663633265356561646662333764383563353966383930613137653833 -62383661643739313230316332626236366435326662303831343936336166313033373561363037 -39393239613531643437346466383234393263643034363066366262316535313532326639356637 -66313762626232373839626638343465663935333061383839373963353833623932616433373336 -30363465623362326466323166353266346239326134376230633631653739393430326663316133 -61356431393665646664623135306538326430336137383931316165663561306262353239653765 -30636563626665363337623135346663383330626663373633336337623662353562393732646665 -37633636336564386364343632636532376536366165623032636266363765343864306234613735 -32306431393261313230326666616162303664396464303236643666336566313065663562613766 -65316132613339343864383635636433333933356664336435343134666536396162663031353532 -32373765323733656533353965333564393132656238333136663838396137336439393730303738 -34653130386130333038643833656235633531333839663462656262336262396362643766653064 
-36633832346431346538306263356366613661393535356333386537383464373436623339623334 -34353038383563393334373134353734666564353639643763346166373862323866613839373539 -38643130346665336634393466356263383733613134333162653265393065633434616261323462 -65346264376534343735643039396538376637326639663966643939656663373636396566643638 -37366666623031323138356164363038393538383261313832366262636535643163663832613037 -31336136626134336661626464623439636533303731643639353664343163346332623032396430 -64383433643832343962343130636230626165376466386635363332633563333865633830383830 -66626334626433626339363837633235316636393163383464373638316132386363393739306230 -34343033393533303135343830333531626238393964306137323564623962313032633562366139 -35323261323531663335613039613764353262343433646537393830356135333265326238396663 -32636261623163633737666565666631663736333964363839373234633663343662366364646161 -63613365386335373637353633326434373632393334613131386439303339346530316334326364 -30336662653037656339393230323866643536643366383232393038323138323532636235653832 -61376338323839383539313364633936643934303264616131626233396563656163383836653132 -39393131393730343935663562386537313032383835663963653365343738373437303263313435 -32316365633333326131363034323463373065653930376365633834396137653634303038323364 -30303739363230353235666233636464373635396433616535643364666638656339653065366637 -35303531656665333334636535613631623133303662373235393231396234333566396435633839 -34663063366163653761336661386633656664313464663437323036373533323464373634616237 -64633666663033623234376630393361616638303166393230626336666236643462363565656431 -30626239323963376361353065383261383033326238613635643062373439616266313361306633 -64393263343130663765326562366266366538373130316638613734613134333030613831383938 -62393263343337306230363733326638366538393230313631383033313738346536656361623338 -34323131356230376530623035613133636434643766383162623363633464366661353031303863 
-31396135333236373631363162326235313037343461656430376330383266613733656162616431 -31373231653361313465653233613537386661303737633730613033633334343964336665623639 -63393763343962346439653335333366346238643435666631356338366637316634373861383631 -38316563313866663561626632306635383062633237343038653032396266666666336436636138 -31666330323531393362366535326538626463633439393237633131376366393136386264306433 -33663434373662383632653264386566643132613938373062333635666138393136353035663666 -61636539353038363331306465383336303564633664623061326665383565616334363336313635 -37336664313334663237343762373362306239303362613966313765396666656663646636376338 -34633266343763306566633261343535653238663433613238633331306135626165366265613539 -35313334353238633532636139663363383130373066643230653535613964323061373862633433 -66343661323030666534373866363130316265346535303266616663316333666665626432386334 -36323865313661313365353666663563313232316531373761323534366266353462656132373738 -61393134656139393966636334326338643434626134333637626364326263333534643338383038 -34313339626263613566376539633737333532356131363561626364303738653066366337343935 -33323235616564316538356431623164373836356365323766613136323266616365646465613134 -30326161623665636166383636653266323739663236326162356238663865303463663964383463 -35396535623263316364366537626630643131633866396639386139373137663366636332373034 -66366231393932373230363161623039623463353732323962393361643238613130633835386231 -66373534363562663163333532653664313664306539303362346535663131303037383231616362 -30663635343563393163616333396534366637303430633264643161653865643264386262396166 -64626562396238643566326361336538646436353166343639383533386635356436306666396531 -38333836353961626431646635343032346232613464336531633862386439353131376130656632 -35356639303162663862663036396337336233613534613431303165646239316466366535613834 -3839 +38346131353064383463313564326435653835663261633735393438313936666162636561343736 
+3062376431363262353562343331303439323836336133660a336139643062313432646162386432 +39623765323761363737643066613165303635633733653864636132626463333362323830323339 +6431306366393836320a626439313235376335656239393564313563613732363063353939663164 +62666331333262376535386334373161336239623330393964396332653964353061643339346434 +66636434316436383037373533373031643736646638653638373566636661383336393132386366 +39323836326232333233663333306539643461323563613865316331363563316261643165663335 +63626163343263333139306531343466326230313464626134626330653433306261616530623936 +36643138313465396330343062363531663863343036656538313466333065663832326266393234 +31366530623939323861366631613732636336343561323530363137363236353335343033616363 +35373631393230363831343733643963643630383436346230343865313134333437396230633165 +35623662323232663732326136633332303537616238356164333934396131353339623832393332 +39666539373462646462333662353562326232636537643264346261346238626136316262303636 +61326531386233343362313564333766376562393164616233393139616534366537666431303233 +38353035373537626539363434666531643334663737636533313938353539376231326464636661 +39363836626534373838343937633736623932393365656635613035376634333831373934333835 +63613463343735383939303634623132366636333861323236376162353336343761343631333536 +37396266373033626436666539376133373133336339663330663632666232353764656632363235 +66343964646562623731323162373331356138636362646364373335616665643162346563383636 +35376433343162366131643861643161623430323536353066633231393936356264373464373530 +35306130386630613438623038646338656538326235666566303366623033303765616430666662 +35313338633463313637303937636239366637613134653966616630373231636163393564636465 +32343662613437613738306537333033613639336265323838613935653136633137316231343732 +63613434393561313834313263666565323064633238326236663530646565613330663239346430 +30646636306538653462393366363336336436623834343562643732306230353031306131376366 
+35626335613732373564343733333930353831626331346531303364393563623032303330663231 +33643537306664633861343665356230626263626334313366366239323064303638613362326639 +61333963343838666461393032643061313830616461373434336265316161316561343862306234 +65333466326264623231323564643666326638633366346563656631316633653732343633303838 +34373636396137326533643838626139313031376335303362636333366439313830666338303538 +37386635626532353566353065346466306534653233373264333138616330616530663162653833 +39633837303433396462326636633233353262343539333564663832376465343230383331633835 +30626462363339343331336431393233613535376363373534346635313165333861303866336631 +31343439386163613438346331333962316530393833356232656639313333326363376233633535 +37316532393539616365663765353538346135353963363831316362343264353431326465306634 +33356438316161623562356130633538653233653130633365646166626430346164356365363463 +63356534303034393337636237623137323261373434376433343065333031316261646232643336 +37653965666536383332616635336434643233643230623735376635383733366532643637336163 +34383531333937396362633338333133613836653365366265353561653436313737646130333865 +39663037633332653138356438336233666635363332623036393863386131613932306638323133 +33363532643936656538353333623365636338343533636634626532623065303964393066623064 +31373433326530626465363134363630383935613063366664313661636161623239346661623932 +62663264346439373637306535353336373863633163633836303565313761643662303731386365 +64313062313837646330363561303836626331333035393137323461363437626139393333663139 +63636638353938313365643664386637666166653463343361356563303433333638636438336536 +30633137653933653332373134346163336333353732373861343231616664626362616636666462 +39636135653164626366663837323234656465343832323134663939393134366439623038663138 +32623165356638643361343536316237613334366233633462383733303139393763643662376366 +39616138353333356336303336353035376361636239656161343035393537646163666637303538 
+37393266643430363036386233336139383537626234376365613032303739323565323230623866 +64376465383639643065323131623665633765353832336662326132653366646131366337303030 +66373462383961653266656230626638663833353038356361363538663034313735346365313934 +61626532623536393634356165386665333238386262373964373230643230353930343533613365 +61303935313131643633353663303164613737363731663164306361613861343235303138333439 +30313333636334653332636130386663636637363530336463336330643936306230373862313866 +34626137623630366633396365363231656130386561343037333330643462383038386263313835 +33386161363366616464386230326462646533306339366664313665393836633132623566386137 +63626666636132633265616261653835643533343531636536306663303637613831303533646636 +39663539336465646666396430366639393366656665343237613839666433353165396262343038 +63326461626330306235396230656338356236356461616265666463623533366434643130313364 +61666639353062333332383364616332326338373034336631653961306666373136336439356438 +66653466336333626266326434303662633139323066366535323063396233333139646165326636 +64396139316262323232393161303363616134376436313437353761626339383266316231313561 +62613037373763613439643462386239306664346135353537626238373334643630373165626230 +39333563633236396266376238383230333536336439633966383463623565623830396237306531 +66633933393837646330373165666234643334343365353134386664633964643335633362646432 +35383933356133653339353732373963616335633131353237323636333464343665616335653934 +66396339626536653331613062643939326235353765396464383761616330393363643362636166 +38653130653035643762633564313233356338336466303865346665643063376263633765353731 +30653538303738323238636564373762613562653665326163363363643964666631393464633734 +62323464353562376162393334343464623065646337623361376639363630666665346364656262 +37623833636366316333666264653831376630626539333033343830653533326238333866633866 +36613935646261373935613362633661616438666635313464323439663636373030363633623736 
+36616137663035363730376133396131323132353032643466363366646138386633393663373431 +37373164616561373330303037373264626235323866633763323663636231613563643536623365 +37653864613838386332383934373732636230663034633965323038396138343032323461366238 +66646137353936616235343332343335376632636164623465646439316339313266643964643534 +31626632616261343433373266663639366539313631376466613831393631656536356435343331 +39616565623861636662336137353836383664363662393864336666633237366434326537333939 +63333964346432306635623639323831353434643837326231383531623730326639626233386365 +33663464366435613931303163373738343461306439653665356634376432323938316237396364 +33336334346137663062663865356463616138343135373438333361393237333538386364323463 +66393537313861616134626633356431626238643135366263323833386133313837656464396235 +38663738663638323566626561356362303030646363306539636630336435323166613465386536 +66333433366531373733663163663063393563613465316164653635346362373138343935613736 +63656262633632636165326130393035656436663363613839616662636538623363616362353736 +32343963376138663638373734323864393035393532373836613037313932616534313637363733 +62613636363365623531306565366562343933336236343765303963336637323662626565656235 +35313030393536346365366364353634636561613430626430396239633433336638366133356466 +30353631396564653261366561373639656134323266626261656636346463366430356563333163 +66386231383564633831656666626435663362633761373435363731643839396265643335616466 +64653562663962383166393632646637613266383735666431616333376432633661373037333037 +32663836643636643434656664636430313330303063303437653331353563646332356437656639 +31306564656364386536313934633636633636346634613966353634383164363039393935316561 +62333366343862303761306266313730353563656139343664383232646436356438656238383630 +32306438303661313030663565343364313263626266396239353536616165333965663038663631 +39323265306531643438346233316162626562313361346662343139613735646137656339356534 
+38343636393934383235666561333630306335363463326438663134656662616635363638653661 +37316465613364336562313934343731636234306131363861356430313531613431323965333034 +31626439643863653464343562396565333637643431653763616235393063323765336233323565 +33383965363164363962326137666165633737323362633761373663356634613566313863333833 +35306566386232363439373937613737343963643564373366346232373435663864353965383835 +36383731333563323431303530396436353264633662663765663936336561666165643138636536 +31643333333436386235 diff --git a/ansible/roles/ostack/defaults/main.yml b/ansible/roles/ostack/defaults/main.yml new file mode 100644 index 0000000..9f555ef --- /dev/null +++ b/ansible/roles/ostack/defaults/main.yml @@ -0,0 +1,28 @@ +--- +# TODO: separate what should be public and what private + +# public +ostack_cloud: lard +ostack_region: Ostack2-EXT +ostack_ipalias_network_name: ipalias +# ostack_state: present + +# private +## networks +ostack_network_name: + +# TODO: probably makes sense to move these to network if they are not reused +# and networks_dns should be moved here since it depends on ostack_region +ostack_cidr: +ostack_ipalias_cidr: +ostack_security_groups: + - name: + rule: + subnet: + port: + +## vm +ostack_availability_zone: +ostack_image: +ostack_flavor: +ostack_key_name: diff --git a/ansible/roles/primarystandbysetup/tasks/create-primary.yml b/ansible/roles/primarystandbysetup/tasks/create-primary.yml index 94d364f..dcc9de6 100644 --- a/ansible/roles/primarystandbysetup/tasks/create-primary.yml +++ b/ansible/roles/primarystandbysetup/tasks/create-primary.yml @@ -1,12 +1,20 @@ -# set up a role and provide suitable entries in pg_hba.conf with the database field set to replication +--- +# set up a role and provide suitable entries in pg_hba.conf with the database +# field set to replication -# ensure max_wal_senders is set to a sufficiently large value in the conf file (also possibly max_replication_slots?) 
-# When running a standby server, you must set this parameter to the same or higher value than on the primary server. Otherwise, queries will not be allowed in the standby server. +# ensure max_wal_senders is set to a sufficiently large value in the conf file +# (also possibly max_replication_slots?) +# When running a standby server, you must set this parameter to the same or +# higher value than on the primary server. Otherwise, queries will not be +# allowed in the standby server. -# set wal_keep_size to a value large enough to ensure that WAL segments are not recycled too early, or configure a replication slot for the standby? +# set wal_keep_size to a value large enough to ensure that WAL segments are not +# recycled too early, or configure a replication slot for the standby? # if there is a WAL archive accessible to the standby this may not be needed? -# On systems that support the keepalive socket option, setting tcp_keepalives_idle, tcp_keepalives_interval and tcp_keepalives_count helps the primary promptly notice a broken connection. +# On systems that support the keepalive socket option, setting +# tcp_keepalives_idle, tcp_keepalives_interval and tcp_keepalives_count helps +# the primary promptly notice a broken connection. 
# example auth # Allow the user "foo" from host 192.168.1.100 to connect to the primary @@ -14,249 +22,289 @@ # # TYPE DATABASE USER ADDRESS METHOD # host replication foo 192.168.1.100/32 md5 ---- - - name: Create a new database with name lard - community.postgresql.postgresql_db: - name: lard - become: true - become_user: postgres - delegate_to: '{{ primary_ip }}' - remote_user: ubuntu - - - name: Copy the db folder to the remote - ansible.builtin.copy: - src: ../../../../db/ - dest: /etc/postgresql/16/db/ - mode: '0755' - become: true - delegate_to: '{{ primary_ip }}' - remote_user: ubuntu - - - name: Create the public schema in lard - community.postgresql.postgresql_script: - db: lard - path: /etc/postgresql/16/db/public.sql - become: true - become_user: postgres - delegate_to: '{{ primary_ip }}' - remote_user: ubuntu - - - name: Create the labels schema in lard - community.postgresql.postgresql_script: - db: lard - path: /etc/postgresql/16/db/labels.sql - become: true - become_user: postgres - delegate_to: '{{ primary_ip }}' - remote_user: ubuntu - - - name: Connect to lard database, create user - community.postgresql.postgresql_user: - db: lard - name: lard_user - password: '{{ db_password }}' - role_attr_flags: SUPERUSER # not desired, but the privelege granting doesn't seem to work? 
- become: true - become_user: postgres - delegate_to: '{{ primary_ip }}' - remote_user: ubuntu - # - name: Grant lard_user priveleges on lard database - # community.postgresql.postgresql_privs: - # type: database - # db: lard - # privs: ALL - # role: lard_user - # become: true - # become_user: postgres - - # MAKE IT THE PRIMARY - - name: Set wal_level parameter - community.postgresql.postgresql_set: - name: wal_level - value: replica # https://www.postgresql.org/docs/current/runtime-config-wal.html#GUC-WAL-LEVEL - become: true - become_user: postgres - delegate_to: '{{ primary_ip }}' - remote_user: ubuntu - - name: Set hot_standby parameter - community.postgresql.postgresql_set: - name: hot_standby - value: true - become: true - become_user: postgres - delegate_to: '{{ primary_ip }}' - remote_user: ubuntu - - name: Set hot_standby_feedback parameter - community.postgresql.postgresql_set: - name: hot_standby_feedback - value: true - become: true - become_user: postgres - delegate_to: '{{ primary_ip }}' - remote_user: ubuntu - - name: Set max_wal_senders parameter - community.postgresql.postgresql_set: - name: max_wal_senders - value: 10 - become: true - become_user: postgres - delegate_to: '{{ primary_ip }}' - remote_user: ubuntu - - name: Set wal_log_hints parameter # needs to be enabled to use pg_rewind - # https://www.postgresql.org/docs/current/app-pgrewind.html - community.postgresql.postgresql_set: - name: wal_log_hints - value: true - become: true - become_user: postgres - delegate_to: '{{ primary_ip }}' - remote_user: ubuntu - - name: Set max_replication_slots parameter - community.postgresql.postgresql_set: - name: max_replication_slots - value: 10 - become: true - become_user: postgres - delegate_to: '{{ primary_ip }}' - remote_user: ubuntu - # make it SYNCHRONOUS REPLICATION (without the next two settings it would be asynchronous) - - name: Set synchronous_standby_names parameter - community.postgresql.postgresql_set: - name: synchronous_standby_names # 
https://www.postgresql.org/docs/current/runtime-config-replication.html#GUC-SYNCHRONOUS-STANDBY-NAMES - value: "*" # all the standbys - become: true - become_user: postgres - delegate_to: '{{ primary_ip }}' - remote_user: ubuntu - - name: Set synchronous_commit parameter - community.postgresql.postgresql_set: - name: synchronous_commit # https://www.postgresql.org/docs/current/runtime-config-wal.html#GUC-SYNCHRONOUS-COMMIT - value: on # will not give standby query consistency (tradeoff for better write performance), but will give standby durable commit after OS crash - become: true - become_user: postgres - delegate_to: '{{ primary_ip }}' - remote_user: ubuntu - - # repmgr - # https://www.repmgr.org/docs/current/quickstart-repmgr-conf.html - - name: Create a repmgr.conf if it does not exist - ansible.builtin.file: - path: /etc/repmgr.conf - state: touch - mode: '0755' - become: true - delegate_to: '{{ primary_ip }}' - remote_user: ubuntu - - name: Set contents of repmgr.conf - ansible.builtin.copy: - dest: "/etc/repmgr.conf" - content: | - node_id=1 - node_name='{{ primary_name }}' - conninfo='host={{ primary_ip }} user=repmgr dbname=repmgr connect_timeout=2' - data_directory='/mnt/ssd-b/16/main' - service_start_command='sudo /bin/systemctl start postgresql.service' - service_stop_command='sudo /bin/systemctl stop postgresql.service' - service_restart_command='sudo /bin/systemctl restart postgresql.service' - service_reload_command='sudo /bin/systemctl reload postgresql.service' - mode: '0755' - become: true - delegate_to: '{{ primary_ip }}' - remote_user: ubuntu - - # https://www.repmgr.org/docs/current/quickstart-primary-register.html - - name: Run repmgr to register the primary - ansible.builtin.command: repmgr -f /etc/repmgr.conf primary register -F # only need -F if rerunning - become: true - become_user: postgres - delegate_to: '{{ primary_ip }}' - remote_user: ubuntu - register: register_primary_results - - name: Print out the register_primary_results - 
ansible.builtin.debug: - msg: "repmgr {{ register_primary_results }}" - delegate_to: '{{ primary_ip }}' - remote_user: ubuntu - - # # STUFF FOR REPLICATION (do not need if using repmgr) - # - name: Create replicator user with replication priveleges - # community.postgresql.postgresql_user: - # name: replicator - # password: '{{ replicator_password }}' - # role_attr_flags: REPLICATION - # become: true - # become_user: postgres - - # # also specifically allow the replicator user - # - name: Change hba conf to allow replicator to connect - # community.postgresql.postgresql_pg_hba: - # dest: /etc/postgresql/16/main/pg_hba.conf - # databases: replication - # contype: host - # users: replicator - # #address: all - # address: '{{ standby_host }}' - # method: trust # seems to hang with md5, how to make auth work? - # become: true - - # # create replication slot - # - name: Create physical replication slot if doesn't exist - # become_user: postgres - # community.postgresql.postgresql_slot: - # slot_name: replication_slot - # #db: lard - # become: true - - # make sure these changes take effect? 
- - name: Restart service postgres - ansible.builtin.systemd_service: - name: postgresql - state: restarted - become: true - delegate_to: '{{ primary_ip }}' - remote_user: ubuntu - - ### now move back to default of operating from localhost - - name: Attach primary floating ip - block: - - name: Gather information about primary server - openstack.cloud.server_info: - cloud: '{{ ostack_cloud }}' - region_name: '{{ ostack_region }}' - name: '{{ primary_name }}' - become: false - register: primary_server - - - name: Print out the ipalias port information for the server - ansible.builtin.debug: - msg: "Server {{ primary_server.servers[0].addresses.ipalias }}" - - # give the primary a particular floating ip - - name: Attach floating ip address that we keep connected to the primary - openstack.cloud.floating_ip: - cloud: '{{ ostack_cloud }}' - region_name: '{{ ostack_region }}' - server: '{{ primary_server.servers[0].id }}' - reuse: true - network: public - fixed_address: '{{ primary_server.servers[0].addresses.ipalias[0].addr }}' - floating_ip_address: '{{ primary_floating_ip }}' - wait: true - timeout: 60 - when: primary_server.servers[0].addresses.ipalias | length <=1 - # unfortunately it seems that attaching the floating ip results in a timeout - # even though it actually succeeds - ignore_errors: true - - - name: Check floating ip is attached - openstack.cloud.floating_ip_info: - cloud: '{{ ostack_cloud }}' - region_name: '{{ ostack_region }}' - floating_ip_address: '{{ primary_floating_ip }}' - register: fip - - # this will not run if the ip is not now on the vm - - name: Print out the floating ip information to confirm its ok +- name: Create a new database with name lard + community.postgresql.postgresql_db: + name: lard + become: true + become_user: postgres + delegate_to: "{{ primary_ip }}" + remote_user: ubuntu + +- name: Copy the db folder to the remote + ansible.builtin.copy: + src: ../../../../db/ + dest: /etc/postgresql/16/db/ + mode: "0755" + become: true + 
delegate_to: "{{ primary_ip }}" + remote_user: ubuntu + +# - name: Import sql files +# ansible.builtin.command: + +# - name: Create schemas and tables in lard +# community.postgresql.postgresql_script: +# db: lard +# path: "/etc/postgresql/16/db/{{ item }}" +# become: true +# become_user: postgres +# delegate_to: "{{ primary_ip }}" +# remote_user: ubuntu +# loop: + +# TODO: loop over the sql files in order (needs prepending IDs) +- name: Create the public schema in lard + community.postgresql.postgresql_script: + db: lard + path: /etc/postgresql/16/db/public.sql + become: true + become_user: postgres + delegate_to: "{{ primary_ip }}" + remote_user: ubuntu + +- name: Create the data partitions + community.postgresql.postgresql_script: + db: lard + path: /etc/postgresql/16/db/partition.sql + become: true + become_user: postgres + delegate_to: "{{ primary_ip }}" + remote_user: ubuntu + +- name: Create the labels schema in lard + community.postgresql.postgresql_script: + db: lard + path: /etc/postgresql/16/db/labels.sql + become: true + become_user: postgres + delegate_to: "{{ primary_ip }}" + remote_user: ubuntu + +- name: Create the flags schema in lard + community.postgresql.postgresql_script: + db: lard + path: /etc/postgresql/16/db/flags.sql + become: true + become_user: postgres + delegate_to: "{{ primary_ip }}" + remote_user: ubuntu + +- name: Connect to lard database, create user + community.postgresql.postgresql_user: + db: lard + name: lard_user + password: "{{ db_password }}" + role_attr_flags: SUPERUSER # not desired, but the privelege granting doesn't seem to work? 
+ become: true + become_user: postgres + delegate_to: "{{ primary_ip }}" + remote_user: ubuntu +# - name: Grant lard_user priveleges on lard database +# community.postgresql.postgresql_privs: +# type: database +# db: lard +# privs: ALL +# role: lard_user +# become: true +# become_user: postgres + +# MAKE IT THE PRIMARY +- name: Set wal_level parameter + community.postgresql.postgresql_set: + name: wal_level + value: replica # https://www.postgresql.org/docs/current/runtime-config-wal.html#GUC-WAL-LEVEL + become: true + become_user: postgres + delegate_to: "{{ primary_ip }}" + remote_user: ubuntu + +- name: Set hot_standby parameter + community.postgresql.postgresql_set: + name: hot_standby + value: true + become: true + become_user: postgres + delegate_to: "{{ primary_ip }}" + remote_user: ubuntu + +- name: Set hot_standby_feedback parameter + community.postgresql.postgresql_set: + name: hot_standby_feedback + value: true + become: true + become_user: postgres + delegate_to: "{{ primary_ip }}" + remote_user: ubuntu + +- name: Set max_wal_senders parameter + community.postgresql.postgresql_set: + name: max_wal_senders + value: 10 + become: true + become_user: postgres + delegate_to: "{{ primary_ip }}" + remote_user: ubuntu + +- name: Set wal_log_hints parameter # needs to be enabled to use pg_rewind + # https://www.postgresql.org/docs/current/app-pgrewind.html + community.postgresql.postgresql_set: + name: wal_log_hints + value: true + become: true + become_user: postgres + delegate_to: "{{ primary_ip }}" + remote_user: ubuntu + +- name: Set max_replication_slots parameter + community.postgresql.postgresql_set: + name: max_replication_slots + value: 10 + become: true + become_user: postgres + delegate_to: "{{ primary_ip }}" + remote_user: ubuntu + +# make it SYNCHRONOUS REPLICATION (without the next two settings it would be asynchronous) +- name: Set synchronous_standby_names parameter + community.postgresql.postgresql_set: + name: synchronous_standby_names # 
https://www.postgresql.org/docs/current/runtime-config-replication.html#GUC-SYNCHRONOUS-STANDBY-NAMES + value: "*" # all the standbys + become: true + become_user: postgres + delegate_to: "{{ primary_ip }}" + remote_user: ubuntu + +- name: Set synchronous_commit parameter + community.postgresql.postgresql_set: + name: synchronous_commit # https://www.postgresql.org/docs/current/runtime-config-wal.html#GUC-SYNCHRONOUS-COMMIT + value: "on" # will not give standby query consistency (tradeoff for better write performance), but will give standby durable commit after OS crash + become: true + become_user: postgres + delegate_to: "{{ primary_ip }}" + remote_user: ubuntu + +# repmgr +# https://www.repmgr.org/docs/current/quickstart-repmgr-conf.html +- name: Create a repmgr.conf if it does not exist + ansible.builtin.file: + path: /etc/repmgr.conf + state: touch + mode: "0755" + become: true + delegate_to: "{{ primary_ip }}" + remote_user: ubuntu + +- name: Set contents of repmgr.conf + ansible.builtin.copy: + dest: "/etc/repmgr.conf" + content: | + node_id=1 + node_name='{{ primary_name }}' + conninfo='host={{ primary_ip }} user=repmgr dbname=repmgr connect_timeout=2' + data_directory='/mnt/ssd-data/16/main' + service_start_command='sudo /bin/systemctl start postgresql.service' + service_stop_command='sudo /bin/systemctl stop postgresql.service' + service_restart_command='sudo /bin/systemctl restart postgresql.service' + service_reload_command='sudo /bin/systemctl reload postgresql.service' + mode: "0755" + become: true + delegate_to: "{{ primary_ip }}" + remote_user: ubuntu + +# https://www.repmgr.org/docs/current/quickstart-primary-register.html +- name: Run repmgr to register the primary + ansible.builtin.command: repmgr -f /etc/repmgr.conf primary register -F # only need -F if rerunning + become: true + become_user: postgres + delegate_to: "{{ primary_ip }}" + remote_user: ubuntu + register: register_primary_results + +- name: Print out the register_primary_results + 
ansible.builtin.debug: + msg: "repmgr {{ register_primary_results }}" + delegate_to: "{{ primary_ip }}" + remote_user: ubuntu + +# # STUFF FOR REPLICATION (do not need if using repmgr) +# - name: Create replicator user with replication priveleges +# community.postgresql.postgresql_user: +# name: replicator +# password: '{{ replicator_password }}' +# role_attr_flags: REPLICATION +# become: true +# become_user: postgres + +# # also specifically allow the replicator user +# - name: Change hba conf to allow replicator to connect +# community.postgresql.postgresql_pg_hba: +# dest: /etc/postgresql/16/main/pg_hba.conf +# databases: replication +# contype: host +# users: replicator +# #address: all +# address: '{{ standby_host }}' +# method: trust # seems to hang with md5, how to make auth work? +# become: true + +# # create replication slot +# - name: Create physical replication slot if doesn't exist +# become_user: postgres +# community.postgresql.postgresql_slot: +# slot_name: replication_slot +# #db: lard +# become: true + +# make sure these changes take effect? 
+- name: Restart service postgres + ansible.builtin.systemd_service: + name: postgresql + state: restarted + become: true + delegate_to: "{{ primary_ip }}" + remote_user: ubuntu + +### now move back to default of operating from localhost +- name: Attach primary floating ip + # unfortunately it seems that attaching the floating ip results in a timeout + # even though it actually succeeds + ignore_errors: true + block: + - name: Gather information about primary server + openstack.cloud.server_info: + cloud: "{{ ostack_cloud }}" + region_name: "{{ ostack_region }}" + name: "{{ primary_name }}" + become: false + register: primary_server + + - name: Print out the ipalias port information for the server ansible.builtin.debug: - msg: "Floating ip {{ fip }}" - when: fip.floating_ips[0].port_details.device_id == primary_server.servers[0].id \ No newline at end of file + msg: "Server {{ primary_server.servers[0].addresses.ipalias }}" + + # give the primary a particular floating ip + - name: Attach floating ip address that we keep connected to the primary + openstack.cloud.floating_ip: + cloud: "{{ ostack_cloud }}" + region_name: "{{ ostack_region }}" + server: "{{ primary_server.servers[0].id }}" + reuse: true + network: public + fixed_address: "{{ primary_server.servers[0].addresses.ipalias[0].addr }}" + floating_ip_address: "{{ primary_floating_ip }}" + wait: true + timeout: 60 + when: primary_server.servers[0].addresses.ipalias | length <=1 + +- name: Check floating ip is attached + openstack.cloud.floating_ip_info: + cloud: "{{ ostack_cloud }}" + region_name: "{{ ostack_region }}" + floating_ip_address: "{{ primary_floating_ip }}" + register: fip + +# this will not run if the ip is not now on the vm +- name: Print out the floating ip information to confirm its ok + ansible.builtin.debug: + msg: "Floating ip {{ fip }}" + when: fip.floating_ips[0].port_details.device_id == primary_server.servers[0].id diff --git a/ansible/roles/primarystandbysetup/tasks/create-standby.yml 
b/ansible/roles/primarystandbysetup/tasks/create-standby.yml index d565243..a264a11 100644 --- a/ansible/roles/primarystandbysetup/tasks/create-standby.yml +++ b/ansible/roles/primarystandbysetup/tasks/create-standby.yml @@ -1,149 +1,154 @@ +--- # create standby.signal file in data directory -# configure streaming WAL -# primary_conninfo needs a libpq connection string (ip address + other details needed to connect to primary server) +# configure streaming WAL primary_conninfo needs a libpq connection string (ip +# address + other details needed to connect to primary server) -# since we want the standby to be able to operate as the primary, we need to configure the WAL archiving, connections, and auth like the primary -# example: -#primary_conninfo = 'host=192.168.1.50 port=5432 user=foo password=foopass options=''-c wal_sender_timeout=5000''' -#restore_command = 'cp /path/to/archive/%f %p' -#archive_cleanup_command = 'pg_archivecleanup /path/to/archive %r' +# since we want the standby to be able to operate as the primary, we need to +# configure the WAL archiving, connections, and auth like the primary example: +# primary_conninfo = 'host=192.168.1.50 port=5432 user=foo password=foopass +# options=''-c wal_sender_timeout=5000''' restore_command = 'cp +# /path/to/archive/%f %p' archive_cleanup_command = 'pg_archivecleanup +# /path/to/archive %r' -# add the following line to the postgresql.conf file on the standby -# The standby connects to the primary that is running on host 192.168.1.50 -# and port 5432 as the user "foo" whose password is "foopass". -#primary_conninfo = 'host=192.168.1.50 port=5432 user=foo password=foopass' +# add the following line to the postgresql.conf file on the standby The standby +# connects to the primary that is running on host 192.168.1.50 and port 5432 as +# the user "foo" whose password is "foopass". 
primary_conninfo = +# 'host=192.168.1.50 port=5432 user=foo password=foopass' # use the replication slot on the primary (in file after the primary_conninfo) -#primary_slot_name = 'node_a_slot' ---- - # repmgr - # https://www.repmgr.org/docs/current/quickstart-standby-clone.html - # must be done before the standby is put into read only mode (therefore not idempotent) - - name: Create a repmgr.conf if it does not exist - ansible.builtin.file: - path: /etc/repmgr.conf - state: touch - mode: '0755' - become: true - delegate_to: '{{ standby_ip }}' - remote_user: ubuntu - - name: Set contents of repmgr.conf - ansible.builtin.copy: - dest: "/etc/repmgr.conf" - content: | - node_id=2 - node_name='{{ standby_name }}' - conninfo='host={{ standby_ip }} user=repmgr dbname=repmgr connect_timeout=2' - data_directory='/mnt/ssd-b/16/main' - service_start_command='sudo /bin/systemctl start postgresql.service' - service_stop_command='sudo /bin/systemctl stop postgresql.service' - service_restart_command='sudo /bin/systemctl restart postgresql.service' - service_reload_command='sudo /bin/systemctl reload postgresql.service' - mode: '0755' - become: true - delegate_to: '{{ standby_ip }}' - remote_user: ubuntu +# primary_slot_name = 'node_a_slot' repmgr +# https://www.repmgr.org/docs/current/quickstart-standby-clone.html must be +# done before the standby is put into read only mode (therefore not idempotent) +- name: Create a repmgr.conf if it does not exist + ansible.builtin.file: + path: /etc/repmgr.conf + state: touch + mode: "0755" + become: true + delegate_to: "{{ standby_ip }}" + remote_user: ubuntu +- name: Set contents of repmgr.conf + ansible.builtin.copy: + dest: "/etc/repmgr.conf" + content: | + node_id=2 + node_name='{{ standby_name }}' + conninfo='host={{ standby_ip }} user=repmgr dbname=repmgr connect_timeout=2' + data_directory='/mnt/ssd-b/16/main' + service_start_command='sudo /bin/systemctl start postgresql.service' + service_stop_command='sudo /bin/systemctl stop 
postgresql.service' + service_restart_command='sudo /bin/systemctl restart postgresql.service' + service_reload_command='sudo /bin/systemctl reload postgresql.service' + mode: "0755" + become: true + delegate_to: "{{ standby_ip }}" + remote_user: ubuntu - - name: Stop service postgres, if running - ansible.builtin.systemd_service: - name: postgresql - state: stopped - become: true - delegate_to: '{{ standby_ip }}' - remote_user: ubuntu +- name: Stop service postgres, if running + ansible.builtin.systemd_service: + name: postgresql + state: stopped + become: true + delegate_to: "{{ standby_ip }}" + remote_user: ubuntu - # https://www.repmgr.org/docs/current/quickstart-standby-clone.html - - name: Run repmgr to dry run clone - ansible.builtin.command: repmgr -h '{{ primary_ip }}' -U repmgr -d repmgr -f /etc/repmgr.conf standby clone --dry-run - become: true - become_user: postgres - delegate_to: '{{ standby_ip }}' - remote_user: ubuntu - register: dry_run_clone_results - - name: Print out the dry_run_clone_results - ansible.builtin.debug: - msg: "repmgr {{ dry_run_clone_results }}" +# TODO: add change_when to fix lint? 
+# https://www.repmgr.org/docs/current/quickstart-standby-clone.html +- name: Run repmgr to dry run clone + ansible.builtin.command: repmgr -h '{{ primary_ip }}' -U repmgr -d repmgr -f /etc/repmgr.conf standby clone --dry-run + become: true + become_user: postgres + delegate_to: "{{ standby_ip }}" + remote_user: ubuntu + register: dry_run_clone_results +- name: Print out the dry_run_clone_results + ansible.builtin.debug: + msg: "repmgr {{ dry_run_clone_results }}" - - name: Run repmgr to clone standby from primary - ansible.builtin.command: repmgr -h '{{ primary_ip }}' -U repmgr -d repmgr -f /etc/repmgr.conf standby clone -F - become: true - register: clone_results - become_user: postgres - delegate_to: '{{ standby_ip }}' - remote_user: ubuntu - - name: Print out the clone_results - ansible.builtin.debug: - msg: "repmgr {{ clone_results }}" +# TODO: add change_when to fix lint? +- name: Run repmgr to clone standby from primary + ansible.builtin.command: repmgr -h '{{ primary_ip }}' -U repmgr -d repmgr -f /etc/repmgr.conf standby clone -F + become: true + register: clone_results + become_user: postgres + delegate_to: "{{ standby_ip }}" + remote_user: ubuntu +- name: Print out the clone_results + ansible.builtin.debug: + msg: "repmgr {{ clone_results }}" - # try to clean up so can run standby clone ? - # - name: Recursively remove directory - # ansible.builtin.file: - # path: /mnt/ssd-b/16/main - # state: absent - # become: true - # - name: Create a main directory if it does not exist - # ansible.builtin.file: - # path: /mnt/ssd-b/16/main - # state: directory - # mode: '0700' - # become: true - # become_user: postgres +# try to clean up so can run standby clone ? 
+# - name: Recursively remove directory +# ansible.builtin.file: +# path: /mnt/ssd-b/16/main +# state: absent +# become: true +# - name: Create a main directory if it does not exist +# ansible.builtin.file: +# path: /mnt/ssd-b/16/main +# state: directory +# mode: '0700' +# become: true +# become_user: postgres - # https://www.postgresql.org/docs/current/app-pgbasebackup.html - # NOTE: this part is not idempotent, so if a db is already in the dir, it will fail - # hence the stuff above that means this should not be run on a database with data!!! - # not needed if using repmgr, since clone calls this - # - name: Run pg_basebackup to initialize the replica / standby - # ansible.builtin.shell: export PGPASSWORD="{{ replicator_password }}" && pg_basebackup --pgdata=/mnt/ssd-b/16/main -R --slot=replication_slot --user=replicator --host={{ primary_host }} --port=5432 - # args: - # executable: /bin/bash - # become: true - # become_user: postgres - # register: basebackup_results +# https://www.postgresql.org/docs/current/app-pgbasebackup.html +# NOTE: this part is not idempotent, so if a db is already in the dir, it will +# fail hence the stuff above that means this should not be run on a database with +# data!!! 
not needed if using repmgr, since clone calls this +# - name: Run pg_basebackup to initialize the replica / standby +# ansible.builtin.shell: | +# export PGPASSWORD="{{ replicator_password }}" && +# pg_basebackup --pgdata=/mnt/ssd-b/16/main -R --slot=replication_slot --user=replicator --host={{ primary_host }} --port=5432 +# args: +# executable: /bin/bash +# become: true +# become_user: postgres +# register: basebackup_results - # - name: Print out the basebackup_results - # debug: msg="backup {{ basebackup_results }}" +# - name: Print out the basebackup_results +# debug: msg="backup {{ basebackup_results }}" - - name: Restart service postgres - ansible.builtin.systemd_service: - name: postgresql - state: restarted - become: true - delegate_to: '{{ standby_ip }}' - remote_user: ubuntu +- name: Restart service postgres + ansible.builtin.systemd_service: + name: postgresql + state: restarted + become: true + delegate_to: "{{ standby_ip }}" + remote_user: ubuntu - - name: Waits for port 5432 to be available, don't check for initial 10 seconds - ansible.builtin.wait_for: - host: 0.0.0.0 - port: 5432 - delay: 10 - state: started - delegate_to: '{{ standby_ip }}' - remote_user: ubuntu +- name: Waits for port 5432 to be available, don't check for initial 10 seconds + ansible.builtin.wait_for: + host: 0.0.0.0 + port: 5432 + delay: 10 + state: started + delegate_to: "{{ standby_ip }}" + remote_user: ubuntu - # https://www.repmgr.org/docs/current/quickstart-register-standby.html - - name: Run repmgr to register the standby - ansible.builtin.command: repmgr -f /etc/repmgr.conf standby register - become: true - become_user: postgres - delegate_to: '{{ standby_ip }}' - remote_user: ubuntu - register: register_standby_results - - name: Print out the register_standby_results - ansible.builtin.debug: - msg: "repmgr {{ register_standby_results }}" +# TODO: add change_when to fix lint? 
+# https://www.repmgr.org/docs/current/quickstart-register-standby.html +- name: Run repmgr to register the standby + ansible.builtin.command: repmgr -f /etc/repmgr.conf standby register + become: true + become_user: postgres + delegate_to: "{{ standby_ip }}" + remote_user: ubuntu + register: register_standby_results +- name: Print out the register_standby_results + ansible.builtin.debug: + msg: "repmgr {{ register_standby_results }}" - # run some sql... to confirm clone? - - name: Do some sql to test for the existence of lard...? - community.postgresql.postgresql_query: - db: lard - query: select count(*) from timeseries - register: query_results - become: true - become_user: postgres - delegate_to: '{{ standby_ip }}' - remote_user: ubuntu - - name: Print out the query - ansible.builtin.debug: - msg: "Query {{ query_results }}" +# run some sql... to confirm clone? +- name: Do some sql to test for the existence of lard...? + community.postgresql.postgresql_query: + db: lard + query: select count(*) from timeseries + register: query_results + become: true + become_user: postgres + delegate_to: "{{ standby_ip }}" + remote_user: ubuntu +- name: Print out the query + ansible.builtin.debug: + msg: "Query {{ query_results }}" diff --git a/ansible/roles/primarystandbysetup/tasks/main.yml b/ansible/roles/primarystandbysetup/tasks/main.yml index d1d6f04..964dffc 100644 --- a/ansible/roles/primarystandbysetup/tasks/main.yml +++ b/ansible/roles/primarystandbysetup/tasks/main.yml @@ -1,7 +1,8 @@ +--- # roles/primarystandbysetup/tasks/main.yml - name: Turn a vm into the primary - import_tasks: create-primary.yml + ansible.builtin.import_tasks: create-primary.yml # note, may in the future want to make multiple standbys - name: Turn a vm into the standby - import_tasks: create-standby.yml + ansible.builtin.import_tasks: create-standby.yml diff --git a/ansible/roles/rejoin/tasks/main.yml b/ansible/roles/rejoin/tasks/main.yml index 82fad6c..b235291 100644 --- 
a/ansible/roles/rejoin/tasks/main.yml +++ b/ansible/roles/rejoin/tasks/main.yml @@ -1,3 +1,4 @@ +--- # roles/rejoin/tasks/main.yml - name: Rejoin an old primary to cluster as standby - import_tasks: rejoin_old_primary.yml + ansible.builtin.import_tasks: rejoin_old_primary.yml diff --git a/ansible/roles/rejoin/tasks/rejoin_old_primary.yml b/ansible/roles/rejoin/tasks/rejoin_old_primary.yml index e28d92b..d387a2d 100644 --- a/ansible/roles/rejoin/tasks/rejoin_old_primary.yml +++ b/ansible/roles/rejoin/tasks/rejoin_old_primary.yml @@ -1,39 +1,49 @@ ---- - - name: stop service postgres - ansible.builtin.systemd_service: - name: postgresql - state: stopped - become: true +--- +- name: Stop service postgres + ansible.builtin.systemd_service: + name: postgresql + state: stopped + become: true - - name: Dry run of rejoin - ansible.builtin.command: repmgr node rejoin -f /etc/repmgr.conf -d 'host='{{ primary_ip }}' user=repmgr dbname=repmgr connect_timeout=2' --force-rewind=/usr/lib/postgresql/16/bin/pg_rewind --verbose --dry-run - become: true - become_user: postgres - register: rejoin_dry_run_results - - name: Print out the rejoin_dry_run_results - ansible.builtin.debug: - msg: "repmgr {{ rejoin_dry_run_results }}" +# TODO: add changed_when to fix lint? 
+- name: Dry run of rejoin + ansible.builtin.command: > + repmgr node rejoin + -f /etc/repmgr.conf -d 'host='{{ primary_ip }}' user=repmgr dbname=repmgr connect_timeout=2' + --force-rewind=/usr/lib/postgresql/16/bin/pg_rewind --verbose --dry-run + become: true + become_user: postgres + register: rejoin_dry_run_results +- name: Print out the rejoin_dry_run_results + ansible.builtin.debug: + msg: "repmgr {{ rejoin_dry_run_results }}" - - name: Rejoin old primary as standby - ansible.builtin.command: repmgr node rejoin -f /etc/repmgr.conf -d 'host='{{ primary_ip }}' user=repmgr dbname=repmgr connect_timeout=2' --force-rewind=/usr/lib/postgresql/16/bin/pg_rewind --verbose - become: true - become_user: postgres - register: rejoin_results - - name: Print out the rejoin_results - ansible.builtin.debug: - msg: "repmgr {{ rejoin_results }}" - - - name: start service postgres - ansible.builtin.systemd_service: - name: postgresql - state: started - become: true +# TODO: add changed_when to fix lint? +- name: Rejoin old primary as standby + ansible.builtin.command: > + repmgr node rejoin + -f /etc/repmgr.conf -d 'host='{{ primary_ip }}' user=repmgr dbname=repmgr connect_timeout=2' + --force-rewind=/usr/lib/postgresql/16/bin/pg_rewind --verbose + become: true + become_user: postgres + register: rejoin_results +- name: Print out the rejoin_results + ansible.builtin.debug: + msg: "repmgr {{ rejoin_results }}" - - name: Check cluster - ansible.builtin.command: repmgr -f /etc/repmgr.conf cluster show - become: true - become_user: postgres - register: status_results - - name: Print out the status_results - ansible.builtin.debug: - msg: "repmgr {{ status_results }}" \ No newline at end of file +- name: Start service postgres + ansible.builtin.systemd_service: + name: postgresql + state: started + become: true + +# TODO: add changed_when to fix lint? 
+- name: Check cluster + ansible.builtin.command: repmgr -f /etc/repmgr.conf cluster show + become: true + become_user: postgres + register: status_results + +- name: Print out the status_results + ansible.builtin.debug: + msg: "repmgr {{ status_results }}" diff --git a/ansible/roles/ssh/tasks/main.yml b/ansible/roles/ssh/tasks/main.yml index 1f968d6..327eca8 100644 --- a/ansible/roles/ssh/tasks/main.yml +++ b/ansible/roles/ssh/tasks/main.yml @@ -1,3 +1,60 @@ -# roles/ssh/tasks/main.yml -- name: Share the ssh keys one way between 2 particular VMs - import_tasks: share-ssh-keys.yml +--- +# find the other vm, that is not currently being iterated over (this will need to be changed if more than 2) +- name: Setting host facts for other_vm + ansible.builtin.set_fact: + other_vm: "{{ (ansible_play_hosts_all | difference([inventory_hostname])) | first }}" + +- name: List other vm + ansible.builtin.debug: + msg: "{{ other_vm }}" + +- name: Create user postgres + ansible.builtin.user: + name: postgres + generate_ssh_key: true + ssh_key_bits: 2048 + ssh_key_file: .ssh/id_rsa + force: true + register: ssh_keys + become: true +# Another way to generate a ssh key... 
+# - name: Force regenerate an OpenSSH keypair if it already exists +# community.crypto.openssh_keypair: +# path: .ssh/id_rsa +# force: true +# owner: postgres # should be this user's key +# register: ssh_keys +# become: true + +- name: List generated SSH key + ansible.builtin.debug: + msg: "{{ ssh_keys.ssh_public_key }}" + +- name: Add the key to authorized_key on the other vm + ansible.posix.authorized_key: + user: postgres + state: present + key: "{{ ssh_keys.ssh_public_key }}" + become: true + delegate_to: "{{ other_vm }}" + +- name: Get the host key + ansible.builtin.set_fact: + hostkey: "{{ ansible_ssh_host_key_ecdsa_public }}" + +- name: List host key + ansible.builtin.debug: + msg: "{{ hostkey }}" + +- name: List vm ip + ansible.builtin.debug: + msg: "{{ vm_ip }}" + +- name: Add the vm to known_hosts on the other vm + ansible.builtin.known_hosts: + path: ~postgres/.ssh/known_hosts # need this for the postgres user + name: "{{ vm_ip }}" + key: "{{ vm_ip }} ecdsa-sha2-nistp256 {{ hostkey }}" + state: present + become: true + delegate_to: "{{ other_vm }}" diff --git a/ansible/roles/ssh/tasks/share-ssh-keys.yml b/ansible/roles/ssh/tasks/share-ssh-keys.yml index 389f4b1..e69de29 100644 --- a/ansible/roles/ssh/tasks/share-ssh-keys.yml +++ b/ansible/roles/ssh/tasks/share-ssh-keys.yml @@ -1,60 +0,0 @@ ---- - # find the other vm, that is not currently being iterated over (this will need to be changed if more than 2) - - name: Setting host facts for other_vm - ansible.builtin.set_fact: - other_vm: '{{ (ansible_play_hosts_all | difference([inventory_hostname])) | first }}' - - - name: List other vm - ansible.builtin.debug: - msg: "{{ other_vm }}" - - - name: Create user postgres - ansible.builtin.user: - name: postgres - generate_ssh_key: true - ssh_key_bits: 2048 - ssh_key_file: .ssh/id_rsa - force: true - register: ssh_keys - become: true - # Another way to generate a ssh key... 
- # - name: Force regenerate an OpenSSH keypair if it already exists - # community.crypto.openssh_keypair: - # path: .ssh/id_rsa - # force: true - # owner: postgres # should be this user's key - # register: ssh_keys - # become: true - - - name: List generated SSH key - ansible.builtin.debug: - msg: "{{ ssh_keys.ssh_public_key }}" - - - name: Add the key to authorized_key on the other vm - ansible.posix.authorized_key: - user: postgres - state: present - key: '{{ ssh_keys.ssh_public_key }}' - become: true - delegate_to: '{{ other_vm }}' - - - name: Get the host key - ansible.builtin.set_fact: - hostkey: '{{ ansible_ssh_host_key_ecdsa_public }}' - - - name: List host key - ansible.builtin.debug: - msg: "{{ hostkey }}" - - - name: List vm ip - ansible.builtin.debug: - msg: "{{ vm_ip }}" - - - name: Add the vm to known_hosts on the other vm - ansible.builtin.known_hosts: - path: ~postgres/.ssh/known_hosts # need this for the postgres user - name: '{{ vm_ip }}' - key: '{{ vm_ip }} ecdsa-sha2-nistp256 {{ hostkey }}' - state: present - become: true - delegate_to: '{{ other_vm }}' \ No newline at end of file diff --git a/ansible/roles/switchover/tasks/main.yml b/ansible/roles/switchover/tasks/main.yml index 0fab67d..8c68fd7 100644 --- a/ansible/roles/switchover/tasks/main.yml +++ b/ansible/roles/switchover/tasks/main.yml @@ -1,4 +1,4 @@ +--- # roles/switchover/tasks/main.yml - name: Switchover - import_tasks: switchover.yml - + ansible.builtin.import_tasks: switchover.yml diff --git a/ansible/roles/switchover/tasks/switchover.yml b/ansible/roles/switchover/tasks/switchover.yml index 1573d7a..00eec25 100644 --- a/ansible/roles/switchover/tasks/switchover.yml +++ b/ansible/roles/switchover/tasks/switchover.yml @@ -1,60 +1,62 @@ +--- # assume the db is already there and synched, so now want to turn into a standby / replica # and want to turn the current standby into the primary ---- - - name: Restart service postgres (primary) - ansible.builtin.systemd_service: - name: 
postgresql - state: restarted - become: true - delegate_to: '{{ primary_ip }}' - remote_user: ubuntu +- name: Restart service postgres (primary) + ansible.builtin.systemd_service: + name: postgresql + state: restarted + become: true + delegate_to: "{{ primary_ip }}" + remote_user: ubuntu - # try to avoid issue: https://github.com/EnterpriseDB/repmgr/issues/703 - - name: Restart service postgres (standby) - ansible.builtin.systemd_service: - name: postgresql - state: restarted - become: true - delegate_to: '{{ standby_ip }}' - remote_user: ubuntu +# try to avoid issue: https://github.com/EnterpriseDB/repmgr/issues/703 +- name: Restart service postgres (standby) + ansible.builtin.systemd_service: + name: postgresql + state: restarted + become: true + delegate_to: "{{ standby_ip }}" + remote_user: ubuntu - # can now just do this with repmgr - # https://www.repmgr.org/docs/current/preparing-for-switchover.html - # need the two instances to be able to ssh to each other! - # siblings-follow only really needed if have multiple standbys... - - name: Dry run of switching the standby and primary - ansible.builtin.command: repmgr standby switchover -f /etc/repmgr.conf --siblings-follow --dry-run - become: true - become_user: postgres - delegate_to: '{{ standby_ip }}' - remote_user: ubuntu - register: switchover_dry_run_results - - name: Print out the switchover_dry_run_results - ansible.builtin.debug: - msg: "repmgr {{ switchover_dry_run_results }}" - ## see preparing for switchover if things go wrong despite dry run, there is mention of --force-rewind - ## which would use pg_rewind to try to fix divergent timelines... +# can now just do this with repmgr +# https://www.repmgr.org/docs/current/preparing-for-switchover.html +# need the two instances to be able to ssh to each other! +# siblings-follow only really needed if have multiple standbys... 
+- name: Dry run of switching the standby and primary + ansible.builtin.command: repmgr standby switchover -f /etc/repmgr.conf --siblings-follow --dry-run + become: true + become_user: postgres + delegate_to: "{{ standby_ip }}" + remote_user: ubuntu + register: switchover_dry_run_results - ## https://www.repmgr.org/docs/current/switchover-execution.html - ## https://www.repmgr.org/docs/current/switchover-troubleshooting.html - - name: Switch the standby and primary - ansible.builtin.command: repmgr standby switchover -f /etc/repmgr.conf --siblings-follow - become: true - become_user: postgres - delegate_to: '{{ standby_ip }}' - remote_user: ubuntu - register: switchover_results - - name: Print out the switchover_results - ansible.builtin.debug: - msg: "repmgr {{ switchover_results }}" +- name: Print out the switchover_dry_run_results + ansible.builtin.debug: + msg: "repmgr {{ switchover_dry_run_results }}" +## see preparing for switchover if things go wrong despite dry run, there is mention of --force-rewind +## which would use pg_rewind to try to fix divergent timelines... 
- - name: Check cluster - ansible.builtin.command: repmgr -f /etc/repmgr.conf cluster show - become: true - become_user: postgres - delegate_to: '{{ standby_ip }}' - remote_user: ubuntu - register: status_results - - name: Print out the status_results - ansible.builtin.debug: - msg: "repmgr {{ status_results }}" +## https://www.repmgr.org/docs/current/switchover-execution.html +## https://www.repmgr.org/docs/current/switchover-troubleshooting.html +- name: Switch the standby and primary + ansible.builtin.command: repmgr standby switchover -f /etc/repmgr.conf --siblings-follow + become: true + become_user: postgres + delegate_to: "{{ standby_ip }}" + remote_user: ubuntu + register: switchover_results +- name: Print out the switchover_results + ansible.builtin.debug: + msg: "repmgr {{ switchover_results }}" + +- name: Check cluster + ansible.builtin.command: repmgr -f /etc/repmgr.conf cluster show + become: true + become_user: postgres + delegate_to: "{{ standby_ip }}" + remote_user: ubuntu + register: status_results + +- name: Print out the status_results + ansible.builtin.debug: + msg: "repmgr {{ status_results }}" diff --git a/ansible/roles/vm/defaults/main.yml b/ansible/roles/vm/defaults/main.yml new file mode 100644 index 0000000..347ae59 --- /dev/null +++ b/ansible/roles/vm/defaults/main.yml @@ -0,0 +1,12 @@ +--- +vm_flavor: m1.xxlarge +vm_image: met-jammy-latest +vm_state: present +vm_security_groups: + - default + - ssh_usernet + - postgres + - ping + - lard_ingest +vm_volume_type: __DEFAULT__ +vm_volume_size: 900 diff --git a/ansible/roles/vm/meta/main.yml b/ansible/roles/vm/meta/main.yml new file mode 100644 index 0000000..97d8be7 --- /dev/null +++ b/ansible/roles/vm/meta/main.yml @@ -0,0 +1,4 @@ +--- +dependencies: + # All the 'ostack_*' variables are defined in this role + - role: ostack diff --git a/ansible/roles/vm/tasks/create-project-vm.yml b/ansible/roles/vm/tasks/create-project-vm.yml deleted file mode 100644 index 408d14c..0000000 --- 
a/ansible/roles/vm/tasks/create-project-vm.yml +++ /dev/null @@ -1,89 +0,0 @@ ---- - - name: Create VM - openstack.cloud.server: - cloud: '{{ ostack_cloud }}' - region_name: '{{ ostack_region }}' - availability_zone: '{{ availability_zone }}' - name: '{{ name_stuff }}' - image: '{{ ostack_image }}' - flavor: '{{ ostack_flavor }}' - network: '{{ ostack_network_name }}' - key_name: '{{ ostack_key_name }}' - security_groups: '{{ security_groups_list }}' - state: '{{ ostack_state }}' - # do not give ip, since want to assign a specific one in next step (so as to reuse the ones we have) - auto_ip: false - register: server - - - name: Print out the server information - ansible.builtin.debug: - msg: "Server {{ lookup('ansible.builtin.dict', server) }}" - - - name: Attach floating ip address - openstack.cloud.floating_ip: - cloud: '{{ ostack_cloud }}' - region_name: '{{ ostack_region }}' - server: '{{ server.server.id }}' - reuse: true - network: public - floating_ip_address: '{{ vm_ip }}' - wait: true - timeout: 60 - - - name: Create Volume - openstack.cloud.volume: - cloud: '{{ ostack_cloud }}' - region_name: '{{ ostack_region }}' - availability_zone: '{{ availability_zone }}' - name: '{{ name_stuff }}' - volume_type: '{{ volume_type }}' - size: '{{ volume_size }}' - register: volume - - - name: Print out the volume information - ansible.builtin.debug: - msg: "Volume {{ lookup('ansible.builtin.dict', volume) }}" - - - name: Attach a volume to a compute host - openstack.cloud.server_volume: - state: present - cloud: '{{ ostack_cloud }}' - region_name: '{{ ostack_region }}' - volume: '{{ volume.volume.id }}' - server: '{{ server.server.id }}' - device: /dev/vdb - - - name: Create port for ipalias and set security groups - openstack.cloud.port: - cloud: '{{ ostack_cloud }}' - region_name: '{{ ostack_region }}' - state: present - name: 'ipalias-{{ name_stuff }}' - network: '{{ ipalias_network_name }}' - security_groups: '{{ security_groups_list }}' - - - name: Get port info - 
openstack.cloud.port_info: - cloud: '{{ ostack_cloud }}' - region_name: '{{ ostack_region }}' - port: 'ipalias-{{ name_stuff }}' - register: ipalias_port - - - name: Print out the port information - ansible.builtin.debug: - msg: "Port {{ lookup('ansible.builtin.dict', ipalias_port) }}" - - - name: Add the ipalias network to server - ansible.builtin.command: # noqa no-changed-when - argv: - - openstack - - --os-cloud - - '{{ ostack_cloud }}' - - --os-region-name - - '{{ ostack_region }}' - - server - - add - - port - - '{{ server.server.id }}' - - 'ipalias-{{ name_stuff }}' - when: ipalias_port.ports.0.device_id | length <=0 diff --git a/ansible/roles/vm/tasks/main.yml b/ansible/roles/vm/tasks/main.yml index 589fc0e..10ba595 100644 --- a/ansible/roles/vm/tasks/main.yml +++ b/ansible/roles/vm/tasks/main.yml @@ -1,4 +1,89 @@ -# roles/vms/tasks/main.yml -- name: Create a VM - import_tasks: create-project-vm.yml +--- +- name: Create VM + openstack.cloud.server: + cloud: "{{ ostack_cloud }}" + region_name: "{{ ostack_region }}" + availability_zone: "{{ ostack_availability_zone }}" + name: "{{ inventory_hostname }}" + image: "{{ vm_image }}" + flavor: "{{ vm_flavor }}" + network: "{{ ostack_network_name }}" + key_name: "{{ vm_key_name }}" + security_groups: "{{ vm_security_groups }}" + state: "{{ vm_state }}" + # do not give ip, since want to assign a specific one in next step (so as to reuse the ones we have) + auto_ip: false + register: server +- name: Print out the server information + ansible.builtin.debug: + msg: "Server {{ lookup('ansible.builtin.dict', server) }}" + +- name: Attach floating ip address + openstack.cloud.floating_ip: + cloud: "{{ ostack_cloud }}" + region_name: "{{ ostack_region }}" + server: "{{ server.server.id }}" + reuse: true + network: public + floating_ip_address: "{{ ansible_host }}" + wait: true + timeout: 60 + +- name: Create Volume + openstack.cloud.volume: + cloud: "{{ ostack_cloud }}" + region_name: "{{ ostack_region }}" + 
availability_zone: "{{ ostack_availability_zone }}" + name: "{{ inventory_hostname }}" + volume_type: "{{ vm_volume_type }}" + size: "{{ vm_volume_size }}" + register: volume + +- name: Print out the volume information + ansible.builtin.debug: + msg: "Volume {{ lookup('ansible.builtin.dict', volume) }}" + +- name: Attach a volume to a compute host + openstack.cloud.server_volume: + state: present + cloud: "{{ ostack_cloud }}" + region_name: "{{ ostack_region }}" + volume: "{{ volume.volume.id }}" + server: "{{ server.server.id }}" + device: /dev/vdb + +- name: Create port for ipalias and set security groups + openstack.cloud.port: + cloud: "{{ ostack_cloud }}" + region_name: "{{ ostack_region }}" + state: present + name: "ipalias-{{ inventory_hostname }}" + network: "{{ ostack_ipalias_network_name }}" + security_groups: "{{ vm_security_groups }}" + +- name: Get port info + openstack.cloud.port_info: + cloud: "{{ ostack_cloud }}" + region_name: "{{ ostack_region }}" + port: "ipalias-{{ inventory_hostname }}" + register: ipalias_port + +- name: Print out the port information + ansible.builtin.debug: + msg: "Port {{ lookup('ansible.builtin.dict', ipalias_port) }}" + +- name: Add the ipalias network to server + ansible.builtin.command: # noqa no-changed-when + argv: + - openstack + - --os-cloud + - "{{ ostack_cloud }}" + - --os-region-name + - "{{ ostack_region }}" + - server + - add + - port + - "{{ server.server.id }}" + - "ipalias-{{ inventory_hostname }}" + when: ipalias_port.ports.0.device_id | length <=0 diff --git a/ansible/roles/vm/vars/main.yml b/ansible/roles/vm/vars/main.yml index d079f5b..4be7336 100644 --- a/ansible/roles/vm/vars/main.yml +++ b/ansible/roles/vm/vars/main.yml @@ -1,12 +1,13 @@ - # VM config - ostack_flavor: m1.xxlarge - ostack_image: met-jammy-latest - ostack_state: present - ostack_network_name: lard - security_groups_list: - - default - - ssh_usernet - - postgres - - ping - volume_type: __DEFAULT__ - volume_size: 900 \ No newline at end 
of file +# VM config +--- +vm_ostack_flavor: m1.xxlarge +vm_ostack_image: met-jammy-latest +vm_ostack_state: present +vm_ostack_network_name: lard +vm_security_groups_list: + - default + - ssh_usernet + - postgres + - ping +vm_volume_type: __DEFAULT__ +vm_volume_size: 900 diff --git a/ansible/roles/vm_format/tasks/format-mount-disk.yml b/ansible/roles/vm_format/tasks/format-mount-disk.yml index 5917fa7..5d6d20a 100644 --- a/ansible/roles/vm_format/tasks/format-mount-disk.yml +++ b/ansible/roles/vm_format/tasks/format-mount-disk.yml @@ -1,45 +1,45 @@ --- - - name: Create /mnt/ssd-data - ansible.builtin.file: - path: /mnt/ssd-data - state: directory - owner: ubuntu # change to postgres? - group: ubuntu # change to postgres? - mode: 'u=rw,g=rws,o=r' - become: true +- name: Create /mnt/ssd-data + ansible.builtin.file: + path: /mnt/ssd-data + state: directory + owner: ubuntu # change to postgres? + group: ubuntu # change to postgres? + mode: 'u=rw,g=rws,o=r' + become: true - - name: Create ext4 filesystem on {{ mount_point }} - community.general.filesystem: - dev: '{{ mount_point }}' - fstype: ext4 - become: true +- name: Create ext4 filesystem on {{ mount_point }} + community.general.filesystem: + dev: '{{ mount_point }}' + fstype: ext4 + become: true - - name: Read device information (always use unit when probing) - community.general.parted: - device: '{{ mount_point }}' - unit: MiB - register: sdb_info - become: true +- name: Read device information (always use unit when probing) + community.general.parted: + device: '{{ mount_point }}' + unit: MiB + register: sdb_info + become: true - - name: Print out the device information - ansible.builtin.debug: - msg: "Partitions {{ sdb_info.partitions }}" +- name: Print out the device information + ansible.builtin.debug: + msg: "Partitions {{ sdb_info.partitions }}" - # this also changes the fstab so its still there when rebooted! 
- - name: Mount the disk from {{ mount_point }} - ansible.posix.mount: - path: /mnt/ssd-data - src: '{{ mount_point }}' - fstype: ext4 - state: mounted - become: true +# this also changes the fstab so its still there when rebooted! +- name: Mount the disk from {{ mount_point }} + ansible.posix.mount: + path: /mnt/ssd-data + src: '{{ mount_point }}' + fstype: ext4 + state: mounted + become: true - - name: Fetch the UUID of {{ mount_point }} - ansible.builtin.command: blkid --match-tag UUID --output value '{{ mount_point }}' - changed_when: false - register: blkid_cmd - become: true +- name: Fetch the UUID of {{ mount_point }} + ansible.builtin.command: blkid --match-tag UUID --output value '{{ mount_point }}' + changed_when: false + register: blkid_cmd + become: true - - name: Print out the UUID - ansible.builtin.debug: - msg: "UUID {{ blkid_cmd.stdout }}" +- name: Print out the UUID + ansible.builtin.debug: + msg: "UUID {{ blkid_cmd.stdout }}" diff --git a/ansible/roles/vm_format/tasks/install-postgres.yml b/ansible/roles/vm_format/tasks/install-postgres.yml index 5864240..bf4bd1d 100644 --- a/ansible/roles/vm_format/tasks/install-postgres.yml +++ b/ansible/roles/vm_format/tasks/install-postgres.yml @@ -1,136 +1,140 @@ --- - - name: Add postgres apt key by id from a keyserver - ansible.builtin.apt_key: - url: https://www.postgresql.org/media/keys/ACCC4CF8.asc - state: present - become: true - - - name: Add postgres repository into sources list - ansible.builtin.apt_repository: - repo: deb https://apt.postgresql.org/pub/repos/apt jammy-pgdg main - state: present - become: true - - - name: Install a list of packages - ansible.builtin.apt: - pkg: - - nano - - postgresql-16 - - postgresql-16-repmgr # https://www.repmgr.org/docs/current/install-requirements.html - - pip # needed for installing psycopg2 - - acl # needed for becoming unpriveleged user (such as postgres) - update_cache: true - become: true - - - name: Install psycopg2 python package # dependency for 
postgres ansible stuff? - ansible.builtin.pip: - name: psycopg2-binary - become: true - - - name: Install openstacksdk python package - ansible.builtin.pip: - name: openstacksdk - become: true - - # make is so the data is actually kept on the ssd mount... - - ### synch the postgres stuff over to new directory, but stop postgres first - - name: Stop service postgres, if running - ansible.builtin.systemd_service: - name: postgresql - state: stopped - become: true - - - name: Run rsync - ansible.builtin.command: rsync -av /var/lib/postgresql/ /mnt/ssd-b/ - become: true - - ## change where data is stored and open up network wise - - name: Comment out original data_directory - ansible.builtin.replace: - dest: /etc/postgresql/16/main/postgresql.conf - regexp: '^data_directory' - replace: '#data_directory' - become: true - - - name: Modify postgresql config - ansible.builtin.blockinfile: - dest: /etc/postgresql/16/main/postgresql.conf - block: | - data_directory = '/mnt/ssd-b/16/main' - listen_addresses = '*' - become: true - - # probably want to restrict this once we know what will connect? - # but the security group rules should take care of limiting to met ranges - - name: Change hba conf to allow connections - community.postgresql.postgresql_pg_hba: - dest: /etc/postgresql/16/main/pg_hba.conf - contype: host - address: all # can we put met internal ip range(s)? 
- method: md5 - # users and database default to all - become: true - - # make sure these changes take effect - - name: Start up postgres service again - ansible.builtin.systemd_service: - name: postgresql - state: started - become: true - - # REPMGR - - name: Create repmgr user # https://www.repmgr.org/docs/current/quickstart-repmgr-user-database.html - community.postgresql.postgresql_user: - name: repmgr - password: '{{ repmgr_password }}' - role_attr_flags: SUPERUSER - become: true - become_user: postgres - - - name: Create a repmgr database, with owner repmgr - community.postgresql.postgresql_db: - name: repmgr - owner: repmgr - become: true - become_user: postgres - - - name: Change hba conf to allow repmgr to connect for replication - community.postgresql.postgresql_pg_hba: - dest: /etc/postgresql/16/main/pg_hba.conf - databases: replication - contype: host - users: repmgr - address: all - # address: '{{ standby_host }}' - method: trust - become: true - - - name: Change hba conf to allow repmgr to connect to the repmgr db - community.postgresql.postgresql_pg_hba: - dest: /etc/postgresql/16/main/pg_hba.conf - databases: repmgr - contype: host - users: repmgr - address: all - # address: '{{ standby_host }}' - method: trust - become: true - - - name: Restart postgres - ansible.builtin.systemd_service: - name: postgresql - state: restarted - become: true - - - name: Allow the postgres user to run /bin/systemctl restart, stop, start postgres - community.general.sudoers: - name: postgresql - user: postgres - commands: - - /bin/systemctl restart postgresql.service - - /bin/systemctl stop postgresql.service - - /bin/systemctl start postgresql.service - - /bin/systemctl reload postgresql.service - nopassword: true - become: true +- name: Add postgres apt key by id from a keyserver + ansible.builtin.apt_key: + url: https://www.postgresql.org/media/keys/ACCC4CF8.asc + state: present + become: true + +- name: Add postgres repository into sources list + 
ansible.builtin.apt_repository: + repo: deb https://apt.postgresql.org/pub/repos/apt jammy-pgdg main + state: present + become: true + +- name: Install a list of packages + ansible.builtin.apt: + pkg: + - nano + - postgresql-16 + - postgresql-16-repmgr # https://www.repmgr.org/docs/current/install-requirements.html + - pip # needed for installing psycopg2 + - acl # needed for becoming unpriveleged user (such as postgres) + update_cache: true + become: true + +- name: Install psycopg2 python package # dependency for postgres ansible stuff? + ansible.builtin.pip: + name: psycopg2-binary + become: true + +- name: Install openstacksdk python package + ansible.builtin.pip: + name: openstacksdk + become: true + +# make is so the data is actually kept on the ssd mount... + +### synch the postgres stuff over to new directory, but stop postgres first +- name: Stop service postgres, if running + ansible.builtin.systemd_service: + name: postgresql + state: stopped + become: true + +- name: Run rsync + ansible.posix.synchronize: + archive: true + src: /var/lib/postgresql/ + # TODO: bind this dir to a variable + dest: /mnt/ssd-data/ + become: true + +## change where data is stored and open up network wise +- name: Comment out original data_directory + ansible.builtin.replace: + dest: /etc/postgresql/16/main/postgresql.conf + regexp: '^data_directory' + replace: '#data_directory' + become: true + +- name: Modify postgresql config + ansible.builtin.blockinfile: + dest: /etc/postgresql/16/main/postgresql.conf + block: | + data_directory = '/mnt/ssd-data/16/main' + listen_addresses = '*' + become: true + +# probably want to restrict this once we know what will connect? +# but the security group rules should take care of limiting to met ranges +- name: Change hba conf to allow connections + community.postgresql.postgresql_pg_hba: + dest: /etc/postgresql/16/main/pg_hba.conf + contype: host + address: all # can we put met internal ip range(s)? 
+ method: md5 + # users and database default to all + become: true + +# make sure these changes take effect +- name: Start up postgres service again + ansible.builtin.systemd_service: + name: postgresql + state: started + become: true + +# REPMGR +- name: Create repmgr user # https://www.repmgr.org/docs/current/quickstart-repmgr-user-database.html + community.postgresql.postgresql_user: + name: repmgr + password: '{{ repmgr_password }}' + role_attr_flags: SUPERUSER + become: true + become_user: postgres + +- name: Create a repmgr database, with owner repmgr + community.postgresql.postgresql_db: + name: repmgr + owner: repmgr + become: true + become_user: postgres + +- name: Change hba conf to allow repmgr to connect for replication + community.postgresql.postgresql_pg_hba: + dest: /etc/postgresql/16/main/pg_hba.conf + databases: replication + contype: host + users: repmgr + address: all + # address: '{{ standby_host }}' + method: trust + become: true + +- name: Change hba conf to allow repmgr to connect to the repmgr db + community.postgresql.postgresql_pg_hba: + dest: /etc/postgresql/16/main/pg_hba.conf + databases: repmgr + contype: host + users: repmgr + address: all + # address: '{{ standby_host }}' + method: trust + become: true + +- name: Restart postgres + ansible.builtin.systemd_service: + name: postgresql + state: restarted + become: true + +- name: Allow the postgres user to run /bin/systemctl restart, stop, start postgres + community.general.sudoers: + name: postgresql + user: postgres + commands: + - /bin/systemctl restart postgresql.service + - /bin/systemctl stop postgresql.service + - /bin/systemctl start postgresql.service + - /bin/systemctl reload postgresql.service + nopassword: true + become: true diff --git a/ansible/roles/vm_format/tasks/main.yml b/ansible/roles/vm_format/tasks/main.yml index 36d09eb..13f7b39 100644 --- a/ansible/roles/vm_format/tasks/main.yml +++ b/ansible/roles/vm_format/tasks/main.yml @@ -1,9 +1,10 @@ +--- # 
roles/vm_format/tasks/main.yml - name: netplan - import_tasks: netplan.yml - -- name: Format and mount the disk - import_tasks: format-mount-disk.yml + ansible.builtin.import_tasks: netplan.yml + +- name: Format and mount the disk + ansible.builtin.import_tasks: format-mount-disk.yml - name: Install postgres - import_tasks: install-postgres.yml \ No newline at end of file + ansible.builtin.import_tasks: install-postgres.yml diff --git a/ansible/roles/vm_format/tasks/netplan.yml b/ansible/roles/vm_format/tasks/netplan.yml index 118cb06..65df3bf 100644 --- a/ansible/roles/vm_format/tasks/netplan.yml +++ b/ansible/roles/vm_format/tasks/netplan.yml @@ -1,61 +1,59 @@ --- - - name: Get port info - openstack.cloud.port_info: - cloud: '{{ ostack_cloud }}' - region_name: '{{ ostack_region }}' - port: 'ipalias-{{ name_stuff }}' - register: ipalias_port - delegate_to: localhost +- name: Get port info + openstack.cloud.port_info: + cloud: '{{ ostack_cloud }}' + region_name: '{{ ostack_region }}' + port: 'ipalias-{{ name_stuff }}' + register: ipalias_port + delegate_to: localhost - - name: Print out the port information - ansible.builtin.debug: - msg: "Port {{ lookup('ansible.builtin.dict', ipalias_port) }}" - delegate_to: localhost +- name: Print out the port information + ansible.builtin.debug: + msg: "Port {{ lookup('ansible.builtin.dict', ipalias_port) }}" + delegate_to: localhost - - name: IP alias netplan configuration - ansible.builtin.set_fact: - netplan_config: - network: - version: 2 - ethernets: - ens6: - dhcp4: true - dhcp4-overrides: - use-routes: false - match: - macaddress: '{{ ipalias_port.ports.0.mac_address }}' - set-name: ens6 - routes: - - to: 0.0.0.0/0 - via: '{{ ipalias_ostack_network_cidr | ansible.utils.ipaddr("net") | ansible.utils.ipaddr("1") | ansible.utils.ipaddr("address") }}' - table: 102 - routing-policy: - - from: '{{ ipalias_ostack_network_cidr }}' - table: 102 - become: true +- name: IP alias netplan configuration + ansible.builtin.set_fact: + 
netplan_config: + network: + version: 2 + ethernets: + ens6: + dhcp4: true + dhcp4-overrides: + use-routes: false + match: + macaddress: '{{ ipalias_port.ports.0.mac_address }}' + set-name: ens6 + routes: + - to: 0.0.0.0/0 + via: '{{ ipalias_ostack_network_cidr | ansible.utils.ipaddr("net") | ansible.utils.ipaddr("1") | ansible.utils.ipaddr("address") }}' + table: 102 + routing-policy: + - from: '{{ ipalias_ostack_network_cidr }}' + table: 102 + become: true - - name: Copy out ipalias netplan config - ansible.builtin.copy: - content: '{{ netplan_config | to_nice_yaml }}' - dest: /etc/netplan/90-ansible-ipalias.yaml - mode: '0644' - register: netplan_config - become: true +- name: Copy out ipalias netplan config + ansible.builtin.copy: + content: '{{ netplan_config | to_nice_yaml }}' + dest: /etc/netplan/90-ansible-ipalias.yaml + mode: '0644' + register: netplan_config + become: true - - name: Print out netplan config - ansible.builtin.debug: - msg: "Netplan {{ netplan_config }}" - - - name: Apply netplan - ansible.builtin.command: sudo netplan apply - async: 45 - poll: 0 - -# https://gitlab.met.no/ansible-roles/ipalias/-/blob/master/tasks/netplan.yml?ref_type=heads -# this times out and then the servers are uncreachable? -# - name: Reboot server to apply new netplan config, without hitting netplan bug -# ansible.builtin.reboot: # noqa no-handler +- name: Print out netplan config + ansible.builtin.debug: + msg: "Netplan {{ netplan_config }}" + # https://gitlab.met.no/ansible-roles/ipalias/-/blob/master/tasks/netplan.yml?ref_type=heads + # this times out and then the servers are uncreachable? 
+ # - name: Reboot server to apply new netplan config, without hitting netplan bug + # ansible.builtin.reboot: # noqa no-handler # reboot_timeout: 3600 -# when: netplan_config is changed -# become: true - \ No newline at end of file + # when: netplan_config is changed + # become: true + +- name: Apply netplan + ansible.builtin.command: sudo netplan apply + async: 45 + poll: 0 diff --git a/ansible/switchover.yml b/ansible/switchover.yml index 48c7ec6..e7cd16c 100644 --- a/ansible/switchover.yml +++ b/ansible/switchover.yml @@ -1,3 +1,4 @@ +--- - name: Switch the primary and standby / replica hosts: localhost vars: @@ -5,15 +6,17 @@ ostack_region: Ostack2-EXT gather_facts: false pre_tasks: - - name: find primary ip from inventory + - name: Find primary ip from inventory ansible.builtin.set_fact: - primary_ip: '{{ item }}' - with_inventory_hostnames: '{{ name_primary }}' - - name: find standby ip from inventory + primary_ip: "{{ item }}" + with_inventory_hostnames: "{{ name_primary }}" + + - name: Find standby ip from inventory ansible.builtin.set_fact: - standby_ip: '{{ item }}' - with_inventory_hostnames: '{{ name_standby }}' + standby_ip: "{{ item }}" + with_inventory_hostnames: "{{ name_standby }}" + roles: # ensure the names are passed in the right way around for the current state! 
- - role: switchover - - role: movefloatingip \ No newline at end of file + - role: switchover + - role: movefloatingip diff --git a/fake_data_generator/Cargo.toml b/fake_data_generator/Cargo.toml index 4578782..ceb8768 100644 --- a/fake_data_generator/Cargo.toml +++ b/fake_data_generator/Cargo.toml @@ -12,3 +12,6 @@ rand_distr.workspace = true serde.workspace = true tokio.workspace = true tokio-postgres.workspace = true + +[[bin]] +name = "generate_partitions" diff --git a/fake_data_generator/src/bin/generate_partitions.rs b/fake_data_generator/src/bin/generate_partitions.rs new file mode 100644 index 0000000..8d76f55 --- /dev/null +++ b/fake_data_generator/src/bin/generate_partitions.rs @@ -0,0 +1,47 @@ +use std::{fs::File, io::Write}; + +use chrono::{DateTime, TimeZone, Utc}; + +use std::io::BufWriter; + +fn create_table_partitions( + table: &str, + boundaries: &[DateTime<Utc>], + writer: &mut BufWriter<File>, +) -> Result<(), std::io::Error> { + // .windows(2) gives a 2-wide sliding view of the vector, so we can see + // both bounds relevant to a partition + for window in boundaries.windows(2) { + let start_time = window[0]; + let end_time = window[1]; + + let line = format!( + "CREATE TABLE IF NOT EXISTS {}_y{}_to_y{} PARTITION OF public.{}\nFOR VALUES FROM ('{}') TO ('{}');\n", + table, + start_time.format("%Y"), + end_time.format("%Y"), + table, + start_time.format("%Y-%m-%d %H:%M:%S+00"), + end_time.format("%Y-%m-%d %H:%M:%S+00") + ); + writer.write_all(line.as_bytes())?; + } + + Ok(()) +} + +fn main() -> Result<(), std::io::Error> { + let outfile = File::create("../db/partitions_generated.sql")?; + let mut writer = BufWriter::new(outfile); + + // create a vector of the boundaries between partitions + let paritition_boundary_years: Vec<DateTime<Utc>> = [1950, 2000, 2010] + .into_iter() + .chain(2015..=2030) + .map(|y| Utc.with_ymd_and_hms(y, 1, 1, 0, 0, 0).unwrap()) + .collect(); + + create_table_partitions("data", &paritition_boundary_years, &mut writer)?; 
create_table_partitions("nonscalar_data", &paritition_boundary_years, &mut writer)?; + + Ok(()) +}