# This file can be used directly by 'phd', see 'build-all.sh' in this
# directory for how it can be invoked. The only requirement is a list
# of nodes you'd like it to modify.
#
# The scope of each command-block is controlled by the preceding
# 'target' line.
#
# - target=all
# The commands are executed on every node provided
#
# - target=local
# The commands are executed from the node hosting phd. When not
# using phd, they should be run from some other independent host
# (such as the puppet master)
#
# - target=$PHD_ENV_nodes{N}
# The commands are executed on the Nth node provided.
# For example, to run only on the first node, use target=$PHD_ENV_nodes1
#
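#
# A minimal illustration of the block layout (hypothetical package and
# service names, not part of this scenario): each 'target' line is followed
# by the commands it scopes, wrapped in '....' delimiters:
#
#   target=all
#   ....
#   yum install -y some-package
#   systemctl enable some-service
#   ....
#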
# Tasks to be performed at this step include installing rabbitmq-server,
# binding it to the internal network, distributing the Erlang cookie to all
# nodes, and clustering RabbitMQ under pacemaker.
#################################
# Scenario Requirements Section #
#################################
= VARIABLES =
PHD_VAR_deployment
PHD_VAR_env_configdir
PHD_VAR_network_domain
PHD_VAR_network_internal
#################################
# Scenario Requirements Section #
#################################
= REQUIREMENTS =
nodes: 1
######################
# Deployment Scripts #
######################
= SCRIPTS =
target=all
....
yum install -y rabbitmq-server nfs-utils
# HACK HACK HACK
# Download directly until 7.1 is out and we get access to the 0-day (which has the rabbit agent)
rpm -Uvh http://download.eng.bos.redhat.com/brewroot/packages/resource-agents/3.9.5/40.el7.2/x86_64/resource-agents-3.9.5-40.el7.2.x86_64.rpm
# NOTE: we need to bind the service to the internal IP address
cat > /etc/rabbitmq/rabbitmq-env.conf << EOF
NODE_IP_ADDRESS=$(ip addr show dev eth1 scope global | grep dynamic| sed -e 's#.*inet ##g' -e 's#/.*##g')
EOF
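# Because the here-document delimiter above is unquoted, the command
# substitution is resolved while the file is written, so rabbitmq-env.conf
# ends up holding a literal address, e.g. (hypothetical value):
#   NODE_IP_ADDRESS=192.168.16.101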
# required to generate the cookies
systemctl start rabbitmq-server
systemctl stop rabbitmq-server
....
target=all
....
# Unfortunately https://bugzilla.redhat.com/show_bug.cgi?id=1175005
# prevents NFS mounts from succeeding by default prior to a reboot
systemctl daemon-reload
systemctl start rpcbind.service
systemctl start rpc-statd.service
# Now mount /srv so that we can use $PHD_VAR_env_configdir further down
if grep -q srv /etc/fstab; then
    echo "/srv is already mounted"
else
    mkdir -p /srv
    echo "${PHD_VAR_network_internal}.1:/srv /srv nfs defaults,v3 0 0" >> /etc/fstab
    mount /srv
fi
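# For illustration, with PHD_VAR_network_internal set to, say, 192.168.16
# (hypothetical value), the appended fstab entry would read:
#   192.168.16.1:/srv  /srv  nfs  defaults,v3  0 0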
....
target=$PHD_ENV_nodes1
....
mkdir -p $PHD_VAR_env_configdir
cp /var/lib/rabbitmq/.erlang.cookie $PHD_VAR_env_configdir/rabbitmq_erlang_cookie
....
target=all
....
# The cookie has to be the same across all nodes. Copy it around however you
# prefer; here the shared NFS storage is used. Also check file permissions and
# ownership on the destination: using 'cat' instead of 'cp' preserves them,
# which works around that step.
cat $PHD_VAR_env_configdir/rabbitmq_erlang_cookie > /var/lib/rabbitmq/.erlang.cookie
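# An equivalent alternative (a sketch, not used here) would be to copy the
# file and then restore ownership and permissions explicitly:
#   cp $PHD_VAR_env_configdir/rabbitmq_erlang_cookie /var/lib/rabbitmq/.erlang.cookie
#   chown rabbitmq:rabbitmq /var/lib/rabbitmq/.erlang.cookie
#   chmod 400 /var/lib/rabbitmq/.erlang.cookie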
....
target=$PHD_ENV_nodes1
....
pcs resource create rabbitmq-server rabbitmq-cluster set_policy='HA ^(?!amq\.).* {"ha-mode":"all"}' --clone ordered=true
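# The command above creates a cloned ocf:heartbeat:rabbitmq-cluster resource;
# the agent joins the nodes into one RabbitMQ cluster and applies the given HA
# policy (mirror every queue whose name does not start with 'amq.').
# To verify afterwards, one could run, for example:
#   pcs status
#   rabbitmqctl cluster_status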
....