-
Notifications
You must be signed in to change notification settings - Fork 9
/
hybrid_slurm_elastic_cluster.yml
145 lines (127 loc) · 3.88 KB
/
hybrid_slurm_elastic_cluster.yml
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
---
tosca_definitions_version: tosca_simple_yaml_1_0

imports:
  - indigo_custom_types: https://raw.githubusercontent.com/indigo-dc/tosca-types/master/custom_types.yaml

description: >
  TOSCA test for launching a Virtual Elastic Cluster. It will launch
  a single front-end that will be in charge of managing the elasticity
  using the LRMS workload.

metadata:
  display_name: SLURM Elastic cluster
  icon: https://github.com/indigo-dc/tosca-types/raw/master/images/slurm.png
  tag: elastic

topology_template:
  inputs:
    wn_num:
      type: integer
      description: Maximum number of WNs in the elastic cluster
      default: 5
      # Canonical YAML boolean (was "yes", a YAML 1.1 truthy literal)
      required: true
    fe_cpus:
      type: integer
      description: Number of CPUs for the front-end node
      default: 1
      required: true
    fe_mem:
      type: scalar-unit.size
      description: Amount of Memory for the front-end node
      default: 1 GB
      required: true
    wn_cpus:
      type: integer
      description: Number of CPUs for the WNs
      default: 1
      required: true
    wn_mem:
      type: scalar-unit.size
      description: Amount of Memory for the WNs
      default: 1 GB
      required: true
    hybrid:
      type: boolean
      description: Flag to specify that this cluster will work in an hybrid environment
      default: false
      required: false

  node_templates:
    # CLUES-based elasticity manager; scales wn_node instances on the
    # front-end according to the SLURM workload.
    elastic_cluster_front_end:
      type: tosca.nodes.indigo.ElasticCluster
      properties:
        deployment_id: orchestrator_deployment_id
        iam_access_token: iam_access_token
        iam_clues_client_id: iam_clues_client_id
        iam_clues_client_secret: iam_clues_client_secret
        hybrid: { get_input: hybrid }
      requirements:
        - lrms: lrms_front_end
        - wn: wn_node
        - host: lrms_server

    # SLURM front-end (controller) daemon, hosted on lrms_server.
    lrms_front_end:
      type: tosca.nodes.indigo.LRMS.FrontEnd.Slurm
      properties:
        wn_ips: { get_attribute: [ lrms_wn, private_address ] }
        hybrid: { get_input: hybrid }
      requirements:
        - host: lrms_server

    # Front-end compute node; public endpoint, opens UDP 1194 for OpenVPN
    # (needed for hybrid/multi-site deployments).
    lrms_server:
      type: tosca.nodes.indigo.Compute
      capabilities:
        endpoint:
          properties:
            dns_name: slurmserver
            network_name: PUBLIC
            ports:
              openvpn:
                protocol: udp
                source: 1194
        host:
          properties:
            num_cpus: { get_input: fe_cpus }
            mem_size: { get_input: fe_mem }
        os:
          properties:
            distribution: ubuntu
            type: linux
            # Quoted: an unquoted 16.04 is parsed as a YAML float
            version: "16.04"

    # SLURM worker-node software; instance count managed elastically
    # between min_instances and the wn_num input.
    wn_node:
      type: tosca.nodes.indigo.LRMS.WorkerNode.Slurm
      properties:
        front_end_ip: { get_attribute: [ lrms_server, private_address, 0 ] }
        public_front_end_ip: { get_attribute: [ lrms_server, public_address, 0 ] }
        hybrid: { get_input: hybrid }
      capabilities:
        wn:
          properties:
            min_instances: 1
            max_instances: { get_input: wn_num }
            default_instances: 1
      requirements:
        - host: lrms_wn

    # Worker compute nodes; count starts at 0, CLUES powers them on/off.
    lrms_wn:
      type: tosca.nodes.indigo.Compute
      capabilities:
        scalable:
          properties:
            count: 0
        host:
          properties:
            num_cpus: { get_input: wn_cpus }
            mem_size: { get_input: wn_mem }
        os:
          properties:
            distribution: ubuntu
            type: linux
            # Quoted: an unquoted 16.04 is parsed as a YAML float
            version: "16.04"

  outputs:
    cluster_ip:
      value: { get_attribute: [ lrms_server, public_address, 0 ] }
    cluster_creds:
      value: { get_attribute: [ lrms_server, endpoint, credential, 0 ] }

# Optional SLA placement policies — uncomment ONE block to pin the
# front-end to a specific site.
#  policies:
#    - deploy_on_recas:
#        type: tosca.policies.indigo.SlaPlacement
#        properties: { sla_id: 5da02ca5cb0eda351f5847f1 }
#        targets: [ lrms_server ]
#  policies:
#    - deploy_on_ifca:
#        type: tosca.policies.indigo.SlaPlacement
#        properties: { sla_id: 5d5ec2c6cb0eda3522b4b3f6 }
#        targets: [ lrms_server ]