
Commit c937b01

Adding nvme gateway deployments to common config
Signed-off-by: tintumathew10 <[email protected]>
1 parent: 93db54c

2 files changed (+327, -0 lines)
@@ -0,0 +1,166 @@
# Single site cluster with 17 Ceph & 5 Client nodes
# with daemons - 17 crash, 5 mon, 3 mgr, 3 rgw,
# 5 mds, 5 nfs, 4 nvmeof-gw, 10 (+2 backup) osds and
# 1 alertmanager, node-exporter, prometheus, grafana

globals:
  - ceph-cluster:
      name: ceph
      node1:
        networks:
          - shared_net_4
        role:
          - _admin
          - installer
          - alertmanager
          - prometheus
          - grafana
          - mon
      node2:
        networks:
          - shared_net_4
        role:
          - mon
          - nfs
          - osd
        no-of-volumes: 4
        disk-size: 20
      node3:
        networks:
          - shared_net_4
        role:
          - mon
          - nfs
          - osd
        no-of-volumes: 4
        disk-size: 20
      node4:
        networks:
          - shared_net_4
        role:
          - rgw
          - mgr
          - osd
        no-of-volumes: 4
        disk-size: 20
      node5:
        networks:
          - shared_net_4
        role:
          - nfs
          - osd
        no-of-volumes: 4
        disk-size: 20
      node6:
        networks:
          - shared_net_4
        role:
          - nfs
          - osd
        no-of-volumes: 4
        disk-size: 20
      node7:
        networks:
          - shared_net_4
        role:
          - mds
          - osd
        no-of-volumes: 4
        disk-size: 20
      node8:
        networks:
          - shared_net_4
        role:
          - mds
          - osd
        no-of-volumes: 4
        disk-size: 20
      node9:
        networks:
          - shared_net_4
        role:
          - mds
          - osd
        no-of-volumes: 4
        disk-size: 20
      node10:
        networks:
          - shared_net_4
        role:
          - rgw
          - mds
          - osd
        no-of-volumes: 4
        disk-size: 20
      node11:
        networks:
          - shared_net_4
        role:
          - rgw
          - mds
          - osd
        no-of-volumes: 4
        disk-size: 20
      node12:
        networks:
          - shared_net_4
        role:
          - mon
          - nfs
          - osd-bak
        no-of-volumes: 4
        disk-size: 20
      node13:
        networks:
          - shared_net_4
        role:
          - mon
          - mgr
          - osd-bak
        no-of-volumes: 4
        disk-size: 20
      node14:
        networks:
          - shared_net_4
        role:
          - nvmeof-gw
      node15:
        networks:
          - shared_net_4
        role:
          - nvmeof-gw
      node16:
        networks:
          - shared_net_4
        role:
          - nvmeof-gw
      node17:
        networks:
          - shared_net_4
        role:
          - nvmeof-gw
      node18:
        networks:
          - shared_net_5
        role:
          - client
      node19:
        networks:
          - shared_net_5
        role:
          - client
      node20:
        networks:
          - shared_net_15
        role:
          - client
      node21:
        networks:
          - shared_net_15
        role:
          - client
      node22:
        networks:
          - shared_net_15
        role:
          - client
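
The conf above labels node14-node17 as nvmeof-gw hosts, but the suite below never deploys the gateway service on them; the labels are presumably consumed by dedicated NVMe-oF suites. For illustration only, a minimal sketch of extra shell steps, in the same form as the deploy test below, that would bring the gateways up. The pool name nvmeof_pool and gateway group gw_group1 are hypothetical, and the `ceph orch apply nvmeof <pool> <group>` form assumes a release where the group argument is accepted:

          - config:
              command: shell
              args:
                - "ceph osd pool create nvmeof_pool"  # hypothetical pool name
          - config:
              command: shell
              args:
                - "rbd pool init nvmeof_pool"
          - config:
              command: shell
              args:
                # hypothetical pool/group names; 'label:nvmeof-gw' relies on
                # apply-all-labels having turned the role into a host label
                - "ceph orch apply nvmeof nvmeof_pool gw_group1 --placement=label:nvmeof-gw"
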
@@ -0,0 +1,161 @@
# Test suite to deploy and configure a single site cluster
# global-conf: conf/squid/common/17node-4client-single-site-regression.yaml

tests:
  - test:
      name: Setup pre-requisites
      desc: Setup packages and configuration for cluster deployment
      module: install_prereq.py
      abort-on-fail: true

  - test:
      name: Deploy cluster using cephadm
      desc: Bootstrap and deploy services
      polarion-id: CEPH-83573713
      module: test_cephadm.py
      config:
        steps:
          - config:
              service: cephadm
              command: bootstrap
              args:
                mon-ip: node1
          - config:
              service: host
              command: add_hosts
              args:
                attach_ip_address: true
                labels: apply-all-labels
          - config:
              service: osd
              command: apply
              args:
                all-available-devices: true
          - config:
              command: shell
              args:
                - "ceph fs volume create cephfs"
          - config:
              service: mds
              command: apply
              args:
                placement:
                  label: mds
              base_cmd_args:
                verbose: true
              pos_args:
                - cephfs
          - config:
              command: shell
              args:
                - "ceph osd pool create rbd"
          - config:
              command: shell
              args:
                - "rbd pool init rbd"
      abort-on-fail: true

  - test:
      name: Configure client
      desc: Configure the RGW & RBD clients
      module: test_client.py
      config:
        command: add
        id: client.1
        node: node18
        install_packages:
          - ceph-common
          - rbd-nbd
          - jq
          - fio
        copy_admin_keyring: true
        caps:
          mon: "allow *"
          osd: "allow *"
          mds: "allow *"
          mgr: "allow *"
      abort-on-fail: true

  - test:
      name: Configure client
      desc: Configure the RGW & RBD clients
      module: test_client.py
      config:
        command: add
        id: client.2
        node: node19
        install_packages:
          - ceph-common
          - rbd-nbd
          - jq
          - fio
        copy_admin_keyring: true
        caps:
          mon: "allow *"
          osd: "allow *"
          mds: "allow *"
          mgr: "allow *"
      abort-on-fail: true

  - test:
      name: Configure client
      desc: Configure the RGW & RBD clients
      module: test_client.py
      config:
        command: add
        id: client.3
        node: node20
        install_packages:
          - ceph-common
          - rbd-nbd
          - jq
          - fio
        copy_admin_keyring: true
        caps:
          mon: "allow *"
          osd: "allow *"
          mds: "allow *"
          mgr: "allow *"
      abort-on-fail: true

  - test:
      name: Configure client
      desc: Configure the RGW & RBD clients
      module: test_client.py
      config:
        command: add
        id: client.4
        node: node21
        install_packages:
          - ceph-common
          - rbd-nbd
          - jq
          - fio
        copy_admin_keyring: true
        caps:
          mon: "allow *"
          osd: "allow *"
          mds: "allow *"
          mgr: "allow *"
      abort-on-fail: true

  - test:
      name: Configure client
      desc: Configure the RGW & RBD clients
      module: test_client.py
      config:
        command: add
        id: client.5
        node: node22
        install_packages:
          - ceph-common
          - rbd-nbd
          - jq
          - fio
        copy_admin_keyring: true
        caps:
          mon: "allow *"
          osd: "allow *"
          mds: "allow *"
          mgr: "allow *"
      abort-on-fail: true
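
Since the suite ends after client configuration, a closing smoke test can confirm that everything requested by the conf actually deployed. A minimal sketch, illustrative and not part of the commit, assuming test_cephadm.py accepts a steps list consisting only of shell commands, as the mixed steps in the deploy test above suggest:

  - test:
      name: Verify cluster state
      desc: Confirm deployed services and overall cluster health
      module: test_cephadm.py
      config:
        steps:
          - config:
              command: shell
              args:
                - "ceph orch ls"  # list services placed by cephadm
          - config:
              command: shell
              args:
                - "ceph -s"  # overall health summary
      abort-on-fail: true
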
