[DMFG] Add DMFG conf and suite to common #4541

Open · wants to merge 1 commit into main
49 changes: 49 additions & 0 deletions conf/squid/common/5node-1client-rh.yaml
@@ -0,0 +1,49 @@
# Deployment for all the Ceph daemons, with 3 MON, 2 MGR, 2 MDS, 3 OSD and 3 RGW service daemon(s)
globals:
  - ceph-cluster:
      name: ceph
      node1:
        role:
          - _admin
          - installer
          - mon
          - mgr
          - osd
          - node-exporter
          - alertmanager
          - grafana
          - prometheus
          - crash
        no-of-volumes: 6
        disk-size: 15
      node2:
        role:
          - _admin
          - osd
          - mon
          - mgr
          - mds
          - node-exporter
          - alertmanager
          - crash
          - rgw
          - haproxy
        no-of-volumes: 6
        disk-size: 15
      node3:
        role:
          - mon
          - osd
          - node-exporter
          - crash
          - rgw
          - mds
          - haproxy
        no-of-volumes: 6
        disk-size: 15
      node4:
        role:
          - rgw
      node5:
        role:
          - client
207 changes: 207 additions & 0 deletions suites/squid/common/regression/ss-dmfg-container-cli-args.yaml
@@ -0,0 +1,207 @@
# Conf file  : conf/squid/common/5node-1client-rh.yaml
# Deployment : suites/squid/common/regression/ss-single-client-deploy-and-configure.yaml

tests:

  - test:
      name: Service deployment with spec
      desc: Add services using spec file.
      module: test_cephadm.py
      polarion-id: CEPH-83574887
      config:
        steps:
          - config:
              command: apply_spec
              service: orch
              validate-spec-services: true
              specs:
                - service_type: mon
                  placement:
                    nodes:
                      - node1
                      - node2
                      - node3
                  extra_container_args:
                    - "--cpus=2"
                - service_type: mgr
                  placement:
                    label: mgr
                  extra_container_args:
                    - "--cpus=2"
                - service_type: prometheus
                  placement:
                    count: 1
                    nodes:
                      - node1
                  extra_container_args:
                    - "--cpus=1"
                - service_type: grafana
                  placement:
                    nodes:
                      - node1
                  extra_container_args:
                    - "--cpus=1"
                - service_type: alertmanager
                  placement:
                    count: 2
                    label: alertmanager
                  extra_container_args:
                    - "--cpus=1"
                - service_type: node-exporter
                  placement:
                    host_pattern: "*"
                  extra_container_args:
                    - "--cpus=1"
                - service_type: crash
                  placement:
                    host_pattern: "*"
                  extra_container_args:
                    - "--cpus=1"
                - service_type: osd
                  service_id: all-available-devices
                  placement:
                    host_pattern: "*"
                  spec:
                    data_devices:
                      all: "true"                 # boolean as string
                    encrypted: "true"             # boolean as string
                  extra_container_args:
                    - "--cpus=2"
          - config:
              command: shell
              args:               # sleep to get all services deployed
                - sleep
                - "300"
          - config:
              command: shell
              args:               # create a CephFS volume
                - ceph
                - fs
                - volume
                - create
                - cephfs
          - config:
              command: apply_spec
              service: orch
              specs:
                - service_type: mds
                  service_id: cephfs
                  placement:
                    label: mds
                  extra_container_args:
                    - "--cpus=1"
          - config:
              command: apply_spec
              service: orch
              specs:
                - service_type: nfs
                  service_id: nfs-rgw-service
                  placement:
                    nodes:
                      - node4
                  spec:
                    port: 2049
                  extra_container_args:
                    - "--cpus=1"
          - config:
              command: shell
              args:
                - "radosgw-admin realm create --rgw-realm=east --default"
          - config:
              command: shell
              args:
                - "radosgw-admin zonegroup create --rgw-zonegroup=asia --master --default"
          - config:
              command: shell
              args:
                - "radosgw-admin zone create --rgw-zonegroup=asia --rgw-zone=india --master --default"
          - config:
              command: shell
              args:
                - "radosgw-admin period update --rgw-realm=east --commit"
          - config:
              command: apply_spec
              service: orch
              specs:
                - service_type: rgw
                  service_id: my-rgw
                  placement:
                    count_per_host: 2
                    nodes:
                      - node4
                      - node3
                      - node2
                  spec:
                    rgw_frontend_port: 8080
                    rgw_realm: east
                    rgw_zone: india
                  extra_container_args:
                    - "--cpus=1"
          - config:
              command: shell
              args:               # sleep to get all services deployed
                - sleep
                - "120"

  - test:
      name: Setup destination node for SNMP traps
      desc: Install snmptrapd tool and install CEPH MIB on destination node
      module: snmp_destination.py
      polarion-id: CEPH-83574775
      config:
        node: node3
        specs:
          - service_type: snmp-destination
            spec:
              credentials:
                snmp_v3_auth_username: myadmin
                snmp_v3_auth_password: mypassword

  - test:
      name: SNMP Gateway Service deployment with spec
      desc: Add SNMP Gateway services using spec file
      module: test_cephadm.py
      polarion-id: CEPH-83574743
      config:
        steps:
          - config:
              command: apply_spec
              service: orch
              specs:
                - service_type: snmp-gateway
                  service_name: snmp-gateway
                  placement:
                    count: 1
                  spec:
                    credentials:
                      snmp_v3_auth_username: myadmin
                      snmp_v3_auth_password: mypassword
                    port: 9464
                    snmp_destination: node3
                    snmp_version: V3
          - config:
              command: shell
              args:               # sleep to get all services deployed
                - sleep
                - "120"

  - test:
      name: Configure client
      desc: Configure client on node5
      module: test_client.py
      polarion-id: CEPH-83573758
      config:
        command: add
        id: client.1                      # client Id (<type>.<Id>)
        node: node5                       # client node
        install_packages:
          - ceph-common                   # install ceph common packages
        copy_admin_keyring: true          # Copy admin keyring to node
        store-keyring: true               # /etc/ceph/ceph.client.1.keyring
        caps:                             # authorize client capabilities
          mon: "allow *"
          osd: "allow *"
          mds: "allow *"
          mgr: "allow *"
      destroy-cluster: false
      abort-on-fail: true
110 changes: 110 additions & 0 deletions suites/squid/common/regression/ss-dmfg-permissive-mode.yaml
@@ -0,0 +1,110 @@
# Conf file  : conf/squid/common/5node-1client-rh.yaml
# Deployment : suites/squid/common/regression/ss-single-client-deploy-and-configure.yaml

tests:
  - test:
      name: Add host with labels to cluster using cephadm-ansible wrapper modules
      desc: Execute 'playbooks/add-host-to-cluster.yaml' playbook
      polarion-id: CEPH-83575203
      module: test_cephadm_ansible_operations.py
      config:
        ceph_orch_host:
          playbook: add-ceph-orch-host.yaml
          module_args:
            name: node2
            address: node2
            labels: osd.1
      abort-on-fail: true

  - test:
      name: Deploy OSD service to cluster using cephadm-ansible wrapper modules
      desc: Execute 'deploy-osd-service.yaml' playbook
      polarion-id: CEPH-83575213
      module: test_cephadm_ansible_operations.py
      config:
        ceph_orch_apply:
          playbook: deploy-osd-service.yaml
          module_args:
            label: osd.1

  - test:
      name: Add host with labels to cluster using cephadm-ansible wrapper modules
      desc: Execute 'add-host-to-cluster.yaml' playbook
      polarion-id: CEPH-83575203
      module: test_cephadm_ansible_operations.py
      config:
        ceph_orch_host:
          playbook: add-ceph-orch-host.yaml
          module_args:
            name: node3
            address: node3
            labels: osd.2
      abort-on-fail: true

  - test:
      name: Deploy OSD service to cluster using cephadm-ansible wrapper modules
      desc: Execute 'deploy-osd-service.yaml' playbook
      polarion-id: CEPH-83575213
      module: test_cephadm_ansible_operations.py
      config:
        ceph_orch_apply:
          playbook: deploy-osd-service.yaml
          module_args:
            label: osd.2

  - test:
      name: Add host with labels to cluster using cephadm-ansible wrapper modules
      desc: Execute 'add-host-to-cluster.yaml' playbook
      polarion-id: CEPH-83575203
      module: test_cephadm_ansible_operations.py
      config:
        ceph_orch_host:
          playbook: add-ceph-orch-host.yaml
          module_args:
            name: node4
            address: node4
            labels: osd.3
      abort-on-fail: true

  - test:
      name: Deploy OSD service to cluster using cephadm-ansible wrapper modules
      desc: Execute 'deploy-osd-service.yaml' playbook
      polarion-id: CEPH-83575213
      module: test_cephadm_ansible_operations.py
      config:
        ceph_orch_apply:
          playbook: deploy-osd-service.yaml
          module_args:
            label: osd.3

  - test:
      name: Check cluster status when cluster nodes are rebooted
      desc: Perform reboot and check ceph status
      polarion-id: CEPH-83573754
      module: test_verify_cluster_health_after_reboot.py
      config:
        action: node-reboot
      abort-on-fail: true

  - test:
      name: Check cluster status when daemon services are started, restarted, and stopped with systemctl
      desc: Verify systemctl ops of services
      polarion-id: CEPH-83573755
      module: test_verify_cluster_health_after_reboot.py
      config:
        action: service-state
      abort-on-fail: true

  - test:
      name: Verify 'ceph health detail' output
      desc: Verify ceph health detail info when mon node is offline
      polarion-id: CEPH-83575328
      module: test_ceph_health_detail_when_mon_offline.py

  - test:
      name: Change cluster network and public network using cephadm
      desc: Verify changing the cluster network using cephadm
      polarion-id: CEPH-83575112
      module: test_validate_cluster_and_public_network_change.py
      config:
        public-network: 10.0.201.0/24