Merge pull request #4135 from anrao19/rados_rgw
[Rados-RGW] RGW multisite erasure-code-profile pool suite with bilog trim testcase
mergify[bot] authored Oct 18, 2024
2 parents 1d95cbb + 5fcf11f commit 93038c9
Showing 1 changed file with 274 additions and 0 deletions.
suites/squid/rgw/tier-2_rgw_rados_multisite_ecpool.yaml
@@ -0,0 +1,274 @@
# Test suite for evaluating an RGW multi-site deployment scenario
# in which the data bucket pool is configured to use erasure coding (EC).
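#
# Flow: bootstrap two clusters (ceph-pri and ceph-sec), create a 4+2
# erasure-code profile and an EC-backed RGW data pool on each, configure
# multisite replication between a primary and a secondary zone, then run
# object workloads and a bilog trim test.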

# conf : conf/squid/rgw/ms-ec-profile-4+2-cluster.yaml
---

tests:

  # Cluster deployment stage

  - test:
      abort-on-fail: true
      desc: Install software pre-requisites for cluster deployment.
      module: install_prereq.py
      name: setup pre-requisites

  - test:
      abort-on-fail: true
      clusters:
        ceph-pri:
          config:
            verify_cluster_health: true
            steps:
              - config:
                  command: bootstrap
                  service: cephadm
                  args:
                    mon-ip: node1
                    orphan-initial-daemons: true
                    initial-dashboard-password: admin@123
                    dashboard-password-noupdate: true
              - config:
                  command: add_hosts
                  service: host
                  args:
                    attach_ip_address: true
                    labels: apply-all-labels
              - config:
                  command: apply
                  service: mgr
                  args:
                    placement:
                      label: mgr
              - config:
                  command: apply
                  service: mon
                  args:
                    placement:
                      label: mon
              - config:
                  command: apply
                  service: osd
                  args:
                    all-available-devices: true
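              # Create a 4+2 erasure-code profile: objects are striped into
              # 4 data chunks plus 2 coding chunks, so the pool tolerates
              # the loss of any 2 hosts (crush-failure-domain=host) at a
              # 1.5x storage overhead.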
              - config:
                  args:
                    - "ceph osd erasure-code-profile set rgwec01 k=4 m=2"
                    - "crush-failure-domain=host crush-device-class=hdd"
                  command: shell
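              # Pre-create the zone's data pool (pg_num 32, pgp_num 32) as
              # an erasure-coded pool backed by the rgwec01 profile, so the
              # gateway uses it rather than auto-creating a replicated pool.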
              - config:
                  args:
                    - "ceph osd pool create primary.rgw.buckets.data 32 32"
                    - "erasure rgwec01"
                  command: shell
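              # Tag the pool with the rgw application so health checks do
              # not flag it as an application-less pool.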
              - config:
                  args:
                    - "ceph osd pool application enable"
                    - "primary.rgw.buckets.data rgw"
                  command: shell
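              # Deploy the gateway; cephadm runs it as service
              # rgw.shared.pri and places the daemon on node7.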
              - config:
                  command: apply
                  service: rgw
                  pos_args:
                    - shared.pri
                  args:
                    placement:
                      nodes:
                        - node7
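        # The secondary cluster mirrors the primary's bootstrap, EC profile
        # and data-pool steps, using the zone-specific pool name
        # secondary.rgw.buckets.data.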
        ceph-sec:
          config:
            verify_cluster_health: true
            steps:
              - config:
                  command: bootstrap
                  service: cephadm
                  args:
                    mon-ip: node1
                    orphan-initial-daemons: true
                    initial-dashboard-password: admin@123
                    dashboard-password-noupdate: true
              - config:
                  command: add_hosts
                  service: host
                  args:
                    attach_ip_address: true
                    labels: apply-all-labels
              - config:
                  command: apply
                  service: mgr
                  args:
                    placement:
                      label: mgr
              - config:
                  command: apply
                  service: mon
                  args:
                    placement:
                      label: mon
              - config:
                  command: apply
                  service: osd
                  args:
                    all-available-devices: true
              - config:
                  args:
                    - "ceph osd erasure-code-profile set rgwec01 k=4 m=2"
                    - "crush-failure-domain=host crush-device-class=hdd"
                  command: shell
              - config:
                  args:
                    - "ceph osd pool create secondary.rgw.buckets.data 32 32"
                    - "erasure rgwec01"
                  command: shell
              - config:
                  args:
                    - "ceph osd pool application enable"
                    - "secondary.rgw.buckets.data rgw"
                  command: shell
              - config:
                  command: apply
                  service: rgw
                  pos_args:
                    - shared.sec
                  args:
                    placement:
                      nodes:
                        - node7
      desc: RHCS cluster deployment using cephadm.
      destroy-cluster: false
      module: test_cephadm.py
      name: deploy cluster
      polarion-id: CEPH-83575222

  - test:
      abort-on-fail: true
      clusters:
        ceph-pri:
          config:
            command: add
            id: client.1
            node: node8
            install_packages:
              - ceph-common
            copy_admin_keyring: true
        ceph-sec:
          config:
            command: add
            id: client.1
            node: node8
            install_packages:
              - ceph-common
            copy_admin_keyring: true
      desc: Configure the RGW client system
      destroy-cluster: false
      module: test_client.py
      name: configure client
      polarion-id: CEPH-83573758
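
  # Multisite configuration: the primary creates the realm, zonegroup and
  # master zone, plus a system user whose keys authenticate zone-to-zone
  # replication; the secondary pulls the realm and period and registers
  # its own zone against the primary's endpoint.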
  - test:
      abort-on-fail: true
      clusters:
        ceph-pri:
          config:
            cephadm: true
            commands:
              - "radosgw-admin realm create --rgw-realm india --default"
              - "radosgw-admin zonegroup create --rgw-realm india --rgw-zonegroup shared --endpoints http://{node_ip:node7}:80 --master --default"
              - "radosgw-admin zone create --rgw-realm india --rgw-zonegroup shared --rgw-zone primary --endpoints http://{node_ip:node7}:80 --master --default"
              - "radosgw-admin period update --rgw-realm india --commit"
              - "radosgw-admin user create --uid=repuser --display_name='Replication user' --access-key 21e86bce636c3aa0 --secret cf764951f1fdde5d --rgw-realm india --system"
              - "radosgw-admin zone modify --rgw-realm india --rgw-zonegroup shared --rgw-zone primary --access-key 21e86bce636c3aa0 --secret cf764951f1fdde5d"
              - "radosgw-admin period update --rgw-realm india --commit"
              - "ceph config set client.rgw.{daemon_id:shared.pri} rgw_realm india"
              - "ceph config set client.rgw.{daemon_id:shared.pri} rgw_zonegroup shared"
              - "ceph config set client.rgw.{daemon_id:shared.pri} rgw_zone primary"
              - "ceph orch restart {service_name:shared.pri}"
        ceph-sec:
          config:
            cephadm: true
            commands:
              - "sleep 120"
              - "radosgw-admin realm pull --rgw-realm india --url http://{node_ip:ceph-pri#node7}:80 --access-key 21e86bce636c3aa0 --secret cf764951f1fdde5d --default"
              - "radosgw-admin period pull --url http://{node_ip:ceph-pri#node7}:80 --access-key 21e86bce636c3aa0 --secret cf764951f1fdde5d"
              - "radosgw-admin zone create --rgw-realm india --rgw-zonegroup shared --rgw-zone secondary --endpoints http://{node_ip:node7}:80 --access-key 21e86bce636c3aa0 --secret cf764951f1fdde5d"
              - "radosgw-admin period update --rgw-realm india --commit"
              - "ceph config set client.rgw.{daemon_id:shared.sec} rgw_realm india"
              - "ceph config set client.rgw.{daemon_id:shared.sec} rgw_zonegroup shared"
              - "ceph config set client.rgw.{daemon_id:shared.sec} rgw_zone secondary"
              - "ceph orch restart {service_name:shared.sec}"
      desc: Setting up RGW multisite replication environment
      module: exec.py
      name: setup multisite
      polarion-id: CEPH-10362
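
  # Sanity checks: confirm realm, zonegroup, zone and sync status on both
  # sites before running IO.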
  - test:
      abort-on-fail: true
      clusters:
        ceph-pri:
          config:
            cephadm: true
            commands:
              - "radosgw-admin sync status"
              - "ceph -s"
              - "radosgw-admin realm list"
              - "radosgw-admin zonegroup list"
              - "radosgw-admin zone list"
              - "ceph osd dump"
      desc: Retrieve the configured environment details
      module: exec.py
      name: get shared realm info on primary
      polarion-id: CEPH-83575227

  - test:
      abort-on-fail: true
      clusters:
        ceph-sec:
          config:
            cephadm: true
            commands:
              - "radosgw-admin sync status"
              - "ceph -s"
              - "radosgw-admin realm list"
              - "radosgw-admin zonegroup list"
              - "radosgw-admin zone list"
              - "ceph osd dump"
      desc: Retrieve the configured environment details
      module: exec.py
      name: get shared realm info on secondary
      polarion-id: CEPH-83575227

  # Test workflow

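  # The non-tenanted user is created on the primary and its credentials
  # are copied to the secondary site (copy-user-info-to-site) so the same
  # user can drive IO on either zone.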
  - test:
      clusters:
        ceph-pri:
          config:
            set-env: true
            script-name: user_create.py
            config-file-name: non_tenanted_user.yaml
            copy-user-info-to-site: ceph-sec
      desc: create non-tenanted user
      module: sanity_rgw_multisite.py
      name: create non-tenanted user
      polarion-id: CEPH-83575199

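  # Write M buckets with N objects from the secondary zone and verify the
  # objects replicate to the primary site (verify-io-on-site).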
  - test:
      clusters:
        ceph-sec:
          config:
            config-file-name: test_Mbuckets_with_Nobjects.yaml
            script-name: test_Mbuckets_with_Nobjects.py
            verify-io-on-site: [ "ceph-pri" ]
      desc: Execute M buckets with N objects on secondary cluster
      polarion-id: CEPH-83575435
      module: sanity_rgw_multisite.py
      name: m buckets with n objects

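  # Bilog trim: each bucket's index shards keep a change log (bilog) that
  # drives multisite data sync; once the peer zone has consumed the
  # entries they are expected to be trimmed. This test exercises trimming
  # on the primary.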
  - test:
      name: Bilog trimming test on primary
      desc: test bilog trimming on primary
      polarion-id: CEPH-83572658  # CEPH-10722, CEPH-10547
      module: sanity_rgw_multisite.py
      clusters:
        ceph-pri:
          config:
            script-name: test_bilog_trimming.py
            config-file-name: test_bilog_trimming.yaml
