Ceph full thresholds upgrade combinations #11589

Open · wants to merge 2 commits into master
12 changes: 4 additions & 8 deletions ocs_ci/ocs/ocs_upgrade.py
@@ -685,11 +685,11 @@ def run_ocs_upgrade(
         upgrade_ocs.set_upgrade_images()
         live_deployment = config.DEPLOYMENT["live_deployment"]
         disable_addon = config.DEPLOYMENT.get("ibmcloud_disable_addon")
-        if (
+        managed_ibmcloud_platform = (
             config.ENV_DATA["platform"] == constants.IBMCLOUD_PLATFORM
-            and live_deployment
-            and not disable_addon
-        ):
+            and config.ENV_DATA["deployment_type"] == "managed"
+        )
+        if managed_ibmcloud_platform and live_deployment and not disable_addon:
             clustername = config.ENV_DATA.get("cluster_name")
             cmd = f"ibmcloud ks cluster addon disable openshift-data-foundation --cluster {clustername} -f"
             run_ibmcloud_cmd(cmd)
@@ -717,10 +717,6 @@ def run_ocs_upgrade(
         if ui_upgrade_supported:
             ocs_odf_upgrade_ui()
         else:
-            managed_ibmcloud_platform = (
-                config.ENV_DATA["platform"] == constants.IBMCLOUD_PLATFORM
-                and config.ENV_DATA["deployment_type"] == "managed"
-            )
             if managed_ibmcloud_platform and not upgrade_in_current_source:
                 create_ocs_secret(config.ENV_DATA["cluster_namespace"])
                 if upgrade_version != "4.9":
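Net effect of the two hunks above: the managed-IBM-Cloud check is computed once, up front, and the resulting flag is reused by both the addon-disable branch and the later secret-creation branch instead of being rebuilt inside the else clause. Note that the first branch now additionally requires deployment_type == "managed", which the old inline condition did not check. A condensed sketch of the resulting flow (illustrative only, not a verbatim excerpt; surrounding upgrade logic elided):

    # Condensed sketch of run_ocs_upgrade() after this change -- illustrative only.
    managed_ibmcloud_platform = (
        config.ENV_DATA["platform"] == constants.IBMCLOUD_PLATFORM
        and config.ENV_DATA["deployment_type"] == "managed"
    )
    if managed_ibmcloud_platform and live_deployment and not disable_addon:
        # Disable the ODF addon through the IBM Cloud CLI before upgrading.
        clustername = config.ENV_DATA.get("cluster_name")
        run_ibmcloud_cmd(
            f"ibmcloud ks cluster addon disable openshift-data-foundation "
            f"--cluster {clustername} -f"
        )
    ...
    if managed_ibmcloud_platform and not upgrade_in_current_source:
        create_ocs_secret(config.ENV_DATA["cluster_namespace"])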
@@ -13,12 +13,49 @@
     tier2,
     skipif_external_mode,
     brown_squad,
+    pre_upgrade,
+    post_upgrade,
 )
 
 logger = logging.getLogger(__name__)
 
+THRESHOLDS = [
+    {
+        "sc_key": "backfillFullRatio",
+        "value": "0.81",
+        "default_value": "0.8",
+        "ceph_key": "backfillfull_ratio",
+    },
+    {
+        "sc_key": "fullRatio",
+        "value": "0.86",
+        "default_value": "0.85",
+        "ceph_key": "full_ratio",
+    },
+    {
+        "sc_key": "nearFullRatio",
+        "value": "0.77",
+        "default_value": "0.75",
+        "ceph_key": "nearfull_ratio",
+    },
+]
+
+
+@pytest.fixture()
+def thresholds_teardown_fixture(request):
+    """
+    Teardown: restore the default ceph full threshold values in the StorageCluster CR
+
+    """
+
+    def finalizer():
+        configure_cephcluster_params_in_storagecluster_cr(
+            params=THRESHOLDS, default_values=True
+        )
+
+    request.addfinalizer(finalizer)
 
 
 @tier2
 @brown_squad
 @skipif_external_mode
 @pytest.mark.polarion_id("OCS-6224")
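The module-level THRESHOLDS list and thresholds_teardown_fixture replace the former class attribute and autouse fixture, so the pre-upgrade test can set non-default values without an automatic teardown undoing them before the upgrade runs. The helper configure_cephcluster_params_in_storagecluster_cr is not part of this diff; a rough, hypothetical sketch of what such a helper plausibly does, assuming the ratios are patched onto the StorageCluster CR (the CR path, resource name, and signature here are assumptions, not the real ocs-ci implementation):

    # Hypothetical sketch -- the real ocs-ci helper is not shown in this diff.
    import json

    def configure_cephcluster_params_in_storagecluster_cr(params, default_values):
        """Patch each ceph full-threshold key on the StorageCluster CR."""
        storagecluster = OCP(
            kind=constants.STORAGECLUSTER,
            namespace=config.ENV_DATA["cluster_namespace"],
            resource_name=constants.DEFAULT_CLUSTERNAME,
        )
        for param in params:
            value = param["default_value"] if default_values else param["value"]
            # spec.managedResources.cephCluster is an assumed location for these ratios.
            patch = json.dumps(
                {"spec": {"managedResources": {"cephCluster": {param["sc_key"]: float(value)}}}}
            )
            storagecluster.patch(params=patch, format_type="merge")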
@@ -28,58 +65,15 @@ class TestStorageClusterCephFullThresholdsParams(ManageTest):
 
     """
 
-    TRESHOLDS = [
-        {
-            "sc_key": "backfillFullRatio",
-            "value": "0.81",
-            "default_value": "0.8",
-            "ceph_key": "backfillfull_ratio",
-        },
-        {
-            "sc_key": "fullRatio",
-            "value": "0.86",
-            "default_value": "0.85",
-            "ceph_key": "full_ratio",
-        },
-        {
-            "sc_key": "nearFullRatio",
-            "value": "0.77",
-            "default_value": "0.75",
-            "ceph_key": "nearfull_ratio",
-        },
-    ]
-
-    @pytest.fixture(autouse=True)
-    def teardown_fixture(self, request):
-        """
-        Teardown function
-
-        """
-
-        def finalizer():
-            configure_cephcluster_params_in_storagecluster_cr(
-                params=self.TRESHOLDS, default_values=True
-            )
-
-        request.addfinalizer(finalizer)
-
-    def test_storagecluster_ceph_full_thresholds_params(self):
-        """
-        Procedure:
-        1.Configure storagecluster CR
-        2.Wait 2 seconds
-        3.Verify ceph full thresholds parameters on cephcluster CR and storagecluster CR are same
-        4.Verify parameters with ceph CLI 'ceph osd dump'
-        5.Configure the default params on storagecluster [treardown]
-
-        """
+    def setup_thresholds_params(self):
         configure_cephcluster_params_in_storagecluster_cr(
-            params=self.TRESHOLDS, default_values=False
+            params=THRESHOLDS, default_values=False
         )
 
         logger.info("Wait 2 sec the cephcluster will updated")
         time.sleep(2)
 
+    def verify_thresholds_params(self):
         logger.info(
             "Verify upgrade parameters on cephcluster CR and storagecluster CR are same"
         )
@@ -88,7 +82,7 @@ def test_storagecluster_ceph_full_thresholds_params(self):
             namespace=config.ENV_DATA["cluster_namespace"],
             resource_name=constants.CEPH_CLUSTER_NAME,
         )
-        for parameter in self.TRESHOLDS:
+        for parameter in THRESHOLDS:
             actual_value = cephcluster_obj.data["spec"]["storage"][parameter["sc_key"]]
             assert (
                 str(actual_value).lower() == str(parameter["value"]).lower()
@@ -100,7 +94,7 @@ def test_storagecluster_ceph_full_thresholds_params(self):
             func=run_cmd_verify_cli_output,
             cmd="ceph osd dump",
             expected_output_lst=(
-                tuple(f"{d['ceph_key']} {d['value']}" for d in self.TRESHOLDS)
+                tuple(f"{d['ceph_key']} {d['value']}" for d in THRESHOLDS)
             ),
             cephtool_cmd=True,
             ocs_operator_cmd=False,
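For reference, the expected_output_lst strings correspond to the ratio lines that ceph osd dump prints near the top of its output. With the non-default THRESHOLDS applied, the relevant lines of the dump would read (illustrative excerpt; surrounding fields such as epoch and fsid vary per cluster):

    full_ratio 0.86
    backfillfull_ratio 0.81
    nearfull_ratio 0.77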
@@ -110,3 +104,44 @@ def test_storagecluster_ceph_full_thresholds_params(self):
             raise Exception(
                 "The ceph full thresholds storagecluster parameters are not updated in ceph tool"
             )
+
+    @tier2
+    def test_storagecluster_ceph_full_thresholds_params(
+        self, thresholds_teardown_fixture
+    ):
+        """
+        Procedure:
+        1. Configure the storagecluster CR
+        2. Wait 2 seconds
+        3. Verify the ceph full threshold parameters on the cephcluster CR and the storagecluster CR match
+        4. Verify the parameters with the ceph CLI 'ceph osd dump'
+        5. Restore the default params on the storagecluster [teardown]
+
+        """
+        self.setup_thresholds_params()
+        self.verify_thresholds_params()
+
+    @pre_upgrade
+    def test_pre_upgrade_storagecluster_ceph_full_thresholds_params(self):
+        """
+        Procedure:
+        1. Configure the storagecluster CR
+        2. Wait 2 seconds
+        3. Verify the ceph full threshold parameters on the cephcluster CR and the storagecluster CR match
+        4. Verify the parameters with the ceph CLI 'ceph osd dump'
+
+        """
+        self.setup_thresholds_params()
+        self.verify_thresholds_params()
+
+    @post_upgrade
+    def test_post_upgrade_storagecluster_ceph_full_thresholds_params(
+        self, thresholds_teardown_fixture
+    ):
+        """
+        Procedure:
+        1. Verify the ceph full threshold parameters on the cephcluster CR and the storagecluster CR match
+        2. Verify the parameters with the ceph CLI 'ceph osd dump'
+
+        """
+        self.verify_thresholds_params()
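The marker split is what makes the upgrade combination work: the pre_upgrade test sets and verifies the non-default thresholds without any teardown, the upgrade then runs, and the post_upgrade test re-verifies the same values before its fixture restores the defaults. A hedged sketch of how such a sequence is typically driven (the marker names come from this diff; the run-ci flags and config file name are illustrative, not a prescribed invocation):

    run-ci -m pre_upgrade tests/ --ocsci-conf upgrade_conf.yaml    # set + verify thresholds
    run-ci -m ocs_upgrade tests/ --ocsci-conf upgrade_conf.yaml    # perform the OCS upgrade
    run-ci -m post_upgrade tests/ --ocsci-conf upgrade_conf.yaml   # re-verify, then restore defaults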