|
| 1 | +""" |
| 2 | +Module to verify : |
| 3 | + - Live migration of image with external data source as RAW data format in single ceph cluster |
| 4 | +
|
| 5 | +Test case covered: |
| 6 | +CEPH-83584085 - Live migration of image with external data source as RAW data format in single ceph cluster |
| 7 | +
|
| 8 | +Pre-requisites : |
| 9 | +1. Cluster must be up and running with capacity to create pool |
| 10 | +2. We need atleast one client node with ceph-common package, |
| 11 | + conf and keyring files |
| 12 | +
|
| 13 | +Test Case Flow: |
| 14 | +1.Deploy single ceph cluster along with mon,mgr,osd’s |
| 15 | +2.Create two pools with rbd application enabled for migration as source and destination pool |
| 16 | +3.Store the external raw data in json spec |
| 17 | +E.g: testspec.json |
| 18 | +{ |
| 19 | + "type": "raw", |
| 20 | + "stream": { |
| 21 | + "type": "file", |
| 22 | + "file_path": "/mnt/image-head.raw" |
| 23 | + }, |
| 24 | + "snapshots": [ |
| 25 | + { |
| 26 | + "type": "raw", |
| 27 | + "name": "snap1", |
| 28 | + "stream": { |
| 29 | + "type": "file", |
| 30 | + "file_path": "/mnt/image-snap1.raw" |
| 31 | + } |
| 32 | + }, |
| 33 | + ] (optional oldest to newest ordering of snapshots) |
| 34 | +} |
| 35 | +Get the qcow2 data format with https or s3 streams. |
| 36 | +'{"type":"raw","stream":{"type":"http","url":"http://download.ceph.com/qa/ubuntu-12.04.raw"}}' |
| 37 | +4.Keep two files on image mounted path where one file with data filled |
| 38 | +and checksum noted, other file keep IO in-progress |
| 39 | +Note down md5sum checksum for rest data and IO should not interrupt during migration |
| 40 | +5.Execute prepare migration with import-only option for RBD image from an external source of raw data |
| 41 | +E.g: |
| 42 | +echo '{"type":"raw","stream":{"type":"http","url":"http://download.ceph.com/qa/ubuntu-12.04.qcow2"}}' | |
| 43 | +rbd migration prepare --import-only --source-spec-path - <pool_name>/<image_name> --cluster <cluster-source> |
| 44 | +6. Initiate migration execute using migration execute command |
| 45 | +E.g: |
| 46 | +rbd migration execute TARGET_POOL_NAME/SOURCE_IMAGE_NAME |
| 47 | +7. Check the progress of migration using rbd status |
| 48 | +E.g: |
| 49 | +rbd status TARGET_POOL_NAME/SOURCE_IMAGE_NAME It should display output with migration key |
| 50 | +[ceph: root@rbd-client /]# rbd status targetpool1/sourceimage1 |
| 51 | +Watchers: none |
| 52 | +Migration: |
| 53 | +source: sourcepool1/testimage1 (adb429cb769a) |
| 54 | +destination: targetpool1/testimage1 (add299966c63) |
| 55 | +state: executed |
| 56 | +8.Commit the Migration using migration commit command |
| 57 | +E.g: |
| 58 | +rbd migration commit TARGET_POOL_NAME/SOURCE_IMAGE_NAME |
| 59 | +9.Check the disk usage of the image for data transfer using rbd du command |
| 60 | +10.Verify data integrity by calculating MD5 sums for both the source RBD image and the migrated RBD image. |
| 61 | +11.Check migrated image metadata using rbd info command |
| 62 | +12. initiate the IO on new image |
| 63 | +""" |

import tempfile
from copy import deepcopy

from ceph.rbd.initial_config import initial_rbd_config
from ceph.rbd.utils import check_data_integrity, getdict, random_string
from ceph.rbd.workflows.cleanup import cleanup
from ceph.rbd.workflows.krbd_io_handler import krbd_io_handler
from ceph.rbd.workflows.migration import verify_migration_state
from ceph.rbd.workflows.rbd import create_single_pool_and_images
from ceph.utils import get_node_by_id
from cli.rbd.rbd import Rbd
from utility.log import Log

log = Log(__name__)


def migration_with_raw_data_format(rbd_obj, client, **kw):
    """
    Test Live migration of image with external data source as RAW data format in single ceph cluster
    Args:
        rbd_obj: RBD object
        client : client node object
        **kw: any other arguments
    """

    kw["client"] = client
    rbd = rbd_obj.get("rbd")
    rbd_op = Rbd(client)

    for pool_type in rbd_obj.get("pool_types"):
        rbd_config = kw.get("config", {}).get(pool_type, {})
        multi_pool_config = deepcopy(getdict(rbd_config))

        for pool, pool_config in multi_pool_config.items():
            kw["pool-name"] = pool
            kw.update({f"{pool}": {}})
            kw[pool].update({"pool_type": pool_type})
            # Create an RBD image in pool
            image = "image_" + random_string(len=3)
            out, err = rbd.create(**{"image-spec": f"{pool}/{image}", "size": 1024})
            if err:
                log.error(f"Create image {pool}/{image} failed with error {err}")
                return 1
            else:
                log.info(f"Successfully created image {pool}/{image}")

            # Map, mount and run IOs
            err = run_IO(rbd, pool, image, **kw)
            if err:
                return 1

            # Export rbd image to raw data file
            raw_file = tempfile.mktemp(prefix=f"{image}_", suffix=".raw")
            rbd.export(
                **{
                    "source-image-or-snap-spec": f"{pool}/{image}",
                    "path-name": raw_file,
                }
            )
            raw_spec = {
                "type": "raw",
                "stream": {"type": "file", "file_path": f"{raw_file}"},
            }
            kw["cleanup_files"].append(raw_file)
            kw[pool].update({"spec": raw_spec})
            # Perform Prepare, execute, commit migration
            err = run_prepare_execute_commit(rbd, pool, image, **kw)
            if err:
                return 1

            # Check the disk usage of the image using rbd du
            log.info(f"Verifying Image {image} size with du command")
            image_spec = pool + "/" + image
            image_config = {"image-spec": image_spec}
            out = rbd_op.image_usage(**image_config)
            image_data = out[0]
            image_size = image_data.split("\n")[1].split()[3].strip() + "G"
            log.info(f"Image size captured : {image_size}")
            if image_size == "0G":
                log.error(f"Image size Verification failed for {image}")
                return 1

            # Compare md5sum Integrity
            err = migrate_check_consistency(rbd, pool, image, **kw)
            if err:
                return 1

            # Check migrated image metadata using rbd info
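            # Minimal sketch: query the migrated image's metadata with `rbd info`
            # via client.exec_command(), the same helper used in run() below;
            # assumes the rbd CLI is available on the client (per prerequisites).
            out, err = kw["client"].exec_command(
                sudo=True,
                cmd=f"rbd info {kw[pool]['target_pool']}/{kw[pool]['target_image']}",
            )
            if err:
                log.error(f"Failed to fetch metadata of migrated image: {err}")
                return 1
            log.info(f"Migrated image metadata: {out}")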

            # Initiate IO/ Run IO on new image
            err = run_IO(rbd, kw[pool]["target_pool"], kw[pool]["target_image"], **kw)
            if err:
                return 1

    return 0


def run_prepare_execute_commit(rbd, pool, image, **kw):
    """
    Function to carry out the following:
      - Create target/destination pool for migration
      - Migration prepare
      - Migration execute
      - Migration commit
    Args:
        kw: rbd object, pool, image, test data
    Returns:
        int: The return value. 0 for success, 1 otherwise

    """
    # Create target pool / destination pool for migration
    is_ec_pool = True if "ec" in kw[pool]["pool_type"] else False
    config = kw.get("config", {})
    target_pool = "target_pool_" + random_string(len=3)
    target_pool_config = {}
    if is_ec_pool:
        data_pool_target = "data_pool_new_" + random_string(len=3)
        target_pool_config["data_pool"] = data_pool_target
    rc = create_single_pool_and_images(
        config=config,
        pool=target_pool,
        pool_config=target_pool_config,
        client=kw["client"],
        cluster="ceph",
        rbd=rbd,
        ceph_version=int(config.get("rhbuild")[0]),
        is_ec_pool=is_ec_pool,
        is_secondary=False,
        do_not_create_image=True,
    )
    if rc:
        log.error(f"Creation of target pool {target_pool} failed")
        return rc

    # Add the new pool details to config so that they are handled in cleanup
    if kw[pool]["pool_type"] == "rep_pool_config":
        kw["config"]["rep_pool_config"][target_pool] = {}
    elif kw[pool]["pool_type"] == "ec_pool_config":
        kw["config"]["ec_pool_config"][target_pool] = {"data_pool": data_pool_target}

    # Prepare migration
    target_image = "target_image_" + random_string(len=3)
    rbd.migration.prepare(
        source_spec=kw[pool]["spec"],
        dest_spec=f"{target_pool}/{target_image}",
        client_node=kw["client"],
    )
    kw[pool].update({"target_pool": target_pool})
    kw[pool].update({"target_image": target_image})

    # Verify prepare migration status
    if verify_migration_state(
        action="prepare",
        image_spec=f"{target_pool}/{target_image}",
        **kw,
    ):
        log.error("Failed to prepare migration")
        return 1
    else:
        log.info("Migration prepare status verified successfully")

    # Execute migration
    rbd.migration.action(
        action="execute",
        dest_spec=f"{target_pool}/{target_image}",
        client_node=kw["client"],
    )

    # Verify execute migration status
    if verify_migration_state(
        action="execute",
        image_spec=f"{target_pool}/{target_image}",
        **kw,
    ):
        log.error("Failed to execute migration")
        return 1
    else:
        log.info("Migration executed successfully")

    # Commit migration
    rbd.migration.action(
        action="commit",
        dest_spec=f"{target_pool}/{target_image}",
        client_node=kw["client"],
    )

    # Verify commit migration status
    if verify_migration_state(
        action="commit",
        image_spec=f"{target_pool}/{target_image}",
        **kw,
    ):
        log.error("Failed to commit migration")
        return 1
    else:
        log.info("Migration committed successfully")

    return 0


def migrate_check_consistency(rbd, pool, image, **kw):
    """
    Function to carry out the following:
      - Compare md5sum of source and migrated images
    Args:
        kw: rbd object, pool, image, test data
    Returns:
        int: The return value. 0 for success, 1 otherwise

    """
    data_integrity_spec = {
        "first": {
            "image_spec": f"{pool}/{image}",
            "rbd": rbd,
            "client": kw["client"],
            "file_path": f"/tmp/{random_string(len=3)}",
        },
        "second": {
            "image_spec": f"{kw[pool]['target_pool']}/{kw[pool]['target_image']}",
            "rbd": rbd,
            "client": kw["client"],
            "file_path": f"/tmp/{random_string(len=3)}",
        },
    }
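    # check_data_integrity() from ceph.rbd.utils compares checksums for the two
    # image specs above (docstring step 10); conceptually this is similar to
    # running `rbd export <image-spec> - | md5sum` against both images and
    # comparing the results, though the helper's exact mechanism may differ.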
    rc = check_data_integrity(**data_integrity_spec)
    if rc:
        log.error(
            f"Data consistency check failed for {kw[pool]['target_pool']}/{kw[pool]['target_image']}"
        )
        return 1
    else:
        log.info("Data is consistent between the source and target images.")

    return 0


def run_IO(rbd, pool, image, **kw):
    """
    Map the given image, mount it and run write IO on it
    Args:
        rbd: rbd object
        pool: pool name
        image: image name
        **kw: test data, must carry the client and fio config
    Returns:
        int: The return value. 0 for success, 1 otherwise
    """
    fio = kw.get("config", {}).get("fio", {})
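    # krbd_io_handler (called below) maps the image through krbd, lays an ext4
    # filesystem on it, mounts it and runs write IO of the configured fio size,
    # as requested by the "operations" block of io_config.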
    io_config = {
        "rbd_obj": rbd,
        "client": kw["client"],
        "size": fio["size"],
        "do_not_create_image": True,
        "config": {
            "file_size": fio["size"],
            "file_path": [f"/mnt/mnt_{random_string(len=5)}/file"],
            "get_time_taken": True,
            "image_spec": [f"{pool}/{image}"],
            "operations": {
                "fs": "ext4",
                "io": True,
                "mount": True,
                "device_map": True,
            },
            "cmd_timeout": 2400,
            "io_type": "write",
        },
    }
    out, err = krbd_io_handler(**io_config)
    if err:
        log.error(f"Map, mount and run IOs failed for {pool}/{image}")
        return 1
    else:
        log.info(f"Map, mount and IOs successful for {pool}/{image}")

    out, err = rbd.map(**{"image-or-snap-spec": f"{pool}/{image}"})
    if err:
        log.error(f"Failed to map the image {pool}/{image}")
        return 1
    else:
        log.info(f"Successfully mapped the image {pool}/{image}")

    return 0


def run(**kw):
    """
    This test verifies live migration of an image with an external data source in RAW data format in a single ceph cluster
    Args:
        kw: test data
    Returns:
        int: The return value. 0 for success, 1 otherwise

    """
    pool_types = []
    kw.update({"cleanup_files": []})
    try:

        if kw.get("client_node"):
            client = get_node_by_id(kw.get("ceph_cluster"), kw.get("client_node"))
        else:
            client = kw.get("ceph_cluster").get_nodes(role="client")[0]
        rbd_obj = initial_rbd_config(**kw)
        pool_types = rbd_obj.get("pool_types")

        if rbd_obj:
            log.info("Executing test on Replicated and EC pool")
            if migration_with_raw_data_format(rbd_obj, client, **kw):
                return 1
            log.info("Test live migration with raw data format is successful")

    except Exception as e:
        log.error(f"Test live migration with raw data format failed: {str(e)}")
        return 1

    finally:
        try:
            for file in kw["cleanup_files"]:
                out, err = client.exec_command(sudo=True, cmd=f"rm -f {file}")
                if err:
                    log.error(f"Failed to delete file {file}")
        except Exception as e:
            log.error(f"Failed to cleanup temp files with err {e}")
        cluster_name = kw.get("ceph_cluster", {}).name
        if "rbd_obj" not in locals():
            rbd_obj = Rbd(client)
        obj = {cluster_name: rbd_obj}
        cleanup(pool_types=pool_types, multi_cluster_obj=obj, **kw)

    return 0