From 32dec6705fc63944551dcbfc592c81ea7aed1ff8 Mon Sep 17 00:00:00 2001 From: Nicholas Cioli Date: Mon, 25 Sep 2023 19:22:50 +0000 Subject: [PATCH] feat(lvm-driver): enable RAID support Add support for LVM2 RAID types and parameters, with sane defaults for backwards compatibility. lvm-driver now assumes that a non- specified RAID type corresponds to the previous default of linear RAID, where data is packed onto disk until it runs out of space, continuing to the next as necessary. Tests have been added to cover the main supported RAID types (e.g. raid0, raid1, raid5, raid6, and raid10), but technically any valid LVM RAID type should work as well. Fixes #164 Signed-off-by: Nicholas Cioli --- buildscripts/build.sh | 3 +- changelogs/unreleased/164-nicholascioli | 1 + ci/ci-test.sh | 51 +++++++ deploy/lvm-operator.yaml | 48 +++++++ deploy/yamls/lvmvolume-crd.yaml | 48 +++++++ design/lvm/storageclass-parameters/raid.md | 129 ++++++++++++++++++ pkg/apis/openebs.io/lvm/v1alpha1/lvmvolume.go | 49 +++++++ pkg/builder/volbuilder/volume.go | 82 +++++++++++ pkg/driver/controller.go | 10 +- pkg/driver/params.go | 24 ++++ pkg/lvm/lvm_util.go | 31 +++++ pkg/lvm/lvm_util_test.go | 110 +++++++++++++++ tests/provision_test.go | 41 ++++++ tests/suite_test.go | 3 + tests/utils.go | 20 +++ 15 files changed, 648 insertions(+), 2 deletions(-) create mode 100644 changelogs/unreleased/164-nicholascioli create mode 100644 design/lvm/storageclass-parameters/raid.md diff --git a/buildscripts/build.sh b/buildscripts/build.sh index a6e7be53..e5fe1101 100755 --- a/buildscripts/build.sh +++ b/buildscripts/build.sh @@ -96,12 +96,13 @@ output_name="bin/${PNAME}/"$GOOS"_"$GOARCH"/"$CTLNAME if [ $GOOS = "windows" ]; then output_name+='.exe' fi -env GOOS=$GOOS GOARCH=$GOARCH go build -ldflags \ +env GOOS=$GOOS GOARCH=$GOARCH CGO_ENABLED=0 go build -ldflags \ "-X github.com/openebs/lvm-localpv/pkg/version.GitCommit=${GIT_COMMIT} \ -X main.CtlName='${CTLNAME}' \ -X 
github.com/openebs/lvm-localpv/pkg/version.Version=${VERSION} \ -X github.com/openebs/lvm-localpv/pkg/version.VersionMeta=${VERSION_META}"\ -o $output_name\ + -installsuffix cgo \ ./cmd echo "" diff --git a/changelogs/unreleased/164-nicholascioli b/changelogs/unreleased/164-nicholascioli new file mode 100644 index 00000000..e5758e6f --- /dev/null +++ b/changelogs/unreleased/164-nicholascioli @@ -0,0 +1 @@ +add support for LVM raid options diff --git a/ci/ci-test.sh b/ci/ci-test.sh index c048aef7..0b8e242a 100755 --- a/ci/ci-test.sh +++ b/ci/ci-test.sh @@ -33,6 +33,12 @@ fi FOREIGN_LVM_SYSTEMID="openebs-ci-test-system" FOREIGN_LVM_CONFIG="global{system_id_source=lvmlocal}local{system_id=${FOREIGN_LVM_SYSTEMID}}" +# RAID info for corresponding tests +RAID_COUNT=5 + +# RAID info for corresponding tests +RAID_COUNT=5 + # Clean up generated resources for successive tests. cleanup_loopdev() { sudo losetup -l | grep '(deleted)' | awk '{print $1}' \ @@ -60,6 +66,20 @@ cleanup_foreign_lvmvg() { cleanup_loopdev } +cleanup_raidvg() { + sudo vgremove raidvg -y || true + + for IMG in `seq ${RAID_COUNT}` + do + if [ -f /tmp/openebs_ci_raid_disk_${IMG}.img ] + then + rm /tmp/openebs_ci_raid_disk_${IMG}.img + fi + done + + cleanup_loopdev +} + cleanup() { set +e @@ -67,6 +87,7 @@ cleanup() { cleanup_lvmvg cleanup_foreign_lvmvg + cleanup_raidvg kubectl delete pvc -n openebs lvmpv-pvc kubectl delete -f "${SNAP_CLASS}" @@ -93,10 +114,40 @@ foreign_disk="$(sudo losetup -f /tmp/openebs_ci_foreign_disk.img --show)" sudo pvcreate "${foreign_disk}" sudo vgcreate foreign_lvmvg "${foreign_disk}" --config="${FOREIGN_LVM_CONFIG}" +# setup a RAID volume group +cleanup_raidvg +raid_disks=() +for IMG in `seq ${RAID_COUNT}` +do + truncate -s 1024G /tmp/openebs_ci_raid_disk_${IMG}.img + raid_disk="$(sudo losetup -f /tmp/openebs_ci_raid_disk_${IMG}.img --show)" + sudo pvcreate "${raid_disk}" + + raid_disks+=("${raid_disk}") +done +sudo vgcreate raidvg "${raid_disks[@]}" + +# setup a RAID volume 
group +cleanup_raidvg +raid_disks=() +for IMG in `seq ${RAID_COUNT}` +do + truncate -s 1024G /tmp/openebs_ci_raid_disk_${IMG}.img + raid_disk="$(sudo losetup -f /tmp/openebs_ci_raid_disk_${IMG}.img --show)" + sudo pvcreate "${raid_disk}" + + raid_disks+=("${raid_disk}") +done +sudo vgcreate raidvg "${raid_disks[@]}" + # install snapshot and thin volume module for lvm sudo modprobe dm-snapshot sudo modprobe dm_thin_pool +# install RAID modules for lvm +sudo modprobe dm_raid +sudo modprobe dm_integrity + # Prepare env for running BDD tests # Minikube is already running kubectl apply -f "${LVM_OPERATOR}" diff --git a/deploy/lvm-operator.yaml b/deploy/lvm-operator.yaml index 2bcfee6f..94eaa9b7 100644 --- a/deploy/lvm-operator.yaml +++ b/deploy/lvm-operator.yaml @@ -95,12 +95,47 @@ spec: description: Capacity of the volume minLength: 1 type: string + integrity: + description: Integrity specifies whether logical volumes should be + checked for integrity. If it is set to "yes", then the LVM LocalPV + Driver will enable DM integrity for the logical volume + enum: + - "yes" + - "no" + type: string + lvcreateoptions: + description: LvCreateOptions are extra options for creating a volume. + Options should be separated by ; e.g. "--vdo;--readahead;auto" + type: string + mirrors: + description: Mirrors specifies the mirror count for a RAID configuration. + minimum: 0 + type: integer + nosync: + description: NoSync enables the `--nosync` option of a RAID volume. + If it is set to "yes", then LVM will skip drive sync when creating + the mirrors. Defaults to "no" + enum: + - "yes" + - "no" + type: string ownerNodeID: description: OwnerNodeID is the Node ID where the volume group is present which is where the volume has been provisioned. OwnerNodeID can not be edited after the volume has been provisioned. minLength: 1 type: string + raidtype: + description: RaidType specifies the type of RAID for the logical volume. + Defaults to linear, if unspecified. 
+ enum: + - linear + - raid0 + - raid1 + - raid5 + - raid6 + - raid10 + type: string shared: description: Shared specifies whether the volume can be shared among multiple pods. If it is not set to "yes", then the LVM LocalPV Driver @@ -109,6 +144,18 @@ spec: - "yes" - "no" type: string + stripecount: + description: StripeCount specifies the stripe count for a RAID configuration. + This is equal to the number of physical volumes to scatter the logical + volume + minimum: 0 + type: integer + stripesize: + description: StripeSize specifies the size of a stripe for a RAID + configuration. Must be a power of 2 but must not exceed the physical + extent size + minimum: 0 + type: integer thinProvision: description: ThinProvision specifies whether logical volumes can be thinly provisioned. If it is set to "yes", then the LVM LocalPV @@ -129,6 +176,7 @@ spec: required: - capacity - ownerNodeID + - raidtype - vgPattern - volGroup type: object diff --git a/deploy/yamls/lvmvolume-crd.yaml b/deploy/yamls/lvmvolume-crd.yaml index 61b78125..3787afa5 100644 --- a/deploy/yamls/lvmvolume-crd.yaml +++ b/deploy/yamls/lvmvolume-crd.yaml @@ -74,12 +74,47 @@ spec: description: Capacity of the volume minLength: 1 type: string + integrity: + description: Integrity specifies whether logical volumes should be + checked for integrity. If it is set to "yes", then the LVM LocalPV + Driver will enable DM integrity for the logical volume + enum: + - "yes" + - "no" + type: string + lvcreateoptions: + description: LvCreateOptions are extra options for creating a volume. + Options should be separated by ; e.g. "--vdo;--readahead;auto" + type: string + mirrors: + description: Mirrors specifies the mirror count for a RAID configuration. + minimum: 0 + type: integer + nosync: + description: NoSync enables the `--nosync` option of a RAID volume. + If it is set to "yes", then LVM will skip drive sync when creating + the mirrors. 
Defaults to "no" + enum: + - "yes" + - "no" + type: string ownerNodeID: description: OwnerNodeID is the Node ID where the volume group is present which is where the volume has been provisioned. OwnerNodeID can not be edited after the volume has been provisioned. minLength: 1 type: string + raidtype: + description: RaidType specifies the type of RAID for the logical volume. + Defaults to linear, if unspecified. + enum: + - linear + - raid0 + - raid1 + - raid5 + - raid6 + - raid10 + type: string shared: description: Shared specifies whether the volume can be shared among multiple pods. If it is not set to "yes", then the LVM LocalPV Driver @@ -88,6 +123,18 @@ spec: - "yes" - "no" type: string + stripecount: + description: StripeCount specifies the stripe count for a RAID configuration. + This is equal to the number of physical volumes to scatter the logical + volume + minimum: 0 + type: integer + stripesize: + description: StripeSize specifies the size of a stripe for a RAID + configuration. Must be a power of 2 but must not exceed the physical + extent size + minimum: 0 + type: integer thinProvision: description: ThinProvision specifies whether logical volumes can be thinly provisioned. 
If it is set to "yes", then the LVM LocalPV @@ -108,6 +155,7 @@ spec: required: - capacity - ownerNodeID + - raidtype - vgPattern - volGroup type: object diff --git a/design/lvm/storageclass-parameters/raid.md b/design/lvm/storageclass-parameters/raid.md new file mode 100644 index 00000000..4d926320 --- /dev/null +++ b/design/lvm/storageclass-parameters/raid.md @@ -0,0 +1,129 @@ +--- +title: LVM-LocalPV RAID +authors: + - "@nicholascioli" +owners: [] +creation-date: 2023-11-04 +last-updated: 2023-11-04 +status: Implemented +--- + +# LVM-LocalPV RAID + +## Table of Contents +- [LVM-LocalPV RAID](#lvm-localpv-raid) + - [Table of Contents](#table-of-contents) + - [Summary](#summary) + - [Motivation](#motivation) + - [Goals](#goals) + - [Non Goals](#non-goals) + - [Proposal](#proposal) + - [User Stories](#user-stories) + - [Implementation Details](#implementation-details) + - [Usage details](#usage-details) + - [Test Plan](#test-plan) + - [Graduation Criteria](#graduation-criteria) + - [Drawbacks](#drawbacks) + - [Alternatives](#alternatives) + + +## Summary + +This proposal charts out the workflow details to support creation of RAID volumes. + +## Motivation + +### Goals + +- Able to provision RAID volumes in a VolumeGroup. +- Able to specify VolumeGroup-specific RAID options for all sub volumes. +- Able to specify extra options for all volumes in a VolumeGroup. + +### Non Goals + +- Validating combinations of RAID types / options. + +## Proposal + +### User Stories + +- RAIDed volumes provide data redundancy and can mitigate data loss due to individual drive failures. +- Ability to specify extra arguments for VolumeGroups allow for user customizations without needing + to rework k8s schemas. + +### Implementation Details + +- User/Admin has to set RAID-sepcific options under storageclass parameters which + are used when creating volumes in the VolumeGroup. 
+- During volume provisioning time external-provisioner will read all key-value pairs + that are specified under referenced storageclass and pass information to CSI + driver as payload for `CreateVolume` gRPC request. +- After receiving the `CreateVolume` request CSI driver will pick appropriate node based + on scheduling attributes (like topology information, matching VG name and available capacity) + and creates LVM volume resource by setting `Spec.RaidType` to a valid type along with other properties. +- Once the LVMVolume resource is created corresponding node LVM volume controller reconciles + the LVM volume resource in the following way: + - LVM controller will check `Spec.RaidType` field, if the field is set to anything other + than `linear`, then the controller will perform the following operations: + - Fetch information about existence of matching VolumeGroup. + - If there is a VolumeGroup with the given name then controller will create a volume. + Command used to create the RAID volume: `lvcreate --type --raidintegrity --nosync ... -y` + - If volume creation is successful then controller will mark the LVM volume resource as `Ready`. +- After watching `Ready` status CSI driver will return success response to `CreateVolume` gRPC + request. + +### Usage details + +1. User/Admin can configure the following options under the storageclass parameters. + +Option | Required | Valid Values | Description +-------|----------|--------------|------------------- +`raidtype` | `false` | `linear`, `raid0`, `raid1`, `raid5`, `raid6`, `raid10` | The RAID type of the volume. Defaults to `linear`. +`integrity` | `false` | `yes`, `no` | Whether or not to enable DM integrity for the volume. Defaults to `no`. +`mirrors` | depends | [0, ∞) | Mirror count. Certain RAID configurations require this to be set. +`nosync` | `false` | `yes`, `no` | Whether or not to disable the initial sync. Defaults to `no`. +`stripecount` | depends | [0, ∞) | Stripe count. Certain RAID configurations require this to be set. 
+`stripesize` | `false` | [0, ∞) (but must be a power of 2) | The size of each stripe. If not specified, LVM will choose a sane default. +`lvcreateoptions` | `false` | String, delimited by `;` | Extra options to be passed to LVM when creating volumes. + +An example is shown below: +```yaml +apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + name: openebs-lvm +provisioner: local.csi.openebs.io +parameters: + storage: "lvm" + volgroup: "lvmvg" + raidType: "raid1" + lvcreateoptions: "--vdo;--readahead auto" +``` + +### Test Plan +- Provision an application on various RAID configurations, verify volume accessibility from application, + and verify that `lvs` reports correct RAID information. + +## Graduation Criteria + +All test cases mentioned in the [Test Plan](#test-plan) section need to be automated. + +## Drawbacks + +- Since the RAID options exist at the storageclass level, changes to the storage + class RAID options are not possible without custom logic per RAID type or manual + operator interactions. +- Validation of the RAID options depends on the version of LVM2 installed as well as + the type of RAID used and its options. This is outside of the scope of these changes + and will cause users to have to debug issues with a finer comb to see why certain + options do not work together or on their specific machine. + +## Alternatives + +RAID can be done in either software or hardware, with many off-the-shelf products +including built-in hardware solutions. There are also other software RAID alternatives +that can be used below LVM, such as mdadm. + +This unfortunately requires operators to decouple +the StorageClass from the RAID configuration, but does simplify the amount of code maintained by +this project. 
diff --git a/pkg/apis/openebs.io/lvm/v1alpha1/lvmvolume.go b/pkg/apis/openebs.io/lvm/v1alpha1/lvmvolume.go index d8d09823..5f68a946 100644 --- a/pkg/apis/openebs.io/lvm/v1alpha1/lvmvolume.go +++ b/pkg/apis/openebs.io/lvm/v1alpha1/lvmvolume.go @@ -87,6 +87,55 @@ type VolumeInfo struct { // +kubebuilder:validation:Required // +kubebuilder:validation:Enum=yes;no ThinProvision string `json:"thinProvision,omitempty"` + + // RaidType specifies the type of RAID for the logical volume. + // Defaults to linear, if unspecified. + // +kubebuilder:validation:Required + // +kubebuilder:validation:default=linear + // +kubebuilder:validation:Enum=linear;raid0;raid1;raid5;raid6;raid10 + RaidType string `json:"raidtype"` + + // Integrity specifies whether logical volumes should be checked for integrity. + // If it is set to "yes", then the LVM LocalPV Driver will enable DM integrity + // for the logical volume + // +kubebuilder:validation:Required + // +kubebuilder:validation:Enum=yes;no + Integrity string `json:"integrity,omitempty"` + + // Mirrors specifies the mirror count for a RAID configuration. + // +kubebuilder:validation:Required + // +kubebuilder:validation:default=0 + // +kubebuilder:validation:Minimum=0 + Mirrors uint `json:"mirrors,omitempty"` + + // NoSync enables the `--nosync` option of a RAID volume. + // If it is set to "yes", then LVM will skip drive sync when creating + // the mirrors. Defaults to "no" + // +kubebuilder:validation:Required + // +kubebuilder:validation:default=no + // +kubebuilder:validation:Enum=yes;no + NoSync string `json:"nosync,omitempty"` + + // StripeCount specifies the stripe count for a RAID configuration. + // This is equal to the number of physical volumes to scatter the + // logical volume + // +kubebuilder:validation:Required + // +kubebuilder:validation:default=0 + // +kubebuilder:validation:Minimum=0 + StripeCount uint `json:"stripecount,omitempty"` + + // StripeSize specifies the size of a stripe for a RAID configuration. 
+ // Must be a power of 2 but must not exceed the physical extent size + // +kubebuilder:validation:Required + // +kubebuilder:validation:default=0 + // +kubebuilder:validation:Minimum=0 + StripeSize uint `json:"stripesize,omitempty"` + + // LvCreateOptions are extra options for creating a volume. + // Options should be separated by ; + // e.g. "--vdo;--readahead;auto" + // +kubebuilder:validation:Required + LvCreateOptions string `json:"lvcreateoptions,omitempty"` } // VolStatus string that specifies the current state of the volume provisioning request. diff --git a/pkg/builder/volbuilder/volume.go b/pkg/builder/volbuilder/volume.go index c871c104..6074195b 100644 --- a/pkg/builder/volbuilder/volume.go +++ b/pkg/builder/volbuilder/volume.go @@ -17,8 +17,11 @@ limitations under the License. package volbuilder import ( + "strconv" + "github.com/openebs/lib-csi/pkg/common/errors" apis "github.com/openebs/lvm-localpv/pkg/apis/openebs.io/lvm/v1alpha1" + "k8s.io/apimachinery/pkg/api/resource" ) // Builder is the builder object for LVMVolume @@ -139,6 +142,85 @@ func (b *Builder) WithThinProvision(thinProvision string) *Builder { return b } +// WithRaidType sets the RAID type for all logical volumes in the volume group +func (b *Builder) WithRaidType(raidType string) *Builder { + b.volume.Object.Spec.RaidType = raidType + return b +} + +// WithIntegrity sets whether integrity is enabled or not +func (b *Builder) WithIntegrity(integrity string) *Builder { + b.volume.Object.Spec.Integrity = integrity + return b +} + +// WithMirrors sets the RAID mirror count +func (b *Builder) WithMirrors(mirrors string) *Builder { + mirrorCount, err := strconv.ParseUint(mirrors, 10, 32) + if err != nil { + b.errs = append( + b.errs, + errors.New( + "invalid mirror count: must be a positive 32-bit integer", + ), + err, + ) + } + + b.volume.Object.Spec.Mirrors = uint(mirrorCount) + return b +} + +// WithNoSync sets the `nosync` RAID option +func (b *Builder) WithNoSync(nosync string) *Builder 
{ + b.volume.Object.Spec.NoSync = nosync + return b +} + +func (b *Builder) WithStripeCount(stripes string) *Builder { + stripeCount, err := strconv.ParseUint(stripes, 10, 32) + if err != nil { + b.errs = append( + b.errs, + errors.New( + "invalid stripe count: must be a positive 32-bit integer", + ), + err, + ) + } + + b.volume.Object.Spec.StripeCount = uint(stripeCount) + return b +} + +// WithStripeSize sets the size of each stripe for a RAID volume +func (b *Builder) WithStripeSize(size string) *Builder { + stripeSize, err := resource.ParseQuantity(size) + if err != nil { + b.errs = append( + b.errs, + errors.New( + "invalid stripe size", + ), + err, + ) + } + + value := stripeSize.Value() + if value < 0 { + b.errs = append(b.errs, errors.New("invalid stripe size: value must be positive")) + } + + b.volume.Object.Spec.StripeSize = uint(value) + return b +} + +// WithLvCreateOptions sets any additional LVM options used when creating a volume +func (b *Builder) WithLvCreateOptions(options string) *Builder { + b.volume.Object.Spec.LvCreateOptions = options + return b +} + // WithVolGroup sets volume group name for creating volume func (b *Builder) WithVolGroup(vg string) *Builder { if vg == "" { diff --git a/pkg/driver/controller.go b/pkg/driver/controller.go index 0ce1c8b0..48d47085 100644 --- a/pkg/driver/controller.go +++ b/pkg/driver/controller.go @@ -310,7 +310,15 @@ func CreateLVMVolume(ctx context.Context, req *csi.CreateVolumeRequest, WithOwnerNode(owner). WithVolumeStatus(lvm.LVMStatusPending). WithShared(params.Shared). - WithThinProvision(params.ThinProvision).Build() + WithThinProvision(params.ThinProvision). + WithRaidType(params.RaidType). + WithIntegrity(params.Integrity). + WithMirrors(params.Mirrors). + WithNoSync(params.NoSync). + WithStripeCount(params.StripeCount). + WithStripeSize(params.StripeSize). + WithLvCreateOptions(params.LvCreateOptions). 
+ Build() if err != nil { return nil, status.Error(codes.Internal, err.Error()) diff --git a/pkg/driver/params.go b/pkg/driver/params.go index 6e24fd78..4b37e58d 100644 --- a/pkg/driver/params.go +++ b/pkg/driver/params.go @@ -42,6 +42,15 @@ type VolumeParams struct { PVCName string PVCNamespace string PVName string + + // Raid specific options + RaidType string + Integrity string + Mirrors string + NoSync string + StripeCount string + StripeSize string + LvCreateOptions string } // SnapshotParams holds collection of supported settings that can @@ -57,6 +66,12 @@ func NewVolumeParams(m map[string]string) (*VolumeParams, error) { Scheduler: SpaceWeighted, Shared: "no", ThinProvision: "no", + RaidType: "linear", + Integrity: "no", + Mirrors: "0", + NoSync: "no", + StripeCount: "0", + StripeSize: "0", } // parameter keys may be mistyped from the CRD specification when declaring // the storageclass, which kubectl validation will not catch. Because @@ -83,6 +98,15 @@ func NewVolumeParams(m map[string]string) (*VolumeParams, error) { "scheduler": ¶ms.Scheduler, "shared": ¶ms.Shared, "thinprovision": ¶ms.ThinProvision, + + // Raid options + "raidtype": ¶ms.RaidType, + "integrity": ¶ms.Integrity, + "mirrors": ¶ms.Mirrors, + "nosync": ¶ms.NoSync, + "stripecount": ¶ms.StripeCount, + "stripesize": ¶ms.StripeSize, + "lvcreateoptions": ¶ms.LvCreateOptions, } for key, param := range stringParams { value, ok := m[key] diff --git a/pkg/lvm/lvm_util.go b/pkg/lvm/lvm_util.go index ec24d434..2b340c5a 100644 --- a/pkg/lvm/lvm_util.go +++ b/pkg/lvm/lvm_util.go @@ -60,6 +60,7 @@ const ( YES = "yes" LVThinPool = "thin-pool" + LinearRAID = "linear" ) var ( @@ -237,6 +238,31 @@ func buildLVMCreateArgs(vol *apis.LVMVolume) []string { LVMVolArg = append(LVMVolArg, "-T", vol.Spec.VolGroup+"/"+pool, "-V", size) } + // command to set raid options and mirror / stripe info + // `lvcreate -L 1G --type --raidintegrity --nosync ...` + // Note: We only need to check if the raidtype is anything but 
the default of linear. + raidType := strings.TrimSpace(vol.Spec.RaidType) + if len(raidType) != 0 && raidType != LinearRAID { + LVMVolArg = append(LVMVolArg, "--type", vol.Spec.RaidType) + + // Now check for optional raid config + if vol.Spec.Mirrors != 0 { + LVMVolArg = append(LVMVolArg, "--mirrors", fmt.Sprintf("%d", vol.Spec.Mirrors)) + } + if strings.TrimSpace(vol.Spec.NoSync) == YES { + LVMVolArg = append(LVMVolArg, "--nosync") + } + if vol.Spec.StripeCount != 0 { + LVMVolArg = append(LVMVolArg, "--stripes", fmt.Sprintf("%d", vol.Spec.StripeCount)) + } + if vol.Spec.StripeSize != 0 { + LVMVolArg = append(LVMVolArg, "--stripesize", fmt.Sprintf("%db", vol.Spec.StripeSize)) + } + if strings.TrimSpace(vol.Spec.Integrity) == YES { + LVMVolArg = append(LVMVolArg, "--raidintegrity", "y") + } + } + if len(vol.Spec.VolGroup) != 0 { LVMVolArg = append(LVMVolArg, "-n", volume) } @@ -245,6 +271,11 @@ func buildLVMCreateArgs(vol *apis.LVMVolume) []string { LVMVolArg = append(LVMVolArg, vol.Spec.VolGroup) } + // Allow passing in arbitrary LVM options + if len(vol.Spec.LvCreateOptions) != 0 { + LVMVolArg = append(LVMVolArg, strings.Split(vol.Spec.LvCreateOptions, ";")...) 
+ } + // -y is used to wipe the signatures before creating LVM volume LVMVolArg = append(LVMVolArg, "-y") return LVMVolArg diff --git a/pkg/lvm/lvm_util_test.go b/pkg/lvm/lvm_util_test.go index f45f9323..1938114c 100644 --- a/pkg/lvm/lvm_util_test.go +++ b/pkg/lvm/lvm_util_test.go @@ -19,6 +19,9 @@ package lvm import ( "reflect" "testing" + + apis "github.com/openebs/lvm-localpv/pkg/apis/openebs.io/lvm/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) var ( @@ -108,3 +111,110 @@ func Test_parseLogicalVolume(t *testing.T) { }) } } + +func Test_buildLVMCreateArgs(t *testing.T) { + type ValidationPair struct { + name string + spec apis.VolumeInfo + args []string + } + + tests := []ValidationPair{ + { + name: "simple", + spec: apis.VolumeInfo{ + VolGroup: "dddd", + }, + args: []string{ + "-n", "simple", + "dddd", + "-y", + }, + }, + { + name: "more-complex", + spec: apis.VolumeInfo{ + VolGroup: "ffff", + Capacity: "256Mi", + }, + args: []string{ + "-L", "256Mib", + "-n", "more-complex", + "ffff", + "-y", + }, + }, + { + name: "thin-provision", + spec: apis.VolumeInfo{ + VolGroup: "eeee", + ThinProvision: "yes", + }, + args: []string{ + "-T", "eeee/eeee_thinpool", + "-V", "b", // This is because setting a capacity causes lvm_util to actually call LVM... 
+ "-n", "thin-provision", + "-y", + }, + }, + { + name: "vol-r1", + spec: apis.VolumeInfo{ + VolGroup: "aaaa", + RaidType: "raid1", + Mirrors: 8, + NoSync: "yes", + }, + args: []string{ + "--type", "raid1", + "--mirrors", "8", "--nosync", + "-n", "vol-r1", + "aaaa", + "-y", + }, + }, + { + name: "vol-r10", + spec: apis.VolumeInfo{ + VolGroup: "bbbb", + RaidType: "raid10", + Integrity: "yes", + Mirrors: 2, + StripeCount: 3, + StripeSize: 32768, + }, + args: []string{ + "--type", "raid10", + "--mirrors", "2", + "--stripes", "3", "--stripesize", "32768b", + "--raidintegrity", "y", + "-n", "vol-r10", + "bbbb", + "-y", + }, + }, + { + name: "vol-custom", + spec: apis.VolumeInfo{ + VolGroup: "cccc", + Capacity: "1G", + LvCreateOptions: "--vdo;--readahead;auto", + }, + args: []string{ + "-L", "1Gb", + "-n", "vol-custom", + "cccc", + "--vdo", "--readahead", "auto", + "-y", + }, + }, + } + + for _, tt := range tests { + got := buildLVMCreateArgs(&apis.LVMVolume{ObjectMeta: v1.ObjectMeta{Name: tt.name}, Spec: tt.spec}) + + if !reflect.DeepEqual(got, tt.args) { + t.Errorf("buildLVMCreateArgs() got = %v, want %v", got, tt.args) + } + } +} diff --git a/tests/provision_test.go b/tests/provision_test.go index 56c6e728..064fb9e7 100644 --- a/tests/provision_test.go +++ b/tests/provision_test.go @@ -17,6 +17,7 @@ limitations under the License. package tests import ( + "fmt" "time" . 
"github.com/onsi/ginkgo" @@ -94,6 +95,45 @@ func thinVolCreationTest() { By("Deleting thinProvision storage class", deleteStorageClass) } +func raidVolCreationTest() { + raidTypes := []map[string]string{ + { + "raidtype": "raid0", + "integrity": "yes", + }, + { + "raidtype": "raid1", + "mirrors": "3", + }, + { + "raidtype": "raid5", + "stripes": "3", + }, + { + "raidtype": "raid6", + "stripes": "3", + }, + { + "raidtype": "raid10", + "mirrors": "1", + "stripes": "2", + }, + } + + for _, args := range raidTypes { + By(fmt.Sprintf("Creating RAID `%s` storage class", args["raidtype"]), func() { createRaidStorageClass(args) }) + By("creating and verifying PVC bound status", createAndVerifyPVC) + + By("Creating and deploying app pod", createDeployVerifyApp) + By("verifying LVMVolume object", VerifyLVMVolume) + By("Deleting application deployment") + deleteAppDeployment(appName) + By("Deleting pvc") + deleteAndVerifyPVC(pvcName) + By("Deleting RAID storage class", deleteStorageClass) + } +} + func leakProtectionTest() { By("Creating default storage class", createStorageClass) ds := deleteNodeDaemonSet() // ensure that provisioning remains in pending state. 
@@ -119,5 +159,6 @@ func volumeCreationTest() { By("Running volume creation test", fsVolCreationTest) By("Running block volume creation test", blockVolCreationTest) By("Running thin volume creation test", thinVolCreationTest) + By("Running RAID volume creation test", raidVolCreationTest) By("Running leak protection test", leakProtectionTest) } diff --git a/tests/suite_test.go b/tests/suite_test.go index 92ed1f04..99dad7bd 100644 --- a/tests/suite_test.go +++ b/tests/suite_test.go @@ -43,6 +43,9 @@ import ( const ( // volume group name where volume provisioning will happen VOLGROUP = "lvmvg" + + // volume group name where RAID volume provisioning will happen + RAIDGROUP = "raidvg" ) var ( diff --git a/tests/utils.go b/tests/utils.go index 57e328fc..25c58fb9 100644 --- a/tests/utils.go +++ b/tests/utils.go @@ -166,6 +166,26 @@ func createThinStorageClass() { gomega.Expect(err).To(gomega.BeNil(), "while creating a thinProvision storageclass {%s}", scName) } +func createRaidStorageClass(raidArgs map[string]string) { + var ( + err error + ) + + // Add in the volume group to the args + raidArgs["volgroup"] = RAIDGROUP + + ginkgo.By("building a RAID storage class") + scObj, err = sc.NewBuilder(). + WithGenerateName(scName). + WithParametersNew(raidArgs). + WithProvisioner(LocalProvisioner).Build() + gomega.Expect(err).ShouldNot(gomega.HaveOccurred(), + "while building RAID storageclass obj with prefix {%s}", scName) + + scObj, err = SCClient.Create(scObj) + gomega.Expect(err).To(gomega.BeNil(), "while creating a RAID storageclass {%s}", scName) +} + // VerifyLVMVolume verify the properties of a lvm-volume func VerifyLVMVolume() { ginkgo.By("fetching lvm volume")