Commit 943ddc4

Merge pull request red-hat-storage#1304 from umangachapagain/rook-update
Update Rook to v1.7.0 and K8s to v0.21.3
2 parents 3c6cbeb + 86b8214 commit 943ddc4

2,040 files changed (+185,235, -35,435 lines)


api/v1/storagecluster_types.go (+3, -4)

@@ -20,7 +20,6 @@ import (
 	nbv1 "github.com/noobaa/noobaa-operator/v2/pkg/apis/noobaa/v1alpha1"
 	conditionsv1 "github.com/openshift/custom-resource-status/conditions/v1"
 	rookCephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1"
-	rook "github.com/rook/rook/pkg/apis/rook.io/v1"
 	corev1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 )
@@ -37,7 +36,7 @@ type StorageClusterSpec struct {
 	// HostNetwork defaults to false
 	HostNetwork bool `json:"hostNetwork,omitempty"`
 	// Placement is optional and used to specify placements of OCS components explicitly
-	Placement rook.PlacementSpec `json:"placement,omitempty"`
+	Placement rookCephv1.PlacementSpec `json:"placement,omitempty"`
 	// Resources follows the conventions of and is mapped to CephCluster.Spec.Resources
 	Resources map[string]corev1.ResourceRequirements `json:"resources,omitempty"`
 	Encryption EncryptionSpec `json:"encryption,omitempty"`
@@ -189,8 +188,8 @@ type StorageDeviceSet struct {

 	Name string `json:"name"`
 	Resources corev1.ResourceRequirements `json:"resources,omitempty"`
-	PreparePlacement rook.Placement `json:"preparePlacement,omitempty"`
-	Placement rook.Placement `json:"placement,omitempty"`
+	PreparePlacement rookCephv1.Placement `json:"preparePlacement,omitempty"`
+	Placement rookCephv1.Placement `json:"placement,omitempty"`
 	Config StorageDeviceSetConfig `json:"config,omitempty"`
 	DataPVCTemplate corev1.PersistentVolumeClaim `json:"dataPVCTemplate"`
 	MetadataPVCTemplate *corev1.PersistentVolumeClaim `json:"metadataPVCTemplate,omitempty"`
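To show what this type change means for API consumers, here is a minimal sketch (not part of the commit) of building a StorageClusterSpec with a placement override; the field literals now use rookCephv1.PlacementSpec and rookCephv1.Placement, and the toleration shown is only an illustrative value.

```go
package example

import (
	ocsv1 "github.com/openshift/ocs-operator/api/v1"
	rookCephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1"
	corev1 "k8s.io/api/core/v1"
)

// exampleSpec builds a StorageClusterSpec with a custom mon placement
// using the relocated ceph.rook.io/v1 types.
func exampleSpec() ocsv1.StorageClusterSpec {
	return ocsv1.StorageClusterSpec{
		Placement: rookCephv1.PlacementSpec{
			"mon": rookCephv1.Placement{
				Tolerations: []corev1.Toleration{
					{
						// Illustrative toleration only.
						Key:      "node.ocs.openshift.io/storage",
						Operator: corev1.TolerationOpEqual,
						Value:    "true",
						Effect:   corev1.TaintEffectNoSchedule,
					},
				},
			},
		},
	}
}
```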

api/v1/zz_generated.deepcopy.go (+1, -2)

Generated file; the diff is not rendered by default.

config/crd/bases/ocs.openshift.io_storageclusters.yaml (+817, -73)

Large diff; not rendered by default.

controllers/defaults/placements.go (+2, -2)

@@ -1,7 +1,7 @@
 package defaults

 import (
-	rook "github.com/rook/rook/pkg/apis/rook.io/v1"
+	rookCephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1"
 	corev1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 )
@@ -18,7 +18,7 @@ var (
 	}
 	// DaemonPlacements map contains the default placement configs for the
 	// various OCS daemons
-	DaemonPlacements = map[string]rook.Placement{
+	DaemonPlacements = map[string]rookCephv1.Placement{
 		"all": {
 			Tolerations: []corev1.Toleration{
 				getOcsToleration(),
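Since DaemonPlacements stays keyed by plain component-name strings, callers keep looking entries up the same way; only the value type changes. A small hypothetical helper to illustrate that, under the assumption the map is consumed as shown:

```go
package example

import (
	"github.com/openshift/ocs-operator/controllers/defaults"
	rookCephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1"
)

// defaultPlacementFor returns the default placement for a component,
// falling back to an empty Placement. The value type is now
// rookCephv1.Placement rather than the removed rook.io/v1 type.
func defaultPlacementFor(component string) rookCephv1.Placement {
	if p, ok := defaults.DaemonPlacements[component]; ok {
		return p
	}
	return rookCephv1.Placement{}
}
```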

controllers/storagecluster/cephcluster.go (+11, -12)

@@ -15,7 +15,6 @@ import (
 	"github.com/openshift/ocs-operator/controllers/defaults"
 	statusutil "github.com/openshift/ocs-operator/controllers/util"
 	rookCephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1"
-	rook "github.com/rook/rook/pkg/apis/rook.io/v1"
 	corev1 "k8s.io/api/core/v1"
 	storagev1 "k8s.io/api/storage/v1"
 	"k8s.io/apimachinery/pkg/api/errors"
@@ -319,15 +318,15 @@ func newCephCluster(sc *ocsv1.StorageCluster, cephImage string, nodeCount int, s
 				Enabled:        true,
 				RulesNamespace: "openshift-storage",
 			},
-			Storage: rook.StorageScopeSpec{
+			Storage: rookCephv1.StorageScopeSpec{
 				StorageClassDeviceSets: newStorageClassDeviceSets(sc, serverVersion),
 			},
-			Placement: rook.PlacementSpec{
+			Placement: rookCephv1.PlacementSpec{
 				"all":     getPlacement(sc, "all"),
 				"mon":     getPlacement(sc, "mon"),
 				"arbiter": getPlacement(sc, "arbiter"),
 			},
-			PriorityClassNames: rook.PriorityClassNamesSpec{
+			PriorityClassNames: rookCephv1.PriorityClassNamesSpec{
 				rookCephv1.KeyMgr: systemNodeCritical,
 				rookCephv1.KeyMon: systemNodeCritical,
 				rookCephv1.KeyOSD: systemNodeCritical,
@@ -338,7 +337,7 @@ func newCephCluster(sc *ocsv1.StorageCluster, cephImage string, nodeCount int, s
 				Enabled:     true,
 				Periodicity: "24h",
 			},
-			Labels: rook.LabelsSpec{
+			Labels: rookCephv1.LabelsSpec{
 				rookCephv1.KeyMonitoring: getCephClusterMonitoringLabels(*sc),
 			},
 		},
@@ -452,7 +451,7 @@ func newExternalCephCluster(sc *ocsv1.StorageCluster, cephImage, monitoringIP, m
 				ManageMachineDisruptionBudgets: false,
 			},
 			Monitoring: monitoringSpec,
-			Labels: rook.LabelsSpec{
+			Labels: rookCephv1.LabelsSpec{
 				rookCephv1.KeyMonitoring: getCephClusterMonitoringLabels(*sc),
 			},
 		},
@@ -536,11 +535,11 @@ func getMonCount(nodeCount int, arbiter bool) int {
 }

 // newStorageClassDeviceSets converts a list of StorageDeviceSets into a list of Rook StorageClassDeviceSets
-func newStorageClassDeviceSets(sc *ocsv1.StorageCluster, serverVersion *version.Info) []rook.StorageClassDeviceSet {
+func newStorageClassDeviceSets(sc *ocsv1.StorageCluster, serverVersion *version.Info) []rookCephv1.StorageClassDeviceSet {
 	storageDeviceSets := sc.Spec.StorageDeviceSets
 	topologyMap := sc.Status.NodeTopologies

-	var storageClassDeviceSets []rook.StorageClassDeviceSet
+	var storageClassDeviceSets []rookCephv1.StorageClassDeviceSet

 	// For kube server version 1.19 and above, topology spread constraints are used for OSD placements.
 	// For kube server version below 1.19, NodeAffinity and PodAntiAffinity are used for OSD placements.
@@ -597,8 +596,8 @@ func newStorageClassDeviceSets(sc *ocsv1.StorageCluster, serverVersion *version.
 	}

 	for i := 0; i < replica; i++ {
-		placement := rook.Placement{}
-		preparePlacement := rook.Placement{}
+		placement := rookCephv1.Placement{}
+		preparePlacement := rookCephv1.Placement{}

 		if noPlacement {
 			if supportTSC {
@@ -679,7 +678,7 @@ func newStorageClassDeviceSets(sc *ocsv1.StorageCluster, serverVersion *version.
 		}
 		ds.DataPVCTemplate.Annotations = annotations

-		set := rook.StorageClassDeviceSet{
+		set := rookCephv1.StorageClassDeviceSet{
 			Name:      fmt.Sprintf("%s-%d", ds.Name, i),
 			Count:     count,
 			Resources: resources,
@@ -846,7 +845,7 @@ func getCephObjectStoreGatewayInstances(sc *ocsv1.StorageCluster) int32 {

 // addStrictFailureDomainTSC adds hard topology constraints at failure domain level
 // and uses soft topology constraints within falure domain (across host).
-func addStrictFailureDomainTSC(placement *rook.Placement, topologyKey string) {
+func addStrictFailureDomainTSC(placement *rookCephv1.Placement, topologyKey string) {
 	newTSC := placement.TopologySpreadConstraints[0]
 	newTSC.TopologyKey = topologyKey
 	newTSC.WhenUnsatisfiable = "DoNotSchedule"
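For context, a rough standalone sketch of the pattern newCephCluster now follows: the cluster-scoped spec types (PlacementSpec, PriorityClassNamesSpec, LabelsSpec, StorageScopeSpec) all come from ceph.rook.io/v1. The priority class string below is only an example value standing in for the operator's systemNodeCritical constant.

```go
package example

import (
	rookCephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1"
)

// clusterSpecFragment shows the ceph.rook.io/v1 spec types used when
// assembling a CephCluster spec after this commit.
func clusterSpecFragment() rookCephv1.ClusterSpec {
	return rookCephv1.ClusterSpec{
		Placement: rookCephv1.PlacementSpec{
			"all": rookCephv1.Placement{},
			"mon": rookCephv1.Placement{},
		},
		PriorityClassNames: rookCephv1.PriorityClassNamesSpec{
			rookCephv1.KeyMon: "system-node-critical", // example value
			rookCephv1.KeyOSD: "system-node-critical",
		},
		Labels: rookCephv1.LabelsSpec{},
	}
}
```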

controllers/storagecluster/placement.go (+7, -5)

@@ -3,16 +3,18 @@ package storagecluster
 import (
 	ocsv1 "github.com/openshift/ocs-operator/api/v1"
 	"github.com/openshift/ocs-operator/controllers/defaults"
-	rookv1 "github.com/rook/rook/pkg/apis/rook.io/v1"
+	rookCephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1"
+	rookv1 "github.com/rook/rook/pkg/apis/rook.io"
+
 	corev1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/labels"
 	"sigs.k8s.io/controller-runtime/pkg/client"
 )

 // getPlacement returns placement configuration for ceph components with appropriate topology
-func getPlacement(sc *ocsv1.StorageCluster, component string) rookv1.Placement {
-	placement := rookv1.Placement{}
+func getPlacement(sc *ocsv1.StorageCluster, component string) rookCephv1.Placement {
+	placement := rookCephv1.Placement{}
 	in, ok := sc.Spec.Placement[rookv1.KeyType(component)]
 	if ok {
 		(&in).DeepCopyInto(&placement)
@@ -97,7 +99,7 @@ func convertLabelToNodeSelectorRequirements(labelSelector metav1.LabelSelector)
 	return reqs
 }

-func appendNodeRequirements(placement *rookv1.Placement, reqs ...corev1.NodeSelectorRequirement) {
+func appendNodeRequirements(placement *rookCephv1.Placement, reqs ...corev1.NodeSelectorRequirement) {
 	if placement.NodeAffinity == nil {
 		placement.NodeAffinity = &corev1.NodeAffinity{}
 	}
@@ -125,7 +127,7 @@ func (m MatchingLabelsSelector) ApplyToList(opts *client.ListOptions) {
 }

 // setTopologyForAffinity assigns topology related values to the affinity placements
-func setTopologyForAffinity(placement *rookv1.Placement, selectorValue string, topologyKey string) {
+func setTopologyForAffinity(placement *rookCephv1.Placement, selectorValue string, topologyKey string) {
 	placement.PodAntiAffinity.PreferredDuringSchedulingIgnoredDuringExecution[0].PodAffinityTerm.TopologyKey = topologyKey

 	nodeZoneSelector := corev1.NodeSelectorRequirement{
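The notable wrinkle in this file is the split between the two Rook packages: Placement values now come from ceph.rook.io/v1, while the map key type (KeyType) lives in the un-versioned rook.io package. A small sketch of that lookup, mirroring the imports getPlacement uses; the helper itself is illustrative.

```go
package example

import (
	rookCephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1"
	rookv1 "github.com/rook/rook/pkg/apis/rook.io"
)

// lookupPlacement fetches a component's placement from a PlacementSpec.
// Keys are rook.io KeyType values; entries are ceph.rook.io/v1 Placements.
func lookupPlacement(spec rookCephv1.PlacementSpec, component string) rookCephv1.Placement {
	if p, ok := spec[rookv1.KeyType(component)]; ok {
		return p
	}
	return rookCephv1.Placement{}
}
```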

controllers/storagecluster/placement_test.go (+26, -24)

@@ -6,7 +6,9 @@ import (
 	api "github.com/openshift/ocs-operator/api/v1"
 	ocsv1 "github.com/openshift/ocs-operator/api/v1"
 	"github.com/openshift/ocs-operator/controllers/defaults"
-	rookv1 "github.com/rook/rook/pkg/apis/rook.io/v1"
+	rookCephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1"
+	rookCore "github.com/rook/rook/pkg/apis/rook.io"
+
 	"github.com/stretchr/testify/assert"
 	corev1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -54,7 +56,7 @@ var workerNodeSelector = corev1.NodeSelector{
 var workerNodeAffinity = corev1.NodeAffinity{
 	RequiredDuringSchedulingIgnoredDuringExecution: &workerNodeSelector,
 }
-var workerPlacements = map[rookv1.KeyType]rookv1.Placement{
+var workerPlacements = map[rookCore.KeyType]rookCephv1.Placement{
 	"all": {
 		NodeAffinity: &workerNodeAffinity,
 	},
@@ -63,11 +65,11 @@ var workerPlacements = map[rookv1.KeyType]rookv1.Placement{
 var emptyLabelSelector = metav1.LabelSelector{
 	MatchExpressions: []metav1.LabelSelectorRequirement{},
 }
-var emptyPlacements = map[rookv1.KeyType]rookv1.Placement{
+var emptyPlacements = map[rookCore.KeyType]rookCephv1.Placement{
 	"all": {},
 }

-var customPlacement = rookv1.Placement{
+var customPlacement = rookCephv1.Placement{
 	PodAntiAffinity: &corev1.PodAntiAffinity{
 		RequiredDuringSchedulingIgnoredDuringExecution: []corev1.PodAffinityTerm{
 			{
@@ -107,17 +109,17 @@ func TestGetPlacement(t *testing.T) {
 	cases := []struct {
 		label              string
 		storageCluster     *api.StorageCluster
-		placements         rookv1.PlacementSpec
+		placements         rookCephv1.PlacementSpec
 		labelSelector      *metav1.LabelSelector
-		expectedPlacements rookv1.PlacementSpec
+		expectedPlacements rookCephv1.PlacementSpec
 		topologyMap        *ocsv1.NodeTopologyMap
 	}{
 		{
 			label:          "Case 1: Defaults are preserved i.e no placement and no label selector",
 			storageCluster: mockStorageCluster,
-			placements:     rookv1.PlacementSpec{},
+			placements:     rookCephv1.PlacementSpec{},
 			labelSelector:  nil,
-			expectedPlacements: rookv1.PlacementSpec{
+			expectedPlacements: rookCephv1.PlacementSpec{
 				"all": {
 					NodeAffinity: defaults.DefaultNodeAffinity,
 					Tolerations:  defaults.DaemonPlacements["all"].Tolerations,
@@ -133,7 +135,7 @@ func TestGetPlacement(t *testing.T) {
 			storageCluster: mockStorageCluster,
 			placements:     emptyPlacements,
 			labelSelector:  nil,
-			expectedPlacements: rookv1.PlacementSpec{
+			expectedPlacements: rookCephv1.PlacementSpec{
 				"all": {
 					NodeAffinity: defaults.DefaultNodeAffinity,
 				},
@@ -146,9 +148,9 @@ func TestGetPlacement(t *testing.T) {
 		{
 			label:          "Case 3: LabelSelector to modify the default Placements correctly",
 			storageCluster: mockStorageCluster,
-			placements:     rookv1.PlacementSpec{},
+			placements:     rookCephv1.PlacementSpec{},
 			labelSelector:  &workerLabelSelector,
-			expectedPlacements: rookv1.PlacementSpec{
+			expectedPlacements: rookCephv1.PlacementSpec{
 				"all": {
 					NodeAffinity: &workerNodeAffinity,
 					Tolerations:  defaults.DaemonPlacements["all"].Tolerations,
@@ -164,7 +166,7 @@ func TestGetPlacement(t *testing.T) {
 			storageCluster: mockStorageCluster,
 			placements:     emptyPlacements,
 			labelSelector:  &workerLabelSelector,
-			expectedPlacements: rookv1.PlacementSpec{
+			expectedPlacements: rookCephv1.PlacementSpec{
 				"all": {
 					NodeAffinity: &workerNodeAffinity,
 				},
@@ -179,7 +181,7 @@ func TestGetPlacement(t *testing.T) {
 			storageCluster: mockStorageCluster,
 			placements:     workerPlacements,
 			labelSelector:  &masterLabelSelector,
-			expectedPlacements: rookv1.PlacementSpec{
+			expectedPlacements: rookCephv1.PlacementSpec{
 				"all": {
 					NodeAffinity: &corev1.NodeAffinity{
 						RequiredDuringSchedulingIgnoredDuringExecution: &corev1.NodeSelector{
@@ -219,9 +221,9 @@ func TestGetPlacement(t *testing.T) {
 		{
 			label:          "Case 6: Empty LabelSelector sets no NodeAffinity",
 			storageCluster: mockStorageCluster,
-			placements:     rookv1.PlacementSpec{},
+			placements:     rookCephv1.PlacementSpec{},
 			labelSelector:  &emptyLabelSelector,
-			expectedPlacements: rookv1.PlacementSpec{
+			expectedPlacements: rookCephv1.PlacementSpec{
 				"all": {
 					Tolerations: defaults.DaemonPlacements["all"].Tolerations,
 				},
@@ -233,10 +235,10 @@ func TestGetPlacement(t *testing.T) {
 		{
 			label:          "Case 7: Custom placement is applied without failure",
 			storageCluster: mockStorageCluster,
-			placements: rookv1.PlacementSpec{
+			placements: rookCephv1.PlacementSpec{
 				"mon": customPlacement,
 			},
-			expectedPlacements: rookv1.PlacementSpec{
+			expectedPlacements: rookCephv1.PlacementSpec{
 				"all": {
 					NodeAffinity: defaults.DefaultNodeAffinity,
 					Tolerations:  defaults.DaemonPlacements["all"].Tolerations,
@@ -250,11 +252,11 @@ func TestGetPlacement(t *testing.T) {
 		{
 			label:          "Case 8: Custom placement is modified by labelSelector",
 			storageCluster: mockStorageCluster,
-			placements: rookv1.PlacementSpec{
+			placements: rookCephv1.PlacementSpec{
 				"mon": customPlacement,
 			},
 			labelSelector: &workerLabelSelector,
-			expectedPlacements: rookv1.PlacementSpec{
+			expectedPlacements: rookCephv1.PlacementSpec{
 				"all": {
 					NodeAffinity: &workerNodeAffinity,
 					Tolerations:  defaults.DaemonPlacements["all"].Tolerations,
@@ -268,8 +270,8 @@ func TestGetPlacement(t *testing.T) {
 		{
 			label:          "Case 9: NodeTopologyMap modifies default mon placement",
 			storageCluster: mockStorageCluster,
-			placements:         rookv1.PlacementSpec{},
-			expectedPlacements: rookv1.PlacementSpec{
+			placements:         rookCephv1.PlacementSpec{},
+			expectedPlacements: rookCephv1.PlacementSpec{
 				"all": {
 					NodeAffinity: defaults.DefaultNodeAffinity,
 					Tolerations:  defaults.DaemonPlacements["all"].Tolerations,
@@ -307,9 +309,9 @@ func TestGetPlacement(t *testing.T) {
 		{
 			label:          "Case 10: skip podAntiAffinity in mon placement in case of stretched cluster",
 			storageCluster: mockStorageClusterWithArbiter,
-			placements:     rookv1.PlacementSpec{},
+			placements:     rookCephv1.PlacementSpec{},
 			labelSelector:  nil,
-			expectedPlacements: rookv1.PlacementSpec{
+			expectedPlacements: rookCephv1.PlacementSpec{
 				"all": {
 					NodeAffinity: defaults.DefaultNodeAffinity,
 					Tolerations:  defaults.DaemonPlacements["all"].Tolerations,
@@ -323,7 +325,7 @@ func TestGetPlacement(t *testing.T) {
 	}

 	for _, c := range cases {
-		var actualPlacement rookv1.Placement
+		var actualPlacement rookCephv1.Placement
 		sc := &ocsv1.StorageCluster{}
 		c.storageCluster.DeepCopyInto(sc)
 		sc.Spec.Placement = c.placements
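A hypothetical compile-level check in the same test style, shown only to document the type combination the updated test file relies on (rook.io KeyType keys with ceph.rook.io/v1 Placement values); it is not part of the commit.

```go
package example

import (
	"testing"

	rookCephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1"
	rookCore "github.com/rook/rook/pkg/apis/rook.io"
	"github.com/stretchr/testify/assert"
)

// TestPlacementKeyAndValueTypes documents the key/value types used by the
// updated placement tests.
func TestPlacementKeyAndValueTypes(t *testing.T) {
	placements := map[rookCore.KeyType]rookCephv1.Placement{
		"all": {},
	}
	assert.IsType(t, rookCephv1.Placement{}, placements["all"])
}
```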
