flinkcluster_reconciler.go (forked from GoogleCloudPlatform/flink-on-k8s-operator)
/*
Copyright 2019 Google LLC.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package controllers
import (
"context"
"fmt"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/tools/record"
"k8s.io/client-go/util/retry"
"reflect"
"time"
"github.com/go-logr/logr"
v1beta1 "github.com/googlecloudplatform/flink-operator/api/v1beta1"
"github.com/googlecloudplatform/flink-operator/controllers/batchscheduler"
"github.com/googlecloudplatform/flink-operator/controllers/flinkclient"
"github.com/googlecloudplatform/flink-operator/controllers/model"
appsv1 "k8s.io/api/apps/v1"
batchv1 "k8s.io/api/batch/v1"
corev1 "k8s.io/api/core/v1"
extensionsv1beta1 "k8s.io/api/extensions/v1beta1"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
)
// ClusterReconciler takes actions to drive the observed state towards the
// desired state.
type ClusterReconciler struct {
k8sClient client.Client
flinkClient flinkclient.FlinkClient
context context.Context
log logr.Logger
observed ObservedClusterState
desired model.DesiredClusterState
recorder record.EventRecorder
}
var requeueResult = ctrl.Result{RequeueAfter: 10 * time.Second, Requeue: true}
// reconcile compares the desired state and the observed state; if there is a
// difference, it takes actions to drive the observed state towards the desired state.
func (reconciler *ClusterReconciler) reconcile() (ctrl.Result, error) {
var err error
// Child resources of the cluster CR will be automatically reclaimed by K8S.
if reconciler.observed.cluster == nil {
reconciler.log.Info("The cluster has been deleted, no action to take")
return ctrl.Result{}, nil
}
if getUpdateState(reconciler.observed) == UpdateStateInProgress {
reconciler.log.Info("The cluster update is in progress")
}
// If batch scheduling is enabled
if reconciler.observed.cluster.Spec.BatchSchedulerName != nil &&
*reconciler.observed.cluster.Spec.BatchSchedulerName != "" {
scheduler, err := batchscheduler.GetScheduler(*reconciler.observed.cluster.Spec.BatchSchedulerName)
if err != nil {
return ctrl.Result{}, err
}
err = scheduler.Schedule(reconciler.observed.cluster, &reconciler.desired)
if err != nil {
return ctrl.Result{}, err
}
}
err = reconciler.reconcileConfigMap()
if err != nil {
return ctrl.Result{}, err
}
err = reconciler.reconcileJobManagerStatefulSet()
if err != nil {
return ctrl.Result{}, err
}
err = reconciler.reconcileJobManagerService()
if err != nil {
return ctrl.Result{}, err
}
err = reconciler.reconcileJobManagerIngress()
if err != nil {
return ctrl.Result{}, err
}
err = reconciler.reconcileTaskManagerStatefulSet()
if err != nil {
return ctrl.Result{}, err
}
result, err := reconciler.reconcileJob()
return result, err
}
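// reconcileJobManagerStatefulSet reconciles the JobManager StatefulSet towards the desired state.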
func (reconciler *ClusterReconciler) reconcileJobManagerStatefulSet() error {
return reconciler.reconcileStatefulSet(
"JobManager",
reconciler.desired.JmStatefulSet,
reconciler.observed.jmStatefulSet)
}
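// reconcileTaskManagerStatefulSet reconciles the TaskManager StatefulSet towards the desired state.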
func (reconciler *ClusterReconciler) reconcileTaskManagerStatefulSet() error {
return reconciler.reconcileStatefulSet(
"TaskManager",
reconciler.desired.TmStatefulSet,
reconciler.observed.tmStatefulSet)
}
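// reconcileStatefulSet creates, updates or deletes a StatefulSet so that the observed state converges to the desired state.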
func (reconciler *ClusterReconciler) reconcileStatefulSet(
component string,
desiredStatefulSet *appsv1.StatefulSet,
observedStatefulSet *appsv1.StatefulSet) error {
var log = reconciler.log.WithValues("component", component)
if desiredStatefulSet != nil && observedStatefulSet == nil {
return reconciler.createStatefulSet(desiredStatefulSet, component)
}
if desiredStatefulSet != nil && observedStatefulSet != nil {
if getUpdateState(reconciler.observed) == UpdateStateInProgress {
updateComponent := fmt.Sprintf("%v StatefulSet", component)
var err error
if *reconciler.observed.cluster.Spec.RecreateOnUpdate {
err = reconciler.deleteOldComponent(desiredStatefulSet, observedStatefulSet, updateComponent)
} else {
err = reconciler.updateComponent(desiredStatefulSet, updateComponent)
}
if err != nil {
return err
}
return nil
}
log.Info("Statefulset already exists, no action")
return nil
}
if desiredStatefulSet == nil && observedStatefulSet != nil {
return reconciler.deleteStatefulSet(observedStatefulSet, component)
}
return nil
}
func (reconciler *ClusterReconciler) createStatefulSet(
statefulSet *appsv1.StatefulSet, component string) error {
var context = reconciler.context
var log = reconciler.log.WithValues("component", component)
var k8sClient = reconciler.k8sClient
log.Info("Creating StatefulSet", "StatefulSet", *statefulSet)
var err = k8sClient.Create(context, statefulSet)
if err != nil {
log.Error(err, "Failed to create StatefulSet")
} else {
log.Info("StatefulSet created")
}
return err
}
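// deleteOldComponent deletes an existing component so that it can be recreated from the desired spec during an update; it is a no-op if the component is already updated.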
func (reconciler *ClusterReconciler) deleteOldComponent(desired runtime.Object, observed runtime.Object, component string) error {
var log = reconciler.log.WithValues("component", component)
if isComponentUpdated(observed, *reconciler.observed.cluster) {
reconciler.log.Info(fmt.Sprintf("%v is already updated, no action", component))
return nil
}
var context = reconciler.context
var k8sClient = reconciler.k8sClient
log.Info("Deleting component for update", "component", desired)
err := k8sClient.Delete(context, desired)
if err != nil {
log.Error(err, "Failed to delete component for update")
return err
}
log.Info("Component deleted for update successfully")
return nil
}
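// updateComponent applies the desired spec to an existing component via the Kubernetes API.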
func (reconciler *ClusterReconciler) updateComponent(desired runtime.Object, component string) error {
var log = reconciler.log.WithValues("component", component)
var context = reconciler.context
var k8sClient = reconciler.k8sClient
log.Info("Update component", "component", desired)
err := k8sClient.Update(context, desired)
if err != nil {
log.Error(err, "Failed to update component for update")
return err
}
log.Info("Component update successfully")
return nil
}
func (reconciler *ClusterReconciler) updateStatefulSet(
statefulSet *appsv1.StatefulSet, component string) error {
var context = reconciler.context
var log = reconciler.log.WithValues("component", component)
var k8sClient = reconciler.k8sClient
log.Info("Updating StatefulSet", "StatefulSet", statefulSet)
var err = k8sClient.Update(context, statefulSet)
if err != nil {
log.Error(err, "Failed to update StatefulSet")
} else {
log.Info("StatefulSet updated")
}
return err
}
func (reconciler *ClusterReconciler) deleteStatefulSet(
statefulSet *appsv1.StatefulSet, component string) error {
var context = reconciler.context
var log = reconciler.log.WithValues("component", component)
var k8sClient = reconciler.k8sClient
log.Info("Deleting StatefulSet", "StatefulSet", statefulSet)
var err = k8sClient.Delete(context, statefulSet)
err = client.IgnoreNotFound(err)
if err != nil {
log.Error(err, "Failed to delete StatefulSet")
} else {
log.Info("StatefulSet deleted")
}
return err
}
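// reconcileJobManagerService creates, updates or deletes the JobManager service to match the desired state.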
func (reconciler *ClusterReconciler) reconcileJobManagerService() error {
var desiredJmService = reconciler.desired.JmService
var observedJmService = reconciler.observed.jmService
if desiredJmService != nil && observedJmService == nil {
return reconciler.createService(desiredJmService, "JobManager")
}
if desiredJmService != nil && observedJmService != nil {
if getUpdateState(reconciler.observed) == UpdateStateInProgress {
// The v1.Service API does not handle updates correctly when the values below are empty.
desiredJmService.SetResourceVersion(observedJmService.GetResourceVersion())
desiredJmService.Spec.ClusterIP = observedJmService.Spec.ClusterIP
var err error
if *reconciler.observed.cluster.Spec.RecreateOnUpdate {
err = reconciler.deleteOldComponent(desiredJmService, observedJmService, "JobManager service")
} else {
err = reconciler.updateComponent(desiredJmService, "JobManager service")
}
if err != nil {
return err
}
return nil
}
reconciler.log.Info("JobManager service already exists, no action")
return nil
}
if desiredJmService == nil && observedJmService != nil {
return reconciler.deleteService(observedJmService, "JobManager")
}
return nil
}
func (reconciler *ClusterReconciler) createService(
service *corev1.Service, component string) error {
var context = reconciler.context
var log = reconciler.log.WithValues("component", component)
var k8sClient = reconciler.k8sClient
log.Info("Creating service", "resource", *service)
var err = k8sClient.Create(context, service)
if err != nil {
log.Info("Failed to create service", "error", err)
} else {
log.Info("Service created")
}
return err
}
func (reconciler *ClusterReconciler) deleteService(
service *corev1.Service, component string) error {
var context = reconciler.context
var log = reconciler.log.WithValues("component", component)
var k8sClient = reconciler.k8sClient
log.Info("Deleting service", "service", service)
var err = k8sClient.Delete(context, service)
err = client.IgnoreNotFound(err)
if err != nil {
log.Error(err, "Failed to delete service")
} else {
log.Info("service deleted")
}
return err
}
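// reconcileJobManagerIngress creates, updates or deletes the JobManager ingress to match the desired state.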
func (reconciler *ClusterReconciler) reconcileJobManagerIngress() error {
var desiredJmIngress = reconciler.desired.JmIngress
var observedJmIngress = reconciler.observed.jmIngress
if desiredJmIngress != nil && observedJmIngress == nil {
return reconciler.createIngress(desiredJmIngress, "JobManager")
}
if desiredJmIngress != nil && observedJmIngress != nil {
if getUpdateState(reconciler.observed) == UpdateStateInProgress {
var err error
if *reconciler.observed.cluster.Spec.RecreateOnUpdate {
err = reconciler.deleteOldComponent(desiredJmIngress, observedJmIngress, "JobManager ingress")
} else {
err = reconciler.updateComponent(desiredJmIngress, "JobManager ingress")
}
if err != nil {
return err
}
return nil
}
reconciler.log.Info("JobManager ingress already exists, no action")
return nil
}
if desiredJmIngress == nil && observedJmIngress != nil {
return reconciler.deleteIngress(observedJmIngress, "JobManager")
}
return nil
}
func (reconciler *ClusterReconciler) createIngress(
ingress *extensionsv1beta1.Ingress, component string) error {
var context = reconciler.context
var log = reconciler.log.WithValues("component", component)
var k8sClient = reconciler.k8sClient
log.Info("Creating ingress", "resource", *ingress)
var err = k8sClient.Create(context, ingress)
if err != nil {
log.Info("Failed to create ingress", "error", err)
} else {
log.Info("Ingress created")
}
return err
}
func (reconciler *ClusterReconciler) deleteIngress(
ingress *extensionsv1beta1.Ingress, component string) error {
var context = reconciler.context
var log = reconciler.log.WithValues("component", component)
var k8sClient = reconciler.k8sClient
log.Info("Deleting ingress", "ingress", ingress)
var err = k8sClient.Delete(context, ingress)
err = client.IgnoreNotFound(err)
if err != nil {
log.Error(err, "Failed to delete ingress")
} else {
log.Info("Ingress deleted")
}
return err
}
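// reconcileConfigMap creates, updates or deletes the cluster ConfigMap to match the desired state.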
func (reconciler *ClusterReconciler) reconcileConfigMap() error {
var desiredConfigMap = reconciler.desired.ConfigMap
var observedConfigMap = reconciler.observed.configMap
if desiredConfigMap != nil && observedConfigMap == nil {
return reconciler.createConfigMap(desiredConfigMap, "ConfigMap")
}
if desiredConfigMap != nil && observedConfigMap != nil {
if getUpdateState(reconciler.observed) == UpdateStateInProgress {
var err error
if *reconciler.observed.cluster.Spec.RecreateOnUpdate {
err = reconciler.deleteOldComponent(desiredConfigMap, observedConfigMap, "ConfigMap")
} else {
err = reconciler.updateComponent(desiredConfigMap, "ConfigMap")
}
if err != nil {
return err
}
return nil
}
reconciler.log.Info("ConfigMap already exists, no action")
return nil
}
if desiredConfigMap == nil && observedConfigMap != nil {
return reconciler.deleteConfigMap(observedConfigMap, "ConfigMap")
}
return nil
}
func (reconciler *ClusterReconciler) createConfigMap(
cm *corev1.ConfigMap, component string) error {
var context = reconciler.context
var log = reconciler.log.WithValues("component", component)
var k8sClient = reconciler.k8sClient
log.Info("Creating configMap", "configMap", *cm)
var err = k8sClient.Create(context, cm)
if err != nil {
log.Info("Failed to create configMap", "error", err)
} else {
log.Info("ConfigMap created")
}
return err
}
func (reconciler *ClusterReconciler) deleteConfigMap(
cm *corev1.ConfigMap, component string) error {
var context = reconciler.context
var log = reconciler.log.WithValues("component", component)
var k8sClient = reconciler.k8sClient
log.Info("Deleting configMap", "configMap", cm)
var err = k8sClient.Delete(context, cm)
err = client.IgnoreNotFound(err)
if err != nil {
log.Error(err, "Failed to delete configMap")
} else {
log.Info("ConfigMap deleted")
}
return err
}
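// reconcileJob drives the Flink job towards the desired state: it creates the job submitter,
// restarts the job for updates or failure recovery, triggers savepoints, and cancels the job
// when it is no longer desired.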
func (reconciler *ClusterReconciler) reconcileJob() (ctrl.Result, error) {
var log = reconciler.log
var desiredJob = reconciler.desired.Job
var observed = reconciler.observed
var observedJob = observed.job
var recordedJobStatus = observed.cluster.Status.Components.Job
var activeFlinkJob bool
var err error
// Status changes made during job reconciliation, applied by the deferred updateStatus call below.
var newSavepointStatus *v1beta1.SavepointStatus
var newControlStatus *v1beta1.FlinkClusterControlStatus
defer reconciler.updateStatus(&newSavepointStatus, &newControlStatus)
// Cancel unexpected jobs
if len(observed.flinkJobStatus.flinkJobsUnexpected) > 0 {
log.Info("Cancelling unexpected running job(s)")
err = reconciler.cancelUnexpectedJobs(false /* takeSavepoint */)
return requeueResult, err
}
// Check if the Flink job is active
activeFlinkJob = isJobActive(recordedJobStatus)
// Create Flink job submitter
if desiredJob != nil && !activeFlinkJob {
// If an update has been triggered, wait until all Flink cluster components are replaced with the next revision.
if !isClusterUpdateToDate(observed) {
return requeueResult, nil
}
// Create Flink job submitter
log.Info("Updating job status to create new job submitter")
err = reconciler.updateStatusForNewJob()
if err != nil {
log.Info("Not proceed to create new job submitter because job status update failed")
return requeueResult, err
}
log.Info("Creating new job submitter")
if observedJob != nil {
log.Info("Deleting old job submitter")
err = reconciler.deleteJob(observedJob)
if err != nil {
log.Info("Failed to delete previous job submitter")
return requeueResult, err
}
}
err = reconciler.createJob(desiredJob)
return requeueResult, err
}
if desiredJob != nil && activeFlinkJob {
var jobID = reconciler.getFlinkJobID()
var restartPolicy = observed.cluster.Spec.Job.RestartPolicy
var recordedJobStatus = observed.cluster.Status.Components.Job
var jobSpec = reconciler.observed.cluster.Spec.Job
// Update or recover Flink job by restart.
if shouldUpdateJob(observed) {
log.Info("Job is about to be restarted to update")
err := reconciler.restartJob(*jobSpec.TakeSavepointOnUpgrade)
return requeueResult, err
} else if shouldRestartJob(restartPolicy, recordedJobStatus) {
log.Info("Job is about to be restarted to recover failure")
err := reconciler.restartJob(false)
return requeueResult, err
}
// Trigger savepoint if required.
if len(jobID) > 0 {
shouldTakeSavepoint, savepointTriggerReason := reconciler.shouldTakeSavepoint()
if shouldTakeSavepoint {
err = reconciler.updateSavepointTriggerTimeStatus()
if err == nil {
newSavepointStatus, _ = reconciler.takeSavepointAsync(jobID, savepointTriggerReason)
}
}
}
log.Info("Job is not finished yet, no action", "jobID", jobID)
return requeueResult, nil
}
// Stop Flink job
if desiredJob == nil && activeFlinkJob {
// Cancel the Flink job if it is live
// case 1) If a savepoint was triggered, proceed to the delete step after it completes.
// case 2) If the savepoint was skipped, continue to the delete step immediately.
//
// If the savepoint or the cancellation fails, the updater transitions the control state to failed.
var jobID = reconciler.getFlinkJobID()
log.Info("Cancelling job", "jobID", jobID)
var savepointStatus, err = reconciler.cancelFlinkJobAsync(jobID, true /* takeSavepoint */)
if !reflect.DeepEqual(savepointStatus, observed.cluster.Status.Savepoint) {
newSavepointStatus = savepointStatus
}
if err != nil {
log.Error(err, "Failed to cancel job", "jobID", jobID)
newControlStatus = getFailedCancelStatus(err)
return requeueResult, err
}
// To proceed to the delete step:
// case 1) savepoint triggered: savepointStatus state should be SavepointStateSucceeded and there is no error
// case 2) savepoint skipped: savepointStatus is nil and there is no error
if savepointStatus != nil && savepointStatus.State != v1beta1.SavepointStateSucceeded {
return requeueResult, nil
}
return ctrl.Result{}, err
}
if isJobStopped(recordedJobStatus) {
log.Info("Job has finished, no action")
}
return ctrl.Result{}, nil
}
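// createJob creates the Kubernetes batch job that submits the Flink job.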
func (reconciler *ClusterReconciler) createJob(job *batchv1.Job) error {
var context = reconciler.context
var log = reconciler.log
var k8sClient = reconciler.k8sClient
log.Info("Submitting job", "resource", *job)
var err = k8sClient.Create(context, job)
if err != nil {
log.Info("Failed to created job", "error", err)
} else {
log.Info("Job created")
}
return err
}
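// deleteJob deletes the job submitter with background propagation, ignoring not-found errors.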
func (reconciler *ClusterReconciler) deleteJob(job *batchv1.Job) error {
var context = reconciler.context
var log = reconciler.log
var k8sClient = reconciler.k8sClient
var deletePolicy = metav1.DeletePropagationBackground
var deleteOption = client.DeleteOptions{PropagationPolicy: &deletePolicy}
log.Info("Deleting job", "job", job)
var err = k8sClient.Delete(context, job, &deleteOption)
err = client.IgnoreNotFound(err)
if err != nil {
log.Error(err, "Failed to delete job")
} else {
log.Info("Job deleted")
}
return err
}
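// getFlinkJobID returns the Flink job ID recorded in the cluster status, or an empty string if none is recorded.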
func (reconciler *ClusterReconciler) getFlinkJobID() string {
var jobStatus = reconciler.observed.cluster.Status.Components.Job
if jobStatus != nil && len(jobStatus.ID) > 0 {
return jobStatus.ID
}
return ""
}
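// restartJob stops the running Flink job, optionally taking a savepoint first, and deletes the
// old job submitter; creating the new job is left to the next reconciliation.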
func (reconciler *ClusterReconciler) restartJob(shouldTakeSavepoint bool) error {
var log = reconciler.log
var observedJob = reconciler.observed.job
var observedFlinkJob = reconciler.observed.flinkJobStatus.flinkJob
log.Info("Stopping Flink job to restart", "", observedFlinkJob)
shouldTakeSavepoint = shouldTakeSavepoint && canTakeSavepoint(*reconciler.observed.cluster)
var err = reconciler.cancelRunningJobs(shouldTakeSavepoint /* takeSavepoint */)
if err != nil {
return err
}
if observedJob != nil {
var err = reconciler.deleteJob(observedJob)
if err != nil {
log.Error(
err, "Failed to delete failed job", "job", observedJob)
return err
}
}
// Do not create the new job immediately; leave it to the next reconciliation,
// so that the new job can still be created if an ephemeral error occurs here.
// It is better to keep that logic in a central place.
return nil
}
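// cancelUnexpectedJobs cancels Flink jobs that are not expected to be running according to the observed state.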
func (reconciler *ClusterReconciler) cancelUnexpectedJobs(
takeSavepoint bool) error {
var unexpectedJobs = reconciler.observed.flinkJobStatus.flinkJobsUnexpected
return reconciler.cancelJobs(takeSavepoint, unexpectedJobs)
}
// Cancel running jobs.
func (reconciler *ClusterReconciler) cancelRunningJobs(
takeSavepoint bool) error {
var runningJobs = reconciler.observed.flinkJobStatus.flinkJobsUnexpected
var flinkJob = reconciler.observed.flinkJobStatus.flinkJob
if flinkJob != nil && flinkJob.ID != "" &&
getFlinkJobDeploymentState(flinkJob.Status) == v1beta1.JobStateRunning {
runningJobs = append(runningJobs, flinkJob.ID)
}
return reconciler.cancelJobs(takeSavepoint, runningJobs)
}
// Cancel jobs.
func (reconciler *ClusterReconciler) cancelJobs(
takeSavepoint bool,
jobs []string) error {
var log = reconciler.log
for _, jobID := range jobs {
log.Info("Cancel running job", "jobID", jobID)
var err = reconciler.cancelFlinkJob(jobID, takeSavepoint)
if err != nil {
log.Error(err, "Failed to cancel running job", "jobID", jobID)
return err
}
}
return nil
}
// Takes a savepoint if possible then stops the job.
func (reconciler *ClusterReconciler) cancelFlinkJob(jobID string, takeSavepoint bool) error {
var log = reconciler.log
if takeSavepoint && canTakeSavepoint(*reconciler.observed.cluster) {
var err = reconciler.takeSavepoint(jobID)
if err != nil {
return err
}
} else {
log.Info("Skip taking savepoint before stopping job", "jobID", jobID)
}
var apiBaseURL = getFlinkAPIBaseURL(reconciler.observed.cluster)
reconciler.log.Info("Stoping job", "jobID", jobID)
return reconciler.flinkClient.StopJob(apiBaseURL, jobID)
}
// Triggers a savepoint if possible, then returns the savepoint status to be updated.
// If a savepoint was already triggered, returns the currently observed status.
// If triggering a savepoint is impossible or skipped, or the triggered savepoint has completed, proceeds to stop the job.
func (reconciler *ClusterReconciler) cancelFlinkJobAsync(jobID string, takeSavepoint bool) (*v1beta1.SavepointStatus, error) {
var log = reconciler.log
var cluster = reconciler.observed.cluster
var observedSavepoint = reconciler.observed.cluster.Status.Savepoint
var savepointStatus *v1beta1.SavepointStatus
var err error
switch observedSavepoint.State {
case v1beta1.SavepointStateNotTriggered:
if takeSavepoint && canTakeSavepoint(*reconciler.observed.cluster) {
savepointStatus, err = reconciler.takeSavepointAsync(jobID, v1beta1.SavepointTriggerReasonJobCancel)
if err != nil {
log.Info("Failed to trigger savepoint.")
return savepointStatus, fmt.Errorf("failed to trigger savepoint: %v", err)
}
log.Info("Triggered savepoint and wait it is completed.")
return savepointStatus, nil
} else {
savepointStatus = nil
if takeSavepoint {
log.Info("Savepoint was desired but couldn't be triggered. Skip taking savepoint before stopping job", "jobID", jobID)
} else {
log.Info("Skip taking savepoint before stopping job", "jobID", jobID)
}
}
case v1beta1.SavepointStateInProgress:
log.Info("Triggered savepoint already and wait until it is completed.")
return observedSavepoint, nil
case v1beta1.SavepointStateSucceeded:
savepointStatus = observedSavepoint
log.Info("Successfully savepoint created. Proceed to stop job.")
// Cannot be reached here with these states, because job-cancel control should be finished with failed savepoint states by updater.
case v1beta1.SavepointStateTriggerFailed:
fallthrough
case v1beta1.SavepointStateFailed:
fallthrough
default:
return nil, fmt.Errorf("unexpected savepoint status: %v", *observedSavepoint)
}
var apiBaseURL = getFlinkAPIBaseURL(cluster)
log.Info("Stopping job", "jobID", jobID)
err = reconciler.flinkClient.StopJob(apiBaseURL, jobID)
if err != nil {
return savepointStatus, fmt.Errorf("failed to stop job: %v", err)
}
return savepointStatus, nil
}
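// shouldTakeSavepoint returns whether a savepoint should be taken now, along with the trigger reason.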
func (reconciler *ClusterReconciler) shouldTakeSavepoint() (bool, string) {
var log = reconciler.log
var jobSpec = reconciler.observed.cluster.Spec.Job
var jobStatus = reconciler.observed.cluster.Status.Components.Job
var savepointStatus = reconciler.observed.cluster.Status.Savepoint
if !canTakeSavepoint(*reconciler.observed.cluster) {
return false, ""
}
// User requested.
// When the savepoint status is in a finished state, a savepoint cannot be
// triggered via spec.job.savepointGeneration, because the field cannot be
// increased again after a savepoint has failed.
//
// Retrying a savepoint via the annotation is possible, because the annotation is
// cleared when the last savepoint finishes and the user can attach it again.
// A savepoint can be triggered in the updater for a user request, job-cancel or job update.
if savepointStatus != nil && savepointStatus.State == v1beta1.SavepointStateNotTriggered {
return true, savepointStatus.TriggerReason
}
// TODO: spec.job.savepointGeneration will be deprecated
if jobSpec.SavepointGeneration > jobStatus.SavepointGeneration &&
(savepointStatus != nil && savepointStatus.State != v1beta1.SavepointStateFailed && savepointStatus.State != v1beta1.SavepointStateTriggerFailed) {
log.Info(
"Savepoint is requested",
"statusGen", jobStatus.SavepointGeneration,
"specGen", jobSpec.SavepointGeneration)
return true, v1beta1.SavepointTriggerReasonUserRequested
}
if jobSpec.AutoSavepointSeconds == nil {
return false, ""
}
var nextOkTriggerTime = getTimeAfterAddedSeconds(jobStatus.LastSavepointTriggerTime, SavepointTimeoutSec)
if time.Now().Before(nextOkTriggerTime) {
return false, ""
}
// First savepoint.
if len(jobStatus.LastSavepointTime) == 0 {
return true, v1beta1.SavepointTriggerReasonScheduledInitial
}
// Scheduled, check if next trigger time arrived.
var nextTime = getTimeAfterAddedSeconds(jobStatus.LastSavepointTime, int64(*jobSpec.AutoSavepointSeconds))
return time.Now().After(nextTime), v1beta1.SavepointTriggerReasonScheduled
}
// Parses `rawTime` and returns the resulting time with `addedSeconds` added to it.
// If `rawTime` is empty, the zero time is used as the base.
func getTimeAfterAddedSeconds(rawTime string, addedSeconds int64) time.Time {
var tc = &TimeConverter{}
var lastTriggerTime = time.Time{}
if len(rawTime) != 0 {
lastTriggerTime = tc.FromString(rawTime)
}
return lastTriggerTime.Add(time.Duration(addedSeconds * int64(time.Second)))
}
// Triggers a savepoint for a job, then returns the savepoint status to be updated.
func (reconciler *ClusterReconciler) takeSavepointAsync(jobID string, triggerReason string) (*v1beta1.SavepointStatus, error) {
var log = reconciler.log
var cluster = reconciler.observed.cluster
var apiBaseURL = getFlinkAPIBaseURL(reconciler.observed.cluster)
var triggerSuccess bool
var triggerID string
var message string
var err error
log.Info("Trigger savepoint.", "jobID", jobID)
triggerID, err = reconciler.flinkClient.TakeSavepointAsync(apiBaseURL, jobID, *cluster.Spec.Job.SavepointsDir)
if err != nil {
// limit message size to 1KiB
if message = err.Error(); len(message) > 1024 {
message = message[:1024] + "..."
}
triggerSuccess = false
log.Info("Savepoint trigger is failed.", "jobID", jobID, "triggerID", triggerID, "error", err)
} else {
triggerSuccess = true
log.Info("Savepoint is triggered successfully.", "jobID", jobID, "triggerID", triggerID)
}
newSavepointStatus := getTriggeredSavepointStatus(jobID, triggerID, triggerReason, message, triggerSuccess)
requestedSavepoint := reconciler.observed.cluster.Status.Savepoint
// When a savepoint was requested, preserve the original request time.
if requestedSavepoint != nil && requestedSavepoint.State == v1beta1.SavepointStateNotTriggered {
newSavepointStatus.RequestTime = requestedSavepoint.RequestTime
}
return &newSavepointStatus, err
}
// Takes a savepoint for a job, then updates the job status with the result.
func (reconciler *ClusterReconciler) takeSavepoint(
jobID string) error {
var log = reconciler.log
var apiBaseURL = getFlinkAPIBaseURL(reconciler.observed.cluster)
log.Info("Taking savepoint.", "jobID", jobID)
var status, err = reconciler.flinkClient.TakeSavepoint(
apiBaseURL, jobID, *reconciler.observed.cluster.Spec.Job.SavepointsDir)
log.Info(
"Savepoint status.",
"status", status,
"error", err)
if err == nil && len(status.FailureCause.StackTrace) > 0 {
err = fmt.Errorf("%s", status.FailureCause.StackTrace)
}
if err != nil || !status.Completed {
log.Info("Failed to take savepoint.", "jobID", jobID)
}
statusUpdateErr := reconciler.updateSavepointStatus(status)
if statusUpdateErr != nil {
log.Error(
statusUpdateErr, "Failed to update savepoint status.", "error", statusUpdateErr)
}
return err
}
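// updateSavepointTriggerTimeStatus records the current time as the last savepoint trigger time in the cluster status.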
func (reconciler *ClusterReconciler) updateSavepointTriggerTimeStatus() error {
var cluster = v1beta1.FlinkCluster{}
reconciler.observed.cluster.DeepCopyInto(&cluster)
var jobStatus = cluster.Status.Components.Job
setTimestamp(&jobStatus.LastSavepointTriggerTime)
return reconciler.k8sClient.Status().Update(reconciler.context, &cluster)
}
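// updateSavepointStatus writes the result of a completed savepoint to the cluster status,
// including the control status when savepointing was triggered by the control annotation.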
func (reconciler *ClusterReconciler) updateSavepointStatus(
savepointStatus flinkclient.SavepointStatus) error {
var cluster = v1beta1.FlinkCluster{}
reconciler.observed.cluster.DeepCopyInto(&cluster)
if savepointStatus.IsSuccessful() {
var jobStatus = cluster.Status.Components.Job
jobStatus.SavepointGeneration++
jobStatus.LastSavepointTriggerID = savepointStatus.TriggerID
jobStatus.SavepointLocation = savepointStatus.Location
setTimestamp(&jobStatus.LastSavepointTime)
setTimestamp(&cluster.Status.LastUpdateTime)
}
// Case in which savepointing was triggered by the control annotation.
var controlStatus = cluster.Status.Control
if controlStatus != nil && controlStatus.Name == v1beta1.ControlNameSavepoint &&
controlStatus.State == v1beta1.ControlStateProgressing {
if controlStatus.Details == nil {
controlStatus.Details = make(map[string]string)
}
var retries, err = getRetryCount(controlStatus.Details)
if err == nil {
if savepointStatus.IsFailed() || retries != "1" {
controlStatus.Details[ControlRetries] = retries
}
} else {
reconciler.log.Error(err, "failed to get retries from control status", "control status", controlStatus)
}
controlStatus.Details[ControlSavepointTriggerID] = savepointStatus.TriggerID
controlStatus.Details[ControlJobID] = savepointStatus.JobID
setTimestamp(&controlStatus.UpdateTime)
}
return reconciler.k8sClient.Status().Update(reconciler.context, &cluster)
}
// If job cancellation fails, fill the status message with the error message.
// The updater will then transition the state to failed.
func getFailedCancelStatus(cancelErr error) *v1beta1.FlinkClusterControlStatus {
var state string
var message string
var now string
setTimestamp(&now)
state = v1beta1.ControlStateProgressing
// limit message size to 1KiB
if message = cancelErr.Error(); len(message) > 1024 {
message = message[:1024] + "..."
}
return &v1beta1.FlinkClusterControlStatus{
Name: v1beta1.ControlNameJobCancel,
State: state,
UpdateTime: now,
Message: message,
}
}
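// updateStatus records events for the new savepoint and control statuses and writes them
// to the cluster status, retrying on conflict.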
func (reconciler *ClusterReconciler) updateStatus(ss **v1beta1.SavepointStatus, cs **v1beta1.FlinkClusterControlStatus) {
var log = reconciler.log
var savepointStatus = *ss
var controlStatus = *cs
if savepointStatus == nil && controlStatus == nil {
return
}
// Record events
if savepointStatus != nil {
eventType, eventReason, eventMessage := getSavepointEvent(*savepointStatus)
reconciler.recorder.Event(reconciler.observed.cluster, eventType, eventReason, eventMessage)
}
if controlStatus != nil {
eventType, eventReason, eventMessage := getControlEvent(*controlStatus)
reconciler.recorder.Event(reconciler.observed.cluster, eventType, eventReason, eventMessage)
}
// Update status
var clusterClone = reconciler.observed.cluster.DeepCopy()
var statusUpdateErr error
retry.RetryOnConflict(retry.DefaultBackoff, func() error {
var newStatus = &clusterClone.Status
if savepointStatus != nil {
newStatus.Savepoint = savepointStatus
}
if controlStatus != nil {
newStatus.Control = controlStatus
}
setTimestamp(&newStatus.LastUpdateTime)
statusUpdateErr = reconciler.k8sClient.Status().Update(reconciler.context, clusterClone)
if statusUpdateErr == nil {
return nil
}
var clusterUpdated v1beta1.FlinkCluster
if err := reconciler.k8sClient.Get(
reconciler.context,
types.NamespacedName{Namespace: clusterClone.Namespace, Name: clusterClone.Name}, &clusterUpdated); err == nil {
clusterClone = clusterUpdated.DeepCopy()
}
return statusUpdateErr
})
if statusUpdateErr != nil {
log.Error(
statusUpdateErr, "Failed to update status.", "error", statusUpdateErr)
}
}
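// updateStatusForNewJob updates the recorded job status before a new job submitter is created,
// adjusting the restart count based on the previous job state.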
func (reconciler *ClusterReconciler) updateStatusForNewJob() error {
var log = reconciler.log
var newJobStatus *v1beta1.JobStatus
var desiredJob = reconciler.desired.Job
var clusterClone = reconciler.observed.cluster.DeepCopy()
var err error
if clusterClone.Status.Components.Job != nil {
newJobStatus = clusterClone.Status.Components.Job
switch previousJobState := newJobStatus.State; previousJobState {
case v1beta1.JobStateFailed:
newJobStatus.RestartCount++
case v1beta1.JobStateUpdating:
newJobStatus.RestartCount = 0
}
} else {
newJobStatus = &v1beta1.JobStatus{}
clusterClone.Status.Components.Job = newJobStatus
}
var fromSavepoint = getFromSavepoint(desiredJob.Spec)
newJobStatus.ID = ""