#
# A dCache build/deploy/test pipeline file.
#
# The following environment variables are injected by GitLab CI:
#
# DCACHE_ORG_PGP_KEY: GPG key used to sign RPM and DEB packages
# DCACHE_ORG_KEY_NAME: GPG key name
# DCACHE_ORG_PGP_KEY_PASS: GPG key password
#
# PKG_UPLOAD_URL: URL to upload dCache release packages
# PKG_UPLOAD_USER: user name to use for authorization
# PKG_UPLOAD_PASS: password
# PKG_UPLOAD_OPTIONS: additional options to curl
#
# DOCKER_HUB_USER: user name on Docker Hub
# DOCKER_HUB_ACCESS_KEY: access key or password of the Docker Hub user
#
# KUBECONFIG: env file that contains the Kubernetes configuration to access the cluster
#
# The dCache deployment in Kubernetes is managed by the Helm chart at
# https://gitlab.desy.de/dcache/dcache-helm
#
# The Kubernetes-based jobs don't directly use any job artifacts, so artifact
# fetching is explicitly disabled by default (dependencies: []).
stages:
# build rpm, tar, deb, oci container
- build
# sign rpm, deb
- sign
# create k8s namespace
- testenv_pre
# deploy 3rd party infrastructure services
- test_infra
# deploy dCache helm chart
- test_deploy
# run tests
- testing
# clean k8s namespace, collect logs
- testenv_post
# on release, upload rpm, deb, tar, container
- upload
variables:
MAVEN_CLI_OPTS: "--batch-mode --errors --fail-at-end --show-version -DinstallAtEnd=true -DdeployAtEnd=true -Dmaven.repo.local=.m2/repository"
K8S_NAMESPACE: dcb-$CI_PIPELINE_ID
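# e.g. pipeline 12345 (hypothetical id) would get the namespace dcb-12345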
CHECK_TIMEOUT: --timeout=300s
HELM_OPTS: --replace --timeout 10m0s
AUTOCA_URL: https://ci.dcache.org/ca
DCACHE_HELM_REPO: https://gitlab.desy.de/api/v4/projects/7648/packages/helm/test
# log some details about the runner node executing the job, to ease debugging
before_script:
- |
set +x
echo "============== GitLab Agent =============="
uname -a
date
echo "Runner : $CI_RUNNER_DESCRIPTION"
echo "Runner id : $CI_RUNNER_ID"
echo "Runner version : $CI_RUNNER_VERSION"
echo "Runner revision : $CI_RUNNER_REVISION"
echo "Runner tags : $CI_RUNNER_TAGS"
echo "=========================================="
default:
retry:
max: 2
when:
- runner_system_failure
- api_failure
#
# default tags and image for testing stages/kubernetes/helm
#
.kubernetes_tags:
tags:
- kubernetes
- dcache-dev
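# an empty dependencies list keeps GitLab from downloading any job artifacts (see note above)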
dependencies: []
.kubernetes_image:
extends: .kubernetes_tags
image: bitnami/kubectl:latest
.helm_image:
extends: .kubernetes_tags
image:
name: dtzar/helm-kubectl:latest
entrypoint: ['']
#
# default cache configuration for maven build jobs
# Cache downloaded dependencies and plugins between builds.
# To keep cache across branches add 'key: "$CI_JOB_NAME"'
#
.build_cache:
cache:
key:
files:
- pom.xml
prefix: "$CI_JOB_NAME"
paths:
- ./.m2/repository
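#
# A branch-independent variant (sketch only, not used here) would drop the
# pom.xml file hash and key the cache on the job name alone:
#
# .build_cache:
#   cache:
#     key: "$CI_JOB_NAME"
#     paths:
#       - ./.m2/repository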
#
# default rules for upload stage
#
.upload_rules:
rules:
- if: $CI_COMMIT_TAG
rpm:
stage: build
image: dcache/maven-java17-rpm-build
extends: .build_cache
script:
- mvn $MAVEN_CLI_OPTS -Drun.slow.tests -am -pl packages/fhs -P rpm clean package
artifacts:
reports:
junit:
- "**/target/surefire-reports/TEST-*.xml"
- "**/target/failsafe-reports/TEST-*.xml"
paths:
- "packages/fhs/target/rpmbuild/RPMS/noarch/dcache*.rpm"
expire_in: 2 days
srm_client_rpm:
stage: build
image: dcache/maven-java17-rpm-build
extends: .build_cache
script:
- mvn $MAVEN_CLI_OPTS -DskipTests -am -pl modules/srm-client package -P rpm
artifacts:
paths:
- "modules/srm-client/target/rpmbuild/RPMS/noarch/dcache-srmclient*.rpm"
expire_in: 2 days
deb:
stage: build
image: dcache/maven-java17-deb-build
extends: .build_cache
script:
- mvn $MAVEN_CLI_OPTS -DskipTests -am -pl packages/fhs -P deb clean package
artifacts:
paths:
- "packages/fhs/target/dcache_*.deb"
expire_in: 2 days
tar:
stage: build
image: dcache/maven-java17-tar-build
extends: .build_cache
script:
- mvn $MAVEN_CLI_OPTS -DskipTests -am -pl packages/tar clean package
artifacts:
paths:
- "packages/tar/target/dcache-*.tar.gz"
expire_in: 2 days
spotbugs:
stage: build
image: dcache/maven-java17-tar-build
extends: .build_cache
rules:
- if: $CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH
script:
- dnf -y -q install jq
- mvn $MAVEN_CLI_OPTS -DskipTests -am -pl packages/tar package com.github.spotbugs:spotbugs-maven-plugin:4.8.3.0:spotbugs verify
- find . -name gl-code-quality-report.json -print | xargs cat | jq -s "add" > merged-gl-code-quality-report.json
artifacts:
reports:
codequality:
- merged-gl-code-quality-report.json
paths:
- merged-gl-code-quality-report.json
expire_in: 2 days
container:
stage: build
# For latest releases see https://github.com/GoogleContainerTools/kaniko/releases
# Only debug/*-debug versions of the Kaniko image are known to work within GitLab CI
image: gcr.io/kaniko-project/executor:debug
needs:
- tar
dependencies:
- tar
script:
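# release builds are tagged with the git tag, all other builds with the short commit sha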
- |-
tag=$CI_COMMIT_SHORT_SHA
if [[ -n "$CI_COMMIT_TAG" ]]; then
tag=$CI_COMMIT_TAG
fi
- mkdir maven
- tar -C maven --strip-components=1 -xzvf packages/tar/target/dcache-*.tar.gz
- cp $CI_PROJECT_DIR/packages/tar/src/main/container/* .
- mkdir -p /kaniko/.docker
- echo "{\"auths\":{\"$CI_REGISTRY\":{\"username\":\"$CI_REGISTRY_USER\",\"password\":\"$CI_REGISTRY_PASSWORD\"}}}" > /kaniko/.docker/config.json
- >
/kaniko/executor
--label dcache.build=testing
--context $CI_PROJECT_DIR
--dockerfile $CI_PROJECT_DIR/Dockerfile
--destination $CI_REGISTRY_IMAGE:$tag
#
# check packaged libraries: sort, drop the version suffix and check for non-unique dependencies
#
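# For example, a (hypothetical) tarball shipping both foo-1.2.jar and foo-2.0.jar
# would leave two identical "foo" entries after the sed, which `uniq -D` prints,
# making the job fail.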
Check for duplicate dependencies:
stage: build
image: almalinux:9-minimal
needs:
- tar
dependencies:
- tar
script:
- microdnf install -y tar gzip
- mkdir /dcache
- tar -C /dcache --strip-components=1 -xzf packages/tar/target/dcache-*.tar.gz
- ls -1 /dcache/share/classes/ | sort | sed -e 's/\-[0-9].*$//' | uniq -D > duplicate-dependencies.txt
- if [ -s duplicate-dependencies.txt ]; then cat duplicate-dependencies.txt; exit 1; fi
sign_rpm:
stage: sign
image: almalinux:9-minimal
needs: ["rpm"]
script:
- microdnf install -y rpm-sign
- echo $DCACHE_ORG_PGP_KEY | base64 -d -i > secret.gpg
- gpg --quiet --batch --yes --allow-secret-key-import --passphrase="$DCACHE_ORG_PGP_KEY_PASS" --import secret.gpg
- gpg -a --export "$DCACHE_ORG_KEY_NAME" > RPM-GPG-KEY
- rpmsign --addsign --define "_signature gpg" --define "_gpg_name $DCACHE_ORG_KEY_NAME" --define "_gpg_sign_cmd_extra_args --pinentry-mode loopback --passphrase $DCACHE_ORG_PGP_KEY_PASS" packages/fhs/target/rpmbuild/RPMS/noarch/dcache*.rpm
- rpmkeys --import RPM-GPG-KEY
- rpm --checksig -v packages/fhs/target/rpmbuild/RPMS/noarch/dcache*.rpm
artifacts:
paths:
- packages/fhs/target/rpmbuild/RPMS/noarch/dcache*.rpm
sign_srm_client_rpm:
stage: sign
image: almalinux:9-minimal
needs: ["srm_client_rpm"]
script:
- microdnf install -y rpm-sign
- echo $DCACHE_ORG_PGP_KEY | base64 -d -i > secret.gpg
- gpg --quiet --batch --yes --allow-secret-key-import --passphrase="$DCACHE_ORG_PGP_KEY_PASS" --import secret.gpg
- gpg -a --export "$DCACHE_ORG_KEY_NAME" > RPM-GPG-KEY
- rpmsign --addsign --define "_signature gpg" --define "_gpg_name $DCACHE_ORG_KEY_NAME" --define "_gpg_sign_cmd_extra_args --pinentry-mode loopback --passphrase $DCACHE_ORG_PGP_KEY_PASS" modules/srm-client/target/rpmbuild/RPMS/noarch/dcache-srmclient*.rpm
- rpmkeys --import RPM-GPG-KEY
- rpm --checksig -v modules/srm-client/target/rpmbuild/RPMS/noarch/dcache-srmclient*.rpm
artifacts:
paths:
- modules/srm-client/target/rpmbuild/RPMS/noarch/dcache-srmclient*.rpm
sign_deb:
stage: sign
image: ubuntu:22.04
needs: ["deb"]
script:
- apt-get -qq update
- apt-get -qq install debsigs gpg
- echo $DCACHE_ORG_PGP_KEY | base64 -d -i > secret.gpg
- gpg --quiet --batch --yes --allow-secret-key-import --passphrase="$DCACHE_ORG_PGP_KEY_PASS" --import secret.gpg
- echo $DCACHE_ORG_PGP_KEY_PASS > $HOME/.gnupg/gpg-passphrase
- echo "passphrase-file $HOME/.gnupg/gpg-passphrase" >> "$HOME/.gnupg/gpg.conf"
- echo 'allow-loopback-pinentry' >> "$HOME/.gnupg/gpg-agent.conf"
- echo 'pinentry-mode loopback' >> "$HOME/.gnupg/gpg.conf"
- echo 'use-agent' >> "$HOME/.gnupg/gpg.conf"
- echo RELOADAGENT | gpg-connect-agent
- debsigs --sign=origin --verify --check -v -k "$DCACHE_ORG_KEY_NAME" packages/fhs/target/dcache_*.deb
artifacts:
paths:
- packages/fhs/target/dcache_*.deb
RPM test install on EL9:
stage: test_deploy
image: almalinux:9
script:
- dnf --nogpgcheck install -y packages/fhs/target/rpmbuild/RPMS/noarch/dcache*.rpm
#install_deb:
# stage: test_deploy
# image: ubuntu:21.10
# script:
# - apt-get update
# - DEBIAN_FRONTEND=noninteractive apt install -y -f ./packages/fhs/target/dcache_*.deb
upload_rpm:
stage: upload
image: almalinux:9-minimal
dependencies:
- sign_rpm
extends: .upload_rules
script:
- RPM_NAME=`ls packages/fhs/target/rpmbuild/RPMS/noarch/ | grep dcache`
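# e.g. a (hypothetical) dcache-9.2.22-1.noarch.rpm yields VERSION=9.2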
- VERSION=`echo $RPM_NAME | cut -d'-' -f 2 | cut -d'.' -f 1,2`
- curl $PKG_UPLOAD_OPTIONS -u $PKG_UPLOAD_USER:$PKG_UPLOAD_PASS --upload-file packages/fhs/target/rpmbuild/RPMS/noarch/$RPM_NAME "$PKG_UPLOAD_URL/$VERSION/$RPM_NAME"
upload_srm_client_rpm:
stage: upload
image: almalinux:9-minimal
dependencies:
- sign_srm_client_rpm
extends: .upload_rules
script:
- RPM_NAME=`ls modules/srm-client/target/rpmbuild/RPMS/noarch/ | grep dcache-srmclient`
- VERSION=`echo $RPM_NAME | cut -d'-' -f 3 | cut -d'.' -f 1,2`
- curl $PKG_UPLOAD_OPTIONS -u $PKG_UPLOAD_USER:$PKG_UPLOAD_PASS --upload-file modules/srm-client/target/rpmbuild/RPMS/noarch/$RPM_NAME "$PKG_UPLOAD_URL/$VERSION/$RPM_NAME"
upload_deb:
stage: upload
image: almalinux:9-minimal
dependencies:
- sign_deb
extends: .upload_rules
script:
- DEB_NAME=`ls packages/fhs/target/ | grep dcache`
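# e.g. a (hypothetical) dcache_9.2.22-1_all.deb yields VERSION=9.2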
- VERSION=`echo $DEB_NAME | cut -d'_' -f 2 | cut -d'.' -f 1,2`
- curl $PKG_UPLOAD_OPTIONS -u $PKG_UPLOAD_USER:$PKG_UPLOAD_PASS --upload-file packages/fhs/target/$DEB_NAME "$PKG_UPLOAD_URL/$VERSION/$DEB_NAME"
upload_tar:
stage: upload
image: almalinux:9-minimal
dependencies:
- tar
extends: .upload_rules
script:
- TAR_NAME=`ls packages/tar/target/ | grep dcache`
- VERSION=`echo $TAR_NAME | cut -d'-' -f 2 | cut -d'.' -f 1,2`
- curl $PKG_UPLOAD_OPTIONS -u $PKG_UPLOAD_USER:$PKG_UPLOAD_PASS --upload-file packages/tar/target/$TAR_NAME "$PKG_UPLOAD_URL/$VERSION/$TAR_NAME"
upload_container:
stage: upload
# Cache downloaded dependencies and plugins between builds.
# To keep cache across branches add 'key: "$CI_JOB_NAME"'
# For latest releases see https://github.com/GoogleContainerTools/kaniko/releases
# Only debug/*-debug versions of the Kaniko image are known to work within GitLab CI
image: gcr.io/kaniko-project/executor:debug
dependencies:
- tar
extends: .upload_rules
script:
- |-
tag=$CI_COMMIT_SHORT_SHA
if [[ -n "$CI_COMMIT_TAG" ]]; then
tag=$CI_COMMIT_TAG
fi
- mkdir maven
- tar -C maven --strip-components=1 -xzvf packages/tar/target/dcache-*.tar.gz
- cp $CI_PROJECT_DIR/packages/tar/src/main/container/* .
- mkdir -p /kaniko/.docker
- echo "{\"auths\":{\"https://index.docker.io/v1/\":{\"username\":\"$DOCKER_HUB_USER\",\"password\":\"$DOCKER_HUB_ACCESS_KEY\"}}}" > /kaniko/.docker/config.json
- >
/kaniko/executor
--label dcache.build=GA
--context $CI_PROJECT_DIR
--dockerfile $CI_PROJECT_DIR/Dockerfile
--destination dcache/dcache:$tag
#
# This job requires that the number of changes fetched from GitLab when cloning the
# repository (GIT_DEPTH) is high enough to generate the changelog.
AI Generated Release Notes:
image: almalinux:9-minimal
stage: upload
extends: .upload_rules
allow_failure: true
dependencies:
- sign_deb
- sign_rpm
- sign_srm_client_rpm
- tar
script:
- microdnf install -y git-core
- git fetch --refetch --all --tags
- .ci/generate-changelog.sh >> release-$CI_COMMIT_TAG.md
- curl -L -o chatgpt https://github.com/kardolus/chatgpt-cli/releases/latest/download/chatgpt-linux-amd64 && chmod +x chatgpt
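# previous release tag; start of the commit range summarized by the LLM below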
- LAST_TAG=$(.ci/get-git-version.sh $CI_COMMIT_TAG)
- |-
git log $LAST_TAG..$CI_COMMIT_TAG | \
OPENAI_API_KEY=$LLM_API_KEY \
OPENAI_URL=$LLM_API_ENDPOINT \
OPENAI_MODEL=$LLM_MODEL \
OPENAI_COMPLETIONS_PATH=$LLM_COMPLETIONS_PATH \
OPENAI_ROLE="You are a helpful tech writer working on release notes of the dCache project." \
./chatgpt "$LLM_PROMPT" | \
tee -a release-$CI_COMMIT_TAG.md
artifacts:
paths:
- release-*.md
#
# prepare kubernetes env for the build
#
Prepare k8s environment:
stage: testenv_pre
extends: .kubernetes_image
script:
- kubectl create namespace ${K8S_NAMESPACE}
#
# collect all logs
#
Collect container logs:
stage: testenv_post
extends: .kubernetes_image
when: always
allow_failure: true
script:
- kubectl -n $K8S_NAMESPACE get pods | grep Running | awk '{print $1}' | xargs -n1 kubectl -n $K8S_NAMESPACE logs | tee $K8S_NAMESPACE.log
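# drain the "billing" Kafka topic (partition 0) into a file that is kept as a job artifact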
- kubectl -n $K8S_NAMESPACE run -i --rm --restart=Never -q --image=edenhill/kcat:1.7.1 kcat -- kcat -C -t billing -b billing-kafka:9092 -p 0 -e -q | tee $K8S_NAMESPACE-billing.json
artifacts:
name: "logs-$CI_PIPELINE_ID"
paths:
- "$K8S_NAMESPACE.log"
- "$K8S_NAMESPACE-billing.json"
#
# dispose kubernetes resources
#
Clean k8s environment:
stage: testenv_post
extends: .kubernetes_image
needs:
- Collect container logs
when: always
script:
- kubectl delete namespace ${K8S_NAMESPACE} --grace-period=1 --ignore-not-found=true
#
# infrastructure required to run dCache
#
Deploy PostgreSQL:
stage: test_infra
extends: .helm_image
script:
- helm repo add bitnami https://charts.bitnami.com/bitnami
- helm repo update
- helm -n ${K8S_NAMESPACE} install ${HELM_OPTS} --wait --set auth.username=dcache --set auth.password=let-me-in --set auth.database=chimera chimera bitnami/postgresql
#
# infrastructure required to run dCache
#
Deploy Zookeeper:
stage: test_infra
extends: .helm_image
script:
- helm repo add bitnami https://charts.bitnami.com/bitnami
- helm repo update
- helm -n ${K8S_NAMESPACE} install ${HELM_OPTS} --wait cells bitnami/zookeeper
#
# infrastructure required to run dCache
#
Deploy Kafka:
stage: test_infra
extends: .helm_image
needs:
- Deploy Zookeeper
script:
- helm repo add bitnami https://charts.bitnami.com/bitnami
- helm repo update
- helm -n ${K8S_NAMESPACE} install ${HELM_OPTS} --wait --set externalZookeeper.servers=cells-zookeeper --set kraft.enabled=false billing bitnami/kafka --version 23.0.7
#
# infrastructure required to run dCache
#
Deploy MinIO as Tape:
stage: test_infra
extends: .helm_image
script:
- helm repo add bitnami https://charts.bitnami.com/bitnami
- helm repo update
- helm -n ${K8S_NAMESPACE} install ${HELM_OPTS} --set-string auth.rootUser=dcache --set-string auth.rootPassword=let-me-in --set-string defaultBuckets="hsm" --wait tape bitnami/minio
#
# OIDC provider for token-based testing
#
Deploy Keycloak:
stage: test_infra
extends: .kubernetes_image
script:
- kubectl -n $K8S_NAMESPACE apply -f .ci/keycloack-deployment.yaml
# FIXME: add readiness check
# - while ! kubectl -n $K8S_NAMESPACE wait --for=condition=Ready deployment.apps/keycloak; do sleep 1; done
#
# Start the current dCache version together with pools from an old release
#
Helm-Deploy Current dCache build:
stage: test_deploy
extends: .helm_image
script:
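# deploy the image that this pipeline pushed to the GitLab registry (same tag logic as the container job)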
- |-
tag=$CI_COMMIT_SHORT_SHA
if [[ -n "$CI_COMMIT_TAG" ]]; then
tag=$CI_COMMIT_TAG
fi
- helm repo add dcache ${DCACHE_HELM_REPO}
- helm repo update
- helm -n ${K8S_NAMESPACE} install ${HELM_OPTS} --wait --set image.tag=${tag} --set dcache.hsm.enabled=true --set image.registry=${CI_REGISTRY} store dcache/dcache
Helm-Deploy Latest dCache Golden Release:
stage: test_deploy
extends: .helm_image
needs:
- Helm-Deploy Current dCache build
script:
- helm repo add dcache ${DCACHE_HELM_REPO}
- helm repo update
- helm -n ${K8S_NAMESPACE} install ${HELM_OPTS} --wait --set image.tag=9.2.20 --set "dcache.pools={d,f}" --set dcache.door.enabled=false --set image.registry=${CI_REGISTRY} old-store dcache/dcache
Grid EL9 WN tests:
stage: testing
extends: .kubernetes_image
script:
- kubectl -n $K8S_NAMESPACE apply -f .ci/wn-with-cvmfs.yaml
- while ! kubectl -n $K8S_NAMESPACE wait --for=condition=Ready pod grid-tester; do sleep 1; done
- kubectl -n $K8S_NAMESPACE cp .ci/init-el9-ui.sh grid-tester:/init-el9-ui.sh
- kubectl -n $K8S_NAMESPACE cp .ci/run-grid-tests.sh grid-tester:/run-grid-tests.sh
- kubectl -n $K8S_NAMESPACE exec grid-tester -- /bin/sh /run-grid-tests.sh
- kubectl -n $K8S_NAMESPACE cp grid-tester:/xunit .
artifacts:
reports:
junit:
- "xunit*.xml"
SRM S2 test suite:
stage: testing
extends: .kubernetes_image
allow_failure: true
script:
- kubectl -n $K8S_NAMESPACE apply -f .ci/s2-runner.yaml
- while ! kubectl -n $K8S_NAMESPACE wait --for=condition=Ready pod s2-tester; do sleep 1; done
- kubectl -n $K8S_NAMESPACE cp .ci/init-el9-ui.sh s2-tester:/init-el9-ui.sh
- kubectl -n $K8S_NAMESPACE cp .ci/run-s2.sh s2-tester:/run-s2.sh
- kubectl -n $K8S_NAMESPACE exec s2-tester -- /bin/sh /run-s2.sh
- kubectl -n $K8S_NAMESPACE cp s2-tester:/TEST-basic.xml TEST-basic.xml
- kubectl -n $K8S_NAMESPACE cp s2-tester:/TEST-avail.xml TEST-avail.xml
- kubectl -n $K8S_NAMESPACE cp s2-tester:/TEST-usecase.xml TEST-usecase.xml
artifacts:
reports:
junit:
- "TEST*.xml"
Frontend test suite:
stage: testing
extends: .kubernetes_image
script:
- kubectl -n $K8S_NAMESPACE apply -f .ci/frontend.yaml
- while ! kubectl -n $K8S_NAMESPACE wait --for=condition=Ready pod http-tester; do sleep 1; done
- kubectl -n $K8S_NAMESPACE cp .ci/poolEndpoint.http http-tester:/poolEndpoint.http
- kubectl -n $K8S_NAMESPACE cp .ci/qosEndpoint.http http-tester:/qosEndpoint.http
- kubectl -n $K8S_NAMESPACE cp .ci/qos-policyEndpoint.http http-tester:/qos-policyEndpoint.http
- kubectl -n $K8S_NAMESPACE cp README.md http-tester:/README.md
- kubectl -n $K8S_NAMESPACE cp .ci/migrationEndpoint.http http-tester:/migrationEndpoint.http
- kubectl -n $K8S_NAMESPACE cp http-client.private.env.json http-tester:/http-client.private.env.json
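# run the IntelliJ HTTP Client CLI against the pool REST endpoint and collect a junit report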
- kubectl -n $K8S_NAMESPACE exec http-tester -- java --add-opens=java.base/java.util=ALL-UNNAMED $IJHTTP_JAVA_OPTS -cp /intellij-http-client/\* com.intellij.httpClient.cli.HttpClientMain -e Test -D /poolEndpoint.http -p /http-client.private.env.json --insecure --report=/httpTests
- kubectl -n $K8S_NAMESPACE cp http-tester:/httpTests/report.xml pool-report.xml
artifacts:
reports:
junit:
- "*-report.xml"
gsi_xroot_tests:
stage: testing
extends: .kubernetes_image
script:
- kubectl -n $K8S_NAMESPACE apply -f .ci/wn-with-cvmfs-xroot.yaml
- while ! kubectl -n $K8S_NAMESPACE wait --for=condition=Ready pod xroot-tester; do sleep 1; done
- kubectl -n $K8S_NAMESPACE cp .ci/init-el9-ui.sh xroot-tester:/init-el9-ui.sh
- kubectl -n $K8S_NAMESPACE cp .ci/run-xroot-tests.sh xroot-tester:/run-xroot-tests.sh
- kubectl -n $K8S_NAMESPACE exec xroot-tester -- /bin/sh /run-xroot-tests.sh
webdav_with_x509_tests:
stage: testing
extends: .kubernetes_image
script:
- kubectl -n $K8S_NAMESPACE apply -f .ci/webdav-wn-cvmfs.yaml
- while ! kubectl -n $K8S_NAMESPACE wait --for=condition=Ready pod webdav-tester; do sleep 1; done
- kubectl -n $K8S_NAMESPACE cp .ci/init-el9-ui.sh webdav-tester:/init-el9-ui.sh
- kubectl -n $K8S_NAMESPACE cp .ci/run-webdav-tests.sh webdav-tester:/run-webdav-tests.sh
- kubectl -n $K8S_NAMESPACE exec webdav-tester -- /bin/sh /run-webdav-tests.sh
NFS4.x protocol compliance tests:
stage: testing
extends: .kubernetes_image
script:
- kubectl -n $K8S_NAMESPACE run pynfs-tester --image=dcache/pynfs:0.5 --restart=Never --command -- sleep 3600
- while ! kubectl -n $K8S_NAMESPACE wait --for=condition=Ready pod pynfs-tester; do sleep 1; done
- kubectl -n $K8S_NAMESPACE exec pynfs-tester -- /bin/bash -c "/run-nfs4.0.sh --maketree store-door-svc:/data OPEN5; exit 0"
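# run all v4.0 tests; the no<CODE> arguments deselect tests that are not expected to pass against dCache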
- |-
kubectl -n $K8S_NAMESPACE exec pynfs-tester -- /bin/bash -c "/run-nfs4.0.sh --xml=/xunit-report-v40.xml \
--noinit store-door-svc:/data all \
noACC2a noACC2b noACC2c noACC2d noACC2f noACC2r noACC2s \
noCID1 noCID2 noCID4a noCID4b noCID4c noCID4d noCID4e \
noCLOSE10 noCLOSE12 noCLOSE5 noCLOSE6 noCLOSE8 noCLOSE9 \
noCMT1aa noCMT1b noCMT1c noCMT1d noCMT1e noCMT1f noCMT2a noCMT2b noCMT2c noCMT2d noCMT2f \
noCMT2s noCMT3 noCMT4 noCR12 noLKT1 noLKT2a noLKT2b noLKT2c noLKT2d noLKT2f noLKT2s noLKT3 \
noLKT4 noLKT6 noLKT7 noLKT8 noLKT9 noLKU10 noLKU3 noLKU4 noLKU5 noLKU6 noLKU6b noLKU7 noLKU8 \
noLKU9 noLKUNONE noLOCK12a noLOCK12b noLOCK13 noLOCKRNG noLOCKCHGU noLOCKCHGD noRLOWN3 \
noOPCF1 noOPCF6 noOPDG2 noOPDG3 noOPDG6 noOPDG7 noOPEN15 noOPEN18 noOPEN2 noOPEN20 noOPEN22 \
noOPEN23 noOPEN24 noOPEN26 noOPEN27 noOPEN28 noOPEN3 noOPEN30 noOPEN4 noRENEW3 noRD1 noRD10 \
noRD2 noRD3 noRD5 noRD5a noRD6 noRD7a noRD7b noRD7c noRD7d noRD7f noRD7s noRDDR12 noRDDR11 \
noRPLY1 noRPLY10 noRPLY12 \
noRPLY14 noRPLY2 noRPLY3 noRPLY5 noRPLY6 noRPLY7 noRPLY8 noRPLY9 noSATT3d noSATT4 noSATT6d \
noSATT6r noSATT18 noSEC7 noWRT1 noWRT11 noWRT13 noWRT14 noWRT15 noWRT18 noWRT19 noWRT1b noWRT2 \
noWRT3 noWRT6a noWRT6b noWRT6c noWRT6d noWRT6f noWRT6s noWRT8 noWRT9; \
exit 0"
- |-
kubectl -n $K8S_NAMESPACE exec pynfs-tester -- /bin/bash -c "/run-nfs4.1.sh --minorversion=2 --xml=/xunit-report-v41.xml \
--noinit store-door-svc:/data all xattr \
noCOUR2 noCSESS25 noCSESS26 noCSESS27 noCSESS28 noCSESS29 noCSID3 noCSID4 noCSID9 noEID5f \
noEID50 noOPEN31 noSEQ6 noRECC3 noSEQ7 noSEQ10b noSEQ2 noXATT11 noXATT10 noALLOC1 noALLOC2 noALLOC3; \
exit 0"
- kubectl -n $K8S_NAMESPACE cp pynfs-tester:/xunit-report-v40.xml xunit-report-v40.xml
- kubectl -n $K8S_NAMESPACE cp pynfs-tester:/xunit-report-v41.xml xunit-report-v41.xml
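# pynfs is forced to exit 0 above, so derive the job status from the error/failure counters in the junit reports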
- nfs40_errors=$(( $(echo 0$(sed -n 's/.*testsuite .*errors=\"\([0-9]*\)\".*/+\1/p' xunit-report-v40.xml)) ))
- nfs40_failures=$(( $(echo 0$(sed -n 's/.*testsuite .*failures=\"\([0-9]*\)\".*/+\1/p' xunit-report-v40.xml)) ))
- nfs41_errors=$(( $(echo 0$(sed -n 's/.*testsuite .*errors=\"\([0-9]*\)\".*/+\1/p' xunit-report-v41.xml)) ))
- nfs41_failures=$(( $(echo 0$(sed -n 's/.*testsuite .*failures=\"\([0-9]*\)\".*/+\1/p' xunit-report-v41.xml)) ))
- exit $(( $nfs40_errors + $nfs41_errors + $nfs40_failures + $nfs41_failures ))
environment: testing
artifacts:
reports:
junit:
- "xunit*.xml"
Run OIDC test:
stage: testing
extends: .kubernetes_image
script:
- kubectl -n $K8S_NAMESPACE run oidc-tester --image=almalinux:9 --restart=Never --command -- sleep 3600
- while ! kubectl -n $K8S_NAMESPACE wait --for=condition=Ready pod oidc-tester; do sleep 1; done
- kubectl -n $K8S_NAMESPACE cp .ci/run-oidc-test.sh oidc-tester:/run-oidc-test.sh
- kubectl -n $K8S_NAMESPACE exec oidc-tester -- /bin/sh /run-oidc-test.sh