diff --git a/.github/policies/resourceManagement.yml b/.github/policies/resourceManagement.yml
index ad8ef417626..d104f8e857c 100644
--- a/.github/policies/resourceManagement.yml
+++ b/.github/policies/resourceManagement.yml
@@ -13,34 +13,37 @@ configuration:
- payloadType: Pull_Request
then:
- if:
- - includesModifiedFiles:
- files:
- - SPECS/
- - SPECS-EXTENDED/
- - SPECS-SIGNED/
+ - filesMatchPattern:
+ pattern: "^(SPECS|SPECS-EXTENDED|SPECS-SIGNED)/"
+ matchAny: true
then:
- addLabel:
label: Packaging
- if:
- - includesModifiedFiles:
- files:
- - toolkit/docs/
+ - filesMatchPattern:
+ pattern: "^SPECS-EXTENDED/"
+ matchAny: true
+ then:
+ - addLabel:
+ label: specs-extended
+ - if:
+ - filesMatchPattern:
+ pattern: "^toolkit/docs/"
+ matchAny: true
then:
- addLabel:
label: documentation
- if:
- - includesModifiedFiles:
- files:
- - toolkit/tools/
- - toolkit/scripts/
+ - filesMatchPattern:
+ pattern: "^toolkit/(tools|scripts)/"
+ matchAny: true
then:
- addLabel:
label: Tools
- if:
- - includesModifiedFiles:
- files:
- - toolkit/imageconfigs/
- - toolkit/tools/imagegen/configuration
+ - filesMatchPattern:
+ pattern: "^toolkit/(imageconfigs|tools/imagegen/configuration)/"
+ matchAny: true
then:
- addLabel:
label: Schema
@@ -69,16 +72,40 @@ configuration:
then:
- if:
- targetsBranch:
- branch: 1.0-dev
+ branch: main
then:
- addLabel:
- label: 1.0-dev
+ label: main
- if:
- targetsBranch:
- branch: main
+ branch: 2.0
then:
- addLabel:
- label: main
+ label: 2.0
+ - if:
+ - targetsBranch:
+ branch: 3.0-dev
+ then:
+ - addLabel:
+ label: 3.0-dev
+ - if:
+ - targetsBranch:
+ branch: 3.0
+ then:
+ - addLabel:
+ label: 3.0
+ - if:
+ - targetsBranch:
+ branch: fasttrack/2.0
+ then:
+ - addLabel:
+ label: fasttrack/2.0
+ - if:
+ - targetsBranch:
+ branch: fasttrack/3.0
+ then:
+ - addLabel:
+ label: fasttrack/3.0
description:
onFailure:
onSuccess:
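The policy hunks above swap path-prefix lists (includesModifiedFiles) for anchored regular expressions (filesMatchPattern), keeping the old Packaging behavior while adding a more specific specs-extended label. A minimal sketch of the intended matching, assuming matchAny: true means a label is applied when at least one changed file path matches; the rule table and function names below are illustrative only, not part of the policy engine:

    import re

    # Mirrors the label rules from the policy hunks above (illustrative only).
    RULES = [
        (re.compile(r"^(SPECS|SPECS-EXTENDED|SPECS-SIGNED)/"), "Packaging"),
        (re.compile(r"^SPECS-EXTENDED/"), "specs-extended"),
        (re.compile(r"^toolkit/docs/"), "documentation"),
        (re.compile(r"^toolkit/(tools|scripts)/"), "Tools"),
        (re.compile(r"^toolkit/(imageconfigs|tools/imagegen/configuration)/"), "Schema"),
    ]

    def labels_for(changed_files):
        """Return every label whose pattern matches at least one changed path."""
        return {label for pattern, label in RULES
                if any(pattern.search(path) for path in changed_files)}

    # A PR touching an extended spec and a tool would pick up three labels:
    print(sorted(labels_for(["SPECS-EXTENDED/foo/foo.spec", "toolkit/tools/imager/main.go"])))
    # ['Packaging', 'Tools', 'specs-extended']
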
diff --git a/LICENSES-AND-NOTICES/SPECS/LICENSES-MAP.md b/LICENSES-AND-NOTICES/SPECS/LICENSES-MAP.md
index bf93a05d117..a9942f9ccd1 100644
--- a/LICENSES-AND-NOTICES/SPECS/LICENSES-MAP.md
+++ b/LICENSES-AND-NOTICES/SPECS/LICENSES-MAP.md
@@ -9,7 +9,7 @@ The Azure Linux SPEC files originated from a variety of sources with varying lic
| Fedora (Copyright Remi Collet) | [CC-BY-SA 4.0](https://creativecommons.org/licenses/by-sa/4.0/legalcode) | libmemcached-awesome
librabbitmq |
| Fedora (ISC) | [ISC License](https://github.com/sarugaku/resolvelib/blob/main/LICENSE) | python-resolvelib |
| Magnus Edenhill Open Source | [Magnus Edenhill Open Source BSD License](https://github.com/jemalloc/jemalloc/blob/dev/COPYING) | librdkafka |
-| Microsoft | [Microsoft MIT License](/LICENSES-AND-NOTICES/LICENSE.md) | application-gateway-kubernetes-ingress
asc
azcopy
azure-iot-sdk-c
azure-nvme-utils
azure-storage-cpp
azurelinux-release
azurelinux-repos
azurelinux-rpm-macros
azurelinux-sysinfo
bazel
blobfuse2
bmon
bpftrace
ccache
cert-manager
cf-cli
check-restart
clamav
cloud-hypervisor-cvm
cmake-fedora
containerd
coredns
dcos-cli
debugedit
dejavu-fonts
distroless-packages
docker-buildx
docker-cli
docker-compose
doxygen
dtc
elixir
espeak-ng
espeakup
flannel
fluent-bit
freefont
gflags
gh
go-md2man
grpc
grub2-efi-binary-signed
GSL
gtk-update-icon-cache
helm
ig
intel-pf-bb-config
ivykis
jsonbuilder
jx
kata-containers-cc
kata-packages-uvm
keda
keras
kernel-64k-signed
kernel-signed
kernel-uki
kernel-uki-signed
kpatch
kube-vip-cloud-provider
kubernetes
libacvp
libconfini
libconfuse
libgdiplus
libmaxminddb
libmetalink
libsafec
libuv
libxml++
lld
local-path-provisioner
lsb-release
ltp
lttng-consume
mm-common
moby-containerd-cc
moby-engine
msgpack
ncompress
networkd-dispatcher
nlohmann-json
nmap
node-problem-detector
ntopng
opentelemetry-cpp
packer
pcaudiolib
pcre2
perl-Test-Warnings
perl-Text-Template
pigz
prebuilt-ca-certificates
prebuilt-ca-certificates-base
prometheus-adapter
python-cachetools
python-cherrypy
python-cstruct
python-execnet
python-google-pasta
python-libclang
python-libevdev
python-logutils
python-ml-dtypes
python-namex
python-nocasedict
python-omegaconf
python-opt-einsum
python-optree
python-pecan
python-pip
python-pyrpm
python-remoto
python-repoze-lru
python-routes
python-rsa
python-setuptools
python-sphinxcontrib-websupport
python-tensorboard
python-tensorboard-plugin-wit
python-yamlloader
R
rabbitmq-server
rocksdb
rubygem-addressable
rubygem-asciidoctor
rubygem-async
rubygem-async-http
rubygem-async-io
rubygem-async-pool
rubygem-bindata
rubygem-concurrent-ruby
rubygem-connection_pool
rubygem-console
rubygem-cool.io
rubygem-deep_merge
rubygem-digest-crc
rubygem-elastic-transport
rubygem-elasticsearch
rubygem-elasticsearch-api
rubygem-eventmachine
rubygem-excon
rubygem-faraday
rubygem-faraday-em_http
rubygem-faraday-em_synchrony
rubygem-faraday-excon
rubygem-faraday-httpclient
rubygem-faraday-multipart
rubygem-faraday-net_http
rubygem-faraday-net_http_persistent
rubygem-faraday-patron
rubygem-faraday-rack
rubygem-faraday-retry
rubygem-ffi
rubygem-fiber-local
rubygem-fluent-config-regexp-type
rubygem-fluent-logger
rubygem-fluent-plugin-elasticsearch
rubygem-fluent-plugin-kafka
rubygem-fluent-plugin-prometheus
rubygem-fluent-plugin-prometheus_pushgateway
rubygem-fluent-plugin-record-modifier
rubygem-fluent-plugin-rewrite-tag-filter
rubygem-fluent-plugin-systemd
rubygem-fluent-plugin-webhdfs
rubygem-fluent-plugin-windows-exporter
rubygem-fluentd
rubygem-hirb
rubygem-hocon
rubygem-hoe
rubygem-http_parser
rubygem-httpclient
rubygem-io-event
rubygem-jmespath
rubygem-ltsv
rubygem-mini_portile2
rubygem-minitest
rubygem-mocha
rubygem-msgpack
rubygem-multi_json
rubygem-multipart-post
rubygem-net-http-persistent
rubygem-nio4r
rubygem-nokogiri
rubygem-oj
rubygem-parallel
rubygem-power_assert
rubygem-prometheus-client
rubygem-protocol-hpack
rubygem-protocol-http
rubygem-protocol-http1
rubygem-protocol-http2
rubygem-public_suffix
rubygem-puppet-resource_api
rubygem-rdiscount
rubygem-rdkafka
rubygem-rexml
rubygem-ruby-kafka
rubygem-ruby-progressbar
rubygem-rubyzip
rubygem-semantic_puppet
rubygem-serverengine
rubygem-sigdump
rubygem-strptime
rubygem-systemd-journal
rubygem-test-unit
rubygem-thor
rubygem-timers
rubygem-tzinfo
rubygem-tzinfo-data
rubygem-webhdfs
rubygem-webrick
rubygem-yajl-ruby
rubygem-zip-zip
runc
sdbus-cpp
sgx-backwards-compatibility
shim
skopeo
span-lite
sriov-network-device-plugin
SymCrypt
SymCrypt-OpenSSL
systemd-boot-signed
tensorflow
tinyxml2
toml11
tracelogging
umoci
usrsctp
vala
valkey
vnstat
zstd |
+| Microsoft | [Microsoft MIT License](/LICENSES-AND-NOTICES/LICENSE.md) | application-gateway-kubernetes-ingress
asc
azcopy
azure-iot-sdk-c
azure-nvme-utils
azure-storage-cpp
azurelinux-release
azurelinux-repos
azurelinux-rpm-macros
azurelinux-sysinfo
bazel
blobfuse2
bmon
bpftrace
ccache
cert-manager
cf-cli
check-restart
clamav
cloud-hypervisor-cvm
cmake-fedora
containerd
containerd2
coredns
dcos-cli
debugedit
dejavu-fonts
distroless-packages
docker-buildx
docker-cli
docker-compose
doxygen
dtc
elixir
espeak-ng
espeakup
flannel
fluent-bit
freefont
gflags
gh
go-md2man
grpc
grub2-efi-binary-signed
GSL
gtk-update-icon-cache
helm
ig
intel-pf-bb-config
ivykis
jsonbuilder
jx
kata-containers-cc
kata-packages-uvm
keda
keras
kernel-64k-signed
kernel-signed
kernel-uki
kernel-uki-signed
kpatch
kube-vip-cloud-provider
kubernetes
libacvp
libconfini
libconfuse
libgdiplus
libmaxminddb
libmetalink
libsafec
libuv
libxml++
lld
local-path-provisioner
lsb-release
ltp
lttng-consume
mm-common
moby-containerd-cc
moby-engine
msgpack
ncompress
networkd-dispatcher
nlohmann-json
nmap
node-problem-detector
ntopng
opentelemetry-cpp
packer
pcaudiolib
pcre2
perl-Test-Warnings
perl-Text-Template
pigz
prebuilt-ca-certificates
prebuilt-ca-certificates-base
prometheus-adapter
python-cachetools
python-cherrypy
python-cstruct
python-execnet
python-google-pasta
python-libclang
python-libevdev
python-logutils
python-ml-dtypes
python-namex
python-nocasedict
python-omegaconf
python-opt-einsum
python-optree
python-pecan
python-pip
python-pyrpm
python-remoto
python-repoze-lru
python-routes
python-rsa
python-setuptools
python-sphinxcontrib-websupport
python-tensorboard
python-tensorboard-plugin-wit
python-yamlloader
R
rabbitmq-server
rocksdb
rubygem-addressable
rubygem-asciidoctor
rubygem-async
rubygem-async-http
rubygem-async-io
rubygem-async-pool
rubygem-bindata
rubygem-concurrent-ruby
rubygem-connection_pool
rubygem-console
rubygem-cool.io
rubygem-deep_merge
rubygem-digest-crc
rubygem-elastic-transport
rubygem-elasticsearch
rubygem-elasticsearch-api
rubygem-eventmachine
rubygem-excon
rubygem-faraday
rubygem-faraday-em_http
rubygem-faraday-em_synchrony
rubygem-faraday-excon
rubygem-faraday-httpclient
rubygem-faraday-multipart
rubygem-faraday-net_http
rubygem-faraday-net_http_persistent
rubygem-faraday-patron
rubygem-faraday-rack
rubygem-faraday-retry
rubygem-ffi
rubygem-fiber-local
rubygem-fluent-config-regexp-type
rubygem-fluent-logger
rubygem-fluent-plugin-elasticsearch
rubygem-fluent-plugin-kafka
rubygem-fluent-plugin-prometheus
rubygem-fluent-plugin-prometheus_pushgateway
rubygem-fluent-plugin-record-modifier
rubygem-fluent-plugin-rewrite-tag-filter
rubygem-fluent-plugin-systemd
rubygem-fluent-plugin-webhdfs
rubygem-fluent-plugin-windows-exporter
rubygem-fluentd
rubygem-hirb
rubygem-hocon
rubygem-hoe
rubygem-http_parser
rubygem-httpclient
rubygem-io-event
rubygem-jmespath
rubygem-ltsv
rubygem-mini_portile2
rubygem-minitest
rubygem-mocha
rubygem-msgpack
rubygem-multi_json
rubygem-multipart-post
rubygem-net-http-persistent
rubygem-nio4r
rubygem-nokogiri
rubygem-oj
rubygem-parallel
rubygem-power_assert
rubygem-prometheus-client
rubygem-protocol-hpack
rubygem-protocol-http
rubygem-protocol-http1
rubygem-protocol-http2
rubygem-public_suffix
rubygem-puppet-resource_api
rubygem-rdiscount
rubygem-rdkafka
rubygem-rexml
rubygem-ruby-kafka
rubygem-ruby-progressbar
rubygem-rubyzip
rubygem-semantic_puppet
rubygem-serverengine
rubygem-sigdump
rubygem-strptime
rubygem-systemd-journal
rubygem-test-unit
rubygem-thor
rubygem-timers
rubygem-tzinfo
rubygem-tzinfo-data
rubygem-webhdfs
rubygem-webrick
rubygem-yajl-ruby
rubygem-zip-zip
runc
sdbus-cpp
sgx-backwards-compatibility
shim
skopeo
span-lite
sriov-network-device-plugin
SymCrypt
SymCrypt-OpenSSL
systemd-boot-signed
tensorflow
tinyxml2
toml11
tracelogging
umoci
usrsctp
vala
valkey
vnstat
zstd |
| Netplan source | [GPLv3](https://github.com/canonical/netplan/blob/main/COPYING) | netplan |
| Numad source | [LGPLv2 License](https://www.gnu.org/licenses/old-licenses/lgpl-2.1.txt) | numad |
| NVIDIA | [ASL 2.0 License and spec specific licenses](http://www.apache.org/licenses/LICENSE-2.0) | libnvidia-container
mlnx-tools
mlx-bootctl
nvidia-container-toolkit
ofed-scripts
perftest |
diff --git a/LICENSES-AND-NOTICES/SPECS/data/licenses.json b/LICENSES-AND-NOTICES/SPECS/data/licenses.json
index 51aec95013e..80f62df89c0 100644
--- a/LICENSES-AND-NOTICES/SPECS/data/licenses.json
+++ b/LICENSES-AND-NOTICES/SPECS/data/licenses.json
@@ -2223,6 +2223,7 @@
"cloud-hypervisor-cvm",
"cmake-fedora",
"containerd",
+ "containerd2",
"coredns",
"dcos-cli",
"debugedit",
diff --git a/SPECS-SIGNED/kernel-64k-signed/kernel-64k-signed.spec b/SPECS-SIGNED/kernel-64k-signed/kernel-64k-signed.spec
index a6ab283c372..993e94dfb6f 100644
--- a/SPECS-SIGNED/kernel-64k-signed/kernel-64k-signed.spec
+++ b/SPECS-SIGNED/kernel-64k-signed/kernel-64k-signed.spec
@@ -7,7 +7,7 @@
Summary: Signed Linux Kernel for %{buildarch} systems
Name: kernel-64k-signed-%{buildarch}
Version: 6.6.57.1
-Release: 6%{?dist}
+Release: 7%{?dist}
License: GPLv2
Vendor: Microsoft Corporation
Distribution: Azure Linux
@@ -105,6 +105,9 @@ echo "initrd of kernel %{uname_r} removed" >&2
%exclude /module_info.ld
%changelog
+* Sun Dec 22 2024 Ankita Pareek - 6.6.57.1-7
+- Bump release to match kernel
+
* Wed Dec 18 2024 Rachel Menge - 6.6.57.1-6
- Bump release to match kernel-64k
diff --git a/SPECS-SIGNED/kernel-signed/kernel-signed.spec b/SPECS-SIGNED/kernel-signed/kernel-signed.spec
index b45ec622577..51bf71bb822 100644
--- a/SPECS-SIGNED/kernel-signed/kernel-signed.spec
+++ b/SPECS-SIGNED/kernel-signed/kernel-signed.spec
@@ -10,7 +10,7 @@
Summary: Signed Linux Kernel for %{buildarch} systems
Name: kernel-signed-%{buildarch}
Version: 6.6.57.1
-Release: 6%{?dist}
+Release: 7%{?dist}
License: GPLv2
Vendor: Microsoft Corporation
Distribution: Azure Linux
@@ -145,6 +145,9 @@ echo "initrd of kernel %{uname_r} removed" >&2
%exclude /module_info.ld
%changelog
+* Sun Dec 22 2024 Ankita Pareek - 6.6.57.1-7
+- Bump release to match kernel
+
* Wed Dec 18 2024 Rachel Menge - 6.6.57.1-6
- Bump release to match kernel-64k
diff --git a/SPECS-SIGNED/kernel-uki-signed/kernel-uki-signed.spec b/SPECS-SIGNED/kernel-uki-signed/kernel-uki-signed.spec
index 3dc88f1e79d..bcf7c0c4cb1 100644
--- a/SPECS-SIGNED/kernel-uki-signed/kernel-uki-signed.spec
+++ b/SPECS-SIGNED/kernel-uki-signed/kernel-uki-signed.spec
@@ -6,7 +6,7 @@
Summary: Signed Unified Kernel Image for %{buildarch} systems
Name: kernel-uki-signed-%{buildarch}
Version: 6.6.57.1
-Release: 6%{?dist}
+Release: 7%{?dist}
License: GPLv2
Vendor: Microsoft Corporation
Distribution: Azure Linux
@@ -68,6 +68,9 @@ popd
/boot/efi/EFI/Linux/vmlinuz-uki-%{kernelver}.efi
%changelog
+* Sun Dec 22 2024 Ankita Pareek - 6.6.57.1-7
+- Bump release to match kernel
+
* Wed Dec 18 2024 Rachel Menge - 6.6.57.1-6
- Bump release to match kernel-64k
diff --git a/SPECS/avahi/CVE-2023-38469.patch b/SPECS/avahi/CVE-2023-38469.patch
new file mode 100644
index 00000000000..58583f58428
--- /dev/null
+++ b/SPECS/avahi/CVE-2023-38469.patch
@@ -0,0 +1,43 @@
+From c89fd5f2e85052f1f8b74ddeff38235932236889 Mon Sep 17 00:00:00 2001
+From: Kanishk-Bansal
+Date: Wed, 27 Nov 2024 08:48:59 +0000
+Subject: [PATCH] Fix CVE patch
+
+---
+ avahi-core/rr.c | 9 ++++++++-
+ 1 file changed, 8 insertions(+), 1 deletion(-)
+
+diff --git a/avahi-core/rr.c b/avahi-core/rr.c
+index 7fa0bee..b03a24c 100644
+--- a/avahi-core/rr.c
++++ b/avahi-core/rr.c
+@@ -32,6 +32,7 @@
+ #include
+ #include
+
++#include "dns.h"
+ #include "rr.h"
+ #include "log.h"
+ #include "util.h"
+@@ -688,11 +689,17 @@ int avahi_record_is_valid(AvahiRecord *r) {
+ case AVAHI_DNS_TYPE_TXT: {
+
+ AvahiStringList *strlst;
++ size_t used = 0;
+
+- for (strlst = r->data.txt.string_list; strlst; strlst = strlst->next)
++ for (strlst = r->data.txt.string_list; strlst; strlst = strlst->next) {
+ if (strlst->size > 255 || strlst->size <= 0)
+ return 0;
+
++ used += 1+strlst->size;
++ if (used > AVAHI_DNS_RDATA_MAX)
++ return 0;
++ }
++
+ return 1;
+ }
+ }
+--
+2.45.2
+
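The added loop in rr.c bounds the total encoded size of a TXT record rather than only each string: every string costs one length byte plus its payload, and the running total must stay within the DNS RDATA limit. A rough Python rendering of the same check; the 0xFFFF value for AVAHI_DNS_RDATA_MAX is an assumption taken from the avahi headers, not from this patch:

    AVAHI_DNS_RDATA_MAX = 0xFFFF  # assumed value of the avahi constant

    def txt_record_is_valid(strings):
        """Rough equivalent of the patched TXT branch of avahi_record_is_valid()."""
        used = 0
        for s in strings:
            if not 0 < len(s) <= 255:       # pre-existing per-string limit
                return False
            used += 1 + len(s)              # one length byte plus payload
            if used > AVAHI_DNS_RDATA_MAX:  # new overall RDATA cap
                return False
        return True

    print(txt_record_is_valid([b"path=/", b"vers=1"]))   # True
    print(txt_record_is_valid([b"x" * 255] * 300))       # False: total exceeds the cap
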
diff --git a/SPECS/avahi/CVE-2023-38470.patch b/SPECS/avahi/CVE-2023-38470.patch
new file mode 100644
index 00000000000..e3ab0783944
--- /dev/null
+++ b/SPECS/avahi/CVE-2023-38470.patch
@@ -0,0 +1,51 @@
+From cc5f44eb015384d8c764646c48b9da80f811446c Mon Sep 17 00:00:00 2001
+From: Kanishk-Bansal
+Date: Mon, 2 Dec 2024 10:25:43 +0000
+Subject: [PATCH] Fix CVE-2023-38470
+
+---
+ avahi-common/domain-test.c | 14 ++++++++++++++
+ avahi-common/domain.c | 2 +-
+ 2 files changed, 15 insertions(+), 1 deletion(-)
+
+diff --git a/avahi-common/domain-test.c b/avahi-common/domain-test.c
+index cf763ec..3acc1c1 100644
+--- a/avahi-common/domain-test.c
++++ b/avahi-common/domain-test.c
+@@ -45,6 +45,20 @@ int main(AVAHI_GCC_UNUSED int argc, AVAHI_GCC_UNUSED char *argv[]) {
+ printf("%s\n", s = avahi_normalize_name_strdup("fo\\\\o\\..f oo."));
+ avahi_free(s);
+
++ printf("%s\n", s = avahi_normalize_name_strdup("."));
++ avahi_free(s);
++
++ s = avahi_normalize_name_strdup(",.=.}.=.?-.}.=.?.?.}.}.?.?.?.z.?.?.}.}."
++ "}.?.?.?.r.=.=.}.=.?.}}.}.?.?.?.zM.=.=.?.?.}.}.?.?.}.}.}"
++ ".?.?.?.r.=.=.}.=.?.}}.}.?.?.?.zM.=.=.?.?.}.}.?.?.?.zM.?`"
++ "?.}.}.}.?.?.?.r.=.?.}.=.?.?.}.?.?.?.}.=.?.?.}??.}.}.?.?."
++ "?.z.?.?.}.}.}.?.?.?.r.=.=.}.=.?.}}.}.?.?.?.zM.?`?.}.}.}."
++ "??.?.zM.?`?.}.}.}.?.?.?.r.=.?.}.=.?.?.}.?.?.?.}.=.?.?.}?"
++ "?.}.}.?.?.?.z.?.?.}.}.}.?.?.?.r.=.=.}.=.?.}}.}.?.?.?.zM."
++ "?`?.}.}.}.?.?.?.r.=.=.?.?`.?.?}.}.}.?.?.?.r.=.?.}.=.?.?."
++ "}.?.?.?.}.=.?.?.}");
++ assert(s == NULL);
++
+ printf("%i\n", avahi_domain_equal("\\065aa bbb\\.\\046cc.cc\\\\.dee.fff.", "Aaa BBB\\.\\.cc.cc\\\\.dee.fff"));
+ printf("%i\n", avahi_domain_equal("A", "a"));
+
+diff --git a/avahi-common/domain.c b/avahi-common/domain.c
+index 3b1ab68..e66d241 100644
+--- a/avahi-common/domain.c
++++ b/avahi-common/domain.c
+@@ -201,7 +201,7 @@ char *avahi_normalize_name(const char *s, char *ret_s, size_t size) {
+ }
+
+ if (!empty) {
+- if (size < 1)
++ if (size < 2)
+ return NULL;
+
+ *(r++) = '.';
+--
+2.45.2
+
diff --git a/SPECS/avahi/CVE-2023-38471.patch b/SPECS/avahi/CVE-2023-38471.patch
new file mode 100644
index 00000000000..00c414826e8
--- /dev/null
+++ b/SPECS/avahi/CVE-2023-38471.patch
@@ -0,0 +1,63 @@
+From 48467feda7135e3fa2392294387601f88a06f001 Mon Sep 17 00:00:00 2001
+From: Kanishk-Bansal
+Date: Mon, 2 Dec 2024 10:49:17 +0000
+Subject: [PATCH] Fix CVE-2023-38471 patch
+
+---
+ avahi-core/server.c | 27 +++++++++++++++++++++------
+ 1 file changed, 21 insertions(+), 6 deletions(-)
+
+diff --git a/avahi-core/server.c b/avahi-core/server.c
+index e507750..40f1d68 100644
+--- a/avahi-core/server.c
++++ b/avahi-core/server.c
+@@ -1295,7 +1295,11 @@ static void update_fqdn(AvahiServer *s) {
+ }
+
+ int avahi_server_set_host_name(AvahiServer *s, const char *host_name) {
+- char *hn = NULL;
++ char label_escaped[AVAHI_LABEL_MAX*4+1];
++ char label[AVAHI_LABEL_MAX];
++ char *hn = NULL, *h;
++ size_t len;
++
+ assert(s);
+
+ AVAHI_CHECK_VALIDITY(s, !host_name || avahi_is_valid_host_name(host_name), AVAHI_ERR_INVALID_HOST_NAME);
+@@ -1305,17 +1309,28 @@ int avahi_server_set_host_name(AvahiServer *s, const char *host_name) {
+ else
+ hn = avahi_normalize_name_strdup(host_name);
+
+- hn[strcspn(hn, ".")] = 0;
++ h = hn;
++ if (!avahi_unescape_label((const char **)&hn, label, sizeof(label))) {
++ avahi_free(h);
++ return AVAHI_ERR_INVALID_HOST_NAME;
++ }
++
++ avahi_free(h);
++
++ h = label_escaped;
++ len = sizeof(label_escaped);
++ if (!avahi_escape_label(label, strlen(label), &h, &len))
++ return AVAHI_ERR_INVALID_HOST_NAME;
+
+- if (avahi_domain_equal(s->host_name, hn) && s->state != AVAHI_SERVER_COLLISION) {
+- avahi_free(hn);
++ if (avahi_domain_equal(s->host_name, label_escaped) && s->state != AVAHI_SERVER_COLLISION)
+ return avahi_server_set_errno(s, AVAHI_ERR_NO_CHANGE);
+- }
+
+ withdraw_host_rrs(s);
+
+ avahi_free(s->host_name);
+- s->host_name = hn;
++ s->host_name = avahi_strdup(label_escaped);
++ if (!s->host_name)
++ return AVAHI_ERR_NO_MEMORY;
+
+ update_fqdn(s);
+
+--
+2.45.2
+
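The server.c change stops truncating the host name at the first unescaped dot (the old hn[strcspn(hn, ".")] = 0) and instead unescapes the first label, then re-escapes it before storing. A simplified Python sketch of why that matters; it handles only backslash-dot and backslash-backslash escapes and is not avahi's full escaping logic:

    def first_label_unescape(name):
        """Return the first label with simple escapes resolved."""
        label, i = [], 0
        while i < len(name) and name[i] != ".":
            if name[i] == "\\" and i + 1 < len(name):
                label.append(name[i + 1])
                i += 2
            else:
                label.append(name[i])
                i += 1
        return "".join(label)

    def escape_label(label):
        """Re-escape dots and backslashes so the result reads as one label again."""
        return "".join("\\" + c if c in ".\\" else c for c in label)

    raw = "foo\\.bar.example.org"                    # first label is literally "foo.bar"
    print(first_label_unescape(raw))                 # foo.bar
    print(escape_label(first_label_unescape(raw)))   # foo\.bar  (what the patch stores)
    # Naive truncation at the first '.' would have kept "foo\", a dangling escape.
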
diff --git a/SPECS/avahi/CVE-2023-38472.patch b/SPECS/avahi/CVE-2023-38472.patch
new file mode 100644
index 00000000000..741168dbab0
--- /dev/null
+++ b/SPECS/avahi/CVE-2023-38472.patch
@@ -0,0 +1,40 @@
+From 2cc17a0febc2c1f70db147d9d56861f3520bacad Mon Sep 17 00:00:00 2001
+From: Kanishk-Bansal
+Date: Mon, 2 Dec 2024 04:44:07 +0000
+Subject: [PATCH] Fix CVE patch
+
+---
+ avahi-client/client-test.c | 3 +++
+ avahi-daemon/dbus-entry-group.c | 2 +-
+ 2 files changed, 4 insertions(+), 1 deletion(-)
+
+diff --git a/avahi-client/client-test.c b/avahi-client/client-test.c
+index 7d04a6a..57750a4 100644
+--- a/avahi-client/client-test.c
++++ b/avahi-client/client-test.c
+@@ -258,6 +258,9 @@ int main (AVAHI_GCC_UNUSED int argc, AVAHI_GCC_UNUSED char *argv[]) {
+ printf("%s\n", avahi_strerror(avahi_entry_group_add_service (group, AVAHI_IF_UNSPEC, AVAHI_PROTO_UNSPEC, 0, "Lathiat's Site", "_http._tcp", NULL, NULL, 80, "foo=bar", NULL)));
+ printf("add_record: %d\n", avahi_entry_group_add_record (group, AVAHI_IF_UNSPEC, AVAHI_PROTO_UNSPEC, 0, "TestX", 0x01, 0x10, 120, "\5booya", 6));
+
++ error = avahi_entry_group_add_record (group, AVAHI_IF_UNSPEC, AVAHI_PROTO_UNSPEC, 0, "TestX", 0x01, 0x10, 120, "", 0);
++ assert(error != AVAHI_OK);
++
+ avahi_entry_group_commit (group);
+
+ domain = avahi_domain_browser_new (avahi, AVAHI_IF_UNSPEC, AVAHI_PROTO_UNSPEC, NULL, AVAHI_DOMAIN_BROWSER_BROWSE, 0, avahi_domain_browser_callback, (char*) "omghai3u");
+diff --git a/avahi-daemon/dbus-entry-group.c b/avahi-daemon/dbus-entry-group.c
+index 4e879a5..aa23d4b 100644
+--- a/avahi-daemon/dbus-entry-group.c
++++ b/avahi-daemon/dbus-entry-group.c
+@@ -340,7 +340,7 @@ DBusHandlerResult avahi_dbus_msg_entry_group_impl(DBusConnection *c, DBusMessage
+ if (!(r = avahi_record_new_full (name, clazz, type, ttl)))
+ return avahi_dbus_respond_error(c, m, AVAHI_ERR_NO_MEMORY, NULL);
+
+- if (avahi_rdata_parse (r, rdata, size) < 0) {
++ if (!rdata || avahi_rdata_parse (r, rdata, size) < 0) {
+ avahi_record_unref (r);
+ return avahi_dbus_respond_error(c, m, AVAHI_ERR_INVALID_RDATA, NULL);
+ }
+--
+2.45.2
+
diff --git a/SPECS/avahi/CVE-2023-38473.patch b/SPECS/avahi/CVE-2023-38473.patch
new file mode 100644
index 00000000000..a59479fb686
--- /dev/null
+++ b/SPECS/avahi/CVE-2023-38473.patch
@@ -0,0 +1,101 @@
+From e6348a0e1f1f42547dce80135afea806125654cc Mon Sep 17 00:00:00 2001
+From: Kanishk-Bansal
+Date: Mon, 2 Dec 2024 09:20:54 +0000
+Subject: [PATCH] Fix CVE-2023-38473
+
+---
+ avahi-common/alternative-test.c | 3 +++
+ avahi-common/alternative.c | 27 +++++++++++++++++++--------
+ 2 files changed, 22 insertions(+), 8 deletions(-)
+
+diff --git a/avahi-common/alternative-test.c b/avahi-common/alternative-test.c
+index 9255435..681fc15 100644
+--- a/avahi-common/alternative-test.c
++++ b/avahi-common/alternative-test.c
+@@ -31,6 +31,9 @@ int main(AVAHI_GCC_UNUSED int argc, AVAHI_GCC_UNUSED char *argv[]) {
+ const char* const test_strings[] = {
+ "XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX",
+ "XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXüüüüüüü",
++ ").",
++ "\\.",
++ "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\\\\",
+ "gurke",
+ "-",
+ " #",
+diff --git a/avahi-common/alternative.c b/avahi-common/alternative.c
+index b3d39f0..a094e6d 100644
+--- a/avahi-common/alternative.c
++++ b/avahi-common/alternative.c
+@@ -49,15 +49,20 @@ static void drop_incomplete_utf8(char *c) {
+ }
+
+ char *avahi_alternative_host_name(const char *s) {
++ char label[AVAHI_LABEL_MAX], alternative[AVAHI_LABEL_MAX*4+1];
++ char *alt, *r, *ret;
+ const char *e;
+- char *r;
++ size_t len;
+
+ assert(s);
+
+ if (!avahi_is_valid_host_name(s))
+ return NULL;
+
+- if ((e = strrchr(s, '-'))) {
++ if (!avahi_unescape_label(&s, label, sizeof(label)))
++ return NULL;
++
++ if ((e = strrchr(label, '-'))) {
+ const char *p;
+
+ e++;
+@@ -74,19 +79,18 @@ char *avahi_alternative_host_name(const char *s) {
+
+ if (e) {
+ char *c, *m;
+- size_t l;
+ int n;
+
+ n = atoi(e)+1;
+ if (!(m = avahi_strdup_printf("%i", n)))
+ return NULL;
+
+- l = e-s-1;
++ len = e-label-1;
+
+- if (l >= AVAHI_LABEL_MAX-1-strlen(m)-1)
+- l = AVAHI_LABEL_MAX-1-strlen(m)-1;
++ if (len >= AVAHI_LABEL_MAX-1-strlen(m)-1)
++ len = AVAHI_LABEL_MAX-1-strlen(m)-1;
+
+- if (!(c = avahi_strndup(s, l))) {
++ if (!(c = avahi_strndup(label, len))) {
+ avahi_free(m);
+ return NULL;
+ }
+@@ -100,7 +104,7 @@ char *avahi_alternative_host_name(const char *s) {
+ } else {
+ char *c;
+
+- if (!(c = avahi_strndup(s, AVAHI_LABEL_MAX-1-2)))
++ if (!(c = avahi_strndup(label, AVAHI_LABEL_MAX-1-2)))
+ return NULL;
+
+ drop_incomplete_utf8(c);
+@@ -109,6 +113,13 @@ char *avahi_alternative_host_name(const char *s) {
+ avahi_free(c);
+ }
+
++ alt = alternative;
++ len = sizeof(alternative);
++ ret = avahi_escape_label(r, strlen(r), &alt, &len);
++
++ avahi_free(r);
++ r = avahi_strdup(ret);
++
+ assert(avahi_is_valid_host_name(r));
+
+ return r;
+--
+2.45.2
+
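The alternative.c change computes the "-2", "-3", ... suffix on the unescaped label and re-escapes the result instead of operating on the escaped string. The underlying counter-and-truncate logic, sketched in Python (simplified: no UTF-8 trimming and no escaping, which are exactly the parts the patch adds around it; AVAHI_LABEL_MAX = 64 is an assumption from the avahi headers):

    LABEL_MAX = 64  # assumed AVAHI_LABEL_MAX: 63 label bytes plus a trailing NUL

    def alternative_label(label):
        """Append or bump a trailing -<n> counter, keeping the label within 63 bytes."""
        base, sep, tail = label.rpartition("-")
        if sep and tail.isdigit():
            suffix = str(int(tail) + 1)
        else:
            base, suffix = label, "2"
        keep = (LABEL_MAX - 1) - len(suffix) - 1   # leave room for '-' and the counter
        return base[:keep] + "-" + suffix

    print(alternative_label("gurke"))     # gurke-2  ("gurke" is one of the test strings above)
    print(alternative_label("gurke-2"))   # gurke-3
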
diff --git a/SPECS/avahi/avahi.spec b/SPECS/avahi/avahi.spec
index 0275002caf9..cbfcc8696e4 100644
--- a/SPECS/avahi/avahi.spec
+++ b/SPECS/avahi/avahi.spec
@@ -3,7 +3,7 @@
Summary: Local network service discovery
Name: avahi
Version: 0.8
-Release: 3%{?dist}
+Release: 4%{?dist}
License: LGPLv2+
Vendor: Microsoft Corporation
Distribution: Azure Linux
@@ -13,6 +13,11 @@ Patch0: %{name}-libevent-pc-fix.patch
Patch1: CVE-2021-3468.patch
Patch2: CVE-2021-3502.patch
Patch3: CVE-2023-1981.patch
+Patch4: CVE-2023-38469.patch
+Patch5: CVE-2023-38472.patch
+Patch6: CVE-2023-38473.patch
+Patch7: CVE-2023-38470.patch
+Patch8: CVE-2023-38471.patch
BuildRequires: automake
BuildRequires: dbus-devel >= 0.90
BuildRequires: dbus-glib-devel >= 0.70
@@ -420,6 +425,13 @@ exit 0
%endif
%changelog
+* Mon Dec 02 2024 Kanishk Bansal - 0.8-4
+- Fix CVE-2023-38471 with an upstream patch
+- Fix CVE-2023-38470 with an upstream patch
+- Fix CVE-2023-38473 with an upstream patch
+- Fix CVE-2023-38472 with an upstream patch
+- Fix CVE-2023-38469 with an upstream patch
+
* Tue Oct 29 2024 Daniel McIlvaney - 0.8-3
- Fix CVE-2023-1981 with an upstream patch, enable basic check section
diff --git a/SPECS/azurelinux-release/azurelinux-release.spec b/SPECS/azurelinux-release/azurelinux-release.spec
index af9f904d209..116861abf90 100644
--- a/SPECS/azurelinux-release/azurelinux-release.spec
+++ b/SPECS/azurelinux-release/azurelinux-release.spec
@@ -5,7 +5,7 @@
Summary: Azure Linux release files
Name: azurelinux-release
Version: %{dist_version}.0
-Release: 22%{?dist}
+Release: 23%{?dist}
License: MIT
Vendor: Microsoft Corporation
Distribution: Azure Linux
@@ -118,6 +118,9 @@ install -Dm0644 %{SOURCE4} -t %{buildroot}%{_sysctldir}/
%{_sysctldir}/*.conf
%changelog
+* Sat Dec 21 2024 Jon Slobodzian - 3.0-23
+- Bump release for January 2025 Update
+
* Fri Nov 22 2024 CBL-Mariner Servicing Account - 3.0-22
- Bump release for December 2024 Update
diff --git a/SPECS/azurelinux-repos/azurelinux-amd-preview.repo b/SPECS/azurelinux-repos/azurelinux-amd-preview.repo
new file mode 100644
index 00000000000..0434974db96
--- /dev/null
+++ b/SPECS/azurelinux-repos/azurelinux-amd-preview.repo
@@ -0,0 +1,19 @@
+[azurelinux-official-amd-preview]
+name=Azure Linux Official AMD Preview $releasever $basearch
+baseurl=https://packages.microsoft.com/azurelinux/$releasever/preview/amd/$basearch
+gpgkey=file:///etc/pki/rpm-gpg/MICROSOFT-RPM-GPG-KEY
+gpgcheck=1
+repo_gpgcheck=1
+enabled=1
+skip_if_unavailable=True
+sslverify=1
+
+[azurelinux-official-amd-preview-source]
+name=Azure Linux Official AMD Preview $releasever Source
+baseurl=https://packages.microsoft.com/azurelinux/$releasever/preview/amd/srpms
+gpgkey=file:///etc/pki/rpm-gpg/MICROSOFT-RPM-GPG-KEY
+gpgcheck=1
+repo_gpgcheck=1
+enabled=0
+skip_if_unavailable=True
+sslverify=1
diff --git a/SPECS/azurelinux-repos/azurelinux-amd.repo b/SPECS/azurelinux-repos/azurelinux-amd.repo
new file mode 100644
index 00000000000..afcfbfe8537
--- /dev/null
+++ b/SPECS/azurelinux-repos/azurelinux-amd.repo
@@ -0,0 +1,19 @@
+[azurelinux-official-amd]
+name=Azure Linux Official AMD $releasever $basearch
+baseurl=https://packages.microsoft.com/azurelinux/$releasever/prod/amd/$basearch
+gpgkey=file:///etc/pki/rpm-gpg/MICROSOFT-RPM-GPG-KEY
+gpgcheck=1
+repo_gpgcheck=1
+enabled=1
+skip_if_unavailable=True
+sslverify=1
+
+[azurelinux-official-amd-source]
+name=Azure Linux Official AMD $releasever Source
+baseurl=https://packages.microsoft.com/azurelinux/$releasever/prod/amd/srpms
+gpgkey=file:///etc/pki/rpm-gpg/MICROSOFT-RPM-GPG-KEY
+gpgcheck=1
+repo_gpgcheck=1
+enabled=0
+skip_if_unavailable=True
+sslverify=1
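For reference, dnf expands the $releasever and $basearch variables in the baseurl at runtime; with the 3.0 release on x86_64 the prod URL would resolve roughly as sketched below (the substituted values are illustrative, not taken from this change):

    # Illustrative expansion of the dnf variables used in azurelinux-amd.repo above.
    base = "https://packages.microsoft.com/azurelinux/$releasever/prod/amd/$basearch"
    substitutions = {"$releasever": "3.0", "$basearch": "x86_64"}

    url = base
    for var, value in substitutions.items():
        url = url.replace(var, value)

    print(url)  # https://packages.microsoft.com/azurelinux/3.0/prod/amd/x86_64
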
diff --git a/SPECS/azurelinux-repos/azurelinux-repos.signatures.json b/SPECS/azurelinux-repos/azurelinux-repos.signatures.json
index 0dbba725064..a2771db1bd0 100644
--- a/SPECS/azurelinux-repos/azurelinux-repos.signatures.json
+++ b/SPECS/azurelinux-repos/azurelinux-repos.signatures.json
@@ -1,6 +1,8 @@
{
"Signatures": {
"MICROSOFT-RPM-GPG-KEY": "1092f37ec429e58bf9c7f898df17c3c32eb2ce3c4c037afb8ffe2d2b42e16e89",
+ "azurelinux-amd.repo": "cbe8cefea3cf99c1450d0b329c57bdb647ae15c10468c243573e33b9c4891120",
+ "azurelinux-amd-preview.repo": "723c3505d064c3b9ef436d700f748de7deca0de845d07d3fdeeafaf45949c070",
"azurelinux-debuginfo-preview.repo": "af36f823d923dd8b1a27efa7199552339f9e1315813078a4591ef855654b3017",
"azurelinux-debuginfo.repo": "ac8c1c699122e46e7501f04f1f0240e1eec322e5bb6102f6db5d1ce7215d7f5b",
"azurelinux-extended-debuginfo-preview.repo": "d29514a9962b0975cd4b32132f5cb5290930f6d0d582b46ef5d2cde6840dc9e0",
@@ -14,4 +16,4 @@
"azurelinux-official-base.repo": "b3351f3121c0600a0445a322bea8d500edaaa3d021b7cef96c1c9539d0eccab8",
"azurelinux-official-preview.repo": "a03ea4539f17fac3fdfe73fb295663778733ba84ed37bb3908784604e52b0da6"
}
-}
\ No newline at end of file
+}
diff --git a/SPECS/azurelinux-repos/azurelinux-repos.spec b/SPECS/azurelinux-repos/azurelinux-repos.spec
index 7afc1ba4195..d1680ca2bfb 100644
--- a/SPECS/azurelinux-repos/azurelinux-repos.spec
+++ b/SPECS/azurelinux-repos/azurelinux-repos.spec
@@ -1,7 +1,7 @@
Summary: AzureLinux repo files, gpg keys
Name: azurelinux-repos
Version: %{azl}.0
-Release: 3%{?dist}
+Release: 4%{?dist}
License: MIT
Vendor: Microsoft Corporation
Distribution: Azure Linux
@@ -20,14 +20,39 @@ Source10: azurelinux-official-base.repo
Source11: azurelinux-official-preview.repo
Source12: azurelinux-extended-debuginfo.repo
Source13: azurelinux-extended-debuginfo-preview.repo
+Source14: azurelinux-amd.repo
+Source15: azurelinux-amd-preview.repo
Requires: %{name}-shared = %{version}-%{release}
+# Capture the built architecture before setting noarch in order to install the
+# appropriate repos for x86_64 later in this spec
+%ifarch x86_64
+%define buildx86 1
+%endif
BuildArch: noarch
%description
Azure Linux repo files and gpg keys
+%if %{defined buildx86}
+%package amd
+Summary: Azure Linux AMD GPU repo file.
+Group: System Environment/Base
+Requires: %{name}-shared = %{version}-%{release}
+
+%description amd
+%{summary}
+
+%package amd-preview
+Summary: Azure Linux AMD GPU preview repo file.
+Group: System Environment/Base
+Requires: %{name}-shared = %{version}-%{release}
+
+%description amd-preview
+%{summary}
+%endif
+
%package debug
Summary: Azure Linux Debuginfo repo file.
Group: System Environment/Base
@@ -142,6 +167,10 @@ install -m 644 %{SOURCE10} $REPO_DIRECTORY
install -m 644 %{SOURCE11} $REPO_DIRECTORY
install -m 644 %{SOURCE12} $REPO_DIRECTORY
install -m 644 %{SOURCE13} $REPO_DIRECTORY
+%if %{defined buildx86}
+install -m 644 %{SOURCE14} $REPO_DIRECTORY
+install -m 644 %{SOURCE15} $REPO_DIRECTORY
+%endif
export RPM_GPG_DIRECTORY="%{buildroot}%{_sysconfdir}/pki/rpm-gpg"
@@ -159,6 +188,16 @@ gpg --batch --yes --delete-keys 2BC94FFF7015A5F28F1537AD0CD9FED33135CE90
%defattr(-,root,root,-)
%config(noreplace) %{_sysconfdir}/yum.repos.d/azurelinux-official-base.repo
+%if %{defined buildx86}
+%files amd
+%defattr(-,root,root,-)
+%config(noreplace) %{_sysconfdir}/yum.repos.d/azurelinux-amd.repo
+
+%files amd-preview
+%defattr(-,root,root,-)
+%config(noreplace) %{_sysconfdir}/yum.repos.d/azurelinux-amd-preview.repo
+%endif
+
%files debug
%defattr(-,root,root,-)
%config(noreplace) %{_sysconfdir}/yum.repos.d/azurelinux-debuginfo.repo
@@ -208,6 +247,9 @@ gpg --batch --yes --delete-keys 2BC94FFF7015A5F28F1537AD0CD9FED33135CE90
%{_sysconfdir}/pki/rpm-gpg/MICROSOFT-RPM-GPG-KEY
%changelog
+* Fri Dec 20 2024 Gary Swalling - 3.0-4
+- Add amd .repo files.
+
* Thu May 30 2024 Andrew Phelps - 3.0-3
- Remove MICROSOFT-METADATA-GPG-KEY
diff --git a/SPECS/ca-certificates/ca-certificates.signatures.json b/SPECS/ca-certificates/ca-certificates.signatures.json
index 8348c78a905..1a2a357595b 100644
--- a/SPECS/ca-certificates/ca-certificates.signatures.json
+++ b/SPECS/ca-certificates/ca-certificates.signatures.json
@@ -11,6 +11,7 @@
"README.usr": "0d2e90b6cf575678cd9d4f409d92258ef0d676995d4d733acdb2425309a38ff8",
"bundle2pem.sh": "a61e0d9f34e21456cfe175e9a682f56959240e66dfeb75bd2457226226aa413a",
"certdata.base.txt": "771a6c9995ea00bb4ce50fd842a252454fe9b26acad8b0568a1055207442db57",
+ "certdata.distrusted.txt": "93aebf0f1e5253ed91fe269f7128fdb8b20630ef19558f629c79a8b7eb0ba30d",
"certdata.microsoft.txt": "1707ab328312f4ecce167a886e866136b46d7f979a01cc6f9e4afd042174babd",
"certdata2pem.py": "4f5848c14210758f19ab9fdc9ffd83733303a48642a3d47c4d682f904fdc0f33",
"pem2bundle.sh": "f96a2f0071fb80e30332c0bd95853183f2f49a3c98d5e9fc4716aeeb001e3426",
diff --git a/SPECS/ca-certificates/ca-certificates.spec b/SPECS/ca-certificates/ca-certificates.spec
index 8b16547d594..9a58bad8480 100644
--- a/SPECS/ca-certificates/ca-certificates.spec
+++ b/SPECS/ca-certificates/ca-certificates.spec
@@ -6,6 +6,8 @@
%define p11_format_base_bundle ca-bundle.trust.base.p11-kit
+%define p11_format_distrusted_bundle ca-bundle.trust.distrusted.p11-kit
+
%define p11_format_microsoft_bundle ca-bundle.trust.microsoft.p11-kit
# List of packages triggering legacy certs generation if 'ca-certificates-legacy'
@@ -45,7 +47,7 @@ Name: ca-certificates
# When updating, "Epoch, "Version", AND "Release" tags must be updated in the "prebuilt-ca-certificates*" packages as well.
Epoch: 1
Version: %{azl}.0.0
-Release: 7%{?dist}
+Release: 8%{?dist}
License: MPLv2.0
Vendor: Microsoft Corporation
Distribution: Azure Linux
@@ -69,6 +71,8 @@ Source21: certdata.base.txt
Source22: bundle2pem.sh
# The certdata.microsoft.txt is provided by Microsoft's Trusted Root Program.
Source23: certdata.microsoft.txt
+# The certdata.distrusted.txt is provided by Microsoft's Trusted Root Program.
+Source24: certdata.distrusted.txt
BuildRequires: /bin/ln
BuildRequires: asciidoc
@@ -146,6 +150,7 @@ cp -p %{SOURCE20} .
%convert_certdata %{SOURCE21}
%convert_certdata %{SOURCE23}
+%convert_certdata %{SOURCE24}
#manpage
cp %{SOURCE10} %{name}/update-ca-trust.8.txt
@@ -186,6 +191,9 @@ install -p -m 644 %{SOURCE18} %{buildroot}%{catrustdir}/source/README
# Microsoft certs
%install_bundles %{SOURCE23} %{p11_format_microsoft_bundle}
+# Distrusted certs
+%install_bundles %{SOURCE24} %{p11_format_distrusted_bundle}
+
# TODO: consider to dynamically create the update-ca-trust script from within
# this .spec file, in order to have the output file+directory names at once place only.
install -p -m 755 %{SOURCE2} %{buildroot}%{_bindir}/update-ca-trust
@@ -257,13 +265,16 @@ rm -f %{pkidir}/tls/certs/*.{0,pem}
%{_bindir}/bundle2pem.sh %{pkidir}/tls/certs/%{classic_tls_bundle}
%files
+%defattr(-,root,root)
# Microsoft certs bundle file with trust
%{_datadir}/pki/ca-trust-source/%{p11_format_microsoft_bundle}
%files base
+%defattr(-,root,root)
%{_datadir}/pki/ca-trust-source/%{p11_format_base_bundle}
%files shared
+%defattr(-,root,root)
%license LICENSE
# symlinks for old locations
@@ -307,6 +318,9 @@ rm -f %{pkidir}/tls/certs/*.{0,pem}
%dir %{pkidir}/tls
%dir %{pkidir}/tls/certs
+# Distrusted CAs
+%{_datadir}/pki/ca-trust-source/%{p11_format_distrusted_bundle}
+
%ghost %{catrustdir}/extracted/pem/tls-ca-bundle.pem
%ghost %{catrustdir}/extracted/pem/email-ca-bundle.pem
%ghost %{catrustdir}/extracted/pem/objsign-ca-bundle.pem
@@ -315,15 +329,21 @@ rm -f %{pkidir}/tls/certs/*.{0,pem}
%ghost %{catrustdir}/extracted/edk2/cacerts.bin
%files tools
+%defattr(-,root,root)
# update/extract tool
%{_bindir}/update-ca-trust
%{_mandir}/man8/update-ca-trust.8.gz
%files legacy
+%defattr(-,root,root)
%{_bindir}/bundle2pem.sh
%changelog
+* Wed Dec 11 2024 Pawel Winogrodzki - 3.0.0-8
+- Update adding Microsoft distrusted CAs.
+- Explicitly set default file ownership to root:root.
+
* Tue Aug 13 2024 CBL-Mariner Servicing Account - 3.0.0-7
- Updating Microsoft trusted root CAs.
diff --git a/SPECS/ca-certificates/certdata.distrusted.txt b/SPECS/ca-certificates/certdata.distrusted.txt
new file mode 100644
index 00000000000..913d0e76496
--- /dev/null
+++ b/SPECS/ca-certificates/certdata.distrusted.txt
@@ -0,0 +1,302 @@
+# Release: December 2024
+#
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#
+# certdata.txt
+#
+# This file contains the object definitions for the certs and other
+# information "built into" NSS.
+#
+# Object definitions:
+#
+# Certificates
+#
+# -- Attribute -- -- type -- -- value --
+# CKA_CLASS CK_OBJECT_CLASS CKO_CERTIFICATE
+# CKA_TOKEN CK_BBOOL CK_TRUE
+# CKA_PRIVATE CK_BBOOL CK_FALSE
+# CKA_MODIFIABLE CK_BBOOL CK_FALSE
+# CKA_LABEL UTF8 (varies)
+# CKA_CERTIFICATE_TYPE CK_CERTIFICATE_TYPE CKC_X_509
+# CKA_SUBJECT DER+base64 (varies)
+# CKA_ID byte array (varies)
+# CKA_ISSUER DER+base64 (varies)
+# CKA_SERIAL_NUMBER DER+base64 (varies)
+# CKA_VALUE DER+base64 (varies)
+# CKA_NSS_EMAIL ASCII7 (unused here)
+# CKA_NSS_SERVER_DISTRUST_AFTER DER+base64 (varies)
+# CKA_NSS_EMAIL_DISTRUST_AFTER DER+base64 (varies)
+#
+# Trust
+#
+# -- Attribute -- -- type -- -- value --
+# CKA_CLASS CK_OBJECT_CLASS CKO_TRUST
+# CKA_TOKEN CK_BBOOL CK_TRUE
+# CKA_PRIVATE CK_BBOOL CK_FALSE
+# CKA_MODIFIABLE CK_BBOOL CK_FALSE
+# CKA_LABEL UTF8 (varies)
+# CKA_ISSUER DER+base64 (varies)
+# CKA_SERIAL_NUMBER DER+base64 (varies)
+# CKA_CERT_HASH binary+base64 (varies)
+# CKA_EXPIRES CK_DATE (not used here)
+# CKA_TRUST_DIGITAL_SIGNATURE CK_TRUST (varies)
+# CKA_TRUST_NON_REPUDIATION CK_TRUST (varies)
+# CKA_TRUST_KEY_ENCIPHERMENT CK_TRUST (varies)
+# CKA_TRUST_DATA_ENCIPHERMENT CK_TRUST (varies)
+# CKA_TRUST_KEY_AGREEMENT CK_TRUST (varies)
+# CKA_TRUST_KEY_CERT_SIGN CK_TRUST (varies)
+# CKA_TRUST_CRL_SIGN CK_TRUST (varies)
+# CKA_TRUST_SERVER_AUTH CK_TRUST (varies)
+# CKA_TRUST_CLIENT_AUTH CK_TRUST (varies)
+# CKA_TRUST_CODE_SIGNING CK_TRUST (varies)
+# CKA_TRUST_EMAIL_PROTECTION CK_TRUST (varies)
+# CKA_TRUST_IPSEC_END_SYSTEM CK_TRUST (varies)
+# CKA_TRUST_IPSEC_TUNNEL CK_TRUST (varies)
+# CKA_TRUST_IPSEC_USER CK_TRUST (varies)
+# CKA_TRUST_TIME_STAMPING CK_TRUST (varies)
+# CKA_TRUST_STEP_UP_APPROVED CK_BBOOL (varies)
+# (other trust attributes can be defined)
+#
+
+#
+# The object to tell NSS that this is a root list and we don't
+# have to go looking for others.
+#
+BEGINDATA
+CKA_CLASS CK_OBJECT_CLASS CKO_NSS_BUILTIN_ROOT_LIST
+CKA_TOKEN CK_BBOOL CK_TRUE
+CKA_PRIVATE CK_BBOOL CK_FALSE
+CKA_MODIFIABLE CK_BBOOL CK_FALSE
+CKA_LABEL UTF8 "Microsoft Builtin Distrusted Certificates"
+
+#
+# Certificate "google.com"
+#
+# Issuer: CN=AC Certisign ICP-Brasil SSL EV G4,OU=Autoridade Certificadora Raiz Brasileira v10,O=ICP-Brasil,C=BR
+# Serial Number:28:85:34:47:39:1a:72:1e:76:94:85:49:4e:73:57:52
+# Subject: CN=google.com,UID=b27bb194-0258-47ac-acba-c6f06f39787c,OID.2.5.4.97=OFBBR-ef0d9576-f46c-4c95-b690-e882e0b49bc0,L=Sao Paulo,ST=SP,O=GOOGLE PAY BRASIL INSTITUICAO DE PAGAMENTO LTDA,C=BR,serialNumber=43394419000188,incorporationCountry=BR,businessCategory=Private Organization
+# Not Valid Before: Thu Nov 28 21:19:48 2024
+# Not Valid After : Fri Nov 28 21:19:48 2025
+# Fingerprint (SHA-256): 42:13:29:F0:DC:2F:68:3D:6E:96:C1:B5:B3:10:97:4D:09:97:AD:98:4E:F6:91:20:F5:53:72:B4:F4:8E:10:37
+# Fingerprint (SHA1): 1C:68:E6:97:AB:50:91:FE:76:16:D5:2F:A0:36:02:5C:47:43:BB:73
+CKA_CLASS CK_OBJECT_CLASS CKO_CERTIFICATE
+CKA_TOKEN CK_BBOOL CK_TRUE
+CKA_PRIVATE CK_BBOOL CK_FALSE
+CKA_MODIFIABLE CK_BBOOL CK_FALSE
+CKA_LABEL UTF8 "google.com"
+CKA_CERTIFICATE_TYPE CK_CERTIFICATE_TYPE CKC_X_509
+CKA_SUBJECT MULTILINE_OCTAL
+\060\202\001\065\061\035\060\033\006\003\125\004\017\014\024\120
+\162\151\166\141\164\145\040\117\162\147\141\156\151\172\141\164
+\151\157\156\061\023\060\021\006\013\053\006\001\004\001\202\067
+\074\002\001\003\023\002\102\122\061\027\060\025\006\003\125\004
+\005\023\016\064\063\063\071\064\064\061\071\060\060\060\061\070
+\070\061\013\060\011\006\003\125\004\006\023\002\102\122\061\070
+\060\066\006\003\125\004\012\014\057\107\117\117\107\114\105\040
+\120\101\131\040\102\122\101\123\111\114\040\111\116\123\124\111
+\124\125\111\103\101\117\040\104\105\040\120\101\107\101\115\105
+\116\124\117\040\114\124\104\101\061\013\060\011\006\003\125\004
+\010\014\002\123\120\061\022\060\020\006\003\125\004\007\014\011
+\123\141\157\040\120\141\165\154\157\061\063\060\061\006\003\125
+\004\141\014\052\117\106\102\102\122\055\145\146\060\144\071\065
+\067\066\055\146\064\066\143\055\064\143\071\065\055\142\066\071
+\060\055\145\070\070\062\145\060\142\064\071\142\143\060\061\064
+\060\062\006\012\011\222\046\211\223\362\054\144\001\001\014\044
+\142\062\067\142\142\061\071\064\055\060\062\065\070\055\064\067
+\141\143\055\141\143\142\141\055\143\066\146\060\066\146\063\071
+\067\070\067\143\061\023\060\021\006\003\125\004\003\014\012\147
+\157\157\147\154\145\056\143\157\155
+END
+CKA_ID UTF8 "0"
+CKA_ISSUER MULTILINE_OCTAL
+\060\201\205\061\013\060\011\006\003\125\004\006\023\002\102\122
+\061\023\060\021\006\003\125\004\012\023\012\111\103\120\055\102
+\162\141\163\151\154\061\065\060\063\006\003\125\004\013\023\054
+\101\165\164\157\162\151\144\141\144\145\040\103\145\162\164\151
+\146\151\143\141\144\157\162\141\040\122\141\151\172\040\102\162
+\141\163\151\154\145\151\162\141\040\166\061\060\061\052\060\050
+\006\003\125\004\003\023\041\101\103\040\103\145\162\164\151\163
+\151\147\156\040\111\103\120\055\102\162\141\163\151\154\040\123
+\123\114\040\105\126\040\107\064
+END
+CKA_SERIAL_NUMBER MULTILINE_OCTAL
+\002\020\050\205\064\107\071\032\162\036\166\224\205\111\116\163
+\127\122
+END
+CKA_VALUE MULTILINE_OCTAL
+\060\202\010\001\060\202\005\351\240\003\002\001\002\002\020\050
+\205\064\107\071\032\162\036\166\224\205\111\116\163\127\122\060
+\015\006\011\052\206\110\206\367\015\001\001\013\005\000\060\201
+\205\061\013\060\011\006\003\125\004\006\023\002\102\122\061\023
+\060\021\006\003\125\004\012\023\012\111\103\120\055\102\162\141
+\163\151\154\061\065\060\063\006\003\125\004\013\023\054\101\165
+\164\157\162\151\144\141\144\145\040\103\145\162\164\151\146\151
+\143\141\144\157\162\141\040\122\141\151\172\040\102\162\141\163
+\151\154\145\151\162\141\040\166\061\060\061\052\060\050\006\003
+\125\004\003\023\041\101\103\040\103\145\162\164\151\163\151\147
+\156\040\111\103\120\055\102\162\141\163\151\154\040\123\123\114
+\040\105\126\040\107\064\060\036\027\015\062\064\061\061\062\070
+\062\061\061\071\064\070\132\027\015\062\065\061\061\062\070\062
+\061\061\071\064\070\132\060\202\001\065\061\035\060\033\006\003
+\125\004\017\014\024\120\162\151\166\141\164\145\040\117\162\147
+\141\156\151\172\141\164\151\157\156\061\023\060\021\006\013\053
+\006\001\004\001\202\067\074\002\001\003\023\002\102\122\061\027
+\060\025\006\003\125\004\005\023\016\064\063\063\071\064\064\061
+\071\060\060\060\061\070\070\061\013\060\011\006\003\125\004\006
+\023\002\102\122\061\070\060\066\006\003\125\004\012\014\057\107
+\117\117\107\114\105\040\120\101\131\040\102\122\101\123\111\114
+\040\111\116\123\124\111\124\125\111\103\101\117\040\104\105\040
+\120\101\107\101\115\105\116\124\117\040\114\124\104\101\061\013
+\060\011\006\003\125\004\010\014\002\123\120\061\022\060\020\006
+\003\125\004\007\014\011\123\141\157\040\120\141\165\154\157\061
+\063\060\061\006\003\125\004\141\014\052\117\106\102\102\122\055
+\145\146\060\144\071\065\067\066\055\146\064\066\143\055\064\143
+\071\065\055\142\066\071\060\055\145\070\070\062\145\060\142\064
+\071\142\143\060\061\064\060\062\006\012\011\222\046\211\223\362
+\054\144\001\001\014\044\142\062\067\142\142\061\071\064\055\060
+\062\065\070\055\064\067\141\143\055\141\143\142\141\055\143\066
+\146\060\066\146\063\071\067\070\067\143\061\023\060\021\006\003
+\125\004\003\014\012\147\157\157\147\154\145\056\143\157\155\060
+\202\001\042\060\015\006\011\052\206\110\206\367\015\001\001\001
+\005\000\003\202\001\017\000\060\202\001\012\002\202\001\001\000
+\245\071\062\166\146\112\020\362\222\260\147\320\324\326\000\245
+\162\170\155\042\014\366\350\006\234\273\346\243\106\262\207\204
+\365\316\016\143\113\113\351\240\024\326\123\263\340\043\116\355
+\201\352\030\177\366\120\142\300\126\373\004\303\011\033\263\025
+\110\177\001\170\272\370\214\026\336\360\057\320\301\103\271\005
+\336\135\034\023\341\103\247\050\130\355\027\324\072\376\174\222
+\360\006\062\201\354\321\230\061\114\025\072\162\013\314\154\030
+\230\241\170\130\202\215\017\366\016\110\003\325\202\331\300\376
+\236\320\033\267\330\334\217\332\331\107\030\277\212\346\126\160
+\310\326\015\051\365\172\366\252\230\347\322\005\307\135\351\037
+\312\236\236\377\176\217\070\203\262\003\026\025\272\170\136\271
+\044\126\313\012\217\257\006\311\057\321\275\055\302\201\124\130
+\042\132\315\142\113\221\247\012\167\301\152\276\254\274\344\163
+\206\013\020\217\110\141\263\046\133\164\110\004\207\122\145\373
+\151\241\005\022\012\373\335\137\226\323\165\051\047\256\316\236
+\250\021\054\170\147\214\275\125\374\300\152\224\353\165\217\131
+\002\003\001\000\001\243\202\002\270\060\202\002\264\060\030\006
+\003\125\035\021\001\001\377\004\016\060\014\202\012\147\157\157
+\147\154\145\056\143\157\155\060\011\006\003\125\035\023\004\002
+\060\000\060\037\006\003\125\035\043\004\030\060\026\200\024\027
+\111\323\106\270\151\244\056\077\011\203\116\024\215\111\076\220
+\325\014\050\060\201\232\006\003\125\035\040\004\201\222\060\201
+\217\060\201\202\006\006\140\114\001\002\001\152\060\170\060\166
+\006\010\053\006\001\005\005\007\002\001\026\152\150\164\164\160
+\072\057\057\151\143\160\055\142\162\141\163\151\154\056\143\145
+\162\164\151\163\151\147\156\056\143\157\155\056\142\162\057\162
+\145\160\157\163\151\164\157\162\151\157\057\144\160\143\057\141
+\143\137\143\145\162\164\151\163\151\147\156\137\151\143\160\137
+\142\162\137\163\163\154\057\104\120\103\137\101\103\137\103\145
+\162\164\151\163\151\147\156\137\111\143\160\137\102\162\137\123
+\163\154\056\160\144\146\060\010\006\006\147\201\014\001\002\002
+\060\201\312\006\003\125\035\037\004\201\302\060\201\277\060\136
+\240\134\240\132\206\130\150\164\164\160\072\057\057\151\143\160
+\055\142\162\141\163\151\154\056\143\145\162\164\151\163\151\147
+\156\056\143\157\155\056\142\162\057\162\145\160\157\163\151\164
+\157\162\151\157\057\154\143\162\057\101\103\103\145\162\164\151
+\163\151\147\156\111\103\120\102\122\123\123\114\105\126\107\064
+\057\114\141\164\145\163\164\103\122\114\056\143\162\154\060\135
+\240\133\240\131\206\127\150\164\164\160\072\057\057\151\143\160
+\055\142\162\141\163\151\154\056\157\165\164\162\141\154\143\162
+\056\143\157\155\056\142\162\057\162\145\160\157\163\151\164\157
+\162\151\157\057\154\143\162\057\101\103\103\145\162\164\151\163
+\151\147\156\111\103\120\102\122\123\123\114\105\126\107\064\057
+\114\141\164\145\163\164\103\122\114\056\143\162\154\060\016\006
+\003\125\035\017\001\001\377\004\004\003\002\003\250\060\035\006
+\003\125\035\045\004\026\060\024\006\010\053\006\001\005\005\007
+\003\001\006\010\053\006\001\005\005\007\003\002\060\023\006\012
+\053\006\001\004\001\326\171\002\004\003\001\001\377\004\002\005
+\000\060\201\275\006\010\053\006\001\005\005\007\001\001\004\201
+\260\060\201\255\060\151\006\010\053\006\001\005\005\007\060\002
+\206\135\150\164\164\160\072\057\057\151\143\160\055\142\162\141
+\163\151\154\056\143\145\162\164\151\163\151\147\156\056\143\157
+\155\056\142\162\057\162\145\160\157\163\151\164\157\162\151\157
+\057\143\145\162\164\151\146\151\143\141\144\157\163\057\101\103
+\137\103\145\162\164\151\163\151\147\156\137\111\143\160\137\102
+\162\137\123\163\154\137\105\126\137\107\064\056\160\067\143\060
+\100\006\010\053\006\001\005\005\007\060\001\206\064\150\164\164
+\160\072\057\057\157\143\163\160\055\141\143\055\143\145\162\164
+\151\163\151\147\156\055\151\143\160\055\142\162\055\163\163\154
+\056\143\145\162\164\151\163\151\147\156\056\143\157\155\056\142
+\162\060\015\006\011\052\206\110\206\367\015\001\001\013\005\000
+\003\202\002\001\000\004\277\164\275\336\224\331\155\317\017\142
+\333\066\327\114\036\123\143\176\215\160\003\240\323\006\373\365
+\167\164\071\324\202\171\354\345\013\353\226\072\237\323\247\366
+\271\247\132\155\174\371\260\177\135\207\024\165\006\057\263\077
+\160\345\152\161\147\363\344\255\257\115\172\163\033\154\164\354
+\344\304\061\003\030\275\234\022\233\223\053\021\073\364\221\165
+\160\055\102\341\220\147\212\270\007\064\347\165\346\020\170\137
+\001\301\316\344\226\363\337\263\307\302\004\333\110\224\200\320
+\352\261\025\020\211\034\317\151\256\172\161\207\032\063\050\117
+\300\232\310\161\146\345\321\007\267\323\320\035\127\002\273\173
+\131\016\216\076\155\115\044\146\112\245\154\360\264\244\356\312
+\050\213\212\270\111\211\206\146\233\013\160\027\260\075\217\022
+\360\241\202\146\334\052\053\314\363\150\240\055\363\122\341\116
+\162\052\075\357\317\137\311\045\005\262\133\046\055\247\332\062
+\377\250\105\167\142\023\333\014\142\240\133\271\346\160\313\001
+\007\332\010\105\114\354\326\061\110\110\164\106\220\340\302\270
+\231\034\204\021\027\341\336\266\037\320\275\366\247\206\333\336
+\120\347\244\215\210\141\141\106\146\070\300\253\260\320\220\326
+\245\307\041\351\224\320\063\071\110\345\052\042\254\163\164\205
+\242\067\151\350\036\302\102\130\346\211\372\151\262\305\002\213
+\203\200\230\261\344\051\153\361\103\323\353\062\365\150\122\052
+\167\301\250\367\375\266\337\130\107\336\106\302\044\261\136\025
+\024\073\255\246\116\242\351\241\011\113\326\051\105\332\143\216
+\041\201\017\276\036\222\150\134\235\033\130\215\031\016\025\322
+\310\337\152\331\232\214\341\060\243\114\175\074\303\132\250\053
+\333\021\267\140\135\231\223\003\335\056\241\062\176\313\134\305
+\114\114\100\377\066\116\252\160\037\027\322\121\305\277\344\105
+\111\036\012\031\346\335\247\203\043\132\351\355\150\076\022\153
+\155\110\337\121\224\002\112\337\374\023\040\307\113\024\077\154
+\364\153\003\136\374\242\242\164\321\300\100\324\211\367\307\146
+\005\331\230\314\124\045\273\245\306\024\036\224\214\100\075\215
+\104\265\367\204\063\367\037\075\221\056\263\325\023\135\313\040
+\173\136\210\017\230
+END
+CKA_NSS_MOZILLA_CA_POLICY CK_BBOOL CK_TRUE
+CKA_NSS_SERVER_DISTRUST_AFTER CK_BBOOL CK_FALSE
+CKA_NSS_EMAIL_DISTRUST_AFTER CK_BBOOL CK_FALSE
+
+# Trust for "google.com"
+# Issuer: CN=AC Certisign ICP-Brasil SSL EV G4,OU=Autoridade Certificadora Raiz Brasileira v10,O=ICP-Brasil,C=BR
+# Serial Number:28:85:34:47:39:1a:72:1e:76:94:85:49:4e:73:57:52
+# Subject: CN=google.com,UID=b27bb194-0258-47ac-acba-c6f06f39787c,OID.2.5.4.97=OFBBR-ef0d9576-f46c-4c95-b690-e882e0b49bc0,L=Sao Paulo,ST=SP,O=GOOGLE PAY BRASIL INSTITUICAO DE PAGAMENTO LTDA,C=BR,serialNumber=43394419000188,incorporationCountry=BR,businessCategory=Private Organization
+# Not Valid Before: Thu Nov 28 21:19:48 2024
+# Not Valid After : Fri Nov 28 21:19:48 2025
+# Fingerprint (SHA-256): 42:13:29:F0:DC:2F:68:3D:6E:96:C1:B5:B3:10:97:4D:09:97:AD:98:4E:F6:91:20:F5:53:72:B4:F4:8E:10:37
+# Fingerprint (SHA1): 1C:68:E6:97:AB:50:91:FE:76:16:D5:2F:A0:36:02:5C:47:43:BB:73
+CKA_CLASS CK_OBJECT_CLASS CKO_NSS_TRUST
+CKA_TOKEN CK_BBOOL CK_TRUE
+CKA_PRIVATE CK_BBOOL CK_FALSE
+CKA_MODIFIABLE CK_BBOOL CK_FALSE
+CKA_LABEL UTF8 "google.com"
+CKA_CERT_SHA1_HASH MULTILINE_OCTAL
+\034\150\346\227\253\120\221\376\166\026\325\057\240\066\002\134
+\107\103\273\163
+END
+CKA_CERT_MD5_HASH MULTILINE_OCTAL
+\016\067\034\146\242\243\030\173\162\334\023\136\201\340\143\150
+END
+CKA_ISSUER MULTILINE_OCTAL
+\060\201\205\061\013\060\011\006\003\125\004\006\023\002\102\122
+\061\023\060\021\006\003\125\004\012\023\012\111\103\120\055\102
+\162\141\163\151\154\061\065\060\063\006\003\125\004\013\023\054
+\101\165\164\157\162\151\144\141\144\145\040\103\145\162\164\151
+\146\151\143\141\144\157\162\141\040\122\141\151\172\040\102\162
+\141\163\151\154\145\151\162\141\040\166\061\060\061\052\060\050
+\006\003\125\004\003\023\041\101\103\040\103\145\162\164\151\163
+\151\147\156\040\111\103\120\055\102\162\141\163\151\154\040\123
+\123\114\040\105\126\040\107\064
+END
+CKA_SERIAL_NUMBER MULTILINE_OCTAL
+\002\020\050\205\064\107\071\032\162\036\166\224\205\111\116\163
+\127\122
+END
+CKA_TRUST_SERVER_AUTH CK_TRUST CKT_NSS_NOT_TRUSTED
+CKA_TRUST_EMAIL_PROTECTION CK_TRUST CKT_NSS_NOT_TRUSTED
+CKA_TRUST_CODE_SIGNING CK_TRUST CKT_NSS_NOT_TRUSTED
+CKA_TRUST_STEP_UP_APPROVED CK_BBOOL CK_FALSE
\ No newline at end of file
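The certdata blocks above store DER data as MULTILINE_OCTAL, backslash-escaped octal bytes. A small helper, offered as an assumption about how one might inspect such a file rather than anything shipped by this package, that decodes a block back to DER and fingerprints it for comparison with the SHA1 comments:

    import hashlib
    import re

    def decode_multiline_octal(block):
        """Turn a MULTILINE_OCTAL block (e.g. a CKA_VALUE section) into raw DER bytes."""
        return bytes(int(octet, 8) for octet in re.findall(r"\\([0-7]{3})", block))

    def sha1_fingerprint(der):
        """Colon-separated uppercase SHA-1, matching the 'Fingerprint (SHA1)' comments."""
        return ":".join(f"{b:02X}" for b in hashlib.sha1(der).digest())

    # First four octets of the google.com CKA_VALUE above: the DER SEQUENCE header.
    print(decode_multiline_octal(r"\060\202\010\001").hex())   # 30820801
    # Feeding the full CKA_VALUE block to sha1_fingerprint() should reproduce
    # 1C:68:E6:97:AB:50:91:FE:76:16:D5:2F:A0:36:02:5C:47:43:BB:73.
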
diff --git a/SPECS/ca-certificates/certdata2pem.py b/SPECS/ca-certificates/certdata2pem.py
old mode 100644
new mode 100755
diff --git a/SPECS/ceph/CVE-2024-52338.patch b/SPECS/ceph/CVE-2024-52338.patch
new file mode 100644
index 00000000000..42f7cc85b47
--- /dev/null
+++ b/SPECS/ceph/CVE-2024-52338.patch
@@ -0,0 +1,131 @@
+diff --git a/src/arrow/r/R/metadata.R b/src/arrow/r/R/metadata.R
+index 768abeda7..82d54f7ce 100644
+--- a/src/arrow/r/R/metadata.R
++++ b/src/arrow/r/R/metadata.R
+@@ -22,7 +22,7 @@
+ # drop problems attributes (most likely from readr)
+ x[["attributes"]][["problems"]] <- NULL
+
+- out <- serialize(x, NULL, ascii = TRUE)
++ out <- serialize(safe_r_metadata(x, on_save = TRUE), NULL, ascii = TRUE)
+
+ # if the metadata is over 100 kB, compress
+ if (option_compress_metadata() && object.size(out) > 100000) {
+@@ -35,22 +35,104 @@
+ rawToChar(out)
+ }
+
+-.unserialize_arrow_r_metadata <- function(x) {
+- tryCatch(
+- expr = {
+- out <- unserialize(charToRaw(x))
++.deserialize_arrow_r_metadata <- function(x) {
++ tryCatch(unserialize_r_metadata(x),
++ error = function(e) {
++ if (getOption("arrow.debug", FALSE)) {
++ print(conditionMessage(e))
++ }
++ warning("Invalid metadata$r", call. = FALSE)
++ NULL
++ }
++ )
++}
+
+- # if this is still raw, try decompressing
+- if (is.raw(out)) {
+- out <- unserialize(memDecompress(out, type = "gzip"))
++unserialize_r_metadata <- function(x) {
++ # Check that this is ASCII serialized data (as in, what we wrote)
++ if (!identical(substr(unclass(x), 1, 1), "A")) {
++ stop("Invalid serialized data")
++ }
++ out <- safe_unserialize(charToRaw(x))
++ # If it's still raw, decompress and unserialize again
++ if (is.raw(out)) {
++ decompressed <- memDecompress(out, type = "gzip")
++ if (!identical(rawToChar(decompressed[1]), "A")) {
++ stop("Invalid serialized compressed data")
++ }
++ out <- safe_unserialize(decompressed)
++ }
++ if (!is.list(out)) {
++ stop("Invalid serialized data: must be a list")
++ }
++ safe_r_metadata(out)
++}
++safe_unserialize <- function(x) {
++ # By capturing the data in a list, we can inspect it for promises without
++ # triggering their evaluation.
++ out <- list(unserialize(x))
++ if (typeof(out[[1]]) == "promise") {
++ stop("Serialized data contains a promise object")
++ }
++ out[[1]]
++}
++safe_r_metadata <- function(metadata, on_save = FALSE) {
++ # This function recurses through the metadata list and checks that all
++ # elements are of types that are allowed in R metadata.
++ # If it finds an element that is not allowed, it removes it.
++ #
++ # This function is used both when saving and loading metadata.
++ # @param on_save: If TRUE, the function will not warn if it removes elements:
++ # we're just cleaning up the metadata for saving. If FALSE, it means we're
++ # loading the metadata, and we'll warn if we find invalid elements.
++ #
++ # When loading metadata, you can optionally keep the invalid elements by
++ # setting `options(arrow.unsafe_metadata = TRUE)`. It will still check
++ # for invalid elements and warn if any are found, though.
++ # This variable will be used to store the types of elements that were removed,
++ # if any, so we can give an informative warning if needed.
++ types_removed <- c()
++ # Internal function that we'll recursively apply,
++ # and mutate the `types_removed` variable outside of it.
++ check_r_metadata_types_recursive <- function(x) {
++ allowed_types <- c("character", "double", "integer", "logical", "complex", "list", "NULL")
++ if (is.list(x)) {
++ types <- map_chr(x, typeof)
++ x[types == "list"] <- map(x[types == "list"], check_r_metadata_types_recursive)
++ ok <- types %in% allowed_types
++ if (!all(ok)) {
++ # Record the invalid types, then remove the offending elements
++ types_removed <<- c(types_removed, setdiff(types, allowed_types))
++ x <- x[ok]
+ }
+- out
+- },
+- error = function(e) {
+- warning("Invalid metadata$r", call. = FALSE)
+- NULL
+ }
+- )
++ x
++ }
++ new <- check_r_metadata_types_recursive(metadata)
++ # On save: don't warn, just save the filtered metadata
++ if (on_save) {
++ return(new)
++ }
++ # On load: warn if any elements were removed
++ if (length(types_removed)) {
++ types_msg <- paste("Type:", oxford_paste(unique(types_removed)))
++ if (getOption("arrow.unsafe_metadata", FALSE)) {
++ # We've opted-in to unsafe metadata, so warn but return the original metadata
++ rlang::warn(
++ "R metadata may have unsafe or invalid elements",
++ body = c("i" = types_msg)
++ )
++ new <- metadata
++ } else {
++ rlang::warn(
++ "Potentially unsafe or invalid elements have been discarded from R metadata.",
++ body = c(
++ "i" = types_msg,
++ ">" = "If you trust the source, you can set `options(arrow.unsafe_metadata = TRUE)` to preserve them."
++ )
++ )
++ }
++ }
++ new
+ }
+
+ #' @importFrom rlang trace_back
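The R hunk above (in ceph's bundled Apache Arrow sources) replaces a blind unserialize() of the saved arrow_r_metadata attribute with a recursive allow-list filter, so closures, environments, and promises hidden in the metadata are dropped instead of being revived and potentially executed. The same idea as a Python sketch; the type list and names are chosen for illustration only:

    ALLOWED = (str, int, float, bool, complex, type(None))

    def filter_metadata(value, removed):
        """Recursively keep only allow-listed scalar types and nested lists."""
        if isinstance(value, list):
            kept = []
            for item in value:
                if isinstance(item, list):
                    kept.append(filter_metadata(item, removed))
                elif isinstance(item, ALLOWED):
                    kept.append(item)
                else:
                    removed.append(type(item).__name__)  # record what was dropped
            return kept
        return value  # metadata is expected to be a list, as in the R code

    removed = []
    clean = filter_metadata(["col_a", 3.14, lambda: "surprise", [1, 2, {"x": 1}]], removed)
    print(clean)    # ['col_a', 3.14, [1, 2]]
    print(removed)  # ['function', 'dict']
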
diff --git a/SPECS/ceph/ceph.spec b/SPECS/ceph/ceph.spec
index 69597db52d1..06d7c026466 100644
--- a/SPECS/ceph/ceph.spec
+++ b/SPECS/ceph/ceph.spec
@@ -5,7 +5,7 @@
Summary: User space components of the Ceph file system
Name: ceph
Version: 18.2.2
-Release: 1%{?dist}
+Release: 2%{?dist}
License: LGPLv2 and LGPLv3 and CC-BY-SA and GPLv2 and Boost and BSD and MIT and Public Domain and GPLv3 and ASL-2.0
URL: https://ceph.io/
Vendor: Microsoft Corporation
@@ -13,7 +13,7 @@ Distribution: Azure Linux
Source0: https://download.ceph.com/tarballs/%{name}-%{version}.tar.gz
Patch0: 0034-src-pybind-rbd-rbd.pyx.patch
Patch1: 0032-cmake-modules-BuildBoost.cmake.patch
-
+Patch2: CVE-2024-52338.patch
#
# Copyright (C) 2004-2019 The Ceph Project Developers. See COPYING file
# at the top-level directory of this distribution and at
@@ -2000,6 +2000,9 @@ exit 0
%changelog
+* Wed Dec 4 2024 Bhagyashri Pathak - 18.2.2-2
+- Fix for CVE-2024-52338
+
* Thu May 23 2024 CBL-Mariner Servicing Account - 18.2.2-1
- Auto-upgrade to 18.2.2 - CVE patches
diff --git a/SPECS/cf-cli/CVE-2024-45337.patch b/SPECS/cf-cli/CVE-2024-45337.patch
new file mode 100644
index 00000000000..f7d2f6a6724
--- /dev/null
+++ b/SPECS/cf-cli/CVE-2024-45337.patch
@@ -0,0 +1,77 @@
+https://github.com/golang/crypto/commit/b4f1988a35dee11ec3e05d6bf3e90b695fbd8909.patch
+
+From b4f1988a35dee11ec3e05d6bf3e90b695fbd8909 Mon Sep 17 00:00:00 2001
+From: Roland Shoemaker
+Date: Tue, 3 Dec 2024 09:03:03 -0800
+Subject: [PATCH] ssh: make the public key cache a 1-entry FIFO cache
+
+Users of the the ssh package seem to extremely commonly misuse the
+PublicKeyCallback API, assuming that the key passed in the last call
+before a connection is established is the key used for authentication.
+Some users then make authorization decisions based on this key. This
+property is not documented, and may not be correct, due to the caching
+behavior of the package, resulting in users making incorrect
+authorization decisions about the connection.
+
+This change makes the cache a one entry FIFO cache, making the assumed
+property, that the last call to PublicKeyCallback represents the key
+actually used for authentication, actually hold.
+
+Thanks to Damien Tournoud, Patrick Dawkins, Vince Parker, and
+Jules Duvivier from the Platform.sh / Upsun engineering team
+for reporting this issue.
+
+Fixes golang/go#70779
+Fixes CVE-2024-45337
+
+Change-Id: Ife7c7b4045d8b6bcd7e3a417bdfae370c709797f
+Reviewed-on: https://go-review.googlesource.com/c/crypto/+/635315
+Reviewed-by: Roland Shoemaker
+Auto-Submit: Gopher Robot
+Reviewed-by: Damien Neil
+Reviewed-by: Nicola Murino
+LUCI-TryBot-Result: Go LUCI
+---
+ vendor/golang.org/x/crypto/ssh/server.go | 15 ++++++++++----
+
+diff --git a/vendor/golang.org/x/crypto/ssh/server.go b/vendor/golang.org/x/crypto/ssh/server.go
+index c0d1c29e6f..5b5ccd96f4 100644
+--- a/vendor/golang.org/x/crypto/ssh/server.go
++++ b/vendor/golang.org/x/crypto/ssh/server.go
+@@ -142,7 +142,7 @@ func (s *ServerConfig) AddHostKey(key Signer) {
+ }
+
+ // cachedPubKey contains the results of querying whether a public key is
+-// acceptable for a user.
++// acceptable for a user. This is a FIFO cache.
+ type cachedPubKey struct {
+ user string
+ pubKeyData []byte
+@@ -150,7 +150,13 @@ type cachedPubKey struct {
+ perms *Permissions
+ }
+
+-const maxCachedPubKeys = 16
++// maxCachedPubKeys is the number of cache entries we store.
++//
++// Due to consistent misuse of the PublicKeyCallback API, we have reduced this
++// to 1, such that the only key in the cache is the most recently seen one. This
++// forces the behavior that the last call to PublicKeyCallback will always be
++// with the key that is used for authentication.
++const maxCachedPubKeys = 1
+
+ // pubKeyCache caches tests for public keys. Since SSH clients
+ // will query whether a public key is acceptable before attempting to
+@@ -172,9 +178,10 @@ func (c *pubKeyCache) get(user string, pubKeyData []byte) (cachedPubKey, bool) {
+
+ // add adds the given tuple to the cache.
+ func (c *pubKeyCache) add(candidate cachedPubKey) {
+- if len(c.keys) < maxCachedPubKeys {
+- c.keys = append(c.keys, candidate)
++ if len(c.keys) >= maxCachedPubKeys {
++ c.keys = c.keys[1:]
+ }
++ c.keys = append(c.keys, candidate)
+ }
+
+ // ServerConn is an authenticated SSH connection, as seen from the
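The commit message above is the whole story of the fix: the cache shrinks to a single-entry FIFO so that the key seen in the last PublicKeyCallback call is also the key the connection authenticates with. A standalone illustration of the new add/evict behaviour follows; it does not use the x/crypto types, and `entry`/`cache` are invented names.

```go
package main

import "fmt"

const maxEntries = 1 // mirrors maxCachedPubKeys after the patch

type entry struct {
	user string
	key  string
}

type cache struct {
	entries []entry
}

// add appends a candidate, evicting the oldest entry first when the cache is
// full, so the cache always ends up holding only the most recently seen key.
func (c *cache) add(e entry) {
	if len(c.entries) >= maxEntries {
		c.entries = c.entries[1:]
	}
	c.entries = append(c.entries, e)
}

func main() {
	c := &cache{}
	c.add(entry{"alice", "key-A"})
	c.add(entry{"alice", "key-B"}) // evicts key-A
	fmt.Println(c.entries)         // [{alice key-B}]
}
```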
diff --git a/SPECS/cf-cli/cf-cli.spec b/SPECS/cf-cli/cf-cli.spec
index 10020f6b21e..81c451c70ae 100644
--- a/SPECS/cf-cli/cf-cli.spec
+++ b/SPECS/cf-cli/cf-cli.spec
@@ -5,7 +5,7 @@ Summary: The official command line client for Cloud Foundry.
Name: cf-cli
# Note: Upgrading the package also warrants an upgrade in the CF_BUILD_SHA
Version: 8.7.3
-Release: 3%{?dist}
+Release: 4%{?dist}
License: Apache-2.0
Vendor: Microsoft Corporation
Distribution: Azure Linux
@@ -33,6 +33,7 @@ Source0: https://github.com/cloudfoundry/cli/archive/refs/tags/v%{version
Source1: cli-%{version}-vendor.tar.gz
Patch0: CVE-2023-39325.patch
Patch1: CVE-2024-24786.patch
+Patch2: CVE-2024-45337.patch
BuildRequires: golang >= 1.18.3
%global debug_package %{nil}
@@ -46,6 +47,7 @@ The official command line client for Cloud Foundry.
tar --no-same-owner -xf %{SOURCE1}
%patch 0 -p1
%patch 1 -p1
+%patch 2 -p1
%build
export GOPATH=%{our_gopath}
@@ -67,6 +69,9 @@ install -p -m 755 -t %{buildroot}%{_bindir} ./out/cf
%{_bindir}/cf
%changelog
+* Fri Dec 20 2024 Aurelien Bombo - 8.7.3-4
+- Add patch for CVE-2024-45337
+
* Mon Nov 25 2024 Bala - 8.7.3-3
- Fix CVE-2024-24786
diff --git a/SPECS/containerd2/containerd.service b/SPECS/containerd2/containerd.service
new file mode 100644
index 00000000000..06b501178b9
--- /dev/null
+++ b/SPECS/containerd2/containerd.service
@@ -0,0 +1,15 @@
+[Unit]
+Description=containerd container runtime
+Documentation=https://containerd.io
+After=network.target
+
+[Service]
+ExecStartPre=/sbin/modprobe overlay
+ExecStart=/usr/bin/containerd
+Restart=always
+Delegate=yes
+KillMode=process
+OOMScoreAdjust=-999
+
+[Install]
+WantedBy=multi-user.target
diff --git a/SPECS/containerd2/containerd.toml b/SPECS/containerd2/containerd.toml
new file mode 100644
index 00000000000..422716a3c33
--- /dev/null
+++ b/SPECS/containerd2/containerd.toml
@@ -0,0 +1,9 @@
+version = 2
+[plugins]
+ [plugins."io.containerd.grpc.v1.cri"]
+ [plugins."io.containerd.grpc.v1.cri".containerd]
+ [plugins."io.containerd.grpc.v1.cri".containerd.runtimes]
+ [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc]
+ runtime_type = "io.containerd.runc.v2"
+ [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options]
+ SystemdCgroup = true
\ No newline at end of file
diff --git a/SPECS/containerd2/containerd2.signatures.json b/SPECS/containerd2/containerd2.signatures.json
new file mode 100644
index 00000000000..d49f7f913eb
--- /dev/null
+++ b/SPECS/containerd2/containerd2.signatures.json
@@ -0,0 +1,7 @@
+{
+ "Signatures": {
+ "containerd.service": "a07bfcf412669b06673190b0779f48e652c9adcf1758289e849a00802804eec8",
+ "containerd.toml": "5b3821236f09b4c858e0e098bbe1400f4dbbb47d360e39d21c61858b088c2896",
+ "containerd-2.0.0.tar.gz": "346d644e1b96e1f4a39bfe9d1eb0eb01ca676f806c12d95e5dbe35325bbc1780"
+ }
+}
\ No newline at end of file
diff --git a/SPECS/containerd2/containerd2.spec b/SPECS/containerd2/containerd2.spec
new file mode 100644
index 00000000000..79013cc94f4
--- /dev/null
+++ b/SPECS/containerd2/containerd2.spec
@@ -0,0 +1,85 @@
+%global debug_package %{nil}
+%define upstream_name containerd
+%define commit_hash 207ad711eabd375a01713109a8a197d197ff6542
+
+Summary: Industry-standard container runtime
+Name: %{upstream_name}2
+Version: 2.0.0
+Release: 1%{?dist}
+License: ASL 2.0
+Group: Tools/Container
+URL: https://www.containerd.io
+Vendor: Microsoft Corporation
+Distribution: Azure Linux
+
+Source0: https://github.com/containerd/containerd/archive/v%{version}.tar.gz#/%{upstream_name}-%{version}.tar.gz
+Source1: containerd.service
+Source2: containerd.toml
+
+%{?systemd_requires}
+
+BuildRequires: golang
+BuildRequires: go-md2man
+BuildRequires: make
+BuildRequires: systemd-rpm-macros
+
+Requires: runc >= 1.2.2
+
+%description
+containerd is an industry-standard container runtime with an emphasis on
+simplicity, robustness and portability. It is available as a daemon for Linux
+and Windows, which can manage the complete container lifecycle of its host
+system: image transfer and storage, container execution and supervision,
+low-level storage and network attachments, etc.
+
+containerd is designed to be embedded into a larger system, rather than being
+used directly by developers or end-users.
+
+%prep
+%autosetup -p1 -n %{upstream_name}-%{version}
+
+%build
+export BUILDTAGS="-mod=vendor"
+make VERSION="%{version}" REVISION="%{commit_hash}" binaries man
+
+%check
+export BUILDTAGS="-mod=vendor"
+make VERSION="%{version}" REVISION="%{commit_hash}" test
+
+%install
+make VERSION="%{version}" REVISION="%{commit_hash}" DESTDIR="%{buildroot}" PREFIX="/usr" install install-man
+
+mkdir -p %{buildroot}/%{_unitdir}
+install -D -p -m 0644 %{SOURCE1} %{buildroot}%{_unitdir}/containerd.service
+install -D -p -m 0644 %{SOURCE2} %{buildroot}%{_sysconfdir}/containerd/config.toml
+install -vdm 755 %{buildroot}/opt/containerd/{bin,lib}
+
+%post
+%systemd_post containerd.service
+
+if [ $1 -eq 1 ]; then # Package install
+ systemctl enable containerd.service > /dev/null 2>&1 || :
+ systemctl start containerd.service > /dev/null 2>&1 || :
+fi
+
+%preun
+%systemd_preun containerd.service
+
+%postun
+%systemd_postun_with_restart containerd.service
+
+%files
+%license LICENSE NOTICE
+%{_bindir}/*
+%{_mandir}/*
+%config(noreplace) %{_unitdir}/containerd.service
+%config(noreplace) %{_sysconfdir}/containerd/config.toml
+%dir /opt/containerd
+%dir /opt/containerd/bin
+%dir /opt/containerd/lib
+
+%changelog
+* Wed Dec 11 2024 Nan Liu - 2.0.0-1
+- Created a standalone package for containerd 2.0.0
+- Initial CBL-Mariner import from Azure
+- Initial version and License verified
\ No newline at end of file
diff --git a/SPECS/docker-buildx/CVE-2024-45337.patch b/SPECS/docker-buildx/CVE-2024-45337.patch
new file mode 100644
index 00000000000..0c8df5f2421
--- /dev/null
+++ b/SPECS/docker-buildx/CVE-2024-45337.patch
@@ -0,0 +1,77 @@
+https://github.com/golang/crypto/commit/b4f1988a35dee11ec3e05d6bf3e90b695fbd8909.patch
+
+From b4f1988a35dee11ec3e05d6bf3e90b695fbd8909 Mon Sep 17 00:00:00 2001
+From: Roland Shoemaker
+Date: Tue, 3 Dec 2024 09:03:03 -0800
+Subject: [PATCH] ssh: make the public key cache a 1-entry FIFO cache
+
+Users of the the ssh package seem to extremely commonly misuse the
+PublicKeyCallback API, assuming that the key passed in the last call
+before a connection is established is the key used for authentication.
+Some users then make authorization decisions based on this key. This
+property is not documented, and may not be correct, due to the caching
+behavior of the package, resulting in users making incorrect
+authorization decisions about the connection.
+
+This change makes the cache a one entry FIFO cache, making the assumed
+property, that the last call to PublicKeyCallback represents the key
+actually used for authentication, actually hold.
+
+Thanks to Damien Tournoud, Patrick Dawkins, Vince Parker, and
+Jules Duvivier from the Platform.sh / Upsun engineering team
+for reporting this issue.
+
+Fixes golang/go#70779
+Fixes CVE-2024-45337
+
+Change-Id: Ife7c7b4045d8b6bcd7e3a417bdfae370c709797f
+Reviewed-on: https://go-review.googlesource.com/c/crypto/+/635315
+Reviewed-by: Roland Shoemaker
+Auto-Submit: Gopher Robot
+Reviewed-by: Damien Neil
+Reviewed-by: Nicola Murino
+LUCI-TryBot-Result: Go LUCI
+---
+ vendor/golang.org/x/crypto/ssh/server.go | 15 ++++++++++----
+
+diff --git a/vendor/golang.org/x/crypto/ssh/server.go b/vendor/golang.org/x/crypto/ssh/server.go
+index c0d1c29e6f..5b5ccd96f4 100644
+--- a/vendor/golang.org/x/crypto/ssh/server.go
++++ b/vendor/golang.org/x/crypto/ssh/server.go
+@@ -149,7 +149,7 @@ func (s *ServerConfig) AddHostKey(key Signer) {
+ }
+
+ // cachedPubKey contains the results of querying whether a public key is
+-// acceptable for a user.
++// acceptable for a user. This is a FIFO cache.
+ type cachedPubKey struct {
+ user string
+ pubKeyData []byte
+@@ -157,7 +157,13 @@ type cachedPubKey struct {
+ perms *Permissions
+ }
+
+-const maxCachedPubKeys = 16
++// maxCachedPubKeys is the number of cache entries we store.
++//
++// Due to consistent misuse of the PublicKeyCallback API, we have reduced this
++// to 1, such that the only key in the cache is the most recently seen one. This
++// forces the behavior that the last call to PublicKeyCallback will always be
++// with the key that is used for authentication.
++const maxCachedPubKeys = 1
+
+ // pubKeyCache caches tests for public keys. Since SSH clients
+ // will query whether a public key is acceptable before attempting to
+@@ -179,9 +185,10 @@ func (c *pubKeyCache) get(user string, pubKeyData []byte) (cachedPubKey, bool) {
+
+ // add adds the given tuple to the cache.
+ func (c *pubKeyCache) add(candidate cachedPubKey) {
+- if len(c.keys) < maxCachedPubKeys {
+- c.keys = append(c.keys, candidate)
++ if len(c.keys) >= maxCachedPubKeys {
++ c.keys = c.keys[1:]
+ }
++ c.keys = append(c.keys, candidate)
+ }
+
+ // ServerConn is an authenticated SSH connection, as seen from the
diff --git a/SPECS/docker-buildx/docker-buildx.spec b/SPECS/docker-buildx/docker-buildx.spec
index ce095179b50..104ef7d328e 100644
--- a/SPECS/docker-buildx/docker-buildx.spec
+++ b/SPECS/docker-buildx/docker-buildx.spec
@@ -4,13 +4,14 @@ Summary: A Docker CLI plugin for extended build capabilities with BuildKi
Name: docker-buildx
# update "commit_hash" above when upgrading version
Version: 0.14.0
-Release: 1%{?dist}
+Release: 2%{?dist}
License: ASL 2.0
Group: Tools/Container
Vendor: Microsoft Corporation
Distribution: Azure Linux
URL: https://www.github.com/docker/buildx
Source0: https://github.com/docker/buildx/archive/refs/tags/v%{version}.tar.gz#/%{name}-%{version}.tar.gz
+Patch0: CVE-2024-45337.patch
BuildRequires: bash
BuildRequires: golang
@@ -44,6 +45,9 @@ install -m 755 buildx "%{buildroot}%{_libexecdir}/docker/cli-plugins/docker-buil
%{_libexecdir}/docker/cli-plugins/docker-buildx
%changelog
+* Fri Dec 20 2024 Aurelien Bombo - 0.14.0-2
+- Add patch for CVE-2024-45337
+
* Thu May 02 2024 CBL-Mariner Servicing Account - 0.14.0-1
- Auto-upgrade to 0.14.0 - address CVE-2024-23653
diff --git a/SPECS/docker-cli/CVE-2024-36623.patch b/SPECS/docker-cli/CVE-2024-36623.patch
new file mode 100644
index 00000000000..a1722aa6a0e
--- /dev/null
+++ b/SPECS/docker-cli/CVE-2024-36623.patch
@@ -0,0 +1,45 @@
+From 5689dabfb357b673abdb4391eef426f297d7d1bb Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Pawe=C5=82=20Gronowski?=
+Date: Thu, 22 Feb 2024 18:01:40 +0100
+Subject: [PATCH] pkg/streamformatter: Make `progressOutput` concurrency safe
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Sync access to the underlying `io.Writer` with a mutex.
+
+Signed-off-by: Paweł Gronowski
+---
+ vendor/github.com/docker/docker/pkg/streamformatter/streamformatter.go | 5 +++++
+ 1 file changed, 5 insertions(+)
+
+diff --git a/vendor/github.com/docker/docker/pkg/streamformatter/streamformatter.go b/vendor/github.com/docker/docker/pkg/streamformatter/streamformatter.go
+index b0456e580dc9d..098df6b5236b9 100644
+--- a/vendor/github.com/docker/docker/pkg/streamformatter/streamformatter.go
++++ b/vendor/github.com/docker/docker/pkg/streamformatter/streamformatter.go
+@@ -5,6 +5,7 @@ import (
+ "encoding/json"
+ "fmt"
+ "io"
++ "sync"
+
+ "github.com/docker/docker/pkg/jsonmessage"
+ "github.com/docker/docker/pkg/progress"
+@@ -109,6 +110,7 @@ type progressOutput struct {
+ sf formatProgress
+ out io.Writer
+ newLines bool
++ mu sync.Mutex
+ }
+
+ // WriteProgress formats progress information from a ProgressReader.
+@@ -120,6 +122,9 @@ func (out *progressOutput) WriteProgress(prog progress.Progress) error {
+ jsonProgress := jsonmessage.JSONProgress{Current: prog.Current, Total: prog.Total, HideCounts: prog.HideCounts, Units: prog.Units}
+ formatted = out.sf.formatProgress(prog.ID, prog.Action, &jsonProgress, prog.Aux)
+ }
++
++ out.mu.Lock()
++ defer out.mu.Unlock()
+ _, err := out.out.Write(formatted)
+ if err != nil {
+ return err
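The backported change serializes writes to the shared progress writer with a mutex. A generic sketch of that pattern — a writer wrapper whose Write method takes a lock — is shown below; it assumes nothing about the docker/docker types and is only meant to show the shape of the fix.

```go
package main

import (
	"fmt"
	"io"
	"os"
	"sync"
)

// lockedWriter serializes writes to the wrapped io.Writer so concurrent
// goroutines cannot interleave partial messages.
type lockedWriter struct {
	mu  sync.Mutex
	out io.Writer
}

func (w *lockedWriter) Write(p []byte) (int, error) {
	w.mu.Lock()
	defer w.mu.Unlock()
	return w.out.Write(p)
}

func main() {
	w := &lockedWriter{out: os.Stdout}
	var wg sync.WaitGroup
	for i := 0; i < 3; i++ {
		wg.Add(1)
		go func(i int) {
			defer wg.Done()
			fmt.Fprintf(w, "progress %d\n", i) // one Write call per line, taken under the lock
		}(i)
	}
	wg.Wait()
}
```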
diff --git a/SPECS/docker-cli/docker-cli.spec b/SPECS/docker-cli/docker-cli.spec
index c08b378b243..5fd8c1d553d 100644
--- a/SPECS/docker-cli/docker-cli.spec
+++ b/SPECS/docker-cli/docker-cli.spec
@@ -3,7 +3,7 @@
Summary: The open-source application container engine client.
Name: docker-cli
Version: 25.0.3
-Release: 2%{?dist}
+Release: 3%{?dist}
License: ASL 2.0
Vendor: Microsoft Corporation
Distribution: Azure Linux
@@ -13,6 +13,7 @@ Source0: https://github.com/docker/cli/archive/v%{version}.tar.gz#/%{name
Source1: %{name}-%{version}-govendor-v1.tar.gz
Patch0: disable_manpage_vendor.patch
Patch1: CVE-2024-24786.patch
+Patch2: CVE-2024-36623.patch
BuildRequires: git
BuildRequires: go-md2man
BuildRequires: golang
@@ -81,6 +82,9 @@ install -p -m 644 contrib/completion/fish/docker.fish %{buildroot}%{_datadir}/fi
%{_datadir}/fish/vendor_completions.d/docker.fish
%changelog
+* Tue Dec 10 2024 Sudipta Pandit - 25.0.3-3
+- Fix CVE-2024-36623 with patch
+
* Mon Nov 25 2024 Bala - 25.0.3-2
- Fix CVE-2024-24786
diff --git a/SPECS/dpdk/dpdk.signatures.json b/SPECS/dpdk/dpdk.signatures.json
index 94737603526..e9294c77ed3 100644
--- a/SPECS/dpdk/dpdk.signatures.json
+++ b/SPECS/dpdk/dpdk.signatures.json
@@ -1,5 +1,5 @@
{
"Signatures": {
- "dpdk-23.11.tar.xz": "64fa58fdfc9e9510e8e414e3bedd165bd3d4ca465a231b280323f83cd53fd865"
+ "dpdk-23.11.3.tar.xz": "8e51836bc6524e0512f89eeca27532021fe0ccc7f764cf238ee283e7f416c185"
}
}
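Bumping the tarball in the signatures file means recording the SHA-256 of the new source archive. Assuming the tarball has already been downloaded next to the program, a manual spot check of the recorded digest could look like the sketch below; the helper is illustrative and not part of the build toolkit, though the file name and hash are the ones from this diff.

```go
package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"io"
	"log"
	"os"
)

// fileSHA256 streams a file through SHA-256 and returns the hex digest,
// matching the format used in the *.signatures.json files.
func fileSHA256(path string) (string, error) {
	f, err := os.Open(path)
	if err != nil {
		return "", err
	}
	defer f.Close()

	h := sha256.New()
	if _, err := io.Copy(h, f); err != nil {
		return "", err
	}
	return hex.EncodeToString(h.Sum(nil)), nil
}

func main() {
	const want = "8e51836bc6524e0512f89eeca27532021fe0ccc7f764cf238ee283e7f416c185"
	got, err := fileSHA256("dpdk-23.11.3.tar.xz")
	if err != nil {
		log.Fatal(err)
	}
	if got != want {
		log.Fatalf("digest mismatch: got %s", got)
	}
	fmt.Println("dpdk-23.11.3.tar.xz matches the recorded signature")
}
```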
diff --git a/SPECS/dpdk/dpdk.spec b/SPECS/dpdk/dpdk.spec
index f4b67cc3564..e6980d1f545 100644
--- a/SPECS/dpdk/dpdk.spec
+++ b/SPECS/dpdk/dpdk.spec
@@ -29,8 +29,8 @@
%bcond_without tools
Summary: Set of libraries and drivers for fast packet processing
Name: dpdk
-Version: 23.11
-Release: 2%{?dist}
+Version: 23.11.3
+Release: 1%{?dist}
License: BSD AND LGPLv2 AND GPLv2
Vendor: Microsoft Corporation
Distribution: Azure Linux
@@ -106,7 +106,7 @@ Vendor: Microsoft Corporation
Distribution: Azure Linux
%prep
-%autosetup -p1 -n dpdk-%{version}
+%autosetup -p1 -n dpdk-stable-%{version}
%build
CFLAGS="$(echo %{optflags} -fcommon)" \
@@ -179,6 +179,9 @@ CFLAGS="$(echo %{optflags} -fcommon)" \
%endif
%changelog
+* Fri Dec 20 2024 Jon Slobodzian - 23.11.3-1
+- Upgrade to 23.11.3 to resolve CVE-2024-11614.
+
* Thu Feb 22 2024 Pawel Winogrodzki - 23.11-2
- Updating naming for 3.0 version of Azure Linux.
diff --git a/SPECS/dracut/0016-Handle-SELinux-configuration-for-overlayfs-folders.patch b/SPECS/dracut/0016-Handle-SELinux-configuration-for-overlayfs-folders.patch
new file mode 100644
index 00000000000..082bb7968b7
--- /dev/null
+++ b/SPECS/dracut/0016-Handle-SELinux-configuration-for-overlayfs-folders.patch
@@ -0,0 +1,38 @@
+From 6fe401628d5671cf2f7683e4e0a04b9dcefb0180 Mon Sep 17 00:00:00 2001
+From: George Mileka
+Date: Mon, 25 Nov 2024 16:27:41 -0800
+Subject: [PATCH] Handle SELinux configuration for overlayfs folders.
+
+When SELinux is enabled, the root folder '/' is expected to be marked
+'root_t' - otherwise, SELinux will deny access calls.
+
+When an overlay is created for the root folder, it is built from
+the lower and upper folders. The upper folder is created on tmpfs,
+and subsequently gets the 'tmpfs_t' type assigned by inheritance.
+When the root overlay is created, it inherits the 'tmpfs_t'.
+
+Now, we have a conflict between what SELinux expects ('root_t') and
+what we ended up with ('tmpfs_t'), leading to denials and failing to
+complete the boot successfully.
+
+This patch injects a script ('azl-configure-selinux.sh') that will
+run after Dracut's SELinux module runs, and assigns the right SELinux
+types (i.e. 'root_t') to the overlay folders. That way, when pivoting
+takes place, the visible root folder has the right SELinux labeling.
+---
+ modules.d/90overlayfs/module-setup.sh | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/modules.d/90overlayfs/module-setup.sh b/modules.d/90overlayfs/module-setup.sh
+index dae20093..919df4f5 100755
+--- a/modules.d/90overlayfs/module-setup.sh
++++ b/modules.d/90overlayfs/module-setup.sh
+@@ -17,4 +17,5 @@ install() {
+ inst_hook pre-mount 01 "$moddir/prepare-overlayfs.sh"
+ inst_hook mount 01 "$moddir/mount-overlayfs.sh" # overlay on top of block device
+ inst_hook pre-pivot 10 "$moddir/mount-overlayfs.sh" # overlay on top of network device (e.g. nfs)
++ inst_hook pre-pivot 70 "$moddir/azl-configure-selinux.sh"
+ }
+--
+2.34.1
+
diff --git a/SPECS/dracut/90overlayfs/azl-configure-selinux.sh b/SPECS/dracut/90overlayfs/azl-configure-selinux.sh
new file mode 100644
index 00000000000..3670b00ed69
--- /dev/null
+++ b/SPECS/dracut/90overlayfs/azl-configure-selinux.sh
@@ -0,0 +1,34 @@
+#!/bin/sh
+type getarg > /dev/null 2>&1 || . /lib/dracut-lib.sh
+
+# If SELinux is disabled exit now
+getarg "selinux=0" > /dev/null && return 0
+
+SELINUX="enforcing"
+# shellcheck disable=SC1090
+[ -e "$NEWROOT/etc/selinux/config" ] && . "$NEWROOT/etc/selinux/config"
+[ "$SELINUX" == "disabled" ] && return 0
+
+getargbool 0 rd.live.overlay.overlayfs && overlayfs="yes"
+
+if [ -n "$overlayfs" ]; then
+
+ # Get the current root folder context
+ rootDirContext=$($NEWROOT/usr/sbin/matchpathcon -f $NEWROOT/etc/selinux/targeted/contexts/files/file_contexts -m dir /)
+
+ # Parse the context to extract the root folder '/' context type.
+ # The line should be on the form: "/ system_u:object_r:root_t:s0"
+
+ # Split folder and context
+ IFS='\t' read -r _ selinuxContext <<< "$rootDirContext"
+ echo "root folder context: ($selinuxContext)"
+
+ # Split context and extract its type
+ IFS=':' read -r _ _ contextType _ <<< "$selinuxContext"
+ echo "root folder label : ($contextType)"
+
+ # Set the type on the target folders
+ [ -e /sysroot ] && chcon -t $contextType /sysroot
+ [ -e /run/overlayfs ] && chcon -t $contextType /run/overlayfs
+ [ -e /run/ovlwork ] && chcon -t $contextType /run/ovlwork
+fi
diff --git a/SPECS/dracut/dracut.signatures.json b/SPECS/dracut/dracut.signatures.json
index adc514e9058..65ce41c8701 100644
--- a/SPECS/dracut/dracut.signatures.json
+++ b/SPECS/dracut/dracut.signatures.json
@@ -6,6 +6,7 @@
"00-vrf.conf": "e2885a4b090d8ca3771e60ce6dcd8b849e28ce5002a5c7b71ff796a92deb2810",
"00-xen.conf": "8b7a89b7716cb40a9c0d681caed6994d81ff4dfad4fe50cea15cd47b885dc5a6",
"50-noxattr.conf": "61d95f05890ac6ee3355d0a386dd5645d82b7a4202d90305d997fd18c6d139dd",
+ "azl-configure-selinux.sh": "5f526509910fccdc2dffad4ef5070740847195510e3faefff39b831c9d28a439",
"azl-liveos-artifacts-download.service": "888be8c82297cccd510d7f963611c2360ae67559826b2b474da6d9935237de64",
"azl-liveos-artifacts-download.sh": "f21dc68de8c81d8a8128e7a9d7be45d25978f0b5e47a4cf1a2d97b1e171ec045",
"dracut-102.tar.gz": "601b175cbf4d2ee902bb7bda3af8826ae2ca060c1af880f6da5a833413f4ec70",
diff --git a/SPECS/dracut/dracut.spec b/SPECS/dracut/dracut.spec
index ade311b548c..f6a3b8568bd 100644
--- a/SPECS/dracut/dracut.spec
+++ b/SPECS/dracut/dracut.spec
@@ -4,7 +4,7 @@
Summary: dracut to create initramfs
Name: dracut
Version: 102
-Release: 7%{?dist}
+Release: 8%{?dist}
# The entire source code is GPLv2+
# except install/* which is LGPLv2+
License: GPLv2+ AND LGPLv2+
@@ -30,6 +30,7 @@ Source11: 50-noxattr.conf
# code reviews given that they are new to Dracut.
Source12: 90livenet/azl-liveos-artifacts-download.service
Source13: 90livenet/azl-liveos-artifacts-download.sh
+Source14: 90overlayfs/azl-configure-selinux.sh
# allow-liveos-overlay-no-user-confirmation-prompt.patch has been introduced by
# the Azure Linux team to allow skipping the user confirmation prompt during
@@ -53,6 +54,7 @@ Patch: 0012-fix-dracut-functions-avoid-awk-in-get_maj_min.patch
Patch: 0013-revert-fix-crypt-unlock-encrypted-devices-by-default.patch
Patch: 0014-fix-systemd-pcrphase-in-hostonly-mode-do-not-try-to-include-systemd-pcrphase.patch
Patch: 0015-fix-systemd-pcrphase-make-tpm2-tss-an-optional-dependency.patch
+Patch: 0016-Handle-SELinux-configuration-for-overlayfs-folders.patch
BuildRequires: bash
BuildRequires: kmod-devel
@@ -205,6 +207,8 @@ install -m 0644 %{SOURCE11} %{buildroot}%{_sysconfdir}/dracut.conf.d/50-noxattr.
install -m 0644 %{SOURCE12} %{buildroot}%{dracutlibdir}/modules.d/90livenet/azl-liveos-artifacts-download.service
install -m 0755 %{SOURCE13} %{buildroot}%{dracutlibdir}/modules.d/90livenet/azl-liveos-artifacts-download.sh
+install -m 0755 %{SOURCE14} %{buildroot}%{dracutlibdir}/modules.d/90overlayfs/azl-configure-selinux.sh
+
mkdir -p %{buildroot}%{dracutlibdir}/modules.d/20overlayfs/
install -p -m 0755 %{SOURCE4} %{buildroot}%{dracutlibdir}/modules.d/20overlayfs/
install -p -m 0755 %{SOURCE5} %{buildroot}%{dracutlibdir}/modules.d/20overlayfs/
@@ -310,6 +314,9 @@ ln -srv %{buildroot}%{_bindir}/%{name} %{buildroot}%{_sbindir}/%{name}
%dir %{_sharedstatedir}/%{name}/overlay
%changelog
+* Mon Dec 09 2024 George Mileka - 102-8
+- Augment overlayfs with SELinux handling.
+
* Thu Oct 31 2024 George Mileka - 102-7
- Augment livenet module with a download daemon.
diff --git a/SPECS/erlang/OTP-24-Code.patch b/SPECS/erlang/OTP-24-Code.patch
deleted file mode 100644
index aff1a242cb2..00000000000
--- a/SPECS/erlang/OTP-24-Code.patch
+++ /dev/null
@@ -1,237 +0,0 @@
-From 8508cef07d5d559e48d51d159a30f64897c3903f Mon Sep 17 00:00:00 2001
-From: =?UTF-8?q?Bj=C3=B6rn=20Gustavsson?=
-Date: Mon, 11 Mar 2024 07:41:23 +0100
-Subject: [PATCH 1/2] AArch64: Fix incorrect result for running code compiled
- by OTP 24
-
-On AArch64, when constructing a binary using the bit syntax, using a
-unit greater than 16 and not being a power of two would, would cause
-erroneous results. For example, given this module compiled with
-Erlang/OTP 24:
-
- -module(t).
- -export([bar/2]).
-
- bar(Contents, Size) ->
- <>.
-
-only the first two bytes would be correctly set:
-
- 1> t:bar(-1, 1).
- <<255,255,0,0:7>>
- 2> t:bar(-1, 1).
- <<255,255,149,35:7>>
- 3> t:bar(-1, 1).
- <<255,255,0,0:7>>
- 4> t:bar(-1, 1).
- <<255,255,3,0:7>>
----
- erts/emulator/beam/jit/arm/instr_bs.cpp | 9 ++-------
- erts/emulator/test/Makefile | 1 +
- erts/emulator/test/bs_construct_SUITE.erl | 10 ++++++++++
- 3 files changed, 13 insertions(+), 7 deletions(-)
-
-diff --git a/erts/emulator/beam/jit/arm/instr_bs.cpp b/erts/emulator/beam/jit/arm/instr_bs.cpp
-index 2eb2d4201cf1..79349a095d91 100644
---- a/erts/emulator/beam/jit/arm/instr_bs.cpp
-+++ b/erts/emulator/beam/jit/arm/instr_bs.cpp
-@@ -99,13 +99,8 @@ int BeamModuleAssembler::emit_bs_get_field_size(const ArgSource &Size,
- a.lsl(out, out, imm(trailing_bits - _TAG_IMMED1_SIZE));
- }
- } else {
-- if (unit >= (1 << _TAG_IMMED1_SIZE)) {
-- mov_imm(TMP1, unit >> _TAG_IMMED1_SIZE);
-- } else {
-- a.lsr(out, out, imm(_TAG_IMMED1_SIZE));
-- mov_imm(TMP1, unit);
-- }
--
-+ a.lsr(out, out, imm(_TAG_IMMED1_SIZE));
-+ mov_imm(TMP1, unit);
- a.mul(out, out, TMP1);
- }
-
-diff --git a/erts/emulator/test/Makefile b/erts/emulator/test/Makefile
-index c058fe72d266..f87aabead35d 100644
---- a/erts/emulator/test/Makefile
-+++ b/erts/emulator/test/Makefile
-@@ -152,6 +152,7 @@ NO_OPT= bs_bincomp \
- map
-
- R24= \
-+ bs_construct \
- process_max_heap_size
-
- R25= \
-diff --git a/erts/emulator/test/bs_construct_SUITE.erl b/erts/emulator/test/bs_construct_SUITE.erl
-index adf893d9993a..3bbe53e5ab56 100644
---- a/erts/emulator/test/bs_construct_SUITE.erl
-+++ b/erts/emulator/test/bs_construct_SUITE.erl
-@@ -1389,6 +1389,16 @@ do_zero_init_1(Size, LPad, RPad) ->
- end()).
-
- error_info(_Config) ->
-+ case ?MODULE of
-+ bs_construct_r24_SUITE ->
-+ %% Error information is not implemented for old bit syntax
-+ %% instructions.
-+ ok;
-+ _ ->
-+ error_info()
-+ end.
-+
-+error_info() ->
- Atom = id(some_atom),
- NegSize = id(-1),
- HugeNegSize = id(-1 bsl 64),
-
-From 3ab2cc66a5c5abb049aec20043b45e428ece6d9a Mon Sep 17 00:00:00 2001
-From: =?UTF-8?q?Bj=C3=B6rn=20Gustavsson?=
-Date: Mon, 11 Mar 2024 10:19:55 +0100
-Subject: [PATCH 2/2] AArch64: Fix crash for running bit syntax code compiled
- by OTP 24
-
-This bug was introduced in 67c52b69250ebf.
-
-Closes #8238
----
- erts/emulator/beam/jit/arm/instr_bs.cpp | 2 +-
- erts/emulator/test/bs_construct_SUITE.erl | 22 +++++++-
- .../otp_24_code_gh_8238.S | 50 +++++++++++++++++++
- .../otp_24_code_gh_8238.erl | 10 ++++
- 4 files changed, 81 insertions(+), 3 deletions(-)
- create mode 100644 erts/emulator/test/bs_construct_SUITE_data/otp_24_code_gh_8238.S
- create mode 100644 erts/emulator/test/bs_construct_SUITE_data/otp_24_code_gh_8238.erl
-
-diff --git a/erts/emulator/beam/jit/arm/instr_bs.cpp b/erts/emulator/beam/jit/arm/instr_bs.cpp
-index 79349a095d91..09db80c2048d 100644
---- a/erts/emulator/beam/jit/arm/instr_bs.cpp
-+++ b/erts/emulator/beam/jit/arm/instr_bs.cpp
-@@ -148,7 +148,7 @@ void BeamModuleAssembler::emit_i_bs_init_fail_heap(const ArgSource &Size,
- }
-
- if (emit_bs_get_field_size(Size, 1, fail, ARG4) >= 0) {
-- a.lsr(ARG4, ARG4, imm(3));
-+ a.lsl(ARG4, ARG4, imm(3));
- mov_arg(ARG5, Heap);
- mov_arg(ARG6, Live);
- fragment_call(ga->get_bs_init_bits_shared());
-diff --git a/erts/emulator/test/bs_construct_SUITE.erl b/erts/emulator/test/bs_construct_SUITE.erl
-index 3bbe53e5ab56..66d538e1a271 100644
---- a/erts/emulator/test/bs_construct_SUITE.erl
-+++ b/erts/emulator/test/bs_construct_SUITE.erl
-@@ -30,7 +30,9 @@
- otp_7422/1, zero_width/1, bad_append/1, bs_append_overflow/1,
- bs_append_offheap/1,
- reductions/1, fp16/1, zero_init/1, error_info/1, little/1,
-- heap_binary_unit/1]).
-+ heap_binary_unit/1,
-+ otp_24_code_gh_8238/1
-+ ]).
-
- -include_lib("common_test/include/ct.hrl").
-
-@@ -45,7 +47,8 @@ all() ->
- copy_writable_binary, kostis, dynamic, bs_add, otp_7422, zero_width,
- bad_append, bs_append_overflow, bs_append_offheap,
- reductions, fp16, zero_init,
-- error_info, little, heap_binary_unit].
-+ error_info, little, heap_binary_unit,
-+ otp_24_code_gh_8238].
-
- init_per_suite(Config) ->
- Config.
-@@ -1706,6 +1709,21 @@ heap_binary_unit_2(Variant, Rest) ->
- {error2, Bin2}
- end.
-
-+otp_24_code_gh_8238(Config) ->
-+ case ?MODULE of
-+ bs_construct_SUITE ->
-+ %% GH-8238. Code compiled with Erlang/OTP 24 would crash
-+ %% when run on OTP-26.2.3.
-+ DataDir = proplists:get_value(data_dir, Config),
-+ Asm = filename:join(DataDir, atom_to_list(?FUNCTION_NAME) ++ ".S"),
-+ {ok,Mod,Beam} = compile:file(Asm, [binary,from_asm,report]),
-+ {module,Mod} = code:load_binary(Mod, "", Beam),
-+ Mod:Mod(),
-+ ok;
-+ _ ->
-+ {skip,"Enough to run once"}
-+ end.
-+
- %%%
- %%% Common utilities.
- %%%
-diff --git a/erts/emulator/test/bs_construct_SUITE_data/otp_24_code_gh_8238.S b/erts/emulator/test/bs_construct_SUITE_data/otp_24_code_gh_8238.S
-new file mode 100644
-index 000000000000..7944fa818a69
---- /dev/null
-+++ b/erts/emulator/test/bs_construct_SUITE_data/otp_24_code_gh_8238.S
-@@ -0,0 +1,50 @@
-+{module, otp_24_code_gh_8238}. %% version = 0
-+
-+{exports, [{module_info,0},{module_info,1},{otp_24_code_gh_8238,0}]}.
-+
-+{attributes, []}.
-+
-+{labels, 7}.
-+
-+
-+{function, otp_24_code_gh_8238, 0, 2}.
-+ {label,1}.
-+ {line,[{location,"otp_24_code_gh_8238.erl",4}]}.
-+ {func_info,{atom,otp_24_code_gh_8238},{atom,otp_24_code_gh_8238},0}.
-+ {label,2}.
-+ {allocate,0,0}.
-+ {move,{integer,1000},{x,0}}.
-+ {line,[{location,"otp_24_code_gh_8238.erl",5}]}.
-+ {call_ext,1,{extfunc,erlang,integer_to_binary,1}}.
-+ {line,[{location,"otp_24_code_gh_8238.erl",6}]}.
-+ {gc_bif,byte_size,{f,0},1,[{x,0}],{x,1}}.
-+ {bs_add,{f,0},[{x,1},{integer,9},1],{x,1}}.
-+ {bs_init2,{f,0},{x,1},2,2,{field_flags,[]},{x,1}}.
-+ {bs_put_integer,{f,0},
-+ {integer,72},
-+ 1,
-+ {field_flags,[unsigned,big]},
-+ {integer,1281499675772873685536}}.
-+ {bs_put_binary,{f,0},{atom,all},8,{field_flags,[unsigned,big]},{x,0}}.
-+ {put_list,{x,1},nil,{x,1}}.
-+ {move,{literal,"~p\n"},{x,0}}.
-+ {call_ext_last,2,{extfunc,io,format,2},0}.
-+
-+
-+{function, module_info, 0, 4}.
-+ {label,3}.
-+ {line,[]}.
-+ {func_info,{atom,otp_24_code_gh_8238},{atom,module_info},0}.
-+ {label,4}.
-+ {move,{atom,otp_24_code_gh_8238},{x,0}}.
-+ {call_ext_only,1,{extfunc,erlang,get_module_info,1}}.
-+
-+
-+{function, module_info, 1, 6}.
-+ {label,5}.
-+ {line,[]}.
-+ {func_info,{atom,otp_24_code_gh_8238},{atom,module_info},1}.
-+ {label,6}.
-+ {move,{x,0},{x,1}}.
-+ {move,{atom,otp_24_code_gh_8238},{x,0}}.
-+ {call_ext_only,2,{extfunc,erlang,get_module_info,2}}.
-diff --git a/erts/emulator/test/bs_construct_SUITE_data/otp_24_code_gh_8238.erl b/erts/emulator/test/bs_construct_SUITE_data/otp_24_code_gh_8238.erl
-new file mode 100644
-index 000000000000..d18a7c096d0a
---- /dev/null
-+++ b/erts/emulator/test/bs_construct_SUITE_data/otp_24_code_gh_8238.erl
-@@ -0,0 +1,10 @@
-+-module(otp_24_code_gh_8238).
-+-export([?MODULE/0]).
-+
-+%% Produce otp_24_code_gh_8238.S using Erlang/OTP 24 like this:
-+%% erlc -S +no_copt +no_ssa_opt otp_24_code_gh_8238.erl
-+
-+?MODULE() ->
-+ Bin = integer_to_binary(1000),
-+ io:format("~p\n", [<<"Example: ", Bin/binary>>]).
-+
diff --git a/SPECS/erlang/erlang.signatures.json b/SPECS/erlang/erlang.signatures.json
index 1f7c2a71f30..a4dc48acefb 100644
--- a/SPECS/erlang/erlang.signatures.json
+++ b/SPECS/erlang/erlang.signatures.json
@@ -1,5 +1,5 @@
{
- "Signatures": {
- "erlang-26.2.3.tar.gz": "7a79e7955890b06572dbb3c3460771a71f729c15bc6ced018916a432669fd239"
- }
-}
+ "Signatures": {
+ "erlang-26.2.5.6.tar.gz": "371e59b98de59822e45fdbe50c18c8d8dd4c872990e7aaaba8a819e167186d03"
+ }
+}
\ No newline at end of file
diff --git a/SPECS/erlang/erlang.spec b/SPECS/erlang/erlang.spec
index 3ed804756ca..c6694b9ad93 100644
--- a/SPECS/erlang/erlang.spec
+++ b/SPECS/erlang/erlang.spec
@@ -1,15 +1,14 @@
%define debug_package %{nil}
Summary: erlang
Name: erlang
-Version: 26.2.3
-Release: 2%{?dist}
+Version: 26.2.5.6
+Release: 1%{?dist}
License: Apache-2.0
Vendor: Microsoft Corporation
Distribution: Azure Linux
Group: Development/Languages
URL: https://erlang.org
-Source0: https://github.com/erlang/otp/archive/OTP-%{version}/otp-OTP-%{version}.tar.gz#/%{name}-%{version}.tar.gz
-Patch0: OTP-24-Code.patch
+Source0: https://github.com/erlang/otp/archive/OTP-%{version}/otp-OTP-%{version}.tar.gz#/%{name}-%{version}.tar.gz
BuildRequires: ncurses-devel
BuildRequires: openssl-devel
BuildRequires: unixODBC-devel
@@ -54,6 +53,9 @@ export ERL_TOP=`pwd`
%{_libdir}/erlang/*
%changelog
+* Fri Dec 13 2024 Ahmed Badawi - 26.2.5.6-1
+- Upgrade to 26.2.5.6 to fix CVE-2024-53846. Remove the previous OTP-24 patch as the fix is included in the new version.
+
* Mon Apr 01 2024 Sam Meluch - 26.2.3-2
- Add patch to fix issue when running with compiled code from OTP-24 on aarch64
@@ -91,4 +93,4 @@ export ERL_TOP=`pwd`
- Updated Version
* Mon Dec 12 2016 Priyesh Padmavilasom 19.1-1
-- Initial.
+- Initial.
\ No newline at end of file
diff --git a/SPECS/etcd/CVE-2024-24786.patch b/SPECS/etcd/CVE-2024-24786.patch
new file mode 100644
index 00000000000..7d4b7aaf768
--- /dev/null
+++ b/SPECS/etcd/CVE-2024-24786.patch
@@ -0,0 +1,40 @@
+From bb1e9bdc04af19078578d008af166030916eef18 Mon Sep 17 00:00:00 2001
+From: bhapathak
+Date: Tue, 3 Dec 2024 14:35:21 +0000
+Subject: [PATCH] Vendor patch applied
+
+---
+ .../protobuf/encoding/protojson/well_known_types.go | 3 +++
+ .../protobuf/internal/encoding/json/decode.go | 2 +-
+ 2 files changed, 5 insertions(+), 1 deletion(-)
+
+diff --git a/vendor/google.golang.org/protobuf/encoding/protojson/well_known_types.go b/vendor/google.golang.org/protobuf/encoding/protojson/well_known_types.go
+index 6c37d41..3a7d3e7 100644
+--- a/vendor/google.golang.org/protobuf/encoding/protojson/well_known_types.go
++++ b/vendor/google.golang.org/protobuf/encoding/protojson/well_known_types.go
+@@ -348,6 +348,9 @@ func (d decoder) skipJSONValue() error {
+ }
+ }
+ }
++
++ case json.EOF:
++ return errors.New("unexpected EOF")
+ }
+ return nil
+ }
+diff --git a/vendor/google.golang.org/protobuf/internal/encoding/json/decode.go b/vendor/google.golang.org/protobuf/internal/encoding/json/decode.go
+index d043a6e..d2b3ac0 100644
+--- a/vendor/google.golang.org/protobuf/internal/encoding/json/decode.go
++++ b/vendor/google.golang.org/protobuf/internal/encoding/json/decode.go
+@@ -121,7 +121,7 @@ func (d *Decoder) Read() (Token, error) {
+
+ case ObjectClose:
+ if len(d.openStack) == 0 ||
+- d.lastToken.kind == comma ||
++ d.lastToken.kind&(Name|comma) != 0 ||
+ d.openStack[len(d.openStack)-1] != ObjectOpen {
+ return Token{}, d.newSyntaxError(tok.pos, unexpectedFmt, tok.RawString())
+ }
+--
+2.39.4
+
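CVE-2024-24786 is an infinite-loop bug in protojson when it is fed certain malformed JSON; the backport gives skipJSONValue an explicit EOF case and tightens the ObjectClose check so the decoder always terminates. The toy scanner below shows the general shape of that fix — a token-consuming loop needs a terminal case for EOF — and is not the protobuf code (`kind`, `token`, and `skipValues` are invented names).

```go
package main

import (
	"errors"
	"fmt"
)

type kind int

const (
	kindValue kind = iota
	kindEOF
)

type token struct {
	k kind
	v string
}

// skipValues consumes tokens until the reader runs out. Without the explicit
// kindEOF case, a reader that keeps returning EOF tokens would spin forever.
func skipValues(next func() token) error {
	for {
		switch t := next(); t.k {
		case kindValue:
			fmt.Println("skipping", t.v)
		case kindEOF:
			return errors.New("unexpected EOF") // terminal case: stop the loop
		}
	}
}

func main() {
	toks := []token{{kindValue, "a"}, {kindValue, "b"}}
	i := 0
	next := func() token {
		if i >= len(toks) {
			return token{k: kindEOF} // past the end we only ever see EOF
		}
		t := toks[i]
		i++
		return t
	}
	fmt.Println(skipValues(next))
}
```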
diff --git a/SPECS/etcd/etcd.spec b/SPECS/etcd/etcd.spec
index d178862b712..43fc23987a6 100644
--- a/SPECS/etcd/etcd.spec
+++ b/SPECS/etcd/etcd.spec
@@ -3,7 +3,7 @@
Summary: A highly-available key value store for shared configuration
Name: etcd
Version: 3.5.12
-Release: 1%{?dist}
+Release: 2%{?dist}
License: ASL 2.0
Vendor: Microsoft Corporation
Distribution: Azure Linux
@@ -44,6 +44,7 @@ Source1: etcd.service
# --pax-option=exthdr.name=%d/PaxHeaders/%f,delete=atime,delete=ctime \
# -cJf [tarball name] [folder to tar]
Source2: %{name}-%{version}-vendor.tar.gz
+Patch0: CVE-2024-24786.patch
BuildRequires: golang >= 1.16
%description
@@ -60,7 +61,7 @@ The etcd-tools package contains the etcd-dump-db and etcd-dump-logs diagnostic
tools.
%prep
-%autosetup -p1
+%autosetup -N -p1
tar --no-same-owner -xf %{SOURCE2}
%build
@@ -71,6 +72,7 @@ mkdir -p %{ETCD_OUT_DIR}
for component in server etcdctl etcdutl; do
pushd $component
tar --no-same-owner -xf %{_builddir}/%{name}-%{version}/vendor-$component.tar.gz
+ patch -p1 -s --fuzz=0 --no-backup-if-mismatch -f --input %{PATCH0}
go build \
-o %{ETCD_OUT_DIR} \
-ldflags=-X=go.etcd.io/etcd/api/v3/version.GitSHA=v%{version}
@@ -145,6 +147,9 @@ install -vdm755 %{buildroot}%{_sharedstatedir}/etcd
/%{_docdir}/%{name}-%{version}-tools/*
%changelog
+* Tue Dec 03 2024 bhapathak - 3.5.12-2
+- Patch CVE-2024-24786
+
* Fri May 24 2024 CBL-Mariner Servicing Account - 3.5.12-1
- Auto-upgrade to 3.5.12 - none
diff --git a/SPECS/flannel/CVE-2024-24786.patch b/SPECS/flannel/CVE-2024-24786.patch
new file mode 100644
index 00000000000..54f2436a58c
--- /dev/null
+++ b/SPECS/flannel/CVE-2024-24786.patch
@@ -0,0 +1,40 @@
+From 4242a8a09f371163fb7f4ac2c5998d3e162f8923 Mon Sep 17 00:00:00 2001
+From: sthelkar
+Date: Fri, 6 Dec 2024 04:36:49 +0000
+Subject: [PATCH] Vendor patch applied
+
+---
+ .../protobuf/encoding/protojson/well_known_types.go | 4 ++++
+ .../protobuf/internal/encoding/json/decode.go | 2 +-
+ 2 files changed, 5 insertions(+), 1 deletion(-)
+
+diff --git a/vendor/google.golang.org/protobuf/encoding/protojson/well_known_types.go b/vendor/google.golang.org/protobuf/encoding/protojson/well_known_types.go
+index 6c37d41..70c2ba6 100644
+--- a/vendor/google.golang.org/protobuf/encoding/protojson/well_known_types.go
++++ b/vendor/google.golang.org/protobuf/encoding/protojson/well_known_types.go
+@@ -348,6 +348,10 @@ func (d decoder) skipJSONValue() error {
+ }
+ }
+ }
++ case json.EOF:
++ // This can only happen if there's a bug in Decoder.Read.
++ // Avoid an infinite loop if this does happen.
++ return errors.New("unexpected EOF")
+ }
+ return nil
+ }
+diff --git a/vendor/google.golang.org/protobuf/internal/encoding/json/decode.go b/vendor/google.golang.org/protobuf/internal/encoding/json/decode.go
+index d043a6e..d2b3ac0 100644
+--- a/vendor/google.golang.org/protobuf/internal/encoding/json/decode.go
++++ b/vendor/google.golang.org/protobuf/internal/encoding/json/decode.go
+@@ -121,7 +121,7 @@ func (d *Decoder) Read() (Token, error) {
+
+ case ObjectClose:
+ if len(d.openStack) == 0 ||
+- d.lastToken.kind == comma ||
++ d.lastToken.kind&(Name|comma) != 0 ||
+ d.openStack[len(d.openStack)-1] != ObjectOpen {
+ return Token{}, d.newSyntaxError(tok.pos, unexpectedFmt, tok.RawString())
+ }
+--
+2.39.4
diff --git a/SPECS/flannel/flannel.spec b/SPECS/flannel/flannel.spec
index 9d47ef9506b..d374de304a3 100644
--- a/SPECS/flannel/flannel.spec
+++ b/SPECS/flannel/flannel.spec
@@ -3,7 +3,7 @@
Summary: Simple and easy way to configure a layer 3 network fabric designed for Kubernetes
Name: flannel
Version: 0.24.2
-Release: 7%{?dist}
+Release: 8%{?dist}
License: ASL 2.0
Vendor: Microsoft Corporation
Distribution: Azure Linux
@@ -11,6 +11,7 @@ Group: System Environment/Libraries
URL: https://github.com/flannel-io/flannel
Source0: https://github.com/flannel-io/%{name}/archive/refs/tags/v%{version}.tar.gz#/%{name}-%{version}.tar.gz
Source1: %{name}-%{version}-vendor.tar.gz
+Patch0: CVE-2024-24786.patch
BuildRequires: gcc
BuildRequires: glibc-devel
BuildRequires: glibc-static >= 2.38-8%{?dist}
@@ -21,12 +22,9 @@ BuildRequires: kernel-headers
Flannel is a simple and easy way to configure a layer 3 network fabric designed for Kubernetes.
%prep
-%autosetup -p1
+%autosetup -p1 -a 1
%build
-# create vendor folder from the vendor tarball and set vendor mode
-tar -xf %{SOURCE1} --no-same-owner
-
export GOPATH=%{our_gopath}
export TAG=v%{version}
%ifarch x86_64
@@ -50,6 +48,9 @@ install -p -m 755 -t %{buildroot}%{_bindir} ./dist/flanneld
%{_bindir}/flanneld
%changelog
+* Fri Dec 06 2024 sthelkar - 0.24.2-8
+- Patch CVE-2024-24786
+
* Mon Aug 26 2024 Rachel Menge - 0.24.2-7
- Update to build dep latest glibc-static version
diff --git a/SPECS/fluent-bit/CVE-2024-27532.patch b/SPECS/fluent-bit/CVE-2024-27532.patch
new file mode 100644
index 00000000000..83b9b8f1ae9
--- /dev/null
+++ b/SPECS/fluent-bit/CVE-2024-27532.patch
@@ -0,0 +1,42 @@
+From bd866ae9686ea914f57e83bd5b2e9c7a5a2a7323 Mon Sep 17 00:00:00 2001
+From: Sudipta Pandit
+Date: Thu, 14 Nov 2024 13:32:31 +0530
+Subject: [PATCH] Fix CVE-2024-27532
+
+Reference: https://github.com/bytecodealliance/wasm-micro-runtime/pull/3133
+
+---
+ .../core/iwasm/interpreter/wasm_loader.c | 2 +-
+ .../core/iwasm/interpreter/wasm_mini_loader.c | 3 ++-
+ 2 files changed, 3 insertions(+), 2 deletions(-)
+
+diff --git a/lib/wasm-micro-runtime-WAMR-1.3.0/core/iwasm/interpreter/wasm_loader.c b/lib/wasm-micro-runtime-WAMR-1.3.0/core/iwasm/interpreter/wasm_loader.c
+index 87af8526f..2254ba577 100644
+--- a/lib/wasm-micro-runtime-WAMR-1.3.0/core/iwasm/interpreter/wasm_loader.c
++++ b/lib/wasm-micro-runtime-WAMR-1.3.0/core/iwasm/interpreter/wasm_loader.c
+@@ -6694,7 +6694,7 @@ wasm_loader_check_br(WASMLoaderContext *loader_ctx, uint32 depth,
+ uint16 cell_num;
+
+ bh_assert(loader_ctx->csp_num > 0);
+- if (loader_ctx->csp_num < depth + 1) {
++ if (loader_ctx->csp_num - 1 < depth) {
+ set_error_buf(error_buf, error_buf_size,
+ "unknown label, "
+ "unexpected end of section or function");
+diff --git a/lib/wasm-micro-runtime-WAMR-1.3.0/core/iwasm/interpreter/wasm_mini_loader.c b/lib/wasm-micro-runtime-WAMR-1.3.0/core/iwasm/interpreter/wasm_mini_loader.c
+index 157a82cc3..ee01db71d 100644
+--- a/lib/wasm-micro-runtime-WAMR-1.3.0/core/iwasm/interpreter/wasm_mini_loader.c
++++ b/lib/wasm-micro-runtime-WAMR-1.3.0/core/iwasm/interpreter/wasm_mini_loader.c
+@@ -5199,7 +5199,8 @@ wasm_loader_check_br(WASMLoaderContext *loader_ctx, uint32 depth,
+ int32 i, available_stack_cell;
+ uint16 cell_num;
+
+- if (loader_ctx->csp_num < depth + 1) {
++ bh_assert(loader_ctx->csp_num > 0);
++ if (loader_ctx->csp_num - 1 < depth) {
+ set_error_buf(error_buf, error_buf_size,
+ "unknown label, "
+ "unexpected end of section or function");
+--
+2.34.1
+
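The one-line change in wasm_loader_check_br appears aimed at unsigned wrap-around: with 32-bit unsigned operands, `depth + 1` becomes 0 when depth is UINT32_MAX, so the old `csp_num < depth + 1` check can never fire for such a depth, whereas `csp_num - 1 < depth` behaves correctly once `csp_num > 0` is asserted (which the patch also adds in the mini loader). The small Go program below uses uint32 stand-ins for the C types to show the two checks diverging.

```go
package main

import (
	"fmt"
	"math"
)

func main() {
	var cspNum uint32 = 4             // number of labels on the control stack
	var depth uint32 = math.MaxUint32 // out-of-range branch depth from the module

	// Pre-patch shape: depth+1 wraps to 0, so the guard never fires.
	fmt.Println("old check rejects:", cspNum < depth+1) // false -> bad depth accepted

	// Patched shape: safe as long as cspNum > 0 (asserted in the C code).
	fmt.Println("new check rejects:", cspNum-1 < depth) // true -> bad depth rejected
}
```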
diff --git a/SPECS/fluent-bit/fluent-bit.spec b/SPECS/fluent-bit/fluent-bit.spec
index fbd0c432ea3..c41de3412bc 100644
--- a/SPECS/fluent-bit/fluent-bit.spec
+++ b/SPECS/fluent-bit/fluent-bit.spec
@@ -1,7 +1,7 @@
Summary: Fast and Lightweight Log processor and forwarder for Linux, BSD and OSX
Name: fluent-bit
Version: 3.1.9
-Release: 1%{?dist}
+Release: 2%{?dist}
License: Apache-2.0
Vendor: Microsoft Corporation
Distribution: Azure Linux
@@ -9,6 +9,7 @@ URL: https://fluentbit.io
Source0: https://github.com/fluent/%{name}/archive/refs/tags/v%{version}.tar.gz#/%{name}-%{version}.tar.gz
Patch0: CVE-2024-34250.patch
Patch1: CVE-2024-25431.patch
+Patch2: CVE-2024-27532.patch
BuildRequires: bison
BuildRequires: cmake
BuildRequires: cyrus-sasl-devel
@@ -83,6 +84,9 @@ Development files for %{name}
%{_libdir}/fluent-bit/*.so
%changelog
+* Tue Dec 10 2024 Sudipta Pandit - 3.1.9-2
+- Backport fixes for CVE-2024-27532
+
* Tue Nov 23 2024 Paul Meyer - 3.1.9-1
- Update to 3.1.9 to enable Lua filter plugin using system luajit library.
- Remove patches for CVE-2024-25629 and CVE-2024-28182 as they are fixed in 3.1.9.
diff --git a/SPECS/gh/0001-Fix-false-negative-in-TestMigrationWriteErrors-when-.patch b/SPECS/gh/0001-Fix-false-negative-in-TestMigrationWriteErrors-when-.patch
new file mode 100644
index 00000000000..a3dc44dc3ee
--- /dev/null
+++ b/SPECS/gh/0001-Fix-false-negative-in-TestMigrationWriteErrors-when-.patch
@@ -0,0 +1,88 @@
+From 82441ca6f9736bc542e699c8cbf46f80542ed618 Mon Sep 17 00:00:00 2001
+From: Vince Perri <5596945+vinceaperri@users.noreply.github.com>
+Date: Tue, 19 Nov 2024 23:29:08 +0000
+Subject: [PATCH] Fix false-negative in TestMigrationWriteErrors when root
+
+---
+ internal/config/migrate_test.go | 44 ++++++++++++++++++++++++++++++++-
+ 1 file changed, 43 insertions(+), 1 deletion(-)
+
+diff --git a/internal/config/migrate_test.go b/internal/config/migrate_test.go
+index 783f605..7557149 100644
+--- a/internal/config/migrate_test.go
++++ b/internal/config/migrate_test.go
+@@ -6,13 +6,20 @@ import (
+ "io"
+ "os"
+ "path/filepath"
++ "syscall"
+ "testing"
++ "unsafe"
+
+ ghmock "github.com/cli/cli/v2/internal/gh/mock"
+ ghConfig "github.com/cli/go-gh/v2/pkg/config"
+ "github.com/stretchr/testify/require"
+ )
+
++const (
++ FS_IOC_SETFLAGS = 0x40086602
++ FS_IMMUTABLE_FL = 0x00000010
++)
++
+ func TestMigrationAppliedSuccessfully(t *testing.T) {
+ readConfig := StubWriteConfig(t)
+
+@@ -215,6 +222,9 @@ func TestMigrationWriteErrors(t *testing.T) {
+
+ // Then the error is wrapped and bubbled
+ require.ErrorContains(t, err, tt.wantErrContains)
++
++ // Make the file writeable again so we can clean up
++ makeFileWriteable(t, filepath.Join(tempDir, tt.unwriteableFile))
+ })
+ }
+ }
+@@ -226,7 +236,39 @@ func makeFileUnwriteable(t *testing.T, file string) {
+ require.NoError(t, err)
+ f.Close()
+
+- require.NoError(t, os.Chmod(file, 0000))
++ if os.Geteuid() == 0 {
++ fd, err := syscall.Open(file, syscall.O_RDONLY, 0)
++ require.NoError(t, err)
++ defer syscall.Close(fd)
++
++ var flags uint
++ _, _, errno := syscall.Syscall(syscall.SYS_IOCTL, uintptr(fd), uintptr(FS_IOC_SETFLAGS), uintptr(unsafe.Pointer(&flags)))
++ require.Zero(t, errno)
++
++ flags |= FS_IMMUTABLE_FL
++ _, _, errno = syscall.Syscall(syscall.SYS_IOCTL, uintptr(fd), uintptr(FS_IOC_SETFLAGS), uintptr(unsafe.Pointer(&flags)))
++ require.Zero(t, errno)
++ } else {
++ require.NoError(t, os.Chmod(file, 0000))
++ }
++}
++
++func makeFileWriteable(t *testing.T, file string) {
++ t.Helper()
++
++ if os.Geteuid() == 0 {
++ fd, err := syscall.Open(file, syscall.O_RDONLY, 0)
++ require.NoError(t, err)
++ defer syscall.Close(fd)
++
++ var flags uint
++ _, _, errno := syscall.Syscall(syscall.SYS_IOCTL, uintptr(fd), uintptr(FS_IOC_SETFLAGS), uintptr(unsafe.Pointer(&flags)))
++ require.Zero(t, errno)
++
++ flags &^= FS_IMMUTABLE_FL
++ _, _, errno = syscall.Syscall(syscall.SYS_IOCTL, uintptr(fd), uintptr(FS_IOC_SETFLAGS), uintptr(unsafe.Pointer(&flags)))
++ require.Zero(t, errno)
++ }
+ }
+
+ func mockMigration(doFunc func(config *ghConfig.Config) error) *ghmock.MigrationMock {
+--
+2.34.1
+
diff --git a/SPECS/gh/CVE-2024-54132.patch b/SPECS/gh/CVE-2024-54132.patch
new file mode 100644
index 00000000000..a32a42eb48c
--- /dev/null
+++ b/SPECS/gh/CVE-2024-54132.patch
@@ -0,0 +1,1349 @@
+From 8da27d2c8ac8b781cf34a5e04ed57cfe4b68fa55 Mon Sep 17 00:00:00 2001
+From: Andy Feller
+Date: Tue, 19 Nov 2024 17:55:18 -0500
+Subject: [PATCH 1/5] Second attempt to address exploit
+
+This builds off suggestion to reuse logic used already within `gh run download` for detecting path traversals.
+
+This largely works but runs into an issue where detection logic doesn't handle non-separated traversal.
+---
+ pkg/cmd/run/download/download.go | 5 ++
+ pkg/cmd/run/download/download_test.go | 102 ++++++++++++++++++++++++++
+ pkg/cmd/run/download/zip.go | 3 +
+ 3 files changed, 110 insertions(+)
+
+diff --git a/pkg/cmd/run/download/download.go b/pkg/cmd/run/download/download.go
+index 99ec45bbeec..168cb6fcc8c 100644
+--- a/pkg/cmd/run/download/download.go
++++ b/pkg/cmd/run/download/download.go
+@@ -169,6 +169,11 @@ func runDownload(opts *DownloadOptions) error {
+ if len(wantPatterns) != 0 || len(wantNames) != 1 {
+ destDir = filepath.Join(destDir, a.Name)
+ }
++
++ if !filepathDescendsFrom(destDir, opts.DestinationDir) {
++ return fmt.Errorf("error downloading %s: would result in path traversal", a.Name)
++ }
++
+ err := opts.Platform.Download(a.DownloadURL, destDir)
+ if err != nil {
+ return fmt.Errorf("error downloading %s: %w", a.Name, err)
+diff --git a/pkg/cmd/run/download/download_test.go b/pkg/cmd/run/download/download_test.go
+index 3c1c8f2d862..f07d661289f 100644
+--- a/pkg/cmd/run/download/download_test.go
++++ b/pkg/cmd/run/download/download_test.go
+@@ -289,6 +289,108 @@ func Test_runDownload(t *testing.T) {
+ })
+ },
+ },
++ {
++ name: "given artifact name contains `..`, verify an error about path traversal is returned",
++ opts: DownloadOptions{
++ RunID: "2345",
++ DestinationDir: ".",
++ },
++ mockAPI: func(p *mockPlatform) {
++ p.On("List", "2345").Return([]shared.Artifact{
++ {
++ Name: "..",
++ DownloadURL: "http://download.com/artifact1.zip",
++ Expired: false,
++ },
++ }, nil)
++ },
++ wantErr: "error downloading ..: would result in path traversal",
++ },
++ {
++ name: "given artifact name contains `..`, verify an error about path traversal is returned",
++ opts: DownloadOptions{
++ RunID: "2345",
++ DestinationDir: "imaginary-dir",
++ },
++ mockAPI: func(p *mockPlatform) {
++ p.On("List", "2345").Return([]shared.Artifact{
++ {
++ Name: "..",
++ DownloadURL: "http://download.com/artifact1.zip",
++ Expired: false,
++ },
++ }, nil)
++ },
++ wantErr: "error downloading ..: would result in path traversal",
++ },
++ {
++ name: "given artifact name contains `../etc/passwd`, verify an error about path traversal is returned",
++ opts: DownloadOptions{
++ RunID: "2345",
++ DestinationDir: ".",
++ },
++ mockAPI: func(p *mockPlatform) {
++ p.On("List", "2345").Return([]shared.Artifact{
++ {
++ Name: "../etc/passwd",
++ DownloadURL: "http://download.com/artifact1.zip",
++ Expired: false,
++ },
++ }, nil)
++ },
++ wantErr: "error downloading ../etc/passwd: would result in path traversal",
++ },
++ {
++ name: "given artifact name contains `../etc/passwd`, verify an error about path traversal is returned",
++ opts: DownloadOptions{
++ RunID: "2345",
++ DestinationDir: "imaginary-dir",
++ },
++ mockAPI: func(p *mockPlatform) {
++ p.On("List", "2345").Return([]shared.Artifact{
++ {
++ Name: "../etc/passwd",
++ DownloadURL: "http://download.com/artifact1.zip",
++ Expired: false,
++ },
++ }, nil)
++ },
++ wantErr: "error downloading ../etc/passwd: would result in path traversal",
++ },
++ {
++ name: "given artifact name contains `../../etc/passwd`, verify an error about path traversal is returned",
++ opts: DownloadOptions{
++ RunID: "2345",
++ DestinationDir: ".",
++ },
++ mockAPI: func(p *mockPlatform) {
++ p.On("List", "2345").Return([]shared.Artifact{
++ {
++ Name: "../../etc/passwd",
++ DownloadURL: "http://download.com/artifact1.zip",
++ Expired: false,
++ },
++ }, nil)
++ },
++ wantErr: "error downloading ../../etc/passwd: would result in path traversal",
++ },
++ {
++ name: "given artifact name contains `../../etc/passwd`, verify an error about path traversal is returned",
++ opts: DownloadOptions{
++ RunID: "2345",
++ DestinationDir: "imaginary-dir",
++ },
++ mockAPI: func(p *mockPlatform) {
++ p.On("List", "2345").Return([]shared.Artifact{
++ {
++ Name: "../../etc/passwd",
++ DownloadURL: "http://download.com/artifact1.zip",
++ Expired: false,
++ },
++ }, nil)
++ },
++ wantErr: "error downloading ../../etc/passwd: would result in path traversal",
++ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+diff --git a/pkg/cmd/run/download/zip.go b/pkg/cmd/run/download/zip.go
+index ab5723e9468..f6a27afdd66 100644
+--- a/pkg/cmd/run/download/zip.go
++++ b/pkg/cmd/run/download/zip.go
+@@ -73,6 +73,9 @@ func getPerm(m os.FileMode) os.FileMode {
+ func filepathDescendsFrom(p, dir string) bool {
+ p = filepath.Clean(p)
+ dir = filepath.Clean(dir)
++ if dir == "." && p == ".." {
++ return false
++ }
+ if dir == "." && !filepath.IsAbs(p) {
+ return !strings.HasPrefix(p, ".."+string(filepath.Separator))
+ }
+
+From 83cf41155646380d3df4037d3f2ac683147f194a Mon Sep 17 00:00:00 2001
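The first commit above reuses the run-download filepathDescendsFrom helper for artifact names and special-cases a bare `..` against the current directory. A generic way to express the same containment requirement — shown here purely as an illustration, not as the gh implementation — is to compute filepath.Rel and reject any result that climbs out of the destination.

```go
package main

import (
	"fmt"
	"path/filepath"
	"strings"
)

// descendsFrom reports whether path p, once cleaned, stays inside dir.
// It is a sketch of the containment idea, not the cli's filepathDescendsFrom.
func descendsFrom(p, dir string) bool {
	rel, err := filepath.Rel(filepath.Clean(dir), filepath.Clean(p))
	if err != nil {
		return false
	}
	return rel != ".." && !strings.HasPrefix(rel, ".."+string(filepath.Separator))
}

func main() {
	fmt.Println(descendsFrom("tmp/artifact-1", "tmp"))   // true
	fmt.Println(descendsFrom("../etc/passwd", "."))      // false
	fmt.Println(descendsFrom("..", "imaginary-dir"))     // false
}
```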
+From: Tyler McGoffin
+Date: Tue, 19 Nov 2024 16:08:31 -0800
+Subject: [PATCH 2/5] Improve test names so there is no repetition
+
+---
+ pkg/cmd/run/download/download_test.go | 12 ++++++------
+ 1 file changed, 6 insertions(+), 6 deletions(-)
+
+diff --git a/pkg/cmd/run/download/download_test.go b/pkg/cmd/run/download/download_test.go
+index f07d661289f..fb445ccd4e9 100644
+--- a/pkg/cmd/run/download/download_test.go
++++ b/pkg/cmd/run/download/download_test.go
+@@ -290,7 +290,7 @@ func Test_runDownload(t *testing.T) {
+ },
+ },
+ {
+- name: "given artifact name contains `..`, verify an error about path traversal is returned",
++ name: "given artifact name contains `..` and the DestinationDir is `.`, verify an error about path traversal is returned",
+ opts: DownloadOptions{
+ RunID: "2345",
+ DestinationDir: ".",
+@@ -307,7 +307,7 @@ func Test_runDownload(t *testing.T) {
+ wantErr: "error downloading ..: would result in path traversal",
+ },
+ {
+- name: "given artifact name contains `..`, verify an error about path traversal is returned",
++ name: "given artifact name contains `..` and the DestinationDir is `imaginary-dir`, verify an error about path traversal is returned",
+ opts: DownloadOptions{
+ RunID: "2345",
+ DestinationDir: "imaginary-dir",
+@@ -324,7 +324,7 @@ func Test_runDownload(t *testing.T) {
+ wantErr: "error downloading ..: would result in path traversal",
+ },
+ {
+- name: "given artifact name contains `../etc/passwd`, verify an error about path traversal is returned",
++ name: "given artifact name contains `../etc/passwd` and the DestinationDir is `.`, verify an error about path traversal is returned",
+ opts: DownloadOptions{
+ RunID: "2345",
+ DestinationDir: ".",
+@@ -341,7 +341,7 @@ func Test_runDownload(t *testing.T) {
+ wantErr: "error downloading ../etc/passwd: would result in path traversal",
+ },
+ {
+- name: "given artifact name contains `../etc/passwd`, verify an error about path traversal is returned",
++ name: "given artifact name contains `../etc/passwd` and the DestinationDir is `imaginary-dir`, verify an error about path traversal is returned",
+ opts: DownloadOptions{
+ RunID: "2345",
+ DestinationDir: "imaginary-dir",
+@@ -358,7 +358,7 @@ func Test_runDownload(t *testing.T) {
+ wantErr: "error downloading ../etc/passwd: would result in path traversal",
+ },
+ {
+- name: "given artifact name contains `../../etc/passwd`, verify an error about path traversal is returned",
++ name: "given artifact name contains `../../etc/passwd` and the DestinationDir is `.`, verify an error about path traversal is returned",
+ opts: DownloadOptions{
+ RunID: "2345",
+ DestinationDir: ".",
+@@ -375,7 +375,7 @@ func Test_runDownload(t *testing.T) {
+ wantErr: "error downloading ../../etc/passwd: would result in path traversal",
+ },
+ {
+- name: "given artifact name contains `../../etc/passwd`, verify an error about path traversal is returned",
++ name: "given artifact name contains `../../etc/passwd` and the DestinationDir is `imaginary-dir`, verify an error about path traversal is returned",
+ opts: DownloadOptions{
+ RunID: "2345",
+ DestinationDir: "imaginary-dir",
+
+From e7c5706336d851b39930c7315132f89b25e77d4d Mon Sep 17 00:00:00 2001
+From: Andy Feller
+Date: Thu, 21 Nov 2024 17:02:20 -0500
+Subject: [PATCH 3/5] Refactor download testing, simpler file descends
+
+This incorporates the work done by @williammartin to improve reasoning about `gh run download` behavior through testing while verifying a simpler solution to checking if a path is contained within a directory.
+---
+ pkg/cmd/run/download/download.go | 1 +
+ pkg/cmd/run/download/download_test.go | 524 +++++++++++++++-----------
+ pkg/cmd/run/download/zip.go | 14 +-
+ 3 files changed, 309 insertions(+), 230 deletions(-)
+
+diff --git a/pkg/cmd/run/download/download.go b/pkg/cmd/run/download/download.go
+index 168cb6fcc8c..5bda2ba3da0 100644
+--- a/pkg/cmd/run/download/download.go
++++ b/pkg/cmd/run/download/download.go
+@@ -166,6 +166,7 @@ func runDownload(opts *DownloadOptions) error {
+ }
+ }
+ destDir := opts.DestinationDir
++ // Why do we only include the artifact name in the destination directory if there are multiple?
+ if len(wantPatterns) != 0 || len(wantNames) != 1 {
+ destDir = filepath.Join(destDir, a.Name)
+ }
+diff --git a/pkg/cmd/run/download/download_test.go b/pkg/cmd/run/download/download_test.go
+index fb445ccd4e9..0df94ccf498 100644
+--- a/pkg/cmd/run/download/download_test.go
++++ b/pkg/cmd/run/download/download_test.go
+@@ -2,8 +2,11 @@ package download
+
+ import (
+ "bytes"
++ "errors"
++ "fmt"
+ "io"
+ "net/http"
++ "os"
+ "path/filepath"
+ "testing"
+
+@@ -14,7 +17,6 @@ import (
+ "github.com/cli/cli/v2/pkg/iostreams"
+ "github.com/google/shlex"
+ "github.com/stretchr/testify/assert"
+- "github.com/stretchr/testify/mock"
+ "github.com/stretchr/testify/require"
+ )
+
+@@ -143,261 +145,350 @@ func Test_NewCmdDownload(t *testing.T) {
+ }
+ }
+
++type testArtifact struct {
++ artifact shared.Artifact
++ files []string
++}
++
++type fakePlatform struct {
++ runArtifacts map[string][]testArtifact
++}
++
++func (f *fakePlatform) List(runID string) ([]shared.Artifact, error) {
++ var runIds []string
++ if runID != "" {
++ runIds = []string{runID}
++ } else {
++ for k := range f.runArtifacts {
++ runIds = append(runIds, k)
++ }
++ }
++
++ var artifacts []shared.Artifact
++ for _, id := range runIds {
++ for _, a := range f.runArtifacts[id] {
++ artifacts = append(artifacts, a.artifact)
++ }
++ }
++
++ return artifacts, nil
++}
++
++func (f *fakePlatform) Download(url string, dir string) error {
++ if err := os.MkdirAll(dir, 0755); err != nil {
++ return err
++ }
++ // Now to be consistent, we find the artifact with the provided URL.
++ // It's a bit janky to iterate the runs, to find the right artifact
++ // rather than keying directly to it, but it allows the setup of the
++ // fake platform to be declarative rather than imperative.
++ // Think fakePlatform { artifacts: ... } rather than fakePlatform.makeArtifactAvailable()
++ for _, testArtifacts := range f.runArtifacts {
++ for _, testArtifact := range testArtifacts {
++ if testArtifact.artifact.DownloadURL == url {
++ for _, file := range testArtifact.files {
++ path := filepath.Join(dir, file)
++ return os.WriteFile(path, []byte{}, 0600)
++ }
++ }
++ }
++ }
++
++ return errors.New("no artifact matches the provided URL")
++}
++
+ func Test_runDownload(t *testing.T) {
+ tests := []struct {
+- name string
+- opts DownloadOptions
+- mockAPI func(*mockPlatform)
+- promptStubs func(*prompter.MockPrompter)
+- wantErr string
++ name string
++ opts DownloadOptions
++ platform *fakePlatform
++ promptStubs func(*prompter.MockPrompter)
++ expectedFiles []string
++ wantErr string
+ }{
+ {
+ name: "download non-expired",
+ opts: DownloadOptions{
+ RunID: "2345",
+ DestinationDir: "./tmp",
+- Names: []string(nil),
+ },
+- mockAPI: func(p *mockPlatform) {
+- p.On("List", "2345").Return([]shared.Artifact{
+- {
+- Name: "artifact-1",
+- DownloadURL: "http://download.com/artifact1.zip",
+- Expired: false,
+- },
+- {
+- Name: "expired-artifact",
+- DownloadURL: "http://download.com/expired.zip",
+- Expired: true,
++ platform: &fakePlatform{
++ runArtifacts: map[string][]testArtifact{
++ "2345": {
++ {
++ artifact: shared.Artifact{
++ Name: "artifact-1",
++ DownloadURL: "http://download.com/artifact1.zip",
++ Expired: false,
++ },
++ files: []string{
++ "artifact-1-file",
++ },
++ },
++ {
++ artifact: shared.Artifact{
++ Name: "expired-artifact",
++ DownloadURL: "http://download.com/expired.zip",
++ Expired: true,
++ },
++ files: []string{
++ "expired",
++ },
++ },
++ {
++ artifact: shared.Artifact{
++ Name: "artifact-2",
++ DownloadURL: "http://download.com/artifact2.zip",
++ Expired: false,
++ },
++ files: []string{
++ "artifact-2-file",
++ },
++ },
+ },
+- {
+- Name: "artifact-2",
+- DownloadURL: "http://download.com/artifact2.zip",
+- Expired: false,
+- },
+- }, nil)
+- p.On("Download", "http://download.com/artifact1.zip", filepath.FromSlash("tmp/artifact-1")).Return(nil)
+- p.On("Download", "http://download.com/artifact2.zip", filepath.FromSlash("tmp/artifact-2")).Return(nil)
++ },
++ },
++ expectedFiles: []string{
++ filepath.Join("artifact-1", "artifact-1-file"),
++ filepath.Join("artifact-2", "artifact-2-file"),
+ },
+ },
+ {
+- name: "no valid artifacts",
++ name: "all artifacts are expired",
+ opts: DownloadOptions{
+- RunID: "2345",
+- DestinationDir: ".",
+- Names: []string(nil),
++ RunID: "2345",
+ },
+- mockAPI: func(p *mockPlatform) {
+- p.On("List", "2345").Return([]shared.Artifact{
+- {
+- Name: "artifact-1",
+- DownloadURL: "http://download.com/artifact1.zip",
+- Expired: true,
++ platform: &fakePlatform{
++ runArtifacts: map[string][]testArtifact{
++ "2345": {
++ {
++ artifact: shared.Artifact{
++ Name: "artifact-1",
++ DownloadURL: "http://download.com/artifact1.zip",
++ Expired: true,
++ },
++ files: []string{
++ "artifact-1-file",
++ },
++ },
++ {
++ artifact: shared.Artifact{
++ Name: "artifact-2",
++ DownloadURL: "http://download.com/artifact2.zip",
++ Expired: true,
++ },
++ files: []string{
++ "artifact-2-file",
++ },
++ },
+ },
+- {
+- Name: "artifact-2",
+- DownloadURL: "http://download.com/artifact2.zip",
+- Expired: true,
+- },
+- }, nil)
++ },
+ },
+- wantErr: "no valid artifacts found to download",
++ expectedFiles: []string{},
++ wantErr: "no valid artifacts found to download",
+ },
+ {
+ name: "no name matches",
+ opts: DownloadOptions{
+- RunID: "2345",
+- DestinationDir: ".",
+- Names: []string{"artifact-3"},
++ RunID: "2345",
++ Names: []string{"artifact-3"},
+ },
+- mockAPI: func(p *mockPlatform) {
+- p.On("List", "2345").Return([]shared.Artifact{
+- {
+- Name: "artifact-1",
+- DownloadURL: "http://download.com/artifact1.zip",
+- Expired: false,
+- },
+- {
+- Name: "artifact-2",
+- DownloadURL: "http://download.com/artifact2.zip",
+- Expired: false,
++ platform: &fakePlatform{
++ runArtifacts: map[string][]testArtifact{
++ "2345": {
++ {
++ artifact: shared.Artifact{
++ Name: "artifact-1",
++ DownloadURL: "http://download.com/artifact1.zip",
++ Expired: false,
++ },
++ files: []string{
++ "artifact-1-file",
++ },
++ },
++ {
++ artifact: shared.Artifact{
++ Name: "artifact-2",
++ DownloadURL: "http://download.com/artifact2.zip",
++ Expired: false,
++ },
++ files: []string{
++ "artifact-2-file",
++ },
++ },
+ },
+- }, nil)
++ },
+ },
+- wantErr: "no artifact matches any of the names or patterns provided",
++ expectedFiles: []string{},
++ wantErr: "no artifact matches any of the names or patterns provided",
+ },
+ {
+ name: "no pattern matches",
+ opts: DownloadOptions{
+- RunID: "2345",
+- DestinationDir: ".",
+- FilePatterns: []string{"artifiction-*"},
+- },
+- mockAPI: func(p *mockPlatform) {
+- p.On("List", "2345").Return([]shared.Artifact{
+- {
+- Name: "artifact-1",
+- DownloadURL: "http://download.com/artifact1.zip",
+- Expired: false,
+- },
+- {
+- Name: "artifact-2",
+- DownloadURL: "http://download.com/artifact2.zip",
+- Expired: false,
+- },
+- }, nil)
+- },
+- wantErr: "no artifact matches any of the names or patterns provided",
+- },
+- {
+- name: "prompt to select artifact",
+- opts: DownloadOptions{
+- RunID: "",
+- DoPrompt: true,
+- DestinationDir: ".",
+- Names: []string(nil),
++ RunID: "2345",
++ FilePatterns: []string{"artifiction-*"},
+ },
+- mockAPI: func(p *mockPlatform) {
+- p.On("List", "").Return([]shared.Artifact{
+- {
+- Name: "artifact-1",
+- DownloadURL: "http://download.com/artifact1.zip",
+- Expired: false,
+- },
+- {
+- Name: "expired-artifact",
+- DownloadURL: "http://download.com/expired.zip",
+- Expired: true,
++ platform: &fakePlatform{
++ runArtifacts: map[string][]testArtifact{
++ "2345": {
++ {
++ artifact: shared.Artifact{
++ Name: "artifact-1",
++ DownloadURL: "http://download.com/artifact1.zip",
++ Expired: false,
++ },
++ files: []string{
++ "artifact-1-file",
++ },
++ },
++ {
++ artifact: shared.Artifact{
++ Name: "artifact-2",
++ DownloadURL: "http://download.com/artifact2.zip",
++ Expired: false,
++ },
++ files: []string{
++ "artifact-2-file",
++ },
++ },
+ },
+- {
+- Name: "artifact-2",
+- DownloadURL: "http://download.com/artifact2.zip",
+- Expired: false,
+- },
+- {
+- Name: "artifact-2",
+- DownloadURL: "http://download.com/artifact2.also.zip",
+- Expired: false,
+- },
+- }, nil)
+- p.On("Download", "http://download.com/artifact2.zip", ".").Return(nil)
+- },
+- promptStubs: func(pm *prompter.MockPrompter) {
+- pm.RegisterMultiSelect("Select artifacts to download:", nil, []string{"artifact-1", "artifact-2"},
+- func(_ string, _, opts []string) ([]int, error) {
+- return []int{1}, nil
+- })
++ },
+ },
++ expectedFiles: []string{},
++ wantErr: "no artifact matches any of the names or patterns provided",
+ },
+ {
+- name: "given artifact name contains `..` and the DestinationDir is `.`, verify an error about path traversal is returned",
++ name: "avoid redownloading files of the same name",
+ opts: DownloadOptions{
+- RunID: "2345",
+- DestinationDir: ".",
++ RunID: "2345",
+ },
+- mockAPI: func(p *mockPlatform) {
+- p.On("List", "2345").Return([]shared.Artifact{
+- {
+- Name: "..",
+- DownloadURL: "http://download.com/artifact1.zip",
+- Expired: false,
++ platform: &fakePlatform{
++ runArtifacts: map[string][]testArtifact{
++ "2345": {
++ {
++ artifact: shared.Artifact{
++ Name: "artifact-1",
++ DownloadURL: "http://download.com/artifact1.zip",
++ Expired: false,
++ },
++ files: []string{
++ "artifact-1-file",
++ },
++ },
++ {
++ artifact: shared.Artifact{
++ Name: "artifact-1",
++ DownloadURL: "http://download.com/artifact2.zip",
++ Expired: false,
++ },
++ files: []string{
++ "artifact-2-file",
++ },
++ },
+ },
+- }, nil)
+- },
+- wantErr: "error downloading ..: would result in path traversal",
+- },
+- {
+- name: "given artifact name contains `..` and the DestinationDir is `imaginary-dir`, verify an error about path traversal is returned",
+- opts: DownloadOptions{
+- RunID: "2345",
+- DestinationDir: "imaginary-dir",
++ },
+ },
+- mockAPI: func(p *mockPlatform) {
+- p.On("List", "2345").Return([]shared.Artifact{
+- {
+- Name: "..",
+- DownloadURL: "http://download.com/artifact1.zip",
+- Expired: false,
+- },
+- }, nil)
++ expectedFiles: []string{
++ filepath.Join("artifact-1", "artifact-1-file"),
+ },
+- wantErr: "error downloading ..: would result in path traversal",
+ },
+ {
+- name: "given artifact name contains `../etc/passwd` and the DestinationDir is `.`, verify an error about path traversal is returned",
++ name: "prompt to select artifact",
+ opts: DownloadOptions{
+- RunID: "2345",
+- DestinationDir: ".",
++ RunID: "",
++ DoPrompt: true,
++ Names: []string(nil),
+ },
+- mockAPI: func(p *mockPlatform) {
+- p.On("List", "2345").Return([]shared.Artifact{
+- {
+- Name: "../etc/passwd",
+- DownloadURL: "http://download.com/artifact1.zip",
+- Expired: false,
++ platform: &fakePlatform{
++ runArtifacts: map[string][]testArtifact{
++ "2345": {
++ {
++ artifact: shared.Artifact{
++ Name: "artifact-1",
++ DownloadURL: "http://download.com/artifact1.zip",
++ Expired: false,
++ },
++ files: []string{
++ "artifact-1-file",
++ },
++ },
++ {
++ artifact: shared.Artifact{
++ Name: "expired-artifact",
++ DownloadURL: "http://download.com/expired.zip",
++ Expired: true,
++ },
++ files: []string{
++ "expired",
++ },
++ },
+ },
+- }, nil)
+- },
+- wantErr: "error downloading ../etc/passwd: would result in path traversal",
+- },
+- {
+- name: "given artifact name contains `../etc/passwd` and the DestinationDir is `imaginary-dir`, verify an error about path traversal is returned",
+- opts: DownloadOptions{
+- RunID: "2345",
+- DestinationDir: "imaginary-dir",
+- },
+- mockAPI: func(p *mockPlatform) {
+- p.On("List", "2345").Return([]shared.Artifact{
+- {
+- Name: "../etc/passwd",
+- DownloadURL: "http://download.com/artifact1.zip",
+- Expired: false,
++ "6789": {
++ {
++ artifact: shared.Artifact{
++ Name: "artifact-2",
++ DownloadURL: "http://download.com/artifact2.zip",
++ Expired: false,
++ },
++ files: []string{
++ "artifact-2-file",
++ },
++ },
+ },
+- }, nil)
++ },
+ },
+- wantErr: "error downloading ../etc/passwd: would result in path traversal",
+- },
+- {
+- name: "given artifact name contains `../../etc/passwd` and the DestinationDir is `.`, verify an error about path traversal is returned",
+- opts: DownloadOptions{
+- RunID: "2345",
+- DestinationDir: ".",
++ promptStubs: func(pm *prompter.MockPrompter) {
++ pm.RegisterMultiSelect("Select artifacts to download:", nil, []string{"artifact-1", "artifact-2"},
++ func(_ string, _, opts []string) ([]int, error) {
++ for i, o := range opts {
++ if o == "artifact-2" {
++ return []int{i}, nil
++ }
++ }
++ return nil, fmt.Errorf("no artifact-2 found in %v", opts)
++ })
+ },
+- mockAPI: func(p *mockPlatform) {
+- p.On("List", "2345").Return([]shared.Artifact{
+- {
+- Name: "../../etc/passwd",
+- DownloadURL: "http://download.com/artifact1.zip",
+- Expired: false,
+- },
+- }, nil)
++ expectedFiles: []string{
++ filepath.Join("artifact-2-file"),
+ },
+- wantErr: "error downloading ../../etc/passwd: would result in path traversal",
+ },
+ {
+- name: "given artifact name contains `../../etc/passwd` and the DestinationDir is `imaginary-dir`, verify an error about path traversal is returned",
++ name: "handling artifact name with path traversal exploit",
+ opts: DownloadOptions{
+- RunID: "2345",
+- DestinationDir: "imaginary-dir",
++ RunID: "2345",
+ },
+- mockAPI: func(p *mockPlatform) {
+- p.On("List", "2345").Return([]shared.Artifact{
+- {
+- Name: "../../etc/passwd",
+- DownloadURL: "http://download.com/artifact1.zip",
+- Expired: false,
++ platform: &fakePlatform{
++ runArtifacts: map[string][]testArtifact{
++ "2345": {
++ {
++ artifact: shared.Artifact{
++ Name: "..",
++ DownloadURL: "http://download.com/artifact1.zip",
++ Expired: false,
++ },
++ files: []string{
++ "etc/passwd",
++ },
++ },
+ },
+- }, nil)
++ },
+ },
+- wantErr: "error downloading ../../etc/passwd: would result in path traversal",
++ expectedFiles: []string{},
++ wantErr: "error downloading ..: would result in path traversal",
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ opts := &tt.opts
++ if opts.DestinationDir == "" {
++ opts.DestinationDir = t.TempDir()
++ } else {
++ opts.DestinationDir = filepath.Join(t.TempDir(), opts.DestinationDir)
++ }
++
+ ios, _, stdout, stderr := iostreams.Test()
+ opts.IO = ios
+- opts.Platform = newMockPlatform(t, tt.mockAPI)
++ opts.Platform = tt.platform
+
+ pm := prompter.NewMockPrompter(t)
+ opts.Prompter = pm
+@@ -412,34 +503,31 @@ func Test_runDownload(t *testing.T) {
+ require.NoError(t, err)
+ }
+
++ // Check that the exact number of files exist
++ require.Equal(t, len(tt.expectedFiles), countFilesInDirRecursively(t, opts.DestinationDir))
++
++ // Then check that the exact files are correct
++ for _, name := range tt.expectedFiles {
++ require.FileExists(t, filepath.Join(opts.DestinationDir, name))
++ }
++
+ assert.Equal(t, "", stdout.String())
+ assert.Equal(t, "", stderr.String())
+ })
+ }
+ }
+
+-type mockPlatform struct {
+- mock.Mock
+-}
++func countFilesInDirRecursively(t *testing.T, dir string) int {
++ t.Helper()
+
+-func newMockPlatform(t *testing.T, config func(*mockPlatform)) *mockPlatform {
+- m := &mockPlatform{}
+- m.Test(t)
+- t.Cleanup(func() {
+- m.AssertExpectations(t)
+- })
+- if config != nil {
+- config(m)
+- }
+- return m
+-}
+-
+-func (p *mockPlatform) List(runID string) ([]shared.Artifact, error) {
+- args := p.Called(runID)
+- return args.Get(0).([]shared.Artifact), args.Error(1)
+-}
++ count := 0
++ require.NoError(t, filepath.Walk(dir, func(_ string, info os.FileInfo, err error) error {
++ require.NoError(t, err)
++ if !info.IsDir() {
++ count++
++ }
++ return nil
++ }))
+
+-func (p *mockPlatform) Download(url string, dir string) error {
+- args := p.Called(url, dir)
+- return args.Error(0)
++ return count
+ }
+diff --git a/pkg/cmd/run/download/zip.go b/pkg/cmd/run/download/zip.go
+index f6a27afdd66..52994199a9b 100644
+--- a/pkg/cmd/run/download/zip.go
++++ b/pkg/cmd/run/download/zip.go
+@@ -71,16 +71,6 @@ func getPerm(m os.FileMode) os.FileMode {
+ }
+
+ func filepathDescendsFrom(p, dir string) bool {
+- p = filepath.Clean(p)
+- dir = filepath.Clean(dir)
+- if dir == "." && p == ".." {
+- return false
+- }
+- if dir == "." && !filepath.IsAbs(p) {
+- return !strings.HasPrefix(p, ".."+string(filepath.Separator))
+- }
+- if !strings.HasSuffix(dir, string(filepath.Separator)) {
+- dir += string(filepath.Separator)
+- }
+- return strings.HasPrefix(p, dir)
++ relativePath, _ := filepath.Rel(dir, p)
++ return !strings.HasPrefix(relativePath, "..")
+ }
+
+From cdfc12caf52754ea4026d5338a56ad4e6f822105 Mon Sep 17 00:00:00 2001
+From: Andy Feller
+Date: Fri, 22 Nov 2024 15:26:11 -0500
+Subject: [PATCH 4/5] Expand logic and tests to handle edge cases
+
+This commit expands filepathDescendsFrom(string, string) to handle edge cases such as mixed absolute and relative paths and unusual artifact names.
+
+Additionally, tests for filepathDescendsFrom() and runDownload() have been expanded to verify more use cases.
+---
+ pkg/cmd/run/download/download.go | 11 +-
+ pkg/cmd/run/download/download_test.go | 189 +++++++++++++++++++++++++-
+ pkg/cmd/run/download/zip.go | 21 ++-
+ pkg/cmd/run/download/zip_test.go | 80 +++++++++++
+ 4 files changed, 297 insertions(+), 4 deletions(-)
+
+diff --git a/pkg/cmd/run/download/download.go b/pkg/cmd/run/download/download.go
+index 5bda2ba3da0..04ce7434051 100644
+--- a/pkg/cmd/run/download/download.go
++++ b/pkg/cmd/run/download/download.go
+@@ -166,8 +166,15 @@ func runDownload(opts *DownloadOptions) error {
+ }
+ }
+ destDir := opts.DestinationDir
+- // Why do we only include the artifact name in the destination directory if there are multiple?
+- if len(wantPatterns) != 0 || len(wantNames) != 1 {
++
++ // Isolate the downloaded artifact file to avoid potential conflicts from other downloaded artifacts when:
++ //
++ // 1. len(wantPatterns) > 0: Any pattern can result in 2+ artifacts
++ // 2. len(wantNames) == 0: User wants all artifacts regardless what they are named
++ // 3. len(wantNames) > 1: User wants multiple, specific artifacts
++ //
++ // Otherwise if a single artifact is wanted, then the protective subdirectory is an unnecessary inconvenience.
++ if len(wantPatterns) > 0 || len(wantNames) != 1 {
+ destDir = filepath.Join(destDir, a.Name)
+ }
+
+diff --git a/pkg/cmd/run/download/download_test.go b/pkg/cmd/run/download/download_test.go
+index 0df94ccf498..aeab2027893 100644
+--- a/pkg/cmd/run/download/download_test.go
++++ b/pkg/cmd/run/download/download_test.go
+@@ -207,7 +207,7 @@ func Test_runDownload(t *testing.T) {
+ wantErr string
+ }{
+ {
+- name: "download non-expired",
++ name: "download non-expired to relative directory",
+ opts: DownloadOptions{
+ RunID: "2345",
+ DestinationDir: "./tmp",
+@@ -253,6 +253,53 @@ func Test_runDownload(t *testing.T) {
+ filepath.Join("artifact-2", "artifact-2-file"),
+ },
+ },
++ {
++ name: "download non-expired to absolute directory",
++ opts: DownloadOptions{
++ RunID: "2345",
++ DestinationDir: "/tmp",
++ },
++ platform: &fakePlatform{
++ runArtifacts: map[string][]testArtifact{
++ "2345": {
++ {
++ artifact: shared.Artifact{
++ Name: "artifact-1",
++ DownloadURL: "http://download.com/artifact1.zip",
++ Expired: false,
++ },
++ files: []string{
++ "artifact-1-file",
++ },
++ },
++ {
++ artifact: shared.Artifact{
++ Name: "expired-artifact",
++ DownloadURL: "http://download.com/expired.zip",
++ Expired: true,
++ },
++ files: []string{
++ "expired",
++ },
++ },
++ {
++ artifact: shared.Artifact{
++ Name: "artifact-2",
++ DownloadURL: "http://download.com/artifact2.zip",
++ Expired: false,
++ },
++ files: []string{
++ "artifact-2-file",
++ },
++ },
++ },
++ },
++ },
++ expectedFiles: []string{
++ filepath.Join("artifact-1", "artifact-1-file"),
++ filepath.Join("artifact-2", "artifact-2-file"),
++ },
++ },
+ {
+ name: "all artifacts are expired",
+ opts: DownloadOptions{
+@@ -322,6 +369,53 @@ func Test_runDownload(t *testing.T) {
+ expectedFiles: []string{},
+ wantErr: "no artifact matches any of the names or patterns provided",
+ },
++ {
++ name: "pattern matches",
++ opts: DownloadOptions{
++ RunID: "2345",
++ FilePatterns: []string{"artifact-*"},
++ },
++ platform: &fakePlatform{
++ runArtifacts: map[string][]testArtifact{
++ "2345": {
++ {
++ artifact: shared.Artifact{
++ Name: "artifact-1",
++ DownloadURL: "http://download.com/artifact1.zip",
++ Expired: false,
++ },
++ files: []string{
++ "artifact-1-file",
++ },
++ },
++ {
++ artifact: shared.Artifact{
++ Name: "non-artifact-2",
++ DownloadURL: "http://download.com/non-artifact-2.zip",
++ Expired: false,
++ },
++ files: []string{
++ "non-artifact-2-file",
++ },
++ },
++ {
++ artifact: shared.Artifact{
++ Name: "artifact-3",
++ DownloadURL: "http://download.com/artifact3.zip",
++ Expired: false,
++ },
++ files: []string{
++ "artifact-3-file",
++ },
++ },
++ },
++ },
++ },
++ expectedFiles: []string{
++ filepath.Join("artifact-1", "artifact-1-file"),
++ filepath.Join("artifact-3", "artifact-3-file"),
++ },
++ },
+ {
+ name: "no pattern matches",
+ opts: DownloadOptions{
+@@ -357,6 +451,99 @@ func Test_runDownload(t *testing.T) {
+ expectedFiles: []string{},
+ wantErr: "no artifact matches any of the names or patterns provided",
+ },
++ {
++ name: "want specific single artifact",
++ opts: DownloadOptions{
++ RunID: "2345",
++ Names: []string{"non-artifact-2"},
++ },
++ platform: &fakePlatform{
++ runArtifacts: map[string][]testArtifact{
++ "2345": {
++ {
++ artifact: shared.Artifact{
++ Name: "artifact-1",
++ DownloadURL: "http://download.com/artifact1.zip",
++ Expired: false,
++ },
++ files: []string{
++ "artifact-1-file",
++ },
++ },
++ {
++ artifact: shared.Artifact{
++ Name: "non-artifact-2",
++ DownloadURL: "http://download.com/non-artifact-2.zip",
++ Expired: false,
++ },
++ files: []string{
++ "non-artifact-2-file",
++ },
++ },
++ {
++ artifact: shared.Artifact{
++ Name: "artifact-3",
++ DownloadURL: "http://download.com/artifact3.zip",
++ Expired: false,
++ },
++ files: []string{
++ "artifact-3-file",
++ },
++ },
++ },
++ },
++ },
++ expectedFiles: []string{
++ filepath.Join("non-artifact-2-file"),
++ },
++ },
++ {
++ name: "want specific multiple artifacts",
++ opts: DownloadOptions{
++ RunID: "2345",
++ Names: []string{"artifact-1", "artifact-3"},
++ },
++ platform: &fakePlatform{
++ runArtifacts: map[string][]testArtifact{
++ "2345": {
++ {
++ artifact: shared.Artifact{
++ Name: "artifact-1",
++ DownloadURL: "http://download.com/artifact1.zip",
++ Expired: false,
++ },
++ files: []string{
++ "artifact-1-file",
++ },
++ },
++ {
++ artifact: shared.Artifact{
++ Name: "non-artifact-2",
++ DownloadURL: "http://download.com/non-artifact-2.zip",
++ Expired: false,
++ },
++ files: []string{
++ "non-artifact-2-file",
++ },
++ },
++ {
++ artifact: shared.Artifact{
++ Name: "artifact-3",
++ DownloadURL: "http://download.com/artifact3.zip",
++ Expired: false,
++ },
++ files: []string{
++ "artifact-3-file",
++ },
++ },
++ },
++ },
++ },
++ expectedFiles: []string{
++ filepath.Join("artifact-1", "artifact-1-file"),
++ filepath.Join("artifact-3", "artifact-3-file"),
++ },
++ },
+ {
+ name: "avoid redownloading files of the same name",
+ opts: DownloadOptions{
+diff --git a/pkg/cmd/run/download/zip.go b/pkg/cmd/run/download/zip.go
+index 52994199a9b..a68b75fd6b7 100644
+--- a/pkg/cmd/run/download/zip.go
++++ b/pkg/cmd/run/download/zip.go
+@@ -71,6 +71,25 @@ func getPerm(m os.FileMode) os.FileMode {
+ }
+
+ func filepathDescendsFrom(p, dir string) bool {
+- relativePath, _ := filepath.Rel(dir, p)
++	// Regardless of the logic below, `p` is never allowed to be the current directory `.` or the parent directory `..`,
++	// so we check explicitly here, before filepath.Rel(), which doesn't cover all cases.
++ p = filepath.Clean(p)
++
++ if p == "." || p == ".." {
++ return false
++ }
++
++ // filepathDescendsFrom() takes advantage of filepath.Rel() to determine if `p` is descended from `dir`:
++ //
++	// 1. filepath.Rel() calculates the path needed to traverse from a fictitious `dir` to `p`.
++	// 2. filepath.Rel() errors in a handful of cases where absolute and relative paths are compared, as well as in certain traversal edge cases
++ // For more information, https://github.com/golang/go/blob/00709919d09904b17cfe3bfeb35521cbd3fb04f8/src/path/filepath/path_test.go#L1510-L1515
++	// 3. If the path to traverse from `dir` to `p` requires `..`, then we know `p` is not descended from / contained in `dir`
++ //
++ // As-is, this function requires the caller to ensure `p` and `dir` are either 1) both relative or 2) both absolute.
++ relativePath, err := filepath.Rel(dir, p)
++ if err != nil {
++ return false
++ }
+ return !strings.HasPrefix(relativePath, "..")
+ }
+diff --git a/pkg/cmd/run/download/zip_test.go b/pkg/cmd/run/download/zip_test.go
+index ca401cdb9ed..b85122ec57a 100644
+--- a/pkg/cmd/run/download/zip_test.go
++++ b/pkg/cmd/run/download/zip_test.go
+@@ -130,6 +130,86 @@ func Test_filepathDescendsFrom(t *testing.T) {
+ },
+ want: false,
+ },
++ {
++ name: "deny parent directory filename (`..`) escaping absolute directory",
++ args: args{
++ p: filepath.FromSlash(".."),
++ dir: filepath.FromSlash("/var/logs/"),
++ },
++ want: false,
++ },
++ {
++ name: "deny parent directory filename (`..`) escaping current directory",
++ args: args{
++ p: filepath.FromSlash(".."),
++ dir: filepath.FromSlash("."),
++ },
++ want: false,
++ },
++ {
++ name: "deny parent directory filename (`..`) escaping parent directory",
++ args: args{
++ p: filepath.FromSlash(".."),
++ dir: filepath.FromSlash(".."),
++ },
++ want: false,
++ },
++ {
++ name: "deny parent directory filename (`..`) escaping relative directory",
++ args: args{
++ p: filepath.FromSlash(".."),
++ dir: filepath.FromSlash("relative-dir"),
++ },
++ want: false,
++ },
++ {
++ name: "deny current directory filename (`.`) in absolute directory",
++ args: args{
++ p: filepath.FromSlash("."),
++ dir: filepath.FromSlash("/var/logs/"),
++ },
++ want: false,
++ },
++ {
++ name: "deny current directory filename (`.`) in current directory",
++ args: args{
++ p: filepath.FromSlash("."),
++ dir: filepath.FromSlash("."),
++ },
++ want: false,
++ },
++ {
++ name: "deny current directory filename (`.`) in parent directory",
++ args: args{
++ p: filepath.FromSlash("."),
++ dir: filepath.FromSlash(".."),
++ },
++ want: false,
++ },
++ {
++ name: "deny current directory filename (`.`) in relative directory",
++ args: args{
++ p: filepath.FromSlash("."),
++ dir: filepath.FromSlash("relative-dir"),
++ },
++ want: false,
++ },
++ {
++ name: "relative path, absolute dir",
++ args: args{
++ p: filepath.FromSlash("whatever"),
++ dir: filepath.FromSlash("/a/b/c"),
++ },
++ want: false,
++ },
++ {
++ name: "absolute path, relative dir",
++ args: args{
++ p: filepath.FromSlash("/a/b/c"),
++ dir: filepath.FromSlash("whatever"),
++ },
++ want: false,
++ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+
+From 8720479b0bfc95450abb2ba88489f2893e4838a9 Mon Sep 17 00:00:00 2001
+From: Andy Feller
+Date: Tue, 3 Dec 2024 13:33:00 -0500
+Subject: [PATCH 5/5] Consolidate logic for isolating artifacts
+
+---
+ pkg/cmd/run/download/download.go | 34 ++++++++++++++++++++++----------
+ 1 file changed, 24 insertions(+), 10 deletions(-)
+
+diff --git a/pkg/cmd/run/download/download.go b/pkg/cmd/run/download/download.go
+index 04ce7434051..8f25e84a228 100644
+--- a/pkg/cmd/run/download/download.go
++++ b/pkg/cmd/run/download/download.go
+@@ -151,8 +151,10 @@ func runDownload(opts *DownloadOptions) error {
+ opts.IO.StartProgressIndicator()
+ defer opts.IO.StopProgressIndicator()
+
+- // track downloaded artifacts and avoid re-downloading any of the same name
++ // track downloaded artifacts and avoid re-downloading any of the same name, isolate if multiple artifacts
+ downloaded := set.NewStringSet()
++ isolateArtifacts := isolateArtifacts(wantNames, wantPatterns)
++
+ for _, a := range artifacts {
+ if a.Expired {
+ continue
+@@ -165,16 +167,9 @@ func runDownload(opts *DownloadOptions) error {
+ continue
+ }
+ }
+- destDir := opts.DestinationDir
+
+- // Isolate the downloaded artifact file to avoid potential conflicts from other downloaded artifacts when:
+- //
+- // 1. len(wantPatterns) > 0: Any pattern can result in 2+ artifacts
+- // 2. len(wantNames) == 0: User wants all artifacts regardless what they are named
+- // 3. len(wantNames) > 1: User wants multiple, specific artifacts
+- //
+- // Otherwise if a single artifact is wanted, then the protective subdirectory is an unnecessary inconvenience.
+- if len(wantPatterns) > 0 || len(wantNames) != 1 {
++ destDir := opts.DestinationDir
++ if isolateArtifacts {
+ destDir = filepath.Join(destDir, a.Name)
+ }
+
+@@ -196,6 +191,25 @@ func runDownload(opts *DownloadOptions) error {
+ return nil
+ }
+
++func isolateArtifacts(wantNames []string, wantPatterns []string) bool {
++ if len(wantPatterns) > 0 {
++ // Patterns can match multiple artifacts
++ return true
++ }
++
++ if len(wantNames) == 0 {
++		// All artifacts wanted regardless of what they are named
++ return true
++ }
++
++ if len(wantNames) > 1 {
++ // Multiple, specific artifacts wanted
++ return true
++ }
++
++ return false
++}
++
+ func matchAnyName(names []string, name string) bool {
+ for _, n := range names {
+ if name == n {
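
As an aside for reviewers, the sketch below is a standalone illustration (not part of the patch) of the filepath.Rel-based containment check the series converges on; the package layout and sample inputs are assumptions for the example, and it presumes `p` and `dir` are either both relative or both absolute, as the patched comment notes.

package main

import (
	"fmt"
	"path/filepath"
	"strings"
)

// descendsFrom mirrors the patched filepathDescendsFrom: it rejects `.` and
// `..` outright and otherwise relies on filepath.Rel to detect escapes.
func descendsFrom(p, dir string) bool {
	p = filepath.Clean(p)
	if p == "." || p == ".." {
		return false
	}
	rel, err := filepath.Rel(dir, p)
	if err != nil {
		return false
	}
	return !strings.HasPrefix(rel, "..")
}

func main() {
	fmt.Println(descendsFrom("artifact-1/file", "."))  // true
	fmt.Println(descendsFrom("..", "."))               // false
	fmt.Println(descendsFrom("../etc/passwd", "."))    // false
	fmt.Println(descendsFrom("../../etc/passwd", "x")) // false
}

Run as-is it prints true, false, false, false, matching the path-traversal wantErr cases exercised in Test_runDownload.
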
diff --git a/SPECS/gh/generate_source_tarball.sh b/SPECS/gh/generate_source_tarball.sh
new file mode 100755
index 00000000000..cc04d4b5189
--- /dev/null
+++ b/SPECS/gh/generate_source_tarball.sh
@@ -0,0 +1,168 @@
+#!/bin/bash
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT License.
+#
+# This script downloads the source tarball and uses it to generate the
+# vendor tarball for the gh package. It also updates the package's
+# signatures.json file for both tarballs, although it doesn't account for
+# version changes.
+#
+# Notes:
+# - This script requires GNU tar version 1.28+.
+# - The additional options passed to tar enable generation of a tarball
+# with the same hash every time regardless of the environment. See:
+# https://reproducible-builds.org/docs/archives/
+# - For the value of "--mtime" we use the date "2021-04-26 00:00Z" to
+# simplify future updates.
+set -eu
+
+# get_spec_value extracts the parsed value of a tag from a spec file.
+# - spec: The path to the spec file.
+# - tag: The tag whose value is extracted.
+# The extracted value is returned via stdout.
+get_spec_value() {
+ local spec="$1"
+ local tag="$2"
+ local tmp=$(mktemp)
+ rpmspec -P "$spec" > "$tmp"
+ grep -E "^${tag}:" "$tmp" | sed -E "s/^$tag:\s*//"
+ rm "$tmp"
+}
+
+# set_signature_value adds or updates the value of a signature in the
+# signatures.json file.
+# - signatures_json: The path to the signatures.json file.
+# - path: The path to the file whose signature is updated.
+set_signature_value() {
+ local signatures_json="$1"
+ local path="$2"
+ local name=$(basename "$path")
+ local sum=$(sha256sum "$path" | cut -d' ' -f1)
+ signatures_tmp=$(mktemp)
+ jq --indent 1 ".Signatures.\"$name\" = \"$sum\"" "$signatures_json" > "$signatures_tmp"
+ mv "$signatures_tmp" "$signatures_json"
+}
+
+exit_usage() {
+ echo "Usage: $0 [flags]"
+ echo ""
+ echo "Flags:"
+ echo " --srcTarball src tarball file. If not provided, it will be downloaded according to the spec file."
+ echo " --outFolder folder where to copy the new tarball(s). If not provided, the tarballs will be copied to the same folder as the script."
+ echo " --pkgVersion package version. If not provided, it will be extracted from the spec file."
+ echo " --setSignature set the signature of the tarball(s) in the signatures.json file."
+ exit 2
+}
+
+arg_out_folder=""
+arg_src_tarball=""
+arg_pkg_version=""
+arg_set_signatures=0
+while [[ $# -gt 0 ]]; do
+ case $1 in
+ -h|--help)
+ exit_usage
+ ;;
+ --outFolder)
+ # Convert to absolute path
+ arg_out_folder=$(readlink -f "$2")
+ shift
+ ;;
+ --srcTarball)
+ arg_src_tarball="$2"
+ shift
+ ;;
+ --pkgVersion)
+ arg_pkg_version="$2"
+ shift
+ ;;
+ --setSignature)
+ arg_set_signatures=1
+ ;;
+ -*)
+ echo "Error: Unknown option: $1"
+ exit_usage
+ ;;
+ *)
+ echo "Error: Unknown argument: $1"
+ exit_usage
+ ;;
+ esac
+
+ shift
+done
+
+script_dir=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
+
+out_folder="$arg_out_folder"
+if [[ -z "$out_folder" ]]; then
+ out_folder="$script_dir"
+elif [[ ! -d "$out_folder" ]]; then
+ echo "Error: The output folder does not exist."
+ exit 1
+fi
+
+spec_file=$(ls "$script_dir"/*.spec)
+
+src_tarball="$arg_src_tarball"
+if [[ -z "$src_tarball" ]]; then
+ src_url=$(get_spec_value "$spec_file" "Source0")
+ if [[ -z "$src_url" ]]; then
+ echo "Error: Unable to determine the source0 URL from the spec file."
+ exit 1
+ fi
+
+ src_tarball_name=$(echo "$src_url" | grep -oP '(?<=#/)[^/]+')
+ if [[ -z "$src_tarball_name" ]]; then
+ echo "Error: Unable to determine the source0 tarball name from the source URL."
+ exit 1
+ fi
+
+ src_tarball="$script_dir/$src_tarball_name"
+ if [[ ! -f "$src_tarball" ]]; then
+ wget -O "$src_tarball" "$src_url"
+ fi
+elif [[ ! -f "$src_tarball" ]]; then
+ echo "Error: The source tarball file does not exist."
+ exit 1
+fi
+
+pkg_name=$(get_spec_value "$spec_file" "Name")
+if [[ -z "$pkg_name" ]]; then
+ echo "Error: Unable to determine the package name from the spec file."
+ exit 1
+fi
+
+pkg_version="$arg_pkg_version"
+if [[ -z "$pkg_version" ]]; then
+ pkg_version=$(get_spec_value "$spec_file" "Version")
+ if [[ -z "$pkg_version" ]]; then
+ echo "Error: Unable to determine the package version from the spec file."
+ exit 1
+ fi
+fi
+
+# Extract the source tarball and generate the vendor tarball.
+source_dir=$(mktemp -d)
+trap "rm -rf '$source_dir'" EXIT
+tar -C "$source_dir" -xf "$src_tarball"
+cd "$source_dir"/*
+go mod vendor
+vendor_tarball="$out_folder/$pkg_name-$pkg_version-vendor.tar.gz"
+tar --sort=name \
+ --mtime="2021-04-26 00:00Z" \
+ --owner=0 \
+ --group=0 \
+ --numeric-owner \
+ --pax-option=exthdr.name=%d/PaxHeaders/%f,delete=atime,delete=ctime \
+ -c \
+ -f "$vendor_tarball" \
+ vendor
+
+if [[ $arg_set_signatures -eq 1 ]]; then
+ signatures_file=$(ls "$script_dir"/*.signatures.json)
+ set_signature_value "$signatures_file" "$src_tarball"
+ set_signature_value "$signatures_file" "$vendor_tarball"
+fi
+
+echo "Vendor tarball generated: $vendor_tarball"
diff --git a/SPECS/gh/gh.signatures.json b/SPECS/gh/gh.signatures.json
index 4ed8112994e..b5ec83df14b 100644
--- a/SPECS/gh/gh.signatures.json
+++ b/SPECS/gh/gh.signatures.json
@@ -1,6 +1,6 @@
{
"Signatures": {
- "cli-2.43.1.tar.gz": "1ea3f451fb7002c1fb95a7fab21e9ab16591058492628fe264c5878e79ec7c90",
- "gh-2.43.1-vendor.tar.gz": "27791885c92900deae2baec254ada1d64d4dfabcfece5886a214f3279eb119f0"
+ "gh-2.62.0.tar.gz": "8b0d44a7fccd0c768d5ef7c3fbd274851b5752084e47761f146852de6539193e",
+ "gh-2.62.0-vendor.tar.gz": "2b39f75a9a45aa5e7b8d95e6b5fd7a11a7504e6cd7c92e904027f129abe48599"
}
}
diff --git a/SPECS/gh/gh.spec b/SPECS/gh/gh.spec
index a58348b95fd..4e8e16fa9d3 100644
--- a/SPECS/gh/gh.spec
+++ b/SPECS/gh/gh.spec
@@ -1,33 +1,20 @@
Summary: GitHub official command line tool
Name: gh
-Version: 2.43.1
+Version: 2.62.0
Release: 2%{?dist}
License: MIT
Vendor: Microsoft Corporation
Distribution: Azure Linux
Group: Applications/Tools
URL: https://github.com/cli/cli
-Source0: https://github.com/cli/cli/archive/refs/tags/v%{version}.tar.gz#/cli-%{version}.tar.gz
-# Below is a manually created tarball, no download link.
+Source0: https://github.com/cli/cli/archive/refs/tags/v%{version}.tar.gz#/%{name}-%{version}.tar.gz
+
+# Below is a manually created tarball, no download link. It is generated by running ./generate_source_tarball.sh.
# We're using pre-populated Go modules from this tarball, since network is disabled during build time.
-# How to re-build this file:
-# 1. wget https://github.com/cli/cli/archive/refs/tags/v%{version}.tar.gz -O cli-%%{version}.tar.gz
-# 2. tar -xf cli-%%{version}.tar.gz
-# 3. cd cli-%%{version}
-# 4. go mod vendor
-# 5. tar --sort=name \
-# --mtime="2021-04-26 00:00Z" \
-# --owner=0 --group=0 --numeric-owner \
-# --pax-option=exthdr.name=%d/PaxHeaders/%f,delete=atime,delete=ctime \
-# -cf %%{name}-%%{version}-vendor.tar.gz vendor
-#
-# NOTES:
-# - You require GNU tar version 1.28+.
-# - The additional options enable generation of a tarball with the same hash every time regardless of the environment.
-# See: https://reproducible-builds.org/docs/archives/
-# - For the value of "--mtime" use the date "2021-04-26 00:00Z" to simplify future updates.
Source1: %{name}-%{version}-vendor.tar.gz
+Patch0: 0001-Fix-false-negative-in-TestMigrationWriteErrors-when-.patch
+Patch1: CVE-2024-54132.patch
BuildRequires: golang < 1.23
BuildRequires: git
Requires: git
@@ -70,6 +57,12 @@ make test
%{_datadir}/zsh/site-functions/_gh
%changelog
+* Fri Dec 13 2024 Sandeep Karambelkar - 2.62.0-2
+- Patch CVE-2024-54132
+
+* Mon Nov 18 2024 Vince Perri - 2.62.0-1
+- Update to v2.62.0
+
* Tue Oct 15 2024 Muhammad Falak - 2.43.1-2
- Pin golang version to <= 1.22
diff --git a/SPECS/kernel-64k/kernel-64k.spec b/SPECS/kernel-64k/kernel-64k.spec
index 4f2ec85b6d4..ff0277bfec8 100644
--- a/SPECS/kernel-64k/kernel-64k.spec
+++ b/SPECS/kernel-64k/kernel-64k.spec
@@ -25,7 +25,7 @@
Summary: Linux Kernel
Name: kernel-64k
Version: 6.6.57.1
-Release: 6%{?dist}
+Release: 7%{?dist}
License: GPLv2
Vendor: Microsoft Corporation
Distribution: Azure Linux
@@ -370,6 +370,9 @@ echo "initrd of kernel %{uname_r} removed" >&2
%{_sysconfdir}/bash_completion.d/bpftool
%changelog
+* Sun Dec 22 2024 Ankita Pareek - 6.6.57.1-7
+- Bump release to match kernel
+
* Wed Dec 18 2024 Rachel Menge - 6.6.57.1-6
- Enable kexec signature verification
diff --git a/SPECS/kernel-headers/kernel-headers.spec b/SPECS/kernel-headers/kernel-headers.spec
index 77d34b27719..96cebd1e25c 100644
--- a/SPECS/kernel-headers/kernel-headers.spec
+++ b/SPECS/kernel-headers/kernel-headers.spec
@@ -14,7 +14,7 @@
Summary: Linux API header files
Name: kernel-headers
Version: 6.6.57.1
-Release: 6%{?dist}
+Release: 7%{?dist}
License: GPLv2
Vendor: Microsoft Corporation
Distribution: Azure Linux
@@ -75,6 +75,9 @@ done
%endif
%changelog
+* Sun Dec 22 2024 Ankita Pareek - 6.6.57.1-7
+- Bump release to match kernel
+
* Wed Dec 18 2024 Rachel Menge - 6.6.57.1-6
- Bump release to match kernel-64k
diff --git a/SPECS/kernel/config b/SPECS/kernel/config
index 0eecab7d127..b225406e6b8 100644
--- a/SPECS/kernel/config
+++ b/SPECS/kernel/config
@@ -380,7 +380,7 @@ CONFIG_ARCH_CPUIDLE_HALTPOLL=y
CONFIG_PARAVIRT_CLOCK=y
# CONFIG_JAILHOUSE_GUEST is not set
# CONFIG_ACRN_GUEST is not set
-# CONFIG_INTEL_TDX_GUEST is not set
+CONFIG_INTEL_TDX_GUEST=y
# CONFIG_MK8 is not set
# CONFIG_MPSC is not set
# CONFIG_MCORE2 is not set
@@ -6157,6 +6157,7 @@ CONFIG_VMGENID=y
# CONFIG_NITRO_ENCLAVES is not set
# CONFIG_EFI_SECRET is not set
CONFIG_SEV_GUEST=y
+CONFIG_TDX_GUEST_DRIVER=m
CONFIG_VIRTIO_ANCHOR=y
CONFIG_VIRTIO=y
CONFIG_VIRTIO_PCI_LIB=y
diff --git a/SPECS/kernel/kernel-uki.spec b/SPECS/kernel/kernel-uki.spec
index edc6c72e7ba..3c928626a97 100644
--- a/SPECS/kernel/kernel-uki.spec
+++ b/SPECS/kernel/kernel-uki.spec
@@ -13,7 +13,7 @@
Summary: Unified Kernel Image
Name: kernel-uki
Version: 6.6.57.1
-Release: 6%{?dist}
+Release: 7%{?dist}
License: GPLv2
Vendor: Microsoft Corporation
Distribution: Azure Linux
@@ -70,6 +70,9 @@ cp %{buildroot}/boot/vmlinuz-uki-%{kernelver}.efi %{buildroot}/boot/efi/EFI/Linu
/boot/efi/EFI/Linux/vmlinuz-uki-%{kernelver}.efi
%changelog
+* Sun Dec 22 2024 Ankita Pareek - 6.6.57.1-7
+- Bump release to match kernel
+
* Wed Dec 18 2024 Rachel Menge - 6.6.57.1-6
- Bump release to match kernel-64k
diff --git a/SPECS/kernel/kernel.signatures.json b/SPECS/kernel/kernel.signatures.json
index 5b05220a50a..eda9fcf8ddd 100644
--- a/SPECS/kernel/kernel.signatures.json
+++ b/SPECS/kernel/kernel.signatures.json
@@ -1,7 +1,7 @@
{
"Signatures": {
"azurelinux-ca-20230216.pem": "d545401163c75878319f01470455e6bc18a5968e39dd964323225e3fe308849b",
- "config": "53cf68442824f43df68ee1105d1fe428474da2cc723610114d0409ad15a424d1",
+ "config": "651f9cab61a3eb370f7e6451d2115cce2c5f137f5d7e5f28234b5d07bf841d0f",
"config_aarch64": "bfb4b4344045354a2ba518d11ae81fe5e3d45e9b11253ca2e199792543a9d624",
"cpupower": "d7518767bf2b1110d146a49c7d42e76b803f45eb8bd14d931aa6d0d346fae985",
"cpupower.service": "b057fe9e5d0e8c36f485818286b80e3eba8ff66ff44797940e99b1fd5361bb98",
diff --git a/SPECS/kernel/kernel.spec b/SPECS/kernel/kernel.spec
index f7a31ef1284..6533a6d2545 100644
--- a/SPECS/kernel/kernel.spec
+++ b/SPECS/kernel/kernel.spec
@@ -30,7 +30,7 @@
Summary: Linux Kernel
Name: kernel
Version: 6.6.57.1
-Release: 6%{?dist}
+Release: 7%{?dist}
License: GPLv2
Vendor: Microsoft Corporation
Distribution: Azure Linux
@@ -424,6 +424,9 @@ echo "initrd of kernel %{uname_r} removed" >&2
%{_sysconfdir}/bash_completion.d/bpftool
%changelog
+* Sun Dec 22 2024 Ankita Pareek - 6.6.57.1-7
+- Enable CONFIG_INTEL_TDX_GUEST and CONFIG_TDX_GUEST_DRIVER
+
* Wed Dec 18 2024 Rachel Menge - 6.6.57.1-6
- Bump release to match kernel-64k
diff --git a/SPECS/kubernetes/kubernetes.signatures.json b/SPECS/kubernetes/kubernetes.signatures.json
index c3bc7fdb1d4..c665825e132 100644
--- a/SPECS/kubernetes/kubernetes.signatures.json
+++ b/SPECS/kubernetes/kubernetes.signatures.json
@@ -1,6 +1,6 @@
{
"Signatures": {
"kubelet.service": "3be41509e18552113367252397cbd7a28e2c481de04ec54f09e232ffabda16d2",
- "kubernetes-v1.30.1.tar.gz": "e06188c08ff6bd651cdb8d6baa37ceb7906eaf1e165bbe5d74d8e0cd1d19136e"
+ "kubernetes-v1.30.3.tar.gz": "64d800416cee3e1753452632fb39222de87e3f08d134e6f977f9613c5f3019c1"
}
}
diff --git a/SPECS/kubernetes/kubernetes.spec b/SPECS/kubernetes/kubernetes.spec
index d1cbb64168f..26a094aa632 100644
--- a/SPECS/kubernetes/kubernetes.spec
+++ b/SPECS/kubernetes/kubernetes.spec
@@ -9,8 +9,8 @@
%define container_image_components 'kube-proxy kube-apiserver kube-controller-manager kube-scheduler'
Summary: Microsoft Kubernetes
Name: kubernetes
-Version: 1.30.1
-Release: 4%{?dist}
+Version: 1.30.3
+Release: 1%{?dist}
License: ASL 2.0
Vendor: Microsoft Corporation
Distribution: Azure Linux
@@ -271,6 +271,9 @@ fi
%{_exec_prefix}/local/bin/pause
%changelog
+* Wed Dec 11 2024 CBL-Mariner Servicing Account - 1.30.3-1
+- Auto-upgrade to 1.30.3 - Fix CVE-2024-10220
+
* Tue Oct 01 2024 Henry Li - 1.30.1-4
- Add patch to resolve CVE-2024-28180
diff --git a/SPECS/kubevirt/CVE-2024-45337.patch b/SPECS/kubevirt/CVE-2024-45337.patch
new file mode 100644
index 00000000000..f7d2f6a6724
--- /dev/null
+++ b/SPECS/kubevirt/CVE-2024-45337.patch
@@ -0,0 +1,77 @@
+https://github.com/golang/crypto/commit/b4f1988a35dee11ec3e05d6bf3e90b695fbd8909.patch
+
+From b4f1988a35dee11ec3e05d6bf3e90b695fbd8909 Mon Sep 17 00:00:00 2001
+From: Roland Shoemaker
+Date: Tue, 3 Dec 2024 09:03:03 -0800
+Subject: [PATCH] ssh: make the public key cache a 1-entry FIFO cache
+
+Users of the ssh package seem to extremely commonly misuse the
+PublicKeyCallback API, assuming that the key passed in the last call
+before a connection is established is the key used for authentication.
+Some users then make authorization decisions based on this key. This
+property is not documented, and may not be correct, due to the caching
+behavior of the package, resulting in users making incorrect
+authorization decisions about the connection.
+
+This change makes the cache a one entry FIFO cache, making the assumed
+property, that the last call to PublicKeyCallback represents the key
+actually used for authentication, actually hold.
+
+Thanks to Damien Tournoud, Patrick Dawkins, Vince Parker, and
+Jules Duvivier from the Platform.sh / Upsun engineering team
+for reporting this issue.
+
+Fixes golang/go#70779
+Fixes CVE-2024-45337
+
+Change-Id: Ife7c7b4045d8b6bcd7e3a417bdfae370c709797f
+Reviewed-on: https://go-review.googlesource.com/c/crypto/+/635315
+Reviewed-by: Roland Shoemaker
+Auto-Submit: Gopher Robot
+Reviewed-by: Damien Neil
+Reviewed-by: Nicola Murino
+LUCI-TryBot-Result: Go LUCI
+---
+ vendor/golang.org/x/crypto/ssh/server.go | 15 ++++++++++----
+
+diff --git a/vendor/golang.org/x/crypto/ssh/server.go b/vendor/golang.org/x/crypto/ssh/server.go
+index c0d1c29e6f..5b5ccd96f4 100644
+--- a/vendor/golang.org/x/crypto/ssh/server.go
++++ b/vendor/golang.org/x/crypto/ssh/server.go
+@@ -142,7 +142,7 @@ func (s *ServerConfig) AddHostKey(key Signer) {
+ }
+
+ // cachedPubKey contains the results of querying whether a public key is
+-// acceptable for a user.
++// acceptable for a user. This is a FIFO cache.
+ type cachedPubKey struct {
+ user string
+ pubKeyData []byte
+@@ -150,7 +150,13 @@ type cachedPubKey struct {
+ perms *Permissions
+ }
+
+-const maxCachedPubKeys = 16
++// maxCachedPubKeys is the number of cache entries we store.
++//
++// Due to consistent misuse of the PublicKeyCallback API, we have reduced this
++// to 1, such that the only key in the cache is the most recently seen one. This
++// forces the behavior that the last call to PublicKeyCallback will always be
++// with the key that is used for authentication.
++const maxCachedPubKeys = 1
+
+ // pubKeyCache caches tests for public keys. Since SSH clients
+ // will query whether a public key is acceptable before attempting to
+@@ -172,9 +178,10 @@ func (c *pubKeyCache) get(user string, pubKeyData []byte) (cachedPubKey, bool) {
+
+ // add adds the given tuple to the cache.
+ func (c *pubKeyCache) add(candidate cachedPubKey) {
+- if len(c.keys) < maxCachedPubKeys {
+- c.keys = append(c.keys, candidate)
++ if len(c.keys) >= maxCachedPubKeys {
++ c.keys = c.keys[1:]
+ }
++ c.keys = append(c.keys, candidate)
+ }
+
+ // ServerConn is an authenticated SSH connection, as seen from the
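
To make the cache change concrete, the sketch below (separate from the x/crypto code, with illustrative names) shows a one-entry FIFO append like the patched pubKeyCache.add: with maxCachedPubKeys reduced to 1, only the most recently offered key stays cached, so the last PublicKeyCallback call corresponds to the key actually used for authentication.

package main

import "fmt"

const maxCachedPubKeys = 1

// add appends candidate, evicting the oldest entry first so the cache only
// ever holds the most recently offered key.
func add(cache []string, candidate string) []string {
	if len(cache) >= maxCachedPubKeys {
		cache = cache[1:]
	}
	return append(cache, candidate)
}

func main() {
	var cache []string
	for _, key := range []string{"key-A", "key-B", "key-C"} {
		cache = add(cache, key)
	}
	fmt.Println(cache) // [key-C]
}
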
diff --git a/SPECS/kubevirt/kubevirt.spec b/SPECS/kubevirt/kubevirt.spec
index 4f8a2dd3506..485280f1c6a 100644
--- a/SPECS/kubevirt/kubevirt.spec
+++ b/SPECS/kubevirt/kubevirt.spec
@@ -20,7 +20,7 @@
Summary: Container native virtualization
Name: kubevirt
Version: 1.2.0
-Release: 10%{?dist}
+Release: 11%{?dist}
License: ASL 2.0
Vendor: Microsoft Corporation
Distribution: Azure Linux
@@ -33,6 +33,7 @@ Source0: https://github.com/kubevirt/kubevirt/archive/refs/tags/v%{versio
Patch0: Cleanup-housekeeping-cgroup-on-vm-del.patch
Patch1: CVE-2023-48795.patch
Patch2: CVE-2024-24786.patch
+Patch3: CVE-2024-45337.patch
%global debug_package %{nil}
BuildRequires: swtpm-tools
BuildRequires: glibc-devel
@@ -273,6 +274,9 @@ install -p -m 0644 cmd/virt-launcher/qemu.conf %{buildroot}%{_datadir}/kube-virt
%{_bindir}/virt-tests
%changelog
+* Fri Dec 20 2024 Aurelien Bombo - 1.2.0-11
+- Add patch for CVE-2024-45337
+
* Mon Nov 25 2024 Bala - 1.2.0-10
- Fix for CVE-2024-24786
diff --git a/SPECS/libarrow/CVE-2024-52338.patch b/SPECS/libarrow/CVE-2024-52338.patch
new file mode 100644
index 00000000000..3ed94d3550d
--- /dev/null
+++ b/SPECS/libarrow/CVE-2024-52338.patch
@@ -0,0 +1,145 @@
+diff --git a/r/R/extension.R b/r/R/extension.R
+index 59a0212..3529144 100644
+--- a/r/R/extension.R
++++ b/r/R/extension.R
+@@ -429,7 +429,7 @@ VctrsExtensionType <- R6Class("VctrsExtensionType",
+ paste0(capture.output(print(self$ptype())), collapse = "\n")
+ },
+ deserialize_instance = function() {
+- private$.ptype <- unserialize(self$extension_metadata())
++ private$.ptype <- safe_r_metadata(safe_unserialize(self$extension_metadata()))
+ },
+ ExtensionEquals = function(other) {
+ inherits(other, "VctrsExtensionType") && identical(self$ptype(), other$ptype())
+diff --git a/r/R/metadata.R b/r/R/metadata.R
+index 3ae2db4..28eecf4 100644
+--- a/r/R/metadata.R
++++ b/r/R/metadata.R
+@@ -30,7 +30,7 @@
+ }
+ }
+
+- out <- serialize(x, NULL, ascii = TRUE)
++ out <- serialize(safe_r_metadata(x, on_save = TRUE), NULL, ascii = TRUE)
+
+ # if the metadata is over 100 kB, compress
+ if (option_compress_metadata() && object.size(out) > 100000) {
+@@ -44,23 +44,106 @@
+ }
+
+ .deserialize_arrow_r_metadata <- function(x) {
+- tryCatch(
+- expr = {
+- out <- unserialize(charToRaw(x))
++ tryCatch(unserialize_r_metadata(x),
++ error = function(e) {
++ if (getOption("arrow.debug", FALSE)) {
++ print(conditionMessage(e))
++ }
++ warning("Invalid metadata$r", call. = FALSE)
++ NULL
++ }
++ )
++}
+
+- # if this is still raw, try decompressing
+- if (is.raw(out)) {
+- out <- unserialize(memDecompress(out, type = "gzip"))
++unserialize_r_metadata <- function(x) {
++ # Check that this is ASCII serialized data (as in, what we wrote)
++ if (!identical(substr(unclass(x), 1, 1), "A")) {
++ stop("Invalid serialized data")
++ }
++ out <- safe_unserialize(charToRaw(x))
++ # If it's still raw, decompress and unserialize again
++ if (is.raw(out)) {
++ decompressed <- memDecompress(out, type = "gzip")
++ if (!identical(rawToChar(decompressed[1]), "A")) {
++ stop("Invalid serialized compressed data")
++ }
++ out <- safe_unserialize(decompressed)
++ }
++ if (!is.list(out)) {
++ stop("Invalid serialized data: must be a list")
++ }
++ safe_r_metadata(out)
++}
++safe_unserialize <- function(x) {
++ # By capturing the data in a list, we can inspect it for promises without
++ # triggering their evaluation.
++ out <- list(unserialize(x))
++ if (typeof(out[[1]]) == "promise") {
++ stop("Serialized data contains a promise object")
++ }
++ out[[1]]
++}
++safe_r_metadata <- function(metadata, on_save = FALSE) {
++ # This function recurses through the metadata list and checks that all
++ # elements are of types that are allowed in R metadata.
++ # If it finds an element that is not allowed, it removes it.
++ #
++ # This function is used both when saving and loading metadata.
++ # @param on_save: If TRUE, the function will not warn if it removes elements:
++ # we're just cleaning up the metadata for saving. If FALSE, it means we're
++ # loading the metadata, and we'll warn if we find invalid elements.
++ #
++ # When loading metadata, you can optionally keep the invalid elements by
++ # setting `options(arrow.unsafe_metadata = TRUE)`. It will still check
++ # for invalid elements and warn if any are found, though.
++ # This variable will be used to store the types of elements that were removed,
++ # if any, so we can give an informative warning if needed.
++ types_removed <- c()
++ # Internal function that we'll recursively apply,
++ # and mutate the `types_removed` variable outside of it.
++ check_r_metadata_types_recursive <- function(x) {
++ allowed_types <- c("character", "double", "integer", "logical", "complex", "list", "NULL")
++ if (is.list(x)) {
++ types <- map_chr(x, typeof)
++ x[types == "list"] <- map(x[types == "list"], check_r_metadata_types_recursive)
++ ok <- types %in% allowed_types
++ if (!all(ok)) {
++ # Record the invalid types, then remove the offending elements
++ types_removed <<- c(types_removed, setdiff(types, allowed_types))
++ x <- x[ok]
+ }
+- out
+- },
+- error = function(e) {
+- warning("Invalid metadata$r", call. = FALSE)
+- NULL
+ }
+- )
++ x
++ }
++ new <- check_r_metadata_types_recursive(metadata)
++ # On save: don't warn, just save the filtered metadata
++ if (on_save) {
++ return(new)
++ }
++ # On load: warn if any elements were removed
++ if (length(types_removed)) {
++ types_msg <- paste("Type:", oxford_paste(unique(types_removed)))
++ if (getOption("arrow.unsafe_metadata", FALSE)) {
++ # We've opted-in to unsafe metadata, so warn but return the original metadata
++ rlang::warn(
++ "R metadata may have unsafe or invalid elements",
++ body = c("i" = types_msg)
++ )
++ new <- metadata
++ } else {
++ rlang::warn(
++ "Potentially unsafe or invalid elements have been discarded from R metadata.",
++ body = c(
++ "i" = types_msg,
++ ">" = "If you trust the source, you can set `options(arrow.unsafe_metadata = TRUE)` to preserve them."
++ )
++ )
++ }
++ }
++ new
+ }
+
++
+ #' @importFrom rlang trace_back
+ apply_arrow_r_metadata <- function(x, r_metadata) {
+ if (is.null(r_metadata)) {
diff --git a/SPECS/libarrow/libarrow.spec b/SPECS/libarrow/libarrow.spec
index 82ddf659733..915f7e5244a 100644
--- a/SPECS/libarrow/libarrow.spec
+++ b/SPECS/libarrow/libarrow.spec
@@ -13,7 +13,7 @@
Name: libarrow
Version: 15.0.0
-Release: 6%{?dist}
+Release: 7%{?dist}
Summary: A toolbox for accelerated data interchange and in-memory processing
License: Apache-2.0
URL: https://arrow.apache.org/
@@ -23,6 +23,7 @@ Vendor: Microsoft Corporation
Distribution: Azure Linux
Source0: https://github.com/apache/arrow/archive/refs/tags/apache-arrow-%{version}.tar.gz#/libarrow-%{version}.tar.gz
Patch0001: 0001-python-pyproject.toml.patch
+Patch0002: CVE-2024-52338.patch
# Apache ORC (liborc) has numerous compile errors and apparently assumes
# a 64-bit build and runtime environment. This is only consumer of the liborc
@@ -246,6 +247,9 @@ popd
%{_libdir}/pkgconfig/parquet*.pc
%changelog
+* Wed Dec 04 2024 Bhagyashri Pathak - 15.0.0-7
+- Patch to fix CVE-2024-52338
+
* Thu Jul 25 2024 Devin Anderson - 15.0.0-6
- Bump release to rebuild with latest 'abseil-cpp'.
- Fix 'rpm' warning about macro expansion inside a comment.
diff --git a/SPECS/libnvidia-container/libnvidia-container.signatures.json b/SPECS/libnvidia-container/libnvidia-container.signatures.json
index cb0ee199644..98ae0ebc573 100644
--- a/SPECS/libnvidia-container/libnvidia-container.signatures.json
+++ b/SPECS/libnvidia-container/libnvidia-container.signatures.json
@@ -1,6 +1,6 @@
{
"Signatures": {
- "libnvidia-container-1.17.1.tar.gz": "861ee77bbf1d19531f4c65e57989d5f1a41b0a0b28d6a2d0e2c8e2ede14c1e25",
+ "libnvidia-container-1.17.3.tar.gz": "9c28e729f1677b6ff9edb3f4dccff4f9827ffe13c04fbcffa41aabacbc0fdd54",
"nvidia-modprobe-550.54.14.tar.gz": "5687b0dfa6087dd480ae91e91ff1dca975794e35a2edcf9ec08d8f9cb98ef905"
}
}
\ No newline at end of file
diff --git a/SPECS/libnvidia-container/libnvidia-container.spec b/SPECS/libnvidia-container/libnvidia-container.spec
index 9ef428fcb67..455226ccdca 100644
--- a/SPECS/libnvidia-container/libnvidia-container.spec
+++ b/SPECS/libnvidia-container/libnvidia-container.spec
@@ -3,7 +3,7 @@
%define mod_probe_dir deps/src/nvidia-modprobe-%{modprobe_version}
Summary: NVIDIA container runtime library
Name: libnvidia-container
-Version: 1.17.1
+Version: 1.17.3
Release: 1%{?dist}
License: BSD AND ASL2.0 AND GPLv3+ AND LGPLv3+ AND MIT AND GPLv2
Vendor: Microsoft Corporation
@@ -135,6 +135,9 @@ This package contains command-line tools that facilitate using the library.
%{_bindir}/*
%changelog
+* Thu Dec 05 2024 Henry Li - 1.17.3-1
+- Upgrade to v1.17.3
+
* Mon Nov 11 2024 Henry Li - 1.17.1-1
- Upgrade to v1.17.1
diff --git a/SPECS/libseccomp/libseccomp.signatures.json b/SPECS/libseccomp/libseccomp.signatures.json
index 810e1646b60..e07a10271da 100644
--- a/SPECS/libseccomp/libseccomp.signatures.json
+++ b/SPECS/libseccomp/libseccomp.signatures.json
@@ -1,5 +1,5 @@
{
"Signatures": {
- "libseccomp-2.5.4.tar.gz": "d82902400405cf0068574ef3dc1fe5f5926207543ba1ae6f8e7a1576351dcbdb"
+ "libseccomp-2.5.5.tar.gz": "248a2c8a4d9b9858aa6baf52712c34afefcf9c9e94b76dce02c1c9aa25fb3375"
}
}
diff --git a/SPECS/libseccomp/libseccomp.spec b/SPECS/libseccomp/libseccomp.spec
index 61b68835420..3916f24b162 100644
--- a/SPECS/libseccomp/libseccomp.spec
+++ b/SPECS/libseccomp/libseccomp.spec
@@ -1,6 +1,6 @@
Summary: Enhanced seccomp library
Name: libseccomp
-Version: 2.5.4
+Version: 2.5.5
Release: 1%{?dist}
License: LGPLv2
Vendor: Microsoft Corporation
@@ -62,6 +62,9 @@ find %{buildroot} -type f -name "*.la" -delete -print
%{_mandir}/man3/*
%changelog
+* Tue Nov 05 2024 Nan Liu - 2.5.5-1
+- Upgrade to 2.5.5 as the older version is no longer supported upstream
+
* Fri Oct 27 2023 CBL-Mariner Servicing Account - 2.5.4-1
- Auto-upgrade to 2.5.4 - Azure Linux 3.0 - package upgrades
diff --git a/SPECS/moby-engine/CVE-2024-36620.patch b/SPECS/moby-engine/CVE-2024-36620.patch
new file mode 100644
index 00000000000..2f2ca221177
--- /dev/null
+++ b/SPECS/moby-engine/CVE-2024-36620.patch
@@ -0,0 +1,33 @@
+From ab570ab3d62038b3d26f96a9bb585d0b6095b9b4 Mon Sep 17 00:00:00 2001
+From: Christopher Petito <47751006+krissetto@users.noreply.github.com>
+Date: Fri, 19 Apr 2024 10:44:30 +0000
+Subject: [PATCH] nil dereference fix on image history Created value
+
+Issue was caused by the changes here https://github.com/moby/moby/pull/45504
+First released in v25.0.0-beta.1
+
+Signed-off-by: Christopher Petito <47751006+krissetto@users.noreply.github.com>
+---
+ daemon/images/image_history.go | 7 ++++++-
+ 1 file changed, 6 insertions(+), 1 deletion(-)
+
+diff --git a/daemon/images/image_history.go b/daemon/images/image_history.go
+index 1617f8be62906..f621ceae13bc6 100644
+--- a/daemon/images/image_history.go
++++ b/daemon/images/image_history.go
+@@ -43,9 +43,14 @@ func (i *ImageService) ImageHistory(ctx context.Context, name string) ([]*image.
+ layerCounter++
+ }
+
++ var created int64
++ if h.Created != nil {
++ created = h.Created.Unix()
++ }
++
+ history = append([]*image.HistoryResponseItem{{
+ ID: "",
+- Created: h.Created.Unix(),
++ Created: created,
+ CreatedBy: h.CreatedBy,
+ Comment: h.Comment,
+ Size: layerSize,
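
The hunk above is the standard nil-guard for an optional timestamp; the sketch below uses hypothetical type and function names to show the same idea in isolation: call Created.Unix() only after a nil check and fall back to the zero value otherwise.

package main

import (
	"fmt"
	"time"
)

// historyEntry is a hypothetical stand-in for a history entry whose Created
// field may be nil.
type historyEntry struct {
	Created *time.Time
}

// createdUnix dereferences Created only after a nil check, falling back to
// the zero value, which is the guard the patch adds.
func createdUnix(h historyEntry) int64 {
	var created int64
	if h.Created != nil {
		created = h.Created.Unix()
	}
	return created
}

func main() {
	now := time.Now()
	fmt.Println(createdUnix(historyEntry{Created: &now}))
	fmt.Println(createdUnix(historyEntry{})) // 0 rather than a nil-pointer panic
}
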
diff --git a/SPECS/moby-engine/CVE-2024-36621.patch b/SPECS/moby-engine/CVE-2024-36621.patch
new file mode 100644
index 00000000000..2f9fe886fdc
--- /dev/null
+++ b/SPECS/moby-engine/CVE-2024-36621.patch
@@ -0,0 +1,76 @@
+From 37545cc644344dcb576cba67eb7b6f51a463d31e Mon Sep 17 00:00:00 2001
+From: Tonis Tiigi
+Date: Wed, 6 Mar 2024 23:11:32 -0800
+Subject: [PATCH] builder-next: fix missing lock in ensurelayer
+
+When this was called concurrently from the moby image
+exporter there could be a data race where a layer was
+written to the refs map when it was already there.
+
+In that case the reference count got mixed up and on
+release only one of these layers was actually released.
+
+Signed-off-by: Tonis Tiigi
+---
+ .../builder-next/adapters/snapshot/layer.go | 3 +++
+ .../adapters/snapshot/snapshot.go | 19 +++++++++++--------
+ 2 files changed, 14 insertions(+), 8 deletions(-)
+
+diff --git a/builder/builder-next/adapters/snapshot/layer.go b/builder/builder-next/adapters/snapshot/layer.go
+index 73120ea70b2ee..fc83058339c7b 100644
+--- a/builder/builder-next/adapters/snapshot/layer.go
++++ b/builder/builder-next/adapters/snapshot/layer.go
+@@ -22,6 +22,9 @@ func (s *snapshotter) GetDiffIDs(ctx context.Context, key string) ([]layer.DiffI
+ }
+
+ func (s *snapshotter) EnsureLayer(ctx context.Context, key string) ([]layer.DiffID, error) {
++ s.layerCreateLocker.Lock(key)
++ defer s.layerCreateLocker.Unlock(key)
++
+ diffIDs, err := s.GetDiffIDs(ctx, key)
+ if err != nil {
+ return nil, err
+diff --git a/builder/builder-next/adapters/snapshot/snapshot.go b/builder/builder-next/adapters/snapshot/snapshot.go
+index a0d28ad984ba4..510ffefb49406 100644
+--- a/builder/builder-next/adapters/snapshot/snapshot.go
++++ b/builder/builder-next/adapters/snapshot/snapshot.go
+@@ -17,6 +17,7 @@ import (
+ "github.com/moby/buildkit/identity"
+ "github.com/moby/buildkit/snapshot"
+ "github.com/moby/buildkit/util/leaseutil"
++ "github.com/moby/locker"
+ "github.com/opencontainers/go-digest"
+ "github.com/pkg/errors"
+ bolt "go.etcd.io/bbolt"
+@@ -51,10 +52,11 @@ type checksumCalculator interface {
+ type snapshotter struct {
+ opt Opt
+
+- refs map[string]layer.Layer
+- db *bolt.DB
+- mu sync.Mutex
+- reg graphIDRegistrar
++ refs map[string]layer.Layer
++ db *bolt.DB
++ mu sync.Mutex
++ reg graphIDRegistrar
++ layerCreateLocker *locker.Locker
+ }
+
+ // NewSnapshotter creates a new snapshotter
+@@ -71,10 +73,11 @@ func NewSnapshotter(opt Opt, prevLM leases.Manager, ns string) (snapshot.Snapsho
+ }
+
+ s := &snapshotter{
+- opt: opt,
+- db: db,
+- refs: map[string]layer.Layer{},
+- reg: reg,
++ opt: opt,
++ db: db,
++ refs: map[string]layer.Layer{},
++ reg: reg,
++ layerCreateLocker: locker.New(),
+ }
+
+ slm := newLeaseManager(s, prevLM)
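
The fix serializes EnsureLayer per snapshot key (via github.com/moby/locker) so concurrent exporters cannot both insert the same layer into the refs map and skew its reference count. The sketch below shows the per-key locking idea using only the standard library; keyedLocker and the layer key are illustrative stand-ins, not moby's types.

```go
package main

import (
	"fmt"
	"sync"
)

// keyedLocker serializes work per key, similar in spirit to the named
// locker the patch wires into the snapshotter (illustrative only).
type keyedLocker struct {
	mu    sync.Mutex
	locks map[string]*sync.Mutex
}

func newKeyedLocker() *keyedLocker {
	return &keyedLocker{locks: map[string]*sync.Mutex{}}
}

// lock returns the mutex for key, creating it on first use, and locks it.
func (l *keyedLocker) lock(key string) *sync.Mutex {
	l.mu.Lock()
	m, ok := l.locks[key]
	if !ok {
		m = &sync.Mutex{}
		l.locks[key] = m
	}
	l.mu.Unlock()
	m.Lock()
	return m
}

func main() {
	refs := map[string]int{} // stands in for the snapshotter's refs map
	locker := newKeyedLocker()
	var wg sync.WaitGroup

	for i := 0; i < 8; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			m := locker.lock("layer-sha256:abc") // one writer per key at a time
			defer m.Unlock()
			refs["layer-sha256:abc"]++ // without the lock this write races
		}()
	}
	wg.Wait()
	fmt.Println(refs["layer-sha256:abc"]) // always 8 with the lock in place
}
```

Holding the per-key lock across the check-and-insert is what prevents the mixed-up reference counting the commit message describes.
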
diff --git a/SPECS/moby-engine/CVE-2024-36623.patch b/SPECS/moby-engine/CVE-2024-36623.patch
new file mode 100644
index 00000000000..6018f33abc2
--- /dev/null
+++ b/SPECS/moby-engine/CVE-2024-36623.patch
@@ -0,0 +1,45 @@
+From 5689dabfb357b673abdb4391eef426f297d7d1bb Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Pawe=C5=82=20Gronowski?=
+Date: Thu, 22 Feb 2024 18:01:40 +0100
+Subject: [PATCH] pkg/streamformatter: Make `progressOutput` concurrency safe
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Sync access to the underlying `io.Writer` with a mutex.
+
+Signed-off-by: Paweł Gronowski
+---
+ pkg/streamformatter/streamformatter.go | 5 +++++
+ 1 file changed, 5 insertions(+)
+
+diff --git a/pkg/streamformatter/streamformatter.go b/pkg/streamformatter/streamformatter.go
+index b0456e580dc9d..098df6b5236b9 100644
+--- a/pkg/streamformatter/streamformatter.go
++++ b/pkg/streamformatter/streamformatter.go
+@@ -5,6 +5,7 @@ import (
+ "encoding/json"
+ "fmt"
+ "io"
++ "sync"
+
+ "github.com/docker/docker/pkg/jsonmessage"
+ "github.com/docker/docker/pkg/progress"
+@@ -109,6 +110,7 @@ type progressOutput struct {
+ sf formatProgress
+ out io.Writer
+ newLines bool
++ mu sync.Mutex
+ }
+
+ // WriteProgress formats progress information from a ProgressReader.
+@@ -120,6 +122,9 @@ func (out *progressOutput) WriteProgress(prog progress.Progress) error {
+ jsonProgress := jsonmessage.JSONProgress{Current: prog.Current, Total: prog.Total, HideCounts: prog.HideCounts, Units: prog.Units}
+ formatted = out.sf.formatProgress(prog.ID, prog.Action, &jsonProgress, prog.Aux)
+ }
++
++ out.mu.Lock()
++ defer out.mu.Unlock()
+ _, err := out.out.Write(formatted)
+ if err != nil {
+ return err
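
This change wraps writes to the shared io.Writer behind a mutex so concurrent progress updates cannot interleave mid-message. A standalone sketch of that pattern follows; lockedWriter is an illustrative name, not the docker progressOutput type.

```go
package main

import (
	"bytes"
	"fmt"
	"io"
	"sync"
)

// lockedWriter serializes writes to a shared io.Writer with a mutex,
// mirroring the idea behind the progressOutput fix (illustrative only).
type lockedWriter struct {
	mu  sync.Mutex
	out io.Writer
}

func (w *lockedWriter) Write(p []byte) (int, error) {
	w.mu.Lock()
	defer w.mu.Unlock()
	return w.out.Write(p)
}

func main() {
	var buf bytes.Buffer
	w := &lockedWriter{out: &buf}

	var wg sync.WaitGroup
	for i := 0; i < 4; i++ {
		wg.Add(1)
		go func(id int) {
			defer wg.Done()
			fmt.Fprintf(w, "progress %d\n", id) // each line lands as one atomic Write
		}(i)
	}
	wg.Wait()
	fmt.Print(buf.String())
}
```
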
diff --git a/SPECS/moby-engine/CVE-2024-45337.patch b/SPECS/moby-engine/CVE-2024-45337.patch
new file mode 100644
index 00000000000..0c8df5f2421
--- /dev/null
+++ b/SPECS/moby-engine/CVE-2024-45337.patch
@@ -0,0 +1,77 @@
+https://github.com/golang/crypto/commit/b4f1988a35dee11ec3e05d6bf3e90b695fbd8909.patch
+
+From b4f1988a35dee11ec3e05d6bf3e90b695fbd8909 Mon Sep 17 00:00:00 2001
+From: Roland Shoemaker
+Date: Tue, 3 Dec 2024 09:03:03 -0800
+Subject: [PATCH] ssh: make the public key cache a 1-entry FIFO cache
+
+Users of the the ssh package seem to extremely commonly misuse the
+PublicKeyCallback API, assuming that the key passed in the last call
+before a connection is established is the key used for authentication.
+Some users then make authorization decisions based on this key. This
+property is not documented, and may not be correct, due to the caching
+behavior of the package, resulting in users making incorrect
+authorization decisions about the connection.
+
+This change makes the cache a one entry FIFO cache, making the assumed
+property, that the last call to PublicKeyCallback represents the key
+actually used for authentication, actually hold.
+
+Thanks to Damien Tournoud, Patrick Dawkins, Vince Parker, and
+Jules Duvivier from the Platform.sh / Upsun engineering team
+for reporting this issue.
+
+Fixes golang/go#70779
+Fixes CVE-2024-45337
+
+Change-Id: Ife7c7b4045d8b6bcd7e3a417bdfae370c709797f
+Reviewed-on: https://go-review.googlesource.com/c/crypto/+/635315
+Reviewed-by: Roland Shoemaker
+Auto-Submit: Gopher Robot
+Reviewed-by: Damien Neil
+Reviewed-by: Nicola Murino
+LUCI-TryBot-Result: Go LUCI
+---
+ vendor/golang.org/x/crypto/ssh/server.go | 15 ++++++++++----
+
+diff --git a/vendor/golang.org/x/crypto/ssh/server.go b/vendor/golang.org/x/crypto/ssh/server.go
+index c0d1c29e6f..5b5ccd96f4 100644
+--- a/vendor/golang.org/x/crypto/ssh/server.go
++++ b/vendor/golang.org/x/crypto/ssh/server.go
+@@ -149,7 +149,7 @@ func (s *ServerConfig) AddHostKey(key Signer) {
+ }
+
+ // cachedPubKey contains the results of querying whether a public key is
+-// acceptable for a user.
++// acceptable for a user. This is a FIFO cache.
+ type cachedPubKey struct {
+ user string
+ pubKeyData []byte
+@@ -157,7 +157,13 @@ type cachedPubKey struct {
+ perms *Permissions
+ }
+
+-const maxCachedPubKeys = 16
++// maxCachedPubKeys is the number of cache entries we store.
++//
++// Due to consistent misuse of the PublicKeyCallback API, we have reduced this
++// to 1, such that the only key in the cache is the most recently seen one. This
++// forces the behavior that the last call to PublicKeyCallback will always be
++// with the key that is used for authentication.
++const maxCachedPubKeys = 1
+
+ // pubKeyCache caches tests for public keys. Since SSH clients
+ // will query whether a public key is acceptable before attempting to
+@@ -179,9 +185,10 @@ func (c *pubKeyCache) get(user string, pubKeyData []byte) (cachedPubKey, bool) {
+
+ // add adds the given tuple to the cache.
+ func (c *pubKeyCache) add(candidate cachedPubKey) {
+- if len(c.keys) < maxCachedPubKeys {
+- c.keys = append(c.keys, candidate)
++ if len(c.keys) >= maxCachedPubKeys {
++ c.keys = c.keys[1:]
+ }
++ c.keys = append(c.keys, candidate)
+ }
+
+ // ServerConn is an authenticated SSH connection, as seen from the
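
The core of the upstream fix is shrinking the public-key cache to a one-entry FIFO, so the last PublicKeyCallback invocation always corresponds to the key that actually authenticated. Here is a small, self-contained Go sketch of that add/evict behavior; fifoCache and cacheEntry are illustrative stand-ins for x/crypto's pubKeyCache and cachedPubKey.

```go
package main

import "fmt"

// cacheEntry stands in for x/crypto's cachedPubKey (illustrative only).
type cacheEntry struct {
	user string
	key  string
}

const maxCachedKeys = 1 // the patch shrinks the cache from 16 to 1

type fifoCache struct {
	keys []cacheEntry
}

// add mirrors the patched pubKeyCache.add: once the cache is full, the
// oldest entry is dropped so the most recent key is always the one kept.
func (c *fifoCache) add(e cacheEntry) {
	if len(c.keys) >= maxCachedKeys {
		c.keys = c.keys[1:]
	}
	c.keys = append(c.keys, e)
}

func main() {
	c := &fifoCache{}
	c.add(cacheEntry{user: "alice", key: "probe-key"}) // pre-auth "is this key acceptable?" probe
	c.add(cacheEntry{user: "alice", key: "auth-key"})  // the key actually used to authenticate
	fmt.Println(c.keys[0].key)                         // auth-key
}
```

With the larger cache, a key seen during a pre-auth probe could be answered from cache later, so the final callback invocation did not necessarily match the authenticating key, which is the misuse the commit message warns about.
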
diff --git a/SPECS/moby-engine/moby-engine.spec b/SPECS/moby-engine/moby-engine.spec
index dc57d983520..50f00c3f016 100644
--- a/SPECS/moby-engine/moby-engine.spec
+++ b/SPECS/moby-engine/moby-engine.spec
@@ -3,7 +3,7 @@
Summary: The open-source application container engine
Name: moby-engine
Version: 25.0.3
-Release: 7%{?dist}
+Release: 9%{?dist}
License: ASL 2.0
Group: Tools/Container
URL: https://mobyproject.org
@@ -19,6 +19,10 @@ Patch1: enable-docker-proxy-libexec-search.patch
Patch2: CVE-2024-41110.patch
Patch3: CVE-2024-29018.patch
Patch4: CVE-2024-24786.patch
+Patch5: CVE-2024-36621.patch
+Patch6: CVE-2024-36620.patch
+Patch7: CVE-2024-36623.patch
+Patch8: CVE-2024-45337.patch
%{?systemd_requires}
@@ -114,6 +118,12 @@ fi
%{_unitdir}/*
%changelog
+* Fri Dec 20 2024 Aurelien Bombo - 25.0.3-9
+- Add patch for CVE-2024-45337
+
+* Wed Dec 04 2024 Adit Jha - 25.0.3-8
+- Fix CVE-2024-36620, CVE-2024-36621, and CVE-2024-36623 with patches
+
* Mon Nov 25 2024 Bala - 25.0.3-7
- Fix CVE-2024-24786 by patching
diff --git a/SPECS/nvidia-container-toolkit/nvidia-container-toolkit.signatures.json b/SPECS/nvidia-container-toolkit/nvidia-container-toolkit.signatures.json
index 0fa9f0cf7d0..092098cc515 100644
--- a/SPECS/nvidia-container-toolkit/nvidia-container-toolkit.signatures.json
+++ b/SPECS/nvidia-container-toolkit/nvidia-container-toolkit.signatures.json
@@ -1,6 +1,6 @@
{
"Signatures": {
- "nvidia-container-toolkit-1.17.1-vendor.tar.gz": "894d10f0504e7a8a8fe8748d736288e90f23e24f13aac058b6b83c4ca99dc40f",
- "nvidia-container-toolkit-1.17.1.tar.gz": "bf1e3ede225bfa41a932e00430e3efbc2c788d8a4e93e5133ff24b5a3b2ae1eb"
+ "nvidia-container-toolkit-1.17.3-vendor.tar.gz": "894d10f0504e7a8a8fe8748d736288e90f23e24f13aac058b6b83c4ca99dc40f",
+ "nvidia-container-toolkit-1.17.3.tar.gz": "58eb450e52d45483a26d9269cf4f74a8b9d0b765751581f1123b18dc48609791"
}
}
diff --git a/SPECS/nvidia-container-toolkit/nvidia-container-toolkit.spec b/SPECS/nvidia-container-toolkit/nvidia-container-toolkit.spec
index 4c41703fda2..8eb08e5bff1 100644
--- a/SPECS/nvidia-container-toolkit/nvidia-container-toolkit.spec
+++ b/SPECS/nvidia-container-toolkit/nvidia-container-toolkit.spec
@@ -1,7 +1,7 @@
%global debug_package %{nil}
Summary: NVIDIA container runtime hook
Name: nvidia-container-toolkit
-Version: 1.17.1
+Version: 1.17.3
Release: 1%{?dist}
License: ALS2.0
Vendor: Microsoft Corporation
@@ -32,7 +32,8 @@ BuildRequires: golang >= 1.20.7
Obsoletes: nvidia-container-runtime <= 3.5.0-1, nvidia-container-runtime-hook <= 1.4.0-2
Provides: nvidia-container-runtime
Provides: nvidia-container-runtime-hook
-Requires: libnvidia-container-tools >= 1.13.5, libnvidia-container-tools < 2.0.0
+Requires: libnvidia-container-tools >= %{version}, libnvidia-container-tools < 2.0.0
+Requires: nvidia-container-toolkit-base == %{version}-%{release}
%description
Provides a OCI hook to enable GPU support in containers.
@@ -58,12 +59,14 @@ tar -xvf %{SOURCE1}
go build -ldflags "-s -w " -o "nvidia-container-runtime-hook" ./cmd/nvidia-container-runtime-hook
go build -ldflags "-s -w " -o "nvidia-container-runtime" ./cmd/nvidia-container-runtime
go build -ldflags "-s -w " -o "nvidia-ctk" ./cmd/nvidia-ctk
+go build -ldflags "-s -w " -o "nvidia-cdi-hook" ./cmd/nvidia-cdi-hook
%install
mkdir -p %{buildroot}%{_bindir}
install -m 755 -t %{buildroot}%{_bindir} nvidia-container-runtime-hook
install -m 755 -t %{buildroot}%{_bindir} nvidia-container-runtime
install -m 755 -t %{buildroot}%{_bindir} nvidia-ctk
+install -m 755 -t %{buildroot}%{_bindir} nvidia-cdi-hook
%posttrans
ln -sf %{_bindir}/nvidia-container-runtime-hook %{_bindir}/nvidia-container-toolkit
@@ -82,8 +85,14 @@ rm -f %{_bindir}/nvidia-container-toolkit
%license LICENSE
%{_bindir}/nvidia-container-runtime
%{_bindir}/nvidia-ctk
+%{_bindir}/nvidia-cdi-hook
%changelog
+* Thu Dec 05 2024 Henry Li - 1.17.3-1
+- Upgrade to v1.17.3
+- Add nvidia-cdi-hook binary to nvidia-container-toolkit-base package
+- Add nvidia-container-toolkit-base as a runtime requirement for nvidia-container-toolkit
+
* Mon Nov 11 2024 Henry Li - 1.17.1-1
- Upgrade to v1.17.1 to resolve CVE-2024-0134
diff --git a/SPECS/packer/CVE-2024-45337.patch b/SPECS/packer/CVE-2024-45337.patch
new file mode 100644
index 00000000000..f7d2f6a6724
--- /dev/null
+++ b/SPECS/packer/CVE-2024-45337.patch
@@ -0,0 +1,77 @@
+https://github.com/golang/crypto/commit/b4f1988a35dee11ec3e05d6bf3e90b695fbd8909.patch
+
+From b4f1988a35dee11ec3e05d6bf3e90b695fbd8909 Mon Sep 17 00:00:00 2001
+From: Roland Shoemaker
+Date: Tue, 3 Dec 2024 09:03:03 -0800
+Subject: [PATCH] ssh: make the public key cache a 1-entry FIFO cache
+
+Users of the the ssh package seem to extremely commonly misuse the
+PublicKeyCallback API, assuming that the key passed in the last call
+before a connection is established is the key used for authentication.
+Some users then make authorization decisions based on this key. This
+property is not documented, and may not be correct, due to the caching
+behavior of the package, resulting in users making incorrect
+authorization decisions about the connection.
+
+This change makes the cache a one entry FIFO cache, making the assumed
+property, that the last call to PublicKeyCallback represents the key
+actually used for authentication, actually hold.
+
+Thanks to Damien Tournoud, Patrick Dawkins, Vince Parker, and
+Jules Duvivier from the Platform.sh / Upsun engineering team
+for reporting this issue.
+
+Fixes golang/go#70779
+Fixes CVE-2024-45337
+
+Change-Id: Ife7c7b4045d8b6bcd7e3a417bdfae370c709797f
+Reviewed-on: https://go-review.googlesource.com/c/crypto/+/635315
+Reviewed-by: Roland Shoemaker
+Auto-Submit: Gopher Robot
+Reviewed-by: Damien Neil
+Reviewed-by: Nicola Murino
+LUCI-TryBot-Result: Go LUCI
+---
+ vendor/golang.org/x/crypto/ssh/server.go | 15 ++++++++++----
+
+diff --git a/vendor/golang.org/x/crypto/ssh/server.go b/vendor/golang.org/x/crypto/ssh/server.go
+index c0d1c29e6f..5b5ccd96f4 100644
+--- a/vendor/golang.org/x/crypto/ssh/server.go
++++ b/vendor/golang.org/x/crypto/ssh/server.go
+@@ -142,7 +142,7 @@ func (s *ServerConfig) AddHostKey(key Signer) {
+ }
+
+ // cachedPubKey contains the results of querying whether a public key is
+-// acceptable for a user.
++// acceptable for a user. This is a FIFO cache.
+ type cachedPubKey struct {
+ user string
+ pubKeyData []byte
+@@ -150,7 +150,13 @@ type cachedPubKey struct {
+ perms *Permissions
+ }
+
+-const maxCachedPubKeys = 16
++// maxCachedPubKeys is the number of cache entries we store.
++//
++// Due to consistent misuse of the PublicKeyCallback API, we have reduced this
++// to 1, such that the only key in the cache is the most recently seen one. This
++// forces the behavior that the last call to PublicKeyCallback will always be
++// with the key that is used for authentication.
++const maxCachedPubKeys = 1
+
+ // pubKeyCache caches tests for public keys. Since SSH clients
+ // will query whether a public key is acceptable before attempting to
+@@ -172,9 +178,10 @@ func (c *pubKeyCache) get(user string, pubKeyData []byte) (cachedPubKey, bool) {
+
+ // add adds the given tuple to the cache.
+ func (c *pubKeyCache) add(candidate cachedPubKey) {
+- if len(c.keys) < maxCachedPubKeys {
+- c.keys = append(c.keys, candidate)
++ if len(c.keys) >= maxCachedPubKeys {
++ c.keys = c.keys[1:]
+ }
++ c.keys = append(c.keys, candidate)
+ }
+
+ // ServerConn is an authenticated SSH connection, as seen from the
diff --git a/SPECS/packer/packer.spec b/SPECS/packer/packer.spec
index e2f50e20492..e818b1c155f 100644
--- a/SPECS/packer/packer.spec
+++ b/SPECS/packer/packer.spec
@@ -4,7 +4,7 @@
Summary: Tool for creating identical machine images for multiple platforms from a single source configuration.
Name: packer
Version: 1.9.5
-Release: 3%{?dist}
+Release: 4%{?dist}
License: MPLv2.0
Vendor: Microsoft Corporation
Distribution: Azure Linux
@@ -36,6 +36,7 @@ Patch1: CVE-2022-3064.patch
Patch2: CVE-2023-49569.patch
Patch3: CVE-2024-6104.patch
Patch4: CVE-2024-24786.patch
+Patch5: CVE-2024-45337.patch
BuildRequires: golang >= 1.17.1
BuildRequires: kernel-headers
BuildRequires: glibc-devel
@@ -69,6 +70,9 @@ go test -mod=vendor
%{_bindir}/packer
%changelog
+* Fri Dec 20 2024 Aurelien Bombo - 1.9.5-4
+- Add patch for CVE-2024-45337
+
* Mon Nov 25 2024 Bala - 1.9.5-3
- Patched CVE-2024-24786
diff --git a/SPECS/pam/CVE-2024-10041.patch b/SPECS/pam/CVE-2024-10041.patch
new file mode 100644
index 00000000000..27fc26ee959
--- /dev/null
+++ b/SPECS/pam/CVE-2024-10041.patch
@@ -0,0 +1,89 @@
+From b3020da7da384d769f27a8713257fbe1001878be Mon Sep 17 00:00:00 2001
+From: "Dmitry V. Levin"
+Date: Mon, 1 Jan 2024 12:00:00 +0000
+Subject: [PATCH] pam_unix/passverify: always run the helper to obtain shadow
+ password file entries
+
+Initially, when pam_unix.so verified the password, it used to try to
+obtain the shadow password file entry for the given user by invoking
+getspnam(3), and only when that didn't work and the effective uid
+was nonzero, pam_unix.so used to invoke the helper as a fallback.
+
+When SELinux support was introduced by commit
+67aab1ff5515054341a438cf9804e9c9b3a88033, the fallback was extended
+also for the case when SELinux was enabled.
+
+Later, commit f220cace205332a3dc34e7b37a85e7627e097e7d extended the
+fallback conditions for the case when pam_modutil_getspnam() failed
+with EACCES.
+
+Since commit 470823c4aacef5cb3b1180be6ed70846b61a3752, the helper is
+invoked as a fallback when pam_modutil_getspnam() fails for any reason.
+
+The ultimate solution for the case when pam_unix.so does not have
+permissions to obtain the shadow password file entry is to stop trying
+to use pam_modutil_getspnam() and to invoke the helper instead.
+Here are two recent examples.
+
+https://github.com/linux-pam/linux-pam/pull/484 describes a system
+configuration where libnss_systemd is enabled along with libnss_files
+in the shadow entry of nsswitch.conf, so when libnss_files is unable
+to obtain the shadow password file entry for the root user, e.g. when
+SELinux is enabled, NSS falls back to libnss_systemd which returns
+a synthesized shadow password file entry for the root user, which
+in turn locks the root user out.
+
+https://bugzilla.redhat.com/show_bug.cgi?id=2150155 describes
+essentially the same problem in a similar system configuration.
+
+This commit is the final step in the direction of addressing the issue:
+for password verification pam_unix.so now invokes the helper instead of
+making the pam_modutil_getspnam() call.
+
+* modules/pam_unix/passverify.c (get_account_info) [!HELPER_COMPILE]:
+Always return PAM_UNIX_RUN_HELPER instead of trying to obtain
+the shadow password file entry.
+
+Complements: https://github.com/linux-pam/linux-pam/pull/386
+Resolves: https://github.com/linux-pam/linux-pam/pull/484
+Link: https://github.com/authselect/authselect/commit/1e78f7e048747024a846fd22d68afc6993734e92
+---
+ modules/pam_unix/passverify.c | 21 +++++++++++----------
+ 1 file changed, 11 insertions(+), 10 deletions(-)
+
+diff --git a/modules/pam_unix/passverify.c b/modules/pam_unix/passverify.c
+index 2474fa7aa4..c48e3c5a79 100644
+--- a/modules/pam_unix/passverify.c
++++ b/modules/pam_unix/passverify.c
+@@ -238,20 +238,21 @@ PAMH_ARG_DECL(int get_account_info,
+ return PAM_UNIX_RUN_HELPER;
+ #endif
+ } else if (is_pwd_shadowed(*pwd)) {
++#ifdef HELPER_COMPILE
+ /*
+- * ...and shadow password file entry for this user,
++ * shadow password file entry for this user,
+ * if shadowing is enabled
+ */
+- *spwdent = pam_modutil_getspnam(pamh, name);
+- if (*spwdent == NULL) {
+-#ifndef HELPER_COMPILE
+- /* still a chance the user can authenticate */
+- return PAM_UNIX_RUN_HELPER;
+-#endif
+- return PAM_AUTHINFO_UNAVAIL;
+- }
+- if ((*spwdent)->sp_pwdp == NULL)
++ *spwdent = getspnam(name);
++ if (*spwdent == NULL || (*spwdent)->sp_pwdp == NULL)
+ return PAM_AUTHINFO_UNAVAIL;
++#else
++ /*
++ * The helper has to be invoked to deal with
++ * the shadow password file entry.
++ */
++ return PAM_UNIX_RUN_HELPER;
++#endif
+ }
+ } else {
+ return PAM_USER_UNKNOWN;
diff --git a/SPECS/pam/CVE-2024-10963.patch b/SPECS/pam/CVE-2024-10963.patch
new file mode 100644
index 00000000000..57491a7aede
--- /dev/null
+++ b/SPECS/pam/CVE-2024-10963.patch
@@ -0,0 +1,224 @@
+From 940747f88c16e029b69a74e80a2e94f65cb3e628 Mon Sep 17 00:00:00 2001
+From: Thorsten Kukuk
+Date: Thu, 14 Nov 2024 10:27:28 +0100
+Subject: [PATCH] pam_access: rework resolving of tokens as hostname
+
+* modules/pam_access/pam_access.c: separate resolving of IP addresses
+ from hostnames. Don't resolve TTYs or display variables as hostname
+ (#834).
+ Add "nodns" option to disallow resolving of tokens as hostname.
+* modules/pam_access/pam_access.8.xml: document nodns option
+* modules/pam_access/access.conf.5.xml: document that hostnames should
+ be written as FQHN.
+---
+ modules/pam_access/access.conf.5.xml | 4 ++
+ modules/pam_access/pam_access.8.xml | 46 ++++++++++++------
+ modules/pam_access/pam_access.c | 72 +++++++++++++++++++++++++++-
+ 3 files changed, 105 insertions(+), 17 deletions(-)
+
+diff --git a/modules/pam_access/access.conf.5.xml b/modules/pam_access/access.conf.5.xml
+index 0b93db00e4..10b8ba9252 100644
+--- a/modules/pam_access/access.conf.5.xml
++++ b/modules/pam_access/access.conf.5.xml
+@@ -233,6 +233,10 @@
+ item and the line will be most probably ignored. For this reason, it is not
+ recommended to put spaces around the ':' characters.
+
++
++ Hostnames should be written as Fully-Qualified Host Name (FQHN) to avoid
++ confusion with device names or PAM service names.
++
+
+
+
+diff --git a/modules/pam_access/pam_access.8.xml b/modules/pam_access/pam_access.8.xml
+index c991d7a097..71a4f7ee94 100644
+--- a/modules/pam_access/pam_access.8.xml
++++ b/modules/pam_access/pam_access.8.xml
+@@ -22,11 +22,14 @@
+
+ debug
+
++
++ noaudit
++
+
+ nodefgroup
+
+
+- noaudit
++ nodns
+
+
+ accessfile=file
+@@ -132,6 +135,33 @@
+
+
+
++
++
++ nodefgroup
++
++
++
++ User tokens which are not enclosed in parentheses will not be
++ matched against the group database. The backwards compatible default is
++ to try the group database match even for tokens not enclosed
++ in parentheses.
++
++
++
++
++
++
++ nodns
++
++
++
++ Do not try to resolve tokens as hostnames, only IPv4 and IPv6
++ addresses will be resolved. Which means to allow login from a
++ remote host, the IP addresses need to be specified in access.conf.
++
++
++
++
+
+
+ fieldsep=separators
+@@ -185,20 +215,6 @@
+
+
+
+-
+-
+- nodefgroup
+-
+-
+-
+- User tokens which are not enclosed in parentheses will not be
+- matched against the group database. The backwards compatible default is
+- to try the group database match even for tokens not enclosed
+- in parentheses.
+-
+-
+-
+-
+
+
+
+diff --git a/modules/pam_access/pam_access.c b/modules/pam_access/pam_access.c
+index 48e7c7e974..109115e9cf 100644
+--- a/modules/pam_access/pam_access.c
++++ b/modules/pam_access/pam_access.c
+@@ -100,6 +100,7 @@ struct login_info {
+ int debug; /* Print debugging messages. */
+ int only_new_group_syntax; /* Only allow group entries of the form "(xyz)" */
+ int noaudit; /* Do not audit denials */
++ int nodns; /* Do not try to resolve tokens as hostnames */
+ const char *fs; /* field separator */
+ const char *sep; /* list-element separator */
+ int from_remote_host; /* If PAM_RHOST was used for from */
+@@ -150,7 +150,9 @@ parse_args(pam_handle_t *pamh, struct login_info *loginfo,
+ loginfo->only_new_group_syntax = YES;
+ } else if (strcmp (argv[i], "noaudit") == 0) {
+ loginfo->noaudit = YES;
+- } else {
++ } else if (strcmp (argv[i], "nodns") == 0) {
++ loginfo->nodns = YES;
++ } else {
+ pam_syslog(pamh, LOG_ERR, "unrecognized option [%s]", argv[i]);
+ }
+ }
+@@ -820,7 +823,7 @@ remote_match (pam_handle_t *pamh, char *tok, struct login_info *item)
+ if ((str_len = strlen(string)) > tok_len
+ && strcasecmp(tok, string + str_len - tok_len) == 0)
+ return YES;
+- } else if (tok[tok_len - 1] == '.') { /* internet network numbers (end with ".") */
++ } else if (tok[tok_len - 1] == '.') { /* internet network numbers/subnet (end with ".") */
+ struct addrinfo hint;
+
+ memset (&hint, '\0', sizeof (hint));
+@@ -895,6 +898,39 @@ string_match (pam_handle_t *pamh, const char *tok, const char *string,
+ }
+
+
++static int
++is_device (pam_handle_t *pamh, const char *tok)
++{
++ struct stat st;
++ const char *dev = "/dev/";
++ char *devname;
++
++ devname = malloc (strlen(dev) + strlen (tok) + 1);
++ if (devname == NULL) {
++ pam_syslog(pamh, LOG_ERR, "Cannot allocate memory for device name: %m");
++ /*
++ * We should return an error and abort, but pam_access has no good
++ * error handling.
++ */
++ return NO;
++ }
++
++ char *cp = stpcpy (devname, dev);
++ strcpy (cp, tok);
++
++ if (lstat(devname, &st) != 0)
++ {
++ free (devname);
++ return NO;
++ }
++ free (devname);
++
++ if (S_ISCHR(st.st_mode))
++ return YES;
++
++ return NO;
++}
++
+ /* network_netmask_match - match a string against one token
+ * where string is a hostname or ip (v4,v6) address and tok
+ * represents either a hostname, a single ip (v4,v6) address
+@@ -956,10 +992,42 @@ network_netmask_match (pam_handle_t *pamh,
+ return NO;
+ }
+ }
++ else if (isipaddr(tok, NULL, NULL) == YES)
++ {
++ if (getaddrinfo (tok, NULL, NULL, &ai) != 0)
++ {
++ if (item->debug)
++ pam_syslog(pamh, LOG_DEBUG, "cannot resolve IP address \"%s\"", tok);
++
++ return NO;
++ }
++ netmask_ptr = NULL;
++ }
++ else if (item->nodns)
++ {
++ /* Only hostnames are left, which we would need to resolve via DNS */
++ return NO;
++ }
+ else
+ {
++ /* Bail out on X11 Display entries and ttys. */
++ if (tok[0] == ':')
++ {
++ if (item->debug)
++ pam_syslog (pamh, LOG_DEBUG,
++ "network_netmask_match: tok=%s is X11 display", tok);
++ return NO;
++ }
++ if (is_device (pamh, tok))
++ {
++ if (item->debug)
++ pam_syslog (pamh, LOG_DEBUG,
++ "network_netmask_match: tok=%s is a TTY", tok);
++ return NO;
++ }
++
+ /*
+- * It is either an IP address or a hostname.
++ * It is most likely a hostname.
+ * Let getaddrinfo sort everything out
+ */
+ if (getaddrinfo (tok, NULL, NULL, &ai) != 0)
diff --git a/SPECS/pam/pam.spec b/SPECS/pam/pam.spec
index 18cb42a6858..2342234b901 100644
--- a/SPECS/pam/pam.spec
+++ b/SPECS/pam/pam.spec
@@ -1,7 +1,7 @@
Summary: Linux Pluggable Authentication Modules
Name: pam
Version: 1.5.3
-Release: 2%{?dist}
+Release: 4%{?dist}
License: BSD and GPLv2+
URL: https://github.com/linux-pam/linux-pam
Source0: https://github.com/linux-pam/linux-pam/releases/download/v%{version}/Linux-PAM-%{version}.tar.xz
@@ -16,6 +16,8 @@ Requires: audit-libs
Recommends: cracklib-dicts
Patch0: CVE-2024-22365.patch
+Patch1: CVE-2024-10963.patch
+Patch2: CVE-2024-10041.patch
%description
The Linux PAM package contains Pluggable Authentication Modules used to
@@ -104,6 +106,12 @@ EOF
%{_libdir}/pkgconfig/pamc.pc
%changelog
+* Wed Dec 18 2024 Adit Jha - 1.5.3-4
+- Patching CVE-2024-10041.
+
+* Fri Dec 06 2024 Adit Jha - 1.5.3-3
+- Patching CVE-2024-10963.
+
* Wed Oct 30 2024 Pawel Winogrodzki - 1.5.3-2
- Patching CVE-2024-22365.
diff --git a/SPECS/php/php-8.1.0-phpinfo.patch b/SPECS/php/php-8.3.13-phpinfo.patch
similarity index 98%
rename from SPECS/php/php-8.1.0-phpinfo.patch
rename to SPECS/php/php-8.3.13-phpinfo.patch
index d19b2f4bab5..a02a2b8a9b7 100644
--- a/SPECS/php/php-8.1.0-phpinfo.patch
+++ b/SPECS/php/php-8.3.13-phpinfo.patch
@@ -25,7 +25,7 @@ diff -up ./ext/standard/tests/general_functions/phpinfo.phpt.phpinfo ./ext/stand
@@ -17,7 +17,6 @@ PHP Version => %s
System => %s
- Build Date => %s%a
+ Build Date => %r(.+?)%r
-Configure Command => %s
Server API => Command Line Interface
Virtual Directory Support => %s
diff --git a/SPECS/php/php-8.3.14-phpinfo.patch b/SPECS/php/php-8.3.14-phpinfo.patch
new file mode 100644
index 00000000000..a02a2b8a9b7
--- /dev/null
+++ b/SPECS/php/php-8.3.14-phpinfo.patch
@@ -0,0 +1,44 @@
+
+Drop "Configure Command" from phpinfo as it doesn't
+provide any useful information.
+The available extensions are not related to this command.
+
+Replace full GCC name by gcc in php -v output
+
+
+diff -up ./ext/standard/info.c.phpinfo ./ext/standard/info.c
+--- ./ext/standard/info.c.phpinfo 2020-07-21 10:49:31.000000000 +0200
++++ ./ext/standard/info.c 2020-07-21 11:41:56.295633523 +0200
+@@ -805,9 +805,6 @@ PHPAPI ZEND_COLD void php_print_info(int
+ #ifdef PHP_BUILD_ARCH
+ php_info_print_table_row(2, "Architecture", PHP_BUILD_ARCH);
+ #endif
+-#ifdef CONFIGURE_COMMAND
+- php_info_print_table_row(2, "Configure Command", CONFIGURE_COMMAND );
+-#endif
+
+ if (sapi_module.pretty_name) {
+ php_info_print_table_row(2, "Server API", sapi_module.pretty_name );
+diff -up ./ext/standard/tests/general_functions/phpinfo.phpt.phpinfo ./ext/standard/tests/general_functions/phpinfo.phpt
+--- ./ext/standard/tests/general_functions/phpinfo.phpt.phpinfo 2020-07-21 10:49:31.000000000 +0200
++++ ./ext/standard/tests/general_functions/phpinfo.phpt 2020-07-21 11:41:56.296633522 +0200
+@@ -17,7 +17,6 @@ PHP Version => %s
+
+ System => %s
+ Build Date => %r(.+?)%r
+-Configure Command => %s
+ Server API => Command Line Interface
+ Virtual Directory Support => %s
+ Configuration File (php.ini) Path => %s
+diff -up ./sapi/cli/php_cli.c.phpinfo ./sapi/cli/php_cli.c
+--- ./sapi/cli/php_cli.c.phpinfo 2020-07-21 11:43:38.812475300 +0200
++++ ./sapi/cli/php_cli.c 2020-07-21 11:43:45.783464540 +0200
+@@ -645,7 +645,7 @@ static int do_cli(int argc, char **argv)
+ "NTS"
+ #endif
+ #ifdef PHP_BUILD_COMPILER
+- " " PHP_BUILD_COMPILER
++ " gcc"
+ #endif
+ #ifdef PHP_BUILD_ARCH
+ " " PHP_BUILD_ARCH
diff --git a/SPECS/php/php.signatures.json b/SPECS/php/php.signatures.json
index bf36916be81..1d5fd0b6db9 100644
--- a/SPECS/php/php.signatures.json
+++ b/SPECS/php/php.signatures.json
@@ -6,7 +6,7 @@
"nginx-fpm.conf": "5a222ab2c3fc0145cb67a1c5125471bbf097de304e77c9858e7077a3b4fcad59",
"nginx-php.conf": "b3b3f744c4c122302fcb11f39cac78d01cef15ee6f8bd67e98b3438efcf8dc95",
"opcache-default.blacklist": "4eef0875e1a0c6a75b8a2bafd4ddc029b83be74dd336a6a99214b0c32808cb38",
- "php-8.3.12.tar.xz": "f774e28633e26fc8c5197f4dae58ec9e3ff87d1b4311cbc61ab05a7ad24bd131",
+ "php-8.3.14.tar.xz": "58b4cb9019bf70c0cbcdb814c7df79b9065059d14cf7dbf48d971f8e56ae9be7",
"php-fpm-www.conf": "1cacdd4962c01a0a968933c38db503023940ad9105f021bdab85d6cdc46dcbb8",
"php-fpm.conf": "bb261d53b9b42bb163a7637bb373ffa18a20dddf27a3efe6cb5ed1b1cf5981a9",
"php-fpm.logrotate": "7d8279bebb9ffabc596a2699150e93d4ce4513245890b9b786d337288b19fa79",
diff --git a/SPECS/php/php.spec b/SPECS/php/php.spec
index 54e6b7f089f..1b106f2f3f7 100644
--- a/SPECS/php/php.spec
+++ b/SPECS/php/php.spec
@@ -32,7 +32,7 @@
%global with_qdbm 0
Summary: PHP scripting language for creating dynamic web sites
Name: php
-Version: 8.3.12
+Version: 8.3.14
Release: 1%{?dist}
# All files licensed under PHP version 3.01, except
# Zend is licensed under Zend
@@ -77,7 +77,7 @@ Patch43: php-7.4.0-phpize.patch
Patch45: php-7.4.0-ldap_r.patch
# drop "Configure command" from phpinfo output
# and only use gcc (instead of full version)
-Patch47: php-8.1.0-phpinfo.patch
+Patch47: php-8.3.14-phpinfo.patch
# Upstream fixes (100+)
# Security fixes (200+)
# Fixes for tests (300+)
@@ -1514,6 +1514,10 @@ systemctl try-restart php-fpm.service >/dev/null 2>&1 || :
%dir %{_datadir}/php/preload
%changelog
+* Wed Dec 04 2024 Kavya Sree Kaitepalli - 8.3.14-1
+- Upgrade to 8.3.14 to fix CVE-2024-8932, CVE-2024-11234, CVE-2024-11233, CVE-2024-11236
+- Update patch for phpinfo
+
* Wed Oct 16 2024 Archana Choudhary - 8.3.12-1
- Upgrade to 8.3.12 to fix CVE-2024-8927, CVE-2024-8925
- Refactor patch (with fuzzing) for system tzdata
diff --git a/SPECS/prebuilt-ca-certificates-base/prebuilt-ca-certificates-base.spec b/SPECS/prebuilt-ca-certificates-base/prebuilt-ca-certificates-base.spec
index 4b0f03161d2..a891db4c20c 100644
--- a/SPECS/prebuilt-ca-certificates-base/prebuilt-ca-certificates-base.spec
+++ b/SPECS/prebuilt-ca-certificates-base/prebuilt-ca-certificates-base.spec
@@ -3,7 +3,7 @@ Name: prebuilt-ca-certificates-base
# When updating, "Epoch, "Version", AND "Release" tags must be updated in the "ca-certificates" package as well.
Epoch: 1
Version: %{azl}.0.0
-Release: 7%{?dist}
+Release: 8%{?dist}
License: MIT
Vendor: Microsoft Corporation
Distribution: Azure Linux
@@ -46,6 +46,9 @@ find %{buildroot} -name README -delete
%{_sysconfdir}/pki/java/cacerts
%changelog
+* Wed Dec 11 2024 Pawel Winogrodzki - 3.0.0-8
+- Update to add CAs distrusted by Microsoft.
+
* Tue Aug 13 2024 CBL-Mariner Servicing Account - 3.0.0-7
- Making 'Release' match with 'ca-certificates'
diff --git a/SPECS/prebuilt-ca-certificates/prebuilt-ca-certificates.spec b/SPECS/prebuilt-ca-certificates/prebuilt-ca-certificates.spec
index 0c9326f5c12..f1b153e627c 100644
--- a/SPECS/prebuilt-ca-certificates/prebuilt-ca-certificates.spec
+++ b/SPECS/prebuilt-ca-certificates/prebuilt-ca-certificates.spec
@@ -3,7 +3,7 @@ Name: prebuilt-ca-certificates
# When updating, "Epoch, "Version", AND "Release" tags must be updated in the "ca-certificates" package as well.
Epoch: 1
Version: %{azl}.0.0
-Release: 7%{?dist}
+Release: 8%{?dist}
License: MIT
Vendor: Microsoft Corporation
Distribution: Azure Linux
@@ -49,6 +49,9 @@ find %{buildroot} -name README -delete
%{_sysconfdir}/pki/java/cacerts
%changelog
+* Wed Dec 11 2024 Pawel Winogrodzki - 3.0.0-8
+- Update to add CAs distrusted by Microsoft.
+
* Tue Aug 13 2024 CBL-Mariner Servicing Account - 3.0.0-7
- Making 'Release' match with 'ca-certificates'
diff --git a/SPECS/python-virtualenv/CVE-2024-53899.patch b/SPECS/python-virtualenv/CVE-2024-53899.patch
new file mode 100644
index 00000000000..798ad566216
--- /dev/null
+++ b/SPECS/python-virtualenv/CVE-2024-53899.patch
@@ -0,0 +1,421 @@
+From f18ac23159a5ec6763eb91be51218cb2ea29dbd2 Mon Sep 17 00:00:00 2001
+From: Sudipta Pandit
+Date: Wed, 11 Dec 2024 14:14:56 +0530
+Subject: [PATCH] Backport fix for CVE-2024-53899
+
+Reference: https://github.com/pypa/virtualenv/pull/2771
+---
+ src/virtualenv/activation/bash/activate.sh | 8 ++++----
+ src/virtualenv/activation/batch/__init__.py | 4 ++++
+ src/virtualenv/activation/cshell/activate.csh | 8 ++++----
+ src/virtualenv/activation/fish/activate.fish | 8 ++++----
+ src/virtualenv/activation/nushell/__init__.py | 19 ++++++++++++++++++
+ src/virtualenv/activation/nushell/activate.nu | 8 ++++----
+ .../activation/powershell/__init__.py | 12 +++++++++++
+ .../activation/powershell/activate.ps1 | 6 +++---
+ src/virtualenv/activation/python/__init__.py | 6 +++++-
+ .../activation/python/activate_this.py | 8 ++++----
+ src/virtualenv/activation/via_template.py | 13 +++++++++++-
+ tests/conftest.py | 6 +++++-
+ tests/unit/activation/conftest.py | 3 +--
+ tests/unit/activation/test_batch.py | 10 +++++-----
+ tests/unit/activation/test_powershell.py | 20 ++++++++++++++-----
+ 15 files changed, 101 insertions(+), 38 deletions(-)
+
+diff --git a/src/virtualenv/activation/bash/activate.sh b/src/virtualenv/activation/bash/activate.sh
+index b06e3fd..e412509 100644
+--- a/src/virtualenv/activation/bash/activate.sh
++++ b/src/virtualenv/activation/bash/activate.sh
+@@ -45,18 +45,18 @@ deactivate () {
+ # unset irrelevant variables
+ deactivate nondestructive
+
+-VIRTUAL_ENV='__VIRTUAL_ENV__'
++VIRTUAL_ENV=__VIRTUAL_ENV__
+ if ([ "$OSTYPE" = "cygwin" ] || [ "$OSTYPE" = "msys" ]) && $(command -v cygpath &> /dev/null) ; then
+ VIRTUAL_ENV=$(cygpath -u "$VIRTUAL_ENV")
+ fi
+ export VIRTUAL_ENV
+
+ _OLD_VIRTUAL_PATH="$PATH"
+-PATH="$VIRTUAL_ENV/__BIN_NAME__:$PATH"
++PATH="$VIRTUAL_ENV/"__BIN_NAME__":$PATH"
+ export PATH
+
+-if [ "x__VIRTUAL_PROMPT__" != x ] ; then
+- VIRTUAL_ENV_PROMPT="__VIRTUAL_PROMPT__"
++if [ "x"__VIRTUAL_PROMPT__ != x ] ; then
++ VIRTUAL_ENV_PROMPT=__VIRTUAL_PROMPT__
+ else
+ VIRTUAL_ENV_PROMPT=$(basename "$VIRTUAL_ENV")
+ fi
+diff --git a/src/virtualenv/activation/batch/__init__.py b/src/virtualenv/activation/batch/__init__.py
+index a6d58eb..3d74ba8 100644
+--- a/src/virtualenv/activation/batch/__init__.py
++++ b/src/virtualenv/activation/batch/__init__.py
+@@ -15,6 +15,10 @@ class BatchActivator(ViaTemplateActivator):
+ yield "deactivate.bat"
+ yield "pydoc.bat"
+
++ @staticmethod
++ def quote(string):
++ return string
++
+ def instantiate_template(self, replacements, template, creator):
+ # ensure the text has all newlines as \r\n - required by batch
+ base = super().instantiate_template(replacements, template, creator)
+diff --git a/src/virtualenv/activation/cshell/activate.csh b/src/virtualenv/activation/cshell/activate.csh
+index f0c9cca..24de550 100644
+--- a/src/virtualenv/activation/cshell/activate.csh
++++ b/src/virtualenv/activation/cshell/activate.csh
+@@ -10,15 +10,15 @@ alias deactivate 'test $?_OLD_VIRTUAL_PATH != 0 && setenv PATH "$_OLD_VIRTUAL_PA
+ # Unset irrelevant variables.
+ deactivate nondestructive
+
+-setenv VIRTUAL_ENV '__VIRTUAL_ENV__'
++setenv VIRTUAL_ENV __VIRTUAL_ENV__
+
+ set _OLD_VIRTUAL_PATH="$PATH:q"
+-setenv PATH "$VIRTUAL_ENV:q/__BIN_NAME__:$PATH:q"
++setenv PATH "$VIRTUAL_ENV:q/"__BIN_NAME__":$PATH:q"
+
+
+
+-if ('__VIRTUAL_PROMPT__' != "") then
+- setenv VIRTUAL_ENV_PROMPT '__VIRTUAL_PROMPT__'
++if (__VIRTUAL_PROMPT__ != "") then
++ setenv VIRTUAL_ENV_PROMPT __VIRTUAL_PROMPT__
+ else
+ setenv VIRTUAL_ENV_PROMPT "$VIRTUAL_ENV:t:q"
+ endif
+diff --git a/src/virtualenv/activation/fish/activate.fish b/src/virtualenv/activation/fish/activate.fish
+index fcedde4..4c614c3 100644
+--- a/src/virtualenv/activation/fish/activate.fish
++++ b/src/virtualenv/activation/fish/activate.fish
+@@ -58,7 +58,7 @@ end
+ # Unset irrelevant variables.
+ deactivate nondestructive
+
+-set -gx VIRTUAL_ENV '__VIRTUAL_ENV__'
++set -gx VIRTUAL_ENV __VIRTUAL_ENV__
+
+ # https://github.com/fish-shell/fish-shell/issues/436 altered PATH handling
+ if test (echo $FISH_VERSION | head -c 1) -lt 3
+@@ -66,12 +66,12 @@ if test (echo $FISH_VERSION | head -c 1) -lt 3
+ else
+ set -gx _OLD_VIRTUAL_PATH $PATH
+ end
+-set -gx PATH "$VIRTUAL_ENV"'/__BIN_NAME__' $PATH
++set -gx PATH "$VIRTUAL_ENV"'/'__BIN_NAME__ $PATH
+
+ # Prompt override provided?
+ # If not, just use the environment name.
+-if test -n '__VIRTUAL_PROMPT__'
+- set -gx VIRTUAL_ENV_PROMPT '__VIRTUAL_PROMPT__'
++if test -n __VIRTUAL_PROMPT__
++ set -gx VIRTUAL_ENV_PROMPT __VIRTUAL_PROMPT__
+ else
+ set -gx VIRTUAL_ENV_PROMPT (basename "$VIRTUAL_ENV")
+ end
+diff --git a/src/virtualenv/activation/nushell/__init__.py b/src/virtualenv/activation/nushell/__init__.py
+index 68cd4a3..ef7a79a 100644
+--- a/src/virtualenv/activation/nushell/__init__.py
++++ b/src/virtualenv/activation/nushell/__init__.py
+@@ -7,6 +7,25 @@ class NushellActivator(ViaTemplateActivator):
+ def templates(self):
+ yield "activate.nu"
+
++ @staticmethod
++ def quote(string):
++ """
++ Nushell supports raw strings like: r###'this is a string'###.
++
++ This method finds the maximum continuous sharps in the string and then
++ quote it with an extra sharp.
++ """
++ max_sharps = 0
++ current_sharps = 0
++ for char in string:
++ if char == "#":
++ current_sharps += 1
++ max_sharps = max(current_sharps, max_sharps)
++ else:
++ current_sharps = 0
++ wrapping = "#" * (max_sharps + 1)
++ return f"r{wrapping}'{string}'{wrapping}"
++
+ def replacements(self, creator, dest_folder): # noqa: ARG002
+ return {
+ "__VIRTUAL_PROMPT__": "" if self.flag_prompt is None else self.flag_prompt,
+diff --git a/src/virtualenv/activation/nushell/activate.nu b/src/virtualenv/activation/nushell/activate.nu
+index 19d4fa1..00a41e0 100644
+--- a/src/virtualenv/activation/nushell/activate.nu
++++ b/src/virtualenv/activation/nushell/activate.nu
+@@ -32,8 +32,8 @@ export-env {
+ }
+ }
+
+- let virtual_env = '__VIRTUAL_ENV__'
+- let bin = '__BIN_NAME__'
++ let virtual_env = __VIRTUAL_ENV__
++ let bin = __BIN_NAME__
+
+ let is_windows = ($nu.os-info.family) == 'windows'
+ let path_name = (if (has-env 'Path') {
+@@ -47,10 +47,10 @@ export-env {
+ let new_path = ($env | get $path_name | prepend $venv_path)
+
+ # If there is no default prompt, then use the env name instead
+- let virtual_env_prompt = (if ('__VIRTUAL_PROMPT__' | is-empty) {
++ let virtual_env_prompt = (if (__VIRTUAL_PROMPT__ | is-empty) {
+ ($virtual_env | path basename)
+ } else {
+- '__VIRTUAL_PROMPT__'
++ __VIRTUAL_PROMPT__
+ })
+
+ let new_env = {
+diff --git a/src/virtualenv/activation/powershell/__init__.py b/src/virtualenv/activation/powershell/__init__.py
+index 1f6d0f4..8489656 100644
+--- a/src/virtualenv/activation/powershell/__init__.py
++++ b/src/virtualenv/activation/powershell/__init__.py
+@@ -7,6 +7,18 @@ class PowerShellActivator(ViaTemplateActivator):
+ def templates(self):
+ yield "activate.ps1"
+
++ @staticmethod
++ def quote(string):
++ """
++ This should satisfy PowerShell quoting rules [1], unless the quoted
++ string is passed directly to Windows native commands [2].
++
++ [1]: https://learn.microsoft.com/en-us/powershell/module/microsoft.powershell.core/about/about_quoting_rules
++ [2]: https://learn.microsoft.com/en-us/powershell/module/microsoft.powershell.core/about/about_parsing#passing-arguments-that-contain-quote-characters
++ """ # noqa: D205
++ string = string.replace("'", "''")
++ return f"'{string}'"
++
+
+ __all__ = [
+ "PowerShellActivator",
+diff --git a/src/virtualenv/activation/powershell/activate.ps1 b/src/virtualenv/activation/powershell/activate.ps1
+index 5ccfe12..bd30e2e 100644
+--- a/src/virtualenv/activation/powershell/activate.ps1
++++ b/src/virtualenv/activation/powershell/activate.ps1
+@@ -37,8 +37,8 @@ deactivate -nondestructive
+ $VIRTUAL_ENV = $BASE_DIR
+ $env:VIRTUAL_ENV = $VIRTUAL_ENV
+
+-if ("__VIRTUAL_PROMPT__" -ne "") {
+- $env:VIRTUAL_ENV_PROMPT = "__VIRTUAL_PROMPT__"
++if (__VIRTUAL_PROMPT__ -ne "") {
++ $env:VIRTUAL_ENV_PROMPT = __VIRTUAL_PROMPT__
+ }
+ else {
+ $env:VIRTUAL_ENV_PROMPT = $( Split-Path $env:VIRTUAL_ENV -Leaf )
+@@ -46,7 +46,7 @@ else {
+
+ New-Variable -Scope global -Name _OLD_VIRTUAL_PATH -Value $env:PATH
+
+-$env:PATH = "$env:VIRTUAL_ENV/__BIN_NAME____PATH_SEP__" + $env:PATH
++$env:PATH = "$env:VIRTUAL_ENV/" + __BIN_NAME__ + __PATH_SEP__ + $env:PATH
+ if (!$env:VIRTUAL_ENV_DISABLE_PROMPT) {
+ function global:_old_virtual_prompt {
+ ""
+diff --git a/src/virtualenv/activation/python/__init__.py b/src/virtualenv/activation/python/__init__.py
+index 3126a39..e900f7e 100644
+--- a/src/virtualenv/activation/python/__init__.py
++++ b/src/virtualenv/activation/python/__init__.py
+@@ -10,10 +10,14 @@ class PythonActivator(ViaTemplateActivator):
+ def templates(self):
+ yield "activate_this.py"
+
++ @staticmethod
++ def quote(string):
++ return repr(string)
++
+ def replacements(self, creator, dest_folder):
+ replacements = super().replacements(creator, dest_folder)
+ lib_folders = OrderedDict((os.path.relpath(str(i), str(dest_folder)), None) for i in creator.libs)
+- lib_folders = os.pathsep.join(lib_folders.keys()).replace("\\", "\\\\") # escape Windows path characters
++ lib_folders = os.pathsep.join(lib_folders.keys())
+ replacements.update(
+ {
+ "__LIB_FOLDERS__": lib_folders,
+diff --git a/src/virtualenv/activation/python/activate_this.py b/src/virtualenv/activation/python/activate_this.py
+index 8066af1..97e0777 100644
+--- a/src/virtualenv/activation/python/activate_this.py
++++ b/src/virtualenv/activation/python/activate_this.py
+@@ -18,18 +18,18 @@ except NameError as exc:
+ raise AssertionError(msg) from exc
+
+ bin_dir = os.path.dirname(abs_file)
+-base = bin_dir[: -len("__BIN_NAME__") - 1] # strip away the bin part from the __file__, plus the path separator
++base = bin_dir[: -len(__BIN_NAME__) - 1] # strip away the bin part from the __file__, plus the path separator
+
+ # prepend bin to PATH (this file is inside the bin directory)
+ os.environ["PATH"] = os.pathsep.join([bin_dir, *os.environ.get("PATH", "").split(os.pathsep)])
+ os.environ["VIRTUAL_ENV"] = base # virtual env is right above bin directory
+-os.environ["VIRTUAL_ENV_PROMPT"] = "__VIRTUAL_PROMPT__" or os.path.basename(base) # noqa: SIM222
++os.environ["VIRTUAL_ENV_PROMPT"] = __VIRTUAL_PROMPT__ or os.path.basename(base)
+
+ # add the virtual environments libraries to the host python import mechanism
+ prev_length = len(sys.path)
+-for lib in "__LIB_FOLDERS__".split(os.pathsep):
++for lib in __LIB_FOLDERS__.split(os.pathsep):
+ path = os.path.realpath(os.path.join(bin_dir, lib))
+- site.addsitedir(path.decode("utf-8") if "__DECODE_PATH__" else path)
++ site.addsitedir(path.decode("utf-8") if __DECODE_PATH__ else path)
+ sys.path[:] = sys.path[prev_length:] + sys.path[0:prev_length]
+
+ sys.real_prefix = sys.prefix
+diff --git a/src/virtualenv/activation/via_template.py b/src/virtualenv/activation/via_template.py
+index 239318c..94e3639 100644
+--- a/src/virtualenv/activation/via_template.py
++++ b/src/virtualenv/activation/via_template.py
+@@ -2,6 +2,7 @@ from __future__ import annotations
+
+ import os
+ import sys
++import shlex
+ from abc import ABCMeta, abstractmethod
+
+ from .activator import Activator
+@@ -21,6 +22,16 @@ class ViaTemplateActivator(Activator, metaclass=ABCMeta):
+ def templates(self):
+ raise NotImplementedError
+
++ @staticmethod
++ def quote(string):
++ """
++ Quote strings in the activation script.
++
++ :param string: the string to quote
++ :return: quoted string that works in the activation script
++ """
++ return shlex.quote(string)
++
+ def generate(self, creator):
+ dest_folder = creator.bin_dir
+ replacements = self.replacements(creator, dest_folder)
+@@ -61,7 +72,7 @@ class ViaTemplateActivator(Activator, metaclass=ABCMeta):
+ text = binary.decode("utf-8", errors="strict")
+ for key, value in replacements.items():
+ value_uni = self._repr_unicode(creator, value)
+- text = text.replace(key, value_uni)
++ text = text.replace(key, self.quote(value_uni))
+ return text
+
+ @staticmethod
+diff --git a/tests/conftest.py b/tests/conftest.py
+index bec6d8f..869ca10 100644
+--- a/tests/conftest.py
++++ b/tests/conftest.py
+@@ -275,7 +275,11 @@ def is_inside_ci():
+
+ @pytest.fixture(scope="session")
+ def special_char_name():
+- base = "e-$ èрт🚒♞中片-j"
++ base = "'\";&&e-$ èрт🚒♞中片-j"
++ if IS_WIN:
++ # get rid of invalid characters on Windows
++ base = base.replace('"', "")
++ base = base.replace(";", "")
+ # workaround for pypy3 https://bitbucket.org/pypy/pypy/issues/3147/venv-non-ascii-support-windows
+ encoding = "ascii" if IS_WIN else sys.getfilesystemencoding()
+ # let's not include characters that the file system cannot encode)
+diff --git a/tests/unit/activation/conftest.py b/tests/unit/activation/conftest.py
+index 4811d07..bc66dd9 100644
+--- a/tests/unit/activation/conftest.py
++++ b/tests/unit/activation/conftest.py
+@@ -6,7 +6,6 @@ import subprocess
+ import sys
+ from os.path import dirname, normcase
+ from pathlib import Path
+-from shlex import quote
+ from subprocess import Popen
+
+ import pytest
+@@ -153,7 +152,7 @@ class ActivationTester:
+ assert out[-1] == "None", raw
+
+ def quote(self, s):
+- return quote(s)
++ return self.of_class.quote(s)
+
+ def python_cmd(self, cmd):
+ return f"{os.path.basename(sys.executable)} -c {self.quote(cmd)}"
+diff --git a/tests/unit/activation/test_batch.py b/tests/unit/activation/test_batch.py
+index 10dc41b..5ea0652 100644
+--- a/tests/unit/activation/test_batch.py
++++ b/tests/unit/activation/test_batch.py
+@@ -1,7 +1,5 @@
+ from __future__ import annotations
+
+-from shlex import quote
+-
+ import pytest
+
+ from virtualenv.activation import BatchActivator
+@@ -27,10 +25,12 @@ def test_batch(activation_tester_class, activation_tester, tmp_path):
+ return ["@echo off", "", "chcp 65001 1>NUL", *super()._get_test_lines(activate_script)]
+
+ def quote(self, s):
+- """double quotes needs to be single, and single need to be double"""
+- return "".join(("'" if c == '"' else ('"' if c == "'" else c)) for c in quote(s))
++ if '"' in s or " " in s:
++ text = s.replace('"', r"\"")
++ return f'"{text}"'
++ return s
+
+ def print_prompt(self):
+- return "echo %PROMPT%"
++ return 'echo "%PROMPT%"'
+
+ activation_tester(Batch)
+diff --git a/tests/unit/activation/test_powershell.py b/tests/unit/activation/test_powershell.py
+index c454d69..8c66a9d 100644
+--- a/tests/unit/activation/test_powershell.py
++++ b/tests/unit/activation/test_powershell.py
+@@ -1,7 +1,6 @@
+ from __future__ import annotations
+
+ import sys
+-from shlex import quote
+
+ import pytest
+
+@@ -21,10 +20,6 @@ def test_powershell(activation_tester_class, activation_tester, monkeypatch):
+ self.activate_cmd = "."
+ self.script_encoding = "utf-16"
+
+- def quote(self, s):
+- """powershell double quote needed for quotes within single quotes"""
+- return quote(s).replace('"', '""')
+-
+ def _get_test_lines(self, activate_script):
+ # for BATCH utf-8 support need change the character code page to 650001
+ return super()._get_test_lines(activate_script)
+@@ -35,4 +30,19 @@ def test_powershell(activation_tester_class, activation_tester, monkeypatch):
+ def print_prompt(self):
+ return "prompt"
+
++ def quote(self, s):
++ """
++ Tester will pass strings to native commands on Windows so extra
++ parsing rules are used. Check `PowerShellActivator.quote` for more
++ details.
++ """
++ text = PowerShellActivator.quote(s)
++ return text.replace('"', '""') if sys.platform == "win32" else text
++
++ def activate_call(self, script):
++ # Commands are called without quotes in PowerShell
++ cmd = self.activate_cmd
++ scr = self.quote(str(script))
++ return f"{cmd} {scr}".strip()
++
+ activation_tester(PowerShell)
+--
+2.34.1
+
diff --git a/SPECS/python-virtualenv/python-virtualenv.spec b/SPECS/python-virtualenv/python-virtualenv.spec
index b8f8b1499b9..ee8cf8739cf 100644
--- a/SPECS/python-virtualenv/python-virtualenv.spec
+++ b/SPECS/python-virtualenv/python-virtualenv.spec
@@ -1,7 +1,7 @@
Summary: Virtual Python Environment builder
Name: python-virtualenv
Version: 20.25.0
-Release: 2%{?dist}
+Release: 3%{?dist}
License: MIT
Vendor: Microsoft Corporation
Distribution: Azure Linux
@@ -9,6 +9,7 @@ Group: Development/Languages/Python
URL: https://pypi.python.org/pypi/virtualenv
Source0: https://files.pythonhosted.org/packages/94/d7/adb787076e65dc99ef057e0118e25becf80dd05233ef4c86f07aa35f6492/virtualenv-20.25.0.tar.gz
Patch0: 0001-replace-to-flit.patch
+Patch1: CVE-2024-53899.patch
BuildArch: noarch
%description
@@ -37,7 +38,7 @@ Provides: %{name}-doc = %{version}-%{release}
virtualenv is a tool to create isolated Python environment.
%prep
-%autosetup -n virtualenv-%{version}
+%autosetup -p1 -n virtualenv-%{version}
%generate_buildrequires
@@ -60,6 +61,9 @@ tox -e py
%{_bindir}/virtualenv
%changelog
+* Wed Dec 11 2024 Sudipta Pandit - 20.25.0-3
+- Backport fix for CVE-2024-53899
+
* Wed Apr 24 2024 Andrew Phelps - 20.25.0-2
- Add runtime requirement on python3-filelock
diff --git a/SPECS/python-zipp/CVE-2024-5569.patch b/SPECS/python-zipp/CVE-2024-5569.patch
new file mode 100644
index 00000000000..5150f7a6d88
--- /dev/null
+++ b/SPECS/python-zipp/CVE-2024-5569.patch
@@ -0,0 +1,88 @@
+From 5c5c9987169788d7cb55db9755f8b9981ebee515 Mon Sep 17 00:00:00 2001
+From: Kanishk-Bansal
+Date: Wed, 18 Dec 2024 07:11:58 +0000
+Subject: [PATCH] Fix CVE Patch
+
+Signed-off-by: Kanishk-Bansal
+---
+ zipp/__init__.py | 64 +++++++++++++++++++++++++++++++++++++++++++++++-
+ 1 file changed, 63 insertions(+), 1 deletion(-)
+
+diff --git a/zipp/__init__.py b/zipp/__init__.py
+index becd010..e980e9b 100644
+--- a/zipp/__init__.py
++++ b/zipp/__init__.py
+@@ -84,7 +84,69 @@ class InitializedState:
+ super().__init__(*args, **kwargs)
+
+
+-class CompleteDirs(InitializedState, zipfile.ZipFile):
++class SanitizedNames:
++ """
++ ZipFile mix-in to ensure names are sanitized.
++ """
++
++ def namelist(self):
++ return list(map(self._sanitize, super().namelist()))
++
++ @staticmethod
++ def _sanitize(name):
++ r"""
++ Ensure a relative path with posix separators and no dot names.
++
++ Modeled after
++ https://github.com/python/cpython/blob/bcc1be39cb1d04ad9fc0bd1b9193d3972835a57c/Lib/zipfile/__init__.py#L1799-L1813
++ but provides consistent cross-platform behavior.
++
++ >>> san = SanitizedNames._sanitize
++ >>> san('/foo/bar')
++ 'foo/bar'
++ >>> san('//foo.txt')
++ 'foo.txt'
++ >>> san('foo/.././bar.txt')
++ 'foo/bar.txt'
++ >>> san('foo../.bar.txt')
++ 'foo../.bar.txt'
++ >>> san('\\foo\\bar.txt')
++ 'foo/bar.txt'
++ >>> san('D:\\foo.txt')
++ 'D/foo.txt'
++ >>> san('\\\\server\\share\\file.txt')
++ 'server/share/file.txt'
++ >>> san('\\\\?\\GLOBALROOT\\Volume3')
++ '?/GLOBALROOT/Volume3'
++ >>> san('\\\\.\\PhysicalDrive1\\root')
++ 'PhysicalDrive1/root'
++
++ Retain any trailing slash.
++ >>> san('abc/')
++ 'abc/'
++
++ Raises a ValueError if the result is empty.
++ >>> san('../..')
++ Traceback (most recent call last):
++ ...
++ ValueError: Empty filename
++ """
++
++ def allowed(part):
++ return part and part not in {'..', '.'}
++
++ # Remove the drive letter.
++ # Don't use ntpath.splitdrive, because that also strips UNC paths
++ bare = re.sub('^([A-Z]):', r'\1', name, flags=re.IGNORECASE)
++ clean = bare.replace('\\', '/')
++ parts = clean.split('/')
++ joined = '/'.join(filter(allowed, parts))
++ if not joined:
++ raise ValueError("Empty filename")
++ return joined + '/' * name.endswith('/')
++
++
++class CompleteDirs(InitializedState, SanitizedNames, zipfile.ZipFile):
+ """
+ A ZipFile subclass that ensures that implied directories
+ are always included in the namelist.
+--
+2.45.2
+
diff --git a/SPECS/python-zipp/python-zipp.spec b/SPECS/python-zipp/python-zipp.spec
index ce356c902ad..ff82f90e551 100644
--- a/SPECS/python-zipp/python-zipp.spec
+++ b/SPECS/python-zipp/python-zipp.spec
@@ -3,12 +3,13 @@
Summary: Backport of pathlib-compatible object wrapper for zip files
Name: python-%{pypi_name}
Version: 3.17.0
-Release: 2%{?dist}
+Release: 3%{?dist}
License: MIT
Vendor: Microsoft Corporation
Distribution: Azure Linux
URL: https://github.com/jaraco/zipp
Source0: %{pypi_source}
+Patch0: CVE-2024-5569.patch
BuildArch: noarch
@@ -38,7 +39,7 @@ Summary: %{summary}
A pathlib-compatible Zipfile object wrapper. A backport of the Path object.
%prep
-%autosetup -n %{pypi_name}-%{version}
+%autosetup -n %{pypi_name}-%{version} -p1
%generate_buildrequires
%pyproject_buildrequires -r
@@ -61,6 +62,9 @@ rm -rf .pyproject-builddir
%doc README.rst
%changelog
+* Wed Dec 18 2024 Kanishk Bansal - 3.17.0-3
+- Address CVE-2024-5569 with an upstream patch
+
* Fri Mar 01 2024 Andrew Phelps - 3.17.0-2
- Add BR for python-setuptools_scm
diff --git a/SPECS/python3/CVE-2024-12254.patch b/SPECS/python3/CVE-2024-12254.patch
new file mode 100644
index 00000000000..d88bbb9fbc0
--- /dev/null
+++ b/SPECS/python3/CVE-2024-12254.patch
@@ -0,0 +1,203 @@
+From bfc2e93d755bf496e5ef4cae9609d2823122c909 Mon Sep 17 00:00:00 2001
+From: "J. Nick Koston"
+Date: Thu, 5 Dec 2024 10:01:10 -0600
+Subject: [PATCH 01/10] Ensure writelines pauses the protocol if needed
+
+---
+ Lib/asyncio/selector_events.py | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/Lib/asyncio/selector_events.py b/Lib/asyncio/selector_events.py
+index f94bf10b4225e7..f1ab9b12d69a5d 100644
+--- a/Lib/asyncio/selector_events.py
++++ b/Lib/asyncio/selector_events.py
+@@ -1175,6 +1175,7 @@ def writelines(self, list_of_data):
+ # If the entire buffer couldn't be written, register a write handler
+ if self._buffer:
+ self._loop._add_writer(self._sock_fd, self._write_ready)
++ self._maybe_pause_protocol()
+
+ def can_write_eof(self):
+ return True
+
+From e54226f50c0b6fc6f1b08493f32f4c8e5e735348 Mon Sep 17 00:00:00 2001
+From: "J. Nick Koston"
+Date: Thu, 5 Dec 2024 10:38:03 -0600
+Subject: [PATCH 02/10] add test coverage
+
+---
+ Lib/test/test_asyncio/test_selector_events.py | 12 ++++++++++++
+ 1 file changed, 12 insertions(+)
+
+diff --git a/Lib/test/test_asyncio/test_selector_events.py b/Lib/test/test_asyncio/test_selector_events.py
+index aaeda33dd0c677..efca30f37414f9 100644
+--- a/Lib/test/test_asyncio/test_selector_events.py
++++ b/Lib/test/test_asyncio/test_selector_events.py
+@@ -805,6 +805,18 @@ def test_writelines_send_partial(self):
+ self.assertTrue(self.sock.send.called)
+ self.assertTrue(self.loop.writers)
+
++ def test_writelines_pauses_protocol(self):
++ data = memoryview(b'data')
++ self.sock.send.return_value = 2
++ self.sock.send.fileno.return_value = 7
++
++ transport = self.socket_transport()
++ transport._high_water = 1
++ transport.writelines([data])
++ self.assertTrue(self.protocol.pause_writing.called)
++ self.assertTrue(self.sock.send.called)
++ self.assertTrue(self.loop.writers)
++
+ @unittest.skipUnless(selector_events._HAS_SENDMSG, 'no sendmsg')
+ def test_write_sendmsg_full(self):
+ data = memoryview(b'data')
+
+From 235085682fbd1d37ecd9838d74859dd447c81344 Mon Sep 17 00:00:00 2001
+From: "blurb-it[bot]" <43283697+blurb-it[bot]@users.noreply.github.com>
+Date: Thu, 5 Dec 2024 21:35:22 +0000
+Subject: [PATCH 03/10] =?UTF-8?q?=F0=9F=93=9C=F0=9F=A4=96=20Added=20by=20b?=
+ =?UTF-8?q?lurb=5Fit.?=
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+---
+ .../next/Security/2024-12-05-21-35-19.gh-issue-127655.xpPoOf.rst | 1 +
+ 1 file changed, 1 insertion(+)
+ create mode 100644 Misc/NEWS.d/next/Security/2024-12-05-21-35-19.gh-issue-127655.xpPoOf.rst
+
+diff --git a/Misc/NEWS.d/next/Security/2024-12-05-21-35-19.gh-issue-127655.xpPoOf.rst b/Misc/NEWS.d/next/Security/2024-12-05-21-35-19.gh-issue-127655.xpPoOf.rst
+new file mode 100644
+index 00000000000000..33a18014e74332
+--- /dev/null
++++ b/Misc/NEWS.d/next/Security/2024-12-05-21-35-19.gh-issue-127655.xpPoOf.rst
+@@ -0,0 +1 @@
++Fixed the :mod:`asyncio` selector transport not pausing the protocol when the buffer reaches the high water mark.
+
+From 8fcc087fabafb0b9a14c8f9c402fe963fdf0107b Mon Sep 17 00:00:00 2001
+From: "J. Nick Koston"
+Date: Thu, 5 Dec 2024 15:36:53 -0600
+Subject: [PATCH 04/10] Update
+ Misc/NEWS.d/next/Security/2024-12-05-21-35-19.gh-issue-127655.xpPoOf.rst
+
+---
+ .../Security/2024-12-05-21-35-19.gh-issue-127655.xpPoOf.rst | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/Misc/NEWS.d/next/Security/2024-12-05-21-35-19.gh-issue-127655.xpPoOf.rst b/Misc/NEWS.d/next/Security/2024-12-05-21-35-19.gh-issue-127655.xpPoOf.rst
+index 33a18014e74332..a44f54fdee407e 100644
+--- a/Misc/NEWS.d/next/Security/2024-12-05-21-35-19.gh-issue-127655.xpPoOf.rst
++++ b/Misc/NEWS.d/next/Security/2024-12-05-21-35-19.gh-issue-127655.xpPoOf.rst
+@@ -1 +1 @@
+-Fixed the :mod:`asyncio` selector transport not pausing the protocol when the buffer reaches the high water mark.
++Fixed the :mod:`asyncio` selector transport not pausing writes for the protocol when the buffer reaches the high water mark.
+
+From 2d550302474ffafdd16b5f5eec9af96c133c98e5 Mon Sep 17 00:00:00 2001
+From: "J. Nick Koston"
+Date: Thu, 5 Dec 2024 19:37:35 -0600
+Subject: [PATCH 05/10] Update
+ Misc/NEWS.d/next/Security/2024-12-05-21-35-19.gh-issue-127655.xpPoOf.rst
+
+---
+ .../Security/2024-12-05-21-35-19.gh-issue-127655.xpPoOf.rst | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/Misc/NEWS.d/next/Security/2024-12-05-21-35-19.gh-issue-127655.xpPoOf.rst b/Misc/NEWS.d/next/Security/2024-12-05-21-35-19.gh-issue-127655.xpPoOf.rst
+index a44f54fdee407e..181112e3e38cfe 100644
+--- a/Misc/NEWS.d/next/Security/2024-12-05-21-35-19.gh-issue-127655.xpPoOf.rst
++++ b/Misc/NEWS.d/next/Security/2024-12-05-21-35-19.gh-issue-127655.xpPoOf.rst
+@@ -1 +1 @@
+-Fixed the :mod:`asyncio` selector transport not pausing writes for the protocol when the buffer reaches the high water mark.
++Fixed the :mod:`asyncio` selector transport not pausing writes for the protocol when the buffer reaches the high water mark when using `writelines`.
+
+From 0cbbba61e903a738f0748213d8b2bf1e608db8ae Mon Sep 17 00:00:00 2001
+From: "J. Nick Koston"
+Date: Thu, 5 Dec 2024 19:37:49 -0600
+Subject: [PATCH 06/10] Update
+ Misc/NEWS.d/next/Security/2024-12-05-21-35-19.gh-issue-127655.xpPoOf.rst
+
+---
+ .../Security/2024-12-05-21-35-19.gh-issue-127655.xpPoOf.rst | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/Misc/NEWS.d/next/Security/2024-12-05-21-35-19.gh-issue-127655.xpPoOf.rst b/Misc/NEWS.d/next/Security/2024-12-05-21-35-19.gh-issue-127655.xpPoOf.rst
+index 181112e3e38cfe..8eadbf4abc781e 100644
+--- a/Misc/NEWS.d/next/Security/2024-12-05-21-35-19.gh-issue-127655.xpPoOf.rst
++++ b/Misc/NEWS.d/next/Security/2024-12-05-21-35-19.gh-issue-127655.xpPoOf.rst
+@@ -1 +1 @@
+-Fixed the :mod:`asyncio` selector transport not pausing writes for the protocol when the buffer reaches the high water mark when using `writelines`.
++Fixed the :mod:`asyncio` selector transport not pausing writes for the protocol when the buffer reaches the high water mark when using ``writelines``.
+
+From 472df1700698a830c6f451f5febc6d34a1bbffb9 Mon Sep 17 00:00:00 2001
+From: "J. Nick Koston"
+Date: Thu, 5 Dec 2024 19:41:59 -0600
+Subject: [PATCH 07/10] Update
+ Misc/NEWS.d/next/Security/2024-12-05-21-35-19.gh-issue-127655.xpPoOf.rst
+
+---
+ .../Security/2024-12-05-21-35-19.gh-issue-127655.xpPoOf.rst | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/Misc/NEWS.d/next/Security/2024-12-05-21-35-19.gh-issue-127655.xpPoOf.rst b/Misc/NEWS.d/next/Security/2024-12-05-21-35-19.gh-issue-127655.xpPoOf.rst
+index 8eadbf4abc781e..28abf1e2928c94 100644
+--- a/Misc/NEWS.d/next/Security/2024-12-05-21-35-19.gh-issue-127655.xpPoOf.rst
++++ b/Misc/NEWS.d/next/Security/2024-12-05-21-35-19.gh-issue-127655.xpPoOf.rst
+@@ -1 +1 @@
+-Fixed the :mod:`asyncio` selector transport not pausing writes for the protocol when the buffer reaches the high water mark when using ``writelines``.
++Fixed the :mod:`asyncio` selector transport not pausing writes for the protocol when the buffer reaches the high water mark when using :meth:`asyncio.StreamWriter.writelines`.
+
+From 28869e5520d66da89b5108adef6fa71ab3aef027 Mon Sep 17 00:00:00 2001
+From: "J. Nick Koston"
+Date: Thu, 5 Dec 2024 19:46:36 -0600
+Subject: [PATCH 08/10] Update
+ Misc/NEWS.d/next/Security/2024-12-05-21-35-19.gh-issue-127655.xpPoOf.rst
+
+---
+ .../Security/2024-12-05-21-35-19.gh-issue-127655.xpPoOf.rst | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/Misc/NEWS.d/next/Security/2024-12-05-21-35-19.gh-issue-127655.xpPoOf.rst b/Misc/NEWS.d/next/Security/2024-12-05-21-35-19.gh-issue-127655.xpPoOf.rst
+index 28abf1e2928c94..a7930e69ced5a0 100644
+--- a/Misc/NEWS.d/next/Security/2024-12-05-21-35-19.gh-issue-127655.xpPoOf.rst
++++ b/Misc/NEWS.d/next/Security/2024-12-05-21-35-19.gh-issue-127655.xpPoOf.rst
+@@ -1 +1 @@
+-Fixed the :mod:`asyncio` selector transport not pausing writes for the protocol when the buffer reaches the high water mark when using :meth:`asyncio.StreamWriter.writelines`.
++Fixed the :mod:`asyncio` selector transport not pausing writes for the protocol when the buffer reaches the high water mark when using :meth:`asyncio.WriteTransport.writelines`.
+
+From 056974c5a5df3c5a8754479e5a04529e2f3a607c Mon Sep 17 00:00:00 2001
+From: "J. Nick Koston"
+Date: Thu, 5 Dec 2024 19:53:25 -0600
+Subject: [PATCH 09/10] Update
+ Misc/NEWS.d/next/Security/2024-12-05-21-35-19.gh-issue-127655.xpPoOf.rst
+
+---
+ .../Security/2024-12-05-21-35-19.gh-issue-127655.xpPoOf.rst | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/Misc/NEWS.d/next/Security/2024-12-05-21-35-19.gh-issue-127655.xpPoOf.rst b/Misc/NEWS.d/next/Security/2024-12-05-21-35-19.gh-issue-127655.xpPoOf.rst
+index a7930e69ced5a0..9deac957eb14e6 100644
+--- a/Misc/NEWS.d/next/Security/2024-12-05-21-35-19.gh-issue-127655.xpPoOf.rst
++++ b/Misc/NEWS.d/next/Security/2024-12-05-21-35-19.gh-issue-127655.xpPoOf.rst
+@@ -1 +1 @@
+-Fixed the :mod:`asyncio` selector transport not pausing writes for the protocol when the buffer reaches the high water mark when using :meth:`asyncio.WriteTransport.writelines`.
++Fixed the :class:`asyncio.SelectorEventLoop` transport not pausing writes for the protocol when the buffer reaches the high water mark when using :meth:`asyncio.WriteTransport.writelines`.
+
+From 6b01a670de78bba61306bd91456ec6621e213c20 Mon Sep 17 00:00:00 2001
+From: "J. Nick Koston"
+Date: Thu, 5 Dec 2024 22:07:34 -0600
+Subject: [PATCH 10/10] Update
+ Misc/NEWS.d/next/Security/2024-12-05-21-35-19.gh-issue-127655.xpPoOf.rst
+
+Co-authored-by: Kumar Aditya
+---
+ .../Security/2024-12-05-21-35-19.gh-issue-127655.xpPoOf.rst | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/Misc/NEWS.d/next/Security/2024-12-05-21-35-19.gh-issue-127655.xpPoOf.rst b/Misc/NEWS.d/next/Security/2024-12-05-21-35-19.gh-issue-127655.xpPoOf.rst
+index 9deac957eb14e6..76cfc58121d3bd 100644
+--- a/Misc/NEWS.d/next/Security/2024-12-05-21-35-19.gh-issue-127655.xpPoOf.rst
++++ b/Misc/NEWS.d/next/Security/2024-12-05-21-35-19.gh-issue-127655.xpPoOf.rst
+@@ -1 +1 @@
+-Fixed the :class:`asyncio.SelectorEventLoop` transport not pausing writes for the protocol when the buffer reaches the high water mark when using :meth:`asyncio.WriteTransport.writelines`.
++Fixed the :class:`!asyncio.selector_events._SelectorSocketTransport` transport not pausing writes for the protocol when the buffer reaches the high water mark when using :meth:`asyncio.WriteTransport.writelines`.
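
A minimal, hypothetical Python sketch of the flow-control invariant the patch above restores (this is not CPython's implementation; the class names are illustrative): after writelines() leaves data buffered in the transport, the buffer size must be re-checked against the high-water mark so the protocol gets paused.

# Hypothetical sketch of write-buffer flow control, loosely modeled on the
# behavior CVE-2024-12254 restores for writelines(); NOT CPython code.

class DemoProtocol:
    def __init__(self):
        self.paused = False

    def pause_writing(self):
        self.paused = True

    def resume_writing(self):
        self.paused = False


class DemoTransport:
    def __init__(self, protocol, high_water=64 * 1024):
        self._protocol = protocol
        self._buffer = bytearray()
        self._high_water = high_water

    def writelines(self, list_of_data):
        # Pretend the socket accepted nothing, so everything stays buffered.
        for data in list_of_data:
            self._buffer.extend(data)
        # The essence of the fix: re-check the high-water mark after buffering.
        self._maybe_pause_protocol()

    def _maybe_pause_protocol(self):
        if len(self._buffer) > self._high_water and not self._protocol.paused:
            self._protocol.pause_writing()


proto = DemoProtocol()
transport = DemoTransport(proto, high_water=4)
transport.writelines([b"data", b"more data"])
assert proto.paused  # without the _maybe_pause_protocol() call, this fails

Without that check the protocol never receives pause_writing(), so a slow or stalled peer lets the write buffer grow without back-pressure.
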
diff --git a/SPECS/python3/python3.spec b/SPECS/python3/python3.spec
index 50058cdc7d0..21e9ec6b808 100644
--- a/SPECS/python3/python3.spec
+++ b/SPECS/python3/python3.spec
@@ -6,7 +6,7 @@
Summary: A high-level scripting language
Name: python3
Version: 3.12.3
-Release: 4%{?dist}
+Release: 5%{?dist}
License: PSF
Vendor: Microsoft Corporation
Distribution: Azure Linux
@@ -21,6 +21,7 @@ Patch1: CVE-2024-7592.patch
Patch2: CVE-2024-6923.patch
Patch3: CVE-2024-6232.patch
Patch4: CVE-2024-8088.patch
+Patch5: CVE-2024-12254.patch
BuildRequires: bzip2-devel
BuildRequires: expat-devel >= 2.1.0
@@ -242,6 +243,9 @@ rm -rf %{buildroot}%{_bindir}/__pycache__
%{_libdir}/python%{majmin}/test/*
%changelog
+* Tue Dec 10 2024 Ankita Pareek - 3.12.3-5
+- Patch CVE-2024-12254
+
* Fri Sep 20 2024 Himaja Kesari - 3.12.3-4
- Patch CVE-2024-6232 and CVE-2024-8088
diff --git a/SPECS/ruby/Avoid-another-race-condition-of-open-mode.patch b/SPECS/ruby/Avoid-another-race-condition-of-open-mode.patch
new file mode 100644
index 00000000000..f8d52d1a9d1
--- /dev/null
+++ b/SPECS/ruby/Avoid-another-race-condition-of-open-mode.patch
@@ -0,0 +1,45 @@
+From 2daad257bee7a500e18ebe553e79487b267fb140 Mon Sep 17 00:00:00 2001
+From: Nobuyoshi Nakada
+Date: Mon, 12 Aug 2024 20:18:34 +0900
+Subject: [PATCH] Avoid another race condition of open mode
+
+Instead, just open in CREATE and APPEND mode.
+Also, move the workaround for old Solaris as fallback to retry.
+---
+ lib/rubygems.rb | 14 +++++---------
+ 1 file changed, 5 insertions(+), 9 deletions(-)
+
+diff --git a/lib/rubygems.rb b/lib/rubygems.rb
+index 2b52cde0a749..c51ba69203cb 100644
+--- a/lib/rubygems.rb
++++ b/lib/rubygems.rb
+@@ -798,24 +798,20 @@ def self.open_file(path, flags, &block)
+ File.open(path, flags, &block)
+ end
+
++ MODE_TO_FLOCK = IO::RDONLY | IO::APPEND | IO::CREAT # :nodoc:
++
+ ##
+ # Open a file with given flags, and protect access with flock
+
+ def self.open_file_with_flock(path, &block)
+- flags = File.exist?(path) ? "r+" : "a+"
+-
+- File.open(path, flags) do |io|
++ File.open(path, MODE_TO_FLOCK) do |io|
+ begin
+ io.flock(File::LOCK_EX)
+ rescue Errno::ENOSYS, Errno::ENOTSUP
++ rescue Errno::ENOLCK # NFS
++ raise unless Thread.main == Thread.current
+ end
+ yield io
+- rescue Errno::ENOLCK # NFS
+- if Thread.main != Thread.current
+- raise
+- else
+- open_file(path, flags, &block)
+- end
+ end
+ end
+
diff --git a/SPECS/ruby/CVE-2024-41946.patch b/SPECS/ruby/CVE-2024-41946.patch
deleted file mode 100644
index 63e3c821d91..00000000000
--- a/SPECS/ruby/CVE-2024-41946.patch
+++ /dev/null
@@ -1,111 +0,0 @@
-From 8f46a0155b5ff647fef8314b0b36491ea2d7de7b Mon Sep 17 00:00:00 2001
-From: Harshit Gupta
-Date: Wed, 18 Sep 2024 09:34:21 -0700
-Subject: [PATCH] Patch for CVE-2024-41946
-
-Based on upstream commit
-https://github.com/ruby/rexml/commit/033d1909a8f259d5a7c53681bcaf14f13bcf0368
-
----
- lib/rexml/parsers/baseparser.rb | 19 ++++++++++++++++++-
- lib/rexml/parsers/pullparser.rb | 4 ++++
- lib/rexml/parsers/sax2parser.rb | 4 ++++
- 3 files changed, 26 insertions(+), 1 deletion(-)
-
-diff --git a/.bundle/gems/rexml-3.2.8/lib/rexml/parsers/baseparser.rb b/.bundle/gems/rexml-3.2.8/lib/rexml/parsers/baseparser.rb
-index d09237c..61f6787 100644
---- a/.bundle/gems/rexml-3.2.8/lib/rexml/parsers/baseparser.rb
-+++ b/.bundle/gems/rexml-3.2.8/lib/rexml/parsers/baseparser.rb
-@@ -128,6 +128,7 @@ module REXML
- def initialize( source )
- self.stream = source
- @listeners = []
-+ @entity_expansion_count = 0
- end
-
- def add_listener( listener )
-@@ -135,6 +136,7 @@ module REXML
- end
-
- attr_reader :source
-+ attr_reader :entity_expansion_count
-
- def stream=( source )
- @source = SourceFactory.create_from( source )
-@@ -446,7 +448,9 @@ module REXML
- def entity( reference, entities )
- value = nil
- value = entities[ reference ] if entities
-- if not value
-+ if value
-+ record_entity_expansion
-+ else
- value = DEFAULT_ENTITIES[ reference ]
- value = value[2] if value
- end
-@@ -481,12 +485,17 @@ module REXML
- }
- matches.collect!{|x|x[0]}.compact!
- if matches.size > 0
-+ sum = 0
- matches.each do |entity_reference|
- unless filter and filter.include?(entity_reference)
- entity_value = entity( entity_reference, entities )
- if entity_value
- re = /{entity_reference};/
- rv.gsub!( re, entity_value )
-+ sum += rv.bytesize
-+ if sum > Security.entity_expansion_text_limit
-+ raise "entity expansion has grown too large"
-+ end
- else
- er = DEFAULT_ENTITIES[entity_reference]
- rv.gsub!( er[0], er[2] ) if er
-@@ -499,6 +508,14 @@ module REXML
- end
-
- private
-+
-+ def record_entity_expansion
-+ @entity_expansion_count += 1
-+ if @entity_expansion_count > Security.entity_expansion_limit
-+ raise "number of entity expansions exceeded, processing aborted."
-+ end
-+ end
-+
- def need_source_encoding_update?(xml_declaration_encoding)
- return false if xml_declaration_encoding.nil?
- return false if /\AUTF-16\z/i =~ xml_declaration_encoding
-diff --git a/.bundle/gems/rexml-3.2.8/lib/rexml/parsers/pullparser.rb b/.bundle/gems/rexml-3.2.8/lib/rexml/parsers/pullparser.rb
-index f8b232a..36b4595 100644
---- a/.bundle/gems/rexml-3.2.8/lib/rexml/parsers/pullparser.rb
-+++ b/.bundle/gems/rexml-3.2.8/lib/rexml/parsers/pullparser.rb
-@@ -47,6 +47,10 @@ module REXML
- @listeners << listener
- end
-
-+ def entity_expansion_count
-+ @parser.entity_expansion_count
-+ end
-+
- def each
- while has_next?
- yield self.pull
-diff --git a/.bundle/gems/rexml-3.2.8/lib/rexml/parsers/sax2parser.rb b/.bundle/gems/rexml-3.2.8/lib/rexml/parsers/sax2parser.rb
-index 6a24ce2..01cb469 100644
---- a/.bundle/gems/rexml-3.2.8/lib/rexml/parsers/sax2parser.rb
-+++ b/.bundle/gems/rexml-3.2.8/lib/rexml/parsers/sax2parser.rb
-@@ -22,6 +22,10 @@ module REXML
- @parser.source
- end
-
-+ def entity_expansion_count
-+ @parser.entity_expansion_count
-+ end
-+
- def add_listener( listener )
- @parser.add_listener( listener )
- end
---
-2.34.1
-
diff --git a/SPECS/ruby/CVE-2024-49761.patch b/SPECS/ruby/CVE-2024-49761.patch
new file mode 100644
index 00000000000..c0c666e570a
--- /dev/null
+++ b/SPECS/ruby/CVE-2024-49761.patch
@@ -0,0 +1,46 @@
+From 51474a44f41e1e26cac2168922034f675851855d Mon Sep 17 00:00:00 2001
+From: Saul Paredes
+Date: Tue, 12 Nov 2024 12:30:10 -0800
+Subject: [PATCH] ruby: patch CVE-2024-49761 Patch adapted from
+ https://github.com/ruby/rexml/commit/ce59f2eb1aeb371fe1643414f06618dbe031979f
+ which fixes CVE-2024-49761 per
+ https://nvd.nist.gov/vuln/detail/CVE-2024-49761
+
+Needed for rubygem-rexml versions < 3.3.9
+
+Signed-off-by: Saul Paredes
+---
+ .../gems/rexml-3.3.6/lib/rexml/parsers/baseparser.rb | 10 +++++++---
+ 1 file changed, 7 insertions(+), 3 deletions(-)
+
+diff --git a/.bundle/gems/rexml-3.3.6/lib/rexml/parsers/baseparser.rb b/.bundle/gems/rexml-3.3.6/lib/rexml/parsers/baseparser.rb
+index d11c276..e9ab0ee 100644
+--- a/.bundle/gems/rexml-3.3.6/lib/rexml/parsers/baseparser.rb
++++ b/.bundle/gems/rexml-3.3.6/lib/rexml/parsers/baseparser.rb
+@@ -150,7 +150,7 @@ module REXML
+ PEDECL_PATTERN = "\\s+(%)\\s+#{NAME}\\s+#{PEDEF}\\s*>"
+ ENTITYDECL_PATTERN = /(?:#{GEDECL_PATTERN})|(?:#{PEDECL_PATTERN})/um
+ CARRIAGE_RETURN_NEWLINE_PATTERN = /\r\n?/
+- CHARACTER_REFERENCES = /&#0*((?:\d+)|(?:x[a-fA-F0-9]+));/
++ CHARACTER_REFERENCES = /&#((?:\d+)|(?:x[a-fA-F0-9]+));/
+ DEFAULT_ENTITIES_PATTERNS = {}
+ default_entities = ['gt', 'lt', 'quot', 'apos', 'amp']
+ default_entities.each do |term|
+@@ -564,8 +564,12 @@ module REXML
+ return rv if matches.size == 0
+ rv.gsub!( Private::CHARACTER_REFERENCES ) {
+ m=$1
+- m = "0#{m}" if m[0] == ?x
+- [Integer(m)].pack('U*')
++ if m.start_with?("x")
++ code_point = Integer(m[1..-1], 16)
++ else
++ code_point = Integer(m, 10)
++ end
++ [code_point].pack('U*')
+ }
+ matches.collect!{|x|x[0]}.compact!
+ if filter
+--
+2.25.1
+
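
For illustration only, a rough Python equivalent of the numeric character-reference decoding introduced above (the real fix is the Ruby hunk in baseparser.rb; the helper name below is made up): hexadecimal references carry an "x" prefix, everything else is parsed as a decimal code point.

# Illustrative Python analogue (not the Ruby fix itself) of the decoding step.

def decode_char_ref(body: str) -> str:
    """body is the text between '&#' and ';', e.g. 'x41' or '65'."""
    if body.startswith("x"):
        code_point = int(body[1:], 16)   # "x41" -> 0x41
    else:
        code_point = int(body, 10)       # "65"  -> 65
    return chr(code_point)

assert decode_char_ref("x41") == "A"
assert decode_char_ref("65") == "A"
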
diff --git a/SPECS/ruby/Remove-the-lock-file-for-binstubs.patch b/SPECS/ruby/Remove-the-lock-file-for-binstubs.patch
new file mode 100644
index 00000000000..81b78251c0f
--- /dev/null
+++ b/SPECS/ruby/Remove-the-lock-file-for-binstubs.patch
@@ -0,0 +1,99 @@
+From ace303c2d7bc0d98407e5e8b1ca77de07aa0eb75 Mon Sep 17 00:00:00 2001
+From: Nobuyoshi Nakada
+Date: Tue, 13 Aug 2024 17:19:41 +0900
+Subject: [PATCH] Remove the lock file for binstubs
+
+https://github.com/rubygems/rubygems/pull/7806#issuecomment-2241662488
+
+This patch is needed so other rubygems don't install unnecessary lock files per
+https://src.fedoraproject.org/rpms/ruby/c/b7e197fb887200e4faaf8fae663a9df00bdc09d3?branch=rawhide
+
+---
+ lib/rubygems.rb | 2 +-
+ lib/rubygems/installer.rb | 3 ++-
+ test/rubygems/test_gem_installer.rb | 10 ++++++++++
+ 3 files changed, 13 insertions(+), 2 deletions(-)
+
+diff --git a/lib/rubygems.rb b/lib/rubygems.rb
+index bd9f240e2091..7626ccfdf0d6 100644
+--- a/lib/rubygems.rb
++++ b/lib/rubygems.rb
+@@ -794,7 +794,7 @@ def self.open_file(path, flags, &block)
+ File.open(path, flags, &block)
+ end
+
+- MODE_TO_FLOCK = IO::RDONLY | IO::APPEND | IO::CREAT # :nodoc:
++ MODE_TO_FLOCK = IO::RDONLY | IO::APPEND | IO::CREAT | IO::SHARE_DELETE | IO::BINARY # :nodoc:
+
+ ##
+ # Open a file with given flags, and protect access with flock
+diff --git a/lib/rubygems/installer.rb b/lib/rubygems/installer.rb
+index d558c0be2bfa..8f95bab733f8 100644
+--- a/lib/rubygems/installer.rb
++++ b/lib/rubygems/installer.rb
+@@ -538,7 +538,7 @@ def generate_plugins # :nodoc:
+ def generate_bin_script(filename, bindir)
+ bin_script_path = File.join bindir, formatted_program_filename(filename)
+
+- Gem.open_file_with_flock("#{bin_script_path}.lock") do
++ Gem.open_file_with_flock("#{bin_script_path}.lock") do |lock|
+ require "fileutils"
+ FileUtils.rm_f bin_script_path # prior install may have been --no-wrappers
+
+@@ -546,6 +546,7 @@ def generate_bin_script(filename, bindir)
+ file.write app_script_text(filename)
+ file.chmod(options[:prog_mode] || 0o755)
+ end
++ File.unlink(lock.path)
+ end
+
+ verbose bin_script_path
+diff --git a/test/rubygems/test_gem_installer.rb b/test/rubygems/test_gem_installer.rb
+index a61d1b6fff28..2f4ff7349db4 100644
+--- a/test/rubygems/test_gem_installer.rb
++++ b/test/rubygems/test_gem_installer.rb
+@@ -1083,6 +1083,8 @@ def test_install_creates_working_binstub
+ end
+
+ assert_match(/ran executable/, e.message)
++
++ assert_path_not_exist(File.join(installer.bin_dir, "executable.lock"))
+ end
+
+ def test_conflicting_binstubs
+@@ -1131,6 +1133,8 @@ def test_conflicting_binstubs
+ # We expect the bin stub to activate the version that actually contains
+ # the binstub.
+ assert_match("I have an executable", e.message)
++
++ assert_path_not_exist(File.join(installer.bin_dir, "executable.lock"))
+ end
+
+ def test_install_creates_binstub_that_understand_version
+@@ -1160,6 +1164,8 @@ def test_install_creates_binstub_that_understand_version
+ end
+
+ assert_includes(e.message, "can't find gem a (= 3.0)")
++
++ assert_path_not_exist(File.join(installer.bin_dir, "executable.lock"))
+ end
+
+ def test_install_creates_binstub_that_prefers_user_installed_gem_to_default
+@@ -1192,6 +1198,8 @@ def test_install_creates_binstub_that_prefers_user_installed_gem_to_default
+ end
+
+ assert_equal(e.message, "ran executable")
++
++ assert_path_not_exist(File.join(installer.bin_dir, "executable.lock"))
+ end
+
+ def test_install_creates_binstub_that_dont_trust_encoding
+@@ -1222,6 +1230,8 @@ def test_install_creates_binstub_that_dont_trust_encoding
+ end
+
+ assert_match(/ran executable/, e.message)
++
++ assert_path_not_exist(File.join(installer.bin_dir, "executable.lock"))
+ end
+
+ def test_install_with_no_prior_files
diff --git a/SPECS/ruby/ruby.signatures.json b/SPECS/ruby/ruby.signatures.json
index 9eb2f74004c..d996802d5e9 100644
--- a/SPECS/ruby/ruby.signatures.json
+++ b/SPECS/ruby/ruby.signatures.json
@@ -7,6 +7,6 @@
"rubygems.con": "eb804c6b50eeafdb2172285265bc487a80acaa9846233cd5f1d20a25f1dac2ea",
"rubygems.prov": "b79c1f5873dd20d251e100b276a5e584c1fb677f3e1b92534fc09130fabe8ee5",
"rubygems.req": "e85681d8fa45d214055f3b26a8c1829b3a4bd67b26a5ef3c1f6426e7eff83ad0",
- "ruby-3.3.3.tar.gz": "83c05b2177ee9c335b631b29b8c077b4770166d02fa527f3a9f6a40d13f3cce2"
+ "ruby-3.3.5.tar.gz": "3781a3504222c2f26cb4b9eb9c1a12dbf4944d366ce24a9ff8cf99ecbce75196"
}
}
diff --git a/SPECS/ruby/ruby.spec b/SPECS/ruby/ruby.spec
index 9a384802039..9e80cb45d13 100644
--- a/SPECS/ruby/ruby.spec
+++ b/SPECS/ruby/ruby.spec
@@ -4,17 +4,17 @@
%global gem_dir %{_datadir}/ruby/gems
# Default package version defined separately, because the %%version macro gets overwritten by 'Version' tags of the subpackages.
-%global ruby_version 3.3.3
+%global ruby_version 3.3.5
%define ruby_version_majmin %(echo %{ruby_version} | cut -d. -f1-2)
-%global rubygems_version 3.5.3
+%global rubygems_version 3.5.22
# Add version for default gems from https://stdgems.org/
# A helpful one-liner script to check the current default versions is available via RUBY_VER=%%{ruby_version_majmin} ./get_gem_versions.sh
%global abbrev_version 0.1.2
%global base64_version 0.2.0
%global benchmark_version 0.3.0
%global bigdecimal_version 3.1.5
-%global bundler_version 2.5.3
+%global bundler_version 2.5.22
%global cgi_version 0.4.1
%global csv_version 3.2.8
%global date_version 3.3.4
@@ -32,15 +32,14 @@
%global find_version 0.2.0
%global forwardable_version 1.3.3
%global getoptlong_version 0.2.1
-%global io_console_version 0.7.1
+%global io_console_version 0.7.2
%global io_nonblock_version 0.3.0
%global io_wait_version 0.3.1
%global ipaddr_version 1.2.6
-%global irb_version 1.11.0
-%global json_version 2.7.1
+%global irb_version 1.13.1
%global logger_version 1.6.0
%global mutex_m_version 0.2.0
-%global net_http_version 0.4.0
+%global net_http_version 0.4.1
%global net_protocol_version 0.2.2
%global nkf_version 0.1.3
%global observer_version 0.1.2
@@ -55,9 +54,9 @@
%global prettyprint_version 0.2.0
%global pstore_version 0.1.3
%global psych_version 5.1.2
-%global rdoc_version 6.6.2
+%global rdoc_version 6.6.3.1
%global readline_version 0.0.4
-%global reline_version 0.4.1
+%global reline_version 0.5.10
%global resolv_version 0.3.0
%global resolv_replace_version 0.1.1
%global rinda_version 0.2.0
@@ -66,21 +65,21 @@
%global set_version 1.1.0
%global shellwords_version 0.2.0
%global singleton_version 0.2.0
-%global stringio_version 3.1.0
-%global strscan_version 3.0.7
+%global stringio_version 3.1.1
+%global strscan_version 3.0.9
%global syslog_version 0.1.2
-%global syntax_suggest_version 2.0.0
+%global syntax_suggest_version 2.0.1
%global tempfile_version 0.2.1
%global time_version 0.3.0
%global timeout_version 0.4.1
%global tmpdir_version 0.2.0
%global tsort_version 0.2.0
%global un_version 0.3.0
-%global uri_version 0.13.0
+%global uri_version 0.13.1
%global weakref_version 0.1.3
%global win32ole_version 1.8.10
%global yaml_version 0.3.0
-%global zlib_version 3.1.0
+%global zlib_version 3.1.1
Summary: Ruby
Name: ruby
@@ -88,7 +87,7 @@ Name: ruby
# provides should be versioned according to the ruby version.
# More info: https://stdgems.org/
Version: %{ruby_version}
-Release: 2%{?dist}
+Release: 1%{?dist}
License: (Ruby OR BSD) AND Public Domain AND MIT AND CC0 AND zlib AND UCD
Vendor: Microsoft Corporation
Distribution: Azure Linux
@@ -102,8 +101,11 @@ Source4: rubygems.con
Source5: rubygems.prov
Source6: rubygems.req
Source7: macros.rubygems
-Patch0: CVE-2024-41946.patch
-# Updates default ruby-uri to 0.12.2 and vendored one to 0.10.3. Remove once ruby gets updated to a version that comes with both lib/uri/version.rb and lib/bundler/vendor/uri/lib/uri/version.rb versions >= 0.12.2 or == 0.10.3
+Patch0: CVE-2024-49761.patch
+# patches below taken from https://src.fedoraproject.org/rpms/ruby/c/b7e197fb887200e4faaf8fae663a9df00bdc09d3?branch=rawhide
+# to remove the lock file for binstubs and avoid race condition
+Patch1: Avoid-another-race-condition-of-open-mode.patch
+Patch2: Remove-the-lock-file-for-binstubs.patch
BuildRequires: openssl-devel
# Pkgconfig(yaml-0.1) is needed to build the 'psych' gem.
BuildRequires: pkgconfig(yaml-0.1)
@@ -408,6 +410,11 @@ sudo -u test make test TESTS="-v"
%{_rpmconfigdir}/rubygems.con
%changelog
+* Fri Nov 08 2024 Saul Paredes - 3.3.5-1
+- Upgrade ruby to 3.3.5 to resolve CVE-2024-39908
+- Remove CVE-2024-41946.patch as it no longer applies; ruby 3.3.5 contains rubygem-rexml 3.3.6, where CVE-2024-41946 is already fixed
+- Patch CVE-2024-49761
+
* Wed Sep 18 2024 Harshit Gupta - 3.3.3-2
- Revert ruby back to 3.3.3 to avoid build failure of rubygems-* packages
- Add patch for CVE-2024-41946 for bundled gem rexml
diff --git a/SPECS/runc/runc.signatures.json b/SPECS/runc/runc.signatures.json
index 4cdcc7d971a..18a8b03544d 100644
--- a/SPECS/runc/runc.signatures.json
+++ b/SPECS/runc/runc.signatures.json
@@ -1,5 +1,5 @@
{
"Signatures": {
- "runc-1.1.12.tar.gz": "be31b07d6a54a8f234016501c300ad04b6c428c56588e7eca8c3b663308db208"
+ "runc-1.2.2.tar.gz": "0eabc936d481d123be92c429588f9d1de7cafd36b37a8a5085b1412e758796a1"
}
}
diff --git a/SPECS/runc/runc.spec b/SPECS/runc/runc.spec
index 332add7b584..8523a32c790 100644
--- a/SPECS/runc/runc.spec
+++ b/SPECS/runc/runc.spec
@@ -1,9 +1,9 @@
-%define commit_hash 51d5e94601ceffbbd85688df1c928ecccbfa4685
+%define commit_hash 7cb363254b69e10320360b63fb73e0ffb5da7bf2
Summary: CLI tool for spawning and running containers per OCI spec.
Name: runc
# update "commit_hash" above when upgrading version
-Version: 1.1.12
-Release: 2%{?dist}
+Version: 1.2.2
+Release: 1%{?dist}
License: ASL 2.0
Vendor: Microsoft Corporation
Distribution: Azure Linux
@@ -12,7 +12,7 @@ URL: https://github.com/opencontainers/runc
Source0: https://github.com/opencontainers/runc/archive/v%{version}.tar.gz#/%{name}-%{version}.tar.gz
BuildRequires: git
BuildRequires: go-md2man
-BuildRequires: golang < 1.23
+BuildRequires: golang
BuildRequires: libseccomp-devel
BuildRequires: make
Requires: glibc
@@ -43,6 +43,10 @@ make install-man DESTDIR=%{buildroot} PREFIX=%{_prefix}
%{_mandir}/*
%changelog
+* Mon Nov 25 2024 Nan Liu - 1.2.2-1
+- Bump version to 1.2.2
+- Remove the golang version constraint
+
* Tue Oct 15 2024 Muhammad Falak - 1.1.12-2
- Pin golang version to <= 1.22
diff --git a/SPECS/selinux-policy/0038-enable-liveos-iso-flow.patch b/SPECS/selinux-policy/0038-enable-liveos-iso-flow.patch
new file mode 100644
index 00000000000..3d7d3cc9a92
--- /dev/null
+++ b/SPECS/selinux-policy/0038-enable-liveos-iso-flow.patch
@@ -0,0 +1,223 @@
+From 43b44054474cf139ee11afa386f0d4e5b2204a07 Mon Sep 17 00:00:00 2001
+From: George Mileka
+Date: Fri, 6 Dec 2024 14:21:18 -0800
+Subject: [PATCH] Enable LiveOS ISO Flow
+
+In the LiveOS ISO flow, when selinux is enabled, initrd loads the
+default selinux policy from the rootfs, then transitions to the
+the rootfs where the final OS processes are started.
+
+The problem is that selinux denies many of these processes access
+to a number of resources (mostly file descriptors, but here is also
+access to the boot media, and other resources).
+
+This patch allows these failing processes access to the resources
+they need.
+---
+ policy/modules/admin/usermanage.te | 2 ++
+ policy/modules/apps/loadkeys.te | 2 ++
+ policy/modules/kernel/devices.if | 18 ++++++++++++++++++
+ policy/modules/kernel/kernel.if | 19 +++++++++++++++++++
+ policy/modules/kernel/kernel.te | 1 +
+ policy/modules/system/authlogin.te | 1 +
+ policy/modules/system/getty.te | 2 ++
+ policy/modules/system/init.te | 7 +++++++
+ policy/modules/system/locallogin.te | 1 +
+ policy/modules/system/selinuxutil.te | 2 ++
+ policy/modules/system/udev.te | 2 ++
+ 11 files changed, 57 insertions(+)
+
+diff --git a/policy/modules/admin/usermanage.te b/policy/modules/admin/usermanage.te
+index 57a9848bc..ef24fb85b 100644
+--- a/policy/modules/admin/usermanage.te
++++ b/policy/modules/admin/usermanage.te
+@@ -317,6 +317,7 @@ allow passwd_t self:shm create_shm_perms;
+ allow passwd_t self:sem create_sem_perms;
+ allow passwd_t self:msgq create_msgq_perms;
+ allow passwd_t self:msg { send receive };
++allow passwd_t kernel_t:fd use;
+
+ allow passwd_t crack_db_t:dir list_dir_perms;
+ read_lnk_files_pattern(passwd_t, crack_db_t, crack_db_t)
+@@ -492,6 +493,7 @@ allow useradd_t self:unix_dgram_socket create_socket_perms;
+ allow useradd_t self:unix_stream_socket create_stream_socket_perms;
+ allow useradd_t self:unix_dgram_socket sendto;
+ allow useradd_t self:unix_stream_socket connectto;
++allow useradd_t kernel_t:fd use;
+
+ # for getting the number of groups
+ kernel_read_kernel_sysctls(useradd_t)
+diff --git a/policy/modules/apps/loadkeys.te b/policy/modules/apps/loadkeys.te
+index 2263e9dba..61d098341 100644
+--- a/policy/modules/apps/loadkeys.te
++++ b/policy/modules/apps/loadkeys.te
+@@ -21,6 +21,8 @@ allow loadkeys_t self:capability { dac_override dac_read_search setuid sys_tty_c
+ allow loadkeys_t self:fifo_file rw_fifo_file_perms;
+ allow loadkeys_t self:unix_stream_socket { connect create };
+
++kernel_getattr_unlabeled_dirs(loadkeys_t)
++kernel_use_fds(loadkeys_t)
+ kernel_read_system_state(loadkeys_t)
+
+ init_use_fds(loadkeys_t)
+diff --git a/policy/modules/kernel/devices.if b/policy/modules/kernel/devices.if
+index 35e14a021..13026d7a0 100644
+--- a/policy/modules/kernel/devices.if
++++ b/policy/modules/kernel/devices.if
+@@ -2206,6 +2206,24 @@ interface(`dev_setattr_input_dev',`
+ allow $1 event_device_t:chr_file setattr;
+ ')
+
++########################################
++## <summary>
++## Read the event devices.
++## </summary>
++## <param name="domain">
++## <summary>
++## Domain allowed access.
++## </summary>
++## </param>
++#
++interface(`dev_read_input_dev',`
++ gen_require(`
++ type event_device_t;
++ ')
++
++ allow $1 event_device_t:chr_file read_chr_file_perms;
++')
++
+ ########################################
+ ##
+ ## Read input event devices (/dev/input).
+diff --git a/policy/modules/kernel/kernel.if b/policy/modules/kernel/kernel.if
+index 1d696a07c..90583facf 100644
+--- a/policy/modules/kernel/kernel.if
++++ b/policy/modules/kernel/kernel.if
+@@ -330,6 +330,25 @@ interface(`kernel_stream_connect',`
+ allow $1 kernel_t:unix_stream_socket connectto;
+ ')
+
++########################################
++## <summary>
++## Read and write kernel unix
++## domain stream sockets.
++## </summary>
++## <param name="domain">
++## <summary>
++## Domain allowed access.
++## </summary>
++## </param>
++#
++interface(`kernel_stream_read_write',`
++ gen_require(`
++ type kernel_t;
++ ')
++
++ allow $1 kernel_t:unix_stream_socket { rw_socket_perms };
++')
++
+ ########################################
+ ##
+ ## Getattr on kernel unix datagram sockets.
+diff --git a/policy/modules/kernel/kernel.te b/policy/modules/kernel/kernel.te
+index ea08036b6..59c1afeaf 100644
+--- a/policy/modules/kernel/kernel.te
++++ b/policy/modules/kernel/kernel.te
+@@ -238,6 +238,7 @@ sid tcp_socket gen_context(system_u:object_r:unlabeled_t,mls_systemhigh)
+ #
+
+ allow kernel_t self:capability { chown dac_override dac_read_search fowner fsetid kill setgid setuid setpcap linux_immutable net_bind_service net_broadcast net_admin net_raw ipc_lock ipc_owner sys_rawio sys_chroot sys_ptrace sys_pacct sys_admin sys_boot sys_nice sys_resource sys_time sys_tty_config mknod lease audit_write audit_control setfcap };
++allow kernel_t self:capability2 checkpoint_restore;
+ allow kernel_t self:process { transition signal_perms getsched setsched getsession getpgid setpgid getcap setcap share getattr noatsecure siginh rlimitinh dyntransition setkeycreate setsockcreate getrlimit };
+ allow kernel_t self:shm create_shm_perms;
+ allow kernel_t self:sem create_sem_perms;
+diff --git a/policy/modules/system/authlogin.te b/policy/modules/system/authlogin.te
+index 9dfef7a0c..791be68c2 100644
+--- a/policy/modules/system/authlogin.te
++++ b/policy/modules/system/authlogin.te
+@@ -116,6 +116,7 @@ allow chkpwd_t self:process { getattr signal };
+ dontaudit chkpwd_t self:process getcap;
+
+ allow chkpwd_t shadow_t:file read_file_perms;
++allow chkpwd_t kernel_t:fd use;
+ files_list_etc(chkpwd_t)
+
+ kernel_dontaudit_search_kernel_sysctl(chkpwd_t)
+diff --git a/policy/modules/system/getty.te b/policy/modules/system/getty.te
+index a900226bf..360f7c67f 100644
+--- a/policy/modules/system/getty.te
++++ b/policy/modules/system/getty.te
+@@ -37,6 +37,8 @@ dontaudit getty_t self:capability sys_tty_config;
+ allow getty_t self:capability2 checkpoint_restore;
+ allow getty_t self:process { getpgid getsession setpgid signal_perms };
+ allow getty_t self:fifo_file rw_fifo_file_perms;
++allow getty_t kernel_t:fd use;
++allow getty_t tmpfs_t:dir search;
+
+ read_files_pattern(getty_t, getty_conf_t, getty_conf_t)
+ read_lnk_files_pattern(getty_t, getty_conf_t, getty_conf_t)
+diff --git a/policy/modules/system/init.te b/policy/modules/system/init.te
+index 9ec685c74..a1d90762b 100644
+--- a/policy/modules/system/init.te
++++ b/policy/modules/system/init.te
+@@ -1542,12 +1542,19 @@ optional_policy(`
+ zebra_read_config(initrc_t)
+ ')
+
++########################################
++#
++# Rules applied to all system processes
++#
++kernel_use_fds(systemprocess)
++
+ ########################################
+ #
+ # Rules applied to all daemons
+ #
+
+ domain_dontaudit_use_interactive_fds(daemon)
++kernel_use_fds(daemon)
+
+ # daemons started from init will
+ # inherit fds from init for the console
+diff --git a/policy/modules/system/locallogin.te b/policy/modules/system/locallogin.te
+index 8330be8a9..9f2f160a4 100644
+--- a/policy/modules/system/locallogin.te
++++ b/policy/modules/system/locallogin.te
+@@ -47,6 +47,7 @@ allow local_login_t self:sem create_sem_perms;
+ allow local_login_t self:msgq create_msgq_perms;
+ allow local_login_t self:msg { send receive };
+ allow local_login_t self:key { search write link };
++allow local_login_t kernel_t:fd use;
+
+ allow local_login_t local_login_lock_t:file manage_file_perms;
+ files_lock_filetrans(local_login_t, local_login_lock_t, file)
+diff --git a/policy/modules/system/selinuxutil.te b/policy/modules/system/selinuxutil.te
+index 8e521e583..c7fb76550 100644
+--- a/policy/modules/system/selinuxutil.te
++++ b/policy/modules/system/selinuxutil.te
+@@ -119,6 +119,8 @@ type setfiles_t alias restorecon_t, can_relabelto_binary_policy;
+ type setfiles_exec_t alias restorecon_exec_t;
+ init_system_domain(setfiles_t, setfiles_exec_t)
+ domain_obj_id_change_exemption(setfiles_t)
++dev_read_input_dev(setfiles_t)
++kernel_stream_read_write(setfiles_t)
+
+ ########################################
+ #
+diff --git a/policy/modules/system/udev.te b/policy/modules/system/udev.te
+index 8af0d90e0..a7bb89658 100644
+--- a/policy/modules/system/udev.te
++++ b/policy/modules/system/udev.te
+@@ -57,6 +57,8 @@ allow udev_t self:rawip_socket create_socket_perms;
+ # rdma_rename
+ allow udev_t self:netlink_rdma_socket create_socket_perms;
+
++fs_getattr_iso9660_files(udev_t)
++
+ ifdef(`init_systemd',`
+ # systemd-vconsole-setup will be called by udev during virtual terminal initialization
+ allow udev_t self:capability sys_tty_config;
+--
+2.34.1
+
diff --git a/SPECS/selinux-policy/selinux-policy.spec b/SPECS/selinux-policy/selinux-policy.spec
index 8389a1a3166..7fd00e11c04 100644
--- a/SPECS/selinux-policy/selinux-policy.spec
+++ b/SPECS/selinux-policy/selinux-policy.spec
@@ -9,7 +9,7 @@
Summary: SELinux policy
Name: selinux-policy
Version: %{refpolicy_major}.%{refpolicy_minor}
-Release: 8%{?dist}
+Release: 9%{?dist}
License: GPLv2
Vendor: Microsoft Corporation
Distribution: Azure Linux
@@ -56,6 +56,7 @@ Patch34: 0034-systemd-Fix-dac_override-use-in-systemd-machine-id-s.patch
Patch35: 0035-rpm-Run-systemd-sysctl-from-post.patch
Patch36: 0036-fstools-Add-additional-perms-for-cloud-utils-growpar.patch
Patch37: 0037-docker-Fix-dockerc-typo-in-container_engine_executab.patch
+Patch38: 0038-enable-liveos-iso-flow.patch
BuildRequires: bzip2
BuildRequires: checkpolicy >= %{CHECKPOLICYVER}
BuildRequires: m4
@@ -327,11 +328,14 @@ exit 0
selinuxenabled && semodule -nB
exit 0
%changelog
+* Wed Nov 20 2024 George Mileka - 2.20240226-9
+- Enable SELinux for LiveOS ISO.
+
* Wed Sep 11 2024 Chris PeBenito - 2.20240226-8
- Add additional required permissions for cloud-utils-growpart.
- Cherry-pick upstream fix for typo in docker module.
-* Mon Aug 13 2024 Chris PeBenito - 2.20240226-7
+* Tue Aug 13 2024 Chris PeBenito - 2.20240226-7
- Change policy composition so the base module only consists of policy modules
that must be in the base. This will allow downstream users to disable or
override the individual policy modules.
diff --git a/SPECS/telegraf/CVE-2024-45337.patch b/SPECS/telegraf/CVE-2024-45337.patch
new file mode 100644
index 00000000000..0c8df5f2421
--- /dev/null
+++ b/SPECS/telegraf/CVE-2024-45337.patch
@@ -0,0 +1,77 @@
+https://github.com/golang/crypto/commit/b4f1988a35dee11ec3e05d6bf3e90b695fbd8909.patch
+
+From b4f1988a35dee11ec3e05d6bf3e90b695fbd8909 Mon Sep 17 00:00:00 2001
+From: Roland Shoemaker
+Date: Tue, 3 Dec 2024 09:03:03 -0800
+Subject: [PATCH] ssh: make the public key cache a 1-entry FIFO cache
+
+Users of the ssh package seem to extremely commonly misuse the
+PublicKeyCallback API, assuming that the key passed in the last call
+before a connection is established is the key used for authentication.
+Some users then make authorization decisions based on this key. This
+property is not documented, and may not be correct, due to the caching
+behavior of the package, resulting in users making incorrect
+authorization decisions about the connection.
+
+This change makes the cache a one entry FIFO cache, making the assumed
+property, that the last call to PublicKeyCallback represents the key
+actually used for authentication, actually hold.
+
+Thanks to Damien Tournoud, Patrick Dawkins, Vince Parker, and
+Jules Duvivier from the Platform.sh / Upsun engineering team
+for reporting this issue.
+
+Fixes golang/go#70779
+Fixes CVE-2024-45337
+
+Change-Id: Ife7c7b4045d8b6bcd7e3a417bdfae370c709797f
+Reviewed-on: https://go-review.googlesource.com/c/crypto/+/635315
+Reviewed-by: Roland Shoemaker
+Auto-Submit: Gopher Robot
+Reviewed-by: Damien Neil
+Reviewed-by: Nicola Murino
+LUCI-TryBot-Result: Go LUCI
+---
+ vendor/golang.org/x/crypto/ssh/server.go | 15 ++++++++++----
+
+diff --git a/vendor/golang.org/x/crypto/ssh/server.go b/vendor/golang.org/x/crypto/ssh/server.go
+index c0d1c29e6f..5b5ccd96f4 100644
+--- a/vendor/golang.org/x/crypto/ssh/server.go
++++ b/vendor/golang.org/x/crypto/ssh/server.go
+@@ -149,7 +149,7 @@ func (s *ServerConfig) AddHostKey(key Signer) {
+ }
+
+ // cachedPubKey contains the results of querying whether a public key is
+-// acceptable for a user.
++// acceptable for a user. This is a FIFO cache.
+ type cachedPubKey struct {
+ user string
+ pubKeyData []byte
+@@ -157,7 +157,13 @@ type cachedPubKey struct {
+ perms *Permissions
+ }
+
+-const maxCachedPubKeys = 16
++// maxCachedPubKeys is the number of cache entries we store.
++//
++// Due to consistent misuse of the PublicKeyCallback API, we have reduced this
++// to 1, such that the only key in the cache is the most recently seen one. This
++// forces the behavior that the last call to PublicKeyCallback will always be
++// with the key that is used for authentication.
++const maxCachedPubKeys = 1
+
+ // pubKeyCache caches tests for public keys. Since SSH clients
+ // will query whether a public key is acceptable before attempting to
+@@ -179,9 +185,10 @@ func (c *pubKeyCache) get(user string, pubKeyData []byte) (cachedPubKey, bool) {
+
+ // add adds the given tuple to the cache.
+ func (c *pubKeyCache) add(candidate cachedPubKey) {
+- if len(c.keys) < maxCachedPubKeys {
+- c.keys = append(c.keys, candidate)
++ if len(c.keys) >= maxCachedPubKeys {
++ c.keys = c.keys[1:]
+ }
++ c.keys = append(c.keys, candidate)
+ }
+
+ // ServerConn is an authenticated SSH connection, as seen from the
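
To make the cache semantics described in the commit message concrete, here is a hypothetical Python sketch of a one-entry FIFO cache (the actual fix is the Go change above in vendor/golang.org/x/crypto/ssh/server.go; the class below is illustrative): with a single slot, the cached entry is always the most recently offered key, so the last PublicKeyCallback invocation matches the key used for authentication.

# Hypothetical Python sketch of a 1-entry FIFO cache; not the Go fix itself.

MAX_CACHED_PUB_KEYS = 1

class PubKeyCache:
    def __init__(self):
        self.keys = []

    def get(self, user, pub_key_data):
        # Return the cached result for (user, key), if still present.
        for candidate in self.keys:
            if candidate["user"] == user and candidate["pub_key_data"] == pub_key_data:
                return candidate
        return None

    def add(self, candidate):
        # FIFO eviction: drop the oldest entry once the cache is full, so the
        # cache only ever holds the most recently offered key.
        if len(self.keys) >= MAX_CACHED_PUB_KEYS:
            self.keys = self.keys[1:]
        self.keys.append(candidate)

cache = PubKeyCache()
cache.add({"user": "alice", "pub_key_data": b"key-1", "perms": None})
cache.add({"user": "alice", "pub_key_data": b"key-2", "perms": None})
assert cache.get("alice", b"key-1") is None      # evicted
assert cache.get("alice", b"key-2") is not None  # last key offered wins
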
diff --git a/SPECS/telegraf/telegraf.spec b/SPECS/telegraf/telegraf.spec
index 10c199c24c9..f222039a727 100644
--- a/SPECS/telegraf/telegraf.spec
+++ b/SPECS/telegraf/telegraf.spec
@@ -1,7 +1,7 @@
Summary: agent for collecting, processing, aggregating, and writing metrics.
Name: telegraf
Version: 1.31.0
-Release: 2%{?dist}
+Release: 3%{?dist}
License: MIT
Vendor: Microsoft Corporation
Distribution: Azure Linux
@@ -11,6 +11,7 @@ Source0: %{url}/archive/refs/tags/v%{version}.tar.gz#/%{name}-%{version}.
# Use the generate_source_tarbbal.sh script to get the vendored sources.
Source1: %{name}-%{version}-vendor.tar.gz
Patch0: CVE-2024-37298.patch
+Patch1: CVE-2024-45337.patch
BuildRequires: golang
BuildRequires: systemd-devel
Requires: logrotate
@@ -77,6 +78,9 @@ fi
%dir %{_sysconfdir}/%{name}/telegraf.d
%changelog
+* Wed Dec 18 2024 Aurelien Bombo - 1.31.0-3
+- Patch CVE-2024-45337
+
* Thu Jul 11 2024 Sumedh Sharma - 1.31.0-2
- Add patch for CVE-2024-37298
diff --git a/SPECS/tuned/CVE-2024-52336.patch b/SPECS/tuned/CVE-2024-52336.patch
new file mode 100644
index 00000000000..e4d75fdba49
--- /dev/null
+++ b/SPECS/tuned/CVE-2024-52336.patch
@@ -0,0 +1,281 @@
+From 391843d28461cf2904dd646bd845e2203132497f Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Jaroslav=20=C5=A0karvada?=
+Date: Tue, 26 Nov 2024 13:52:17 +0100
+Subject: [PATCH] new release (2.15.1)
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Signed-off-by: Jaroslav Škarvada
+
+Modified patch 90c24eea037c7a5e9414c93f8fb3e549ed4a7b06 to apply to CBL-Mariner
+Modified-by: Sandeep Karambelkar
+---
+ com.redhat.tuned.policy | 14 +++++++-------
+ tuned.spec | 10 +++++++++-
+ tuned/consts.py | 4 ++++
+ tuned/daemon/controller.py | 15 +++++++++------
+ tuned/plugins/base.py | 12 ++++++++++++
+ tuned/plugins/plugin_script.py | 4 ++++
+ tuned/utils/commands.py | 4 ++++
+ tuned/version.py | 2 +-
+ 8 files changed, 50 insertions(+), 15 deletions(-)
+
+diff --git a/com.redhat.tuned.policy b/com.redhat.tuned.policy
+index 824ae0c..89206f0 100644
+--- a/com.redhat.tuned.policy
++++ b/com.redhat.tuned.policy
+@@ -43,7 +43,7 @@
+ <defaults>
+ <allow_any>auth_admin</allow_any>
+ <allow_inactive>auth_admin</allow_inactive>
+- <allow_active>yes</allow_active>
++ <allow_active>auth_admin</allow_active>
+ </defaults>
+ </action>
+
+@@ -103,7 +103,7 @@
+ <defaults>
+ <allow_any>auth_admin</allow_any>
+ <allow_inactive>auth_admin</allow_inactive>
+- <allow_active>yes</allow_active>
++ <allow_active>auth_admin</allow_active>
+ </defaults>
+ </action>
+
+@@ -113,7 +113,7 @@
+ <defaults>
+ <allow_any>auth_admin</allow_any>
+ <allow_inactive>auth_admin</allow_inactive>
+- <allow_active>yes</allow_active>
++ <allow_active>auth_admin</allow_active>
+ </defaults>
+ </action>
+
+@@ -123,7 +123,7 @@
+ <defaults>
+ <allow_any>auth_admin</allow_any>
+ <allow_inactive>auth_admin</allow_inactive>
+- <allow_active>yes</allow_active>
++ <allow_active>auth_admin</allow_active>
+ </defaults>
+ </action>
+
+@@ -163,7 +163,7 @@
+ <defaults>
+ <allow_any>auth_admin</allow_any>
+ <allow_inactive>auth_admin</allow_inactive>
+- <allow_active>yes</allow_active>
++ <allow_active>auth_admin</allow_active>
+ </defaults>
+ </action>
+
+@@ -193,7 +193,7 @@
+ <defaults>
+ <allow_any>auth_admin</allow_any>
+ <allow_inactive>auth_admin</allow_inactive>
+- <allow_active>yes</allow_active>
++ <allow_active>auth_admin</allow_active>
+ </defaults>
+ </action>
+
+@@ -203,7 +203,7 @@
+ <defaults>
+ <allow_any>auth_admin</allow_any>
+ <allow_inactive>auth_admin</allow_inactive>
+- <allow_active>yes</allow_active>
++ <allow_active>auth_admin</allow_active>
+ </defaults>
+ </action>
+
+@@ -223,7 +223,7 @@
+ <defaults>
+ <allow_any>auth_admin</allow_any>
+ <allow_inactive>auth_admin</allow_inactive>
+- <allow_active>yes</allow_active>
++ <allow_active>auth_admin</allow_active>
+ </defaults>
+ </action>
+
+diff --git a/tuned.spec b/tuned.spec
+index 66e0936..d395829 100644
+--- a/tuned.spec
++++ b/tuned.spec
+@@ -44,7 +44,7 @@
+
+ Summary: A dynamic adaptive system tuning daemon
+ Name: tuned
+-Version: 2.21.0
++Version: 2.21.1
+ Release: 1%{?prerel1}%{?with_snapshot:.%{git_suffix}}%{?dist}
+ License: GPLv2+
+ Source0: https://github.com/redhat-performance/%{name}/archive/v%{version}%{?prerel2}/%{name}-%{version}%{?prerel2}.tar.gz
+@@ -557,6 +557,14 @@ fi
+ %{_mandir}/man7/tuned-profiles-openshift.7*
+
+ %changelog
++* Tue Nov 26 2024 Jaroslav Škarvada - 2.21.1-1
++- new release
++ - fixed privileged execution of arbitrary scripts by active local user
++ resolves: CVE-2024-52336
++ - added sanity checks for API methods parameters
++ resolves: CVE-2024-52337
++ - tuned-ppd: fixed controller init to correctly set _on_battery
++
+ * Tue Aug 29 2023 Jaroslav Škarvada - 2.21.0-1
+ - new release
+ - rebased tuned to latest upstream
+diff --git a/tuned/consts.py b/tuned/consts.py
+index b7fb215..7d22748 100644
+--- a/tuned/consts.py
++++ b/tuned/consts.py
+@@ -1,4 +1,8 @@
+ import logging
++import string
++
++NAMES_ALLOWED_CHARS = string.ascii_letters + string.digits + " !@'+-.,/:;_$&*()%<=>?#[]{|}^~" + '"'
++NAMES_MAX_LENGTH = 4096
+
+ GLOBAL_CONFIG_FILE = "/etc/tuned/tuned-main.conf"
+ ACTIVE_PROFILE_FILE = "/etc/tuned/active_profile"
+diff --git a/tuned/daemon/controller.py b/tuned/daemon/controller.py
+index 1292a8a..e637ab4 100644
+--- a/tuned/daemon/controller.py
++++ b/tuned/daemon/controller.py
+@@ -182,6 +182,8 @@ class Controller(tuned.exports.interfaces.ExportableInterface):
+ def switch_profile(self, profile_name, caller = None):
+ if caller == "":
+ return (False, "Unauthorized")
++ if not self._cmd.is_valid_name(profile_name):
++ return (False, "Invalid profile_name")
+ return self._switch_profile(profile_name, True)
+
+ @exports.export("", "(bs)")
+@@ -255,7 +257,7 @@ class Controller(tuned.exports.interfaces.ExportableInterface):
+
+ @exports.export("s", "(bsss)")
+ def profile_info(self, profile_name, caller = None):
+- if caller == "":
++ if caller == "" or not self._cmd.is_valid_name(profile_name):
+ return tuple(False, "", "", "")
+ if profile_name is None or profile_name == "":
+ profile_name = self.active_profile()
+@@ -287,7 +289,7 @@ class Controller(tuned.exports.interfaces.ExportableInterface):
+ dictionary -- {plugin_name: {parameter_name: default_value}}
+ """
+ if caller == "":
+- return False
++ return {}
+ plugins = {}
+ for plugin_class in self._daemon.get_all_plugins():
+ plugin_name = plugin_class.__module__.split(".")[-1].split("_", 1)[1]
+@@ -300,8 +302,8 @@ class Controller(tuned.exports.interfaces.ExportableInterface):
+ @exports.export("s","s")
+ def get_plugin_documentation(self, plugin_name, caller = None):
+ """Return docstring of plugin's class"""
+- if caller == "":
+- return False
++ if caller == "" or not self._cmd.is_valid_name(plugin_name):
++ return ""
+ return self._daemon.get_plugin_documentation(str(plugin_name))
+
+ @exports.export("s","a{ss}")
+@@ -314,8 +316,8 @@ class Controller(tuned.exports.interfaces.ExportableInterface):
+ Return:
+ dictionary -- {parameter_name: hint}
+ """
+- if caller == "":
+- return False
++ if caller == "" or not self._cmd.is_valid_name(plugin_name):
++ return {}
+ return self._daemon.get_plugin_hints(str(plugin_name))
+
+ @exports.export("s", "b")
+@@ -328,7 +330,7 @@ class Controller(tuned.exports.interfaces.ExportableInterface):
+ Return:
+ bool -- True on success
+ """
+- if caller == "":
++ if caller == "" or not self._cmd.is_valid_name(path):
+ return False
+ if self._daemon._application and self._daemon._application._unix_socket_exporter:
+ self._daemon._application._unix_socket_exporter.register_signal_path(path)
+@@ -342,6 +344,10 @@ class Controller(tuned.exports.interfaces.ExportableInterface):
+ def instance_acquire_devices(self, devices, instance_name, caller = None):
+ if caller == "":
+ return (False, "Unauthorized")
++ if not self._cmd.is_valid_name(devices):
++ return (False, "Invalid devices")
++ if not self._cmd.is_valid_name(instance_name):
++ return (False, "Invalid instance_name")
+ found = False
+ for instance_target in self._daemon._unit_manager.instances:
+ if instance_target.name == instance_name:
+diff --git a/tuned/plugins/base.py b/tuned/plugins/base.py
+index 0c70987..d6db6ca 100644
+--- a/tuned/plugins/base.py
++++ b/tuned/plugins/base.py
+@@ -212,6 +212,14 @@ class Plugin(object):
+ def _instance_post_static(self, instance, enabling):
+ pass
+
++ def _safe_script_path(self, path):
++ path = os.path.realpath(path)
++ profile_paths = self._global_cfg.get_list(consts.CFG_PROFILE_DIRS, consts.CFG_DEF_PROFILE_DIRS)
++ for p in profile_paths:
++ if path.startswith(p):
++ return True
++ return False
++
+ def _call_device_script(self, instance, script, op, devices, rollback = consts.ROLLBACK_SOFT):
+ if script is None:
+ return None
+@@ -222,6 +230,10 @@ class Plugin(object):
+ log.error("Relative paths cannot be used in script_pre or script_post. " \
+ + "Use ${i:PROFILE_DIR}.")
+ return False
++ if not self._safe_script_path(script):
++ log.error("Paths outside of the profile directories cannot be used in the " \
++ + "script_pre or script_post, ignoring script: '%s'" % script)
++ return False
+ dir_name = os.path.dirname(script)
+ ret = True
+ for dev in devices:
+diff --git a/tuned/plugins/plugin_script.py b/tuned/plugins/plugin_script.py
+index ab605e4..5a5700f 100644
+--- a/tuned/plugins/plugin_script.py
++++ b/tuned/plugins/plugin_script.py
+@@ -75,6 +75,10 @@ class ScriptPlugin(base.Plugin):
+ for script in scripts:
+ environ = os.environ
+ environ.update(self._variables.get_env())
++ if not self._safe_script_path(script):
++ log.error("Paths outside of the profile directories cannot be used in the script, " \
++ + "ignoring script: '%s'." % script)
++ continue
+ log.info("calling script '%s' with arguments '%s'" % (script, str(arguments)))
+ log.debug("using environment '%s'" % str(list(environ.items())))
+ try:
+diff --git a/tuned/utils/commands.py b/tuned/utils/commands.py
+index 177474e..5ed4b2a 100644
+--- a/tuned/utils/commands.py
++++ b/tuned/utils/commands.py
+@@ -523,3 +523,7 @@ class commands:
+ f.write(profile_name + "\n")
+ except (OSError,IOError) as e:
+ raise TunedException("Failed to save the active post-loaded profile: %s" % e.strerror)
++
++ # Checks if name contains only valid characters and has valid length or is empty string or None
++ def is_valid_name(self, name):
++ return not name or (all(c in consts.NAMES_ALLOWED_CHARS for c in name) and len(name) <= consts.NAMES_MAX_LENGTH)
+diff --git a/tuned/version.py b/tuned/version.py
+index 28ff450..305b20d 100644
+--- a/tuned/version.py
++++ b/tuned/version.py
+@@ -1,5 +1,5 @@
+ TUNED_VERSION_MAJOR = 2
+ TUNED_VERSION_MINOR = 21
+-TUNED_VERSION_PATCH = 0
++TUNED_VERSION_PATCH = 1
+
+ TUNED_VERSION_STR = "%d.%d.%d" % (TUNED_VERSION_MAJOR, TUNED_VERSION_MINOR, TUNED_VERSION_PATCH)
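
The parameter-validation half of the fix (the CVE-2024-52337 sanity checks) centers on is_valid_name(). A standalone Python sketch of that check, mirroring the constants added to tuned/consts.py above, is shown purely for illustration; tuned's own implementation lives in tuned/utils/commands.py.

# Standalone sketch of the name validation applied to D-Bus API parameters.

import string

NAMES_ALLOWED_CHARS = string.ascii_letters + string.digits + " !@'+-.,/:;_$&*()%<=>?#[]{|}^~" + '"'
NAMES_MAX_LENGTH = 4096

def is_valid_name(name):
    # Empty or None names are accepted; otherwise every character must be in
    # the allow-list and the total length must stay within the limit.
    return not name or (all(c in NAMES_ALLOWED_CHARS for c in name)
                        and len(name) <= NAMES_MAX_LENGTH)

assert is_valid_name("throughput-performance")
assert not is_valid_name("bad\nname")   # control characters are rejected
assert not is_valid_name("x" * 5000)    # over NAMES_MAX_LENGTH
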
diff --git a/SPECS/tuned/CVE-2024-52337.nopatch b/SPECS/tuned/CVE-2024-52337.nopatch
new file mode 100644
index 00000000000..e69de29bb2d
diff --git a/SPECS/tuned/tuned.spec b/SPECS/tuned/tuned.spec
index e6150186354..12d5c437017 100644
--- a/SPECS/tuned/tuned.spec
+++ b/SPECS/tuned/tuned.spec
@@ -7,12 +7,13 @@
Summary: A dynamic adaptive system tuning daemon
Name: tuned
Version: 2.21.0
-Release: 1%{?dist}
+Release: 2%{?dist}
License: GPLv2+
Vendor: Microsoft Corporation
Distribution: Azure Linux
Source0: https://github.com/redhat-performance/%{name}/archive/v%{version}%{?prerel2}/%{name}-%{version}%{?prerel2}.tar.gz
Patch0: skip-gui-files.patch
+Patch1: CVE-2024-52336.patch
URL: http://www.tuned-project.org/
BuildArch: noarch
@@ -428,6 +429,9 @@ fi
%{_mandir}/man7/tuned-profiles-openshift.7*
%changelog
+* Wed Dec 11 2024 Sandeep Karambelkar 2.21.0-2
+- Fix CVE-2024-52336 and CVE-2024-52337
+
* Tue Jan 16 2024 Sharath Srikanth Chellappa 2.21.0-1
- Upgrade package version to 2.21.0
- Modifying SPEC file to reflect upstream changes (https://github.com/redhat-performance/tuned/blob/v2.21.0/tuned.spec)
diff --git a/cgmanifest.json b/cgmanifest.json
index 66a68e721c3..e724680b882 100644
--- a/cgmanifest.json
+++ b/cgmanifest.json
@@ -2022,6 +2022,16 @@
}
}
},
+ {
+ "component": {
+ "type": "other",
+ "other": {
+ "name": "containerd2",
+ "version": "2.0.0",
+ "downloadUrl": "https://github.com/containerd/containerd/archive/v2.0.0.tar.gz"
+ }
+ }
+ },
{
"component": {
"type": "other",
@@ -2958,8 +2968,8 @@
"type": "other",
"other": {
"name": "dpdk",
- "version": "23.11",
- "downloadUrl": "https://fast.dpdk.org/rel/dpdk-23.11.tar.xz"
+ "version": "23.11.3",
+ "downloadUrl": "https://fast.dpdk.org/rel/dpdk-23.11.3.tar.xz"
}
}
},
@@ -3328,8 +3338,8 @@
"type": "other",
"other": {
"name": "erlang",
- "version": "26.2.3",
- "downloadUrl": "https://github.com/erlang/otp/archive/OTP-26.2.3/otp-OTP-26.2.3.tar.gz"
+ "version": "26.2.5.6",
+ "downloadUrl": "https://github.com/erlang/otp/archive/OTP-26.2.5.6/otp-OTP-26.2.5.6.tar.gz"
}
}
},
@@ -4270,8 +4280,8 @@
"type": "other",
"other": {
"name": "gh",
- "version": "2.43.1",
- "downloadUrl": "https://github.com/cli/cli/archive/refs/tags/v2.43.1.tar.gz"
+ "version": "2.62.0",
+ "downloadUrl": "https://github.com/cli/cli/archive/refs/tags/v2.62.0.tar.gz"
}
}
},
@@ -8351,8 +8361,8 @@
"type": "other",
"other": {
"name": "kubernetes",
- "version": "1.30.1",
- "downloadUrl": "https://dl.k8s.io/v1.30.1/kubernetes-src.tar.gz"
+ "version": "1.30.3",
+ "downloadUrl": "https://dl.k8s.io/v1.30.3/kubernetes-src.tar.gz"
}
}
},
@@ -10341,8 +10351,8 @@
"type": "other",
"other": {
"name": "libnvidia-container",
- "version": "1.17.1",
- "downloadUrl": "https://github.com/NVIDIA/libnvidia-container/archive/v1.17.1.tar.gz"
+ "version": "1.17.3",
+ "downloadUrl": "https://github.com/NVIDIA/libnvidia-container/archive/v1.17.3.tar.gz"
}
}
},
@@ -10841,8 +10851,8 @@
"type": "other",
"other": {
"name": "libseccomp",
- "version": "2.5.4",
- "downloadUrl": "https://github.com/seccomp/libseccomp/releases/download/v2.5.4/libseccomp-2.5.4.tar.gz"
+ "version": "2.5.5",
+ "downloadUrl": "https://github.com/seccomp/libseccomp/releases/download/v2.5.5/libseccomp-2.5.5.tar.gz"
}
}
},
@@ -14302,8 +14312,8 @@
"type": "other",
"other": {
"name": "nvidia-container-toolkit",
- "version": "1.17.1",
- "downloadUrl": "https://github.com/NVIDIA/nvidia-container-toolkit/archive/v1.17.1.tar.gz"
+ "version": "1.17.3",
+ "downloadUrl": "https://github.com/NVIDIA/nvidia-container-toolkit/archive/v1.17.3.tar.gz"
}
}
},
@@ -20833,8 +20843,8 @@
"type": "other",
"other": {
"name": "php",
- "version": "8.3.12",
- "downloadUrl": "https://www.php.net/distributions/php-8.3.12.tar.xz"
+ "version": "8.3.14",
+ "downloadUrl": "https://www.php.net/distributions/php-8.3.14.tar.xz"
}
}
},
@@ -25904,8 +25914,8 @@
"type": "other",
"other": {
"name": "ruby",
- "version": "3.3.3",
- "downloadUrl": "https://cache.ruby-lang.org/pub/ruby/3.3/ruby-3.3.3.tar.gz"
+ "version": "3.3.5",
+ "downloadUrl": "https://cache.ruby-lang.org/pub/ruby/3.3/ruby-3.3.5.tar.gz"
}
}
},
@@ -27064,8 +27074,8 @@
"type": "other",
"other": {
"name": "runc",
- "version": "1.1.12",
- "downloadUrl": "https://github.com/opencontainers/runc/archive/v1.1.12.tar.gz"
+ "version": "1.2.2",
+ "downloadUrl": "https://github.com/opencontainers/runc/archive/v1.2.2.tar.gz"
}
}
},
diff --git a/toolkit/resources/manifests/package/pkggen_core_aarch64.txt b/toolkit/resources/manifests/package/pkggen_core_aarch64.txt
index 3ab3b3d6642..061b5cfed51 100644
--- a/toolkit/resources/manifests/package/pkggen_core_aarch64.txt
+++ b/toolkit/resources/manifests/package/pkggen_core_aarch64.txt
@@ -1,5 +1,5 @@
filesystem-1.1-21.azl3.aarch64.rpm
-kernel-headers-6.6.57.1-6.azl3.noarch.rpm
+kernel-headers-6.6.57.1-7.azl3.noarch.rpm
glibc-2.38-8.azl3.aarch64.rpm
glibc-devel-2.38-8.azl3.aarch64.rpm
glibc-i18n-2.38-8.azl3.aarch64.rpm
@@ -227,22 +227,22 @@ pinentry-1.2.1-1.azl3.aarch64.rpm
gnupg2-2.4.4-2.azl3.aarch64.rpm
gnupg2-lang-2.4.4-2.azl3.aarch64.rpm
gpgme-1.23.2-2.azl3.aarch64.rpm
-azurelinux-repos-shared-3.0-3.azl3.noarch.rpm
-azurelinux-repos-3.0-3.azl3.noarch.rpm
+azurelinux-repos-shared-3.0-4.azl3.noarch.rpm
+azurelinux-repos-3.0-4.azl3.noarch.rpm
libffi-3.4.4-1.azl3.aarch64.rpm
libffi-devel-3.4.4-1.azl3.aarch64.rpm
libtasn1-4.19.0-1.azl3.aarch64.rpm
p11-kit-0.25.0-1.azl3.aarch64.rpm
p11-kit-trust-0.25.0-1.azl3.aarch64.rpm
-ca-certificates-shared-3.0.0-7.azl3.noarch.rpm
-ca-certificates-tools-3.0.0-7.azl3.noarch.rpm
-ca-certificates-base-3.0.0-7.azl3.noarch.rpm
-ca-certificates-3.0.0-7.azl3.noarch.rpm
+ca-certificates-shared-3.0.0-8.azl3.noarch.rpm
+ca-certificates-tools-3.0.0-8.azl3.noarch.rpm
+ca-certificates-base-3.0.0-8.azl3.noarch.rpm
+ca-certificates-3.0.0-8.azl3.noarch.rpm
dwz-0.14-2.azl3.aarch64.rpm
unzip-6.0-21.azl3.aarch64.rpm
-python3-3.12.3-4.azl3.aarch64.rpm
-python3-devel-3.12.3-4.azl3.aarch64.rpm
-python3-libs-3.12.3-4.azl3.aarch64.rpm
+python3-3.12.3-5.azl3.aarch64.rpm
+python3-devel-3.12.3-5.azl3.aarch64.rpm
+python3-libs-3.12.3-5.azl3.aarch64.rpm
python3-setuptools-69.0.3-4.azl3.noarch.rpm
python3-pygments-2.7.4-2.azl3.noarch.rpm
which-2.21-8.azl3.aarch64.rpm
diff --git a/toolkit/resources/manifests/package/pkggen_core_x86_64.txt b/toolkit/resources/manifests/package/pkggen_core_x86_64.txt
index 3871203b17b..ab25a4670da 100644
--- a/toolkit/resources/manifests/package/pkggen_core_x86_64.txt
+++ b/toolkit/resources/manifests/package/pkggen_core_x86_64.txt
@@ -1,5 +1,5 @@
filesystem-1.1-21.azl3.x86_64.rpm
-kernel-headers-6.6.57.1-6.azl3.noarch.rpm
+kernel-headers-6.6.57.1-7.azl3.noarch.rpm
glibc-2.38-8.azl3.x86_64.rpm
glibc-devel-2.38-8.azl3.x86_64.rpm
glibc-i18n-2.38-8.azl3.x86_64.rpm
@@ -227,22 +227,22 @@ pinentry-1.2.1-1.azl3.x86_64.rpm
gnupg2-2.4.4-2.azl3.x86_64.rpm
gnupg2-lang-2.4.4-2.azl3.x86_64.rpm
gpgme-1.23.2-2.azl3.x86_64.rpm
-azurelinux-repos-shared-3.0-3.azl3.noarch.rpm
-azurelinux-repos-3.0-3.azl3.noarch.rpm
+azurelinux-repos-shared-3.0-4.azl3.noarch.rpm
+azurelinux-repos-3.0-4.azl3.noarch.rpm
libffi-3.4.4-1.azl3.x86_64.rpm
libffi-devel-3.4.4-1.azl3.x86_64.rpm
libtasn1-4.19.0-1.azl3.x86_64.rpm
p11-kit-0.25.0-1.azl3.x86_64.rpm
p11-kit-trust-0.25.0-1.azl3.x86_64.rpm
-ca-certificates-shared-3.0.0-7.azl3.noarch.rpm
-ca-certificates-tools-3.0.0-7.azl3.noarch.rpm
-ca-certificates-base-3.0.0-7.azl3.noarch.rpm
-ca-certificates-3.0.0-7.azl3.noarch.rpm
+ca-certificates-shared-3.0.0-8.azl3.noarch.rpm
+ca-certificates-tools-3.0.0-8.azl3.noarch.rpm
+ca-certificates-base-3.0.0-8.azl3.noarch.rpm
+ca-certificates-3.0.0-8.azl3.noarch.rpm
dwz-0.14-2.azl3.x86_64.rpm
unzip-6.0-21.azl3.x86_64.rpm
-python3-3.12.3-4.azl3.x86_64.rpm
-python3-devel-3.12.3-4.azl3.x86_64.rpm
-python3-libs-3.12.3-4.azl3.x86_64.rpm
+python3-3.12.3-5.azl3.x86_64.rpm
+python3-devel-3.12.3-5.azl3.x86_64.rpm
+python3-libs-3.12.3-5.azl3.x86_64.rpm
python3-setuptools-69.0.3-4.azl3.noarch.rpm
python3-pygments-2.7.4-2.azl3.noarch.rpm
which-2.21-8.azl3.x86_64.rpm
diff --git a/toolkit/resources/manifests/package/toolchain_aarch64.txt b/toolkit/resources/manifests/package/toolchain_aarch64.txt
index b3838e476e7..23f449f1015 100644
--- a/toolkit/resources/manifests/package/toolchain_aarch64.txt
+++ b/toolkit/resources/manifests/package/toolchain_aarch64.txt
@@ -10,19 +10,19 @@ audit-libs-3.1.2-1.azl3.aarch64.rpm
autoconf-2.72-2.azl3.noarch.rpm
automake-1.16.5-2.azl3.noarch.rpm
azurelinux-check-macros-3.0-7.azl3.noarch.rpm
-azurelinux-repos-3.0-3.azl3.noarch.rpm
-azurelinux-repos-debug-3.0-3.azl3.noarch.rpm
-azurelinux-repos-debug-preview-3.0-3.azl3.noarch.rpm
-azurelinux-repos-extended-3.0-3.azl3.noarch.rpm
-azurelinux-repos-extended-debug-3.0-3.azl3.noarch.rpm
-azurelinux-repos-extended-debug-preview-3.0-3.azl3.noarch.rpm
-azurelinux-repos-extended-preview-3.0-3.azl3.noarch.rpm
-azurelinux-repos-ms-non-oss-3.0-3.azl3.noarch.rpm
-azurelinux-repos-ms-non-oss-preview-3.0-3.azl3.noarch.rpm
-azurelinux-repos-ms-oss-3.0-3.azl3.noarch.rpm
-azurelinux-repos-ms-oss-preview-3.0-3.azl3.noarch.rpm
-azurelinux-repos-preview-3.0-3.azl3.noarch.rpm
-azurelinux-repos-shared-3.0-3.azl3.noarch.rpm
+azurelinux-repos-3.0-4.azl3.noarch.rpm
+azurelinux-repos-debug-3.0-4.azl3.noarch.rpm
+azurelinux-repos-debug-preview-3.0-4.azl3.noarch.rpm
+azurelinux-repos-extended-3.0-4.azl3.noarch.rpm
+azurelinux-repos-extended-debug-3.0-4.azl3.noarch.rpm
+azurelinux-repos-extended-debug-preview-3.0-4.azl3.noarch.rpm
+azurelinux-repos-extended-preview-3.0-4.azl3.noarch.rpm
+azurelinux-repos-ms-non-oss-3.0-4.azl3.noarch.rpm
+azurelinux-repos-ms-non-oss-preview-3.0-4.azl3.noarch.rpm
+azurelinux-repos-ms-oss-3.0-4.azl3.noarch.rpm
+azurelinux-repos-ms-oss-preview-3.0-4.azl3.noarch.rpm
+azurelinux-repos-preview-3.0-4.azl3.noarch.rpm
+azurelinux-repos-shared-3.0-4.azl3.noarch.rpm
azurelinux-rpm-macros-3.0-7.azl3.noarch.rpm
bash-5.2.15-3.azl3.aarch64.rpm
bash-debuginfo-5.2.15-3.azl3.aarch64.rpm
@@ -37,11 +37,11 @@ bzip2-1.0.8-1.azl3.aarch64.rpm
bzip2-debuginfo-1.0.8-1.azl3.aarch64.rpm
bzip2-devel-1.0.8-1.azl3.aarch64.rpm
bzip2-libs-1.0.8-1.azl3.aarch64.rpm
-ca-certificates-3.0.0-7.azl3.noarch.rpm
-ca-certificates-base-3.0.0-7.azl3.noarch.rpm
-ca-certificates-legacy-3.0.0-7.azl3.noarch.rpm
-ca-certificates-shared-3.0.0-7.azl3.noarch.rpm
-ca-certificates-tools-3.0.0-7.azl3.noarch.rpm
+ca-certificates-3.0.0-8.azl3.noarch.rpm
+ca-certificates-base-3.0.0-8.azl3.noarch.rpm
+ca-certificates-legacy-3.0.0-8.azl3.noarch.rpm
+ca-certificates-shared-3.0.0-8.azl3.noarch.rpm
+ca-certificates-tools-3.0.0-8.azl3.noarch.rpm
ccache-4.8.3-3.azl3.aarch64.rpm
ccache-debuginfo-4.8.3-3.azl3.aarch64.rpm
check-0.15.2-1.azl3.aarch64.rpm
@@ -156,7 +156,7 @@ intltool-0.51.0-7.azl3.noarch.rpm
itstool-2.0.7-1.azl3.noarch.rpm
kbd-2.2.0-2.azl3.aarch64.rpm
kbd-debuginfo-2.2.0-2.azl3.aarch64.rpm
-kernel-headers-6.6.57.1-6.azl3.noarch.rpm
+kernel-headers-6.6.57.1-7.azl3.noarch.rpm
kmod-30-1.azl3.aarch64.rpm
kmod-debuginfo-30-1.azl3.aarch64.rpm
kmod-devel-30-1.azl3.aarch64.rpm
@@ -296,10 +296,10 @@ p11-kit-debuginfo-0.25.0-1.azl3.aarch64.rpm
p11-kit-devel-0.25.0-1.azl3.aarch64.rpm
p11-kit-server-0.25.0-1.azl3.aarch64.rpm
p11-kit-trust-0.25.0-1.azl3.aarch64.rpm
-pam-1.5.3-2.azl3.aarch64.rpm
-pam-debuginfo-1.5.3-2.azl3.aarch64.rpm
-pam-devel-1.5.3-2.azl3.aarch64.rpm
-pam-lang-1.5.3-2.azl3.aarch64.rpm
+pam-1.5.3-4.azl3.aarch64.rpm
+pam-debuginfo-1.5.3-4.azl3.aarch64.rpm
+pam-devel-1.5.3-4.azl3.aarch64.rpm
+pam-lang-1.5.3-4.azl3.aarch64.rpm
patch-2.7.6-9.azl3.aarch64.rpm
patch-debuginfo-2.7.6-9.azl3.aarch64.rpm
pcre2-10.42-3.azl3.aarch64.rpm
@@ -529,18 +529,18 @@ pyproject-rpm-macros-1.12.0-2.azl3.noarch.rpm
pyproject-srpm-macros-1.12.0-2.azl3.noarch.rpm
python-markupsafe-debuginfo-2.1.3-1.azl3.aarch64.rpm
python-wheel-wheel-0.43.0-1.azl3.noarch.rpm
-python3-3.12.3-4.azl3.aarch64.rpm
+python3-3.12.3-5.azl3.aarch64.rpm
python3-audit-3.1.2-1.azl3.aarch64.rpm
python3-cracklib-2.9.11-1.azl3.aarch64.rpm
-python3-curses-3.12.3-4.azl3.aarch64.rpm
+python3-curses-3.12.3-5.azl3.aarch64.rpm
python3-Cython-3.0.5-2.azl3.aarch64.rpm
-python3-debuginfo-3.12.3-4.azl3.aarch64.rpm
-python3-devel-3.12.3-4.azl3.aarch64.rpm
+python3-debuginfo-3.12.3-5.azl3.aarch64.rpm
+python3-devel-3.12.3-5.azl3.aarch64.rpm
python3-flit-core-3.9.0-1.azl3.noarch.rpm
python3-gpg-1.23.2-2.azl3.aarch64.rpm
python3-jinja2-3.1.2-1.azl3.noarch.rpm
python3-libcap-ng-0.8.4-1.azl3.aarch64.rpm
-python3-libs-3.12.3-4.azl3.aarch64.rpm
+python3-libs-3.12.3-5.azl3.aarch64.rpm
python3-libxml2-2.11.5-1.azl3.aarch64.rpm
python3-lxml-4.9.3-1.azl3.aarch64.rpm
python3-magic-5.45-1.azl3.noarch.rpm
@@ -552,8 +552,8 @@ python3-pygments-2.7.4-2.azl3.noarch.rpm
python3-rpm-4.18.2-1.azl3.aarch64.rpm
python3-rpm-generators-14-11.azl3.noarch.rpm
python3-setuptools-69.0.3-4.azl3.noarch.rpm
-python3-test-3.12.3-4.azl3.aarch64.rpm
-python3-tools-3.12.3-4.azl3.aarch64.rpm
+python3-test-3.12.3-5.azl3.aarch64.rpm
+python3-tools-3.12.3-5.azl3.aarch64.rpm
python3-wheel-0.43.0-1.azl3.noarch.rpm
readline-8.2-1.azl3.aarch64.rpm
readline-debuginfo-8.2-1.azl3.aarch64.rpm
diff --git a/toolkit/resources/manifests/package/toolchain_x86_64.txt b/toolkit/resources/manifests/package/toolchain_x86_64.txt
index e15f53cd0d3..c4c92d558ef 100644
--- a/toolkit/resources/manifests/package/toolchain_x86_64.txt
+++ b/toolkit/resources/manifests/package/toolchain_x86_64.txt
@@ -10,19 +10,21 @@ audit-libs-3.1.2-1.azl3.x86_64.rpm
autoconf-2.72-2.azl3.noarch.rpm
automake-1.16.5-2.azl3.noarch.rpm
azurelinux-check-macros-3.0-7.azl3.noarch.rpm
-azurelinux-repos-3.0-3.azl3.noarch.rpm
-azurelinux-repos-debug-3.0-3.azl3.noarch.rpm
-azurelinux-repos-debug-preview-3.0-3.azl3.noarch.rpm
-azurelinux-repos-extended-3.0-3.azl3.noarch.rpm
-azurelinux-repos-extended-debug-3.0-3.azl3.noarch.rpm
-azurelinux-repos-extended-debug-preview-3.0-3.azl3.noarch.rpm
-azurelinux-repos-extended-preview-3.0-3.azl3.noarch.rpm
-azurelinux-repos-ms-non-oss-3.0-3.azl3.noarch.rpm
-azurelinux-repos-ms-non-oss-preview-3.0-3.azl3.noarch.rpm
-azurelinux-repos-ms-oss-3.0-3.azl3.noarch.rpm
-azurelinux-repos-ms-oss-preview-3.0-3.azl3.noarch.rpm
-azurelinux-repos-preview-3.0-3.azl3.noarch.rpm
-azurelinux-repos-shared-3.0-3.azl3.noarch.rpm
+azurelinux-repos-3.0-4.azl3.noarch.rpm
+azurelinux-repos-amd-3.0-4.azl3.noarch.rpm
+azurelinux-repos-amd-preview-3.0-4.azl3.noarch.rpm
+azurelinux-repos-debug-3.0-4.azl3.noarch.rpm
+azurelinux-repos-debug-preview-3.0-4.azl3.noarch.rpm
+azurelinux-repos-extended-3.0-4.azl3.noarch.rpm
+azurelinux-repos-extended-debug-3.0-4.azl3.noarch.rpm
+azurelinux-repos-extended-debug-preview-3.0-4.azl3.noarch.rpm
+azurelinux-repos-extended-preview-3.0-4.azl3.noarch.rpm
+azurelinux-repos-ms-non-oss-3.0-4.azl3.noarch.rpm
+azurelinux-repos-ms-non-oss-preview-3.0-4.azl3.noarch.rpm
+azurelinux-repos-ms-oss-3.0-4.azl3.noarch.rpm
+azurelinux-repos-ms-oss-preview-3.0-4.azl3.noarch.rpm
+azurelinux-repos-preview-3.0-4.azl3.noarch.rpm
+azurelinux-repos-shared-3.0-4.azl3.noarch.rpm
azurelinux-rpm-macros-3.0-7.azl3.noarch.rpm
bash-5.2.15-3.azl3.x86_64.rpm
bash-debuginfo-5.2.15-3.azl3.x86_64.rpm
@@ -38,11 +40,11 @@ bzip2-1.0.8-1.azl3.x86_64.rpm
bzip2-debuginfo-1.0.8-1.azl3.x86_64.rpm
bzip2-devel-1.0.8-1.azl3.x86_64.rpm
bzip2-libs-1.0.8-1.azl3.x86_64.rpm
-ca-certificates-3.0.0-7.azl3.noarch.rpm
-ca-certificates-base-3.0.0-7.azl3.noarch.rpm
-ca-certificates-legacy-3.0.0-7.azl3.noarch.rpm
-ca-certificates-shared-3.0.0-7.azl3.noarch.rpm
-ca-certificates-tools-3.0.0-7.azl3.noarch.rpm
+ca-certificates-3.0.0-8.azl3.noarch.rpm
+ca-certificates-base-3.0.0-8.azl3.noarch.rpm
+ca-certificates-legacy-3.0.0-8.azl3.noarch.rpm
+ca-certificates-shared-3.0.0-8.azl3.noarch.rpm
+ca-certificates-tools-3.0.0-8.azl3.noarch.rpm
ccache-4.8.3-3.azl3.x86_64.rpm
ccache-debuginfo-4.8.3-3.azl3.x86_64.rpm
check-0.15.2-1.azl3.x86_64.rpm
@@ -161,8 +163,8 @@ intltool-0.51.0-7.azl3.noarch.rpm
itstool-2.0.7-1.azl3.noarch.rpm
kbd-2.2.0-2.azl3.x86_64.rpm
kbd-debuginfo-2.2.0-2.azl3.x86_64.rpm
-kernel-cross-headers-6.6.57.1-6.azl3.noarch.rpm
-kernel-headers-6.6.57.1-6.azl3.noarch.rpm
+kernel-cross-headers-6.6.57.1-7.azl3.noarch.rpm
+kernel-headers-6.6.57.1-7.azl3.noarch.rpm
kmod-30-1.azl3.x86_64.rpm
kmod-debuginfo-30-1.azl3.x86_64.rpm
kmod-devel-30-1.azl3.x86_64.rpm
@@ -302,10 +304,10 @@ p11-kit-debuginfo-0.25.0-1.azl3.x86_64.rpm
p11-kit-devel-0.25.0-1.azl3.x86_64.rpm
p11-kit-server-0.25.0-1.azl3.x86_64.rpm
p11-kit-trust-0.25.0-1.azl3.x86_64.rpm
-pam-1.5.3-2.azl3.x86_64.rpm
-pam-debuginfo-1.5.3-2.azl3.x86_64.rpm
-pam-devel-1.5.3-2.azl3.x86_64.rpm
-pam-lang-1.5.3-2.azl3.x86_64.rpm
+pam-1.5.3-4.azl3.x86_64.rpm
+pam-debuginfo-1.5.3-4.azl3.x86_64.rpm
+pam-devel-1.5.3-4.azl3.x86_64.rpm
+pam-lang-1.5.3-4.azl3.x86_64.rpm
patch-2.7.6-9.azl3.x86_64.rpm
patch-debuginfo-2.7.6-9.azl3.x86_64.rpm
pcre2-10.42-3.azl3.x86_64.rpm
@@ -535,18 +537,18 @@ pyproject-rpm-macros-1.12.0-2.azl3.noarch.rpm
pyproject-srpm-macros-1.12.0-2.azl3.noarch.rpm
python-markupsafe-debuginfo-2.1.3-1.azl3.x86_64.rpm
python-wheel-wheel-0.43.0-1.azl3.noarch.rpm
-python3-3.12.3-4.azl3.x86_64.rpm
+python3-3.12.3-5.azl3.x86_64.rpm
python3-audit-3.1.2-1.azl3.x86_64.rpm
python3-cracklib-2.9.11-1.azl3.x86_64.rpm
-python3-curses-3.12.3-4.azl3.x86_64.rpm
+python3-curses-3.12.3-5.azl3.x86_64.rpm
python3-Cython-3.0.5-2.azl3.x86_64.rpm
-python3-debuginfo-3.12.3-4.azl3.x86_64.rpm
-python3-devel-3.12.3-4.azl3.x86_64.rpm
+python3-debuginfo-3.12.3-5.azl3.x86_64.rpm
+python3-devel-3.12.3-5.azl3.x86_64.rpm
python3-flit-core-3.9.0-1.azl3.noarch.rpm
python3-gpg-1.23.2-2.azl3.x86_64.rpm
python3-jinja2-3.1.2-1.azl3.noarch.rpm
python3-libcap-ng-0.8.4-1.azl3.x86_64.rpm
-python3-libs-3.12.3-4.azl3.x86_64.rpm
+python3-libs-3.12.3-5.azl3.x86_64.rpm
python3-libxml2-2.11.5-1.azl3.x86_64.rpm
python3-lxml-4.9.3-1.azl3.x86_64.rpm
python3-magic-5.45-1.azl3.noarch.rpm
@@ -558,8 +560,8 @@ python3-pygments-2.7.4-2.azl3.noarch.rpm
python3-rpm-4.18.2-1.azl3.x86_64.rpm
python3-rpm-generators-14-11.azl3.noarch.rpm
python3-setuptools-69.0.3-4.azl3.noarch.rpm
-python3-test-3.12.3-4.azl3.x86_64.rpm
-python3-tools-3.12.3-4.azl3.x86_64.rpm
+python3-test-3.12.3-5.azl3.x86_64.rpm
+python3-tools-3.12.3-5.azl3.x86_64.rpm
python3-wheel-0.43.0-1.azl3.noarch.rpm
readline-8.2-1.azl3.x86_64.rpm
readline-debuginfo-8.2-1.azl3.x86_64.rpm
diff --git a/toolkit/scripts/containerized-build/create_container_build.sh b/toolkit/scripts/containerized-build/create_container_build.sh
index 70c360c5f94..7eb72dd2c30 100755
--- a/toolkit/scripts/containerized-build/create_container_build.sh
+++ b/toolkit/scripts/containerized-build/create_container_build.sh
@@ -84,6 +84,7 @@ script_dir=$(realpath $(dirname "${BASH_SOURCE[0]}"))
topdir=/usr/src/azl
enable_local_repo=false
keep_container="--rm"
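+# Base package set installed into the build container; '-ep' appends additional packages to this list.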
+packages_to_install="azurelinux-release vim git jq"
while (( "$#")); do
case "$1" in
@@ -92,7 +93,7 @@ while (( "$#")); do
-p ) repo_path="$(realpath $2)"; shift 2 ;;
-mo ) extra_mounts="$2"; shift 2 ;;
-b ) build_mount_dir="$(realpath $2)"; shift 2;;
- -ep ) extra_packages="$2"; shift 2;;
+ -ep ) packages_to_install="${packages_to_install} $2"; shift 2;;
-r ) enable_local_repo=true; shift ;;
-k ) keep_container=""; shift ;;
-q ) STD_OUT_REDIRECT=/dev/null; shift ;;
@@ -268,7 +269,7 @@ docker build -q \
--build-arg enable_local_repo="$enable_local_repo" \
--build-arg azl_repo="$repo_path" \
--build-arg mode="$mode" \
- --build-arg extra_packages="$extra_packages" \
+ --build-arg packages_to_install="$packages_to_install" \
.
echo "docker_image_tag is ${docker_image_tag}"
diff --git a/toolkit/scripts/containerized-build/resources/azl.Dockerfile b/toolkit/scripts/containerized-build/resources/azl.Dockerfile
index 1eda1907483..3b1ecbefdc3 100644
--- a/toolkit/scripts/containerized-build/resources/azl.Dockerfile
+++ b/toolkit/scripts/containerized-build/resources/azl.Dockerfile
@@ -4,7 +4,7 @@ ARG version
ARG enable_local_repo
ARG azl_repo
ARG mode
-ARG extra_packages
+ARG packages_to_install
LABEL containerized-rpmbuild=$azl_repo/build
COPY resources/local_repo /etc/yum.repos.d/local_repo.disabled_repo
@@ -23,5 +23,5 @@ RUN if [[ "${mode}" == "build" ]]; then echo "cd /usr/src/azl || { echo \"ERROR:
RUN if [[ "${mode}" == "test" ]]; then echo "cd /mnt || { echo \"ERROR: Could not change directory to /mnt \"; exit 1; }" >> /root/.bashrc; fi
# Install packages from bashrc so we can use the previously setup tdnf defaults.
-RUN echo "echo installing packages azurelinux-release vim git ${extra_packages}" >> /root/.bashrc && \
- echo "tdnf install --releasever=${version} -qy azurelinux-release vim git ${extra_packages}" >> /root/.bashrc
+RUN echo "echo installing packages ${packages_to_install}" >> /root/.bashrc && \
+ echo "tdnf install --releasever=${version} -qy ${packages_to_install}" >> /root/.bashrc
diff --git a/toolkit/scripts/containerized-build/resources/setup_functions.sh b/toolkit/scripts/containerized-build/resources/setup_functions.sh
index 21d5f5ce8a5..4ea299465e9 100644
--- a/toolkit/scripts/containerized-build/resources/setup_functions.sh
+++ b/toolkit/scripts/containerized-build/resources/setup_functions.sh
@@ -158,6 +158,20 @@ install_dependencies() {
# Get the list of dependencies from the spec file.
mapfile -t dep_list < <(rpmspec -q --buildrequires $spec_file)
+ # Replace the dependencies with the package providing them.
+ for i in "${!dep_list[@]}"
+ do
+ # if the dependency is a file, find the package that provides it
+ if [[ ${dep_list[$i]} == /* ]]; then
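+            # 'tdnf repoquery --file' returns JSON metadata for the providing package(s); the jq filter extracts their unique names.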
+ package=$(tdnf repoquery --file "${dep_list[$i]}" --json | jq -r 'map(.Name) | unique | .[]')
+ if [ -z "$package" ]; then
+ echo "Could not find package providing '${dep_list[$i]}'." >/dev/stderr
+ return 1
+ else
+ dep_list[$i]=$package
+ fi
+ fi
+ done
# Install all the dependencies.
tdnf install -y "${dep_list[@]}" || exit_code=$?
done
diff --git a/toolkit/scripts/pkggen.mk b/toolkit/scripts/pkggen.mk
index d714e29124a..aaed4f211f5 100644
--- a/toolkit/scripts/pkggen.mk
+++ b/toolkit/scripts/pkggen.mk
@@ -202,7 +202,23 @@ ifeq ($(PRECACHE),y)
$(cached_file): $(STATUS_FLAGS_DIR)/precache.flag
endif
-$(cached_file): $(graph_file) $(go-graphpkgfetcher) $(chroot_worker) $(pkggen_local_repo) $(depend_REPO_LIST) $(REPO_LIST) $(cached_remote_rpms) $(TOOLCHAIN_MANIFEST) $(toolchain_rpms) $(depend_EXTRA_BUILD_LAYERS) $(depend_REPO_SNAPSHOT_TIME) $(STATUS_FLAGS_DIR)/build_packages_cache_cleanup.flag
+ifneq ($(strip $(PACKAGE_CACHE_SUMMARY)$(REPO_SNAPSHOT_TIME)),)
+# We MUST clear the RPM package cache ONLY in the following scenarios:
+# - the package cache summary file is used and has changed, or
+# - the repo snapshot time is used and has changed.
+#
+# These scenarios are meant to build with a specific set of RPMs, so we must
+# avoid contamination from previous builds.
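+# For example, changing 'REPO_SNAPSHOT_TIME' or 'PACKAGE_CACHE_SUMMARY' between builds marks the cleanup flag below as stale, which deletes the cached RPMs before they are re-fetched.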
+#
+# For other scenarios the cache is allowed to contain a mixture of packages and
+# we allow the tooling to figure out the appropriate ones to use during the build.
+#
+# IMPORTANT: update the '$(STATUS_FLAGS_DIR)/build_packages_cache_cleanup.flag' target
+# in tandem with updates to this one.
+$(cached_file): $(STATUS_FLAGS_DIR)/build_packages_cache_cleanup.flag
+endif
+
+$(cached_file): $(graph_file) $(go-graphpkgfetcher) $(chroot_worker) $(pkggen_local_repo) $(depend_REPO_LIST) $(REPO_LIST) $(cached_remote_rpms) $(TOOLCHAIN_MANIFEST) $(toolchain_rpms) $(depend_EXTRA_BUILD_LAYERS) $(depend_PACKAGE_CACHE_SUMMARY) $(depend_REPO_SNAPSHOT_TIME)
mkdir -p $(remote_rpms_cache_dir) && \
$(go-graphpkgfetcher) \
--input=$(graph_file) \
@@ -265,10 +281,8 @@ clean-compress-rpms:
clean-compress-srpms:
rm -rf $(srpms_archive)
-# We need to clear the rpm package cache if we have a snapshot time. The filenames will all be
-# the same, but the actual .rpm files may be fundamentally different.
-$(STATUS_FLAGS_DIR)/build_packages_cache_cleanup.flag: $(depend_REPO_SNAPSHOT_TIME)
- @echo "REPO_SNAPSHOT_TIME has changed, sanitizing rpm cache"
+$(STATUS_FLAGS_DIR)/build_packages_cache_cleanup.flag: $(depend_PACKAGE_CACHE_SUMMARY) $(depend_REPO_SNAPSHOT_TIME)
+ @echo "Either 'PACKAGE_CACHE_SUMMARY' or 'REPO_SNAPSHOT_TIME' has changed, sanitizing rpm cache"
@if [ -d "$(remote_rpms_cache_dir)" ]; then \
find "$(remote_rpms_cache_dir)" -type f -name '*.rpm' -delete; \
fi
diff --git a/toolkit/scripts/utils.mk b/toolkit/scripts/utils.mk
index 4cc53c4f874..197d0a76a43 100644
--- a/toolkit/scripts/utils.mk
+++ b/toolkit/scripts/utils.mk
@@ -55,10 +55,10 @@ endef
######## VARIABLE DEPENDENCY TRACKING ########
# List of variables to watch for changes.
-watch_vars=PACKAGE_BUILD_LIST PACKAGE_REBUILD_LIST PACKAGE_IGNORE_LIST REPO_LIST CONFIG_FILE STOP_ON_PKG_FAIL TOOLCHAIN_ARCHIVE REBUILD_TOOLCHAIN SRPM_PACK_LIST SPECS_DIR MAX_CASCADING_REBUILDS RUN_CHECK TEST_RUN_LIST TEST_RERUN_LIST TEST_IGNORE_LIST EXTRA_BUILD_LAYERS LICENSE_CHECK_MODE VALIDATE_TOOLCHAIN_GPG REPO_SNAPSHOT_TIME
+watch_vars=PACKAGE_BUILD_LIST PACKAGE_REBUILD_LIST PACKAGE_IGNORE_LIST REPO_LIST CONFIG_FILE STOP_ON_PKG_FAIL TOOLCHAIN_ARCHIVE REBUILD_TOOLCHAIN SRPM_PACK_LIST SPECS_DIR MAX_CASCADING_REBUILDS RUN_CHECK TEST_RUN_LIST TEST_RERUN_LIST TEST_IGNORE_LIST EXTRA_BUILD_LAYERS LICENSE_CHECK_MODE VALIDATE_TOOLCHAIN_GPG REPO_SNAPSHOT_TIME PACKAGE_CACHE_SUMMARY
# Current list: $(depend_PACKAGE_BUILD_LIST) $(depend_PACKAGE_REBUILD_LIST) $(depend_PACKAGE_IGNORE_LIST) $(depend_REPO_LIST) $(depend_CONFIG_FILE) $(depend_STOP_ON_PKG_FAIL)
# $(depend_TOOLCHAIN_ARCHIVE) $(depend_REBUILD_TOOLCHAIN) $(depend_SRPM_PACK_LIST) $(depend_SPECS_DIR) $(depend_EXTRA_BUILD_LAYERS) $(depend_MAX_CASCADING_REBUILDS) $(depend_RUN_CHECK) $(depend_TEST_RUN_LIST)
-# $(depend_TEST_RERUN_LIST) $(depend_TEST_IGNORE_LIST) $(depend_LICENSE_CHECK_MODE) $(depend_VALIDATE_TOOLCHAIN_GPG) $(depend_REPO_SNAPSHOT_TIME)
+# $(depend_TEST_RERUN_LIST) $(depend_TEST_IGNORE_LIST) $(depend_LICENSE_CHECK_MODE) $(depend_VALIDATE_TOOLCHAIN_GPG) $(depend_REPO_SNAPSHOT_TIME) $(depend_PACKAGE_CACHE_SUMMARY)
.PHONY: variable_depends_on_phony clean-variable_depends_on_phony setfacl_always_run_phony
clean: clean-variable_depends_on_phony
diff --git a/toolkit/tools/go.mod b/toolkit/tools/go.mod
index 5f53072b218..f5860e9fe65 100644
--- a/toolkit/tools/go.mod
+++ b/toolkit/tools/go.mod
@@ -22,7 +22,7 @@ require (
github.com/sirupsen/logrus v1.9.3
github.com/stretchr/testify v1.9.0
github.com/ulikunitz/xz v0.5.10
- golang.org/x/sys v0.21.0
+ golang.org/x/sys v0.28.0
gonum.org/v1/gonum v0.15.0
gopkg.in/alecthomas/kingpin.v2 v2.2.6
gopkg.in/ini.v1 v1.67.0
@@ -48,8 +48,8 @@ require (
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/rivo/uniseg v0.1.0 // indirect
github.com/xrash/smetrics v0.0.0-20170218160415-a3153f7040e9 // indirect
- golang.org/x/crypto v0.24.0 // indirect
+ golang.org/x/crypto v0.31.0 // indirect
golang.org/x/exp v0.0.0-20231110203233-9a3e6036ecaa // indirect
- golang.org/x/net v0.26.0 // indirect
- golang.org/x/text v0.16.0 // indirect
+ golang.org/x/net v0.33.0 // indirect
+ golang.org/x/text v0.21.0 // indirect
)
diff --git a/toolkit/tools/go.sum b/toolkit/tools/go.sum
index 88fbb4c6f23..da0e2a5c9cd 100644
--- a/toolkit/tools/go.sum
+++ b/toolkit/tools/go.sum
@@ -90,12 +90,12 @@ github.com/ulikunitz/xz v0.5.10 h1:t92gobL9l3HE202wg3rlk19F6X+JOxl9BBrCCMYEYd8=
github.com/ulikunitz/xz v0.5.10/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14=
github.com/xrash/smetrics v0.0.0-20170218160415-a3153f7040e9 h1:w8V9v0qVympSF6GjdjIyeqR7+EVhAF9CBQmkmW7Zw0w=
github.com/xrash/smetrics v0.0.0-20170218160415-a3153f7040e9/go.mod h1:N3UwUGtsrSj3ccvlPHLoLsHnpR27oXr4ZE984MbSER8=
-golang.org/x/crypto v0.24.0 h1:mnl8DM0o513X8fdIkmyFE/5hTYxbwYOjDS/+rK6qpRI=
-golang.org/x/crypto v0.24.0/go.mod h1:Z1PMYSOR5nyMcyAVAIQSKCDwalqy85Aqn1x3Ws4L5DM=
+golang.org/x/crypto v0.31.0 h1:ihbySMvVjLAeSH1IbfcRTkD/iNscyz8rGzjF/E5hV6U=
+golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk=
golang.org/x/exp v0.0.0-20231110203233-9a3e6036ecaa h1:FRnLl4eNAQl8hwxVVC17teOw8kdjVDVAiFMtgUdTSRQ=
golang.org/x/exp v0.0.0-20231110203233-9a3e6036ecaa/go.mod h1:zk2irFbV9DP96SEBUUAy67IdHUaZuSnrz1n472HUCLE=
-golang.org/x/net v0.26.0 h1:soB7SVo0PWrY4vPW/+ay0jKDNScG2X9wFeYlXIvJsOQ=
-golang.org/x/net v0.26.0/go.mod h1:5YKkiSynbBIh3p6iOc/vibscux0x38BZDkn8sCUPxHE=
+golang.org/x/net v0.33.0 h1:74SYHlV8BIgHIFC/LrYkOGIwL19eTYXQ5wc6TBuO36I=
+golang.org/x/net v0.33.0/go.mod h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4=
golang.org/x/sys v0.0.0-20190626150813-e07cf5db2756/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191018095205-727590c5006e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
@@ -103,12 +103,12 @@ golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.21.0 h1:rF+pYz3DAGSQAxAu1CbC7catZg4ebC4UIeIhKxBZvws=
-golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA=
+golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
-golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4=
-golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI=
+golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo=
+golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
gonum.org/v1/gonum v0.15.0 h1:2lYxjRbTYyxkJxlhC+LvJIx3SsANPdRybu1tGj9/OrQ=
gonum.org/v1/gonum v0.15.0/go.mod h1:xzZVBJBtS+Mz4q0Yl2LJTk+OxOg4jiXZ7qBoM0uISGo=
diff --git a/toolkit/tools/imageconfigvalidator/imageconfigvalidator.go b/toolkit/tools/imageconfigvalidator/imageconfigvalidator.go
index 4a6a0cbd654..5bc360b7b36 100644
--- a/toolkit/tools/imageconfigvalidator/imageconfigvalidator.go
+++ b/toolkit/tools/imageconfigvalidator/imageconfigvalidator.go
@@ -15,6 +15,7 @@ import (
"github.com/microsoft/azurelinux/toolkit/tools/imagegen/installutils"
"github.com/microsoft/azurelinux/toolkit/tools/internal/exe"
"github.com/microsoft/azurelinux/toolkit/tools/internal/logger"
+ "github.com/microsoft/azurelinux/toolkit/tools/internal/pkgjson"
"github.com/microsoft/azurelinux/toolkit/tools/internal/timestamp"
"github.com/microsoft/azurelinux/toolkit/tools/pkg/profile"
@@ -114,6 +115,7 @@ func validatePackages(config configuration.Config) (err error) {
const (
validateError = "failed to validate package lists in config"
+ kernelPkgName = "kernel"
dracutFipsPkgName = "dracut-fips"
fipsKernelCmdLine = "fips=1"
userAddPkgName = "shadow-utils"
@@ -134,16 +136,24 @@ func validatePackages(config configuration.Config) (err error) {
}
for _, pkg := range packageList {
- if pkg == "kernel" {
+		// The installer tools have an undocumented feature that accepts both the "pkg-name" and "pkg-name=version" formats.
+		// This is in use, so pinned versions must be handled in this check. Technically, 'tdnf' also supports the "pkg-name-version" format,
+		// but that is not easily distinguishable from a "long-package-name", so it is not supported here.
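+		// e.g. a pinned entry like "shadow-utils=<version>" is parsed so that only the package name is compared in the checks below.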
+ pkgVer, err := pkgjson.PackageStringToPackageVer(pkg)
+ if err != nil {
+ return fmt.Errorf("%s: %w", validateError, err)
+ }
+
+ if pkgVer.Name == kernelPkgName {
return fmt.Errorf("%s: kernel should not be included in a package list, add via config file's [KernelOptions] entry", validateError)
}
- if pkg == dracutFipsPkgName {
+ if pkgVer.Name == dracutFipsPkgName {
foundDracutFipsPackage = true
}
- if pkg == selinuxPkgName {
+ if pkgVer.Name == selinuxPkgName {
foundSELinuxPackage = true
}
- if pkg == userAddPkgName {
+ if pkgVer.Name == userAddPkgName {
foundUserAddPackage = true
}
}
diff --git a/toolkit/tools/imageconfigvalidator/imageconfigvalidator_test.go b/toolkit/tools/imageconfigvalidator/imageconfigvalidator_test.go
index 4bec21327c6..1d30b7b2df8 100644
--- a/toolkit/tools/imageconfigvalidator/imageconfigvalidator_test.go
+++ b/toolkit/tools/imageconfigvalidator/imageconfigvalidator_test.go
@@ -6,6 +6,7 @@ package main
import (
"fmt"
"os"
+ "path"
"path/filepath"
"strings"
"testing"
@@ -73,188 +74,159 @@ func TestShouldFailEmptySystemConfig(t *testing.T) {
assert.Equal(t, "invalid [SystemConfigs]:\nmissing [Name] field", err.Error())
}
-func TestShouldFailDeeplyNestedParsingError(t *testing.T) {
+func TestSELinuxRequiresSELinuxPackageInline(t *testing.T) {
const (
- configDirectory string = "../../imageconfigs/"
- targetPackage = "core-efi.json"
+ configDirectory = "./testdata/"
+ targetConfig = "test-config.json"
+ selinuxPkgName = "selinux-policy"
)
- configFiles, err := os.ReadDir(configDirectory)
- assert.NoError(t, err)
-
- // Pick the first config file and mess something up which is deeply
- // nested inside the json
- for _, file := range configFiles {
- if !file.IsDir() && strings.Contains(file.Name(), targetPackage) {
- configPath := filepath.Join(configDirectory, file.Name())
-
- fmt.Println("Corrupting ", configPath)
-
- config, err := configuration.LoadWithAbsolutePaths(configPath, configDirectory)
- assert.NoError(t, err)
-
- config.Disks[0].PartitionTableType = configuration.PartitionTableType("not_a_real_partition_type")
- err = ValidateConfiguration(config)
- assert.Error(t, err)
- assert.Equal(t, "invalid [Disks]:\ninvalid [PartitionTableType]: invalid value for PartitionTableType (not_a_real_partition_type)", err.Error())
+ configPath := filepath.Join(configDirectory, targetConfig)
- return
- }
- }
- assert.Failf(t, "Could not find config", "Could not find image config file '%s' to test", filepath.Join(configDirectory, targetPackage))
-}
-
-func TestShouldFailMissingFipsPackageWithFipsCmdLine(t *testing.T) {
- const (
- configDirectory string = "../../imageconfigs/"
- targetPackage = "core-fips.json"
- fipsPackageListFile = "fips-packages.json"
- )
- configFiles, err := os.ReadDir(configDirectory)
+ config, err := configuration.LoadWithAbsolutePaths(configPath, configDirectory)
assert.NoError(t, err)
- // Pick the core-fips config file, but remove the fips package list
- for _, file := range configFiles {
- if !file.IsDir() && strings.Contains(file.Name(), targetPackage) {
- configPath := filepath.Join(configDirectory, file.Name())
-
- fmt.Println("Corrupting ", configPath)
+ config.SystemConfigs[0].KernelCommandLine.SELinux = "enforcing"
- config, err := configuration.LoadWithAbsolutePaths(configPath, configDirectory)
- assert.NoError(t, err)
-
- newPackageList := []string{}
- for _, pl := range config.SystemConfigs[0].PackageLists {
- if !strings.Contains(pl, fipsPackageListFile) {
- newPackageList = append(newPackageList, pl)
- }
- }
-
- config.SystemConfigs[0].PackageLists = newPackageList
+ err = ValidateConfiguration(config)
+ assert.Error(t, err)
+ assert.Equal(t, "failed to validate package lists in config: [SELinux] selected, but 'selinux-policy' package is not included in the package lists", err.Error())
- err = ValidateConfiguration(config)
- assert.Error(t, err)
- assert.Equal(t, "failed to validate package lists in config: 'fips=1' provided on kernel cmdline, but 'dracut-fips' package is not included in the package lists", err.Error())
+	// Add the required SELinux package via the inline package definition
+ newPackagesField := []string{selinuxPkgName}
+ config.SystemConfigs[0].Packages = newPackagesField
- return
- }
- }
- assert.Fail(t, "Could not find "+targetPackage+" to test")
+ err = ValidateConfiguration(config)
+ assert.NoError(t, err)
}
-func TestShouldFailMissingSELinuxPackageWithSELinux(t *testing.T) {
- const (
- configDirectory = "../../imageconfigs/"
- targetPackage = "core-efi.json"
- targetPackageList = "selinux.json"
- )
- configFiles, err := os.ReadDir(configDirectory)
+func TestValidationAgainstTestConfig(t *testing.T) {
+	configDirAbsPath, err := filepath.Abs("./testdata/")
assert.NoError(t, err)
- // Pick the core-efi config file, then enable SELinux
- for _, file := range configFiles {
- if !file.IsDir() && strings.Contains(file.Name(), targetPackage) {
- configPath := filepath.Join(configDirectory, file.Name())
-
- fmt.Println("Corrupting ", configPath)
-
- config, err := configuration.LoadWithAbsolutePaths(configPath, configDirectory)
- for i, list := range config.SystemConfigs[0].PackageLists {
- // Delete the packagelist from the config
- if strings.Contains(list, targetPackageList) {
- config.SystemConfigs[0].PackageLists = append(config.SystemConfigs[0].PackageLists[:i], config.SystemConfigs[0].PackageLists[i+1:]...)
+ tests := []struct {
+ name string
+ extraListPath string
+ configModifier func(*configuration.Config)
+ expectedError1 string
+ expectedError2 string
+ }{
+ {
+ name: "Deeply nested parsing error",
+ extraListPath: "",
+ configModifier: func(config *configuration.Config) {
+ config.Disks[0].PartitionTableType = configuration.PartitionTableType("not_a_real_partition_type")
+ },
+ expectedError1: "invalid [Disks]:\ninvalid [PartitionTableType]: invalid value for PartitionTableType (not_a_real_partition_type)",
+ // No action is taken to fix the error, so it will still be present
+ expectedError2: "invalid [Disks]:\ninvalid [PartitionTableType]: invalid value for PartitionTableType (not_a_real_partition_type)",
+ },
+ {
+ name: "fips with dracut-fips",
+ extraListPath: "./testdata/fips-list.json",
+ configModifier: func(config *configuration.Config) {
+ config.SystemConfigs[0].KernelCommandLine.EnableFIPS = true
+ },
+ expectedError1: "failed to validate package lists in config: 'fips=1' provided on kernel cmdline, but 'dracut-fips' package is not included in the package lists",
+ expectedError2: "",
+ },
+ {
+ name: "selinux with selinux-policy",
+ extraListPath: "./testdata/selinux-policy-list.json",
+ configModifier: func(config *configuration.Config) {
+ config.SystemConfigs[0].KernelCommandLine.SELinux = "enforcing"
+ },
+ expectedError1: "failed to validate package lists in config: [SELinux] selected, but 'selinux-policy' package is not included in the package lists",
+ expectedError2: "",
+ },
+ {
+ name: "user with shadowutils",
+ extraListPath: "./testdata/shadowutils-list.json",
+ configModifier: func(config *configuration.Config) {
+ config.SystemConfigs[0].Users = []configuration.User{
+ {
+ Name: "testuser",
+ },
}
- }
- assert.NoError(t, err)
-
- config.SystemConfigs[0].KernelCommandLine.SELinux = "enforcing"
-
- err = ValidateConfiguration(config)
- assert.Error(t, err)
- assert.Equal(t, "failed to validate package lists in config: [SELinux] selected, but 'selinux-policy' package is not included in the package lists", err.Error())
-
- return
- }
+ },
+ expectedError1: "failed to validate package lists in config: the 'shadow-utils' package must be included in the package lists when the image is configured to add users or groups",
+ expectedError2: "",
+ },
+ {
+ name: "Shadowutils pinned version",
+ extraListPath: "./testdata/pinned-shadowutils-list.json",
+ configModifier: func(config *configuration.Config) {
+ config.SystemConfigs[0].Users = []configuration.User{
+ {
+ Name: "testuser",
+ },
+ }
+ },
+ expectedError1: "failed to validate package lists in config: the 'shadow-utils' package must be included in the package lists when the image is configured to add users or groups",
+ expectedError2: "",
+ },
+
+ {
+ name: "Shadowutils pinned version with whitespace",
+ extraListPath: "./testdata/pinned-shadowutils-ws-list.json",
+ configModifier: func(config *configuration.Config) {
+ config.SystemConfigs[0].Users = []configuration.User{
+ {
+ Name: "testuser",
+ },
+ }
+ },
+ expectedError1: "failed to validate package lists in config: the 'shadow-utils' package must be included in the package lists when the image is configured to add users or groups",
+ expectedError2: "",
+ },
+ {
+ name: "missing package list",
+ extraListPath: "./testdata/not-a-real-list.json",
+ configModifier: func(config *configuration.Config) {},
+ expectedError1: "",
+			expectedError2: "failed to validate package lists in config: open " + path.Join(configDirAbsPath, "not-a-real-list.json") + ": no such file or directory",
+ },
+ {
+ name: "bad package name",
+ extraListPath: "./testdata/bogus-list.json",
+ configModifier: func(config *configuration.Config) {},
+ expectedError1: "",
+ expectedError2: `failed to validate package lists in config: packages list entry "bad package = bad < version" does not match the '[name][optional_condition][optional_version]' format`,
+ },
}
- assert.Fail(t, "Could not find "+targetPackage+" to test")
-}
-func TestShouldSucceedSELinuxPackageDefinedInline(t *testing.T) {
- const (
- configDirectory = "../../imageconfigs/"
- targetPackage = "core-efi.json"
- targetPackageList = "selinux.json"
- selinuxPkgName = "selinux-policy"
- )
- configFiles, err := os.ReadDir(configDirectory)
- assert.NoError(t, err)
-
- // Pick the core-efi config file, then enable SELinux
- for _, file := range configFiles {
- if !file.IsDir() && strings.Contains(file.Name(), targetPackage) {
- configPath := filepath.Join(configDirectory, file.Name())
-
- fmt.Println("Corrupting ", configPath)
-
- config, err := configuration.LoadWithAbsolutePaths(configPath, configDirectory)
- for i, list := range config.SystemConfigs[0].PackageLists {
- // Delete the packagelist from the config
- if strings.Contains(list, targetPackageList) {
- config.SystemConfigs[0].PackageLists = append(config.SystemConfigs[0].PackageLists[:i], config.SystemConfigs[0].PackageLists[i+1:]...)
- }
- }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ configPath := filepath.Join("./testdata/", "test-config.json")
+ config, err := configuration.LoadWithAbsolutePaths(configPath, "./testdata/")
assert.NoError(t, err)
- //Add required SELinux package in the inline package definition
- newPackagesField := []string{selinuxPkgName}
- config.SystemConfigs[0].Packages = newPackagesField
-
- config.SystemConfigs[0].KernelCommandLine.SELinux = "enforcing"
+ // Break the config
+ tt.configModifier(&config)
+ // Ensure the validation detects the expected failure
err = ValidateConfiguration(config)
- assert.NoError(t, err)
- return
- }
- }
- assert.Fail(t, "Could not find "+targetPackage+" to test")
-}
-
-func TestShouldFailMissingShadowUtilsPackageWithUsers(t *testing.T) {
- const (
- configDirectory = "../../imageconfigs/"
- targetPackage = "core-efi.json"
- targetPackageList = "core-packages-image.json"
- )
- configFiles, err := os.ReadDir(configDirectory)
- assert.NoError(t, err)
-
- // Pick the core-efi config file, then add a user, then remove shadow-utils from the package list (via dropping core... its a bit hacky)
- for _, file := range configFiles {
- if !file.IsDir() && strings.Contains(file.Name(), targetPackage) {
- configPath := filepath.Join(configDirectory, file.Name())
-
- fmt.Println("Corrupting ", configPath)
-
- config, err := configuration.LoadWithAbsolutePaths(configPath, configDirectory)
- for i, list := range config.SystemConfigs[0].PackageLists {
- // Delete the packagelist from the config
- if strings.Contains(list, targetPackageList) {
- config.SystemConfigs[0].PackageLists = append(config.SystemConfigs[0].PackageLists[:i], config.SystemConfigs[0].PackageLists[i+1:]...)
- }
+ if tt.expectedError1 != "" {
+ assert.Error(t, err)
+ assert.Equal(t, tt.expectedError1, err.Error())
+ } else {
+ assert.NoError(t, err)
}
- assert.NoError(t, err)
- config.SystemConfigs[0].Users = []configuration.User{
- {
- Name: "testuser",
- },
+ // Fix the config by adding the package list if provided
+ if tt.extraListPath != "" {
+ replacementPackageListAbsPath, err := filepath.Abs(tt.extraListPath)
+ assert.NoError(t, err)
+ config.SystemConfigs[0].PackageLists = append(config.SystemConfigs[0].PackageLists, replacementPackageListAbsPath)
}
+ // Validate again
err = ValidateConfiguration(config)
- assert.Error(t, err)
- assert.Equal(t, "failed to validate package lists in config: the 'shadow-utils' package must be included in the package lists when the image is configured to add users or groups", err.Error())
-
- return
- }
+ if tt.expectedError2 != "" {
+ assert.Error(t, err)
+ assert.Equal(t, tt.expectedError2, err.Error())
+ } else {
+ assert.NoError(t, err)
+ }
+ })
}
- assert.Fail(t, "Could not find "+targetPackage+" to test")
}
diff --git a/toolkit/tools/imageconfigvalidator/testdata/bogus-list.json b/toolkit/tools/imageconfigvalidator/testdata/bogus-list.json
new file mode 100644
index 00000000000..bd8b2e53e0a
--- /dev/null
+++ b/toolkit/tools/imageconfigvalidator/testdata/bogus-list.json
@@ -0,0 +1,5 @@
+{
+ "packages": [
+ "bad package = bad < version"
+ ]
+}
diff --git a/toolkit/tools/imageconfigvalidator/testdata/dummy-list.json b/toolkit/tools/imageconfigvalidator/testdata/dummy-list.json
new file mode 100644
index 00000000000..b3d776e8d3a
--- /dev/null
+++ b/toolkit/tools/imageconfigvalidator/testdata/dummy-list.json
@@ -0,0 +1,5 @@
+{
+ "packages": [
+ "words"
+ ]
+}
diff --git a/toolkit/tools/imageconfigvalidator/testdata/fips-list.json b/toolkit/tools/imageconfigvalidator/testdata/fips-list.json
new file mode 100644
index 00000000000..eea216be224
--- /dev/null
+++ b/toolkit/tools/imageconfigvalidator/testdata/fips-list.json
@@ -0,0 +1,5 @@
+{
+ "packages": [
+ "dracut-fips"
+ ]
+}
diff --git a/toolkit/tools/imageconfigvalidator/testdata/pinned-shadowutils-list.json b/toolkit/tools/imageconfigvalidator/testdata/pinned-shadowutils-list.json
new file mode 100644
index 00000000000..07519542cf9
--- /dev/null
+++ b/toolkit/tools/imageconfigvalidator/testdata/pinned-shadowutils-list.json
@@ -0,0 +1,5 @@
+{
+ "packages": [
+ "shadow-utils=some-version"
+ ]
+}
diff --git a/toolkit/tools/imageconfigvalidator/testdata/pinned-shadowutils-ws-list.json b/toolkit/tools/imageconfigvalidator/testdata/pinned-shadowutils-ws-list.json
new file mode 100644
index 00000000000..a2e589ce3b7
--- /dev/null
+++ b/toolkit/tools/imageconfigvalidator/testdata/pinned-shadowutils-ws-list.json
@@ -0,0 +1,5 @@
+{
+ "packages": [
+ "shadow-utils = some-version"
+ ]
+}
diff --git a/toolkit/tools/imageconfigvalidator/testdata/selinux-policy-list.json b/toolkit/tools/imageconfigvalidator/testdata/selinux-policy-list.json
new file mode 100644
index 00000000000..34ff7c3874b
--- /dev/null
+++ b/toolkit/tools/imageconfigvalidator/testdata/selinux-policy-list.json
@@ -0,0 +1,5 @@
+{
+ "packages": [
+ "selinux-policy"
+ ]
+}
diff --git a/toolkit/tools/imageconfigvalidator/testdata/shadowutils-list.json b/toolkit/tools/imageconfigvalidator/testdata/shadowutils-list.json
new file mode 100644
index 00000000000..9ea75c300bc
--- /dev/null
+++ b/toolkit/tools/imageconfigvalidator/testdata/shadowutils-list.json
@@ -0,0 +1,5 @@
+{
+ "packages": [
+ "shadow-utils"
+ ]
+}
diff --git a/toolkit/tools/imageconfigvalidator/testdata/test-config.json b/toolkit/tools/imageconfigvalidator/testdata/test-config.json
new file mode 100644
index 00000000000..cb814a36854
--- /dev/null
+++ b/toolkit/tools/imageconfigvalidator/testdata/test-config.json
@@ -0,0 +1,59 @@
+{
+
+ "_comment": "Based on core-efi.json",
+
+ "Disks": [
+ {
+ "PartitionTableType": "gpt",
+ "MaxSize": 4096,
+ "Artifacts": [
+ {
+ "Name": "test",
+ "Type": "vhdx"
+ }
+ ],
+ "Partitions": [
+ {
+ "ID": "boot",
+ "Flags": [
+ "esp",
+ "boot"
+ ],
+ "Start": 1,
+ "End": 9,
+ "FsType": "fat32"
+ },
+ {
+ "ID": "rootfs",
+ "Start": 9,
+ "End": 0,
+ "FsType": "ext4"
+ }
+ ]
+ }
+ ],
+ "SystemConfigs": [
+ {
+ "Name": "Standard",
+ "BootType": "efi",
+ "PartitionSettings": [
+ {
+ "ID": "boot",
+ "MountPoint": "/boot/efi",
+ "MountOptions" : "umask=0077"
+ },
+ {
+ "ID": "rootfs",
+ "MountPoint": "/"
+ }
+ ],
+ "PackageLists": [
+ "dummy-list.json"
+ ],
+ "KernelOptions": {
+ "default": "kernel"
+ },
+ "Hostname": "azurelinux"
+ }
+ ]
+}
diff --git a/toolkit/tools/imagegen/diskutils/diskutils.go b/toolkit/tools/imagegen/diskutils/diskutils.go
index 72f0c8e02af..7d834888f79 100644
--- a/toolkit/tools/imagegen/diskutils/diskutils.go
+++ b/toolkit/tools/imagegen/diskutils/diskutils.go
@@ -32,7 +32,9 @@ var (
DefaultMkfsOptions = map[string][]string{
"ext2": {"-b", "4096", "-O", "none,sparse_super,large_file,filetype,resize_inode,dir_index,ext_attr"},
"ext3": {"-b", "4096", "-O", "none,sparse_super,large_file,filetype,resize_inode,dir_index,ext_attr,has_journal"},
- "ext4": {"-b", "4096", "-O", "none,sparse_super,large_file,filetype,resize_inode,dir_index,ext_attr,has_journal,extent,huge_file,flex_bg,metadata_csum,64bit,dir_nlink,extra_isize"},
+		// grub2 doesn't recognize ext4 filesystems with metadata_csum_seed enabled.
+		// ^metadata_csum_seed prevents the filesystem from storing the metadata checksum seed in the superblock, which in turn disables changing the UUID of a mounted filesystem.
+ "ext4": {"-b", "4096", "-O", "none,sparse_super,large_file,filetype,resize_inode,dir_index,ext_attr,has_journal,extent,huge_file,flex_bg,metadata_csum,64bit,dir_nlink,extra_isize,^metadata_csum_seed"},
}
partedVersionRegex = regexp.MustCompile(`^parted \(GNU parted\) (\d+)\.(\d+)`)
diff --git a/toolkit/tools/imagegen/diskutils/encryption.go b/toolkit/tools/imagegen/diskutils/encryption.go
index 36098cf5172..aaaebd755da 100644
--- a/toolkit/tools/imagegen/diskutils/encryption.go
+++ b/toolkit/tools/imagegen/diskutils/encryption.go
@@ -173,8 +173,16 @@ func encryptRootPartition(partDevPath string, partition configuration.Partition,
return
}
+ mkfsOptions, ok := DefaultMkfsOptions[partition.FsType]
+ if !ok {
+ mkfsOptions = []string{}
+ }
+ mkfsArgs := []string{"-t", partition.FsType}
+ mkfsArgs = append(mkfsArgs, mkfsOptions...)
+ mkfsArgs = append(mkfsArgs, fullMappedPath)
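+	// e.g. for ext4 this runs roughly: mkfs -t ext4 -b 4096 -O <default ext4 feature options> <mapped device path>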
+
// Create the file system
- _, stderr, err = shell.Execute("mkfs", "-t", partition.FsType, fullMappedPath)
+ _, stderr, err = shell.Execute("mkfs", mkfsArgs...)
if err != nil {
err = fmt.Errorf("failed to mkfs for partition (%v):\n%v\n%w", partDevPath, stderr, err)
}