From b06765c1b71354b611b547acd9fa04e164278853 Mon Sep 17 00:00:00 2001
From: CBL-Mariner-Bot <75509084+CBL-Mariner-Bot@users.noreply.github.com>
Date: Sun, 26 Jan 2025 07:41:51 -0800
Subject: [PATCH] [AUTO-CHERRYPICK] [3.0] Fix CVE-2023-39325 and CVE-2023-44487 for containerized-data-importer - branch 3.0-dev (#12085)

Co-authored-by: Henry Li <69694695+henryli001@users.noreply.github.com>
---
 .../CVE-2023-39325.patch                      | 117 ++++++++
 .../CVE-2023-44487.patch                      | 258 ++++++++++++++++++
 .../containerized-data-importer.spec          |   7 +-
 3 files changed, 381 insertions(+), 1 deletion(-)
 create mode 100644 SPECS/containerized-data-importer/CVE-2023-39325.patch
 create mode 100644 SPECS/containerized-data-importer/CVE-2023-44487.patch

diff --git a/SPECS/containerized-data-importer/CVE-2023-39325.patch b/SPECS/containerized-data-importer/CVE-2023-39325.patch
new file mode 100644
index 00000000000..e0085e416d6
--- /dev/null
+++ b/SPECS/containerized-data-importer/CVE-2023-39325.patch
@@ -0,0 +1,117 @@
+diff --git a/vendor/golang.org/x/net/http2/server.go b/vendor/golang.org/x/net/http2/server.go
+index 8cb14f3..6000140 100644
+--- a/vendor/golang.org/x/net/http2/server.go
++++ b/vendor/golang.org/x/net/http2/server.go
+@@ -581,9 +581,11 @@ type serverConn struct {
+     advMaxStreams uint32 // our SETTINGS_MAX_CONCURRENT_STREAMS advertised the client
+     curClientStreams uint32 // number of open streams initiated by the client
+     curPushedStreams uint32 // number of open streams initiated by server push
++    curHandlers uint32 // number of running handler goroutines
+     maxClientStreamID uint32 // max ever seen from client (odd), or 0 if there have been no client requests
+     maxPushPromiseID uint32 // ID of the last push promise (even), or 0 if there have been no pushes
+     streams map[uint32]*stream
++    unstartedHandlers []unstartedHandler
+     initialStreamSendWindowSize int32
+     maxFrameSize int32
+     peerMaxHeaderListSize uint32 // zero means unknown (default)
+@@ -981,6 +983,8 @@ func (sc *serverConn) serve() {
+             return
+         case gracefulShutdownMsg:
+             sc.startGracefulShutdownInternal()
++        case handlerDoneMsg:
++            sc.handlerDone()
+         default:
+             panic("unknown timer")
+         }
+@@ -1028,6 +1032,7 @@ var (
+     idleTimerMsg = new(serverMessage)
+     shutdownTimerMsg = new(serverMessage)
+     gracefulShutdownMsg = new(serverMessage)
++    handlerDoneMsg = new(serverMessage)
+ )
+ 
+ func (sc *serverConn) onSettingsTimer() { sc.sendServeMsg(settingsTimerMsg) }
+@@ -2022,8 +2027,7 @@ func (sc *serverConn) processHeaders(f *MetaHeadersFrame) error {
+         }
+     }
+ 
+-    go sc.runHandler(rw, req, handler)
+-    return nil
++    return sc.scheduleHandler(id, rw, req, handler)
+ }
+ 
+ func (sc *serverConn) upgradeRequest(req *http.Request) {
+@@ -2043,6 +2047,10 @@ func (sc *serverConn) upgradeRequest(req *http.Request) {
+         sc.conn.SetReadDeadline(time.Time{})
+     }
+ 
++    // This is the first request on the connection,
++    // so start the handler directly rather than going
++    // through scheduleHandler.
++    sc.curHandlers++
+     go sc.runHandler(rw, req, sc.handler.ServeHTTP)
+ }
+ 
+@@ -2283,8 +2291,62 @@ func (sc *serverConn) newResponseWriter(st *stream, req *http.Request) *response
+     return &responseWriter{rws: rws}
+ }
+ 
++type unstartedHandler struct {
++    streamID uint32
++    rw *responseWriter
++    req *http.Request
++    handler func(http.ResponseWriter, *http.Request)
++}
++
++// scheduleHandler starts a handler goroutine,
++// or schedules one to start as soon as an existing handler finishes.
++func (sc *serverConn) scheduleHandler(streamID uint32, rw *responseWriter, req *http.Request, handler func(http.ResponseWriter, *http.Request)) error {
++    sc.serveG.check()
++    maxHandlers := sc.advMaxStreams
++    if sc.curHandlers < maxHandlers {
++        sc.curHandlers++
++        go sc.runHandler(rw, req, handler)
++        return nil
++    }
++    if len(sc.unstartedHandlers) > int(4*sc.advMaxStreams) {
++        return sc.countError("too_many_early_resets", ConnectionError(ErrCodeEnhanceYourCalm))
++    }
++    sc.unstartedHandlers = append(sc.unstartedHandlers, unstartedHandler{
++        streamID: streamID,
++        rw: rw,
++        req: req,
++        handler: handler,
++    })
++    return nil
++}
++
++func (sc *serverConn) handlerDone() {
++    sc.serveG.check()
++    sc.curHandlers--
++    i := 0
++    maxHandlers := sc.advMaxStreams
++    for ; i < len(sc.unstartedHandlers); i++ {
++        u := sc.unstartedHandlers[i]
++        if sc.streams[u.streamID] == nil {
++            // This stream was reset before its goroutine had a chance to start.
++            continue
++        }
++        if sc.curHandlers >= maxHandlers {
++            break
++        }
++        sc.curHandlers++
++        go sc.runHandler(u.rw, u.req, u.handler)
++        sc.unstartedHandlers[i] = unstartedHandler{} // don't retain references
++    }
++    sc.unstartedHandlers = sc.unstartedHandlers[i:]
++    if len(sc.unstartedHandlers) == 0 {
++        sc.unstartedHandlers = nil
++    }
++}
++
+ // Run on its own goroutine.
+ func (sc *serverConn) runHandler(rw *responseWriter, req *http.Request, handler func(http.ResponseWriter, *http.Request)) {
++    defer sc.sendServeMsg(handlerDoneMsg)
+     didPanic := true
+     defer func() {
+         rw.rws.stream.cancelCtx()
diff --git a/SPECS/containerized-data-importer/CVE-2023-44487.patch b/SPECS/containerized-data-importer/CVE-2023-44487.patch
new file mode 100644
index 00000000000..ee2a818f281
--- /dev/null
+++ b/SPECS/containerized-data-importer/CVE-2023-44487.patch
@@ -0,0 +1,258 @@
+diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_server.go b/vendor/google.golang.org/grpc/internal/transport/http2_server.go
+index 3dd1564..9d9a3fd 100644
+--- a/vendor/google.golang.org/grpc/internal/transport/http2_server.go
++++ b/vendor/google.golang.org/grpc/internal/transport/http2_server.go
+@@ -165,15 +165,10 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport,
+         ID: http2.SettingMaxFrameSize,
+         Val: http2MaxFrameLen,
+     }}
+-    // TODO(zhaoq): Have a better way to signal "no limit" because 0 is
+-    // permitted in the HTTP2 spec.
+-    maxStreams := config.MaxStreams
+-    if maxStreams == 0 {
+-        maxStreams = math.MaxUint32
+-    } else {
++    if config.MaxStreams != math.MaxUint32 {
+         isettings = append(isettings, http2.Setting{
+             ID: http2.SettingMaxConcurrentStreams,
+-            Val: maxStreams,
++            Val: config.MaxStreams,
+         })
+     }
+     dynamicWindow := true
+@@ -252,7 +247,7 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport,
+         framer: framer,
+         readerDone: make(chan struct{}),
+         writerDone: make(chan struct{}),
+-        maxStreams: maxStreams,
++        maxStreams: config.MaxStreams,
+         inTapHandle: config.InTapHandle,
+         fc: &trInFlow{limit: uint32(icwz)},
+         state: reachable,
+diff --git a/vendor/google.golang.org/grpc/server.go b/vendor/google.golang.org/grpc/server.go
+index f4dde72..98839ad 100644
+--- a/vendor/google.golang.org/grpc/server.go
++++ b/vendor/google.golang.org/grpc/server.go
+@@ -43,7 +43,6 @@ import (
+     "google.golang.org/grpc/internal"
+     "google.golang.org/grpc/internal/binarylog"
+     "google.golang.org/grpc/internal/channelz"
+-    "google.golang.org/grpc/internal/grpcrand"
+     "google.golang.org/grpc/internal/grpcsync"
+     "google.golang.org/grpc/internal/transport"
+     "google.golang.org/grpc/keepalive"
+@@ -74,10 +73,10 @@ func init() {
+         srv.drainServerTransports(addr)
+     }
+     internal.AddGlobalServerOptions = func(opt ...ServerOption) {
+-        extraServerOptions = append(extraServerOptions, opt...)
++        globalServerOptions = append(globalServerOptions, opt...)
+     }
+     internal.ClearGlobalServerOptions = func() {
+-        extraServerOptions = nil
++        globalServerOptions = nil
+     }
+     internal.BinaryLogger = binaryLogger
+     internal.JoinServerOptions = newJoinServerOption
+@@ -115,12 +114,6 @@ type serviceInfo struct {
+     mdata interface{}
+ }
+ 
+-type serverWorkerData struct {
+-    st transport.ServerTransport
+-    wg *sync.WaitGroup
+-    stream *transport.Stream
+-}
+-
+ // Server is a gRPC server to serve RPC requests.
+ type Server struct {
+     opts serverOptions
+@@ -145,7 +138,7 @@ type Server struct {
+     channelzID *channelz.Identifier
+     czData *channelzData
+ 
+-    serverWorkerChannels []chan *serverWorkerData
++    serverWorkerChannel chan func()
+ }
+ 
+ type serverOptions struct {
+@@ -177,13 +170,14 @@ type serverOptions struct {
+ }
+ 
+ var defaultServerOptions = serverOptions{
++    maxConcurrentStreams: math.MaxUint32,
+     maxReceiveMessageSize: defaultServerMaxReceiveMessageSize,
+     maxSendMessageSize: defaultServerMaxSendMessageSize,
+     connectionTimeout: 120 * time.Second,
+     writeBufferSize: defaultWriteBufSize,
+     readBufferSize: defaultReadBufSize,
+ }
+-var extraServerOptions []ServerOption
++var globalServerOptions []ServerOption
+ 
+ // A ServerOption sets options such as credentials, codec and keepalive parameters, etc.
+ type ServerOption interface {
+@@ -387,6 +381,9 @@ func MaxSendMsgSize(m int) ServerOption {
+ // MaxConcurrentStreams returns a ServerOption that will apply a limit on the number
+ // of concurrent streams to each ServerTransport.
+ func MaxConcurrentStreams(n uint32) ServerOption {
++    if n == 0 {
++        n = math.MaxUint32
++    }
+     return newFuncServerOption(func(o *serverOptions) {
+         o.maxConcurrentStreams = n
+     })
+@@ -565,42 +562,35 @@ const serverWorkerResetThreshold = 1 << 16
+ // re-allocations (see the runtime.morestack problem [1]).
+ //
+ // [1] https://github.com/golang/go/issues/18138
+-func (s *Server) serverWorker(ch chan *serverWorkerData) {
+-    // To make sure all server workers don't reset at the same time, choose a
+-    // random number of iterations before resetting.
+-    threshold := serverWorkerResetThreshold + grpcrand.Intn(serverWorkerResetThreshold)
+-    for completed := 0; completed < threshold; completed++ {
+-        data, ok := <-ch
++func (s *Server) serverWorker() {
++    for completed := 0; completed < serverWorkerResetThreshold; completed++ {
++        f, ok := <-s.serverWorkerChannel
+         if !ok {
+             return
+         }
+-        s.handleStream(data.st, data.stream, s.traceInfo(data.st, data.stream))
+-        data.wg.Done()
++        f()
+     }
+-    go s.serverWorker(ch)
++    go s.serverWorker()
+ }
+ 
+ // initServerWorkers creates worker goroutines and channels to process incoming
+ // connections to reduce the time spent overall on runtime.morestack.
+ func (s *Server) initServerWorkers() {
+-    s.serverWorkerChannels = make([]chan *serverWorkerData, s.opts.numServerWorkers)
++    s.serverWorkerChannel = make(chan func())
+     for i := uint32(0); i < s.opts.numServerWorkers; i++ {
+-        s.serverWorkerChannels[i] = make(chan *serverWorkerData)
+-        go s.serverWorker(s.serverWorkerChannels[i])
++        go s.serverWorker()
+     }
+ }
+ 
+ func (s *Server) stopServerWorkers() {
+-    for i := uint32(0); i < s.opts.numServerWorkers; i++ {
+-        close(s.serverWorkerChannels[i])
+-    }
++    close(s.serverWorkerChannel)
+ }
+ 
+ // NewServer creates a gRPC server which has no service registered and has not
+ // started to accept requests yet.
+ func NewServer(opt ...ServerOption) *Server {
+     opts := defaultServerOptions
+-    for _, o := range extraServerOptions {
++    for _, o := range globalServerOptions {
+         o.apply(&opts)
+     }
+     for _, o := range opt {
+@@ -945,25 +935,26 @@ func (s *Server) serveStreams(st transport.ServerTransport) {
+     defer st.Close()
+     var wg sync.WaitGroup
+ 
+-    var roundRobinCounter uint32
++    streamQuota := newHandlerQuota(s.opts.maxConcurrentStreams)
+     st.HandleStreams(func(stream *transport.Stream) {
+         wg.Add(1)
++
++        streamQuota.acquire()
++        f := func() {
++            defer streamQuota.release()
++            defer wg.Done()
++            s.handleStream(st, stream, s.traceInfo(st, stream))
++        }
++
+         if s.opts.numServerWorkers > 0 {
+-            data := &serverWorkerData{st: st, wg: &wg, stream: stream}
+             select {
+-            case s.serverWorkerChannels[atomic.AddUint32(&roundRobinCounter, 1)%s.opts.numServerWorkers] <- data:
++            case s.serverWorkerChannel <- f:
++                return
+             default:
+                 // If all stream workers are busy, fallback to the default code path.
+-                go func() {
+-                    s.handleStream(st, stream, s.traceInfo(st, stream))
+-                    wg.Done()
+-                }()
+             }
+         } else {
+-            go func() {
+-                defer wg.Done()
+-                s.handleStream(st, stream, s.traceInfo(st, stream))
+-            }()
++            go f()
+         }
+     }, func(ctx context.Context, method string) context.Context {
+         if !EnableTracing {
+@@ -1978,3 +1969,34 @@ type channelzServer struct {
+ func (c *channelzServer) ChannelzMetric() *channelz.ServerInternalMetric {
+     return c.s.channelzMetric()
+ }
++
++// atomicSemaphore implements a blocking, counting semaphore. acquire should be
++// called synchronously; release may be called asynchronously.
++type atomicSemaphore struct {
++    n atomic.Int64
++    wait chan struct{}
++}
++
++func (q *atomicSemaphore) acquire() {
++    if q.n.Add(-1) < 0 {
++        // We ran out of quota. Block until a release happens.
++        <-q.wait
++    }
++}
++
++func (q *atomicSemaphore) release() {
++    // N.B. the "<= 0" check below should allow for this to work with multiple
++    // concurrent calls to acquire, but also note that with synchronous calls to
++    // acquire, as our system does, n will never be less than -1. There are
++    // fairness issues (queuing) to consider if this was to be generalized.
++    if q.n.Add(1) <= 0 {
++        // An acquire was waiting on us. Unblock it.
++        q.wait <- struct{}{}
++    }
++}
++
++func newHandlerQuota(n uint32) *atomicSemaphore {
++    a := &atomicSemaphore{wait: make(chan struct{}, 1)}
++    a.n.Store(int64(n))
++    return a
++}
+\ No newline at end of file
+diff --git a/vendor/k8s.io/apimachinery/pkg/util/runtime/runtime.go b/vendor/k8s.io/apimachinery/pkg/util/runtime/runtime.go
+index d738725..3674914 100644
+--- a/vendor/k8s.io/apimachinery/pkg/util/runtime/runtime.go
++++ b/vendor/k8s.io/apimachinery/pkg/util/runtime/runtime.go
+@@ -126,14 +126,17 @@ type rudimentaryErrorBackoff struct {
+ // OnError will block if it is called more often than the embedded period time.
+ // This will prevent overly tight hot error loops.
+ func (r *rudimentaryErrorBackoff) OnError(error) {
++    now := time.Now() // start the timer before acquiring the lock
+     r.lastErrorTimeLock.Lock()
+-    defer r.lastErrorTimeLock.Unlock()
+-    d := time.Since(r.lastErrorTime)
+-    if d < r.minPeriod {
+-        // If the time moves backwards for any reason, do nothing
+-        time.Sleep(r.minPeriod - d)
+-    }
++    d := now.Sub(r.lastErrorTime)
+     r.lastErrorTime = time.Now()
++    r.lastErrorTimeLock.Unlock()
++
++    // Do not sleep with the lock held because that causes all callers of HandleError to block.
++    // We only want the current goroutine to block.
++    // A negative or zero duration causes time.Sleep to return immediately.
++    // If the time moves backwards for any reason, do nothing.
++    time.Sleep(r.minPeriod - d)
+ }
+ 
+ // GetCaller returns the caller of the function that calls it.
diff --git a/SPECS/containerized-data-importer/containerized-data-importer.spec b/SPECS/containerized-data-importer/containerized-data-importer.spec
index e82fa5123d2..29b7692725a 100644
--- a/SPECS/containerized-data-importer/containerized-data-importer.spec
+++ b/SPECS/containerized-data-importer/containerized-data-importer.spec
@@ -18,7 +18,7 @@
 Summary:        Container native virtualization
 Name:           containerized-data-importer
 Version:        1.57.0
-Release:        7%{?dist}
+Release:        8%{?dist}
 License:        ASL 2.0
 Vendor:         Microsoft Corporation
 Distribution:   Azure Linux
@@ -29,6 +29,8 @@ Patch0:         CVE-2024-3727.patch
 Patch1:         CVE-2022-2879.patch
 Patch2:         CVE-2024-24786.patch
 Patch3:         CVE-2024-45338.patch
+Patch4:         CVE-2023-39325.patch
+Patch5:         CVE-2023-44487.patch
 BuildRequires:  golang
 BuildRequires:  golang-packaging
 BuildRequires:  libnbd-devel
@@ -223,6 +225,9 @@ install -m 0644 _out/manifests/release/cdi-cr.yaml %{buildroot}%{_datadir}/cdi/m
 %{_datadir}/cdi/manifests
 
 %changelog
+* Fri Jan 24 2025 Henry Li - 1.57.0-8
+- Add patch for CVE-2023-39325 and CVE-2023-44487
+
 * Tue Dec 31 2024 Rohit Rawat - 1.57.0-7
 - Add patch for CVE-2024-45338
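
Notes on the backported fixes (illustrative sketches, not part of the applied diffs):

The CVE-2023-39325 hunks mitigate HTTP/2 Rapid Reset: instead of unconditionally spawning a goroutine per HEADERS frame, the server now runs at most SETTINGS_MAX_CONCURRENT_STREAMS handlers at once, parks further streams in unstartedHandlers, and tears the connection down with ENHANCE_YOUR_CALM once the backlog passes four times the advertised limit. A minimal standalone model of that accounting follows; all names here are hypothetical, and the real logic is the scheduleHandler/handlerDone pair in the vendored server.go above, where handlers run on their own goroutines.

package main

import (
    "errors"
    "fmt"
)

// scheduler models the patched accounting: at most maxHandlers handlers run
// at once, the rest queue, and an oversized queue rejects the connection.
type scheduler struct {
    maxHandlers uint32
    running     uint32
    queue       []func()
}

var errEnhanceYourCalm = errors.New("too_many_early_resets: ENHANCE_YOUR_CALM")

func (s *scheduler) schedule(fn func()) error {
    if s.running < s.maxHandlers {
        s.running++
        fn() // the vendored code runs the handler on its own goroutine instead
        return nil
    }
    if len(s.queue) > int(4*s.maxHandlers) {
        // Mirrors countError("too_many_early_resets", ...) in the patch.
        return errEnhanceYourCalm
    }
    s.queue = append(s.queue, fn)
    return nil
}

// done mirrors handlerDone: release one slot, then drain queued work.
func (s *scheduler) done() {
    s.running--
    for s.running < s.maxHandlers && len(s.queue) > 0 {
        fn := s.queue[0]
        s.queue = s.queue[1:]
        s.running++
        fn()
    }
}

func main() {
    s := &scheduler{maxHandlers: 2}
    for i := 0; i < 12; i++ {
        if err := s.schedule(func() {}); err != nil {
            fmt.Println("stream", i, "rejected:", err)
        }
    }
    _ = s.done // done would run when a handler finishes (handlerDoneMsg)
}

With maxHandlers set to 2, the first two streams start immediately, the next nine queue, and the twelfth is rejected: the same shape as the 4*advMaxStreams cutoff in the patch.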
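On the gRPC side (CVE-2023-44487, Rapid Reset against grpc-go), the enforcement point is the atomicSemaphore appended to server.go: serveStreams acquires one slot per incoming stream before handing it to a worker, so a client can no longer open and reset streams faster than the server retires them. The type is small enough to lift out; this standalone reproduction (Go 1.19+ for atomic.Int64; the scaffolding in main is mine) shows the acquire/release pattern.

package main

import (
    "fmt"
    "sync"
    "sync/atomic"
)

// Same blocking, counting semaphore the patch appends to grpc's server.go:
// acquire is called synchronously from the stream-accept loop; release may
// be called asynchronously from handler goroutines.
type atomicSemaphore struct {
    n    atomic.Int64
    wait chan struct{}
}

func (q *atomicSemaphore) acquire() {
    if q.n.Add(-1) < 0 {
        <-q.wait // out of quota: block until a release happens
    }
}

func (q *atomicSemaphore) release() {
    if q.n.Add(1) <= 0 {
        q.wait <- struct{}{} // an acquire was waiting on us: unblock it
    }
}

func newHandlerQuota(n uint32) *atomicSemaphore {
    a := &atomicSemaphore{wait: make(chan struct{}, 1)}
    a.n.Store(int64(n))
    return a
}

func main() {
    quota := newHandlerQuota(2) // stand-in for s.opts.maxConcurrentStreams
    var wg sync.WaitGroup
    for i := 0; i < 6; i++ {
        quota.acquire() // at most 2 streams are in flight past this point
        wg.Add(1)
        go func(id int) {
            defer wg.Done()
            defer quota.release()
            fmt.Println("stream", id, "handled")
        }(i)
    }
    wg.Wait()
}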
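The server-worker rework in the same patch is what makes that quota usable: the per-worker channels and round-robin counter are replaced by one shared chan func(), so any idle worker can pick up the closure f, and a saturated pool falls back to go f() instead of blocking. A sketch of that dispatch shape under the same assumptions (the pool type and names are mine, not grpc-go's):

package main

import (
    "fmt"
    "sync"
)

// Minimal model of the reworked worker pool: one shared channel of closures
// replaces the per-worker channels and the round-robin counter.
type pool struct {
    work chan func()
}

func newPool(workers int) *pool {
    p := &pool{work: make(chan func())}
    for i := 0; i < workers; i++ {
        go func() {
            for f := range p.work {
                f()
            }
        }()
    }
    return p
}

// dispatch mirrors the patched serveStreams: hand the closure to a warm
// worker if one is idle, otherwise fall back to a fresh goroutine.
func (p *pool) dispatch(f func()) {
    select {
    case p.work <- f:
    default:
        go f() // all stream workers busy: default code path
    }
}

func main() {
    p := newPool(2)
    var wg sync.WaitGroup
    for i := 0; i < 5; i++ {
        wg.Add(1)
        i := i
        p.dispatch(func() {
            defer wg.Done()
            fmt.Println("stream", i, "dispatched")
        })
    }
    wg.Wait()
}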
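Finally, the runtime.go hunk bundled into the CVE-2023-44487 backport moves the backoff sleep out of the critical section, so a single hot error loop no longer blocks every other HandleError caller on the mutex. The pattern is easiest to see in isolation; the type below is re-declared only to make the snippet self-contained.

package main

import (
    "sync"
    "time"
)

// Re-declaration of the vendored type so the snippet stands alone.
type rudimentaryErrorBackoff struct {
    minPeriod         time.Duration
    lastErrorTimeLock sync.Mutex
    lastErrorTime     time.Time
}

// OnError holds the lock only long enough to read and update lastErrorTime;
// the sleep happens afterwards, so only the calling goroutine blocks.
func (r *rudimentaryErrorBackoff) OnError(error) {
    now := time.Now() // start the timer before acquiring the lock
    r.lastErrorTimeLock.Lock()
    d := now.Sub(r.lastErrorTime)
    r.lastErrorTime = time.Now()
    r.lastErrorTimeLock.Unlock()

    // A negative or zero duration makes time.Sleep return immediately.
    time.Sleep(r.minPeriod - d)
}

func main() {
    b := &rudimentaryErrorBackoff{minPeriod: 10 * time.Millisecond}
    for i := 0; i < 3; i++ {
        b.OnError(nil) // back-to-back calls pace themselves to minPeriod
    }
}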