
Commit 38520f6

mc-nv, rmccorm4, nv-kmcgill53, statiraju, nvda-mesharma
authored

Update default branch post 24.12 (#7918)

Co-authored-by: Ryan McCormick <[email protected]>
Co-authored-by: Kyle McGill <[email protected]>
Co-authored-by: Suman Tatiraju <[email protected]>
Co-authored-by: Meenakshi Sharma <[email protected]>
1 parent 48be9ce commit 38520f6

33 files changed: +138 −110 lines

Dockerfile.sdk  (+1 −1)

@@ -29,7 +29,7 @@
 #
 
 # Base image on the minimum Triton container
-ARG BASE_IMAGE=nvcr.io/nvidia/tritonserver:24.11-py3-min
+ARG BASE_IMAGE=nvcr.io/nvidia/tritonserver:24.12-py3-min
 
 ARG TRITON_CLIENT_REPO_SUBDIR=clientrepo
 ARG TRITON_PA_REPO_SUBDIR=perfanalyzerrepo

Dockerfile.win10.min  (+18 −14)

@@ -37,9 +37,9 @@ RUN choco install unzip -y
 #
 # Installing TensorRT
 #
-ARG TENSORRT_VERSION=10.4.0.26
+ARG TENSORRT_VERSION=10.7.0.23
 ARG TENSORRT_ZIP="TensorRT-${TENSORRT_VERSION}.Windows.win10.cuda-12.6.zip"
-ARG TENSORRT_SOURCE=https://developer.nvidia.com/downloads/compute/machine-learning/tensorrt/10.4.0/zip/TensorRT-10.4.0.26.Windows.win10.cuda-12.6.zip
+ARG TENSORRT_SOURCE=https://developer.nvidia.com/downloads/compute/machine-learning/tensorrt/10.7.0/zip/TensorRT-10.7.0.23.Windows.win10.cuda-12.6.zip
 # COPY ${TENSORRT_ZIP} /tmp/${TENSORRT_ZIP}
 ADD ${TENSORRT_SOURCE} /tmp/${TENSORRT_ZIP}
 RUN unzip /tmp/%TENSORRT_ZIP%

@@ -51,9 +51,9 @@ LABEL TENSORRT_VERSION="${TENSORRT_VERSION}"
 #
 # Installing cuDNN
 #
-ARG CUDNN_VERSION=9.4.0.58
+ARG CUDNN_VERSION=9.6.0.74
 ARG CUDNN_ZIP=cudnn-windows-x86_64-${CUDNN_VERSION}_cuda12-archive.zip
-ARG CUDNN_SOURCE=https://developer.download.nvidia.com/compute/cudnn/redist/cudnn/windows-x86_64/cudnn-windows-x86_64-9.4.0.58_cuda12-archive.zip
+ARG CUDNN_SOURCE=https://developer.download.nvidia.com/compute/cudnn/redist/cudnn/windows-x86_64/cudnn-windows-x86_64-9.6.0.74_cuda12-archive.zip
 ADD ${CUDNN_SOURCE} /tmp/${CUDNN_ZIP}
 RUN unzip /tmp/%CUDNN_ZIP%
 RUN move cudnn-* cudnn

@@ -75,20 +75,19 @@ RUN choco install git docker unzip -y
 #
 # Installing python
 #
-ARG PYTHON_VERSION=3.10.11
+ARG PYTHON_VERSION=3.12.3
 ARG PYTHON_SOURCE=https://www.python.org/ftp/python/${PYTHON_VERSION}/python-${PYTHON_VERSION}-amd64.exe
 ADD ${PYTHON_SOURCE} python-${PYTHON_VERSION}-amd64.exe
 RUN python-%PYTHON_VERSION%-amd64.exe /quiet InstallAllUsers=1 PrependPath=1 Include_doc=0 TargetDir="C:\python%PYTHON_VERSION%"
 RUN mklink "C:\python%PYTHON_VERSION%\python3.exe" "C:\python%PYTHON_VERSION%\python.exe"
 RUN pip install --upgrade wheel setuptools docker
-RUN pip install grpcio-tools psutil
 
 LABEL PYTHON_VERSION=${PYTHON_VERSION}
 
 #
 # Installing CMake
 #
-ARG CMAKE_VERSION=3.30.0
+ARG CMAKE_VERSION=3.30.5
 RUN pip install cmake==%CMAKE_VERSION%
 
 ENV CMAKE_TOOLCHAIN_FILE /vcpkg/scripts/buildsystems/vcpkg.cmake

@@ -101,14 +100,16 @@ LABEL CMAKE_VERSION=${CMAKE_VERSION}
 #
 # Installing Visual Studio BuildTools: VS17 2022
 #
-ARG BUILDTOOLS_VERSION=17.10.35201.131
 # Download collect.exe in case of an install failure.
 ADD https://aka.ms/vscollect.exe "C:\tmp\collect.exe"
 
 # Use the latest release channel. For more control, specify the location of an internal layout.
 # Download the Build Tools bootstrapper.
 # ARG BUILD_TOOLS_SOURCE=https://aka.ms/vs/17/release/vs_buildtools.exe
-ARG BUILD_TOOLS_SOURCE=https://download.visualstudio.microsoft.com/download/pr/28626b4b-f88f-4b55-a0cf-f3eaa2c643fb/e6c43d4dfb36338d954cdb3ad9010ab2a479e712088f4f6b016eadcc721bab28/vs_BuildTools.exe
+
+ARG BUILDTOOLS_VERSION=17.12.35506.116
+ARG BUILD_TOOLS_SOURCE=https://download.visualstudio.microsoft.com/download/pr/5536698c-711c-4834-876f-2817d31a2ef2/58894fc272e86d3c3a6d85bf3a1df1e5a0685be8b9ab65d9f3cc5c2a8c6921cc/vs_BuildTools.exe
+
 ADD ${BUILD_TOOLS_SOURCE} vs_buildtools.exe
 # Install Build Tools with the Microsoft.VisualStudio.Workload.VCTools workload, including recommended.
 ARG VS_INSTALL_PATH_WP="C:\BuildTools"

@@ -149,12 +150,13 @@ WORKDIR /
 # Installing CUDA
 #
 ARG CUDA_MAJOR=12
-ARG CUDA_MINOR=5
-ARG CUDA_PATCH=1
+ARG CUDA_MINOR=6
+ARG CUDA_PATCH=3
 ARG CUDA_VERSION=${CUDA_MAJOR}.${CUDA_MINOR}.${CUDA_PATCH}
 ARG CUDA_PACKAGES="nvcc_${CUDA_MAJOR}.${CUDA_MINOR} \
 cudart_${CUDA_MAJOR}.${CUDA_MINOR} \
 nvml_dev_${CUDA_MAJOR}.${CUDA_MINOR} \
+nvrtc_${CUDA_MAJOR}.${CUDA_MINOR} nvrtc_dev_${CUDA_MAJOR}.${CUDA_MINOR} \
 cublas_${CUDA_MAJOR}.${CUDA_MINOR} cublas_dev_${CUDA_MAJOR}.${CUDA_MINOR} \
 cufft_${CUDA_MAJOR}.${CUDA_MINOR} cufft_dev_${CUDA_MAJOR}.${CUDA_MINOR} \
 curand_${CUDA_MAJOR}.${CUDA_MINOR} curand_dev_${CUDA_MAJOR}.${CUDA_MINOR} \

@@ -175,21 +177,23 @@ RUN copy "%CUDA_INSTALL_ROOT_WP%\extras\visual_studio_integration\MSBuildExtensi
 
 RUN setx PATH "%CUDA_INSTALL_ROOT_WP%\bin;%PATH%"
 
-ARG CUDNN_VERSION=9.4.0.58
+ENV CUDA_VERSION=${CUDA_VERSION}
+LABEL CUDA_VERSION="${CUDA_VERSION}"
+
+ARG CUDNN_VERSION=9.6.0.74
 ENV CUDNN_VERSION ${CUDNN_VERSION}
 COPY --from=dependency_base /cudnn /cudnn
 RUN copy cudnn\bin\cudnn*.dll "%CUDA_INSTALL_ROOT_WP%\bin\."
 RUN copy cudnn\lib\x64\cudnn*.lib "%CUDA_INSTALL_ROOT_WP%\lib\x64\."
 RUN copy cudnn\include\cudnn*.h "%CUDA_INSTALL_ROOT_WP%\include\."
 LABEL CUDNN_VERSION="${CUDNN_VERSION}"
 
-ARG TENSORRT_VERSION=10.4.0.26
+ARG TENSORRT_VERSION=10.7.0.23
 ENV TRT_VERSION ${TENSORRT_VERSION}
 COPY --from=dependency_base /TensorRT /TensorRT
 RUN setx PATH "c:\TensorRT\lib;%PATH%"
 LABEL TENSORRT_VERSION="${TENSORRT_VERSION}"
 
-LABEL CUDA_VERSION="${CUDA_VERSION}"
 # It is important that the entrypoint initialize VisualStudio
 # environment otherwise the build will fail. Also set
 # CMAKE_TOOLCHAIN_FILE and VCPKG_TARGET_TRIPLET so

README.md  (+5 −5)

@@ -32,8 +32,8 @@
 
 >[!WARNING]
 >You are currently on the `main` branch which tracks under-development progress
->towards the next release. The current release is version [2.52.0](https://github.com/triton-inference-server/server/releases/latest)
->and corresponds to the 24.11 container release on NVIDIA GPU Cloud (NGC).
+>towards the next release. The current release is version [2.53.0](https://github.com/triton-inference-server/server/releases/latest)
+>and corresponds to the 24.12 container release on NVIDIA GPU Cloud (NGC).
 
 Triton Inference Server is an open source inference serving software that
 streamlines AI inferencing. Triton enables teams to deploy any AI model from

@@ -91,16 +91,16 @@ Inference Server with the
 
 ```bash
 # Step 1: Create the example model repository
-git clone -b r24.11 https://github.com/triton-inference-server/server.git
+git clone -b r24.12 https://github.com/triton-inference-server/server.git
 cd server/docs/examples
 ./fetch_models.sh
 
 # Step 2: Launch triton from the NGC Triton container
-docker run --gpus=1 --rm --net=host -v ${PWD}/model_repository:/models nvcr.io/nvidia/tritonserver:24.11-py3 tritonserver --model-repository=/models
+docker run --gpus=1 --rm --net=host -v ${PWD}/model_repository:/models nvcr.io/nvidia/tritonserver:24.12-py3 tritonserver --model-repository=/models
 
 # Step 3: Sending an Inference Request
 # In a separate console, launch the image_client example from the NGC Triton SDK container
-docker run -it --rm --net=host nvcr.io/nvidia/tritonserver:24.11-py3-sdk
+docker run -it --rm --net=host nvcr.io/nvidia/tritonserver:24.12-py3-sdk
 /workspace/install/bin/image_client -m densenet_onnx -c 3 -s INCEPTION /workspace/images/mug.jpg
 
 # Inference should return the following
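
(Not part of the diff above.) A minimal readiness check between Step 2 and Step 3, assuming the server container from Step 2 is running with default ports; `/v2/health/ready` and `/v2/models/<name>` are Triton's standard HTTP/REST endpoints, and `densenet_onnx` is one of the models fetched in Step 1:

```bash
# Poll until the 24.12 server reports ready, then confirm the example model
# is loaded before running image_client.
until curl -sf localhost:8000/v2/health/ready; do sleep 1; done
echo "server is ready"
curl -s localhost:8000/v2/models/densenet_onnx | python3 -m json.tool
```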

TRITON_VERSION  (+1 −1)

@@ -1 +1 @@
-2.53.0dev
+2.54.0dev

build.py  (+10 −4)

@@ -71,10 +71,10 @@
 #
 
 DEFAULT_TRITON_VERSION_MAP = {
-    "release_version": "2.53.0dev",
-    "triton_container_version": "24.12dev",
-    "upstream_container_version": "24.11",
-    "ort_version": "1.19.2",
+    "release_version": "2.54.0dev",
+    "triton_container_version": "25.01dev",
+    "upstream_container_version": "24.12",
+    "ort_version": "1.20.1",
     "ort_openvino_version": "2024.4.0",
     "standalone_openvino_version": "2024.4.0",
     "dcgm_version": "3.3.6",
@@ -1238,6 +1238,8 @@ def create_dockerfile_linux(
 find /opt/tritonserver/python -maxdepth 1 -type f -name \\
 "tritonfrontend-*.whl" | xargs -I {} pip install --upgrade {}[all]
 
+RUN pip3 install -r python/openai/requirements.txt
+
 """
     if not FLAGS.no_core_build:
         # Add feature labels for SageMaker endpoint

@@ -1934,6 +1936,10 @@ def core_build(
         os.path.join(install_dir, "include", "triton", "core"),
     )
 
+    cmake_script.cpdir(
+        os.path.join(repo_dir, "python", "openai"), os.path.join(install_dir, "python")
+    )
+
     cmake_script.cp(os.path.join(repo_dir, "LICENSE"), install_dir)
     cmake_script.cp(os.path.join(repo_dir, "TRITON_VERSION"), install_dir)
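
A version bump like this touches the same string in several files (TRITON_VERSION, the map above, the README). A hypothetical post-bump sanity check, run from the repository root — this script is not part of the repo; it simply greps for the map entry shown above:

```bash
# Hypothetical helper: confirm build.py's release_version matches TRITON_VERSION.
expected="$(cat TRITON_VERSION)"
if grep -q "\"release_version\": \"${expected}\"" build.py; then
    echo "build.py agrees with TRITON_VERSION (${expected})"
else
    echo "mismatch: update DEFAULT_TRITON_VERSION_MAP in build.py" >&2
fi
```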

deploy/aws/values.yaml  (+1 −1)

@@ -27,7 +27,7 @@
 replicaCount: 1
 
 image:
-  imageName: nvcr.io/nvidia/tritonserver:24.11-py3
+  imageName: nvcr.io/nvidia/tritonserver:24.12-py3
   pullPolicy: IfNotPresent
   modelRepositoryPath: s3://triton-inference-server-repository/model_repository
   numGpus: 1

deploy/fleetcommand/Chart.yaml  (+1 −1)

@@ -26,7 +26,7 @@
 
 apiVersion: v1
 # appVersion is the Triton version; update when changing release
-appVersion: "2.51.0"
+appVersion: "2.53.0"
 description: Triton Inference Server (Fleet Command)
 name: triton-inference-server
 # version is the Chart version; update when changing anything in the chart

deploy/fleetcommand/values.yaml  (+3 −3)

@@ -27,7 +27,7 @@
 replicaCount: 1
 
 image:
-  imageName: nvcr.io/nvidia/tritonserver:24.11-py3
+  imageName: nvcr.io/nvidia/tritonserver:24.12-py3
   pullPolicy: IfNotPresent
   numGpus: 1
   serverCommand: tritonserver

@@ -47,13 +47,13 @@ image:
   #
   # To set model control mode, uncomment and configure below
   # TODO: Fix the following url, it is invalid
-  # See https://github.com/triton-inference-server/server/blob/r24.11/docs/model_management.md
+  # See https://github.com/triton-inference-server/server/blob/r24.12/docs/model_management.md
   # for more details
   #- --model-control-mode=explicit|poll|none
   #
   # Additional server args
   #
-  # see https://github.com/triton-inference-server/server/blob/r24.11/README.md
+  # see https://github.com/triton-inference-server/server/blob/r24.12/README.md
   # for more details
 
 service:
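
For context on the commented-out `--model-control-mode` line in the hunk above: the chart passes such options straight to `tritonserver`. An illustrative standalone invocation (the model name is a placeholder) showing explicit mode, where nothing is loaded at startup except the models named on the command line:

```bash
# Explicit model control: only --load-model models are loaded at startup;
# later loads/unloads go through Triton's model control endpoints.
tritonserver --model-repository=/models \
             --model-control-mode=explicit \
             --load-model=densenet_onnx
```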

deploy/gcp/values.yaml  (+1 −1)

@@ -27,7 +27,7 @@
 replicaCount: 1
 
 image:
-  imageName: nvcr.io/nvidia/tritonserver:24.11-py3
+  imageName: nvcr.io/nvidia/tritonserver:24.12-py3
   pullPolicy: IfNotPresent
   modelRepositoryPath: gs://triton-inference-server-repository/model_repository
   numGpus: 1

deploy/gke-marketplace-app/benchmark/perf-analyzer-script/triton_client.yaml  (+1 −1)

@@ -33,7 +33,7 @@ metadata:
   namespace: default
 spec:
   containers:
-  - image: nvcr.io/nvidia/tritonserver:24.11-py3-sdk
+  - image: nvcr.io/nvidia/tritonserver:24.12-py3-sdk
     imagePullPolicy: Always
     name: nv-triton-client
     securityContext:

deploy/gke-marketplace-app/server-deployer/build_and_push.sh  (+3 −3)

@@ -27,9 +27,9 @@
 
 export REGISTRY=gcr.io/$(gcloud config get-value project | tr ':' '/')
 export APP_NAME=tritonserver
-export MAJOR_VERSION=2.51
-export MINOR_VERSION=2.51.0
-export NGC_VERSION=24.11-py3
+export MAJOR_VERSION=2.53
+export MINOR_VERSION=2.53.0
+export NGC_VERSION=24.12-py3
 
 docker pull nvcr.io/nvidia/$APP_NAME:$NGC_VERSION

deploy/gke-marketplace-app/server-deployer/chart/triton/Chart.yaml  (+2 −2)

@@ -25,7 +25,7 @@
 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
 apiVersion: v1
-appVersion: "2.51"
+appVersion: "2.53"
 description: Triton Inference Server
 name: triton-inference-server
-version: 2.51.0
+version: 2.53.0

deploy/gke-marketplace-app/server-deployer/chart/triton/values.yaml  (+3 −3)

@@ -31,14 +31,14 @@ maxReplicaCount: 3
 tritonProtocol: HTTP
 # HPA GPU utilization autoscaling target
 HPATargetAverageValue: 85
-modelRepositoryPath: gs://triton_sample_models/24.11
-publishedVersion: '2.51.0'
+modelRepositoryPath: gs://triton_sample_models/24.12
+publishedVersion: '2.53.0'
 gcpMarketplace: true
 
 image:
   registry: gcr.io
   repository: nvidia-ngc-public/tritonserver
-  tag: 24.11-py3
+  tag: 24.12-py3
   pullPolicy: IfNotPresent
 # modify the model repository here to match your GCP storage bucket
 numGpus: 1

deploy/gke-marketplace-app/server-deployer/data-test/schema.yaml  (+1 −1)

@@ -27,7 +27,7 @@
 x-google-marketplace:
   schemaVersion: v2
   applicationApiVersion: v1beta1
-  publishedVersion: '2.51.0'
+  publishedVersion: '2.53.0'
   publishedVersionMetadata:
     releaseNote: >-
       Initial release.

deploy/gke-marketplace-app/server-deployer/schema.yaml  (+2 −2)

@@ -27,7 +27,7 @@
 x-google-marketplace:
   schemaVersion: v2
   applicationApiVersion: v1beta1
-  publishedVersion: '2.51.0'
+  publishedVersion: '2.53.0'
   publishedVersionMetadata:
     releaseNote: >-
       Initial release.

@@ -89,7 +89,7 @@ properties:
   modelRepositoryPath:
     type: string
     title: Bucket where models are stored. Please make sure the user/service account to create the GKE app has permission to this GCS bucket. Read Triton documentation on configs and formatting details, supporting TensorRT, TensorFlow, Pytorch, Onnx ... etc.
-    default: gs://triton_sample_models/24.11
+    default: gs://triton_sample_models/24.12
   image.ldPreloadPath:
     type: string
     title: Leave this empty by default. Triton allows users to create custom layers for backend such as TensorRT plugin or Tensorflow custom ops, the compiled shared library must be provided via LD_PRELOAD environment variable.

deploy/gke-marketplace-app/trt-engine/README.md  (+3 −3)

@@ -33,7 +33,7 @@
 ```
 docker run --gpus all -it --network host \
   --shm-size=1g --ulimit memlock=-1 --ulimit stack=67108864 \
-  -v ~:/scripts nvcr.io/nvidia/tensorrt:24.11-py3
+  -v ~:/scripts nvcr.io/nvidia/tensorrt:24.12-py3
 
 pip install onnx six torch tf2onnx tensorflow

@@ -57,7 +57,7 @@ mkdir -p engines
 
 python3 builder.py -m models/fine-tuned/bert_tf_ckpt_large_qa_squad2_amp_128_v19.03.1/model.ckpt -o engines/bert_large_int8_bs1_s128.engine -b 1 -s 128 -c models/fine-tuned/bert_tf_ckpt_large_qa_squad2_amp_128_v19.03.1/ -v models/fine-tuned/bert_tf_ckpt_large_qa_squad2_amp_128_v19.03.1/vocab.txt --int8 --fp16 --strict --calib-num 1 -iln -imh
 
-gsutil cp bert_large_int8_bs1_s128.engine gs://triton_sample_models/24.11/bert/1/model.plan
+gsutil cp bert_large_int8_bs1_s128.engine gs://triton_sample_models/24.12/bert/1/model.plan
 ```
 
-For each Triton upgrade, container version used to generate the model, and the model path in GCS `gs://triton_sample_models/24.11/` should be updated accordingly with the correct version.
+For each Triton upgrade, container version used to generate the model, and the model path in GCS `gs://triton_sample_models/24.12/` should be updated accordingly with the correct version.
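
A quick, optional check after the copy — assuming `gsutil` is already authenticated — that the engine landed in the 24.12 path the deployer's `values.yaml` and `schema.yaml` now default to:

```bash
# gsutil ls exits non-zero if the object is missing from the expected path.
gsutil ls gs://triton_sample_models/24.12/bert/1/model.plan
```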

deploy/k8s-onprem/values.yaml  (+1 −1)

@@ -29,7 +29,7 @@ tags:
   loadBalancing: true
 
 image:
-  imageName: nvcr.io/nvidia/tritonserver:24.11-py3
+  imageName: nvcr.io/nvidia/tritonserver:24.12-py3
   pullPolicy: IfNotPresent
   modelRepositoryServer: < Replace with the IP Address of your file server >
   modelRepositoryPath: /srv/models

deploy/oci/values.yaml  (+1 −1)

@@ -27,7 +27,7 @@
 replicaCount: 1
 
 image:
-  imageName: nvcr.io/nvidia/tritonserver:24.11-py3
+  imageName: nvcr.io/nvidia/tritonserver:24.12-py3
   pullPolicy: IfNotPresent
   modelRepositoryPath: s3://https://<OCI_NAMESPACE>.compat.objectstorage.<OCI_REGION>.oraclecloud.com:443/triton-inference-server-repository
   numGpus: 1

docs/conf.py  (+1 −1)

@@ -177,7 +177,7 @@
     "switcher": {
         # use for local testing
        # "json_url": "http://localhost:8000/_static/switcher.json",
-        "json_url": "https://docs.nvidia.com/deeplearning/triton-inference-server/user-guide/_static/switcher.json",
+        "json_url": "https://docs.nvidia.com/deeplearning/triton-inference-server/user-guide/docs/_static/switcher.json",
        "version_match": one_before if "dev" in version_long else version_short,
     },
     "navbar_start": ["navbar-logo", "version-switcher"],

docs/customization_guide/build.md  (+3 −3)

@@ -173,7 +173,7 @@ $ ./build.py ... --repo-tag=common:<container tag> --repo-tag=core:<container ta
 
 If you are building on a release branch then `<container tag>` will
 default to the branch name. For example, if you are building on the
-r24.11 branch, `<container tag>` will default to r24.11. If you are
+r24.12 branch, `<container tag>` will default to r24.12. If you are
 building on any other branch (including the *main* branch) then
 `<container tag>` will default to "main". Therefore, you typically do
 not need to provide `<container tag>` at all (nor the preceding

@@ -334,8 +334,8 @@ python build.py --cmake-dir=<path/to/repo>/build --build-dir=/tmp/citritonbuild
 If you are building on *main* branch then `<container tag>` will
 default to "main". If you are building on a release branch then
 `<container tag>` will default to the branch name. For example, if you
-are building on the r24.11 branch, `<container tag>` will default to
-r24.11. Therefore, you typically do not need to provide `<container
+are building on the r24.12 branch, `<container tag>` will default to
+r24.12. Therefore, you typically do not need to provide `<container
 tag>` at all (nor the preceding colon). You can use a different
 `<container tag>` for a component to instead use the corresponding
 branch/tag in the build. For example, if you have a branch called
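
As a hedged illustration of the passage above, an r24.12 build that pins the component repos explicitly instead of relying on the branch-name default; the `common`, `core`, `backend`, and `thirdparty` tags follow the `--repo-tag=<component>:<container tag>` form shown earlier in this guide, and all other build options are omitted:

```bash
# Illustrative only: pin every component repo to the r24.12 branch.
./build.py --repo-tag=common:r24.12 --repo-tag=core:r24.12 \
           --repo-tag=backend:r24.12 --repo-tag=thirdparty:r24.12
```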
