diff --git a/Dockerfile b/Dockerfile/Dockerfile-ubuntu22_cuda12.3.2 similarity index 88% rename from Dockerfile rename to Dockerfile/Dockerfile-ubuntu22_cuda12.3.2 index ccc6e51..552b42c 100644 --- a/Dockerfile +++ b/Dockerfile/Dockerfile-ubuntu22_cuda12.3.2 @@ -1,6 +1,6 @@ -ARG DOCKER_FROM=nvidia/cuda:12.3.2-runtime-ubuntu22.04 -FROM ${DOCKER_FROM} +FROM nvidia/cuda:12.3.2-runtime-ubuntu22.04 +# Here, we are using CUDNN8 (devel) -- CUDNN9 is also compatible for CUDA 12.3 # Adapted from https://gitlab.com/nvidia/container-images/cuda/-/blob/master/dist/12.2.2/ubuntu2204/devel/cudnn8/Dockerfile ENV NV_CUDNN_VERSION=8.9.7.29 ENV NV_CUDNN_PACKAGE_NAME="libcudnn8" @@ -15,6 +15,8 @@ RUN apt-get update && apt-get install -y --no-install-recommends \ && apt-mark hold ${NV_CUDNN_PACKAGE_NAME} \ && rm -rf /var/lib/apt/lists/* +ARG BASE_DOCKER_FROM=nvidia/cuda:12.3.2-runtime-ubuntu22.04 + ##### Base # Install system packages @@ -49,8 +51,11 @@ RUN apt-get update -y --fix-missing \ python3-venv \ git \ sudo \ + # Adding libGL (used by a few common nodes) libgl1 \ libglib2.0-0 \ + # Adding FFMPEG (for video generation workflow) + ffmpeg \ && apt-get clean ENV BUILD_FILE="/etc/image_base.txt" diff --git a/Dockerfile/Dockerfile-ubuntu22_cuda12.4.1 b/Dockerfile/Dockerfile-ubuntu22_cuda12.4.1 new file mode 100644 index 0000000..e3443a1 --- /dev/null +++ b/Dockerfile/Dockerfile-ubuntu22_cuda12.4.1 @@ -0,0 +1,89 @@ +FROM nvidia/cuda:12.4.1-runtime-ubuntu22.04 + +# CUDNN9 "runtime" package +# Adapted from https://gitlab.com/nvidia/container-images/cuda/-/blob/master/dist/12.4.1/ubuntu2204/runtime/cudnn/Dockerfile +ENV NV_CUDNN_VERSION=9.1.0.70-1 +ENV NV_CUDNN_PACKAGE_NAME=libcudnn9-cuda-12 +ENV NV_CUDNN_PACKAGE="libcudnn9-cuda-12=${NV_CUDNN_VERSION}" + +LABEL com.nvidia.cudnn.version="${NV_CUDNN_VERSION}" + +RUN apt-get update && apt-get install -y --no-install-recommends \ + ${NV_CUDNN_PACKAGE} \ + && apt-mark hold ${NV_CUDNN_PACKAGE_NAME} \ + && rm -rf /var/lib/apt/lists/* + +ARG BASE_DOCKER_FROM=nvidia/cuda:12.4.1-runtime-ubuntu22.04 + +##### Base + +# Install system packages +ENV DEBIAN_FRONTEND=noninteractive +RUN apt-get update -y --fix-missing\ + && apt-get install -y \ + apt-utils \ + locales \ + ca-certificates \ + && apt-get upgrade -y \ + && apt-get clean + +# UTF-8 +RUN localedef -i en_US -c -f UTF-8 -A /usr/share/locale/locale.alias en_US.UTF-8 +ENV LANG=en_US.utf8 +ENV LC_ALL=C + +# Install needed packages +RUN apt-get update -y --fix-missing \ + && apt-get upgrade -y \ + && apt-get install -y \ + build-essential \ + python3-dev \ + unzip \ + wget \ + zip \ + zlib1g \ + zlib1g-dev \ + gnupg \ + rsync \ + python3-pip \ + python3-venv \ + git \ + sudo \ + # Adding libGL (used by a few common nodes) + libgl1 \ + libglib2.0-0 \ + # Adding FFMPEG (for video generation workflow) + ffmpeg \ + && apt-get clean + +ENV BUILD_FILE="/etc/image_base.txt" +ARG BASE_DOCKER_FROM +RUN echo "DOCKER_FROM: ${BASE_DOCKER_FROM}" | tee ${BUILD_FILE} +RUN echo "CUDNN: ${NV_CUDNN_PACKAGE_NAME} (${NV_CUDNN_VERSION})" | tee -a ${BUILD_FILE} + +ARG BUILD_BASE="unknown" +LABEL comfyui-nvidia-docker-build-from=${BUILD_BASE} +RUN it="/etc/build_base.txt"; echo ${BUILD_BASE} > $it && chmod 555 $it + +##### ComfyUI preparation +# The comfy user will have UID 1024 and GID 1024 +ENV COMFYUSER_DIR="/comfy" +RUN echo '%sudo ALL=(ALL) NOPASSWD:ALL' >> /etc/sudoers \ + && useradd -u 1024 -U -d ${COMFYUSER_DIR} -s /bin/bash -m comfy \ + && usermod -G users comfy \ + && adduser comfy sudo \ + && test -d ${COMFYUSER_DIR} +RUN 
it="/etc/comfyuser_dir"; echo ${COMFYUSER_DIR} > $it && chmod 555 $it + +ENV NVIDIA_VISIBLE_DEVICES=all + +EXPOSE 8188 + +USER comfy +WORKDIR ${COMFYUSER_DIR} +COPY --chown=comfy:comfy --chmod=555 init.bash comfyui-nvidia_init.bash + +ARG BUILD_DATE="unknown" +LABEL comfyui-nvidia-docker-build=${BUILD_DATE} + +CMD [ "./comfyui-nvidia_init.bash" ] diff --git a/Dockerfile/Dockerfile-ubuntu24_cuda12.5.1 b/Dockerfile/Dockerfile-ubuntu24_cuda12.5.1 new file mode 100644 index 0000000..0c0b3a3 --- /dev/null +++ b/Dockerfile/Dockerfile-ubuntu24_cuda12.5.1 @@ -0,0 +1,88 @@ +FROM nvidia/cuda:12.5.1-runtime-ubuntu24.04 + +# Extended from https://gitlab.com/nvidia/container-images/cuda/-/blob/master/dist/12.5.1/ubuntu2404/runtime/Dockerfile +ENV NV_CUDNN_VERSION=9.3.0.75-1 +ENV NV_CUDNN_PACKAGE_NAME="libcudnn9" +ENV NV_CUDA_ADD=cuda-12 +ENV NV_CUDNN_PACKAGE="$NV_CUDNN_PACKAGE_NAME-$NV_CUDA_ADD=$NV_CUDNN_VERSION" + +LABEL com.nvidia.cudnn.version="${NV_CUDNN_VERSION}" + +RUN apt-get update && apt-get install -y --no-install-recommends \ + ${NV_CUDNN_PACKAGE} \ + && apt-mark hold ${NV_CUDNN_PACKAGE_NAME}-${NV_CUDA_ADD} + +ARG BASE_DOCKER_FROM=nvidia/cuda:12.5.1-runtime-ubuntu24.04 + +##### Base + +# Install system packages +ENV DEBIAN_FRONTEND=noninteractive +RUN apt-get update -y --fix-missing\ + && apt-get install -y \ + apt-utils \ + locales \ + ca-certificates \ + && apt-get upgrade -y \ + && apt-get clean + +# UTF-8 +RUN localedef -i en_US -c -f UTF-8 -A /usr/share/locale/locale.alias en_US.UTF-8 +ENV LANG=en_US.utf8 +ENV LC_ALL=C + +# Install needed packages +RUN apt-get update -y --fix-missing \ + && apt-get upgrade -y \ + && apt-get install -y \ + build-essential \ + python3-dev \ + unzip \ + wget \ + zip \ + zlib1g \ + zlib1g-dev \ + gnupg \ + rsync \ + python3-pip \ + python3-venv \ + git \ + sudo \ + # Adding libGL (used by a few common nodes) + libgl1 \ + libglib2.0-0 \ + # Adding FFMPEG (for video generation workflow) + ffmpeg \ + && apt-get clean + +ENV BUILD_FILE="/etc/image_base.txt" +ARG BASE_DOCKER_FROM +RUN echo "DOCKER_FROM: ${BASE_DOCKER_FROM}" | tee ${BUILD_FILE} +RUN echo "CUDNN: ${NV_CUDNN_PACKAGE_NAME} (${NV_CUDNN_VERSION})" | tee -a ${BUILD_FILE} + +ARG BUILD_BASE="unknown" +LABEL comfyui-nvidia-docker-build-from=${BUILD_BASE} +RUN it="/etc/build_base.txt"; echo ${BUILD_BASE} > $it && chmod 555 $it + +##### ComfyUI preparation +# The comfy user will have UID 1024 and GID 1024 +ENV COMFYUSER_DIR="/comfy" +RUN echo '%sudo ALL=(ALL) NOPASSWD:ALL' >> /etc/sudoers \ + && useradd -u 1024 -U -d ${COMFYUSER_DIR} -s /bin/bash -m comfy \ + && usermod -G users comfy \ + && adduser comfy sudo \ + && test -d ${COMFYUSER_DIR} +RUN it="/etc/comfyuser_dir"; echo ${COMFYUSER_DIR} > $it && chmod 555 $it + +ENV NVIDIA_VISIBLE_DEVICES=all + +EXPOSE 8188 + +USER comfy +WORKDIR ${COMFYUSER_DIR} +COPY --chown=comfy:comfy --chmod=555 init.bash comfyui-nvidia_init.bash + +ARG BUILD_DATE="unknown" +LABEL comfyui-nvidia-docker-build=${BUILD_DATE} + +CMD [ "./comfyui-nvidia_init.bash" ] diff --git a/Makefile b/Makefile index ed834b5..bb8a88b 100644 --- a/Makefile +++ b/Makefile @@ -2,90 +2,121 @@ SHELL := /bin/bash .PHONY: all DOCKER_CMD=docker - -DOCKER_FROM=nvidia/cuda:12.3.2-runtime-ubuntu22.04 - -BUILD_DATE=$(shell printf '%(%Y%m%d_%H%M)T' -1) -BUILD_BASE=ubuntu22_cuda12.3 - -COMFYUI_CONTAINER_NAME=comfyui-nvidia-docker -BUILD_TAG=${BUILD_BASE}-latest -NAMED_BUILD=${COMFYUI_CONTAINER_NAME}:${BUILD_TAG} -NAMED_BUILD_LATEST=${COMFYUI_CONTAINER_NAME}:latest - -DOCKERFILE=Dockerfile 
DOCKER_PRE="NVIDIA_VISIBLE_DEVICES=all" +DOCKER_BUILD_ARGS= +##DOCKER_BUILD_ARGS="--no-cache" +#BUILD_DATE=$(shell printf '%(%Y%m%d)T' -1) +BUILD_DATE=20250116 -DOCKER_BUILD_ARGS= -#DOCKER_BUILD_ARGS="--no-cache" +COMFYUI_CONTAINER_NAME=comfyui-nvidia-docker -# Set to False to make it less verbose -VERBOSE_PRINT=True +COMPONENTS_DIR=components +DOCKERFILE_DIR=Dockerfile -##### +# Get the list of all the base- files in COMPONENTS_DIR +DOCKER_ALL=$(shell ls -1 ${COMPONENTS_DIR}/base-* | perl -pe 's%^.+/base-%%' | sort) all: - @echo "** Available Docker images to be built (make targets):" - @echo "latest: builds ${NAMED_BUILD} and tags it as ${NAMED_BUILD_LATEST}" + @if [ `echo ${DOCKER_ALL} | wc -w` -eq 0 ]; then echo "No image candidates to build"; exit 1; fi + @echo "Available ${COMFYUI_CONTAINER_NAME} ${DOCKER_CMD} images to be built (make targets):" + @echo -n " "; echo ${DOCKER_ALL} | sed -e 's/ /\n /g' @echo "" - @echo "build: builds latest" - -##### latest + @echo "build: builds all" -build: - @make latest +build: ${DOCKER_ALL} +${DOCKERFILE_DIR}: + @mkdir -p ${DOCKERFILE_DIR} -latest: - @VAR_NT=${COMFYUI_CONTAINER_NAME}-${BUILD_TAG} USED_BUILD=${NAMED_BUILD} USED_BUILD_LATEST=${NAMED_BUILD_LATEST} make build_main_actual - - -build_main_actual: - @echo "== [${USED_BUILD}] ==" +${DOCKER_ALL}: ${DOCKERFILE_DIR} + @echo ""; echo ""; echo "===== Building ${COMFYUI_CONTAINER_NAME}:$@" + @cat ${COMPONENTS_DIR}/base-$@ > ${DOCKERFILE_DIR}/Dockerfile-$@ + @cat ${COMPONENTS_DIR}/part1-common >> ${DOCKERFILE_DIR}/Dockerfile-$@ + @$(eval VAR_NT="${COMFYUI_CONTAINER_NAME}-$@") @echo "-- Docker command to be run:" - @echo "BUILDX_EXPERIMENTAL=1 ${DOCKER_PRE} docker buildx debug --on=error build --progress plain --platform linux/amd64 ${DOCKER_BUILD_ARGS} \\" > ${VAR_NT}.cmd - @echo " --build-arg DOCKER_FROM=\"${DOCKER_FROM}\" \\" >> ${VAR_NT}.cmd - @echo " --build-arg BASE_DOCKER_FROM=\"${DOCKER_FROM}\" \\" >> ${VAR_NT}.cmd + @echo "docker buildx ls | grep -q ${COMFYUI_CONTAINER_NAME} && echo \"builder already exists -- to delete it, use: docker buildx rm ${COMFYUI_CONTAINER_NAME}\" || docker buildx create --name ${COMFYUI_CONTAINER_NAME}" > ${VAR_NT}.cmd + @echo "docker buildx use ${COMFYUI_CONTAINER_NAME} || exit 1" >> ${VAR_NT}.cmd + @echo "BUILDX_EXPERIMENTAL=1 ${DOCKER_PRE} docker buildx debug --on=error build --progress plain --platform linux/amd64 ${DOCKER_BUILD_ARGS} \\" >> ${VAR_NT}.cmd @echo " --build-arg BUILD_DATE=\"${BUILD_DATE}\" \\" >> ${VAR_NT}.cmd - @echo " --build-arg BUILD_BASE=\"${BUILD_BASE}\" \\" >> ${VAR_NT}.cmd - @echo " --tag=\"${USED_BUILD}\" \\" >> ${VAR_NT}.cmd - @echo " -f ${DOCKERFILE} \\" >> ${VAR_NT}.cmd + @echo " --build-arg BUILD_BASE=\"$@\" \\" >> ${VAR_NT}.cmd + @echo " --tag=\"${COMFYUI_CONTAINER_NAME}:$@\" \\" >> ${VAR_NT}.cmd + @echo " -f ${DOCKERFILE_DIR}/Dockerfile-$@ \\" >> ${VAR_NT}.cmd + @echo " --load \\" >> ${VAR_NT}.cmd @echo " ." 
>> ${VAR_NT}.cmd - @cat ${VAR_NT}.cmd | tee ${VAR_NT}.log.temp + @echo "" | tee -a ${VAR_NT}.log.temp + @echo "Press Ctrl+C within 5 seconds to cancel" + @for i in 5 4 3 2 1; do echo -n "$$i "; sleep 1; done; echo "" +# Actual build @chmod +x ./${VAR_NT}.cmd @script -a -e -c ./${VAR_NT}.cmd ${VAR_NT}.log.temp; exit "$${PIPESTATUS[0]}" - @mv ${VAR_NT}.log.temp ${VAR_NT}.log @rm -f ./${VAR_NT}.cmd - @${DOCKER_CMD} tag ${USED_BUILD} ${USED_BUILD_LATEST} +###### clean + +docker_tag_list: + @${DOCKER_CMD} images --filter "label=comfyui-nvidia-docker-build" +docker_buildx_rm: + @docker buildx rm ${COMFYUI_CONTAINER_NAME} -##### clean +# Get the list of all existing Docker images +DOCKERHUB_REPO="mmartial" +DOCKER_PRESENT=$(shell for i in ${DOCKER_ALL}; do image="${COMFYUI_CONTAINER_NAME}:$$i"; if docker images --format "{{.Repository}}:{{.Tag}}" | grep -v ${DOCKERHUB_REPO} | grep -q $$image; then echo $$image; fi; done) docker_rmi: - docker rmi --force ${NAMED_BUILD} ${DOCKERHUB_REPO}/${NAMED_BUILD} ${NAMED_BUILD_LATEST} ${DOCKERHUB_REPO}/${NAMED_BUILD_LATEST} + @echo -n "== Images to delete: " + @echo ${DOCKER_PRESENT} | wc -w + @if [ `echo ${DOCKER_PRESENT} | wc -w` -eq 0 ]; then echo "No images to delete"; exit 1; fi + @echo ${DOCKER_PRESENT} | sed -e 's/ /\n/g' + @echo "" + @echo "Press Ctrl+C within 5 seconds to cancel" + @for i in 5 4 3 2 1; do echo -n "$$i "; sleep 1; done; echo "" + @for i in ${DOCKER_PRESENT}; do docker rmi $$i; done + @echo ""; echo " ** Remaining images with the build label:" + @make docker_tag_list -############################################## For maintainer only -##### push -DOCKERHUB_REPO="mmartial" +############################################### For maintainer only +###### push -- will only proceed with existing ("present") images + +LATEST_ENTRY=$(shell echo ${DOCKER_ALL} | sed -e 's/ /\n/g' | tail -1) +LATEST_CANDIDATE=$(shell echo ${COMFYUI_CONTAINER_NAME}:${LATEST_ENTRY}) docker_tag: - @make latest - @${DOCKER_CMD} tag ${NAMED_BUILD} ${DOCKERHUB_REPO}/${NAMED_BUILD} - @${DOCKER_CMD} tag ${NAMED_BUILD_LATEST} ${DOCKERHUB_REPO}/${NAMED_BUILD_LATEST} - @make docker_tag_list + @if [ `echo ${DOCKER_PRESENT} | wc -w` -eq 0 ]; then echo "No images to tag"; exit 1; fi + @echo "== About to tag:" + @for i in ${DOCKER_PRESENT}; do image_out1="${DOCKERHUB_REPO}/$$i-${BUILD_DATE}"; image_out2="${DOCKERHUB_REPO}/$$i-latest"; echo " ++ $$i -> $$image_out1"; echo " ++ $$i -> $$image_out2"; done + @if echo ${DOCKER_PRESENT} | grep -q ${LATEST_CANDIDATE}; then image_out="${DOCKERHUB_REPO}/${COMFYUI_CONTAINER_NAME}:latest"; echo " ++ ${LATEST_CANDIDATE} -> $$image_out"; else echo " -- Unable to find latest candidate: ${LATEST_CANDIDATE}"; fi + @echo "" + @echo "tagging for hub.docker.com upload -- Press Ctrl+C within 5 seconds to cancel" + @for i in 5 4 3 2 1; do echo -n "$$i "; sleep 1; done; echo "" + @for i in ${DOCKER_PRESENT}; do image_out1="${DOCKERHUB_REPO}/$$i-${BUILD_DATE}"; image_out2="${DOCKERHUB_REPO}/$$i-latest"; docker tag $$i $$image_out1; docker tag $$i $$image_out2; done + @if echo ${DOCKER_PRESENT} | grep -q ${LATEST_CANDIDATE}; then image_out="${DOCKERHUB_REPO}/${COMFYUI_CONTAINER_NAME}:latest"; docker tag ${LATEST_CANDIDATE} $$image_out; fi + +DOCKERHUB_READY=$(shell for i in ${DOCKER_ALL}; do image="${DOCKERHUB_REPO}/${COMFYUI_CONTAINER_NAME}:$$i"; image1=$$image-${BUILD_DATE}; image2=$$image-latest; if docker images --format "{{.Repository}}:{{.Tag}}" | grep -q $$image1; then echo $$image1; fi; if docker images --format "{{.Repository}}:{{.Tag}}" | grep -q 
$$image2; then echo $$image2; fi; done) +DOCKERHUB_READY_LATEST=$(shell image="${DOCKERHUB_REPO}/${COMFYUI_CONTAINER_NAME}:latest"; if docker images --format "{{.Repository}}:{{.Tag}}" | grep -q $$image; then echo $$image; else echo ""; fi) -docker_tag_list: - @echo "Docker images tagged:" - @${DOCKER_CMD} images --filter "label=comfyui-nvidia-docker-build" docker_push: - @make docker_tag - @echo "hub.docker.com upload -- Press Ctl+c within 5 seconds to cancel -- will only work for maintainers" + @if [ `echo ${DOCKERHUB_READY} | wc -w` -eq 0 ]; then echo "No images to push"; exit 1; fi + @echo "== About to push:" + @for i in ${DOCKERHUB_READY} ${DOCKERHUB_READY_LATEST}; do echo " ++ $$i"; done + @echo "pushing to hub.docker.com -- Press Ctrl+C within 5 seconds to cancel" + @for i in 5 4 3 2 1; do echo -n "$$i "; sleep 1; done; echo "" + @for i in ${DOCKERHUB_READY} ${DOCKERHUB_READY_LATEST}; do docker push $$i; done + + +docker_rmi_hub: + @echo ""; echo " ** Potential images with the build label:" + @make docker_tag_list + @if [ `echo ${DOCKERHUB_READY} ${DOCKERHUB_READY_LATEST} | wc -w` -eq 0 ]; then echo "No expected images to delete"; exit 1; fi + @echo "== About to delete:" + @for i in ${DOCKERHUB_READY} ${DOCKERHUB_READY_LATEST}; do echo " -- $$i"; done + @echo "deleting -- Press Ctrl+C within 5 seconds to cancel" @for i in 5 4 3 2 1; do echo -n "$$i "; sleep 1; done; echo "" - @${DOCKER_CMD} push ${DOCKERHUB_REPO}/${NAMED_BUILD} - @${DOCKER_CMD} push ${DOCKERHUB_REPO}/${NAMED_BUILD_LATEST} + @for i in ${DOCKERHUB_READY} ${DOCKERHUB_READY_LATEST}; do docker rmi $$i; done + @echo ""; echo " ** Remaining images with the build label:" + @make docker_tag_list diff --git a/README.md b/README.md index c7eb04a..418e16d 100644 --- a/README.md +++ b/README.md @@ -2,11 +2,31 @@ [ComfyUI](https://github.com/comfyanonymous/ComfyUI/tree/master) is a Stable Diffusion WebUI. With the recent addition of a [Flux example](https://comfyanonymous.github.io/ComfyUI_examples/flux/), I created this container builder to test it. +This container was built to benefit from the process isolation that containers provide, and also to drop the container's ComfyUI privileges to those of a normal user (the container's `comfy` user, which is `sudo` capable). -The container size (over 5GB) contains the required components on an Ubuntu 22.04 image with Nvidia CUDA and CuDNN (the base container is available from Nvidia's DockerHub); we add the requirements components to support an installation of ComfyUI. +The container (usually over 4GB) contains the required components on an Ubuntu image with Nvidia CUDA and CuDNN (the base container is available from Nvidia's DockerHub); we add the components required to support an installation of ComfyUI. + +Multiple images are available. The name of the image contains a tag that reflects its core components. For example: `ubuntu24_cuda12.5.1` is based on an Ubuntu 24.04 with CUDA 12.5.1. +Depending on the version of the Nvidia drivers installed, the Docker container runtime will only support up to a certain version of CUDA. For example, Driver 550 supports up to CUDA 12.4 and therefore will not be able to run the CUDA 12.4.1 or 12.5.1 versions. +Use the `nvidia-smi` command on your system to obtain the `CUDA Version:` entry in the produced table's header. +For more details on driver capabilities and how to update those, please see [Setting up NVIDIA docker & podman (Ubuntu 24.04)](https://blg.gkr.one/20240404-u24_nvidia_docker_podman/). 
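+As a quick check (a sketch; the exact output layout depends on your system):
+
+```bash
+# The "CUDA Version:" entry in the first lines of the nvidia-smi output is the
+# maximum CUDA version supported by the installed driver (not the installed toolkit)
+nvidia-smi | head -n 4
+```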
+ +The `latest` tag will always point to the most up-to-date build (i.e., the most recent OS+CUDA). +If this version is incompatible with your container runtime, please see the list of alternative builds. + +| tag | aka | +| --- | --- | +| ubuntu22_cuda12.3.2-latest | | +| ubuntu22_cuda12.4.1-latest | | +| ubuntu24_cuda12.5.1-latest | latest | + +During its first run, the container will download ComfyUI from `git` (into the `run/ComfyUI` folder), create a Python virtual environment (in `run/venv`) for all the Python packages needed by the tool, and install [ComfyUI Manager](https://github.com/ltdrdata/ComfyUI-Manager) into ComfyUI's `custom_nodes` directory. +This adds about 5GB of content to the installation. Download time depends on your internet connection. + +Given that a `venv` (Python virtual environment) might not be compatible from one OS+CUDA version to another, the tool will create a new `venv` when the current one is not for the expected version. +**An installation might end up with multiple `venv`-based directories in the `run` folder, as the tool will rename existing unusable ones as "venv-OS+CUDA" (for example `venv-ubuntu22_cuda12.3.2`). In order to support downgrading if needed, the script will not delete previous `venv` directories; removing unneeded ones is currently left to the end user.** +Using an alternate `venv` means that some installed custom nodes might have an `import failed` error. We are attempting to make use of [`cm-cli`](https://github.com/ltdrdata/ComfyUI-Manager/blob/main/docs/en/cm-cli.md) before starting ComfyUI. If that fails, start the `Manager -> Custom Nodes Manager`, `Filter` by `Import Failed` and use the `Try fix` button, as this will download required packages and install those in the used `venv`. A `Restart` and UI reload will be required, but this ought to fix issues with the nodes. -During its first run, it will download ComfyUI from git (into the `run/ComfyUI` folder), create a Python virtual environment (in `run/venv`) for all the Python packages needed by the tool, and install [ComfyUI Manager](https://github.com/ltdrdata/ComfyUI-Manager) into ComfyUI's `custom_nodes` directory. -This adds an expected 5GB of content to the installation. Depending on your internet connection, it takes as much time as necessary to complete. You will know the ComfyUI WebUI is running when you check the `docker logs` and see `To see the GUI go to: http://0.0.0.0:8188` **About 10GB of space between the container and the virtual environment installation is needed.** @@ -25,6 +45,8 @@ It is recommended that a container monitoring tool be available to watch the log - [2.3. First time use](#23-first-time-use) - [3. Docker image](#3-docker-image) - [3.1. Building the image](#31-building-the-image) + - [3.1.1. Using the Makefile](#311-using-the-makefile) + - [3.1.2. Using a Dockerfile](#312-using-a-dockerfile) - [3.2. Availability on DockerHub](#32-availability-on-dockerhub) - [3.3. Unraid availability](#33-unraid-availability) - [3.4. Nvidia base container](#34-nvidia-base-container) @@ -33,19 +55,22 @@ It is recommended that a container monitoring tool be available to watch the log - [4.2. FLUX.1\[dev\] example](#42-flux1dev-example) - [5. FAQ](#5-faq) - [5.1. Virtualenv](#51-virtualenv) + - [5.1.1. Multiple virtualenv](#511-multiple-virtualenv) + - [5.1.2. Fixing Failed Custom Nodes](#512-fixing-failed-custom-nodes) - [5.2. user\_script.bash](#52-user_scriptbash) - [5.3. Available environment variables](#53-available-environment-variables) - [5.3.1. 
WANTED\_UID and WANTED\_GID](#531-wanted_uid-and-wanted_gid) - [5.3.2. COMFY\_CMDLINE\_BASE and COMFY\_CMDLINE\_XTRA](#532-comfy_cmdline_base-and-comfy_cmdline_xtra) - [5.3.3. SECURITY\_LEVEL](#533-security_level) - [5.4. ComfyUI Manager \& Security levels](#54-comfyui-manager--security-levels) - - [5.5. Additional FAQ](#55-additional-faq) + - [5.5. Shell within the Docker image](#55-shell-within-the-docker-image) + - [5.6. Additional FAQ](#56-additional-faq) - [6. Troubleshooting](#6-troubleshooting) - [7. Changelog](#7-changelog) # 1. Preamble -This build is made to NOT run as the `root` user, but run within the container as a `comfy` user using the UID/GID requested at `docker run` time (if none are provided, the container will use 1024/1024). +This build is made to NOT run as the `root` user, but run within the container as a `comfy` user using the UID/GID requested at `docker run` time (if none are provided, the container will use `1024`/`1024`). This is done to allow end users to have local directory structures for all the side data (input, output, temp, user), Hugging Face `HF_HOME` if used, and the entire `models`, which are separate from the container and able to be altered by the user. To request a different UID/GID at run time, use the `WANTED_UID` and `WANTED_GID` environment variables when calling the container. @@ -70,12 +95,18 @@ Among the folders that will be created within `run` are `HF, ComfyUI, venv` - `custom_nodes` for additional support nodes, for example ComfyUI-Manager, - `models` and all its sub-directories is where `checkpoints`, `clip`, `loras`, `unet`, etc have to be placed. - `input` and `output` are where input images will be placed and generated images will end up. + - `user` is where the user's customizations, saved `workflows` (and ComfyUI Manager's configuration) are stored. - `venv` is the virtual environment where all the required Python packages for ComfyUI and other additions will be placed. A default ComfyUI package installation requires about 5GB of additional installation in addition to the container itself; those packages will be in this `venv` folder. -When starting t the container image executes the `init.bash` script that performs a few operations: +**Currently, it is not recommended to volume map folders within the `ComfyUI` folder**. Doing so is likely to prevent proper installation (during the first run) or update, as any volume mapping (`docker ... -v` or `- local_path:container_path` for compose) creates those directories within a directory structure that is not supposed to exist at first run. + +When starting, the container image executes the `init.bash` script that performs a few operations: - Ensure we can use the `WANTED_UID` and `WANTED_GID` as the `comfy` user (the user set to run the container), - Obtain the latest version of ComfyUI from GitHub if not already present in the mounted `run` folder. -Create the virtual environment (`venv`) if it does not already exist +Create the virtual environment (`venv`) if one does not already exist + - if one exists, confirm it is the one for this OS+CUDA pair + - if not, rename it and look for a renamed one that would match + - if none is found, create a new one - Activate this virtual environment - Install all the ComfyUI-required Python packages. If those are already present, additional content should not need to be downloaded. - Installing ComfyUI-Manager if it is not present. @@ -160,20 +191,48 @@ Other needed files could be found on [HuggingFace](https://huggingface.co/) or [ ## 3.1. 
Building the image -Note that a `docker buildx prune -f` might be needed to force a clean build after removing already existing containers. +### 3.1.1. Using the Makefile + -The `comfyui-nvidia-docker` (`latest`) image contains the installation of the core components of ComfyUI from its latest release from GitHub. +Running `make` will show us the different build targets. That list will differ depending on the available `base` files in the `components` directory. -Running `make` will show us the different build options; `latest` is the one we want. +For example, you might see: Run: ```bash -make latest +% make +Available comfyui-nvidia-docker docker images to be built (make targets): + ubuntu22_cuda12.3.2 + ubuntu22_cuda12.4.1 + ubuntu24_cuda12.5.1 + +build: builds all +``` + +It is possible to build a specific target, for example `make ubuntu22_cuda12.3.2`, or to build all the available containers. + +Running a given target will create a `comfyui-nvidia-docker` `docker buildx` builder. +If none is already present, this will initiate a build with no caching. + +The process will create the `Dockerfile` used for the build within the `Dockerfile` folder. For example, when using `make ubuntu22_cuda12.3.2`, a `Dockerfile/Dockerfile-ubuntu22_cuda12.3.2` file is created that will contain the steps used to build the local `comfyui-nvidia-docker:ubuntu22_cuda12.3.2` Docker image. + +### 3.1.2. Using a Dockerfile + +It is also possible to use one of the generated `Dockerfile`s to build a specific image. +After selecting the image to build from the `OS+CUDA` name within the `Dockerfile` folder, proceed with a `docker build` command in the directory where this `README.md` is located. +For example, to build the `ubuntu24_cuda12.5.1` container, run: + +```bash +docker build --tag comfyui-nvidia-docker:ubuntu24_cuda12.5.1 -f Dockerfile/Dockerfile-ubuntu24_cuda12.5.1 . ``` +Upon a successful build completion, we will have a newly created local `comfyui-nvidia-docker:ubuntu24_cuda12.5.1` Docker image. + ## 3.2. Availability on DockerHub -Builds are available on DockerHub at [mmartial/comfyui-nvidia-docker](https://hub.docker.com/r/mmartial/comfyui-nvidia-docker), built from this repository's `Dockerfile`. +Builds are available on DockerHub at [mmartial/comfyui-nvidia-docker](https://hub.docker.com/r/mmartial/comfyui-nvidia-docker), built from this repository's `Dockerfile`(s). + +The table at the top of this document shows the list of available versions on DockerHub. Make sure your NVIDIA container runtime supports the proposed CUDA version. This is particularly important if you use the `latest` tag, as it is expected to refer to the most recent OS+CUDA release. ## 3.3. Unraid availability @@ -215,6 +274,28 @@ This allows for the installation of Python packages using `pip3 install`. After running `docker exec -t comfy-nvidia /bin/bash` from the provided `bash`, activate the `venv` with `source /comfy/mnt/venv/bin/activate`. From this `bash` prompt, you can now run `pip3 freeze` or other `pip3` commands such as `pip3 install civitai` +### 5.1.1. Multiple virtualenv + +Because a `venv` is tied to an OS+CUDA version, the tool uses some internal logic so that the `venv` folder matches the OS+CUDA of the started container. +**Starting two `comfyui-nvidia-docker` containers with different OS+CUDA tags at the same time is likely to cause some issues.** + +For illustration, let's say we last ran `ubuntu22_cuda12.3.2`, exited the container and now attempt to run `ubuntu24_cuda12.5.1`. 
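+A hedged way to check which build base an existing `venv` was created for (assuming a previous run already wrote the marker file described below):
+
+```bash
+# from the host, with "run" being the directory mapped at docker run time
+cat run/venv/.build_base.txt
+# expected output, for example: ubuntu22_cuda12.3.2
+```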
The script initialization is as follows: +- check for an existing `venv`; there is one +- check that this `venv` is for `ubuntu24_cuda12.5.1`: it is not, it is for `ubuntu22_cuda12.3.2` +- move `venv` to `venv-ubuntu22_cuda12.3.2` +- check if there is a `venv-ubuntu24_cuda12.5.1` to rename as `venv`: there is not +- the script continues as if there was no `venv` and a new one for `ubuntu24_cuda12.5.1` is created + +Because of this, it is possible to have multiple `venv`-based folders in the "run" folder. + +### 5.1.2. Fixing Failed Custom Nodes + +A side effect of the multiple virtual environment integration is that some installed custom nodes might have an `import failed` error when switching from one OS+CUDA version to another. +When the container is initialized, we run `cm-cli.py fix all` to attempt to fix this. +If this does not resolve the issue, start the `Manager -> Custom Nodes Manager`, `Filter` by `Import Failed` and use the `Try fix` button, as this will download required packages and install those in the used `venv`. A `Restart` and UI reload will be required, but this ought to fix issues with the nodes. + +![Import Failed: Try Fix](./assets/ImportFailed-TryFix.png) + ## 5.2. user_script.bash The `run/user_script.bash` user script can perform additional operations. @@ -292,9 +373,9 @@ It is also possible to use the environment variables in combination with the `us ```bash #!/bin/bash -#echo "== Adding system package" +#echo "== Update installed packages" DEBIAN_FRONTEND=noninteractive sudo apt-get update -DEBIAN_FRONTEND=noninteractive sudo apt-get install -y libgl1 libglib2.0-0 +DEBIAN_FRONTEND=noninteractive sudo apt-get upgrade -y # Exit with an "okay" status to allow the init script to run the regular ComfyUI command exit 0 @@ -325,7 +406,11 @@ Note that if this is the first time starting the container, the file will not ye To use `cm-cli`, from the virtualenv, use: `python3 /comfy/mnt/custom_nodes/ComfyUI-Manager/cm-cli.py`. For example: `python3 /comfy/mnt/custom_nodes/ComfyUI-Manager/cm-cli.py show installed` (`COMFYUI_PATH=/ComfyUI` should be set) -## 5.5. Additional FAQ +## 5.5. Shell within the Docker image + +Depending on your `WANTED_UID` and `WANTED_GID`, when starting a `docker exec` (or getting a `bash` terminal from `docker compose`), it is possible that the shell is started with incorrect permissions (we will see a `bash: /comfy/.bashrc: Permission denied` error). The `comfy` user is `sudo`-able: run `sudo su comfy` to get a shell with the proper UID/GID. + +## 5.6. Additional FAQ See [extras/FAQ.md] for additional FAQ topics, among which: - Updating ComfyUI @@ -335,12 +420,15 @@ See [extras/FAQ.md] for additional FAQ topics, among which: # 6. Troubleshooting The `venv` in the "run" directory contains all the required Python packages used by the tool. -In case of an issue, it is recommended that you terminate the container, delete the `venv` directory, and restart the container. -The virtual environment will be recreated; any `custom_scripts` should re-install their requirements. +In case of an issue, it is recommended that you terminate the container, delete (or rename) the `venv` directory, and restart the container. +The virtual environment will be recreated; any `custom_scripts` should re-install their requirements; please see the "Fixing Failed Custom Nodes" section for additional details. + +It is also possible to rename the entire "run" directory to get a clean installation of ComfyUI and its virtual environment. 
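+A minimal sketch of that approach (assumptions: the container is stopped first, and `run` is the directory mapped at `docker run` time):
+
+```bash
+# keep the old content around so models, custom_nodes, input, output, user, ... can be copied back
+mv run run.old
+# restart the container to obtain a fresh "run" directory, then copy content back, for example:
+# rsync -a run.old/ComfyUI/models/ run/ComfyUI/models/
+```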
This method is preferred over deleting the "run" directory, as it allows us to copy content from the old "run" directory: the downloaded `ComfyUI/models`, the installed `ComfyUI/custom_nodes`, the generated `ComfyUI/output`, `ComfyUI/user`, the added `ComfyUI/input`, and any other folders present within it. # 7. Changelog -- 20250109: Integrated `SECURITY_LEVELS` within the docker arguments + added libGL into the base container. +- 20250116: Happy 2nd Birthday ComfyUI -- added multiple builds for different base Ubuntu OS and CUDA combinations + added `ffmpeg` into the base container. +- 20250109: Integrated `SECURITY_LEVELS` within the docker arguments + added `libGL` into the base container. - 20240915: Added `COMFY_CMDLINE_BASE` and `COMFY_CMDLINE_XTRA` variable - 20240824: Tag 0.2: shift to pull at first run-time, user upgradable with lighter base container - 20240824: Tag 0.1: builds were based on ComfyUI release, not user upgradable diff --git a/assets/ImportFailed-TryFix.png b/assets/ImportFailed-TryFix.png new file mode 100644 index 0000000..196e86e Binary files /dev/null and b/assets/ImportFailed-TryFix.png differ diff --git a/components/base-ubuntu22_cuda12.3.2 b/components/base-ubuntu22_cuda12.3.2 new file mode 100644 index 0000000..685112d --- /dev/null +++ b/components/base-ubuntu22_cuda12.3.2 @@ -0,0 +1,19 @@ +FROM nvidia/cuda:12.3.2-runtime-ubuntu22.04 + +# Here, we are using CUDNN8 (devel) -- CUDNN9 is also compatible for CUDA 12.3 +# Adapted from https://gitlab.com/nvidia/container-images/cuda/-/blob/master/dist/12.2.2/ubuntu2204/devel/cudnn8/Dockerfile +ENV NV_CUDNN_VERSION=8.9.7.29 +ENV NV_CUDNN_PACKAGE_NAME="libcudnn8" +ENV NV_CUDA_ADD=cuda12.2 +ENV NV_CUDNN_PACKAGE="$NV_CUDNN_PACKAGE_NAME=$NV_CUDNN_VERSION-1+$NV_CUDA_ADD" +ENV NV_CUDNN_PACKAGE_DEV="$NV_CUDNN_PACKAGE_NAME-dev=$NV_CUDNN_VERSION-1+$NV_CUDA_ADD" +LABEL com.nvidia.cudnn.version="${NV_CUDNN_VERSION}" + +RUN apt-get update && apt-get install -y --no-install-recommends \ + ${NV_CUDNN_PACKAGE} \ + ${NV_CUDNN_PACKAGE_DEV} \ + && apt-mark hold ${NV_CUDNN_PACKAGE_NAME} \ + && rm -rf /var/lib/apt/lists/* + +ARG BASE_DOCKER_FROM=nvidia/cuda:12.3.2-runtime-ubuntu22.04 + diff --git a/components/base-ubuntu22_cuda12.4.1 b/components/base-ubuntu22_cuda12.4.1 new file mode 100644 index 0000000..bbcd4ef --- /dev/null +++ b/components/base-ubuntu22_cuda12.4.1 @@ -0,0 +1,17 @@ +FROM nvidia/cuda:12.4.1-runtime-ubuntu22.04 + +# CUDNN9 "runtime" package +# Adapted from https://gitlab.com/nvidia/container-images/cuda/-/blob/master/dist/12.4.1/ubuntu2204/runtime/cudnn/Dockerfile +ENV NV_CUDNN_VERSION=9.1.0.70-1 +ENV NV_CUDNN_PACKAGE_NAME=libcudnn9-cuda-12 +ENV NV_CUDNN_PACKAGE="libcudnn9-cuda-12=${NV_CUDNN_VERSION}" + +LABEL com.nvidia.cudnn.version="${NV_CUDNN_VERSION}" + +RUN apt-get update && apt-get install -y --no-install-recommends \ + ${NV_CUDNN_PACKAGE} \ + && apt-mark hold ${NV_CUDNN_PACKAGE_NAME} \ + && rm -rf /var/lib/apt/lists/* + +ARG BASE_DOCKER_FROM=nvidia/cuda:12.4.1-runtime-ubuntu22.04 + diff --git a/components/base-ubuntu24_cuda12.5.1 b/components/base-ubuntu24_cuda12.5.1 new file mode 100644 index 0000000..d4686fa --- /dev/null +++ b/components/base-ubuntu24_cuda12.5.1 @@ -0,0 +1,16 @@ +FROM nvidia/cuda:12.5.1-runtime-ubuntu24.04 + +# Extended from https://gitlab.com/nvidia/container-images/cuda/-/blob/master/dist/12.5.1/ubuntu2404/runtime/Dockerfile +ENV NV_CUDNN_VERSION=9.3.0.75-1 +ENV NV_CUDNN_PACKAGE_NAME="libcudnn9" +ENV NV_CUDA_ADD=cuda-12 +ENV 
NV_CUDNN_PACKAGE="$NV_CUDNN_PACKAGE_NAME-$NV_CUDA_ADD=$NV_CUDNN_VERSION" + +LABEL com.nvidia.cudnn.version="${NV_CUDNN_VERSION}" + +RUN apt-get update && apt-get install -y --no-install-recommends \ + ${NV_CUDNN_PACKAGE} \ + && apt-mark hold ${NV_CUDNN_PACKAGE_NAME}-${NV_CUDA_ADD} + +ARG BASE_DOCKER_FROM=nvidia/cuda:12.5.1-runtime-ubuntu24.04 + diff --git a/components/part1-common b/components/part1-common new file mode 100644 index 0000000..9e9022e --- /dev/null +++ b/components/part1-common @@ -0,0 +1,72 @@ +##### Base + +# Install system packages +ENV DEBIAN_FRONTEND=noninteractive +RUN apt-get update -y --fix-missing\ + && apt-get install -y \ + apt-utils \ + locales \ + ca-certificates \ + && apt-get upgrade -y \ + && apt-get clean + +# UTF-8 +RUN localedef -i en_US -c -f UTF-8 -A /usr/share/locale/locale.alias en_US.UTF-8 +ENV LANG=en_US.utf8 +ENV LC_ALL=C + +# Install needed packages +RUN apt-get update -y --fix-missing \ + && apt-get upgrade -y \ + && apt-get install -y \ + build-essential \ + python3-dev \ + unzip \ + wget \ + zip \ + zlib1g \ + zlib1g-dev \ + gnupg \ + rsync \ + python3-pip \ + python3-venv \ + git \ + sudo \ + # Adding libGL (used by a few common nodes) + libgl1 \ + libglib2.0-0 \ + # Adding FFMPEG (for video generation workflow) + ffmpeg \ + && apt-get clean + +ENV BUILD_FILE="/etc/image_base.txt" +ARG BASE_DOCKER_FROM +RUN echo "DOCKER_FROM: ${BASE_DOCKER_FROM}" | tee ${BUILD_FILE} +RUN echo "CUDNN: ${NV_CUDNN_PACKAGE_NAME} (${NV_CUDNN_VERSION})" | tee -a ${BUILD_FILE} + +ARG BUILD_BASE="unknown" +LABEL comfyui-nvidia-docker-build-from=${BUILD_BASE} +RUN it="/etc/build_base.txt"; echo ${BUILD_BASE} > $it && chmod 555 $it + +##### ComfyUI preparation +# The comfy user will have UID 1024 and GID 1024 +ENV COMFYUSER_DIR="/comfy" +RUN echo '%sudo ALL=(ALL) NOPASSWD:ALL' >> /etc/sudoers \ + && useradd -u 1024 -U -d ${COMFYUSER_DIR} -s /bin/bash -m comfy \ + && usermod -G users comfy \ + && adduser comfy sudo \ + && test -d ${COMFYUSER_DIR} +RUN it="/etc/comfyuser_dir"; echo ${COMFYUSER_DIR} > $it && chmod 555 $it + +ENV NVIDIA_VISIBLE_DEVICES=all + +EXPOSE 8188 + +USER comfy +WORKDIR ${COMFYUSER_DIR} +COPY --chown=comfy:comfy --chmod=555 init.bash comfyui-nvidia_init.bash + +ARG BUILD_DATE="unknown" +LABEL comfyui-nvidia-docker-build=${BUILD_DATE} + +CMD [ "./comfyui-nvidia_init.bash" ] diff --git a/init.bash b/init.bash index cb5eba5..4143428 100644 --- a/init.bash +++ b/init.bash @@ -85,6 +85,8 @@ if test -z ${COMFYUSER_DIR}; then error_exit "Empty COMFYUSER_DIR variable"; fi it=/etc/build_base.txt if [ ! -f $it ]; then error_exit "$it missing, exiting"; fi BUILD_BASE=`cat $it` +BUILD_BASE_FILE=$it +BUILD_BASE_SPECIAL="ubuntu22_cuda12.3.2" # special value: the build base in use when this feature was introduced; used to mark an existing venv if the marker is not present echo "-- BUILD_BASE: \"${BUILD_BASE}\"" if test -z ${BUILD_BASE}; then error_exit "Empty BUILD_BASE variable"; fi @@ -122,6 +124,10 @@ if [ ! -z "$WANTED_UID" -a "$WANTED_UID" != "$new_uid" ]; then echo "Wrong UID ( # We are now running as comfy echo "== Running as comfy" +# Confirm we can write to the user directory +echo "== Testing write access as the comfy user to the run directory" +it=${COMFYUSER_DIR}/mnt/.testfile; touch $it && rm -f ${COMFYUSER_DIR}/mnt/.testfile || error_exit "Failed to write to ${COMFYUSER_DIR}/mnt" + # Obtain the latest version of ComfyUI if not already present cd ${COMFYUSER_DIR}/mnt if [ ! -d "ComfyUI" ]; then @@ -129,16 +135,57 @@ if [ ! 
-d "ComfyUI" ]; then git clone https://github.com/comfyanonymous/ComfyUI.git ComfyUI || error_exit "ComfyUI clone failed" fi +# Confirm the ComfyUI directory is present and we can write to it +if [ ! -d "ComfyUI" ]; then error_exit "ComfyUI not found"; fi +it=ComfyUI/.testfile && rm -f $it || error_exit "Failed to write to ComfyUI directory as the comfy user" + if [ ! -d HF ]; then echo "== Creating HF directory" mkdir -p HF fi export HF_HOME=${COMFYUSER_DIR}/mnt/HF +# Confirm the HF directory is present and we can write to it +if [ ! -d "HF" ]; then error_exit "HF not found"; fi +it=HF/.testfile && rm -f $it || error_exit "Failed to write to HF directory as the comfy user" + +# Attempting to support multiple build bases +# the venv directory is specific to the build base +# we are placing a marker file in the venv directory to match it to a build base +# if the marker is not for container's build base, we rename the venv directory to avoid conflicts + +# if a venv is present, confirm we can write to it +if [ -d "venv" ]; then + it=venv/.testfile && rm -f $it || error_exit "Failed to write to venv directory as the comfy user" + # use the special value to mark existing venv if the marker is not present + it=venv/.build_base.txt; if [ ! -f $it ]; then echo $BUILD_BASE_SPECIAL > $it; fi +fi + +SWITCHED_VENV=True # this is a marker to indicate that we have switched to a different venv, which is set unless we re-use the same venv as before (see below) +# Check for an existing venv; if present, is it the proper one -- ie does its .build_base.txt match the container's BUILD_BASE_FILE? +if [ -d venv ]; then + it=venv/.build_base.txt + venv_bb=`cat $it` + + if cmp --silent $it $BUILD_BASE_FILE; then + echo "== venv is for this BUILD_BASE (${BUILD_BASE})" + SWITCHED_VENV=False + else + echo "== venv ($venv_bb) is not for this BUILD_BASE (${BUILD_BASE}), renaming it and seeing if a valid one is present" + mv venv venv-${venv_bb} || error_exit "Failed to rename venv to venv-${venv_bb}" + + if [ -d venv-${BUILD_BASE} ]; then + echo "== Existing venv (${BUILD_BASE}) found, attempting to use it" + mv venv-${BUILD_BASE} venv || error_exit "Failed to rename ven-${BUILD_BASE} to venv" + fi + fi +fi + # virtualenv for installation if [ ! -d "venv" ]; then echo "== Creating virtualenv" python3 -m venv venv || error_exit "Virtualenv creation failed" + echo $BUILD_BASE > venv/.build_base.txt fi # Activate the virtualenv and upgrade pip @@ -178,14 +225,15 @@ if [ ! -d ComfyUI-Manager ]; then git clone https://github.com/ltdrdata/ComfyUI-Manager.git || error_exit "ComfyUI-Manager clone failed" fi if [ ! 
-d ComfyUI-Manager ]; then error_exit "ComfyUI-Manager not found"; fi +pip3 install --trusted-host pypi.org --trusted-host files.pythonhosted.org -r ${COMFYUI_PATH}/custom_nodes/ComfyUI-Manager/requirements.txt || echo "ComfyUI-Manager CLI requirements install/upgrade failed" # Lower security_level for ComfyUI-Manager to allow access from outside the container # This is needed to allow the WebUI to be served on 0.0.0.0 ie all interfaces and not just localhost (which would be limited to within the container) # Please see https://github.com/ltdrdata/ComfyUI-Manager?tab=readme-ov-file#security-policy for more details # # recent releases of ComfyUI-Manager have a config.ini file in the user folder, if this is not present, we expect it in the default folder -cm_conf_user=/comfy/mnt/ComfyUI/user/default/ComfyUI-Manager/config.ini -cm_conf=/comfy/mnt/ComfyUI/custom_nodes/ComfyUI-Manager/config.ini +cm_conf_user=${COMFYUI_PATH}/user/default/ComfyUI-Manager/config.ini +cm_conf=${COMFYUI_PATH}/custom_nodes/ComfyUI-Manager/config.ini if [ -f $cm_conf_user ]; then cm_conf=$cm_conf_user; fi if [ ! -f $cm_conf ]; then echo "== ComfyUI-Manager $cm_conf file missing, script potentially never run before. You will need to run ComfyUI-Manager a first time for the configuration file to be generated, we can not attempt to update its security level yet -- if this keeps occurring, please let the developer know so he can investigate. Thank you" @@ -196,6 +244,21 @@ else grep security_level $cm_conf fi +# Attempt to use ComfyUI Manager CLI to fix all installed nodes -- This must be done within the activated virtualenv +if [ "A${SWITCHED_VENV}" == "AFalse" ]; then + echo "== Skipping ComfyUI-Manager CLI fix as we are re-using the same venv as the last execution" + echo " -- If you are experiencing issues with custom nodes, use 'Manager -> Custom Nodes Manager -> Filter: Import Failed -> Try Fix' from the WebUI" +else + cm_cli=${COMFYUI_PATH}/custom_nodes/ComfyUI-Manager/cm-cli.py + if [ -f $cm_cli ]; then + echo "== Running ComfyUI-Manager CLI to fix installed custom nodes" + python3 $cm_cli fix all || echo "ComfyUI-Manager CLI failed -- in case of issue with custom nodes: use 'Manager -> Custom Nodes Manager -> Filter: Import Failed -> Try Fix' from the WebUI" + else + echo "== ComfyUI-Manager CLI not found, skipping" + fi +fi + +# Final steps before running ComfyUI cd ${COMFYUI_PATH} echo -n "== Container directory: "; pwd