Refactor invoke (#405)

Fixes a problem with the cross-attention class missing from diffusers.


Models are now taken from the Hugging Face cache.


50eb02f68b
pull/386/head^2 5.0.2
AbdBarho 1 year ago committed by GitHub
parent 555c26b7ce
commit 10c16e1971
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23

@@ -43,10 +43,10 @@ services:
<<: *base_service
profiles: ["invoke"]
build: ./services/invoke/
image: sd-invoke:26
image: sd-invoke:27
environment:
- PRELOAD=true
- CLI_ARGS=
- CLI_ARGS=--no-nsfw_checker --no-safety_checker --xformers
sygil: &sygil

@@ -11,53 +11,48 @@ ENV DEBIAN_FRONTEND=noninteractive PIP_EXISTS_ACTION=w PIP_PREFER_BINARY=1
RUN --mount=type=cache,target=/root/.cache/pip pip install torch==1.13.1+cu117 torchvision --extra-index-url https://download.pytorch.org/whl/cu117
RUN apt-get update && apt-get install git -y && apt-get clean
RUN git clone https://github.com/invoke-ai/InvokeAI.git /stable-diffusion
WORKDIR /stable-diffusion
RUN --mount=type=cache,target=/root/.cache/pip \
git reset --hard f232068ab89bd80e4f5f3133dcdb62ea78f1d0f7 && \
git config --global http.postBuffer 1048576000 && \
egrep -v '^-e .' environments-and-requirements/requirements-lin-cuda.txt > req.txt && \
pip install -r req.txt && \
rm req.txt
# patch match:
# https://github.com/invoke-ai/InvokeAI/blob/main/docs/installation/INSTALL_PATCHMATCH.md
RUN \
RUN --mount=type=cache,target=/var/cache/apt \
apt-get update && \
# apt-get install build-essential python3-opencv libopencv-dev -y && \
apt-get install make g++ libopencv-dev -y && \
apt-get install make g++ git libopencv-dev -y && \
apt-get clean && \
cd /usr/lib/x86_64-linux-gnu/pkgconfig/ && \
ln -sf opencv4.pc opencv.pc
ARG BRANCH=main SHA=6e0c6d9cc9f6bdbdefc4b9e94bc1ccde1b04aa42
ENV ROOT=/InvokeAI
RUN git clone https://github.com/invoke-ai/InvokeAI.git ${ROOT}
WORKDIR ${ROOT}
RUN --mount=type=cache,target=/root/.cache/pip \
git reset --hard 4463124bddd221c333d4c70e73aa2949ad35453d && \
pip install .
ARG BRANCH=main SHA=50eb02f68be912276a9c106d5e8038a5671a0386
RUN --mount=type=cache,target=/root/.cache/pip \
git fetch && \
git reset --hard && \
git checkout ${BRANCH} && \
git reset --hard ${SHA} && \
pip install .
pip install -U .
RUN --mount=type=cache,target=/root/.cache/pip \
--mount=type=bind,from=xformers,source=/wheel.whl,target=/xformers-0.0.15-cp310-cp310-linux_x86_64.whl \
pip install -U opencv-python-headless huggingface_hub triton /xformers-0.0.15-cp310-cp310-linux_x86_64.whl && \
--mount=type=bind,from=xformers,source=/wheel.whl,target=/xformers-0.0.17-cp310-cp310-linux_x86_64.whl \
pip install -U opencv-python-headless triton /xformers-0.0.17-cp310-cp310-linux_x86_64.whl && \
python3 -c "from patchmatch import patch_match"
RUN touch invokeai.init
COPY . /docker/
ENV NVIDIA_DRIVER_CAPABILITIES=compute,utility
ENV NVIDIA_VISIBLE_DEVICES=all
ENV PYTHONUNBUFFERED=1 ROOT=/stable-diffusion PYTHONPATH="${PYTHONPATH}:${ROOT}" PRELOAD=false CLI_ARGS="" HF_HOME=/root/.cache/huggingface
ENV PYTHONUNBUFFERED=1 PRELOAD=false HF_HOME=/root/.cache/huggingface CONFIG_DIR=/data/config/invoke CLI_ARGS=""
EXPOSE 7860
ENTRYPOINT ["/docker/entrypoint.sh"]
CMD invokeai --web --host 0.0.0.0 --port 7860 --config /docker/models.yaml --root_dir ${ROOT} --outdir /output/invoke ${CLI_ARGS}
CMD invokeai --web --host 0.0.0.0 --port 7860 --root_dir ${ROOT} --config ${CONFIG_DIR}/models.yaml --outdir /output/invoke ${CLI_ARGS}
# TODO: make sure the config is persisted between sessions

@@ -4,25 +4,25 @@ set -Eeuo pipefail
declare -A MOUNTS
mkdir -p ${CONFIG_DIR}
# cache
MOUNTS["/root/.cache"]=/data/.cache/
# this is really just a hack to avoid migrations
rm -rf ${HF_HOME}/diffusers
# ui specific
MOUNTS["${ROOT}/models/codeformer"]=/data/Codeformer/
MOUNTS["${ROOT}/models/gfpgan/GFPGANv1.4.pth"]=/data/GFPGAN/GFPGANv1.4.pth
MOUNTS["${ROOT}/models/gfpgan/weights"]=/data/.cache/
MOUNTS["${ROOT}/models/gfpgan/weights"]=/data/GFPGAN/
MOUNTS["${ROOT}/models/realesrgan"]=/data/RealESRGAN/
MOUNTS["${ROOT}/models/bert-base-uncased"]=/data/.cache/huggingface/transformers/
MOUNTS["${ROOT}/models/openai/clip-vit-large-patch14"]=/data/.cache/huggingface/transformers/
MOUNTS["${ROOT}/models/CompVis/stable-diffusion-safety-checker"]=/data/.cache/huggingface/transformers/
MOUNTS["${ROOT}/models/ldm"]=/data/.cache/invoke/ldm/
MOUNTS["${ROOT}/embeddings"]=/data/embeddings/
# hacks
MOUNTS["${ROOT}/models/clipseg"]=/data/.cache/invoke/clipseg/
for to_path in "${!MOUNTS[@]}"; do
set -Eeuo pipefail
@@ -40,7 +40,8 @@ done
if "${PRELOAD}" == "true"; then
set -Eeuo pipefail
invokeai-configure --skip-sd-weights --root ${ROOT} --yes
invokeai-configure --root ${ROOT} --yes
cp ${ROOT}/configs/models.yaml ${CONFIG_DIR}/models.yaml
fi
exec "$@"

@@ -1,23 +0,0 @@
# This file describes the alternative machine learning models
# available to InvokeAI script.
#
# To add a new model, follow the examples below. Each
# model requires a model config file, a weights file,
# and the width and height of the images it
# was trained on.
stable-diffusion-1.5:
description: Stable Diffusion version 1.5
weights: /data/StableDiffusion/v1-5-pruned-emaonly.ckpt
vae: /data/VAE/vae-ft-mse-840000-ema-pruned.ckpt
config: ./invokeai/configs/stable-diffusion/v1-inference.yaml
width: 512
height: 512
default: true
inpainting-1.5:
description: RunwayML SD 1.5 model optimized for inpainting
weights: /data/StableDiffusion/sd-v1-5-inpainting.ckpt
vae: /data/VAE/vae-ft-mse-840000-ema-pruned.ckpt
config: ./invokeai/configs/stable-diffusion/v1-inpainting-inference.yaml
width: 512
height: 512
default: false
Loading…
Cancel
Save