|
1 | 1 | # Copyright (c) 2022 Eugene Brodsky (https://github.com/ebr)
|
2 | 2 |
|
# Directory in the container where the INVOKEAI_ROOT (runtime dir) will be mounted
INVOKEAI_ROOT=/mnt/invokeai
# Host directory to contain the runtime dir. Will be mounted at INVOKEAI_ROOT path in the container
HOST_MOUNT_PATH=${HOME}/invokeai

# Exported so `docker build` actually sees it in the recipe environment;
# an unexported make variable would have no effect on the docker CLI.
export DOCKER_BUILDKIT=1
IMAGE=local/invokeai:latest

# `:=` expands `$(shell …)` once at parse time instead of re-running
# `id` on every reference (recursive `=` would fork a shell each time).
USER:=$(shell id -u)
GROUP:=$(shell id -g)

# All downloaded models, config, etc will end up in ${HOST_MOUNT_PATH} on the host.
# This is consistent with the expected non-Docker behaviour.
# Contents can be moved to a persistent storage and used to prime the cache on another host.
|
# Build the image locally from the cloud Dockerfile.
# DOCKER_BUILDKIT=1 is set inline so BuildKit is enabled even when the
# variable is not exported from the Makefile; ${IMAGE} replaces the
# previously hard-coded tag so the build and run targets stay consistent.
.PHONY: build
build:
	DOCKER_BUILDKIT=1 docker build -t ${IMAGE} -f Dockerfile.cloud ..
|
# Download models and write the initial configuration into the runtime dir.
# INVOKEAI_ROOT is passed via -e so the script finds the runtime dir without
# needing a --root flag. The container runs as root, so the runtime dir is
# chown'ed back to the invoking user afterwards to avoid root-owned files.
.PHONY: configure
configure:
	docker run --rm -it --runtime=nvidia --gpus=all \
		-v ${HOST_MOUNT_PATH}:${INVOKEAI_ROOT} \
		-v ${HOST_MOUNT_PATH}/.cache:/root/.cache \
		-e INVOKEAI_ROOT=${INVOKEAI_ROOT} \
		--entrypoint bash \
		${IMAGE} -c "scripts/configure_invokeai.py"
	sudo chown -R ${USER}:${GROUP} ${HOST_MOUNT_PATH}
|
# Run the container with the runtime dir mounted and the web server exposed on port 9090
.PHONY: web
web:
	docker run --rm -it --runtime=nvidia --gpus=all \
		-v ${HOST_MOUNT_PATH}:${INVOKEAI_ROOT} \
		-v ${HOST_MOUNT_PATH}/.cache:/root/.cache \
		-e INVOKEAI_ROOT=${INVOKEAI_ROOT} \
		--entrypoint bash -p9090:9090 ${IMAGE} \
		-c "scripts/invoke.py --web --host 0.0.0.0 --root ${INVOKEAI_ROOT}"
|
# Run the cli with the runtime dir mounted
.PHONY: cli
cli:
	docker run --rm -it --runtime=nvidia --gpus=all \
		-v ${HOST_MOUNT_PATH}:${INVOKEAI_ROOT} \
		-v ${HOST_MOUNT_PATH}/.cache:/root/.cache \
		-e INVOKEAI_ROOT=${INVOKEAI_ROOT} \
		--entrypoint bash ${IMAGE} \
		-c "scripts/invoke.py --root ${INVOKEAI_ROOT}"
|
# Run the container with the runtime dir mounted and open a bash shell
# instead of the Invoke CLI or webserver (wording aligned with the other
# targets, which now say "runtime dir" rather than "cache").
.PHONY: shell
shell:
	docker run --rm -it --runtime=nvidia --gpus=all \
		-v ${HOST_MOUNT_PATH}:${INVOKEAI_ROOT} \
		-v ${HOST_MOUNT_PATH}/.cache:/root/.cache \
		-e INVOKEAI_ROOT=${INVOKEAI_ROOT} \
		--entrypoint bash ${IMAGE} --
|
|
0 commit comments