From 4ad43fc00545ab99395205dbe29b8d3180e91e98 Mon Sep 17 00:00:00 2001
From: ack
Date: Thu, 19 Feb 2026 11:56:33 +0000
Subject: [PATCH] Add docker-compose.yaml
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 docker-compose.yaml | 32 ++++++++++++++++++++++++++++++++
 1 file changed, 32 insertions(+)
 create mode 100644 docker-compose.yaml

diff --git a/docker-compose.yaml b/docker-compose.yaml
new file mode 100644
index 0000000..3170400
--- /dev/null
+++ b/docker-compose.yaml
@@ -0,0 +1,32 @@
+#
+# WARNING: To install Immich, follow our guide: https://docs.immich.app/install/docker-compose
+#
+# Make sure to use the docker-compose.yml of the current release:
+#
+# https://github.com/immich-app/immich/releases/latest/download/docker-compose.yml
+#
+# The compose file on main may not be compatible with the latest release.
+
+name: immich
+
+services:
+  immich-machine-learning:
+    container_name: immich_machine_learning
+    # For hardware acceleration, add one of -[armnn, cuda, rocm, openvino, rknn] to the image tag.
+    # Example tag: ${IMMICH_VERSION:-release}-cuda
+    image: ghcr.io/immich-app/immich-machine-learning:${IMMICH_VERSION:-release}
+    # extends: # uncomment this section for hardware acceleration - see https://docs.immich.app/features/ml-hardware-acceleration
+    #   file: hwaccel.ml.yml
+    #   service: cpu # set to one of [armnn, cuda, rocm, openvino, openvino-wsl, rknn] for accelerated inference - use the `-wsl` version for WSL2 where applicable
+    dns:
+      - 8.8.8.8
+      - 1.1.1.1
+    volumes:
+      - model-cache:/cache
+    restart: always
+    healthcheck:
+      disable: false
+
+
+volumes:
+  model-cache:
\ No newline at end of file
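
For reference, a minimal sketch (not part of the applied patch) of what this service looks like with the hardware-acceleration section uncommented. It assumes an NVIDIA GPU and that hwaccel.ml.yml (shipped alongside the release docker-compose.yml) is placed next to this compose file; the cuda backend is only one of the options listed in the comment in the diff above, everything else is unchanged from the patch:

name: immich

services:
  immich-machine-learning:
    container_name: immich_machine_learning
    # The image tag suffix and the extends target must name the same backend (here: cuda).
    image: ghcr.io/immich-app/immich-machine-learning:${IMMICH_VERSION:-release}-cuda
    extends:
      file: hwaccel.ml.yml
      service: cuda
    volumes:
      - model-cache:/cache
    restart: always

volumes:
  model-cache:

After adjusting the tag and backend to the host's hardware, the stack is started as usual with `docker compose up -d`.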