# The healthcheck for hyperpipe-front provided in the original tutorial did not quite work out for me:
# the container hyperpipe-front always had status 'unhealthy' (although it worked),
# so here is an alternative.
services:
  db:
    image: postgres:16
    container_name: Piped-DB
    hostname: piped-db
    mem_limit: 512m
    cpu_shares: 768
    security_opt:
      - no-new-privileges:true
    healthcheck:
      test: ["CMD", "pg_isready", "-q", "-d", "piped", "-U", "pipeduser"]
      timeout: 45s
      interval: 10s
      retries: 10
    volumes:
      - /volume1/docker/piped/db:/var/lib/postgresql/data:rw
    environment:
      POSTGRES_DB: piped
      POSTGRES_USER: pipeduser
      POSTGRES_PASSWORD: pipedpass
    restart: on-failure:5

  piped-proxy:
    image: 1337kavin/piped-proxy:latest
    container_name: Piped-PROXY
    hostname: piped-proxy
    mem_limit: 512m
    cpu_shares: 768
    security_opt:
      - no-new-privileges:true
    read_only: true
    volumes:
      - /volume1/docker/piped/piped-proxy:/app/socket:rw
    environment:
      UDS: 1
    restart: on-failure:5

  piped-back:
    image: 1337kavin/piped:latest
    container_name: Piped-BACKEND
    hostname: piped-backend
    mem_limit: 2g
    cpu_shares: 768
    security_opt:
      - no-new-privileges:true
    healthcheck:
      test: stat /etc/passwd || exit 1
    volumes:
      - /volume1/docker/piped/config.properties:/app/config.properties:ro
    restart: on-failure:5
    depends_on:
      db:
        condition: service_healthy

  piped-front:
    image: 1337kavin/piped-frontend:latest
    entrypoint: ash -c 'sed -i s/pipedapi.kavin.rocks/pipedapi.yourname.synology.me/g /usr/share/nginx/html/assets/* && /docker-entrypoint.sh && nginx -g "daemon off;"'
    container_name: Piped-FRONTEND
    hostname: piped-frontend
    user: 0:0
    mem_limit: 1g
    cpu_shares: 768
    security_opt:
      - no-new-privileges:true
    healthcheck:
      test: wget --no-verbose --tries=1 --spider http://localhost:80
    restart: on-failure:5
    depends_on:
      piped-back:
        condition: service_healthy

  nginx:
    image: nginx:mainline-alpine
    container_name: Piped-NGINX
    hostname: nginx
    mem_limit: 512m
    cpu_shares: 768
    security_opt:
      - no-new-privileges:true
    healthcheck:
      test: wget --no-verbose --tries=1 --spider http://localhost:80
    ports:
      - 8045:80
    volumes:
      - /volume1/docker/piped/nginx.conf:/etc/nginx/nginx.conf:ro
      - /volume1/docker/piped/pipedapi.conf:/etc/nginx/conf.d/pipedapi.conf:ro
      - /volume1/docker/piped/pipedproxy.conf:/etc/nginx/conf.d/pipedproxy.conf:ro
      - /volume1/docker/piped/pipedfrontend.conf:/etc/nginx/conf.d/pipedfrontend.conf:ro
      - /volume1/docker/piped/ytproxy.conf:/etc/nginx/snippets/ytproxy.conf:ro
      - /volume1/docker/piped/piped-proxy:/var/run/ytproxy:rw
    restart: on-failure:5
    depends_on:
      piped-back:
        condition: service_healthy
      piped-front:
        condition: service_started
      piped-proxy:
        condition: service_started

  hyperpipe-back:
    image: codeberg.org/hyperpipe/hyperpipe-backend:latest
    container_name: Hyperpipe-API
    hostname: hyperpipe-backend
    mem_limit: 512m
    cpu_shares: 768
    security_opt:
      - no-new-privileges:true
    read_only: true
    user: 1026:100
    ports:
      - 3771:3000
    environment:
      HYP_PROXY: hyperpipe-proxy.onrender.com
    restart: on-failure:5
    depends_on:
      nginx:
        condition: service_healthy

  hyperpipe-front:
    image: codeberg.org/hyperpipe/hyperpipe:latest
    entrypoint: sh -c 'find /usr/share/nginx/html -type f -exec sed -i s/pipedapi.kavin.rocks/pipedapi.yourname.synology.me/g {} \; -exec sed -i s/hyperpipeapi.onrender.com/hyperpipeapi.yourname.synology.me/g {} \; && /docker-entrypoint.sh && nginx -g "daemon off;"'
    container_name: Hyperpipe-FRONTEND
    hostname: hyperpipe-frontend
    mem_limit: 512m
    cpu_shares: 768
    security_opt:
      - no-new-privileges:true
    healthcheck:
      test: wget --no-verbose --tries=1 --spider https://piped.yourname.synology.me
      #test: wget --no-verbose --tries=1 --spider http://localhost
    ports:
      - 8745:80
    restart: on-failure:5
    depends_on:
      hyperpipe-back:
        condition: service_started
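
# Optional check (not part of the original tutorial): after recreating the stack you can confirm
# that Hyperpipe-FRONTEND now reports 'healthy', and run the healthcheck command manually to see
# its exit code. These are standard Docker CLI commands; replace piped.yourname.synology.me with
# your own domain, as elsewhere in this file. Kept as comments so the compose file stays valid.
#
#   docker inspect --format '{{.State.Health.Status}}' Hyperpipe-FRONTEND
#
#   docker exec Hyperpipe-FRONTEND wget --no-verbose --tries=1 --spider https://piped.yourname.synology.me
#   echo $?   # 0 = the check passes; any other value would eventually mark the container unhealthy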