# docker-compose.yml
# (web-viewer paste header converted to comments: "· 4.2 KiB · YAML", "原始檔案" = "original file")
# The healthcheck for hyperpipe-front provided in the original tutorial did not quite work for me:
# the container hyperpipe-front always had status 'unhealthy' (although it worked),
# so this file uses an alternative healthcheck.
services:
  # PostgreSQL database used by the Piped backend.
  db:
    image: postgres:16
    container_name: Piped-DB
    hostname: piped-db
    mem_limit: 512m
    cpu_shares: 768
    security_opt:
      - no-new-privileges:true
    healthcheck:
      test: ["CMD", "pg_isready", "-q", "-d", "piped", "-U", "pipeduser"]
      # NOTE(review): timeout (45s) is longer than interval (10s); Docker allows
      # this, but a timeout shorter than the interval is the usual convention.
      timeout: 45s
      interval: 10s
      retries: 10
    volumes:
      - /volume1/docker/piped/db:/var/lib/postgresql/data:rw
    environment:
      POSTGRES_DB: piped
      POSTGRES_USER: pipeduser
      # NOTE(review): plaintext secret committed in the compose file — prefer an
      # env_file or a Docker secret so this value stays out of version control.
      POSTGRES_PASSWORD: pipedpass
    restart: on-failure:5

  # Piped video proxy; exposes a unix socket (UDS=1) shared with nginx below.
  piped-proxy:
    image: 1337kavin/piped-proxy:latest
    container_name: Piped-PROXY
    hostname: piped-proxy
    mem_limit: 512m
    cpu_shares: 768
    security_opt:
      - no-new-privileges:true
    read_only: true
    volumes:
      - /volume1/docker/piped/piped-proxy:/app/socket:rw
    environment:
      # Quoted so the value stays a string; env vars are strings at runtime and
      # unquoted `1` is parsed as a YAML integer.
      UDS: "1"
    restart: on-failure:5

  # Piped API backend; waits for the database to be healthy before starting.
  piped-back:
    image: 1337kavin/piped:latest
    container_name: Piped-BACKEND
    hostname: piped-backend
    mem_limit: 2g
    cpu_shares: 768
    security_opt:
      - no-new-privileges:true
    healthcheck:
      # NOTE(review): this only proves the container can exec a command — it does
      # not verify the API is answering. Presumably chosen because the image lacks
      # wget/curl; confirm before replacing with an HTTP-based check.
      test: stat /etc/passwd || exit 1
    volumes:
      - /volume1/docker/piped/config.properties:/app/config.properties:ro
    restart: on-failure:5
    depends_on:
      db:
        condition: service_healthy

  # Piped web frontend; the entrypoint rewrites the baked-in API hostname before
  # starting nginx. Runs as root (user 0:0) so sed can modify the bundled assets.
  piped-front:
    image: 1337kavin/piped-frontend:latest
    entrypoint: ash -c 'sed -i s/pipedapi.kavin.rocks/pipedapi.yourname.synology.me/g /usr/share/nginx/html/assets/* && /docker-entrypoint.sh && nginx -g "daemon off;"'
    container_name: Piped-FRONTEND
    hostname: piped-frontend
    # Quoted: digit-and-colon plain scalars risk YAML 1.1 sexagesimal parsing.
    user: "0:0"
    mem_limit: 1g
    cpu_shares: 768
    security_opt:
      - no-new-privileges:true
    healthcheck:
      test: wget --no-verbose --tries=1 --spider http://localhost:80
    restart: on-failure:5
    depends_on:
      piped-back:
        condition: service_healthy

  # Reverse proxy tying the Piped pieces together; talks to piped-proxy over the
  # shared unix-socket directory mounted at /var/run/ytproxy.
  nginx:
    image: nginx:mainline-alpine
    container_name: Piped-NGINX
    hostname: nginx
    mem_limit: 512m
    cpu_shares: 768
    security_opt:
      - no-new-privileges:true
    healthcheck:
      test: wget --no-verbose --tries=1 --spider http://localhost:80
    ports:
      # Quoted: Compose documentation recommends quoting HOST:CONTAINER mappings
      # to avoid YAML base-60 integer misparsing.
      - "8045:80"
    volumes:
      - /volume1/docker/piped/nginx.conf:/etc/nginx/nginx.conf:ro
      - /volume1/docker/piped/pipedapi.conf:/etc/nginx/conf.d/pipedapi.conf:ro
      - /volume1/docker/piped/pipedproxy.conf:/etc/nginx/conf.d/pipedproxy.conf:ro
      - /volume1/docker/piped/pipedfrontend.conf:/etc/nginx/conf.d/pipedfrontend.conf:ro
      - /volume1/docker/piped/ytproxy.conf:/etc/nginx/snippets/ytproxy.conf:ro
      - /volume1/docker/piped/piped-proxy:/var/run/ytproxy:rw
    restart: on-failure:5
    depends_on:
      piped-back:
        condition: service_healthy
      piped-front:
        condition: service_started
      piped-proxy:
        condition: service_started

  # Hyperpipe API backend; depends on the Piped nginx being healthy.
  hyperpipe-back:
    image: codeberg.org/hyperpipe/hyperpipe-backend:latest
    container_name: Hyperpipe-API
    hostname: hyperpipe-backend
    mem_limit: 512m
    cpu_shares: 768
    security_opt:
      - no-new-privileges:true
    read_only: true
    user: "1026:100"
    ports:
      - "3771:3000"
    environment:
      HYP_PROXY: hyperpipe-proxy.onrender.com
    restart: on-failure:5
    depends_on:
      nginx:
        condition: service_healthy

  # Hyperpipe web frontend; entrypoint rewrites both the Piped API and the
  # Hyperpipe API hostnames inside the bundled assets before starting nginx.
  hyperpipe-front:
    image: codeberg.org/hyperpipe/hyperpipe:latest
    entrypoint: sh -c 'find /usr/share/nginx/html -type f -exec sed -i s/pipedapi.kavin.rocks/pipedapi.yourname.synology.me/g {} \; -exec sed -i s/hyperpipeapi.onrender.com/hyperpipeapi.yourname.synology.me/g {} \; && /docker-entrypoint.sh && nginx -g "daemon off;"'
    container_name: Hyperpipe-FRONTEND
    hostname: hyperpipe-frontend
    mem_limit: 512m
    cpu_shares: 768
    security_opt:
      - no-new-privileges:true
    healthcheck:
      # NOTE(review): probes an external HTTPS URL, so health depends on DNS/TLS
      # reachability from inside the container (deliberate per the header comment —
      # the localhost probe below reported 'unhealthy').
      test: wget --no-verbose --tries=1 --spider https://piped.yourname.synology.me
      #test: wget --no-verbose --tries=1 --spider http://localhost
    ports:
      - "8745:80"
    restart: on-failure:5
    depends_on:
      hyperpipe-back:
        condition: service_started
# NOTE(review): removed a duplicated, line-numbered paste of this same file that
# followed here ("N | ... |" viewer-table residue) — it was not valid YAML and
# contained no content beyond what appears above.