# docker-compose.yml (4.1 KiB, YAML — original file)
#The healthcheck for hyperpipe-front provided in the original tutorial did not quite work out for me
#so there's this alternative.
services:
db:
image: postgres:16
container_name: Piped-DB
hostname: piped-db
mem_limit: 512m
cpu_shares: 768
security_opt:
- no-new-privileges:true
healthcheck:
test: ["CMD", "pg_isready", "-q", "-d", "piped", "-U", "pipeduser"]
timeout: 45s
interval: 10s
retries: 10
volumes:
- /volume1/docker/piped/db:/var/lib/postgresql/data:rw
environment:
POSTGRES_DB: piped
POSTGRES_USER: pipeduser
POSTGRES_PASSWORD: pipedpass
restart: on-failure:5
piped-proxy:
image: 1337kavin/piped-proxy:latest
container_name: Piped-PROXY
hostname: piped-proxy
mem_limit: 512m
cpu_shares: 768
security_opt:
- no-new-privileges:true
read_only: true
volumes:
- /volume1/docker/piped/piped-proxy:/app/socket:rw
environment:
UDS: 1
restart: on-failure:5
piped-back:
image: 1337kavin/piped:latest
container_name: Piped-BACKEND
hostname: piped-backend
mem_limit: 2g
cpu_shares: 768
security_opt:
- no-new-privileges:true
healthcheck:
test: stat /etc/passwd || exit 1
volumes:
- /volume1/docker/piped/config.properties:/app/config.properties:ro
restart: on-failure:5
depends_on:
db:
condition: service_healthy
piped-front:
image: 1337kavin/piped-frontend:latest
entrypoint: ash -c 'sed -i s/pipedapi.kavin.rocks/pipedapi.yourname.synology.me/g /usr/share/nginx/html/assets/* && /docker-entrypoint.sh && nginx -g "daemon off;"'
container_name: Piped-FRONTEND
hostname: piped-frontend
user: 0:0
mem_limit: 1g
cpu_shares: 768
security_opt:
- no-new-privileges:true
healthcheck:
test: wget --no-verbose --tries=1 --spider http://localhost:80
restart: on-failure:5
depends_on:
piped-back:
condition: service_healthy
nginx:
image: nginx:mainline-alpine
container_name: Piped-NGINX
hostname: nginx
mem_limit: 512m
cpu_shares: 768
security_opt:
- no-new-privileges:true
healthcheck:
test: wget --no-verbose --tries=1 --spider http://localhost:80
ports:
- 8045:80
volumes:
- /volume1/docker/piped/nginx.conf:/etc/nginx/nginx.conf:ro
- /volume1/docker/piped/pipedapi.conf:/etc/nginx/conf.d/pipedapi.conf:ro
- /volume1/docker/piped/pipedproxy.conf:/etc/nginx/conf.d/pipedproxy.conf:ro
- /volume1/docker/piped/pipedfrontend.conf:/etc/nginx/conf.d/pipedfrontend.conf:ro
- /volume1/docker/piped/ytproxy.conf:/etc/nginx/snippets/ytproxy.conf:ro
- /volume1/docker/piped/piped-proxy:/var/run/ytproxy:rw
restart: on-failure:5
depends_on:
piped-back:
condition: service_healthy
piped-front:
condition: service_started
piped-proxy:
condition: service_started
hyperpipe-back:
image: codeberg.org/hyperpipe/hyperpipe-backend:latest
container_name: Hyperpipe-API
hostname: hyperpipe-backend
mem_limit: 512m
cpu_shares: 768
security_opt:
- no-new-privileges:true
read_only: true
user: 1026:100
ports:
- 3771:3000
environment:
HYP_PROXY: hyperpipe-proxy.onrender.com
restart: on-failure:5
depends_on:
nginx:
condition: service_healthy
hyperpipe-front:
image: codeberg.org/hyperpipe/hyperpipe:latest
entrypoint: sh -c 'find /usr/share/nginx/html -type f -exec sed -i s/pipedapi.kavin.rocks/pipedapi.yourname.synology.me/g {} \; -exec sed -i s/hyperpipeapi.onrender.com/hyperpipeapi.yourname.synology.me/g {} \; && /docker-entrypoint.sh && nginx -g "daemon off;"'
container_name: Hyperpipe-FRONTEND
hostname: hyperpipe-frontend
mem_limit: 512m
cpu_shares: 768
security_opt:
- no-new-privileges:true
healthcheck:
test: wget --no-verbose --tries=1 --spider https://piped.yourname.synology.me
#test: wget --no-verbose --tries=1 --spider http://localhost
ports:
- 8745:80
restart: on-failure:5
depends_on:
hyperpipe-back:
condition: service_started
# NOTE: a second, line-numbered rendering of this same file followed here
# (an artifact of the page/extraction tool). It was byte-identical to the
# content above and has been removed.