Skip to content

Instantly share code, notes, and snippets.

@skylerwlewis
Last active January 26, 2025 14:47
Show Gist options
  • Save skylerwlewis/50dd3dd628a5198ea29cfbcbf21783ac to your computer and use it in GitHub Desktop.
Routing Wireguard container traffic through Gluetun container
# https://gist.github.com/skylerwlewis/50dd3dd628a5198ea29cfbcbf21783ac
#
# This is a Gluetun + Wireguard + Wireguard-UI docker compose setup based on instructions I found here:
# https://lemmy.max-p.me/post/midwest.social/14630711
#
# In addition to the logic in the link above, I added:
# - Wireguard-UI to simplify client setup and PostUp & PostDown modifications
# - Auto-restart capability for Wireguard & Wireguard-UI containers if Gluetun goes down
#
# After copying this file to your desired directory, run the following to setup the containers:
# docker compose up -d && docker compose down && docker compose up -d
# This up-down-up is required because wireguard-ui creates the wg0.conf file too late during the initial setup
#
# If you are using 172.22.0.0/24 already, simply change "172.22" to a different subnet.
#
# If you use a different local network (other than 192.168.1.0/24),
# replace "192.168.1.0/24" in the config below with your local network
#
# If you need to run on different ports, or want to run multiple copies of the setup below:
# Use a different gluetun healthcheck port (default 9999):
# - Under gluetun's ports, change "9999:9999/tcp" to "<updated port>:9999/tcp"
# - Replace all other instances of 9999 in this file with the updated port
# Use a different wireguard port (default 51820):
# - Replace all instances of 51820 in this file with the updated port
# - Replace WGUI_FIREWALL_MARK value "0xca6c" with the hexadecimal value of the updated port
# Use a different wireguard-ui port (default 5000):
# - Under wireguard-ui's ports, change "5000:5000" to "<updated port>:5000"
#
# A version of this setup using wg-easy can be found here:
# https://gist.github.com/skylerwlewis/f009741cd20d8ab86ce65d67accccc3f
# Dedicated bridge network with a fixed subnet so gluetun and wireguard
# can be given the static addresses referenced throughout this file.
networks:
  default:
    ipam:
      config:
        - subnet: 172.22.0.0/24
services:
  # VPN gateway container: all wireguard client traffic is routed out
  # through gluetun's tunnel (see the post-rules.txt config below).
  gluetun:
    image: qmcgaw/gluetun
    cap_add:
      - NET_ADMIN
    devices:
      - /dev/net/tun:/dev/net/tun
    environment:
      PUID: "1000"
      PGID: "1000"
      TZ: "America/Chicago"
      # See https://github.com/qdm12/gluetun-wiki/tree/main/setup/providers
      # for information on how to setup gluetun for different VPN providers.
      # An example of a Mullvad Wireguard setup is below
      VPN_SERVICE_PROVIDER: mullvad
      VPN_TYPE: wireguard
      WIREGUARD_PRIVATE_KEY: "<PrivateKey_value_from_client_.conf_file>"
      # Quoted so YAML does not turn "yes" into boolean true; when set,
      # gluetun will use only Mullvad-owned servers
      OWNED_ONLY: "yes"
      WIREGUARD_ADDRESSES: "<Addresses_value_from_client_.conf_file>"
      SERVER_CITIES: "Amsterdam"
      UPDATER_PERIOD: "24h"
    volumes:
      - ./data/gluetun/conf:/gluetun
    sysctls:
      # Disables ipv6
      - net.ipv6.conf.all.disable_ipv6=1
    restart: unless-stopped
    networks:
      default:
        ipv4_address: 172.22.0.100
    ports:
      # port of the gluetun healthcheck server (quoted: unquoted HH:MM-style
      # port mappings can be misread as base-60 integers by YAML 1.1 parsers)
      - "9999:9999/tcp"
    configs:
      # allows wireguard to send traffic through gluetun
      # https://github.com/qdm12/gluetun-wiki/blob/main/setup/options/firewall.md#custom-iptables-rules
      - source: post-rules.txt
        target: /iptables/post-rules.txt

  # Wireguard server container; its default route is pushed through
  # gluetun by the PostUp script configured in wireguard-ui below.
  wireguard:
    image: lscr.io/linuxserver/wireguard:latest
    cap_add:
      - NET_ADMIN
    environment:
      PUID: "1000"
      PGID: "1000"
      TZ: "America/Chicago"
      LOG_CONFS: "true"  # optional
    volumes:
      - ./data/wireguard:/config
    sysctls:
      - net.ipv4.conf.all.src_valid_mark=1
    networks:
      default:
        ipv4_address: 172.22.0.2
    ports:
      # port for wireguard-ui; it must be published here because the
      # wireguard-ui container joins this container's network namespace
      # and has no network of its own on which to publish ports
      - "5000:5000"
      # port of the wireguard server
      - "51820:51820/udp"
    healthcheck:
      # check health of gluetun container, and auto-restart if gluetun is unhealthy
      # (reaches gluetun's published healthcheck port via the bridge gateway,
      # since gluetun's firewall blocks direct container-to-container access)
      test: "nc -z 172.22.0.1 9999 || kill 1"
      interval: 1m
      timeout: 1m
    restart: unless-stopped

  # Web UI for managing wireguard peers; writes wg0.conf into the
  # wireguard container's config volume.
  wireguard-ui:
    image: ngoduykhanh/wireguard-ui:latest
    depends_on:
      - wireguard
    cap_add:
      - NET_ADMIN
    # use the network of the 'wireguard' service. this enables showing
    # active clients in the status page
    network_mode: service:wireguard
    environment:
      PUID: "1000"
      PGID: "1000"
      TZ: "America/Chicago"
      # WGUI_ENDPOINT_ADDRESS: "wireguard.example.com" # optional
      WGUI_DNS: "172.22.0.100"
      WGUI_MTU: "0"
      WGUI_PERSISTENT_KEEPALIVE: "0"
      WGUI_FIREWALL_MARK: "0xca6c"  # 51820 in hexadecimal
      WGUI_CONFIG_FILE_PATH: "/etc/wireguard/wg_confs/wg0.conf"
      WGUI_SERVER_INTERFACE_ADDRESSES: "10.252.1.1/24"
      WGUI_SERVER_LISTEN_PORT: "51820"
      # PostUp: NAT client traffic, mark wg packets, and policy-route
      # everything except marked packets through gluetun (172.22.0.100);
      # local LAN 192.168.1.0/24 goes out via the bridge gateway instead
      WGUI_SERVER_POST_UP_SCRIPT: "iptables -A FORWARD -i %i -j ACCEPT; iptables -A FORWARD -o %i -j ACCEPT; iptables -t nat -A POSTROUTING -o eth+ -j MASQUERADE; wg set wg0 fwmark 51820; ip -4 rule add not fwmark 51820 table 51820; ip -4 rule add table main suppress_prefixlength 0; ip -4 route add 0.0.0.0/0 via 172.22.0.100 table 51820; ip -4 route add 192.168.1.0/24 via 172.22.0.1"
      WGUI_SERVER_POST_DOWN_SCRIPT: "iptables -D FORWARD -i %i -j ACCEPT; iptables -D FORWARD -o %i -j ACCEPT; iptables -t nat -D POSTROUTING -o eth+ -j MASQUERADE"
    logging:
      driver: json-file
      options:
        max-size: "50m"
    volumes:
      - ./data/wireguard-ui/db:/app/db
      - ./data/wireguard:/etc/wireguard
    healthcheck:
      # check health of gluetun container, and auto-restart if gluetun is unhealthy
      test: "nc -z 172.22.0.1 9999 || kill 1"
      interval: 1m
      timeout: 1m
    restart: unless-stopped
# Inline iptables rules mounted into gluetun (see the gluetun service's
# configs section): masquerade traffic leaving the VPN tunnel and allow
# forwarding to/from the wireguard container at 172.22.0.2.
configs:
  post-rules.txt:
    content: |
      iptables -t nat -A POSTROUTING -o tun+ -j MASQUERADE
      iptables -t filter -A FORWARD -d 172.22.0.2 -j ACCEPT
      iptables -t filter -A FORWARD -s 172.22.0.2 -j ACCEPT
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment