Docker development stack with Django + PostgreSQL + Nginx + Redis + ELK (optional). This script helps you organize and prepare an isolated environment for development purposes.
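A minimal usage sketch, assuming the gist is saved locally as ark-stack.sh (any name works; the generated project directory "Ark" comes from the pt variable inside the script):

    chmod +x ark-stack.sh
    ./ark-stack.sh
    cd Ark
    docker-compose -f Docker-compose.yml up -d --build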
#!/bin/bash
#
# Author: @dabumana
# Copyright BSD
# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
### Intro ########################
echo '''
### WELCOME <=0x0=> ########################################################################
# This script helps you organize a virtual environment on an isolated container        #
# for development and production purposes; the project requirements and variables      #
# are adjusted to the architecture parameters detailed in the following sequence.      #
############################################################################################
'''
### Project Values ###############
au=@dabumana
dt=$(date)
pt=Ark
rp=https://gist.github.com/dabumana/98daa0893dd143a63327b4a8ca1af77b
li=BSD
### Structure Values #############
arr=("$pt"/{parallel_services/{django/{requires/requirements.txt,local/.env,config/conf.yml,{Dockerfile,README.md}},elasticsearch/{local/.env,config/elastic.yml,{Dockerfile,README.md}},kibana/{local/.env,config/kibana.yml,{Dockerfile,README.md}},logstash/{local/.env,config/{logstash.yml,logstash.conf},{Dockerfile,README.md}},packetbeat/{local/.env,config/packetbeat.yml,{Dockerfile,README.md}},nginx/{local/.env,config/nginx.conf,{Dockerfile,README.md}},postgres/{local/.env,config/db.sql,{Dockerfile,README.md}},redis/{local/.env,config/redis.conf,{Dockerfile,README.md}}},docs/{README.md,GUIDE.md,DEV.md,TEST.md,PROD.md},build/README.md,keys/{README.md,DEV.md,TEST.md,PROD.md},{Docker-compose.yml,.env,LICENSE,README.md}})
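# For reference, one branch of the brace expansion above, hand-expanded (the
# redis entry; every other service follows the same layout):
#   Ark/parallel_services/redis/local/.env
#   Ark/parallel_services/redis/config/redis.conf
#   Ark/parallel_services/redis/Dockerfile
#   Ark/parallel_services/redis/README.md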
### Timestamp Values #############
for i in "${arr[@]}"; do mkdir -p "${i%/*}" && echo " Project: $pt
Author: $au
Filename: $i
Date: $dt
Repository: $rp
License: $li" | sed -e "s/^/# /" >> "$i" ;done
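# Each path in arr is created and seeded with a commented header of the form:
#   # Project: Ark
#   # Author: @dabumana
#   # Filename: Ark/<path>
#   # Date: <output of date>
#   # Repository: https://gist.github.com/dabumana/98daa0893dd143a63327b4a8ca1af77b
#   # License: BSD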
### Global Env Values ############
echo '''
# Optional Docker/Compose variables; uncomment and assign values as needed:
#COMPOSE_API_VERSION=
#COMPOSE_CONVERT_WINDOWS_PATHS=
#COMPOSE_FILE=
#COMPOSE_HTTP_TIMEOUT=
#COMPOSE_TLS_VERSION=
#COMPOSE_PROJECT_NAME=
#DOCKER_CERT_PATH=
#DOCKER_HOST=
#DOCKER_TLS_VERIFY=
DNS=0.0.0.0
DNS_SERVER1=6.6.6.6
DNS_SERVER2=9.9.9.9
NOMBRE_CONTAINER=bind_com
PID=600
PUERTO=80
PUERTO_PUBLICO=80
PUERTO_PROTOCOLO=tcp
PUERTO_MODO=host
EXPUESTO=80
DNS_B=0.0.0.0
DNS_SERVER1_B=6.6.6.6
DNS_SERVER2_B=9.9.9.9
VOLUMEN_SOURCE_M=data
VOLUMEN_TARGET_M=/opt/servicios/backend/django/src
VOLUMEN_SOURCE_STA=static
VOLUMEN_TARGET_STA=/opt/servicios/backend/django/static
VOLUMEN_SOURCE_MED=media
VOLUMEN_TARGET_MED=/opt/servicios/backend/django/media
NOMBRE_CONTAINER_B=backend_com
PID_B=601
PUERTO_B=8000
PUERTO_PUBLICO_B=8000
PUERTO_PROTOCOLO_B=tcp
PUERTO_MODO_B=host
EXPUESTO_B=8000
DNS_C=0.0.0.0
DNS_SERVER1_C=6.6.6.6
DNS_SERVER2_C=9.9.9.9
NOMBRE_CONTAINER_C=data_ext_com
VOLUMEN_SOURCE_DATA_EXT=data_ext
VOLUMEN_TARGET_DATA_EXT=/opt/servicios/DB/data_ext
PID_C=602
PUERTO_C=5432
PUERTO_PUBLICO_C=5432
PUERTO_PROTOCOLO_C=tcp
PUERTO_MODO_C=host
EXPUESTO_C=5432
DNS_D=0.0.0.0
DNS_SERVER1_D=6.6.6.6
DNS_SERVER2_D=9.9.9.9
NOMBRE_CONTAINER_D=data_int_com
VOLUMEN_SOURCE_DATA_INT=data_int
VOLUMEN_TARGET_DATA_INT=/opt/servicios/DB/data_int
PID_D=603
PUERTO_D=6379
PUERTO_PUBLICO_D=6379
PUERTO_PROTOCOLO_D=tcp
PUERTO_MODO_D=host
EXPUESTO_D=6379
DNS_E=0.0.0.0
DNS_SERVER1_E=6.6.6.6
DNS_SERVER2_E=9.9.9.9
NOMBRE_CONTAINER_E=data_es_com
VOLUMEN_SOURCE_DATA_ES=data_es
VOLUMEN_TARGET_DATA_ES=/usr/share/elasticsearch/data:rw
VOLUMEN_SOURCE_CONF_ES=./parallel_services/elasticsearch/config/
VOLUMEN_TARGET_CONF_ES=/etc/elasticsearch/config:ro
PID_E=604
PUERTO_E=9200
PUERTO_PUBLICO_E=9200
PUERTO_PROTOCOLO_E=tcp
PUERTO_MODO_E=host
EXPUESTO_E=9200
DNS_F=0.0.0.0
DNS_SERVER1_F=6.6.6.6
DNS_SERVER2_F=9.9.9.9
NOMBRE_CONTAINER_F=data_ki_com
VOLUMEN_SOURCE_DATA_KI=data_ki
VOLUMEN_TARGET_DATA_KI=/usr/share/kibana/data:rw
VOLUMEN_SOURCE_CONF_KI=./parallel_services/kibana/config/
VOLUMEN_TARGET_CONF_KI=/usr/share/kibana/config:ro
PID_F=605
PUERTO_F=5601
PUERTO_PUBLICO_F=5601
PUERTO_PROTOCOLO_F=tcp
PUERTO_MODO_F=host
EXPUESTO_F=5601
DNS_G=0.0.0.0
DNS_SERVER1_G=6.6.6.6
DNS_SERVER2_G=9.9.9.9
NOMBRE_CONTAINER_G=data_lo_com
VOLUMEN_SOURCE_DATA_LO=data_lo
VOLUMEN_TARGET_DATA_LO=/usr/share/logstash/data:rw
VOLUMEN_SOURCE_CONF_LO=./parallel_services/logstash/config/
VOLUMEN_TARGET_CONF_LO=/usr/share/logstash/config:ro
PID_G=606
PUERTO_G=5000
PUERTO_PUBLICO_G=5000
PUERTO_PROTOCOLO_G=tcp
PUERTO_MODO_G=host
EXPUESTO_G=5000
DNS_H=0.0.0.0
DNS_SERVER1_H=6.6.6.6
DNS_SERVER2_H=9.9.9.9
NOMBRE_CONTAINER_H=data_pb_ch4n
VOLUMEN_SOURCE_DATA_PB=data_pb
VOLUMEN_TARGET_DATA_PB=/usr/share/packetbeat/data:rw
VOLUMEN_SOURCE_CONF_PB=./parallel_services/packetbeat/config/
VOLUMEN_TARGET_CONF_PB=/usr/share/packetbeat/config:ro
PID_H=607
PUERTO_H=88
PUERTO_PUBLICO_H=88
PUERTO_PROTOCOLO_H=tcp
PUERTO_MODO_H=host
EXPUESTO_H=88
''' >>"$pt"/.env
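# Note: docker-compose substitutes the $VAR references in Docker-compose.yml from
# this project-level .env at parse time; the resolved file can be inspected with:
#   ( cd "$pt" && docker-compose -f Docker-compose.yml config | less )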
### Docker Compose Orchestration ##########################
## You can use the local .env files if you prefer; just change the path in the 'env_file' key [parallel_services/{service}/local/.env],
## otherwise the global .env file is used by default.
echo '''
version: "3.6"
services:
  data_int:
    build:
      context: parallel_services/redis
      dockerfile: Dockerfile
    deploy:
      mode: replicated
      replicas: 1
      resources:
        limits:
          cpus: "2"
          memory: 2G
        reservations:
          cpus: "1"
          memory: 1G
    env_file:
      - .env
    dns: $DNS_D
    dns_search:
      - $DNS_SERVER1_D
      - $DNS_SERVER2_D
    volumes:
      - type: volume
        source: $VOLUMEN_SOURCE_DATA_INT
        target: $VOLUMEN_TARGET_DATA_INT
        volume:
          nocopy: true
    container_name: $NOMBRE_CONTAINER_D
    pid: $PID_D
    stop_grace_period: 3s
    stop_signal: SIGKILL
    ports:
      - target: $PUERTO_D
        published: $PUERTO_PUBLICO_D
        protocol: $PUERTO_PROTOCOLO_D
        mode: $PUERTO_MODO_D
    expose:
      - $EXPUESTO_D
    networks:
      - Interna
    security_opt:
      - label:user:USER
      - label:role:ROLE
  data_ext:
    build:
      context: parallel_services/postgres
      dockerfile: Dockerfile
    deploy:
      mode: replicated
      replicas: 1
      resources:
        limits:
          cpus: "2"
          memory: 2G
        reservations:
          cpus: "1"
          memory: 1G
    env_file:
      - .env
    dns: $DNS_C
    dns_search:
      - $DNS_SERVER1_C
      - $DNS_SERVER2_C
    volumes:
      - type: volume
        source: $VOLUMEN_SOURCE_DATA_EXT
        target: $VOLUMEN_TARGET_DATA_EXT
        volume:
          nocopy: true
    container_name: $NOMBRE_CONTAINER_C
    pid: $PID_C
    stop_grace_period: 3s
    stop_signal: SIGKILL
    ports:
      - target: $PUERTO_C
        published: $PUERTO_PUBLICO_C
        protocol: $PUERTO_PROTOCOLO_C
        mode: $PUERTO_MODO_C
    expose:
      - $EXPUESTO_C
    networks:
      - Externa
    security_opt:
      - label:user:USER
      - label:role:ROLE
  backend:
    build:
      context: parallel_services/django
      dockerfile: Dockerfile
    deploy:
      mode: replicated
      replicas: 1
      resources:
        limits:
          cpus: "2"
          memory: 2G
        reservations:
          cpus: "1"
          memory: 1G
    #command: python3 manage.py runserver 0.0.0.0:8000
    env_file:
      - .env
    dns: $DNS_B
    dns_search:
      - $DNS_SERVER1_B
      - $DNS_SERVER2_B
    volumes:
      - type: volume
        source: $VOLUMEN_SOURCE_M
        target: $VOLUMEN_TARGET_M
        volume:
          nocopy: true
      - type: volume
        source: $VOLUMEN_SOURCE_STA
        target: $VOLUMEN_TARGET_STA
        volume:
          nocopy: true
      - type: volume
        source: $VOLUMEN_SOURCE_MED
        target: $VOLUMEN_TARGET_MED
        volume:
          nocopy: true
    container_name: $NOMBRE_CONTAINER_B
    pid: $PID_B
    stop_grace_period: 3s
    stop_signal: SIGKILL
    ports:
      - target: $PUERTO_B
        published: $PUERTO_PUBLICO_B
        protocol: $PUERTO_PROTOCOLO_B
        mode: $PUERTO_MODO_B
    expose:
      - $EXPUESTO_B
    depends_on:
      - data_int
      - data_ext
      #- data_es
      #- data_ki
      #- data_lo
      #- data_pb
    networks:
      - Subfija
    security_opt:
      - label:user:USER
      - label:role:ROLE
  bind:
    build:
      context: parallel_services/nginx
      dockerfile: Dockerfile
    deploy:
      mode: replicated
      replicas: 1
      resources:
        limits:
          cpus: "1"
          memory: 100M
        reservations:
          cpus: "1"
          memory: 10M
    env_file:
      - .env
    dns: $DNS
    dns_search:
      - $DNS_SERVER1
      - $DNS_SERVER2
    volumes:
      - type: volume
        source: $VOLUMEN_SOURCE_STA
        target: $VOLUMEN_TARGET_STA
        volume:
          nocopy: true
      - type: volume
        source: $VOLUMEN_SOURCE_MED
        target: $VOLUMEN_TARGET_MED
        volume:
          nocopy: true
    container_name: $NOMBRE_CONTAINER
    pid: $PID
    stop_grace_period: 3s
    stop_signal: SIGKILL
    ports:
      - target: $PUERTO
        published: $PUERTO_PUBLICO
        protocol: $PUERTO_PROTOCOLO
        mode: $PUERTO_MODO
    expose:
      - $EXPUESTO
    depends_on:
      - backend
    networks:
      - Reverse
    security_opt:
      - label:user:USER
      - label:role:ROLE
volumes:
  data_es:
    #name: "data_es"
    #driver: "local"
  data_lo:
    #name: "data_lo"
    #driver: "local"
  data_ki:
    #name: "data_ki"
    #driver: "local"
  data_pb:
    #name: "data_pb"
    #driver: "local"
  static:
    #name: "static"
    #driver: "local"
  media:
    #name: "media"
    #driver: "local"
  data:
    #name: "data"
    #driver: "local"
  data_ext:
    #name: "data_ext"
    #driver: "local"
  data_int:
    #name: "data_int"
    #driver: "local"
networks:
  Subfija:
    driver: bridge
  Reverse:
    driver: bridge
  Externa:
    driver: bridge
  Interna:
    driver: bridge
  default:
    driver: host
''' >> "$pt"/Docker-compose.yml
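# A hypothetical staged start: build and bring up only the data services first,
# then the rest (service names as defined in the compose file above):
#   ( cd "$pt" && docker-compose -f Docker-compose.yml up -d --build data_int data_ext )
#   ( cd "$pt" && docker-compose -f Docker-compose.yml up -d --build backend bind )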
### Django Node ###############################################
echo '''
FROM python:3.7
ENV PYTHONUNBUFFERED 1
ARG PYTHONPATH=/opt/servicios/backend/django/src
RUN mkdir -p /opt/servicios/backend/django/src
COPY requires/ /opt/servicios/backend/django/src
WORKDIR /opt/servicios/backend/django/src
RUN ls /opt/servicios/backend/django/src
RUN pip install -r requirements.txt
EXPOSE 8000
HEALTHCHECK --interval=33s --timeout=12s --start-period=3s --retries=3 CMD [ "curl", "-f", "http://localhost:8000" ]
''' >> "$pt"/parallel_services/django/Dockerfile
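# Standalone build check for the Django image; the build context must contain
# the requires/ directory generated earlier (ark/django is an arbitrary tag):
#   docker build -t ark/django "$pt"/parallel_services/django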
### Elastic Node ##############################################
echo '''
FROM docker.elastic.co/elasticsearch/elasticsearch:6.4.2
''' >> "$pt"/parallel_services/elasticsearch/Dockerfile
### Elastic Node Config ###############################################
echo '''
cluster.name: "pivot_cluster_es"
network.host: 0.0.0.0
transport.host: 0.0.0.0
bootstrap.memory_lock: true
discovery.zen.minimum_master_nodes: 1
discovery.type: single-node
discovery.zen.ping.unicast.hosts: "0.0.0.0:9200"
#-XX:PermSize=256m -XX:MaxPermSize=1024m -XX:ReservedCodeCacheSize=64m -XX:+UseG1GC -XX:+CMSClassUnloadingEnabled -XX:+PrintHeapAtGC -XX:+PrintGCDetails -XX:+PrintGCTimeStamps
#xpack.license.self_generated.type: trial
#xpack.security.enabled: true
#xpack.security.http.ssl.enabled: true
#xpack.security.http.ssl.verification_mode: certificate
#xpack.security.http.ssl.key: certs/elasticsearch/elasticsearch.key
#xpack.security.http.ssl.certificate: certs/elasticsearch/elasticsearch.crt
#xpack.security.http.ssl.certificate_authorities: [ "certs/ca/ca.crt" ]
#xpack.security.transport.ssl.enabled: true
#xpack.security.transport.ssl.key: certs/elasticsearch/elasticsearch.key
#xpack.security.transport.ssl.certificate: certs/elasticsearch/elasticsearch.crt
#xpack.security.transport.ssl.certificate_authorities: [ "certs/ca/ca.crt" ]
''' >> "$pt"/parallel_services/elasticsearch/config/elastic.yml
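# Once the node is up, cluster health can be checked from the host:
#   curl -s http://localhost:9200/_cluster/health?pretty
# A "green" or "yellow" status is expected for this single-node setup.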
### Kibana Node ########################################
echo '''
FROM docker.elastic.co/kibana/kibana:6.4.2
''' >> "$pt"/parallel_services/kibana/Dockerfile
### Kibana Node Config ########################################
echo '''
server.host: 0.0.0.0
server.port: 5601
elasticsearch.url: "http://0.0.0.0:9200"
xpack.monitoring.ui.container.elasticsearch.enabled: true
''' >> "$pt"/parallel_services/kibana/config/kibana.yml
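# Kibana exposes a status endpoint that is handy for smoke tests:
#   curl -s http://localhost:5601/api/status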
### Logstash Node ########################################
echo '''
FROM docker.elastic.co/logstash/logstash:6.4.2
COPY config/logstash.conf /usr/share/logstash/pipeline/logstash.conf
''' >> "$pt"/parallel_services/logstash/Dockerfile
### Logstash Node Config ##########################################
echo '''
http.host: 0.0.0.0
xpack.monitoring.elasticsearch.url: "http://0.0.0.0:9200"
''' >> "$pt"/parallel_services/logstash/config/logstash.yml
echo '''
input {
  tcp {
    port => 5000
  }
  udp {
    port => 12121
  }
}
output {
  elasticsearch {
    hosts => ["elasticsearch:9200"]
  }
}
''' >> "$pt"/parallel_services/logstash/config/logstash.conf
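# Smoke test for the TCP input above (assumes netcat on the host); the event
# should appear in elasticsearch shortly after:
#   echo '{"message":"hello from ark"}' | nc localhost 5000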
### Packetbeat Node ########################################
echo '''
FROM docker.elastic.co/beats/packetbeat:6.2.4
''' >> "$pt"/parallel_services/packetbeat/Dockerfile
### Packetbeat Node Config ############################################
echo '''
packetbeat.interfaces.device: any
packetbeat.interfaces.snaplen: 1514
packetbeat.interfaces.type: af_packet
packetbeat.interfaces.buffer_size_mb: 30
packetbeat.interfaces.bpf_filter: "port 88"
''' >> "$pt"/parallel_services/packetbeat/config/packetbeat.yml
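# Note: live capture inside a container needs elevated network privileges; when
# running the image by hand, something like the following is typically required:
#   docker run --cap-add=NET_ADMIN --network=host docker.elastic.co/beats/packetbeat:6.2.4
# An output section (e.g. output.elasticsearch.hosts) must also be added to
# packetbeat.yml before events are shipped anywhere.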
### Nginx Node ########################################
echo '''
FROM nginx:alpine
ADD config/nginx.conf /etc/nginx/nginx.conf
RUN mkdir -p /var/log/nginx
CMD ["nginx", "-g", "daemon off;"]
''' >> "$pt"/parallel_services/nginx/Dockerfile
### Nginx Node Config ########################################
echo '''
user nginx;
worker_processes 1;
error_log /var/log/nginx/error.log warn;
pid /var/run/nginx.pid;
events {
    worker_connections 999;
}
http {
    include /etc/nginx/mime.types;
    default_type application/octet-stream;
    log_format main '$remote_addr - $remote_user [$time_local] '
                    '"$request" $status $body_bytes_sent '
                    '"$http_referer" "$http_user_agent" '
                    '"$http_x_forwarded_for"';
    access_log /var/log/nginx/access.log main;
    upstream bind_io {
        server 0.0.0.0:8000 fail_timeout=0;
    }
    server {
        listen 80 default_server;
        listen [::]:80 default_server;
        server_name localhost;
        charset utf-8;
        root /usr/share/nginx/html;
        index index.html;
        location / {
            try_files $uri @app;
        }
        location /static/ {
            autoindex on;
            alias /opt/servicios/backend/django/static/;
        }
        location /media/ {
            autoindex on;
            alias /opt/servicios/backend/django/media/;
        }
        location @app {
            proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
            # HTTPS
            # proxy_set_header X-Forwarded-Proto https;
            proxy_set_header Host $http_host;
            proxy_redirect off;
            proxy_pass http://bind_io;
        }
        error_page 500 502 503 504 /500.html;
        location = /500.html {
            root /home/<user>/static;
        }
    }
}
''' >> "$pt"/parallel_services/nginx/config/nginx.conf
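# The rendered config can be syntax-checked in a throwaway container:
#   docker run --rm -v "$PWD/$pt/parallel_services/nginx/config/nginx.conf:/etc/nginx/nginx.conf:ro" nginx:alpine nginx -t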
### Postgres Node ########################################
echo '''
FROM postgres:alpine
ADD config/db.sql /docker-entrypoint-initdb.d/Db_init.sql
RUN ls /docker-entrypoint-initdb.d/
RUN mkdir -p /opt/servicios/DB/data_ext
''' >> "$pt"/parallel_services/postgres/Dockerfile
### Postgres Node Config ##########################################
echo '''
-- password is dollar-quoted so it survives the single-quoted shell block above
CREATE USER ch4n_sen WITH PASSWORD $$domo63n3515$$;
CREATE DATABASE ch4n_DB;
GRANT ALL PRIVILEGES ON DATABASE ch4n_DB TO ch4n_sen;
''' > "$pt"/parallel_services/postgres/config/db.sql
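# Once the stack is up, the init script can be verified from the host
# (container name taken from the .env generated above; note that the unquoted
# identifier ch4n_DB folds to ch4n_db inside PostgreSQL):
#   docker exec -it data_ext_com psql -U ch4n_sen -d ch4n_db -c "\conninfo"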
### Redis Node ########################################
echo '''
FROM redis:latest
COPY config/redis.conf /etc/redis/
RUN echo "vm.overcommit_memory=1" >> /etc/sysctl.conf
RUN cat /etc/sysctl.conf
CMD ["redis-server", "/etc/redis/redis.conf"]
''' >> "$pt"/parallel_services/redis/Dockerfile
### Redis Node Config ########################################
echo '''
bind 0.0.0.0 # listen on all container interfaces so linked services can connect
protected-mode yes
port 6379
tcp-backlog 911
tcp-keepalive 333
client-output-buffer-limit normal 16mb 4mb 60
client-query-buffer-limit 32mb
proto-max-bulk-len 64mb
hz 12
maxclients 999
maxmemory-policy noeviction
maxmemory-samples 6
supervised no
loglevel verbose
syslog-ident redis
databases 1
always-show-logo no
save 900 1
stop-writes-on-bgsave-error yes
dbfilename IO.rdb
dir ./
# Experimental option to defragment memory and reclaim unused space by means of a flush
# activedefrag yes
# active-defrag-ignore-bytes 100mb
# active-defrag-threshold-lower 10
# active-defrag-threshold-upper 100
# active-defrag-cycle-min 25
# active-defrag-cycle-max 75
''' >> "$pt"/parallel_services/redis/config/redis.conf
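# Quick liveness check once the container is running (container name from .env):
#   docker exec -it data_int_com redis-cli ping   # expected reply: PONG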
### Requirements Py Env ###############################################
echo '''
django
psycopg2
django-admin
django_markdownify
''' >> "$pt"/parallel_services/django/requires/requirements.txt
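# Note: the packages above are unpinned, so builds are not reproducible; pinning
# explicit versions is safer, e.g.:
#   django==2.2.28
#   psycopg2==2.8.6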
### Print Architecture #####################################
find . | sed -e "s/[^-][^\/]*\// |/g" -e "s/|\([^ ]\)/|-\1/"
############################################################