@laughingman7743
Last active August 12, 2021 13:46
Terraform wrapper python
.idea
*.iml
*.tfstate
*.tfstate.backup
.terraform
*/*/*/__*.tf
!*/*/_template/__*.tf
Pipfile.lock
version: 0.2

env:
  variables:
    AWS_DEFAULT_REGION: "ap-northeast-1"
  parameter-store:
    TF_AWS_ACCESS_KEY_ID: "/path/to/access_key_id"
    TF_AWS_SECRET_ACCESS_KEY: "/path/to/secret_access_key"

phases:
  build:
    commands:
      - python bin/tf.py --notify aws ${CMD} --no-input --no-lock --no-color production ${ROLE}
version: 0.2

env:
  variables:
    AWS_DEFAULT_REGION: "ap-northeast-1"
  parameter-store:
    TF_AWS_ACCESS_KEY_ID: "/path/to/access_key_id"
    TF_AWS_SECRET_ACCESS_KEY: "/path/to/secret_access_key"

phases:
  build:
    commands:
      - python bin/tf.py --notify aws ${CMD} --no-input --no-lock --no-color staging ${ROLE}
FROM python:3-alpine
ENV TERRAFORM_VERSION="0.11.3"
ADD requirements.txt /tmp/requirements.txt
RUN apk --no-cache add wget ca-certificates unzip bash && \
    wget -q -O /tmp/terraform.zip "https://releases.hashicorp.com/terraform/${TERRAFORM_VERSION}/terraform_${TERRAFORM_VERSION}_linux_amd64.zip" && \
    unzip /tmp/terraform.zip -d /usr/local/bin && \
    apk del --purge wget unzip && \
    pip install -r /tmp/requirements.txt && \
    rm -rf /var/cache/apk/* /tmp/terraform.zip /tmp/requirements.txt
CMD ["/bin/bash"]
String getRole(comment) {
    if (comment != null) {
        comments = comment.split(/\\r\\n/)
        for (String c : comments) {
            if (c.startsWith('test-role:')) {
                return c.replaceAll('test-role:', '')
            }
        }
    }
    return 'all'
}
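// Convention assumed by this job: a line such as
//   test-role:common/network
// in the pull-request description limits the run to that category/role
// (the names above are only examples); without such a line every role is
// planned ('all').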
void codeBuild(Map args) {
    awsCodeBuild projectName: 'YOUR_PROJECT_NAME',
            sourceControlType: 'jenkins',
            sourceVersion: "${sha1}",
            credentialsType: 'keys',
            region: 'ap-northeast-1',
            buildSpecFile: "${args.buildSpec}",
            envVariables: "[" +
                    " {ENV, ${args.env}}," +
                    " {CMD, ${args.cmd}}," +
                    " {ROLE, ${args.role}}," +
                    " {SLACK_CHANNEL, ${args.channel}}," +
                    " {SNS_TOPIC_ARN, ${env.SNS_TOPIC_ARN}}," +
                    " {PULL_REQUEST_TITLE, ${ghprbPullTitle}}," +
                    " {PULL_REQUEST_LINK, ${ghprbPullLink}}," +
                    " {PULL_REQUEST_AUTHOR, ${ghprbPullAuthorLogin}}," +
                    " {PULL_REQUEST_BRANCH, ${ghprbSourceBranch}}," +
                    " {BUILD_URL, ${BUILD_URL}}" +
                    " ]"
}
pipeline {
    agent any
    environment {
        ROLE = getRole "${ghprbPullLongDescription}"
    }
    stages {
        stage('Printenv') {
            steps {
                sh 'printenv'
            }
        }
        stage('Clean workspace') {
            steps {
                deleteDir()
            }
        }
        stage('Checkout') {
            steps {
                checkout scm: [
                        $class           : 'GitSCM',
                        branches         : [[name: "${sha1}"]],
                        userRemoteConfigs: [
                                [
                                        url          : "${env.GIT_URL}",
                                        credentialsId: "YOUR_CREDENTIALS_ID",
                                        refspec      : "+refs/pull/*:refs/remotes/origin/pr/*"
                                ]
                        ]
                ]
            }
        }
        stage('CodeBuild') {
            parallel {
                stage('CodeBuild staging') {
                    steps {
                        codeBuild buildSpec: 'buildspec.stg.yml',
                                env: 'staging',
                                cmd: 'plan --detailed-exitcode',
                                role: "${ROLE}",
                                channel: "${env.STG_SLACK_CHANNEL}"
                    }
                }
                stage('CodeBuild production') {
                    steps {
                        codeBuild buildSpec: 'buildspec.prd.yml',
                                env: 'production',
                                cmd: 'plan --detailed-exitcode',
                                role: "${ROLE}",
                                channel: "${env.PRD_SLACK_CHANNEL}"
                    }
                }
            }
        }
    }
    post {
        failure {
            slackSend(color: 'danger', message: "${JOB_NAME}\n${BUILD_URL}", channel: "${env.STG_SLACK_CHANNEL}")
            slackSend(color: 'danger', message: "${JOB_NAME}\n${BUILD_URL}", channel: "${env.PRD_SLACK_CHANNEL}")
        }
    }
    options {
        timestamps()
    }
}
[[source]]
url = "https://pypi.python.org/simple"
verify_ssl = true
name = "pypi"
[packages]
awscli = "==1.14.35"
"boto3" = "==1.5.25"
click = "==6.7"
simplejson = "==3.13.2"
configobj = "==5.0.6"
[dev-packages]
[requires]
python_version = "3.6"
awscli==1.14.35
boto3==1.5.25
click==6.7
simplejson==3.13.2
configobj==5.0.6
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import glob
import io
import logging
import os
import re
import subprocess
import sys
from gzip import GzipFile
from io import BytesIO
import click
import shutil
import simplejson as json
from boto3.session import Session
from concurrent import futures
from configobj import ConfigObj
_logger = logging.getLogger(__name__)
_logger.addHandler(logging.StreamHandler(sys.stdout))
_logger.setLevel(logging.INFO)
ENV = os.environ
AWS_REGION = os.getenv('AWS_DEFAULT_REGION', 'ap-northeast-1')
CONTEXT_SETTINGS = dict(
    help_option_names=['-h', '--help'],
    max_content_width=120,
)
BASE_PATH = os.path.abspath(os.path.join(os.path.join(__file__, os.pardir), os.pardir))
TEMPLATE_DIR = [
    '{provider}/_template/__*.tf',
    '{provider}/{category}/_template/__*.tf',
]
TFVARS_FILE = [
    '{provider}/_env_tfvars/all.tfvars',
    '{provider}/_env_tfvars/{env}.tfvars',
    '{provider}/{category}/_category_tfvars/category.tfvars',
    '{provider}/{category}/_category_tfvars/{env}_category.tfvars',
    '{provider}/{category}/{role}/_env_tfvars/{env}.tfvars',
]
S3_STATE_FILE = 'state/{env}/{provider}/{category}/{role}.tfstate'
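# Remote state lives in one bucket per environment (S3_BUCKET from tfenv_<env>.ini),
# keyed by provider/category/role. For example (bucket, category and role names are
# illustrative only), the aws role common/network in staging uses
#   s3://<S3_BUCKET>/state/staging/aws/common/network.tfstate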
APPLY_COMPLETE_PATTERN = re.compile(
    r'Apply complete! Resources: (?P<added>\d+) added, (?P<changed>\d+) changed, (?P<destroyed>\d+) destroyed\.',
    re.MULTILINE)
UNICODE_CODE_POINT_PATTERN = re.compile(r'^&#(?P<code>[\d]+)$')
PROVIDER_LIST = ['aws', 'gcp', 'datadog']
def unquote(str_):
    unquoted = []
    code_points = None
    for c in str_:
        if c == '&':
            if code_points:
                unquoted.append(''.join(code_points))
            code_points = [c]
        elif c == '#':
            if code_points:
                code_points.append(c)
            else:
                unquoted.append(c)
        elif c == ';':
            if code_points:
                code_point = ''.join(code_points)
                m = UNICODE_CODE_POINT_PATTERN.match(code_point)
                if m:
                    unquoted.append(chr(int(''.join(code_points[2:]))))
                    code_points = None
                else:
                    unquoted.append(code_point)
            else:
                unquoted.append(c)
        else:
            if code_points:
                code_points.append(c)
            else:
                unquoted.append(c)
    if code_points:
        unquoted.append(''.join(code_points))
    return ''.join(unquoted)
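# unquote() undoes HTML numeric character references in titles that arrive escaped
# via PULL_REQUEST_TITLE. A rough illustration (the code points are only an example):
#   unquote('Add &#26908;&#35388; step')  -> 'Add 検証 step'
#   unquote('plain ascii title')          -> 'plain ascii title'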
def upload(text, profile=None):
    session = Session(profile_name=profile, region_name=AWS_REGION)
    s3_client = session.client('s3')
    s3_stdout_bucket = os.environ['S3_STDOUT_BUCKET']
    s3_stdout_file = 'stdout/{build_id}.gz'
    stream = BytesIO()
    stream.seek(0)
    with GzipFile(fileobj=stream, mode='wb') as gz:
        gz.write(text.encode('utf-8'))
    key = s3_stdout_file.format(build_id=os.environ['CODEBUILD_BUILD_ID'].replace(':', '_'))
    s3_client.put_object(Bucket=s3_stdout_bucket,
                         Key=key,
                         Body=stream.getvalue())
    return s3_stdout_bucket, key
def _publish(client, sns_topic_arn, message, subject='Terraform CI'):
    response = client.publish(
        TopicArn=sns_topic_arn,
        Message=json.dumps(message, ensure_ascii=False, encoding='utf-8'),
        Subject=subject,
    )
    _logger.info(response)
def publish_slack(client, color, pretext, text, s3_file, build_url,
                  pull_request_title, pull_request_link, pull_request_author, pull_request_branch):
    icon_emoji = os.getenv('SLACK_ICON_EMOJI', ':terraform:')
    user_name = os.getenv('SLACK_USER_NAME', 'Terraform CI')
    channel = os.environ['SLACK_CHANNEL']
    sns_topic_arn = os.environ['SLACK_SNS_TOPIC_ARN']
    message = {
        'channel': channel,
        'username': user_name,
        'icon_emoji': icon_emoji,
    }
    fields = [{'title': 'Build URL', 'value': build_url}]
    if pull_request_branch:
        fields.append({'title': 'Branch', 'value': pull_request_branch})
    if pull_request_author:
        fields.append({'title': 'Author', 'value': pull_request_author})
    if s3_file:
        message.update({
            's3_file': s3_file,
        })
    message.update({
        'attachments': json.dumps([{
            'color': color,
            'pretext': pretext,
            'title': unquote(pull_request_title),
            'title_link': pull_request_link,
            'text': text,
            'fields': fields,
            'mrkdwn_in': [
                'pretext', 'title', 'text', 'fields'
            ]
        }], ensure_ascii=False, encoding='utf-8').replace('\\\\n', '\n')
    })
    _logger.info(message)
    _publish(client, sns_topic_arn, message)
def publish_github(client, env, category, role, text, s3_file, pull_request_number):
    git_url = os.environ['GIT_URL']
    sns_topic_arn = os.environ['GITHUB_SNS_TOPIC_ARN']
    message = {
        'body': '### Env\n*{env}*\n### Role\n*{category}/{role}*\n### Plan\n{text}'.format(
            env=env, category=category, role=role, text=text),
        'number': pull_request_number,
        'repo': git_url.split('/')[-1].split('.')[0]
    }
    if s3_file:
        message.update({
            's3_file': s3_file,
        })
    _logger.info(message)
    _publish(client, sns_topic_arn, message)
def publish(attachments, profile=None):
    session = Session(profile_name=profile, region_name=AWS_REGION)
    client = session.client('sns')
    pull_request_title = os.environ['PULL_REQUEST_TITLE']
    pull_request_link = os.environ['PULL_REQUEST_LINK']
    pull_request_author = os.getenv('PULL_REQUEST_AUTHOR', None)
    pull_request_branch = os.getenv('PULL_REQUEST_BRANCH', None)
    pull_request_number = os.getenv('PULL_REQUEST_NUMBER', None)
    build_url = os.environ['BUILD_URL']
    # https://twitter.com/slackhq/status/754026699621330944
    slack_character_limit = int(os.getenv('SLACK_CHARACTER_LIMIT', 8000))
    for env, category, role, color, pretext, text in attachments:
        s3_file = None
        if len(text) > slack_character_limit:
            bucket, key = upload(text)
            s3_file = {
                's3_bucket': bucket,
                's3_key': key,
                'title': build_url,
            }
            text = ''
        publish_slack(client, color, pretext, text, s3_file, build_url,
                      pull_request_title, pull_request_link, pull_request_author, pull_request_branch)
        if pull_request_number:
            publish_github(client, env, category, role, text, s3_file, int(pull_request_number))
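# publish() fans the results out through SNS: one message per role to the Slack topic
# and, when PULL_REQUEST_NUMBER is set, one to the GitHub topic. Output longer than
# SLACK_CHARACTER_LIMIT (8000 by default) is gzipped to S3 by upload() and only the
# bucket/key reference travels in the message, so the topic consumer (assumed here to
# be something like a notification Lambda) can fetch the full plan itself.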
def glob_roles(provider):
    path = os.path.join(BASE_PATH, provider)
    return [(h[0], h[1]) for h in [g.replace(path, '')[1:].split('/')
                                   for g in glob.glob(os.path.join(path, '[!_]*', '[!_]*'))
                                   if os.path.isdir(g)]]
def parse_roles(provider, role):
    if role == 'all':
        roles = glob_roles(provider)
    else:
        roles = [(s[0], s[1]) for s in [r.strip().split('/') for r in role.split(':')
                                        if r.strip()]]
    return roles
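# The ROLE argument is either the literal 'all' (every <category>/<role> directory
# found under the provider) or a colon-separated list of category/role pairs, e.g.
# (names are illustrative only): 'common/network:app/web'.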
def read_ini(provider, env):
    ini_file = os.path.join(BASE_PATH, provider, '_env_ini', 'tfenv_{0}.ini'.format(env))
    if not os.path.exists(ini_file):
        raise RuntimeError('INI FILE: {0} not found.'.format(ini_file))
    config = ConfigObj(ini_file)
    return config.get('S3_BUCKET'), config.get('S3_PROFILE')
def copy_template(provider, category, role):
    for template in TEMPLATE_DIR:
        path = os.path.join(BASE_PATH,
                            template.format(provider=provider, category=category, role=role))
        for g in glob.glob(path):
            _logger.debug('Copy template: {0}'.format(g))
            shutil.copy2(g, os.path.join(
                BASE_PATH, provider, category, role, os.path.basename(g)))
def remove_state_file(provider, category, role):
    path = os.path.join(BASE_PATH, provider, category, role)
    for g in glob.glob(os.path.join(path, '*.tfstate*')):
        _logger.debug('Remove state file: {0}'.format(g))
        os.remove(g)
    shutil.rmtree(os.path.join(path, '.terraform'), ignore_errors=True)
def build_backend_configs(bucket, provider, profile, env, category, role):
    key = S3_STATE_FILE.format(provider=provider, env=env, category=category, role=role)
    backend_configs = ' '.join([
        '-backend-config="bucket={0}"'.format(bucket),
        '-backend-config="key={0}"'.format(key),
        '-backend-config="region={0}"'.format(AWS_REGION),
        '-backend-config="profile={0}"'.format(profile),
    ])
    _logger.debug('Terraform backend configs: {0}'.format(backend_configs))
    return backend_configs
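# With illustrative values (bucket=my-tfstate, profile=terraform, env=staging,
# category=common, role=network) this yields a single space-joined string passed
# to `terraform init`:
#   -backend-config="bucket=my-tfstate" -backend-config="key=state/staging/aws/common/network.tfstate"
#   -backend-config="region=ap-northeast-1" -backend-config="profile=terraform"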
def build_var_files(provider, env, category, role):
    tfvars = []
    for tfvar in TFVARS_FILE:
        tfvar = tfvar.format(provider=provider, env=env, category=category, role=role)
        tfvars.append('-var-file={0}'.format(os.path.join(BASE_PATH, tfvar)))
    tfvars = ' '.join(tfvars)
    _logger.debug('Terraform var files: {0}'.format(tfvars))
    return tfvars
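# Var files are passed in the order defined by TFVARS_FILE, so a later file can
# override an earlier one: provider-wide all.tfvars, then <env>.tfvars, then the
# category-level files, and finally the role's own <env>.tfvars.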
def build_options(**kwargs):
    options = []
    for k, v in kwargs.items():
        if k == 'destroy':
            if v:
                options.append('-destroy')
        elif k == 'detailed_exitcode':
            if v:
                options.append('-detailed-exitcode')
        elif k == 'input':
            if v is False:
                options.append('-input=false')
        elif k == 'lock':
            if v is False:
                options.append('-lock=false')
        elif k == 'lock_timeout':
            if v and v > 0:
                options.append('-lock-timeout={0}s'.format(v))
        elif k == 'module_depth':
            if v and v > -1:
                options.append('-module-depth={0}'.format(v))
        elif k == 'color':
            if v is False:
                options.append('-no-color')
        elif k == 'parallelism':
            if v:
                options.append('-parallelism={0}'.format(v))
        elif k == 'refresh':
            if v is False:
                options.append('-refresh=false')
        elif k == 'var':
            if v:
                for var in v:
                    options.append("-var '{0}'".format(var))
        elif k == 'auto_approve':
            if v:
                options.append('-auto-approve')
        elif k == 'force':
            if v:
                options.append('-force')
        elif k == 'upgrade':
            if v:
                options.append('-upgrade')
        else:
            raise RuntimeError('Unknown option: {0}'.format(k))
    options = ' '.join(options)
    _logger.debug('Terraform options: {0}'.format(options))
    return options
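# Example: build_options(input=False, lock=False, color=False, detailed_exitcode=True)
# returns '-input=false -lock=false -no-color -detailed-exitcode' (keyword order is
# preserved on Python 3.6+); an unknown keyword raises RuntimeError.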
def _exec_terraform(cmd, cwd, notify):
    _logger.debug('Terraform command: {0}'.format(cmd))
    tf_env = ENV.copy()
    if 'TF_AWS_ACCESS_KEY_ID' in ENV.keys():
        tf_env['AWS_ACCESS_KEY_ID'] = ENV['TF_AWS_ACCESS_KEY_ID']
    if 'TF_AWS_SECRET_ACCESS_KEY' in ENV.keys():
        tf_env['AWS_SECRET_ACCESS_KEY'] = ENV['TF_AWS_SECRET_ACCESS_KEY']
    if 'TF_AWS_SESSION_TOKEN' in ENV.keys():
        tf_env['AWS_SESSION_TOKEN'] = ENV['TF_AWS_SESSION_TOKEN']
    proc = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
                            cwd=cwd, bufsize=-1, env=tf_env)
    stdout = ''
    with io.open(proc.stdout.fileno(), closefd=False) as stream:
        for line in stream:
            if not notify:
                _logger.info(line.rstrip('\n'))
            stdout += line
    proc.wait()
    return proc.returncode, stdout
def _init(ctx, category, role):
    terraform = ctx.obj['terraform']
    notify = ctx.obj['notify']
    provider = ctx.obj['provider']
    env = ctx.obj['env']
    init_options = ctx.obj['init_options']
    bucket = ctx.obj['bucket']
    profile = ctx.obj['profile']
    remove_state_file(provider, category, role)
    backend_configs = build_backend_configs(bucket, provider, profile, env, category, role)
    cmds = [terraform, 'init', init_options, backend_configs]
    cwd = os.path.join(BASE_PATH, provider, category, role)
    returncode, stdout = _exec_terraform(' '.join(cmds), cwd, notify)
    if returncode != 0:
        _logger.error(stdout)
        raise RuntimeError('Terraform init failed.')
    return returncode
def _plan(ctx, category, role):
    # Use the configured terraform binary (--terraform) rather than a hard-coded
    # 'terraform', consistent with _init/_apply/_show/_fmt.
    terraform = ctx.obj['terraform']
    notify = ctx.obj['notify']
    provider = ctx.obj['provider']
    env = ctx.obj['env']
    options = ctx.obj['options']
    copy_template(provider, category, role)
    var_files = build_var_files(provider, env, category, role)
    _init(ctx, category, role)
    cmds = [terraform, 'plan', options, var_files]
    cwd = os.path.join(BASE_PATH, provider, category, role)
    returncode, stdout = _exec_terraform(' '.join(cmds), cwd, notify)
    return category, role, returncode, stdout
def _apply(ctx, category, role):
    terraform = ctx.obj['terraform']
    notify = ctx.obj['notify']
    provider = ctx.obj['provider']
    env = ctx.obj['env']
    options = ctx.obj['options']
    copy_template(provider, category, role)
    var_files = build_var_files(provider, env, category, role)
    _init(ctx, category, role)
    cmds = [terraform, 'apply', options, var_files]
    cwd = os.path.join(BASE_PATH, provider, category, role)
    returncode, stdout = _exec_terraform(' '.join(cmds), cwd, notify)
    return category, role, returncode, stdout
def _destroy(ctx, category, role):
    # As in _plan, respect the configured terraform binary.
    terraform = ctx.obj['terraform']
    notify = ctx.obj['notify']
    provider = ctx.obj['provider']
    env = ctx.obj['env']
    options = ctx.obj['options']
    copy_template(provider, category, role)
    var_files = build_var_files(provider, env, category, role)
    _init(ctx, category, role)
    cmds = [terraform, 'destroy', options, var_files]
    cwd = os.path.join(BASE_PATH, provider, category, role)
    returncode, stdout = _exec_terraform(' '.join(cmds), cwd, notify)
    return category, role, returncode, stdout
def _show(ctx, category, role):
    terraform = ctx.obj['terraform']
    notify = ctx.obj['notify']
    provider = ctx.obj['provider']
    options = ctx.obj['options']
    copy_template(provider, category, role)
    _init(ctx, category, role)
    cmds = [terraform, 'show', options]
    cwd = os.path.join(BASE_PATH, provider, category, role)
    returncode, stdout = _exec_terraform(' '.join(cmds), cwd, notify)
    return category, role, returncode, stdout
def _fmt(ctx, category, role):
    terraform = ctx.obj['terraform']
    notify = ctx.obj['notify']
    provider = ctx.obj['provider']
    cmds = [terraform, 'fmt']
    cwd = os.path.join(BASE_PATH, provider, category, role)
    returncode, stdout = _exec_terraform(' '.join(cmds), cwd, notify)
    return category, role, returncode, stdout
@click.group(context_settings=CONTEXT_SETTINGS)
@click.argument('provider', type=click.Choice(PROVIDER_LIST))
@click.option('--terraform', default='terraform', required=False,
              help='Terraform binary path. (e.g. /usr/local/bin/terraform)')
@click.option('--upgrade/--no-upgrade', required=False,
              help="""Opt to upgrade modules and plugins as part of their respective installation
              steps.""")
@click.option('--notify/--no-notify', default=False, required=False,
              help='Slack notification.')
@click.option('--worker_count', '-w', type=int, required=False, default=8,
              help="""The number of workers to be processed in parallel when multiple roles are
              specified. (Only when Slack notification is enabled)""")
@click.option('--debug/--no-debug', default=False, required=False,
              help='Set the logging level to DEBUG.')
@click.option('--tf-debug/--no-tf-debug', default=False, required=False,
              help='Set the TF_LOG environment variable to TRACE.')
@click.pass_context
def cli(ctx, provider, terraform, upgrade, notify, worker_count, debug, tf_debug):
    ctx.obj = dict()
    ctx.obj['provider'] = provider
    ctx.obj['terraform'] = terraform
    ctx.obj['upgrade'] = upgrade
    ctx.obj['notify'] = notify
    ctx.obj['worker_count'] = worker_count
    if debug:
        _logger.setLevel(logging.DEBUG)
    if tf_debug:
        ENV['TF_LOG'] = 'TRACE'
@cli.command()
@click.argument('env', type=click.Choice(['staging', 'production']))
@click.argument('role', type=str)
@click.option('--destroy', is_flag=True, required=False,
              help='If set, generates a plan to destroy all the known resources.')
@click.option('--detailed-exitcode', is_flag=True, required=False,
              help="""Return a detailed exit code when the command exits. When provided, this
              argument changes the exit codes and their meanings to provide more granular information about
              what the resulting plan contains:
              0 = Succeeded with empty diff (no changes)
              1 = Error
              2 = Succeeded with non-empty diff (changes present)""")
@click.option('--input/--no-input', default=True, required=False,
              help='Ask for input for variables if not directly set.')
@click.option('--lock/--no-lock', default=True, required=False,
              help='Lock the state file when locking is supported.')
@click.option('--lock-timeout', type=int, default=0, required=False,
              help='Duration to retry a state lock.')
@click.option('--module-depth', type=int, default=-1, required=False,
              help="""Specifies the depth of modules to show in the output. This does not affect
              the plan itself, only the output shown. By default, this is -1, which will expand all.""")
@click.option('--color/--no-color', default=True, required=False,
              help='Enables output with coloring.')
@click.option('--parallelism', type=int, default=10, required=False,
              help='Limit the number of concurrent operation as Terraform walks the graph.')
@click.option('--refresh/--no-refresh', default=True, required=False,
              help='Update the state prior to checking for differences.')
@click.option('--var', type=str, multiple=True, required=False,
              help="""Set a variable in the Terraform configuration. This flag can be set multiple
              times. Variable values are interpreted as HCL,
              so list and map values can be specified via this flag.""")
@click.pass_context
def plan(ctx, env, role, destroy, detailed_exitcode, input, lock, lock_timeout,
         module_depth, color, parallelism, refresh, var):
    ctx.obj['env'] = env
    provider = ctx.obj['provider']
    ctx.obj['bucket'], ctx.obj['profile'] = read_ini(provider, env)
    ctx.obj['init_options'] = build_options(input=input, lock=lock, lock_timeout=lock_timeout,
                                            color=color, upgrade=ctx.obj['upgrade'])
    ctx.obj['options'] = build_options(destroy=destroy, detailed_exitcode=detailed_exitcode,
                                       input=input, lock=lock, lock_timeout=lock_timeout,
                                       module_depth=module_depth, color=color,
                                       parallelism=parallelism, refresh=refresh, var=var)
    roles = parse_roles(provider, role)
    if ctx.obj['notify']:
        worker_count = ctx.obj['worker_count']
        attachments = []
        error = False
        with futures.ThreadPoolExecutor(max_workers=worker_count) as executor:
            fs = [executor.submit(_plan, ctx, c, r) for c, r in roles
                  if os.path.exists(os.path.join(BASE_PATH, provider, c, r))]
            for f in futures.as_completed(fs):
                category, role, returncode, stdout = f.result()
                _logger.info('{0}/{1}'.format(category, role))
                _logger.info(stdout)
                pretext = 'Role: *{0}/{1}*'.format(category, role)
                if returncode == 1:
                    color = 'danger'
                    message = '```{0}```'.format(stdout)
                    attachments.append((env, category, role, color, pretext, message.strip()))
                    error = True
                elif returncode == 2:
                    color = 'warning'
                    message = '```{0}```'.format(stdout.split(
                        'Terraform will perform the following actions:')[1])
                    attachments.append((env, category, role, color, pretext, message.strip()))
                elif returncode == 0:
                    # Suppress no changes.
                    # color = 'good'
                    # message = 'No changes. Infrastructure is up-to-date.'
                    pass
                else:
                    raise RuntimeError('Unknown returncode: {0}'.format(returncode))
        publish(attachments)
        if error:
            raise RuntimeError
    else:
        for c, r in roles:
            _, _, returncode, _ = _plan(ctx, c, r)
            if returncode not in [0, 2]:
                raise RuntimeError('Returncode: {0}'.format(returncode))
@cli.command()
@click.argument('env', type=click.Choice(['staging', 'production']))
@click.argument('role', type=str)
@click.option('--input/--no-input', default=True, required=False,
              help='Ask for input for variables if not directly set.')
@click.option('--lock/--no-lock', default=True, required=False,
              help='Lock the state file when locking is supported.')
@click.option('--lock-timeout', type=int, default=0, required=False,
              help='Duration to retry a state lock.')
@click.option('--auto-approve/--no-auto-approve', default=False, required=False,
              help='Skip interactive approval of plan before applying.')
@click.option('--color/--no-color', default=True, required=False,
              help='Enables output with coloring.')
@click.option('--parallelism', type=int, default=10, required=False,
              help='Limit the number of concurrent operation as Terraform walks the graph.')
@click.option('--refresh/--no-refresh', default=True, required=False,
              help="""Update the state for each resource prior to planning and applying. This has
              no effect if a plan file is given directly to apply.""")
@click.option('--var', type=str, multiple=True, required=False,
              help="""Set a variable in the Terraform configuration.
              This flag can be set multiple times. Variable values are interpreted as HCL, so list and map values
              can be specified via this flag.""")
@click.pass_context
def apply(ctx, env, role, input, lock, lock_timeout,
          auto_approve, color, parallelism, refresh, var):
    ctx.obj['env'] = env
    provider = ctx.obj['provider']
    ctx.obj['bucket'], ctx.obj['profile'] = read_ini(provider, env)
    ctx.obj['init_options'] = build_options(input=input, lock=lock, lock_timeout=lock_timeout,
                                            color=color, upgrade=ctx.obj['upgrade'])
    ctx.obj['options'] = build_options(input=input, lock=lock, lock_timeout=lock_timeout,
                                       auto_approve=auto_approve, color=color,
                                       parallelism=parallelism, refresh=refresh, var=var)
    roles = parse_roles(provider, role)
    if ctx.obj['notify']:
        worker_count = ctx.obj['worker_count']
        attachments = []
        error = False
        with futures.ThreadPoolExecutor(max_workers=worker_count) as executor:
            fs = [executor.submit(_apply, ctx, c, r) for c, r in roles
                  if os.path.exists(os.path.join(BASE_PATH, provider, c, r))]
            for f in futures.as_completed(fs):
                category, role, returncode, stdout = f.result()
                _logger.info('{0}/{1}'.format(category, role))
                _logger.info(stdout)
                pretext = 'Role: *{0}/{1}*'.format(category, role)
                message = '```{0}```'.format(stdout.strip())
                if returncode != 0:
                    color = 'danger'
                    attachments.append((env, category, role, color, pretext, message))
                    error = True
                else:
                    added, changed, destroyed = 0, 0, 0
                    for match in APPLY_COMPLETE_PATTERN.finditer(message):
                        added = int(match.group('added'))
                        changed = int(match.group('changed'))
                        destroyed = int(match.group('destroyed'))
                        break
                    if destroyed > 0:
                        color = 'danger'
                        attachments.append((env, category, role, color, pretext, message))
                    elif changed > 0:
                        color = 'warning'
                        attachments.append((env, category, role, color, pretext, message))
                    elif added > 0:
                        color = 'good'
                        attachments.append((env, category, role, color, pretext, message))
        publish(attachments)
        if error:
            raise RuntimeError
    else:
        for c, r in roles:
            _, _, returncode, _ = _apply(ctx, c, r)
            if returncode != 0:
                raise RuntimeError('Returncode: {0}'.format(returncode))
@cli.command()
@click.argument('env', type=click.Choice(['staging', 'production']))
@click.argument('role', type=str)
@click.option('--force/--no-force', default=False, required=False,
              help='If --force is set, then the destroy confirmation will not be shown.')
@click.pass_context
def destroy(ctx, env, role, force):
    ctx.obj['env'] = env
    provider = ctx.obj['provider']
    ctx.obj['bucket'], ctx.obj['profile'] = read_ini(provider, env)
    ctx.obj['init_options'] = build_options(upgrade=ctx.obj['upgrade'])
    ctx.obj['options'] = build_options(force=force)
    roles = parse_roles(provider, role)
    for c, r in roles:
        _, _, returncode, _ = _destroy(ctx, c, r)
        if returncode != 0:
            raise RuntimeError('Returncode: {0}'.format(returncode))
@cli.command()
@click.argument('env', type=click.Choice(['staging', 'production']))
@click.argument('role', type=str)
@click.option('--module-depth', type=int, default=-1, required=False,
              help="""Specifies the depth of modules to show in the output.
              By default this is -1, which will expand all.""")
@click.option('--color/--no-color', default=True, required=False,
              help='Disables output with coloring.')
@click.pass_context
def show(ctx, env, role, module_depth, color):
    ctx.obj['env'] = env
    provider = ctx.obj['provider']
    ctx.obj['bucket'], ctx.obj['profile'] = read_ini(provider, env)
    ctx.obj['init_options'] = build_options(color=color, upgrade=ctx.obj['upgrade'])
    ctx.obj['options'] = build_options(module_depth=module_depth)
    roles = parse_roles(provider, role)
    for c, r in roles:
        _, _, returncode, _ = _show(ctx, c, r)
        if returncode != 0:
            raise RuntimeError('Returncode: {0}'.format(returncode))
@cli.command()
@click.argument('role', type=str)
@click.pass_context
def fmt(ctx, role):
    provider = ctx.obj['provider']
    ctx.obj['init_options'] = build_options()
    ctx.obj['options'] = build_options()
    roles = parse_roles(provider, role)
    for c, r in roles:
        _, _, returncode, _ = _fmt(ctx, c, r)
        if returncode != 0:
            raise RuntimeError('Returncode: {0}'.format(returncode))
if __name__ == '__main__':
    cli()
S3_BUCKET=YOUR_BUCKET_NAME
S3_PROFILE=YOUR_PROFILE_NAME
.
├── aws
│   ├── _env_ini
│   │   ├── tfenv_production.ini
│   │   └── tfenv_staging.ini
│   ├── _env_tfvars
│   │   ├── all.tfvars
│   │   ├── production.tfvars
│   │   └── staging.tfvars
│   ├── _modules
│   │   └── xxx
│   ├── _template
│   │   ├── __backend.tf
│   │   ├── __provider.tf
│   │   ├── __variables_all.tf
│   │   ├── __variables_env.tf
│   │   └── __version.tf
│   └── {CATEGORY}
│       ├── _category_tfvars
│       │   ├── category.tfvars
│       │   ├── production_category.tfvars
│       │   └── staging_category.tfvars
│       ├── _template
│       │   └── __variables_category.tf
│       └── {ROLE}
│           ├── _env_tfvars
│           │   ├── production.tfvars
│           │   └── staging.tfvars
│           ├── _variables.tf
│           ├── _outputs.tf
│           └── xxx.tf
├── bin
│   └── tf.py
├── gcp
│   ├── _env_ini
│   │   ├── tfenv_production.ini
│   │   └── tfenv_staging.ini
│   ├── _env_tfvars
│   │   ├── all.tfvars
│   │   ├── production.tfvars
│   │   └── staging.tfvars
│   ├── _modules
│   │   └── xxx
│   ├── _template
│   │   ├── __backend.tf
│   │   ├── __provider.tf
│   │   ├── __variables_all.tf
│   │   ├── __variables_env.tf
│   │   └── __version.tf
│   └── {CATEGORY}
│       ├── _category_tfvars
│       │   ├── category.tfvars
│       │   ├── production_category.tfvars
│       │   └── staging_category.tfvars
│       ├── _template
│       │   └── __variables_category.tf
│       └── {ROLE}
│           ├── _env_tfvars
│           │   ├── production.tfvars
│           │   └── staging.tfvars
│           ├── _variables.tf
│           ├── _outputs.tf
│           └── xxx.tf
├── .gitignore
├── buildspec.stg.yml
├── buildspec.prd.yml
├── Jenkinsfile
├── Pipfile
└── README.md
Usage: tf.py [OPTIONS] PROVIDER COMMAND [ARGS]...

Options:
  --terraform TEXT             Terraform binary path. (e.g. /usr/local/bin/terraform)
  --upgrade / --no-upgrade     Opt to upgrade modules and plugins as part of their respective
                               installation steps.
  --notify / --no-notify      Slack notification.
  -w, --worker_count INTEGER   The number of workers to be processed in parallel when multiple roles
                               are specified. (Only when Slack notification is enabled)
  --debug / --no-debug         Set the logging level to DEBUG.
  --tf-debug / --no-tf-debug   Set the TF_LOG environment variable to TRACE.
  -h, --help                   Show this message and exit.

Commands:
  apply
  destroy
  fmt
  plan
  show

Usage: tf.py apply [OPTIONS] ENV ROLE

Options:
  --input / --no-input                Ask for input for variables if not directly set.
  --lock / --no-lock                  Lock the state file when locking is supported.
  --lock-timeout INTEGER              Duration to retry a state lock.
  --auto-approve / --no-auto-approve  Skip interactive approval of plan before applying.
  --color / --no-color                Enables output with coloring.
  --parallelism INTEGER               Limit the number of concurrent operation as Terraform walks the graph.
  --refresh / --no-refresh           Update the state for each resource prior to planning and applying.
                                      This has no effect if a plan file is given directly to apply.
  --var TEXT                          Set a variable in the Terraform configuration. This flag can be set
                                      multiple times. Variable values are interpreted as HCL, so list and
                                      map values can be specified via this flag.
  -h, --help                          Show this message and exit.

Usage: tf.py destroy [OPTIONS] ENV ROLE

Options:
  --force / --no-force  If --force is set, then the destroy confirmation will not be shown.
  -h, --help            Show this message and exit.

Usage: tf.py fmt [OPTIONS] ROLE

Options:
  -h, --help  Show this message and exit.

Usage: tf.py plan [OPTIONS] ENV ROLE

Options:
  --destroy                  If set, generates a plan to destroy all the known resources.
  --detailed-exitcode        Return a detailed exit code when the command exits. When provided, this
                             argument changes the exit codes and their meanings to provide more granular
                             information about what the resulting plan contains:
                             0 = Succeeded with empty diff (no changes)
                             1 = Error
                             2 = Succeeded with non-empty diff (changes present)
  --input / --no-input       Ask for input for variables if not directly set.
  --lock / --no-lock         Lock the state file when locking is supported.
  --lock-timeout INTEGER     Duration to retry a state lock.
  --module-depth INTEGER     Specifies the depth of modules to show in the output. This does not affect
                             the plan itself, only the output shown. By default, this is -1, which will
                             expand all.
  --color / --no-color       Enables output with coloring.
  --parallelism INTEGER      Limit the number of concurrent operation as Terraform walks the graph.
  --refresh / --no-refresh   Update the state prior to checking for differences.
  --var TEXT                 Set a variable in the Terraform configuration. This flag can be set multiple
                             times. Variable values are interpreted as HCL, so list and map values can be
                             specified via this flag.
  -h, --help                 Show this message and exit.

Usage: tf.py show [OPTIONS] ENV ROLE

Options:
  --module-depth INTEGER  Specifies the depth of modules to show in the output. By default this is -1,
                          which will expand all.
  --color / --no-color    Disables output with coloring.
  -h, --help              Show this message and exit.
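For reference, typical invocations of the wrapper look like the following; the category/role names are only examples, and 'all' expands to every category/role directory under the provider:

python bin/tf.py aws plan staging common/network:app/web
python bin/tf.py --notify aws apply --no-input --no-color production all
python bin/tf.py aws fmt all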