Created
July 17, 2025 04:24
-
-
Save kenzo0107/1cb26379891ee8b14773609367d5c81e to your computer and use it in GitHub Desktop.
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
#!/bin/bash
#
# Discover Aurora PostgreSQL clusters in a region, pick one (interactively
# when several exist), then estimate CDC throughput from CloudWatch write
# metrics over the last 7 days.
#
# NOTE(review): deliberately NOT using `set -e` — the script branches on `$?`
# right after the discovery call below.

# --- Configuration ---
REGION="ap-northeast-1"  # Change as needed.

echo "=== Aurora PostgreSQL Cluster Discovery ==="
echo "Region: $REGION"
echo ""

# Fetch Aurora PostgreSQL clusters (table output is for display only).
# This must stay the last command of this section: the `if [ $? -eq 0 ]`
# that follows tests its exit status.
echo "Discovering Aurora PostgreSQL clusters..."
AURORA_CLUSTERS=$(aws rds describe-db-clusters \
  --region "$REGION" \
  --query 'DBClusters[?Engine==`aurora-postgresql`].{ClusterIdentifier:DBClusterIdentifier,Status:Status,Engine:Engine}' \
  --output table)
if [ $? -eq 0 ]; then
  echo "Available Aurora PostgreSQL Clusters:"
  echo "$AURORA_CLUSTERS"
  echo ""

  # Collect cluster identifiers into an array. Word-splitting of the command
  # substitution is safe here: RDS identifiers cannot contain whitespace.
  CLUSTER_ARRAY=($(aws rds describe-db-clusters \
    --region "$REGION" \
    --query 'DBClusters[?Engine==`aurora-postgresql`].DBClusterIdentifier' \
    --output text))

  if [ ${#CLUSTER_ARRAY[@]} -gt 0 ]; then
    # Prompt for a choice when more than one cluster exists.
    if [ ${#CLUSTER_ARRAY[@]} -gt 1 ]; then
      echo "Multiple clusters found. Please select one:"
      for i in "${!CLUSTER_ARRAY[@]}"; do
        echo "$((i+1))) ${CLUSTER_ARRAY[$i]}"
      done
      echo ""
      echo "Enter selection (1-${#CLUSTER_ARRAY[@]}) or press Enter for #1:"
      # -r: do not let backslashes mangle the input.
      read -r selection
      if [ -z "$selection" ]; then
        selection=1
      fi
      # Validate: numeric and within 1..N.
      if [[ "$selection" =~ ^[0-9]+$ ]] && [ "$selection" -ge 1 ] && [ "$selection" -le ${#CLUSTER_ARRAY[@]} ]; then
        DB_CLUSTER_IDENTIFIER=${CLUSTER_ARRAY[$((selection-1))]}
      else
        echo "ERROR: Invalid selection"
        exit 1
      fi
    else
      # Exactly one cluster: select it automatically.
      DB_CLUSTER_IDENTIFIER=${CLUSTER_ARRAY[0]}
    fi

    echo "Selected cluster: $DB_CLUSTER_IDENTIFIER"

    # Find the writer (primary) instance of the selected cluster.
    echo "Finding writer instance for cluster: $DB_CLUSTER_IDENTIFIER"
    DB_WRITER_INSTANCE=$(aws rds describe-db-clusters \
      --db-cluster-identifier "$DB_CLUSTER_IDENTIFIER" \
      --region "$REGION" \
      --query 'DBClusters[0].DBClusterMembers[?IsClusterWriter==`true`].DBInstanceIdentifier' \
      --output text)

    if [ -n "$DB_WRITER_INSTANCE" ] && [ "$DB_WRITER_INSTANCE" != "None" ]; then
      echo "Writer instance: $DB_WRITER_INSTANCE"

      # Show cluster topology for context.
      echo ""
      echo "=== Cluster Details ==="
      aws rds describe-db-clusters \
        --db-cluster-identifier "$DB_CLUSTER_IDENTIFIER" \
        --region "$REGION" \
        --query 'DBClusters[0].{
          ClusterIdentifier:DBClusterIdentifier,
          Engine:Engine,
          EngineVersion:EngineVersion,
          Status:Status,
          MultiAZ:MultiAZ,
          Members:DBClusterMembers[].{
            Instance:DBInstanceIdentifier,
            IsWriter:IsClusterWriter,
            Status:DBInstanceStatus
          }
        }' \
        --output table
    else
      echo "ERROR: No writer instance found for cluster $DB_CLUSTER_IDENTIFIER"
      exit 1
    fi
  else
    echo "ERROR: No Aurora PostgreSQL clusters found in region $REGION"
    exit 1
  fi
else
  echo "ERROR: Failed to retrieve cluster information"
  exit 1
fi
echo ""
echo "=== Starting CDC Throughput Analysis ==="

# Analysis window: the last 7 days, in UTC.
# GNU date uses `-d '7 days ago'` while BSD/macOS date uses `-v-7d`; the
# original `-v-7d`-only form fails on Linux. Try GNU first, fall back to BSD.
START_TIME=$(date -u -d '7 days ago' +%Y-%m-%dT%H:%M:%S 2>/dev/null \
  || date -u -v-7d +%Y-%m-%dT%H:%M:%S)
END_TIME=$(date -u +%Y-%m-%dT%H:%M:%S)

echo "Cluster: $DB_CLUSTER_IDENTIFIER"
echo "Writer Instance: $DB_WRITER_INSTANCE"
echo "Period: $START_TIME to $END_TIME"
echo ""
# --- Metric retrieval --------------------------------------------------------
# get_metrics IDENTIFIER DIMENSION_NAME METRIC_NAME STAT_TYPE OUTPUT_FILE
#   Fetches hourly (period 3600) CloudWatch statistics for one AWS/RDS metric
#   over the $START_TIME..$END_TIME window and writes the raw JSON response
#   to OUTPUT_FILE.
#   Returns 0 on success; prints a warning and returns 1 when the CLI fails.
get_metrics() {
  local identifier=$1
  local dimension_name=$2
  local metric_name=$3
  local stat_type=$4
  local output_file=$5

  echo "Retrieving $metric_name ($stat_type) for $identifier..."
  # Quote every expansion so identifiers/statistics survive word-splitting.
  if ! aws cloudwatch get-metric-statistics \
    --namespace AWS/RDS \
    --metric-name "$metric_name" \
    --dimensions "Name=$dimension_name,Value=$identifier" \
    --start-time "$START_TIME" \
    --end-time "$END_TIME" \
    --period 3600 \
    --statistics "$stat_type" \
    --region "$REGION" \
    --output json > "$output_file"; then
    echo " Warning: Failed to retrieve $metric_name for $identifier"
    return 1
  fi
  return 0
}
# --- Run all metric retrievals ---
echo "=== Retrieving Metrics ==="

# One entry per query: target|dimension|metric|statistic|output-file.
# Cluster-level metrics first, then the writer instance, then the
# Aurora PostgreSQL-specific WAL disk usage metric.
metric_jobs=(
  "$DB_CLUSTER_IDENTIFIER|DBClusterIdentifier|WriteThroughput|Maximum|cluster_write_max.json"
  "$DB_CLUSTER_IDENTIFIER|DBClusterIdentifier|WriteThroughput|Average|cluster_write_avg.json"
  "$DB_CLUSTER_IDENTIFIER|DBClusterIdentifier|WriteIOPS|Maximum|cluster_iops_max.json"
  "$DB_CLUSTER_IDENTIFIER|DBClusterIdentifier|WriteIOPS|Average|cluster_iops_avg.json"
  "$DB_WRITER_INSTANCE|DBInstanceIdentifier|WriteThroughput|Maximum|writer_write_max.json"
  "$DB_WRITER_INSTANCE|DBInstanceIdentifier|WriteThroughput|Average|writer_write_avg.json"
  "$DB_WRITER_INSTANCE|DBInstanceIdentifier|WriteIOPS|Maximum|writer_iops_max.json"
  "$DB_WRITER_INSTANCE|DBInstanceIdentifier|WriteIOPS|Average|writer_iops_avg.json"
  "$DB_WRITER_INSTANCE|DBInstanceIdentifier|TransactionLogsDiskUsage|Maximum|writer_wal_disk.json"
)

# '|' is a safe separator: RDS identifiers are limited to alphanumerics
# and hyphens.
for job in "${metric_jobs[@]}"; do
  IFS='|' read -r target dimension metric statistic outfile <<<"$job"
  get_metrics "$target" "$dimension" "$metric" "$statistic" "$outfile"
done
echo ""
# --- Metric parsing ----------------------------------------------------------
# extract_value FILE STAT_TYPE
#   Parses a CloudWatch get-metric-statistics JSON dump and prints a single
#   number: the largest datapoint for "Maximum", or the mean of all datapoints
#   for anything else (i.e. "Average"). Prints "0" when the file is missing,
#   empty, or yields no usable datapoints.
#   Uses jq when available; otherwise falls back to grep, which relies on the
#   AWS CLI's pretty-printed '"Key": value' JSON formatting.
extract_value() {
  local file=$1
  local stat_type=$2
  local value

  # Missing or zero-byte file -> no data.
  if [ ! -f "$file" ] || [ ! -s "$file" ]; then
    echo "0"
    return
  fi

  if command -v jq &> /dev/null; then
    if [ "$stat_type" = "Maximum" ]; then
      value=$(jq -r '.Datapoints[].Maximum' "$file" 2>/dev/null | grep -v null | sort -n | tail -1)
    else
      value=$(jq -r '.Datapoints[].Average' "$file" 2>/dev/null | grep -v null | awk '{sum+=$1; count++} END {if(count>0) print sum/count; else print 0}')
    fi
  else
    if [ "$stat_type" = "Maximum" ]; then
      value=$(grep -o "\"Maximum\": [0-9.]*" "$file" | cut -d' ' -f2 | sort -n | tail -1)
    else
      value=$(grep -o "\"Average\": [0-9.]*" "$file" | cut -d' ' -f2 | awk '{sum+=$1; count++} END {if(count>0) print sum/count; else print 0}')
    fi
  fi
  echo "${value:-0}"
}
# --- Pull parsed numbers out of the JSON dumps collected above ---
echo "=== Metric Analysis ==="
echo ""

# Each spec is TARGET_VARIABLE:json-file:statistic; the loop assigns the
# extracted number into the named global via printf -v.
for spec in \
  "CLUSTER_WRITE_MAX:cluster_write_max.json:Maximum" \
  "CLUSTER_WRITE_AVG:cluster_write_avg.json:Average" \
  "CLUSTER_IOPS_MAX:cluster_iops_max.json:Maximum" \
  "CLUSTER_IOPS_AVG:cluster_iops_avg.json:Average" \
  "WRITER_WRITE_MAX:writer_write_max.json:Maximum" \
  "WRITER_WRITE_AVG:writer_write_avg.json:Average" \
  "WRITER_IOPS_MAX:writer_iops_max.json:Maximum" \
  "WRITER_IOPS_AVG:writer_iops_avg.json:Average" \
  "WRITER_WAL_DISK:writer_wal_disk.json:Maximum"
do
  var=${spec%%:*}
  rest=${spec#*:}
  file=${rest%%:*}
  stat=${rest#*:}
  printf -v "$var" '%s' "$(extract_value "$file" "$stat")"
done
unset spec var rest file stat
# --- Value validation / fallback ---------------------------------------------
# validate_value VALUE
#   Echoes VALUE, substituting "0" when it is empty or the literal string
#   "null" (jq prints "null" for absent JSON fields).
#   The original also tested [ "$value" = "" ], which is dead code: -z
#   already covers the empty string.
validate_value() {
  local value=$1
  if [ -z "$value" ] || [ "$value" = "null" ]; then
    echo "0"
  else
    echo "$value"
  fi
}
# Normalize every extracted metric. The original only validated the four
# WriteThroughput values; the IOPS and WAL figures went through unchecked,
# which could leave "null"/empty strings in the report below.
CLUSTER_WRITE_MAX=$(validate_value "$CLUSTER_WRITE_MAX")
CLUSTER_WRITE_AVG=$(validate_value "$CLUSTER_WRITE_AVG")
CLUSTER_IOPS_MAX=$(validate_value "$CLUSTER_IOPS_MAX")
CLUSTER_IOPS_AVG=$(validate_value "$CLUSTER_IOPS_AVG")
WRITER_WRITE_MAX=$(validate_value "$WRITER_WRITE_MAX")
WRITER_WRITE_AVG=$(validate_value "$WRITER_WRITE_AVG")
WRITER_IOPS_MAX=$(validate_value "$WRITER_IOPS_MAX")
WRITER_IOPS_AVG=$(validate_value "$WRITER_IOPS_AVG")
WRITER_WAL_DISK=$(validate_value "$WRITER_WAL_DISK")

# --- Report raw metrics ---
echo "Cluster-Level Metrics:"
echo " WriteThroughput - Maximum: $CLUSTER_WRITE_MAX bytes/sec"
echo " WriteThroughput - Average: $CLUSTER_WRITE_AVG bytes/sec"
echo " WriteIOPS - Maximum: $CLUSTER_IOPS_MAX ops/sec"
echo " WriteIOPS - Average: $CLUSTER_IOPS_AVG ops/sec"
echo ""
echo "Writer Instance Metrics:"
echo " WriteThroughput - Maximum: $WRITER_WRITE_MAX bytes/sec"
echo " WriteThroughput - Average: $WRITER_WRITE_AVG bytes/sec"
echo " WriteIOPS - Maximum: $WRITER_IOPS_MAX ops/sec"
echo " WriteIOPS - Average: $WRITER_IOPS_AVG ops/sec"
echo " TransactionLogsDiskUsage - Maximum: $WRITER_WAL_DISK bytes"
echo ""
# --- CDC estimation ----------------------------------------------------------
# calculate_cdc THROUGHPUT_BYTES LABEL
#   Estimates change-data-capture traffic from a raw WriteThroughput figure
#   and prints "<label>: <bytes>/sec (<Mbps>)", or "No data available" when
#   the input is empty or "0".
#   The 0.8 coefficient discounts WAL overhead that does not reach the CDC
#   stream (Aurora PostgreSQL heuristic — see recommendation 7 below).
calculate_cdc() {
  local throughput=$1
  local label=$2
  local cdc_bytes cdc_mbps

  # NB: the original also tested [ "$throughput" != "" ], redundant with -n.
  if [ -n "$throughput" ] && [ "$throughput" != "0" ]; then
    cdc_bytes=$(echo "$throughput" | awk '{printf "%.0f", $1 * 0.8}')
    cdc_mbps=$(echo "$cdc_bytes" | awk '{printf "%.3f", $1 * 8 / 1000000}')
    printf " %-20s: %s bytes/sec (%.3f Mbps)\n" "$label" "$cdc_bytes" "$cdc_mbps"
  else
    printf " %-20s: No data available\n" "$label"
  fi
}
# --- CDC estimates -----------------------------------------------------------
echo "=== CDC Throughput Estimates ==="
echo ""
echo "Based on Cluster WriteThroughput:"
calculate_cdc "$CLUSTER_WRITE_MAX" "Peak CDC"
calculate_cdc "$CLUSTER_WRITE_AVG" "Average CDC"
echo ""
echo "Based on Writer Instance WriteThroughput:"
calculate_cdc "$WRITER_WRITE_MAX" "Peak CDC"
calculate_cdc "$WRITER_WRITE_AVG" "Average CDC"
echo ""

# Sanity check: report how many hourly datapoints each key file contained
# (a low count means the estimates above rest on little data).
echo "=== Data Point Validation ==="
for file in cluster_write_max.json writer_write_max.json; do
  if [ -f "$file" ]; then
    if command -v jq &> /dev/null; then
      # jq reads the file directly — no `cat |` needed.
      datapoints=$(jq '.Datapoints | length' "$file" 2>/dev/null)
    else
      datapoints=$(grep -c '"Timestamp"' "$file" 2>/dev/null)
    fi
    echo "$file: ${datapoints:-0} data points"
  else
    echo "$file: File not found"
  fi
done
# Final sizing guidance (static text — quoted heredoc, no expansion),
# then removal of the temporary JSON dumps.
cat <<'EOF'

=== PostgreSQL-Specific Recommendations ===
1. Use cluster-level metrics for overall CDC capacity planning
2. Peak values should guide maximum Confluent Cloud throughput settings
3. Average values should guide standard operational capacity
4. Consider 20-50% safety margin for production deployment
5. Monitor TransactionLogsDiskUsage (WAL) for replication slot lag
6. Ensure logical replication parameters are properly configured:
 - rds.logical_replication = 1
 - rds.logical_wal_cache (appropriate size for workload)
7. PostgreSQL CDC coefficient (0.8) accounts for WAL overhead

Cleaning up temporary files...
EOF
rm -f cluster_*.json writer_*.json
echo "Analysis complete."
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment