@miadabdi
Created July 22, 2025 09:51
Bcrypt Password Hashing Benchmark with Python
#!/usr/bin/env python3
"""
Comprehensive bcrypt performance benchmarking script.
Tests hashing and verification performance across different cost factors and password lengths.
"""
import argparse
import statistics
import sys
import time
from typing import Dict, List
import bcrypt

# pandas and matplotlib are optional; they are imported lazily inside
# save_results_to_csv() and plot_results() so the core benchmark can run
# without them.

class BcryptBenchmark:
    def __init__(self):
        self.results = {"hash": [], "verify": []}

    def generate_password(self, length: int = 12) -> str:
        """Generate a test password of specified length."""
        import random
        import string

        chars = string.ascii_letters + string.digits + "!@#$%^&*"
        return "".join(random.choice(chars) for _ in range(length))

    def benchmark_hashing(
        self, password: str, rounds: int, iterations: int = 10
    ) -> Dict:
        """Benchmark password hashing performance."""
        times = []
        hashes = []
        print(f"Benchmarking hashing with {rounds} rounds, {iterations} iterations...")
        for i in range(iterations):
            start_time = time.perf_counter()
            salt = bcrypt.gensalt(rounds=rounds)
            hash_result = bcrypt.hashpw(password.encode("utf-8"), salt)
            end_time = time.perf_counter()
            times.append(end_time - start_time)
            hashes.append(hash_result)
            if (i + 1) % max(1, iterations // 4) == 0:
                print(f" Progress: {i + 1}/{iterations} completed")
        return {
            "rounds": rounds,
            "iterations": iterations,
            "times": times,
            "avg_time": statistics.mean(times),
            "min_time": min(times),
            "max_time": max(times),
            "std_dev": statistics.stdev(times) if len(times) > 1 else 0,
            "hashes": hashes,
        }

    def benchmark_verification(
        self, password: str, hashed_passwords: List[bytes], iterations: int = 100
    ) -> Dict:
        """Benchmark password verification performance."""
        times = []
        print(
            f"Benchmarking verification with {len(hashed_passwords)} different hashes, {iterations} verifications each..."
        )
        total_verifications = len(hashed_passwords) * iterations
        completed = 0
        for hash_pw in hashed_passwords:
            for i in range(iterations):
                start_time = time.perf_counter()
                result = bcrypt.checkpw(password.encode("utf-8"), hash_pw)
                end_time = time.perf_counter()
                times.append(end_time - start_time)
                completed += 1
                if completed % max(1, total_verifications // 10) == 0:
                    print(f" Progress: {completed}/{total_verifications} completed")
        return {
            "iterations": len(times),
            "times": times,
            "avg_time": statistics.mean(times),
            "min_time": min(times),
            "max_time": max(times),
            "std_dev": statistics.stdev(times) if len(times) > 1 else 0,
        }

    def run_comprehensive_benchmark(
        self,
        password_lengths: List[int] = [8, 12, 16, 32],
        cost_factors: List[int] = [10, 11, 12, 13, 14],
        hash_iterations: int = 5,
        verify_iterations: int = 50,
    ) -> Dict:
        """Run comprehensive benchmark across multiple parameters."""
        print("=" * 60)
        print("COMPREHENSIVE BCRYPT PERFORMANCE BENCHMARK")
        print("=" * 60)
        all_results = {"hash_results": [], "verify_results": [], "summary": {}}
        # Test different password lengths and cost factors
        for pwd_len in password_lengths:
            password = self.generate_password(pwd_len)
            print(f"\nTesting password length: {pwd_len} characters")
            print(f"Test password: {'*' * pwd_len}")
            pwd_hash_results = []
            pwd_verify_results = []
            for rounds in cost_factors:
                print(f"\n--- Cost Factor: {rounds} ---")
                # Benchmark hashing
                hash_result = self.benchmark_hashing(password, rounds, hash_iterations)
                hash_result["password_length"] = pwd_len
                pwd_hash_results.append(hash_result)
                all_results["hash_results"].append(hash_result)
                # Benchmark verification
                verify_result = self.benchmark_verification(
                    password, hash_result["hashes"], verify_iterations
                )
                verify_result["password_length"] = pwd_len
                verify_result["rounds"] = rounds
                pwd_verify_results.append(verify_result)
                all_results["verify_results"].append(verify_result)
                print(f" Hash avg: {hash_result['avg_time']:.4f}s")
                print(f" Verify avg: {verify_result['avg_time']:.6f}s")
        # Generate summary statistics
        all_results["summary"] = self._generate_summary(all_results)
        return all_results

    def _generate_summary(self, results: Dict) -> Dict:
        """Generate summary statistics from benchmark results."""
        summary = {
            "hash_performance": {},
            "verify_performance": {},
            "recommendations": [],
        }
        # Hash performance by cost factor
        for rounds in set(r["rounds"] for r in results["hash_results"]):
            round_times = [
                r["avg_time"] for r in results["hash_results"] if r["rounds"] == rounds
            ]
            summary["hash_performance"][rounds] = {
                "avg_time": statistics.mean(round_times),
                "min_time": min(round_times),
                "max_time": max(round_times),
            }
        # Verify performance by cost factor
        for rounds in set(r["rounds"] for r in results["verify_results"]):
            round_times = [
                r["avg_time"]
                for r in results["verify_results"]
                if r["rounds"] == rounds
            ]
            summary["verify_performance"][rounds] = {
                "avg_time": statistics.mean(round_times),
                "min_time": min(round_times),
                "max_time": max(round_times),
            }
        # Generate recommendations
        fastest_hash = min(
            summary["hash_performance"].items(), key=lambda x: x[1]["avg_time"]
        )
        slowest_hash = max(
            summary["hash_performance"].items(), key=lambda x: x[1]["avg_time"]
        )
        summary["recommendations"] = [
            f"Fastest hashing: Cost factor {fastest_hash[0]} (~{fastest_hash[1]['avg_time']:.3f}s)",
            f"Slowest hashing: Cost factor {slowest_hash[0]} (~{slowest_hash[1]['avg_time']:.3f}s)",
            "For web apps: Consider cost factor 12 (balance of security/performance)",
            "For high-security: Consider cost factor 13-14 (higher security, slower)",
        ]
        return summary

    def print_detailed_results(self, results: Dict):
        """Print detailed benchmark results."""
        print("\n" + "=" * 60)
        print("DETAILED RESULTS")
        print("=" * 60)
        print("\n📊 HASHING PERFORMANCE")
        print("-" * 40)
        for result in results["hash_results"]:
            print(
                f"Password Length: {result['password_length']}, Cost Factor: {result['rounds']}"
            )
            print(f" Average: {result['avg_time']:.4f}s")
            print(f" Range: {result['min_time']:.4f}s - {result['max_time']:.4f}s")
            print(f" Std Dev: {result['std_dev']:.4f}s")
            print()
        print("\n🔐 VERIFICATION PERFORMANCE")
        print("-" * 40)
        for result in results["verify_results"]:
            print(
                f"Password Length: {result['password_length']}, Cost Factor: {result['rounds']}"
            )
            print(f" Average: {result['avg_time']:.6f}s")
            print(f" Range: {result['min_time']:.6f}s - {result['max_time']:.6f}s")
            print(f" Std Dev: {result['std_dev']:.6f}s")
            print()
        print("\n📈 SUMMARY & RECOMMENDATIONS")
        print("-" * 40)
        for rec in results["summary"]["recommendations"]:
            print(f"• {rec}")

    def save_results_to_csv(
        self, results: Dict, filename: str = "bcrypt_benchmark_results.csv"
    ):
        """Save results to CSV file."""
        try:
            import pandas as pd  # optional dependency, imported lazily

            # Prepare hash results
            hash_df = pd.DataFrame(
                [
                    {
                        "operation": "hash",
                        "password_length": r["password_length"],
                        "cost_factor": r["rounds"],
                        "avg_time": r["avg_time"],
                        "min_time": r["min_time"],
                        "max_time": r["max_time"],
                        "std_dev": r["std_dev"],
                        "iterations": r["iterations"],
                    }
                    for r in results["hash_results"]
                ]
            )
            # Prepare verify results
            verify_df = pd.DataFrame(
                [
                    {
                        "operation": "verify",
                        "password_length": r["password_length"],
                        "cost_factor": r["rounds"],
                        "avg_time": r["avg_time"],
                        "min_time": r["min_time"],
                        "max_time": r["max_time"],
                        "std_dev": r["std_dev"],
                        "iterations": r["iterations"],
                    }
                    for r in results["verify_results"]
                ]
            )
            # Combine and save
            combined_df = pd.concat([hash_df, verify_df], ignore_index=True)
            combined_df.to_csv(filename, index=False)
            print(f"\n💾 Results saved to {filename}")
        except ImportError:
            print("\n⚠️ pandas not available, skipping CSV export")

    def plot_results(self, results: Dict, save_plot: bool = True):
        """Generate performance visualization plots."""
        try:
            import matplotlib.pyplot as plt  # optional dependency, imported lazily

            fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2, figsize=(15, 12))
            fig.suptitle(
                "Bcrypt Performance Benchmark Results", fontsize=16, fontweight="bold"
            )
            # Plot 1: Hash time by cost factor
            cost_factors = sorted(set(r["rounds"] for r in results["hash_results"]))
            hash_times_by_cost = []
            for cf in cost_factors:
                times = [
                    r["avg_time"] for r in results["hash_results"] if r["rounds"] == cf
                ]
                hash_times_by_cost.append(statistics.mean(times))
            ax1.plot(cost_factors, hash_times_by_cost, "bo-", linewidth=2, markersize=8)
            ax1.set_xlabel("Cost Factor")
            ax1.set_ylabel("Average Hash Time (seconds)")
            ax1.set_title("Hash Performance vs Cost Factor")
            ax1.grid(True, alpha=0.3)
            ax1.set_yscale("log")
            # Plot 2: Verify time by cost factor
            verify_times_by_cost = []
            for cf in cost_factors:
                times = [
                    r["avg_time"]
                    for r in results["verify_results"]
                    if r["rounds"] == cf
                ]
                verify_times_by_cost.append(statistics.mean(times))
            ax2.plot(
                cost_factors, verify_times_by_cost, "ro-", linewidth=2, markersize=8
            )
            ax2.set_xlabel("Cost Factor")
            ax2.set_ylabel("Average Verify Time (seconds)")
            ax2.set_title("Verification Performance vs Cost Factor")
            ax2.grid(True, alpha=0.3)
            # Plot 3: Hash time by password length
            pwd_lengths = sorted(
                set(r["password_length"] for r in results["hash_results"])
            )
            hash_times_by_length = []
            for length in pwd_lengths:
                times = [
                    r["avg_time"]
                    for r in results["hash_results"]
                    if r["password_length"] == length
                ]
                hash_times_by_length.append(statistics.mean(times))
            ax3.bar(pwd_lengths, hash_times_by_length, color="skyblue", alpha=0.7)
            ax3.set_xlabel("Password Length")
            ax3.set_ylabel("Average Hash Time (seconds)")
            ax3.set_title("Hash Performance vs Password Length")
            ax3.grid(True, alpha=0.3)
            # Plot 4: Performance comparison
            operations = ["Hash", "Verify"]
            avg_times = [
                statistics.mean([r["avg_time"] for r in results["hash_results"]]),
                statistics.mean([r["avg_time"] for r in results["verify_results"]]),
            ]
            bars = ax4.bar(operations, avg_times, color=["blue", "red"], alpha=0.7)
            ax4.set_ylabel("Average Time (seconds)")
            ax4.set_title("Hash vs Verify Performance")
            ax4.grid(True, alpha=0.3)
            # Add value labels on bars (loop variable renamed so it does not
            # shadow the imported time module)
            for bar, t in zip(bars, avg_times):
                height = bar.get_height()
                ax4.text(
                    bar.get_x() + bar.get_width() / 2.0,
                    height,
                    f"{t:.4f}s",
                    ha="center",
                    va="bottom",
                )
            plt.tight_layout()
            if save_plot:
                plt.savefig("bcrypt_benchmark_plots.png", dpi=300, bbox_inches="tight")
                print("\n📊 Plots saved to bcrypt_benchmark_plots.png")
            plt.show()
        except ImportError:
            print("\n⚠️ matplotlib not available, skipping plot generation")

def main():
    parser = argparse.ArgumentParser(
        description="Benchmark bcrypt password hashing and verification"
    )
    parser.add_argument(
        "--password-lengths",
        nargs="+",
        type=int,
        default=[8, 12, 16, 32],
        help="Password lengths to test (default: 8 12 16 32)",
    )
    parser.add_argument(
        "--cost-factors",
        nargs="+",
        type=int,
        default=[10, 11, 12, 13, 14],
        help="Cost factors to test (default: 10 11 12 13 14)",
    )
    parser.add_argument(
        "--hash-iterations",
        type=int,
        default=5,
        help="Number of hashing iterations per test (default: 5)",
    )
    parser.add_argument(
        "--verify-iterations",
        type=int,
        default=50,
        help="Number of verification iterations per test (default: 50)",
    )
    parser.add_argument(
        "--save-csv", action="store_true", help="Save results to CSV file"
    )
    parser.add_argument(
        "--save-plot", action="store_true", help="Save performance plots"
    )
    parser.add_argument(
        "--quick", action="store_true", help="Run quick benchmark (fewer iterations)"
    )
    args = parser.parse_args()
    if args.quick:
        args.hash_iterations = 3
        args.verify_iterations = 20
        args.cost_factors = [10, 12, 13]
        args.password_lengths = [12, 16]
        print("🚀 Running quick benchmark...")
    # Check if bcrypt is available
    try:
        import bcrypt
    except ImportError:
        print("❌ Error: bcrypt library not found. Install it with: pip install bcrypt")
        sys.exit(1)
    # Run benchmark
    benchmark = BcryptBenchmark()
    try:
        results = benchmark.run_comprehensive_benchmark(
            password_lengths=args.password_lengths,
            cost_factors=args.cost_factors,
            hash_iterations=args.hash_iterations,
            verify_iterations=args.verify_iterations,
        )
        benchmark.print_detailed_results(results)
        if args.save_csv:
            benchmark.save_results_to_csv(results)
        if args.save_plot:
            benchmark.plot_results(results, save_plot=True)
        print("\n✅ Benchmark completed successfully!")
    except KeyboardInterrupt:
        print("\n\n⚠️ Benchmark interrupted by user")
        sys.exit(1)
    except Exception as e:
        print(f"\n❌ Error during benchmark: {e}")
        sys.exit(1)

if __name__ == "__main__":
    main()
miadabdi commented Jul 22, 2025

Install dependencies

pip install bcrypt pandas matplotlib

Basic benchmark

python bcrypt_benchmark.py

Quick test

python bcrypt_benchmark.py --quick

Custom parameters

python bcrypt_benchmark.py --cost-factors 12 13 14 --password-lengths 12 16 --save-csv --save-plot

High precision test

python bcrypt_benchmark.py --hash-iterations 10 --verify-iterations 100
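
Programmatic use

The BcryptBenchmark class can also be driven directly when you only want to probe a single cost factor. A minimal sketch, assuming the gist above is saved as bcrypt_benchmark.py next to your code:

# assumes the script above is saved as bcrypt_benchmark.py and importable
from bcrypt_benchmark import BcryptBenchmark

benchmark = BcryptBenchmark()
results = benchmark.run_comprehensive_benchmark(
    password_lengths=[16],
    cost_factors=[12],
    hash_iterations=5,
    verify_iterations=50,
)
benchmark.print_detailed_results(results)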
