// Source: GitHub gist by @niquola (last active February 28, 2026)
// https://gist.github.com/niquola/8be1d2223ebaae25812d4fb14bb487e1
// PostgreSQL migration tool — single file, zero deps, Bun runtime
// PostgreSQL migration tool — single file, zero dependencies, Bun runtime.
//
// CLI:
// bun src/migrate.ts create <name> — create .up.sql + .down.sql templates
// bun src/migrate.ts up [count] — apply pending migrations
// bun src/migrate.ts down [count] — roll back recent (default: 1)
// bun src/migrate.ts status — show applied/pending
//
// Library:
// import { create, up, down, status } from "./migrate"
//
// Tests (in same file):
// bun test src/migrate.ts
//
// Migration format:
// migrations/YYYYMMDDHHMMSS_name.up.sql
// migrations/YYYYMMDDHHMMSS_name.down.sql
// Both files required. Sorted lexicographically. Plain SQL only.
//
// Design decisions:
// - Paired SQL files over single-file markers: just readFile, no parsing.
// - Both up+down required: forces rollback planning at write time.
// - pg_try_advisory_lock (fail-fast): no silent waits, clear error on contention.
// - One transaction per migration: rollback on failure, never records failed migration.
// - Checksum (wyhash) on .up.sql content: detects drift after apply.
// - Integrity check before every command: applied migrations must exist on disk
// with matching checksums.
// - DATABASE_URL via Bun's built-in sql: zero connection config.
//
// Env: DATABASE_URL (required, read automatically by Bun).
import { sql as db } from "bun";
import { mkdir } from "node:fs/promises";
import { join, basename } from "node:path";
// Arbitrary application-wide advisory-lock key; must not collide with other
// advisory-lock users of the same database.
const LOCK_ID = 791730412;
// Migrations directory, resolved relative to the process working directory.
const DIR = join(process.cwd(), "migrations");
// Filename shape: YYYYMMDDHHMMSS_name.(up|down).sql
const RE = /^(\d{14})_(.+)\.(up|down)\.sql$/;
// A paired migration discovered on disk; checksum covers the .up.sql bytes.
type Migration = { version: string; name: string; upPath: string; downPath: string; checksum: string };
// One row of the schema_migrations bookkeeping table.
type Applied = { version: string; checksum: string; applied_at: Date };
// Hex-encoded digest of the given bytes (Bun's built-in wyhash), used as the
// drift-detection checksum for .up.sql content.
function hash(data: Uint8Array): string {
  const digest = Bun.hash(data);
  return digest.toString(16);
}
// Scan the migrations directory and pair *.up.sql / *.down.sql files into
// Migration records, sorted lexicographically by version.
// Throws if a filename does not match RE or if either half of a pair is
// missing. Checksums are computed from the .up.sql bytes.
async function discoverMigrations(): Promise<Migration[]> {
  type PairInProgress = { version: string; name: string; up?: string; down?: string };
  const pairs = new Map<string, PairInProgress>();
  for (const file of new Bun.Glob("*.sql").scanSync(DIR)) {
    const match = RE.exec(file);
    if (!match) throw new Error(`Invalid migration filename: ${file}`);
    const [, version, name, direction] = match;
    const key = `${version}_${name}`;
    let entry = pairs.get(key);
    if (!entry) {
      entry = { version, name };
      pairs.set(key, entry);
    }
    entry[direction as "up" | "down"] = join(DIR, file);
  }
  const sortedKeys = [...pairs.keys()].sort((a, b) => a.localeCompare(b));
  const migrations: Migration[] = [];
  for (const key of sortedKeys) {
    const entry = pairs.get(key)!;
    if (!entry.up || !entry.down) {
      throw new Error(`Both .up.sql and .down.sql required for ${key}`);
    }
    const upBytes = await Bun.file(entry.up).bytes();
    migrations.push({
      version: entry.version,
      name: entry.name,
      upPath: entry.up,
      downPath: entry.down,
      checksum: hash(upBytes),
    });
  }
  return migrations;
}
// Integrity check: every migration recorded as applied must still exist on
// disk and its .up.sql checksum must be unchanged. Throws on the first
// discrepancy; returns silently when everything matches.
function validate(applied: Applied[], fileMap: Map<string, Migration>): void {
  for (const row of applied) {
    const onDisk = fileMap.get(row.version);
    if (onDisk === undefined) {
      throw new Error(`Applied migration ${row.version} missing from disk`);
    }
    if (row.checksum !== onDisk.checksum) {
      throw new Error(`Checksum mismatch for ${row.version}_${onDisk.name}`);
    }
  }
}
// Shared preamble for every command: ensure the bookkeeping table exists,
// take a fail-fast advisory lock, load disk + DB state, run the integrity
// check, then hand everything to `fn`. The lock is always released.
//
// NOTE(review): pg advisory locks are session-scoped. If Bun's `sql` pools
// more than one connection, the try_lock, the inner queries, and the unlock
// may run on different sessions — confirm Bun's pooling behavior, or reserve
// a single connection for the whole command.
async function withContext<T>(fn: (files: Migration[], applied: Applied[], fileMap: Map<string, Migration>) => Promise<T>): Promise<T> {
await db`CREATE TABLE IF NOT EXISTS schema_migrations (
version TEXT PRIMARY KEY, checksum TEXT NOT NULL, applied_at TIMESTAMPTZ NOT NULL DEFAULT now())`;
// Fail fast instead of waiting: a second concurrent run errors immediately.
const [{ locked }] = await db`SELECT pg_try_advisory_lock(${LOCK_ID}) as locked`;
if (!locked) throw new Error("Another migration is running");
try {
const files = await discoverMigrations();
const applied: Applied[] = await db`SELECT version, checksum, applied_at FROM schema_migrations ORDER BY version` as any;
const fileMap = new Map(files.map(f => [f.version, f]));
// Applied rows must exist on disk with matching checksums before any command runs.
validate(applied, fileMap);
return await fn(files, applied, fileMap);
} finally {
// Release even when discovery/validation/fn throws.
await db`SELECT pg_advisory_unlock(${LOCK_ID})`;
}
}
// Normalize a user-supplied migration name for use in filenames: lowercase,
// collapse every run of non-alphanumerics into a single "_", and strip any
// leading/trailing underscores. May return "" if nothing alphanumeric remains.
function slugify(name: string): string {
  const lowered = name.trim().toLowerCase();
  const underscored = lowered.replace(/[^a-z0-9]+/g, "_");
  return underscored.replace(/^_+/, "").replace(/_+$/, "");
}
// Scaffold a new migration pair: migrations/<timestamp>_<slug>.up.sql and
// .down.sql, each seeded with a one-line SQL comment. Refuses to overwrite
// existing files. Returns the "<timestamp>_<slug>" base name.
async function create(name: string): Promise<string> {
  const slug = slugify(name);
  if (!slug) throw new Error("Migration name must contain at least one alphanumeric character");
  await mkdir(DIR, { recursive: true });
  // UTC timestamp, YYYYMMDDHHMMSS: ISO string with "-", "T", ":" stripped,
  // truncated before the milliseconds.
  const version = new Date().toISOString().replace(/[-T:]/g, "").slice(0, 14);
  const base = `${version}_${slug}`;
  const extensions = [".up.sql", ".down.sql"];
  for (const ext of extensions) {
    const filePath = join(DIR, base + ext);
    if (await Bun.file(filePath).exists()) throw new Error(`Already exists: ${base}${ext}`);
    // Template comment reads "-- <slug> up" / "-- <slug> down".
    await Bun.write(filePath, `-- ${slug} ${ext.slice(1, -4)}\n`);
    console.log(`Created ${base}${ext}`);
  }
  return base;
}
// Apply pending migrations in version order; `count` limits how many
// (undefined = all). Each migration's SQL and its schema_migrations row
// commit inside one transaction, so a failed migration is never recorded.
// Returns the versions applied, in order.
async function up(count?: number): Promise<string[]> {
  return withContext(async (files, applied) => {
    const done = new Set(applied.map(a => a.version));
    const pending = files.filter(f => !done.has(f.version));
    const batch = count === undefined ? pending : pending.slice(0, count);
    if (batch.length === 0) {
      console.log("Database is up to date.");
      return [];
    }
    for (const migration of batch) {
      await db.begin(async (tx) => {
        await tx.file(migration.upPath);
        await tx`INSERT INTO schema_migrations (version, checksum) VALUES (${migration.version}, ${migration.checksum})`;
      });
      console.log(`Applied ${migration.version}_${migration.name}`);
    }
    console.log(`\n${batch.length} migration(s) applied.`);
    return batch.map(m => m.version);
  });
}
// Roll back the most recent `count` applied migrations (default: 1), newest
// first. Each rollback runs the .down.sql and deletes the bookkeeping row in
// one transaction, so a failing .down.sql leaves the record intact.
// Returns the rolled-back versions, newest first.
async function down(count = 1): Promise<string[]> {
  return withContext(async (_, applied, fileMap) => {
    // Copy before reversing: Array.prototype.reverse mutates in place, and
    // `applied` is shared context state owned by withContext — the original
    // `applied.reverse()` silently reordered it for any later reader.
    const toRollback = applied.slice().reverse().slice(0, count);
    if (!toRollback.length) { console.log("Nothing to roll back."); return []; }
    for (const entry of toRollback) {
      // Non-null is safe: validate() already proved every applied row has a
      // matching pair of files on disk.
      const m = fileMap.get(entry.version)!;
      await db.begin(async (tx) => {
        await tx.file(m.downPath);
        await tx`DELETE FROM schema_migrations WHERE version = ${entry.version}`;
      });
      console.log(`Rolled back ${m.version}_${m.name}`);
    }
    console.log(`\n${toRollback.length} migration(s) rolled back.`);
    return toRollback.map(e => e.version);
  });
}
type StatusResult = { applied: string[]; pending: string[] };
async function status(): Promise<StatusResult> {
return withContext(async (files, applied) => {
const appliedSet = new Set(applied.map(a => a.version));
console.log("Migration Status\n");
for (const f of files) {
const a = applied.find(a => a.version === f.version);
const date = a ? ` applied ${a.applied_at.toISOString().replace("T", " ").slice(0, 19)}` : "";
console.log(` [${appliedSet.has(f.version) ? "x" : " "}] ${f.version}_${f.name}${date}`);
}
if (!files.length) console.log(" No migrations found.");
return {
applied: applied.map(a => a.version),
pending: files.filter(f => !appliedSet.has(f.version)).map(f => f.version),
};
});
}
export { create, up, down, status, type StatusResult };
// --- CLI (bun src/migrate.ts <command>) ---
// Runs only when executed directly (not imported) and not under `bun test`.
// Uses top-level await, which Bun supports in entry modules.
if (import.meta.main && process.env.NODE_ENV !== "test") {
// Parse an optional positive-integer count argument; undefined when absent.
function parseCount(arg?: string): number | undefined {
if (!arg) return undefined;
const n = parseInt(arg, 10);
if (isNaN(n) || n < 1) throw new Error(`Invalid count: ${arg}`);
return n;
}
const [,, command, arg] = process.argv;
try {
switch (command) {
case "create":
if (!arg) { console.error("Usage: bun src/migrate.ts create <name>"); process.exit(1); }
await create(arg); break;
case "up": await up(parseCount(arg)); break;
case "down": await down(parseCount(arg) ?? 1); break;
case "status": await status(); break;
default:
// No/unknown command prints usage; unknown command exits non-zero.
console.log(`Usage: bun src/migrate.ts <command>\n\nCommands:\n create <name> Create .up.sql + .down.sql templates\n up [count] Apply pending migrations\n down [count] Roll back recent migrations (default: 1)\n status Show migration status`);
process.exit(command ? 1 : 0);
}
} catch (err) {
console.error(err instanceof Error ? err.message : String(err));
// NOTE(review): process.exit here terminates before `finally` runs, so the
// connection is not closed on the error path — harmless since the process
// dies, but confirm that is intended.
process.exit(1);
} finally {
db.close();
}
}
// --- Tests (bun test src/migrate.ts) ---
// Integration tests: they require DATABASE_URL pointing at a disposable
// Postgres database (the public schema is dropped between tests) and invoke
// the CLI as a real subprocess. Active only under `bun test` (NODE_ENV=test).
if (process.env.NODE_ENV === "test") {
const { test, expect, beforeEach, afterAll } = await import("bun:test");
const { mkdir, rm, writeFile, mkdtemp } = await import("node:fs/promises");
const { tmpdir } = await import("node:os");
const SELF = import.meta.path;
const tempDirs: string[] = [];
// Run this very file as a CLI subprocess from `cwd`, with NODE_ENV cleared
// so the CLI branch (not the test branch) executes. Captures both streams.
async function cli(cwd: string, ...args: string[]): Promise<{ stdout: string; stderr: string; exitCode: number }> {
const proc = Bun.spawn(["bun", SELF, ...args], {
cwd,
env: { ...process.env, NODE_ENV: "" },
stdout: "pipe",
stderr: "pipe",
});
const [stdout, stderr] = await Promise.all([
new Response(proc.stdout).text(),
new Response(proc.stderr).text(),
]);
return { stdout, stderr, exitCode: await proc.exited };
}
// Wipe all test state (tables, schema_migrations) by recreating the schema.
async function resetDb(): Promise<void> {
await db.unsafe(`DROP SCHEMA IF EXISTS public CASCADE; CREATE SCHEMA public`);
}
// Fresh temp directory containing an empty migrations/ subdirectory;
// remembered for cleanup in afterAll.
async function workspace(): Promise<string> {
const dir = await mkdtemp(join(tmpdir(), "migrate-test-"));
tempDirs.push(dir);
await mkdir(join(dir, "migrations"), { recursive: true });
return dir;
}
// Write a paired .up.sql/.down.sql migration into `dir`/migrations.
async function writeMigration(dir: string, version: string, name: string, upSql: string, downSql: string): Promise<void> {
await writeFile(join(dir, "migrations", `${version}_${name}.up.sql`), upSql);
await writeFile(join(dir, "migrations", `${version}_${name}.down.sql`), downSql);
}
beforeEach(async () => { await resetDb(); });
afterAll(async () => {
await resetDb();
db.close();
await Promise.all(tempDirs.map(d => rm(d, { recursive: true, force: true })));
});
test("applies migrations in order", async () => {
const dir = await workspace();
await writeMigration(dir, "20260101000000", "create_users",
"CREATE TABLE users (id SERIAL PRIMARY KEY, name TEXT NOT NULL);", "DROP TABLE users;");
await writeMigration(dir, "20260101000100", "create_posts",
"CREATE TABLE posts (id SERIAL PRIMARY KEY, user_id INT REFERENCES users(id), title TEXT);", "DROP TABLE posts;");
const result = await cli(dir, "up");
expect(result.exitCode).toBe(0);
expect(result.stdout).toContain("20260101000000_create_users");
expect(result.stdout).toContain("20260101000100_create_posts");
const applied = await db`SELECT version FROM schema_migrations ORDER BY version`;
expect(applied.map((r: any) => r.version)).toEqual(["20260101000000", "20260101000100"]);
});
test("idempotent up", async () => {
const dir = await workspace();
await writeMigration(dir, "20260101000000", "create_users",
"CREATE TABLE users (id SERIAL PRIMARY KEY, name TEXT NOT NULL);", "DROP TABLE users;");
await cli(dir, "up");
const second = await cli(dir, "up");
expect(second.exitCode).toBe(0);
expect(second.stdout).toContain("up to date");
});
test("rolls back last migration", async () => {
const dir = await workspace();
await writeMigration(dir, "20260101000000", "create_users",
"CREATE TABLE users (id SERIAL PRIMARY KEY, name TEXT NOT NULL);", "DROP TABLE users;");
await writeMigration(dir, "20260101000100", "create_posts",
"CREATE TABLE posts (id SERIAL PRIMARY KEY, title TEXT);", "DROP TABLE posts;");
await cli(dir, "up");
const result = await cli(dir, "down");
expect(result.exitCode).toBe(0);
expect(result.stdout).toContain("20260101000100_create_posts");
// to_regclass returns NULL when the relation does not exist.
const [{ exists }] = await db`SELECT to_regclass('public.posts') IS NOT NULL as exists`;
expect(exists).toBe(false);
const [{ exists: usersExist }] = await db`SELECT to_regclass('public.users') IS NOT NULL as exists`;
expect(usersExist).toBe(true);
});
test("checksum drift failure", async () => {
const dir = await workspace();
await writeMigration(dir, "20260101000000", "create_users",
"CREATE TABLE users (id SERIAL PRIMARY KEY, name TEXT NOT NULL);", "DROP TABLE users;");
await cli(dir, "up");
// Editing an already-applied .up.sql must make every subsequent command fail.
await writeFile(join(dir, "migrations", "20260101000000_create_users.up.sql"),
"CREATE TABLE users (id SERIAL PRIMARY KEY, name TEXT, email TEXT);");
const result = await cli(dir, "up");
expect(result.exitCode).toBe(1);
expect(result.stderr).toContain("Checksum mismatch");
});
test("missing file integrity failure", async () => {
const dir = await workspace();
await writeMigration(dir, "20260101000000", "create_users",
"CREATE TABLE users (id SERIAL PRIMARY KEY, name TEXT NOT NULL);", "DROP TABLE users;");
await cli(dir, "up");
await rm(join(dir, "migrations", "20260101000000_create_users.up.sql"));
await rm(join(dir, "migrations", "20260101000000_create_users.down.sql"));
const result = await cli(dir, "status");
expect(result.exitCode).toBe(1);
expect(result.stderr).toContain("missing from disk");
});
test("lock contention failure", async () => {
const dir = await workspace();
await writeMigration(dir, "20260101000000", "create_users",
"CREATE TABLE users (id SERIAL PRIMARY KEY, name TEXT NOT NULL);", "DROP TABLE users;");
// Hold the advisory lock from the test's own session so the subprocess's
// pg_try_advisory_lock fails immediately.
await db`SELECT pg_advisory_lock(${LOCK_ID})`;
try {
const result = await cli(dir, "up");
expect(result.exitCode).toBe(1);
expect(result.stderr).toContain("Another migration is running");
} finally {
await db`SELECT pg_advisory_unlock(${LOCK_ID})`;
}
});
}
// (gist page footer removed)