|
# Fix PATH for core commands
export PATH="/opt/homebrew/bin:/usr/local/bin:/usr/bin:/bin:$PATH"
export PATH="$PATH:/Users/johnlindquist/dev/agents/bin"

# If you come from bash you might have to change your $PATH.
# export PATH=$HOME/bin:$HOME/.local/bin:/usr/local/bin:$PATH

# Path to your Oh My Zsh installation.
export ZSH="$HOME/.oh-my-zsh"

# Set name of the theme to load --- if set to "random", it will
# load a random theme each time Oh My Zsh is loaded, in which case,
# to know which specific one was loaded, run: echo $RANDOM_THEME
# See https://github.com/ohmyzsh/ohmyzsh/wiki/Themes
ZSH_THEME="robbyrussell"

# Set list of themes to pick from when loading at random
# Setting this variable when ZSH_THEME=random will cause zsh to load
# a theme from this variable instead of looking in $ZSH/themes/
# If set to an empty array, this variable will have no effect.
# ZSH_THEME_RANDOM_CANDIDATES=( "robbyrussell" "agnoster" )

# Uncomment the following line to use case-sensitive completion.
# CASE_SENSITIVE="true"

# Uncomment the following line to use hyphen-insensitive completion.
# Case-sensitive completion must be off. _ and - will be interchangeable.
# HYPHEN_INSENSITIVE="true"

# Uncomment one of the following lines to change the auto-update behavior
# zstyle ':omz:update' mode disabled # disable automatic updates
# zstyle ':omz:update' mode auto # update automatically without asking
# zstyle ':omz:update' mode reminder # just remind me to update when it's time

# Uncomment the following line to change how often to auto-update (in days).
# zstyle ':omz:update' frequency 13

# Uncomment the following line if pasting URLs and other text is messed up.
# DISABLE_MAGIC_FUNCTIONS="true"

# Uncomment the following line to disable colors in ls.
# DISABLE_LS_COLORS="true"

# Uncomment the following line to disable auto-setting terminal title.
# DISABLE_AUTO_TITLE="true"

# Uncomment the following line to enable command auto-correction.
# ENABLE_CORRECTION="true"

# Uncomment the following line to display red dots whilst waiting for completion.
# You can also set it to another string to have that shown instead of the default red dots.
# e.g. COMPLETION_WAITING_DOTS="%F{yellow}waiting...%f"
# Caution: this setting can cause issues with multiline prompts in zsh < 5.7.1 (see #5765)
# COMPLETION_WAITING_DOTS="true"

# Uncomment the following line if you want to disable marking untracked files
# under VCS as dirty. This makes repository status check for large repositories
# much, much faster.
# DISABLE_UNTRACKED_FILES_DIRTY="true"

# Uncomment the following line if you want to change the command execution time
# stamp shown in the history command output.
# You can set one of the optional three formats:
# "mm/dd/yyyy"|"dd.mm.yyyy"|"yyyy-mm-dd"
# or set a custom format using the strftime function format specifications,
# see 'man strftime' for details.
# HIST_STAMPS="mm/dd/yyyy"

# Would you like to use another custom folder than $ZSH/custom?
# ZSH_CUSTOM=/path/to/new-custom-folder

# Which plugins would you like to load?
# Standard plugins can be found in $ZSH/plugins/
# Custom plugins may be added to $ZSH_CUSTOM/plugins/
# Example format: plugins=(rails git textmate ruby lighthouse)
# Add wisely, as too many plugins slow down shell startup.
plugins=(git)

# Quoted so a $ZSH value containing spaces still resolves to one path.
source "$ZSH/oh-my-zsh.sh"
|
|
|
# User configuration |
|
|
|
# export MANPATH="/usr/local/man:$MANPATH" |
|
|
|
# You may need to manually set your language environment |
|
# export LANG=en_US.UTF-8 |
|
|
|
# Preferred editor for local and remote sessions |
|
# if [[ -n $SSH_CONNECTION ]]; then |
|
# export EDITOR='vim' |
|
# else |
|
# export EDITOR='nvim' |
|
# fi |
|
|
|
# Compilation flags |
|
# export ARCHFLAGS="-arch $(uname -m)" |
|
|
|
# Set personal aliases, overriding those provided by Oh My Zsh libs, |
|
# plugins, and themes. Aliases can be placed here, though Oh My Zsh |
|
# users are encouraged to define aliases within a top-level file in |
|
# the $ZSH_CUSTOM folder, with .zsh extension. Examples: |
|
# - $ZSH_CUSTOM/aliases.zsh |
|
# - $ZSH_CUSTOM/macos.zsh |
|
# For a full list of active aliases, run `alias`. |
|
# |
|
# Example aliases |
|
# alias zshconfig="mate ~/.zshrc" |
|
# alias ohmyzsh="mate ~/.oh-my-zsh" |
|
|
|
# Added by Windsurf
export PATH="/Users/johnlindquist/.codeium/windsurf/bin:$PATH"

# pnpm
export PNPM_HOME="/Users/johnlindquist/Library/pnpm"
# Prepend PNPM_HOME only if it is not already somewhere in PATH
# (the ":$PATH:" padding makes the substring match exact per entry).
case ":$PATH:" in
  *":$PNPM_HOME:"*) ;;
  *) export PATH="$PNPM_HOME:$PATH" ;;
esac
# pnpm end

# alias code="/usr/local/bin/cursor"
# Trailing space is deliberate: in zsh it lets the *next* word on the
# command line also be alias-expanded.
alias cursor="/usr/local/bin/cursor "
# Drop any pre-existing `c` alias so the c() function below wins.
unalias c 2>/dev/null
|
c() {
  # Open path(s) in Cursor. With no arguments, open the current
  # directory. If the first argument does not exist yet, create it:
  # names starting with or containing a dot become files, everything
  # else becomes a directory.
  if [ $# -eq 0 ]; then
    /usr/local/bin/cursor .   # no args: open current directory
    return
  fi

  if [ ! -e "$1" ]; then
    case "$1" in
      .*|*.*) touch "$1" ;;      # dotted name: treat as a file
      *)      mkdir -p "$1" ;;   # otherwise: treat as a directory
    esac
  fi

  /usr/local/bin/cursor "$@"     # open the specified path(s)
}
|
# Windsurf launchers: bare editor, and jump straight to ~/dev/windsurf.
alias w="~/.codeium/windsurf/bin/windsurf"
alias ww="~/.codeium/windsurf/bin/windsurf ~/dev/windsurf"

# Zed launchers: bare editor, and open this zshrc.
alias z="zed"
alias zz="zed ~/.zshrc"
# Edit Karabiner (Goku) config in Cursor.
alias k="/usr/local/bin/cursor ~/.config/karabiner.edn"
# Reload this file into the current shell.
alias s="source ~/.zshrc"

# Clear any alias versions of these names so the function definitions
# below take effect; errors are silenced when no alias exists.
unalias fix 2>/dev/null
unalias feat 2>/dev/null
unalias chore 2>/dev/null
unalias takeAndWindsurf 2>/dev/null
unalias push 2>/dev/null
|
cfix() {
  # Conventional-commit helper: stage everything and commit as
  # "fix(<scope>): <message>" WITHOUT pushing (see fix() for the
  # pushing variant).
  local scope=$1 message=$2
  git add . && git commit -m "fix($scope): $message"
}
|
|
|
# Git fix function for conventional commits
fix() {
  # Stage everything, commit as "fix(<scope>): <message>", then push.
  # $1 - commit scope, $2 - commit message; both are required, otherwise
  # we would silently create a malformed "fix(): " commit.
  local scope="$1"
  local message="$2"
  if [ -z "$scope" ] || [ -z "$message" ]; then
    echo "Usage: fix <scope> <message>" >&2
    return 1
  fi
  git add . && git commit -m "fix($scope): $message" && git push
}
|
|
|
# Git chore function for conventional commits
chore() {
  # Stage everything, commit as "chore(<scope>): <message>", then push.
  local scope=$1 message=$2
  git add . && git commit -m "chore($scope): $message" && git push
}
|
|
|
# Git feat function for conventional commits
feat() {
  # Stage everything, commit as "feat(<scope>): <message>", then push.
  local scope=$1 message=$2
  git add . && git commit -m "feat($scope): $message" && git push
}
|
|
|
# Push function for "fix: tweak" message. No scope. Hardcoded message.
push() {
  # Quick "save everything" shortcut: stage all, commit with a canned
  # message, push. NOTE(review): shadows any external `push` command.
  git add . && git commit -m "fix: tweak" && git push
}
|
|
|
# NOTE: a second, byte-identical definition of chore() used to live here.
# It was an exact duplicate of the chore() defined earlier in this file
# (same body, same commit format, same push), so the redefinition has
# been removed; the earlier definition remains in effect.
|
|
|
takeAndWindsurf() {
  # Create-and-enter a directory, then open it in Windsurf.
  # Assumes `take` (Oh My Zsh mkdir+cd helper) and `windsurf` are on
  # PATH — TODO confirm; both come from setup earlier in this file.
  take "$1" && windsurf "$1"
}
|
|
|
# Interactively upgrade package.json deps via npm-check-updates (pnpm flavor).
alias pup="pnpm dlx npm-check-updates -i -p pnpm"
|
|
|
clone(){
  # Clone a GitHub repo via gh, open it in Windsurf, cd into it and
  # install dependencies with pnpm.
  # $1 - "owner/repo" slug
  # $2 - optional target directory (defaults to the repo name)
  local repo="$1"
  local dir="${2:-${repo##*/}}" # Use the second argument if provided, otherwise extract from repo
  # Bail out if the clone fails; otherwise we would cd/install into
  # a stale or nonexistent directory.
  gh repo clone "https://github.com/$repo" "$dir" || return 1
  w "$dir"
  # Guard the cd so `pnpm i` never runs in the wrong directory.
  cd "$dir" || return 1
  pnpm i
}
|
|
|
kdev(){
  # Build the ScriptKit repo (~/dev/kit), then return to the previous
  # directory and start its dev script.
  # Guard both cds: an unchecked `cd` would run pnpm in the wrong place.
  cd ~/dev/kit || return 1
  pnpm build
  cd - || return 1   # `cd -` also echoes the directory it returns to
  pnpm dev
}
|
|
|
share-react-project() {
  # Scaffold a Vite React project, publish it to GitHub under the
  # current gh user, and embed a CodeSandbox badge in the README.
  # $1 - project name (required). Side effects: creates a directory,
  # a git repo, and a public GitHub repository; cds into the project.
  if [[ -z "$1" ]]; then
    echo "Usage: share-react-project <project_name>"
    return 1
  fi

  local project_name="$1"
  # NOTE(review): `local x=$(cmd)` masks gh's exit status — if gh is not
  # authenticated, github_username is silently empty. Consider splitting
  # declaration from assignment.
  local github_username=$(gh api /user --jq '.login')

  echo "Creating Vite project: $project_name"
  pnpm create vite "$project_name" --template react

  cd "$project_name"

  echo "Initializing Git repository"
  git init

  echo "Adding all files to Git"
  git add .

  echo "Creating initial commit"
  git commit -m "Initial commit"

  local codesandbox_link="https://codesandbox.io/p/github/${github_username}/${project_name}"

  echo "Adding CodeSandbox link to README.md"
  echo "" >> README.md
  echo "## CodeSandbox" >> README.md
  # NOTE(review): the markdown link text is empty ("[](…)") — probably
  # meant to be "[Open in CodeSandbox](…)"; confirm intended rendering.
  echo "[](${codesandbox_link})" >> README.md

  echo "Adding README.md to Git"
  git add README.md

  echo "Committing README.md changes"
  git commit -m "Add CodeSandbox link"

  echo "Creating GitHub repository: $github_username/$project_name"
  gh repo create "$github_username/$project_name" --public

  echo "Pushing to remote 'origin'"
  git push -u origin main

  echo "Project '$project_name' created successfully!"
  echo "GitHub repository: https://github.com/$github_username/$project_name"
  echo "CodeSandbox link: $codesandbox_link"

  # Ensure pnpm path has highest precedence
  # NOTE(review): PATH mutation looks unrelated to this function's job —
  # confirm it is intentional here rather than leftover.
  export PATH="$PNPM_HOME:$PATH"
}
|
|
|
# pyenv setup (currently disabled).
# export PYENV_ROOT="$HOME/.pyenv"
# command -v pyenv >/dev/null || export PATH="$PYENV_ROOT/bin:$PATH"
# eval "$(pyenv init -)"

# Added by LM Studio CLI (lms)
export PATH="$PATH:/Users/johnlindquist/.lmstudio/bin"
|
|
|
|
|
pinit23(){
  # Bootstrap a minimal TypeScript/Node 23 project with pnpm:
  # package.json, tsconfig, .env smoke test, index.ts, git repo.
  # Run inside an empty project directory. Node 23.6+ runs .ts natively.
  pnpm init # Create package.json
  pnpm pkg set type=module # Set type to module
  pnpm pkg set scripts.dev="node --env-file=.env --no-warnings index.ts" # Set dev script to run index.ts
  pnpm set --location project use-node-version 23.6.1 # Auto-install/use node 23.6.1
  pnpm add -D @types/node @tsconfig/node23 @tsconfig/strictest # Add tsconfig
  pnpm add dotenv # Add dotenv
  echo 'TEST_API_KEY=Successfully loaded .env' > .env # Create .env
  pnpm dlx gitignore Node # Create .gitignore
  echo '{
"$schema": "https://json.schemastore.org/tsconfig",
"extends": ["@tsconfig/node23/tsconfig.json", "@tsconfig/strictest/tsconfig.json"]
}' > tsconfig.json # Create tsconfig.json
  echo 'declare global {
namespace NodeJS {
interface ProcessEnv {
TEST_API_KEY: string;
}
}
}

console.log(`${process.env.TEST_API_KEY || "Failed to load .env"}`);' > index.ts # Create index.ts
  mkdir logs
  pnpm dev # Run dev script
  git init
  git add .
  git commit -m "(feat):project setup"
}
|
|
|
|
|
|
|
ghsearch() {
  # Search GitHub code via `gh search code`, fetch raw file contents for
  # each hit, and write a markdown report to ~/searches, opening it in
  # Cursor. Logs verbosely to ~/searches/logs. Absolute tool paths are
  # used throughout to be robust against a broken user PATH.

  # Save original PATH
  local ORIGINAL_PATH="$PATH"

  # Set up logging
  local debug=1
  local timestamp=$(/bin/date +%Y%m%d-%H%M%S)
  local log_dir="$HOME/searches/logs"
  local log_file="$log_dir/ghsearch-$timestamp.log"

  # Ensure log directory exists
  /bin/mkdir -p "$log_dir" 2>/dev/null

  # Logging function
  # NOTE(review): defining log() here makes it visible shell-wide after
  # the first ghsearch call, not scoped to this function.
  log() {
    local level="$1"
    local message="$2"
    if [[ "$level" == "DEBUG" && "$debug" -eq 0 ]]; then
      return
    fi
    echo "[$level] $message" | /usr/bin/tee -a "$log_file"
  }

  log "DEBUG" "Starting ghsearch function"
  log "DEBUG" "Initial environment state:"
  log "DEBUG" "Command: ghsearch $*"
  log "DEBUG" "Original PATH: $ORIGINAL_PATH"

  # Set PATH to include necessary directories
  # NOTE(review): PATH is restored only at the end of the happy path;
  # every early `return 1` below leaves the modified PATH exported.
  export PATH="/opt/homebrew/bin:/usr/local/bin:/usr/bin:/bin:$ORIGINAL_PATH"
  log "DEBUG" "Current PATH: $PATH"

  # Get the search query
  local query="$*"
  if [ -z "$query" ]; then
    log "ERROR" "No search query provided"
    return 1
  fi

  log "INFO" "Processing query: $query"

  # Check for problematic characters
  if [[ "$query" =~ [^a-zA-Z0-9[:space:]/_.-] ]]; then
    log "WARN" "Query contains special characters that might need escaping: $query"
  fi

  # Create sanitized filename: non-alphanumerics become "_", trailing "_" stripped
  local sanitized_query=$(/bin/echo "$query" | /usr/bin/tr -c '[:alnum:]_-' '_' | /usr/bin/sed 's/_*$//')
  log "DEBUG" "Sanitized filename: $sanitized_query"

  # Set up results file
  local results_dir="$HOME/searches"
  local results_file="$results_dir/$sanitized_query-$timestamp.md"
  /bin/mkdir -p "$results_dir" 2>/dev/null

  log "INFO" "Will save results to: $results_file"

  # Check for required commands
  local gh_path=$(/usr/bin/which gh 2>/dev/null)
  local jq_path=$(/usr/bin/which jq 2>/dev/null)
  local curl_path=$(/usr/bin/which curl 2>/dev/null)

  log "DEBUG" "Found gh at: $gh_path"
  log "DEBUG" "Found jq at: $jq_path"
  log "DEBUG" "Found curl at: $curl_path"

  if [ -z "$gh_path" ] || [ -z "$jq_path" ] || [ -z "$curl_path" ]; then
    log "ERROR" "Missing required commands. Please install: gh, jq, curl"
    return 1
  fi

  # Execute GitHub search
  # NOTE(review): uses hardcoded /opt/homebrew/bin/gh (and jq below)
  # rather than the $gh_path/$jq_path just discovered — confirm intent.
  log "INFO" "Executing GitHub search: gh search code \"$query\" --json path,repository,url --limit 30"
  local search_output
  search_output=$(/opt/homebrew/bin/gh search code "$query" --json path,repository,url --limit 30)
  local gh_exit="$?"

  if [ "$gh_exit" -ne 0 ]; then
    log "ERROR" "GitHub search failed with exit code $gh_exit"
    log "ERROR" "Raw output: $search_output"
    return 1
  fi

  log "DEBUG" "gh exit code: $gh_exit"
  log "DEBUG" "gh raw output: $search_output"

  # Validate JSON output
  if ! /bin/echo "$search_output" | /opt/homebrew/bin/jq . >/dev/null 2>&1; then
    log "ERROR" "Invalid JSON response from GitHub search"
    log "ERROR" "Raw output: $search_output"
    return 1
  fi

  # Count results
  local result_count=$(/bin/echo "$search_output" | /opt/homebrew/bin/jq '. | length')
  log "DEBUG" "Found $result_count results"

  if [ "$result_count" -eq 0 ]; then
    log "INFO" "No results found"
    # Write a stub report so the output file always exists.
    {
      /bin/echo "# GitHub Code Search Results"
      /bin/echo "Query: \`$query\`"
      /bin/echo "Date: $(/bin/date)"
      /bin/echo
      /bin/echo "No results found for this query."
    } > "$results_file"
    return 0
  fi

  # Process results
  log "INFO" "Processing search results..."

  # Emit a markdown header, then one section per hit. The jq program
  # prints section headers followed by a raw.githubusercontent.com URL
  # line; the while-loop below fetches content for URL lines and passes
  # everything else through verbatim.
  {
    /bin/echo "# GitHub Code Search Results"
    /bin/echo "Query: \`$query\`"
    /bin/echo "Date: $(/bin/date)"
    /bin/echo
    /bin/echo "Found $result_count results. Showing code snippets containing your search terms."
    /bin/echo
    /bin/echo "## Results"
    /bin/echo

    /bin/echo "$search_output" | /opt/homebrew/bin/jq -r '.[] | "### [\(.repository.nameWithOwner)](\(.repository.url))\n\nFile: [\(.path)](\(.url))\n\n```" + (.path | match("\\.[a-zA-Z0-9]+$") | .string[1:] // "") + "\n# Content from \(.path):\n" + (.url | sub("github.com"; "raw.githubusercontent.com") | sub("/blob/"; "/")) + "\n"' | while read -r line; do
      if [[ "$line" =~ ^https ]]; then
        # This is a URL line, fetch the content
        content=$(/usr/bin/curl -s -L "$line")
        if [ -n "$content" ]; then
          # Number lines; cap each snippet at 50 lines.
          /bin/echo "$content" | /usr/bin/awk '{printf "%4d: %s\n", NR, $0}' | /usr/bin/head -n 50
          if [ "$(/bin/echo "$content" | /usr/bin/wc -l)" -gt 50 ]; then
            /bin/echo "... (truncated, showing first 50 lines)"
          fi
        else
          /bin/echo "Failed to fetch content from $line"
        fi
        /bin/echo '```'
        /bin/echo
        /bin/echo "---"
        /bin/echo
      else
        /bin/echo "$line"
      fi
    done
  } > "$results_file"

  # Try to open in Cursor
  log "DEBUG" "Opening results in Cursor"
  if [ -f "$results_file" ]; then
    if ! /Applications/Cursor.app/Contents/MacOS/Cursor "$results_file" 2>/dev/null; then
      log "ERROR" "Failed to open results in Cursor"
      /bin/echo "You can open the results manually with: cursor '$results_file'"
    fi
  else
    log "ERROR" "Results file not found: $results_file"
  fi

  # Restore original PATH
  export PATH="$ORIGINAL_PATH"
  log "DEBUG" "Restored PATH: $PATH"

  log "DEBUG" "ghsearch function completed"
}
|
|
|
# pnpm-managed Node 23.6.1 takes precedence over any system node.
export PATH="/Users/johnlindquist/Library/pnpm/nodejs/23.6.1/bin:$PATH"
# bun completions
[ -s "/Users/johnlindquist/.bun/_bun" ] && source "/Users/johnlindquist/.bun/_bun"

# bun
export BUN_INSTALL="$HOME/.bun"
export PATH="$BUN_INSTALL/bin:$PATH"
|
|
|
# wtree: Create a new worktree for each given branch. |
|
# Usage: wtree [ -p|--pnpm ] branch1 branch2 ... |
|
# |
|
# This function does the following: |
|
# 1. Parses command-line arguments; if -p/--pnpm is provided, it will later run "pnpm install". |
|
# 2. Determines the current branch and repository root. |
|
# 3. Uses a fixed parent directory (~/dev) to house all worktree directories. |
|
# 4. For each branch passed: |
|
# - If the branch does not exist, it is created from the current branch. |
|
# - It checks that a worktree for that branch does not already exist. |
|
# - It then creates a worktree in ~/dev using a naming convention: <repoName>-<branch>. |
|
# - If the install-deps flag is true, it runs "pnpm install" inside the new worktree. |
|
# - Finally, it either opens the new worktree via the custom "cursor" command (if defined) |
|
# or prints its path. |
|
wtree() {
  #######################################
  # Create a git worktree under ~/dev for each named branch, creating
  # missing branches from the current branch, optionally running
  # `pnpm install`, and opening each worktree in Cursor if available.
  # Usage: wtree [ -p|--pnpm ] branch1 branch2 ...
  #######################################

  # Flag to determine whether to run "pnpm install"
  local install_deps=false
  local branches=()

  # Parse command-line arguments
  while [[ $# -gt 0 ]]; do
    case "$1" in
      -p|--pnpm)
        install_deps=true
        shift
        ;;
      *)
        branches+=("$1")
        shift
        ;;
    esac
  done

  # Ensure at least one branch name is provided.
  if [[ ${#branches[@]} -eq 0 ]]; then
    echo "Usage: wtree [ -p|--pnpm ] branch1 branch2 ..."
    return 1
  fi

  # Determine the current branch; exit if not in a git repository.
  local current_branch
  current_branch=$(git rev-parse --abbrev-ref HEAD) || {
    echo "Error: Not a git repository."
    return 1
  }

  # Determine repository root and name.
  local repo_root repo_name
  repo_root=$(git rev-parse --show-toplevel) || {
    echo "Error: Cannot determine repository root."
    return 1
  }
  repo_name=$(basename "$repo_root")

  # Set fixed parent directory for worktrees.
  local worktree_parent="$HOME/dev"
  # Ensure the worktree parent directory exists.
  if [[ ! -d "$worktree_parent" ]]; then
    if ! mkdir -p "$worktree_parent"; then
      echo "Error: Failed to create worktree parent directory: $worktree_parent"
      return 1
    fi
  fi

  # Loop over each branch provided as argument.
  for branch in "${branches[@]}"; do
    # Define the target path using a naming convention: <repoName>-<branch>
    local target_path="$worktree_parent/${repo_name}-${branch}"

    echo "Processing branch: ${branch}"

    # Check if a worktree already exists at the target path.
    # NOTE(review): the path is used as an unescaped grep regex; dots in
    # repo/branch names match any character — confirm acceptable.
    if git worktree list | grep -q "^${target_path}[[:space:]]"; then
      echo "Error: Worktree already exists at ${target_path}. Skipping branch '${branch}'."
      continue
    fi

    # If the branch does not exist, create it from the current branch
    # (git branch with no start point branches from HEAD).
    if ! git show-ref --verify --quiet "refs/heads/${branch}"; then
      echo "Branch '${branch}' does not exist. Creating it from '${current_branch}'..."
      if ! git branch "${branch}"; then
        echo "Error: Failed to create branch '${branch}'. Skipping."
        continue
      fi
    fi

    # Create the new worktree for the branch.
    echo "Creating worktree for branch '${branch}' at ${target_path}..."
    if ! git worktree add "$target_path" "${branch}"; then
      echo "Error: Failed to create worktree for branch '${branch}'. Skipping."
      continue
    fi

    # If the install flag is set, run "pnpm install" in the new worktree.
    # Subshell keeps the caller's cwd untouched.
    if $install_deps; then
      echo "Installing dependencies in worktree for branch '${branch}'..."
      if ! ( cd "$target_path" && pnpm install ); then
        echo "Warning: Failed to install dependencies in '${target_path}'."
      fi
    fi

    # Optionally, open the worktree directory via a custom "cursor" command if available.
    if type cursor >/dev/null 2>&1; then
      cursor "$target_path"
    else
      echo "Worktree created at: ${target_path}"
    fi

    echo "Worktree for branch '${branch}' created successfully."
    echo "-----------------------------------------------------"
  done
}
|
|
|
|
|
# wtmerge: Merge changes from a specified worktree branch into main, |
|
# then clean up all worktrees and delete their branches. |
|
# |
|
# Usage: wtmerge <branch-to-keep> |
|
# |
|
# This function does the following: |
|
# 1. Verifies that the branch to merge (branch-to-keep) exists as an active worktree. |
|
# 2. Checks for uncommitted changes in that worktree: |
|
# - If changes exist, it attempts to stage and commit them. |
|
# - It gracefully handles the situation where there are no changes. |
|
# 3. Switches the current (main) worktree to the "main" branch. |
|
# 4. Merges the specified branch into main, with proper error checking. |
|
# 5. Uses "git worktree list" to retrieve all active worktrees (under ~/dev |
|
# and matching the naming pattern) and removes them. |
|
# 6. Deletes each branch that was created for a worktree (skipping "main"). |
|
wtmerge() {
  #######################################
  # Merge one wtree()-created worktree branch into main, then remove
  # ALL wtree worktrees for this repo and delete their branches.
  # Usage: wtmerge <branch-to-keep>
  # NOTE(review): assumes the main worktree is the current directory and
  # that the integration branch is literally named "main" — confirm.
  #######################################

  # Ensure exactly one argument is passed: the branch to merge.
  if [ $# -ne 1 ]; then
    echo "Usage: wtmerge <branch-to-keep>"
    return 1
  fi

  local branch_to_keep="$1"

  # Determine the repository root and its name.
  local repo_root repo_name
  repo_root=$(git rev-parse --show-toplevel) || {
    echo "Error: Not a git repository."
    return 1
  }
  repo_name=$(basename "$repo_root")

  # Fixed parent directory where worktrees are located.
  local worktree_parent="$HOME/dev"

  # Retrieve all active worktrees (from git worktree list) that match
  # the wtree() naming convention: ~/dev/<repo_name>-<branch>.
  local worktrees=()
  while IFS= read -r line; do
    # Extract the worktree path (first field)
    local wt_path
    wt_path=$(echo "$line" | awk '{print $1}')
    # Only consider worktrees under our fixed parent directory that match "<repo_name>-*"
    if [[ "$wt_path" == "$worktree_parent/${repo_name}-"* ]]; then
      worktrees+=("$wt_path")
    fi
  done < <(git worktree list)

  # Check that the target branch worktree exists.
  local target_worktree=""
  for wt in "${worktrees[@]}"; do
    if [[ "$wt" == "$worktree_parent/${repo_name}-${branch_to_keep}" ]]; then
      target_worktree="$wt"
      break
    fi
  done

  if [[ -z "$target_worktree" ]]; then
    echo "Error: No active worktree found for branch '${branch_to_keep}' under ${worktree_parent}."
    return 1
  fi

  # Step 1: In the target worktree, check for uncommitted changes
  # (both unstaged and staged), auto-committing them if present.
  echo "Checking for uncommitted changes in worktree for branch '${branch_to_keep}'..."
  if ! ( cd "$target_worktree" && git diff --quiet && git diff --cached --quiet ); then
    echo "Changes detected in branch '${branch_to_keep}'. Attempting auto-commit..."
    if ! ( cd "$target_worktree" &&
      git add . &&
      git commit -m "chore: auto-commit changes in '${branch_to_keep}' before merge" ); then
      echo "Error: Auto-commit failed in branch '${branch_to_keep}'. Aborting merge."
      return 1
    else
      echo "Auto-commit successful in branch '${branch_to_keep}'."
    fi
  else
    echo "No uncommitted changes found in branch '${branch_to_keep}'."
  fi

  # Step 2: Switch to the main worktree (assumed to be the current directory) and check out main.
  echo "Switching to 'main' branch in the main worktree..."
  if ! git checkout main; then
    echo "Error: Failed to switch to 'main' branch."
    return 1
  fi

  # Step 3: Merge the target branch into main.
  echo "Merging branch '${branch_to_keep}' into 'main'..."
  if ! git merge "${branch_to_keep}" -m "feat: merge changes from '${branch_to_keep}'"; then
    echo "Error: Merge failed. Please resolve conflicts and try again."
    return 1
  fi

  # Step 4: Remove all worktrees that were created via wtree(),
  # then delete their branches (merged or not — removal is forced).
  echo "Cleaning up worktrees and deleting temporary branches..."
  for wt in "${worktrees[@]}"; do
    # Extract branch name from worktree path.
    local wt_branch
    wt_branch=$(basename "$wt")
    wt_branch=${wt_branch#${repo_name}-} # Remove the repo name prefix

    echo "Processing worktree for branch '${wt_branch}' at ${wt}..."
    # Remove the worktree using --force to ensure removal.
    if git worktree remove "$wt" --force; then
      echo "Worktree at ${wt} removed."
    else
      echo "Warning: Failed to remove worktree at ${wt}."
    fi

    # Do not delete the 'main' branch.
    if [[ "$wt_branch" != "main" ]]; then
      if git branch -D "$wt_branch"; then
        echo "Branch '${wt_branch}' deleted."
      else
        echo "Warning: Failed to delete branch '${wt_branch}'."
      fi
    fi
  done

  echo "Merge complete: Branch '${branch_to_keep}' merged into 'main', and all worktrees cleaned up."
}
|
# Dart/Flutter pub global executables.
export PATH="$PATH":"$HOME/.pub-cache/bin"

# alias for "pkill Electron"
alias pke="pkill Electron"

# NPM global directory
export PATH="$HOME/.npm-global/bin:$PATH"

# Ensure pnpm path has highest precedence
export PATH="$PNPM_HOME:$PATH"

export PATH="$HOME/.local/bin:$PATH"

# Add npm global bin directory to PATH
# NOTE(review): duplicate of the ~/.npm-global entry a few lines above.
export PATH="$HOME/.npm-global/bin:$PATH"

# Atuin shell history: load its env, then hook into zsh.
. "$HOME/.atuin/bin/env"

eval "$(atuin init zsh)"

# Alias for managing dotfiles with git
alias config='/usr/bin/git --git-dir=/Users/johnlindquist/.config/.git --work-tree=/Users/johnlindquist'
|
|
|
# Alias for claude with opus model
alias opus='ENABLE_BACKGROUND_TASKS=1 claude --model opus'

# Function to initialize a bun project
bun_init() {
  # Delegates to claude, restricted to `bun init` shell commands only.
  claude --allowedTools "Bash(bun init:*)" --print "Please initialize a bun project"
}

# Run nx through pnpm dlx, forwarding all arguments.
nx(){
  pnpm dlx nx "$@"
}
|
|
|
# Alias for tail -f ~/Library/Logs/ScriptKit/<name>.log
tk(){
  # Follow a ScriptKit log by name, e.g. `tk main`.
  # $HOME inside quotes replaces the unquoted ~/…$1 form, so log names
  # containing spaces or glob characters no longer word-split or expand.
  tail -f "$HOME/Library/Logs/ScriptKit/$1.log"
}
|
|
|
# 1Password CLI shell plugins (assumes the file exists — TODO confirm).
source /Users/johnlindquist/.config/op/plugins.sh
|
|
|
|
|
|
|
|
|
function spike {
  # Snapshot all current work onto a throwaway "spike" branch: create
  # the branch, commit everything, then switch back to the original
  # branch. $1 optionally names the branch; default is spike/<base>-<epoch>.
  #
  # Declaration split from assignment: `local x=$(cmd) || return` never
  # fires because `local` itself always succeeds, swallowing git's
  # failure status when run outside a repository.
  local base
  base=$(git rev-parse --abbrev-ref HEAD 2>/dev/null) || return 1

  # Determine branch name: use first arg if given, else generate one
  local branch
  if [[ -n "$1" ]]; then
    branch="$1"
  else
    local suffix="$(date +%s)"
    branch="spike/${base}-${suffix}"
  fi

  echo "🌵 Spiking to $branch ..."
  git switch -c "$branch" || return 1
  git add -A || return 1
  # Use the part after the last slash for a concise commit message detail
  local msg_suffix="${branch##*/}"
  git commit -m "spike(${base}): $msg_suffix" || return 1
  git switch "$base"
}
|
|
|
|
|
# Suppress Python DeprecationWarning noise globally.
export PYTHONWARNINGS=ignore::DeprecationWarning

# Add ~/.zfunc to the completion function search path and initialize completion.
fpath+=~/.zfunc; autoload -Uz compinit; compinit
|
|
|
|
|
|
|
google() {
  # Web-search a query via claude (in parallel, one job per search term),
  # then have claude turn the combined results into a blog post.
  # Usage: google "search query"
  if [ $# -eq 0 ]; then
    echo "Usage: google \"search query\""
    return 1
  fi

  # Join all arguments into a single search query
  local search_query="$*"

  # Create a temporary directory to store intermediate results.
  local tmpdir
  tmpdir=$(mktemp -d) || return 1
  # Install the cleanup trap that the old comment claimed existed but never
  # did — previously every run leaked its temp directory.
  trap 'rm -rf -- "$tmpdir"' EXIT INT TERM

  # Arrays to store background process IDs and search terms
  local pids=()
  local searches=()
  local i=0

  # Currently the entire input is one search query; the loop below already
  # supports multiple terms if splitting is added later.
  searches+=("$search_query")

  # Process each search term
  local search
  for search in "${searches[@]}"; do
    # Skip empty search terms
    [ -z "$search" ] && continue

    echo "Googling... $search"

    # Run claude in background to search and summarize;
    # the parentheses create a subshell that runs in the background.
    (
      claude -p "web_search for <query>$search</query> and summarize the results" --allowedTools="web_search" > "$tmpdir/result_$i.txt"
    ) &

    # Store the process ID of the background job
    pids+=($!)

    # Increment counter for unique file names
    ((i++))
  done

  # Wait for all background jobs to finish
  local pid
  for pid in "${pids[@]}"; do
    wait "$pid"
  done

  # Concatenate all results into a single variable
  local results=""
  local file
  for file in "$tmpdir"/result_*.txt; do
    # Guard against the no-results case where the glob stays literal.
    [ -f "$file" ] || continue
    results+=$(cat "$file")
    # Add newline separator between results
    results+=$'\n'
  done

  # Generate final blog post from all search results
  # (assignment split from `local` so claude's exit status isn't masked).
  local final_report
  final_report=$(claude -p "Write a blog post based on these results for this original query <query>$search_query</query>: $results")

  # Output the final report
  echo "$final_report"

  # Explicit cleanup, then clear the trap for this (zsh function-local) scope.
  rm -rf -- "$tmpdir"
  trap - EXIT INT TERM
}
|
|
|
|
|
claude_chain(){
  # Run a prompt through claude (auto-accepting edits), committing after,
  # then — only if that succeeded — run a review-and-improve pass.
  local -a runner=(claude --permission-mode acceptEdits -p)
  local commit_step='then git commit'
  local request="$*"
  "${runner[@]}" "$request, $commit_step" && \
    "${runner[@]}" "Review and improve latest commit based on '$request', $commit_step"
}
|
|
|
claudepool() {
  # Claude with a Deadpool persona. Now forwards caller arguments ("$@"),
  # which the original silently dropped.
  claude --append-system-prompt "Talk like a caffeinated Deadpool with sadistic commentary and comically PG-13 rated todo lists." "$@"
}
|
|
|
organize(){
  # Run an opus "organizer" agent over the current Obsidian project.
  # Heredoc is quoted ('EOF') so nothing inside is expanded.
  local system_prompt=$(cat <<'EOF'

You are an expert obsidian project organizer. You closely follow these rules:

**1. Shallow folders; organize via links & tags.**

* **Example:** Keep only *Inbox / Projects / Archive* at the vault root.

* **Example:** Link notes with `[[Project‑XYZ]]`, tag with `#idea #typescript`.

* **Example:** Use the query `tag:#idea AND [[Project‑XYZ]]` instead of drilling through folders.

**2. Title notes clearly—descriptive topic.**

Remove dates from the names of files and note titles.

If you find dates, rename the file.

* **Example:** `Smart‑Keyboard‑Automation‑Ideas.md`

* **Example:** `Conquer‑Cursor‑Workshop‑Outline.md`

* **Example:** `YouTube‑Video‑Series‑Plan.md`

**3. Promote clusters to MOC hubs; review weekly.**

* **Example:** Create a “🚏 AI Dev Hub” that links all AI‑related notes.

* **Example:** Add a “🗂 Workshop Planning Hub” collecting prep docs.

* **Example:** Maintain a “📦 Script Kit Feature Hub” aggregating specs, tagged `#scriptkit`.

Your goal is to organize the current project. Use 5+ subagents in your effort to organize all of the files.

Commit changes frequently

EOF
)

  # Fire-and-forget agent run with streaming JSON output.
  ENABLE_BACKGROUND_TASKS=1 claude --model opus --dangerously-skip-permissions --append-system-prompt "$system_prompt" -p "Organize this project. Thank you!" --output-format stream-json --verbose
}
|
|
|
curate(){
  # Run an opus "curator" agent that condenses and restructures the vault.
  # Heredoc is quoted ('EOF') so nothing inside is expanded.
  # (Fixed prompt typos: <CRITCAL> → <CRITICAL>, "You goal" → "Your goal".)
  local system_prompt=$(cat <<'EOF'

You are an expert obsidian project curator highly skilled in curating and re-structuring obsidian files into smaller files by finding patterns and creating new files.

You focus on the concept of "Knowledge Paths" and "Knowledge Hubs".

You've been tasked with curating the current project into an easy to navigate and understand structure.

## Knowledge Paths

A knowledge path: Guiding users through the knowledge graph on a learning journey.

<CRITICAL>Above all, CONDENSE DUPLICATE INFORMATION. Assume that most files were generated by AI or from dictated transcripts, so curate, curate, curate!</CRITICAL>

**1. Title notes clearly—descriptive topic.**

Remove dates from the names of files and note titles.

If you find dates, rename the files.

If files contain the project name, remove the project name from the file name and update their permalinks.

## SYNCING NAMING CONVENTIONS

<CRITICAL>Remember files names, permalinks, backlinks. So use `rg` to keep things in sync.</CRITICAL>

Your goal is to curate the current project into a clear and easy to navigate structure for our entire team. Use 3+ subagents in your effort to curate all of the files.

## Steps

1. Use `tree` to understand the scope and structure of the project

2. Use `rg` to find duplicate information

3. update/create files to be more concise and clear

4. commit changes frequently and push when done

EOF
)

  # Fire-and-forget agent run with streaming JSON output.
  ENABLE_BACKGROUND_TASKS=1 claude --model opus --dangerously-skip-permissions --append-system-prompt "$system_prompt" -p "Organize this project. Thank you!" --output-format stream-json --verbose
}
|
|
|
verify(){
  # Run an opus agent that spot-checks vault content against the web.
  # Heredoc is quoted ('EOF') so nothing inside is expanded.
  # (Fixed prompt typo: "You goal" → "Your goal".)
  local system_prompt=$(cat <<'EOF'

You are an expert obsidian project curator highly skilled in verifying the current project against information on the web.

You've been tasked with verifying the current project is in a good state.

Your goal is to verify the current project is accurate and up to date.

## Steps

1. Use `tree` to understand the scope and structure of the project

2. Select 3 random files to verify

3. Verify by searching the web for up-to-date information

4. If the information is outdated, update the file

5. If the information is not found, create a new file

6. commit changes frequently and push when done

## Verification

Add "last-verified" frontmatter to each verified file with a timestamp

Add a "## Verifications" section at the end of the file with links to the original sources and why the information was verified

EOF
)

  # Fire-and-forget agent run with streaming JSON output.
  ENABLE_BACKGROUND_TASKS=1 claude --model opus --dangerously-skip-permissions --append-system-prompt "$system_prompt" -p "Verify this project. Thank you!" --output-format stream-json --verbose
}
|
|
|
# NOTE: superseded — the Basic Memory powered `research` defined later in
# this file overrides this definition at load time.
research(){
  # Research one or more topics with an opus agent, then re-organize.
  if [ $# -eq 0 ]; then
    echo "Usage: research <topic1> [topic2] ..."
    return 1
  fi

  # Agent instructions (quoted heredoc — taken literally, no expansion).
  local system_prompt=$(cat <<'EOF'

You are a research expert on the latest libraries and code tools.

Review existing markdown files in the current directory.

Use 3+ subagents to scour the internet for information.

Specifically, focus on creating examples that can be taught in a workshop environment.

Your goal is to write organized markdown files that are appropriate for an obsidian vault.

You will be presented with a single topic and you will be expected to scour the internet for information and write a markdown file that is appropriate for an obsidian vault.

All examples should be appropriate for TypeScript developers

Commit changes frequently

EOF
)

  # One streaming agent run per topic.
  for topic in "$@"; do
    echo "Researching: $topic"
    ENABLE_BACKGROUND_TASKS=1 claude --model opus --dangerously-skip-permissions --append-system-prompt "$system_prompt" -p "$topic" --output-format stream-json --verbose
  done

  # Finish with a full vault re-organization pass.
  organize
}
|
|
|
|
|
|
|
|
|
# New Basic Memory powered research function (overrides previous definitions)
research(){
  # Research each topic with claude and store the note in Basic Memory.
  # Usage: research <topic1> [topic2] ...
  if [ $# -eq 0 ]; then
    echo "Usage: research <topic1> [topic2] ..."
    return 1
  fi

  # Basic Memory project = current directory name.
  local project_name="$(basename "$PWD")"

  local topic content
  for topic in "$@"; do
    echo "🔎 Researching '$topic' for project '$project_name'..."

    # Generate markdown content with Claude. On failure, skip the topic
    # instead of writing an empty note (previously unchecked).
    if ! content="$(claude --model opus --dangerously-skip-permissions -p "You are an expert technical researcher. Produce an Obsidian-ready markdown note for topic '${topic}' aimed at TypeScript developers. Include clear headings, bullet points, code examples, and helpful references.")"; then
      echo "research: claude failed for '$topic'" >&2
      continue
    fi

    # Save the note to Basic Memory (printf is safe for arbitrary content,
    # e.g. notes beginning with "-").
    printf '%s\n' "$content" | basic-memory --project "$project_name" tool write-note --title "$topic" --folder "research"
  done

  # Show recent activity for quick confirmation
  basic-memory --project "$project_name" tool recent-activity --timeframe 1d
}
|
|
|
|
|
quartz_research() {
  # Research topics via the `cl` (container-use) claude wrapper and merge
  # the resulting environment branch back with a second claude pass.
  # MCP config wiring the container-use stdio server.
  local MCP_CONFIG=$(cat <<'EOF'

{

  "mcpServers": {

    "container-use": {

      "type": "stdio",

      "command": "container-use",

      "args": ["stdio"]

    }

  }

}

EOF
)

  if [ $# -eq 0 ]; then
    echo "Usage: quartz_research <topic1> [topic2] ..."
    return 1
  fi

  local project_name="$(basename "$PWD")"

  # Agent instructions. NOTE(review): the 'EOF' delimiter is quoted, so
  # "$project_name" below is NOT expanded — the model sees the literal
  # string; confirm that is intended.
  local quartz_prompt=$(cat <<'EOF'

TOOLS:

ALWAYS use ONLY Environments for ANY and ALL file, code, or shell operations—NO EXCEPTIONS—even for simple or generic requests.

DO NOT install or use the git cli with the environment_run_cmd tool. All environment tools will handle git operations for you. Changing ".git" yourself will compromise the integrity of your environment.

You MUST inform the user how to view your work using `container-use log <env_id>` AND `container-use checkout <env_id>`. Failure to do this will make your work inaccessible to others.

SYSTEM:

You are Claude‑Research‑Agent v2 operating in a zsh environment inside the repo "$project_name".

OBJECTIVE:

For each user‑supplied *topic*, create or update Quartz Markdown notes so an expert developer can dive deep in ≤5 minutes of reading.

HARD CONSTRAINTS:

1. **Atomic commits**: After every file change, run `git add <file> && git commit -m "<slug>: <50‑char summary>"`.

2. **Citation style**: At the end of every note add a "## Sources" list with full URLs.

3. **Max note size**: ≤400 lines; split if larger.

4. **Duplication check**: Before writing, grep the repo for similar titles. If found, summarize and extend instead of duplicating.

5. **Web search**: Use *exactly three* sub‑agents (`researcher`, `summarizer`, `note‑writer`) per topic.

6. **Stop condition**: Exit loop when (a) note coverage score ≥0.8 or (b) five successive commits have <10 LOC each.

OUTPUT CONVENTIONS:

Return a terse status line per commit:

`[OK] <file_path> (<LOC_delta>) – <summary>`

FAIL FAST:

If any commit fails, echo `EXIT_FAILURE` and stop.

(The shell script watches for that literal string.)

TARGET AUDIENCE:

Expert developers seeking TypeScript solutions with real-world examples and practical scenarios.

REMINDER, EVERY TASK, NO MATTER WHAT:

ALWAYS use ONLY Environments for ANY and ALL file, code, or shell operations—NO EXCEPTIONS—even for simple or generic requests.

DO NOT install or use the git cli with the environment_run_cmd tool. All environment tools will handle git operations for you. Changing ".git" yourself will compromise the integrity of your environment.

You MUST inform the user how to view your work using `container-use log <env_id>` AND `container-use checkout <env_id>`. Failure to do this will make your work inaccessible to others.

EOF
)

  # ---------------------------------------------------------------------------
  # Iterate over requested topics and let Claude Code handle the heavy lifting.
  # ---------------------------------------------------------------------------
  # NOTE(review): `cl` (defined later in this file) builds its prompt from
  # its first positional argument only — the extra flags passed here are
  # not forwarded by cl's implementation; confirm intended.
  for topic in "$@"; do
    echo "🧠 quartz_research → '$topic' in project '$project_name'"
    ENABLE_BACKGROUND_TASKS=1 cl "$topic" \
      --model opus \
      --append-system-prompt "$quartz_prompt" \
      --mcp-config "$MCP_CONFIG" \
      --print | claude --print --append-system-prompt "Merge using git checkout main then container-use merge <branch>"
  done
}
|
|
|
|
|
|
|
basic_memory_consistency() {
  # Run a Basic-Memory "consistency" agent over the current repo's notes.
  local project_name="$(basename "$PWD")"
  # Ensure project exists; ignore "already exists" errors, abort on others
  {
    local bm_output
    bm_output=$(bm project add "$project_name" . 2>&1)
    local bm_status=$?
    if [ $bm_status -ne 0 ]; then
      if echo "$bm_output" | grep -qi "already exists"; then
        echo "Already exists, continuing..." # benign, continue
      else
        echo "$bm_output" >&2
        return $bm_status # fatal, abort function
      fi
    fi
  }
  # Agent instructions. NOTE(review): the 'EOF' delimiter is quoted, so
  # "$project_name" inside stays literal (not expanded) — confirm intended.
  local bm_prompt=$(cat <<'EOF'

SYSTEM:

You are Basic‑Memory‑Agent v2 operating in a zsh environment inside the repo "$project_name".

OBJECTIVE:

Ensure file consistency, organization, and proper metadata across all notes in the Basic Memory knowledge base.

HARD CONSTRAINTS:

1. **Atomic commits**: After every file change, run `git add <file> && git commit -m "<slug>: <50‑char summary>"`.

2. **Always use MCP tools**: Prefix all commands with `basic-memory --project "$project_name" ...`

3. **Max file size**: ≤400 lines; split if larger.

4. **Shallow hierarchy**: Use only `notes/`, `docs/`, `research/` folders.

5. **Stop condition**: Exit when no inconsistencies found after full scan.

MCP TOOLS AVAILABLE:

- search_notes, read_note, write_note, recent_activity, edit_note, view_note, move_note

OUTPUT CONVENTIONS:

Return a terse status line per change:

`[OK] <action> <file_path> – <summary>`

FAIL FAST:

If any operation fails, echo `EXIT_FAILURE` and stop.

TARGET AUDIENCE:

Expert developers seeking TypeScript solutions with consistent, navigable documentation.

EOF
)

  # ---------------------------------------------------------------------------
  # Iterate over requested topics and let Claude Code handle the heavy lifting.
  # ---------------------------------------------------------------------------

  # NOTE(review): "$topic" is never assigned in this function — no argument
  # parsing and no loop exists, so it expands empty here and in -p below.
  # This looks copied from quartz_research; confirm the intended prompt.
  echo "🧠 basic_memory_research → '$topic' in project '$project_name'"
  ENABLE_BACKGROUND_TASKS=1 claude \
    --model opus \
    --dangerously-skip-permissions \
    --allowedTools="run_terminal_cmd" \
    --append-system-prompt "$bm_prompt" \
    -p "$topic" \
    --output-format stream-json \
    --verbose \
    --mcp-config "{\"mcpServers\":{\"basic-memory\":{\"command\":\"bm\",\"args\":[\"--project\",\"$project_name\",\"mcp\"]}}}"
}
|
|
|
|
|
|
|
auto_quartz() {
  # Long-running improvement loop: ask Gemini for 3 edit tasks, hand them
  # to quartz_research, and back off (with a retry cap) on failures.

  # NOTE(review): `set -euo pipefail` inside a function changes options for
  # the *whole* shell session (they persist after the function returns
  # unless LOCAL_OPTIONS is set) — confirm intended in an interactive rc.
  set -euo pipefail

  local base_sleep=300 # 5 minutes
  local failure_penalty=1800 # 30 minutes
  local max_retries=5
  local retry_count=0

  # Helper function to run command and sleep.
  # NOTE: defined at call time, so run_and_sleep leaks into the global
  # function namespace after the first auto_quartz invocation.
  run_and_sleep() {
    local cmd_name="$1"
    shift
    local extra_sleep=0
    local output_file
    output_file=$(mktemp)

    # Run the command and capture output
    if "$@" 2>&1 | tee "$output_file"; then
      # Check for EXIT_FAILURE sentinel
      if grep -q "EXIT_FAILURE" "$output_file"; then
        echo "⚠️ $cmd_name reported EXIT_FAILURE, extending next sleep by 30 minutes"
        extra_sleep=$failure_penalty
        ((retry_count++))
      else
        retry_count=0 # Reset on success
      fi
    else
      echo "⚠️ $cmd_name failed, extending next sleep by 30 minutes"
      extra_sleep=$failure_penalty
      ((retry_count++))
    fi

    rm -f "$output_file"

    # Check if we've hit max retries
    if [ $retry_count -ge $max_retries ]; then
      echo "❌ Maximum retries ($max_retries) reached. Stopping auto_quartz."
      return 1
    fi

    # Calculate and perform sleep
    local total_sleep=$((base_sleep + extra_sleep))
    echo "😴 Sleeping for $((total_sleep / 60)) minutes... (failures: $retry_count/$max_retries)"
    sleep $total_sleep
  }

  # Main loop
  for i in {1..100}; do
    echo "🔄 Run #$i (retry count: $retry_count/$max_retries)"

    local instructions_file="$(date +%Y%m%d_%H%M%S)-instructions.txt"
    echo "🔍 Generating instructions to $instructions_file..."
    # Task-generation prompt for Gemini (quoted heredoc — literal text).
    local gemini_prompt=$(cat <<'EOF'

SYSTEM:

You are *Gemini‑Tasks‑Bot v1*.

You output EXACTLY a JSON array of 3 task objects — no prose, no comments.

JSON‑Schema (implicit, not to be printed):

[

{

"task_id": "string, slug‑style",

"file_path": "string, relative path to .md",

"section": "string, current heading (if any) or 'front‑matter'",

"change_type": "enum['add','revise','remove']",

"instructions": "string, ≤120 words, imperative voice",

"link_targets": ["array of other .md paths to connect via [[wikilinks]]"]

}

]

CONTEXT (read‑only):

<<<

@combined.md

>>>

GOAL:

Improve clarity, structure, and interconnectedness of these notes so developers can navigate the Quartz knowledge graph effortlessly.

RULES:

• Always propose exactly 3 tasks.

• Each instructions field must reference line numbers or heading text that appears in CONTEXT.

• Prefer adding cross‑links that strengthen the graph (avoid dead ends).

• Do NOT invent file paths.

OUTPUT:

Return ONLY valid JSON that matches the schema.

EOF
)
    # Pick 3 random markdown files from ./content/**/*.md to focus instructions on
    # NOTE(review): `mapfile` is a bash builtin (zsh lacks it without the
    # zsh/mapfile module) and `shuf` is GNU coreutils (not stock macOS) —
    # confirm both are available where this runs.
    local random_files=""
    local file_digests=""
    if [ -d "./content" ]; then
      mapfile -t random_files_array < <(find ./content -name "*.md" -type f | shuf -n 3)
      if [ ${#random_files_array[@]} -gt 0 ]; then
        # Build file digests for context injection
        for file in "${random_files_array[@]}"; do
          if [ -f "$file" ]; then
            file_digests+="FILE: $file"$'\n'
            file_digests+="$(head -n 20 "$file")"$'\n'
            file_digests+="---"$'\n'
          fi
        done

        # Replace the placeholder with actual file digests
        # NOTE(review): gemini_prompt contains no literal "FILE_DIGESTS"
        # placeholder (its CONTEXT block references @combined.md instead),
        # so this substitution is currently a no-op — confirm which source
        # of context was intended.
        gemini_prompt="${gemini_prompt//FILE_DIGESTS/$file_digests}"
        random_files="${random_files_array[*]}"
      fi
    fi

    echo "Suggesting improvements to these files: $random_files"
    repomix --include "./content/**/*.md" --style markdown --output combined.md
    gemini --model gemini-2.5-pro --prompt "$gemini_prompt" > "$instructions_file"
    echo "🔍 Generated instructions: $instructions_file"

    # Validate JSON output before proceeding
    if ! jq empty "$instructions_file" 2>/dev/null; then
      # NOTE(review): the message says "skipping this iteration" but
      # `return 1` exits the whole function — `continue` may have been
      # intended; confirm.
      echo "⚠️ Invalid JSON output from Gemini - skipping this iteration"
      return 1
    fi

    run_and_sleep "quartz_research" \
      quartz_research "Follow the instructions in $instructions_file. --- The instructions were generated by an AI, so verify them before running them."

    # run_and_sleep "curate" curate

    # run_and_sleep "verify" verify
  done
}
|
|
|
#### ————————————————————————————————————————————————————————————
#### Quartz task queue – one job at a time, no token bonfires 🔥
#### ————————————————————————————————————————————————————————————

# Where we stash the queue and locks
export QUARTZ_QUEUE_DIR="$HOME/.quartz_research_queue"
# Queue file: one "directory|topic" job per line (see the worker below).
export QUARTZ_QUEUE_FILE="$QUARTZ_QUEUE_DIR/queue.txt"
# flock(1) lock file guarding queue reads/truncation.
export QUARTZ_QUEUE_LOCK="$QUARTZ_QUEUE_DIR/lock"
# PID file for the single background worker.
export QUARTZ_QUEUE_PID="$QUARTZ_QUEUE_DIR/worker.pid"
mkdir -p "$QUARTZ_QUEUE_DIR"
|
|
|
# ───────────────────────────────────────────────────────────────────
# Internal worker – **DO NOT CALL DIRECTLY**
# Consumes the queue until it’s empty, then exits.
# ───────────────────────────────────────────────────────────────────
_quartz_research_worker() {
  while true; do
    # Grab next item atomically and truncate the queue.
    # The single-quoted flock command works because the QUARTZ_* vars are
    # exported above. NOTE(review): flock(1) is not shipped with stock
    # macOS — confirm it is installed.
    local job
    job=$(flock -x "$QUARTZ_QUEUE_LOCK" -c '
if [ -s "$QUARTZ_QUEUE_FILE" ]; then
head -n1 "$QUARTZ_QUEUE_FILE"
tail -n +2 "$QUARTZ_QUEUE_FILE" > "$QUARTZ_QUEUE_FILE.tmp"
mv "$QUARTZ_QUEUE_FILE.tmp" "$QUARTZ_QUEUE_FILE"
fi
')

    # Empty queue → drop the PID marker and stop this worker process.
    [[ -z $job ]] && { rm -f "$QUARTZ_QUEUE_PID" ; exit 0; }

    # Split the line into directory|topic
    local dir=${job%%|*}
    local topic=${job#*|}

    # Run the real task in the recorded directory
    # (subshell so the worker's own cwd never changes).
    ( cd "$dir" && quartz_research "$topic" )
  done
}
|
|
|
# ───────────────────────────────────────────────────────────────────
# Public entry point – queue instead of launch.
# Usage: quartz_research_queue <topic1> [topic2] …
# ───────────────────────────────────────────────────────────────────
quartz_research_queue() {
  if [ $# -eq 0 ]; then
    echo "Usage: quartz_research_queue <topic1> [topic2] ..."
    return 1
  fi

  # Record the *current* directory with each topic so the job later runs
  # where it was queued, not wherever the worker happens to be.
  local dir="$PWD"
  for topic in "$@"; do
    printf '%s|%s\n' "$dir" "$topic" >> "$QUARTZ_QUEUE_FILE"
  done
  echo "🗂️ Queued: $*"

  # Fire up a worker if none is active
  # (active = PID file exists AND the recorded process answers kill -0).
  if ! { [ -f "$QUARTZ_QUEUE_PID" ] && kill -0 "$(cat "$QUARTZ_QUEUE_PID")" 2>/dev/null; }; then
    # `&!` is zsh-specific: background + disown in one step.
    _quartz_research_worker &!
    echo $! > "$QUARTZ_QUEUE_PID"
    echo "⏳ Worker started (PID $(cat "$QUARTZ_QUEUE_PID"))."
  fi
}
|
|
|
# ─────────────────────────────────────────────────────────────────── |
|
# Optional quality‑of‑life helpers |
|
# ─────────────────────────────────────────────────────────────────── |
|
quartz_queue_status() {
  # Print the pending queue, one bullet per "dir|topic" line.
  echo "📋 Pending jobs:"
  if [[ -s "$QUARTZ_QUEUE_FILE" ]]; then
    awk -F'|' '{printf "  • %s — %s\n",$1,$2}' "$QUARTZ_QUEUE_FILE"
  else
    # Covers both a missing and an empty queue file. The old
    # `awk … || echo " (empty)"` only fired when awk itself failed, so an
    # existing-but-empty queue printed nothing at all.
    echo "  (empty)"
  fi
}
|
|
|
quartz_queue_cancel_all() {
  # Truncate the queue file in place; confirm only if that worked.
  if : > "$QUARTZ_QUEUE_FILE"; then
    echo "🚮 Cleared the queue."
  fi
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
course_builder_plan(){
  # Build a step-by-step plan for a course-builder question, consulting the
  # deepwiki MCP server for the badass-courses/course-builder repo.
  local MCP_CONFIG=$(cat <<'EOF'

{

"mcpServers": {

"deepwiki": {

"type": "sse",

"url": "https://mcp.deepwiki.com/sse"

}

}

}

EOF
)

  local prompt=$(cat <<'EOF'

# Create a plan from the user's request

## Prepare

Use mcp__deepwiki__ask_question using the "badass-courses/course-builder" repository to answer the user's question

## Plan Format

- Format in markdown

- Write the "goal" as the main header

- List each step as a commit message

- Show how to verify that each step is working and complete

- Include exact file paths and sections

- Explicitly define exactly what to change

## Goal

The plan should be easy enough for a simple AI Agent to walk through every step

EOF
)

  # NOTE(review): currently unused (and MCP_CONFIG is not passed either) —
  # kept for future --allowedTools / --mcp-config wiring; confirm.
  local allowed_tools=$(cat <<'EOF'

mcp__deepwiki__ask_question

EOF
)

  # FIX: the previous version had a commented-out "# --print \" line in the
  # middle of a backslash continuation, which terminated the command early
  # and then executed "$@" as a *separate* command. Pass the args directly.
  cl --system-prompt "$prompt" "$@"
}
|
|
|
course_builder_claude(){
  # Feed the course-builder answer into an interactive claude session.
  # NOTE(review): `ask_course_builder` is not defined anywhere in this
  # file — possibly `course_builder_plan` was meant; confirm before use.
  claude "$(ask_course_builder "$@")"
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# Load personal helper functions kept outside this rc file.
source ~/.zsh/custom/functions.zsh
|
|
|
cl(){
  # Claude wrapper that forces all work through container-use environments:
  # registers the MCP server, restricts tools, and cleans up on exit.
  local system_prompt=$(cat <<'EOF'

ALWAYS use ONLY Environments for ANY and ALL file, code, or shell operations—NO EXCEPTIONS—even for simple or generic requests.

DO NOT install or use the git cli with the environment_run_cmd tool. All environment tools will handle git operations for you. Changing ".git" yourself will compromise the integrity of your environment.

You MUST inform the user how to view your work using `container-use log <env_id>` AND `container-use checkout <env_id>`. Failure to do this will make your work inaccessible to others.

EOF
)
  # Newline-separated tool list folded into a comma-separated value.
  local allowed_tools=$(cat <<'EOF' | tr '\n' ',' | sed 's/,$//'

mcp__container-use__environment_checkpoint

mcp__container-use__environment_create

mcp__container-use__environment_add_service

mcp__container-use__environment_file_delete

mcp__container-use__environment_file_list

mcp__container-use__environment_file_read

mcp__container-use__environment_file_write

mcp__container-use__environment_open

mcp__container-use__environment_run_cmd

mcp__container-use__environment_update

Web

EOF
)

  local append_user_prompt=$(cat <<'EOF'

Before you begin this task, please remember to always:

1. use the container-use tool to run commands!

2. use conventional commit messages

EOF
)

  # Concat the append_user_prompt to the first positional argument.
  local first_arg="$1"
  # FIX: "\n" inside double quotes is a literal backslash-n in zsh; use
  # $'\n' so the reminder is separated by real blank lines.
  local improved_prompt="${first_arg}"$'\n\n'"${append_user_prompt}"

  # Set up cleanup trap for exit or error
  trap 'claude mcp remove container-use 2>/dev/null || true' EXIT ERR

  claude mcp add container-use -- container-use stdio || echo "container-use already installed"
  claude --allowedTools "$allowed_tools" \
    --dangerously-skip-permissions \
    --append-system-prompt "$system_prompt" \
    "$improved_prompt"
}
|
|
|
improve(){
  # Prompt-engineering assistant: rewrites a user prompt into a stronger
  # Claude prompt and prints the result (quoted heredoc — literal text).
  local prompt=$(cat <<'EOF'

**Prompt Improver Instructions (for Claude):**

You are a world-class prompt engineer and programmer. **Your task is to take any user prompt and transform it into a well-crafted prompt optimized for Anthropic Claude**, especially for programming-related tasks.

When a user gives you a prompt to improve, follow this process:

1. **Understand the User’s Request:** Identify the key goal and requirements. Determine what the user is *really* asking for, including any programming languages, frameworks, or specific outcomes mentioned or implied. Note any missing details that are important.

2. **Clarify if Needed:** If the prompt is vague or missing information that Claude would need (e.g. no language specified, or not enough context to solve a bug), include in the improved prompt an instruction for Claude to ask the user for clarification or provide assumptions. Ensure the improved prompt doesn’t leave ambiguity unresolved.

3. **Rewrite and Expand:** Craft a clear, detailed prompt that addresses the user’s needs:

   - **Role/Context:** If relevant, start by assigning Claude a role suited to the task (e.g. “You are an expert Go developer…” or “You are a debugging assistant…”).

   - **Task Breakdown:** Break the task into steps or sections if it’s complex. Use a numbered list or bullet points to guide Claude’s reasoning (e.g. steps to solve the problem or aspects to consider).

   - **Chain of Thought:** Encourage Claude to reason through the problem. For example, include a section like “Think step-by-step about X…” or an `<analysis>` block where Claude can work through the solution before presenting the final answer.

   - **Examples:** If the user provided examples or if examples would help, format them clearly (e.g. use `<example>` tags or a structured list of examples). Ensure examples follow a consistent format (like Input/Output pairs) to guide Claude.

   - **Improvements:** Add any details or instructions that would make the prompt more precise and effective. Fix any unclear wording or grammar from the original prompt. Essentially, make the prompt **unambiguous** and **fool-proof**.

   - **Output Requirements:** Explicitly state what form the answer should take. If the user expects code, specify the language and that only code should be returned (e.g. “Provide the final code in a Markdown ``` code block, with no explanation”). If an explanation or analysis is needed, specify the format (e.g. bullet points, or a brief paragraph, etc.). Include any formatting tags or keywords to enforce the output structure.

4. **Final Check:** Ensure the new prompt is self-contained and crystal clear. It should anticipate and prevent common errors (e.g. if the user didn’t mention a limit but would obviously want one, like performance constraints, you might add “assume the dataset is large, optimize accordingly”). The improved prompt should be ready for Claude to answer effectively.

5. **Output Format:** Output the improved prompt inside a single Markdown code block for easy copying. Do not add any additional text or commentary before the markdown block. *After* the code block, you **may** provide a brief explanation of how you improved the prompt, but keep this explanation short and focused.

Remember: **Do NOT change the user’s intent.** Preserve the original request, just make it clearer and more actionable. Your tone in the improved prompt should be professional and direct (it’s instructions for Claude). You are free to be a bit witty or irreverent in **your own explanation after** the prompt, but **not** in the prompt itself (unless the task specifically calls for a casual tone).

The user will give a prompt, you analyze it, then output the improved prompt, then you're done.

EOF
)

  # Non-interactive run; forwards the raw user prompt as arguments.
  claude --print --append-system-prompt "$prompt" "$@"
}
|
|
|
gist(){
  # claude with the gist-specific settings profile applied.
  local settings_file="/Users/johnlindquist/.claude-settings/gist.json"
  claude --settings "$settings_file" "$@"
}
|
|
|
github_tasks(){
  # claude with the github-tasks settings profile applied.
  local settings_file="/Users/johnlindquist/.claude-settings/github-tasks.json"
  claude --settings "$settings_file" "$@"
}
|
|
|
create_repo(){
  # Initialize a repository via claude's repoinit settings profile.
  # "${*:-…}" joins all args into one string, with a cwd-based default.
  # (The old "${@:-…}" relied on zsh's implicit in-string join; $* states
  # the join explicitly and behaves the same in bash.)
  local args="${*:-Initialize a repository based on the current working directory: $(pwd)}"
  claude --settings "/Users/johnlindquist/.claude-settings/repoinit.json" "$args"
}
|
|
|
popus(){
  # Clipboard contents + args, handed to dopus.
  # NOTE: an identical `popus` is defined again later in this file; the
  # later definition is the one in effect after the rc finishes loading.
  dopus "$(pbpaste) --- $@"
}
|
|
|
dopus(){
  # "Dangerous opus": skip permission prompts and pin the opus model.
  local model="opus"
  claude --dangerously-skip-permissions "$@" --model "$model"
}
|
|
|
popus(){
  # Prepend the clipboard contents to the prompt, then hand off to dopus.
  local clipboard
  clipboard="$(pbpaste)"
  dopus "$clipboard --- $@"
}
|
|
|
copus(){
  # Continue the previous claude conversation on opus, skipping permissions.
  local model="opus"
  claude --dangerously-skip-permissions "$@" --continue --model "$model"
}
|
|
|
conpus(){
  # claude limited to the container-use environment tool set,
  # with permission prompts disabled.
  local tools="mcp__container-use__environment_checkpoint,mcp__container-use__environment_create,mcp__container-use__environment_add_service,mcp__container-use__environment_file_delete,mcp__container-use__environment_file_list,mcp__container-use__environment_file_read,mcp__container-use__environment_file_write,mcp__container-use__environment_open,mcp__container-use__environment_run_cmd,mcp__container-use__environment_update"
  claude --allowedTools "$tools" --dangerously-skip-permissions "$@"
}
|
|
|
# Workshop tool shims and jq filter scripts take PATH precedence.
export PATH="/Users/johnlindquist/dev/claude-workshop-live/bin:/Users/johnlindquist/dev/claude-workshop-live/jq-filters:$PATH"

# Personal "pack" CLI.
export PATH="/Users/johnlindquist/dev/pack/bin:$PATH"
|
|
|
|
|
# Run any command with Github CLI API key from 1Password.
with_github() {
  # Fetch the token from 1Password and strip the trailing newline.
  local token
  token=$(op item get "Github CLI Token" --fields password --reveal | tr -d '\n')

  # Bail early if 1Password returned nothing.
  if [[ -z $token ]]; then
    echo "with_github: 1Password returned nothing 🤷♂️" >&2
    return 1
  fi

  # Env var is scoped to this single invocation only.
  GITHUB_API_KEY="$token" "$@"
}
|
|
|
with_free_gemini(){
  # Run a command with the *free-tier* Gemini key from 1Password.
  local __gemini_api_key
  __gemini_api_key=$(op item get "GEMINI_API_KEY_FREE" --fields credential --reveal | tr -d '\n')

  if [[ -z $__gemini_api_key ]]; then
    # FIX: previously reported itself as "with_gemini", hiding which
    # wrapper actually failed.
    echo "with_free_gemini: 1Password returned nothing 🤷♂️" >&2
    return 1
  fi

  # Env var scoped to this single invocation only.
  GEMINI_API_KEY="$__gemini_api_key" "$@"
}
|
|
|
with_gemini(){
  # Run a command with the paid Gemini key injected from 1Password.
  local key
  key=$(op item get "GEMINI_API_KEY" --fields credential --reveal | tr -d '\n')

  # No key → refuse to run the wrapped command.
  if [[ -z $key ]]; then
    echo "with_gemini: 1Password returned nothing 🤷♂️" >&2
    return 1
  fi

  GEMINI_API_KEY="$key" "$@"
}
|
|
|
with_openai(){
  # Run a command with OPENAI_API_KEY pulled from 1Password for just
  # this invocation.
  local key
  key=$(op item get "OPENAI_API_KEY" --fields credential --reveal | tr -d '\n')

  [[ -n $key ]] || { echo "with_openai: 1Password returned nothing 🤷♂️" >&2; return 1; }

  OPENAI_API_KEY="$key" "$@"
}
|
|
|
|
|
# Run claude-video with the paid Gemini key injected via with_gemini.
vid(){
  with_gemini claude-video "$@"
}
|
|
|
# Run any command with .env loaded from current directory.
with() {
  if [[ -f .env ]]; then
    # Subshell keeps the variables out of the caller's environment.
    # `set -a` auto-exports every assignment while sourcing, so quoted
    # values and values containing spaces survive — unlike the old
    # `export $(grep -v '^#' .env | xargs)`, which word-split them.
    (
      set -a
      . ./.env
      set +a
      "$@"
    )
  else
    echo "No .env file found in current directory" >&2
    return 1
  fi
}
|
|
|
nn8n(){
  # n8n with every Node builtin allowed inside Function nodes.
  # ("*" quoted for readability; assignment-prefix values are never
  # glob-expanded anyway, so the value is identical.)
  NODE_FUNCTION_ALLOW_BUILTIN="*" n8n "$@"
}
|
|
|
graude(){
  # Route claude through Groq's OpenAI-compatible endpoint.
  # NOTE(review): these exports persist in the current shell after the
  # function returns, so later plain `claude` invocations keep the Groq
  # routing — confirm that is intended.
  export ANTHROPIC_BASE_URL="https://api.groq.com/openai/chat/completions"
  # NOTE(review): the `op` lookup is unchecked — on failure an empty token
  # is exported silently.
  export ANTHROPIC_AUTH_TOKEN=$(op item get "GROQ_API_KEY" --fields credential --reveal | tr -d '\n')
  export ANTHROPIC_MODEL="openai/gpt-oss-120b"

  claude "$@"
}
|
|
|
create_cred(){
  # Store an API credential in 1Password.
  # $1 - item title; $2 - credential value.
  local title="$1"
  local credential="$2"

  # Guard against creating a broken/empty item when args are missing.
  if [[ -z $title || -z $credential ]]; then
    echo "usage: create_cred <title> <credential>" >&2
    return 1
  fi

  op item create --category "API Credential" --title "$title" credential="$credential"
}
|
|
|
|
|
gem(){

# Shortcut: run gemsum with GEMINI_API_KEY injected from 1Password
# via the with_gemini helper; all arguments pass straight through.
with_gemini gemsum "$@"

}
|
|
|
# cleancode(){ |
|
# claude --append-system-prompt "$(cat ~/.config/prompts/cleancode.md)" "$@" |
|
# } |
|
|
|
add_bm(){
  # Register the current directory as a basic-memory project and wire it
  # into claude as a stdio MCP server.
  local project_name
  project_name=$(basename "$PWD")

  # Don't register the MCP server if project creation failed.
  bm project add "$project_name" . || return

  claude mcp add -t stdio basic-memory -- bm --project "$project_name/memory" mcp
}
|
|
|
# file interactive |
|
# file interactive
filei(){
  # Pipe a file ($1) followed by a prompt line ($2) into interactive claude.
  # Fixes: ';' before '}' makes the brace group valid beyond zsh, and
  # printf keeps backslashes in $2 literal (zsh's echo interprets escapes).
  { cat -- "$1"; printf '%s\n' "$2"; } | claude
}
|
|
|
# file print |
|
# file print
filep(){
  # Pipe a file ($1) followed by a prompt line ($2) into claude --print.
  # Fixes: ';' before '}' makes the brace group valid beyond zsh, and
  # printf keeps backslashes in $2 literal (zsh's echo interprets escapes).
  { cat -- "$1"; printf '%s\n' "$2"; } | claude --print
}
|
|
|
# cmd interactive |
|
# cmd interactive
cmdi(){
  # Run a shell snippet ($1), append a prompt line ($2), pipe into claude.
  # NOTE: $1 is eval'd deliberately — only pass trusted command strings.
  # Fixes: ';' before '}' makes the brace group valid beyond zsh, and
  # printf keeps backslashes in $2 literal (zsh's echo interprets escapes).
  { eval "$1"; printf '%s\n' "$2"; } | claude
}
|
|
|
# cmd print |
|
# cmd print
cmdp(){
  # Run a shell snippet ($1), append a prompt line ($2), pipe into claude --print.
  # NOTE: $1 is eval'd deliberately — only pass trusted command strings.
  # Fixes: ';' before '}' makes the brace group valid beyond zsh, and
  # printf keeps backslashes in $2 literal (zsh's echo interprets escapes).
  { eval "$1"; printf '%s\n' "$2"; } | claude --print
}
|
|
|
|
|
dopex(){
  # "Dangerous codex": skip approvals/sandbox and crank reasoning
  # effort to high; remaining args pass straight through to codex.
  local -a flags=(
    --dangerously-bypass-approvals-and-sandbox
    -c model_reasoning_effort=high
  )
  codex "${flags[@]}" "$@"
}
|
|
|
upai(){
  # Update the three AI CLI tools to their latest published versions.
  local -a pkgs=(
    "@openai/codex@latest"
    "@anthropic-ai/claude-code@latest"
    "@google/gemini-cli@latest"
  )
  npm i -g "${pkgs[@]}"
}
|
|
|
|
|
# Usage: codex /Users/johnlindquist/.codex/sessions/2025/08/29/rollout-2025-08-29T09-09-29-513dcfa0-c0c3-4c44-8dcf-89c05f3816cd.jsonl |
|
|
|
|
|
codex_continue() {
  # Resume codex from the most recently modified session JSONL under
  # ~/.codex/sessions (searched recursively).
  local latest="" candidate

  # Compare mtimes pairwise with [[ -nt ]] instead of `xargs -0 ls -t`:
  # xargs may split a long file list across several `ls` invocations,
  # silently breaking the global newest-first ordering, and `head -n 1`
  # truncates paths containing newlines. NUL-delimited read handles
  # arbitrary filenames.
  while IFS= read -r -d '' candidate; do
    if [[ -z $latest || $candidate -nt $latest ]]; then
      latest=$candidate
    fi
  done < <(find ~/.codex/sessions -type f -name '*.jsonl' -print0 2>/dev/null)

  if [[ -z $latest ]]; then
    # Diagnostic belongs on stderr so callers capturing stdout stay clean.
    echo "No session files found in ~/.codex/sessions" >&2
    return 1
  fi

  echo "Resuming from: $latest"
  codex --config experimental_resume="$latest" "$@"
}
|
|
|
cload(){
  # Concatenate every markdown file under ai/ into one system prompt
  # for claude; remaining args pass through.
  local context
  # Split declaration from assignment so find's exit status isn't masked
  # by `local`; `-exec ... +` batches files into one cat instead of one
  # fork per file.
  context=$(find ai -type f -name "*.md" -exec cat {} +)
  claude --append-system-prompt "$context" "$@"
}
|
|
|
backlog_next(){
  # Pull the next backlog task and drive it through a full git-flow
  # cycle (branch → work → commit/push/PR) via the dopus helper.
  local prompt="Read the next task from the backlog. Follow git flow best practices and create a branch, work on the task, then commit/push/pr using the gh cli."
  dopus "$prompt"
}