@jerieljan
Last active December 12, 2024 06:16
A basic zsh completion for Simon Willison's `llm`. https://llm.datasette.io
#compdef llm
# Instructions:
#
# - Have llm working on your setup, of course. Have some models or templates to actually use.
# - Have zsh and zsh-completions configured and working (i.e., installed, fpath configured, tab completion works for other commands, etc.)
# - Place this file `_llm` alongside your completions (e.g., ~/.oh-my-zsh/custom/plugins/zsh_completions/src/)
# - Restart your terminal session or `source ~/.zshrc`
# - Try it out by pressing Tab after `-m` or `-t`; it should suggest whatever models you have available or templates you've configured.
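#
# For a plain zsh setup without a framework, a minimal sketch for wiring up a
# completions directory (example path; adjust to your setup) looks like:
#
#   fpath=(~/.zsh/completions $fpath)   # in ~/.zshrc, before compinit runs
#   autoload -Uz compinit && compinit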
# Define a blacklist of model IDs
blacklist_models=("gpt-3.5-turbo") # Add the models you want to exclude here (e.g., "gpt-4" "gpt-4-0125-preview" "gpt-4-1106-preview")
# Function to fetch model IDs and filter out blacklisted ones
function _llm_models {
  # Cache the model IDs for the rest of the shell session
  if [[ -z ${__llm_model_ids_cache} ]]; then
    # Fetch model IDs
    local models
    models=("${(@f)$(llm models | awk -F': ' '{print $2}' | awk '{print $1}')}")
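    # The line above assumes each line of `llm models` output looks roughly
    # like "OpenAI Chat: gpt-4o (aliases: 4o)": the first awk takes everything
    # after ": ", the second keeps only the model ID.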
    # Filter out blacklisted models, using an exact-match lookup via zsh's
    # (Ie) subscript flag instead of a regex, so model IDs containing regex
    # metacharacters (e.g., the "." in "gpt-3.5-turbo") are matched literally
    local model
    for model in "${models[@]}"; do
      if (( ! ${blacklist_models[(Ie)$model]} )); then
        __llm_model_ids_cache+=("${model}")
      fi
    done
  fi
  echo "${__llm_model_ids_cache[@]}"
}
# Function to fetch template IDs
function _llm_templates {
  # Cache the template IDs for the rest of the shell session
  if [[ -z ${__llm_template_ids_cache} ]]; then
    # Fetch template IDs
    local templates
    templates=("${(@f)$(llm templates | awk -F':' '{print $1}')}")
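    # Assumes each line of `llm templates` output looks like
    # "template-name : <prompt summary>"; awk keeps the part before the first
    # ":" (any padding spaces are later dropped by word splitting in compadd).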
    # Cache the templates
    __llm_template_ids_cache=("${templates[@]}")
  fi
  echo "${__llm_template_ids_cache[@]}"
}
# _llm is the function that handles the completion
function _llm {
  local context state state_descr line
  typeset -A opt_args
  # Define common arguments
  local -a common_opts=(
    '--version[Show the version and exit.]'
    '--help[Show this message and exit.]'
    '-a[Attachment path or URL]:file:_files'
    '--attach[Attachment path or URL]:file:_files'
  )
  # Define commands and their descriptions
  local -a commands=(
    'prompt:Execute a prompt'
    'aliases:Manage model aliases'
    'chat:Hold an ongoing chat with a model'
    'collections:View and manage collections of embeddings'
    'embed:Embed text and store or return the result'
    'embed-models:Manage available embedding models'
    'embed-multi:Store embeddings for multiple strings at once'
    'install:Install packages from PyPI into the same environment as LLM'
    'keys:Manage stored API keys for different models'
    'logs:Tools for exploring logged prompts and responses'
    'models:Manage available models'
    'ollama:Commands for working with models hosted on Ollama'
    'openai:Commands for working directly with the OpenAI API'
    'plugins:List installed plugins'
    'similar:Return top N similar IDs from a collection'
    'templates:Manage stored prompt templates'
    'uninstall:Uninstall Python packages from the LLM environment'
  )
  # Configure argument parsing and completion
  _arguments -C \
    "${common_opts[@]}" \
    '1:command:->cmds' \
    '-m[Model to use]:model id:->models' \
    '--model[Model to use]:model id:->models' \
    '-t[Template to use]:template id:->templates' \
    '--template[Template to use]:template id:->templates' \
    '*::option:->options'
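  # Each "->name" action above stores that name in $state rather than
  # completing immediately; the case statement below dispatches on it.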
  case $state in
    cmds)
      _describe -t commands 'llm commands' commands
      ;;
    models)
      compadd $(_llm_models)
      ;;
    templates)
      compadd $(_llm_templates)
      ;;
    options)
      case $words[1] in
        prompt | aliases | chat | collections | embed | embed-models | embed-multi | install | keys | logs | models | ollama | openai | plugins | similar | templates | uninstall)
          # Here, you may add specific options for the indicated commands if needed
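          # For example, a dedicated branch for `prompt` placed above this
          # catch-all might look like this -- a hypothetical sketch; check
          # `llm prompt --help` for the authoritative flags:
          #
          #   prompt)
          #     _arguments \
          #       '-s[System prompt to use]:system prompt:' \
          #       '--no-log[Do not log prompt and response to the database]'
          #     ;;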
          ;;
      esac
      ;;
  esac
}
# Run the completion function (invoked when the completion system loads this file)
_llm "$@"
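# To pick up edits to this file without restarting the shell (assuming it is
# already somewhere on your $fpath), clear and re-register the function:
#   unfunction _llm 2>/dev/null
#   autoload -Uz _llm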
@jerieljan (Author) commented on 2024-12-12:

  • Added support for the attachment flags (-a/--attach) to properly complete file paths.
  • Minor changes to the help text, making it more in line with what llm actually uses.
