Compare commits

...

36 Commits

Author SHA1 Message Date
thomas 62337a078c stop squashing 2026-04-21 09:16:30 +01:00
thomas 8e810418a5 remove notify 2026-04-20 16:09:45 +01:00
thomas a2d283a5c5 a bunch of changes 2026-04-20 13:55:11 +01:00
thomas ca0708a8ee fuck squashing 2026-04-17 12:12:36 +01:00
thomas 96060c899d remvoe cursor 2026-04-16 17:09:18 +01:00
thomas dc0e75eb46 stuff 2026-04-16 16:30:12 +01:00
thomas f3d9d42745 fix sudo 2026-04-16 16:00:09 +01:00
thomas 191cfbf182 resurrect claude 2026-04-16 15:49:19 +01:00
thomas 0bfdbd350e cursor stuff 2026-04-16 15:34:00 +01:00
thomas 9a7669af28 fix 2026-04-16 11:58:48 +01:00
thomas 534ec8b99f cursor extension 2026-04-16 11:55:57 +01:00
thomas c004356b5a zellij cleanup + small files 2026-04-16 09:48:44 +01:00
thomas 8fa80f58ea agents md to skip validations 2026-04-16 09:25:54 +01:00
thomas b42a9ecffa jj workspaces skill 2026-04-16 09:12:51 +01:00
thomas 966e40e71b anthropic fix 2026-04-13 15:52:17 +01:00
thomas 4af7031922 fix screenshots 2026-04-13 15:49:09 +01:00
thomas 6003f41a12 notify even when peon muted 2026-04-09 10:09:45 +01:00
thomas 587c54060b fix zellij 2026-04-08 10:59:18 +01:00
thomas cad0540600 fix biome 2026-04-07 17:29:51 +01:00
thomas fd2307eb0c fix autocomplete 2026-04-07 17:27:37 +01:00
thomas 6d525d0971 fix git blame lol 2026-04-07 16:48:17 +01:00
thomas 51073c07a8 update pi 2026-04-01 15:29:36 +01:00
thomas e4b6fbabc6 slow tool settings 2026-03-31 14:16:18 +01:00
thomas 85632c2e29 hook notifs 2026-03-31 12:30:12 +01:00
thomas 63caa82199 compat with claude skills + hooks support 2026-03-31 12:07:17 +01:00
thomas 39e7bddb35 update pi 2026-03-27 14:22:06 +00:00
thomas d5b4042b06 add gsf 2026-03-25 19:12:49 +00:00
thomas 4d19e7d320 fix nvim 2026-03-25 13:41:16 +00:00
thomas 227c1638f6 linear skill 2026-03-19 16:37:27 +00:00
thomas db41ec6e93 pi settings 2026-03-19 15:20:50 +00:00
thomas c44420ce7c osc52 or whatever for ssh clipboard 2026-03-19 15:20:27 +00:00
thomas f74242ed02 jj rules 2026-03-19 15:20:15 +00:00
thomas 335b12b0e4 small changes 2026-03-16 12:13:41 +00:00
Thomas G. Lopes 2e820d38e1 fix 2026-03-13 18:12:32 +00:00
thomas 008dac69f5 fix mac 2026-03-13 18:12:10 +00:00
thomas d0b1d3be4a sync mac scripts 2026-03-13 18:04:32 +00:00
70 changed files with 3270 additions and 890 deletions
+15 -1
View File
@@ -194,6 +194,17 @@
"centeringMode": "index",
"clockDateFormat": "d MMM yyyy",
"lockDateFormat": "",
"greeterRememberLastSession": true,
"greeterRememberLastUser": true,
"greeterEnableFprint": false,
"greeterEnableU2f": false,
"greeterWallpaperPath": "",
"greeterUse24HourClock": true,
"greeterShowSeconds": false,
"greeterPadHours12Hour": false,
"greeterLockDateFormat": "",
"greeterFontFamily": "",
"greeterWallpaperFillMode": "",
"mediaSize": 1,
"appLauncherViewMode": "list",
"spotlightModalViewMode": "list",
@@ -314,6 +325,7 @@
"matugenTemplateKcolorscheme": true,
"matugenTemplateVscode": true,
"matugenTemplateEmacs": true,
"matugenTemplateZed": true,
"showDock": false,
"dockAutoHide": false,
"dockSmartAutoHide": false,
@@ -355,6 +367,8 @@
"lockAtStartup": false,
"enableFprint": false,
"maxFprintTries": 3,
"enableU2f": false,
"u2fMode": "or",
"lockScreenActiveMonitor": "all",
"lockScreenInactiveColor": "#000000",
"lockScreenNotificationMode": 0,
@@ -377,7 +391,7 @@
"osdPosition": 5,
"osdVolumeEnabled": true,
"osdMediaVolumeEnabled": true,
"osdMediaPlaybackEnabled": true,
"osdMediaPlaybackEnabled": false,
"osdBrightnessEnabled": true,
"osdIdleInhibitorEnabled": true,
"osdMicMuteEnabled": true,
+1 -1
View File
@@ -1,3 +1,3 @@
if test (uname) = Darwin
fnm env --use-on-cd --shell fish | source
fnm env --use-on-cd --log-level=quiet --shell fish | source
end
+19 -1
View File
@@ -27,6 +27,14 @@ end
status is-interactive; and begin
# On macOS SSH sessions, normalize TERM if remote terminfo is missing
# (eg. TERM=alacritty from Linux host), otherwise tools like jj/less warn
if test (uname) = Darwin; and set -q SSH_TTY
if not infocmp "$TERM" >/dev/null 2>&1
set -gx TERM xterm-256color
end
end
# Abbreviations
abbr -a tx 'tmux'
abbr -a txa 'tmux attach'
@@ -94,9 +102,19 @@ status is-interactive; and begin
end
# Add user local bin to PATH
# PATH ordering on Linux: keep privileged wrapper binaries first (sudo, etc.)
if test (uname) = Linux
fish_add_path -m /run/wrappers/bin
fish_add_path -a -m /run/current-system/sw/bin
end
# Add user local bin to PATH, but keep it after system paths on Linux
if test -d "$HOME/.local/bin"
if test (uname) = Linux
fish_add_path -a -m "$HOME/.local/bin"
else
fish_add_path "$HOME/.local/bin"
end
end
# pnpm
+18
View File
@@ -0,0 +1,18 @@
---@class SigilConfig
---@field target table<string, string|boolean>
---@field ignore? string[]
---@type SigilConfig
local config = {
target = {
linux = "~/.config/gsf",
default = "~/.config/gsf",
},
ignore = {
-- "**/.DS_Store",
-- "**/*.tmp",
-- "cache/**",
},
}
return config
+17
View File
@@ -0,0 +1,17 @@
{
"mode": 0,
"sens_mult": 1.5,
"yx_ratio": 1.0,
"input_dpi": 400.0,
"angle_rotation": 0.0,
"accel": 2.0,
"offset_linear": 3.5,
"output_cap": 30.0,
"decay_rate": 0.1,
"offset_natural": 0.0,
"limit": 2.0,
"gamma": 1.0,
"smooth": 0.5,
"motivity": 1.5,
"sync_speed": 5.0
}
+3
View File
@@ -4,3 +4,6 @@ email = "thomasgl@pm.me"
[git]
write-change-id-header = true
[snapshot]
auto-update-stale = true
+10 -6
View File
@@ -3,11 +3,12 @@
[templates.ghostty]
input_path = '~/.config/matugen/templates/ghostty-theme'
output_path = '~/.config/ghostty/themes/matugen'
post_hook = 'pkill -SIGUSR2 ghostty'
post_hook = "pkill -SIGUSR2 ghostty || true && nohup ~/.config/matugen/scripts/sync-mac.sh file ~/.config/ghostty/themes/matugen ~/.config/ghostty/themes/matugen --remote-cmd 'pkill -SIGUSR2 ghostty || true' >/dev/null 2>&1 &"
[templates.kitty]
input_path = '~/.config/matugen/templates/kitty-colors.conf'
output_path = '~/.config/kitty/colors.conf'
post_hook = "nohup ~/.config/matugen/scripts/sync-mac.sh file ~/.config/kitty/colors.conf ~/.config/kitty/colors.conf >/dev/null 2>&1 &"
[templates.foot]
input_path = '~/.config/matugen/templates/foot-theme'
@@ -24,10 +25,12 @@ output_path = '~/.config/gtk-4.0/colors.css'
[templates.fish-prompt]
input_path = '~/.config/matugen/templates/fish-prompt-colors.fish'
output_path = '~/.config/fish/conf.d/prompt-colors.fish'
post_hook = "nohup ~/.config/matugen/scripts/sync-mac.sh file ~/.config/fish/conf.d/prompt-colors.fish ~/.config/fish/conf.d/prompt-colors.fish >/dev/null 2>&1 &"
[templates.yazi]
input_path = '~/.config/matugen/templates/yazi-theme.toml'
output_path = '~/.config/yazi/theme.toml'
post_hook = "nohup ~/.config/matugen/scripts/sync-mac.sh file ~/.config/yazi/theme.toml ~/.config/yazi/theme.toml >/dev/null 2>&1 &"
[templates.qt5ct]
input_path = '~/.config/matugen/templates/qtct-colors.conf'
@@ -44,28 +47,29 @@ output_path = '~/.config/niri/colors.kdl'
[templates.tmux]
input_path = '~/.config/matugen/templates/tmux-colors.conf'
output_path = '~/.config/tmux/colors.conf'
post_hook = 'tmux source-file ~/.config/tmux/tmux.conf 2>/dev/null || true && nohup ~/.config/matugen/scripts/sync-tmux-mac.sh >/dev/null 2>&1 &'
post_hook = "tmux source-file ~/.config/tmux/tmux.conf 2>/dev/null || true && nohup ~/.config/matugen/scripts/sync-mac.sh file ~/.config/tmux/colors.conf ~/.config/tmux/colors.conf --remote-cmd 'export PATH=\"/opt/homebrew/bin:/usr/local/bin:$PATH\" && tmux source-file ~/.config/tmux/tmux.conf 2>/dev/null || true' >/dev/null 2>&1 &"
[templates.zellij]
input_path = '~/.config/matugen/templates/zellij-colors.kdl'
output_path = '~/.config/zellij/themes/matugen.kdl'
post_hook = 'touch ~/.config/zellij/config.kdl && nohup ~/.config/matugen/scripts/sync-zellij-mac.sh >/dev/null 2>&1 &'
post_hook = "touch ~/.config/zellij/config.kdl && nohup ~/.config/matugen/scripts/sync-mac.sh file ~/.config/zellij/themes/matugen.kdl ~/.config/zellij/themes/matugen.kdl --remote-cmd 'touch ~/.config/zellij/config.kdl' >/dev/null 2>&1 &"
[templates.jjui]
input_path = '~/.config/matugen/templates/jjui-theme.toml'
output_path = '~/.config/jjui/themes/matugen.toml'
post_hook = "nohup ~/.config/matugen/scripts/sync-mac.sh file ~/.config/jjui/themes/matugen.toml ~/.config/jjui/themes/matugen.toml >/dev/null 2>&1 &"
[templates.nvim]
input_path = '~/.config/matugen/templates/neovim.lua'
output_path = '~/.config/nvim/lua/plugins/dankcolors.lua'
post_hook = 'nohup ~/.config/matugen/scripts/sync-nvim-mac.sh >/dev/null 2>&1 &'
post_hook = "nohup ~/.config/matugen/scripts/sync-mac.sh file ~/.config/nvim/lua/plugins/dankcolors.lua ~/.config/nvim/lua/plugins/dankcolors.lua >/dev/null 2>&1 &"
[templates.pi]
input_path = '~/.config/matugen/templates/pi-theme.json'
output_path = '~/.pi/agent/themes/matugen.json.tmp'
post_hook = 'cat ~/.pi/agent/themes/matugen.json.tmp > ~/.pi/agent/themes/matugen.json && nohup ~/.config/matugen/scripts/sync-pi-mac.sh >/dev/null 2>&1 &'
post_hook = "cat ~/.pi/agent/themes/matugen.json.tmp > ~/.pi/agent/themes/matugen.json && nohup ~/.config/matugen/scripts/sync-mac.sh file ~/.pi/agent/themes/matugen.json ~/.pi/agent/themes/matugen.json >/dev/null 2>&1 &"
[templates.wallpaper]
input_path = '~/.config/matugen/templates/wallpaper-path.txt'
output_path = '~/.cache/matugen-last-image'
post_hook = 'nohup ~/.config/matugen/scripts/sync-wallpaper-mac.sh >/dev/null 2>&1 &'
post_hook = "nohup ~/.config/matugen/scripts/sync-mac.sh wallpaper ~/.cache/matugen-last-image >/dev/null 2>&1 &"
+98
View File
@@ -0,0 +1,98 @@
#!/usr/bin/env sh
set -eu
host="${MATUGEN_SYNC_HOST:-mac-attio}"
log_file="$HOME/.cache/matugen-sync-mac.log"
mkdir -p "$HOME/.cache"
usage() {
echo "usage:" >&2
echo " sync-mac.sh file <source_path> <remote_path> [--remote-cmd <command>]" >&2
echo " sync-mac.sh wallpaper <wallpaper_path_file>" >&2
exit 1
}
sync_file() {
source_path="$1"
remote_path="$2"
remote_cmd="${3-}"
# If caller passes a local absolute path, mirror it under remote $HOME.
case "$remote_path" in
"$HOME")
remote_path="~"
;;
"$HOME"/*)
remote_path="~/${remote_path#"$HOME"/}"
;;
esac
remote_dir="$(dirname "$remote_path")"
remote_tmp="${remote_path}.tmp"
ssh "$host" "mkdir -p $remote_dir"
scp "$source_path" "$host:$remote_tmp"
ssh "$host" "mv $remote_tmp $remote_path"
if [ -n "$remote_cmd" ]; then
ssh "$host" "$remote_cmd"
fi
}
sync_wallpaper() {
wallpaper_path_file="$1"
[ -f "$wallpaper_path_file" ] || exit 0
wallpaper_path="$(cat "$wallpaper_path_file")"
[ -n "$wallpaper_path" ] || exit 0
[ -f "$wallpaper_path" ] || exit 0
base_name="$(basename "$wallpaper_path")"
local_cache_dir="$HOME/.cache/matugen-wallpapers"
local_copy="$local_cache_dir/$base_name"
mkdir -p "$local_cache_dir"
cp -f "$wallpaper_path" "$local_copy"
ssh "$host" "mkdir -p ~/.cache/matugen-wallpapers"
scp "$local_copy" "$host:~/.cache/matugen-wallpapers/$base_name"
ssh "$host" "osascript -e 'tell application \"System Events\" to tell every desktop to set picture to POSIX file \"~/.cache/matugen-wallpapers/$base_name\"'"
}
mode="${1-}"
[ -n "$mode" ] || usage
shift
{
echo "[$(date '+%Y-%m-%d %H:%M:%S')] mode=$mode"
case "$mode" in
file)
[ "$#" -ge 2 ] || usage
source_path="$1"
remote_path="$2"
shift 2
remote_cmd=""
if [ "${1-}" = "--remote-cmd" ]; then
[ "$#" -eq 2 ] || usage
remote_cmd="$2"
elif [ "$#" -ne 0 ]; then
usage
fi
sync_file "$source_path" "$remote_path" "$remote_cmd"
;;
wallpaper)
[ "$#" -eq 1 ] || usage
sync_wallpaper "$1"
;;
*)
usage
;;
esac
} >>"$log_file" 2>&1
-12
View File
@@ -1,12 +0,0 @@
#!/usr/bin/env sh
set -eu
log_file="$HOME/.cache/matugen-sync-nvim.log"
mkdir -p "$HOME/.cache"
{
ssh mac-attio "mkdir -p ~/.config/nvim/lua/plugins"
scp "$HOME/.config/nvim/lua/plugins/dankcolors.lua" \
mac-attio:~/.config/nvim/lua/plugins/
} >>"$log_file" 2>&1
-13
View File
@@ -1,13 +0,0 @@
#!/usr/bin/env sh
set -eu
log_file="$HOME/.cache/matugen-sync-pi.log"
mkdir -p "$HOME/.cache"
{
ssh mac-attio "mkdir -p ~/.pi/agent/themes"
scp "$HOME/.pi/agent/themes/matugen.json" \
mac-attio:~/.pi/agent/themes/matugen.json.tmp
ssh mac-attio "mv ~/.pi/agent/themes/matugen.json.tmp ~/.pi/agent/themes/matugen.json"
} >>"$log_file" 2>&1
-13
View File
@@ -1,13 +0,0 @@
#!/usr/bin/env sh
set -eu
log_file="$HOME/.cache/matugen-sync-tmux.log"
mkdir -p "$HOME/.cache"
{
ssh mac-attio "mkdir -p ~/.config/tmux"
scp "$HOME/.config/tmux/colors.conf" \
mac-attio:~/.config/tmux/
ssh mac-attio 'export PATH="/opt/homebrew/bin:/usr/local/bin:$PATH" && tmux source-file ~/.config/tmux/tmux.conf 2>/dev/null || true'
} >>"$log_file" 2>&1
@@ -1,21 +0,0 @@
#!/usr/bin/env sh
set -eu
log_file="$HOME/.cache/matugen-sync-wallpaper.log"
mkdir -p "$HOME/.cache"
{
wallpaper_path="$(cat "$HOME/.cache/matugen-last-image")"
if [ -n "$wallpaper_path" ]; then
base_name="$(basename "$wallpaper_path")"
dest_path="$HOME/.cache/matugen-wallpapers/$base_name"
mkdir -p "$HOME/.cache/matugen-wallpapers"
cp -f "$wallpaper_path" "$dest_path"
ssh mac-attio "mkdir -p ~/.cache/matugen-wallpapers"
scp "$dest_path" "mac-attio:~/.cache/matugen-wallpapers/$base_name"
ssh mac-attio "osascript -e 'tell application \"System Events\" to tell every desktop to set picture to POSIX file \"~/.cache/matugen-wallpapers/$base_name\"'"
fi
} >>"$log_file" 2>&1
-13
View File
@@ -1,13 +0,0 @@
#!/usr/bin/env sh
set -eu
log_file="$HOME/.cache/matugen-sync-zellij.log"
mkdir -p "$HOME/.cache"
{
ssh mac-attio "mkdir -p ~/.config/zellij/themes"
scp "$HOME/.config/zellij/themes/matugen.kdl" \
mac-attio:~/.config/zellij/themes/
ssh mac-attio "touch ~/.config/zellij/config.kdl"
} >>"$log_file" 2>&1
+1 -1
View File
@@ -238,7 +238,7 @@ layer-rule {
}
window-rule {
match app-id="steam" title=r#"^notificationtoasts_\d+_desktop$"#
match app-id="steam" title=r#"notificationtoasts_\d+_desktop$"#
default-floating-position x=10 y=10 relative-to="bottom-right"
}
+19 -7
View File
@@ -1,8 +1,11 @@
#!/usr/bin/env bash
set -u
screenshot_dir="$HOME/Pictures/Screenshots"
remote_target="mac-attio:~/screenshot.png"
timeout=3 # seconds
file_timeout=8 # seconds to wait for screenshot file to appear
upload_timeout=10 # seconds
notify() {
DBUS_SESSION_BUS_ADDRESS="unix:path=/run/user/$(id -u)/bus" \
@@ -15,12 +18,13 @@ shopt -s nullglob
existing_files=("$screenshot_dir"/*.png)
existing_count=${#existing_files[@]}
# Take screenshot
niri msg action screenshot
# Take screenshot (no timeout here so interactive capture isn't canceled)
niri msg action screenshot >/dev/null 2>&1
# Wait for new file (timeout in 0.1s intervals)
deadline=$((timeout * 10))
deadline=$((file_timeout * 10))
count=0
files=("$screenshot_dir"/*.png)
while (( count < deadline )); do
files=("$screenshot_dir"/*.png)
@@ -37,12 +41,20 @@ if (( ${#files[@]} <= existing_count )); then
fi
# Get the new file (most recent)
latest_file=$(ls -1t "${files[@]}" | head -n 1)
latest_file=$(ls -1t -- "${files[@]}" | head -n 1)
# Small delay to ensure file is fully written
sleep 0.1
# Upload
if scp -q "$latest_file" "$remote_target"; then
# Upload with strict SSH options so it never blocks waiting for prompts
if timeout "${upload_timeout}s" scp -q \
-o BatchMode=yes \
-o ConnectTimeout=5 \
-o ConnectionAttempts=1 \
-o ServerAliveInterval=2 \
-o ServerAliveCountMax=1 \
-- "$latest_file" "$remote_target"; then
notify "Screenshot" "Uploaded to Mac"
else
notify "Screenshot" "Upload to Mac failed"
fi
+1 -1
View File
@@ -1,5 +1,5 @@
return {
cmd = { "biome", "lsp-proxy" },
cmd = { "npx", "biome", "lsp-proxy" },
filetypes = {
"javascript",
"javascriptreact",
+25 -1
View File
@@ -32,6 +32,23 @@ return {
},
"folke/lazydev.nvim",
},
config = function(_, opts)
-- Monkey-patch blink's text_edits.get_from_item to clamp textEdit ranges
-- that extend past the cursor. Workaround for tsgo sending bad ranges
-- that eat text (e.g. in JSX string attributes like className="...").
local text_edits = require("blink.cmp.lib.text_edits")
local original_get_from_item = text_edits.get_from_item
text_edits.get_from_item = function(item)
local text_edit = original_get_from_item(item)
local cursor_col = require("blink.cmp.completion.trigger.context").get_cursor()[2]
if text_edit.range and text_edit.range["end"].character > cursor_col then
text_edit.range["end"].character = cursor_col
end
return text_edit
end
require("blink.cmp").setup(opts)
end,
--- @module 'blink.cmp'
--- @type blink.cmp.Config
opts = {
@@ -76,9 +93,16 @@ return {
},
sources = {
default = { "lsp", "path", "snippets", "lazydev" },
default = { "lsp", "path", "snippets", "lazydev", "minuet" },
providers = {
lazydev = { module = "lazydev.integrations.blink", score_offset = 100 },
minuet = {
name = "minuet",
module = "minuet.blink",
async = true,
timeout_ms = 3000,
score_offset = 50,
},
},
},
+1
View File
@@ -1,5 +1,6 @@
return {
"HotThoughts/jjui.nvim",
enabled = false,
cmd = {
"JJUI",
"JJUICurrentFile",
+2 -2
View File
@@ -6,12 +6,12 @@ return {
-- Allows extra capabilities provided by blink.cmp
{
"saghen/blink.cmp",
config = function(_, opts)
require("blink.cmp").setup(opts)
opts = function(_, opts)
-- Add blink.cmp capabilities to the default LSP client capabilities
vim.lsp.config("*", {
capabilities = require("blink.cmp").get_lsp_capabilities(),
})
return opts
end,
},
+9 -27
View File
@@ -28,34 +28,16 @@ return {
end,
},
{ "nvim-lua/plenary.nvim" },
-- optional, if you are using virtual-text frontend, blink is not required.
-- Minuet blink.cmp integration (merged into main blink.lua spec via opts)
{
"Saghen/blink.cmp",
config = function()
require("blink-cmp").setup({
keymap = {
-- Manually invoke minuet completion.
["<A-y>"] = require("minuet").make_blink_map(),
},
sources = {
-- Enable minuet for autocomplete
default = { "lsp", "path", "buffer", "snippets", "minuet" },
-- For manual completion only, remove 'minuet' from default
providers = {
minuet = {
name = "minuet",
module = "minuet.blink",
async = true,
-- Should match minuet.config.request_timeout * 1000,
-- since minuet.config.request_timeout is in seconds
timeout_ms = 3000,
score_offset = 50, -- Gives minuet higher priority among suggestions
},
},
},
-- Recommended to avoid unnecessary request
completion = { trigger = { prefetch_on_insert = false } },
})
"saghen/blink.cmp",
opts = function(_, opts)
opts.keymap = opts.keymap or {}
opts.keymap["<A-y>"] = require("minuet").make_blink_map()
opts.completion = opts.completion or {}
opts.completion.trigger = opts.completion.trigger or {}
opts.completion.trigger.prefetch_on_insert = false
return opts
end,
},
}
+31 -46
View File
@@ -51,27 +51,29 @@ return {
-- end,
-- desc = "Git Blame Line",
-- },
{
"<leader>gf",
function()
Snacks.lazygit.log_file()
end,
desc = "Lazygit Current File History",
},
{
"<leader>lg",
function()
Snacks.lazygit()
end,
desc = "Lazygit",
},
{
"<leader>gl",
function()
Snacks.lazygit.log()
end,
desc = "Lazygit Log (cwd)",
},
--
-- Commented out LazyGit in favor of separated jj
-- {
-- "<leader>gf",
-- function()
-- Snacks.lazygit.log_file()
-- end,
-- desc = "Lazygit Current File History",
-- },
-- {
-- "<leader>lg",
-- function()
-- Snacks.lazygit()
-- end,
-- desc = "Lazygit",
-- },
-- {
-- "<leader>gl",
-- function()
-- Snacks.lazygit.log()
-- end,
-- desc = "Lazygit Log (cwd)",
-- },
{
"<leader>dn",
function()
@@ -216,12 +218,12 @@ return {
},
-- git
{
"<leader>gcb",
"<leader>jc",
function()
local cwd = vim.fn.getcwd()
-- Helper to run git commands and capture both stdout and stderr
local function git_cmd(cmd)
-- Helper to run commands and capture both stdout and stderr
local function run_cmd(cmd)
local full_cmd = "cd " .. vim.fn.shellescape(cwd) .. " && " .. cmd .. " 2>&1"
local handle = io.popen(full_cmd)
local result = handle and handle:read("*a") or ""
@@ -232,7 +234,7 @@ return {
end
-- Check if in a git repo
local git_dir = git_cmd("git rev-parse --git-dir")
local git_dir = run_cmd("git rev-parse --git-dir")
if git_dir == "" or git_dir:match("^fatal") then
vim.notify("Not in a git repository", vim.log.levels.WARN)
return
@@ -240,7 +242,7 @@ return {
-- Get the default branch
local function branch_exists(branch)
local result = git_cmd("git rev-parse --verify refs/remotes/origin/" .. branch)
local result = run_cmd("git rev-parse --verify refs/remotes/origin/" .. branch)
-- If branch exists, rev-parse returns a hash; if not, it returns fatal error
return not result:match("^fatal")
end
@@ -257,19 +259,9 @@ return {
return
end
-- Get current branch
local current_branch = git_cmd("git branch --show-current")
if current_branch == "" then
current_branch = "HEAD"
end
local compare_target = "origin/" .. default_branch
-- Get files that differ from origin/main (includes committed + uncommitted changes)
local result = git_cmd("git diff --name-only " .. compare_target)
-- Also get untracked files
local untracked = git_cmd("git ls-files --others --exclude-standard")
local result = run_cmd("jj diff --from " .. default_branch .. "@origin --to @ --summary | awk '{print $2}'")
-- Combine results
local all_files = {}
@@ -282,20 +274,13 @@ return {
end
end
for line in untracked:gmatch("[^\r\n]+") do
if line ~= "" and not seen[line] then
seen[line] = true
table.insert(all_files, { text = line .. " [untracked]", file = line })
end
end
if #all_files == 0 then
vim.notify("No modified files (vs " .. compare_target .. ")", vim.log.levels.INFO)
vim.notify("No modified files", vim.log.levels.INFO)
return
end
Snacks.picker({
title = "Modified Files (vs " .. compare_target .. ")",
title = "Modified Files",
items = all_files,
layout = { preset = "default" },
confirm = function(picker, item)
+5
View File
@@ -89,6 +89,11 @@ vim.o.confirm = true
-- vim.o.winborder = "rounded"
-- Clipboard: keep default y/p behavior; over SSH, route + register through OSC52
if vim.env.SSH_TTY then
vim.g.clipboard = "osc52"
end
-- Highlight text on yank
vim.api.nvim_create_autocmd("TextYankPost", {
callback = function()
+1 -1
View File
@@ -1,7 +1,7 @@
{
"activePack": "glados",
"volume": 1,
"muted": false,
"muted": true,
"enabledCategories": {
"session.start": true,
"task.acknowledge": true,
@@ -21,11 +21,6 @@
"file": "sounds/IKnowYoureThere.mp3",
"label": "I know you're there. I can feel you here.",
"sha256": "df3780607b7a480fd3968c8aae5e0a397ea956008a5c7a47fecb887a05d61622"
},
{
"file": "sounds/HelloImbecile.mp3",
"label": "Hello, imbecile!",
"sha256": "dd10461e79bb4b1319f436cef5f0541f18a9505638824a6e765b9f2824a3380f"
}
]
},
+6 -2
View File
@@ -1,5 +1,5 @@
{
"lastChangelogVersion": "0.57.1",
"lastChangelogVersion": "0.67.3",
"defaultProvider": "openai-codex",
"defaultModel": "gpt-5.3-codex",
"defaultThinkingLevel": "medium",
@@ -7,5 +7,9 @@
"lsp": {
"hookMode": "edit_write"
},
"hideThinkingBlock": false
"hideThinkingBlock": false,
"slowtool": {
"timeoutSeconds": 300,
"enabled": true
}
}
+7 -3
View File
@@ -1,11 +1,15 @@
{
"lastChangelogVersion": "0.57.1",
"lastChangelogVersion": "0.67.68",
"defaultProvider": "anthropic",
"defaultModel": "claude-opus-4-6",
"defaultModel": "claude-opus-4-7",
"defaultThinkingLevel": "medium",
"theme": "matugen",
"lsp": {
"hookMode": "edit_write"
},
"hideThinkingBlock": false
"hideThinkingBlock": true,
"slowtool": {
"timeoutSeconds": 300,
"enabled": true
}
}
@@ -1,6 +1,11 @@
---
name: attio-frontend-rules
description: Styling conventions and component guidelines for the Attio frontend codebase. Covers styled-components patterns, transient props, data attributes, spacing, color tokens, and design system usage. Use when modifying frontend UI code in the Attio monorepo.
---
# Attio Frontend Rules
Guidelines and conventions for working on the Attio frontend codebase.
Guidelines and conventions for working on the Attio frontend codebase. Use whenever modifying the frontend.
---
@@ -52,6 +57,26 @@ export function Stack({..., className}: {..., className: string | undefined}) {
If the same re-styling is applied multiple times, it should become its own reusable component (or component variant).
### Layout.Stack defaults
`Layout.Stack` defaults `align` to `"center"` (i.e. `align-items: center`). **Always explicitly set `align="flex-start"`** when you need left/top alignment — don't assume it will be the default.
```tsx
// Good — explicit alignment
<Layout.Stack direction="column" align="flex-start">
<Typography.Body.Standard.Component>Title</Typography.Body.Standard.Component>
<Typography.Caption.Standard.Component>Description</Typography.Caption.Standard.Component>
</Layout.Stack>
// Bad — text will be centered, not left-aligned
<Layout.Stack direction="column">
<Typography.Body.Standard.Component>Title</Typography.Body.Standard.Component>
<Typography.Caption.Standard.Component>Description</Typography.Caption.Standard.Component>
</Layout.Stack>
```
Other useful `Layout.Stack` props: `direction`, `justify`, `gap`, `flex`, `shrink`, `minWidth`, `width`, `height`, and all spacing props (`p`, `px`, `py`, `pt`, `pb`, `pl`, `pr`, `m`, `mx`, `my`, etc.). **Always prefer these props over writing custom styled divs with `display: flex`.**
### Avoid layout assumptions
Components should not generally include external layout styles such as `width`, `z-index`, `margin` or `flex`. These properties should instead be set by the parent component using a `styled(MyComponent)` override.
+12
View File
@@ -1,3 +1,13 @@
# Tool usage
FUCKING ALWAYS use timeout on tool usage because sometimes you're stupid, and hang on things because you assume its non interactive. And by that I don't mean appending `timeout` to bash or something, but you have a way to add a timeout to tool calling somehow. I don't know the inner workings of the harness.
# Validations
Sometimes some repositories (stupidly) ask you to run validations after changes or some shit. Thing is, you're smart. Your edit tools already contain formatting and LSP hooks. So, you may ask the user if they want you to run said "required" validations, but they're not really required.
---
# Screenshots
When the user provides a screenshot path (e.g., `/tmp/pi-clipboard-xxx.png`), **ALWAYS** use the `read` tool to read the image file. Do NOT assume you can see the screenshot contents without reading it first.
@@ -8,6 +18,8 @@ When the user provides a screenshot path (e.g., `/tmp/pi-clipboard-xxx.png`), **
**Prefer jj (Jujutsu) over git.** If a project has a colocated jj repo (`.jj` directory), use `jj` commands for all version control operations — rebasing, branching, log, etc. Only fall back to git when jj doesn't support something or the project isn't set up for it.
After pushing changes, always run `jj new` to start a fresh working copy commit. Don't squash unnecessarily! seriously don't squash all the time.
# Git commits and PRs
Before writing any commits or PR titles, check recent history with `jj log` (or `git log --oneline -20` if jj is unavailable) to match my style.
+1
View File
@@ -0,0 +1 @@
{}
@@ -0,0 +1,14 @@
import type { ExtensionAPI } from "@mariozechner/pi-coding-agent";
const TARGET = "about pi itself, its SDK, extensions, themes, skills, or TUI";
const REPLACEMENT = "about pi, its SDK, extensions, themes, skills, or TUI";
export default function(pi: ExtensionAPI) {
pi.on("before_agent_start", (event, ctx) => {
if (ctx.model?.provider !== "anthropic") return;
if (!event.systemPrompt.includes(TARGET)) return;
return {
systemPrompt: event.systemPrompt.replace(TARGET, REPLACEMENT),
};
});
}
@@ -1 +0,0 @@
node_modules/
@@ -1,604 +0,0 @@
/**
* Custom Provider Example
*
* Demonstrates registering a custom provider with:
* - Custom API identifier ("custom-anthropic-api")
* - Custom streamSimple implementation
* - OAuth support for /login
* - API key support via environment variable
* - Two model definitions
*
* Usage:
* # First install dependencies
* cd packages/coding-agent/examples/extensions/custom-provider && npm install
*
* # With OAuth (run /login custom-anthropic first)
* pi -e ./packages/coding-agent/examples/extensions/custom-provider
*
* # With API key
* CUSTOM_ANTHROPIC_API_KEY=sk-ant-... pi -e ./packages/coding-agent/examples/extensions/custom-provider
*
* Then use /model to select custom-anthropic/claude-sonnet-4-5
*/
import Anthropic from "@anthropic-ai/sdk";
import type { ContentBlockParam, MessageCreateParamsStreaming } from "@anthropic-ai/sdk/resources/messages.js";
import {
type Api,
type AssistantMessage,
type AssistantMessageEventStream,
type Context,
calculateCost,
createAssistantMessageEventStream,
type ImageContent,
type Message,
type Model,
type OAuthCredentials,
type OAuthLoginCallbacks,
type SimpleStreamOptions,
type StopReason,
type TextContent,
type ThinkingContent,
type Tool,
type ToolCall,
type ToolResultMessage,
} from "@mariozechner/pi-ai";
import type { ExtensionAPI } from "@mariozechner/pi-coding-agent";
// =============================================================================
// OAuth Implementation (copied from packages/ai/src/utils/oauth/anthropic.ts)
// =============================================================================
const decode = (s: string) => atob(s);
const CLIENT_ID = decode("OWQxYzI1MGEtZTYxYi00NGQ5LTg4ZWQtNTk0NGQxOTYyZjVl");
const AUTHORIZE_URL = "https://claude.ai/oauth/authorize";
const TOKEN_URL = "https://console.anthropic.com/v1/oauth/token";
const REDIRECT_URI = "https://console.anthropic.com/oauth/code/callback";
const SCOPES = "org:create_api_key user:profile user:inference";
async function generatePKCE(): Promise<{ verifier: string; challenge: string }> {
const array = new Uint8Array(32);
crypto.getRandomValues(array);
const verifier = btoa(String.fromCharCode(...array))
.replace(/\+/g, "-")
.replace(/\//g, "_")
.replace(/=+$/, "");
const encoder = new TextEncoder();
const data = encoder.encode(verifier);
const hash = await crypto.subtle.digest("SHA-256", data);
const challenge = btoa(String.fromCharCode(...new Uint8Array(hash)))
.replace(/\+/g, "-")
.replace(/\//g, "_")
.replace(/=+$/, "");
return { verifier, challenge };
}
async function loginAnthropic(callbacks: OAuthLoginCallbacks): Promise<OAuthCredentials> {
const { verifier, challenge } = await generatePKCE();
const authParams = new URLSearchParams({
code: "true",
client_id: CLIENT_ID,
response_type: "code",
redirect_uri: REDIRECT_URI,
scope: SCOPES,
code_challenge: challenge,
code_challenge_method: "S256",
state: verifier,
});
callbacks.onAuth({ url: `${AUTHORIZE_URL}?${authParams.toString()}` });
const authCode = await callbacks.onPrompt({ message: "Paste the authorization code:" });
const [code, state] = authCode.split("#");
const tokenResponse = await fetch(TOKEN_URL, {
method: "POST",
headers: { "Content-Type": "application/json" },
body: JSON.stringify({
grant_type: "authorization_code",
client_id: CLIENT_ID,
code,
state,
redirect_uri: REDIRECT_URI,
code_verifier: verifier,
}),
});
if (!tokenResponse.ok) {
throw new Error(`Token exchange failed: ${await tokenResponse.text()}`);
}
const data = (await tokenResponse.json()) as {
access_token: string;
refresh_token: string;
expires_in: number;
};
return {
refresh: data.refresh_token,
access: data.access_token,
expires: Date.now() + data.expires_in * 1000 - 5 * 60 * 1000,
};
}
async function refreshAnthropicToken(credentials: OAuthCredentials): Promise<OAuthCredentials> {
const response = await fetch(TOKEN_URL, {
method: "POST",
headers: { "Content-Type": "application/json" },
body: JSON.stringify({
grant_type: "refresh_token",
client_id: CLIENT_ID,
refresh_token: credentials.refresh,
}),
});
if (!response.ok) {
throw new Error(`Token refresh failed: ${await response.text()}`);
}
const data = (await response.json()) as {
access_token: string;
refresh_token: string;
expires_in: number;
};
return {
refresh: data.refresh_token,
access: data.access_token,
expires: Date.now() + data.expires_in * 1000 - 5 * 60 * 1000,
};
}
// =============================================================================
// Streaming Implementation (simplified from packages/ai/src/providers/anthropic.ts)
// =============================================================================
// Canonical tool names Claude Code exposes; OAuth "stealth" requests must use
// these exact spellings so the request matches what Claude Code would send.
const claudeCodeTools = [
  "Read",
  "Write",
  "Edit",
  "Bash",
  "Grep",
  "Glob",
  "AskUserQuestion",
  "TodoWrite",
  "WebFetch",
  "WebSearch",
];
// Case-insensitive lookup table: any casing -> canonical Claude Code name.
const ccToolLookup = new Map(claudeCodeTools.map((t) => [t.toLowerCase(), t]));
// Map an internal tool name to its Claude Code spelling (pass through unknowns).
const toClaudeCodeName = (name: string) => ccToolLookup.get(name.toLowerCase()) ?? name;
// Map a Claude Code tool name back to the matching registered tool's name
// (case-insensitive); unknown names pass through unchanged.
const fromClaudeCodeName = (name: string, tools?: Tool[]) => {
  const target = name.toLowerCase();
  if (tools) {
    for (const tool of tools) {
      if (tool.name.toLowerCase() === target) {
        return tool.name;
      }
    }
  }
  return name;
};
/** True when the key looks like an Anthropic OAuth access token rather than a plain API key. */
function isOAuthToken(apiKey: string): boolean {
  return apiKey.indexOf("sk-ant-oat") !== -1;
}
/**
 * Replaces *unpaired* UTF-16 surrogate code units with U+FFFD so the text can
 * always be JSON-serialized for the API. Well-formed surrogate pairs (emoji
 * and other astral-plane characters) are preserved.
 */
function sanitizeSurrogates(text: string): string {
  // A high surrogate not followed by a low surrogate, or a low surrogate not
  // preceded by a high surrogate. The previous blanket [\uD800-\uDFFF]
  // replacement also mangled valid pairs, corrupting emoji in user text.
  return text.replace(
    /[\uD800-\uDBFF](?![\uDC00-\uDFFF])|(?<![\uD800-\uDBFF])[\uDC00-\uDFFF]/g,
    "\uFFFD",
  );
}
function convertContentBlocks(
content: (TextContent | ImageContent)[],
): string | Array<{ type: "text"; text: string } | { type: "image"; source: any }> {
const hasImages = content.some((c) => c.type === "image");
if (!hasImages) {
return sanitizeSurrogates(content.map((c) => (c as TextContent).text).join("\n"));
}
const blocks = content.map((block) => {
if (block.type === "text") {
return { type: "text" as const, text: sanitizeSurrogates(block.text) };
}
return {
type: "image" as const,
source: {
type: "base64" as const,
media_type: block.mimeType,
data: block.data,
},
};
});
if (!blocks.some((b) => b.type === "text")) {
blocks.unshift({ type: "text" as const, text: "(see attached image)" });
}
return blocks;
}
/**
 * Converts the internal message history into Anthropic `messages` params.
 *
 * @param messages Internal conversation history.
 * @param isOAuth  When true, assistant tool_use names are rewritten to Claude
 *                 Code tool names (OAuth "stealth" mode).
 * @param _tools   Unused here; kept for signature parity with callers.
 * @returns Array of Anthropic-shaped message objects.
 */
function convertMessages(messages: Message[], isOAuth: boolean, _tools?: Tool[]): any[] {
  const params: any[] = [];
  for (let i = 0; i < messages.length; i++) {
    const msg = messages[i];
    if (msg.role === "user") {
      if (typeof msg.content === "string") {
        // Blank user messages are dropped entirely.
        if (msg.content.trim()) {
          params.push({ role: "user", content: sanitizeSurrogates(msg.content) });
        }
      } else {
        // Structured user content: map to text/image blocks.
        const blocks: ContentBlockParam[] = msg.content.map((item) =>
          item.type === "text"
            ? { type: "text" as const, text: sanitizeSurrogates(item.text) }
            : {
                type: "image" as const,
                source: { type: "base64" as const, media_type: item.mimeType as any, data: item.data },
              },
        );
        if (blocks.length > 0) {
          params.push({ role: "user", content: blocks });
        }
      }
    } else if (msg.role === "assistant") {
      const blocks: ContentBlockParam[] = [];
      for (const block of msg.content) {
        if (block.type === "text" && block.text.trim()) {
          blocks.push({ type: "text", text: sanitizeSurrogates(block.text) });
        } else if (block.type === "thinking" && block.thinking.trim()) {
          if ((block as ThinkingContent).thinkingSignature) {
            blocks.push({
              type: "thinking" as any,
              thinking: sanitizeSurrogates(block.thinking),
              signature: (block as ThinkingContent).thinkingSignature!,
            });
          } else {
            // Unsigned thinking is replayed as plain text — NOTE(review):
            // presumably because thinking blocks need a signature to be
            // accepted on replay; confirm against the Anthropic API docs.
            blocks.push({ type: "text", text: sanitizeSurrogates(block.thinking) });
          }
        } else if (block.type === "toolCall") {
          blocks.push({
            type: "tool_use",
            id: block.id,
            // Under OAuth the model must see Claude Code tool names.
            name: isOAuth ? toClaudeCodeName(block.name) : block.name,
            input: block.arguments,
          });
        }
      }
      if (blocks.length > 0) {
        params.push({ role: "assistant", content: blocks });
      }
    } else if (msg.role === "toolResult") {
      // Fold a consecutive run of toolResult messages into one user message
      // carrying multiple tool_result blocks.
      const toolResults: any[] = [];
      toolResults.push({
        type: "tool_result",
        tool_use_id: msg.toolCallId,
        content: convertContentBlocks(msg.content),
        is_error: msg.isError,
      });
      let j = i + 1;
      while (j < messages.length && messages[j].role === "toolResult") {
        const nextMsg = messages[j] as ToolResultMessage;
        toolResults.push({
          type: "tool_result",
          tool_use_id: nextMsg.toolCallId,
          content: convertContentBlocks(nextMsg.content),
          is_error: nextMsg.isError,
        });
        j++;
      }
      // Skip the messages folded in above.
      i = j - 1;
      params.push({ role: "user", content: toolResults });
    }
  }
  // Add cache control to last user message: marks an ephemeral prompt-cache
  // breakpoint on its final content block.
  if (params.length > 0) {
    const last = params[params.length - 1];
    if (last.role === "user" && Array.isArray(last.content)) {
      const lastBlock = last.content[last.content.length - 1];
      if (lastBlock) {
        lastBlock.cache_control = { type: "ephemeral" };
      }
    }
  }
  return params;
}
/**
 * Maps internal tool definitions to Anthropic `tools` entries, renaming each
 * tool to its Claude Code spelling when running under OAuth.
 */
function convertTools(tools: Tool[], isOAuth: boolean): any[] {
  const converted: any[] = [];
  for (const tool of tools) {
    const schema = tool.parameters as any;
    converted.push({
      name: isOAuth ? toClaudeCodeName(tool.name) : tool.name,
      description: tool.description,
      input_schema: {
        type: "object",
        properties: schema.properties || {},
        required: schema.required || [],
      },
    });
  }
  return converted;
}
/** Translates an Anthropic stop reason into the internal StopReason value. */
function mapStopReason(reason: string): StopReason {
  if (reason === "end_turn" || reason === "pause_turn" || reason === "stop_sequence") {
    return "stop";
  }
  if (reason === "max_tokens") {
    return "length";
  }
  if (reason === "tool_use") {
    return "toolUse";
  }
  // Anything unrecognized is treated as an error.
  return "error";
}
/**
 * Streams a completion from the Anthropic Messages API, translating the SDK's
 * event stream into the internal AssistantMessageEventStream.
 *
 * Auth is selected from the key shape: OAuth tokens get Claude Code headers
 * and a Claude Code identity system prompt ("stealth" mode); plain API keys
 * use standard headers. Usage/cost is accumulated from message_start and
 * message_delta events; errors and aborts end the stream with an error event.
 *
 * @param model   Model descriptor (id, baseUrl, pricing, limits).
 * @param context System prompt, message history, and tool definitions.
 * @param options Optional apiKey, maxTokens, reasoning level, abort signal.
 * @returns A stream of assistant-message events ending in `done` or `error`.
 */
function streamCustomAnthropic(
  model: Model<Api>,
  context: Context,
  options?: SimpleStreamOptions,
): AssistantMessageEventStream {
  const stream = createAssistantMessageEventStream();
  (async () => {
    // Accumulator for the final message; also pushed as `partial` with every
    // event so consumers can render incrementally.
    const output: AssistantMessage = {
      role: "assistant",
      content: [],
      api: model.api,
      provider: model.provider,
      model: model.id,
      usage: {
        input: 0,
        output: 0,
        cacheRead: 0,
        cacheWrite: 0,
        totalTokens: 0,
        cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0, total: 0 },
      },
      stopReason: "stop",
      timestamp: Date.now(),
    };
    try {
      const apiKey = options?.apiKey ?? "";
      const isOAuth = isOAuthToken(apiKey);
      // Configure client based on auth type
      const betaFeatures = ["fine-grained-tool-streaming-2025-05-14", "interleaved-thinking-2025-05-14"];
      const clientOptions: any = {
        baseURL: model.baseUrl,
        dangerouslyAllowBrowser: true,
      };
      if (isOAuth) {
        // OAuth: bearer token + headers imitating the Claude Code CLI.
        clientOptions.apiKey = null;
        clientOptions.authToken = apiKey;
        clientOptions.defaultHeaders = {
          accept: "application/json",
          "anthropic-dangerous-direct-browser-access": "true",
          "anthropic-beta": `claude-code-20250219,oauth-2025-04-20,${betaFeatures.join(",")}`,
          "user-agent": "claude-cli/2.1.2 (external, cli)",
          "x-app": "cli",
        };
      } else {
        clientOptions.apiKey = apiKey;
        clientOptions.defaultHeaders = {
          accept: "application/json",
          "anthropic-dangerous-direct-browser-access": "true",
          "anthropic-beta": betaFeatures.join(","),
        };
      }
      const client = new Anthropic(clientOptions);
      // Build request params
      const params: MessageCreateParamsStreaming = {
        model: model.id,
        messages: convertMessages(context.messages, isOAuth, context.tools),
        // Default output budget: a third of the model's max if not specified.
        max_tokens: options?.maxTokens || Math.floor(model.maxTokens / 3),
        stream: true,
      };
      // System prompt with Claude Code identity for OAuth
      if (isOAuth) {
        params.system = [
          {
            type: "text",
            text: "You are Claude Code, Anthropic's official CLI for Claude.",
            cache_control: { type: "ephemeral" },
          },
        ];
        if (context.systemPrompt) {
          params.system.push({
            type: "text",
            text: sanitizeSurrogates(context.systemPrompt),
            cache_control: { type: "ephemeral" },
          });
        }
      } else if (context.systemPrompt) {
        params.system = [
          {
            type: "text",
            text: sanitizeSurrogates(context.systemPrompt),
            cache_control: { type: "ephemeral" },
          },
        ];
      }
      if (context.tools) {
        params.tools = convertTools(context.tools, isOAuth);
      }
      // Handle thinking/reasoning: map reasoning level to a token budget,
      // preferring a caller-supplied budget over the defaults.
      if (options?.reasoning && model.reasoning) {
        const defaultBudgets: Record<string, number> = {
          minimal: 1024,
          low: 4096,
          medium: 10240,
          high: 20480,
        };
        const customBudget = options.thinkingBudgets?.[options.reasoning as keyof typeof options.thinkingBudgets];
        params.thinking = {
          type: "enabled",
          budget_tokens: customBudget ?? defaultBudgets[options.reasoning] ?? 10240,
        };
      }
      const anthropicStream = client.messages.stream({ ...params }, { signal: options?.signal });
      stream.push({ type: "start", partial: output });
      // Content blocks carry a transient `index` (the API's block index) and,
      // for tool calls, the raw `partialJson` buffer; both are stripped later.
      type Block = (ThinkingContent | TextContent | (ToolCall & { partialJson: string })) & { index: number };
      const blocks = output.content as Block[];
      for await (const event of anthropicStream) {
        if (event.type === "message_start") {
          // Initial usage snapshot (input/cache tokens known up front).
          output.usage.input = event.message.usage.input_tokens || 0;
          output.usage.output = event.message.usage.output_tokens || 0;
          output.usage.cacheRead = (event.message.usage as any).cache_read_input_tokens || 0;
          output.usage.cacheWrite = (event.message.usage as any).cache_creation_input_tokens || 0;
          output.usage.totalTokens =
            output.usage.input + output.usage.output + output.usage.cacheRead + output.usage.cacheWrite;
          calculateCost(model, output.usage);
        } else if (event.type === "content_block_start") {
          // A new text/thinking/tool_use block begins; record it and emit the
          // matching *_start event.
          if (event.content_block.type === "text") {
            output.content.push({ type: "text", text: "", index: event.index } as any);
            stream.push({ type: "text_start", contentIndex: output.content.length - 1, partial: output });
          } else if (event.content_block.type === "thinking") {
            output.content.push({
              type: "thinking",
              thinking: "",
              thinkingSignature: "",
              index: event.index,
            } as any);
            stream.push({ type: "thinking_start", contentIndex: output.content.length - 1, partial: output });
          } else if (event.content_block.type === "tool_use") {
            output.content.push({
              type: "toolCall",
              id: event.content_block.id,
              // Map Claude Code names back to the registered tool's name.
              name: isOAuth
                ? fromClaudeCodeName(event.content_block.name, context.tools)
                : event.content_block.name,
              arguments: {},
              partialJson: "",
              index: event.index,
            } as any);
            stream.push({ type: "toolcall_start", contentIndex: output.content.length - 1, partial: output });
          }
        } else if (event.type === "content_block_delta") {
          // Deltas are matched to the block by the API's block index.
          const index = blocks.findIndex((b) => b.index === event.index);
          const block = blocks[index];
          if (!block) continue;
          if (event.delta.type === "text_delta" && block.type === "text") {
            block.text += event.delta.text;
            stream.push({ type: "text_delta", contentIndex: index, delta: event.delta.text, partial: output });
          } else if (event.delta.type === "thinking_delta" && block.type === "thinking") {
            block.thinking += event.delta.thinking;
            stream.push({
              type: "thinking_delta",
              contentIndex: index,
              delta: event.delta.thinking,
              partial: output,
            });
          } else if (event.delta.type === "input_json_delta" && block.type === "toolCall") {
            // Tool arguments stream as JSON fragments; parse opportunistically
            // so partial arguments are available, ignoring parse failures.
            (block as any).partialJson += event.delta.partial_json;
            try {
              block.arguments = JSON.parse((block as any).partialJson);
            } catch {}
            stream.push({
              type: "toolcall_delta",
              contentIndex: index,
              delta: event.delta.partial_json,
              partial: output,
            });
          } else if (event.delta.type === "signature_delta" && block.type === "thinking") {
            block.thinkingSignature = (block.thinkingSignature || "") + (event.delta as any).signature;
          }
        } else if (event.type === "content_block_stop") {
          const index = blocks.findIndex((b) => b.index === event.index);
          const block = blocks[index];
          if (!block) continue;
          // Strip the transient index before emitting the finished block.
          delete (block as any).index;
          if (block.type === "text") {
            stream.push({ type: "text_end", contentIndex: index, content: block.text, partial: output });
          } else if (block.type === "thinking") {
            stream.push({ type: "thinking_end", contentIndex: index, content: block.thinking, partial: output });
          } else if (block.type === "toolCall") {
            // Final parse of the accumulated JSON; keep last good value on failure.
            try {
              block.arguments = JSON.parse((block as any).partialJson);
            } catch {}
            delete (block as any).partialJson;
            stream.push({ type: "toolcall_end", contentIndex: index, toolCall: block, partial: output });
          }
        } else if (event.type === "message_delta") {
          if ((event.delta as any).stop_reason) {
            output.stopReason = mapStopReason((event.delta as any).stop_reason);
          }
          // Final usage totals arrive here; recompute cost.
          output.usage.input = (event.usage as any).input_tokens || 0;
          output.usage.output = (event.usage as any).output_tokens || 0;
          output.usage.cacheRead = (event.usage as any).cache_read_input_tokens || 0;
          output.usage.cacheWrite = (event.usage as any).cache_creation_input_tokens || 0;
          output.usage.totalTokens =
            output.usage.input + output.usage.output + output.usage.cacheRead + output.usage.cacheWrite;
          calculateCost(model, output.usage);
        }
      }
      if (options?.signal?.aborted) {
        throw new Error("Request was aborted");
      }
      stream.push({ type: "done", reason: output.stopReason as "stop" | "length" | "toolUse", message: output });
      stream.end();
    } catch (error) {
      // Clean transient indices off any partially-built blocks, then emit an
      // error (or aborted) terminal event.
      for (const block of output.content) delete (block as any).index;
      output.stopReason = options?.signal?.aborted ? "aborted" : "error";
      output.errorMessage = error instanceof Error ? error.message : JSON.stringify(error);
      stream.push({ type: "error", reason: output.stopReason, error: output });
      stream.end();
    }
  })();
  return stream;
}
// =============================================================================
// Extension Entry Point
// =============================================================================
/**
 * Extension entry point: registers the `custom-anthropic` provider with two
 * Claude models, OAuth login/refresh handlers, and the custom streaming
 * implementation defined above.
 */
export default function (pi: ExtensionAPI) {
  pi.registerProvider("custom-anthropic", {
    baseUrl: "https://api.anthropic.com",
    // NOTE(review): appears to name the env var consulted for plain API-key
    // auth — confirm against the ExtensionAPI provider docs.
    apiKey: "CUSTOM_ANTHROPIC_API_KEY",
    api: "custom-anthropic-api",
    models: [
      {
        id: "claude-opus-4-5",
        name: "Claude Opus 4.5 (Custom)",
        reasoning: true,
        input: ["text", "image"],
        // NOTE(review): cost figures look like USD per million tokens — verify.
        cost: { input: 5, output: 25, cacheRead: 0.5, cacheWrite: 6.25 },
        contextWindow: 200000,
        maxTokens: 64000,
      },
      {
        id: "claude-sonnet-4-5",
        name: "Claude Sonnet 4.5 (Custom)",
        reasoning: true,
        input: ["text", "image"],
        cost: { input: 3, output: 15, cacheRead: 0.3, cacheWrite: 3.75 },
        contextWindow: 200000,
        maxTokens: 64000,
      },
    ],
    oauth: {
      name: "Custom Anthropic (Claude Pro/Max)",
      login: loginAnthropic,
      refreshToken: refreshAnthropicToken,
      // The OAuth access token is used directly as the request credential.
      getApiKey: (cred) => cred.access,
    },
    streamSimple: streamCustomAnthropic,
  });
}
+4 -4
View File
@@ -9,8 +9,8 @@
* The editor is determined by $VISUAL, then $EDITOR, then falls back to 'vi'.
*/
import type { ExtensionAPI, ExtensionCommandContext } from "@mariozechner/pi-coding-agent";
import type { TUI, Theme, KeybindingsManager, Component } from "@mariozechner/pi-tui";
import type { ExtensionAPI, ExtensionCommandContext, Theme } from "@mariozechner/pi-coding-agent";
import type { TUI, KeybindingsManager, Component } from "@mariozechner/pi-tui";
import { spawnSync } from "node:child_process";
export default function editSessionExtension(pi: ExtensionAPI) {
@@ -59,7 +59,7 @@ export default function editSessionExtension(pi: ExtensionAPI) {
ctx.ui.notify(`Editor exited with code ${result.status}`, "warning");
}
done();
done(undefined);
// Return dummy component
return createDummyComponent();
@@ -69,7 +69,7 @@ export default function editSessionExtension(pi: ExtensionAPI) {
await ctx.ui.custom<void>(factory);
// Signal that we're about to reload the session (so confirm-destructive skips)
pi.events.emit("edit-session:reload");
pi.events.emit("edit-session:reload", undefined);
// Reload the session by switching to the same file (forces re-read from disk)
ctx.ui.notify("Reloading session...", "info");
+3 -2
View File
@@ -80,7 +80,8 @@ export default function (pi: ExtensionAPI) {
loader.onAbort = () => done(null);
const doGenerate = async () => {
const apiKey = await ctx.modelRegistry.getApiKey(ctx.model!);
const auth = await ctx.modelRegistry.getApiKeyAndHeaders(ctx.model!);
if (!auth.ok) throw new Error(auth.error);
const userMessage: Message = {
role: "user",
@@ -96,7 +97,7 @@ export default function (pi: ExtensionAPI) {
const response = await complete(
ctx.model!,
{ systemPrompt: SYSTEM_PROMPT, messages: [userMessage] },
{ apiKey, signal: loader.signal },
{ apiKey: auth.apiKey, headers: auth.headers, signal: loader.signal },
);
if (response.stopReason === "aborted") {
@@ -0,0 +1,535 @@
import { existsSync, readFileSync, statSync } from "node:fs";
import { spawn } from "node:child_process";
import { basename, dirname, join, resolve } from "node:path";
import type { ExtensionAPI, ExtensionContext, ToolResultEvent } from "@mariozechner/pi-coding-agent";
const HOOK_TIMEOUT_MS = 10 * 60 * 1000;
type HookEventName = "PostToolUse" | "PostToolUseFailure";
type ResolvedCommandHook = {
eventName: HookEventName;
matcher?: RegExp;
matcherText?: string;
command: string;
source: string;
};
type HookState = {
projectDir: string;
hooks: ResolvedCommandHook[];
};
type CommandRunResult = {
code: number;
stdout: string;
stderr: string;
elapsedMs: number;
timedOut: boolean;
};
/** Returns true when `path` exists and is a regular file; never throws. */
function isFile(path: string): boolean {
  try {
    const stats = statSync(path);
    return stats.isFile();
  } catch {
    // Missing path, permission error, etc. all count as "not a file".
    return false;
  }
}
/**
 * Narrows an unknown value to a keyed record, or undefined for primitives and
 * null. Note: arrays also pass (typeof [] === "object"), matching callers'
 * expectations when they index by string keys.
 */
function asRecord(value: unknown): Record<string, unknown> | undefined {
  const isObjectLike = value !== null && typeof value === "object";
  return isObjectLike ? (value as Record<string, unknown>) : undefined;
}
/**
 * Lists `startDir` (resolved) and each successive parent directory, stopping
 * after `stopDir` (inclusive) when given, otherwise at the filesystem root.
 * If `stopDir` is never reached, the walk continues to the root.
 */
function walkUpDirectories(startDir: string, stopDir?: string): string[] {
  const directories: string[] = [];
  let dir = resolve(startDir);
  for (;;) {
    directories.push(dir);
    const parent = dirname(dir);
    const atStopDir = stopDir !== undefined && dir === stopDir;
    const atRoot = parent === dir;
    if (atStopDir || atRoot) {
      break;
    }
    dir = parent;
  }
  return directories;
}
/** Finds the closest ancestor of `startDir` (or `startDir` itself) containing a `.git` entry. */
function findNearestGitRoot(startDir: string): string | undefined {
  return walkUpDirectories(startDir).find((directory) => existsSync(join(directory, ".git")));
}
/** True when the directory carries any of the supported hooks config files. */
function hasHooksConfig(directory: string): boolean {
  const candidates = [
    join(directory, ".claude", "settings.json"),
    join(directory, ".rulesync", "hooks.json"),
    join(directory, ".pi", "hooks.json"),
  ];
  return candidates.some((candidate) => isFile(candidate));
}
/**
 * Picks the project root: the nearest directory (walking up from `cwd`,
 * bounded by the git root when one exists) holding a hooks config; falls back
 * to the git root, and finally to the resolved cwd.
 */
function findProjectDir(cwd: string): string {
  const gitRoot = findNearestGitRoot(cwd);
  const configuredDir = walkUpDirectories(cwd, gitRoot).find((dir) => hasHooksConfig(dir));
  return configuredDir ?? gitRoot ?? resolve(cwd);
}
/** Parses a JSON file, returning undefined when it is missing or malformed. */
function readJsonFile(path: string): unknown | undefined {
  if (!isFile(path)) {
    return undefined;
  }
  try {
    const raw = readFileSync(path, "utf8");
    return JSON.parse(raw) as unknown;
  } catch {
    // Unreadable or invalid JSON is treated the same as an absent file.
    return undefined;
  }
}
/** Expands every `$CLAUDE_PROJECT_DIR` reference in a hook command to the project path. */
function resolveHookCommand(command: string, projectDir: string): string {
  // Use a replacer function so `$` sequences in the path are inserted
  // literally; a plain replacement string would interpret `$&`, `$'`, etc.
  return command.replace(/\$CLAUDE_PROJECT_DIR\b/g, () => projectDir);
}
/**
 * Compiles an optional matcher string into a RegExp. Returns undefined both
 * when no matcher was given and when the pattern fails to compile.
 */
function compileMatcher(matcherText: string | undefined): RegExp | undefined {
  if (matcherText === undefined) {
    return undefined;
  }
  try {
    return new RegExp(matcherText);
  } catch {
    // Invalid pattern: caller distinguishes this via the original text.
    return undefined;
  }
}
/**
 * Builds a ResolvedCommandHook with its matcher compiled and the command's
 * `$CLAUDE_PROJECT_DIR` expanded. Returns undefined when a matcher text was
 * supplied but failed to compile — a broken matcher must not silently become
 * a match-everything hook.
 */
function createHook(
  eventName: HookEventName,
  matcherText: string | undefined,
  command: string,
  source: string,
  projectDir: string,
): ResolvedCommandHook | undefined {
  const matcher = compileMatcher(matcherText);
  const matcherIsBroken = matcherText !== undefined && matcher === undefined;
  if (matcherIsBroken) {
    return undefined;
  }
  return {
    eventName,
    matcher,
    matcherText,
    command: resolveHookCommand(command, projectDir),
    source,
  };
}
/** Reads the entry list for an event, accepting PascalCase and camelCase keys; [] when absent. */
function getHookEntries(
  hooksRecord: Record<string, unknown>,
  eventName: HookEventName,
): unknown[] {
  const candidates =
    eventName === "PostToolUse"
      ? [hooksRecord.PostToolUse, hooksRecord.postToolUse]
      : [hooksRecord.PostToolUseFailure, hooksRecord.postToolUseFailure];
  for (const candidate of candidates) {
    if (Array.isArray(candidate)) {
      return candidate;
    }
  }
  return [];
}
/**
 * Extracts command hooks from a Claude Code `settings.json` shape:
 * `{ hooks: { PostToolUse: [{ matcher?, hooks: [{ type: "command", command }] }] } }`.
 * Entries that are malformed, non-command, or carry a broken matcher are skipped.
 */
function parseClaudeSettingsHooks(
  config: unknown,
  source: string,
  projectDir: string,
): ResolvedCommandHook[] {
  const hooksRoot = asRecord(asRecord(config)?.hooks);
  if (!hooksRoot) {
    return [];
  }
  const collected: ResolvedCommandHook[] = [];
  for (const eventName of ["PostToolUse", "PostToolUseFailure"] as HookEventName[]) {
    for (const entry of getHookEntries(hooksRoot, eventName)) {
      const entryRecord = asRecord(entry);
      const nested = entryRecord?.hooks;
      if (!entryRecord || !Array.isArray(nested)) {
        continue;
      }
      const matcherText = typeof entryRecord.matcher === "string" ? entryRecord.matcher : undefined;
      for (const candidate of nested) {
        const candidateRecord = asRecord(candidate);
        if (
          !candidateRecord ||
          candidateRecord.type !== "command" ||
          typeof candidateRecord.command !== "string"
        ) {
          continue;
        }
        const hook = createHook(eventName, matcherText, candidateRecord.command, source, projectDir);
        if (hook) {
          collected.push(hook);
        }
      }
    }
  }
  return collected;
}
/**
 * Extracts command hooks from the flat `.rulesync`/`.pi` hooks file shape:
 * `{ hooks: { PostToolUse: [{ matcher?, command }] } }`.
 */
function parseSimpleHooksFile(
  config: unknown,
  source: string,
  projectDir: string,
): ResolvedCommandHook[] {
  const hooksRoot = asRecord(asRecord(config)?.hooks);
  if (!hooksRoot) {
    return [];
  }
  const collected: ResolvedCommandHook[] = [];
  for (const eventName of ["PostToolUse", "PostToolUseFailure"] as HookEventName[]) {
    for (const entry of getHookEntries(hooksRoot, eventName)) {
      const entryRecord = asRecord(entry);
      if (!entryRecord || typeof entryRecord.command !== "string") {
        continue;
      }
      const matcherText = typeof entryRecord.matcher === "string" ? entryRecord.matcher : undefined;
      const hook = createHook(eventName, matcherText, entryRecord.command, source, projectDir);
      if (hook) {
        collected.push(hook);
      }
    }
  }
  return collected;
}
/**
 * Locates the project directory for `cwd` and merges hooks from every
 * supported config file found there (Claude settings, rulesync, pi).
 */
function loadHooks(cwd: string): HookState {
  const projectDir = findProjectDir(cwd);
  type Parser = (config: unknown, source: string, dir: string) => ResolvedCommandHook[];
  const sources: Array<[string, Parser]> = [
    [join(projectDir, ".claude", "settings.json"), parseClaudeSettingsHooks],
    [join(projectDir, ".rulesync", "hooks.json"), parseSimpleHooksFile],
    [join(projectDir, ".pi", "hooks.json"), parseSimpleHooksFile],
  ];
  const hooks: ResolvedCommandHook[] = [];
  for (const [path, parse] of sources) {
    const config = readJsonFile(path);
    if (config !== undefined) {
      hooks.push(...parse(config, path, projectDir));
    }
  }
  return { projectDir, hooks };
}
/**
 * Converts a pi tool name to Claude Code casing: "ls" becomes "LS"; any other
 * non-empty name gets its first character upper-cased ("bash" -> "Bash").
 */
function toClaudeToolName(toolName: string): string {
  if (toolName === "ls") {
    return "LS";
  }
  if (!toolName) {
    return toolName;
  }
  return toolName.charAt(0).toUpperCase() + toolName.slice(1);
}
function matchesHook(hook: ResolvedCommandHook, toolName: string): boolean {
if (!hook.matcher) {
return true;
}
const claudeToolName = toClaudeToolName(toolName);
hook.matcher.lastIndex = 0;
if (hook.matcher.test(toolName)) {
return true;
}
hook.matcher.lastIndex = 0;
return hook.matcher.test(claudeToolName);
}
/** Joins the `text` fields of text-typed content items with newlines; "" for non-arrays. */
function extractTextContent(content: unknown): string {
  if (!Array.isArray(content)) {
    return "";
  }
  const texts: string[] = [];
  content.forEach((item) => {
    const record = item && typeof item === "object" ? (item as Record<string, unknown>) : undefined;
    if (record && record.type === "text" && typeof record.text === "string") {
      texts.push(record.text);
    }
  });
  return texts.join("\n");
}
/**
 * Shallow-clones tool input, mirroring a string `path` field into the
 * `file_path`/`filePath` keys Claude-style hooks expect.
 */
function normalizeToolInput(input: Record<string, unknown>): Record<string, unknown> {
  const normalized: Record<string, unknown> = { ...input };
  if (typeof input.path === "string") {
    normalized.file_path = input.path;
    normalized.filePath = input.path;
  }
  return normalized;
}
/**
 * Assembles the `tool_response` section of a hook payload, exposing both
 * snake_case and camelCase variants of each key for compatibility.
 */
function buildToolResponse(
  event: ToolResultEvent,
  normalizedInput: Record<string, unknown>,
): Record<string, unknown> {
  const response: Record<string, unknown> = {
    is_error: event.isError,
    isError: event.isError,
    content: event.content,
    text: extractTextContent(event.content),
    details: event.details ?? null,
  };
  const filePath = normalizedInput.file_path;
  if (typeof filePath === "string") {
    response.file_path = filePath;
    response.filePath = filePath;
  }
  return response;
}
/**
 * Builds the JSON document piped to a hook command's stdin, mirroring the
 * Claude Code hook input shape (snake_case keys, Claude-cased tool name).
 */
function buildHookPayload(
  event: ToolResultEvent,
  eventName: HookEventName,
  ctx: ExtensionContext,
  projectDir: string,
): Record<string, unknown> {
  const normalizedInput = normalizeToolInput(event.input);
  return {
    // Sessions without a backing file are labelled "ephemeral".
    session_id: ctx.sessionManager.getSessionFile() ?? "ephemeral",
    cwd: ctx.cwd,
    claude_project_dir: projectDir,
    hook_event_name: eventName,
    tool_name: toClaudeToolName(event.toolName),
    tool_call_id: event.toolCallId,
    tool_input: normalizedInput,
    tool_response: buildToolResponse(event, normalizedInput),
  };
}
/**
 * Runs a hook command via `bash -lc`, writing `payload` as JSON to its stdin.
 * Resolves (never rejects) with the exit code, captured stdout/stderr,
 * elapsed time, and a timeout flag. A hook is sent SIGTERM after
 * HOOK_TIMEOUT_MS and SIGKILL one second later if still alive.
 */
function runCommandHook(
  command: string,
  cwd: string,
  payload: Record<string, unknown>,
): Promise<CommandRunResult> {
  return new Promise((resolve) => {
    const startedAt = Date.now();
    // `-l` runs bash as a login shell so the user's PATH setup applies; the
    // caller-supplied cwd is also exposed as $CLAUDE_PROJECT_DIR.
    const child = spawn("bash", ["-lc", command], {
      cwd,
      env: { ...process.env, CLAUDE_PROJECT_DIR: cwd },
      stdio: ["pipe", "pipe", "pipe"],
    });
    let stdout = "";
    let stderr = "";
    let timedOut = false;
    let resolved = false;
    // Both 'error' and 'close' can fire; finish() guards against resolving twice.
    const finish = (code: number) => {
      if (resolved) {
        return;
      }
      resolved = true;
      resolve({
        code,
        stdout,
        stderr,
        elapsedMs: Date.now() - startedAt,
        timedOut,
      });
    };
    const timeout = setTimeout(() => {
      timedOut = true;
      child.kill("SIGTERM");
      // Escalate to SIGKILL if the process ignores SIGTERM.
      const killTimer = setTimeout(() => {
        child.kill("SIGKILL");
      }, 1000);
      (killTimer as NodeJS.Timeout & { unref?: () => void }).unref?.();
    }, HOOK_TIMEOUT_MS);
    // unref so pending timers never keep the host process alive.
    (timeout as NodeJS.Timeout & { unref?: () => void }).unref?.();
    child.stdout.on("data", (chunk: Buffer) => {
      stdout += chunk.toString("utf8");
    });
    child.stderr.on("data", (chunk: Buffer) => {
      stderr += chunk.toString("utf8");
    });
    child.on("error", (error) => {
      clearTimeout(timeout);
      stderr += `${error.message}\n`;
      finish(-1);
    });
    child.on("close", (code) => {
      clearTimeout(timeout);
      finish(code ?? -1);
    });
    try {
      child.stdin.write(JSON.stringify(payload));
      child.stdin.end();
    } catch (error) {
      // stdin writes can throw if the child exits immediately; record the
      // failure and let the 'close' handler resolve the promise.
      const message = error instanceof Error ? error.message : String(error);
      stderr += `${message}\n`;
    }
  });
}
/** Failed tool calls fire PostToolUseFailure; successful ones fire PostToolUse. */
function hookEventNameForResult(event: ToolResultEvent): HookEventName {
  if (event.isError) {
    return "PostToolUseFailure";
  }
  return "PostToolUse";
}
/** Formats milliseconds as "123ms" below one second, otherwise "1.5s" (one decimal). */
function formatDuration(elapsedMs: number): string {
  const underOneSecond = elapsedMs < 1000;
  return underOneSecond ? `${elapsedMs}ms` : `${(elapsedMs / 1000).toFixed(1)}s`;
}
/**
 * Derives a short display name for a hook command: the basename of the first
 * `*.sh` path when one appears, otherwise the basename of the first token.
 */
function hookName(command: string): string {
  const scriptMatch = command.match(/[^\s|;&]+\.sh\b/);
  if (scriptMatch) {
    return basename(scriptMatch[0]);
  }
  const tokens = command.trim().split(/\s+/);
  return basename(tokens[0] ?? "hook");
}
/**
 * Extension entry point: loads Claude-compatible PostToolUse hooks for the
 * current project and runs the matching ones after every tool result,
 * reporting success/failure through the UI.
 */
export default function(pi: ExtensionAPI) {
  // Hook configuration is cached and refreshed on session start/switch.
  let state: HookState = {
    projectDir: process.cwd(),
    hooks: [],
  };
  const refreshHooks = (cwd: string) => {
    state = loadHooks(cwd);
  };
  pi.on("session_start", (_event, ctx) => {
    refreshHooks(ctx.cwd);
  });
  pi.on("session_switch", (_event, ctx) => {
    refreshHooks(ctx.cwd);
  });
  pi.on("tool_result", async (event, ctx) => {
    if (state.hooks.length === 0) {
      return;
    }
    const eventName = hookEventNameForResult(event);
    const matchingHooks = state.hooks.filter(
      (hook) => hook.eventName === eventName && matchesHook(hook, event.toolName),
    );
    if (matchingHooks.length === 0) {
      return;
    }
    const payload = buildHookPayload(event, eventName, ctx, state.projectDir);
    // The same command may come from several config files; run it only once.
    const executedCommands = new Set<string>();
    for (const hook of matchingHooks) {
      if (executedCommands.has(hook.command)) {
        continue;
      }
      executedCommands.add(hook.command);
      // Hooks run sequentially so notifications arrive in registration order.
      const result = await runCommandHook(hook.command, state.projectDir, payload);
      const name = hookName(hook.command);
      const duration = formatDuration(result.elapsedMs);
      if (result.code === 0) {
        ctx.ui.notify(`󰛢 Hook \`${name}\` executed, took ${duration}`, "info");
        continue;
      }
      const matcherLabel = hook.matcherText ?? "*";
      // Prefer stderr, then stdout, then the bare exit code for the report.
      const errorLine =
        result.stderr.trim() || result.stdout.trim() || `exit code ${result.code}`;
      ctx.ui.notify(
        `󰛢 Hook \`${name}\` failed after ${duration} (${matcherLabel}) from ${hook.source}: ${errorLine}`,
        "warning",
      );
    }
  });
}
+5 -3
View File
@@ -4,6 +4,7 @@
"type": "module",
"dependencies": {
"@anthropic-ai/sdk": "^0.52.0",
"@mariozechner/jiti": "^2.6.5",
"@mozilla/readability": "^0.5.0",
"@sinclair/typebox": "^0.34.0",
"linkedom": "^0.16.0",
@@ -13,10 +14,11 @@
"vscode-languageserver-protocol": "^3.17.5"
},
"devDependencies": {
"@mariozechner/pi-ai": "^0.56.3",
"@mariozechner/pi-coding-agent": "^0.56.3",
"@mariozechner/pi-tui": "^0.56.3",
"@mariozechner/pi-ai": "^0.63.1",
"@mariozechner/pi-coding-agent": "^0.63.1",
"@mariozechner/pi-tui": "^0.63.1",
"@types/node": "^25.3.3",
"@types/turndown": "^5.0.6",
"typescript": "^5.7.0"
},
"pi": {},
+17 -16
View File
@@ -308,9 +308,24 @@ function pickSound(categoryConfig: CategoryConfig, category: Category): Sound |
// ============ SOUND PLAYBACK ============
function play(category: Category): void {
if (config.muted) return;
if (!config.enabledCategories[category]) return;
const notificationMessages: Record<Category, { title: string; message: string } | null> = {
"session.start": null,
"task.acknowledge": null,
"task.complete": { title: "Pi", message: "Task complete" },
"task.error": { title: "Pi", message: "Task failed" },
"input.required": { title: "Pi", message: "Input required" },
"resource.limit": { title: "Pi", message: "Rate limited" },
};
const notification = notificationMessages[category];
if (notification) {
sendNotification(notification.title, notification.message);
}
if (config.muted) return;
const now = Date.now();
if (now - lastPlayed < DEBOUNCE_MS) {
return;
@@ -345,20 +360,6 @@ function play(category: Category): void {
}
playSound(soundPath, config.volume);
const notificationMessages: Record<Category, { title: string; message: string } | null> = {
"session.start": null,
"task.acknowledge": null,
"task.complete": { title: "Pi", message: "Task complete" },
"task.error": { title: "Pi", message: "Task failed" },
"input.required": { title: "Pi", message: "Input required" },
"resource.limit": { title: "Pi", message: "Rate limited" },
};
const notification = notificationMessages[category];
if (notification) {
sendNotification(notification.title, notification.message);
}
}
// ============ COMMANDS ============
@@ -814,7 +815,7 @@ async function showTestMenu(ctx: ExtensionCommandContext) {
const INTERACTIVE_TOOLS = new Set(["question", "questionnaire"]);
export default function (pi: ExtensionAPI) {
export default function(pi: ExtensionAPI) {
registerCommands(pi);
pi.on("session_start", async (_event, ctx) => {
@@ -0,0 +1,21 @@
MIT License
Copyright (c) 2026 Ben Vargas
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
@@ -0,0 +1,115 @@
# @benvargas/pi-claude-code-use
`pi-claude-code-use` keeps Pi's built-in `anthropic` provider intact and applies the smallest payload changes needed for Anthropic OAuth subscription use in Pi.
It does not register a new provider or replace Pi's Anthropic request transport. Pi core remains in charge of OAuth transport, headers, model definitions, and streaming.
## What It Changes
When Pi is using Anthropic OAuth, this extension intercepts outbound API requests via the `before_provider_request` hook and:
- **System prompt rewrite** -- rewrites a small set of Pi-identifying prompt phrases in system prompt text:
- `pi itself` → `the cli itself`
- `pi .md files` → `cli .md files`
- `pi packages` → `cli packages`
Preserves Pi's original `system[]` structure, `cache_control` metadata, and non-text blocks.
- **Tool filtering** -- passes through core Claude Code tools, Anthropic-native typed tools (e.g. `web_search`), and any tool prefixed with `mcp__`. Unknown flat-named tools are filtered out.
- **Companion tool remapping** -- renames known companion extension tools from their flat names to MCP-style aliases (e.g. `web_search_exa` becomes `mcp__exa__web_search`). Duplicate flat entries are removed after remapping.
- **tool_choice remapping** -- if `tool_choice` references a flat companion name that was remapped, the reference is updated to the MCP alias. If it references a tool that was filtered out, `tool_choice` is removed from the payload.
- **Message history rewriting** -- `tool_use` blocks in conversation history that reference flat companion names are rewritten to their MCP aliases so the model sees consistent tool names across the conversation.
- **Companion alias registration** -- at session start and before each agent turn, discovers loaded companion extensions, captures their tool definitions via a jiti-based shim, and registers MCP-alias copies so the model can invoke them under Claude Code-compatible names.
- **Alias activation tracking** -- auto-activates MCP aliases when their flat counterpart is active under Anthropic OAuth. Tracks provenance (auto-managed vs user-selected) so that disabling OAuth only removes auto-activated aliases, preserving any the user explicitly enabled.
Non-OAuth Anthropic usage and non-Anthropic providers are left completely unchanged.
## Install
```bash
pi install npm:@benvargas/pi-claude-code-use
```
Or load it directly without installing:
```bash
pi -e /path/to/pi-packages/packages/pi-claude-code-use/extensions/index.ts
```
## Usage
Install the package and continue using the normal `anthropic` provider with Anthropic OAuth login:
```bash
/login anthropic
/model anthropic/claude-opus-4-6
```
No extra configuration is required.
## Environment Variables
| Variable | Description |
|---|---|
| `PI_CLAUDE_CODE_USE_DEBUG_LOG` | Set to a file path to enable debug logging. Writes two JSON entries per Anthropic OAuth request: one with `"stage": "before"` (the original payload from Pi) and one with `"stage": "after"` (the transformed payload sent to Anthropic). |
| `PI_CLAUDE_CODE_USE_DISABLE_TOOL_FILTER` | Set to `1` to disable tool filtering. System prompt rewriting still applies, but all tools pass through unchanged. Useful for debugging whether a tool-filtering issue is causing a problem. |
Example:
```bash
PI_CLAUDE_CODE_USE_DEBUG_LOG=/tmp/pi-claude-debug.log pi -e /path/to/extensions/index.ts --model anthropic/claude-sonnet-4-20250514
```
## Companion Tool Aliases
When these companion extensions from this monorepo are loaded alongside `pi-claude-code-use`, MCP aliases are automatically registered and remapped:
| Flat name | MCP alias |
|---|---|
| `web_search_exa` | `mcp__exa__web_search` |
| `get_code_context_exa` | `mcp__exa__get_code_context` |
| `firecrawl_scrape` | `mcp__firecrawl__scrape` |
| `firecrawl_map` | `mcp__firecrawl__map` |
| `firecrawl_search` | `mcp__firecrawl__search` |
| `generate_image` | `mcp__antigravity__generate_image` |
| `image_quota` | `mcp__antigravity__image_quota` |
### How companion discovery works
The extension identifies companion tools by matching `sourceInfo` metadata that Pi attaches to each registered tool:
1. **baseDir match** -- if the tool's `sourceInfo.baseDir` directory name matches the companion's directory name (e.g. `pi-exa-mcp`).
2. **Path match** -- if the tool's `sourceInfo.path` contains the companion's scoped package name (e.g. `@benvargas/pi-exa-mcp`) or directory name as a path segment. This handles npm installs, git clones, and monorepo layouts where `baseDir` points to the repo root rather than the individual package.
Once a companion tool is identified, its extension factory is loaded via jiti into a capture shim to obtain the full tool definition, which is then re-registered under the MCP alias name.
## Core Tools Allowlist
The following tool names always pass through filtering (case-insensitive). This list mirrors Pi core's `claudeCodeTools` in `packages/ai/src/providers/anthropic.ts`:
`Read`, `Write`, `Edit`, `Bash`, `Grep`, `Glob`, `AskUserQuestion`, `EnterPlanMode`, `ExitPlanMode`, `KillShell`, `NotebookEdit`, `Skill`, `Task`, `TaskOutput`, `TodoWrite`, `WebFetch`, `WebSearch`
Additionally, any tool with a `type` field (Anthropic-native tools like `web_search`) and any tool prefixed with `mcp__` always passes through.
## Guidance For Extension Authors
Anthropic's OAuth subscription path appears to fingerprint tool names. Flat extension tool names such as `web_search_exa` were rejected in live testing, while MCP-style names such as `mcp__exa__web_search` were accepted.
If you want a custom tool to survive Anthropic OAuth filtering cleanly, prefer registering it directly under an MCP-style name:
```text
mcp__<server>__<tool>
```
Examples:
- `mcp__exa__web_search`
- `mcp__firecrawl__scrape`
- `mcp__mytools__lookup_customer`
If an extension keeps a flat legacy name for non-Anthropic use, it can also register an MCP-style alias alongside it. `pi-claude-code-use` already does this centrally for the known companion tools in this repo, but unknown non-MCP tool names will still be filtered out on Anthropic OAuth requests.
## Notes
- The extension activates for all Anthropic OAuth requests regardless of model, rather than using a fixed model allowlist.
- Non-OAuth Anthropic usage (API key auth) is left unchanged.
- In practice, unknown non-MCP extension tools were the remaining trigger for Anthropic's extra-usage classification, so this package keeps core tools, keeps MCP-style tools, auto-aliases the known companion tools above, and filters the rest.
- Pi may show its built-in OAuth subscription warning banner even when the request path works correctly. That banner is UI logic in Pi, not a signal that the upstream request is being billed as extra usage.
@@ -0,0 +1,641 @@
import { appendFileSync } from "node:fs";
import { basename, dirname } from "node:path";
import { createJiti } from "@mariozechner/jiti";
import * as piAiModule from "@mariozechner/pi-ai";
import type { ExtensionAPI } from "@mariozechner/pi-coding-agent";
import * as piCodingAgentModule from "@mariozechner/pi-coding-agent";
import * as typeboxModule from "@sinclair/typebox";
// ============================================================================
// Types
// ============================================================================
/** A known companion extension from this monorepo and the tools it provides. */
interface CompanionSpec {
	/** Directory name of the companion package (e.g. "pi-exa-mcp"). */
	dirName: string;
	/** Scoped npm package name (e.g. "@benvargas/pi-exa-mcp"). */
	packageName: string;
	/** Pairs of [flat tool name, MCP-style alias] the companion registers. */
	aliases: ReadonlyArray<readonly [flatName: string, mcpName: string]>;
}
/** Shape accepted by ExtensionAPI.registerTool (its first parameter). */
type ToolRegistration = Parameters<ExtensionAPI["registerTool"]>[0];
/** Element type of the array returned by ExtensionAPI.getAllTools. */
type ToolInfo = ReturnType<ExtensionAPI["getAllTools"]>[number];
// ============================================================================
// Constants
// ============================================================================
/**
 * Core Claude Code tool names that always pass through Anthropic OAuth filtering.
 * Stored lowercase for case-insensitive matching.
 * Mirrors Pi core's claudeCodeTools list in packages/ai/src/providers/anthropic.ts
 */
const CORE_TOOL_NAMES = new Set([
	"read",
	"write",
	"edit",
	"bash",
	"grep",
	"glob",
	"askuserquestion",
	"enterplanmode",
	"exitplanmode",
	"killshell",
	"notebookedit",
	"skill",
	"task",
	"taskoutput",
	"todowrite",
	"webfetch",
	"websearch",
]);
/** Flat companion tool name → MCP-style alias. Keys are lowercase. */
const FLAT_TO_MCP = new Map<string, string>([
	["web_search_exa", "mcp__exa__web_search"],
	["get_code_context_exa", "mcp__exa__get_code_context"],
	["firecrawl_scrape", "mcp__firecrawl__scrape"],
	["firecrawl_map", "mcp__firecrawl__map"],
	["firecrawl_search", "mcp__firecrawl__search"],
	["generate_image", "mcp__antigravity__generate_image"],
	["image_quota", "mcp__antigravity__image_quota"],
]);
/** Known companion extensions and the tools they provide. */
const COMPANIONS: CompanionSpec[] = [
	{
		dirName: "pi-exa-mcp",
		packageName: "@benvargas/pi-exa-mcp",
		aliases: [
			["web_search_exa", "mcp__exa__web_search"],
			["get_code_context_exa", "mcp__exa__get_code_context"],
		],
	},
	{
		dirName: "pi-firecrawl",
		packageName: "@benvargas/pi-firecrawl",
		aliases: [
			["firecrawl_scrape", "mcp__firecrawl__scrape"],
			["firecrawl_map", "mcp__firecrawl__map"],
			["firecrawl_search", "mcp__firecrawl__search"],
		],
	},
	{
		dirName: "pi-antigravity-image-gen",
		packageName: "@benvargas/pi-antigravity-image-gen",
		aliases: [
			["generate_image", "mcp__antigravity__generate_image"],
			["image_quota", "mcp__antigravity__image_quota"],
		],
	},
];
/** Reverse lookup: flat tool name → its companion spec. Derived from COMPANIONS. */
const TOOL_TO_COMPANION = new Map<string, CompanionSpec>(
	COMPANIONS.flatMap((spec) => spec.aliases.map(([flat]) => [flat, spec] as const)),
);
// ============================================================================
// Helpers
// ============================================================================
/** True when `value` is a non-null, non-array object (record-like). */
function isPlainObject(value: unknown): value is Record<string, unknown> {
	if (value === null || Array.isArray(value)) return false;
	return typeof value === "object";
}
/** Normalize a possibly-missing name: trim surrounding whitespace, lowercase. */
function lower(name: string | undefined): string {
	if (name === undefined) return "";
	return name.trim().toLowerCase();
}
// ============================================================================
// System prompt rewrite (PRD §1.1)
//
// Replace "pi itself" → "the cli itself" in system prompt text.
// Preserves cache_control, non-text blocks, and payload shape.
// ============================================================================
/**
 * Rewrite Pi-identifying phrases in system prompt text to CLI-neutral wording.
 * Exactly three phrases are replaced (all occurrences); everything else is
 * passed through untouched.
 */
function rewritePromptText(text: string): string {
	const substitutions: ReadonlyArray<readonly [string, string]> = [
		["pi itself", "the cli itself"],
		["pi .md files", "cli .md files"],
		["pi packages", "cli packages"],
	];
	let rewritten = text;
	for (const [needle, replacement] of substitutions) {
		rewritten = rewritten.replaceAll(needle, replacement);
	}
	return rewritten;
}
/**
 * Apply rewritePromptText to an Anthropic `system` field, which may be a bare
 * string or an array of content blocks. Non-text blocks, unexpected shapes,
 * and cache_control metadata are left untouched; a block object is only
 * replaced when its text actually changed.
 */
function rewriteSystemField(system: unknown): unknown {
	if (typeof system === "string") {
		return rewritePromptText(system);
	}
	if (!Array.isArray(system)) {
		return system;
	}
	return system.map((entry) => {
		const isTextBlock = isPlainObject(entry) && entry.type === "text" && typeof entry.text === "string";
		if (!isTextBlock) {
			return entry;
		}
		const updated = rewritePromptText(entry.text as string);
		return updated === entry.text ? entry : { ...entry, text: updated };
	});
}
// ============================================================================
// Tool filtering and MCP alias remapping (PRD §1.2)
//
// Rules applied per tool:
// 1. Anthropic-native typed tools (have a `type` field) → pass through
// 2. Core Claude Code tool names → pass through
// 3. Tools already prefixed with mcp__ → pass through
// 4. Known companion tools whose MCP alias is also advertised → rename to alias
// 5. Known companion tools without an advertised alias → filtered out
// 6. Unknown flat-named tools → filtered out (unless disableFilter)
// ============================================================================
/** Collect the lowercase names of every record-shaped tool in the list. */
function collectToolNames(tools: unknown[]): Set<string> {
	const names = tools
		.filter((entry): entry is Record<string, unknown> => isPlainObject(entry) && typeof entry.name === "string")
		.map((entry) => lower(entry.name as string));
	return new Set(names);
}
/**
 * Filter an outbound Anthropic `tools` array and remap known companion tools
 * to their MCP-style aliases.
 *
 * Per-tool rules, applied in order:
 *  1. Anthropic-native typed tools (non-empty `type` field) always pass.
 *  2. Core Claude Code tool names pass (case-insensitive, deduplicated).
 *  3. Tools already named `mcp__*` pass (deduplicated).
 *  4. A known companion tool whose MCP alias is also advertised in the input
 *     list is renamed to that alias (dedup keyed on the alias name).
 *  5. A known companion tool without an advertised alias is dropped, unless
 *     `disableFilter` is set, in which case the flat name is kept.
 *  6. Any other flat-named tool is dropped, unless `disableFilter` is set.
 *
 * Returns the input unchanged when it is not an array. Non-object entries
 * and unnamed non-typed entries are silently dropped.
 */
function filterAndRemapTools(tools: unknown[] | undefined, disableFilter: boolean): unknown[] | undefined {
	if (!Array.isArray(tools)) return tools;
	// Lowercased names advertised anywhere in the incoming list (rule 4 lookup).
	const advertised = collectToolNames(tools);
	// Lowercased names already pushed to `result`, for deduplication.
	const emitted = new Set<string>();
	const result: unknown[] = [];
	for (const tool of tools) {
		if (!isPlainObject(tool)) continue;
		// Rule 1: native typed tools always pass through
		if (typeof tool.type === "string" && tool.type.trim().length > 0) {
			result.push(tool);
			continue;
		}
		const name = typeof tool.name === "string" ? tool.name : "";
		if (!name) continue;
		const nameLc = lower(name);
		// Rules 2 & 3: core tools and mcp__-prefixed pass through (with dedup)
		if (CORE_TOOL_NAMES.has(nameLc) || nameLc.startsWith("mcp__")) {
			if (!emitted.has(nameLc)) {
				emitted.add(nameLc);
				result.push(tool);
			}
			continue;
		}
		// Rules 4 & 5: known companion tool
		const mcpAlias = FLAT_TO_MCP.get(nameLc);
		if (mcpAlias) {
			const aliasLc = lower(mcpAlias);
			if (advertised.has(aliasLc) && !emitted.has(aliasLc)) {
				// Alias exists in tool list → rename flat to alias, dedup
				emitted.add(aliasLc);
				result.push({ ...tool, name: mcpAlias });
			} else if (disableFilter && !emitted.has(nameLc)) {
				// Filter disabled: keep flat name if not yet emitted
				emitted.add(nameLc);
				result.push(tool);
			}
			continue;
		}
		// Rule 6: unknown flat-named tool
		if (disableFilter && !emitted.has(nameLc)) {
			emitted.add(nameLc);
			result.push(tool);
		}
	}
	return result;
}
/**
 * Adjust a request's `tool_choice` after tool filtering/remapping.
 *
 * - Choices that don't force a specific tool (type !== "tool") pass through.
 * - If the referenced name survived filtering, the choice is kept (renamed to
 *   the surviving spelling when casing differs).
 * - If the flat name was remapped and its MCP alias survived, the choice is
 *   rewritten to the alias.
 * - Otherwise the referenced tool was filtered out: returns undefined so the
 *   caller can delete `tool_choice` from the payload.
 *
 * @param toolChoice The payload's tool_choice object.
 * @param survivingNames Lowercase name → actual name of tools that survived.
 */
function remapToolChoice(
	toolChoice: Record<string, unknown>,
	survivingNames: Map<string, string>,
): Record<string, unknown> | undefined {
	if (toolChoice.type !== "tool" || typeof toolChoice.name !== "string") {
		return toolChoice;
	}
	const nameLc = lower(toolChoice.name);
	const actualName = survivingNames.get(nameLc);
	if (actualName) {
		// Name survived; only rebuild the object when the spelling changed.
		return actualName === toolChoice.name ? toolChoice : { ...toolChoice, name: actualName };
	}
	const mcpAlias = FLAT_TO_MCP.get(nameLc);
	if (mcpAlias && survivingNames.has(lower(mcpAlias))) {
		return { ...toolChoice, name: mcpAlias };
	}
	return undefined;
}
/**
 * Rewrite historical `tool_use` blocks in the conversation so flat companion
 * tool names become their MCP aliases, keeping names consistent with the
 * filtered/remapped tool list. A block is only renamed when its alias is
 * present in `survivingNames`.
 *
 * Returns the ORIGINAL array (same reference) when nothing changed so callers
 * can cheaply detect a no-op; otherwise returns a new array in which only the
 * changed messages/blocks are fresh objects (copy-on-write).
 */
function remapMessageToolNames(messages: unknown[], survivingNames: Map<string, string>): unknown[] {
	let anyChanged = false;
	const result = messages.map((msg) => {
		if (!isPlainObject(msg) || !Array.isArray(msg.content)) return msg;
		let msgChanged = false;
		const content = (msg.content as unknown[]).map((block) => {
			if (!isPlainObject(block) || block.type !== "tool_use" || typeof block.name !== "string") {
				return block;
			}
			const mcpAlias = FLAT_TO_MCP.get(lower(block.name));
			if (mcpAlias && survivingNames.has(lower(mcpAlias))) {
				msgChanged = true;
				return { ...block, name: mcpAlias };
			}
			return block;
		});
		if (msgChanged) {
			anyChanged = true;
			return { ...msg, content };
		}
		return msg;
	});
	return anyChanged ? result : messages;
}
// ============================================================================
// Full payload transform
// ============================================================================
/**
 * Produce the transformed Anthropic OAuth request payload.
 *
 * Steps:
 *  1. Rewrite Pi-identifying phrases in the system prompt (always applied).
 *  2. Filter/remap the tools array (skipped entirely when disableFilter).
 *  3. Index the tool names that survived filtering.
 *  4. Remap `tool_choice` to a surviving name, or remove it when its target
 *     was filtered out.
 *  5. Rewrite historical tool_use blocks to the surviving names.
 *
 * Operates on a deep clone; the input `raw` is never mutated. The JSON
 * round-trip clone assumes the payload is JSON-serializable, which holds for
 * Anthropic request bodies.
 */
function transformPayload(raw: Record<string, unknown>, disableFilter: boolean): Record<string, unknown> {
	// Deep clone to avoid mutating the original
	const payload = JSON.parse(JSON.stringify(raw)) as Record<string, unknown>;
	// 1. System prompt rewrite (always applies)
	if (payload.system !== undefined) {
		payload.system = rewriteSystemField(payload.system);
	}
	// When escape hatch is active, skip all tool filtering/remapping
	if (disableFilter) {
		return payload;
	}
	// 2. Tool filtering and alias remapping
	payload.tools = filterAndRemapTools(payload.tools as unknown[] | undefined, false);
	// 3. Build map of tool names that survived filtering (lowercase → actual name)
	const survivingNames = new Map<string, string>();
	if (Array.isArray(payload.tools)) {
		for (const tool of payload.tools) {
			if (isPlainObject(tool) && typeof tool.name === "string") {
				survivingNames.set(lower(tool.name), tool.name as string);
			}
		}
	}
	// 4. Remap tool_choice if it references a renamed or filtered tool
	if (isPlainObject(payload.tool_choice)) {
		const remapped = remapToolChoice(payload.tool_choice, survivingNames);
		if (remapped === undefined) {
			delete payload.tool_choice;
		} else {
			payload.tool_choice = remapped;
		}
	}
	// 5. Rewrite historical tool_use blocks in message history
	if (Array.isArray(payload.messages)) {
		payload.messages = remapMessageToolNames(payload.messages, survivingNames);
	}
	return payload;
}
// ============================================================================
// Debug logging (PRD §1.4)
// ============================================================================
// Debug log destination; logging is disabled entirely when the env var is unset.
const debugLogPath = process.env.PI_CLAUDE_CODE_USE_DEBUG_LOG;
/**
 * Append one timestamped, pretty-printed JSON entry (terminated by `---`)
 * to the debug log file when PI_CLAUDE_CODE_USE_DEBUG_LOG is set.
 * All failures are swallowed: diagnostics must never break a real request.
 */
function writeDebugLog(payload: unknown): void {
	if (!debugLogPath) return;
	const entry = `${new Date().toISOString()}\n${JSON.stringify(payload, null, 2)}\n---\n`;
	try {
		appendFileSync(debugLogPath, entry, "utf-8");
	} catch {
		// Intentionally ignored: debug logging is best-effort only.
	}
}
// ============================================================================
// Companion alias registration (PRD §1.3)
//
// Discovers loaded companion extensions, captures their tool definitions via
// a shim ExtensionAPI, and registers MCP-alias versions so the model can
// invoke them under Claude Code-compatible names.
// ============================================================================
// MCP alias names this extension has registered with the real ExtensionAPI.
const registeredMcpAliases = new Set<string>();
// Aliases WE auto-activated (as opposed to user-selected); these are the only
// ones removed again when Anthropic OAuth is no longer in use.
const autoActivatedAliases = new Set<string>();
// Active-tool list as of our last setActiveTools call; used to detect user
// edits to the tool picker between syncs. undefined = we haven't managed it.
let lastManagedToolList: string[] | undefined;
// Per-baseDir memoized companion tool captures (promises, so concurrent
// callers share one load). Cleared before each rediscovery pass.
const captureCache = new Map<string, Promise<Map<string, ToolRegistration>>>();
// Lazily-created jiti loader; see getJitiLoader.
let jitiLoader: { import(path: string, opts?: { default?: boolean }): Promise<unknown> } | undefined;
/**
 * Lazily create the shared jiti loader used to import companion extension
 * sources. The virtual modules pin a companion's imports of the pi packages
 * (and typebox) to the copies already loaded in this process, so captured
 * tool definitions share identities with the host. moduleCache is disabled
 * so repeated captures re-evaluate module state.
 */
function getJitiLoader() {
	if (!jitiLoader) {
		jitiLoader = createJiti(import.meta.url, {
			moduleCache: false,
			tryNative: false,
			virtualModules: {
				"@mariozechner/pi-ai": piAiModule,
				"@mariozechner/pi-coding-agent": piCodingAgentModule,
				"@sinclair/typebox": typeboxModule,
			},
		});
	}
	return jitiLoader;
}
/**
 * Attempt to load a companion extension's default-export factory from a base
 * directory. Probes the conventional entry points in order and returns the
 * first default export that is a function, or undefined when none loads.
 */
async function loadFactory(baseDir: string): Promise<((pi: ExtensionAPI) => void | Promise<void>) | undefined> {
	const root = baseDir.replace(/\/$/, "");
	const entryPoints = ["index.ts", "index.js", "extensions/index.ts", "extensions/index.js"];
	const loader = getJitiLoader();
	for (const entry of entryPoints) {
		let mod: unknown;
		try {
			mod = await loader.import(`${root}/${entry}`, { default: true });
		} catch {
			continue; // This candidate failed to load; probe the next one.
		}
		if (typeof mod === "function") {
			return mod as (pi: ExtensionAPI) => void | Promise<void>;
		}
	}
	return undefined;
}
/**
 * Decide whether a registered tool originates from the given companion
 * extension, using the sourceInfo metadata Pi attaches to every tool.
 *
 * Matches when either:
 *  - sourceInfo.baseDir's leaf directory is the companion's dirName (or is an
 *    "extensions" subdirectory whose parent is that dirName), or
 *  - sourceInfo.path contains the companion's scoped package name or dirName
 *    as a path segment (handles npm installs, git clones, monorepo layouts).
 */
function isCompanionSource(tool: ToolInfo | undefined, spec: CompanionSpec): boolean {
	const source = tool?.sourceInfo;
	if (!source) return false;
	const baseDir = source.baseDir;
	if (baseDir) {
		const leaf = basename(baseDir);
		if (leaf === spec.dirName) return true;
		// An "extensions" subdirectory counts when its parent is the companion dir.
		if (leaf === "extensions" && basename(dirname(baseDir)) === spec.dirName) return true;
	}
	if (typeof source.path !== "string") return false;
	// Normalize backslashes so Windows paths survive the segment-bounded check.
	const slashPath = source.path.replaceAll("\\", "/");
	return [`/${spec.packageName}/`, `/${spec.dirName}/`].some((segment) => slashPath.includes(segment));
}
/**
 * Build a minimal ExtensionAPI stand-in that is handed to a companion
 * extension's factory purely to capture its registerTool calls.
 *
 * - registerTool records each definition into `captured` (keyed by name)
 *   instead of registering it for real.
 * - registerFlag remembers flag names so getFlag only consults the real API
 *   for flags the companion itself declared; others read as undefined.
 * - Read-style calls (exec, tool/command queries, model, thinking level,
 *   events) delegate to the real API; UI/session mutators are no-ops.
 *
 * The `as ExtensionAPI` cast is deliberate: only the subset of the interface
 * companions actually use is implemented here.
 */
function buildCaptureShim(realPi: ExtensionAPI, captured: Map<string, ToolRegistration>): ExtensionAPI {
	const shimFlags = new Set<string>();
	return {
		registerTool(def) {
			captured.set(def.name, def as unknown as ToolRegistration);
		},
		registerFlag(name, _options) {
			shimFlags.add(name);
		},
		getFlag(name) {
			// Only flags the shimmed companion declared are forwarded to the host.
			return shimFlags.has(name) ? realPi.getFlag(name) : undefined;
		},
		on() {},
		registerCommand() {},
		registerShortcut() {},
		registerMessageRenderer() {},
		registerProvider() {},
		unregisterProvider() {},
		sendMessage() {},
		sendUserMessage() {},
		appendEntry() {},
		setSessionName() {},
		getSessionName() {
			return undefined;
		},
		setLabel() {},
		exec(command, args, options) {
			return realPi.exec(command, args, options);
		},
		getActiveTools() {
			return realPi.getActiveTools();
		},
		getAllTools() {
			return realPi.getAllTools();
		},
		setActiveTools(names) {
			realPi.setActiveTools(names);
		},
		getCommands() {
			return realPi.getCommands();
		},
		setModel(model) {
			return realPi.setModel(model);
		},
		getThinkingLevel() {
			return realPi.getThinkingLevel();
		},
		setThinkingLevel(level) {
			realPi.setThinkingLevel(level);
		},
		events: realPi.events,
	} as ExtensionAPI;
}
/**
 * Load a companion extension once per base directory and capture the tool
 * definitions it registers. Results are memoized as promises in captureCache
 * so concurrent callers for the same directory share a single load.
 */
async function captureCompanionTools(baseDir: string, realPi: ExtensionAPI): Promise<Map<string, ToolRegistration>> {
	const cached = captureCache.get(baseDir);
	if (cached) return cached;
	const loading = (async () => {
		const captured = new Map<string, ToolRegistration>();
		const factory = await loadFactory(baseDir);
		if (factory) {
			await factory(buildCaptureShim(realPi, captured));
		}
		return captured;
	})();
	captureCache.set(baseDir, loading);
	return loading;
}
/**
 * Discover loaded companion extensions and register MCP-alias copies of
 * their tools with the real extension API.
 *
 * For each known (flat → mcp) alias pair:
 *  - Skips aliases we already registered or that Pi already knows about.
 *  - Confirms the flat tool exists and really comes from the companion
 *    (sourceInfo match) before loading anything.
 *  - Loads the companion factory via jiti and captures its tool definitions.
 *  - Re-registers the captured flat definition under the MCP alias name,
 *    prefixing the label with "MCP " when not already so prefixed.
 */
async function registerAliasesForLoadedCompanions(pi: ExtensionAPI): Promise<void> {
	// Clear capture cache so flag/config changes since last call take effect
	captureCache.clear();
	const allTools = pi.getAllTools();
	const toolIndex = new Map<string, ToolInfo>();
	const knownNames = new Set<string>();
	for (const tool of allTools) {
		toolIndex.set(lower(tool.name), tool);
		knownNames.add(lower(tool.name));
	}
	for (const spec of COMPANIONS) {
		for (const [flatName, mcpName] of spec.aliases) {
			if (registeredMcpAliases.has(mcpName) || knownNames.has(lower(mcpName))) continue;
			const tool = toolIndex.get(lower(flatName));
			if (!tool || !isCompanionSource(tool, spec)) continue;
			// Prefer the extension file's directory for loading (sourceInfo.path is the actual
			// entry point). Fall back to baseDir only if path is unavailable. baseDir can be
			// the monorepo root which doesn't contain the extension entry point directly.
			const loadDir = tool.sourceInfo?.path ? dirname(tool.sourceInfo.path) : tool.sourceInfo?.baseDir;
			if (!loadDir) continue;
			const captured = await captureCompanionTools(loadDir, pi);
			const def = captured.get(flatName);
			if (!def) continue;
			pi.registerTool({
				...def,
				name: mcpName,
				label: def.label?.startsWith("MCP ") ? def.label : `MCP ${def.label ?? mcpName}`,
			});
			registeredMcpAliases.add(mcpName);
			knownNames.add(lower(mcpName));
		}
	}
}
/**
 * Synchronize MCP alias tool activation with the current model state.
 * When OAuth is active (enableAliases), auto-activate aliases for any active
 * companion tools. When OAuth is inactive, remove auto-activated aliases
 * (but preserve user-selected ones).
 *
 * Provenance: an alias activated by this function is tracked in
 * autoActivatedAliases; an alias the user enabled (or deliberately kept)
 * is "user-selected" and is never removed here.
 */
function syncAliasActivation(pi: ExtensionAPI, enableAliases: boolean): void {
	const activeNames = pi.getActiveTools();
	const allNames = new Set(pi.getAllTools().map((t) => t.name));
	if (enableAliases) {
		// Determine which aliases should be active based on their flat counterpart being active
		const activeLc = new Set(activeNames.map(lower));
		const desiredAliases: string[] = [];
		for (const [flat, mcp] of FLAT_TO_MCP) {
			if (activeLc.has(flat) && allNames.has(mcp) && registeredMcpAliases.has(mcp)) {
				desiredAliases.push(mcp);
			}
		}
		const desiredSet = new Set(desiredAliases);
		// Promote auto-activated aliases to user-selected when the user explicitly kept
		// the alias while removing its flat counterpart from the tool picker.
		// We detect this by checking: (a) user changed the tool list since our last sync,
		// (b) the flat tool was previously managed but is no longer active, and
		// (c) the alias is still active. This means the user deliberately kept the alias.
		if (lastManagedToolList !== undefined) {
			const activeSet = new Set(activeNames);
			const lastManaged = new Set(lastManagedToolList);
			for (const alias of autoActivatedAliases) {
				if (!activeSet.has(alias) || desiredSet.has(alias)) continue;
				// Find the flat name for this alias
				const flatName = [...FLAT_TO_MCP.entries()].find(([, mcp]) => mcp === alias)?.[0];
				if (flatName && lastManaged.has(flatName) && !activeSet.has(flatName)) {
					// User removed the flat tool but kept the alias → promote to user-selected
					autoActivatedAliases.delete(alias);
				}
			}
		}
		// Find registered aliases currently in the active list
		const activeRegistered = activeNames.filter((n) => registeredMcpAliases.has(n) && allNames.has(n));
		// Per-alias provenance: an alias is "user-selected" if it's active and was NOT
		// auto-activated by us. Only preserve those; auto-activated aliases get re-derived
		// from the desired set each sync.
		const preserved = activeRegistered.filter((n) => !autoActivatedAliases.has(n));
		// Build result: non-alias tools + preserved user aliases + desired aliases
		const nonAlias = activeNames.filter((n) => !registeredMcpAliases.has(n));
		const next = Array.from(new Set([...nonAlias, ...preserved, ...desiredAliases]));
		// Update auto-activation tracking: aliases we added this sync that weren't user-preserved
		const preservedSet = new Set(preserved);
		autoActivatedAliases.clear();
		for (const name of desiredAliases) {
			if (!preservedSet.has(name)) {
				autoActivatedAliases.add(name);
			}
		}
		// Only call setActiveTools when the list actually changed (order-sensitive compare).
		if (next.length !== activeNames.length || next.some((n, i) => n !== activeNames[i])) {
			pi.setActiveTools(next);
			lastManagedToolList = [...next];
		}
	} else {
		// Remove only auto-activated aliases; user-selected ones are preserved
		const next = activeNames.filter((n) => !autoActivatedAliases.has(n));
		autoActivatedAliases.clear();
		if (next.length !== activeNames.length || next.some((n, i) => n !== activeNames[i])) {
			pi.setActiveTools(next);
			lastManagedToolList = [...next];
		} else {
			// Nothing of ours is active anymore; stop tracking the managed list.
			lastManagedToolList = undefined;
		}
	}
}
// ============================================================================
// Extension entry point
// ============================================================================
/**
 * Extension entry point. Wires three hooks:
 *  - session_start: register MCP aliases for any loaded companions.
 *  - before_agent_start: re-discover companions (extensions may load late)
 *    and sync alias activation with whether the model is Anthropic OAuth.
 *  - before_provider_request: transform Anthropic OAuth payloads (system
 *    prompt rewrite, tool filter/remap); everything else passes untouched.
 */
export default async function piClaudeCodeUse(pi: ExtensionAPI): Promise<void> {
	pi.on("session_start", async () => {
		await registerAliasesForLoadedCompanions(pi);
	});
	pi.on("before_agent_start", async (_event, ctx) => {
		await registerAliasesForLoadedCompanions(pi);
		const model = ctx.model;
		const isOAuth = model?.provider === "anthropic" && ctx.modelRegistry.isUsingOAuth(model);
		syncAliasActivation(pi, isOAuth);
	});
	pi.on("before_provider_request", (event, ctx) => {
		const model = ctx.model;
		// Only Anthropic OAuth requests are touched; API-key auth and other
		// providers fall through unchanged (return undefined = no-op).
		if (!model || model.provider !== "anthropic" || !ctx.modelRegistry.isUsingOAuth(model)) {
			return undefined;
		}
		if (!isPlainObject(event.payload)) {
			return undefined;
		}
		writeDebugLog({ stage: "before", payload: event.payload });
		const disableFilter = process.env.PI_CLAUDE_CODE_USE_DISABLE_TOOL_FILTER === "1";
		const transformed = transformPayload(event.payload as Record<string, unknown>, disableFilter);
		writeDebugLog({ stage: "after", payload: transformed });
		return transformed;
	});
}
// ============================================================================
// Test exports
// ============================================================================
/**
 * Internal bindings exposed solely for unit tests. Not part of the public
 * extension API; shape and contents may change without notice.
 */
export const _test = {
	CORE_TOOL_NAMES,
	FLAT_TO_MCP,
	COMPANIONS,
	TOOL_TO_COMPANION,
	autoActivatedAliases,
	buildCaptureShim,
	collectToolNames,
	filterAndRemapTools,
	getLastManagedToolList: () => lastManagedToolList,
	isCompanionSource,
	isPlainObject,
	lower,
	registerAliasesForLoadedCompanions,
	registeredMcpAliases,
	remapMessageToolNames,
	remapToolChoice,
	rewritePromptText,
	rewriteSystemField,
	setLastManagedToolList: (v: string[] | undefined) => {
		lastManagedToolList = v;
	},
	syncAliasActivation,
	transformPayload,
};
@@ -211,12 +211,12 @@ function updateWidget(ctx: ExtensionContext): void {
(resetMs > 0 ? theme.fg("dim", ` (resets in ${resetSec}s)`) : ""),
);
ctx.ui.setWidget("web-activity", new Text(lines.join("\n"), 0, 0));
ctx.ui.setWidget("web-activity", lines);
}
function formatEntryLine(
entry: ActivityEntry,
theme: { fg: (color: string, text: string) => string },
theme: ExtensionContext["ui"]["theme"],
): string {
const typeStr = entry.type === "api" ? "API" : "GET";
const target =
@@ -550,7 +550,7 @@ export default function (pi: ExtensionAPI) {
} else {
widgetUnsubscribe?.();
widgetUnsubscribe = null;
ctx.ui.setWidget("web-activity", null);
ctx.ui.setWidget("web-activity", undefined);
}
},
});
@@ -598,7 +598,7 @@ export default function (pi: ExtensionAPI) {
})),
}),
async execute(_toolCallId, params, signal, onUpdate, ctx) {
async execute(_toolCallId, params, signal, onUpdate, ctx): Promise<any> {
const queryList = params.queries ?? (params.query ? [params.query] : []);
const isMultiQuery = queryList.length > 1;
const shouldCurate = params.curate !== false && ctx?.hasUI !== false;
@@ -613,7 +613,10 @@ export default function (pi: ExtensionAPI) {
if (shouldCurate) {
closeCurator();
const { promise, resolve: resolvePromise } = Promise.withResolvers<unknown>();
let resolvePromise!: (value: unknown) => void;
const promise = new Promise<unknown>((resolve) => {
resolvePromise = resolve;
});
const includeContent = params.includeContent ?? false;
const searchResults = new Map<number, QueryResultData>();
const allUrls: string[] = [];
@@ -637,7 +640,7 @@ export default function (pi: ExtensionAPI) {
queryList,
includeContent,
numResults: params.numResults,
recencyFilter: params.recencyFilter,
recencyFilter: params.recencyFilter as "day" | "week" | "month" | "year" | undefined,
domainFilter: params.domainFilter,
availableProviders,
defaultProvider,
@@ -684,7 +687,7 @@ export default function (pi: ExtensionAPI) {
const { answer, results } = await search(queryList[qi], {
provider: defaultProvider as SearchProvider | undefined,
numResults: params.numResults,
recencyFilter: params.recencyFilter,
recencyFilter: params.recencyFilter as "day" | "week" | "month" | "year" | undefined,
domainFilter: params.domainFilter,
signal,
});
@@ -754,7 +757,7 @@ export default function (pi: ExtensionAPI) {
text = `${searchResults.size} searches (${totalSources} sources) · ${curateLabel} to review · sending in ${remaining}s`;
}
return {
content: [{ type: "text", text }],
content: [{ type: "text" as const, text }],
details: {
phase: "curate-window",
searchCount: searchResults.size,
@@ -824,7 +827,7 @@ export default function (pi: ExtensionAPI) {
const { answer, results } = await search(query, {
provider: resolvedProvider as SearchProvider | undefined,
numResults: params.numResults,
recencyFilter: params.recencyFilter,
recencyFilter: params.recencyFilter as "day" | "week" | "month" | "year" | undefined,
domainFilter: params.domainFilter,
signal,
});
@@ -1117,7 +1120,10 @@ export default function (pi: ExtensionAPI) {
`Use get_search_content({ responseId: "${responseId}", urlIndex: 0 }) for full content.`;
}
const content: Array<{ type: string; text?: string; data?: string; mimeType?: string }> = [];
const content: Array<
| { type: "image"; data: string; mimeType: string }
| { type: "text"; text: string }
> = [];
if (result.frames?.length) {
for (const frame of result.frames) {
content.push({ type: "image", data: frame.data, mimeType: frame.mimeType });
@@ -1290,7 +1296,7 @@ export default function (pi: ExtensionAPI) {
urlIndex: Type.Optional(Type.Number({ description: "Get content for URL at index" })),
}),
async execute(_toolCallId, params) {
async execute(_toolCallId, params, _signal, _onUpdate, _ctx): Promise<any> {
const data = getResult(params.responseId);
if (!data) {
return {
@@ -1477,7 +1483,7 @@ export default function (pi: ExtensionAPI) {
pi.sendMessage({
customType: "web-search-results",
content: [{ type: "text", text }],
display: "tool",
display: true,
details: { queryCount: results.length, totalResults: urls.length },
}, { triggerTurn: true, deliverAs: "followUp" });
}
@@ -42,9 +42,10 @@ export async function extractPDFToMarkdown(
const pdf = await getDocumentProxy(new Uint8Array(buffer));
const metadata = await pdf.getMetadata();
const info = (metadata.info ?? {}) as Record<string, unknown>;
// Extract title from metadata or URL
const metaTitle = metadata.info?.Title as string | undefined;
const metaTitle = typeof info.Title === "string" ? info.Title : undefined;
const urlTitle = extractTitleFromURL(url);
const title = metaTitle?.trim() || urlTitle;
@@ -79,8 +80,9 @@ export async function extractPDFToMarkdown(
lines.push("");
lines.push(`> Source: ${url}`);
lines.push(`> Pages: ${pdf.numPages}${truncated ? ` (extracted first ${pagesToExtract})` : ""}`);
if (metadata.info?.Author) {
lines.push(`> Author: ${metadata.info.Author}`);
const author = typeof info.Author === "string" ? info.Author : undefined;
if (author) {
lines.push(`> Author: ${author}`);
}
lines.push("");
lines.push("---");
@@ -245,8 +245,8 @@ export async function condenseSearchResults(
const model = ctx.modelRegistry.find(provider, modelId);
if (!model) return null;
const apiKey = await ctx.modelRegistry.getApiKey(model);
if (!apiKey) return null;
const auth = await ctx.modelRegistry.getApiKeyAndHeaders(model);
if (!auth.ok) return null;
const queryData = [...results.entries()]
.sort((a, b) => a[0] - b[0])
@@ -281,7 +281,8 @@ export async function condenseSearchResults(
: timeoutSignal;
const response = await complete(model, aiContext, {
apiKey,
apiKey: auth.apiKey,
headers: auth.headers,
signal: combinedSignal,
max_tokens: MAX_TOKENS,
} as any);
+34 -22
View File
@@ -11,6 +11,9 @@ importers:
'@anthropic-ai/sdk':
specifier: ^0.52.0
version: 0.52.0
'@mariozechner/jiti':
specifier: ^2.6.5
version: 2.6.5
'@mozilla/readability':
specifier: ^0.5.0
version: 0.5.0
@@ -34,17 +37,20 @@ importers:
version: 3.17.5
devDependencies:
'@mariozechner/pi-ai':
specifier: ^0.56.3
version: 0.56.3(ws@8.19.0)(zod@4.3.6)
specifier: ^0.63.1
version: 0.63.1(ws@8.19.0)(zod@4.3.6)
'@mariozechner/pi-coding-agent':
specifier: ^0.56.3
version: 0.56.3(ws@8.19.0)(zod@4.3.6)
specifier: ^0.63.1
version: 0.63.1(ws@8.19.0)(zod@4.3.6)
'@mariozechner/pi-tui':
specifier: ^0.56.3
version: 0.56.3
specifier: ^0.63.1
version: 0.63.1
'@types/node':
specifier: ^25.3.3
version: 25.3.3
'@types/turndown':
specifier: ^5.0.6
version: 5.0.6
typescript:
specifier: ^5.7.0
version: 5.9.3
@@ -289,22 +295,22 @@ packages:
resolution: {integrity: sha512-faGUlTcXka5l7rv0lP3K3vGW/ejRuOS24RR2aSFWREUQqzjgdsuWNo/IiPqL3kWRGt6Ahl2+qcDAwtdeWeuGUw==}
hasBin: true
'@mariozechner/pi-agent-core@0.56.3':
resolution: {integrity: sha512-TsI1zENf3wqqKPaERnj486Q4i6Y/y6lAZipLNcfDYUDxDrLwNfQ9EW9xukkbJfTZ8zjG3VZ2pBZe3C7wM51dVQ==}
'@mariozechner/pi-agent-core@0.63.1':
resolution: {integrity: sha512-h0B20xfs/iEVR2EC4gwiE8hKI1TPeB8REdRJMgV+uXKH7gpeIZ9+s8Dp9nX35ZR0QUjkNey2+ULk2DxQtdg14Q==}
engines: {node: '>=20.0.0'}
'@mariozechner/pi-ai@0.56.3':
resolution: {integrity: sha512-l4J+cVyVeBLAlGOY/osGDvsbTz0DySCQmR171G6SdbPvIeLGhIi6siZ+zHwq91GJYjv/wtu/08M08ag2mGZKeA==}
'@mariozechner/pi-ai@0.63.1':
resolution: {integrity: sha512-wjgwY+yfrFO6a9QdAfjWpH7iSrDean6GsKDDMohNcLCy6PreMxHOZvNM0NwJARL1tZoZovr7ikAQfLGFZbnjsw==}
engines: {node: '>=20.0.0'}
hasBin: true
'@mariozechner/pi-coding-agent@0.56.3':
resolution: {integrity: sha512-yHgnadye+TT/4NWKBirZUjw/LWdNWTa7M4HJdX2RxRbwuj4q7RZ0Aqy+lQbOHEPDQYhxK3kZb9hjiAbbGficZQ==}
'@mariozechner/pi-coding-agent@0.63.1':
resolution: {integrity: sha512-XSoMyLtuMA7ePK1UBWqSJ/BBdtBdJUHY9nbtnNyG6GeW7Gbgd+iqljIuwmAUf8wlYL981UIfYM/WIPQ6t/dIxw==}
engines: {node: '>=20.6.0'}
hasBin: true
'@mariozechner/pi-tui@0.56.3':
resolution: {integrity: sha512-eZ1P9QRKHp78hwx+lITr/mujZqe+eCwL/bOS9vXXkFP070RW4VYum0j7TJ4BrFEH/nNkXRS1tYCXYU05une1bA==}
'@mariozechner/pi-tui@0.63.1':
resolution: {integrity: sha512-G5p+eh1EPkFCNaaggX6vRrqttnDscK6npgmEOknoCQXZtch8XNgh9Lf3VJ0A2lZXSgR7IntG5dfXHPH/Ki64wA==}
engines: {node: '>=20.0.0'}
'@mistralai/mistralai@1.14.1':
@@ -568,6 +574,9 @@ packages:
'@types/retry@0.12.0':
resolution: {integrity: sha512-wWKOClTTiizcZhXnPY4wikVAwmdYHp8q6DmC+EJUzAMsycb7HB32Kh9RN4+0gExjmPmZSAQjgURXIGATPegAvA==}
'@types/turndown@5.0.6':
resolution: {integrity: sha512-ru00MoyeeouE5BX4gRL+6m/BsDfbRayOskWqUvh7CLGW+UXxHQItqALa38kKnOiZPqJrtzJUgAC2+F0rL1S4Pg==}
'@types/yauzl@2.10.3':
resolution: {integrity: sha512-oJoftv0LSuaDZE3Le4DbKX+KS9G36NzOeSap90UIK0yMA/NhKJhqlSGtNDORNRaIbQfzjXDrQa0ytJ6mNRGz/Q==}
@@ -1722,9 +1731,9 @@ snapshots:
std-env: 3.10.0
yoctocolors: 2.1.2
'@mariozechner/pi-agent-core@0.56.3(ws@8.19.0)(zod@4.3.6)':
'@mariozechner/pi-agent-core@0.63.1(ws@8.19.0)(zod@4.3.6)':
dependencies:
'@mariozechner/pi-ai': 0.56.3(ws@8.19.0)(zod@4.3.6)
'@mariozechner/pi-ai': 0.63.1(ws@8.19.0)(zod@4.3.6)
transitivePeerDependencies:
- '@modelcontextprotocol/sdk'
- aws-crt
@@ -1734,7 +1743,7 @@ snapshots:
- ws
- zod
'@mariozechner/pi-ai@0.56.3(ws@8.19.0)(zod@4.3.6)':
'@mariozechner/pi-ai@0.63.1(ws@8.19.0)(zod@4.3.6)':
dependencies:
'@anthropic-ai/sdk': 0.73.0(zod@4.3.6)
'@aws-sdk/client-bedrock-runtime': 3.1002.0
@@ -1758,13 +1767,14 @@ snapshots:
- ws
- zod
'@mariozechner/pi-coding-agent@0.56.3(ws@8.19.0)(zod@4.3.6)':
'@mariozechner/pi-coding-agent@0.63.1(ws@8.19.0)(zod@4.3.6)':
dependencies:
'@mariozechner/jiti': 2.6.5
'@mariozechner/pi-agent-core': 0.56.3(ws@8.19.0)(zod@4.3.6)
'@mariozechner/pi-ai': 0.56.3(ws@8.19.0)(zod@4.3.6)
'@mariozechner/pi-tui': 0.56.3
'@mariozechner/pi-agent-core': 0.63.1(ws@8.19.0)(zod@4.3.6)
'@mariozechner/pi-ai': 0.63.1(ws@8.19.0)(zod@4.3.6)
'@mariozechner/pi-tui': 0.63.1
'@silvia-odwyer/photon-node': 0.3.4
ajv: 8.18.0
chalk: 5.6.2
cli-highlight: 2.1.11
diff: 8.0.3
@@ -1790,7 +1800,7 @@ snapshots:
- ws
- zod
'@mariozechner/pi-tui@0.56.3':
'@mariozechner/pi-tui@0.63.1':
dependencies:
'@types/mime-types': 2.1.4
chalk: 5.6.2
@@ -2166,6 +2176,8 @@ snapshots:
'@types/retry@0.12.0': {}
'@types/turndown@5.0.6': {}
'@types/yauzl@2.10.3':
dependencies:
'@types/node': 25.3.3
+8 -7
View File
@@ -135,11 +135,11 @@ export default function(pi: ExtensionAPI) {
// Fire-and-forget: run auto-naming in background without blocking
const doAutoName = async () => {
const apiKey = await ctx.modelRegistry.getApiKey(AUTO_NAME_MODEL);
log(`Got API key: ${apiKey ? "yes" : "no"}`);
const auth = await ctx.modelRegistry.getApiKeyAndHeaders(AUTO_NAME_MODEL);
log(`Got API key: ${auth.ok ? "yes" : "no"}`);
if (!apiKey) {
log("No API key available, aborting");
if (!auth.ok) {
log(`No API key available, aborting: ${auth.error}`);
return;
}
@@ -157,7 +157,7 @@ export default function(pi: ExtensionAPI) {
const response = await complete(
AUTO_NAME_MODEL,
{ systemPrompt: SYSTEM_PROMPT, messages: [userMessage] },
{ apiKey },
{ apiKey: auth.apiKey, headers: auth.headers },
);
log(`Response received, stopReason: ${response.stopReason}`);
@@ -273,7 +273,8 @@ export default function(pi: ExtensionAPI) {
loader.onAbort = () => done(null);
const doGenerate = async () => {
const apiKey = await ctx.modelRegistry.getApiKey(AUTO_NAME_MODEL);
const auth = await ctx.modelRegistry.getApiKeyAndHeaders(AUTO_NAME_MODEL);
if (!auth.ok) throw new Error(auth.error);
const userMessage: Message = {
role: "user",
@@ -289,7 +290,7 @@ export default function(pi: ExtensionAPI) {
const response = await complete(
AUTO_NAME_MODEL,
{ systemPrompt: SYSTEM_PROMPT, messages: [userMessage] },
{ apiKey, signal: loader.signal },
{ apiKey: auth.apiKey, headers: auth.headers, signal: loader.signal },
);
if (response.stopReason === "aborted") {
@@ -0,0 +1,60 @@
import { existsSync, statSync } from "node:fs";
import { dirname, join, resolve } from "node:path";
import type { ExtensionAPI } from "@mariozechner/pi-coding-agent";
function isDirectory(path: string): boolean {
  // True only when the path exists and stats as a directory; any stat
  // failure (missing path, permission error) is treated as "not a directory".
  try {
    const stats = statSync(path);
    return stats.isDirectory();
  } catch {
    return false;
  }
}
function walkUpDirectories(startDir: string, stopDir?: string): string[] {
  // Collect startDir and each of its ancestors, innermost first.
  // Walking stops after emitting stopDir (when provided and encountered)
  // or after emitting the filesystem root, whichever comes first.
  const chain: string[] = [];
  let dir = resolve(startDir);
  for (;;) {
    chain.push(dir);
    if (stopDir !== undefined && dir === stopDir) {
      break;
    }
    const up = dirname(dir);
    if (up === dir) {
      break; // dirname is a fixed point only at the filesystem root
    }
    dir = up;
  }
  return chain;
}
function findNearestGitRoot(startDir: string): string | undefined {
  // Nearest ancestor (startDir included) containing a `.git` entry, or
  // undefined when none is found. existsSync matches both a `.git`
  // directory and a `.git` file (worktrees/submodules).
  const candidates = walkUpDirectories(startDir);
  return candidates.find((dir) => existsSync(join(dir, ".git")));
}
function findClaudeSkillDirs(cwd: string): string[] {
  // Gather every `.claude/skills` directory from cwd up to the enclosing
  // git root; when cwd is not inside a repo the walk continues to the
  // filesystem root (walkUpDirectories treats an undefined stop the same way).
  const stopAt = findNearestGitRoot(cwd);
  const skillDirs: string[] = [];
  for (const dir of walkUpDirectories(cwd, stopAt)) {
    const candidate = join(dir, ".claude", "skills");
    if (isDirectory(candidate)) {
      skillDirs.push(candidate);
    }
  }
  return skillDirs;
}
export default function(pi: ExtensionAPI) {
  // Contribute Claude skill directories during resource discovery.
  // Returning undefined signals that this extension adds nothing.
  pi.on("resources_discover", (event) => {
    const skillPaths = findClaudeSkillDirs(event.cwd);
    return skillPaths.length > 0 ? { skillPaths } : undefined;
  });
}
+80
View File
@@ -14,6 +14,9 @@
*/
import type { ExtensionAPI, ExtensionCommandContext } from "@mariozechner/pi-coding-agent";
import * as fs from "node:fs";
import * as path from "node:path";
import * as os from "node:os";
interface ToolTimeout {
toolCallId: string;
@@ -28,6 +31,8 @@ interface ToolTimeout {
// Configuration
let timeoutSeconds = 30;
let enabled = true;
const SETTINGS_NAMESPACE = "slowtool";
const globalSettingsPath = path.join(os.homedir(), ".pi", "agent", "settings.json");
// Track running tools
const runningTools: Map<string, ToolTimeout> = new Map();
@@ -43,6 +48,55 @@ function formatDuration(ms: number): string {
return `${minutes}m ${remainingSeconds}s`;
}
function asRecord(value: unknown): Record<string, unknown> | undefined {
  // Narrow an unknown value to a keyed object; null and primitives
  // yield undefined so callers can `?? {}` a safe default.
  if (value !== null && value !== undefined && typeof value === "object") {
    return value as Record<string, unknown>;
  }
  return undefined;
}
function readSettingsFile(filePath: string): Record<string, unknown> {
  // Best-effort settings read: a missing file, unreadable file, invalid
  // JSON, or a non-object top-level value all collapse to `{}` so callers
  // never have to handle a failure case.
  try {
    if (!fs.existsSync(filePath)) {
      return {};
    }
    const parsed: unknown = JSON.parse(fs.readFileSync(filePath, "utf-8"));
    return asRecord(parsed) ?? {};
  } catch {
    return {};
  }
}
function loadGlobalConfig(): { timeoutSeconds: number; enabled: boolean } {
  // Read the persisted slowtool settings, falling back to the defaults
  // (30s threshold, notifications on) for any missing or invalid value.
  const slowtool = asRecord(readSettingsFile(globalSettingsPath)[SETTINGS_NAMESPACE]);
  const rawTimeout = slowtool?.timeoutSeconds;
  // Only accept finite numbers >= 1; floor so the threshold is whole seconds.
  const timeoutSeconds =
    typeof rawTimeout === "number" && Number.isFinite(rawTimeout) && rawTimeout >= 1
      ? Math.floor(rawTimeout)
      : 30;
  const rawEnabled = slowtool?.enabled;
  const enabled = typeof rawEnabled === "boolean" ? rawEnabled : true;
  return { timeoutSeconds, enabled };
}
function saveGlobalConfig(next: { timeoutSeconds: number; enabled: boolean }): boolean {
  // Merge the new values into the slowtool namespace of the global settings
  // file and write it back, creating parent directories as needed.
  // Returns false on any I/O or parse failure instead of throwing.
  try {
    const settings = readSettingsFile(globalSettingsPath);
    settings[SETTINGS_NAMESPACE] = {
      ...(asRecord(settings[SETTINGS_NAMESPACE]) ?? {}),
      timeoutSeconds: next.timeoutSeconds,
      enabled: next.enabled,
    };
    fs.mkdirSync(path.dirname(globalSettingsPath), { recursive: true });
    fs.writeFileSync(globalSettingsPath, `${JSON.stringify(settings, null, 2)}\n`, "utf-8");
    return true;
  } catch {
    return false;
  }
}
function getCommandPreview(args: unknown): string | undefined {
if (!args) return undefined;
const anyArgs = args as Record<string, unknown>;
@@ -77,6 +131,29 @@ function notifyTimeout(pi: ExtensionAPI, tool: ToolTimeout): void {
// ============ EVENT HANDLERS ============
export default function(pi: ExtensionAPI) {
const applyPersistedConfig = () => {
const persisted = loadGlobalConfig();
timeoutSeconds = persisted.timeoutSeconds;
enabled = persisted.enabled;
};
const persistCurrentConfig = (ctx: ExtensionCommandContext): void => {
const ok = saveGlobalConfig({ timeoutSeconds, enabled });
if (!ok) {
ctx.ui.notify("Failed to persist slowtool settings", "warning");
}
};
applyPersistedConfig();
pi.on("session_start", async (_event, _ctx) => {
applyPersistedConfig();
});
pi.on("session_switch", async (_event, _ctx) => {
applyPersistedConfig();
});
// Register commands
pi.registerCommand("slowtool:timeout", {
description: "Set timeout threshold in seconds (default: 30)",
@@ -91,6 +168,7 @@ export default function(pi: ExtensionAPI) {
return;
}
timeoutSeconds = newTimeout;
persistCurrentConfig(ctx);
ctx.ui.notify(`Timeout set to ${timeoutSeconds}s`, "info");
},
});
@@ -99,6 +177,7 @@ export default function(pi: ExtensionAPI) {
description: "Enable slow tool notifications",
handler: async (_args: string, ctx: ExtensionCommandContext) => {
enabled = true;
persistCurrentConfig(ctx);
ctx.ui.notify("Slow tool notifications enabled", "info");
},
});
@@ -107,6 +186,7 @@ export default function(pi: ExtensionAPI) {
description: "Disable slow tool notifications",
handler: async (_args: string, ctx: ExtensionCommandContext) => {
enabled = false;
persistCurrentConfig(ctx);
ctx.ui.notify("Slow tool notifications disabled", "info");
},
});
+2 -7
View File
@@ -6,7 +6,7 @@
* - Injects timestamp markers without triggering extra turns
*/
import type { ExtensionAPI } from "@mariozechner/pi-coding-agent";
import type { ExtensionAPI, ExtensionContext } from "@mariozechner/pi-coding-agent";
import { Box, Text } from "@mariozechner/pi-tui";
// Track session time
@@ -41,12 +41,7 @@ function formatDuration(ms: number): string {
}
export default function (pi: ExtensionAPI) {
const updateStatus = (ctx: {
ui: {
setStatus: (id: string, text: string | undefined) => void;
theme: { fg: (color: string, text: string) => string };
};
}) => {
const updateStatus = (ctx: ExtensionContext) => {
const elapsed = Date.now() - sessionStart;
let status = ctx.ui.theme.fg("dim", `${formatElapsed(elapsed)}`);
if (lastTurnDuration !== null) {
+3 -1
View File
@@ -1,3 +1,5 @@
{
"selectModel": "ctrl+space"
"app.model.select": "ctrl+space",
"tui.input.newLine": ["shift+enter"],
"tui.input.submit": ["enter"]
}
@@ -0,0 +1,99 @@
---
name: jj-issue-workspaces
description: Create one Jujutsu workspace per issue, base them on an updated mainline bookmark like master, optionally create feature bookmarks, and open a zellij tab running pi in each workspace. Use when the user wants to fan out work across multiple issues, especially from a screenshot, Linear board, or issue list.
---
# JJ Issue Workspaces
This skill sets up a parallel issue workflow with `jj workspaces`.
Use it when the user wants any of the following:
- one workspace per issue
- multiple issues opened side by side
- a zellij tab for each issue
- `pi` opened in each issue workspace with a task-specific prompt
- issue fan-out from a screenshot, Linear board, or manually listed issues
## Workflow
1. Confirm the target repo and verify it is a `jj` repo.
2. If the user gave a screenshot path, use the `read` tool on the screenshot first and extract the issue keys and titles.
3. Decide the base bookmark/revision, usually `master` or `main`.
4. Run the helper script to:
- fetch the base bookmark from `origin`
- create sibling workspaces like `../Phoenix-spa-748`
- create bookmarks like `feature/spa-748`
- optionally open one zellij tab per workspace and launch `pi`
5. Tell the user which workspaces and tabs were created.
## Helper script
Use the helper script in this skill:
```bash
./scripts/jj-workspace-fanout.sh --help
```
Run it from anywhere. Pass absolute paths when convenient.
## Common usage
### Create workspaces and bookmarks only
```bash
./scripts/jj-workspace-fanout.sh \
--repo /path/to/repo \
--base master \
--issue "SPA-748=Wrap text in credits line items" \
--issue "SPA-428=Implement \"Downgrade\" Mimir modal (maximalist)" \
--issue "SPA-754=Resize seat count picker"
```
### Create workspaces, bookmarks, zellij tabs, and launch pi
```bash
./scripts/jj-workspace-fanout.sh \
--repo /path/to/repo \
--base master \
--session attio \
--open-pi \
--issue "SPA-748=Wrap text in credits line items" \
--issue "SPA-428=Implement \"Downgrade\" Mimir modal (maximalist)" \
--issue "SPA-754=Resize seat count picker"
```
### Recreate existing workspaces from scratch
```bash
./scripts/jj-workspace-fanout.sh \
--repo /path/to/repo \
--base master \
--session attio \
--open-pi \
--reset-existing \
--issue "SPA-748=Wrap text in credits line items"
```
## Defaults and conventions
- Workspace names use the lowercased issue key, for example `spa-748`
- Workspace directories are created beside the repo, for example `../Phoenix-spa-748`
- Bookmark names default to `feature/<issue-key-lowercase>`
- Base revision defaults to `master`
- Remote defaults to `origin`
- If `--open-pi` is used, the script launches `pi` in each workspace with a task-specific prompt
## Recommended agent behavior
When using this skill:
- Prefer `jj` over `git`
- Check `jj workspace list` before changing anything
- If the user says to update `master` or `main` first, let the script fetch that base revision before creating workspaces
- If the user wants an existing set recreated, use `--reset-existing`
- If zellij tabs already exist and the user wants a clean retry, close those tabs first or recreate the session
## Notes
- The script does not delete existing workspaces unless `--reset-existing` is provided.
- `--open-pi` requires a zellij session name, either via `--session <name>` or `ZELLIJ_SESSION_NAME`.
- If the repo uses `main` instead of `master`, pass `--base main`.
@@ -0,0 +1,292 @@
#!/usr/bin/env bash
set -euo pipefail
# Print the full help text to stdout (user-visible; keep in sync with the
# option parsing loop below).
usage() {
  cat <<'EOF'
Create one jj workspace per issue, optionally create bookmarks, and optionally open zellij tabs running pi.

Usage:
  jj-workspace-fanout.sh [options] --issue "KEY=Title" [--issue "KEY=Title" ...]

Options:
  --repo PATH           Repo root (default: current directory)
  --base REV            Base revision/bookmark (default: master)
  --remote NAME         Git remote to fetch from (default: origin)
  --issue KEY=TITLE     Issue key and title (repeatable)
  --session NAME        Zellij session name (defaults to ZELLIJ_SESSION_NAME if set)
  --open-pi             Open a zellij tab per workspace and launch pi
  --no-fetch            Skip jj git fetch
  --no-bookmarks        Do not create feature/<issue> bookmarks
  --keep-existing       Skip creation for existing workspaces instead of failing
  --reset-existing      Forget and delete existing workspaces before recreating them
  --prompt-suffix TEXT  Extra text appended to each pi prompt
  --pi-cmd CMD          pi command to launch (default: pi)
  --dry-run             Print planned actions without making changes
  --help                Show this help

Examples:
  jj-workspace-fanout.sh \
    --repo /path/to/Phoenix \
    --base master \
    --issue "SPA-748=Wrap text in credits line items" \
    --issue "SPA-754=Resize seat count picker"

  jj-workspace-fanout.sh \
    --repo /path/to/Phoenix \
    --base master \
    --session attio \
    --open-pi \
    --issue "SPA-748=Wrap text in credits line items"
EOF
}
# Abort the whole script when a required executable is not on PATH.
require_cmd() {
  command -v "$1" >/dev/null 2>&1 && return 0
  echo "error: missing required command: $1" >&2
  exit 1
}
# Quote one argument so it can be safely embedded in a shell command string.
shell_escape() {
  local value="$1"
  printf '%q' "$value"
}
# Emit a progress line prefixed with the skill name.
log() {
  local message="$*"
  printf '[jj-issue-workspaces] %s\n' "$message"
}
# Execute the given command, or just print it (shell-quoted) when DRY_RUN=1.
run() {
  if [[ "$DRY_RUN" -ne 1 ]]; then
    "$@"
    return
  fi
  printf '[dry-run] '
  printf '%q ' "$@"
  printf '\n'
}
# True when the named workspace appears in `jj workspace list`.
# List lines look like "name: ...", so compare against the first
# colon-delimited field with an exact, fixed-string match.
workspace_exists() {
  local workspace_name="$1"
  jj -R "$REPO" workspace list | cut -d: -f1 | grep -Fxq "$workspace_name"
}
# True when the bookmark already exists in the given workspace.
# NOTE(review): the bookmark name is interpolated into an ERE unescaped;
# this is safe for the feature/<slug> names this script generates (no
# regex metacharacters), but would misfire for arbitrary names — confirm
# if bookmark naming ever changes.
bookmark_exists() {
  local workspace_dir="$1"
  local bookmark_name="$2"
  jj -R "$workspace_dir" bookmark list "$bookmark_name" 2>/dev/null | grep -Eq "^${bookmark_name}:"
}
# Close the named zellij tab in the session if it currently exists.
# The query is best-effort: a failing `query-tab-names` (e.g. session not
# running yet) yields an empty list and the function becomes a no-op.
close_tab_if_exists() {
  local session_name="$1"
  local tab_name="$2"
  local tabs
  tabs=$(zellij --session "$session_name" action query-tab-names 2>/dev/null || true)
  if printf '%s\n' "$tabs" | grep -Fxq "$tab_name"; then
    log "closing existing zellij tab $tab_name"
    # zellij can only close the focused tab, so focus it first.
    run zellij --session "$session_name" action go-to-tab-name "$tab_name"
    run zellij --session "$session_name" action close-tab
  fi
}
# Open a fresh zellij tab for one workspace and type a command that cds into
# the workspace and launches pi with the per-issue prompt.
# The workspace dir and prompt are shell-escaped; $PI_CMD is deliberately
# interpolated unescaped so users can pass a command with arguments.
launch_pi_tab() {
  local session_name="$1"
  local tab_name="$2"
  local workspace_dir="$3"
  local prompt="$4"
  local cmd
  cmd="cd $(shell_escape "$workspace_dir") && pwd && $PI_CMD $(shell_escape "$prompt")"
  # Recreate the tab from scratch so a retry does not stack duplicates.
  close_tab_if_exists "$session_name" "$tab_name"
  run zellij --session "$session_name" action new-tab --name "$tab_name"
  # Type the command into the new tab, then send a newline (byte 10) to run it.
  run zellij --session "$session_name" action write-chars "$cmd"
  run zellij --session "$session_name" action write 10
}
# ---------- defaults ----------
REPO="$(pwd)"
BASE="master"
REMOTE="origin"
SESSION="${ZELLIJ_SESSION_NAME:-}"
OPEN_PI=0
FETCH=1
CREATE_BOOKMARKS=1
KEEP_EXISTING=0
RESET_EXISTING=0
DRY_RUN=0
PROMPT_SUFFIX=""
PI_CMD="pi"
declare -a ISSUES=()

# ---------- argument parsing ----------
while [[ $# -gt 0 ]]; do
  case "$1" in
    --repo)
      REPO="$2"
      shift 2
      ;;
    --base)
      BASE="$2"
      shift 2
      ;;
    --remote)
      REMOTE="$2"
      shift 2
      ;;
    --issue)
      ISSUES+=("$2")
      shift 2
      ;;
    --session)
      SESSION="$2"
      shift 2
      ;;
    --open-pi)
      OPEN_PI=1
      shift
      ;;
    --no-fetch)
      FETCH=0
      shift
      ;;
    --no-bookmarks)
      CREATE_BOOKMARKS=0
      shift
      ;;
    --keep-existing)
      KEEP_EXISTING=1
      shift
      ;;
    --reset-existing)
      RESET_EXISTING=1
      shift
      ;;
    --prompt-suffix)
      PROMPT_SUFFIX="$2"
      shift 2
      ;;
    --pi-cmd)
      PI_CMD="$2"
      shift 2
      ;;
    --dry-run)
      DRY_RUN=1
      shift
      ;;
    --help|-h)
      usage
      exit 0
      ;;
    *)
      echo "error: unknown argument: $1" >&2
      usage >&2
      exit 1
      ;;
  esac
done

# ---------- validation and setup ----------
if [[ ${#ISSUES[@]} -eq 0 ]]; then
  echo "error: at least one --issue KEY=TITLE is required" >&2
  exit 1
fi

# The two existing-workspace policies are mutually exclusive.
if [[ "$KEEP_EXISTING" -eq 1 && "$RESET_EXISTING" -eq 1 ]]; then
  echo "error: --keep-existing and --reset-existing cannot be combined" >&2
  exit 1
fi

# Canonicalize the repo path; workspaces are created as siblings of it.
REPO="$(cd "$REPO" && pwd)"
PARENT_DIR="$(dirname "$REPO")"
REPO_BASENAME="$(basename "$REPO")"

require_cmd jj

if [[ "$OPEN_PI" -eq 1 ]]; then
  require_cmd zellij
  if [[ -z "$SESSION" ]]; then
    echo "error: --open-pi requires --session <name> or ZELLIJ_SESSION_NAME" >&2
    exit 1
  fi
fi

if [[ ! -d "$REPO/.jj" ]]; then
  echo "error: repo is not a jj repository: $REPO" >&2
  exit 1
fi

if [[ "$FETCH" -eq 1 ]]; then
  log "fetching $BASE from $REMOTE"
  run jj -R "$REPO" git fetch --remote "$REMOTE" --branch "$BASE"
fi

# Fail fast (jj errors, set -e aborts) when the base revision does not resolve.
log "validating base revision $BASE"
run jj -R "$REPO" log -r "$BASE" --no-pager

# ---------- per-issue workspace creation ----------
created_workspaces=()
for issue in "${ISSUES[@]}"; do
  if [[ "$issue" != *=* ]]; then
    echo "error: issue must be formatted as KEY=TITLE: $issue" >&2
    exit 1
  fi
  # Split "KEY=Title" on the first '=' so titles may themselves contain '='.
  issue_key="${issue%%=*}"
  issue_title="${issue#*=}"
  issue_slug="$(printf '%s' "$issue_key" | tr '[:upper:]' '[:lower:]')"
  workspace_name="$issue_slug"
  workspace_dir="$PARENT_DIR/${REPO_BASENAME}-${issue_slug}"
  bookmark_name="feature/$issue_slug"

  prompt="Work on ${issue_key}: ${issue_title}. You are in the dedicated jj workspace for this issue. First inspect the relevant code, identify the main components involved, and propose a short plan before editing."
  if [[ -n "$PROMPT_SUFFIX" ]]; then
    prompt+=" ${PROMPT_SUFFIX}"
  fi

  # Existing-workspace policy: reset, keep, or (default) abort.
  if workspace_exists "$workspace_name" || [[ -e "$workspace_dir" ]]; then
    if [[ "$RESET_EXISTING" -eq 1 ]]; then
      log "resetting existing workspace $workspace_name"
      if workspace_exists "$workspace_name"; then
        run jj -R "$REPO" workspace forget "$workspace_name"
      fi
      run rm -rf "$workspace_dir"
    elif [[ "$KEEP_EXISTING" -eq 1 ]]; then
      log "keeping existing workspace $workspace_name at $workspace_dir"
    else
      echo "error: workspace already exists: $workspace_name ($workspace_dir). Use --keep-existing or --reset-existing." >&2
      exit 1
    fi
  fi

  if ! workspace_exists "$workspace_name"; then
    log "creating workspace $workspace_name at $workspace_dir"
    run jj -R "$REPO" workspace add --name "$workspace_name" -r "$BASE" "$workspace_dir"
  fi

  if [[ "$CREATE_BOOKMARKS" -eq 1 ]]; then
    log "ensuring bookmark $bookmark_name exists"
    # `bookmark set` moves an existing bookmark to @; `create` is required
    # the first time because `set` on a missing bookmark would fail.
    if bookmark_exists "$workspace_dir" "$bookmark_name"; then
      run jj -R "$workspace_dir" bookmark set "$bookmark_name" -r @
    else
      run jj -R "$workspace_dir" bookmark create "$bookmark_name"
    fi
  fi

  if [[ "$OPEN_PI" -eq 1 ]]; then
    log "opening zellij tab $workspace_name in session $SESSION"
    # In dry-run mode `run` prints this function invocation instead of
    # descending into it, so no zellij commands are issued at all.
    run launch_pi_tab "$SESSION" "$workspace_name" "$workspace_dir" "$prompt"
  fi

  created_workspaces+=("$workspace_name:$workspace_dir:$bookmark_name")
done

# ---------- summary ----------
printf '\nCreated/updated workspaces:\n'
for item in "${created_workspaces[@]}"; do
  # Unpack the "name:dir:bookmark" record built in the loop above.
  IFS=':' read -r workspace_name workspace_dir bookmark_name <<<"$item"
  printf ' - %s -> %s' "$workspace_name" "$workspace_dir"
  if [[ "$CREATE_BOOKMARKS" -eq 1 ]]; then
    printf ' [%s]' "$bookmark_name"
  fi
  printf '\n'
done

if [[ "$OPEN_PI" -eq 1 ]]; then
  printf '\nZellij session: %s\n' "$SESSION"
fi
+105
View File
@@ -0,0 +1,105 @@
---
name: linear
description: Access Linear issue tracker - search, view, create, update issues, list teams/projects, and manage comments. Use when the user asks about Linear issues, tasks, tickets, or project management in Linear.
---
# Linear
Manage Linear issues, projects, and teams via the Linear SDK.
## Setup
Run once before first use:
```bash
cd {baseDir} && npm install
```
Requires a `LINEAR_API_KEY` environment variable. Generate one at: https://linear.app/settings/api (Personal API keys).
Set it in your shell profile or pi settings:
```bash
export LINEAR_API_KEY=lin_api_...
```
## Current User
```bash
node {baseDir}/linear-me.js # Show authenticated user
node {baseDir}/linear-me.js --issues # Show user + their active issues
```
## Search Issues
```bash
node {baseDir}/linear-search.js "query" # Text search
node {baseDir}/linear-search.js "query" -n 20 # More results
node {baseDir}/linear-search.js "query" --team ENG # Filter by team
node {baseDir}/linear-search.js "query" --state "In Progress" # Filter by state
```
## List Issues (with filters)
```bash
node {baseDir}/linear-issues.js # All recent issues
node {baseDir}/linear-issues.js --team ENG # By team
node {baseDir}/linear-issues.js --state "In Progress" # By state
node {baseDir}/linear-issues.js --assignee me # My issues
node {baseDir}/linear-issues.js --assignee "John" # By assignee name
node {baseDir}/linear-issues.js --label "Bug" # By label
node {baseDir}/linear-issues.js --project "Q1 Goals" # By project
node {baseDir}/linear-issues.js --team ENG --state Todo -n 50 # Combined filters
```
## View Issue Details
```bash
node {baseDir}/linear-issue.js ATT-1234 # Full issue details
node {baseDir}/linear-issue.js ATT-1234 --comments # Include comments
```
## Create Issue
```bash
node {baseDir}/linear-create.js --team ENG --title "Fix login bug"
node {baseDir}/linear-create.js --team ENG --title "New feature" --description "Details here" --state Todo --priority 2 --assignee me --label "Feature"
node {baseDir}/linear-create.js --team ENG --title "Sub-task" --parent ATT-100
```
Priority values: 0=None, 1=Urgent, 2=High, 3=Medium, 4=Low
## Update Issue
```bash
node {baseDir}/linear-update.js ATT-1234 --state "In Progress"
node {baseDir}/linear-update.js ATT-1234 --assignee me --priority 2
node {baseDir}/linear-update.js ATT-1234 --title "New title" --description "Updated desc"
```
## Add Comment
```bash
node {baseDir}/linear-comment.js ATT-1234 "This is done in PR #567"
```
## List Teams
```bash
node {baseDir}/linear-teams.js
```
## List Projects
```bash
node {baseDir}/linear-projects.js # All projects
node {baseDir}/linear-projects.js --team ENG # By team
```
## Tips
- Use `--assignee me` to filter by the authenticated user
- Issue identifiers follow the pattern `TEAM-NUMBER` (e.g. `ATT-1234`, `ENG-567`)
- Descriptions support markdown formatting
- State names are case-insensitive (e.g. "todo", "Todo", "TODO" all work)
- When creating issues, the team key is required; use `linear-teams.js` to find available teams
+23
View File
@@ -0,0 +1,23 @@
import { LinearClient } from "@linear/sdk";
export function getClient() {
  // Build a Linear SDK client from LINEAR_API_KEY; exit with guidance when
  // the key is missing so every CLI script fails the same way.
  const apiKey = process.env.LINEAR_API_KEY;
  if (apiKey) {
    return new LinearClient({ apiKey });
  }
  console.error("Error: LINEAR_API_KEY environment variable is required.");
  console.error(
    "Generate one at: https://linear.app/settings/api (Personal API keys)"
  );
  process.exit(1);
}
export function formatDate(date) {
  // Normalize any Date-constructible value to "YYYY-MM-DD" (UTC);
  // falsy input yields an empty string.
  if (!date) {
    return "";
  }
  const iso = new Date(date).toISOString();
  return iso.slice(0, iso.indexOf("T"));
}
export function truncate(str, len = 120) {
  // Clamp long strings to `len` characters, appending an ellipsis when cut;
  // falsy input yields an empty string.
  if (!str) {
    return "";
  }
  if (str.length <= len) {
    return str;
  }
  return `${str.slice(0, len)}…`;
}
+29
View File
@@ -0,0 +1,29 @@
#!/usr/bin/env node
// Add a comment to a Linear issue
// Usage: linear-comment.js <identifier> <body>
import { getClient } from "./lib.js";

const args = process.argv.slice(2);
const identifier = args[0];
// Everything after the identifier is joined into the comment body.
const body = args.slice(1).join(" ");

if (!identifier || !body) {
  console.log("Usage: linear-comment.js <identifier> <body>");
  console.log("\nExamples:");
  console.log(' linear-comment.js ATT-1234 "This is fixed in the latest PR"');
  process.exit(1);
}

// Resolve the issue by exact team key + number rather than full-text search:
// searchIssues ranks by relevance, so its first hit is not guaranteed to be
// the issue whose identifier was given. This mirrors linear-issue.js.
const parts = identifier.match(/^([A-Za-z]+)-(\d+)$/);
if (!parts) {
  console.error(`Invalid identifier format: ${identifier}. Expected format: TEAM-123`);
  process.exit(1);
}

const client = getClient();
const issues = await client.issues({
  filter: {
    team: { key: { eq: parts[1].toUpperCase() } },
    number: { eq: parseInt(parts[2], 10) },
  },
  first: 1,
});
const issue = issues.nodes[0];
if (!issue) {
  console.error(`Issue '${identifier}' not found.`);
  process.exit(1);
}

await client.createComment({ issueId: issue.id, body });
console.log(`Comment added to ${issue.identifier}.`);
+102
View File
@@ -0,0 +1,102 @@
#!/usr/bin/env node
// Create a new Linear issue
// Usage: linear-create.js --team <key> --title <title> [--description <desc>] [--state <name>] [--priority <0-4>] [--assignee <name|me>] [--label <name>] [--parent <identifier>]
import { getClient } from "./lib.js";

const args = process.argv.slice(2);

// Pull the value following `flag` out of args (destructively, via splice).
// Returns null when the flag is absent or has no value; only the first
// occurrence of a repeated flag is consumed.
function extractArg(flag) {
  const idx = args.indexOf(flag);
  if (idx !== -1 && args[idx + 1]) {
    const val = args[idx + 1];
    args.splice(idx, 2);
    return val;
  }
  return null;
}

const teamKey = extractArg("--team");
const title = extractArg("--title");
const description = extractArg("--description");
const stateName = extractArg("--state");
const priority = extractArg("--priority");
const assigneeName = extractArg("--assignee");
const labelName = extractArg("--label");
const parentId = extractArg("--parent");

// Team and title are the only required inputs; everything else is optional.
if (!teamKey || !title) {
  console.log("Usage: linear-create.js --team <key> --title <title> [options]");
  console.log("\nRequired:");
  console.log(" --team <key> Team key (e.g. ENG)");
  console.log(' --title <title> Issue title');
  console.log("\nOptional:");
  console.log(" --description <text> Issue description (markdown)");
  console.log(" --state <name> Initial state (e.g. 'Todo')");
  console.log(" --priority <0-4> Priority: 0=None, 1=Urgent, 2=High, 3=Medium, 4=Low");
  console.log(" --assignee <name|me> Assignee name or 'me'");
  console.log(" --label <name> Label name");
  console.log(" --parent <id> Parent issue identifier (e.g. ATT-100)");
  process.exit(1);
}

const client = getClient();

// Resolve team
const teams = await client.teams({ filter: { key: { eq: teamKey.toUpperCase() } } });
const team = teams.nodes[0];
if (!team) {
  console.error(`Team '${teamKey}' not found.`);
  process.exit(1);
}

// Base payload for createIssue; optional fields are added below only when
// their corresponding lookup succeeds.
const input = {
  teamId: team.id,
  title,
};
if (description) input.description = description;
if (priority) input.priority = parseInt(priority, 10);

// Resolve state (case-insensitive name match within the team's workflow).
// Lookup misses warn and fall back to the team default instead of aborting.
if (stateName) {
  const states = await team.states();
  const state = states.nodes.find(
    (s) => s.name.toLowerCase() === stateName.toLowerCase()
  );
  if (state) input.stateId = state.id;
  else console.warn(`Warning: State '${stateName}' not found, using default.`);
}

// Resolve assignee: "me" maps to the authenticated viewer, anything else is a
// case-insensitive substring match on user names (first hit wins).
if (assigneeName) {
  if (assigneeName.toLowerCase() === "me") {
    const me = await client.viewer;
    input.assigneeId = me.id;
  } else {
    const users = await client.users({ filter: { name: { containsIgnoreCase: assigneeName } } });
    if (users.nodes[0]) input.assigneeId = users.nodes[0].id;
    else console.warn(`Warning: User '${assigneeName}' not found.`);
  }
}

// Resolve label by exact (case-insensitive) name.
if (labelName) {
  const labels = await client.issueLabels({ filter: { name: { eqIgnoreCase: labelName } } });
  if (labels.nodes[0]) input.labelIds = [labels.nodes[0].id];
  else console.warn(`Warning: Label '${labelName}' not found.`);
}

// Resolve parent via full-text search on the identifier.
// NOTE(review): the top search hit may not be the exact identifier given;
// confirm against how linear-issue.js resolves identifiers precisely.
if (parentId) {
  const parentSearch = await client.searchIssues(parentId, { first: 1 });
  if (parentSearch.nodes[0]) input.parentId = parentSearch.nodes[0].id;
  else console.warn(`Warning: Parent '${parentId}' not found.`);
}

const result = await client.createIssue(input);
// The SDK returns a payload whose `issue` field is itself awaitable.
const issue = await result.issue;
console.log(`Created: ${issue.identifier} - ${issue.title}`);
console.log(`URL: ${issue.url}`);
+87
View File
@@ -0,0 +1,87 @@
#!/usr/bin/env node
// Get details for a specific Linear issue
// Usage: linear-issue.js <identifier> [--comments]
import { getClient, formatDate } from "./lib.js";

const args = process.argv.slice(2);
const showComments = args.includes("--comments");
const filtered = args.filter((a) => a !== "--comments");
const identifier = filtered[0];

if (!identifier) {
  console.log("Usage: linear-issue.js <identifier> [--comments]");
  console.log("\nExamples:");
  console.log(" linear-issue.js ATT-1234");
  console.log(" linear-issue.js ATT-1234 --comments");
  process.exit(1);
}

const client = getClient();

// Parse team key and issue number from identifier (e.g. "SIP-1205")
const parts = identifier.match(/^([A-Za-z]+)-(\d+)$/);
if (!parts) {
  console.error(`Invalid identifier format: ${identifier}. Expected format: TEAM-123`);
  process.exit(1);
}
const teamKey = parts[1].toUpperCase();
const issueNumber = parseInt(parts[2], 10);

// Find the issue by team key + number
const issues = await client.issues({
  filter: {
    team: { key: { eq: teamKey } },
    number: { eq: issueNumber },
  },
  first: 1,
});
const issue = issues.nodes[0];
if (!issue) {
  console.error(`Issue ${identifier} not found.`);
  process.exit(1);
}

// Related records on the SDK issue object are lazy; await each one we print.
const state = await issue.state;
const team = await issue.team;
const assignee = await issue.assignee;
const labels = await issue.labels();
const parent = await issue.parent;
const project = await issue.project;
const cycle = await issue.cycle;

console.log(`=== ${issue.identifier}: ${issue.title} ===`);
console.log(`URL: ${issue.url}`);
console.log(`State: ${state?.name || "Unknown"}`);
console.log(`Priority: ${issue.priorityLabel}`);
console.log(`Team: ${team?.key || "?"}`);
console.log(`Assignee: ${assignee?.name || "Unassigned"}`);
// Optional associations are printed only when present.
if (project) console.log(`Project: ${project.name}`);
if (cycle) console.log(`Cycle: ${cycle.name || cycle.number}`);
if (parent) console.log(`Parent: ${parent.identifier} - ${parent.title}`);
if (labels.nodes.length > 0) {
  console.log(`Labels: ${labels.nodes.map((l) => l.name).join(", ")}`);
}
console.log(`Created: ${formatDate(issue.createdAt)}`);
console.log(`Updated: ${formatDate(issue.updatedAt)}`);
if (issue.dueDate) console.log(`Due: ${issue.dueDate}`);
console.log(`\nDescription:\n${issue.description || "(empty)"}`);

if (showComments) {
  const comments = await issue.comments();
  if (comments.nodes.length > 0) {
    console.log(`\n--- Comments (${comments.nodes.length}) ---`);
    for (const comment of comments.nodes) {
      // Comment author is a lazy relation as well.
      const author = await comment.user;
      console.log(`\n[${formatDate(comment.createdAt)}] ${author?.name || "Unknown"}:`);
      console.log(comment.body);
    }
  } else {
    console.log("\nNo comments.");
  }
}
+90
View File
@@ -0,0 +1,90 @@
#!/usr/bin/env node
// List Linear issues with filters
// Usage: linear-issues.js [--team <key>] [--state <name>] [--assignee <name|me>] [--label <name>] [--project <name>] [-n <num>]
import { getClient, formatDate, truncate } from "./lib.js";

const args = process.argv.slice(2);

// Pull a `<flag> <value>` pair out of `args` (mutating it) and return the
// value, or null when the flag is absent or has no value after it.
function extractArg(flag) {
  const idx = args.indexOf(flag);
  if (idx !== -1 && args[idx + 1]) {
    const val = args[idx + 1];
    args.splice(idx, 2);
    return val;
  }
  return null;
}

// Guard against a non-numeric `-n`: parseInt would yield NaN, which would be
// sent to the API as `first: NaN`. Fall back to the default of 25 instead.
const requested = parseInt(extractArg("-n") || "25", 10);
const numResults = Number.isInteger(requested) && requested > 0 ? requested : 25;
const teamKey = extractArg("--team");
const stateName = extractArg("--state");
const assigneeName = extractArg("--assignee");
const labelName = extractArg("--label");
const projectName = extractArg("--project");

if (args.includes("--help") || args.includes("-h")) {
  console.log("Usage: linear-issues.js [options]");
  console.log("\nOptions:");
  console.log(" --team <key> Filter by team key (e.g. ENG)");
  console.log(" --state <name> Filter by state (e.g. 'In Progress', 'Todo')");
  console.log(" --assignee <name> Filter by assignee name or 'me'");
  console.log(" --label <name> Filter by label name");
  console.log(" --project <name> Filter by project name");
  console.log(" -n <num> Number of results (default: 25)");
  process.exit(0);
}

const client = getClient();

// Build the server-side filter from whichever flags were supplied.
const filter = {};
if (teamKey) {
  filter.team = { key: { eq: teamKey.toUpperCase() } };
}
if (stateName) {
  filter.state = { name: { eqIgnoreCase: stateName } };
}
if (assigneeName) {
  if (assigneeName.toLowerCase() === "me") {
    // "me" resolves to the authenticated user's id.
    const me = await client.viewer;
    filter.assignee = { id: { eq: me.id } };
  } else {
    filter.assignee = { name: { containsIgnoreCase: assigneeName } };
  }
}
if (labelName) {
  filter.labels = { name: { eqIgnoreCase: labelName } };
}
if (projectName) {
  filter.project = { name: { containsIgnoreCase: projectName } };
}

const issues = await client.issues({
  filter,
  first: numResults,
  orderBy: "updatedAt",
});

if (issues.nodes.length === 0) {
  console.log("No issues found matching filters.");
  process.exit(0);
}

// One row per issue: identifier, state, priority, assignee, title.
// (The original also fetched issue.team here but never used it — dropped.)
for (const issue of issues.nodes) {
  const state = await issue.state;
  const assignee = await issue.assignee;
  console.log(
    `${issue.identifier.padEnd(12)} ${(state?.name || "?").padEnd(14)} ${(issue.priorityLabel || "").padEnd(8)} ${(assignee?.name || "Unassigned").padEnd(20)} ${truncate(issue.title, 80)}`
  );
}
console.log(`\n${issues.nodes.length} issue(s) shown.`);
+33
View File
@@ -0,0 +1,33 @@
#!/usr/bin/env node
// Show current authenticated user and their assigned issues
// Usage: linear-me.js [--issues]
import { getClient, truncate } from "./lib.js";

const wantIssues = process.argv.includes("--issues");
const client = getClient();

// `viewer` is the user the API token belongs to.
const viewer = await client.viewer;
console.log(`User: ${viewer.name}`);
console.log(`Email: ${viewer.email}`);
console.log(`ID: ${viewer.id}`);

if (wantIssues) {
  // Only issues in non-terminal workflow states, most recently updated first.
  const page = await viewer.assignedIssues({
    first: 25,
    filter: {
      state: { type: { nin: ["completed", "canceled"] } },
    },
    orderBy: "updatedAt",
  });
  console.log(`\n--- Active Assigned Issues (${page.nodes.length}) ---`);
  for (const issue of page.nodes) {
    const state = await issue.state;
    const columns = [
      issue.identifier.padEnd(12),
      (state?.name || "?").padEnd(14),
      (issue.priorityLabel || "").padEnd(8),
      truncate(issue.title, 80),
    ];
    console.log(columns.join(" "));
  }
}
+45
View File
@@ -0,0 +1,45 @@
#!/usr/bin/env node
// List Linear projects
// Usage: linear-projects.js [--team <key>] [-n <num>]
import { getClient, formatDate } from "./lib.js";

const args = process.argv.slice(2);

// Pull a `<flag> <value>` pair out of `args` (mutating it) and return the
// value, or null when the flag is absent or has no value after it.
function extractArg(flag) {
  const idx = args.indexOf(flag);
  if (idx !== -1 && args[idx + 1]) {
    const val = args[idx + 1];
    args.splice(idx, 2);
    return val;
  }
  return null;
}

// Guard against a non-numeric `-n`: fall back to the default of 25 rather
// than sending NaN to the API.
const requested = parseInt(extractArg("-n") || "25", 10);
const numResults = Number.isInteger(requested) && requested > 0 ? requested : 25;
const teamKey = extractArg("--team");

const client = getClient();
const filter = {};
if (teamKey) {
  filter.accessibleTeams = { key: { eq: teamKey.toUpperCase() } };
}

const projects = await client.projects({ filter, first: numResults });
if (projects.nodes.length === 0) {
  console.log("No projects found.");
  process.exit(0);
}

for (const project of projects.nodes) {
  const lead = await project.lead;
  console.log(`--- ${project.name} ---`);
  // progress is rendered as a whole-number percentage (value scaled by 100).
  console.log(`State: ${project.state} | Progress: ${Math.round(project.progress * 100)}%`);
  if (lead) console.log(`Lead: ${lead.name}`);
  if (project.targetDate) console.log(`Target: ${project.targetDate}`);
  console.log(`URL: ${project.url}`);
  console.log("");
}
+67
View File
@@ -0,0 +1,67 @@
#!/usr/bin/env node
// Search Linear issues by text query
// Usage: linear-search.js <query> [-n <num>] [--team <key>] [--state <name>]
import { getClient, formatDate, truncate } from "./lib.js";

const args = process.argv.slice(2);

// -n <num>: result count. Defaults to 10; non-numeric values also fall back
// to 10 rather than sending NaN to the API.
let numResults = 10;
const nIdx = args.indexOf("-n");
if (nIdx !== -1 && args[nIdx + 1]) {
  const parsed = parseInt(args[nIdx + 1], 10);
  if (Number.isInteger(parsed) && parsed > 0) numResults = parsed;
  args.splice(nIdx, 2);
}

// --team <key>: filtered client-side against each result's team key.
let teamFilter = null;
const teamIdx = args.indexOf("--team");
if (teamIdx !== -1 && args[teamIdx + 1]) {
  teamFilter = args[teamIdx + 1];
  args.splice(teamIdx, 2);
}

// --state <name>: filtered client-side against each result's state name.
let stateFilter = null;
const stateIdx = args.indexOf("--state");
if (stateIdx !== -1 && args[stateIdx + 1]) {
  stateFilter = args[stateIdx + 1];
  args.splice(stateIdx, 2);
}

// Everything left over is the search query itself.
const query = args.join(" ");
if (!query) {
  console.log("Usage: linear-search.js <query> [-n <num>] [--team <key>] [--state <name>]");
  console.log("\nOptions:");
  console.log(" -n <num> Number of results (default: 10)");
  console.log(" --team <key> Filter by team key (e.g. ENG)");
  console.log(" --state <name> Filter by state name (e.g. 'In Progress')");
  process.exit(1);
}

const client = getClient();
const results = await client.searchIssues(query, { first: numResults });

// Team/state filtering happens client-side after the search, so track how
// many results were actually printed: `results.nodes` can be non-empty while
// every entry is filtered out, and the previous `results.nodes.length === 0`
// check then printed nothing at all.
let shown = 0;
for (const issue of results.nodes) {
  const state = await issue.state;
  const team = await issue.team;
  const assignee = await issue.assignee;
  if (teamFilter && team?.key?.toLowerCase() !== teamFilter.toLowerCase()) continue;
  if (stateFilter && state?.name?.toLowerCase() !== stateFilter.toLowerCase()) continue;
  shown += 1;
  console.log(`--- ${issue.identifier} ---`);
  console.log(`Title: ${issue.title}`);
  console.log(`State: ${state?.name || "Unknown"}`);
  console.log(`Priority: ${issue.priorityLabel}`);
  console.log(`Team: ${team?.key || "?"} | Assignee: ${assignee?.name || "Unassigned"}`);
  console.log(`Created: ${formatDate(issue.createdAt)} | Updated: ${formatDate(issue.updatedAt)}`);
  if (issue.description) console.log(`Description: ${truncate(issue.description, 200)}`);
  console.log(`URL: ${issue.url}`);
  console.log("");
}
if (shown === 0) {
  console.log("No results found.");
}
+15
View File
@@ -0,0 +1,15 @@
#!/usr/bin/env node
// List all Linear teams
// Usage: linear-teams.js
import { getClient } from "./lib.js";

const teams = await getClient().teams();
console.log("Teams:");
// One line per team: padded key, then the display name.
teams.nodes.forEach((team) => {
  console.log(` ${team.key.padEnd(8)} ${team.name}`);
});
+93
View File
@@ -0,0 +1,93 @@
#!/usr/bin/env node
// Update an existing Linear issue
// Usage: linear-update.js <identifier> [--title <title>] [--state <name>] [--priority <0-4>] [--assignee <name|me>] [--description <text>]
import { getClient } from "./lib.js";

const args = process.argv.slice(2);
const identifier = args[0];
if (!identifier || identifier.startsWith("--")) {
  console.log("Usage: linear-update.js <identifier> [options]");
  console.log("\nOptions:");
  console.log(" --title <title> New title");
  console.log(" --state <name> New state (e.g. 'In Progress')");
  console.log(" --priority <0-4> New priority");
  console.log(" --assignee <name|me> New assignee");
  console.log(" --description <text> New description");
  process.exit(1);
}
args.shift();

// Pull a `<flag> <value>` pair out of `args` (mutating it) and return the
// value, or null when the flag is absent or has no value after it.
function extractArg(flag) {
  const idx = args.indexOf(flag);
  if (idx !== -1 && args[idx + 1]) {
    const val = args[idx + 1];
    args.splice(idx, 2);
    return val;
  }
  return null;
}

const title = extractArg("--title");
const stateName = extractArg("--state");
const priority = extractArg("--priority");
const assigneeName = extractArg("--assignee");
const description = extractArg("--description");

const client = getClient();

// Find the issue via full-text search; the first hit is assumed to be the
// match for an exact identifier like "ENG-123".
const results = await client.searchIssues(identifier, { first: 1 });
const issue = results.nodes[0];
if (!issue) {
  console.error(`Issue '${identifier}' not found.`);
  process.exit(1);
}

const input = {};
if (title) input.title = title;
if (description) input.description = description;
if (priority) {
  // Validate early: a non-numeric or out-of-range value would otherwise be
  // sent as-is (possibly NaN) and only fail at the API with a vaguer error.
  const parsed = parseInt(priority, 10);
  if (!Number.isInteger(parsed) || parsed < 0 || parsed > 4) {
    console.error(`Invalid priority '${priority}'. Expected an integer from 0 to 4.`);
    process.exit(1);
  }
  input.priority = parsed;
}
// Resolve state name -> workflow state id within the issue's team.
if (stateName) {
  const team = await issue.team;
  const states = await team.states();
  const state = states.nodes.find(
    (s) => s.name.toLowerCase() === stateName.toLowerCase()
  );
  if (state) input.stateId = state.id;
  else {
    console.error(`State '${stateName}' not found. Available states:`);
    for (const s of states.nodes) console.error(` - ${s.name}`);
    process.exit(1);
  }
}
// Resolve assignee name -> user id ("me" is the authenticated user).
if (assigneeName) {
  if (assigneeName.toLowerCase() === "me") {
    const me = await client.viewer;
    input.assigneeId = me.id;
  } else {
    const users = await client.users({ filter: { name: { containsIgnoreCase: assigneeName } } });
    if (users.nodes[0]) input.assigneeId = users.nodes[0].id;
    else {
      console.error(`User '${assigneeName}' not found.`);
      process.exit(1);
    }
  }
}
if (Object.keys(input).length === 0) {
  console.log("No updates specified. Use --title, --state, --priority, --assignee, or --description.");
  process.exit(1);
}

const payload = await client.updateIssue(issue.id, input);
// Report the post-update issue so a changed title is reflected in the output
// (the pre-update `issue` object would show the stale title).
const updated = (await payload.issue) || issue;
console.log(`Updated ${updated.identifier}: ${updated.title}`);
console.log(`URL: ${updated.url}`);
+107
View File
@@ -0,0 +1,107 @@
{
"name": "linear-skill",
"version": "1.0.0",
"lockfileVersion": 3,
"requires": true,
"packages": {
"": {
"name": "linear-skill",
"version": "1.0.0",
"license": "MIT",
"dependencies": {
"@linear/sdk": "^37.0.0"
}
},
"node_modules/@graphql-typed-document-node/core": {
"version": "3.2.0",
"resolved": "https://registry.npmjs.org/@graphql-typed-document-node/core/-/core-3.2.0.tgz",
"integrity": "sha512-mB9oAsNCm9aM3/SOv4YtBMqZbYj10R7dkq8byBqxGY/ncFwhf2oQzMV+LCRlWoDSEBJ3COiR1yeDvMtsoOsuFQ==",
"license": "MIT",
"peerDependencies": {
"graphql": "^0.8.0 || ^0.9.0 || ^0.10.0 || ^0.11.0 || ^0.12.0 || ^0.13.0 || ^14.0.0 || ^15.0.0 || ^16.0.0 || ^17.0.0"
}
},
"node_modules/@linear/sdk": {
"version": "37.0.0",
"resolved": "https://registry.npmjs.org/@linear/sdk/-/sdk-37.0.0.tgz",
"integrity": "sha512-EAZCXtV414Nwtvrwn7Ucu3E8BbYYKsc3HqZCGf1mHUE7FhZGtfISu295DOVv89WhhXlp2N344EMg3K0nnhLxtA==",
"license": "MIT",
"dependencies": {
"@graphql-typed-document-node/core": "^3.1.0",
"graphql": "^15.4.0",
"isomorphic-unfetch": "^3.1.0"
},
"engines": {
"node": ">=12.x",
"yarn": "1.x"
}
},
"node_modules/graphql": {
"version": "15.10.1",
"resolved": "https://registry.npmjs.org/graphql/-/graphql-15.10.1.tgz",
"integrity": "sha512-BL/Xd/T9baO6NFzoMpiMD7YUZ62R6viR5tp/MULVEnbYJXZA//kRNW7J0j1w/wXArgL0sCxhDfK5dczSKn3+cg==",
"license": "MIT",
"engines": {
"node": ">= 10.x"
}
},
"node_modules/isomorphic-unfetch": {
"version": "3.1.0",
"resolved": "https://registry.npmjs.org/isomorphic-unfetch/-/isomorphic-unfetch-3.1.0.tgz",
"integrity": "sha512-geDJjpoZ8N0kWexiwkX8F9NkTsXhetLPVbZFQ+JTW239QNOwvB0gniuR1Wc6f0AMTn7/mFGyXvHTifrCp/GH8Q==",
"license": "MIT",
"dependencies": {
"node-fetch": "^2.6.1",
"unfetch": "^4.2.0"
}
},
"node_modules/node-fetch": {
"version": "2.7.0",
"resolved": "https://registry.npmjs.org/node-fetch/-/node-fetch-2.7.0.tgz",
"integrity": "sha512-c4FRfUm/dbcWZ7U+1Wq0AwCyFL+3nt2bEw05wfxSz+DWpWsitgmSgYmy2dQdWyKC1694ELPqMs/YzUSNozLt8A==",
"license": "MIT",
"dependencies": {
"whatwg-url": "^5.0.0"
},
"engines": {
"node": "4.x || >=6.0.0"
},
"peerDependencies": {
"encoding": "^0.1.0"
},
"peerDependenciesMeta": {
"encoding": {
"optional": true
}
}
},
"node_modules/tr46": {
"version": "0.0.3",
"resolved": "https://registry.npmjs.org/tr46/-/tr46-0.0.3.tgz",
"integrity": "sha512-N3WMsuqV66lT30CrXNbEjx4GEwlow3v6rr4mCcv6prnfwhS01rkgyFdjPNBYd9br7LpXV1+Emh01fHnq2Gdgrw==",
"license": "MIT"
},
"node_modules/unfetch": {
"version": "4.2.0",
"resolved": "https://registry.npmjs.org/unfetch/-/unfetch-4.2.0.tgz",
"integrity": "sha512-F9p7yYCn6cIW9El1zi0HI6vqpeIvBsr3dSuRO6Xuppb1u5rXpCPmMvLSyECLhybr9isec8Ohl0hPekMVrEinDA==",
"license": "MIT"
},
"node_modules/webidl-conversions": {
"version": "3.0.1",
"resolved": "https://registry.npmjs.org/webidl-conversions/-/webidl-conversions-3.0.1.tgz",
"integrity": "sha512-2JAn3z8AR6rjK8Sm8orRC0h/bcl/DqL7tRPdGZ4I1CjdF+EaMLmYxBHyXuKL849eucPFhvBoxMsflfOb8kxaeQ==",
"license": "BSD-2-Clause"
},
"node_modules/whatwg-url": {
"version": "5.0.0",
"resolved": "https://registry.npmjs.org/whatwg-url/-/whatwg-url-5.0.0.tgz",
"integrity": "sha512-saE57nupxk6v3HY35+jzBwYa0rKSy0XR8JSxZPwgLr7ys0IBzhGviA1/TUGJLmSVqs8pb9AnvICXEuOHLprYTw==",
"license": "MIT",
"dependencies": {
"tr46": "~0.0.3",
"webidl-conversions": "^3.0.0"
}
}
}
}
+10
View File
@@ -0,0 +1,10 @@
{
"name": "linear-skill",
"version": "1.0.0",
"type": "module",
"description": "Linear API skill for pi - manage issues, projects, and teams",
"license": "MIT",
"dependencies": {
"@linear/sdk": "^37.0.0"
}
}
+102
View File
@@ -0,0 +1,102 @@
#!/usr/bin/env bash
set -euo pipefail
# Cleans up zellij sessions that are inactive:
# - sessions marked EXITED (resurrectable metadata)
# - running sessions with 0 attached clients
#
# Usage:
# cleanup-zellij-inactive.sh # delete inactive sessions
# cleanup-zellij-inactive.sh --dry-run # show what would be deleted

# Parse the single optional flag; anything unrecognised is an error.
DRY_RUN=0
case "${1-}" in
  "" ) ;;
  -n|--dry-run) DRY_RUN=1 ;;
  -h|--help)
    cat <<'EOF'
cleanup-zellij-inactive.sh
Delete zellij sessions that are inactive:
- EXITED sessions are deleted
- running sessions with 0 attached clients are killed+deleted
Options:
-n, --dry-run Show what would be deleted
-h, --help Show this help
EOF
    exit 0
    ;;
  *)
    echo "Unknown option: $1" >&2
    echo "Use --help for usage" >&2
    exit 1
    ;;
esac

# Bail out early if zellij isn't installed.
if ! command -v zellij >/dev/null 2>&1; then
  echo "zellij not found in PATH" >&2
  exit 1
fi

# One session per line; --no-formatting avoids ANSI escapes in the output so
# it can be parsed. `|| true` keeps `set -e` happy when the command fails
# (e.g. no sessions at all).
mapfile -t session_lines < <(zellij list-sessions --no-formatting 2>/dev/null || true)
if [ "${#session_lines[@]}" -eq 0 ]; then
  echo "No zellij sessions found"
  exit 0
fi

deleted=0
failed=0
kept=0
for line in "${session_lines[@]}"; do
  [ -z "$line" ] && continue
  # The session name is the first whitespace-delimited token of the line.
  name="${line%% *}"
  # NOTE(review): substring match — a *running* session whose name happens to
  # contain "EXITED" would be misclassified; confirm against zellij's
  # list-sessions output format.
  is_exited=0
  if [[ "$line" == *"EXITED"* ]]; then
    is_exited=1
  fi
  should_delete=0
  if [ "$is_exited" -eq 1 ]; then
    should_delete=1
  else
    # Running session: check attached clients
    # Drop the header row (tail -n +2) and blank lines, then count what's
    # left. Errors are swallowed, so a session whose client list cannot be
    # read counts as 0 clients and will be deleted.
    clients_out="$(zellij --session "$name" action list-clients 2>/dev/null || true)"
    client_count="$(printf '%s\n' "$clients_out" | tail -n +2 | sed '/^\s*$/d' | wc -l | tr -d ' ')"
    if [ "$client_count" -eq 0 ]; then
      should_delete=1
    fi
  fi
  if [ "$should_delete" -eq 1 ]; then
    if [ "$DRY_RUN" -eq 1 ]; then
      echo "[dry-run] delete: $name"
      deleted=$((deleted + 1))
    else
      # --force also kills running sessions before deleting
      if zellij delete-session --force "$name" >/dev/null 2>&1; then
        echo "deleted: $name"
        deleted=$((deleted + 1))
      else
        echo "failed: $name" >&2
        failed=$((failed + 1))
      fi
    fi
  else
    kept=$((kept + 1))
  fi
done

# Summary: counts differ between dry-run and real runs.
echo
if [ "$DRY_RUN" -eq 1 ]; then
  echo "Would delete: $deleted"
else
  echo "Deleted: $deleted"
  echo "Failed: $failed"
fi
echo "Kept: $kept"
+52
View File
@@ -0,0 +1,52 @@
#!/usr/bin/env bash
set -euo pipefail
# Replace the current zellij tab by opening a layout in a new tab
# and closing the original tab.
#
# Usage:
# zellij-replace-tab-layout.sh # uses "dev"
# zellij-replace-tab-layout.sh dev
# zellij-replace-tab-layout.sh my-layout

layout="${1:-dev}"

# Help is accepted in place of a layout name.
if [ "$layout" = "-h" ] || [ "$layout" = "--help" ]; then
  cat <<'EOF'
zellij-replace-tab-layout.sh
Replace the current zellij tab with a new tab created from a layout.
This avoids `zellij action override-layout` glitches.
Usage:
zellij-replace-tab-layout.sh [layout]
Examples:
zellij-replace-tab-layout.sh
zellij-replace-tab-layout.sh dev
zellij-replace-tab-layout.sh dotfiles
EOF
  exit 0
fi

if ! command -v zellij >/dev/null 2>&1; then
  echo "zellij not found in PATH" >&2
  exit 1
fi

# Must run from inside a session: the ZELLIJ env var marks that.
if [ -z "${ZELLIJ:-}" ]; then
  echo "Not inside a zellij session (ZELLIJ is not set)" >&2
  exit 1
fi

# Grab the id of the tab we are currently on before creating the new one.
tab_id="$(zellij action current-tab-info | awk '/^id:/ { print $2 }')"
if [ -z "$tab_id" ]; then
  echo "Failed to detect current tab id" >&2
  exit 1
fi

# Open the replacement first, then drop the tab we started from.
zellij action new-tab --layout "$layout" >/dev/null
zellij action close-tab --tab-id "$tab_id"
+1
View File
@@ -52,6 +52,7 @@ Host mac mac-attio
LocalForward 8082 localhost:8082
LocalForward 54043 localhost:54043
IdentitiesOnly yes
SetEnv TERM=xterm-256color
Host linux-pc 192.168.1.80
HostName 192.168.1.80
+1 -1
View File
@@ -328,7 +328,7 @@ default_layout "compact"
// The folder in which Zellij will look for themes
// (Requires restart)
//
// theme_dir "/tmp"
// theme_dir "/home/thomasgl/.config/zellij/themes"
// Toggle enabling the mouse mode.
// On certain configurations, or terminals this could
+4 -10
View File
@@ -6,23 +6,17 @@ layout {
}
}
tab name="dotfiles" cwd="/home/thomasgl/.dotfiles" {
pane split_direction="vertical" {
tab name="nvim + jjui" {
pane stacked=true {
pane
pane command="nvim"
}
pane size="40%" command="pi"
pane command="jjui"
}
}
tab name="NixOS" cwd="/home/thomasgl/etc/nixos" {
pane split_direction="vertical" {
tab name="pi + shell" {
pane stacked=true {
pane command="pi"
pane
pane command="nvim"
}
pane size="40%" command="pi"
}
}
}