Compare commits

23 Commits

Author SHA1 Message Date
thomas 62337a078c stop squashing 2026-04-21 09:16:30 +01:00
thomas 8e810418a5 remove notify 2026-04-20 16:09:45 +01:00
thomas a2d283a5c5 a bunch of changes 2026-04-20 13:55:11 +01:00
thomas ca0708a8ee fuck squashing 2026-04-17 12:12:36 +01:00
thomas 96060c899d remvoe cursor 2026-04-16 17:09:18 +01:00
thomas dc0e75eb46 stuff 2026-04-16 16:30:12 +01:00
thomas f3d9d42745 fix sudo 2026-04-16 16:00:09 +01:00
thomas 191cfbf182 resurrect claude 2026-04-16 15:49:19 +01:00
thomas 0bfdbd350e cursor stuff 2026-04-16 15:34:00 +01:00
thomas 9a7669af28 fix 2026-04-16 11:58:48 +01:00
thomas 534ec8b99f cursor extension 2026-04-16 11:55:57 +01:00
thomas c004356b5a zellij cleanup + small files 2026-04-16 09:48:44 +01:00
thomas 8fa80f58ea agents md to skip validations 2026-04-16 09:25:54 +01:00
thomas b42a9ecffa jj workspaces skill 2026-04-16 09:12:51 +01:00
thomas 966e40e71b anthropic fix 2026-04-13 15:52:17 +01:00
thomas 4af7031922 fix screenshots 2026-04-13 15:49:09 +01:00
thomas 6003f41a12 notify even when peon muted 2026-04-09 10:09:45 +01:00
thomas 587c54060b fix zellij 2026-04-08 10:59:18 +01:00
thomas cad0540600 fix biome 2026-04-07 17:29:51 +01:00
thomas fd2307eb0c fix autocomplete 2026-04-07 17:27:37 +01:00
thomas 6d525d0971 fix git blame lol 2026-04-07 16:48:17 +01:00
thomas 51073c07a8 update pi 2026-04-01 15:29:36 +01:00
thomas e4b6fbabc6 slow tool settings 2026-03-31 14:16:18 +01:00
31 changed files with 1562 additions and 697 deletions
+14
View File
@@ -194,6 +194,17 @@
"centeringMode": "index",
"clockDateFormat": "d MMM yyyy",
"lockDateFormat": "",
"greeterRememberLastSession": true,
"greeterRememberLastUser": true,
"greeterEnableFprint": false,
"greeterEnableU2f": false,
"greeterWallpaperPath": "",
"greeterUse24HourClock": true,
"greeterShowSeconds": false,
"greeterPadHours12Hour": false,
"greeterLockDateFormat": "",
"greeterFontFamily": "",
"greeterWallpaperFillMode": "",
"mediaSize": 1,
"appLauncherViewMode": "list",
"spotlightModalViewMode": "list",
@@ -314,6 +325,7 @@
"matugenTemplateKcolorscheme": true,
"matugenTemplateVscode": true,
"matugenTemplateEmacs": true,
"matugenTemplateZed": true,
"showDock": false,
"dockAutoHide": false,
"dockSmartAutoHide": false,
@@ -355,6 +367,8 @@
"lockAtStartup": false,
"enableFprint": false,
"maxFprintTries": 3,
"enableU2f": false,
"u2fMode": "or",
"lockScreenActiveMonitor": "all",
"lockScreenInactiveColor": "#000000",
"lockScreenNotificationMode": 0,
+1 -1
View File
@@ -1,3 +1,3 @@
if test (uname) = Darwin
fnm env --use-on-cd --shell fish | source
fnm env --use-on-cd --log-level=quiet --shell fish | source
end
+12 -2
View File
@@ -102,9 +102,19 @@ status is-interactive; and begin
end
# Add user local bin to PATH
# PATH ordering on Linux: keep privileged wrapper binaries first (sudo, etc.)
if test (uname) = Linux
fish_add_path -m /run/wrappers/bin
fish_add_path -a -m /run/current-system/sw/bin
end
# Add user local bin to PATH, but keep it after system paths on Linux
if test -d "$HOME/.local/bin"
fish_add_path "$HOME/.local/bin"
if test (uname) = Linux
fish_add_path -a -m "$HOME/.local/bin"
else
fish_add_path "$HOME/.local/bin"
end
end
# pnpm
+3
View File
@@ -4,3 +4,6 @@ email = "thomasgl@pm.me"
[git]
write-change-id-header = true
[snapshot]
auto-update-stale = true
+19 -7
View File
@@ -1,8 +1,11 @@
#!/usr/bin/env bash
set -u
screenshot_dir="$HOME/Pictures/Screenshots"
remote_target="mac-attio:~/screenshot.png"
timeout=3 # seconds
file_timeout=8 # seconds to wait for screenshot file to appear
upload_timeout=10 # seconds
notify() {
DBUS_SESSION_BUS_ADDRESS="unix:path=/run/user/$(id -u)/bus" \
@@ -15,12 +18,13 @@ shopt -s nullglob
existing_files=("$screenshot_dir"/*.png)
existing_count=${#existing_files[@]}
# Take screenshot
niri msg action screenshot
# Take screenshot (no timeout here so interactive capture isn't canceled)
niri msg action screenshot >/dev/null 2>&1
# Wait for new file (timeout in 0.1s intervals)
deadline=$((timeout * 10))
deadline=$((file_timeout * 10))
count=0
files=("$screenshot_dir"/*.png)
while (( count < deadline )); do
files=("$screenshot_dir"/*.png)
@@ -37,12 +41,20 @@ if (( ${#files[@]} <= existing_count )); then
fi
# Get the new file (most recent)
latest_file=$(ls -1t "${files[@]}" | head -n 1)
latest_file=$(ls -1t -- "${files[@]}" | head -n 1)
# Small delay to ensure file is fully written
sleep 0.1
# Upload
if scp -q "$latest_file" "$remote_target"; then
# Upload with strict SSH options so it never blocks waiting for prompts
if timeout "${upload_timeout}s" scp -q \
-o BatchMode=yes \
-o ConnectTimeout=5 \
-o ConnectionAttempts=1 \
-o ServerAliveInterval=2 \
-o ServerAliveCountMax=1 \
-- "$latest_file" "$remote_target"; then
notify "Screenshot" "Uploaded to Mac"
else
notify "Screenshot" "Upload to Mac failed"
fi
+1 -1
View File
@@ -1,5 +1,5 @@
return {
cmd = { "biome", "lsp-proxy" },
cmd = { "npx", "biome", "lsp-proxy" },
filetypes = {
"javascript",
"javascriptreact",
+25 -1
View File
@@ -32,6 +32,23 @@ return {
},
"folke/lazydev.nvim",
},
config = function(_, opts)
-- Monkey-patch blink's text_edits.get_from_item to clamp textEdit ranges
-- that extend past the cursor. Workaround for tsgo sending bad ranges
-- that eat text (e.g. in JSX string attributes like className="...").
local text_edits = require("blink.cmp.lib.text_edits")
local original_get_from_item = text_edits.get_from_item
text_edits.get_from_item = function(item)
local text_edit = original_get_from_item(item)
local cursor_col = require("blink.cmp.completion.trigger.context").get_cursor()[2]
if text_edit.range and text_edit.range["end"].character > cursor_col then
text_edit.range["end"].character = cursor_col
end
return text_edit
end
require("blink.cmp").setup(opts)
end,
--- @module 'blink.cmp'
--- @type blink.cmp.Config
opts = {
@@ -76,9 +93,16 @@ return {
},
sources = {
default = { "lsp", "path", "snippets", "lazydev" },
default = { "lsp", "path", "snippets", "lazydev", "minuet" },
providers = {
lazydev = { module = "lazydev.integrations.blink", score_offset = 100 },
minuet = {
name = "minuet",
module = "minuet.blink",
async = true,
timeout_ms = 3000,
score_offset = 50,
},
},
},
+1
View File
@@ -1,5 +1,6 @@
return {
"HotThoughts/jjui.nvim",
enabled = false,
cmd = {
"JJUI",
"JJUICurrentFile",
+2 -2
View File
@@ -6,12 +6,12 @@ return {
-- Allows extra capabilities provided by blink.cmp
{
"saghen/blink.cmp",
config = function(_, opts)
require("blink.cmp").setup(opts)
opts = function(_, opts)
-- Add blink.cmp capabilities to the default LSP client capabilities
vim.lsp.config("*", {
capabilities = require("blink.cmp").get_lsp_capabilities(),
})
return opts
end,
},
+9 -27
View File
@@ -28,34 +28,16 @@ return {
end,
},
{ "nvim-lua/plenary.nvim" },
-- optional, if you are using virtual-text frontend, blink is not required.
-- Minuet blink.cmp integration (merged into main blink.lua spec via opts)
{
"Saghen/blink.cmp",
config = function()
require("blink-cmp").setup({
keymap = {
-- Manually invoke minuet completion.
["<A-y>"] = require("minuet").make_blink_map(),
},
sources = {
-- Enable minuet for autocomplete
default = { "lsp", "path", "buffer", "snippets", "minuet" },
-- For manual completion only, remove 'minuet' from default
providers = {
minuet = {
name = "minuet",
module = "minuet.blink",
async = true,
-- Should match minuet.config.request_timeout * 1000,
-- since minuet.config.request_timeout is in seconds
timeout_ms = 3000,
score_offset = 50, -- Gives minuet higher priority among suggestions
},
},
},
-- Recommended to avoid unnecessary request
completion = { trigger = { prefetch_on_insert = false } },
})
"saghen/blink.cmp",
opts = function(_, opts)
opts.keymap = opts.keymap or {}
opts.keymap["<A-y>"] = require("minuet").make_blink_map()
opts.completion = opts.completion or {}
opts.completion.trigger = opts.completion.trigger or {}
opts.completion.trigger.prefetch_on_insert = false
return opts
end,
},
}
+8 -25
View File
@@ -218,12 +218,12 @@ return {
},
-- git
{
"<leader>gcb",
"<leader>jc",
function()
local cwd = vim.fn.getcwd()
-- Helper to run git commands and capture both stdout and stderr
local function git_cmd(cmd)
-- Helper to run commands and capture both stdout and stderr
local function run_cmd(cmd)
local full_cmd = "cd " .. vim.fn.shellescape(cwd) .. " && " .. cmd .. " 2>&1"
local handle = io.popen(full_cmd)
local result = handle and handle:read("*a") or ""
@@ -234,7 +234,7 @@ return {
end
-- Check if in a git repo
local git_dir = git_cmd("git rev-parse --git-dir")
local git_dir = run_cmd("git rev-parse --git-dir")
if git_dir == "" or git_dir:match("^fatal") then
vim.notify("Not in a git repository", vim.log.levels.WARN)
return
@@ -242,7 +242,7 @@ return {
-- Get the default branch
local function branch_exists(branch)
local result = git_cmd("git rev-parse --verify refs/remotes/origin/" .. branch)
local result = run_cmd("git rev-parse --verify refs/remotes/origin/" .. branch)
-- If branch exists, rev-parse returns a hash; if not, it returns fatal error
return not result:match("^fatal")
end
@@ -259,19 +259,9 @@ return {
return
end
-- Get current branch
local current_branch = git_cmd("git branch --show-current")
if current_branch == "" then
current_branch = "HEAD"
end
local compare_target = "origin/" .. default_branch
-- Get files that differ from origin/main (includes committed + uncommitted changes)
local result = git_cmd("git diff --name-only " .. compare_target)
-- Also get untracked files
local untracked = git_cmd("git ls-files --others --exclude-standard")
local result = run_cmd("jj diff --from " .. default_branch .. "@origin --to @ --summary | awk '{print $2}'")
-- Combine results
local all_files = {}
@@ -284,20 +274,13 @@ return {
end
end
for line in untracked:gmatch("[^\r\n]+") do
if line ~= "" and not seen[line] then
seen[line] = true
table.insert(all_files, { text = line .. " [untracked]", file = line })
end
end
if #all_files == 0 then
vim.notify("No modified files (vs " .. compare_target .. ")", vim.log.levels.INFO)
vim.notify("No modified files", vim.log.levels.INFO)
return
end
Snacks.picker({
title = "Modified Files (vs " .. compare_target .. ")",
title = "Modified Files",
items = all_files,
layout = { preset = "default" },
confirm = function(picker, item)
+1 -1
View File
@@ -1,7 +1,7 @@
{
"activePack": "glados",
"volume": 1,
"muted": false,
"muted": true,
"enabledCategories": {
"session.start": true,
"task.acknowledge": true,
+7 -3
View File
@@ -1,11 +1,15 @@
{
"lastChangelogVersion": "0.63.1",
"lastChangelogVersion": "0.67.3",
"defaultProvider": "openai-codex",
"defaultModel": "gpt-5.3-codex",
"defaultThinkingLevel": "high",
"defaultThinkingLevel": "medium",
"theme": "matugen",
"lsp": {
"hookMode": "edit_write"
},
"hideThinkingBlock": false
"hideThinkingBlock": false,
"slowtool": {
"timeoutSeconds": 300,
"enabled": true
}
}
+7 -3
View File
@@ -1,11 +1,15 @@
{
"lastChangelogVersion": "0.57.1",
"lastChangelogVersion": "0.67.68",
"defaultProvider": "anthropic",
"defaultModel": "claude-opus-4-6",
"defaultModel": "claude-opus-4-7",
"defaultThinkingLevel": "medium",
"theme": "matugen",
"lsp": {
"hookMode": "edit_write"
},
"hideThinkingBlock": false
"hideThinkingBlock": true,
"slowtool": {
"timeoutSeconds": 300,
"enabled": true
}
}
+11 -1
View File
@@ -1,3 +1,13 @@
# Tool usage
FUCKING ALWAYS use timeout on tool usage because sometimes you're stupid, and hang on things because you assume its non interactive. And by that I don't mean appending `timeout` to bash or something, but you have a way to add a timeout to tool calling somehow. I don't know the inner workings of the harness.
# Validations
Sometimes some repositories (stupidly) ask you to run validations after changes or some shit. Thing is, you're smart. Your edit tools already contain formatting and LSP hooks. So, you may ask the user if they want you to run said "required" validations, but they're not really required.
---
# Screenshots
When the user provides a screenshot path (e.g., `/tmp/pi-clipboard-xxx.png`), **ALWAYS** use the `read` tool to read the image file. Do NOT assume you can see the screenshot contents without reading it first.
@@ -8,7 +18,7 @@ When the user provides a screenshot path (e.g., `/tmp/pi-clipboard-xxx.png`), **
**Prefer jj (Jujutsu) over git.** If a project has a colocated jj repo (`.jj` directory), use `jj` commands for all version control operations — rebasing, branching, log, etc. Only fall back to git when jj doesn't support something or the project isn't set up for it.
After pushing changes, always run `jj new` to start a fresh working copy commit.
After pushing changes, always run `jj new` to start a fresh working copy commit. Don't squash unnecessarily! seriously don't squash all the time.
# Git commits and PRs
@@ -0,0 +1,14 @@
import type { ExtensionAPI } from "@mariozechner/pi-coding-agent";
const TARGET = "about pi itself, its SDK, extensions, themes, skills, or TUI";
const REPLACEMENT = "about pi, its SDK, extensions, themes, skills, or TUI";
export default function(pi: ExtensionAPI) {
pi.on("before_agent_start", (event, ctx) => {
if (ctx.model?.provider !== "anthropic") return;
if (!event.systemPrompt.includes(TARGET)) return;
return {
systemPrompt: event.systemPrompt.replace(TARGET, REPLACEMENT),
};
});
}
@@ -1 +0,0 @@
node_modules/
@@ -1,604 +0,0 @@
/**
* Custom Provider Example
*
* Demonstrates registering a custom provider with:
* - Custom API identifier ("custom-anthropic-api")
* - Custom streamSimple implementation
* - OAuth support for /login
* - API key support via environment variable
* - Two model definitions
*
* Usage:
* # First install dependencies
* cd packages/coding-agent/examples/extensions/custom-provider && npm install
*
* # With OAuth (run /login custom-anthropic first)
* pi -e ./packages/coding-agent/examples/extensions/custom-provider
*
* # With API key
* CUSTOM_ANTHROPIC_API_KEY=sk-ant-... pi -e ./packages/coding-agent/examples/extensions/custom-provider
*
* Then use /model to select custom-anthropic/claude-sonnet-4-5
*/
import Anthropic from "@anthropic-ai/sdk";
import type { ContentBlockParam, MessageCreateParamsStreaming } from "@anthropic-ai/sdk/resources/messages.js";
import {
type Api,
type AssistantMessage,
type AssistantMessageEventStream,
type Context,
calculateCost,
createAssistantMessageEventStream,
type ImageContent,
type Message,
type Model,
type OAuthCredentials,
type OAuthLoginCallbacks,
type SimpleStreamOptions,
type StopReason,
type TextContent,
type ThinkingContent,
type Tool,
type ToolCall,
type ToolResultMessage,
} from "@mariozechner/pi-ai";
import type { ExtensionAPI } from "@mariozechner/pi-coding-agent";
// =============================================================================
// OAuth Implementation (copied from packages/ai/src/utils/oauth/anthropic.ts)
// =============================================================================
const decode = (s: string) => atob(s);
const CLIENT_ID = decode("OWQxYzI1MGEtZTYxYi00NGQ5LTg4ZWQtNTk0NGQxOTYyZjVl");
const AUTHORIZE_URL = "https://claude.ai/oauth/authorize";
const TOKEN_URL = "https://console.anthropic.com/v1/oauth/token";
const REDIRECT_URI = "https://console.anthropic.com/oauth/code/callback";
const SCOPES = "org:create_api_key user:profile user:inference";
async function generatePKCE(): Promise<{ verifier: string; challenge: string }> {
const array = new Uint8Array(32);
crypto.getRandomValues(array);
const verifier = btoa(String.fromCharCode(...array))
.replace(/\+/g, "-")
.replace(/\//g, "_")
.replace(/=+$/, "");
const encoder = new TextEncoder();
const data = encoder.encode(verifier);
const hash = await crypto.subtle.digest("SHA-256", data);
const challenge = btoa(String.fromCharCode(...new Uint8Array(hash)))
.replace(/\+/g, "-")
.replace(/\//g, "_")
.replace(/=+$/, "");
return { verifier, challenge };
}
async function loginAnthropic(callbacks: OAuthLoginCallbacks): Promise<OAuthCredentials> {
const { verifier, challenge } = await generatePKCE();
const authParams = new URLSearchParams({
code: "true",
client_id: CLIENT_ID,
response_type: "code",
redirect_uri: REDIRECT_URI,
scope: SCOPES,
code_challenge: challenge,
code_challenge_method: "S256",
state: verifier,
});
callbacks.onAuth({ url: `${AUTHORIZE_URL}?${authParams.toString()}` });
const authCode = await callbacks.onPrompt({ message: "Paste the authorization code:" });
const [code, state] = authCode.split("#");
const tokenResponse = await fetch(TOKEN_URL, {
method: "POST",
headers: { "Content-Type": "application/json" },
body: JSON.stringify({
grant_type: "authorization_code",
client_id: CLIENT_ID,
code,
state,
redirect_uri: REDIRECT_URI,
code_verifier: verifier,
}),
});
if (!tokenResponse.ok) {
throw new Error(`Token exchange failed: ${await tokenResponse.text()}`);
}
const data = (await tokenResponse.json()) as {
access_token: string;
refresh_token: string;
expires_in: number;
};
return {
refresh: data.refresh_token,
access: data.access_token,
expires: Date.now() + data.expires_in * 1000 - 5 * 60 * 1000,
};
}
async function refreshAnthropicToken(credentials: OAuthCredentials): Promise<OAuthCredentials> {
const response = await fetch(TOKEN_URL, {
method: "POST",
headers: { "Content-Type": "application/json" },
body: JSON.stringify({
grant_type: "refresh_token",
client_id: CLIENT_ID,
refresh_token: credentials.refresh,
}),
});
if (!response.ok) {
throw new Error(`Token refresh failed: ${await response.text()}`);
}
const data = (await response.json()) as {
access_token: string;
refresh_token: string;
expires_in: number;
};
return {
refresh: data.refresh_token,
access: data.access_token,
expires: Date.now() + data.expires_in * 1000 - 5 * 60 * 1000,
};
}
// =============================================================================
// Streaming Implementation (simplified from packages/ai/src/providers/anthropic.ts)
// =============================================================================
// Claude Code tool names for OAuth stealth mode
const claudeCodeTools = [
"Read",
"Write",
"Edit",
"Bash",
"Grep",
"Glob",
"AskUserQuestion",
"TodoWrite",
"WebFetch",
"WebSearch",
];
const ccToolLookup = new Map(claudeCodeTools.map((t) => [t.toLowerCase(), t]));
const toClaudeCodeName = (name: string) => ccToolLookup.get(name.toLowerCase()) ?? name;
const fromClaudeCodeName = (name: string, tools?: Tool[]) => {
const lowerName = name.toLowerCase();
const matched = tools?.find((t) => t.name.toLowerCase() === lowerName);
return matched?.name ?? name;
};
function isOAuthToken(apiKey: string): boolean {
return apiKey.includes("sk-ant-oat");
}
function sanitizeSurrogates(text: string): string {
return text.replace(/[\uD800-\uDFFF]/g, "\uFFFD");
}
function convertContentBlocks(
content: (TextContent | ImageContent)[],
): string | Array<{ type: "text"; text: string } | { type: "image"; source: any }> {
const hasImages = content.some((c) => c.type === "image");
if (!hasImages) {
return sanitizeSurrogates(content.map((c) => (c as TextContent).text).join("\n"));
}
const blocks = content.map((block) => {
if (block.type === "text") {
return { type: "text" as const, text: sanitizeSurrogates(block.text) };
}
return {
type: "image" as const,
source: {
type: "base64" as const,
media_type: block.mimeType,
data: block.data,
},
};
});
if (!blocks.some((b) => b.type === "text")) {
blocks.unshift({ type: "text" as const, text: "(see attached image)" });
}
return blocks;
}
function convertMessages(messages: Message[], isOAuth: boolean, _tools?: Tool[]): any[] {
const params: any[] = [];
for (let i = 0; i < messages.length; i++) {
const msg = messages[i];
if (msg.role === "user") {
if (typeof msg.content === "string") {
if (msg.content.trim()) {
params.push({ role: "user", content: sanitizeSurrogates(msg.content) });
}
} else {
const blocks: ContentBlockParam[] = msg.content.map((item) =>
item.type === "text"
? { type: "text" as const, text: sanitizeSurrogates(item.text) }
: {
type: "image" as const,
source: { type: "base64" as const, media_type: item.mimeType as any, data: item.data },
},
);
if (blocks.length > 0) {
params.push({ role: "user", content: blocks });
}
}
} else if (msg.role === "assistant") {
const blocks: ContentBlockParam[] = [];
for (const block of msg.content) {
if (block.type === "text" && block.text.trim()) {
blocks.push({ type: "text", text: sanitizeSurrogates(block.text) });
} else if (block.type === "thinking" && block.thinking.trim()) {
if ((block as ThinkingContent).thinkingSignature) {
blocks.push({
type: "thinking" as any,
thinking: sanitizeSurrogates(block.thinking),
signature: (block as ThinkingContent).thinkingSignature!,
});
} else {
blocks.push({ type: "text", text: sanitizeSurrogates(block.thinking) });
}
} else if (block.type === "toolCall") {
blocks.push({
type: "tool_use",
id: block.id,
name: isOAuth ? toClaudeCodeName(block.name) : block.name,
input: block.arguments,
});
}
}
if (blocks.length > 0) {
params.push({ role: "assistant", content: blocks });
}
} else if (msg.role === "toolResult") {
const toolResults: any[] = [];
toolResults.push({
type: "tool_result",
tool_use_id: msg.toolCallId,
content: convertContentBlocks(msg.content),
is_error: msg.isError,
});
let j = i + 1;
while (j < messages.length && messages[j].role === "toolResult") {
const nextMsg = messages[j] as ToolResultMessage;
toolResults.push({
type: "tool_result",
tool_use_id: nextMsg.toolCallId,
content: convertContentBlocks(nextMsg.content),
is_error: nextMsg.isError,
});
j++;
}
i = j - 1;
params.push({ role: "user", content: toolResults });
}
}
// Add cache control to last user message
if (params.length > 0) {
const last = params[params.length - 1];
if (last.role === "user" && Array.isArray(last.content)) {
const lastBlock = last.content[last.content.length - 1];
if (lastBlock) {
lastBlock.cache_control = { type: "ephemeral" };
}
}
}
return params;
}
function convertTools(tools: Tool[], isOAuth: boolean): any[] {
return tools.map((tool) => ({
name: isOAuth ? toClaudeCodeName(tool.name) : tool.name,
description: tool.description,
input_schema: {
type: "object",
properties: (tool.parameters as any).properties || {},
required: (tool.parameters as any).required || [],
},
}));
}
function mapStopReason(reason: string): StopReason {
switch (reason) {
case "end_turn":
case "pause_turn":
case "stop_sequence":
return "stop";
case "max_tokens":
return "length";
case "tool_use":
return "toolUse";
default:
return "error";
}
}
function streamCustomAnthropic(
model: Model<Api>,
context: Context,
options?: SimpleStreamOptions,
): AssistantMessageEventStream {
const stream = createAssistantMessageEventStream();
(async () => {
const output: AssistantMessage = {
role: "assistant",
content: [],
api: model.api,
provider: model.provider,
model: model.id,
usage: {
input: 0,
output: 0,
cacheRead: 0,
cacheWrite: 0,
totalTokens: 0,
cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0, total: 0 },
},
stopReason: "stop",
timestamp: Date.now(),
};
try {
const apiKey = options?.apiKey ?? "";
const isOAuth = isOAuthToken(apiKey);
// Configure client based on auth type
const betaFeatures = ["fine-grained-tool-streaming-2025-05-14", "interleaved-thinking-2025-05-14"];
const clientOptions: any = {
baseURL: model.baseUrl,
dangerouslyAllowBrowser: true,
};
if (isOAuth) {
clientOptions.apiKey = null;
clientOptions.authToken = apiKey;
clientOptions.defaultHeaders = {
accept: "application/json",
"anthropic-dangerous-direct-browser-access": "true",
"anthropic-beta": `claude-code-20250219,oauth-2025-04-20,${betaFeatures.join(",")}`,
"user-agent": "claude-cli/2.1.2 (external, cli)",
"x-app": "cli",
};
} else {
clientOptions.apiKey = apiKey;
clientOptions.defaultHeaders = {
accept: "application/json",
"anthropic-dangerous-direct-browser-access": "true",
"anthropic-beta": betaFeatures.join(","),
};
}
const client = new Anthropic(clientOptions);
// Build request params
const params: MessageCreateParamsStreaming = {
model: model.id,
messages: convertMessages(context.messages, isOAuth, context.tools),
max_tokens: options?.maxTokens || Math.floor(model.maxTokens / 3),
stream: true,
};
// System prompt with Claude Code identity for OAuth
if (isOAuth) {
params.system = [
{
type: "text",
text: "You are Claude Code, Anthropic's official CLI for Claude.",
cache_control: { type: "ephemeral" },
},
];
if (context.systemPrompt) {
params.system.push({
type: "text",
text: sanitizeSurrogates(context.systemPrompt),
cache_control: { type: "ephemeral" },
});
}
} else if (context.systemPrompt) {
params.system = [
{
type: "text",
text: sanitizeSurrogates(context.systemPrompt),
cache_control: { type: "ephemeral" },
},
];
}
if (context.tools) {
params.tools = convertTools(context.tools, isOAuth);
}
// Handle thinking/reasoning
if (options?.reasoning && model.reasoning) {
const defaultBudgets: Record<string, number> = {
minimal: 1024,
low: 4096,
medium: 10240,
high: 20480,
};
const customBudget = options.thinkingBudgets?.[options.reasoning as keyof typeof options.thinkingBudgets];
params.thinking = {
type: "enabled",
budget_tokens: customBudget ?? defaultBudgets[options.reasoning] ?? 10240,
};
}
const anthropicStream = client.messages.stream({ ...params }, { signal: options?.signal });
stream.push({ type: "start", partial: output });
type Block = (ThinkingContent | TextContent | (ToolCall & { partialJson: string })) & { index: number };
const blocks = output.content as Block[];
for await (const event of anthropicStream) {
if (event.type === "message_start") {
output.usage.input = event.message.usage.input_tokens || 0;
output.usage.output = event.message.usage.output_tokens || 0;
output.usage.cacheRead = (event.message.usage as any).cache_read_input_tokens || 0;
output.usage.cacheWrite = (event.message.usage as any).cache_creation_input_tokens || 0;
output.usage.totalTokens =
output.usage.input + output.usage.output + output.usage.cacheRead + output.usage.cacheWrite;
calculateCost(model, output.usage);
} else if (event.type === "content_block_start") {
if (event.content_block.type === "text") {
output.content.push({ type: "text", text: "", index: event.index } as any);
stream.push({ type: "text_start", contentIndex: output.content.length - 1, partial: output });
} else if (event.content_block.type === "thinking") {
output.content.push({
type: "thinking",
thinking: "",
thinkingSignature: "",
index: event.index,
} as any);
stream.push({ type: "thinking_start", contentIndex: output.content.length - 1, partial: output });
} else if (event.content_block.type === "tool_use") {
output.content.push({
type: "toolCall",
id: event.content_block.id,
name: isOAuth
? fromClaudeCodeName(event.content_block.name, context.tools)
: event.content_block.name,
arguments: {},
partialJson: "",
index: event.index,
} as any);
stream.push({ type: "toolcall_start", contentIndex: output.content.length - 1, partial: output });
}
} else if (event.type === "content_block_delta") {
const index = blocks.findIndex((b) => b.index === event.index);
const block = blocks[index];
if (!block) continue;
if (event.delta.type === "text_delta" && block.type === "text") {
block.text += event.delta.text;
stream.push({ type: "text_delta", contentIndex: index, delta: event.delta.text, partial: output });
} else if (event.delta.type === "thinking_delta" && block.type === "thinking") {
block.thinking += event.delta.thinking;
stream.push({
type: "thinking_delta",
contentIndex: index,
delta: event.delta.thinking,
partial: output,
});
} else if (event.delta.type === "input_json_delta" && block.type === "toolCall") {
(block as any).partialJson += event.delta.partial_json;
try {
block.arguments = JSON.parse((block as any).partialJson);
} catch {}
stream.push({
type: "toolcall_delta",
contentIndex: index,
delta: event.delta.partial_json,
partial: output,
});
} else if (event.delta.type === "signature_delta" && block.type === "thinking") {
block.thinkingSignature = (block.thinkingSignature || "") + (event.delta as any).signature;
}
} else if (event.type === "content_block_stop") {
const index = blocks.findIndex((b) => b.index === event.index);
const block = blocks[index];
if (!block) continue;
delete (block as any).index;
if (block.type === "text") {
stream.push({ type: "text_end", contentIndex: index, content: block.text, partial: output });
} else if (block.type === "thinking") {
stream.push({ type: "thinking_end", contentIndex: index, content: block.thinking, partial: output });
} else if (block.type === "toolCall") {
try {
block.arguments = JSON.parse((block as any).partialJson);
} catch {}
delete (block as any).partialJson;
stream.push({ type: "toolcall_end", contentIndex: index, toolCall: block, partial: output });
}
} else if (event.type === "message_delta") {
if ((event.delta as any).stop_reason) {
output.stopReason = mapStopReason((event.delta as any).stop_reason);
}
output.usage.input = (event.usage as any).input_tokens || 0;
output.usage.output = (event.usage as any).output_tokens || 0;
output.usage.cacheRead = (event.usage as any).cache_read_input_tokens || 0;
output.usage.cacheWrite = (event.usage as any).cache_creation_input_tokens || 0;
output.usage.totalTokens =
output.usage.input + output.usage.output + output.usage.cacheRead + output.usage.cacheWrite;
calculateCost(model, output.usage);
}
}
if (options?.signal?.aborted) {
throw new Error("Request was aborted");
}
stream.push({ type: "done", reason: output.stopReason as "stop" | "length" | "toolUse", message: output });
stream.end();
} catch (error) {
for (const block of output.content) delete (block as any).index;
output.stopReason = options?.signal?.aborted ? "aborted" : "error";
output.errorMessage = error instanceof Error ? error.message : JSON.stringify(error);
stream.push({ type: "error", reason: output.stopReason, error: output });
stream.end();
}
})();
return stream;
}
// =============================================================================
// Extension Entry Point
// =============================================================================
export default function (pi: ExtensionAPI) {
pi.registerProvider("custom-anthropic", {
baseUrl: "https://api.anthropic.com",
apiKey: "CUSTOM_ANTHROPIC_API_KEY",
api: "custom-anthropic-api",
models: [
{
id: "claude-opus-4-5",
name: "Claude Opus 4.5 (Custom)",
reasoning: true,
input: ["text", "image"],
cost: { input: 5, output: 25, cacheRead: 0.5, cacheWrite: 6.25 },
contextWindow: 200000,
maxTokens: 64000,
},
{
id: "claude-sonnet-4-5",
name: "Claude Sonnet 4.5 (Custom)",
reasoning: true,
input: ["text", "image"],
cost: { input: 3, output: 15, cacheRead: 0.3, cacheWrite: 3.75 },
contextWindow: 200000,
maxTokens: 64000,
},
],
oauth: {
name: "Custom Anthropic (Claude Pro/Max)",
login: loginAnthropic,
refreshToken: refreshAnthropicToken,
getApiKey: (cred) => cred.access,
},
streamSimple: streamCustomAnthropic,
});
}
+1
View File
@@ -4,6 +4,7 @@
"type": "module",
"dependencies": {
"@anthropic-ai/sdk": "^0.52.0",
"@mariozechner/jiti": "^2.6.5",
"@mozilla/readability": "^0.5.0",
"@sinclair/typebox": "^0.34.0",
"linkedom": "^0.16.0",
+17 -16
View File
@@ -308,9 +308,24 @@ function pickSound(categoryConfig: CategoryConfig, category: Category): Sound |
// ============ SOUND PLAYBACK ============
function play(category: Category): void {
if (config.muted) return;
if (!config.enabledCategories[category]) return;
const notificationMessages: Record<Category, { title: string; message: string } | null> = {
"session.start": null,
"task.acknowledge": null,
"task.complete": { title: "Pi", message: "Task complete" },
"task.error": { title: "Pi", message: "Task failed" },
"input.required": { title: "Pi", message: "Input required" },
"resource.limit": { title: "Pi", message: "Rate limited" },
};
const notification = notificationMessages[category];
if (notification) {
sendNotification(notification.title, notification.message);
}
if (config.muted) return;
const now = Date.now();
if (now - lastPlayed < DEBOUNCE_MS) {
return;
@@ -345,20 +360,6 @@ function play(category: Category): void {
}
playSound(soundPath, config.volume);
const notificationMessages: Record<Category, { title: string; message: string } | null> = {
"session.start": null,
"task.acknowledge": null,
"task.complete": { title: "Pi", message: "Task complete" },
"task.error": { title: "Pi", message: "Task failed" },
"input.required": { title: "Pi", message: "Input required" },
"resource.limit": { title: "Pi", message: "Rate limited" },
};
const notification = notificationMessages[category];
if (notification) {
sendNotification(notification.title, notification.message);
}
}
// ============ COMMANDS ============
@@ -814,7 +815,7 @@ async function showTestMenu(ctx: ExtensionCommandContext) {
const INTERACTIVE_TOOLS = new Set(["question", "questionnaire"]);
export default function (pi: ExtensionAPI) {
export default function(pi: ExtensionAPI) {
registerCommands(pi);
pi.on("session_start", async (_event, ctx) => {
@@ -0,0 +1,21 @@
MIT License
Copyright (c) 2026 Ben Vargas
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
@@ -0,0 +1,115 @@
# @benvargas/pi-claude-code-use
`pi-claude-code-use` keeps Pi's built-in `anthropic` provider intact and applies the smallest payload changes needed for Anthropic OAuth subscription use in Pi.
It does not register a new provider or replace Pi's Anthropic request transport. Pi core remains in charge of OAuth transport, headers, model definitions, and streaming.
## What It Changes
When Pi is using Anthropic OAuth, this extension intercepts outbound API requests via the `before_provider_request` hook and:
- **System prompt rewrite** -- rewrites a small set of Pi-identifying prompt phrases in system prompt text:
- `pi itself` → `the cli itself`
- `pi .md files` → `cli .md files`
- `pi packages` → `cli packages`
Preserves Pi's original `system[]` structure, `cache_control` metadata, and non-text blocks.
- **Tool filtering** -- passes through core Claude Code tools, Anthropic-native typed tools (e.g. `web_search`), and any tool prefixed with `mcp__`. Unknown flat-named tools are filtered out.
- **Companion tool remapping** -- renames known companion extension tools from their flat names to MCP-style aliases (e.g. `web_search_exa` becomes `mcp__exa__web_search`). Duplicate flat entries are removed after remapping.
- **tool_choice remapping** -- if `tool_choice` references a flat companion name that was remapped, the reference is updated to the MCP alias. If it references a tool that was filtered out, `tool_choice` is removed from the payload.
- **Message history rewriting** -- `tool_use` blocks in conversation history that reference flat companion names are rewritten to their MCP aliases so the model sees consistent tool names across the conversation.
- **Companion alias registration** -- at session start and before each agent turn, discovers loaded companion extensions, captures their tool definitions via a jiti-based shim, and registers MCP-alias copies so the model can invoke them under Claude Code-compatible names.
- **Alias activation tracking** -- auto-activates MCP aliases when their flat counterpart is active under Anthropic OAuth. Tracks provenance (auto-managed vs user-selected) so that disabling OAuth only removes auto-activated aliases, preserving any the user explicitly enabled.
Non-OAuth Anthropic usage and non-Anthropic providers are left completely unchanged.
## Install
```bash
pi install npm:@benvargas/pi-claude-code-use
```
Or load it directly without installing:
```bash
pi -e /path/to/pi-packages/packages/pi-claude-code-use/extensions/index.ts
```
## Usage
Install the package and continue using the normal `anthropic` provider with Anthropic OAuth login:
```bash
/login anthropic
/model anthropic/claude-opus-4-6
```
No extra configuration is required.
## Environment Variables
| Variable | Description |
|---|---|
| `PI_CLAUDE_CODE_USE_DEBUG_LOG` | Set to a file path to enable debug logging. Writes two JSON entries per Anthropic OAuth request: one with `"stage": "before"` (the original payload from Pi) and one with `"stage": "after"` (the transformed payload sent to Anthropic). |
| `PI_CLAUDE_CODE_USE_DISABLE_TOOL_FILTER` | Set to `1` to disable tool filtering. System prompt rewriting still applies, but all tools pass through unchanged. Useful for debugging whether a tool-filtering issue is causing a problem. |
Example:
```bash
PI_CLAUDE_CODE_USE_DEBUG_LOG=/tmp/pi-claude-debug.log pi -e /path/to/extensions/index.ts --model anthropic/claude-sonnet-4-20250514
```
## Companion Tool Aliases
When these companion extensions from this monorepo are loaded alongside `pi-claude-code-use`, MCP aliases are automatically registered and remapped:
| Flat name | MCP alias |
|---|---|
| `web_search_exa` | `mcp__exa__web_search` |
| `get_code_context_exa` | `mcp__exa__get_code_context` |
| `firecrawl_scrape` | `mcp__firecrawl__scrape` |
| `firecrawl_map` | `mcp__firecrawl__map` |
| `firecrawl_search` | `mcp__firecrawl__search` |
| `generate_image` | `mcp__antigravity__generate_image` |
| `image_quota` | `mcp__antigravity__image_quota` |
### How companion discovery works
The extension identifies companion tools by matching `sourceInfo` metadata that Pi attaches to each registered tool:
1. **baseDir match** -- if the tool's `sourceInfo.baseDir` directory name matches the companion's directory name (e.g. `pi-exa-mcp`).
2. **Path match** -- if the tool's `sourceInfo.path` contains the companion's scoped package name (e.g. `@benvargas/pi-exa-mcp`) or directory name as a path segment. This handles npm installs, git clones, and monorepo layouts where `baseDir` points to the repo root rather than the individual package.
Once a companion tool is identified, its extension factory is loaded via jiti into a capture shim to obtain the full tool definition, which is then re-registered under the MCP alias name.
## Core Tools Allowlist
The following tool names always pass through filtering (case-insensitive). This list mirrors Pi core's `claudeCodeTools` in `packages/ai/src/providers/anthropic.ts`:
`Read`, `Write`, `Edit`, `Bash`, `Grep`, `Glob`, `AskUserQuestion`, `EnterPlanMode`, `ExitPlanMode`, `KillShell`, `NotebookEdit`, `Skill`, `Task`, `TaskOutput`, `TodoWrite`, `WebFetch`, `WebSearch`
Additionally, any tool with a `type` field (Anthropic-native tools like `web_search`) and any tool prefixed with `mcp__` always passes through.
## Guidance For Extension Authors
Anthropic's OAuth subscription path appears to fingerprint tool names. Flat extension tool names such as `web_search_exa` were rejected in live testing, while MCP-style names such as `mcp__exa__web_search` were accepted.
If you want a custom tool to survive Anthropic OAuth filtering cleanly, prefer registering it directly under an MCP-style name:
```text
mcp__<server>__<tool>
```
Examples:
- `mcp__exa__web_search`
- `mcp__firecrawl__scrape`
- `mcp__mytools__lookup_customer`
If an extension keeps a flat legacy name for non-Anthropic use, it can also register an MCP-style alias alongside it. `pi-claude-code-use` already does this centrally for the known companion tools in this repo, but unknown non-MCP tool names will still be filtered out on Anthropic OAuth requests.
## Notes
- The extension activates for all Anthropic OAuth requests regardless of model, rather than using a fixed model allowlist.
- Non-OAuth Anthropic usage (API key auth) is left unchanged.
- In practice, unknown non-MCP extension tools were the remaining trigger for Anthropic's extra-usage classification, so this package keeps core tools, keeps MCP-style tools, auto-aliases the known companion tools above, and filters the rest.
- Pi may show its built-in OAuth subscription warning banner even when the request path works correctly. That banner is UI logic in Pi, not a signal that the upstream request is being billed as extra usage.
@@ -0,0 +1,641 @@
import { appendFileSync } from "node:fs";
import { basename, dirname } from "node:path";
import { createJiti } from "@mariozechner/jiti";
import * as piAiModule from "@mariozechner/pi-ai";
import type { ExtensionAPI } from "@mariozechner/pi-coding-agent";
import * as piCodingAgentModule from "@mariozechner/pi-coding-agent";
import * as typeboxModule from "@sinclair/typebox";
// ============================================================================
// Types
// ============================================================================
/** Describes one known companion extension and the tool aliases it provides. */
interface CompanionSpec {
// Directory name the companion package lives in (e.g. "pi-exa-mcp").
dirName: string;
// Scoped npm package name (e.g. "@benvargas/pi-exa-mcp").
packageName: string;
// [flat tool name, MCP-style alias] pairs for the companion's tools.
aliases: ReadonlyArray<readonly [flatName: string, mcpName: string]>;
}
// Tool definition shape accepted by ExtensionAPI.registerTool.
type ToolRegistration = Parameters<ExtensionAPI["registerTool"]>[0];
// Shape of one entry returned by ExtensionAPI.getAllTools.
type ToolInfo = ReturnType<ExtensionAPI["getAllTools"]>[number];
// ============================================================================
// Constants
// ============================================================================
/**
 * Core Claude Code tool names that always pass through Anthropic OAuth filtering.
 * Stored lowercase for case-insensitive matching.
 * Mirrors Pi core's claudeCodeTools list in packages/ai/src/providers/anthropic.ts
 */
const CORE_TOOL_NAMES = new Set([
"read",
"write",
"edit",
"bash",
"grep",
"glob",
"askuserquestion",
"enterplanmode",
"exitplanmode",
"killshell",
"notebookedit",
"skill",
"task",
"taskoutput",
"todowrite",
"webfetch",
"websearch",
]);
/** Flat companion tool name → MCP-style alias (keys are lowercase flat names). */
const FLAT_TO_MCP = new Map<string, string>([
["web_search_exa", "mcp__exa__web_search"],
["get_code_context_exa", "mcp__exa__get_code_context"],
["firecrawl_scrape", "mcp__firecrawl__scrape"],
["firecrawl_map", "mcp__firecrawl__map"],
["firecrawl_search", "mcp__firecrawl__search"],
["generate_image", "mcp__antigravity__generate_image"],
["image_quota", "mcp__antigravity__image_quota"],
]);
/** Known companion extensions and the tools they provide. */
const COMPANIONS: CompanionSpec[] = [
{
dirName: "pi-exa-mcp",
packageName: "@benvargas/pi-exa-mcp",
aliases: [
["web_search_exa", "mcp__exa__web_search"],
["get_code_context_exa", "mcp__exa__get_code_context"],
],
},
{
dirName: "pi-firecrawl",
packageName: "@benvargas/pi-firecrawl",
aliases: [
["firecrawl_scrape", "mcp__firecrawl__scrape"],
["firecrawl_map", "mcp__firecrawl__map"],
["firecrawl_search", "mcp__firecrawl__search"],
],
},
{
dirName: "pi-antigravity-image-gen",
packageName: "@benvargas/pi-antigravity-image-gen",
aliases: [
["generate_image", "mcp__antigravity__generate_image"],
["image_quota", "mcp__antigravity__image_quota"],
],
},
];
/**
 * Reverse lookup: flat tool name → its companion spec.
 * NOTE(review): within this file it is referenced only via the `_test` export.
 */
const TOOL_TO_COMPANION = new Map<string, CompanionSpec>(
COMPANIONS.flatMap((spec) => spec.aliases.map(([flat]) => [flat, spec] as const)),
);
// ============================================================================
// Helpers
// ============================================================================
/** Type guard: true only for non-null, non-array objects. */
function isPlainObject(value: unknown): value is Record<string, unknown> {
	if (value === null) return false;
	if (typeof value !== "object") return false;
	return !Array.isArray(value);
}
/** Normalize a possibly-missing tool name: trim whitespace, lowercase. */
function lower(name: string | undefined): string {
	if (name === undefined) {
		return "";
	}
	return name.trim().toLowerCase();
}
// ============================================================================
// System prompt rewrite (PRD §1.1)
//
// Replace "pi itself" → "the cli itself" in system prompt text.
// Preserves cache_control, non-text blocks, and payload shape.
// ============================================================================
/** Replace Pi-identifying phrases in system prompt text with neutral CLI wording. */
function rewritePromptText(text: string): string {
	const substitutions: ReadonlyArray<readonly [string, string]> = [
		["pi itself", "the cli itself"],
		["pi .md files", "cli .md files"],
		["pi packages", "cli packages"],
	];
	let rewritten = text;
	for (const [needle, replacement] of substitutions) {
		rewritten = rewritten.replaceAll(needle, replacement);
	}
	return rewritten;
}
/**
 * Apply rewritePromptText to a payload's `system` field.
 * Supports both the plain-string form and the content-block array form.
 * Only text blocks are rewritten; blocks are returned by the same reference
 * when unchanged, so cache_control metadata and non-text blocks are preserved.
 */
function rewriteSystemField(system: unknown): unknown {
	if (typeof system === "string") {
		return rewritePromptText(system);
	}
	if (!Array.isArray(system)) {
		return system;
	}
	return system.map((block) => {
		// Guard clauses: anything that is not a text block passes through untouched.
		if (!isPlainObject(block)) return block;
		if (block.type !== "text") return block;
		if (typeof block.text !== "string") return block;
		const next = rewritePromptText(block.text);
		if (next === block.text) return block;
		return { ...block, text: next };
	});
}
// ============================================================================
// Tool filtering and MCP alias remapping (PRD §1.2)
//
// Rules applied per tool:
// 1. Anthropic-native typed tools (have a `type` field) → pass through
// 2. Core Claude Code tool names → pass through
// 3. Tools already prefixed with mcp__ → pass through
// 4. Known companion tools whose MCP alias is also advertised → rename to alias
// 5. Known companion tools without an advertised alias → filtered out
// 6. Unknown flat-named tools → filtered out (unless disableFilter)
// ============================================================================
/** Gather the lowercase `name` of every plain-object tool in the list. */
function collectToolNames(tools: unknown[]): Set<string> {
	const names = new Set<string>();
	tools.forEach((tool) => {
		if (!isPlainObject(tool)) return;
		if (typeof tool.name !== "string") return;
		names.add(lower(tool.name));
	});
	return names;
}
/**
 * Filter a payload's `tools` array and remap companion tools to MCP aliases,
 * following rules 1-6 in the section comment above.
 *
 * @param tools raw `tools` value from the payload; non-array values are
 *        returned unchanged
 * @param disableFilter when true, tools that rules 5 and 6 would drop are
 *        kept under their flat names instead
 * @returns a new array; surviving tool objects are reused by reference,
 *          renamed companion tools are shallow copies with a new `name`
 */
function filterAndRemapTools(tools: unknown[] | undefined, disableFilter: boolean): unknown[] | undefined {
if (!Array.isArray(tools)) return tools;
const advertised = collectToolNames(tools);
// Lowercase names already emitted, used to drop duplicates.
const emitted = new Set<string>();
const result: unknown[] = [];
for (const tool of tools) {
// Non-object entries are dropped entirely.
if (!isPlainObject(tool)) continue;
// Rule 1: native typed tools always pass through
if (typeof tool.type === "string" && tool.type.trim().length > 0) {
result.push(tool);
continue;
}
const name = typeof tool.name === "string" ? tool.name : "";
if (!name) continue;
const nameLc = lower(name);
// Rules 2 & 3: core tools and mcp__-prefixed pass through (with dedup)
if (CORE_TOOL_NAMES.has(nameLc) || nameLc.startsWith("mcp__")) {
if (!emitted.has(nameLc)) {
emitted.add(nameLc);
result.push(tool);
}
continue;
}
// Rules 4 & 5: known companion tool
const mcpAlias = FLAT_TO_MCP.get(nameLc);
if (mcpAlias) {
const aliasLc = lower(mcpAlias);
if (advertised.has(aliasLc) && !emitted.has(aliasLc)) {
// Alias exists in tool list → rename flat to alias, dedup
emitted.add(aliasLc);
result.push({ ...tool, name: mcpAlias });
} else if (disableFilter && !emitted.has(nameLc)) {
// Filter disabled: keep flat name if not yet emitted
emitted.add(nameLc);
result.push(tool);
}
continue;
}
// Rule 6: unknown flat-named tool
if (disableFilter && !emitted.has(nameLc)) {
emitted.add(nameLc);
result.push(tool);
}
}
return result;
}
/**
 * Adjust a `tool_choice` that forces a specific tool after filtering.
 * - Choices that do not name a specific tool (e.g. type "auto") pass through.
 * - If the named tool survived filtering, it is kept, with its name fixed up
 *   to the surviving casing.
 * - If a flat companion name was remapped, the choice follows the MCP alias.
 * - Returns undefined when the referenced tool was filtered out, signalling
 *   the caller to delete `tool_choice` from the payload.
 */
function remapToolChoice(
toolChoice: Record<string, unknown>,
survivingNames: Map<string, string>,
): Record<string, unknown> | undefined {
if (toolChoice.type !== "tool" || typeof toolChoice.name !== "string") {
return toolChoice;
}
const nameLc = lower(toolChoice.name);
const actualName = survivingNames.get(nameLc);
if (actualName) {
// Same object is returned when the name already matches exactly.
return actualName === toolChoice.name ? toolChoice : { ...toolChoice, name: actualName };
}
const mcpAlias = FLAT_TO_MCP.get(nameLc);
if (mcpAlias && survivingNames.has(lower(mcpAlias))) {
return { ...toolChoice, name: mcpAlias };
}
return undefined;
}
/**
 * Rewrite `tool_use` blocks in conversation history whose flat companion
 * tool names were remapped, so the model sees consistent MCP-alias names.
 * Returns the original `messages` array (same reference) when nothing
 * changed; otherwise a new array where only affected messages and blocks
 * are replaced.
 */
function remapMessageToolNames(messages: unknown[], survivingNames: Map<string, string>): unknown[] {
let anyChanged = false;
const result = messages.map((msg) => {
if (!isPlainObject(msg) || !Array.isArray(msg.content)) return msg;
let msgChanged = false;
const content = (msg.content as unknown[]).map((block) => {
// Only tool_use blocks with a string name are candidates for renaming.
if (!isPlainObject(block) || block.type !== "tool_use" || typeof block.name !== "string") {
return block;
}
const mcpAlias = FLAT_TO_MCP.get(lower(block.name));
if (mcpAlias && survivingNames.has(lower(mcpAlias))) {
msgChanged = true;
return { ...block, name: mcpAlias };
}
return block;
});
if (msgChanged) {
anyChanged = true;
return { ...msg, content };
}
return msg;
});
return anyChanged ? result : messages;
}
// ============================================================================
// Full payload transform
// ============================================================================
/**
 * Produce the transformed Anthropic OAuth request payload.
 * Works on a deep clone so the caller's payload is never mutated. System
 * prompt rewriting always applies; tool filtering, tool_choice remapping,
 * and message-history rewriting are skipped entirely when `disableFilter`
 * is set.
 */
function transformPayload(raw: Record<string, unknown>, disableFilter: boolean): Record<string, unknown> {
// Deep clone to avoid mutating the original.
// NOTE(review): the JSON round-trip drops undefined-valued keys and
// functions — assumed acceptable for a JSON API payload.
const payload = JSON.parse(JSON.stringify(raw)) as Record<string, unknown>;
// 1. System prompt rewrite (always applies)
if (payload.system !== undefined) {
payload.system = rewriteSystemField(payload.system);
}
// When escape hatch is active, skip all tool filtering/remapping
if (disableFilter) {
return payload;
}
// 2. Tool filtering and alias remapping
payload.tools = filterAndRemapTools(payload.tools as unknown[] | undefined, false);
// 3. Build map of tool names that survived filtering (lowercase → actual name)
const survivingNames = new Map<string, string>();
if (Array.isArray(payload.tools)) {
for (const tool of payload.tools) {
if (isPlainObject(tool) && typeof tool.name === "string") {
survivingNames.set(lower(tool.name), tool.name as string);
}
}
}
// 4. Remap tool_choice if it references a renamed or filtered tool
if (isPlainObject(payload.tool_choice)) {
const remapped = remapToolChoice(payload.tool_choice, survivingNames);
if (remapped === undefined) {
delete payload.tool_choice;
} else {
payload.tool_choice = remapped;
}
}
// 5. Rewrite historical tool_use blocks in message history
if (Array.isArray(payload.messages)) {
payload.messages = remapMessageToolNames(payload.messages, survivingNames);
}
return payload;
}
// ============================================================================
// Debug logging (PRD §1.4)
// ============================================================================
// Debug log destination; logging is disabled when the env var is unset or empty.
const debugLogPath = process.env.PI_CLAUDE_CODE_USE_DEBUG_LOG;

/**
 * Append one timestamped, pretty-printed JSON entry to the debug log file.
 * No-op when PI_CLAUDE_CODE_USE_DEBUG_LOG is not set; serialization or
 * write failures are swallowed so logging can never break a live request.
 */
function writeDebugLog(payload: unknown): void {
	if (!debugLogPath) {
		return;
	}
	try {
		const stamp = new Date().toISOString();
		const entry = `${stamp}\n${JSON.stringify(payload, null, 2)}\n---\n`;
		appendFileSync(debugLogPath, entry, "utf-8");
	} catch {
		// Debug logging must never break actual requests.
	}
}
// ============================================================================
// Companion alias registration (PRD §1.3)
//
// Discovers loaded companion extensions, captures their tool definitions via
// a shim ExtensionAPI, and registers MCP-alias versions so the model can
// invoke them under Claude Code-compatible names.
// ============================================================================
// MCP alias names this extension has registered via pi.registerTool.
const registeredMcpAliases = new Set<string>();
// Aliases this extension activated automatically (as opposed to
// user-selected ones); see syncAliasActivation for the provenance rules.
const autoActivatedAliases = new Set<string>();
// Active-tool list as of our last setActiveTools call; undefined means we
// are not currently managing the active set.
let lastManagedToolList: string[] | undefined;
// Memoized per-directory companion tool captures (see captureCompanionTools).
const captureCache = new Map<string, Promise<Map<string, ToolRegistration>>>();
// Lazily-created shared jiti loader instance.
let jitiLoader: { import(path: string, opts?: { default?: boolean }): Promise<unknown> } | undefined;
/**
 * Lazily build the shared jiti loader used to import companion extension
 * sources. virtualModules pins the companions' pi/typebox imports to the
 * module instances already loaded in this process, instead of resolving
 * and loading fresh copies from disk.
 */
function getJitiLoader() {
if (!jitiLoader) {
jitiLoader = createJiti(import.meta.url, {
moduleCache: false,
tryNative: false,
virtualModules: {
"@mariozechner/pi-ai": piAiModule,
"@mariozechner/pi-coding-agent": piCodingAgentModule,
"@sinclair/typebox": typeboxModule,
},
});
}
return jitiLoader;
}
/**
 * Load a companion extension's default-export factory from `baseDir`.
 * Probes the conventional entry points in order and returns the first
 * default export that is a function; undefined when none can be loaded.
 */
async function loadFactory(baseDir: string): Promise<((pi: ExtensionAPI) => void | Promise<void>) | undefined> {
	const root = baseDir.replace(/\/$/, "");
	const entryPoints = ["index.ts", "index.js", "extensions/index.ts", "extensions/index.js"];
	const loader = getJitiLoader();
	for (const entry of entryPoints) {
		let mod: unknown;
		try {
			mod = await loader.import(`${root}/${entry}`, { default: true });
		} catch {
			continue; // entry point missing or failed to load — probe the next one
		}
		if (typeof mod === "function") {
			return mod as (pi: ExtensionAPI) => void | Promise<void>;
		}
	}
	return undefined;
}
/**
 * Decide whether `tool` was registered by the companion extension `spec`.
 * First matches on sourceInfo.baseDir (the directory name itself, or an
 * `extensions/` subdirectory of it); otherwise falls back to a
 * segment-bounded match of sourceInfo.path against the scoped package
 * name or directory name.
 */
function isCompanionSource(tool: ToolInfo | undefined, spec: CompanionSpec): boolean {
	const sourceInfo = tool?.sourceInfo;
	if (!sourceInfo) {
		return false;
	}
	const baseDir = sourceInfo.baseDir;
	if (baseDir) {
		const leaf = basename(baseDir);
		if (leaf === spec.dirName) {
			return true;
		}
		// Layout where the extension entry lives in <package>/extensions/
		if (leaf === "extensions" && basename(dirname(baseDir)) === spec.dirName) {
			return true;
		}
	}
	const fullPath = sourceInfo.path;
	if (typeof fullPath !== "string") {
		return false;
	}
	// Normalize Windows separators before the segment-bounded checks.
	const normalized = fullPath.replaceAll("\\", "/");
	if (normalized.includes(`/${spec.packageName}/`)) {
		return true;
	}
	return normalized.includes(`/${spec.dirName}/`);
}
/**
 * Build a stand-in ExtensionAPI handed to a companion's factory so its tool
 * registrations can be captured without real side effects:
 * - registerTool records each definition into `captured` instead of
 *   registering it with Pi.
 * - registerFlag/getFlag forward only flags the companion itself declared
 *   on this shim; other flags read as undefined.
 * - Event, command, UI, and provider registration methods are no-ops.
 * - Read-only queries and execution methods delegate to the real API.
 */
function buildCaptureShim(realPi: ExtensionAPI, captured: Map<string, ToolRegistration>): ExtensionAPI {
const shimFlags = new Set<string>();
return {
registerTool(def) {
captured.set(def.name, def as unknown as ToolRegistration);
},
registerFlag(name, _options) {
shimFlags.add(name);
},
getFlag(name) {
return shimFlags.has(name) ? realPi.getFlag(name) : undefined;
},
// Registration/side-effect surface: deliberately inert during capture.
on() {},
registerCommand() {},
registerShortcut() {},
registerMessageRenderer() {},
registerProvider() {},
unregisterProvider() {},
sendMessage() {},
sendUserMessage() {},
appendEntry() {},
setSessionName() {},
getSessionName() {
return undefined;
},
setLabel() {},
// Delegated surface: companions may legitimately need these at load time.
exec(command, args, options) {
return realPi.exec(command, args, options);
},
getActiveTools() {
return realPi.getActiveTools();
},
getAllTools() {
return realPi.getAllTools();
},
setActiveTools(names) {
realPi.setActiveTools(names);
},
getCommands() {
return realPi.getCommands();
},
setModel(model) {
return realPi.setModel(model);
},
getThinkingLevel() {
return realPi.getThinkingLevel();
},
setThinkingLevel(level) {
realPi.setThinkingLevel(level);
},
events: realPi.events,
} as ExtensionAPI;
}
/**
 * Load the companion extension at `baseDir` through the capture shim and
 * return its tool registrations keyed by flat tool name. Results are
 * memoized per baseDir in captureCache, so concurrent and repeated callers
 * share a single in-flight promise.
 */
async function captureCompanionTools(baseDir: string, realPi: ExtensionAPI): Promise<Map<string, ToolRegistration>> {
	const cached = captureCache.get(baseDir);
	if (cached) {
		return cached;
	}
	const pending = (async () => {
		const captured = new Map<string, ToolRegistration>();
		const factory = await loadFactory(baseDir);
		if (factory) {
			await factory(buildCaptureShim(realPi, captured));
		}
		return captured;
	})();
	captureCache.set(baseDir, pending);
	return pending;
}
/**
 * Discover loaded companion extensions and register MCP-alias copies of
 * their tools. Idempotent per alias: names already registered by us, or
 * already present among Pi's tools, are skipped. Called at session start
 * and before each agent turn.
 */
async function registerAliasesForLoadedCompanions(pi: ExtensionAPI): Promise<void> {
// Clear capture cache so flag/config changes since last call take effect
captureCache.clear();
const allTools = pi.getAllTools();
// Index current tools by lowercase name for case-insensitive lookups.
const toolIndex = new Map<string, ToolInfo>();
const knownNames = new Set<string>();
for (const tool of allTools) {
toolIndex.set(lower(tool.name), tool);
knownNames.add(lower(tool.name));
}
for (const spec of COMPANIONS) {
for (const [flatName, mcpName] of spec.aliases) {
if (registeredMcpAliases.has(mcpName) || knownNames.has(lower(mcpName))) continue;
const tool = toolIndex.get(lower(flatName));
// The flat tool must exist AND verifiably come from this companion.
if (!tool || !isCompanionSource(tool, spec)) continue;
// Prefer the extension file's directory for loading (sourceInfo.path is the actual
// entry point). Fall back to baseDir only if path is unavailable. baseDir can be
// the monorepo root which doesn't contain the extension entry point directly.
const loadDir = tool.sourceInfo?.path ? dirname(tool.sourceInfo.path) : tool.sourceInfo?.baseDir;
if (!loadDir) continue;
const captured = await captureCompanionTools(loadDir, pi);
const def = captured.get(flatName);
if (!def) continue;
// Register a copy under the MCP alias, labelled so it is identifiable.
pi.registerTool({
...def,
name: mcpName,
label: def.label?.startsWith("MCP ") ? def.label : `MCP ${def.label ?? mcpName}`,
});
registeredMcpAliases.add(mcpName);
knownNames.add(lower(mcpName));
}
}
}
/**
 * Synchronize MCP alias tool activation with the current model state.
 * When OAuth is active, auto-activate aliases for any active companion tools.
 * When OAuth is inactive, remove auto-activated aliases (but preserve user-selected ones).
 *
 * Provenance is tracked in autoActivatedAliases (aliases we turned on) and
 * lastManagedToolList (the active list as of our last setActiveTools call,
 * used to detect deliberate user edits between syncs).
 */
function syncAliasActivation(pi: ExtensionAPI, enableAliases: boolean): void {
const activeNames = pi.getActiveTools();
const allNames = new Set(pi.getAllTools().map((t) => t.name));
if (enableAliases) {
// Determine which aliases should be active based on their flat counterpart being active
const activeLc = new Set(activeNames.map(lower));
const desiredAliases: string[] = [];
for (const [flat, mcp] of FLAT_TO_MCP) {
if (activeLc.has(flat) && allNames.has(mcp) && registeredMcpAliases.has(mcp)) {
desiredAliases.push(mcp);
}
}
const desiredSet = new Set(desiredAliases);
// Promote auto-activated aliases to user-selected when the user explicitly kept
// the alias while removing its flat counterpart from the tool picker.
// We detect this by checking: (a) user changed the tool list since our last sync,
// (b) the flat tool was previously managed but is no longer active, and
// (c) the alias is still active. This means the user deliberately kept the alias.
if (lastManagedToolList !== undefined) {
const activeSet = new Set(activeNames);
const lastManaged = new Set(lastManagedToolList);
for (const alias of autoActivatedAliases) {
if (!activeSet.has(alias) || desiredSet.has(alias)) continue;
// Find the flat name for this alias
const flatName = [...FLAT_TO_MCP.entries()].find(([, mcp]) => mcp === alias)?.[0];
if (flatName && lastManaged.has(flatName) && !activeSet.has(flatName)) {
// User removed the flat tool but kept the alias → promote to user-selected
autoActivatedAliases.delete(alias);
}
}
}
// Find registered aliases currently in the active list
const activeRegistered = activeNames.filter((n) => registeredMcpAliases.has(n) && allNames.has(n));
// Per-alias provenance: an alias is "user-selected" if it's active and was NOT
// auto-activated by us. Only preserve those; auto-activated aliases get re-derived
// from the desired set each sync.
const preserved = activeRegistered.filter((n) => !autoActivatedAliases.has(n));
// Build result: non-alias tools + preserved user aliases + desired aliases
const nonAlias = activeNames.filter((n) => !registeredMcpAliases.has(n));
const next = Array.from(new Set([...nonAlias, ...preserved, ...desiredAliases]));
// Update auto-activation tracking: aliases we added this sync that weren't user-preserved
const preservedSet = new Set(preserved);
autoActivatedAliases.clear();
for (const name of desiredAliases) {
if (!preservedSet.has(name)) {
autoActivatedAliases.add(name);
}
}
// Only touch Pi's active-tool list when it actually changed.
if (next.length !== activeNames.length || next.some((n, i) => n !== activeNames[i])) {
pi.setActiveTools(next);
lastManagedToolList = [...next];
}
} else {
// Remove only auto-activated aliases; user-selected ones are preserved
const next = activeNames.filter((n) => !autoActivatedAliases.has(n));
autoActivatedAliases.clear();
if (next.length !== activeNames.length || next.some((n, i) => n !== activeNames[i])) {
pi.setActiveTools(next);
lastManagedToolList = [...next];
} else {
// NOTE(review): nothing was removed, so managed-list tracking is
// cleared — presumably so promotion detection re-arms fresh; confirm.
lastManagedToolList = undefined;
}
}
}
// ============================================================================
// Extension entry point
// ============================================================================
/**
 * Extension entry point. Wires three hooks:
 * - session_start: register MCP aliases for loaded companions.
 * - before_agent_start: refresh aliases, then sync alias activation with
 *   whether the current model is Anthropic under OAuth.
 * - before_provider_request: for Anthropic OAuth requests only, transform
 *   the outbound payload and optionally write before/after debug entries.
 */
export default async function piClaudeCodeUse(pi: ExtensionAPI): Promise<void> {
pi.on("session_start", async () => {
await registerAliasesForLoadedCompanions(pi);
});
pi.on("before_agent_start", async (_event, ctx) => {
await registerAliasesForLoadedCompanions(pi);
const model = ctx.model;
const isOAuth = model?.provider === "anthropic" && ctx.modelRegistry.isUsingOAuth(model);
syncAliasActivation(pi, isOAuth);
});
pi.on("before_provider_request", (event, ctx) => {
const model = ctx.model;
// Leave non-Anthropic and API-key-authenticated requests untouched.
if (!model || model.provider !== "anthropic" || !ctx.modelRegistry.isUsingOAuth(model)) {
return undefined;
}
if (!isPlainObject(event.payload)) {
return undefined;
}
writeDebugLog({ stage: "before", payload: event.payload });
// Escape hatch: skip tool filtering entirely when the env var is "1".
const disableFilter = process.env.PI_CLAUDE_CODE_USE_DISABLE_TOOL_FILTER === "1";
const transformed = transformPayload(event.payload as Record<string, unknown>, disableFilter);
writeDebugLog({ stage: "after", payload: transformed });
return transformed;
});
}
// ============================================================================
// Test exports
// ============================================================================
// Internal surface re-exported for unit tests only; not part of the
// extension's public API.
export const _test = {
CORE_TOOL_NAMES,
FLAT_TO_MCP,
COMPANIONS,
TOOL_TO_COMPANION,
autoActivatedAliases,
buildCaptureShim,
collectToolNames,
filterAndRemapTools,
getLastManagedToolList: () => lastManagedToolList,
isCompanionSource,
isPlainObject,
lower,
registerAliasesForLoadedCompanions,
registeredMcpAliases,
remapMessageToolNames,
remapToolChoice,
rewritePromptText,
rewriteSystemField,
setLastManagedToolList: (v: string[] | undefined) => {
lastManagedToolList = v;
},
syncAliasActivation,
transformPayload,
};
+3
View File
@@ -11,6 +11,9 @@ importers:
'@anthropic-ai/sdk':
specifier: ^0.52.0
version: 0.52.0
'@mariozechner/jiti':
specifier: ^2.6.5
version: 2.6.5
'@mozilla/readability':
specifier: ^0.5.0
version: 0.5.0
+80
View File
@@ -14,6 +14,9 @@
*/
import type { ExtensionAPI, ExtensionCommandContext } from "@mariozechner/pi-coding-agent";
import * as fs from "node:fs";
import * as path from "node:path";
import * as os from "node:os";
interface ToolTimeout {
toolCallId: string;
@@ -28,6 +31,8 @@ interface ToolTimeout {
// Configuration
let timeoutSeconds = 30;
let enabled = true;
const SETTINGS_NAMESPACE = "slowtool";
const globalSettingsPath = path.join(os.homedir(), ".pi", "agent", "settings.json");
// Track running tools
const runningTools: Map<string, ToolTimeout> = new Map();
@@ -43,6 +48,55 @@ function formatDuration(ms: number): string {
return `${minutes}m ${remainingSeconds}s`;
}
function asRecord(value: unknown): Record<string, unknown> | undefined {
if (!value || typeof value !== "object") return undefined;
return value as Record<string, unknown>;
}
function readSettingsFile(filePath: string): Record<string, unknown> {
try {
if (!fs.existsSync(filePath)) return {};
const raw = fs.readFileSync(filePath, "utf-8");
const parsed = JSON.parse(raw) as unknown;
return asRecord(parsed) ?? {};
} catch {
return {};
}
}
function loadGlobalConfig(): { timeoutSeconds: number; enabled: boolean } {
const settings = readSettingsFile(globalSettingsPath);
const slowtoolSettings = asRecord(settings[SETTINGS_NAMESPACE]);
const configuredTimeout = slowtoolSettings?.timeoutSeconds;
const nextTimeout =
typeof configuredTimeout === "number" && Number.isFinite(configuredTimeout) && configuredTimeout >= 1
? Math.floor(configuredTimeout)
: 30;
const configuredEnabled = slowtoolSettings?.enabled;
const nextEnabled = typeof configuredEnabled === "boolean" ? configuredEnabled : true;
return { timeoutSeconds: nextTimeout, enabled: nextEnabled };
}
function saveGlobalConfig(next: { timeoutSeconds: number; enabled: boolean }): boolean {
try {
const settings = readSettingsFile(globalSettingsPath);
const existing = asRecord(settings[SETTINGS_NAMESPACE]) ?? {};
settings[SETTINGS_NAMESPACE] = {
...existing,
timeoutSeconds: next.timeoutSeconds,
enabled: next.enabled,
};
fs.mkdirSync(path.dirname(globalSettingsPath), { recursive: true });
fs.writeFileSync(globalSettingsPath, `${JSON.stringify(settings, null, 2)}\n`, "utf-8");
return true;
} catch {
return false;
}
}
function getCommandPreview(args: unknown): string | undefined {
if (!args) return undefined;
const anyArgs = args as Record<string, unknown>;
@@ -77,6 +131,29 @@ function notifyTimeout(pi: ExtensionAPI, tool: ToolTimeout): void {
// ============ EVENT HANDLERS ============
export default function(pi: ExtensionAPI) {
const applyPersistedConfig = () => {
const persisted = loadGlobalConfig();
timeoutSeconds = persisted.timeoutSeconds;
enabled = persisted.enabled;
};
const persistCurrentConfig = (ctx: ExtensionCommandContext): void => {
const ok = saveGlobalConfig({ timeoutSeconds, enabled });
if (!ok) {
ctx.ui.notify("Failed to persist slowtool settings", "warning");
}
};
applyPersistedConfig();
pi.on("session_start", async (_event, _ctx) => {
applyPersistedConfig();
});
pi.on("session_switch", async (_event, _ctx) => {
applyPersistedConfig();
});
// Register commands
pi.registerCommand("slowtool:timeout", {
description: "Set timeout threshold in seconds (default: 30)",
@@ -91,6 +168,7 @@ export default function(pi: ExtensionAPI) {
return;
}
timeoutSeconds = newTimeout;
persistCurrentConfig(ctx);
ctx.ui.notify(`Timeout set to ${timeoutSeconds}s`, "info");
},
});
@@ -99,6 +177,7 @@ export default function(pi: ExtensionAPI) {
description: "Enable slow tool notifications",
handler: async (_args: string, ctx: ExtensionCommandContext) => {
enabled = true;
persistCurrentConfig(ctx);
ctx.ui.notify("Slow tool notifications enabled", "info");
},
});
@@ -107,6 +186,7 @@ export default function(pi: ExtensionAPI) {
description: "Disable slow tool notifications",
handler: async (_args: string, ctx: ExtensionCommandContext) => {
enabled = false;
persistCurrentConfig(ctx);
ctx.ui.notify("Slow tool notifications disabled", "info");
},
});
+3 -1
View File
@@ -1,3 +1,5 @@
{
"app.model.select": "ctrl+space"
"app.model.select": "ctrl+space",
"tui.input.newLine": ["shift+enter"],
"tui.input.submit": ["enter"]
}
@@ -0,0 +1,99 @@
---
name: jj-issue-workspaces
description: Create one Jujutsu workspace per issue, base them on an updated mainline bookmark like master, optionally create feature bookmarks, and open a zellij tab running pi in each workspace. Use when the user wants to fan out work across multiple issues, especially from a screenshot, Linear board, or issue list.
---
# JJ Issue Workspaces
This skill sets up a parallel issue workflow with `jj workspaces`.
Use it when the user wants any of the following:
- one workspace per issue
- multiple issues opened side by side
- a zellij tab for each issue
- `pi` opened in each issue workspace with a task-specific prompt
- issue fan-out from a screenshot, Linear board, or manually listed issues
## Workflow
1. Confirm the target repo and verify it is a `jj` repo.
2. If the user gave a screenshot path, use the `read` tool on the screenshot first and extract the issue keys and titles.
3. Decide the base bookmark/revision, usually `master` or `main`.
4. Run the helper script to:
- fetch the base bookmark from `origin`
- create sibling workspaces like `../Phoenix-spa-748`
- create bookmarks like `feature/spa-748`
- optionally open one zellij tab per workspace and launch `pi`
5. Tell the user which workspaces and tabs were created.
## Helper script
Use the helper script in this skill:
```bash
./scripts/jj-workspace-fanout.sh --help
```
Run it from anywhere. Pass absolute paths when convenient.
## Common usage
### Create workspaces and bookmarks only
```bash
./scripts/jj-workspace-fanout.sh \
--repo /path/to/repo \
--base master \
--issue "SPA-748=Wrap text in credits line items" \
--issue "SPA-428=Implement \"Downgrade\" Mimir modal (maximalist)" \
--issue "SPA-754=Resize seat count picker"
```
### Create workspaces, bookmarks, zellij tabs, and launch pi
```bash
./scripts/jj-workspace-fanout.sh \
--repo /path/to/repo \
--base master \
--session attio \
--open-pi \
--issue "SPA-748=Wrap text in credits line items" \
--issue "SPA-428=Implement \"Downgrade\" Mimir modal (maximalist)" \
--issue "SPA-754=Resize seat count picker"
```
### Recreate existing workspaces from scratch
```bash
./scripts/jj-workspace-fanout.sh \
--repo /path/to/repo \
--base master \
--session attio \
--open-pi \
--reset-existing \
--issue "SPA-748=Wrap text in credits line items"
```
## Defaults and conventions
- Workspace names use the lowercased issue key, for example `spa-748`
- Workspace directories are created beside the repo, for example `../Phoenix-spa-748`
- Bookmark names default to `feature/<issue-key-lowercase>`
- Base revision defaults to `master`
- Remote defaults to `origin`
- If `--open-pi` is used, the script launches `pi` in each workspace with a task-specific prompt
## Recommended agent behavior
When using this skill:
- Prefer `jj` over `git`
- Check `jj workspace list` before changing anything
- If the user says to update `master` or `main` first, let the script fetch that base revision before creating workspaces
- If the user wants an existing set recreated, use `--reset-existing`
- If zellij tabs already exist and the user wants a clean retry, close those tabs first or recreate the session
## Notes
- The script does not delete existing workspaces unless `--reset-existing` is provided.
- `--open-pi` requires a zellij session name, either via `--session <name>` or `ZELLIJ_SESSION_NAME`.
- If the repo uses `main` instead of `master`, pass `--base main`.
@@ -0,0 +1,292 @@
#!/usr/bin/env bash
set -euo pipefail
# Print CLI usage and examples to stdout (quoted heredoc, no expansion).
usage() {
  cat <<'EOF'
Create one jj workspace per issue, optionally create bookmarks, and optionally open zellij tabs running pi.
Usage:
jj-workspace-fanout.sh [options] --issue "KEY=Title" [--issue "KEY=Title" ...]
Options:
--repo PATH Repo root (default: current directory)
--base REV Base revision/bookmark (default: master)
--remote NAME Git remote to fetch from (default: origin)
--issue KEY=TITLE Issue key and title (repeatable)
--session NAME Zellij session name (defaults to ZELLIJ_SESSION_NAME if set)
--open-pi Open a zellij tab per workspace and launch pi
--no-fetch Skip jj git fetch
--no-bookmarks Do not create feature/<issue> bookmarks
--keep-existing Skip creation for existing workspaces instead of failing
--reset-existing Forget and delete existing workspaces before recreating them
--prompt-suffix TEXT Extra text appended to each pi prompt
--pi-cmd CMD pi command to launch (default: pi)
--dry-run Print planned actions without making changes
--help Show this help
Examples:
jj-workspace-fanout.sh \
--repo /path/to/Phoenix \
--base master \
--issue "SPA-748=Wrap text in credits line items" \
--issue "SPA-754=Resize seat count picker"
jj-workspace-fanout.sh \
--repo /path/to/Phoenix \
--base master \
--session attio \
--open-pi \
--issue "SPA-748=Wrap text in credits line items"
EOF
}
# Abort with a clear error when a required external command is not on PATH.
require_cmd() {
  local cmd="$1"
  command -v "$cmd" >/dev/null 2>&1 && return 0
  echo "error: missing required command: $cmd" >&2
  exit 1
}
# Quote a single value so it survives verbatim inside a shell command line.
shell_escape() {
  local value="$1"
  printf '%q' "$value"
}
# Emit a progress message tagged with the skill name.
log() {
  local message="$*"
  printf '[jj-issue-workspaces] %s\n' "$message"
}
# Execute a command, or just print it (shell-quoted) when DRY_RUN=1.
# In normal mode the caller sees the command's own exit status.
run() {
  if [[ "$DRY_RUN" -ne 1 ]]; then
    "$@"
    return
  fi
  printf '[dry-run] '
  printf '%q ' "$@"
  printf '\n'
}
# True when a jj workspace with the given name is registered in $REPO.
# `jj workspace list` prints "name: ..."; match the name field exactly.
workspace_exists() {
  local workspace_name="$1"
  jj -R "$REPO" workspace list | awk -F: '{print $1}' | grep -Fxq "$workspace_name"
}
# True when the named bookmark already exists in the given workspace.
# `jj bookmark list NAME` prints "NAME: ..." for matches; anchor on that prefix.
bookmark_exists() {
  local workspace_dir="$1"
  local bookmark_name="$2"
  jj -R "$workspace_dir" bookmark list "$bookmark_name" 2>/dev/null | grep -Eq "^${bookmark_name}:"
}
# Close the named tab in the given zellij session if it currently exists,
# so a recreated tab starts clean on retries.
close_tab_if_exists() {
  local session_name="$1"
  local tab_name="$2"
  local tabs
  # The query may fail (e.g. session not running yet); treat that as "no tabs".
  tabs=$(zellij --session "$session_name" action query-tab-names 2>/dev/null || true)
  if printf '%s\n' "$tabs" | grep -Fxq "$tab_name"; then
    log "closing existing zellij tab $tab_name"
    # close-tab acts on the focused tab, so focus it first.
    run zellij --session "$session_name" action go-to-tab-name "$tab_name"
    run zellij --session "$session_name" action close-tab
  fi
}
# Open a zellij tab in the session, cd into the workspace, and launch pi with
# the issue-specific prompt. Any existing tab of the same name is replaced.
launch_pi_tab() {
  local session_name="$1"
  local tab_name="$2"
  local workspace_dir="$3"
  local prompt="$4"
  local cmd
  # Build the command typed into the tab; dir and prompt are shell-escaped.
  # NOTE(review): $PI_CMD is intentionally unescaped so it may carry arguments.
  cmd="cd $(shell_escape "$workspace_dir") && pwd && $PI_CMD $(shell_escape "$prompt")"
  close_tab_if_exists "$session_name" "$tab_name"
  run zellij --session "$session_name" action new-tab --name "$tab_name"
  run zellij --session "$session_name" action write-chars "$cmd"
  # Send byte 10 (newline) to submit the typed command.
  run zellij --session "$session_name" action write 10
}
# ---- Defaults (each overridable via a CLI flag below) ----
REPO="$(pwd)"
BASE="master"
REMOTE="origin"
SESSION="${ZELLIJ_SESSION_NAME:-}"
OPEN_PI=0
FETCH=1
CREATE_BOOKMARKS=1
KEEP_EXISTING=0
RESET_EXISTING=0
DRY_RUN=0
PROMPT_SUFFIX=""
PI_CMD="pi"
declare -a ISSUES=()
# ---- Argument parsing ----
while [[ $# -gt 0 ]]; do
  case "$1" in
    --repo)
      REPO="$2"
      shift 2
      ;;
    --base)
      BASE="$2"
      shift 2
      ;;
    --remote)
      REMOTE="$2"
      shift 2
      ;;
    --issue)
      ISSUES+=("$2")
      shift 2
      ;;
    --session)
      SESSION="$2"
      shift 2
      ;;
    --open-pi)
      OPEN_PI=1
      shift
      ;;
    --no-fetch)
      FETCH=0
      shift
      ;;
    --no-bookmarks)
      CREATE_BOOKMARKS=0
      shift
      ;;
    --keep-existing)
      KEEP_EXISTING=1
      shift
      ;;
    --reset-existing)
      RESET_EXISTING=1
      shift
      ;;
    --prompt-suffix)
      PROMPT_SUFFIX="$2"
      shift 2
      ;;
    --pi-cmd)
      PI_CMD="$2"
      shift 2
      ;;
    --dry-run)
      DRY_RUN=1
      shift
      ;;
    --help|-h)
      usage
      exit 0
      ;;
    *)
      echo "error: unknown argument: $1" >&2
      usage >&2
      exit 1
      ;;
  esac
done
# ---- Validation ----
if [[ ${#ISSUES[@]} -eq 0 ]]; then
  echo "error: at least one --issue KEY=TITLE is required" >&2
  exit 1
fi
if [[ "$KEEP_EXISTING" -eq 1 && "$RESET_EXISTING" -eq 1 ]]; then
  echo "error: --keep-existing and --reset-existing cannot be combined" >&2
  exit 1
fi
# Normalize the repo path; workspaces are created as siblings of the repo dir.
REPO="$(cd "$REPO" && pwd)"
PARENT_DIR="$(dirname "$REPO")"
REPO_BASENAME="$(basename "$REPO")"
require_cmd jj
if [[ "$OPEN_PI" -eq 1 ]]; then
  require_cmd zellij
  if [[ -z "$SESSION" ]]; then
    echo "error: --open-pi requires --session <name> or ZELLIJ_SESSION_NAME" >&2
    exit 1
  fi
fi
if [[ ! -d "$REPO/.jj" ]]; then
  echo "error: repo is not a jj repository: $REPO" >&2
  exit 1
fi
# ---- Base revision: optionally fetch, then confirm it resolves ----
if [[ "$FETCH" -eq 1 ]]; then
  log "fetching $BASE from $REMOTE"
  run jj -R "$REPO" git fetch --remote "$REMOTE" --branch "$BASE"
fi
log "validating base revision $BASE"
run jj -R "$REPO" log -r "$BASE" --no-pager
# ---- Per-issue fan-out ----
created_workspaces=()
for issue in "${ISSUES[@]}"; do
  if [[ "$issue" != *=* ]]; then
    echo "error: issue must be formatted as KEY=TITLE: $issue" >&2
    exit 1
  fi
  # Split "KEY=TITLE" on the first '='; the title may itself contain '='.
  issue_key="${issue%%=*}"
  issue_title="${issue#*=}"
  issue_slug="$(printf '%s' "$issue_key" | tr '[:upper:]' '[:lower:]')"
  workspace_name="$issue_slug"
  workspace_dir="$PARENT_DIR/${REPO_BASENAME}-${issue_slug}"
  bookmark_name="feature/$issue_slug"
  prompt="Work on ${issue_key}: ${issue_title}. You are in the dedicated jj workspace for this issue. First inspect the relevant code, identify the main components involved, and propose a short plan before editing."
  if [[ -n "$PROMPT_SUFFIX" ]]; then
    prompt+=" ${PROMPT_SUFFIX}"
  fi
  # Collision handling: an existing workspace registration OR directory counts.
  if workspace_exists "$workspace_name" || [[ -e "$workspace_dir" ]]; then
    if [[ "$RESET_EXISTING" -eq 1 ]]; then
      log "resetting existing workspace $workspace_name"
      if workspace_exists "$workspace_name"; then
        run jj -R "$REPO" workspace forget "$workspace_name"
      fi
      run rm -rf "$workspace_dir"
    elif [[ "$KEEP_EXISTING" -eq 1 ]]; then
      log "keeping existing workspace $workspace_name at $workspace_dir"
    else
      echo "error: workspace already exists: $workspace_name ($workspace_dir). Use --keep-existing or --reset-existing." >&2
      exit 1
    fi
  fi
  if ! workspace_exists "$workspace_name"; then
    log "creating workspace $workspace_name at $workspace_dir"
    run jj -R "$REPO" workspace add --name "$workspace_name" -r "$BASE" "$workspace_dir"
  fi
  if [[ "$CREATE_BOOKMARKS" -eq 1 ]]; then
    log "ensuring bookmark $bookmark_name exists"
    # `set` moves an existing bookmark to @; `create` makes a new one at @.
    if bookmark_exists "$workspace_dir" "$bookmark_name"; then
      run jj -R "$workspace_dir" bookmark set "$bookmark_name" -r @
    else
      run jj -R "$workspace_dir" bookmark create "$bookmark_name"
    fi
  fi
  if [[ "$OPEN_PI" -eq 1 ]]; then
    log "opening zellij tab $workspace_name in session $SESSION"
    run launch_pi_tab "$SESSION" "$workspace_name" "$workspace_dir" "$prompt"
  fi
  # Record name:dir:bookmark for the summary below (fields are ':'-joined).
  created_workspaces+=("$workspace_name:$workspace_dir:$bookmark_name")
done
# ---- Summary ----
printf '\nCreated/updated workspaces:\n'
for item in "${created_workspaces[@]}"; do
  IFS=':' read -r workspace_name workspace_dir bookmark_name <<<"$item"
  printf ' - %s -> %s' "$workspace_name" "$workspace_dir"
  if [[ "$CREATE_BOOKMARKS" -eq 1 ]]; then
    printf ' [%s]' "$bookmark_name"
  fi
  printf '\n'
done
if [[ "$OPEN_PI" -eq 1 ]]; then
  printf '\nZellij session: %s\n' "$SESSION"
fi
+102
View File
@@ -0,0 +1,102 @@
#!/usr/bin/env bash
set -euo pipefail
# Cleans up zellij sessions that are inactive:
# - sessions marked EXITED (resurrectable metadata)
# - running sessions with 0 attached clients
#
# Usage:
# cleanup-zellij-inactive.sh # delete inactive sessions
# cleanup-zellij-inactive.sh --dry-run # show what would be deleted
# Parse the single optional flag (no flag means: actually delete).
DRY_RUN=0
case "${1-}" in
  "" ) ;;
  -n|--dry-run) DRY_RUN=1 ;;
  -h|--help)
    cat <<'EOF'
cleanup-zellij-inactive.sh
Delete zellij sessions that are inactive:
- EXITED sessions are deleted
- running sessions with 0 attached clients are killed+deleted
Options:
-n, --dry-run Show what would be deleted
-h, --help Show this help
EOF
    exit 0
    ;;
  *)
    echo "Unknown option: $1" >&2
    echo "Use --help for usage" >&2
    exit 1
    ;;
esac
if ! command -v zellij >/dev/null 2>&1; then
  echo "zellij not found in PATH" >&2
  exit 1
fi
# --no-formatting strips ANSI styling so the lines can be parsed as plain text.
mapfile -t session_lines < <(zellij list-sessions --no-formatting 2>/dev/null || true)
if [ "${#session_lines[@]}" -eq 0 ]; then
  echo "No zellij sessions found"
  exit 0
fi
deleted=0
failed=0
kept=0
for line in "${session_lines[@]}"; do
  [ -z "$line" ] && continue
  # Session name is the first whitespace-delimited field.
  # NOTE(review): names containing spaces would be truncated here — verify.
  name="${line%% *}"
  is_exited=0
  # NOTE(review): substring match; a session whose name contains "EXITED"
  # would also match — acceptable for personal tooling, but worth confirming.
  if [[ "$line" == *"EXITED"* ]]; then
    is_exited=1
  fi
  should_delete=0
  if [ "$is_exited" -eq 1 ]; then
    should_delete=1
  else
    # Running session: check attached clients
    clients_out="$(zellij --session "$name" action list-clients 2>/dev/null || true)"
    # Drop the header row and blank lines, count the remaining client rows.
    client_count="$(printf '%s\n' "$clients_out" | tail -n +2 | sed '/^\s*$/d' | wc -l | tr -d ' ')"
    if [ "$client_count" -eq 0 ]; then
      should_delete=1
    fi
  fi
  if [ "$should_delete" -eq 1 ]; then
    if [ "$DRY_RUN" -eq 1 ]; then
      echo "[dry-run] delete: $name"
      deleted=$((deleted + 1))
    else
      # --force also kills running sessions before deleting
      if zellij delete-session --force "$name" >/dev/null 2>&1; then
        echo "deleted: $name"
        deleted=$((deleted + 1))
      else
        echo "failed: $name" >&2
        failed=$((failed + 1))
      fi
    fi
  else
    kept=$((kept + 1))
  fi
done
# Summary (blank line first for readability).
echo
if [ "$DRY_RUN" -eq 1 ]; then
  echo "Would delete: $deleted"
else
  echo "Deleted: $deleted"
  echo "Failed: $failed"
fi
echo "Kept: $kept"
+52
View File
@@ -0,0 +1,52 @@
#!/usr/bin/env bash
set -euo pipefail
# Replace the current zellij tab by opening a layout in a new tab
# and closing the original tab.
#
# Usage:
# zellij-replace-tab-layout.sh # uses "dev"
# zellij-replace-tab-layout.sh dev
# zellij-replace-tab-layout.sh my-layout
# Layout name defaults to "dev" when no argument is given.
layout="${1:-dev}"
case "${layout}" in
  -h|--help)
    cat <<'EOF'
zellij-replace-tab-layout.sh
Replace the current zellij tab with a new tab created from a layout.
This avoids `zellij action override-layout` glitches.
Usage:
zellij-replace-tab-layout.sh [layout]
Examples:
zellij-replace-tab-layout.sh
zellij-replace-tab-layout.sh dev
zellij-replace-tab-layout.sh dotfiles
EOF
    exit 0
    ;;
esac
if ! command -v zellij >/dev/null 2>&1; then
  echo "zellij not found in PATH" >&2
  exit 1
fi
# ZELLIJ is exported by zellij inside a session; required to act on "current tab".
if [ -z "${ZELLIJ:-}" ]; then
  echo "Not inside a zellij session (ZELLIJ is not set)" >&2
  exit 1
fi
# Capture the current tab id BEFORE creating the replacement tab,
# since new-tab shifts focus away from it.
current_tab_id="$(zellij action current-tab-info | awk '/^id:/ { print $2 }')"
if [ -z "$current_tab_id" ]; then
  echo "Failed to detect current tab id" >&2
  exit 1
fi
zellij action new-tab --layout "$layout" >/dev/null
# Close the original tab by id; focus stays on the newly created tab.
zellij action close-tab --tab-id "$current_tab_id"
+1 -1
View File
@@ -328,7 +328,7 @@ default_layout "compact"
// The folder in which Zellij will look for themes
// (Requires restart)
//
// theme_dir "/tmp"
// theme_dir "/home/thomasgl/.config/zellij/themes"
// Toggle enabling the mouse mode.
// On certain configurations, or terminals this could