Compare commits

..

12 Commits

Author SHA1 Message Date
thomas 62337a078c stop squashing 2026-04-21 09:16:30 +01:00
thomas 8e810418a5 remove notify 2026-04-20 16:09:45 +01:00
thomas a2d283a5c5 a bunch of changes 2026-04-20 13:55:11 +01:00
thomas ca0708a8ee fuck squashing 2026-04-17 12:12:36 +01:00
thomas 96060c899d remvoe cursor 2026-04-16 17:09:18 +01:00
thomas dc0e75eb46 stuff 2026-04-16 16:30:12 +01:00
thomas f3d9d42745 fix sudo 2026-04-16 16:00:09 +01:00
thomas 191cfbf182 resurrect claude 2026-04-16 15:49:19 +01:00
thomas 0bfdbd350e cursor stuff 2026-04-16 15:34:00 +01:00
thomas 9a7669af28 fix 2026-04-16 11:58:48 +01:00
thomas 534ec8b99f cursor extension 2026-04-16 11:55:57 +01:00
thomas c004356b5a zellij cleanup + small files 2026-04-16 09:48:44 +01:00
18 changed files with 974 additions and 641 deletions
+12 -2
View File
@@ -102,9 +102,19 @@ status is-interactive; and begin
end
# Add user local bin to PATH
# PATH ordering on Linux: keep privileged wrapper binaries first (sudo, etc.)
if test (uname) = Linux
fish_add_path -m /run/wrappers/bin
fish_add_path -a -m /run/current-system/sw/bin
end
# Add user local bin to PATH, but keep it after system paths on Linux
if test -d "$HOME/.local/bin"
fish_add_path "$HOME/.local/bin"
if test (uname) = Linux
fish_add_path -a -m "$HOME/.local/bin"
else
fish_add_path "$HOME/.local/bin"
end
end
# pnpm
+3
View File
@@ -4,3 +4,6 @@ email = "thomasgl@pm.me"
[git]
write-change-id-header = true
[snapshot]
auto-update-stale = true
+1
View File
@@ -1,5 +1,6 @@
return {
"HotThoughts/jjui.nvim",
enabled = false,
cmd = {
"JJUI",
"JJUICurrentFile",
+8 -25
View File
@@ -218,12 +218,12 @@ return {
},
-- git
{
"<leader>gcb",
"<leader>jc",
function()
local cwd = vim.fn.getcwd()
-- Helper to run git commands and capture both stdout and stderr
local function git_cmd(cmd)
-- Helper to run commands and capture both stdout and stderr
local function run_cmd(cmd)
local full_cmd = "cd " .. vim.fn.shellescape(cwd) .. " && " .. cmd .. " 2>&1"
local handle = io.popen(full_cmd)
local result = handle and handle:read("*a") or ""
@@ -234,7 +234,7 @@ return {
end
-- Check if in a git repo
local git_dir = git_cmd("git rev-parse --git-dir")
local git_dir = run_cmd("git rev-parse --git-dir")
if git_dir == "" or git_dir:match("^fatal") then
vim.notify("Not in a git repository", vim.log.levels.WARN)
return
@@ -242,7 +242,7 @@ return {
-- Get the default branch
local function branch_exists(branch)
local result = git_cmd("git rev-parse --verify refs/remotes/origin/" .. branch)
local result = run_cmd("git rev-parse --verify refs/remotes/origin/" .. branch)
-- If branch exists, rev-parse returns a hash; if not, it returns fatal error
return not result:match("^fatal")
end
@@ -259,19 +259,9 @@ return {
return
end
-- Get current branch
local current_branch = git_cmd("git branch --show-current")
if current_branch == "" then
current_branch = "HEAD"
end
local compare_target = "origin/" .. default_branch
-- Get files that differ from origin/main (includes committed + uncommitted changes)
local result = git_cmd("git diff --name-only " .. compare_target)
-- Also get untracked files
local untracked = git_cmd("git ls-files --others --exclude-standard")
local result = run_cmd("jj diff --from " .. default_branch .. "@origin --to @ --summary | awk '{print $2}'")
-- Combine results
local all_files = {}
@@ -284,20 +274,13 @@ return {
end
end
for line in untracked:gmatch("[^\r\n]+") do
if line ~= "" and not seen[line] then
seen[line] = true
table.insert(all_files, { text = line .. " [untracked]", file = line })
end
end
if #all_files == 0 then
vim.notify("No modified files (vs " .. compare_target .. ")", vim.log.levels.INFO)
vim.notify("No modified files", vim.log.levels.INFO)
return
end
Snacks.picker({
title = "Modified Files (vs " .. compare_target .. ")",
title = "Modified Files",
items = all_files,
layout = { preset = "default" },
confirm = function(picker, item)
+1 -1
View File
@@ -1,7 +1,7 @@
{
"activePack": "solid_snake",
"volume": 1.5,
"muted": true,
"muted": false,
"enabledCategories": {
"session.start": true,
"task.acknowledge": true,
+3 -3
View File
@@ -1,15 +1,15 @@
{
"lastChangelogVersion": "0.66.1",
"lastChangelogVersion": "0.67.3",
"defaultProvider": "openai-codex",
"defaultModel": "gpt-5.3-codex",
"defaultThinkingLevel": "high",
"defaultThinkingLevel": "medium",
"theme": "matugen",
"lsp": {
"hookMode": "edit_write"
},
"hideThinkingBlock": false,
"slowtool": {
"timeoutSeconds": 120,
"timeoutSeconds": 300,
"enabled": true
}
}
+3 -3
View File
@@ -1,7 +1,7 @@
{
"lastChangelogVersion": "0.66.1",
"defaultProvider": "openai-codex",
"defaultModel": "gpt-5.4",
"lastChangelogVersion": "0.67.68",
"defaultProvider": "anthropic",
"defaultModel": "claude-opus-4-7",
"defaultThinkingLevel": "medium",
"theme": "matugen",
"lsp": {
+5 -1
View File
@@ -1,3 +1,7 @@
# Tool usage
FUCKING ALWAYS use timeout on tool usage because sometimes you're stupid and hang on things because you assume it's non-interactive. And by that I don't mean appending `timeout` to bash or something, but you have a way to add a timeout to tool calling somehow. I don't know the inner workings of the harness.
# Validations
Sometimes some repositories (stupidly) ask you to run validations after changes or some shit. Thing is, you're smart. Your edit tools already contain formatting and LSP hooks. So, you may ask the user if they want you to run said "required" validations, but they're not really required.
@@ -14,7 +18,7 @@ When the user provides a screenshot path (e.g., `/tmp/pi-clipboard-xxx.png`), **
**Prefer jj (Jujutsu) over git.** If a project has a colocated jj repo (`.jj` directory), use `jj` commands for all version control operations — rebasing, branching, log, etc. Only fall back to git when jj doesn't support something or the project isn't set up for it.
After pushing changes, always run `jj new` to start a fresh working copy commit.
After pushing changes, always run `jj new` to start a fresh working copy commit. Don't squash unnecessarily! seriously don't squash all the time.
# Git commits and PRs
@@ -1 +0,0 @@
node_modules/
@@ -1,604 +0,0 @@
/**
* Custom Provider Example
*
* Demonstrates registering a custom provider with:
* - Custom API identifier ("custom-anthropic-api")
* - Custom streamSimple implementation
* - OAuth support for /login
* - API key support via environment variable
* - Two model definitions
*
* Usage:
* # First install dependencies
* cd packages/coding-agent/examples/extensions/custom-provider && npm install
*
* # With OAuth (run /login custom-anthropic first)
* pi -e ./packages/coding-agent/examples/extensions/custom-provider
*
* # With API key
* CUSTOM_ANTHROPIC_API_KEY=sk-ant-... pi -e ./packages/coding-agent/examples/extensions/custom-provider
*
* Then use /model to select custom-anthropic/claude-sonnet-4-5
*/
import Anthropic from "@anthropic-ai/sdk";
import type { ContentBlockParam, MessageCreateParamsStreaming } from "@anthropic-ai/sdk/resources/messages.js";
import {
type Api,
type AssistantMessage,
type AssistantMessageEventStream,
type Context,
calculateCost,
createAssistantMessageEventStream,
type ImageContent,
type Message,
type Model,
type OAuthCredentials,
type OAuthLoginCallbacks,
type SimpleStreamOptions,
type StopReason,
type TextContent,
type ThinkingContent,
type Tool,
type ToolCall,
type ToolResultMessage,
} from "@mariozechner/pi-ai";
import type { ExtensionAPI } from "@mariozechner/pi-coding-agent";
// =============================================================================
// OAuth Implementation (copied from packages/ai/src/utils/oauth/anthropic.ts)
// =============================================================================
// atob alias; the client id below is stored base64-encoded in source.
const decode = (s: string) => atob(s);
// Anthropic's public OAuth client id (decoded at load time).
const CLIENT_ID = decode("OWQxYzI1MGEtZTYxYi00NGQ5LTg4ZWQtNTk0NGQxOTYyZjVl");
// Endpoints for the authorization-code + PKCE flow.
const AUTHORIZE_URL = "https://claude.ai/oauth/authorize";
const TOKEN_URL = "https://console.anthropic.com/v1/oauth/token";
const REDIRECT_URI = "https://console.anthropic.com/oauth/code/callback";
// Scopes requested during login: API-key creation, profile access, and inference.
const SCOPES = "org:create_api_key user:profile user:inference";
/**
 * Generate a PKCE verifier/challenge pair (RFC 7636, S256 method).
 * Both values are base64url-encoded with '=' padding stripped.
 */
async function generatePKCE(): Promise<{ verifier: string; challenge: string }> {
	const array = new Uint8Array(32);
	crypto.getRandomValues(array);
	// base64url-encode 32 random bytes: '+' -> '-', '/' -> '_', drop padding.
	const verifier = btoa(String.fromCharCode(...array))
		.replace(/\+/g, "-")
		.replace(/\//g, "_")
		.replace(/=+$/, "");
	const encoder = new TextEncoder();
	const data = encoder.encode(verifier);
	// Challenge is the base64url-encoded SHA-256 digest of the verifier.
	const hash = await crypto.subtle.digest("SHA-256", data);
	const challenge = btoa(String.fromCharCode(...new Uint8Array(hash)))
		.replace(/\+/g, "-")
		.replace(/\//g, "_")
		.replace(/=+$/, "");
	return { verifier, challenge };
}
/**
 * Run the interactive Anthropic OAuth authorization-code + PKCE flow.
 *
 * Surfaces the authorize URL via callbacks.onAuth, prompts the user to paste
 * the returned "code#state" string, then exchanges it for tokens.
 * Throws if the token endpoint responds with a non-2xx status.
 * The returned `expires` timestamp is padded 5 minutes early so callers
 * refresh before the access token actually lapses.
 */
async function loginAnthropic(callbacks: OAuthLoginCallbacks): Promise<OAuthCredentials> {
	const { verifier, challenge } = await generatePKCE();
	const authParams = new URLSearchParams({
		code: "true",
		client_id: CLIENT_ID,
		response_type: "code",
		redirect_uri: REDIRECT_URI,
		scope: SCOPES,
		code_challenge: challenge,
		code_challenge_method: "S256",
		// The PKCE verifier doubles as the OAuth state parameter.
		state: verifier,
	});
	callbacks.onAuth({ url: `${AUTHORIZE_URL}?${authParams.toString()}` });
	const authCode = await callbacks.onPrompt({ message: "Paste the authorization code:" });
	// The pasted value has the shape "code#state".
	const [code, state] = authCode.split("#");
	const tokenResponse = await fetch(TOKEN_URL, {
		method: "POST",
		headers: { "Content-Type": "application/json" },
		body: JSON.stringify({
			grant_type: "authorization_code",
			client_id: CLIENT_ID,
			code,
			state,
			redirect_uri: REDIRECT_URI,
			code_verifier: verifier,
		}),
	});
	if (!tokenResponse.ok) {
		throw new Error(`Token exchange failed: ${await tokenResponse.text()}`);
	}
	const data = (await tokenResponse.json()) as {
		access_token: string;
		refresh_token: string;
		expires_in: number;
	};
	return {
		refresh: data.refresh_token,
		access: data.access_token,
		// Expire 5 minutes early to avoid using a token at the edge of its lifetime.
		expires: Date.now() + data.expires_in * 1000 - 5 * 60 * 1000,
	};
}
/**
 * Exchange a refresh token for fresh OAuth credentials.
 * Throws if the token endpoint responds with a non-2xx status.
 * Mirrors loginAnthropic's expiry padding (`expires` set 5 minutes early).
 */
async function refreshAnthropicToken(credentials: OAuthCredentials): Promise<OAuthCredentials> {
	const response = await fetch(TOKEN_URL, {
		method: "POST",
		headers: { "Content-Type": "application/json" },
		body: JSON.stringify({
			grant_type: "refresh_token",
			client_id: CLIENT_ID,
			refresh_token: credentials.refresh,
		}),
	});
	if (!response.ok) {
		throw new Error(`Token refresh failed: ${await response.text()}`);
	}
	const data = (await response.json()) as {
		access_token: string;
		refresh_token: string;
		expires_in: number;
	};
	return {
		refresh: data.refresh_token,
		access: data.access_token,
		// Pad expiry by 5 minutes so the token is refreshed before it lapses.
		expires: Date.now() + data.expires_in * 1000 - 5 * 60 * 1000,
	};
}
// =============================================================================
// Streaming Implementation (simplified from packages/ai/src/providers/anthropic.ts)
// =============================================================================
// Canonical Claude Code tool names, used to disguise local tool names when
// talking to Anthropic over OAuth ("stealth mode").
const claudeCodeTools = [
	"Read",
	"Write",
	"Edit",
	"Bash",
	"Grep",
	"Glob",
	"AskUserQuestion",
	"TodoWrite",
	"WebFetch",
	"WebSearch",
];

// Lowercased name -> canonical Claude Code spelling.
const ccToolLookup = new Map<string, string>();
for (const toolName of claudeCodeTools) {
	ccToolLookup.set(toolName.toLowerCase(), toolName);
}

/** Map a local tool name to its Claude Code spelling (case-insensitive); unknown names pass through. */
const toClaudeCodeName = (name: string) => ccToolLookup.get(name.toLowerCase()) ?? name;

/** Map a Claude Code tool name back to the locally registered tool's spelling, if one matches. */
const fromClaudeCodeName = (name: string, tools?: Tool[]) => {
	const needle = name.toLowerCase();
	for (const t of tools ?? []) {
		if (t.name.toLowerCase() === needle) {
			return t.name;
		}
	}
	return name;
};
/** OAuth access tokens carry the "sk-ant-oat" marker; plain API keys do not. */
function isOAuthToken(apiKey: string): boolean {
	return apiKey.indexOf("sk-ant-oat") !== -1;
}
/**
 * Replace invalid (lone) UTF-16 surrogate code units with U+FFFD.
 *
 * The previous implementation replaced EVERY code unit in the surrogate range,
 * which also mangled well-formed surrogate pairs (emoji and other astral-plane
 * characters). Only unpaired surrogates are malformed — they cannot be encoded
 * as UTF-8/JSON — so valid pairs are now preserved.
 */
function sanitizeSurrogates(text: string): string {
	// A high surrogate not followed by a low surrogate, or a low surrogate not
	// preceded by a high surrogate, is a lone (invalid) surrogate.
	return text.replace(
		/[\uD800-\uDBFF](?![\uDC00-\uDFFF])|(?<![\uD800-\uDBFF])[\uDC00-\uDFFF]/g,
		"\uFFFD",
	);
}
/**
 * Convert tool-result content (text/image blocks) into Anthropic's
 * tool_result content shape. Text-only content collapses to one sanitized
 * string; mixed content becomes an array of typed blocks, guaranteed to
 * contain at least one text block.
 */
function convertContentBlocks(
	content: (TextContent | ImageContent)[],
): string | Array<{ type: "text"; text: string } | { type: "image"; source: any }> {
	if (content.every((c) => c.type !== "image")) {
		// No images: join all text blocks into a single sanitized string.
		const texts = content.map((c) => (c as TextContent).text);
		return sanitizeSurrogates(texts.join("\n"));
	}
	const blocks: Array<{ type: "text"; text: string } | { type: "image"; source: any }> = [];
	for (const item of content) {
		if (item.type === "text") {
			blocks.push({ type: "text" as const, text: sanitizeSurrogates(item.text) });
		} else {
			blocks.push({
				type: "image" as const,
				source: {
					type: "base64" as const,
					media_type: item.mimeType,
					data: item.data,
				},
			});
		}
	}
	// Ensure images are always accompanied by at least one text block.
	if (!blocks.some((b) => b.type === "text")) {
		blocks.unshift({ type: "text" as const, text: "(see attached image)" });
	}
	return blocks;
}
/**
 * Convert provider-agnostic messages into Anthropic message params.
 *
 * - Empty/whitespace-only user messages are dropped.
 * - Assistant thinking blocks keep their signature when present; unsigned
 *   thinking is downgraded to plain text.
 * - Consecutive toolResult messages are batched into a single user message.
 * - cache_control is attached to the last block of the final user message.
 */
function convertMessages(messages: Message[], isOAuth: boolean, _tools?: Tool[]): any[] {
	const params: any[] = [];
	for (let i = 0; i < messages.length; i++) {
		const msg = messages[i];
		if (msg.role === "user") {
			if (typeof msg.content === "string") {
				// Skip whitespace-only user messages entirely.
				if (msg.content.trim()) {
					params.push({ role: "user", content: sanitizeSurrogates(msg.content) });
				}
			} else {
				const blocks: ContentBlockParam[] = msg.content.map((item) =>
					item.type === "text"
						? { type: "text" as const, text: sanitizeSurrogates(item.text) }
						: {
								type: "image" as const,
								source: { type: "base64" as const, media_type: item.mimeType as any, data: item.data },
							},
				);
				if (blocks.length > 0) {
					params.push({ role: "user", content: blocks });
				}
			}
		} else if (msg.role === "assistant") {
			const blocks: ContentBlockParam[] = [];
			for (const block of msg.content) {
				if (block.type === "text" && block.text.trim()) {
					blocks.push({ type: "text", text: sanitizeSurrogates(block.text) });
				} else if (block.type === "thinking" && block.thinking.trim()) {
					if ((block as ThinkingContent).thinkingSignature) {
						blocks.push({
							type: "thinking" as any,
							thinking: sanitizeSurrogates(block.thinking),
							signature: (block as ThinkingContent).thinkingSignature!,
						});
					} else {
						// Unsigned thinking cannot be round-tripped as a thinking
						// block; send it as plain text instead.
						blocks.push({ type: "text", text: sanitizeSurrogates(block.thinking) });
					}
				} else if (block.type === "toolCall") {
					blocks.push({
						type: "tool_use",
						id: block.id,
						// OAuth "stealth mode" expects Claude Code tool spellings.
						name: isOAuth ? toClaudeCodeName(block.name) : block.name,
						input: block.arguments,
					});
				}
			}
			if (blocks.length > 0) {
				params.push({ role: "assistant", content: blocks });
			}
		} else if (msg.role === "toolResult") {
			const toolResults: any[] = [];
			toolResults.push({
				type: "tool_result",
				tool_use_id: msg.toolCallId,
				content: convertContentBlocks(msg.content),
				is_error: msg.isError,
			});
			// Absorb any directly-following toolResult messages into the same
			// user turn; i is advanced past the ones consumed.
			let j = i + 1;
			while (j < messages.length && messages[j].role === "toolResult") {
				const nextMsg = messages[j] as ToolResultMessage;
				toolResults.push({
					type: "tool_result",
					tool_use_id: nextMsg.toolCallId,
					content: convertContentBlocks(nextMsg.content),
					is_error: nextMsg.isError,
				});
				j++;
			}
			i = j - 1;
			params.push({ role: "user", content: toolResults });
		}
	}
	// Add cache control to last user message
	if (params.length > 0) {
		const last = params[params.length - 1];
		if (last.role === "user" && Array.isArray(last.content)) {
			const lastBlock = last.content[last.content.length - 1];
			if (lastBlock) {
				lastBlock.cache_control = { type: "ephemeral" };
			}
		}
	}
	return params;
}
/**
 * Translate internal tool definitions into Anthropic's tool schema,
 * renaming to Claude Code spellings when in OAuth stealth mode.
 */
function convertTools(tools: Tool[], isOAuth: boolean): any[] {
	const converted: any[] = [];
	for (const tool of tools) {
		const schema = tool.parameters as any;
		converted.push({
			name: isOAuth ? toClaudeCodeName(tool.name) : tool.name,
			description: tool.description,
			input_schema: {
				type: "object",
				properties: schema.properties || {},
				required: schema.required || [],
			},
		});
	}
	return converted;
}
/** Map an Anthropic stop_reason string onto the provider-agnostic StopReason union. */
function mapStopReason(reason: string): StopReason {
	const mapping: Record<string, StopReason> = {
		end_turn: "stop",
		pause_turn: "stop",
		stop_sequence: "stop",
		max_tokens: "length",
		tool_use: "toolUse",
	};
	// Anything unrecognized is treated as an error condition.
	return mapping[reason] ?? "error";
}
/**
 * Custom streamSimple implementation: drives one Anthropic streaming request
 * and translates SDK events into pi-ai assistant-message events.
 *
 * Returns the event stream immediately; the request itself runs in a detached
 * async IIFE that mutates a single `output` accumulator and finishes the
 * stream with either "done" or "error".
 */
function streamCustomAnthropic(
	model: Model<Api>,
	context: Context,
	options?: SimpleStreamOptions,
): AssistantMessageEventStream {
	const stream = createAssistantMessageEventStream();
	(async () => {
		// Accumulator for the in-progress assistant message; pushed as `partial`
		// with every event and as the final message on "done"/"error".
		const output: AssistantMessage = {
			role: "assistant",
			content: [],
			api: model.api,
			provider: model.provider,
			model: model.id,
			usage: {
				input: 0,
				output: 0,
				cacheRead: 0,
				cacheWrite: 0,
				totalTokens: 0,
				cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0, total: 0 },
			},
			stopReason: "stop",
			timestamp: Date.now(),
		};
		try {
			const apiKey = options?.apiKey ?? "";
			const isOAuth = isOAuthToken(apiKey);
			// Configure client based on auth type
			const betaFeatures = ["fine-grained-tool-streaming-2025-05-14", "interleaved-thinking-2025-05-14"];
			const clientOptions: any = {
				baseURL: model.baseUrl,
				dangerouslyAllowBrowser: true,
			};
			if (isOAuth) {
				// OAuth: bearer token plus Claude Code identification headers.
				clientOptions.apiKey = null;
				clientOptions.authToken = apiKey;
				clientOptions.defaultHeaders = {
					accept: "application/json",
					"anthropic-dangerous-direct-browser-access": "true",
					"anthropic-beta": `claude-code-20250219,oauth-2025-04-20,${betaFeatures.join(",")}`,
					"user-agent": "claude-cli/2.1.2 (external, cli)",
					"x-app": "cli",
				};
			} else {
				clientOptions.apiKey = apiKey;
				clientOptions.defaultHeaders = {
					accept: "application/json",
					"anthropic-dangerous-direct-browser-access": "true",
					"anthropic-beta": betaFeatures.join(","),
				};
			}
			const client = new Anthropic(clientOptions);
			// Build request params
			const params: MessageCreateParamsStreaming = {
				model: model.id,
				messages: convertMessages(context.messages, isOAuth, context.tools),
				// Default output budget is a third of the model max when unspecified.
				max_tokens: options?.maxTokens || Math.floor(model.maxTokens / 3),
				stream: true,
			};
			// System prompt with Claude Code identity for OAuth
			if (isOAuth) {
				params.system = [
					{
						type: "text",
						text: "You are Claude Code, Anthropic's official CLI for Claude.",
						cache_control: { type: "ephemeral" },
					},
				];
				if (context.systemPrompt) {
					params.system.push({
						type: "text",
						text: sanitizeSurrogates(context.systemPrompt),
						cache_control: { type: "ephemeral" },
					});
				}
			} else if (context.systemPrompt) {
				params.system = [
					{
						type: "text",
						text: sanitizeSurrogates(context.systemPrompt),
						cache_control: { type: "ephemeral" },
					},
				];
			}
			if (context.tools) {
				params.tools = convertTools(context.tools, isOAuth);
			}
			// Handle thinking/reasoning
			if (options?.reasoning && model.reasoning) {
				// Thinking-token budgets per reasoning effort level.
				const defaultBudgets: Record<string, number> = {
					minimal: 1024,
					low: 4096,
					medium: 10240,
					high: 20480,
				};
				const customBudget = options.thinkingBudgets?.[options.reasoning as keyof typeof options.thinkingBudgets];
				params.thinking = {
					type: "enabled",
					budget_tokens: customBudget ?? defaultBudgets[options.reasoning] ?? 10240,
				};
			}
			const anthropicStream = client.messages.stream({ ...params }, { signal: options?.signal });
			stream.push({ type: "start", partial: output });
			// Each in-progress content block carries Anthropic's event `index`
			// (plus accumulated partial JSON for tool calls) until its
			// content_block_stop strips the bookkeeping fields again.
			type Block = (ThinkingContent | TextContent | (ToolCall & { partialJson: string })) & { index: number };
			const blocks = output.content as Block[];
			for await (const event of anthropicStream) {
				if (event.type === "message_start") {
					output.usage.input = event.message.usage.input_tokens || 0;
					output.usage.output = event.message.usage.output_tokens || 0;
					output.usage.cacheRead = (event.message.usage as any).cache_read_input_tokens || 0;
					output.usage.cacheWrite = (event.message.usage as any).cache_creation_input_tokens || 0;
					output.usage.totalTokens =
						output.usage.input + output.usage.output + output.usage.cacheRead + output.usage.cacheWrite;
					calculateCost(model, output.usage);
				} else if (event.type === "content_block_start") {
					// Open a new content block of the appropriate kind and emit its *_start event.
					if (event.content_block.type === "text") {
						output.content.push({ type: "text", text: "", index: event.index } as any);
						stream.push({ type: "text_start", contentIndex: output.content.length - 1, partial: output });
					} else if (event.content_block.type === "thinking") {
						output.content.push({
							type: "thinking",
							thinking: "",
							thinkingSignature: "",
							index: event.index,
						} as any);
						stream.push({ type: "thinking_start", contentIndex: output.content.length - 1, partial: output });
					} else if (event.content_block.type === "tool_use") {
						output.content.push({
							type: "toolCall",
							id: event.content_block.id,
							// Under OAuth, Claude Code spellings are mapped back to local tool names.
							name: isOAuth
								? fromClaudeCodeName(event.content_block.name, context.tools)
								: event.content_block.name,
							arguments: {},
							partialJson: "",
							index: event.index,
						} as any);
						stream.push({ type: "toolcall_start", contentIndex: output.content.length - 1, partial: output });
					}
				} else if (event.type === "content_block_delta") {
					// Route the delta to the open block with the matching Anthropic index.
					const index = blocks.findIndex((b) => b.index === event.index);
					const block = blocks[index];
					if (!block) continue;
					if (event.delta.type === "text_delta" && block.type === "text") {
						block.text += event.delta.text;
						stream.push({ type: "text_delta", contentIndex: index, delta: event.delta.text, partial: output });
					} else if (event.delta.type === "thinking_delta" && block.type === "thinking") {
						block.thinking += event.delta.thinking;
						stream.push({
							type: "thinking_delta",
							contentIndex: index,
							delta: event.delta.thinking,
							partial: output,
						});
					} else if (event.delta.type === "input_json_delta" && block.type === "toolCall") {
						(block as any).partialJson += event.delta.partial_json;
						// Opportunistically parse the JSON-so-far; incomplete JSON is expected and ignored.
						try {
							block.arguments = JSON.parse((block as any).partialJson);
						} catch {}
						stream.push({
							type: "toolcall_delta",
							contentIndex: index,
							delta: event.delta.partial_json,
							partial: output,
						});
					} else if (event.delta.type === "signature_delta" && block.type === "thinking") {
						block.thinkingSignature = (block.thinkingSignature || "") + (event.delta as any).signature;
					}
				} else if (event.type === "content_block_stop") {
					const index = blocks.findIndex((b) => b.index === event.index);
					const block = blocks[index];
					if (!block) continue;
					// Strip streaming bookkeeping before emitting the *_end event.
					delete (block as any).index;
					if (block.type === "text") {
						stream.push({ type: "text_end", contentIndex: index, content: block.text, partial: output });
					} else if (block.type === "thinking") {
						stream.push({ type: "thinking_end", contentIndex: index, content: block.thinking, partial: output });
					} else if (block.type === "toolCall") {
						// Final parse of the accumulated tool-call JSON; invalid JSON leaves the last good parse.
						try {
							block.arguments = JSON.parse((block as any).partialJson);
						} catch {}
						delete (block as any).partialJson;
						stream.push({ type: "toolcall_end", contentIndex: index, toolCall: block, partial: output });
					}
				} else if (event.type === "message_delta") {
					if ((event.delta as any).stop_reason) {
						output.stopReason = mapStopReason((event.delta as any).stop_reason);
					}
					// NOTE(review): these assignments overwrite the message_start values;
					// if message_delta omits input_tokens this resets usage.input to 0 — verify.
					output.usage.input = (event.usage as any).input_tokens || 0;
					output.usage.output = (event.usage as any).output_tokens || 0;
					output.usage.cacheRead = (event.usage as any).cache_read_input_tokens || 0;
					output.usage.cacheWrite = (event.usage as any).cache_creation_input_tokens || 0;
					output.usage.totalTokens =
						output.usage.input + output.usage.output + output.usage.cacheRead + output.usage.cacheWrite;
					calculateCost(model, output.usage);
				}
			}
			if (options?.signal?.aborted) {
				throw new Error("Request was aborted");
			}
			stream.push({ type: "done", reason: output.stopReason as "stop" | "length" | "toolUse", message: output });
			stream.end();
		} catch (error) {
			// Strip any leftover streaming bookkeeping before reporting the error.
			for (const block of output.content) delete (block as any).index;
			output.stopReason = options?.signal?.aborted ? "aborted" : "error";
			output.errorMessage = error instanceof Error ? error.message : JSON.stringify(error);
			stream.push({ type: "error", reason: output.stopReason, error: output });
			stream.end();
		}
	})();
	return stream;
}
// =============================================================================
// Extension Entry Point
// =============================================================================
/**
 * Extension entry point: registers the "custom-anthropic" provider with two
 * model definitions, OAuth login/refresh support, and the custom streaming
 * implementation defined above.
 */
export default function (pi: ExtensionAPI) {
	pi.registerProvider("custom-anthropic", {
		baseUrl: "https://api.anthropic.com",
		// Environment variable consulted for API-key authentication.
		apiKey: "CUSTOM_ANTHROPIC_API_KEY",
		api: "custom-anthropic-api",
		models: [
			{
				id: "claude-opus-4-5",
				name: "Claude Opus 4.5 (Custom)",
				reasoning: true,
				input: ["text", "image"],
				// NOTE(review): cost units presumed USD per million tokens — confirm against pi-ai docs.
				cost: { input: 5, output: 25, cacheRead: 0.5, cacheWrite: 6.25 },
				contextWindow: 200000,
				maxTokens: 64000,
			},
			{
				id: "claude-sonnet-4-5",
				name: "Claude Sonnet 4.5 (Custom)",
				reasoning: true,
				input: ["text", "image"],
				cost: { input: 3, output: 15, cacheRead: 0.3, cacheWrite: 3.75 },
				contextWindow: 200000,
				maxTokens: 64000,
			},
		],
		oauth: {
			name: "Custom Anthropic (Claude Pro/Max)",
			login: loginAnthropic,
			refreshToken: refreshAnthropicToken,
			// Credentials' access token doubles as the API key for requests.
			getApiKey: (cred) => cred.access,
		},
		streamSimple: streamCustomAnthropic,
	});
}
+1
View File
@@ -4,6 +4,7 @@
"type": "module",
"dependencies": {
"@anthropic-ai/sdk": "^0.52.0",
"@mariozechner/jiti": "^2.6.5",
"@mozilla/readability": "^0.5.0",
"@sinclair/typebox": "^0.34.0",
"linkedom": "^0.16.0",
@@ -0,0 +1,21 @@
MIT License
Copyright (c) 2026 Ben Vargas
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
@@ -0,0 +1,115 @@
# @benvargas/pi-claude-code-use
`pi-claude-code-use` keeps Pi's built-in `anthropic` provider intact and applies the smallest payload changes needed for Anthropic OAuth subscription use in Pi.
It does not register a new provider or replace Pi's Anthropic request transport. Pi core remains in charge of OAuth transport, headers, model definitions, and streaming.
## What It Changes
When Pi is using Anthropic OAuth, this extension intercepts outbound API requests via the `before_provider_request` hook and:
- **System prompt rewrite** -- rewrites a small set of Pi-identifying prompt phrases in system prompt text:
- `pi itself` → `the cli itself`
- `pi .md files` → `cli .md files`
- `pi packages` → `cli packages`
Preserves Pi's original `system[]` structure, `cache_control` metadata, and non-text blocks.
- **Tool filtering** -- passes through core Claude Code tools, Anthropic-native typed tools (e.g. `web_search`), and any tool prefixed with `mcp__`. Unknown flat-named tools are filtered out.
- **Companion tool remapping** -- renames known companion extension tools from their flat names to MCP-style aliases (e.g. `web_search_exa` becomes `mcp__exa__web_search`). Duplicate flat entries are removed after remapping.
- **tool_choice remapping** -- if `tool_choice` references a flat companion name that was remapped, the reference is updated to the MCP alias. If it references a tool that was filtered out, `tool_choice` is removed from the payload.
- **Message history rewriting** -- `tool_use` blocks in conversation history that reference flat companion names are rewritten to their MCP aliases so the model sees consistent tool names across the conversation.
- **Companion alias registration** -- at session start and before each agent turn, discovers loaded companion extensions, captures their tool definitions via a jiti-based shim, and registers MCP-alias copies so the model can invoke them under Claude Code-compatible names.
- **Alias activation tracking** -- auto-activates MCP aliases when their flat counterpart is active under Anthropic OAuth. Tracks provenance (auto-managed vs user-selected) so that disabling OAuth only removes auto-activated aliases, preserving any the user explicitly enabled.
Non-OAuth Anthropic usage and non-Anthropic providers are left completely unchanged.
## Install
```bash
pi install npm:@benvargas/pi-claude-code-use
```
Or load it directly without installing:
```bash
pi -e /path/to/pi-packages/packages/pi-claude-code-use/extensions/index.ts
```
## Usage
Install the package and continue using the normal `anthropic` provider with Anthropic OAuth login:
```bash
/login anthropic
/model anthropic/claude-opus-4-6
```
No extra configuration is required.
## Environment Variables
| Variable | Description |
|---|---|
| `PI_CLAUDE_CODE_USE_DEBUG_LOG` | Set to a file path to enable debug logging. Writes two JSON entries per Anthropic OAuth request: one with `"stage": "before"` (the original payload from Pi) and one with `"stage": "after"` (the transformed payload sent to Anthropic). |
| `PI_CLAUDE_CODE_USE_DISABLE_TOOL_FILTER` | Set to `1` to disable tool filtering. System prompt rewriting still applies, but all tools pass through unchanged. Useful for debugging whether a tool-filtering issue is causing a problem. |
Example:
```bash
PI_CLAUDE_CODE_USE_DEBUG_LOG=/tmp/pi-claude-debug.log pi -e /path/to/extensions/index.ts --model anthropic/claude-sonnet-4-20250514
```
## Companion Tool Aliases
When these companion extensions from this monorepo are loaded alongside `pi-claude-code-use`, MCP aliases are automatically registered and remapped:
| Flat name | MCP alias |
|---|---|
| `web_search_exa` | `mcp__exa__web_search` |
| `get_code_context_exa` | `mcp__exa__get_code_context` |
| `firecrawl_scrape` | `mcp__firecrawl__scrape` |
| `firecrawl_map` | `mcp__firecrawl__map` |
| `firecrawl_search` | `mcp__firecrawl__search` |
| `generate_image` | `mcp__antigravity__generate_image` |
| `image_quota` | `mcp__antigravity__image_quota` |
### How companion discovery works
The extension identifies companion tools by matching `sourceInfo` metadata that Pi attaches to each registered tool:
1. **baseDir match** -- if the tool's `sourceInfo.baseDir` directory name matches the companion's directory name (e.g. `pi-exa-mcp`).
2. **Path match** -- if the tool's `sourceInfo.path` contains the companion's scoped package name (e.g. `@benvargas/pi-exa-mcp`) or directory name as a path segment. This handles npm installs, git clones, and monorepo layouts where `baseDir` points to the repo root rather than the individual package.
Once a companion tool is identified, its extension factory is loaded via jiti into a capture shim to obtain the full tool definition, which is then re-registered under the MCP alias name.
## Core Tools Allowlist
The following tool names always pass through filtering (case-insensitive). This list mirrors Pi core's `claudeCodeTools` in `packages/ai/src/providers/anthropic.ts`:
`Read`, `Write`, `Edit`, `Bash`, `Grep`, `Glob`, `AskUserQuestion`, `EnterPlanMode`, `ExitPlanMode`, `KillShell`, `NotebookEdit`, `Skill`, `Task`, `TaskOutput`, `TodoWrite`, `WebFetch`, `WebSearch`
Additionally, any tool with a `type` field (Anthropic-native tools like `web_search`) and any tool prefixed with `mcp__` always passes through.
## Guidance For Extension Authors
Anthropic's OAuth subscription path appears to fingerprint tool names. Flat extension tool names such as `web_search_exa` were rejected in live testing, while MCP-style names such as `mcp__exa__web_search` were accepted.
If you want a custom tool to survive Anthropic OAuth filtering cleanly, prefer registering it directly under an MCP-style name:
```text
mcp__<server>__<tool>
```
Examples:
- `mcp__exa__web_search`
- `mcp__firecrawl__scrape`
- `mcp__mytools__lookup_customer`
If an extension keeps a flat legacy name for non-Anthropic use, it can also register an MCP-style alias alongside it. `pi-claude-code-use` already does this centrally for the known companion tools in this repo, but unknown non-MCP tool names will still be filtered out on Anthropic OAuth requests.
## Notes
- The extension activates for all Anthropic OAuth requests regardless of model, rather than using a fixed model allowlist.
- Non-OAuth Anthropic usage (API key auth) is left unchanged.
- In practice, unknown non-MCP extension tools were the remaining trigger for Anthropic's extra-usage classification, so this package keeps core tools, keeps MCP-style tools, auto-aliases the known companion tools above, and filters the rest.
- Pi may show its built-in OAuth subscription warning banner even when the request path works correctly. That banner is UI logic in Pi, not a signal that the upstream request is being billed as extra usage.
@@ -0,0 +1,641 @@
import { appendFileSync } from "node:fs";
import { basename, dirname } from "node:path";
import { createJiti } from "@mariozechner/jiti";
import * as piAiModule from "@mariozechner/pi-ai";
import type { ExtensionAPI } from "@mariozechner/pi-coding-agent";
import * as piCodingAgentModule from "@mariozechner/pi-coding-agent";
import * as typeboxModule from "@sinclair/typebox";
// ============================================================================
// Types
// ============================================================================
/** Describes a known companion extension and the MCP-style aliases for its tools. */
interface CompanionSpec {
  /** Directory name the companion extension is installed under (e.g. "pi-exa-mcp"). */
  dirName: string;
  /** Scoped npm package name (e.g. "@benvargas/pi-exa-mcp"). */
  packageName: string;
  /** Pairs of [flat tool name, MCP-style alias] the companion registers. */
  aliases: ReadonlyArray<readonly [flatName: string, mcpName: string]>;
}
/** Shape of a tool definition accepted by ExtensionAPI.registerTool. */
type ToolRegistration = Parameters<ExtensionAPI["registerTool"]>[0];
/** Shape of one tool entry returned by ExtensionAPI.getAllTools. */
type ToolInfo = ReturnType<ExtensionAPI["getAllTools"]>[number];
// ============================================================================
// Constants
// ============================================================================
/**
 * Core Claude Code tool names that always pass through Anthropic OAuth filtering.
 * Stored lowercase for case-insensitive matching.
 * Mirrors Pi core's claudeCodeTools list in packages/ai/src/providers/anthropic.ts
 */
const CORE_TOOL_NAMES = new Set([
  "read",
  "write",
  "edit",
  "bash",
  "grep",
  "glob",
  "askuserquestion",
  "enterplanmode",
  "exitplanmode",
  "killshell",
  "notebookedit",
  "skill",
  "task",
  "taskoutput",
  "todowrite",
  "webfetch",
  "websearch",
]);
/**
 * Flat companion tool name → MCP-style alias.
 * Keep these entries in sync with the per-companion `aliases` lists in
 * COMPANIONS below — both are consulted during filtering and registration.
 */
const FLAT_TO_MCP = new Map<string, string>([
  ["web_search_exa", "mcp__exa__web_search"],
  ["get_code_context_exa", "mcp__exa__get_code_context"],
  ["firecrawl_scrape", "mcp__firecrawl__scrape"],
  ["firecrawl_map", "mcp__firecrawl__map"],
  ["firecrawl_search", "mcp__firecrawl__search"],
  ["generate_image", "mcp__antigravity__generate_image"],
  ["image_quota", "mcp__antigravity__image_quota"],
]);
/**
 * Known companion extensions and the tools they provide.
 * Each entry's `aliases` pairs mirror FLAT_TO_MCP above.
 */
const COMPANIONS: CompanionSpec[] = [
  {
    dirName: "pi-exa-mcp",
    packageName: "@benvargas/pi-exa-mcp",
    aliases: [
      ["web_search_exa", "mcp__exa__web_search"],
      ["get_code_context_exa", "mcp__exa__get_code_context"],
    ],
  },
  {
    dirName: "pi-firecrawl",
    packageName: "@benvargas/pi-firecrawl",
    aliases: [
      ["firecrawl_scrape", "mcp__firecrawl__scrape"],
      ["firecrawl_map", "mcp__firecrawl__map"],
      ["firecrawl_search", "mcp__firecrawl__search"],
    ],
  },
  {
    dirName: "pi-antigravity-image-gen",
    packageName: "@benvargas/pi-antigravity-image-gen",
    aliases: [
      ["generate_image", "mcp__antigravity__generate_image"],
      ["image_quota", "mcp__antigravity__image_quota"],
    ],
  },
];
/** Reverse lookup: flat tool name → its companion spec. */
const TOOL_TO_COMPANION = new Map<string, CompanionSpec>(
  COMPANIONS.flatMap((spec) => spec.aliases.map(([flat]) => [flat, spec] as const)),
);
// ============================================================================
// Helpers
// ============================================================================
/** True when `value` is a non-null, non-array object (a plain record). */
function isPlainObject(value: unknown): value is Record<string, unknown> {
  if (value === null) return false;
  if (Array.isArray(value)) return false;
  return typeof value === "object";
}
/** Normalize a possibly-missing tool name: trim surrounding whitespace, lowercase. */
function lower(name: string | undefined): string {
  const raw = name ?? "";
  return raw.trim().toLowerCase();
}
// ============================================================================
// System prompt rewrite (PRD §1.1)
//
// Replace "pi itself" → "the cli itself" in system prompt text.
// Preserves cache_control, non-text blocks, and payload shape.
// ============================================================================
/**
 * Rewrite Pi-specific phrases in system prompt text to their generic
 * "cli" equivalents. Every occurrence of each phrase is replaced.
 */
function rewritePromptText(text: string): string {
  const substitutions: ReadonlyArray<readonly [string, string]> = [
    ["pi itself", "the cli itself"],
    ["pi .md files", "cli .md files"],
    ["pi packages", "cli packages"],
  ];
  let rewritten = text;
  for (const [needle, replacement] of substitutions) {
    rewritten = rewritten.replaceAll(needle, replacement);
  }
  return rewritten;
}
/**
 * Apply the prompt-text rewrite to Anthropic's `system` field.
 * Handles both the plain-string form and the block-array form. Text blocks
 * are rewritten while preserving their other fields (e.g. cache_control);
 * non-text blocks and unrecognized shapes pass through unchanged.
 */
function rewriteSystemField(system: unknown): unknown {
  if (typeof system === "string") return rewritePromptText(system);
  if (!Array.isArray(system)) return system;
  return system.map((entry) => {
    if (!isPlainObject(entry)) return entry;
    if (entry.type !== "text" || typeof entry.text !== "string") return entry;
    const updated = rewritePromptText(entry.text);
    // Reuse the original block object when nothing changed.
    return updated === entry.text ? entry : { ...entry, text: updated };
  });
}
// ============================================================================
// Tool filtering and MCP alias remapping (PRD §1.2)
//
// Rules applied per tool:
// 1. Anthropic-native typed tools (have a `type` field) → pass through
// 2. Core Claude Code tool names → pass through
// 3. Tools already prefixed with mcp__ → pass through
// 4. Known companion tools whose MCP alias is also advertised → rename to alias
// 5. Known companion tools without an advertised alias → filtered out
// 6. Unknown flat-named tools → filtered out (unless disableFilter)
// ============================================================================
/** Collect the lowercased names of every named tool object in `tools`. */
function collectToolNames(tools: unknown[]): Set<string> {
  const names = new Set<string>();
  tools.forEach((tool) => {
    if (!isPlainObject(tool)) return;
    if (typeof tool.name !== "string") return;
    names.add(lower(tool.name));
  });
  return names;
}
/**
 * Filter and rename the outgoing `tools` array for an Anthropic OAuth request.
 *
 * Keeps native typed tools, core Claude Code tools, and mcp__-prefixed tools;
 * renames known companion tools to their MCP alias when that alias is also
 * advertised; drops everything else unless `disableFilter` is set. Duplicate
 * names (case-insensitive) are emitted only once, in first-seen order.
 *
 * Returns the input unchanged when it is not an array.
 */
function filterAndRemapTools(tools: unknown[] | undefined, disableFilter: boolean): unknown[] | undefined {
  if (!Array.isArray(tools)) return tools;
  const advertised = collectToolNames(tools);
  // Lowercased names already emitted; used to skip duplicates below.
  const emitted = new Set<string>();
  const result: unknown[] = [];
  for (const tool of tools) {
    if (!isPlainObject(tool)) continue;
    // Rule 1: native typed tools always pass through
    if (typeof tool.type === "string" && tool.type.trim().length > 0) {
      result.push(tool);
      continue;
    }
    const name = typeof tool.name === "string" ? tool.name : "";
    // Nameless non-typed tools are dropped outright.
    if (!name) continue;
    const nameLc = lower(name);
    // Rules 2 & 3: core tools and mcp__-prefixed pass through (with dedup)
    if (CORE_TOOL_NAMES.has(nameLc) || nameLc.startsWith("mcp__")) {
      if (!emitted.has(nameLc)) {
        emitted.add(nameLc);
        result.push(tool);
      }
      continue;
    }
    // Rules 4 & 5: known companion tool
    const mcpAlias = FLAT_TO_MCP.get(nameLc);
    if (mcpAlias) {
      const aliasLc = lower(mcpAlias);
      if (advertised.has(aliasLc) && !emitted.has(aliasLc)) {
        // Alias exists in tool list → rename flat to alias, dedup
        emitted.add(aliasLc);
        result.push({ ...tool, name: mcpAlias });
      } else if (disableFilter && !emitted.has(nameLc)) {
        // Filter disabled: keep flat name if not yet emitted
        emitted.add(nameLc);
        result.push(tool);
      }
      continue;
    }
    // Rule 6: unknown flat-named tool
    if (disableFilter && !emitted.has(nameLc)) {
      emitted.add(nameLc);
      result.push(tool);
    }
  }
  return result;
}
/**
 * Adjust `tool_choice` after tool filtering.
 * Returns the (possibly renamed) tool_choice, or undefined when the forced
 * tool no longer exists in the filtered list — the caller drops the field.
 */
function remapToolChoice(
  toolChoice: Record<string, unknown>,
  survivingNames: Map<string, string>,
): Record<string, unknown> | undefined {
  const forcesSpecificTool = toolChoice.type === "tool" && typeof toolChoice.name === "string";
  if (!forcesSpecificTool) return toolChoice;
  const requested = toolChoice.name as string;
  const survivor = survivingNames.get(lower(requested));
  if (survivor) {
    // The tool survived — rename only if its casing/name actually differs.
    if (survivor === requested) return toolChoice;
    return { ...toolChoice, name: survivor };
  }
  // The flat tool was filtered; redirect to its MCP alias when that survived.
  const alias = FLAT_TO_MCP.get(lower(requested));
  if (alias && survivingNames.has(lower(alias))) {
    return { ...toolChoice, name: alias };
  }
  return undefined;
}
/**
 * Rewrite historical `tool_use` blocks whose flat tool name now has a
 * surviving MCP alias, so conversation history matches the filtered tool list.
 * Returns the original array object untouched when nothing needed rewriting.
 */
function remapMessageToolNames(messages: unknown[], survivingNames: Map<string, string>): unknown[] {
  let touched = false;
  const rewritten = messages.map((message) => {
    if (!isPlainObject(message)) return message;
    if (!Array.isArray(message.content)) return message;
    let messageTouched = false;
    const blocks = (message.content as unknown[]).map((block) => {
      if (!isPlainObject(block)) return block;
      if (block.type !== "tool_use" || typeof block.name !== "string") return block;
      const alias = FLAT_TO_MCP.get(lower(block.name));
      if (!alias || !survivingNames.has(lower(alias))) return block;
      messageTouched = true;
      return { ...block, name: alias };
    });
    if (!messageTouched) return message;
    touched = true;
    return { ...message, content: blocks };
  });
  return touched ? rewritten : messages;
}
// ============================================================================
// Full payload transform
// ============================================================================
/**
 * Transform one Anthropic OAuth request payload.
 *
 * Always rewrites system prompt text; unless `disableFilter` is set, also
 * filters/renames tools, fixes up `tool_choice`, and rewrites historical
 * `tool_use` blocks so the whole request is self-consistent. Operates on a
 * deep clone — the caller's payload object is never mutated.
 */
function transformPayload(raw: Record<string, unknown>, disableFilter: boolean): Record<string, unknown> {
  // Deep clone to avoid mutating the original
  const payload = JSON.parse(JSON.stringify(raw)) as Record<string, unknown>;
  // 1. System prompt rewrite (always applies)
  if (payload.system !== undefined) {
    payload.system = rewriteSystemField(payload.system);
  }
  // When escape hatch is active, skip all tool filtering/remapping
  if (disableFilter) {
    return payload;
  }
  // 2. Tool filtering and alias remapping
  payload.tools = filterAndRemapTools(payload.tools as unknown[] | undefined, false);
  // 3. Build map of tool names that survived filtering (lowercase → actual name)
  const survivingNames = new Map<string, string>();
  if (Array.isArray(payload.tools)) {
    for (const tool of payload.tools) {
      if (isPlainObject(tool) && typeof tool.name === "string") {
        survivingNames.set(lower(tool.name), tool.name as string);
      }
    }
  }
  // 4. Remap tool_choice if it references a renamed or filtered tool
  if (isPlainObject(payload.tool_choice)) {
    const remapped = remapToolChoice(payload.tool_choice, survivingNames);
    if (remapped === undefined) {
      // The forced tool no longer exists — drop the constraint entirely.
      delete payload.tool_choice;
    } else {
      payload.tool_choice = remapped;
    }
  }
  // 5. Rewrite historical tool_use blocks in message history
  if (Array.isArray(payload.messages)) {
    payload.messages = remapMessageToolNames(payload.messages, survivingNames);
  }
  return payload;
}
// ============================================================================
// Debug logging (PRD §1.4)
// ============================================================================
// Optional debug sink: set PI_CLAUDE_CODE_USE_DEBUG_LOG to a file path to capture payloads.
const debugLogPath = process.env.PI_CLAUDE_CODE_USE_DEBUG_LOG;
/** Append a timestamped, pretty-printed JSON snapshot to the debug log file. */
function writeDebugLog(payload: unknown): void {
  if (!debugLogPath) return;
  const entry = `${new Date().toISOString()}\n${JSON.stringify(payload, null, 2)}\n---\n`;
  try {
    appendFileSync(debugLogPath, entry, "utf-8");
  } catch {
    // Debug logging must never break actual requests
  }
}
// ============================================================================
// Companion alias registration (PRD §1.3)
//
// Discovers loaded companion extensions, captures their tool definitions via
// a shim ExtensionAPI, and registers MCP-alias versions so the model can
// invoke them under Claude Code-compatible names.
// ============================================================================
// --- Module-level state for companion alias management ---
// MCP alias names this extension itself registered via pi.registerTool.
const registeredMcpAliases = new Set<string>();
// Aliases we activated automatically (as opposed to user-selected ones).
const autoActivatedAliases = new Set<string>();
// Active-tool list as of our last setActiveTools call; undefined = not currently managing.
let lastManagedToolList: string[] | undefined;
// Per-directory memoized companion tool captures (promise-cached to dedupe loads).
const captureCache = new Map<string, Promise<Map<string, ToolRegistration>>>();
// Lazily-created jiti loader shared by all companion loads.
let jitiLoader: { import(path: string, opts?: { default?: boolean }): Promise<unknown> } | undefined;
/**
 * Return the shared jiti loader, creating it on first use.
 * virtualModules maps Pi's packages to the module instances already loaded in
 * this process, so companion factories don't re-import separate copies.
 */
function getJitiLoader() {
  if (!jitiLoader) {
    jitiLoader = createJiti(import.meta.url, {
      moduleCache: false,
      tryNative: false,
      virtualModules: {
        "@mariozechner/pi-ai": piAiModule,
        "@mariozechner/pi-coding-agent": piCodingAgentModule,
        "@sinclair/typebox": typeboxModule,
      },
    });
  }
  return jitiLoader;
}
/**
 * Load a companion extension's default-export factory from `baseDir`.
 * Probes the conventional entry points in order; returns undefined when none
 * of them loads to a function.
 */
async function loadFactory(baseDir: string): Promise<((pi: ExtensionAPI) => void | Promise<void>) | undefined> {
  const root = baseDir.replace(/\/$/, "");
  const loader = getJitiLoader();
  const entryPoints = [
    `${root}/index.ts`,
    `${root}/index.js`,
    `${root}/extensions/index.ts`,
    `${root}/extensions/index.js`,
  ];
  for (const entry of entryPoints) {
    try {
      const loaded = await loader.import(entry, { default: true });
      if (typeof loaded === "function") {
        return loaded as (pi: ExtensionAPI) => void | Promise<void>;
      }
    } catch {
      // Entry point missing or failed to load — probe the next candidate.
    }
  }
  return undefined;
}
/**
 * Decide whether `tool` was registered by the companion described by `spec`,
 * based on its sourceInfo directory and entry-point path.
 */
function isCompanionSource(tool: ToolInfo | undefined, spec: CompanionSpec): boolean {
  const sourceInfo = tool?.sourceInfo;
  if (!sourceInfo) return false;
  const baseDir = sourceInfo.baseDir;
  if (baseDir) {
    const leaf = basename(baseDir);
    if (leaf === spec.dirName) return true;
    // Some installs point baseDir at an "extensions" subdirectory of the package.
    if (leaf === "extensions" && basename(dirname(baseDir)) === spec.dirName) return true;
  }
  const entryPath = sourceInfo.path;
  if (typeof entryPath !== "string") return false;
  // Normalize backslashes for Windows paths before segment-bounded check
  const unixPath = entryPath.replaceAll("\\", "/");
  // Check for scoped package name (npm install) or directory name (git/monorepo)
  if (unixPath.includes(`/${spec.packageName}/`)) return true;
  return unixPath.includes(`/${spec.dirName}/`);
}
/**
 * Build a mostly-inert ExtensionAPI whose registerTool captures definitions
 * into `captured` instead of registering them for real. Read-only/query and
 * execution methods delegate to `realPi`; UI/session mutators are no-ops so
 * running a companion factory against the shim has no visible side effects.
 */
function buildCaptureShim(realPi: ExtensionAPI, captured: Map<string, ToolRegistration>): ExtensionAPI {
  // Flags the companion registered through this shim; only those are readable back.
  const shimFlags = new Set<string>();
  return {
    registerTool(def) {
      // Capture instead of registering — the whole point of the shim.
      captured.set(def.name, def as unknown as ToolRegistration);
    },
    registerFlag(name, _options) {
      shimFlags.add(name);
    },
    getFlag(name) {
      // Only expose flags the companion itself registered via this shim.
      return shimFlags.has(name) ? realPi.getFlag(name) : undefined;
    },
    // Event/UI/session registration is swallowed — the companion is only being inspected.
    on() {},
    registerCommand() {},
    registerShortcut() {},
    registerMessageRenderer() {},
    registerProvider() {},
    unregisterProvider() {},
    sendMessage() {},
    sendUserMessage() {},
    appendEntry() {},
    setSessionName() {},
    getSessionName() {
      return undefined;
    },
    setLabel() {},
    // Delegated methods: companions may legitimately need these while loading.
    exec(command, args, options) {
      return realPi.exec(command, args, options);
    },
    getActiveTools() {
      return realPi.getActiveTools();
    },
    getAllTools() {
      return realPi.getAllTools();
    },
    setActiveTools(names) {
      realPi.setActiveTools(names);
    },
    getCommands() {
      return realPi.getCommands();
    },
    setModel(model) {
      return realPi.setModel(model);
    },
    getThinkingLevel() {
      return realPi.getThinkingLevel();
    },
    setThinkingLevel(level) {
      realPi.setThinkingLevel(level);
    },
    events: realPi.events,
  } as ExtensionAPI;
}
/**
 * Run a companion's factory against a capture shim and return the tool
 * definitions it registered, keyed by flat name. Results are memoized per
 * baseDir in `captureCache` so concurrent callers share a single load.
 */
async function captureCompanionTools(baseDir: string, realPi: ExtensionAPI): Promise<Map<string, ToolRegistration>> {
  const cached = captureCache.get(baseDir);
  if (cached) return cached;
  // Cache the promise synchronously so racing callers reuse the same load.
  const pending = (async () => {
    const factory = await loadFactory(baseDir);
    const captured = new Map<string, ToolRegistration>();
    if (factory) {
      await factory(buildCaptureShim(realPi, captured));
    }
    return captured;
  })();
  captureCache.set(baseDir, pending);
  return pending;
}
/**
 * Register MCP-style aliases for every loaded companion extension's tools.
 *
 * For each known companion whose flat tool is present and traceable to that
 * companion via sourceInfo, re-runs its factory through a capture shim and
 * registers the captured definition again under the mcp__ alias name.
 * Aliases that already exist (registered by us or anyone else) are skipped.
 */
async function registerAliasesForLoadedCompanions(pi: ExtensionAPI): Promise<void> {
  // Clear capture cache so flag/config changes since last call take effect
  captureCache.clear();
  const allTools = pi.getAllTools();
  // Index of lowercased tool name → tool info, plus a name set for existence checks.
  const toolIndex = new Map<string, ToolInfo>();
  const knownNames = new Set<string>();
  for (const tool of allTools) {
    toolIndex.set(lower(tool.name), tool);
    knownNames.add(lower(tool.name));
  }
  for (const spec of COMPANIONS) {
    for (const [flatName, mcpName] of spec.aliases) {
      if (registeredMcpAliases.has(mcpName) || knownNames.has(lower(mcpName))) continue;
      const tool = toolIndex.get(lower(flatName));
      if (!tool || !isCompanionSource(tool, spec)) continue;
      // Prefer the extension file's directory for loading (sourceInfo.path is the actual
      // entry point). Fall back to baseDir only if path is unavailable. baseDir can be
      // the monorepo root which doesn't contain the extension entry point directly.
      const loadDir = tool.sourceInfo?.path ? dirname(tool.sourceInfo.path) : tool.sourceInfo?.baseDir;
      if (!loadDir) continue;
      const captured = await captureCompanionTools(loadDir, pi);
      const def = captured.get(flatName);
      if (!def) continue;
      pi.registerTool({
        ...def,
        // Prefix the label so the alias is distinguishable in tool listings.
        name: mcpName,
        label: def.label?.startsWith("MCP ") ? def.label : `MCP ${def.label ?? mcpName}`,
      });
      registeredMcpAliases.add(mcpName);
      knownNames.add(lower(mcpName));
    }
  }
}
/**
 * Synchronize MCP alias tool activation with the current model state.
 * When OAuth is active, auto-activate aliases for any active companion tools.
 * When OAuth is inactive, remove auto-activated aliases (but preserve user-selected ones).
 *
 * Provenance is tracked in `autoActivatedAliases` / `lastManagedToolList`:
 * an active alias that we did NOT auto-activate is treated as user-selected
 * and is never removed by this function.
 */
function syncAliasActivation(pi: ExtensionAPI, enableAliases: boolean): void {
  const activeNames = pi.getActiveTools();
  const allNames = new Set(pi.getAllTools().map((t) => t.name));
  if (enableAliases) {
    // Determine which aliases should be active based on their flat counterpart being active
    const activeLc = new Set(activeNames.map(lower));
    const desiredAliases: string[] = [];
    for (const [flat, mcp] of FLAT_TO_MCP) {
      if (activeLc.has(flat) && allNames.has(mcp) && registeredMcpAliases.has(mcp)) {
        desiredAliases.push(mcp);
      }
    }
    const desiredSet = new Set(desiredAliases);
    // Promote auto-activated aliases to user-selected when the user explicitly kept
    // the alias while removing its flat counterpart from the tool picker.
    // We detect this by checking: (a) user changed the tool list since our last sync,
    // (b) the flat tool was previously managed but is no longer active, and
    // (c) the alias is still active. This means the user deliberately kept the alias.
    if (lastManagedToolList !== undefined) {
      const activeSet = new Set(activeNames);
      const lastManaged = new Set(lastManagedToolList);
      for (const alias of autoActivatedAliases) {
        if (!activeSet.has(alias) || desiredSet.has(alias)) continue;
        // Find the flat name for this alias
        const flatName = [...FLAT_TO_MCP.entries()].find(([, mcp]) => mcp === alias)?.[0];
        if (flatName && lastManaged.has(flatName) && !activeSet.has(flatName)) {
          // User removed the flat tool but kept the alias → promote to user-selected
          autoActivatedAliases.delete(alias);
        }
      }
    }
    // Find registered aliases currently in the active list
    const activeRegistered = activeNames.filter((n) => registeredMcpAliases.has(n) && allNames.has(n));
    // Per-alias provenance: an alias is "user-selected" if it's active and was NOT
    // auto-activated by us. Only preserve those; auto-activated aliases get re-derived
    // from the desired set each sync.
    const preserved = activeRegistered.filter((n) => !autoActivatedAliases.has(n));
    // Build result: non-alias tools + preserved user aliases + desired aliases
    const nonAlias = activeNames.filter((n) => !registeredMcpAliases.has(n));
    const next = Array.from(new Set([...nonAlias, ...preserved, ...desiredAliases]));
    // Update auto-activation tracking: aliases we added this sync that weren't user-preserved
    const preservedSet = new Set(preserved);
    autoActivatedAliases.clear();
    for (const name of desiredAliases) {
      if (!preservedSet.has(name)) {
        autoActivatedAliases.add(name);
      }
    }
    // Only call setActiveTools when the list actually changed (avoids churn).
    if (next.length !== activeNames.length || next.some((n, i) => n !== activeNames[i])) {
      pi.setActiveTools(next);
      lastManagedToolList = [...next];
    }
  } else {
    // Remove only auto-activated aliases; user-selected ones are preserved
    const next = activeNames.filter((n) => !autoActivatedAliases.has(n));
    autoActivatedAliases.clear();
    if (next.length !== activeNames.length || next.some((n, i) => n !== activeNames[i])) {
      pi.setActiveTools(next);
      lastManagedToolList = [...next];
    } else {
      // Nothing changed and aliases are disabled → stop tracking a managed list.
      lastManagedToolList = undefined;
    }
  }
}
// ============================================================================
// Extension entry point
// ============================================================================
/**
 * Extension entry point.
 *
 * - session_start / before_agent_start: (re)register MCP aliases for loaded
 *   companion extensions and sync their activation with the OAuth state.
 * - before_provider_request: for Anthropic OAuth requests only, rewrite the
 *   payload (system prompt text, tool filtering/renaming) before it is sent.
 */
export default async function piClaudeCodeUse(pi: ExtensionAPI): Promise<void> {
  pi.on("session_start", async () => {
    await registerAliasesForLoadedCompanions(pi);
  });
  pi.on("before_agent_start", async (_event, ctx) => {
    await registerAliasesForLoadedCompanions(pi);
    const model = ctx.model;
    // Aliases are only auto-activated while the current model is Anthropic OAuth.
    const isOAuth = model?.provider === "anthropic" && ctx.modelRegistry.isUsingOAuth(model);
    syncAliasActivation(pi, isOAuth);
  });
  pi.on("before_provider_request", (event, ctx) => {
    const model = ctx.model;
    // Non-Anthropic and API-key (non-OAuth) requests are left untouched.
    if (!model || model.provider !== "anthropic" || !ctx.modelRegistry.isUsingOAuth(model)) {
      return undefined;
    }
    if (!isPlainObject(event.payload)) {
      return undefined;
    }
    writeDebugLog({ stage: "before", payload: event.payload });
    // Escape hatch: PI_CLAUDE_CODE_USE_DISABLE_TOOL_FILTER=1 skips tool filtering.
    const disableFilter = process.env.PI_CLAUDE_CODE_USE_DISABLE_TOOL_FILTER === "1";
    const transformed = transformPayload(event.payload as Record<string, unknown>, disableFilter);
    writeDebugLog({ stage: "after", payload: transformed });
    return transformed;
  });
}
// ============================================================================
// Test exports
// ============================================================================
/** Internal surface exported solely for unit tests — not part of the public extension API. */
export const _test = {
  CORE_TOOL_NAMES,
  FLAT_TO_MCP,
  COMPANIONS,
  TOOL_TO_COMPANION,
  autoActivatedAliases,
  buildCaptureShim,
  collectToolNames,
  filterAndRemapTools,
  // Accessors for the module-level `lastManagedToolList` state (tests can't rebind a `let`).
  getLastManagedToolList: () => lastManagedToolList,
  isCompanionSource,
  isPlainObject,
  lower,
  registerAliasesForLoadedCompanions,
  registeredMcpAliases,
  remapMessageToolNames,
  remapToolChoice,
  rewritePromptText,
  rewriteSystemField,
  setLastManagedToolList: (v: string[] | undefined) => {
    lastManagedToolList = v;
  },
  syncAliasActivation,
  transformPayload,
};
+3
View File
@@ -11,6 +11,9 @@ importers:
'@anthropic-ai/sdk':
specifier: ^0.52.0
version: 0.52.0
'@mariozechner/jiti':
specifier: ^2.6.5
version: 2.6.5
'@mozilla/readability':
specifier: ^0.5.0
version: 0.5.0
+3 -1
View File
@@ -1,3 +1,5 @@
{
"app.model.select": "ctrl+space"
"app.model.select": "ctrl+space",
"tui.input.newLine": ["shift+enter"],
"tui.input.submit": ["enter"]
}
+102
View File
@@ -0,0 +1,102 @@
#!/usr/bin/env bash
set -euo pipefail

# Cleans up zellij sessions that are inactive:
#   - sessions marked EXITED (resurrectable metadata)
#   - running sessions with 0 attached clients
#
# Usage:
#   cleanup-zellij-inactive.sh            # delete inactive sessions
#   cleanup-zellij-inactive.sh --dry-run  # show what would be deleted

DRY_RUN=0
case "${1-}" in
  "" ) ;;
  -n|--dry-run) DRY_RUN=1 ;;
  -h|--help)
    cat <<'EOF'
cleanup-zellij-inactive.sh

Delete zellij sessions that are inactive:
  - EXITED sessions are deleted
  - running sessions with 0 attached clients are killed+deleted

Options:
  -n, --dry-run   Show what would be deleted
  -h, --help      Show this help
EOF
    exit 0
    ;;
  *)
    echo "Unknown option: $1" >&2
    echo "Use --help for usage" >&2
    exit 1
    ;;
esac

if ! command -v zellij >/dev/null 2>&1; then
  echo "zellij not found in PATH" >&2
  exit 1
fi

# `|| true` keeps set -e from aborting when no sessions exist (non-zero exit).
mapfile -t session_lines < <(zellij list-sessions --no-formatting 2>/dev/null || true)

if [ "${#session_lines[@]}" -eq 0 ]; then
  echo "No zellij sessions found"
  exit 0
fi

deleted=0
failed=0
kept=0

for line in "${session_lines[@]}"; do
  [ -z "$line" ] && continue
  # First whitespace-separated field is the session name.
  name="${line%% *}"

  is_exited=0
  # NOTE: substring match — a session whose *name* contains "EXITED" would also
  # match; acceptable for personal use, tighten if it ever bites.
  if [[ "$line" == *"EXITED"* ]]; then
    is_exited=1
  fi

  should_delete=0
  if [ "$is_exited" -eq 1 ]; then
    should_delete=1
  else
    # Running session: count attached clients (skip header line, drop blanks).
    # Uses POSIX [[:space:]] rather than GNU-only \s so the blank-line filter
    # also works under BSD sed; \s silently matches nothing there, inflating
    # the client count and keeping truly idle sessions alive.
    clients_out="$(zellij --session "$name" action list-clients 2>/dev/null || true)"
    client_count="$(printf '%s\n' "$clients_out" | tail -n +2 | sed '/^[[:space:]]*$/d' | wc -l | tr -d ' ')"
    if [ "$client_count" -eq 0 ]; then
      should_delete=1
    fi
  fi

  if [ "$should_delete" -eq 1 ]; then
    if [ "$DRY_RUN" -eq 1 ]; then
      echo "[dry-run] delete: $name"
      deleted=$((deleted + 1))
    else
      # --force also kills running sessions before deleting
      if zellij delete-session --force "$name" >/dev/null 2>&1; then
        echo "deleted: $name"
        deleted=$((deleted + 1))
      else
        echo "failed: $name" >&2
        failed=$((failed + 1))
      fi
    fi
  else
    kept=$((kept + 1))
  fi
done

echo
if [ "$DRY_RUN" -eq 1 ]; then
  echo "Would delete: $deleted"
else
  echo "Deleted: $deleted"
  echo "Failed: $failed"
fi
echo "Kept: $kept"
+52
View File
@@ -0,0 +1,52 @@
#!/usr/bin/env bash
set -euo pipefail

# Replace the current zellij tab by opening a layout in a new tab
# and closing the original tab.
#
# Usage:
#   zellij-replace-tab-layout.sh            # uses "dev"
#   zellij-replace-tab-layout.sh dev
#   zellij-replace-tab-layout.sh my-layout

# Default layout name when no argument is given.
layout="${1:-dev}"

case "${layout}" in
  -h|--help)
    cat <<'EOF'
zellij-replace-tab-layout.sh

Replace the current zellij tab with a new tab created from a layout.
This avoids `zellij action override-layout` glitches.

Usage:
  zellij-replace-tab-layout.sh [layout]

Examples:
  zellij-replace-tab-layout.sh
  zellij-replace-tab-layout.sh dev
  zellij-replace-tab-layout.sh dotfiles
EOF
    exit 0
    ;;
esac

if ! command -v zellij >/dev/null 2>&1; then
  echo "zellij not found in PATH" >&2
  exit 1
fi

# ZELLIJ is set by zellij inside its panes; absence means we're not in a session.
if [ -z "${ZELLIJ:-}" ]; then
  echo "Not inside a zellij session (ZELLIJ is not set)" >&2
  exit 1
fi

# Record the current tab id before creating the new tab so we close the right one.
# NOTE(review): assumes `current-tab-info` emits a single "id: <n>" line — confirm
# against the installed zellij version.
current_tab_id="$(zellij action current-tab-info | awk '/^id:/ { print $2 }')"
if [ -z "$current_tab_id" ]; then
  echo "Failed to detect current tab id" >&2
  exit 1
fi

zellij action new-tab --layout "$layout" >/dev/null
# Only reached if new-tab succeeded (set -e), so the old tab is never closed
# without a replacement already existing.
zellij action close-tab --tab-id "$current_tab_id"