first commit
This commit is contained in:
commit
727c2a0f1e
18
.gitignore
vendored
Normal file
18
.gitignore
vendored
Normal file
@ -0,0 +1,18 @@
|
||||
.DS_Store
|
||||
|
||||
node_modules/
|
||||
|
||||
dist/
|
||||
build/
|
||||
coverage/
|
||||
.cache/
|
||||
.turbo/
|
||||
|
||||
*.log
|
||||
*.tmp
|
||||
*.swp
|
||||
*.swo
|
||||
|
||||
.claude/
|
||||
.idea/
|
||||
.vscode/
|
||||
50
README.md
Normal file
50
README.md
Normal file
@ -0,0 +1,50 @@
|
||||
|
||||
# Restored Claude Code Source
|
||||
|
||||
|
||||

|
||||
|
||||
|
||||
This repository is a restored Claude Code source tree reconstructed primarily from source maps and missing-module backfilling.
|
||||
|
||||
It is not the original upstream repository state. Some files were unrecoverable from source maps and have been replaced with compatibility shims or degraded implementations so the
|
||||
project can install and run again.
|
||||
|
||||
## Current status
|
||||
|
||||
- The source tree is restorable and runnable in a local development workflow.
|
||||
- `bun install` succeeds.
|
||||
- `bun run version` succeeds.
|
||||
- `bun run dev` starts the restored CLI entrypoint and remains running as an interactive process.
|
||||
- A number of modules still contain restoration-time fallbacks, so behavior may differ from the original Claude Code implementation.
|
||||
|
||||
## Why this exists
|
||||
|
||||
Source maps do not contain a full original repository:
|
||||
|
||||
- type-only files are often missing
|
||||
- build-time generated files may be absent
|
||||
- private package wrappers and native bindings may not be recoverable
|
||||
- dynamic imports and resource files are frequently incomplete
|
||||
|
||||
This repository fills those gaps enough to produce a usable, runnable restored workspace.
|
||||
|
||||
## Run
|
||||
|
||||
Requirements:
|
||||
|
||||
- Bun 1.3.5 or newer
|
||||
- Node.js 24 or newer
|
||||
|
||||
Install dependencies:
|
||||
|
||||
```bash
|
||||
bun install
|
||||
|
||||
Run the restored CLI:
|
||||
|
||||
bun run dev
|
||||
|
||||
Print the restored version:
|
||||
|
||||
bun run version
|
||||
974
bun.lock
Normal file
974
bun.lock
Normal file
@ -0,0 +1,974 @@
|
||||
{
|
||||
"lockfileVersion": 1,
|
||||
"configVersion": 1,
|
||||
"workspaces": {
|
||||
"": {
|
||||
"name": "@anthropic-ai/claude-code",
|
||||
"dependencies": {
|
||||
"@alcalzone/ansi-tokenize": "*",
|
||||
"@ant/claude-for-chrome-mcp": "file:./shims/ant-claude-for-chrome-mcp",
|
||||
"@ant/computer-use-input": "file:./shims/ant-computer-use-input",
|
||||
"@ant/computer-use-mcp": "file:./shims/ant-computer-use-mcp",
|
||||
"@ant/computer-use-swift": "file:./shims/ant-computer-use-swift",
|
||||
"@anthropic-ai/claude-agent-sdk": "*",
|
||||
"@anthropic-ai/mcpb": "*",
|
||||
"@anthropic-ai/sandbox-runtime": "*",
|
||||
"@anthropic-ai/sdk": "*",
|
||||
"@aws-sdk/client-bedrock-runtime": "*",
|
||||
"@commander-js/extra-typings": "*",
|
||||
"@growthbook/growthbook": "*",
|
||||
"@modelcontextprotocol/sdk": "*",
|
||||
"@opentelemetry/api": "*",
|
||||
"@opentelemetry/api-logs": "*",
|
||||
"@opentelemetry/core": "*",
|
||||
"@opentelemetry/resources": "*",
|
||||
"@opentelemetry/sdk-logs": "*",
|
||||
"@opentelemetry/sdk-metrics": "*",
|
||||
"@opentelemetry/sdk-trace-base": "*",
|
||||
"@opentelemetry/semantic-conventions": "*",
|
||||
"ajv": "*",
|
||||
"asciichart": "*",
|
||||
"auto-bind": "*",
|
||||
"axios": "*",
|
||||
"bidi-js": "*",
|
||||
"chalk": "*",
|
||||
"chokidar": "*",
|
||||
"cli-boxes": "*",
|
||||
"code-excerpt": "*",
|
||||
"color-diff-napi": "file:./shims/color-diff-napi",
|
||||
"diff": "*",
|
||||
"emoji-regex": "*",
|
||||
"env-paths": "*",
|
||||
"execa": "*",
|
||||
"figures": "*",
|
||||
"fuse.js": "*",
|
||||
"get-east-asian-width": "*",
|
||||
"google-auth-library": "*",
|
||||
"highlight.js": "*",
|
||||
"https-proxy-agent": "*",
|
||||
"ignore": "*",
|
||||
"indent-string": "*",
|
||||
"ink": "*",
|
||||
"jsonc-parser": "*",
|
||||
"lodash-es": "*",
|
||||
"lru-cache": "*",
|
||||
"marked": "*",
|
||||
"modifiers-napi": "file:./shims/modifiers-napi",
|
||||
"p-map": "*",
|
||||
"picomatch": "*",
|
||||
"proper-lockfile": "*",
|
||||
"qrcode": "*",
|
||||
"react": "*",
|
||||
"react-reconciler": "*",
|
||||
"semver": "*",
|
||||
"shell-quote": "*",
|
||||
"signal-exit": "*",
|
||||
"stack-utils": "*",
|
||||
"strip-ansi": "*",
|
||||
"supports-hyperlinks": "*",
|
||||
"tree-kill": "*",
|
||||
"type-fest": "*",
|
||||
"undici": "*",
|
||||
"url-handler-napi": "file:./shims/url-handler-napi",
|
||||
"usehooks-ts": "*",
|
||||
"vscode-jsonrpc": "*",
|
||||
"vscode-languageserver-protocol": "*",
|
||||
"vscode-languageserver-types": "*",
|
||||
"wrap-ansi": "*",
|
||||
"ws": "*",
|
||||
"xss": "*",
|
||||
"yaml": "*",
|
||||
"zod": "*",
|
||||
},
|
||||
},
|
||||
},
|
||||
"packages": {
|
||||
"@alcalzone/ansi-tokenize": ["@alcalzone/ansi-tokenize@0.3.0", "https://mirrors.cloud.tencent.com/npm/@alcalzone/ansi-tokenize/-/ansi-tokenize-0.3.0.tgz", { "dependencies": { "ansi-styles": "^6.2.1", "is-fullwidth-code-point": "^5.0.0" } }, "sha512-p+CMKJ93HFmLkjXKlXiVGlMQEuRb6H0MokBSwUsX+S6BRX8eV5naFZpQJFfJHjRZY0Hmnqy1/r6UWl3x+19zYA=="],
|
||||
|
||||
"@ant/claude-for-chrome-mcp": ["@ant/claude-for-chrome-mcp@file:shims/ant-claude-for-chrome-mcp", {}],
|
||||
|
||||
"@ant/computer-use-input": ["@ant/computer-use-input@file:shims/ant-computer-use-input", {}],
|
||||
|
||||
"@ant/computer-use-mcp": ["@ant/computer-use-mcp@file:shims/ant-computer-use-mcp", {}],
|
||||
|
||||
"@ant/computer-use-swift": ["@ant/computer-use-swift@file:shims/ant-computer-use-swift", {}],
|
||||
|
||||
"@anthropic-ai/claude-agent-sdk": ["@anthropic-ai/claude-agent-sdk@0.2.88", "https://mirrors.cloud.tencent.com/npm/@anthropic-ai/claude-agent-sdk/-/claude-agent-sdk-0.2.88.tgz", { "dependencies": { "@anthropic-ai/sdk": "^0.74.0", "@modelcontextprotocol/sdk": "^1.27.1" }, "optionalDependencies": { "@img/sharp-darwin-arm64": "^0.34.2", "@img/sharp-darwin-x64": "^0.34.2", "@img/sharp-linux-arm": "^0.34.2", "@img/sharp-linux-arm64": "^0.34.2", "@img/sharp-linux-x64": "^0.34.2", "@img/sharp-linuxmusl-arm64": "^0.34.2", "@img/sharp-linuxmusl-x64": "^0.34.2", "@img/sharp-win32-arm64": "^0.34.2", "@img/sharp-win32-x64": "^0.34.2" }, "peerDependencies": { "zod": "^4.0.0" } }, "sha512-hm9AYD8UGpGouOlmWB6kMRjIUCMtO13N3HDsviu7/htOXJZ/KKypgEd5yW04Ro6421SwX4KfQNrwayJ6R227+g=="],
|
||||
|
||||
"@anthropic-ai/mcpb": ["@anthropic-ai/mcpb@2.1.2", "https://mirrors.cloud.tencent.com/npm/@anthropic-ai/mcpb/-/mcpb-2.1.2.tgz", { "dependencies": { "@inquirer/prompts": "^6.0.1", "commander": "^13.1.0", "fflate": "^0.8.2", "galactus": "^1.0.0", "ignore": "^7.0.5", "node-forge": "^1.3.2", "pretty-bytes": "^5.6.0", "zod": "^3.25.67", "zod-to-json-schema": "^3.24.6" }, "bin": { "mcpb": "dist/cli/cli.js" } }, "sha512-goRbBC8ySo7SWb7tRzr+tL6FxDc4JPTRCdgfD2omba7freofvjq5rom1lBnYHZHo6Mizs1jAHJeN53aZbDoy8A=="],
|
||||
|
||||
"@anthropic-ai/sandbox-runtime": ["@anthropic-ai/sandbox-runtime@0.0.44", "https://mirrors.cloud.tencent.com/npm/@anthropic-ai/sandbox-runtime/-/sandbox-runtime-0.0.44.tgz", { "dependencies": { "@pondwader/socks5-server": "^1.0.10", "@types/lodash-es": "^4.17.12", "commander": "^12.1.0", "lodash-es": "^4.17.23", "shell-quote": "^1.8.3", "zod": "^3.24.1" }, "bin": { "srt": "dist/cli.js" } }, "sha512-mmyjq0mzsHnQZyiU+FGYyaiJcPckuQpP78VB8iqFi2IOu8rcb9i5SmaOKyJENJNfY8l/1grzLMQgWq4Apvmozw=="],
|
||||
|
||||
"@anthropic-ai/sdk": ["@anthropic-ai/sdk@0.80.0", "https://mirrors.cloud.tencent.com/npm/@anthropic-ai/sdk/-/sdk-0.80.0.tgz", { "dependencies": { "json-schema-to-ts": "^3.1.1" }, "peerDependencies": { "zod": "^3.25.0 || ^4.0.0" }, "optionalPeers": ["zod"], "bin": { "anthropic-ai-sdk": "bin/cli" } }, "sha512-WeXLn7zNVk3yjeshn+xZHvld6AoFUOR3Sep6pSoHho5YbSi6HwcirqgPA5ccFuW8QTVJAAU7N8uQQC6Wa9TG+g=="],
|
||||
|
||||
"@aws-crypto/crc32": ["@aws-crypto/crc32@5.2.0", "https://mirrors.cloud.tencent.com/npm/@aws-crypto/crc32/-/crc32-5.2.0.tgz", { "dependencies": { "@aws-crypto/util": "^5.2.0", "@aws-sdk/types": "^3.222.0", "tslib": "^2.6.2" } }, "sha512-nLbCWqQNgUiwwtFsen1AdzAtvuLRsQS8rYgMuxCrdKf9kOssamGLuPwyTY9wyYblNr9+1XM8v6zoDTPPSIeANg=="],
|
||||
|
||||
"@aws-crypto/sha256-browser": ["@aws-crypto/sha256-browser@5.2.0", "https://mirrors.cloud.tencent.com/npm/@aws-crypto/sha256-browser/-/sha256-browser-5.2.0.tgz", { "dependencies": { "@aws-crypto/sha256-js": "^5.2.0", "@aws-crypto/supports-web-crypto": "^5.2.0", "@aws-crypto/util": "^5.2.0", "@aws-sdk/types": "^3.222.0", "@aws-sdk/util-locate-window": "^3.0.0", "@smithy/util-utf8": "^2.0.0", "tslib": "^2.6.2" } }, "sha512-AXfN/lGotSQwu6HNcEsIASo7kWXZ5HYWvfOmSNKDsEqC4OashTp8alTmaz+F7TC2L083SFv5RdB+qU3Vs1kZqw=="],
|
||||
|
||||
"@aws-crypto/sha256-js": ["@aws-crypto/sha256-js@5.2.0", "https://mirrors.cloud.tencent.com/npm/@aws-crypto/sha256-js/-/sha256-js-5.2.0.tgz", { "dependencies": { "@aws-crypto/util": "^5.2.0", "@aws-sdk/types": "^3.222.0", "tslib": "^2.6.2" } }, "sha512-FFQQyu7edu4ufvIZ+OadFpHHOt+eSTBaYaki44c+akjg7qZg9oOQeLlk77F6tSYqjDAFClrHJk9tMf0HdVyOvA=="],
|
||||
|
||||
"@aws-crypto/supports-web-crypto": ["@aws-crypto/supports-web-crypto@5.2.0", "https://mirrors.cloud.tencent.com/npm/@aws-crypto/supports-web-crypto/-/supports-web-crypto-5.2.0.tgz", { "dependencies": { "tslib": "^2.6.2" } }, "sha512-iAvUotm021kM33eCdNfwIN//F77/IADDSs58i+MDaOqFrVjZo9bAal0NK7HurRuWLLpF1iLX7gbWrjHjeo+YFg=="],
|
||||
|
||||
"@aws-crypto/util": ["@aws-crypto/util@5.2.0", "https://mirrors.cloud.tencent.com/npm/@aws-crypto/util/-/util-5.2.0.tgz", { "dependencies": { "@aws-sdk/types": "^3.222.0", "@smithy/util-utf8": "^2.0.0", "tslib": "^2.6.2" } }, "sha512-4RkU9EsI6ZpBve5fseQlGNUWKMa1RLPQ1dnjnQoe07ldfIzcsGb5hC5W0Dm7u423KWzawlrpbjXBrXCEv9zazQ=="],
|
||||
|
||||
"@aws-sdk/client-bedrock-runtime": ["@aws-sdk/client-bedrock-runtime@3.1020.0", "https://mirrors.cloud.tencent.com/npm/@aws-sdk/client-bedrock-runtime/-/client-bedrock-runtime-3.1020.0.tgz", { "dependencies": { "@aws-crypto/sha256-browser": "5.2.0", "@aws-crypto/sha256-js": "5.2.0", "@aws-sdk/core": "^3.973.26", "@aws-sdk/credential-provider-node": "^3.972.28", "@aws-sdk/eventstream-handler-node": "^3.972.12", "@aws-sdk/middleware-eventstream": "^3.972.8", "@aws-sdk/middleware-host-header": "^3.972.8", "@aws-sdk/middleware-logger": "^3.972.8", "@aws-sdk/middleware-recursion-detection": "^3.972.9", "@aws-sdk/middleware-user-agent": "^3.972.27", "@aws-sdk/middleware-websocket": "^3.972.14", "@aws-sdk/region-config-resolver": "^3.972.10", "@aws-sdk/token-providers": "3.1020.0", "@aws-sdk/types": "^3.973.6", "@aws-sdk/util-endpoints": "^3.996.5", "@aws-sdk/util-user-agent-browser": "^3.972.8", "@aws-sdk/util-user-agent-node": "^3.973.13", "@smithy/config-resolver": "^4.4.13", "@smithy/core": "^3.23.13", "@smithy/eventstream-serde-browser": "^4.2.12", "@smithy/eventstream-serde-config-resolver": "^4.3.12", "@smithy/eventstream-serde-node": "^4.2.12", "@smithy/fetch-http-handler": "^5.3.15", "@smithy/hash-node": "^4.2.12", "@smithy/invalid-dependency": "^4.2.12", "@smithy/middleware-content-length": "^4.2.12", "@smithy/middleware-endpoint": "^4.4.28", "@smithy/middleware-retry": "^4.4.45", "@smithy/middleware-serde": "^4.2.16", "@smithy/middleware-stack": "^4.2.12", "@smithy/node-config-provider": "^4.3.12", "@smithy/node-http-handler": "^4.5.1", "@smithy/protocol-http": "^5.3.12", "@smithy/smithy-client": "^4.12.8", "@smithy/types": "^4.13.1", "@smithy/url-parser": "^4.2.12", "@smithy/util-base64": "^4.3.2", "@smithy/util-body-length-browser": "^4.2.2", "@smithy/util-body-length-node": "^4.2.3", "@smithy/util-defaults-mode-browser": "^4.3.44", "@smithy/util-defaults-mode-node": "^4.2.48", "@smithy/util-endpoints": "^3.3.3", "@smithy/util-middleware": "^4.2.12", 
"@smithy/util-retry": "^4.2.12", "@smithy/util-stream": "^4.5.21", "@smithy/util-utf8": "^4.2.2", "tslib": "^2.6.2" } }, "sha512-nqDCbaB05gRc3FuIEN74Mo04+k8RNI0YT2YBAU/9nioqgDyoqzMx8Ia2QWaw9UhUyIHMBjcFEfKIPfCZx7caCw=="],
|
||||
|
||||
"@aws-sdk/core": ["@aws-sdk/core@3.973.26", "https://mirrors.cloud.tencent.com/npm/@aws-sdk/core/-/core-3.973.26.tgz", { "dependencies": { "@aws-sdk/types": "^3.973.6", "@aws-sdk/xml-builder": "^3.972.16", "@smithy/core": "^3.23.13", "@smithy/node-config-provider": "^4.3.12", "@smithy/property-provider": "^4.2.12", "@smithy/protocol-http": "^5.3.12", "@smithy/signature-v4": "^5.3.12", "@smithy/smithy-client": "^4.12.8", "@smithy/types": "^4.13.1", "@smithy/util-base64": "^4.3.2", "@smithy/util-middleware": "^4.2.12", "@smithy/util-utf8": "^4.2.2", "tslib": "^2.6.2" } }, "sha512-A/E6n2W42ruU+sfWk+mMUOyVXbsSgGrY3MJ9/0Az5qUdG67y8I6HYzzoAa+e/lzxxl1uCYmEL6BTMi9ZiZnplQ=="],
|
||||
|
||||
"@aws-sdk/credential-provider-env": ["@aws-sdk/credential-provider-env@3.972.24", "https://mirrors.cloud.tencent.com/npm/@aws-sdk/credential-provider-env/-/credential-provider-env-3.972.24.tgz", { "dependencies": { "@aws-sdk/core": "^3.973.26", "@aws-sdk/types": "^3.973.6", "@smithy/property-provider": "^4.2.12", "@smithy/types": "^4.13.1", "tslib": "^2.6.2" } }, "sha512-FWg8uFmT6vQM7VuzELzwVo5bzExGaKHdubn0StjgrcU5FvuLExUe+k06kn/40uKv59rYzhez8eFNM4yYE/Yb/w=="],
|
||||
|
||||
"@aws-sdk/credential-provider-http": ["@aws-sdk/credential-provider-http@3.972.26", "https://mirrors.cloud.tencent.com/npm/@aws-sdk/credential-provider-http/-/credential-provider-http-3.972.26.tgz", { "dependencies": { "@aws-sdk/core": "^3.973.26", "@aws-sdk/types": "^3.973.6", "@smithy/fetch-http-handler": "^5.3.15", "@smithy/node-http-handler": "^4.5.1", "@smithy/property-provider": "^4.2.12", "@smithy/protocol-http": "^5.3.12", "@smithy/smithy-client": "^4.12.8", "@smithy/types": "^4.13.1", "@smithy/util-stream": "^4.5.21", "tslib": "^2.6.2" } }, "sha512-CY4ppZ+qHYqcXqBVi//sdHST1QK3KzOEiLtpLsc9W2k2vfZPKExGaQIsOwcyvjpjUEolotitmd3mUNY56IwDEA=="],
|
||||
|
||||
"@aws-sdk/credential-provider-ini": ["@aws-sdk/credential-provider-ini@3.972.27", "https://mirrors.cloud.tencent.com/npm/@aws-sdk/credential-provider-ini/-/credential-provider-ini-3.972.27.tgz", { "dependencies": { "@aws-sdk/core": "^3.973.26", "@aws-sdk/credential-provider-env": "^3.972.24", "@aws-sdk/credential-provider-http": "^3.972.26", "@aws-sdk/credential-provider-login": "^3.972.27", "@aws-sdk/credential-provider-process": "^3.972.24", "@aws-sdk/credential-provider-sso": "^3.972.27", "@aws-sdk/credential-provider-web-identity": "^3.972.27", "@aws-sdk/nested-clients": "^3.996.17", "@aws-sdk/types": "^3.973.6", "@smithy/credential-provider-imds": "^4.2.12", "@smithy/property-provider": "^4.2.12", "@smithy/shared-ini-file-loader": "^4.4.7", "@smithy/types": "^4.13.1", "tslib": "^2.6.2" } }, "sha512-Um26EsNSUfVUX0wUXnUA1W3wzKhVy6nviEElsh5lLZUYj9bk6DXOPnpte0gt+WHubcVfVsRk40bbm4KaroTEag=="],
|
||||
|
||||
"@aws-sdk/credential-provider-login": ["@aws-sdk/credential-provider-login@3.972.27", "https://mirrors.cloud.tencent.com/npm/@aws-sdk/credential-provider-login/-/credential-provider-login-3.972.27.tgz", { "dependencies": { "@aws-sdk/core": "^3.973.26", "@aws-sdk/nested-clients": "^3.996.17", "@aws-sdk/types": "^3.973.6", "@smithy/property-provider": "^4.2.12", "@smithy/protocol-http": "^5.3.12", "@smithy/shared-ini-file-loader": "^4.4.7", "@smithy/types": "^4.13.1", "tslib": "^2.6.2" } }, "sha512-t3ehEtHomGZwg5Gixw4fYbYtG9JBnjfAjSDabxhPEu/KLLUp0BB37/APX7MSKXQhX6ZH7pseuACFJ19NrAkNdg=="],
|
||||
|
||||
"@aws-sdk/credential-provider-node": ["@aws-sdk/credential-provider-node@3.972.28", "https://mirrors.cloud.tencent.com/npm/@aws-sdk/credential-provider-node/-/credential-provider-node-3.972.28.tgz", { "dependencies": { "@aws-sdk/credential-provider-env": "^3.972.24", "@aws-sdk/credential-provider-http": "^3.972.26", "@aws-sdk/credential-provider-ini": "^3.972.27", "@aws-sdk/credential-provider-process": "^3.972.24", "@aws-sdk/credential-provider-sso": "^3.972.27", "@aws-sdk/credential-provider-web-identity": "^3.972.27", "@aws-sdk/types": "^3.973.6", "@smithy/credential-provider-imds": "^4.2.12", "@smithy/property-provider": "^4.2.12", "@smithy/shared-ini-file-loader": "^4.4.7", "@smithy/types": "^4.13.1", "tslib": "^2.6.2" } }, "sha512-rren+P6k5rShG5PX61iVi40kKdueyuMLBRTctQbyR5LooO9Ygr5L6R7ilG7RF1957NSH3KC3TU206fZuKwjSpQ=="],
|
||||
|
||||
"@aws-sdk/credential-provider-process": ["@aws-sdk/credential-provider-process@3.972.24", "https://mirrors.cloud.tencent.com/npm/@aws-sdk/credential-provider-process/-/credential-provider-process-3.972.24.tgz", { "dependencies": { "@aws-sdk/core": "^3.973.26", "@aws-sdk/types": "^3.973.6", "@smithy/property-provider": "^4.2.12", "@smithy/shared-ini-file-loader": "^4.4.7", "@smithy/types": "^4.13.1", "tslib": "^2.6.2" } }, "sha512-Q2k/XLrFXhEztPHqj4SLCNID3hEPdlhh1CDLBpNnM+1L8fq7P+yON9/9M1IGN/dA5W45v44ylERfXtDAlmMNmw=="],
|
||||
|
||||
"@aws-sdk/credential-provider-sso": ["@aws-sdk/credential-provider-sso@3.972.27", "https://mirrors.cloud.tencent.com/npm/@aws-sdk/credential-provider-sso/-/credential-provider-sso-3.972.27.tgz", { "dependencies": { "@aws-sdk/core": "^3.973.26", "@aws-sdk/nested-clients": "^3.996.17", "@aws-sdk/token-providers": "3.1020.0", "@aws-sdk/types": "^3.973.6", "@smithy/property-provider": "^4.2.12", "@smithy/shared-ini-file-loader": "^4.4.7", "@smithy/types": "^4.13.1", "tslib": "^2.6.2" } }, "sha512-CWXeGjlbBuHcm9appZUgXKP2zHDyTti0/+gXpSFJ2J3CnSwf1KWjicjN0qG2ozkMH6blrrzMrimeIOEYNl238Q=="],
|
||||
|
||||
"@aws-sdk/credential-provider-web-identity": ["@aws-sdk/credential-provider-web-identity@3.972.27", "https://mirrors.cloud.tencent.com/npm/@aws-sdk/credential-provider-web-identity/-/credential-provider-web-identity-3.972.27.tgz", { "dependencies": { "@aws-sdk/core": "^3.973.26", "@aws-sdk/nested-clients": "^3.996.17", "@aws-sdk/types": "^3.973.6", "@smithy/property-provider": "^4.2.12", "@smithy/shared-ini-file-loader": "^4.4.7", "@smithy/types": "^4.13.1", "tslib": "^2.6.2" } }, "sha512-CUY4hQIFswdQNEsRGEzGBUKGMK5KpqmNDdu2ROMgI+45PLFS8H0y3Tm7kvM16uvvw3n1pVxk85tnRVUTgtaa1w=="],
|
||||
|
||||
"@aws-sdk/eventstream-handler-node": ["@aws-sdk/eventstream-handler-node@3.972.12", "https://mirrors.cloud.tencent.com/npm/@aws-sdk/eventstream-handler-node/-/eventstream-handler-node-3.972.12.tgz", { "dependencies": { "@aws-sdk/types": "^3.973.6", "@smithy/eventstream-codec": "^4.2.12", "@smithy/types": "^4.13.1", "tslib": "^2.6.2" } }, "sha512-ruyc/MNR6e+cUrGCth7fLQ12RXBZDy/bV06tgqB9Z5n/0SN/C0m6bsQEV8FF9zPI6VSAOaRd0rNgmpYVnGawrQ=="],
|
||||
|
||||
"@aws-sdk/middleware-eventstream": ["@aws-sdk/middleware-eventstream@3.972.8", "https://mirrors.cloud.tencent.com/npm/@aws-sdk/middleware-eventstream/-/middleware-eventstream-3.972.8.tgz", { "dependencies": { "@aws-sdk/types": "^3.973.6", "@smithy/protocol-http": "^5.3.12", "@smithy/types": "^4.13.1", "tslib": "^2.6.2" } }, "sha512-r+oP+tbCxgqXVC3pu3MUVePgSY0ILMjA+aEwOosS77m3/DRbtvHrHwqvMcw+cjANMeGzJ+i0ar+n77KXpRA8RQ=="],
|
||||
|
||||
"@aws-sdk/middleware-host-header": ["@aws-sdk/middleware-host-header@3.972.8", "https://mirrors.cloud.tencent.com/npm/@aws-sdk/middleware-host-header/-/middleware-host-header-3.972.8.tgz", { "dependencies": { "@aws-sdk/types": "^3.973.6", "@smithy/protocol-http": "^5.3.12", "@smithy/types": "^4.13.1", "tslib": "^2.6.2" } }, "sha512-wAr2REfKsqoKQ+OkNqvOShnBoh+nkPurDKW7uAeVSu6kUECnWlSJiPvnoqxGlfousEY/v9LfS9sNc46hjSYDIQ=="],
|
||||
|
||||
"@aws-sdk/middleware-logger": ["@aws-sdk/middleware-logger@3.972.8", "https://mirrors.cloud.tencent.com/npm/@aws-sdk/middleware-logger/-/middleware-logger-3.972.8.tgz", { "dependencies": { "@aws-sdk/types": "^3.973.6", "@smithy/types": "^4.13.1", "tslib": "^2.6.2" } }, "sha512-CWl5UCM57WUFaFi5kB7IBY1UmOeLvNZAZ2/OZ5l20ldiJ3TiIz1pC65gYj8X0BCPWkeR1E32mpsCk1L1I4n+lA=="],
|
||||
|
||||
"@aws-sdk/middleware-recursion-detection": ["@aws-sdk/middleware-recursion-detection@3.972.9", "https://mirrors.cloud.tencent.com/npm/@aws-sdk/middleware-recursion-detection/-/middleware-recursion-detection-3.972.9.tgz", { "dependencies": { "@aws-sdk/types": "^3.973.6", "@aws/lambda-invoke-store": "^0.2.2", "@smithy/protocol-http": "^5.3.12", "@smithy/types": "^4.13.1", "tslib": "^2.6.2" } }, "sha512-/Wt5+CT8dpTFQxEJ9iGy/UGrXr7p2wlIOEHvIr/YcHYByzoLjrqkYqXdJjd9UIgWjv7eqV2HnFJen93UTuwfTQ=="],
|
||||
|
||||
"@aws-sdk/middleware-user-agent": ["@aws-sdk/middleware-user-agent@3.972.27", "https://mirrors.cloud.tencent.com/npm/@aws-sdk/middleware-user-agent/-/middleware-user-agent-3.972.27.tgz", { "dependencies": { "@aws-sdk/core": "^3.973.26", "@aws-sdk/types": "^3.973.6", "@aws-sdk/util-endpoints": "^3.996.5", "@smithy/core": "^3.23.13", "@smithy/protocol-http": "^5.3.12", "@smithy/types": "^4.13.1", "@smithy/util-retry": "^4.2.12", "tslib": "^2.6.2" } }, "sha512-TIRLO5UR2+FVUGmhYoAwVkKhcVzywEDX/5LzR9tjy1h8FQAXOtFg2IqgmwvxU7y933rkTn9rl6AdgcAUgQ1/Kg=="],
|
||||
|
||||
"@aws-sdk/middleware-websocket": ["@aws-sdk/middleware-websocket@3.972.14", "https://mirrors.cloud.tencent.com/npm/@aws-sdk/middleware-websocket/-/middleware-websocket-3.972.14.tgz", { "dependencies": { "@aws-sdk/types": "^3.973.6", "@aws-sdk/util-format-url": "^3.972.8", "@smithy/eventstream-codec": "^4.2.12", "@smithy/eventstream-serde-browser": "^4.2.12", "@smithy/fetch-http-handler": "^5.3.15", "@smithy/protocol-http": "^5.3.12", "@smithy/signature-v4": "^5.3.12", "@smithy/types": "^4.13.1", "@smithy/util-base64": "^4.3.2", "@smithy/util-hex-encoding": "^4.2.2", "@smithy/util-utf8": "^4.2.2", "tslib": "^2.6.2" } }, "sha512-qnfDlIHjm6DrTYNvWOUbnZdVKgtoKbO/Qzj+C0Wp5Y7VUrsvBRQtGKxD+hc+mRTS4N0kBJ6iZ3+zxm4N1OSyjg=="],
|
||||
|
||||
"@aws-sdk/nested-clients": ["@aws-sdk/nested-clients@3.996.17", "https://mirrors.cloud.tencent.com/npm/@aws-sdk/nested-clients/-/nested-clients-3.996.17.tgz", { "dependencies": { "@aws-crypto/sha256-browser": "5.2.0", "@aws-crypto/sha256-js": "5.2.0", "@aws-sdk/core": "^3.973.26", "@aws-sdk/middleware-host-header": "^3.972.8", "@aws-sdk/middleware-logger": "^3.972.8", "@aws-sdk/middleware-recursion-detection": "^3.972.9", "@aws-sdk/middleware-user-agent": "^3.972.27", "@aws-sdk/region-config-resolver": "^3.972.10", "@aws-sdk/types": "^3.973.6", "@aws-sdk/util-endpoints": "^3.996.5", "@aws-sdk/util-user-agent-browser": "^3.972.8", "@aws-sdk/util-user-agent-node": "^3.973.13", "@smithy/config-resolver": "^4.4.13", "@smithy/core": "^3.23.13", "@smithy/fetch-http-handler": "^5.3.15", "@smithy/hash-node": "^4.2.12", "@smithy/invalid-dependency": "^4.2.12", "@smithy/middleware-content-length": "^4.2.12", "@smithy/middleware-endpoint": "^4.4.28", "@smithy/middleware-retry": "^4.4.45", "@smithy/middleware-serde": "^4.2.16", "@smithy/middleware-stack": "^4.2.12", "@smithy/node-config-provider": "^4.3.12", "@smithy/node-http-handler": "^4.5.1", "@smithy/protocol-http": "^5.3.12", "@smithy/smithy-client": "^4.12.8", "@smithy/types": "^4.13.1", "@smithy/url-parser": "^4.2.12", "@smithy/util-base64": "^4.3.2", "@smithy/util-body-length-browser": "^4.2.2", "@smithy/util-body-length-node": "^4.2.3", "@smithy/util-defaults-mode-browser": "^4.3.44", "@smithy/util-defaults-mode-node": "^4.2.48", "@smithy/util-endpoints": "^3.3.3", "@smithy/util-middleware": "^4.2.12", "@smithy/util-retry": "^4.2.12", "@smithy/util-utf8": "^4.2.2", "tslib": "^2.6.2" } }, "sha512-7B0HIX0tEFmOSJuWzdHZj1WhMXSryM+h66h96ZkqSncoY7J6wq61KOu4Kr57b/YnJP3J/EeQYVFulgR281h+7A=="],
|
||||
|
||||
"@aws-sdk/region-config-resolver": ["@aws-sdk/region-config-resolver@3.972.10", "https://mirrors.cloud.tencent.com/npm/@aws-sdk/region-config-resolver/-/region-config-resolver-3.972.10.tgz", { "dependencies": { "@aws-sdk/types": "^3.973.6", "@smithy/config-resolver": "^4.4.13", "@smithy/node-config-provider": "^4.3.12", "@smithy/types": "^4.13.1", "tslib": "^2.6.2" } }, "sha512-1dq9ToC6e070QvnVhhbAs3bb5r6cQ10gTVc6cyRV5uvQe7P138TV2uG2i6+Yok4bAkVAcx5AqkTEBUvWEtBlsQ=="],
|
||||
|
||||
"@aws-sdk/token-providers": ["@aws-sdk/token-providers@3.1020.0", "https://mirrors.cloud.tencent.com/npm/@aws-sdk/token-providers/-/token-providers-3.1020.0.tgz", { "dependencies": { "@aws-sdk/core": "^3.973.26", "@aws-sdk/nested-clients": "^3.996.17", "@aws-sdk/types": "^3.973.6", "@smithy/property-provider": "^4.2.12", "@smithy/shared-ini-file-loader": "^4.4.7", "@smithy/types": "^4.13.1", "tslib": "^2.6.2" } }, "sha512-T61KA/VKl0zVUubdxigr1ut7SEpwE1/4CIKb14JDLyTAOne2yWKtQE1dDCSHl0UqrZNwW/bTt+EBHfQbslZJdw=="],
|
||||
|
||||
"@aws-sdk/types": ["@aws-sdk/types@3.973.6", "https://mirrors.cloud.tencent.com/npm/@aws-sdk/types/-/types-3.973.6.tgz", { "dependencies": { "@smithy/types": "^4.13.1", "tslib": "^2.6.2" } }, "sha512-Atfcy4E++beKtwJHiDln2Nby8W/mam64opFPTiHEqgsthqeydFS1pY+OUlN1ouNOmf8ArPU/6cDS65anOP3KQw=="],
|
||||
|
||||
"@aws-sdk/util-endpoints": ["@aws-sdk/util-endpoints@3.996.5", "https://mirrors.cloud.tencent.com/npm/@aws-sdk/util-endpoints/-/util-endpoints-3.996.5.tgz", { "dependencies": { "@aws-sdk/types": "^3.973.6", "@smithy/types": "^4.13.1", "@smithy/url-parser": "^4.2.12", "@smithy/util-endpoints": "^3.3.3", "tslib": "^2.6.2" } }, "sha512-Uh93L5sXFNbyR5sEPMzUU8tJ++Ku97EY4udmC01nB8Zu+xfBPwpIwJ6F7snqQeq8h2pf+8SGN5/NoytfKgYPIw=="],
|
||||
|
||||
"@aws-sdk/util-format-url": ["@aws-sdk/util-format-url@3.972.8", "https://mirrors.cloud.tencent.com/npm/@aws-sdk/util-format-url/-/util-format-url-3.972.8.tgz", { "dependencies": { "@aws-sdk/types": "^3.973.6", "@smithy/querystring-builder": "^4.2.12", "@smithy/types": "^4.13.1", "tslib": "^2.6.2" } }, "sha512-J6DS9oocrgxM8xlUTTmQOuwRF6rnAGEujAN9SAzllcrQmwn5iJ58ogxy3SEhD0Q7JZvlA5jvIXBkpQRqEqlE9A=="],
|
||||
|
||||
"@aws-sdk/util-locate-window": ["@aws-sdk/util-locate-window@3.965.5", "https://mirrors.cloud.tencent.com/npm/@aws-sdk/util-locate-window/-/util-locate-window-3.965.5.tgz", { "dependencies": { "tslib": "^2.6.2" } }, "sha512-WhlJNNINQB+9qtLtZJcpQdgZw3SCDCpXdUJP7cToGwHbCWCnRckGlc6Bx/OhWwIYFNAn+FIydY8SZ0QmVu3xTQ=="],
|
||||
|
||||
"@aws-sdk/util-user-agent-browser": ["@aws-sdk/util-user-agent-browser@3.972.8", "https://mirrors.cloud.tencent.com/npm/@aws-sdk/util-user-agent-browser/-/util-user-agent-browser-3.972.8.tgz", { "dependencies": { "@aws-sdk/types": "^3.973.6", "@smithy/types": "^4.13.1", "bowser": "^2.11.0", "tslib": "^2.6.2" } }, "sha512-B3KGXJviV2u6Cdw2SDY2aDhoJkVfY/Q/Trwk2CMSkikE1Oi6gRzxhvhIfiRpHfmIsAhV4EA54TVEX8K6CbHbkA=="],
|
||||
|
||||
"@aws-sdk/util-user-agent-node": ["@aws-sdk/util-user-agent-node@3.973.13", "https://mirrors.cloud.tencent.com/npm/@aws-sdk/util-user-agent-node/-/util-user-agent-node-3.973.13.tgz", { "dependencies": { "@aws-sdk/middleware-user-agent": "^3.972.27", "@aws-sdk/types": "^3.973.6", "@smithy/node-config-provider": "^4.3.12", "@smithy/types": "^4.13.1", "@smithy/util-config-provider": "^4.2.2", "tslib": "^2.6.2" }, "peerDependencies": { "aws-crt": ">=1.0.0" }, "optionalPeers": ["aws-crt"] }, "sha512-s1dCJ0J9WU9UPkT3FFqhKTSquYTkqWXGRaapHFyWwwJH86ZussewhNST5R5TwXVL1VSHq4aJVl9fWK+svaRVCQ=="],
|
||||
|
||||
"@aws-sdk/xml-builder": ["@aws-sdk/xml-builder@3.972.16", "https://mirrors.cloud.tencent.com/npm/@aws-sdk/xml-builder/-/xml-builder-3.972.16.tgz", { "dependencies": { "@smithy/types": "^4.13.1", "fast-xml-parser": "5.5.8", "tslib": "^2.6.2" } }, "sha512-iu2pyvaqmeatIJLURLqx9D+4jKAdTH20ntzB6BFwjyN7V960r4jK32mx0Zf7YbtOYAbmbtQfDNuL60ONinyw7A=="],
|
||||
|
||||
"@aws/lambda-invoke-store": ["@aws/lambda-invoke-store@0.2.4", "https://mirrors.cloud.tencent.com/npm/@aws/lambda-invoke-store/-/lambda-invoke-store-0.2.4.tgz", {}, "sha512-iY8yvjE0y651BixKNPgmv1WrQc+GZ142sb0z4gYnChDDY2YqI4P/jsSopBWrKfAt7LOJAkOXt7rC/hms+WclQQ=="],
|
||||
|
||||
"@babel/runtime": ["@babel/runtime@7.29.2", "https://mirrors.cloud.tencent.com/npm/@babel/runtime/-/runtime-7.29.2.tgz", {}, "sha512-JiDShH45zKHWyGe4ZNVRrCjBz8Nh9TMmZG1kh4QTK8hCBTWBi8Da+i7s1fJw7/lYpM4ccepSNfqzZ/QvABBi5g=="],
|
||||
|
||||
"@commander-js/extra-typings": ["@commander-js/extra-typings@14.0.0", "https://mirrors.cloud.tencent.com/npm/@commander-js/extra-typings/-/extra-typings-14.0.0.tgz", { "peerDependencies": { "commander": "~14.0.0" } }, "sha512-hIn0ncNaJRLkZrxBIp5AsW/eXEHNKYQBh0aPdoUqNgD+Io3NIykQqpKFyKcuasZhicGaEZJX/JBSIkZ4e5x8Dg=="],
|
||||
|
||||
"@growthbook/growthbook": ["@growthbook/growthbook@1.6.5", "https://mirrors.cloud.tencent.com/npm/@growthbook/growthbook/-/growthbook-1.6.5.tgz", { "dependencies": { "dom-mutator": "^0.6.0" } }, "sha512-mUaMsgeUTpRIUOTn33EUXHRK6j7pxBjwqH4WpQyq+pukjd1AIzWlEa6w7i6bInJUcweGgP2beXZmaP6b6UPn7A=="],
|
||||
|
||||
"@hono/node-server": ["@hono/node-server@1.19.12", "https://mirrors.cloud.tencent.com/npm/@hono/node-server/-/node-server-1.19.12.tgz", { "peerDependencies": { "hono": "^4" } }, "sha512-txsUW4SQ1iilgE0l9/e9VQWmELXifEFvmdA1j6WFh/aFPj99hIntrSsq/if0UWyGVkmrRPKA1wCeP+UCr1B9Uw=="],
|
||||
|
||||
"@img/sharp-darwin-arm64": ["@img/sharp-darwin-arm64@0.34.5", "https://mirrors.cloud.tencent.com/npm/@img/sharp-darwin-arm64/-/sharp-darwin-arm64-0.34.5.tgz", { "optionalDependencies": { "@img/sharp-libvips-darwin-arm64": "1.2.4" }, "os": "darwin", "cpu": "arm64" }, "sha512-imtQ3WMJXbMY4fxb/Ndp6HBTNVtWCUI0WdobyheGf5+ad6xX8VIDO8u2xE4qc/fr08CKG/7dDseFtn6M6g/r3w=="],
|
||||
|
||||
"@img/sharp-darwin-x64": ["@img/sharp-darwin-x64@0.34.5", "https://mirrors.cloud.tencent.com/npm/@img/sharp-darwin-x64/-/sharp-darwin-x64-0.34.5.tgz", { "optionalDependencies": { "@img/sharp-libvips-darwin-x64": "1.2.4" }, "os": "darwin", "cpu": "x64" }, "sha512-YNEFAF/4KQ/PeW0N+r+aVVsoIY0/qxxikF2SWdp+NRkmMB7y9LBZAVqQ4yhGCm/H3H270OSykqmQMKLBhBJDEw=="],
|
||||
|
||||
"@img/sharp-libvips-darwin-arm64": ["@img/sharp-libvips-darwin-arm64@1.2.4", "https://mirrors.cloud.tencent.com/npm/@img/sharp-libvips-darwin-arm64/-/sharp-libvips-darwin-arm64-1.2.4.tgz", { "os": "darwin", "cpu": "arm64" }, "sha512-zqjjo7RatFfFoP0MkQ51jfuFZBnVE2pRiaydKJ1G/rHZvnsrHAOcQALIi9sA5co5xenQdTugCvtb1cuf78Vf4g=="],
|
||||
|
||||
"@img/sharp-libvips-darwin-x64": ["@img/sharp-libvips-darwin-x64@1.2.4", "https://mirrors.cloud.tencent.com/npm/@img/sharp-libvips-darwin-x64/-/sharp-libvips-darwin-x64-1.2.4.tgz", { "os": "darwin", "cpu": "x64" }, "sha512-1IOd5xfVhlGwX+zXv2N93k0yMONvUlANylbJw1eTah8K/Jtpi15KC+WSiaX/nBmbm2HxRM1gZ0nSdjSsrZbGKg=="],
|
||||
|
||||
"@img/sharp-libvips-linux-arm": ["@img/sharp-libvips-linux-arm@1.2.4", "https://mirrors.cloud.tencent.com/npm/@img/sharp-libvips-linux-arm/-/sharp-libvips-linux-arm-1.2.4.tgz", { "os": "linux", "cpu": "arm" }, "sha512-bFI7xcKFELdiNCVov8e44Ia4u2byA+l3XtsAj+Q8tfCwO6BQ8iDojYdvoPMqsKDkuoOo+X6HZA0s0q11ANMQ8A=="],
|
||||
|
||||
"@img/sharp-libvips-linux-arm64": ["@img/sharp-libvips-linux-arm64@1.2.4", "https://mirrors.cloud.tencent.com/npm/@img/sharp-libvips-linux-arm64/-/sharp-libvips-linux-arm64-1.2.4.tgz", { "os": "linux", "cpu": "arm64" }, "sha512-excjX8DfsIcJ10x1Kzr4RcWe1edC9PquDRRPx3YVCvQv+U5p7Yin2s32ftzikXojb1PIFc/9Mt28/y+iRklkrw=="],
|
||||
|
||||
"@img/sharp-libvips-linux-x64": ["@img/sharp-libvips-linux-x64@1.2.4", "https://mirrors.cloud.tencent.com/npm/@img/sharp-libvips-linux-x64/-/sharp-libvips-linux-x64-1.2.4.tgz", { "os": "linux", "cpu": "x64" }, "sha512-tJxiiLsmHc9Ax1bz3oaOYBURTXGIRDODBqhveVHonrHJ9/+k89qbLl0bcJns+e4t4rvaNBxaEZsFtSfAdquPrw=="],
|
||||
|
||||
"@img/sharp-libvips-linuxmusl-arm64": ["@img/sharp-libvips-linuxmusl-arm64@1.2.4", "https://mirrors.cloud.tencent.com/npm/@img/sharp-libvips-linuxmusl-arm64/-/sharp-libvips-linuxmusl-arm64-1.2.4.tgz", { "os": "linux", "cpu": "arm64" }, "sha512-FVQHuwx1IIuNow9QAbYUzJ+En8KcVm9Lk5+uGUQJHaZmMECZmOlix9HnH7n1TRkXMS0pGxIJokIVB9SuqZGGXw=="],
|
||||
|
||||
"@img/sharp-libvips-linuxmusl-x64": ["@img/sharp-libvips-linuxmusl-x64@1.2.4", "https://mirrors.cloud.tencent.com/npm/@img/sharp-libvips-linuxmusl-x64/-/sharp-libvips-linuxmusl-x64-1.2.4.tgz", { "os": "linux", "cpu": "x64" }, "sha512-+LpyBk7L44ZIXwz/VYfglaX/okxezESc6UxDSoyo2Ks6Jxc4Y7sGjpgU9s4PMgqgjj1gZCylTieNamqA1MF7Dg=="],
|
||||
|
||||
"@img/sharp-linux-arm": ["@img/sharp-linux-arm@0.34.5", "https://mirrors.cloud.tencent.com/npm/@img/sharp-linux-arm/-/sharp-linux-arm-0.34.5.tgz", { "optionalDependencies": { "@img/sharp-libvips-linux-arm": "1.2.4" }, "os": "linux", "cpu": "arm" }, "sha512-9dLqsvwtg1uuXBGZKsxem9595+ujv0sJ6Vi8wcTANSFpwV/GONat5eCkzQo/1O6zRIkh0m/8+5BjrRr7jDUSZw=="],
|
||||
|
||||
"@img/sharp-linux-arm64": ["@img/sharp-linux-arm64@0.34.5", "https://mirrors.cloud.tencent.com/npm/@img/sharp-linux-arm64/-/sharp-linux-arm64-0.34.5.tgz", { "optionalDependencies": { "@img/sharp-libvips-linux-arm64": "1.2.4" }, "os": "linux", "cpu": "arm64" }, "sha512-bKQzaJRY/bkPOXyKx5EVup7qkaojECG6NLYswgktOZjaXecSAeCWiZwwiFf3/Y+O1HrauiE3FVsGxFg8c24rZg=="],
|
||||
|
||||
"@img/sharp-linux-x64": ["@img/sharp-linux-x64@0.34.5", "https://mirrors.cloud.tencent.com/npm/@img/sharp-linux-x64/-/sharp-linux-x64-0.34.5.tgz", { "optionalDependencies": { "@img/sharp-libvips-linux-x64": "1.2.4" }, "os": "linux", "cpu": "x64" }, "sha512-MEzd8HPKxVxVenwAa+JRPwEC7QFjoPWuS5NZnBt6B3pu7EG2Ge0id1oLHZpPJdn3OQK+BQDiw9zStiHBTJQQQQ=="],
|
||||
|
||||
"@img/sharp-linuxmusl-arm64": ["@img/sharp-linuxmusl-arm64@0.34.5", "https://mirrors.cloud.tencent.com/npm/@img/sharp-linuxmusl-arm64/-/sharp-linuxmusl-arm64-0.34.5.tgz", { "optionalDependencies": { "@img/sharp-libvips-linuxmusl-arm64": "1.2.4" }, "os": "linux", "cpu": "arm64" }, "sha512-fprJR6GtRsMt6Kyfq44IsChVZeGN97gTD331weR1ex1c1rypDEABN6Tm2xa1wE6lYb5DdEnk03NZPqA7Id21yg=="],
|
||||
|
||||
"@img/sharp-linuxmusl-x64": ["@img/sharp-linuxmusl-x64@0.34.5", "https://mirrors.cloud.tencent.com/npm/@img/sharp-linuxmusl-x64/-/sharp-linuxmusl-x64-0.34.5.tgz", { "optionalDependencies": { "@img/sharp-libvips-linuxmusl-x64": "1.2.4" }, "os": "linux", "cpu": "x64" }, "sha512-Jg8wNT1MUzIvhBFxViqrEhWDGzqymo3sV7z7ZsaWbZNDLXRJZoRGrjulp60YYtV4wfY8VIKcWidjojlLcWrd8Q=="],
|
||||
|
||||
"@img/sharp-win32-arm64": ["@img/sharp-win32-arm64@0.34.5", "https://mirrors.cloud.tencent.com/npm/@img/sharp-win32-arm64/-/sharp-win32-arm64-0.34.5.tgz", { "os": "win32", "cpu": "arm64" }, "sha512-WQ3AgWCWYSb2yt+IG8mnC6Jdk9Whs7O0gxphblsLvdhSpSTtmu69ZG1Gkb6NuvxsNACwiPV6cNSZNzt0KPsw7g=="],
|
||||
|
||||
"@img/sharp-win32-x64": ["@img/sharp-win32-x64@0.34.5", "https://mirrors.cloud.tencent.com/npm/@img/sharp-win32-x64/-/sharp-win32-x64-0.34.5.tgz", { "os": "win32", "cpu": "x64" }, "sha512-+29YMsqY2/9eFEiW93eqWnuLcWcufowXewwSNIT6UwZdUUCrM3oFjMWH/Z6/TMmb4hlFenmfAVbpWeup2jryCw=="],
|
||||
|
||||
"@inquirer/checkbox": ["@inquirer/checkbox@3.0.1", "https://mirrors.cloud.tencent.com/npm/@inquirer/checkbox/-/checkbox-3.0.1.tgz", { "dependencies": { "@inquirer/core": "^9.2.1", "@inquirer/figures": "^1.0.6", "@inquirer/type": "^2.0.0", "ansi-escapes": "^4.3.2", "yoctocolors-cjs": "^2.1.2" } }, "sha512-0hm2nrToWUdD6/UHnel/UKGdk1//ke5zGUpHIvk5ZWmaKezlGxZkOJXNSWsdxO/rEqTkbB3lNC2J6nBElV2aAQ=="],
|
||||
|
||||
"@inquirer/confirm": ["@inquirer/confirm@4.0.1", "https://mirrors.cloud.tencent.com/npm/@inquirer/confirm/-/confirm-4.0.1.tgz", { "dependencies": { "@inquirer/core": "^9.2.1", "@inquirer/type": "^2.0.0" } }, "sha512-46yL28o2NJ9doViqOy0VDcoTzng7rAb6yPQKU7VDLqkmbCaH4JqK4yk4XqlzNWy9PVC5pG1ZUXPBQv+VqnYs2w=="],
|
||||
|
||||
"@inquirer/core": ["@inquirer/core@9.2.1", "https://mirrors.cloud.tencent.com/npm/@inquirer/core/-/core-9.2.1.tgz", { "dependencies": { "@inquirer/figures": "^1.0.6", "@inquirer/type": "^2.0.0", "@types/mute-stream": "^0.0.4", "@types/node": "^22.5.5", "@types/wrap-ansi": "^3.0.0", "ansi-escapes": "^4.3.2", "cli-width": "^4.1.0", "mute-stream": "^1.0.0", "signal-exit": "^4.1.0", "strip-ansi": "^6.0.1", "wrap-ansi": "^6.2.0", "yoctocolors-cjs": "^2.1.2" } }, "sha512-F2VBt7W/mwqEU4bL0RnHNZmC/OxzNx9cOYxHqnXX3MP6ruYvZUZAW9imgN9+h/uBT/oP8Gh888J2OZSbjSeWcg=="],
|
||||
|
||||
"@inquirer/editor": ["@inquirer/editor@3.0.1", "https://mirrors.cloud.tencent.com/npm/@inquirer/editor/-/editor-3.0.1.tgz", { "dependencies": { "@inquirer/core": "^9.2.1", "@inquirer/type": "^2.0.0", "external-editor": "^3.1.0" } }, "sha512-VA96GPFaSOVudjKFraokEEmUQg/Lub6OXvbIEZU1SDCmBzRkHGhxoFAVaF30nyiB4m5cEbDgiI2QRacXZ2hw9Q=="],
|
||||
|
||||
"@inquirer/expand": ["@inquirer/expand@3.0.1", "https://mirrors.cloud.tencent.com/npm/@inquirer/expand/-/expand-3.0.1.tgz", { "dependencies": { "@inquirer/core": "^9.2.1", "@inquirer/type": "^2.0.0", "yoctocolors-cjs": "^2.1.2" } }, "sha512-ToG8d6RIbnVpbdPdiN7BCxZGiHOTomOX94C2FaT5KOHupV40tKEDozp12res6cMIfRKrXLJyexAZhWVHgbALSQ=="],
|
||||
|
||||
"@inquirer/figures": ["@inquirer/figures@1.0.15", "https://mirrors.cloud.tencent.com/npm/@inquirer/figures/-/figures-1.0.15.tgz", {}, "sha512-t2IEY+unGHOzAaVM5Xx6DEWKeXlDDcNPeDyUpsRc6CUhBfU3VQOEl+Vssh7VNp1dR8MdUJBWhuObjXCsVpjN5g=="],
|
||||
|
||||
"@inquirer/input": ["@inquirer/input@3.0.1", "https://mirrors.cloud.tencent.com/npm/@inquirer/input/-/input-3.0.1.tgz", { "dependencies": { "@inquirer/core": "^9.2.1", "@inquirer/type": "^2.0.0" } }, "sha512-BDuPBmpvi8eMCxqC5iacloWqv+5tQSJlUafYWUe31ow1BVXjW2a5qe3dh4X/Z25Wp22RwvcaLCc2siHobEOfzg=="],
|
||||
|
||||
"@inquirer/number": ["@inquirer/number@2.0.1", "https://mirrors.cloud.tencent.com/npm/@inquirer/number/-/number-2.0.1.tgz", { "dependencies": { "@inquirer/core": "^9.2.1", "@inquirer/type": "^2.0.0" } }, "sha512-QpR8jPhRjSmlr/mD2cw3IR8HRO7lSVOnqUvQa8scv1Lsr3xoAMMworcYW3J13z3ppjBFBD2ef1Ci6AE5Qn8goQ=="],
|
||||
|
||||
"@inquirer/password": ["@inquirer/password@3.0.1", "https://mirrors.cloud.tencent.com/npm/@inquirer/password/-/password-3.0.1.tgz", { "dependencies": { "@inquirer/core": "^9.2.1", "@inquirer/type": "^2.0.0", "ansi-escapes": "^4.3.2" } }, "sha512-haoeEPUisD1NeE2IanLOiFr4wcTXGWrBOyAyPZi1FfLJuXOzNmxCJPgUrGYKVh+Y8hfGJenIfz5Wb/DkE9KkMQ=="],
|
||||
|
||||
"@inquirer/prompts": ["@inquirer/prompts@6.0.1", "https://mirrors.cloud.tencent.com/npm/@inquirer/prompts/-/prompts-6.0.1.tgz", { "dependencies": { "@inquirer/checkbox": "^3.0.1", "@inquirer/confirm": "^4.0.1", "@inquirer/editor": "^3.0.1", "@inquirer/expand": "^3.0.1", "@inquirer/input": "^3.0.1", "@inquirer/number": "^2.0.1", "@inquirer/password": "^3.0.1", "@inquirer/rawlist": "^3.0.1", "@inquirer/search": "^2.0.1", "@inquirer/select": "^3.0.1" } }, "sha512-yl43JD/86CIj3Mz5mvvLJqAOfIup7ncxfJ0Btnl0/v5TouVUyeEdcpknfgc+yMevS/48oH9WAkkw93m7otLb/A=="],
|
||||
|
||||
"@inquirer/rawlist": ["@inquirer/rawlist@3.0.1", "https://mirrors.cloud.tencent.com/npm/@inquirer/rawlist/-/rawlist-3.0.1.tgz", { "dependencies": { "@inquirer/core": "^9.2.1", "@inquirer/type": "^2.0.0", "yoctocolors-cjs": "^2.1.2" } }, "sha512-VgRtFIwZInUzTiPLSfDXK5jLrnpkuSOh1ctfaoygKAdPqjcjKYmGh6sCY1pb0aGnCGsmhUxoqLDUAU0ud+lGXQ=="],
|
||||
|
||||
"@inquirer/search": ["@inquirer/search@2.0.1", "https://mirrors.cloud.tencent.com/npm/@inquirer/search/-/search-2.0.1.tgz", { "dependencies": { "@inquirer/core": "^9.2.1", "@inquirer/figures": "^1.0.6", "@inquirer/type": "^2.0.0", "yoctocolors-cjs": "^2.1.2" } }, "sha512-r5hBKZk3g5MkIzLVoSgE4evypGqtOannnB3PKTG9NRZxyFRKcfzrdxXXPcoJQsxJPzvdSU2Rn7pB7lw0GCmGAg=="],
|
||||
|
||||
"@inquirer/select": ["@inquirer/select@3.0.1", "https://mirrors.cloud.tencent.com/npm/@inquirer/select/-/select-3.0.1.tgz", { "dependencies": { "@inquirer/core": "^9.2.1", "@inquirer/figures": "^1.0.6", "@inquirer/type": "^2.0.0", "ansi-escapes": "^4.3.2", "yoctocolors-cjs": "^2.1.2" } }, "sha512-lUDGUxPhdWMkN/fHy1Lk7pF3nK1fh/gqeyWXmctefhxLYxlDsc7vsPBEpxrfVGDsVdyYJsiJoD4bJ1b623cV1Q=="],
|
||||
|
||||
"@inquirer/type": ["@inquirer/type@2.0.0", "https://mirrors.cloud.tencent.com/npm/@inquirer/type/-/type-2.0.0.tgz", { "dependencies": { "mute-stream": "^1.0.0" } }, "sha512-XvJRx+2KR3YXyYtPUUy+qd9i7p+GO9Ko6VIIpWlBrpWwXDv8WLFeHTxz35CfQFUiBMLXlGHhGzys7lqit9gWag=="],
|
||||
|
||||
"@modelcontextprotocol/sdk": ["@modelcontextprotocol/sdk@1.29.0", "https://mirrors.cloud.tencent.com/npm/@modelcontextprotocol/sdk/-/sdk-1.29.0.tgz", { "dependencies": { "@hono/node-server": "^1.19.9", "ajv": "^8.17.1", "ajv-formats": "^3.0.1", "content-type": "^1.0.5", "cors": "^2.8.5", "cross-spawn": "^7.0.5", "eventsource": "^3.0.2", "eventsource-parser": "^3.0.0", "express": "^5.2.1", "express-rate-limit": "^8.2.1", "hono": "^4.11.4", "jose": "^6.1.3", "json-schema-typed": "^8.0.2", "pkce-challenge": "^5.0.0", "raw-body": "^3.0.0", "zod": "^3.25 || ^4.0", "zod-to-json-schema": "^3.25.1" }, "peerDependencies": { "@cfworker/json-schema": "^4.1.1" }, "optionalPeers": ["@cfworker/json-schema"] }, "sha512-zo37mZA9hJWpULgkRpowewez1y6ML5GsXJPY8FI0tBBCd77HEvza4jDqRKOXgHNn867PVGCyTdzqpz0izu5ZjQ=="],
|
||||
|
||||
"@opentelemetry/api": ["@opentelemetry/api@1.9.1", "https://mirrors.cloud.tencent.com/npm/@opentelemetry/api/-/api-1.9.1.tgz", {}, "sha512-gLyJlPHPZYdAk1JENA9LeHejZe1Ti77/pTeFm/nMXmQH/HFZlcS/O2XJB+L8fkbrNSqhdtlvjBVjxwUYanNH5Q=="],
|
||||
|
||||
"@opentelemetry/api-logs": ["@opentelemetry/api-logs@0.214.0", "https://mirrors.cloud.tencent.com/npm/@opentelemetry/api-logs/-/api-logs-0.214.0.tgz", { "dependencies": { "@opentelemetry/api": "^1.3.0" } }, "sha512-40lSJeqYO8Uz2Yj7u94/SJWE/wONa7rmMKjI1ZcIjgf3MHNHv1OZUCrCETGuaRF62d5pQD1wKIW+L4lmSMTzZA=="],
|
||||
|
||||
"@opentelemetry/core": ["@opentelemetry/core@2.6.1", "https://mirrors.cloud.tencent.com/npm/@opentelemetry/core/-/core-2.6.1.tgz", { "dependencies": { "@opentelemetry/semantic-conventions": "^1.29.0" }, "peerDependencies": { "@opentelemetry/api": ">=1.0.0 <1.10.0" } }, "sha512-8xHSGWpJP9wBxgBpnqGL0R3PbdWQndL1Qp50qrg71+B28zK5OQmUgcDKLJgzyAAV38t4tOyLMGDD60LneR5W8g=="],
|
||||
|
||||
"@opentelemetry/resources": ["@opentelemetry/resources@2.6.1", "https://mirrors.cloud.tencent.com/npm/@opentelemetry/resources/-/resources-2.6.1.tgz", { "dependencies": { "@opentelemetry/core": "2.6.1", "@opentelemetry/semantic-conventions": "^1.29.0" }, "peerDependencies": { "@opentelemetry/api": ">=1.3.0 <1.10.0" } }, "sha512-lID/vxSuKWXM55XhAKNoYXu9Cutoq5hFdkbTdI/zDKQktXzcWBVhNsOkiZFTMU9UtEWuGRNe0HUgmsFldIdxVA=="],
|
||||
|
||||
"@opentelemetry/sdk-logs": ["@opentelemetry/sdk-logs@0.214.0", "https://mirrors.cloud.tencent.com/npm/@opentelemetry/sdk-logs/-/sdk-logs-0.214.0.tgz", { "dependencies": { "@opentelemetry/api-logs": "0.214.0", "@opentelemetry/core": "2.6.1", "@opentelemetry/resources": "2.6.1", "@opentelemetry/semantic-conventions": "^1.29.0" }, "peerDependencies": { "@opentelemetry/api": ">=1.4.0 <1.10.0" } }, "sha512-zf6acnScjhsaBUU22zXZ/sLWim1dfhUAbGXdMmHmNG3LfBnQ3DKsOCITb2IZwoUsNNMTogqFKBnlIPPftUgGwA=="],
|
||||
|
||||
"@opentelemetry/sdk-metrics": ["@opentelemetry/sdk-metrics@2.6.1", "https://mirrors.cloud.tencent.com/npm/@opentelemetry/sdk-metrics/-/sdk-metrics-2.6.1.tgz", { "dependencies": { "@opentelemetry/core": "2.6.1", "@opentelemetry/resources": "2.6.1" }, "peerDependencies": { "@opentelemetry/api": ">=1.9.0 <1.10.0" } }, "sha512-9t9hJHX15meBy2NmTJxL+NJfXmnausR2xUDvE19XQce0Qi/GBtDGamU8nS1RMbdgDmhgpm3VaOu2+fiS/SfTpQ=="],
|
||||
|
||||
"@opentelemetry/sdk-trace-base": ["@opentelemetry/sdk-trace-base@2.6.1", "https://mirrors.cloud.tencent.com/npm/@opentelemetry/sdk-trace-base/-/sdk-trace-base-2.6.1.tgz", { "dependencies": { "@opentelemetry/core": "2.6.1", "@opentelemetry/resources": "2.6.1", "@opentelemetry/semantic-conventions": "^1.29.0" }, "peerDependencies": { "@opentelemetry/api": ">=1.3.0 <1.10.0" } }, "sha512-r86ut4T1e8vNwB35CqCcKd45yzqH6/6Wzvpk2/cZB8PsPLlZFTvrh8yfOS3CYZYcUmAx4hHTZJ8AO8Dj8nrdhw=="],
|
||||
|
||||
"@opentelemetry/semantic-conventions": ["@opentelemetry/semantic-conventions@1.40.0", "https://mirrors.cloud.tencent.com/npm/@opentelemetry/semantic-conventions/-/semantic-conventions-1.40.0.tgz", {}, "sha512-cifvXDhcqMwwTlTK04GBNeIe7yyo28Mfby85QXFe1Yk8nmi36Ab/5UQwptOx84SsoGNRg+EVSjwzfSZMy6pmlw=="],
|
||||
|
||||
"@pondwader/socks5-server": ["@pondwader/socks5-server@1.0.10", "https://mirrors.cloud.tencent.com/npm/@pondwader/socks5-server/-/socks5-server-1.0.10.tgz", {}, "sha512-bQY06wzzR8D2+vVCUoBsr5QS2U6UgPUQRmErNwtsuI6vLcyRKkafjkr3KxbtGFf9aBBIV2mcvlsKD1UYaIV+sg=="],
|
||||
|
||||
"@sec-ant/readable-stream": ["@sec-ant/readable-stream@0.4.1", "https://mirrors.cloud.tencent.com/npm/@sec-ant/readable-stream/-/readable-stream-0.4.1.tgz", {}, "sha512-831qok9r2t8AlxLko40y2ebgSDhenenCatLVeW/uBtnHPyhHOvG0C7TvfgecV+wHzIm5KUICgzmVpWS+IMEAeg=="],
|
||||
|
||||
"@sindresorhus/merge-streams": ["@sindresorhus/merge-streams@4.0.0", "https://mirrors.cloud.tencent.com/npm/@sindresorhus/merge-streams/-/merge-streams-4.0.0.tgz", {}, "sha512-tlqY9xq5ukxTUZBmoOp+m61cqwQD5pHJtFY3Mn8CA8ps6yghLH/Hw8UPdqg4OLmFW3IFlcXnQNmo/dh8HzXYIQ=="],
|
||||
|
||||
"@smithy/config-resolver": ["@smithy/config-resolver@4.4.13", "https://mirrors.cloud.tencent.com/npm/@smithy/config-resolver/-/config-resolver-4.4.13.tgz", { "dependencies": { "@smithy/node-config-provider": "^4.3.12", "@smithy/types": "^4.13.1", "@smithy/util-config-provider": "^4.2.2", "@smithy/util-endpoints": "^3.3.3", "@smithy/util-middleware": "^4.2.12", "tslib": "^2.6.2" } }, "sha512-iIzMC5NmOUP6WL6o8iPBjFhUhBZ9pPjpUpQYWMUFQqKyXXzOftbfK8zcQCz/jFV1Psmf05BK5ypx4K2r4Tnwdg=="],
|
||||
|
||||
"@smithy/core": ["@smithy/core@3.23.13", "https://mirrors.cloud.tencent.com/npm/@smithy/core/-/core-3.23.13.tgz", { "dependencies": { "@smithy/protocol-http": "^5.3.12", "@smithy/types": "^4.13.1", "@smithy/url-parser": "^4.2.12", "@smithy/util-base64": "^4.3.2", "@smithy/util-body-length-browser": "^4.2.2", "@smithy/util-middleware": "^4.2.12", "@smithy/util-stream": "^4.5.21", "@smithy/util-utf8": "^4.2.2", "@smithy/uuid": "^1.1.2", "tslib": "^2.6.2" } }, "sha512-J+2TT9D6oGsUVXVEMvz8h2EmdVnkBiy2auCie4aSJMvKlzUtO5hqjEzXhoCUkIMo7gAYjbQcN0g/MMSXEhDs1Q=="],
|
||||
|
||||
"@smithy/credential-provider-imds": ["@smithy/credential-provider-imds@4.2.12", "https://mirrors.cloud.tencent.com/npm/@smithy/credential-provider-imds/-/credential-provider-imds-4.2.12.tgz", { "dependencies": { "@smithy/node-config-provider": "^4.3.12", "@smithy/property-provider": "^4.2.12", "@smithy/types": "^4.13.1", "@smithy/url-parser": "^4.2.12", "tslib": "^2.6.2" } }, "sha512-cr2lR792vNZcYMriSIj+Um3x9KWrjcu98kn234xA6reOAFMmbRpQMOv8KPgEmLLtx3eldU6c5wALKFqNOhugmg=="],
|
||||
|
||||
"@smithy/eventstream-codec": ["@smithy/eventstream-codec@4.2.12", "https://mirrors.cloud.tencent.com/npm/@smithy/eventstream-codec/-/eventstream-codec-4.2.12.tgz", { "dependencies": { "@aws-crypto/crc32": "5.2.0", "@smithy/types": "^4.13.1", "@smithy/util-hex-encoding": "^4.2.2", "tslib": "^2.6.2" } }, "sha512-FE3bZdEl62ojmy8x4FHqxq2+BuOHlcxiH5vaZ6aqHJr3AIZzwF5jfx8dEiU/X0a8RboyNDjmXjlbr8AdEyLgiA=="],
|
||||
|
||||
"@smithy/eventstream-serde-browser": ["@smithy/eventstream-serde-browser@4.2.12", "https://mirrors.cloud.tencent.com/npm/@smithy/eventstream-serde-browser/-/eventstream-serde-browser-4.2.12.tgz", { "dependencies": { "@smithy/eventstream-serde-universal": "^4.2.12", "@smithy/types": "^4.13.1", "tslib": "^2.6.2" } }, "sha512-XUSuMxlTxV5pp4VpqZf6Sa3vT/Q75FVkLSpSSE3KkWBvAQWeuWt1msTv8fJfgA4/jcJhrbrbMzN1AC/hvPmm5A=="],
|
||||
|
||||
"@smithy/eventstream-serde-config-resolver": ["@smithy/eventstream-serde-config-resolver@4.3.12", "https://mirrors.cloud.tencent.com/npm/@smithy/eventstream-serde-config-resolver/-/eventstream-serde-config-resolver-4.3.12.tgz", { "dependencies": { "@smithy/types": "^4.13.1", "tslib": "^2.6.2" } }, "sha512-7epsAZ3QvfHkngz6RXQYseyZYHlmWXSTPOfPmXkiS+zA6TBNo1awUaMFL9vxyXlGdoELmCZyZe1nQE+imbmV+Q=="],
|
||||
|
||||
"@smithy/eventstream-serde-node": ["@smithy/eventstream-serde-node@4.2.12", "https://mirrors.cloud.tencent.com/npm/@smithy/eventstream-serde-node/-/eventstream-serde-node-4.2.12.tgz", { "dependencies": { "@smithy/eventstream-serde-universal": "^4.2.12", "@smithy/types": "^4.13.1", "tslib": "^2.6.2" } }, "sha512-D1pFuExo31854eAvg89KMn9Oab/wEeJR6Buy32B49A9Ogdtx5fwZPqBHUlDzaCDpycTFk2+fSQgX689Qsk7UGA=="],
|
||||
|
||||
"@smithy/eventstream-serde-universal": ["@smithy/eventstream-serde-universal@4.2.12", "https://mirrors.cloud.tencent.com/npm/@smithy/eventstream-serde-universal/-/eventstream-serde-universal-4.2.12.tgz", { "dependencies": { "@smithy/eventstream-codec": "^4.2.12", "@smithy/types": "^4.13.1", "tslib": "^2.6.2" } }, "sha512-+yNuTiyBACxOJUTvbsNsSOfH9G9oKbaJE1lNL3YHpGcuucl6rPZMi3nrpehpVOVR2E07YqFFmtwpImtpzlouHQ=="],
|
||||
|
||||
"@smithy/fetch-http-handler": ["@smithy/fetch-http-handler@5.3.15", "https://mirrors.cloud.tencent.com/npm/@smithy/fetch-http-handler/-/fetch-http-handler-5.3.15.tgz", { "dependencies": { "@smithy/protocol-http": "^5.3.12", "@smithy/querystring-builder": "^4.2.12", "@smithy/types": "^4.13.1", "@smithy/util-base64": "^4.3.2", "tslib": "^2.6.2" } }, "sha512-T4jFU5N/yiIfrtrsb9uOQn7RdELdM/7HbyLNr6uO/mpkj1ctiVs7CihVr51w4LyQlXWDpXFn4BElf1WmQvZu/A=="],
|
||||
|
||||
"@smithy/hash-node": ["@smithy/hash-node@4.2.12", "https://mirrors.cloud.tencent.com/npm/@smithy/hash-node/-/hash-node-4.2.12.tgz", { "dependencies": { "@smithy/types": "^4.13.1", "@smithy/util-buffer-from": "^4.2.2", "@smithy/util-utf8": "^4.2.2", "tslib": "^2.6.2" } }, "sha512-QhBYbGrbxTkZ43QoTPrK72DoYviDeg6YKDrHTMJbbC+A0sml3kSjzFtXP7BtbyJnXojLfTQldGdUR0RGD8dA3w=="],
|
||||
|
||||
"@smithy/invalid-dependency": ["@smithy/invalid-dependency@4.2.12", "https://mirrors.cloud.tencent.com/npm/@smithy/invalid-dependency/-/invalid-dependency-4.2.12.tgz", { "dependencies": { "@smithy/types": "^4.13.1", "tslib": "^2.6.2" } }, "sha512-/4F1zb7Z8LOu1PalTdESFHR0RbPwHd3FcaG1sI3UEIriQTWakysgJr65lc1jj6QY5ye7aFsisajotH6UhWfm/g=="],
|
||||
|
||||
"@smithy/is-array-buffer": ["@smithy/is-array-buffer@4.2.2", "https://mirrors.cloud.tencent.com/npm/@smithy/is-array-buffer/-/is-array-buffer-4.2.2.tgz", { "dependencies": { "tslib": "^2.6.2" } }, "sha512-n6rQ4N8Jj4YTQO3YFrlgZuwKodf4zUFs7EJIWH86pSCWBaAtAGBFfCM7Wx6D2bBJ2xqFNxGBSrUWswT3M0VJow=="],
|
||||
|
||||
"@smithy/middleware-content-length": ["@smithy/middleware-content-length@4.2.12", "https://mirrors.cloud.tencent.com/npm/@smithy/middleware-content-length/-/middleware-content-length-4.2.12.tgz", { "dependencies": { "@smithy/protocol-http": "^5.3.12", "@smithy/types": "^4.13.1", "tslib": "^2.6.2" } }, "sha512-YE58Yz+cvFInWI/wOTrB+DbvUVz/pLn5mC5MvOV4fdRUc6qGwygyngcucRQjAhiCEbmfLOXX0gntSIcgMvAjmA=="],
|
||||
|
||||
"@smithy/middleware-endpoint": ["@smithy/middleware-endpoint@4.4.28", "https://mirrors.cloud.tencent.com/npm/@smithy/middleware-endpoint/-/middleware-endpoint-4.4.28.tgz", { "dependencies": { "@smithy/core": "^3.23.13", "@smithy/middleware-serde": "^4.2.16", "@smithy/node-config-provider": "^4.3.12", "@smithy/shared-ini-file-loader": "^4.4.7", "@smithy/types": "^4.13.1", "@smithy/url-parser": "^4.2.12", "@smithy/util-middleware": "^4.2.12", "tslib": "^2.6.2" } }, "sha512-p1gfYpi91CHcs5cBq982UlGlDrxoYUX6XdHSo91cQ2KFuz6QloHosO7Jc60pJiVmkWrKOV8kFYlGFFbQ2WUKKQ=="],
|
||||
|
||||
"@smithy/middleware-retry": ["@smithy/middleware-retry@4.4.45", "https://mirrors.cloud.tencent.com/npm/@smithy/middleware-retry/-/middleware-retry-4.4.45.tgz", { "dependencies": { "@smithy/node-config-provider": "^4.3.12", "@smithy/protocol-http": "^5.3.12", "@smithy/service-error-classification": "^4.2.12", "@smithy/smithy-client": "^4.12.8", "@smithy/types": "^4.13.1", "@smithy/util-middleware": "^4.2.12", "@smithy/util-retry": "^4.2.12", "@smithy/uuid": "^1.1.2", "tslib": "^2.6.2" } }, "sha512-td1PxpwDIaw5/oP/xIRxBGxJKoF1L4DBAwbZ8wjMuXBYOP/r2ZE/Ocou+mBHx/yk9knFEtDBwhSrYVn+Mz4pHw=="],
|
||||
|
||||
"@smithy/middleware-serde": ["@smithy/middleware-serde@4.2.16", "https://mirrors.cloud.tencent.com/npm/@smithy/middleware-serde/-/middleware-serde-4.2.16.tgz", { "dependencies": { "@smithy/core": "^3.23.13", "@smithy/protocol-http": "^5.3.12", "@smithy/types": "^4.13.1", "tslib": "^2.6.2" } }, "sha512-beqfV+RZ9RSv+sQqor3xroUUYgRFCGRw6niGstPG8zO9LgTl0B0MCucxjmrH/2WwksQN7UUgI7KNANoZv+KALA=="],
|
||||
|
||||
"@smithy/middleware-stack": ["@smithy/middleware-stack@4.2.12", "https://mirrors.cloud.tencent.com/npm/@smithy/middleware-stack/-/middleware-stack-4.2.12.tgz", { "dependencies": { "@smithy/types": "^4.13.1", "tslib": "^2.6.2" } }, "sha512-kruC5gRHwsCOuyCd4ouQxYjgRAym2uDlCvQ5acuMtRrcdfg7mFBg6blaxcJ09STpt3ziEkis6bhg1uwrWU7txw=="],
|
||||
|
||||
"@smithy/node-config-provider": ["@smithy/node-config-provider@4.3.12", "https://mirrors.cloud.tencent.com/npm/@smithy/node-config-provider/-/node-config-provider-4.3.12.tgz", { "dependencies": { "@smithy/property-provider": "^4.2.12", "@smithy/shared-ini-file-loader": "^4.4.7", "@smithy/types": "^4.13.1", "tslib": "^2.6.2" } }, "sha512-tr2oKX2xMcO+rBOjobSwVAkV05SIfUKz8iI53rzxEmgW3GOOPOv0UioSDk+J8OpRQnpnhsO3Af6IEBabQBVmiw=="],
|
||||
|
||||
"@smithy/node-http-handler": ["@smithy/node-http-handler@4.5.1", "https://mirrors.cloud.tencent.com/npm/@smithy/node-http-handler/-/node-http-handler-4.5.1.tgz", { "dependencies": { "@smithy/protocol-http": "^5.3.12", "@smithy/querystring-builder": "^4.2.12", "@smithy/types": "^4.13.1", "tslib": "^2.6.2" } }, "sha512-ejjxdAXjkPIs9lyYyVutOGNOraqUE9v/NjGMKwwFrfOM354wfSD8lmlj8hVwUzQmlLLF4+udhfCX9Exnbmvfzw=="],
|
||||
|
||||
"@smithy/property-provider": ["@smithy/property-provider@4.2.12", "https://mirrors.cloud.tencent.com/npm/@smithy/property-provider/-/property-provider-4.2.12.tgz", { "dependencies": { "@smithy/types": "^4.13.1", "tslib": "^2.6.2" } }, "sha512-jqve46eYU1v7pZ5BM+fmkbq3DerkSluPr5EhvOcHxygxzD05ByDRppRwRPPpFrsFo5yDtCYLKu+kreHKVrvc7A=="],
|
||||
|
||||
"@smithy/protocol-http": ["@smithy/protocol-http@5.3.12", "https://mirrors.cloud.tencent.com/npm/@smithy/protocol-http/-/protocol-http-5.3.12.tgz", { "dependencies": { "@smithy/types": "^4.13.1", "tslib": "^2.6.2" } }, "sha512-fit0GZK9I1xoRlR4jXmbLhoN0OdEpa96ul8M65XdmXnxXkuMxM0Y8HDT0Fh0Xb4I85MBvBClOzgSrV1X2s1Hxw=="],
|
||||
|
||||
"@smithy/querystring-builder": ["@smithy/querystring-builder@4.2.12", "https://mirrors.cloud.tencent.com/npm/@smithy/querystring-builder/-/querystring-builder-4.2.12.tgz", { "dependencies": { "@smithy/types": "^4.13.1", "@smithy/util-uri-escape": "^4.2.2", "tslib": "^2.6.2" } }, "sha512-6wTZjGABQufekycfDGMEB84BgtdOE/rCVTov+EDXQ8NHKTUNIp/j27IliwP7tjIU9LR+sSzyGBOXjeEtVgzCHg=="],
|
||||
|
||||
"@smithy/querystring-parser": ["@smithy/querystring-parser@4.2.12", "https://mirrors.cloud.tencent.com/npm/@smithy/querystring-parser/-/querystring-parser-4.2.12.tgz", { "dependencies": { "@smithy/types": "^4.13.1", "tslib": "^2.6.2" } }, "sha512-P2OdvrgiAKpkPNKlKUtWbNZKB1XjPxM086NeVhK+W+wI46pIKdWBe5QyXvhUm3MEcyS/rkLvY8rZzyUdmyDZBw=="],
|
||||
|
||||
"@smithy/service-error-classification": ["@smithy/service-error-classification@4.2.12", "https://mirrors.cloud.tencent.com/npm/@smithy/service-error-classification/-/service-error-classification-4.2.12.tgz", { "dependencies": { "@smithy/types": "^4.13.1" } }, "sha512-LlP29oSQN0Tw0b6D0Xo6BIikBswuIiGYbRACy5ujw/JgWSzTdYj46U83ssf6Ux0GyNJVivs2uReU8pt7Eu9okQ=="],
|
||||
|
||||
"@smithy/shared-ini-file-loader": ["@smithy/shared-ini-file-loader@4.4.7", "https://mirrors.cloud.tencent.com/npm/@smithy/shared-ini-file-loader/-/shared-ini-file-loader-4.4.7.tgz", { "dependencies": { "@smithy/types": "^4.13.1", "tslib": "^2.6.2" } }, "sha512-HrOKWsUb+otTeo1HxVWeEb99t5ER1XrBi/xka2Wv6NVmTbuCUC1dvlrksdvxFtODLBjsC+PHK+fuy2x/7Ynyiw=="],
|
||||
|
||||
"@smithy/signature-v4": ["@smithy/signature-v4@5.3.12", "https://mirrors.cloud.tencent.com/npm/@smithy/signature-v4/-/signature-v4-5.3.12.tgz", { "dependencies": { "@smithy/is-array-buffer": "^4.2.2", "@smithy/protocol-http": "^5.3.12", "@smithy/types": "^4.13.1", "@smithy/util-hex-encoding": "^4.2.2", "@smithy/util-middleware": "^4.2.12", "@smithy/util-uri-escape": "^4.2.2", "@smithy/util-utf8": "^4.2.2", "tslib": "^2.6.2" } }, "sha512-B/FBwO3MVOL00DaRSXfXfa/TRXRheagt/q5A2NM13u7q+sHS59EOVGQNfG7DkmVtdQm5m3vOosoKAXSqn/OEgw=="],
|
||||
|
||||
"@smithy/smithy-client": ["@smithy/smithy-client@4.12.8", "https://mirrors.cloud.tencent.com/npm/@smithy/smithy-client/-/smithy-client-4.12.8.tgz", { "dependencies": { "@smithy/core": "^3.23.13", "@smithy/middleware-endpoint": "^4.4.28", "@smithy/middleware-stack": "^4.2.12", "@smithy/protocol-http": "^5.3.12", "@smithy/types": "^4.13.1", "@smithy/util-stream": "^4.5.21", "tslib": "^2.6.2" } }, "sha512-aJaAX7vHe5i66smoSSID7t4rKY08PbD8EBU7DOloixvhOozfYWdcSYE4l6/tjkZ0vBZhGjheWzB2mh31sLgCMA=="],
|
||||
|
||||
"@smithy/types": ["@smithy/types@4.13.1", "https://mirrors.cloud.tencent.com/npm/@smithy/types/-/types-4.13.1.tgz", { "dependencies": { "tslib": "^2.6.2" } }, "sha512-787F3yzE2UiJIQ+wYW1CVg2odHjmaWLGksnKQHUrK/lYZSEcy1msuLVvxaR/sI2/aDe9U+TBuLsXnr3vod1g0g=="],
|
||||
|
||||
"@smithy/url-parser": ["@smithy/url-parser@4.2.12", "https://mirrors.cloud.tencent.com/npm/@smithy/url-parser/-/url-parser-4.2.12.tgz", { "dependencies": { "@smithy/querystring-parser": "^4.2.12", "@smithy/types": "^4.13.1", "tslib": "^2.6.2" } }, "sha512-wOPKPEpso+doCZGIlr+e1lVI6+9VAKfL4kZWFgzVgGWY2hZxshNKod4l2LXS3PRC9otH/JRSjtEHqQ/7eLciRA=="],
|
||||
|
||||
"@smithy/util-base64": ["@smithy/util-base64@4.3.2", "https://mirrors.cloud.tencent.com/npm/@smithy/util-base64/-/util-base64-4.3.2.tgz", { "dependencies": { "@smithy/util-buffer-from": "^4.2.2", "@smithy/util-utf8": "^4.2.2", "tslib": "^2.6.2" } }, "sha512-XRH6b0H/5A3SgblmMa5ErXQ2XKhfbQB+Fm/oyLZ2O2kCUrwgg55bU0RekmzAhuwOjA9qdN5VU2BprOvGGUkOOQ=="],
|
||||
|
||||
"@smithy/util-body-length-browser": ["@smithy/util-body-length-browser@4.2.2", "https://mirrors.cloud.tencent.com/npm/@smithy/util-body-length-browser/-/util-body-length-browser-4.2.2.tgz", { "dependencies": { "tslib": "^2.6.2" } }, "sha512-JKCrLNOup3OOgmzeaKQwi4ZCTWlYR5H4Gm1r2uTMVBXoemo1UEghk5vtMi1xSu2ymgKVGW631e2fp9/R610ZjQ=="],
|
||||
|
||||
"@smithy/util-body-length-node": ["@smithy/util-body-length-node@4.2.3", "https://mirrors.cloud.tencent.com/npm/@smithy/util-body-length-node/-/util-body-length-node-4.2.3.tgz", { "dependencies": { "tslib": "^2.6.2" } }, "sha512-ZkJGvqBzMHVHE7r/hcuCxlTY8pQr1kMtdsVPs7ex4mMU+EAbcXppfo5NmyxMYi2XU49eqaz56j2gsk4dHHPG/g=="],
|
||||
|
||||
"@smithy/util-buffer-from": ["@smithy/util-buffer-from@4.2.2", "https://mirrors.cloud.tencent.com/npm/@smithy/util-buffer-from/-/util-buffer-from-4.2.2.tgz", { "dependencies": { "@smithy/is-array-buffer": "^4.2.2", "tslib": "^2.6.2" } }, "sha512-FDXD7cvUoFWwN6vtQfEta540Y/YBe5JneK3SoZg9bThSoOAC/eGeYEua6RkBgKjGa/sz6Y+DuBZj3+YEY21y4Q=="],
|
||||
|
||||
"@smithy/util-config-provider": ["@smithy/util-config-provider@4.2.2", "https://mirrors.cloud.tencent.com/npm/@smithy/util-config-provider/-/util-config-provider-4.2.2.tgz", { "dependencies": { "tslib": "^2.6.2" } }, "sha512-dWU03V3XUprJwaUIFVv4iOnS1FC9HnMHDfUrlNDSh4315v0cWyaIErP8KiqGVbf5z+JupoVpNM7ZB3jFiTejvQ=="],
|
||||
|
||||
"@smithy/util-defaults-mode-browser": ["@smithy/util-defaults-mode-browser@4.3.44", "https://mirrors.cloud.tencent.com/npm/@smithy/util-defaults-mode-browser/-/util-defaults-mode-browser-4.3.44.tgz", { "dependencies": { "@smithy/property-provider": "^4.2.12", "@smithy/smithy-client": "^4.12.8", "@smithy/types": "^4.13.1", "tslib": "^2.6.2" } }, "sha512-eZg6XzaCbVr2S5cAErU5eGBDaOVTuTo1I65i4tQcHENRcZ8rMWhQy1DaIYUSLyZjsfXvmCqZrstSMYyGFocvHA=="],
|
||||
|
||||
"@smithy/util-defaults-mode-node": ["@smithy/util-defaults-mode-node@4.2.48", "https://mirrors.cloud.tencent.com/npm/@smithy/util-defaults-mode-node/-/util-defaults-mode-node-4.2.48.tgz", { "dependencies": { "@smithy/config-resolver": "^4.4.13", "@smithy/credential-provider-imds": "^4.2.12", "@smithy/node-config-provider": "^4.3.12", "@smithy/property-provider": "^4.2.12", "@smithy/smithy-client": "^4.12.8", "@smithy/types": "^4.13.1", "tslib": "^2.6.2" } }, "sha512-FqOKTlqSaoV3nzO55pMs5NBnZX8EhoI0DGmn9kbYeXWppgHD6dchyuj2HLqp4INJDJbSrj6OFYJkAh/WhSzZPg=="],
|
||||
|
||||
"@smithy/util-endpoints": ["@smithy/util-endpoints@3.3.3", "https://mirrors.cloud.tencent.com/npm/@smithy/util-endpoints/-/util-endpoints-3.3.3.tgz", { "dependencies": { "@smithy/node-config-provider": "^4.3.12", "@smithy/types": "^4.13.1", "tslib": "^2.6.2" } }, "sha512-VACQVe50j0HZPjpwWcjyT51KUQ4AnsvEaQ2lKHOSL4mNLD0G9BjEniQ+yCt1qqfKfiAHRAts26ud7hBjamrwig=="],
|
||||
|
||||
"@smithy/util-hex-encoding": ["@smithy/util-hex-encoding@4.2.2", "https://mirrors.cloud.tencent.com/npm/@smithy/util-hex-encoding/-/util-hex-encoding-4.2.2.tgz", { "dependencies": { "tslib": "^2.6.2" } }, "sha512-Qcz3W5vuHK4sLQdyT93k/rfrUwdJ8/HZ+nMUOyGdpeGA1Wxt65zYwi3oEl9kOM+RswvYq90fzkNDahPS8K0OIg=="],
|
||||
|
||||
"@smithy/util-middleware": ["@smithy/util-middleware@4.2.12", "https://mirrors.cloud.tencent.com/npm/@smithy/util-middleware/-/util-middleware-4.2.12.tgz", { "dependencies": { "@smithy/types": "^4.13.1", "tslib": "^2.6.2" } }, "sha512-Er805uFUOvgc0l8nv0e0su0VFISoxhJ/AwOn3gL2NWNY2LUEldP5WtVcRYSQBcjg0y9NfG8JYrCJaYDpupBHJQ=="],
|
||||
|
||||
"@smithy/util-retry": ["@smithy/util-retry@4.2.12", "https://mirrors.cloud.tencent.com/npm/@smithy/util-retry/-/util-retry-4.2.12.tgz", { "dependencies": { "@smithy/service-error-classification": "^4.2.12", "@smithy/types": "^4.13.1", "tslib": "^2.6.2" } }, "sha512-1zopLDUEOwumjcHdJ1mwBHddubYF8GMQvstVCLC54Y46rqoHwlIU+8ZzUeaBcD+WCJHyDGSeZ2ml9YSe9aqcoQ=="],
|
||||
|
||||
"@smithy/util-stream": ["@smithy/util-stream@4.5.21", "https://mirrors.cloud.tencent.com/npm/@smithy/util-stream/-/util-stream-4.5.21.tgz", { "dependencies": { "@smithy/fetch-http-handler": "^5.3.15", "@smithy/node-http-handler": "^4.5.1", "@smithy/types": "^4.13.1", "@smithy/util-base64": "^4.3.2", "@smithy/util-buffer-from": "^4.2.2", "@smithy/util-hex-encoding": "^4.2.2", "@smithy/util-utf8": "^4.2.2", "tslib": "^2.6.2" } }, "sha512-KzSg+7KKywLnkoKejRtIBXDmwBfjGvg1U1i/etkC7XSWUyFCoLno1IohV2c74IzQqdhX5y3uE44r/8/wuK+A7Q=="],
|
||||
|
||||
"@smithy/util-uri-escape": ["@smithy/util-uri-escape@4.2.2", "https://mirrors.cloud.tencent.com/npm/@smithy/util-uri-escape/-/util-uri-escape-4.2.2.tgz", { "dependencies": { "tslib": "^2.6.2" } }, "sha512-2kAStBlvq+lTXHyAZYfJRb/DfS3rsinLiwb+69SstC9Vb0s9vNWkRwpnj918Pfi85mzi42sOqdV72OLxWAISnw=="],
|
||||
|
||||
"@smithy/util-utf8": ["@smithy/util-utf8@4.2.2", "https://mirrors.cloud.tencent.com/npm/@smithy/util-utf8/-/util-utf8-4.2.2.tgz", { "dependencies": { "@smithy/util-buffer-from": "^4.2.2", "tslib": "^2.6.2" } }, "sha512-75MeYpjdWRe8M5E3AW0O4Cx3UadweS+cwdXjwYGBW5h/gxxnbeZ877sLPX/ZJA9GVTlL/qG0dXP29JWFCD1Ayw=="],
|
||||
|
||||
"@smithy/uuid": ["@smithy/uuid@1.1.2", "https://mirrors.cloud.tencent.com/npm/@smithy/uuid/-/uuid-1.1.2.tgz", { "dependencies": { "tslib": "^2.6.2" } }, "sha512-O/IEdcCUKkubz60tFbGA7ceITTAJsty+lBjNoorP4Z6XRqaFb/OjQjZODophEcuq68nKm6/0r+6/lLQ+XVpk8g=="],
|
||||
|
||||
"@types/lodash": ["@types/lodash@4.17.24", "https://mirrors.cloud.tencent.com/npm/@types/lodash/-/lodash-4.17.24.tgz", {}, "sha512-gIW7lQLZbue7lRSWEFql49QJJWThrTFFeIMJdp3eH4tKoxm1OvEPg02rm4wCCSHS0cL3/Fizimb35b7k8atwsQ=="],
|
||||
|
||||
"@types/lodash-es": ["@types/lodash-es@4.17.12", "https://mirrors.cloud.tencent.com/npm/@types/lodash-es/-/lodash-es-4.17.12.tgz", { "dependencies": { "@types/lodash": "*" } }, "sha512-0NgftHUcV4v34VhXm8QBSftKVXtbkBG3ViCjs6+eJ5a6y6Mi/jiFGPc1sC7QK+9BFhWrURE3EOggmWaSxL9OzQ=="],
|
||||
|
||||
"@types/mute-stream": ["@types/mute-stream@0.0.4", "https://mirrors.cloud.tencent.com/npm/@types/mute-stream/-/mute-stream-0.0.4.tgz", { "dependencies": { "@types/node": "*" } }, "sha512-CPM9nzrCPPJHQNA9keH9CVkVI+WR5kMa+7XEs5jcGQ0VoAGnLv242w8lIVgwAEfmE4oufJRaTc9PNLQl0ioAow=="],
|
||||
|
||||
"@types/node": ["@types/node@22.19.15", "https://mirrors.cloud.tencent.com/npm/@types/node/-/node-22.19.15.tgz", { "dependencies": { "undici-types": "~6.21.0" } }, "sha512-F0R/h2+dsy5wJAUe3tAU6oqa2qbWY5TpNfL/RGmo1y38hiyO1w3x2jPtt76wmuaJI4DQnOBu21cNXQ2STIUUWg=="],
|
||||
|
||||
"@types/wrap-ansi": ["@types/wrap-ansi@3.0.0", "https://mirrors.cloud.tencent.com/npm/@types/wrap-ansi/-/wrap-ansi-3.0.0.tgz", {}, "sha512-ltIpx+kM7g/MLRZfkbL7EsCEjfzCcScLpkg37eXEtx5kmrAKBkTJwd1GIAjDSL8wTpM6Hzn5YO4pSb91BEwu1g=="],
|
||||
|
||||
"accepts": ["accepts@2.0.0", "https://mirrors.cloud.tencent.com/npm/accepts/-/accepts-2.0.0.tgz", { "dependencies": { "mime-types": "^3.0.0", "negotiator": "^1.0.0" } }, "sha512-5cvg6CtKwfgdmVqY1WIiXKc3Q1bkRqGLi+2W/6ao+6Y7gu/RCwRuAhGEzh5B4KlszSuTLgZYuqFqo5bImjNKng=="],
|
||||
|
||||
"agent-base": ["agent-base@8.0.0", "https://mirrors.cloud.tencent.com/npm/agent-base/-/agent-base-8.0.0.tgz", {}, "sha512-QT8i0hCz6C/KQ+KTAbSNwCHDGdmUJl2tp2ZpNlGSWCfhUNVbYG2WLE3MdZGBAgXPV4GAvjGMxo+C1hroyxmZEg=="],
|
||||
|
||||
"ajv": ["ajv@8.18.0", "https://mirrors.cloud.tencent.com/npm/ajv/-/ajv-8.18.0.tgz", { "dependencies": { "fast-deep-equal": "^3.1.3", "fast-uri": "^3.0.1", "json-schema-traverse": "^1.0.0", "require-from-string": "^2.0.2" } }, "sha512-PlXPeEWMXMZ7sPYOHqmDyCJzcfNrUr3fGNKtezX14ykXOEIvyK81d+qydx89KY5O71FKMPaQ2vBfBFI5NHR63A=="],
|
||||
|
||||
"ajv-formats": ["ajv-formats@3.0.1", "https://mirrors.cloud.tencent.com/npm/ajv-formats/-/ajv-formats-3.0.1.tgz", { "dependencies": { "ajv": "^8.0.0" } }, "sha512-8iUql50EUR+uUcdRQ3HDqa6EVyo3docL8g5WJ3FNcWmu62IbkGUue/pEyLBW8VGKKucTPgqeks4fIU1DA4yowQ=="],
|
||||
|
||||
"ansi-escapes": ["ansi-escapes@7.3.0", "https://mirrors.cloud.tencent.com/npm/ansi-escapes/-/ansi-escapes-7.3.0.tgz", { "dependencies": { "environment": "^1.0.0" } }, "sha512-BvU8nYgGQBxcmMuEeUEmNTvrMVjJNSH7RgW24vXexN4Ven6qCvy4TntnvlnwnMLTVlcRQQdbRY8NKnaIoeWDNg=="],
|
||||
|
||||
"ansi-regex": ["ansi-regex@6.2.2", "https://mirrors.cloud.tencent.com/npm/ansi-regex/-/ansi-regex-6.2.2.tgz", {}, "sha512-Bq3SmSpyFHaWjPk8If9yc6svM8c56dB5BAtW4Qbw5jHTwwXXcTLoRMkpDJp6VL0XzlWaCHTXrkFURMYmD0sLqg=="],
|
||||
|
||||
"ansi-styles": ["ansi-styles@6.2.3", "https://mirrors.cloud.tencent.com/npm/ansi-styles/-/ansi-styles-6.2.3.tgz", {}, "sha512-4Dj6M28JB+oAH8kFkTLUo+a2jwOFkuqb3yucU0CANcRRUbxS0cP0nZYCGjcc3BNXwRIsUVmDGgzawme7zvJHvg=="],
|
||||
|
||||
"asciichart": ["asciichart@1.5.25", "https://mirrors.cloud.tencent.com/npm/asciichart/-/asciichart-1.5.25.tgz", {}, "sha512-PNxzXIPPOtWq8T7bgzBtk9cI2lgS4SJZthUHEiQ1aoIc3lNzGfUvIvo9LiAnq26TACo9t1/4qP6KTGAUbzX9Xg=="],
|
||||
|
||||
"asynckit": ["asynckit@0.4.0", "https://mirrors.cloud.tencent.com/npm/asynckit/-/asynckit-0.4.0.tgz", {}, "sha512-Oei9OH4tRh0YqU3GxhX79dM/mwVgvbZJaSNaRk+bshkj0S5cfHcgYakreBjrHwatXKbz+IoIdYLxrKim2MjW0Q=="],
|
||||
|
||||
"auto-bind": ["auto-bind@5.0.1", "https://mirrors.cloud.tencent.com/npm/auto-bind/-/auto-bind-5.0.1.tgz", {}, "sha512-ooviqdwwgfIfNmDwo94wlshcdzfO64XV0Cg6oDsDYBJfITDz1EngD2z7DkbvCWn+XIMsIqW27sEVF6qcpJrRcg=="],
|
||||
|
||||
"axios": ["axios@1.14.0", "https://mirrors.cloud.tencent.com/npm/axios/-/axios-1.14.0.tgz", { "dependencies": { "follow-redirects": "^1.15.11", "form-data": "^4.0.5", "proxy-from-env": "^2.1.0" } }, "sha512-3Y8yrqLSwjuzpXuZ0oIYZ/XGgLwUIBU3uLvbcpb0pidD9ctpShJd43KSlEEkVQg6DS0G9NKyzOvBfUtDKEyHvQ=="],
|
||||
|
||||
"base64-js": ["base64-js@1.5.1", "https://mirrors.cloud.tencent.com/npm/base64-js/-/base64-js-1.5.1.tgz", {}, "sha512-AKpaYlHn8t4SVbOHCy+b5+KKgvR4vrsD8vbvrbiQJps7fKDTkjkDry6ji0rUJjC0kzbNePLwzxq8iypo41qeWA=="],
|
||||
|
||||
"bidi-js": ["bidi-js@1.0.3", "https://mirrors.cloud.tencent.com/npm/bidi-js/-/bidi-js-1.0.3.tgz", { "dependencies": { "require-from-string": "^2.0.2" } }, "sha512-RKshQI1R3YQ+n9YJz2QQ147P66ELpa1FQEg20Dk8oW9t2KgLbpDLLp9aGZ7y8WHSshDknG0bknqGw5/tyCs5tw=="],
|
||||
|
||||
"bignumber.js": ["bignumber.js@9.3.1", "https://mirrors.cloud.tencent.com/npm/bignumber.js/-/bignumber.js-9.3.1.tgz", {}, "sha512-Ko0uX15oIUS7wJ3Rb30Fs6SkVbLmPBAKdlm7q9+ak9bbIeFf0MwuBsQV6z7+X768/cHsfg+WlysDWJcmthjsjQ=="],
|
||||
|
||||
"body-parser": ["body-parser@2.2.2", "https://mirrors.cloud.tencent.com/npm/body-parser/-/body-parser-2.2.2.tgz", { "dependencies": { "bytes": "^3.1.2", "content-type": "^1.0.5", "debug": "^4.4.3", "http-errors": "^2.0.0", "iconv-lite": "^0.7.0", "on-finished": "^2.4.1", "qs": "^6.14.1", "raw-body": "^3.0.1", "type-is": "^2.0.1" } }, "sha512-oP5VkATKlNwcgvxi0vM0p/D3n2C3EReYVX+DNYs5TjZFn/oQt2j+4sVJtSMr18pdRr8wjTcBl6LoV+FUwzPmNA=="],
|
||||
|
||||
"bowser": ["bowser@2.14.1", "https://mirrors.cloud.tencent.com/npm/bowser/-/bowser-2.14.1.tgz", {}, "sha512-tzPjzCxygAKWFOJP011oxFHs57HzIhOEracIgAePE4pqB3LikALKnSzUyU4MGs9/iCEUuHlAJTjTc5M+u7YEGg=="],
|
||||
|
||||
"buffer-equal-constant-time": ["buffer-equal-constant-time@1.0.1", "https://mirrors.cloud.tencent.com/npm/buffer-equal-constant-time/-/buffer-equal-constant-time-1.0.1.tgz", {}, "sha512-zRpUiDwd/xk6ADqPMATG8vc9VPrkck7T07OIx0gnjmJAnHnTVXNQG3vfvWNuiZIkwu9KrKdA1iJKfsfTVxE6NA=="],
|
||||
|
||||
"bytes": ["bytes@3.1.2", "https://mirrors.cloud.tencent.com/npm/bytes/-/bytes-3.1.2.tgz", {}, "sha512-/Nf7TyzTx6S3yRJObOAV7956r8cr2+Oj8AC5dt8wSP3BQAoeX58NoHyCU8P8zGkNXStjTSi6fzO6F0pBdcYbEg=="],
|
||||
|
||||
"call-bind-apply-helpers": ["call-bind-apply-helpers@1.0.2", "https://mirrors.cloud.tencent.com/npm/call-bind-apply-helpers/-/call-bind-apply-helpers-1.0.2.tgz", { "dependencies": { "es-errors": "^1.3.0", "function-bind": "^1.1.2" } }, "sha512-Sp1ablJ0ivDkSzjcaJdxEunN5/XvksFJ2sMBFfq6x0ryhQV/2b/KwFe21cMpmHtPOSij8K99/wSfoEuTObmuMQ=="],
|
||||
|
||||
"call-bound": ["call-bound@1.0.4", "https://mirrors.cloud.tencent.com/npm/call-bound/-/call-bound-1.0.4.tgz", { "dependencies": { "call-bind-apply-helpers": "^1.0.2", "get-intrinsic": "^1.3.0" } }, "sha512-+ys997U96po4Kx/ABpBCqhA9EuxJaQWDQg7295H4hBphv3IZg0boBKuwYpt4YXp6MZ5AmZQnU/tyMTlRpaSejg=="],
|
||||
|
||||
"camelcase": ["camelcase@5.3.1", "https://mirrors.cloud.tencent.com/npm/camelcase/-/camelcase-5.3.1.tgz", {}, "sha512-L28STB170nwWS63UjtlEOE3dldQApaJXZkOI1uMFfzf3rRuPegHaHesyee+YxQ+W6SvRDQV6UrdOdRiR153wJg=="],
|
||||
|
||||
"chalk": ["chalk@5.6.2", "https://mirrors.cloud.tencent.com/npm/chalk/-/chalk-5.6.2.tgz", {}, "sha512-7NzBL0rN6fMUW+f7A6Io4h40qQlG+xGmtMxfbnH/K7TAtt8JQWVQK+6g0UXKMeVJoyV5EkkNsErQ8pVD3bLHbA=="],
|
||||
|
||||
"chardet": ["chardet@0.7.0", "https://mirrors.cloud.tencent.com/npm/chardet/-/chardet-0.7.0.tgz", {}, "sha512-mT8iDcrh03qDGRRmoA2hmBJnxpllMR+0/0qlzjqZES6NdiWDcZkCNAk4rPFZ9Q85r27unkiNNg8ZOiwZXBHwcA=="],
|
||||
|
||||
"chokidar": ["chokidar@5.0.0", "https://mirrors.cloud.tencent.com/npm/chokidar/-/chokidar-5.0.0.tgz", { "dependencies": { "readdirp": "^5.0.0" } }, "sha512-TQMmc3w+5AxjpL8iIiwebF73dRDF4fBIieAqGn9RGCWaEVwQ6Fb2cGe31Yns0RRIzii5goJ1Y7xbMwo1TxMplw=="],
|
||||
|
||||
"cli-boxes": ["cli-boxes@4.0.1", "https://mirrors.cloud.tencent.com/npm/cli-boxes/-/cli-boxes-4.0.1.tgz", {}, "sha512-5IOn+jcCEHEraYolBPs/sT4BxYCe2nHg374OPiItB1O96KZFseS2gthU4twyYzeDcFew4DaUM/xwc5BQf08JJw=="],
|
||||
|
||||
"cli-cursor": ["cli-cursor@4.0.0", "https://mirrors.cloud.tencent.com/npm/cli-cursor/-/cli-cursor-4.0.0.tgz", { "dependencies": { "restore-cursor": "^4.0.0" } }, "sha512-VGtlMu3x/4DOtIUwEkRezxUZ2lBacNJCHash0N0WeZDBS+7Ux1dm3XWAgWYxLJFMMdOeXMHXorshEFhbMSGelg=="],
|
||||
|
||||
"cli-truncate": ["cli-truncate@5.2.0", "https://mirrors.cloud.tencent.com/npm/cli-truncate/-/cli-truncate-5.2.0.tgz", { "dependencies": { "slice-ansi": "^8.0.0", "string-width": "^8.2.0" } }, "sha512-xRwvIOMGrfOAnM1JYtqQImuaNtDEv9v6oIYAs4LIHwTiKee8uwvIi363igssOC0O5U04i4AlENs79LQLu9tEMw=="],
|
||||
|
||||
"cli-width": ["cli-width@4.1.0", "https://mirrors.cloud.tencent.com/npm/cli-width/-/cli-width-4.1.0.tgz", {}, "sha512-ouuZd4/dm2Sw5Gmqy6bGyNNNe1qt9RpmxveLSO7KcgsTnU7RXfsw+/bukWGo1abgBiMAic068rclZsO4IWmmxQ=="],
|
||||
|
||||
"cliui": ["cliui@6.0.0", "https://mirrors.cloud.tencent.com/npm/cliui/-/cliui-6.0.0.tgz", { "dependencies": { "string-width": "^4.2.0", "strip-ansi": "^6.0.0", "wrap-ansi": "^6.2.0" } }, "sha512-t6wbgtoCXvAzst7QgXxJYqPt0usEfbgQdftEPbLL/cvv6HPE5VgvqCuAIDR0NgU52ds6rFwqrgakNLrHEjCbrQ=="],
|
||||
|
||||
"code-excerpt": ["code-excerpt@4.0.0", "https://mirrors.cloud.tencent.com/npm/code-excerpt/-/code-excerpt-4.0.0.tgz", { "dependencies": { "convert-to-spaces": "^2.0.1" } }, "sha512-xxodCmBen3iy2i0WtAK8FlFNrRzjUqjRsMfho58xT/wvZU1YTM3fCnRjcy1gJPMepaRlgm/0e6w8SpWHpn3/cA=="],
|
||||
|
||||
"color-convert": ["color-convert@2.0.1", "https://mirrors.cloud.tencent.com/npm/color-convert/-/color-convert-2.0.1.tgz", { "dependencies": { "color-name": "~1.1.4" } }, "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ=="],
|
||||
|
||||
"color-diff-napi": ["color-diff-napi@file:shims/color-diff-napi", {}],
|
||||
|
||||
"color-name": ["color-name@1.1.4", "https://mirrors.cloud.tencent.com/npm/color-name/-/color-name-1.1.4.tgz", {}, "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA=="],
|
||||
|
||||
"combined-stream": ["combined-stream@1.0.8", "https://mirrors.cloud.tencent.com/npm/combined-stream/-/combined-stream-1.0.8.tgz", { "dependencies": { "delayed-stream": "~1.0.0" } }, "sha512-FQN4MRfuJeHf7cBbBMJFXhKSDq+2kAArBlmRBvcvFE5BB1HZKXtSFASDhdlz9zOYwxh8lDdnvmMOe/+5cdoEdg=="],
|
||||
|
||||
"commander": ["commander@13.1.0", "https://mirrors.cloud.tencent.com/npm/commander/-/commander-13.1.0.tgz", {}, "sha512-/rFeCpNJQbhSZjGVwO9RFV3xPqbnERS8MmIQzCtD/zl6gpJuV/bMLuN92oG3F7d8oDEHHRrujSXNUr8fpjntKw=="],
|
||||
|
||||
"content-disposition": ["content-disposition@1.0.1", "https://mirrors.cloud.tencent.com/npm/content-disposition/-/content-disposition-1.0.1.tgz", {}, "sha512-oIXISMynqSqm241k6kcQ5UwttDILMK4BiurCfGEREw6+X9jkkpEe5T9FZaApyLGGOnFuyMWZpdolTXMtvEJ08Q=="],
|
||||
|
||||
"content-type": ["content-type@1.0.5", "https://mirrors.cloud.tencent.com/npm/content-type/-/content-type-1.0.5.tgz", {}, "sha512-nTjqfcBFEipKdXCv4YDQWCfmcLZKm81ldF0pAopTvyrFGVbcR6P/VAAd5G7N+0tTr8QqiU0tFadD6FK4NtJwOA=="],
|
||||
|
||||
"convert-to-spaces": ["convert-to-spaces@2.0.1", "https://mirrors.cloud.tencent.com/npm/convert-to-spaces/-/convert-to-spaces-2.0.1.tgz", {}, "sha512-rcQ1bsQO9799wq24uE5AM2tAILy4gXGIK/njFWcVQkGNZ96edlpY+A7bjwvzjYvLDyzmG1MmMLZhpcsb+klNMQ=="],
|
||||
|
||||
"cookie": ["cookie@0.7.2", "https://mirrors.cloud.tencent.com/npm/cookie/-/cookie-0.7.2.tgz", {}, "sha512-yki5XnKuf750l50uGTllt6kKILY4nQ1eNIQatoXEByZ5dWgnKqbnqmTrBE5B4N7lrMJKQ2ytWMiTO2o0v6Ew/w=="],
|
||||
|
||||
"cookie-signature": ["cookie-signature@1.2.2", "https://mirrors.cloud.tencent.com/npm/cookie-signature/-/cookie-signature-1.2.2.tgz", {}, "sha512-D76uU73ulSXrD1UXF4KE2TMxVVwhsnCgfAyTg9k8P6KGZjlXKrOLe4dJQKI3Bxi5wjesZoFXJWElNWBjPZMbhg=="],
|
||||
|
||||
"cors": ["cors@2.8.6", "https://mirrors.cloud.tencent.com/npm/cors/-/cors-2.8.6.tgz", { "dependencies": { "object-assign": "^4", "vary": "^1" } }, "sha512-tJtZBBHA6vjIAaF6EnIaq6laBBP9aq/Y3ouVJjEfoHbRBcHBAHYcMh/w8LDrk2PvIMMq8gmopa5D4V8RmbrxGw=="],
|
||||
|
||||
"cross-spawn": ["cross-spawn@7.0.6", "https://mirrors.cloud.tencent.com/npm/cross-spawn/-/cross-spawn-7.0.6.tgz", { "dependencies": { "path-key": "^3.1.0", "shebang-command": "^2.0.0", "which": "^2.0.1" } }, "sha512-uV2QOWP2nWzsy2aMp8aRibhi9dlzF5Hgh5SHaB9OiTGEyDTiJJyx0uy51QXdyWbtAHNua4XJzUKca3OzKUd3vA=="],
|
||||
|
||||
"cssfilter": ["cssfilter@0.0.10", "https://mirrors.cloud.tencent.com/npm/cssfilter/-/cssfilter-0.0.10.tgz", {}, "sha512-FAaLDaplstoRsDR8XGYH51znUN0UY7nMc6Z9/fvE8EXGwvJE9hu7W2vHwx1+bd6gCYnln9nLbzxFTrcO9YQDZw=="],
|
||||
|
||||
"data-uri-to-buffer": ["data-uri-to-buffer@4.0.1", "https://mirrors.cloud.tencent.com/npm/data-uri-to-buffer/-/data-uri-to-buffer-4.0.1.tgz", {}, "sha512-0R9ikRb668HB7QDxT1vkpuUBtqc53YyAwMwGeUFKRojY/NWKvdZ+9UYtRfGmhqNbRkTSVpMbmyhXipFFv2cb/A=="],
|
||||
|
||||
"debug": ["debug@4.4.3", "https://mirrors.cloud.tencent.com/npm/debug/-/debug-4.4.3.tgz", { "dependencies": { "ms": "^2.1.3" } }, "sha512-RGwwWnwQvkVfavKVt22FGLw+xYSdzARwm0ru6DhTVA3umU5hZc28V3kO4stgYryrTlLpuvgI9GiijltAjNbcqA=="],
|
||||
|
||||
"decamelize": ["decamelize@1.2.0", "https://mirrors.cloud.tencent.com/npm/decamelize/-/decamelize-1.2.0.tgz", {}, "sha512-z2S+W9X73hAUUki+N+9Za2lBlun89zigOyGrsax+KUQ6wKW4ZoWpEYBkGhQjwAjjDCkWxhY0VKEhk8wzY7F5cA=="],
|
||||
|
||||
"delayed-stream": ["delayed-stream@1.0.0", "https://mirrors.cloud.tencent.com/npm/delayed-stream/-/delayed-stream-1.0.0.tgz", {}, "sha512-ZySD7Nf91aLB0RxL4KGrKHBXl7Eds1DAmEdcoVawXnLD7SDhpNgtuII2aAkg7a7QS41jxPSZ17p4VdGnMHk3MQ=="],
|
||||
|
||||
"depd": ["depd@2.0.0", "https://mirrors.cloud.tencent.com/npm/depd/-/depd-2.0.0.tgz", {}, "sha512-g7nH6P6dyDioJogAAGprGpCtVImJhpPk/roCzdb3fIh61/s/nPsfR6onyMwkCAR/OlC3yBC0lESvUoQEAssIrw=="],
|
||||
|
||||
"diff": ["diff@8.0.4", "https://mirrors.cloud.tencent.com/npm/diff/-/diff-8.0.4.tgz", {}, "sha512-DPi0FmjiSU5EvQV0++GFDOJ9ASQUVFh5kD+OzOnYdi7n3Wpm9hWWGfB/O2blfHcMVTL5WkQXSnRiK9makhrcnw=="],
|
||||
|
||||
"dijkstrajs": ["dijkstrajs@1.0.3", "https://mirrors.cloud.tencent.com/npm/dijkstrajs/-/dijkstrajs-1.0.3.tgz", {}, "sha512-qiSlmBq9+BCdCA/L46dw8Uy93mloxsPSbwnm5yrKn2vMPiy8KyAskTF6zuV/j5BMsmOGZDPs7KjU+mjb670kfA=="],
|
||||
|
||||
"dom-mutator": ["dom-mutator@0.6.0", "https://mirrors.cloud.tencent.com/npm/dom-mutator/-/dom-mutator-0.6.0.tgz", {}, "sha512-iCt9o0aYfXMUkz/43ZOAUFQYotjGB+GNbYJiJdz4TgXkyToXbbRy5S6FbTp72lRBtfpUMwEc1KmpFEU4CZeoNg=="],
|
||||
|
||||
"dunder-proto": ["dunder-proto@1.0.1", "https://mirrors.cloud.tencent.com/npm/dunder-proto/-/dunder-proto-1.0.1.tgz", { "dependencies": { "call-bind-apply-helpers": "^1.0.1", "es-errors": "^1.3.0", "gopd": "^1.2.0" } }, "sha512-KIN/nDJBQRcXw0MLVhZE9iQHmG68qAVIBg9CqmUYjmQIhgij9U5MFvrqkUL5FbtyyzZuOeOt0zdeRe4UY7ct+A=="],
|
||||
|
||||
"ecdsa-sig-formatter": ["ecdsa-sig-formatter@1.0.11", "https://mirrors.cloud.tencent.com/npm/ecdsa-sig-formatter/-/ecdsa-sig-formatter-1.0.11.tgz", { "dependencies": { "safe-buffer": "^5.0.1" } }, "sha512-nagl3RYrbNv6kQkeJIpt6NJZy8twLB/2vtz6yN9Z4vRKHN4/QZJIEbqohALSgwKdnksuY3k5Addp5lg8sVoVcQ=="],
|
||||
|
||||
"ee-first": ["ee-first@1.1.1", "https://mirrors.cloud.tencent.com/npm/ee-first/-/ee-first-1.1.1.tgz", {}, "sha512-WMwm9LhRUo+WUaRN+vRuETqG89IgZphVSNkdFgeb6sS/E4OrDIN7t48CAewSHXc6C8lefD8KKfr5vY61brQlow=="],
|
||||
|
||||
"emoji-regex": ["emoji-regex@10.6.0", "https://mirrors.cloud.tencent.com/npm/emoji-regex/-/emoji-regex-10.6.0.tgz", {}, "sha512-toUI84YS5YmxW219erniWD0CIVOo46xGKColeNQRgOzDorgBi1v4D71/OFzgD9GO2UGKIv1C3Sp8DAn0+j5w7A=="],
|
||||
|
||||
"encodeurl": ["encodeurl@2.0.0", "https://mirrors.cloud.tencent.com/npm/encodeurl/-/encodeurl-2.0.0.tgz", {}, "sha512-Q0n9HRi4m6JuGIV1eFlmvJB7ZEVxu93IrMyiMsGC0lrMJMWzRgx6WGquyfQgZVb31vhGgXnfmPNNXmxnOkRBrg=="],
|
||||
|
||||
"env-paths": ["env-paths@4.0.0", "https://mirrors.cloud.tencent.com/npm/env-paths/-/env-paths-4.0.0.tgz", { "dependencies": { "is-safe-filename": "^0.1.0" } }, "sha512-pxP8eL2SwwaTRi/KHYwLYXinDs7gL3jxFcBYmEdYfZmZXbaVDvdppd0XBU8qVz03rDfKZMXg1omHCbsJjZrMsw=="],
|
||||
|
||||
"environment": ["environment@1.1.0", "https://mirrors.cloud.tencent.com/npm/environment/-/environment-1.1.0.tgz", {}, "sha512-xUtoPkMggbz0MPyPiIWr1Kp4aeWJjDZ6SMvURhimjdZgsRuDplF5/s9hcgGhyXMhs+6vpnuoiZ2kFiu3FMnS8Q=="],
|
||||
|
||||
"es-define-property": ["es-define-property@1.0.1", "https://mirrors.cloud.tencent.com/npm/es-define-property/-/es-define-property-1.0.1.tgz", {}, "sha512-e3nRfgfUZ4rNGL232gUgX06QNyyez04KdjFrF+LTRoOXmrOgFKDg4BCdsjW8EnT69eqdYGmRpJwiPVYNrCaW3g=="],
|
||||
|
||||
"es-errors": ["es-errors@1.3.0", "https://mirrors.cloud.tencent.com/npm/es-errors/-/es-errors-1.3.0.tgz", {}, "sha512-Zf5H2Kxt2xjTvbJvP2ZWLEICxA6j+hAmMzIlypy4xcBg1vKVnx89Wy0GbS+kf5cwCVFFzdCFh2XSCFNULS6csw=="],
|
||||
|
||||
"es-object-atoms": ["es-object-atoms@1.1.1", "https://mirrors.cloud.tencent.com/npm/es-object-atoms/-/es-object-atoms-1.1.1.tgz", { "dependencies": { "es-errors": "^1.3.0" } }, "sha512-FGgH2h8zKNim9ljj7dankFPcICIK9Cp5bm+c2gQSYePhpaG5+esrLODihIorn+Pe6FGJzWhXQotPv73jTaldXA=="],
|
||||
|
||||
"es-set-tostringtag": ["es-set-tostringtag@2.1.0", "https://mirrors.cloud.tencent.com/npm/es-set-tostringtag/-/es-set-tostringtag-2.1.0.tgz", { "dependencies": { "es-errors": "^1.3.0", "get-intrinsic": "^1.2.6", "has-tostringtag": "^1.0.2", "hasown": "^2.0.2" } }, "sha512-j6vWzfrGVfyXxge+O0x5sh6cvxAog0a/4Rdd2K36zCMV5eJ+/+tOAngRO8cODMNWbVRdVlmGZQL2YS3yR8bIUA=="],
|
||||
|
||||
"es-toolkit": ["es-toolkit@1.45.1", "https://mirrors.cloud.tencent.com/npm/es-toolkit/-/es-toolkit-1.45.1.tgz", {}, "sha512-/jhoOj/Fx+A+IIyDNOvO3TItGmlMKhtX8ISAHKE90c4b/k1tqaqEZ+uUqfpU8DMnW5cgNJv606zS55jGvza0Xw=="],
|
||||
|
||||
"escape-html": ["escape-html@1.0.3", "https://mirrors.cloud.tencent.com/npm/escape-html/-/escape-html-1.0.3.tgz", {}, "sha512-NiSupZ4OeuGwr68lGIeym/ksIZMJodUGOSCZ/FSnTxcrekbvqrgdUxlJOMpijaKZVjAJrWrGs/6Jy8OMuyj9ow=="],
|
||||
|
||||
"escape-string-regexp": ["escape-string-regexp@2.0.0", "https://mirrors.cloud.tencent.com/npm/escape-string-regexp/-/escape-string-regexp-2.0.0.tgz", {}, "sha512-UpzcLCXolUWcNu5HtVMHYdXJjArjsF9C0aNnquZYY4uW/Vu0miy5YoWvbV345HauVvcAUnpRuhMMcqTcGOY2+w=="],
|
||||
|
||||
"etag": ["etag@1.8.1", "https://mirrors.cloud.tencent.com/npm/etag/-/etag-1.8.1.tgz", {}, "sha512-aIL5Fx7mawVa300al2BnEE4iNvo1qETxLrPI/o05L7z6go7fCw1J6EQmbK4FmJ2AS7kgVF/KEZWufBfdClMcPg=="],
|
||||
|
||||
"eventsource": ["eventsource@3.0.7", "https://mirrors.cloud.tencent.com/npm/eventsource/-/eventsource-3.0.7.tgz", { "dependencies": { "eventsource-parser": "^3.0.1" } }, "sha512-CRT1WTyuQoD771GW56XEZFQ/ZoSfWid1alKGDYMmkt2yl8UXrVR4pspqWNEcqKvVIzg6PAltWjxcSSPrboA4iA=="],
|
||||
|
||||
"eventsource-parser": ["eventsource-parser@3.0.6", "https://mirrors.cloud.tencent.com/npm/eventsource-parser/-/eventsource-parser-3.0.6.tgz", {}, "sha512-Vo1ab+QXPzZ4tCa8SwIHJFaSzy4R6SHf7BY79rFBDf0idraZWAkYrDjDj8uWaSm3S2TK+hJ7/t1CEmZ7jXw+pg=="],
|
||||
|
||||
"execa": ["execa@9.6.1", "https://mirrors.cloud.tencent.com/npm/execa/-/execa-9.6.1.tgz", { "dependencies": { "@sindresorhus/merge-streams": "^4.0.0", "cross-spawn": "^7.0.6", "figures": "^6.1.0", "get-stream": "^9.0.0", "human-signals": "^8.0.1", "is-plain-obj": "^4.1.0", "is-stream": "^4.0.1", "npm-run-path": "^6.0.0", "pretty-ms": "^9.2.0", "signal-exit": "^4.1.0", "strip-final-newline": "^4.0.0", "yoctocolors": "^2.1.1" } }, "sha512-9Be3ZoN4LmYR90tUoVu2te2BsbzHfhJyfEiAVfz7N5/zv+jduIfLrV2xdQXOHbaD6KgpGdO9PRPM1Y4Q9QkPkA=="],
|
||||
|
||||
"express": ["express@5.2.1", "https://mirrors.cloud.tencent.com/npm/express/-/express-5.2.1.tgz", { "dependencies": { "accepts": "^2.0.0", "body-parser": "^2.2.1", "content-disposition": "^1.0.0", "content-type": "^1.0.5", "cookie": "^0.7.1", "cookie-signature": "^1.2.1", "debug": "^4.4.0", "depd": "^2.0.0", "encodeurl": "^2.0.0", "escape-html": "^1.0.3", "etag": "^1.8.1", "finalhandler": "^2.1.0", "fresh": "^2.0.0", "http-errors": "^2.0.0", "merge-descriptors": "^2.0.0", "mime-types": "^3.0.0", "on-finished": "^2.4.1", "once": "^1.4.0", "parseurl": "^1.3.3", "proxy-addr": "^2.0.7", "qs": "^6.14.0", "range-parser": "^1.2.1", "router": "^2.2.0", "send": "^1.1.0", "serve-static": "^2.2.0", "statuses": "^2.0.1", "type-is": "^2.0.1", "vary": "^1.1.2" } }, "sha512-hIS4idWWai69NezIdRt2xFVofaF4j+6INOpJlVOLDO8zXGpUVEVzIYk12UUi2JzjEzWL3IOAxcTubgz9Po0yXw=="],
|
||||
|
||||
"express-rate-limit": ["express-rate-limit@8.3.2", "https://mirrors.cloud.tencent.com/npm/express-rate-limit/-/express-rate-limit-8.3.2.tgz", { "dependencies": { "ip-address": "10.1.0" }, "peerDependencies": { "express": ">= 4.11" } }, "sha512-77VmFeJkO0/rvimEDuUC5H30oqUC4EyOhyGccfqoLebB0oiEYfM7nwPrsDsBL1gsTpwfzX8SFy2MT3TDyRq+bg=="],
|
||||
|
||||
"extend": ["extend@3.0.2", "https://mirrors.cloud.tencent.com/npm/extend/-/extend-3.0.2.tgz", {}, "sha512-fjquC59cD7CyW6urNXK0FBufkZcoiGG80wTuPujX590cB5Ttln20E2UB4S/WARVqhXffZl2LNgS+gQdPIIim/g=="],
|
||||
|
||||
"external-editor": ["external-editor@3.1.0", "https://mirrors.cloud.tencent.com/npm/external-editor/-/external-editor-3.1.0.tgz", { "dependencies": { "chardet": "^0.7.0", "iconv-lite": "^0.4.24", "tmp": "^0.0.33" } }, "sha512-hMQ4CX1p1izmuLYyZqLMO/qGNw10wSv9QDCPfzXfyFrOaCSSoRfqE1Kf1s5an66J5JZC62NewG+mK49jOCtQew=="],
|
||||
|
||||
"fast-deep-equal": ["fast-deep-equal@3.1.3", "https://mirrors.cloud.tencent.com/npm/fast-deep-equal/-/fast-deep-equal-3.1.3.tgz", {}, "sha512-f3qQ9oQy9j2AhBe/H9VC91wLmKBCCU/gDOnKNAYG5hswO7BLKj09Hc5HYNz9cGI++xlpDCIgDaitVs03ATR84Q=="],
|
||||
|
||||
"fast-uri": ["fast-uri@3.1.0", "https://mirrors.cloud.tencent.com/npm/fast-uri/-/fast-uri-3.1.0.tgz", {}, "sha512-iPeeDKJSWf4IEOasVVrknXpaBV0IApz/gp7S2bb7Z4Lljbl2MGJRqInZiUrQwV16cpzw/D3S5j5Julj/gT52AA=="],
|
||||
|
||||
"fast-xml-builder": ["fast-xml-builder@1.1.4", "https://mirrors.cloud.tencent.com/npm/fast-xml-builder/-/fast-xml-builder-1.1.4.tgz", { "dependencies": { "path-expression-matcher": "^1.1.3" } }, "sha512-f2jhpN4Eccy0/Uz9csxh3Nu6q4ErKxf0XIsasomfOihuSUa3/xw6w8dnOtCDgEItQFJG8KyXPzQXzcODDrrbOg=="],
|
||||
|
||||
"fast-xml-parser": ["fast-xml-parser@5.5.8", "https://mirrors.cloud.tencent.com/npm/fast-xml-parser/-/fast-xml-parser-5.5.8.tgz", { "dependencies": { "fast-xml-builder": "^1.1.4", "path-expression-matcher": "^1.2.0", "strnum": "^2.2.0" }, "bin": { "fxparser": "src/cli/cli.js" } }, "sha512-Z7Fh2nVQSb2d+poDViM063ix2ZGt9jmY1nWhPfHBOK2Hgnb/OW3P4Et3P/81SEej0J7QbWtJqxO05h8QYfK7LQ=="],
|
||||
|
||||
"fetch-blob": ["fetch-blob@3.2.0", "https://mirrors.cloud.tencent.com/npm/fetch-blob/-/fetch-blob-3.2.0.tgz", { "dependencies": { "node-domexception": "^1.0.0", "web-streams-polyfill": "^3.0.3" } }, "sha512-7yAQpD2UMJzLi1Dqv7qFYnPbaPx7ZfFK6PiIxQ4PfkGPyNyl2Ugx+a/umUonmKqjhM4DnfbMvdX6otXq83soQQ=="],
|
||||
|
||||
"fflate": ["fflate@0.8.2", "https://mirrors.cloud.tencent.com/npm/fflate/-/fflate-0.8.2.tgz", {}, "sha512-cPJU47OaAoCbg0pBvzsgpTPhmhqI5eJjh/JIu8tPj5q+T7iLvW/JAYUqmE7KOB4R1ZyEhzBaIQpQpardBF5z8A=="],
|
||||
|
||||
"figures": ["figures@6.1.0", "https://mirrors.cloud.tencent.com/npm/figures/-/figures-6.1.0.tgz", { "dependencies": { "is-unicode-supported": "^2.0.0" } }, "sha512-d+l3qxjSesT4V7v2fh+QnmFnUWv9lSpjarhShNTgBOfA0ttejbQUAlHLitbjkoRiDulW0OPoQPYIGhIC8ohejg=="],
|
||||
|
||||
"finalhandler": ["finalhandler@2.1.1", "https://mirrors.cloud.tencent.com/npm/finalhandler/-/finalhandler-2.1.1.tgz", { "dependencies": { "debug": "^4.4.0", "encodeurl": "^2.0.0", "escape-html": "^1.0.3", "on-finished": "^2.4.1", "parseurl": "^1.3.3", "statuses": "^2.0.1" } }, "sha512-S8KoZgRZN+a5rNwqTxlZZePjT/4cnm0ROV70LedRHZ0p8u9fRID0hJUZQpkKLzro8LfmC8sx23bY6tVNxv8pQA=="],
|
||||
|
||||
"find-up": ["find-up@4.1.0", "https://mirrors.cloud.tencent.com/npm/find-up/-/find-up-4.1.0.tgz", { "dependencies": { "locate-path": "^5.0.0", "path-exists": "^4.0.0" } }, "sha512-PpOwAdQ/YlXQ2vj8a3h8IipDuYRi3wceVQQGYWxNINccq40Anw7BlsEXCMbt1Zt+OLA6Fq9suIpIWD0OsnISlw=="],
|
||||
|
||||
"flora-colossus": ["flora-colossus@2.0.0", "https://mirrors.cloud.tencent.com/npm/flora-colossus/-/flora-colossus-2.0.0.tgz", { "dependencies": { "debug": "^4.3.4", "fs-extra": "^10.1.0" } }, "sha512-dz4HxH6pOvbUzZpZ/yXhafjbR2I8cenK5xL0KtBFb7U2ADsR+OwXifnxZjij/pZWF775uSCMzWVd+jDik2H2IA=="],
|
||||
|
||||
"follow-redirects": ["follow-redirects@1.15.11", "https://mirrors.cloud.tencent.com/npm/follow-redirects/-/follow-redirects-1.15.11.tgz", {}, "sha512-deG2P0JfjrTxl50XGCDyfI97ZGVCxIpfKYmfyrQ54n5FO/0gfIES8C/Psl6kWVDolizcaaxZJnTS0QSMxvnsBQ=="],
|
||||
|
||||
"form-data": ["form-data@4.0.5", "https://mirrors.cloud.tencent.com/npm/form-data/-/form-data-4.0.5.tgz", { "dependencies": { "asynckit": "^0.4.0", "combined-stream": "^1.0.8", "es-set-tostringtag": "^2.1.0", "hasown": "^2.0.2", "mime-types": "^2.1.12" } }, "sha512-8RipRLol37bNs2bhoV67fiTEvdTrbMUYcFTiy3+wuuOnUog2QBHCZWXDRijWQfAkhBj2Uf5UnVaiWwA5vdd82w=="],
|
||||
|
||||
"formdata-polyfill": ["formdata-polyfill@4.0.10", "https://mirrors.cloud.tencent.com/npm/formdata-polyfill/-/formdata-polyfill-4.0.10.tgz", { "dependencies": { "fetch-blob": "^3.1.2" } }, "sha512-buewHzMvYL29jdeQTVILecSaZKnt/RJWjoZCF5OW60Z67/GmSLBkOFM7qh1PI3zFNtJbaZL5eQu1vLfazOwj4g=="],
|
||||
|
||||
"forwarded": ["forwarded@0.2.0", "https://mirrors.cloud.tencent.com/npm/forwarded/-/forwarded-0.2.0.tgz", {}, "sha512-buRG0fpBtRHSTCOASe6hD258tEubFoRLb4ZNA6NxMVHNw2gOcwHo9wyablzMzOA5z9xA9L1KNjk/Nt6MT9aYow=="],
|
||||
|
||||
"fresh": ["fresh@2.0.0", "https://mirrors.cloud.tencent.com/npm/fresh/-/fresh-2.0.0.tgz", {}, "sha512-Rx/WycZ60HOaqLKAi6cHRKKI7zxWbJ31MhntmtwMoaTeF7XFH9hhBp8vITaMidfljRQ6eYWCKkaTK+ykVJHP2A=="],
|
||||
|
||||
"fs-extra": ["fs-extra@10.1.0", "https://mirrors.cloud.tencent.com/npm/fs-extra/-/fs-extra-10.1.0.tgz", { "dependencies": { "graceful-fs": "^4.2.0", "jsonfile": "^6.0.1", "universalify": "^2.0.0" } }, "sha512-oRXApq54ETRj4eMiFzGnHWGy+zo5raudjuxN0b8H7s/RU2oW0Wvsx9O0ACRN/kRq9E8Vu/ReskGB5o3ji+FzHQ=="],
|
||||
|
||||
"function-bind": ["function-bind@1.1.2", "https://mirrors.cloud.tencent.com/npm/function-bind/-/function-bind-1.1.2.tgz", {}, "sha512-7XHNxH7qX9xG5mIwxkhumTox/MIRNcOgDrxWsMt2pAr23WHp6MrRlN7FBSFpCpr+oVO0F744iUgR82nJMfG2SA=="],
|
||||
|
||||
"fuse.js": ["fuse.js@7.1.0", "https://mirrors.cloud.tencent.com/npm/fuse.js/-/fuse.js-7.1.0.tgz", {}, "sha512-trLf4SzuuUxfusZADLINj+dE8clK1frKdmqiJNb1Es75fmI5oY6X2mxLVUciLLjxqw/xr72Dhy+lER6dGd02FQ=="],
|
||||
|
||||
"galactus": ["galactus@1.0.0", "https://mirrors.cloud.tencent.com/npm/galactus/-/galactus-1.0.0.tgz", { "dependencies": { "debug": "^4.3.4", "flora-colossus": "^2.0.0", "fs-extra": "^10.1.0" } }, "sha512-R1fam6D4CyKQGNlvJne4dkNF+PvUUl7TAJInvTGa9fti9qAv95quQz29GXapA4d8Ec266mJJxFVh82M4GIIGDQ=="],
|
||||
|
||||
"gaxios": ["gaxios@7.1.4", "https://mirrors.cloud.tencent.com/npm/gaxios/-/gaxios-7.1.4.tgz", { "dependencies": { "extend": "^3.0.2", "https-proxy-agent": "^7.0.1", "node-fetch": "^3.3.2" } }, "sha512-bTIgTsM2bWn3XklZISBTQX7ZSddGW+IO3bMdGaemHZ3tbqExMENHLx6kKZ/KlejgrMtj8q7wBItt51yegqalrA=="],
|
||||
|
||||
"gcp-metadata": ["gcp-metadata@8.1.2", "https://mirrors.cloud.tencent.com/npm/gcp-metadata/-/gcp-metadata-8.1.2.tgz", { "dependencies": { "gaxios": "^7.0.0", "google-logging-utils": "^1.0.0", "json-bigint": "^1.0.0" } }, "sha512-zV/5HKTfCeKWnxG0Dmrw51hEWFGfcF2xiXqcA3+J90WDuP0SvoiSO5ORvcBsifmx/FoIjgQN3oNOGaQ5PhLFkg=="],
|
||||
|
||||
"get-caller-file": ["get-caller-file@2.0.5", "https://mirrors.cloud.tencent.com/npm/get-caller-file/-/get-caller-file-2.0.5.tgz", {}, "sha512-DyFP3BM/3YHTQOCUL/w0OZHR0lpKeGrxotcHWcqNEdnltqFwXVfhEBQ94eIo34AfQpo0rGki4cyIiftY06h2Fg=="],
|
||||
|
||||
"get-east-asian-width": ["get-east-asian-width@1.5.0", "https://mirrors.cloud.tencent.com/npm/get-east-asian-width/-/get-east-asian-width-1.5.0.tgz", {}, "sha512-CQ+bEO+Tva/qlmw24dCejulK5pMzVnUOFOijVogd3KQs07HnRIgp8TGipvCCRT06xeYEbpbgwaCxglFyiuIcmA=="],
|
||||
|
||||
"get-intrinsic": ["get-intrinsic@1.3.0", "https://mirrors.cloud.tencent.com/npm/get-intrinsic/-/get-intrinsic-1.3.0.tgz", { "dependencies": { "call-bind-apply-helpers": "^1.0.2", "es-define-property": "^1.0.1", "es-errors": "^1.3.0", "es-object-atoms": "^1.1.1", "function-bind": "^1.1.2", "get-proto": "^1.0.1", "gopd": "^1.2.0", "has-symbols": "^1.1.0", "hasown": "^2.0.2", "math-intrinsics": "^1.1.0" } }, "sha512-9fSjSaos/fRIVIp+xSJlE6lfwhES7LNtKaCBIamHsjr2na1BiABJPo0mOjjz8GJDURarmCPGqaiVg5mfjb98CQ=="],
|
||||
|
||||
"get-proto": ["get-proto@1.0.1", "https://mirrors.cloud.tencent.com/npm/get-proto/-/get-proto-1.0.1.tgz", { "dependencies": { "dunder-proto": "^1.0.1", "es-object-atoms": "^1.0.0" } }, "sha512-sTSfBjoXBp89JvIKIefqw7U2CCebsc74kiY6awiGogKtoSGbgjYE/G/+l9sF3MWFPNc9IcoOC4ODfKHfxFmp0g=="],
|
||||
|
||||
"get-stream": ["get-stream@9.0.1", "https://mirrors.cloud.tencent.com/npm/get-stream/-/get-stream-9.0.1.tgz", { "dependencies": { "@sec-ant/readable-stream": "^0.4.1", "is-stream": "^4.0.1" } }, "sha512-kVCxPF3vQM/N0B1PmoqVUqgHP+EeVjmZSQn+1oCRPxd2P21P2F19lIgbR3HBosbB1PUhOAoctJnfEn2GbN2eZA=="],
|
||||
|
||||
"google-auth-library": ["google-auth-library@10.6.2", "https://mirrors.cloud.tencent.com/npm/google-auth-library/-/google-auth-library-10.6.2.tgz", { "dependencies": { "base64-js": "^1.3.0", "ecdsa-sig-formatter": "^1.0.11", "gaxios": "^7.1.4", "gcp-metadata": "8.1.2", "google-logging-utils": "1.1.3", "jws": "^4.0.0" } }, "sha512-e27Z6EThmVNNvtYASwQxose/G57rkRuaRbQyxM2bvYLLX/GqWZ5chWq2EBoUchJbCc57eC9ArzO5wMsEmWftCw=="],
|
||||
|
||||
"google-logging-utils": ["google-logging-utils@1.1.3", "https://mirrors.cloud.tencent.com/npm/google-logging-utils/-/google-logging-utils-1.1.3.tgz", {}, "sha512-eAmLkjDjAFCVXg7A1unxHsLf961m6y17QFqXqAXGj/gVkKFrEICfStRfwUlGNfeCEjNRa32JEWOUTlYXPyyKvA=="],
|
||||
|
||||
"gopd": ["gopd@1.2.0", "https://mirrors.cloud.tencent.com/npm/gopd/-/gopd-1.2.0.tgz", {}, "sha512-ZUKRh6/kUFoAiTAtTYPZJ3hw9wNxx+BIBOijnlG9PnrJsCcSjs1wyyD6vJpaYtgnzDrKYRSqf3OO6Rfa93xsRg=="],
|
||||
|
||||
"graceful-fs": ["graceful-fs@4.2.11", "https://mirrors.cloud.tencent.com/npm/graceful-fs/-/graceful-fs-4.2.11.tgz", {}, "sha512-RbJ5/jmFcNNCcDV5o9eTnBLJ/HszWV0P73bc+Ff4nS/rJj+YaS6IGyiOL0VoBYX+l1Wrl3k63h/KrH+nhJ0XvQ=="],
|
||||
|
||||
"has-flag": ["has-flag@5.0.1", "https://mirrors.cloud.tencent.com/npm/has-flag/-/has-flag-5.0.1.tgz", {}, "sha512-CsNUt5x9LUdx6hnk/E2SZLsDyvfqANZSUq4+D3D8RzDJ2M+HDTIkF60ibS1vHaK55vzgiZw1bEPFG9yH7l33wA=="],
|
||||
|
||||
"has-symbols": ["has-symbols@1.1.0", "https://mirrors.cloud.tencent.com/npm/has-symbols/-/has-symbols-1.1.0.tgz", {}, "sha512-1cDNdwJ2Jaohmb3sg4OmKaMBwuC48sYni5HUw2DvsC8LjGTLK9h+eb1X6RyuOHe4hT0ULCW68iomhjUoKUqlPQ=="],
|
||||
|
||||
"has-tostringtag": ["has-tostringtag@1.0.2", "https://mirrors.cloud.tencent.com/npm/has-tostringtag/-/has-tostringtag-1.0.2.tgz", { "dependencies": { "has-symbols": "^1.0.3" } }, "sha512-NqADB8VjPFLM2V0VvHUewwwsw0ZWBaIdgo+ieHtK3hasLz4qeCRjYcqfB6AQrBggRKppKF8L52/VqdVsO47Dlw=="],
|
||||
|
||||
"hasown": ["hasown@2.0.2", "https://mirrors.cloud.tencent.com/npm/hasown/-/hasown-2.0.2.tgz", { "dependencies": { "function-bind": "^1.1.2" } }, "sha512-0hJU9SCPvmMzIBdZFqNPXWa6dqh7WdH0cII9y+CyS8rG3nL48Bclra9HmKhVVUHyPWNH5Y7xDwAB7bfgSjkUMQ=="],
|
||||
|
||||
"highlight.js": ["highlight.js@11.11.1", "https://mirrors.cloud.tencent.com/npm/highlight.js/-/highlight.js-11.11.1.tgz", {}, "sha512-Xwwo44whKBVCYoliBQwaPvtd/2tYFkRQtXDWj1nackaV2JPXx3L0+Jvd8/qCJ2p+ML0/XVkJ2q+Mr+UVdpJK5w=="],
|
||||
|
||||
"hono": ["hono@4.12.9", "https://mirrors.cloud.tencent.com/npm/hono/-/hono-4.12.9.tgz", {}, "sha512-wy3T8Zm2bsEvxKZM5w21VdHDDcwVS1yUFFY6i8UobSsKfFceT7TOwhbhfKsDyx7tYQlmRM5FLpIuYvNFyjctiA=="],
|
||||
|
||||
"http-errors": ["http-errors@2.0.1", "https://mirrors.cloud.tencent.com/npm/http-errors/-/http-errors-2.0.1.tgz", { "dependencies": { "depd": "~2.0.0", "inherits": "~2.0.4", "setprototypeof": "~1.2.0", "statuses": "~2.0.2", "toidentifier": "~1.0.1" } }, "sha512-4FbRdAX+bSdmo4AUFuS0WNiPz8NgFt+r8ThgNWmlrjQjt1Q7ZR9+zTlce2859x4KSXrwIsaeTqDoKQmtP8pLmQ=="],
|
||||
|
||||
"https-proxy-agent": ["https-proxy-agent@8.0.0", "https://mirrors.cloud.tencent.com/npm/https-proxy-agent/-/https-proxy-agent-8.0.0.tgz", { "dependencies": { "agent-base": "8.0.0", "debug": "^4.3.4" } }, "sha512-YYeW+iCnAS3xhvj2dvVoWgsbca3RfQy/IlaNHHOtDmU0jMqPI9euIq3Y9BJETdxk16h9NHHCKqp/KB9nIMStCQ=="],
|
||||
|
||||
"human-signals": ["human-signals@8.0.1", "https://mirrors.cloud.tencent.com/npm/human-signals/-/human-signals-8.0.1.tgz", {}, "sha512-eKCa6bwnJhvxj14kZk5NCPc6Hb6BdsU9DZcOnmQKSnO1VKrfV0zCvtttPZUsBvjmNDn8rpcJfpwSYnHBjc95MQ=="],
|
||||
|
||||
"iconv-lite": ["iconv-lite@0.7.2", "https://mirrors.cloud.tencent.com/npm/iconv-lite/-/iconv-lite-0.7.2.tgz", { "dependencies": { "safer-buffer": ">= 2.1.2 < 3.0.0" } }, "sha512-im9DjEDQ55s9fL4EYzOAv0yMqmMBSZp6G0VvFyTMPKWxiSBHUj9NW/qqLmXUwXrrM7AvqSlTCfvqRb0cM8yYqw=="],
|
||||
|
||||
"ignore": ["ignore@7.0.5", "https://mirrors.cloud.tencent.com/npm/ignore/-/ignore-7.0.5.tgz", {}, "sha512-Hs59xBNfUIunMFgWAbGX5cq6893IbWg4KnrjbYwX3tx0ztorVgTDA6B2sxf8ejHJ4wz8BqGUMYlnzNBer5NvGg=="],
|
||||
|
||||
"indent-string": ["indent-string@5.0.0", "https://mirrors.cloud.tencent.com/npm/indent-string/-/indent-string-5.0.0.tgz", {}, "sha512-m6FAo/spmsW2Ab2fU35JTYwtOKa2yAwXSwgjSv1TJzh4Mh7mC3lzAOVLBprb72XsTrgkEIsl7YrFNAiDiRhIGg=="],
|
||||
|
||||
"inherits": ["inherits@2.0.4", "https://mirrors.cloud.tencent.com/npm/inherits/-/inherits-2.0.4.tgz", {}, "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ=="],
|
||||
|
||||
"ink": ["ink@6.8.0", "https://mirrors.cloud.tencent.com/npm/ink/-/ink-6.8.0.tgz", { "dependencies": { "@alcalzone/ansi-tokenize": "^0.2.4", "ansi-escapes": "^7.3.0", "ansi-styles": "^6.2.1", "auto-bind": "^5.0.1", "chalk": "^5.6.0", "cli-boxes": "^3.0.0", "cli-cursor": "^4.0.0", "cli-truncate": "^5.1.1", "code-excerpt": "^4.0.0", "es-toolkit": "^1.39.10", "indent-string": "^5.0.0", "is-in-ci": "^2.0.0", "patch-console": "^2.0.0", "react-reconciler": "^0.33.0", "scheduler": "^0.27.0", "signal-exit": "^3.0.7", "slice-ansi": "^8.0.0", "stack-utils": "^2.0.6", "string-width": "^8.1.1", "terminal-size": "^4.0.1", "type-fest": "^5.4.1", "widest-line": "^6.0.0", "wrap-ansi": "^9.0.0", "ws": "^8.18.0", "yoga-layout": "~3.2.1" }, "peerDependencies": { "@types/react": ">=19.0.0", "react": ">=19.0.0", "react-devtools-core": ">=6.1.2" }, "optionalPeers": ["@types/react", "react-devtools-core"] }, "sha512-sbl1RdLOgkO9isK42WCZlJCFN9hb++sX9dsklOvfd1YQ3bQ2AiFu12Q6tFlr0HvEUvzraJntQCCpfEoUe9DSzA=="],
|
||||
|
||||
"ip-address": ["ip-address@10.1.0", "https://mirrors.cloud.tencent.com/npm/ip-address/-/ip-address-10.1.0.tgz", {}, "sha512-XXADHxXmvT9+CRxhXg56LJovE+bmWnEWB78LB83VZTprKTmaC5QfruXocxzTZ2Kl0DNwKuBdlIhjL8LeY8Sf8Q=="],
|
||||
|
||||
"ipaddr.js": ["ipaddr.js@1.9.1", "https://mirrors.cloud.tencent.com/npm/ipaddr.js/-/ipaddr.js-1.9.1.tgz", {}, "sha512-0KI/607xoxSToH7GjN1FfSbLoU0+btTicjsQSWQlh/hZykN8KpmMf7uYwPW3R+akZ6R/w18ZlXSHBYXiYUPO3g=="],
|
||||
|
||||
"is-fullwidth-code-point": ["is-fullwidth-code-point@5.1.0", "https://mirrors.cloud.tencent.com/npm/is-fullwidth-code-point/-/is-fullwidth-code-point-5.1.0.tgz", { "dependencies": { "get-east-asian-width": "^1.3.1" } }, "sha512-5XHYaSyiqADb4RnZ1Bdad6cPp8Toise4TzEjcOYDHZkTCbKgiUl7WTUCpNWHuxmDt91wnsZBc9xinNzopv3JMQ=="],
|
||||
|
||||
"is-in-ci": ["is-in-ci@2.0.0", "https://mirrors.cloud.tencent.com/npm/is-in-ci/-/is-in-ci-2.0.0.tgz", { "bin": { "is-in-ci": "cli.js" } }, "sha512-cFeerHriAnhrQSbpAxL37W1wcJKUUX07HyLWZCW1URJT/ra3GyUTzBgUnh24TMVfNTV2Hij2HLxkPHFZfOZy5w=="],
|
||||
|
||||
"is-plain-obj": ["is-plain-obj@4.1.0", "https://mirrors.cloud.tencent.com/npm/is-plain-obj/-/is-plain-obj-4.1.0.tgz", {}, "sha512-+Pgi+vMuUNkJyExiMBt5IlFoMyKnr5zhJ4Uspz58WOhBF5QoIZkFyNHIbBAtHwzVAgk5RtndVNsDRN61/mmDqg=="],
|
||||
|
||||
"is-promise": ["is-promise@4.0.0", "https://mirrors.cloud.tencent.com/npm/is-promise/-/is-promise-4.0.0.tgz", {}, "sha512-hvpoI6korhJMnej285dSg6nu1+e6uxs7zG3BYAm5byqDsgJNWwxzM6z6iZiAgQR4TJ30JmBTOwqZUw3WlyH3AQ=="],
|
||||
|
||||
"is-safe-filename": ["is-safe-filename@0.1.1", "https://mirrors.cloud.tencent.com/npm/is-safe-filename/-/is-safe-filename-0.1.1.tgz", {}, "sha512-4SrR7AdnY11LHfDKTZY1u6Ga3RuxZdl3YKWWShO5iyuG5h8QS4GD2tOb04peBJ5I7pXbR+CGBNEhTcwK+FzN3g=="],
|
||||
|
||||
"is-stream": ["is-stream@4.0.1", "https://mirrors.cloud.tencent.com/npm/is-stream/-/is-stream-4.0.1.tgz", {}, "sha512-Dnz92NInDqYckGEUJv689RbRiTSEHCQ7wOVeALbkOz999YpqT46yMRIGtSNl2iCL1waAZSx40+h59NV/EwzV/A=="],
|
||||
|
||||
"is-unicode-supported": ["is-unicode-supported@2.1.0", "https://mirrors.cloud.tencent.com/npm/is-unicode-supported/-/is-unicode-supported-2.1.0.tgz", {}, "sha512-mE00Gnza5EEB3Ds0HfMyllZzbBrmLOX3vfWoj9A9PEnTfratQ/BcaJOuMhnkhjXvb2+FkY3VuHqtAGpTPmglFQ=="],
|
||||
|
||||
"isexe": ["isexe@2.0.0", "https://mirrors.cloud.tencent.com/npm/isexe/-/isexe-2.0.0.tgz", {}, "sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw=="],
|
||||
|
||||
"jose": ["jose@6.2.2", "https://mirrors.cloud.tencent.com/npm/jose/-/jose-6.2.2.tgz", {}, "sha512-d7kPDd34KO/YnzaDOlikGpOurfF0ByC2sEV4cANCtdqLlTfBlw2p14O/5d/zv40gJPbIQxfES3nSx1/oYNyuZQ=="],
|
||||
|
||||
"json-bigint": ["json-bigint@1.0.0", "https://mirrors.cloud.tencent.com/npm/json-bigint/-/json-bigint-1.0.0.tgz", { "dependencies": { "bignumber.js": "^9.0.0" } }, "sha512-SiPv/8VpZuWbvLSMtTDU8hEfrZWg/mH/nV/b4o0CYbSxu1UIQPLdwKOCIyLQX+VIPO5vrLX3i8qtqFyhdPSUSQ=="],
|
||||
|
||||
"json-schema-to-ts": ["json-schema-to-ts@3.1.1", "https://mirrors.cloud.tencent.com/npm/json-schema-to-ts/-/json-schema-to-ts-3.1.1.tgz", { "dependencies": { "@babel/runtime": "^7.18.3", "ts-algebra": "^2.0.0" } }, "sha512-+DWg8jCJG2TEnpy7kOm/7/AxaYoaRbjVB4LFZLySZlWn8exGs3A4OLJR966cVvU26N7X9TWxl+Jsw7dzAqKT6g=="],
|
||||
|
||||
"json-schema-traverse": ["json-schema-traverse@1.0.0", "https://mirrors.cloud.tencent.com/npm/json-schema-traverse/-/json-schema-traverse-1.0.0.tgz", {}, "sha512-NM8/P9n3XjXhIZn1lLhkFaACTOURQXjWhV4BA/RnOv8xvgqtqpAX9IO4mRQxSx1Rlo4tqzeqb0sOlruaOy3dug=="],
|
||||
|
||||
"json-schema-typed": ["json-schema-typed@8.0.2", "https://mirrors.cloud.tencent.com/npm/json-schema-typed/-/json-schema-typed-8.0.2.tgz", {}, "sha512-fQhoXdcvc3V28x7C7BMs4P5+kNlgUURe2jmUT1T//oBRMDrqy1QPelJimwZGo7Hg9VPV3EQV5Bnq4hbFy2vetA=="],
|
||||
|
||||
"jsonc-parser": ["jsonc-parser@3.3.1", "https://mirrors.cloud.tencent.com/npm/jsonc-parser/-/jsonc-parser-3.3.1.tgz", {}, "sha512-HUgH65KyejrUFPvHFPbqOY0rsFip3Bo5wb4ngvdi1EpCYWUQDC5V+Y7mZws+DLkr4M//zQJoanu1SP+87Dv1oQ=="],
|
||||
|
||||
"jsonfile": ["jsonfile@6.2.0", "https://mirrors.cloud.tencent.com/npm/jsonfile/-/jsonfile-6.2.0.tgz", { "dependencies": { "universalify": "^2.0.0" }, "optionalDependencies": { "graceful-fs": "^4.1.6" } }, "sha512-FGuPw30AdOIUTRMC2OMRtQV+jkVj2cfPqSeWXv1NEAJ1qZ5zb1X6z1mFhbfOB/iy3ssJCD+3KuZ8r8C3uVFlAg=="],
|
||||
|
||||
"jwa": ["jwa@2.0.1", "https://mirrors.cloud.tencent.com/npm/jwa/-/jwa-2.0.1.tgz", { "dependencies": { "buffer-equal-constant-time": "^1.0.1", "ecdsa-sig-formatter": "1.0.11", "safe-buffer": "^5.0.1" } }, "sha512-hRF04fqJIP8Abbkq5NKGN0Bbr3JxlQ+qhZufXVr0DvujKy93ZCbXZMHDL4EOtodSbCWxOqR8MS1tXA5hwqCXDg=="],
|
||||
|
||||
"jws": ["jws@4.0.1", "https://mirrors.cloud.tencent.com/npm/jws/-/jws-4.0.1.tgz", { "dependencies": { "jwa": "^2.0.1", "safe-buffer": "^5.0.1" } }, "sha512-EKI/M/yqPncGUUh44xz0PxSidXFr/+r0pA70+gIYhjv+et7yxM+s29Y+VGDkovRofQem0fs7Uvf4+YmAdyRduA=="],
|
||||
|
||||
"locate-path": ["locate-path@5.0.0", "https://mirrors.cloud.tencent.com/npm/locate-path/-/locate-path-5.0.0.tgz", { "dependencies": { "p-locate": "^4.1.0" } }, "sha512-t7hw9pI+WvuwNJXwk5zVHpyhIqzg2qTlklJOf0mVxGSbe3Fp2VieZcduNYjaLDoy6p9uGpQEGWG87WpMKlNq8g=="],
|
||||
|
||||
"lodash-es": ["lodash-es@4.17.23", "https://mirrors.cloud.tencent.com/npm/lodash-es/-/lodash-es-4.17.23.tgz", {}, "sha512-kVI48u3PZr38HdYz98UmfPnXl2DXrpdctLrFLCd3kOx1xUkOmpFPx7gCWWM5MPkL/fD8zb+Ph0QzjGFs4+hHWg=="],
|
||||
|
||||
"lodash.debounce": ["lodash.debounce@4.0.8", "https://mirrors.cloud.tencent.com/npm/lodash.debounce/-/lodash.debounce-4.0.8.tgz", {}, "sha512-FT1yDzDYEoYWhnSGnpE/4Kj1fLZkDFyqRb7fNt6FdYOSxlUWAtp42Eh6Wb0rGIv/m9Bgo7x4GhQbm5Ys4SG5ow=="],
|
||||
|
||||
"lru-cache": ["lru-cache@11.2.7", "https://mirrors.cloud.tencent.com/npm/lru-cache/-/lru-cache-11.2.7.tgz", {}, "sha512-aY/R+aEsRelme17KGQa/1ZSIpLpNYYrhcrepKTZgE+W3WM16YMCaPwOHLHsmopZHELU0Ojin1lPVxKR0MihncA=="],
|
||||
|
||||
"marked": ["marked@17.0.5", "https://mirrors.cloud.tencent.com/npm/marked/-/marked-17.0.5.tgz", { "bin": { "marked": "bin/marked.js" } }, "sha512-6hLvc0/JEbRjRgzI6wnT2P1XuM1/RrrDEX0kPt0N7jGm1133g6X7DlxFasUIx+72aKAr904GTxhSLDrd5DIlZg=="],
|
||||
|
||||
"math-intrinsics": ["math-intrinsics@1.1.0", "https://mirrors.cloud.tencent.com/npm/math-intrinsics/-/math-intrinsics-1.1.0.tgz", {}, "sha512-/IXtbwEk5HTPyEwyKX6hGkYXxM9nbj64B+ilVJnC/R6B0pH5G4V3b0pVbL7DBj4tkhBAppbQUlf6F6Xl9LHu1g=="],
|
||||
|
||||
"media-typer": ["media-typer@1.1.0", "https://mirrors.cloud.tencent.com/npm/media-typer/-/media-typer-1.1.0.tgz", {}, "sha512-aisnrDP4GNe06UcKFnV5bfMNPBUw4jsLGaWwWfnH3v02GnBuXX2MCVn5RbrWo0j3pczUilYblq7fQ7Nw2t5XKw=="],
|
||||
|
||||
"merge-descriptors": ["merge-descriptors@2.0.0", "https://mirrors.cloud.tencent.com/npm/merge-descriptors/-/merge-descriptors-2.0.0.tgz", {}, "sha512-Snk314V5ayFLhp3fkUREub6WtjBfPdCPY1Ln8/8munuLuiYhsABgBVWsozAG+MWMbVEvcdcpbi9R7ww22l9Q3g=="],
|
||||
|
||||
"mime-db": ["mime-db@1.54.0", "https://mirrors.cloud.tencent.com/npm/mime-db/-/mime-db-1.54.0.tgz", {}, "sha512-aU5EJuIN2WDemCcAp2vFBfp/m4EAhWJnUNSSw0ixs7/kXbd6Pg64EmwJkNdFhB8aWt1sH2CTXrLxo/iAGV3oPQ=="],
|
||||
|
||||
"mime-types": ["mime-types@3.0.2", "https://mirrors.cloud.tencent.com/npm/mime-types/-/mime-types-3.0.2.tgz", { "dependencies": { "mime-db": "^1.54.0" } }, "sha512-Lbgzdk0h4juoQ9fCKXW4by0UJqj+nOOrI9MJ1sSj4nI8aI2eo1qmvQEie4VD1glsS250n15LsWsYtCugiStS5A=="],
|
||||
|
||||
"mimic-fn": ["mimic-fn@2.1.0", "https://mirrors.cloud.tencent.com/npm/mimic-fn/-/mimic-fn-2.1.0.tgz", {}, "sha512-OqbOk5oEQeAZ8WXWydlu9HJjz9WVdEIvamMCcXmuqUYjTknH/sqsWvhQ3vgwKFRR1HpjvNBKQ37nbJgYzGqGcg=="],
|
||||
|
||||
"modifiers-napi": ["modifiers-napi@file:shims/modifiers-napi", {}],
|
||||
|
||||
"ms": ["ms@2.1.3", "https://mirrors.cloud.tencent.com/npm/ms/-/ms-2.1.3.tgz", {}, "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA=="],
|
||||
|
||||
"mute-stream": ["mute-stream@1.0.0", "https://mirrors.cloud.tencent.com/npm/mute-stream/-/mute-stream-1.0.0.tgz", {}, "sha512-avsJQhyd+680gKXyG/sQc0nXaC6rBkPOfyHYcFb9+hdkqQkR9bdnkJ0AMZhke0oesPqIO+mFFJ+IdBc7mst4IA=="],
|
||||
|
||||
"negotiator": ["negotiator@1.0.0", "https://mirrors.cloud.tencent.com/npm/negotiator/-/negotiator-1.0.0.tgz", {}, "sha512-8Ofs/AUQh8MaEcrlq5xOX0CQ9ypTF5dl78mjlMNfOK08fzpgTHQRQPBxcPlEtIw0yRpws+Zo/3r+5WRby7u3Gg=="],
|
||||
|
||||
"node-domexception": ["node-domexception@1.0.0", "https://mirrors.cloud.tencent.com/npm/node-domexception/-/node-domexception-1.0.0.tgz", {}, "sha512-/jKZoMpw0F8GRwl4/eLROPA3cfcXtLApP0QzLmUT/HuPCZWyB7IY9ZrMeKw2O/nFIqPQB3PVM9aYm0F312AXDQ=="],
|
||||
|
||||
"node-fetch": ["node-fetch@3.3.2", "https://mirrors.cloud.tencent.com/npm/node-fetch/-/node-fetch-3.3.2.tgz", { "dependencies": { "data-uri-to-buffer": "^4.0.0", "fetch-blob": "^3.1.4", "formdata-polyfill": "^4.0.10" } }, "sha512-dRB78srN/l6gqWulah9SrxeYnxeddIG30+GOqK/9OlLVyLg3HPnr6SqOWTWOXKRwC2eGYCkZ59NNuSgvSrpgOA=="],
|
||||
|
||||
"node-forge": ["node-forge@1.4.0", "https://mirrors.cloud.tencent.com/npm/node-forge/-/node-forge-1.4.0.tgz", {}, "sha512-LarFH0+6VfriEhqMMcLX2F7SwSXeWwnEAJEsYm5QKWchiVYVvJyV9v7UDvUv+w5HO23ZpQTXDv/GxdDdMyOuoQ=="],
|
||||
|
||||
"npm-run-path": ["npm-run-path@6.0.0", "https://mirrors.cloud.tencent.com/npm/npm-run-path/-/npm-run-path-6.0.0.tgz", { "dependencies": { "path-key": "^4.0.0", "unicorn-magic": "^0.3.0" } }, "sha512-9qny7Z9DsQU8Ou39ERsPU4OZQlSTP47ShQzuKZ6PRXpYLtIFgl/DEBYEXKlvcEa+9tHVcK8CF81Y2V72qaZhWA=="],
|
||||
|
||||
"object-assign": ["object-assign@4.1.1", "https://mirrors.cloud.tencent.com/npm/object-assign/-/object-assign-4.1.1.tgz", {}, "sha512-rJgTQnkUnH1sFw8yT6VSU3zD3sWmu6sZhIseY8VX+GRu3P6F7Fu+JNDoXfklElbLJSnc3FUQHVe4cU5hj+BcUg=="],
|
||||
|
||||
"object-inspect": ["object-inspect@1.13.4", "https://mirrors.cloud.tencent.com/npm/object-inspect/-/object-inspect-1.13.4.tgz", {}, "sha512-W67iLl4J2EXEGTbfeHCffrjDfitvLANg0UlX3wFUUSTx92KXRFegMHUVgSqE+wvhAbi4WqjGg9czysTV2Epbew=="],
|
||||
|
||||
"on-finished": ["on-finished@2.4.1", "https://mirrors.cloud.tencent.com/npm/on-finished/-/on-finished-2.4.1.tgz", { "dependencies": { "ee-first": "1.1.1" } }, "sha512-oVlzkg3ENAhCk2zdv7IJwd/QUD4z2RxRwpkcGY8psCVcCYZNq4wYnVWALHM+brtuJjePWiYF/ClmuDr8Ch5+kg=="],
|
||||
|
||||
"once": ["once@1.4.0", "https://mirrors.cloud.tencent.com/npm/once/-/once-1.4.0.tgz", { "dependencies": { "wrappy": "1" } }, "sha512-lNaJgI+2Q5URQBkccEKHTQOPaXdUxnZZElQTZY0MFUAuaEqe1E+Nyvgdz/aIyNi6Z9MzO5dv1H8n58/GELp3+w=="],
|
||||
|
||||
"onetime": ["onetime@5.1.2", "https://mirrors.cloud.tencent.com/npm/onetime/-/onetime-5.1.2.tgz", { "dependencies": { "mimic-fn": "^2.1.0" } }, "sha512-kbpaSSGJTWdAY5KPVeMOKXSrPtr8C8C7wodJbcsd51jRnmD+GZu8Y0VoU6Dm5Z4vWr0Ig/1NKuWRKf7j5aaYSg=="],
|
||||
|
||||
"os-tmpdir": ["os-tmpdir@1.0.2", "https://mirrors.cloud.tencent.com/npm/os-tmpdir/-/os-tmpdir-1.0.2.tgz", {}, "sha512-D2FR03Vir7FIu45XBY20mTb+/ZSWB00sjU9jdQXt83gDrI4Ztz5Fs7/yy74g2N5SVQY4xY1qDr4rNddwYRVX0g=="],
|
||||
|
||||
"p-limit": ["p-limit@2.3.0", "https://mirrors.cloud.tencent.com/npm/p-limit/-/p-limit-2.3.0.tgz", { "dependencies": { "p-try": "^2.0.0" } }, "sha512-//88mFWSJx8lxCzwdAABTJL2MyWB12+eIY7MDL2SqLmAkeKU9qxRvWuSyTjm3FUmpBEMuFfckAIqEaVGUDxb6w=="],
|
||||
|
||||
"p-locate": ["p-locate@4.1.0", "https://mirrors.cloud.tencent.com/npm/p-locate/-/p-locate-4.1.0.tgz", { "dependencies": { "p-limit": "^2.2.0" } }, "sha512-R79ZZ/0wAxKGu3oYMlz8jy/kbhsNrS7SKZ7PxEHBgJ5+F2mtFW2fK2cOtBh1cHYkQsbzFV7I+EoRKe6Yt0oK7A=="],
|
||||
|
||||
"p-map": ["p-map@7.0.4", "https://mirrors.cloud.tencent.com/npm/p-map/-/p-map-7.0.4.tgz", {}, "sha512-tkAQEw8ysMzmkhgw8k+1U/iPhWNhykKnSk4Rd5zLoPJCuJaGRPo6YposrZgaxHKzDHdDWWZvE/Sk7hsL2X/CpQ=="],
|
||||
|
||||
"p-try": ["p-try@2.2.0", "https://mirrors.cloud.tencent.com/npm/p-try/-/p-try-2.2.0.tgz", {}, "sha512-R4nPAVTAU0B9D35/Gk3uJf/7XYbQcyohSKdvAxIRSNghFl4e71hVoGnBNQz9cWaXxO2I10KTC+3jMdvvoKw6dQ=="],
|
||||
|
||||
"parse-ms": ["parse-ms@4.0.0", "https://mirrors.cloud.tencent.com/npm/parse-ms/-/parse-ms-4.0.0.tgz", {}, "sha512-TXfryirbmq34y8QBwgqCVLi+8oA3oWx2eAnSn62ITyEhEYaWRlVZ2DvMM9eZbMs/RfxPu/PK/aBLyGj4IrqMHw=="],
|
||||
|
||||
"parseurl": ["parseurl@1.3.3", "https://mirrors.cloud.tencent.com/npm/parseurl/-/parseurl-1.3.3.tgz", {}, "sha512-CiyeOxFT/JZyN5m0z9PfXw4SCBJ6Sygz1Dpl0wqjlhDEGGBP1GnsUVEL0p63hoG1fcj3fHynXi9NYO4nWOL+qQ=="],
|
||||
|
||||
"patch-console": ["patch-console@2.0.0", "https://mirrors.cloud.tencent.com/npm/patch-console/-/patch-console-2.0.0.tgz", {}, "sha512-0YNdUceMdaQwoKce1gatDScmMo5pu/tfABfnzEqeG0gtTmd7mh/WcwgUjtAeOU7N8nFFlbQBnFK2gXW5fGvmMA=="],
|
||||
|
||||
"path-exists": ["path-exists@4.0.0", "https://mirrors.cloud.tencent.com/npm/path-exists/-/path-exists-4.0.0.tgz", {}, "sha512-ak9Qy5Q7jYb2Wwcey5Fpvg2KoAc/ZIhLSLOSBmRmygPsGwkVVt0fZa0qrtMz+m6tJTAHfZQ8FnmB4MG4LWy7/w=="],
|
||||
|
||||
"path-expression-matcher": ["path-expression-matcher@1.2.0", "https://mirrors.cloud.tencent.com/npm/path-expression-matcher/-/path-expression-matcher-1.2.0.tgz", {}, "sha512-DwmPWeFn+tq7TiyJ2CxezCAirXjFxvaiD03npak3cRjlP9+OjTmSy1EpIrEbh+l6JgUundniloMLDQ/6VTdhLQ=="],
|
||||
|
||||
"path-key": ["path-key@3.1.1", "https://mirrors.cloud.tencent.com/npm/path-key/-/path-key-3.1.1.tgz", {}, "sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q=="],
|
||||
|
||||
"path-to-regexp": ["path-to-regexp@8.4.1", "https://mirrors.cloud.tencent.com/npm/path-to-regexp/-/path-to-regexp-8.4.1.tgz", {}, "sha512-fvU78fIjZ+SBM9YwCknCvKOUKkLVqtWDVctl0s7xIqfmfb38t2TT4ZU2gHm+Z8xGwgW+QWEU3oQSAzIbo89Ggw=="],
|
||||
|
||||
"picomatch": ["picomatch@4.0.4", "https://mirrors.cloud.tencent.com/npm/picomatch/-/picomatch-4.0.4.tgz", {}, "sha512-QP88BAKvMam/3NxH6vj2o21R6MjxZUAd6nlwAS/pnGvN9IVLocLHxGYIzFhg6fUQ+5th6P4dv4eW9jX3DSIj7A=="],
|
||||
|
||||
"pkce-challenge": ["pkce-challenge@5.0.1", "https://mirrors.cloud.tencent.com/npm/pkce-challenge/-/pkce-challenge-5.0.1.tgz", {}, "sha512-wQ0b/W4Fr01qtpHlqSqspcj3EhBvimsdh0KlHhH8HRZnMsEa0ea2fTULOXOS9ccQr3om+GcGRk4e+isrZWV8qQ=="],
|
||||
|
||||
"pngjs": ["pngjs@5.0.0", "https://mirrors.cloud.tencent.com/npm/pngjs/-/pngjs-5.0.0.tgz", {}, "sha512-40QW5YalBNfQo5yRYmiw7Yz6TKKVr3h6970B2YE+3fQpsWcrbj1PzJgxeJ19DRQjhMbKPIuMY8rFaXc8moolVw=="],
|
||||
|
||||
"pretty-bytes": ["pretty-bytes@5.6.0", "https://mirrors.cloud.tencent.com/npm/pretty-bytes/-/pretty-bytes-5.6.0.tgz", {}, "sha512-FFw039TmrBqFK8ma/7OL3sDz/VytdtJr044/QUJtH0wK9lb9jLq9tJyIxUwtQJHwar2BqtiA4iCWSwo9JLkzFg=="],
|
||||
|
||||
"pretty-ms": ["pretty-ms@9.3.0", "https://mirrors.cloud.tencent.com/npm/pretty-ms/-/pretty-ms-9.3.0.tgz", { "dependencies": { "parse-ms": "^4.0.0" } }, "sha512-gjVS5hOP+M3wMm5nmNOucbIrqudzs9v/57bWRHQWLYklXqoXKrVfYW2W9+glfGsqtPgpiz5WwyEEB+ksXIx3gQ=="],
|
||||
|
||||
"proper-lockfile": ["proper-lockfile@4.1.2", "https://mirrors.cloud.tencent.com/npm/proper-lockfile/-/proper-lockfile-4.1.2.tgz", { "dependencies": { "graceful-fs": "^4.2.4", "retry": "^0.12.0", "signal-exit": "^3.0.2" } }, "sha512-TjNPblN4BwAWMXU8s9AEz4JmQxnD1NNL7bNOY/AKUzyamc379FWASUhc/K1pL2noVb+XmZKLL68cjzLsiOAMaA=="],
|
||||
|
||||
"proxy-addr": ["proxy-addr@2.0.7", "https://mirrors.cloud.tencent.com/npm/proxy-addr/-/proxy-addr-2.0.7.tgz", { "dependencies": { "forwarded": "0.2.0", "ipaddr.js": "1.9.1" } }, "sha512-llQsMLSUDUPT44jdrU/O37qlnifitDP+ZwrmmZcoSKyLKvtZxpyV0n2/bD/N4tBAAZ/gJEdZU7KMraoK1+XYAg=="],
|
||||
|
||||
"proxy-from-env": ["proxy-from-env@2.1.0", "https://mirrors.cloud.tencent.com/npm/proxy-from-env/-/proxy-from-env-2.1.0.tgz", {}, "sha512-cJ+oHTW1VAEa8cJslgmUZrc+sjRKgAKl3Zyse6+PV38hZe/V6Z14TbCuXcan9F9ghlz4QrFr2c92TNF82UkYHA=="],
|
||||
|
||||
"qrcode": ["qrcode@1.5.4", "https://mirrors.cloud.tencent.com/npm/qrcode/-/qrcode-1.5.4.tgz", { "dependencies": { "dijkstrajs": "^1.0.1", "pngjs": "^5.0.0", "yargs": "^15.3.1" }, "bin": { "qrcode": "bin/qrcode" } }, "sha512-1ca71Zgiu6ORjHqFBDpnSMTR2ReToX4l1Au1VFLyVeBTFavzQnv5JxMFr3ukHVKpSrSA2MCk0lNJSykjUfz7Zg=="],
|
||||
|
||||
"qs": ["qs@6.15.0", "https://mirrors.cloud.tencent.com/npm/qs/-/qs-6.15.0.tgz", { "dependencies": { "side-channel": "^1.1.0" } }, "sha512-mAZTtNCeetKMH+pSjrb76NAM8V9a05I9aBZOHztWy/UqcJdQYNsf59vrRKWnojAT9Y+GbIvoTBC++CPHqpDBhQ=="],
|
||||
|
||||
"range-parser": ["range-parser@1.2.1", "https://mirrors.cloud.tencent.com/npm/range-parser/-/range-parser-1.2.1.tgz", {}, "sha512-Hrgsx+orqoygnmhFbKaHE6c296J+HTAQXoxEF6gNupROmmGJRoyzfG3ccAveqCBrwr/2yxQ5BVd/GTl5agOwSg=="],
|
||||
|
||||
"raw-body": ["raw-body@3.0.2", "https://mirrors.cloud.tencent.com/npm/raw-body/-/raw-body-3.0.2.tgz", { "dependencies": { "bytes": "~3.1.2", "http-errors": "~2.0.1", "iconv-lite": "~0.7.0", "unpipe": "~1.0.0" } }, "sha512-K5zQjDllxWkf7Z5xJdV0/B0WTNqx6vxG70zJE4N0kBs4LovmEYWJzQGxC9bS9RAKu3bgM40lrd5zoLJ12MQ5BA=="],
|
||||
|
||||
"react": ["react@19.2.4", "https://mirrors.cloud.tencent.com/npm/react/-/react-19.2.4.tgz", {}, "sha512-9nfp2hYpCwOjAN+8TZFGhtWEwgvWHXqESH8qT89AT/lWklpLON22Lc8pEtnpsZz7VmawabSU0gCjnj8aC0euHQ=="],
|
||||
|
||||
"react-reconciler": ["react-reconciler@0.33.0", "https://mirrors.cloud.tencent.com/npm/react-reconciler/-/react-reconciler-0.33.0.tgz", { "dependencies": { "scheduler": "^0.27.0" }, "peerDependencies": { "react": "^19.2.0" } }, "sha512-KetWRytFv1epdpJc3J4G75I4WrplZE5jOL7Yq0p34+OVOKF4Se7WrdIdVC45XsSSmUTlht2FM/fM1FZb1mfQeA=="],
|
||||
|
||||
"readdirp": ["readdirp@5.0.0", "https://mirrors.cloud.tencent.com/npm/readdirp/-/readdirp-5.0.0.tgz", {}, "sha512-9u/XQ1pvrQtYyMpZe7DXKv2p5CNvyVwzUB6uhLAnQwHMSgKMBR62lc7AHljaeteeHXn11XTAaLLUVZYVZyuRBQ=="],
|
||||
|
||||
"require-directory": ["require-directory@2.1.1", "https://mirrors.cloud.tencent.com/npm/require-directory/-/require-directory-2.1.1.tgz", {}, "sha512-fGxEI7+wsG9xrvdjsrlmL22OMTTiHRwAMroiEeMgq8gzoLC/PQr7RsRDSTLUg/bZAZtF+TVIkHc6/4RIKrui+Q=="],
|
||||
|
||||
"require-from-string": ["require-from-string@2.0.2", "https://mirrors.cloud.tencent.com/npm/require-from-string/-/require-from-string-2.0.2.tgz", {}, "sha512-Xf0nWe6RseziFMu+Ap9biiUbmplq6S9/p+7w7YXP/JBHhrUDDUhwa+vANyubuqfZWTveU//DYVGsDG7RKL/vEw=="],
|
||||
|
||||
"require-main-filename": ["require-main-filename@2.0.0", "https://mirrors.cloud.tencent.com/npm/require-main-filename/-/require-main-filename-2.0.0.tgz", {}, "sha512-NKN5kMDylKuldxYLSUfrbo5Tuzh4hd+2E8NPPX02mZtn1VuREQToYe/ZdlJy+J3uCpfaiGF05e7B8W0iXbQHmg=="],
|
||||
|
||||
"restore-cursor": ["restore-cursor@4.0.0", "https://mirrors.cloud.tencent.com/npm/restore-cursor/-/restore-cursor-4.0.0.tgz", { "dependencies": { "onetime": "^5.1.0", "signal-exit": "^3.0.2" } }, "sha512-I9fPXU9geO9bHOt9pHHOhOkYerIMsmVaWB0rA2AI9ERh/+x/i7MV5HKBNrg+ljO5eoPVgCcnFuRjJ9uH6I/3eg=="],
|
||||
|
||||
"retry": ["retry@0.12.0", "https://mirrors.cloud.tencent.com/npm/retry/-/retry-0.12.0.tgz", {}, "sha512-9LkiTwjUh6rT555DtE9rTX+BKByPfrMzEAtnlEtdEwr3Nkffwiihqe2bWADg+OQRjt9gl6ICdmB/ZFDCGAtSow=="],
|
||||
|
||||
"router": ["router@2.2.0", "https://mirrors.cloud.tencent.com/npm/router/-/router-2.2.0.tgz", { "dependencies": { "debug": "^4.4.0", "depd": "^2.0.0", "is-promise": "^4.0.0", "parseurl": "^1.3.3", "path-to-regexp": "^8.0.0" } }, "sha512-nLTrUKm2UyiL7rlhapu/Zl45FwNgkZGaCpZbIHajDYgwlJCOzLSk+cIPAnsEqV955GjILJnKbdQC1nVPz+gAYQ=="],
|
||||
|
||||
"safe-buffer": ["safe-buffer@5.2.1", "https://mirrors.cloud.tencent.com/npm/safe-buffer/-/safe-buffer-5.2.1.tgz", {}, "sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ=="],
|
||||
|
||||
"safer-buffer": ["safer-buffer@2.1.2", "https://mirrors.cloud.tencent.com/npm/safer-buffer/-/safer-buffer-2.1.2.tgz", {}, "sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg=="],
|
||||
|
||||
"scheduler": ["scheduler@0.27.0", "https://mirrors.cloud.tencent.com/npm/scheduler/-/scheduler-0.27.0.tgz", {}, "sha512-eNv+WrVbKu1f3vbYJT/xtiF5syA5HPIMtf9IgY/nKg0sWqzAUEvqY/xm7OcZc/qafLx/iO9FgOmeSAp4v5ti/Q=="],
|
||||
|
||||
"semver": ["semver@7.7.4", "https://mirrors.cloud.tencent.com/npm/semver/-/semver-7.7.4.tgz", { "bin": { "semver": "bin/semver.js" } }, "sha512-vFKC2IEtQnVhpT78h1Yp8wzwrf8CM+MzKMHGJZfBtzhZNycRFnXsHk6E5TxIkkMsgNS7mdX3AGB7x2QM2di4lA=="],
|
||||
|
||||
"send": ["send@1.2.1", "https://mirrors.cloud.tencent.com/npm/send/-/send-1.2.1.tgz", { "dependencies": { "debug": "^4.4.3", "encodeurl": "^2.0.0", "escape-html": "^1.0.3", "etag": "^1.8.1", "fresh": "^2.0.0", "http-errors": "^2.0.1", "mime-types": "^3.0.2", "ms": "^2.1.3", "on-finished": "^2.4.1", "range-parser": "^1.2.1", "statuses": "^2.0.2" } }, "sha512-1gnZf7DFcoIcajTjTwjwuDjzuz4PPcY2StKPlsGAQ1+YH20IRVrBaXSWmdjowTJ6u8Rc01PoYOGHXfP1mYcZNQ=="],
|
||||
|
||||
"serve-static": ["serve-static@2.2.1", "https://mirrors.cloud.tencent.com/npm/serve-static/-/serve-static-2.2.1.tgz", { "dependencies": { "encodeurl": "^2.0.0", "escape-html": "^1.0.3", "parseurl": "^1.3.3", "send": "^1.2.0" } }, "sha512-xRXBn0pPqQTVQiC8wyQrKs2MOlX24zQ0POGaj0kultvoOCstBQM5yvOhAVSUwOMjQtTvsPWoNCHfPGwaaQJhTw=="],
|
||||
|
||||
"set-blocking": ["set-blocking@2.0.0", "https://mirrors.cloud.tencent.com/npm/set-blocking/-/set-blocking-2.0.0.tgz", {}, "sha512-KiKBS8AnWGEyLzofFfmvKwpdPzqiy16LvQfK3yv/fVH7Bj13/wl3JSR1J+rfgRE9q7xUJK4qvgS8raSOeLUehw=="],
|
||||
|
||||
"setprototypeof": ["setprototypeof@1.2.0", "https://mirrors.cloud.tencent.com/npm/setprototypeof/-/setprototypeof-1.2.0.tgz", {}, "sha512-E5LDX7Wrp85Kil5bhZv46j8jOeboKq5JMmYM3gVGdGH8xFpPWXUMsNrlODCrkoxMEeNi/XZIwuRvY4XNwYMJpw=="],
|
||||
|
||||
"shebang-command": ["shebang-command@2.0.0", "https://mirrors.cloud.tencent.com/npm/shebang-command/-/shebang-command-2.0.0.tgz", { "dependencies": { "shebang-regex": "^3.0.0" } }, "sha512-kHxr2zZpYtdmrN1qDjrrX/Z1rR1kG8Dx+gkpK1G4eXmvXswmcE1hTWBWYUzlraYw1/yZp6YuDY77YtvbN0dmDA=="],
|
||||
|
||||
"shebang-regex": ["shebang-regex@3.0.0", "https://mirrors.cloud.tencent.com/npm/shebang-regex/-/shebang-regex-3.0.0.tgz", {}, "sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A=="],
|
||||
|
||||
"shell-quote": ["shell-quote@1.8.3", "https://mirrors.cloud.tencent.com/npm/shell-quote/-/shell-quote-1.8.3.tgz", {}, "sha512-ObmnIF4hXNg1BqhnHmgbDETF8dLPCggZWBjkQfhZpbszZnYur5DUljTcCHii5LC3J5E0yeO/1LIMyH+UvHQgyw=="],
|
||||
|
||||
"side-channel": ["side-channel@1.1.0", "https://mirrors.cloud.tencent.com/npm/side-channel/-/side-channel-1.1.0.tgz", { "dependencies": { "es-errors": "^1.3.0", "object-inspect": "^1.13.3", "side-channel-list": "^1.0.0", "side-channel-map": "^1.0.1", "side-channel-weakmap": "^1.0.2" } }, "sha512-ZX99e6tRweoUXqR+VBrslhda51Nh5MTQwou5tnUDgbtyM0dBgmhEDtWGP/xbKn6hqfPRHujUNwz5fy/wbbhnpw=="],
|
||||
|
||||
"side-channel-list": ["side-channel-list@1.0.0", "https://mirrors.cloud.tencent.com/npm/side-channel-list/-/side-channel-list-1.0.0.tgz", { "dependencies": { "es-errors": "^1.3.0", "object-inspect": "^1.13.3" } }, "sha512-FCLHtRD/gnpCiCHEiJLOwdmFP+wzCmDEkc9y7NsYxeF4u7Btsn1ZuwgwJGxImImHicJArLP4R0yX4c2KCrMrTA=="],
|
||||
|
||||
"side-channel-map": ["side-channel-map@1.0.1", "https://mirrors.cloud.tencent.com/npm/side-channel-map/-/side-channel-map-1.0.1.tgz", { "dependencies": { "call-bound": "^1.0.2", "es-errors": "^1.3.0", "get-intrinsic": "^1.2.5", "object-inspect": "^1.13.3" } }, "sha512-VCjCNfgMsby3tTdo02nbjtM/ewra6jPHmpThenkTYh8pG9ucZ/1P8So4u4FGBek/BjpOVsDCMoLA/iuBKIFXRA=="],
|
||||
|
||||
"side-channel-weakmap": ["side-channel-weakmap@1.0.2", "https://mirrors.cloud.tencent.com/npm/side-channel-weakmap/-/side-channel-weakmap-1.0.2.tgz", { "dependencies": { "call-bound": "^1.0.2", "es-errors": "^1.3.0", "get-intrinsic": "^1.2.5", "object-inspect": "^1.13.3", "side-channel-map": "^1.0.1" } }, "sha512-WPS/HvHQTYnHisLo9McqBHOJk2FkHO/tlpvldyrnem4aeQp4hai3gythswg6p01oSoTl58rcpiFAjF2br2Ak2A=="],
|
||||
|
||||
"signal-exit": ["signal-exit@4.1.0", "https://mirrors.cloud.tencent.com/npm/signal-exit/-/signal-exit-4.1.0.tgz", {}, "sha512-bzyZ1e88w9O1iNJbKnOlvYTrWPDl46O1bG0D3XInv+9tkPrxrN8jUUTiFlDkkmKWgn1M6CfIA13SuGqOa9Korw=="],
|
||||
|
||||
"slice-ansi": ["slice-ansi@8.0.0", "https://mirrors.cloud.tencent.com/npm/slice-ansi/-/slice-ansi-8.0.0.tgz", { "dependencies": { "ansi-styles": "^6.2.3", "is-fullwidth-code-point": "^5.1.0" } }, "sha512-stxByr12oeeOyY2BlviTNQlYV5xOj47GirPr4yA1hE9JCtxfQN0+tVbkxwCtYDQWhEKWFHsEK48ORg5jrouCAg=="],
|
||||
|
||||
"stack-utils": ["stack-utils@2.0.6", "https://mirrors.cloud.tencent.com/npm/stack-utils/-/stack-utils-2.0.6.tgz", { "dependencies": { "escape-string-regexp": "^2.0.0" } }, "sha512-XlkWvfIm6RmsWtNJx+uqtKLS8eqFbxUg0ZzLXqY0caEy9l7hruX8IpiDnjsLavoBgqCCR71TqWO8MaXYheJ3RQ=="],
|
||||
|
||||
"statuses": ["statuses@2.0.2", "https://mirrors.cloud.tencent.com/npm/statuses/-/statuses-2.0.2.tgz", {}, "sha512-DvEy55V3DB7uknRo+4iOGT5fP1slR8wQohVdknigZPMpMstaKJQWhwiYBACJE3Ul2pTnATihhBYnRhZQHGBiRw=="],
|
||||
|
||||
"string-width": ["string-width@8.2.0", "https://mirrors.cloud.tencent.com/npm/string-width/-/string-width-8.2.0.tgz", { "dependencies": { "get-east-asian-width": "^1.5.0", "strip-ansi": "^7.1.2" } }, "sha512-6hJPQ8N0V0P3SNmP6h2J99RLuzrWz2gvT7VnK5tKvrNqJoyS9W4/Fb8mo31UiPvy00z7DQXkP2hnKBVav76thw=="],
|
||||
|
||||
"strip-ansi": ["strip-ansi@7.2.0", "https://mirrors.cloud.tencent.com/npm/strip-ansi/-/strip-ansi-7.2.0.tgz", { "dependencies": { "ansi-regex": "^6.2.2" } }, "sha512-yDPMNjp4WyfYBkHnjIRLfca1i6KMyGCtsVgoKe/z1+6vukgaENdgGBZt+ZmKPc4gavvEZ5OgHfHdrazhgNyG7w=="],
|
||||
|
||||
"strip-final-newline": ["strip-final-newline@4.0.0", "https://mirrors.cloud.tencent.com/npm/strip-final-newline/-/strip-final-newline-4.0.0.tgz", {}, "sha512-aulFJcD6YK8V1G7iRB5tigAP4TsHBZZrOV8pjV++zdUwmeV8uzbY7yn6h9MswN62adStNZFuCIx4haBnRuMDaw=="],
|
||||
|
||||
"strnum": ["strnum@2.2.2", "https://mirrors.cloud.tencent.com/npm/strnum/-/strnum-2.2.2.tgz", {}, "sha512-DnR90I+jtXNSTXWdwrEy9FakW7UX+qUZg28gj5fk2vxxl7uS/3bpI4fjFYVmdK9etptYBPNkpahuQnEwhwECqA=="],
|
||||
|
||||
"supports-color": ["supports-color@10.2.2", "https://mirrors.cloud.tencent.com/npm/supports-color/-/supports-color-10.2.2.tgz", {}, "sha512-SS+jx45GF1QjgEXQx4NJZV9ImqmO2NPz5FNsIHrsDjh2YsHnawpan7SNQ1o8NuhrbHZy9AZhIoCUiCeaW/C80g=="],
|
||||
|
||||
"supports-hyperlinks": ["supports-hyperlinks@4.4.0", "https://mirrors.cloud.tencent.com/npm/supports-hyperlinks/-/supports-hyperlinks-4.4.0.tgz", { "dependencies": { "has-flag": "^5.0.1", "supports-color": "^10.2.2" } }, "sha512-UKbpT93hN5Nr9go5UY7bopIB9YQlMz9nm/ct4IXt/irb5YRkn9WaqrOBJGZ5Pwvsd5FQzSVeYlGdXoCAPQZrPg=="],
|
||||
|
||||
"tagged-tag": ["tagged-tag@1.0.0", "https://mirrors.cloud.tencent.com/npm/tagged-tag/-/tagged-tag-1.0.0.tgz", {}, "sha512-yEFYrVhod+hdNyx7g5Bnkkb0G6si8HJurOoOEgC8B/O0uXLHlaey/65KRv6cuWBNhBgHKAROVpc7QyYqE5gFng=="],
|
||||
|
||||
"terminal-size": ["terminal-size@4.0.1", "https://mirrors.cloud.tencent.com/npm/terminal-size/-/terminal-size-4.0.1.tgz", {}, "sha512-avMLDQpUI9I5XFrklECw1ZEUPJhqzcwSWsyyI8blhRLT+8N1jLJWLWWYQpB2q2xthq8xDvjZPISVh53T/+CLYQ=="],
|
||||
|
||||
"tmp": ["tmp@0.0.33", "https://mirrors.cloud.tencent.com/npm/tmp/-/tmp-0.0.33.tgz", { "dependencies": { "os-tmpdir": "~1.0.2" } }, "sha512-jRCJlojKnZ3addtTOjdIqoRuPEKBvNXcGYqzO6zWZX8KfKEpnGY5jfggJQ3EjKuu8D4bJRr0y+cYJFmYbImXGw=="],
|
||||
|
||||
"toidentifier": ["toidentifier@1.0.1", "https://mirrors.cloud.tencent.com/npm/toidentifier/-/toidentifier-1.0.1.tgz", {}, "sha512-o5sSPKEkg/DIQNmH43V0/uerLrpzVedkUh8tGNvaeXpfpuwjKenlSox/2O/BTlZUtEe+JG7s5YhEz608PlAHRA=="],
|
||||
|
||||
"tree-kill": ["tree-kill@1.2.2", "https://mirrors.cloud.tencent.com/npm/tree-kill/-/tree-kill-1.2.2.tgz", { "bin": { "tree-kill": "cli.js" } }, "sha512-L0Orpi8qGpRG//Nd+H90vFB+3iHnue1zSSGmNOOCh1GLJ7rUKVwV2HvijphGQS2UmhUZewS9VgvxYIdgr+fG1A=="],
|
||||
|
||||
"ts-algebra": ["ts-algebra@2.0.0", "https://mirrors.cloud.tencent.com/npm/ts-algebra/-/ts-algebra-2.0.0.tgz", {}, "sha512-FPAhNPFMrkwz76P7cdjdmiShwMynZYN6SgOujD1urY4oNm80Ou9oMdmbR45LotcKOXoy7wSmHkRFE6Mxbrhefw=="],
|
||||
|
||||
"tslib": ["tslib@2.8.1", "https://mirrors.cloud.tencent.com/npm/tslib/-/tslib-2.8.1.tgz", {}, "sha512-oJFu94HQb+KVduSUQL7wnpmqnfmLsOA/nAh6b6EH0wCEoK0/mPeXU6c3wKDV83MkOuHPRHtSXKKU99IBazS/2w=="],
|
||||
|
||||
"type-fest": ["type-fest@5.5.0", "https://mirrors.cloud.tencent.com/npm/type-fest/-/type-fest-5.5.0.tgz", { "dependencies": { "tagged-tag": "^1.0.0" } }, "sha512-PlBfpQwiUvGViBNX84Yxwjsdhd1TUlXr6zjX7eoirtCPIr08NAmxwa+fcYBTeRQxHo9YC9wwF3m9i700sHma8g=="],
|
||||
|
||||
"type-is": ["type-is@2.0.1", "https://mirrors.cloud.tencent.com/npm/type-is/-/type-is-2.0.1.tgz", { "dependencies": { "content-type": "^1.0.5", "media-typer": "^1.1.0", "mime-types": "^3.0.0" } }, "sha512-OZs6gsjF4vMp32qrCbiVSkrFmXtG/AZhY3t0iAMrMBiAZyV9oALtXO8hsrHbMXF9x6L3grlFuwW2oAz7cav+Gw=="],
|
||||
|
||||
"undici": ["undici@7.24.6", "https://mirrors.cloud.tencent.com/npm/undici/-/undici-7.24.6.tgz", {}, "sha512-Xi4agocCbRzt0yYMZGMA6ApD7gvtUFaxm4ZmeacWI4cZxaF6C+8I8QfofC20NAePiB/IcvZmzkJ7XPa471AEtA=="],
|
||||
|
||||
"undici-types": ["undici-types@6.21.0", "https://mirrors.cloud.tencent.com/npm/undici-types/-/undici-types-6.21.0.tgz", {}, "sha512-iwDZqg0QAGrg9Rav5H4n0M64c3mkR59cJ6wQp+7C4nI0gsmExaedaYLNO44eT4AtBBwjbTiGPMlt2Md0T9H9JQ=="],
|
||||
|
||||
"unicorn-magic": ["unicorn-magic@0.3.0", "https://mirrors.cloud.tencent.com/npm/unicorn-magic/-/unicorn-magic-0.3.0.tgz", {}, "sha512-+QBBXBCvifc56fsbuxZQ6Sic3wqqc3WWaqxs58gvJrcOuN83HGTCwz3oS5phzU9LthRNE9VrJCFCLUgHeeFnfA=="],
|
||||
|
||||
"universalify": ["universalify@2.0.1", "https://mirrors.cloud.tencent.com/npm/universalify/-/universalify-2.0.1.tgz", {}, "sha512-gptHNQghINnc/vTGIk0SOFGFNXw7JVrlRUtConJRlvaw6DuX0wO5Jeko9sWrMBhh+PsYAZ7oXAiOnf/UKogyiw=="],
|
||||
|
||||
"unpipe": ["unpipe@1.0.0", "https://mirrors.cloud.tencent.com/npm/unpipe/-/unpipe-1.0.0.tgz", {}, "sha512-pjy2bYhSsufwWlKwPc+l3cN7+wuJlK6uz0YdJEOlQDbl6jo/YlPi4mb8agUkVC8BF7V8NuzeyPNqRksA3hztKQ=="],
|
||||
|
||||
"url-handler-napi": ["url-handler-napi@file:shims/url-handler-napi", {}],
|
||||
|
||||
"usehooks-ts": ["usehooks-ts@3.1.1", "https://mirrors.cloud.tencent.com/npm/usehooks-ts/-/usehooks-ts-3.1.1.tgz", { "dependencies": { "lodash.debounce": "^4.0.8" }, "peerDependencies": { "react": "^16.8.0 || ^17 || ^18 || ^19 || ^19.0.0-rc" } }, "sha512-I4diPp9Cq6ieSUH2wu+fDAVQO43xwtulo+fKEidHUwZPnYImbtkTjzIJYcDcJqxgmX31GVqNFURodvcgHcW0pA=="],
|
||||
|
||||
"vary": ["vary@1.1.2", "https://mirrors.cloud.tencent.com/npm/vary/-/vary-1.1.2.tgz", {}, "sha512-BNGbWLfd0eUPabhkXUVm0j8uuvREyTh5ovRa/dyow/BqAbZJyC+5fU+IzQOzmAKzYqYRAISoRhdQr3eIZ/PXqg=="],
|
||||
|
||||
"vscode-jsonrpc": ["vscode-jsonrpc@8.2.1", "https://mirrors.cloud.tencent.com/npm/vscode-jsonrpc/-/vscode-jsonrpc-8.2.1.tgz", {}, "sha512-kdjOSJ2lLIn7r1rtrMbbNCHjyMPfRnowdKjBQ+mGq6NAW5QY2bEZC/khaC5OR8svbbjvLEaIXkOq45e2X9BIbQ=="],
|
||||
|
||||
"vscode-languageserver-protocol": ["vscode-languageserver-protocol@3.17.5", "https://mirrors.cloud.tencent.com/npm/vscode-languageserver-protocol/-/vscode-languageserver-protocol-3.17.5.tgz", { "dependencies": { "vscode-jsonrpc": "8.2.0", "vscode-languageserver-types": "3.17.5" } }, "sha512-mb1bvRJN8SVznADSGWM9u/b07H7Ecg0I3OgXDuLdn307rl/J3A9YD6/eYOssqhecL27hK1IPZAsaqh00i/Jljg=="],
|
||||
|
||||
"vscode-languageserver-types": ["vscode-languageserver-types@3.17.5", "https://mirrors.cloud.tencent.com/npm/vscode-languageserver-types/-/vscode-languageserver-types-3.17.5.tgz", {}, "sha512-Ld1VelNuX9pdF39h2Hgaeb5hEZM2Z3jUrrMgWQAu82jMtZp7p3vJT3BzToKtZI7NgQssZje5o0zryOrhQvzQAg=="],
|
||||
|
||||
"web-streams-polyfill": ["web-streams-polyfill@3.3.3", "https://mirrors.cloud.tencent.com/npm/web-streams-polyfill/-/web-streams-polyfill-3.3.3.tgz", {}, "sha512-d2JWLCivmZYTSIoge9MsgFCZrt571BikcWGYkjC1khllbTeDlGqZ2D8vD8E/lJa8WGWbb7Plm8/XJYV7IJHZZw=="],
|
||||
|
||||
"which": ["which@2.0.2", "https://mirrors.cloud.tencent.com/npm/which/-/which-2.0.2.tgz", { "dependencies": { "isexe": "^2.0.0" }, "bin": { "node-which": "./bin/node-which" } }, "sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA=="],
|
||||
|
||||
"which-module": ["which-module@2.0.1", "https://mirrors.cloud.tencent.com/npm/which-module/-/which-module-2.0.1.tgz", {}, "sha512-iBdZ57RDvnOR9AGBhML2vFZf7h8vmBjhoaZqODJBFWHVtKkDmKuHai3cx5PgVMrX5YDNp27AofYbAwctSS+vhQ=="],
|
||||
|
||||
"widest-line": ["widest-line@6.0.0", "https://mirrors.cloud.tencent.com/npm/widest-line/-/widest-line-6.0.0.tgz", { "dependencies": { "string-width": "^8.1.0" } }, "sha512-U89AsyEeAsyoF0zVJBkG9zBgekjgjK7yk9sje3F4IQpXBJ10TF6ByLlIfjMhcmHMJgHZI4KHt4rdNfktzxIAMA=="],
|
||||
|
||||
"wrap-ansi": ["wrap-ansi@10.0.0", "https://mirrors.cloud.tencent.com/npm/wrap-ansi/-/wrap-ansi-10.0.0.tgz", { "dependencies": { "ansi-styles": "^6.2.3", "string-width": "^8.2.0", "strip-ansi": "^7.1.2" } }, "sha512-SGcvg80f0wUy2/fXES19feHMz8E0JoXv2uNgHOu4Dgi2OrCy1lqwFYEJz1BLbDI0exjPMe/ZdzZ/YpGECBG/aQ=="],
|
||||
|
||||
"wrappy": ["wrappy@1.0.2", "https://mirrors.cloud.tencent.com/npm/wrappy/-/wrappy-1.0.2.tgz", {}, "sha512-l4Sp/DRseor9wL6EvV2+TuQn63dMkPjZ/sp9XkghTEbV9KlPS1xUsZ3u7/IQO4wxtcFB4bgpQPRcR3QCvezPcQ=="],
|
||||
|
||||
"ws": ["ws@8.20.0", "https://mirrors.cloud.tencent.com/npm/ws/-/ws-8.20.0.tgz", { "peerDependencies": { "bufferutil": "^4.0.1", "utf-8-validate": ">=5.0.2" }, "optionalPeers": ["bufferutil", "utf-8-validate"] }, "sha512-sAt8BhgNbzCtgGbt2OxmpuryO63ZoDk/sqaB/znQm94T4fCEsy/yV+7CdC1kJhOU9lboAEU7R3kquuycDoibVA=="],
|
||||
|
||||
"xss": ["xss@1.0.15", "https://mirrors.cloud.tencent.com/npm/xss/-/xss-1.0.15.tgz", { "dependencies": { "commander": "^2.20.3", "cssfilter": "0.0.10" }, "bin": { "xss": "bin/xss" } }, "sha512-FVdlVVC67WOIPvfOwhoMETV72f6GbW7aOabBC3WxN/oUdoEMDyLz4OgRv5/gck2ZeNqEQu+Tb0kloovXOfpYVg=="],
|
||||
|
||||
"y18n": ["y18n@4.0.3", "https://mirrors.cloud.tencent.com/npm/y18n/-/y18n-4.0.3.tgz", {}, "sha512-JKhqTOwSrqNA1NY5lSztJ1GrBiUodLMmIZuLiDaMRJ+itFd+ABVE8XBjOvIWL+rSqNDC74LCSFmlb/U4UZ4hJQ=="],
|
||||
|
||||
"yaml": ["yaml@2.8.3", "https://mirrors.cloud.tencent.com/npm/yaml/-/yaml-2.8.3.tgz", { "bin": { "yaml": "bin.mjs" } }, "sha512-AvbaCLOO2Otw/lW5bmh9d/WEdcDFdQp2Z2ZUH3pX9U2ihyUY0nvLv7J6TrWowklRGPYbB/IuIMfYgxaCPg5Bpg=="],
|
||||
|
||||
"yargs": ["yargs@15.4.1", "https://mirrors.cloud.tencent.com/npm/yargs/-/yargs-15.4.1.tgz", { "dependencies": { "cliui": "^6.0.0", "decamelize": "^1.2.0", "find-up": "^4.1.0", "get-caller-file": "^2.0.1", "require-directory": "^2.1.1", "require-main-filename": "^2.0.0", "set-blocking": "^2.0.0", "string-width": "^4.2.0", "which-module": "^2.0.0", "y18n": "^4.0.0", "yargs-parser": "^18.1.2" } }, "sha512-aePbxDmcYW++PaqBsJ+HYUFwCdv4LVvdnhBy78E57PIor8/OVvhMrADFFEDh8DHDFRv/O9i3lPhsENjO7QX0+A=="],
|
||||
|
||||
"yargs-parser": ["yargs-parser@18.1.3", "https://mirrors.cloud.tencent.com/npm/yargs-parser/-/yargs-parser-18.1.3.tgz", { "dependencies": { "camelcase": "^5.0.0", "decamelize": "^1.2.0" } }, "sha512-o50j0JeToy/4K6OZcaQmW6lyXXKhq7csREXcDwk2omFPJEwUNOVtJKvmDr9EI1fAJZUyZcRF7kxGBWmRXudrCQ=="],
|
||||
|
||||
"yoctocolors": ["yoctocolors@2.1.2", "https://mirrors.cloud.tencent.com/npm/yoctocolors/-/yoctocolors-2.1.2.tgz", {}, "sha512-CzhO+pFNo8ajLM2d2IW/R93ipy99LWjtwblvC1RsoSUMZgyLbYFr221TnSNT7GjGdYui6P459mw9JH/g/zW2ug=="],
|
||||
|
||||
"yoctocolors-cjs": ["yoctocolors-cjs@2.1.3", "https://mirrors.cloud.tencent.com/npm/yoctocolors-cjs/-/yoctocolors-cjs-2.1.3.tgz", {}, "sha512-U/PBtDf35ff0D8X8D0jfdzHYEPFxAI7jJlxZXwCSez5M3190m+QobIfh+sWDWSHMCWWJN2AWamkegn6vr6YBTw=="],
|
||||
|
||||
"yoga-layout": ["yoga-layout@3.2.1", "https://mirrors.cloud.tencent.com/npm/yoga-layout/-/yoga-layout-3.2.1.tgz", {}, "sha512-0LPOt3AxKqMdFBZA3HBAt/t/8vIKq7VaQYbuA8WxCgung+p9TVyKRYdpvCb80HcdTN2NkbIKbhNwKUfm3tQywQ=="],
|
||||
|
||||
"zod": ["zod@4.3.6", "https://mirrors.cloud.tencent.com/npm/zod/-/zod-4.3.6.tgz", {}, "sha512-rftlrkhHZOcjDwkGlnUtZZkvaPHCsDATp4pGpuOOMDaTdDDXF91wuVDJoWoPsKX/3YPQ5fHuF3STjcYyKr+Qhg=="],
|
||||
|
||||
"zod-to-json-schema": ["zod-to-json-schema@3.25.2", "https://mirrors.cloud.tencent.com/npm/zod-to-json-schema/-/zod-to-json-schema-3.25.2.tgz", { "peerDependencies": { "zod": "^3.25.28 || ^4" } }, "sha512-O/PgfnpT1xKSDeQYSCfRI5Gy3hPf91mKVDuYLUHZJMiDFptvP41MSnWofm8dnCm0256ZNfZIM7DSzuSMAFnjHA=="],
|
||||
|
||||
"@anthropic-ai/claude-agent-sdk/@anthropic-ai/sdk": ["@anthropic-ai/sdk@0.74.0", "https://mirrors.cloud.tencent.com/npm/@anthropic-ai/sdk/-/sdk-0.74.0.tgz", { "dependencies": { "json-schema-to-ts": "^3.1.1" }, "peerDependencies": { "zod": "^3.25.0 || ^4.0.0" }, "optionalPeers": ["zod"], "bin": { "anthropic-ai-sdk": "bin/cli" } }, "sha512-srbJV7JKsc5cQ6eVuFzjZO7UR3xEPJqPamHFIe29bs38Ij2IripoAhC0S5NslNbaFUYqBKypmmpzMTpqfHEUDw=="],
|
||||
|
||||
"@anthropic-ai/mcpb/zod": ["zod@3.25.76", "https://mirrors.cloud.tencent.com/npm/zod/-/zod-3.25.76.tgz", {}, "sha512-gzUt/qt81nXsFGKIFcC3YnfEAx5NkunCfnDlvuBSSFS02bcXu4Lmea0AFIUwbLWxWPx3d9p8S5QoaujKcNQxcQ=="],
|
||||
|
||||
"@anthropic-ai/sandbox-runtime/commander": ["commander@12.1.0", "https://mirrors.cloud.tencent.com/npm/commander/-/commander-12.1.0.tgz", {}, "sha512-Vw8qHK3bZM9y/P10u3Vib8o/DdkvA2OtPtZvD871QKjy74Wj1WSKFILMPRPSdUSx5RFK1arlJzEtA4PkFgnbuA=="],
|
||||
|
||||
"@anthropic-ai/sandbox-runtime/zod": ["zod@3.25.76", "https://mirrors.cloud.tencent.com/npm/zod/-/zod-3.25.76.tgz", {}, "sha512-gzUt/qt81nXsFGKIFcC3YnfEAx5NkunCfnDlvuBSSFS02bcXu4Lmea0AFIUwbLWxWPx3d9p8S5QoaujKcNQxcQ=="],
|
||||
|
||||
"@aws-crypto/sha256-browser/@smithy/util-utf8": ["@smithy/util-utf8@2.3.0", "https://mirrors.cloud.tencent.com/npm/@smithy/util-utf8/-/util-utf8-2.3.0.tgz", { "dependencies": { "@smithy/util-buffer-from": "^2.2.0", "tslib": "^2.6.2" } }, "sha512-R8Rdn8Hy72KKcebgLiv8jQcQkXoLMOGGv5uI1/k0l+snqkOzQ1R0ChUBCxWMlBsFMekWjq0wRudIweFs7sKT5A=="],
|
||||
|
||||
"@aws-crypto/util/@smithy/util-utf8": ["@smithy/util-utf8@2.3.0", "https://mirrors.cloud.tencent.com/npm/@smithy/util-utf8/-/util-utf8-2.3.0.tgz", { "dependencies": { "@smithy/util-buffer-from": "^2.2.0", "tslib": "^2.6.2" } }, "sha512-R8Rdn8Hy72KKcebgLiv8jQcQkXoLMOGGv5uI1/k0l+snqkOzQ1R0ChUBCxWMlBsFMekWjq0wRudIweFs7sKT5A=="],
|
||||
|
||||
"@inquirer/checkbox/ansi-escapes": ["ansi-escapes@4.3.2", "https://mirrors.cloud.tencent.com/npm/ansi-escapes/-/ansi-escapes-4.3.2.tgz", { "dependencies": { "type-fest": "^0.21.3" } }, "sha512-gKXj5ALrKWQLsYG9jlTRmR/xKluxHV+Z9QEwNIgCfM1/uwPMCuzVVnh5mwTd+OuBZcwSIMbqssNWRm1lE51QaQ=="],
|
||||
|
||||
"@inquirer/core/ansi-escapes": ["ansi-escapes@4.3.2", "https://mirrors.cloud.tencent.com/npm/ansi-escapes/-/ansi-escapes-4.3.2.tgz", { "dependencies": { "type-fest": "^0.21.3" } }, "sha512-gKXj5ALrKWQLsYG9jlTRmR/xKluxHV+Z9QEwNIgCfM1/uwPMCuzVVnh5mwTd+OuBZcwSIMbqssNWRm1lE51QaQ=="],
|
||||
|
||||
"@inquirer/core/strip-ansi": ["strip-ansi@6.0.1", "https://mirrors.cloud.tencent.com/npm/strip-ansi/-/strip-ansi-6.0.1.tgz", { "dependencies": { "ansi-regex": "^5.0.1" } }, "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A=="],
|
||||
|
||||
"@inquirer/core/wrap-ansi": ["wrap-ansi@6.2.0", "https://mirrors.cloud.tencent.com/npm/wrap-ansi/-/wrap-ansi-6.2.0.tgz", { "dependencies": { "ansi-styles": "^4.0.0", "string-width": "^4.1.0", "strip-ansi": "^6.0.0" } }, "sha512-r6lPcBGxZXlIcymEu7InxDMhdW0KDxpLgoFLcguasxCaJ/SOIZwINatK9KY/tf+ZrlywOKU0UDj3ATXUBfxJXA=="],
|
||||
|
||||
"@inquirer/password/ansi-escapes": ["ansi-escapes@4.3.2", "https://mirrors.cloud.tencent.com/npm/ansi-escapes/-/ansi-escapes-4.3.2.tgz", { "dependencies": { "type-fest": "^0.21.3" } }, "sha512-gKXj5ALrKWQLsYG9jlTRmR/xKluxHV+Z9QEwNIgCfM1/uwPMCuzVVnh5mwTd+OuBZcwSIMbqssNWRm1lE51QaQ=="],
|
||||
|
||||
"@inquirer/select/ansi-escapes": ["ansi-escapes@4.3.2", "https://mirrors.cloud.tencent.com/npm/ansi-escapes/-/ansi-escapes-4.3.2.tgz", { "dependencies": { "type-fest": "^0.21.3" } }, "sha512-gKXj5ALrKWQLsYG9jlTRmR/xKluxHV+Z9QEwNIgCfM1/uwPMCuzVVnh5mwTd+OuBZcwSIMbqssNWRm1lE51QaQ=="],
|
||||
|
||||
"cliui/string-width": ["string-width@4.2.3", "https://mirrors.cloud.tencent.com/npm/string-width/-/string-width-4.2.3.tgz", { "dependencies": { "emoji-regex": "^8.0.0", "is-fullwidth-code-point": "^3.0.0", "strip-ansi": "^6.0.1" } }, "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g=="],
|
||||
|
||||
"cliui/strip-ansi": ["strip-ansi@6.0.1", "https://mirrors.cloud.tencent.com/npm/strip-ansi/-/strip-ansi-6.0.1.tgz", { "dependencies": { "ansi-regex": "^5.0.1" } }, "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A=="],
|
||||
|
||||
"cliui/wrap-ansi": ["wrap-ansi@6.2.0", "https://mirrors.cloud.tencent.com/npm/wrap-ansi/-/wrap-ansi-6.2.0.tgz", { "dependencies": { "ansi-styles": "^4.0.0", "string-width": "^4.1.0", "strip-ansi": "^6.0.0" } }, "sha512-r6lPcBGxZXlIcymEu7InxDMhdW0KDxpLgoFLcguasxCaJ/SOIZwINatK9KY/tf+ZrlywOKU0UDj3ATXUBfxJXA=="],
|
||||
|
||||
"external-editor/iconv-lite": ["iconv-lite@0.4.24", "https://mirrors.cloud.tencent.com/npm/iconv-lite/-/iconv-lite-0.4.24.tgz", { "dependencies": { "safer-buffer": ">= 2.1.2 < 3" } }, "sha512-v3MXnZAcvnywkTUEZomIActle7RXXeedOR31wwl7VlyoXO4Qi9arvSenNQWne1TcRwhCL1HwLI21bEqdpj8/rA=="],
|
||||
|
||||
"form-data/mime-types": ["mime-types@2.1.35", "https://mirrors.cloud.tencent.com/npm/mime-types/-/mime-types-2.1.35.tgz", { "dependencies": { "mime-db": "1.52.0" } }, "sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw=="],
|
||||
|
||||
"gaxios/https-proxy-agent": ["https-proxy-agent@7.0.6", "https://mirrors.cloud.tencent.com/npm/https-proxy-agent/-/https-proxy-agent-7.0.6.tgz", { "dependencies": { "agent-base": "^7.1.2", "debug": "4" } }, "sha512-vK9P5/iUfdl95AI+JVyUuIcVtd4ofvtrOr3HNtM2yxC9bnMbEdp3x01OhQNnjb8IJYi38VlTE3mBXwcfvywuSw=="],
|
||||
|
||||
"ink/@alcalzone/ansi-tokenize": ["@alcalzone/ansi-tokenize@0.2.5", "https://mirrors.cloud.tencent.com/npm/@alcalzone/ansi-tokenize/-/ansi-tokenize-0.2.5.tgz", { "dependencies": { "ansi-styles": "^6.2.1", "is-fullwidth-code-point": "^5.0.0" } }, "sha512-3NX/MpTdroi0aKz134A6RC2Gb2iXVECN4QaAXnvCIxxIm3C3AVB1mkUe8NaaiyvOpDfsrqWhYtj+Q6a62RrTsw=="],
|
||||
|
||||
"ink/cli-boxes": ["cli-boxes@3.0.0", "https://mirrors.cloud.tencent.com/npm/cli-boxes/-/cli-boxes-3.0.0.tgz", {}, "sha512-/lzGpEWL/8PfI0BmBOPRwp0c/wFNX1RdUML3jK/RcSBA9T8mZDdQpqYBKtCFTOfQbwPqWEOpjqW+Fnayc0969g=="],
|
||||
|
||||
"ink/signal-exit": ["signal-exit@3.0.7", "https://mirrors.cloud.tencent.com/npm/signal-exit/-/signal-exit-3.0.7.tgz", {}, "sha512-wnD2ZE+l+SPC/uoS0vXeE9L1+0wuaMqKlfz9AMUo38JsyLSBWSFcHR1Rri62LZc12vLr1gb3jl7iwQhgwpAbGQ=="],
|
||||
|
||||
"ink/wrap-ansi": ["wrap-ansi@9.0.2", "https://mirrors.cloud.tencent.com/npm/wrap-ansi/-/wrap-ansi-9.0.2.tgz", { "dependencies": { "ansi-styles": "^6.2.1", "string-width": "^7.0.0", "strip-ansi": "^7.1.0" } }, "sha512-42AtmgqjV+X1VpdOfyTGOYRi0/zsoLqtXQckTmqTeybT+BDIbM/Guxo7x3pE2vtpr1ok6xRqM9OpBe+Jyoqyww=="],
|
||||
|
||||
"npm-run-path/path-key": ["path-key@4.0.0", "https://mirrors.cloud.tencent.com/npm/path-key/-/path-key-4.0.0.tgz", {}, "sha512-haREypq7xkM7ErfgIyA0z+Bj4AGKlMSdlQE2jvJo6huWD1EdkKYV+G/T4nq0YEF2vgTT8kqMFKo1uHn950r4SQ=="],
|
||||
|
||||
"proper-lockfile/signal-exit": ["signal-exit@3.0.7", "https://mirrors.cloud.tencent.com/npm/signal-exit/-/signal-exit-3.0.7.tgz", {}, "sha512-wnD2ZE+l+SPC/uoS0vXeE9L1+0wuaMqKlfz9AMUo38JsyLSBWSFcHR1Rri62LZc12vLr1gb3jl7iwQhgwpAbGQ=="],
|
||||
|
||||
"restore-cursor/signal-exit": ["signal-exit@3.0.7", "https://mirrors.cloud.tencent.com/npm/signal-exit/-/signal-exit-3.0.7.tgz", {}, "sha512-wnD2ZE+l+SPC/uoS0vXeE9L1+0wuaMqKlfz9AMUo38JsyLSBWSFcHR1Rri62LZc12vLr1gb3jl7iwQhgwpAbGQ=="],
|
||||
|
||||
"vscode-languageserver-protocol/vscode-jsonrpc": ["vscode-jsonrpc@8.2.0", "https://mirrors.cloud.tencent.com/npm/vscode-jsonrpc/-/vscode-jsonrpc-8.2.0.tgz", {}, "sha512-C+r0eKJUIfiDIfwJhria30+TYWPtuHJXHtI7J0YlOmKAo7ogxP20T0zxB7HZQIFhIyvoBPwWskjxrvAtfjyZfA=="],
|
||||
|
||||
"xss/commander": ["commander@2.20.3", "https://mirrors.cloud.tencent.com/npm/commander/-/commander-2.20.3.tgz", {}, "sha512-GpVkmM8vF2vQUkj2LvZmD35JxeJOLCwJ9cUkugyk2nuhbv3+mJvpLYYt+0+USMxE+oj+ey/lJEnhZw75x/OMcQ=="],
|
||||
|
||||
"yargs/string-width": ["string-width@4.2.3", "https://mirrors.cloud.tencent.com/npm/string-width/-/string-width-4.2.3.tgz", { "dependencies": { "emoji-regex": "^8.0.0", "is-fullwidth-code-point": "^3.0.0", "strip-ansi": "^6.0.1" } }, "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g=="],
|
||||
|
||||
"@aws-crypto/sha256-browser/@smithy/util-utf8/@smithy/util-buffer-from": ["@smithy/util-buffer-from@2.2.0", "https://mirrors.cloud.tencent.com/npm/@smithy/util-buffer-from/-/util-buffer-from-2.2.0.tgz", { "dependencies": { "@smithy/is-array-buffer": "^2.2.0", "tslib": "^2.6.2" } }, "sha512-IJdWBbTcMQ6DA0gdNhh/BwrLkDR+ADW5Kr1aZmd4k3DIF6ezMV4R2NIAmT08wQJ3yUK82thHWmC/TnK/wpMMIA=="],
|
||||
|
||||
"@aws-crypto/util/@smithy/util-utf8/@smithy/util-buffer-from": ["@smithy/util-buffer-from@2.2.0", "https://mirrors.cloud.tencent.com/npm/@smithy/util-buffer-from/-/util-buffer-from-2.2.0.tgz", { "dependencies": { "@smithy/is-array-buffer": "^2.2.0", "tslib": "^2.6.2" } }, "sha512-IJdWBbTcMQ6DA0gdNhh/BwrLkDR+ADW5Kr1aZmd4k3DIF6ezMV4R2NIAmT08wQJ3yUK82thHWmC/TnK/wpMMIA=="],
|
||||
|
||||
"@inquirer/checkbox/ansi-escapes/type-fest": ["type-fest@0.21.3", "https://mirrors.cloud.tencent.com/npm/type-fest/-/type-fest-0.21.3.tgz", {}, "sha512-t0rzBq87m3fVcduHDUFhKmyyX+9eo6WQjZvf51Ea/M0Q7+T374Jp1aUiyUl0GKxp8M/OETVHSDvmkyPgvX+X2w=="],
|
||||
|
||||
"@inquirer/core/ansi-escapes/type-fest": ["type-fest@0.21.3", "https://mirrors.cloud.tencent.com/npm/type-fest/-/type-fest-0.21.3.tgz", {}, "sha512-t0rzBq87m3fVcduHDUFhKmyyX+9eo6WQjZvf51Ea/M0Q7+T374Jp1aUiyUl0GKxp8M/OETVHSDvmkyPgvX+X2w=="],
|
||||
|
||||
"@inquirer/core/strip-ansi/ansi-regex": ["ansi-regex@5.0.1", "https://mirrors.cloud.tencent.com/npm/ansi-regex/-/ansi-regex-5.0.1.tgz", {}, "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ=="],
|
||||
|
||||
"@inquirer/core/wrap-ansi/ansi-styles": ["ansi-styles@4.3.0", "https://mirrors.cloud.tencent.com/npm/ansi-styles/-/ansi-styles-4.3.0.tgz", { "dependencies": { "color-convert": "^2.0.1" } }, "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg=="],
|
||||
|
||||
"@inquirer/core/wrap-ansi/string-width": ["string-width@4.2.3", "https://mirrors.cloud.tencent.com/npm/string-width/-/string-width-4.2.3.tgz", { "dependencies": { "emoji-regex": "^8.0.0", "is-fullwidth-code-point": "^3.0.0", "strip-ansi": "^6.0.1" } }, "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g=="],
|
||||
|
||||
"@inquirer/password/ansi-escapes/type-fest": ["type-fest@0.21.3", "https://mirrors.cloud.tencent.com/npm/type-fest/-/type-fest-0.21.3.tgz", {}, "sha512-t0rzBq87m3fVcduHDUFhKmyyX+9eo6WQjZvf51Ea/M0Q7+T374Jp1aUiyUl0GKxp8M/OETVHSDvmkyPgvX+X2w=="],
|
||||
|
||||
"@inquirer/select/ansi-escapes/type-fest": ["type-fest@0.21.3", "https://mirrors.cloud.tencent.com/npm/type-fest/-/type-fest-0.21.3.tgz", {}, "sha512-t0rzBq87m3fVcduHDUFhKmyyX+9eo6WQjZvf51Ea/M0Q7+T374Jp1aUiyUl0GKxp8M/OETVHSDvmkyPgvX+X2w=="],
|
||||
|
||||
"cliui/string-width/emoji-regex": ["emoji-regex@8.0.0", "https://mirrors.cloud.tencent.com/npm/emoji-regex/-/emoji-regex-8.0.0.tgz", {}, "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A=="],
|
||||
|
||||
"cliui/string-width/is-fullwidth-code-point": ["is-fullwidth-code-point@3.0.0", "https://mirrors.cloud.tencent.com/npm/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz", {}, "sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg=="],
|
||||
|
||||
"cliui/strip-ansi/ansi-regex": ["ansi-regex@5.0.1", "https://mirrors.cloud.tencent.com/npm/ansi-regex/-/ansi-regex-5.0.1.tgz", {}, "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ=="],
|
||||
|
||||
"cliui/wrap-ansi/ansi-styles": ["ansi-styles@4.3.0", "https://mirrors.cloud.tencent.com/npm/ansi-styles/-/ansi-styles-4.3.0.tgz", { "dependencies": { "color-convert": "^2.0.1" } }, "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg=="],
|
||||
|
||||
"form-data/mime-types/mime-db": ["mime-db@1.52.0", "https://mirrors.cloud.tencent.com/npm/mime-db/-/mime-db-1.52.0.tgz", {}, "sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg=="],
|
||||
|
||||
"gaxios/https-proxy-agent/agent-base": ["agent-base@7.1.4", "https://mirrors.cloud.tencent.com/npm/agent-base/-/agent-base-7.1.4.tgz", {}, "sha512-MnA+YT8fwfJPgBx3m60MNqakm30XOkyIoH1y6huTQvC0PwZG7ki8NacLBcrPbNoo8vEZy7Jpuk7+jMO+CUovTQ=="],
|
||||
|
||||
"ink/wrap-ansi/string-width": ["string-width@7.2.0", "https://mirrors.cloud.tencent.com/npm/string-width/-/string-width-7.2.0.tgz", { "dependencies": { "emoji-regex": "^10.3.0", "get-east-asian-width": "^1.0.0", "strip-ansi": "^7.1.0" } }, "sha512-tsaTIkKW9b4N+AEj+SVA+WhJzV7/zMhcSu78mLKWSk7cXMOSHsBKFWUs0fWwq8QyK3MgJBQRX6Gbi4kYbdvGkQ=="],
|
||||
|
||||
"yargs/string-width/emoji-regex": ["emoji-regex@8.0.0", "https://mirrors.cloud.tencent.com/npm/emoji-regex/-/emoji-regex-8.0.0.tgz", {}, "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A=="],
|
||||
|
||||
"yargs/string-width/is-fullwidth-code-point": ["is-fullwidth-code-point@3.0.0", "https://mirrors.cloud.tencent.com/npm/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz", {}, "sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg=="],
|
||||
|
||||
"yargs/string-width/strip-ansi": ["strip-ansi@6.0.1", "https://mirrors.cloud.tencent.com/npm/strip-ansi/-/strip-ansi-6.0.1.tgz", { "dependencies": { "ansi-regex": "^5.0.1" } }, "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A=="],
|
||||
|
||||
"@aws-crypto/sha256-browser/@smithy/util-utf8/@smithy/util-buffer-from/@smithy/is-array-buffer": ["@smithy/is-array-buffer@2.2.0", "https://mirrors.cloud.tencent.com/npm/@smithy/is-array-buffer/-/is-array-buffer-2.2.0.tgz", { "dependencies": { "tslib": "^2.6.2" } }, "sha512-GGP3O9QFD24uGeAXYUjwSTXARoqpZykHadOmA8G5vfJPK0/DC67qa//0qvqrJzL1xc8WQWX7/yc7fwudjPHPhA=="],
|
||||
|
||||
"@aws-crypto/util/@smithy/util-utf8/@smithy/util-buffer-from/@smithy/is-array-buffer": ["@smithy/is-array-buffer@2.2.0", "https://mirrors.cloud.tencent.com/npm/@smithy/is-array-buffer/-/is-array-buffer-2.2.0.tgz", { "dependencies": { "tslib": "^2.6.2" } }, "sha512-GGP3O9QFD24uGeAXYUjwSTXARoqpZykHadOmA8G5vfJPK0/DC67qa//0qvqrJzL1xc8WQWX7/yc7fwudjPHPhA=="],
|
||||
|
||||
"@inquirer/core/wrap-ansi/string-width/emoji-regex": ["emoji-regex@8.0.0", "https://mirrors.cloud.tencent.com/npm/emoji-regex/-/emoji-regex-8.0.0.tgz", {}, "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A=="],
|
||||
|
||||
"@inquirer/core/wrap-ansi/string-width/is-fullwidth-code-point": ["is-fullwidth-code-point@3.0.0", "https://mirrors.cloud.tencent.com/npm/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz", {}, "sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg=="],
|
||||
|
||||
"yargs/string-width/strip-ansi/ansi-regex": ["ansi-regex@5.0.1", "https://mirrors.cloud.tencent.com/npm/ansi-regex/-/ansi-regex-5.0.1.tgz", {}, "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ=="],
|
||||
}
|
||||
}
|
||||
1
image-processor.node
Normal file
1
image-processor.node
Normal file
@ -0,0 +1 @@
|
||||
placeholder
|
||||
98
package.json
Normal file
98
package.json
Normal file
@ -0,0 +1,98 @@
|
||||
{
|
||||
"name": "@anthropic-ai/claude-code",
|
||||
"version": "999.0.0-restored",
|
||||
"private": true,
|
||||
"description": "Restored Claude Code source tree reconstructed from source maps.",
|
||||
"license": "SEE LICENSE IN LICENSE.md",
|
||||
"type": "module",
|
||||
"packageManager": "bun@1.3.5",
|
||||
"repository": {
|
||||
"type": "git",
|
||||
"url": "https://github.com/anthropics/claude-code.git"
|
||||
},
|
||||
"engines": {
|
||||
"bun": ">=1.3.5",
|
||||
"node": ">=24.0.0"
|
||||
},
|
||||
"scripts": {
|
||||
"dev": "bun run ./src/dev-entry.ts",
|
||||
"start": "bun run ./src/dev-entry.ts",
|
||||
"version": "bun run ./src/dev-entry.ts --version"
|
||||
},
|
||||
"dependencies": {
|
||||
"@alcalzone/ansi-tokenize": "*",
|
||||
"@ant/claude-for-chrome-mcp": "file:./shims/ant-claude-for-chrome-mcp",
|
||||
"@ant/computer-use-input": "file:./shims/ant-computer-use-input",
|
||||
"@ant/computer-use-mcp": "file:./shims/ant-computer-use-mcp",
|
||||
"@ant/computer-use-swift": "file:./shims/ant-computer-use-swift",
|
||||
"@anthropic-ai/claude-agent-sdk": "*",
|
||||
"@anthropic-ai/mcpb": "*",
|
||||
"@anthropic-ai/sandbox-runtime": "*",
|
||||
"@anthropic-ai/sdk": "*",
|
||||
"@aws-sdk/client-bedrock-runtime": "*",
|
||||
"@commander-js/extra-typings": "*",
|
||||
"@growthbook/growthbook": "*",
|
||||
"@modelcontextprotocol/sdk": "*",
|
||||
"@opentelemetry/api": "*",
|
||||
"@opentelemetry/api-logs": "*",
|
||||
"@opentelemetry/core": "*",
|
||||
"@opentelemetry/resources": "*",
|
||||
"@opentelemetry/sdk-logs": "*",
|
||||
"@opentelemetry/sdk-metrics": "*",
|
||||
"@opentelemetry/sdk-trace-base": "*",
|
||||
"@opentelemetry/semantic-conventions": "*",
|
||||
"ajv": "*",
|
||||
"asciichart": "*",
|
||||
"auto-bind": "*",
|
||||
"axios": "*",
|
||||
"bidi-js": "*",
|
||||
"chalk": "*",
|
||||
"chokidar": "*",
|
||||
"cli-boxes": "*",
|
||||
"code-excerpt": "*",
|
||||
"diff": "*",
|
||||
"emoji-regex": "*",
|
||||
"env-paths": "*",
|
||||
"execa": "*",
|
||||
"figures": "*",
|
||||
"fuse.js": "*",
|
||||
"get-east-asian-width": "*",
|
||||
"google-auth-library": "*",
|
||||
"highlight.js": "*",
|
||||
"https-proxy-agent": "*",
|
||||
"ignore": "*",
|
||||
"indent-string": "*",
|
||||
"ink": "*",
|
||||
"jsonc-parser": "*",
|
||||
"lodash-es": "*",
|
||||
"lru-cache": "*",
|
||||
"marked": "*",
|
||||
"p-map": "*",
|
||||
"picomatch": "*",
|
||||
"proper-lockfile": "*",
|
||||
"qrcode": "*",
|
||||
"react": "*",
|
||||
"react-reconciler": "*",
|
||||
"semver": "*",
|
||||
"shell-quote": "*",
|
||||
"signal-exit": "*",
|
||||
"stack-utils": "*",
|
||||
"strip-ansi": "*",
|
||||
"supports-hyperlinks": "*",
|
||||
"tree-kill": "*",
|
||||
"type-fest": "*",
|
||||
"undici": "*",
|
||||
"usehooks-ts": "*",
|
||||
"vscode-jsonrpc": "*",
|
||||
"vscode-languageserver-protocol": "*",
|
||||
"vscode-languageserver-types": "*",
|
||||
"wrap-ansi": "*",
|
||||
"ws": "*",
|
||||
"xss": "*",
|
||||
"yaml": "*",
|
||||
"zod": "*",
|
||||
"color-diff-napi": "file:./shims/color-diff-napi",
|
||||
"modifiers-napi": "file:./shims/modifiers-napi",
|
||||
"url-handler-napi": "file:./shims/url-handler-napi"
|
||||
}
|
||||
}
|
||||
BIN
preview.png
Normal file
BIN
preview.png
Normal file
Binary file not shown.
|
After Width: | Height: | Size: 257 KiB |
25
shims/ant-claude-for-chrome-mcp/index.ts
Normal file
25
shims/ant-claude-for-chrome-mcp/index.ts
Normal file
@ -0,0 +1,25 @@
|
||||
export type PermissionMode =
|
||||
| 'ask'
|
||||
| 'skip_all_permission_checks'
|
||||
| 'follow_a_plan'
|
||||
|
||||
export type Logger = {
|
||||
info(message: string): void
|
||||
warn(message: string): void
|
||||
error(message: string): void
|
||||
}
|
||||
|
||||
export type ClaudeForChromeContext = {
|
||||
serverName?: string
|
||||
logger?: Logger
|
||||
}
|
||||
|
||||
export const BROWSER_TOOLS: Array<{ name: string }> = []
|
||||
|
||||
export function createClaudeForChromeMcpServer(_context: ClaudeForChromeContext) {
|
||||
return {
|
||||
async connect() {},
|
||||
setRequestHandler() {},
|
||||
async close() {},
|
||||
}
|
||||
}
|
||||
6
shims/ant-claude-for-chrome-mcp/package.json
Normal file
6
shims/ant-claude-for-chrome-mcp/package.json
Normal file
@ -0,0 +1,6 @@
|
||||
{
|
||||
"name": "@ant/claude-for-chrome-mcp",
|
||||
"version": "0.0.0-restored",
|
||||
"type": "module",
|
||||
"main": "./index.ts"
|
||||
}
|
||||
23
shims/ant-computer-use-input/index.ts
Normal file
23
shims/ant-computer-use-input/index.ts
Normal file
@ -0,0 +1,23 @@
|
||||
export type ComputerUseInputAPI = {
|
||||
moveMouse(x: number, y: number, smooth?: boolean): Promise<void>
|
||||
mouseLocation(): Promise<{ x: number; y: number }>
|
||||
key(key: string, action?: 'press' | 'release' | 'click'): Promise<void>
|
||||
keys(keys: string[]): Promise<void>
|
||||
leftClick(): Promise<void>
|
||||
rightClick(): Promise<void>
|
||||
doubleClick(): Promise<void>
|
||||
middleClick(): Promise<void>
|
||||
dragMouse(x: number, y: number): Promise<void>
|
||||
scroll(x: number, y: number): Promise<void>
|
||||
type(text: string): Promise<void>
|
||||
}
|
||||
|
||||
export type ComputerUseInput =
|
||||
| ({ isSupported: false } & Partial<ComputerUseInputAPI>)
|
||||
| ({ isSupported: true } & ComputerUseInputAPI)
|
||||
|
||||
const unsupported: ComputerUseInput = {
|
||||
isSupported: false,
|
||||
}
|
||||
|
||||
export default unsupported
|
||||
6
shims/ant-computer-use-input/package.json
Normal file
6
shims/ant-computer-use-input/package.json
Normal file
@ -0,0 +1,6 @@
|
||||
{
|
||||
"name": "@ant/computer-use-input",
|
||||
"version": "0.0.0-restored",
|
||||
"type": "module",
|
||||
"main": "./index.ts"
|
||||
}
|
||||
59
shims/ant-computer-use-mcp/index.ts
Normal file
59
shims/ant-computer-use-mcp/index.ts
Normal file
@ -0,0 +1,59 @@
|
||||
export const DEFAULT_GRANT_FLAGS = {
|
||||
accessibility: false,
|
||||
screenRecording: false,
|
||||
}
|
||||
|
||||
export const API_RESIZE_PARAMS = {}
|
||||
|
||||
export function targetImageSize(width: number, height: number) {
|
||||
return [width, height] as const
|
||||
}
|
||||
|
||||
export function buildComputerUseTools() {
|
||||
return [] as Array<{ name: string }>
|
||||
}
|
||||
|
||||
export function createComputerUseMcpServer() {
|
||||
return {
|
||||
async connect() {},
|
||||
setRequestHandler() {},
|
||||
async close() {},
|
||||
}
|
||||
}
|
||||
|
||||
export function bindSessionContext() {
|
||||
return async () => ({
|
||||
is_error: true,
|
||||
content: [
|
||||
{
|
||||
type: 'text',
|
||||
text: 'Computer use is unavailable in the restored development build.',
|
||||
},
|
||||
],
|
||||
})
|
||||
}
|
||||
|
||||
export type DisplayGeometry = Record<string, unknown>
|
||||
export type FrontmostApp = Record<string, unknown>
|
||||
export type InstalledApp = { name?: string; bundleId?: string }
|
||||
export type ResolvePrepareCaptureResult = Record<string, unknown>
|
||||
export type RunningApp = Record<string, unknown>
|
||||
export type ScreenshotResult = Record<string, unknown>
|
||||
export type ScreenshotDims = {
|
||||
width: number
|
||||
height: number
|
||||
displayWidth?: number
|
||||
displayHeight?: number
|
||||
displayId?: number
|
||||
originX?: number
|
||||
originY?: number
|
||||
}
|
||||
export type CuPermissionRequest = Record<string, unknown>
|
||||
export type CuPermissionResponse = Record<string, unknown>
|
||||
export type CuCallToolResult = {
|
||||
is_error?: boolean
|
||||
content?: Array<{ type: string; text?: string }>
|
||||
telemetry?: Record<string, unknown>
|
||||
}
|
||||
export type ComputerUseSessionContext = Record<string, unknown>
|
||||
export type ComputerExecutor = Record<string, unknown>
|
||||
11
shims/ant-computer-use-mcp/package.json
Normal file
11
shims/ant-computer-use-mcp/package.json
Normal file
@ -0,0 +1,11 @@
|
||||
{
|
||||
"name": "@ant/computer-use-mcp",
|
||||
"version": "0.0.0-restored",
|
||||
"type": "module",
|
||||
"main": "./index.ts",
|
||||
"exports": {
|
||||
".": "./index.ts",
|
||||
"./types": "./types.ts",
|
||||
"./sentinelApps": "./sentinelApps.ts"
|
||||
}
|
||||
}
|
||||
3
shims/ant-computer-use-mcp/sentinelApps.ts
Normal file
3
shims/ant-computer-use-mcp/sentinelApps.ts
Normal file
@ -0,0 +1,3 @@
|
||||
export function getSentinelCategory() {
|
||||
return null
|
||||
}
|
||||
15
shims/ant-computer-use-mcp/types.ts
Normal file
15
shims/ant-computer-use-mcp/types.ts
Normal file
@ -0,0 +1,15 @@
|
||||
export const DEFAULT_GRANT_FLAGS = {
|
||||
accessibility: false,
|
||||
screenRecording: false,
|
||||
}
|
||||
|
||||
export type CoordinateMode = 'screen' | 'viewport'
|
||||
export type CuSubGates = Record<string, boolean>
|
||||
export type Logger = {
|
||||
info(message: string): void
|
||||
warn(message: string): void
|
||||
error(message: string): void
|
||||
}
|
||||
export type ComputerUseHostAdapter = Record<string, unknown>
|
||||
export type CuPermissionRequest = Record<string, unknown>
|
||||
export type CuPermissionResponse = Record<string, unknown>
|
||||
27
shims/ant-computer-use-swift/index.ts
Normal file
27
shims/ant-computer-use-swift/index.ts
Normal file
@ -0,0 +1,27 @@
|
||||
export type ComputerUseAPI = {
|
||||
screens?: {
|
||||
list(): Promise<unknown[]>
|
||||
}
|
||||
apps?: {
|
||||
listInstalled(): Promise<unknown[]>
|
||||
listRunning(): Promise<unknown[]>
|
||||
}
|
||||
}
|
||||
|
||||
const stub: ComputerUseAPI = {
|
||||
screens: {
|
||||
async list() {
|
||||
return []
|
||||
},
|
||||
},
|
||||
apps: {
|
||||
async listInstalled() {
|
||||
return []
|
||||
},
|
||||
async listRunning() {
|
||||
return []
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
export default stub
|
||||
6
shims/ant-computer-use-swift/package.json
Normal file
6
shims/ant-computer-use-swift/package.json
Normal file
@ -0,0 +1,6 @@
|
||||
{
|
||||
"name": "@ant/computer-use-swift",
|
||||
"version": "0.0.0-restored",
|
||||
"type": "module",
|
||||
"main": "./index.ts"
|
||||
}
|
||||
12
shims/color-diff-napi/index.ts
Normal file
12
shims/color-diff-napi/index.ts
Normal file
@ -0,0 +1,12 @@
|
||||
export {
|
||||
ColorDiff,
|
||||
ColorFile,
|
||||
getSyntaxTheme,
|
||||
getNativeModule,
|
||||
} from '../../src/native-ts/color-diff/index.ts'
|
||||
export type {
|
||||
ColorDiffClass,
|
||||
ColorFileClass,
|
||||
Hunk,
|
||||
SyntaxTheme,
|
||||
} from '../../src/native-ts/color-diff/index.ts'
|
||||
6
shims/color-diff-napi/package.json
Normal file
6
shims/color-diff-napi/package.json
Normal file
@ -0,0 +1,6 @@
|
||||
{
|
||||
"name": "color-diff-napi",
|
||||
"version": "0.0.0-restored",
|
||||
"type": "module",
|
||||
"main": "./index.ts"
|
||||
}
|
||||
5
shims/modifiers-napi/index.ts
Normal file
5
shims/modifiers-napi/index.ts
Normal file
@ -0,0 +1,5 @@
|
||||
export {
|
||||
getModifiers,
|
||||
isModifierPressed,
|
||||
prewarm,
|
||||
} from '../../vendor/modifiers-napi-src/index.ts'
|
||||
6
shims/modifiers-napi/package.json
Normal file
6
shims/modifiers-napi/package.json
Normal file
@ -0,0 +1,6 @@
|
||||
{
|
||||
"name": "modifiers-napi",
|
||||
"version": "0.0.0-restored",
|
||||
"type": "module",
|
||||
"main": "./index.ts"
|
||||
}
|
||||
1
shims/url-handler-napi/index.ts
Normal file
1
shims/url-handler-napi/index.ts
Normal file
@ -0,0 +1 @@
|
||||
export { waitForUrlEvent } from '../../vendor/url-handler-src/index.ts'
|
||||
6
shims/url-handler-napi/package.json
Normal file
6
shims/url-handler-napi/package.json
Normal file
@ -0,0 +1,6 @@
|
||||
{
|
||||
"name": "url-handler-napi",
|
||||
"version": "0.0.0-restored",
|
||||
"type": "module",
|
||||
"main": "./index.ts"
|
||||
}
|
||||
1295
src/QueryEngine.ts
Normal file
1295
src/QueryEngine.ts
Normal file
File diff suppressed because it is too large
Load Diff
125
src/Task.ts
Normal file
125
src/Task.ts
Normal file
@ -0,0 +1,125 @@
|
||||
import { randomBytes } from 'crypto'
|
||||
import type { AppState } from './state/AppState.js'
|
||||
import type { AgentId } from './types/ids.js'
|
||||
import { getTaskOutputPath } from './utils/task/diskOutput.js'
|
||||
|
||||
export type TaskType =
|
||||
| 'local_bash'
|
||||
| 'local_agent'
|
||||
| 'remote_agent'
|
||||
| 'in_process_teammate'
|
||||
| 'local_workflow'
|
||||
| 'monitor_mcp'
|
||||
| 'dream'
|
||||
|
||||
export type TaskStatus =
|
||||
| 'pending'
|
||||
| 'running'
|
||||
| 'completed'
|
||||
| 'failed'
|
||||
| 'killed'
|
||||
|
||||
/**
|
||||
* True when a task is in a terminal state and will not transition further.
|
||||
* Used to guard against injecting messages into dead teammates, evicting
|
||||
* finished tasks from AppState, and orphan-cleanup paths.
|
||||
*/
|
||||
export function isTerminalTaskStatus(status: TaskStatus): boolean {
|
||||
return status === 'completed' || status === 'failed' || status === 'killed'
|
||||
}
|
||||
|
||||
/** Handle returned when a task is spawned; `cleanup` is an optional teardown callback. */
export type TaskHandle = {
  taskId: string
  cleanup?: () => void
}

/** Updater-style setter for the app-wide state store. */
export type SetAppState = (f: (prev: AppState) => AppState) => void

/** Runtime context handed to task implementations. */
export type TaskContext = {
  abortController: AbortController
  getAppState: () => AppState
  setAppState: SetAppState
}
|
||||
|
||||
// Base fields shared by all task states
export type TaskStateBase = {
  id: string
  type: TaskType
  status: TaskStatus
  description: string
  toolUseId?: string
  // Epoch ms; set via Date.now() in createTaskStateBase
  startTime: number
  endTime?: number
  totalPausedMs?: number
  // On-disk path for task output (see getTaskOutputPath)
  outputFile: string
  outputOffset: number
  notified: boolean
}

/** Input for spawning a local shell task. */
export type LocalShellSpawnInput = {
  command: string
  description: string
  timeout?: number
  toolUseId?: string
  agentId?: AgentId
  /** UI display variant: description-as-label, dialog title, status bar pill. */
  kind?: 'bash' | 'monitor'
}

// What getTaskByType dispatches for: kill. spawn/render were never
// called polymorphically (removed in #22546). All six kill implementations
// use only setAppState — getAppState/abortController were dead weight.
export type Task = {
  name: string
  type: TaskType
  kill(taskId: string, setAppState: SetAppState): Promise<void>
}
|
||||
|
||||
// Task ID prefixes
|
||||
const TASK_ID_PREFIXES: Record<string, string> = {
|
||||
local_bash: 'b', // Keep as 'b' for backward compatibility
|
||||
local_agent: 'a',
|
||||
remote_agent: 'r',
|
||||
in_process_teammate: 't',
|
||||
local_workflow: 'w',
|
||||
monitor_mcp: 'm',
|
||||
dream: 'd',
|
||||
}
|
||||
|
||||
// Get task ID prefix
|
||||
function getTaskIdPrefix(type: TaskType): string {
|
||||
return TASK_ID_PREFIXES[type] ?? 'x'
|
||||
}
|
||||
|
||||
// Case-insensitive-safe alphabet (digits + lowercase) for task IDs.
|
||||
// 36^8 ≈ 2.8 trillion combinations, sufficient to resist brute-force symlink attacks.
|
||||
const TASK_ID_ALPHABET = '0123456789abcdefghijklmnopqrstuvwxyz'
|
||||
|
||||
export function generateTaskId(type: TaskType): string {
|
||||
const prefix = getTaskIdPrefix(type)
|
||||
const bytes = randomBytes(8)
|
||||
let id = prefix
|
||||
for (let i = 0; i < 8; i++) {
|
||||
id += TASK_ID_ALPHABET[bytes[i]! % TASK_ID_ALPHABET.length]
|
||||
}
|
||||
return id
|
||||
}
|
||||
|
||||
export function createTaskStateBase(
|
||||
id: string,
|
||||
type: TaskType,
|
||||
description: string,
|
||||
toolUseId?: string,
|
||||
): TaskStateBase {
|
||||
return {
|
||||
id,
|
||||
type,
|
||||
status: 'pending',
|
||||
description,
|
||||
toolUseId,
|
||||
startTime: Date.now(),
|
||||
outputFile: getTaskOutputPath(id),
|
||||
outputOffset: 0,
|
||||
notified: false,
|
||||
}
|
||||
}
|
||||
792
src/Tool.ts
Normal file
792
src/Tool.ts
Normal file
@ -0,0 +1,792 @@
|
||||
import type {
|
||||
ToolResultBlockParam,
|
||||
ToolUseBlockParam,
|
||||
} from '@anthropic-ai/sdk/resources/index.mjs'
|
||||
import type {
|
||||
ElicitRequestURLParams,
|
||||
ElicitResult,
|
||||
} from '@modelcontextprotocol/sdk/types.js'
|
||||
import type { UUID } from 'crypto'
|
||||
import type { z } from 'zod/v4'
|
||||
import type { Command } from './commands.js'
|
||||
import type { CanUseToolFn } from './hooks/useCanUseTool.js'
|
||||
import type { ThinkingConfig } from './utils/thinking.js'
|
||||
|
||||
/**
 * Loose JSON Schema shape for an object-typed tool input. The index
 * signature permits additional schema keywords/vendor keys.
 */
export type ToolInputJSONSchema = {
  [x: string]: unknown
  type: 'object'
  properties?: {
    [x: string]: unknown
  }
}
|
||||
|
||||
import type { Notification } from './context/notifications.js'
|
||||
import type {
|
||||
MCPServerConnection,
|
||||
ServerResource,
|
||||
} from './services/mcp/types.js'
|
||||
import type {
|
||||
AgentDefinition,
|
||||
AgentDefinitionsResult,
|
||||
} from './tools/AgentTool/loadAgentsDir.js'
|
||||
import type {
|
||||
AssistantMessage,
|
||||
AttachmentMessage,
|
||||
Message,
|
||||
ProgressMessage,
|
||||
SystemLocalCommandMessage,
|
||||
SystemMessage,
|
||||
UserMessage,
|
||||
} from './types/message.js'
|
||||
// Import permission types from centralized location to break import cycles
|
||||
// Import PermissionResult from centralized location to break import cycles
|
||||
import type {
|
||||
AdditionalWorkingDirectory,
|
||||
PermissionMode,
|
||||
PermissionResult,
|
||||
} from './types/permissions.js'
|
||||
// Import tool progress types from centralized location to break import cycles
|
||||
import type {
|
||||
AgentToolProgress,
|
||||
BashProgress,
|
||||
MCPProgress,
|
||||
REPLToolProgress,
|
||||
SkillToolProgress,
|
||||
TaskOutputProgress,
|
||||
ToolProgressData,
|
||||
WebSearchProgress,
|
||||
} from './types/tools.js'
|
||||
import type { FileStateCache } from './utils/fileStateCache.js'
|
||||
import type { DenialTrackingState } from './utils/permissions/denialTracking.js'
|
||||
import type { SystemPrompt } from './utils/systemPromptType.js'
|
||||
import type { ContentReplacementState } from './utils/toolResultStorage.js'
|
||||
|
||||
// Re-export progress types for backwards compatibility
|
||||
export type {
|
||||
AgentToolProgress,
|
||||
BashProgress,
|
||||
MCPProgress,
|
||||
REPLToolProgress,
|
||||
SkillToolProgress,
|
||||
TaskOutputProgress,
|
||||
WebSearchProgress,
|
||||
}
|
||||
|
||||
import type { SpinnerMode } from './components/Spinner.js'
|
||||
import type { QuerySource } from './constants/querySource.js'
|
||||
import type { SDKStatus } from './entrypoints/agentSdkTypes.js'
|
||||
import type { AppState } from './state/AppState.js'
|
||||
import type {
|
||||
HookProgress,
|
||||
PromptRequest,
|
||||
PromptResponse,
|
||||
} from './types/hooks.js'
|
||||
import type { AgentId } from './types/ids.js'
|
||||
import type { DeepImmutable } from './types/utils.js'
|
||||
import type { AttributionState } from './utils/commitAttribution.js'
|
||||
import type { FileHistoryState } from './utils/fileHistory.js'
|
||||
import type { Theme, ThemeName } from './utils/theme.js'
|
||||
|
||||
/** Identifies a query chain and this query's depth within it. */
export type QueryChainTracking = {
  chainId: string
  depth: number
}

/**
 * Outcome of a tool's validateInput: either success, or a failure with a
 * model-facing message and an error code.
 */
export type ValidationResult =
  | { result: true }
  | {
      result: false
      message: string
      errorCode: number
    }

/** Sets (or, with null, clears) tool-rendered JSX plus related display flags. */
export type SetToolJSXFn = (
  args: {
    jsx: React.ReactNode | null
    shouldHidePromptInput: boolean
    shouldContinueAnimation?: true
    showSpinner?: boolean
    isLocalJSXCommand?: boolean
    isImmediate?: boolean
    /** Set to true to clear a local JSX command (e.g., from its onDone callback) */
    clearLocalJSX?: boolean
  } | null,
) => void
|
||||
|
||||
// Import tool permission types from centralized location to break import cycles
|
||||
import type { ToolPermissionRulesBySource } from './types/permissions.js'
|
||||
|
||||
// Re-export for backwards compatibility
|
||||
export type { ToolPermissionRulesBySource }
|
||||
|
||||
// Apply DeepImmutable to the imported type.
/** Immutable snapshot of the session's tool-permission configuration. */
export type ToolPermissionContext = DeepImmutable<{
  mode: PermissionMode
  additionalWorkingDirectories: Map<string, AdditionalWorkingDirectory>
  alwaysAllowRules: ToolPermissionRulesBySource
  alwaysDenyRules: ToolPermissionRulesBySource
  alwaysAskRules: ToolPermissionRulesBySource
  isBypassPermissionsModeAvailable: boolean
  isAutoModeAvailable?: boolean
  strippedDangerousRules?: ToolPermissionRulesBySource
  /** When true, permission prompts are auto-denied (e.g., background agents that can't show UI) */
  shouldAvoidPermissionPrompts?: boolean
  /** When true, automated checks (classifier, hooks) are awaited before showing the permission dialog (coordinator workers) */
  awaitAutomatedChecksBeforeDialog?: boolean
  /** Stores the permission mode before model-initiated plan mode entry, so it can be restored on exit */
  prePlanMode?: PermissionMode
}>
|
||||
|
||||
export const getEmptyToolPermissionContext: () => ToolPermissionContext =
|
||||
() => ({
|
||||
mode: 'default',
|
||||
additionalWorkingDirectories: new Map(),
|
||||
alwaysAllowRules: {},
|
||||
alwaysDenyRules: {},
|
||||
alwaysAskRules: {},
|
||||
isBypassPermissionsModeAvailable: false,
|
||||
})
|
||||
|
||||
/** Progress events emitted while compacting conversation history. */
export type CompactProgressEvent =
  | {
      type: 'hooks_start'
      hookType: 'pre_compact' | 'post_compact' | 'session_start'
    }
  | { type: 'compact_start' }
  | { type: 'compact_end' }
||||
|
||||
/**
 * Per-query context threaded into every tool call: session options,
 * app-state accessors, the abort controller, and optional UI callbacks
 * (most of which are only wired in interactive REPL contexts).
 */
export type ToolUseContext = {
  options: {
    commands: Command[]
    debug: boolean
    mainLoopModel: string
    tools: Tools
    verbose: boolean
    thinkingConfig: ThinkingConfig
    mcpClients: MCPServerConnection[]
    mcpResources: Record<string, ServerResource[]>
    isNonInteractiveSession: boolean
    agentDefinitions: AgentDefinitionsResult
    maxBudgetUsd?: number
    /** Custom system prompt that replaces the default system prompt */
    customSystemPrompt?: string
    /** Additional system prompt appended after the main system prompt */
    appendSystemPrompt?: string
    /** Override querySource for analytics tracking */
    querySource?: QuerySource
    /** Optional callback to get the latest tools (e.g., after MCP servers connect mid-query) */
    refreshTools?: () => Tools
  }
  abortController: AbortController
  readFileState: FileStateCache
  getAppState(): AppState
  setAppState(f: (prev: AppState) => AppState): void
  /**
   * Always-shared setAppState for session-scoped infrastructure (background
   * tasks, session hooks). Unlike setAppState, which is no-op for async agents
   * (see createSubagentContext), this always reaches the root store so agents
   * at any nesting depth can register/clean up infrastructure that outlives
   * a single turn. Only set by createSubagentContext; main-thread contexts
   * fall back to setAppState.
   */
  setAppStateForTasks?: (f: (prev: AppState) => AppState) => void
  /**
   * Optional handler for URL elicitations triggered by tool call errors (-32042).
   * In print/SDK mode, this delegates to structuredIO.handleElicitation.
   * In REPL mode, this is undefined and the queue-based UI path is used.
   */
  handleElicitation?: (
    serverName: string,
    params: ElicitRequestURLParams,
    signal: AbortSignal,
  ) => Promise<ElicitResult>
  setToolJSX?: SetToolJSXFn
  addNotification?: (notif: Notification) => void
  /** Append a UI-only system message to the REPL message list. Stripped at the
   * normalizeMessagesForAPI boundary — the Exclude<> makes that type-enforced. */
  appendSystemMessage?: (
    msg: Exclude<SystemMessage, SystemLocalCommandMessage>,
  ) => void
  /** Send an OS-level notification (iTerm2, Kitty, Ghostty, bell, etc.) */
  sendOSNotification?: (opts: {
    message: string
    notificationType: string
  }) => void
  nestedMemoryAttachmentTriggers?: Set<string>
  /**
   * CLAUDE.md paths already injected as nested_memory attachments this
   * session. Dedup for memoryFilesToAttachments — readFileState is an LRU
   * that evicts entries in busy sessions, so its .has() check alone can
   * re-inject the same CLAUDE.md dozens of times.
   */
  loadedNestedMemoryPaths?: Set<string>
  dynamicSkillDirTriggers?: Set<string>
  /** Skill names surfaced via skill_discovery this session. Telemetry only (feeds was_discovered). */
  discoveredSkillNames?: Set<string>
  userModified?: boolean
  setInProgressToolUseIDs: (f: (prev: Set<string>) => Set<string>) => void
  /** Only wired in interactive (REPL) contexts; SDK/QueryEngine don't set this. */
  setHasInterruptibleToolInProgress?: (v: boolean) => void
  setResponseLength: (f: (prev: number) => number) => void
  /** Ant-only: push a new API metrics entry for OTPS tracking.
   * Called by subagent streaming when a new API request starts. */
  pushApiMetricsEntry?: (ttftMs: number) => void
  setStreamMode?: (mode: SpinnerMode) => void
  onCompactProgress?: (event: CompactProgressEvent) => void
  setSDKStatus?: (status: SDKStatus) => void
  openMessageSelector?: () => void
  updateFileHistoryState: (
    updater: (prev: FileHistoryState) => FileHistoryState,
  ) => void
  updateAttributionState: (
    updater: (prev: AttributionState) => AttributionState,
  ) => void
  setConversationId?: (id: UUID) => void
  agentId?: AgentId // Only set for subagents; use getSessionId() for session ID. Hooks use this to distinguish subagent calls.
  agentType?: string // Subagent type name. For the main thread's --agent type, hooks fall back to getMainThreadAgentType().
  /** When true, canUseTool must always be called even when hooks auto-approve.
   * Used by speculation for overlay file path rewriting. */
  requireCanUseTool?: boolean
  messages: Message[]
  fileReadingLimits?: {
    maxTokens?: number
    maxSizeBytes?: number
  }
  globLimits?: {
    maxResults?: number
  }
  // Per-tool-use accept/reject decisions, keyed by tool_use id
  toolDecisions?: Map<
    string,
    {
      source: string
      decision: 'accept' | 'reject'
      timestamp: number
    }
  >
  queryTracking?: QueryChainTracking
  /** Callback factory for requesting interactive prompts from the user.
   * Returns a prompt callback bound to the given source name.
   * Only available in interactive (REPL) contexts. */
  requestPrompt?: (
    sourceName: string,
    toolInputSummary?: string | null,
  ) => (request: PromptRequest) => Promise<PromptResponse>
  toolUseId?: string
  criticalSystemReminder_EXPERIMENTAL?: string
  /** When true, preserve toolUseResult on messages even for subagents.
   * Used by in-process teammates whose transcripts are viewable by the user. */
  preserveToolUseResults?: boolean
  /** Local denial tracking state for async subagents whose setAppState is a
   * no-op. Without this, the denial counter never accumulates and the
   * fallback-to-prompting threshold is never reached. Mutable — the
   * permissions code updates it in place. */
  localDenialTracking?: DenialTrackingState
  /**
   * Per-conversation-thread content replacement state for the tool result
   * budget. When present, query.ts applies the aggregate tool result budget.
   * Main thread: REPL provisions once (never resets — stale UUID keys
   * are inert). Subagents: createSubagentContext clones the parent's state
   * by default (cache-sharing forks need identical decisions), or
   * resumeAgentBackground threads one reconstructed from sidechain records.
   */
  contentReplacementState?: ContentReplacementState
  /**
   * Parent's rendered system prompt bytes, frozen at turn start.
   * Used by fork subagents to share the parent's prompt cache — re-calling
   * getSystemPrompt() at fork-spawn time can diverge (GrowthBook cold→warm)
   * and bust the cache. See forkSubagent.ts.
   */
  renderedSystemPrompt?: SystemPrompt
}
|
||||
|
||||
// Re-export ToolProgressData from centralized location
|
||||
export type { ToolProgressData }
|
||||
|
||||
/** Any progress payload — tool-emitted or hook-emitted. */
export type Progress = ToolProgressData | HookProgress

/** A progress payload tagged with the tool_use id that produced it. */
export type ToolProgress<P extends ToolProgressData> = {
  toolUseID: string
  data: P
}
|
||||
|
||||
export function filterToolProgressMessages(
|
||||
progressMessagesForMessage: ProgressMessage[],
|
||||
): ProgressMessage<ToolProgressData>[] {
|
||||
return progressMessagesForMessage.filter(
|
||||
(msg): msg is ProgressMessage<ToolProgressData> =>
|
||||
msg.data?.type !== 'hook_progress',
|
||||
)
|
||||
}
|
||||
|
||||
/** Value returned by a tool's call(): its data plus optional extras. */
export type ToolResult<T> = {
  data: T
  /** Extra messages to append to the conversation alongside the result. */
  newMessages?: (
    | UserMessage
    | AssistantMessage
    | AttachmentMessage
    | SystemMessage
  )[]
  // contextModifier is only honored for tools that aren't concurrency safe.
  contextModifier?: (context: ToolUseContext) => ToolUseContext
  /** MCP protocol metadata (structuredContent, _meta) to pass through to SDK consumers */
  mcpMeta?: {
    _meta?: Record<string, unknown>
    structuredContent?: Record<string, unknown>
  }
}

/** Progress-reporting callback passed to a tool's call(). */
export type ToolCallProgress<P extends ToolProgressData = ToolProgressData> = (
  progress: ToolProgress<P>,
) => void

// Type for any schema that outputs an object with string keys
export type AnyObject = z.ZodType<{ [key: string]: unknown }>
|
||||
|
||||
/**
|
||||
* Checks if a tool matches the given name (primary name or alias).
|
||||
*/
|
||||
export function toolMatchesName(
|
||||
tool: { name: string; aliases?: string[] },
|
||||
name: string,
|
||||
): boolean {
|
||||
return tool.name === name || (tool.aliases?.includes(name) ?? false)
|
||||
}
|
||||
|
||||
/**
|
||||
* Finds a tool by name or alias from a list of tools.
|
||||
*/
|
||||
export function findToolByName(tools: Tools, name: string): Tool | undefined {
|
||||
return tools.find(t => toolMatchesName(t, name))
|
||||
}
|
||||
|
||||
/**
 * Contract every tool implements: metadata, input schema, permission and
 * validation hooks, execution (call), and the REPL rendering surface.
 * Input is a zod object schema, Output the call() result payload, P the
 * progress-message payload type.
 */
export type Tool<
  Input extends AnyObject = AnyObject,
  Output = unknown,
  P extends ToolProgressData = ToolProgressData,
> = {
  /**
   * Optional aliases for backwards compatibility when a tool is renamed.
   * The tool can be looked up by any of these names in addition to its primary name.
   */
  aliases?: string[]
  /**
   * One-line capability phrase used by ToolSearch for keyword matching.
   * Helps the model find this tool via keyword search when it's deferred.
   * 3–10 words, no trailing period.
   * Prefer terms not already in the tool name (e.g. 'jupyter' for NotebookEdit).
   */
  searchHint?: string
  /** Execute the tool. Resolves with the result; progress flows through onProgress. */
  call(
    args: z.infer<Input>,
    context: ToolUseContext,
    canUseTool: CanUseToolFn,
    parentMessage: AssistantMessage,
    onProgress?: ToolCallProgress<P>,
  ): Promise<ToolResult<Output>>
  /** Model-facing description of the tool for the given input/options. */
  description(
    input: z.infer<Input>,
    options: {
      isNonInteractiveSession: boolean
      toolPermissionContext: ToolPermissionContext
      tools: Tools
    },
  ): Promise<string>
  readonly inputSchema: Input
  // Type for MCP tools that can specify their input schema directly in JSON Schema format
  // rather than converting from Zod schema
  readonly inputJSONSchema?: ToolInputJSONSchema
  // Optional because TungstenTool doesn't define this. TODO: Make it required.
  // When we do that, we can also go through and make this a bit more type-safe.
  outputSchema?: z.ZodType<unknown>
  // Optional equivalence check between two inputs (e.g. for dedup)
  inputsEquivalent?(a: z.infer<Input>, b: z.infer<Input>): boolean
  isConcurrencySafe(input: z.infer<Input>): boolean
  isEnabled(): boolean
  isReadOnly(input: z.infer<Input>): boolean
  /** Defaults to false. Only set when the tool performs irreversible operations (delete, overwrite, send). */
  isDestructive?(input: z.infer<Input>): boolean
  /**
   * What should happen when the user submits a new message while this tool
   * is running.
   *
   * - `'cancel'` — stop the tool and discard its result
   * - `'block'` — keep running; the new message waits
   *
   * Defaults to `'block'` when not implemented.
   */
  interruptBehavior?(): 'cancel' | 'block'
  /**
   * Returns information about whether this tool use is a search or read operation
   * that should be collapsed into a condensed display in the UI. Examples include
   * file searching (Grep, Glob), file reading (Read), and bash commands like find,
   * grep, wc, etc.
   *
   * Returns an object indicating whether the operation is a search or read operation:
   * - `isSearch: true` for search operations (grep, find, glob patterns)
   * - `isRead: true` for read operations (cat, head, tail, file read)
   * - `isList: true` for directory-listing operations (ls, tree, du)
   * - All can be false if the operation shouldn't be collapsed
   */
  isSearchOrReadCommand?(input: z.infer<Input>): {
    isSearch: boolean
    isRead: boolean
    isList?: boolean
  }
  isOpenWorld?(input: z.infer<Input>): boolean
  requiresUserInteraction?(): boolean
  isMcp?: boolean
  isLsp?: boolean
  /**
   * When true, this tool is deferred (sent with defer_loading: true) and requires
   * ToolSearch to be used before it can be called.
   */
  readonly shouldDefer?: boolean
  /**
   * When true, this tool is never deferred — its full schema appears in the
   * initial prompt even when ToolSearch is enabled. For MCP tools, set via
   * `_meta['anthropic/alwaysLoad']`. Use for tools the model must see on
   * turn 1 without a ToolSearch round-trip.
   */
  readonly alwaysLoad?: boolean
  /**
   * For MCP tools: the server and tool names as received from the MCP server (unnormalized).
   * Present on all MCP tools regardless of whether `name` is prefixed (mcp__server__tool)
   * or unprefixed (CLAUDE_AGENT_SDK_MCP_NO_PREFIX mode).
   */
  mcpInfo?: { serverName: string; toolName: string }
  readonly name: string
  /**
   * Maximum size in characters for tool result before it gets persisted to disk.
   * When exceeded, the result is saved to a file and Claude receives a preview
   * with the file path instead of the full content.
   *
   * Set to Infinity for tools whose output must never be persisted (e.g. Read,
   * where persisting creates a circular Read→file→Read loop and the tool
   * already self-bounds via its own limits).
   */
  maxResultSizeChars: number
  /**
   * When true, enables strict mode for this tool, which causes the API to
   * more strictly adhere to tool instructions and parameter schemas.
   * Only applied when the tengu_tool_pear is enabled.
   */
  readonly strict?: boolean

  /**
   * Called on copies of tool_use input before observers see it (SDK stream,
   * transcript, canUseTool, PreToolUse/PostToolUse hooks). Mutate in place
   * to add legacy/derived fields. Must be idempotent. The original API-bound
   * input is never mutated (preserves prompt cache). Not re-applied when a
   * hook/permission returns a fresh updatedInput — those own their shape.
   */
  backfillObservableInput?(input: Record<string, unknown>): void

  /**
   * Determines if this tool is allowed to run with this input in the current context.
   * It informs the model of why the tool use failed, and does not directly display any UI.
   * @param input
   * @param context
   */
  validateInput?(
    input: z.infer<Input>,
    context: ToolUseContext,
  ): Promise<ValidationResult>

  /**
   * Determines if the user is asked for permission. Only called after validateInput() passes.
   * General permission logic is in permissions.ts. This method contains tool-specific logic.
   * @param input
   * @param context
   */
  checkPermissions(
    input: z.infer<Input>,
    context: ToolUseContext,
  ): Promise<PermissionResult>

  // Optional method for tools that operate on a file path
  getPath?(input: z.infer<Input>): string

  /**
   * Prepare a matcher for hook `if` conditions (permission-rule patterns like
   * "git *" from "Bash(git *)"). Called once per hook-input pair; any
   * expensive parsing happens here. Returns a closure that is called per
   * hook pattern. If not implemented, only tool-name-level matching works.
   */
  preparePermissionMatcher?(
    input: z.infer<Input>,
  ): Promise<(pattern: string) => boolean>

  /** Model-facing prompt text describing how to use this tool. */
  prompt(options: {
    getToolPermissionContext: () => Promise<ToolPermissionContext>
    tools: Tools
    agents: AgentDefinition[]
    allowedAgentTypes?: string[]
  }): Promise<string>
  userFacingName(input: Partial<z.infer<Input>> | undefined): string
  userFacingNameBackgroundColor?(
    input: Partial<z.infer<Input>> | undefined,
  ): keyof Theme | undefined
  /**
   * Transparent wrappers (e.g. REPL) delegate all rendering to their progress
   * handler, which emits native-looking blocks for each inner tool call.
   * The wrapper itself shows nothing.
   */
  isTransparentWrapper?(): boolean
  /**
   * Returns a short string summary of this tool use for display in compact views.
   * @param input The tool input
   * @returns A short string summary, or null to not display
   */
  getToolUseSummary?(input: Partial<z.infer<Input>> | undefined): string | null
  /**
   * Returns a human-readable present-tense activity description for spinner display.
   * Example: "Reading src/foo.ts", "Running bun test", "Searching for pattern"
   * @param input The tool input
   * @returns Activity description string, or null to fall back to tool name
   */
  getActivityDescription?(
    input: Partial<z.infer<Input>> | undefined,
  ): string | null
  /**
   * Returns a compact representation of this tool use for the auto-mode
   * security classifier. Examples: `ls -la` for Bash, `/tmp/x: new content`
   * for Edit. Return '' to skip this tool in the classifier transcript
   * (e.g. tools with no security relevance). May return an object to avoid
   * double-encoding when the caller JSON-wraps the value.
   */
  toAutoClassifierInput(input: z.infer<Input>): unknown
  /** Serialize the tool output into the API tool_result block. */
  mapToolResultToToolResultBlockParam(
    content: Output,
    toolUseID: string,
  ): ToolResultBlockParam
  /**
   * Optional. When omitted, the tool result renders nothing (same as returning
   * null). Omit for tools whose results are surfaced elsewhere (e.g., TodoWrite
   * updates the todo panel, not the transcript).
   */
  renderToolResultMessage?(
    content: Output,
    progressMessagesForMessage: ProgressMessage<P>[],
    options: {
      style?: 'condensed'
      theme: ThemeName
      tools: Tools
      verbose: boolean
      isTranscriptMode?: boolean
      isBriefOnly?: boolean
      /** Original tool_use input, when available. Useful for compact result
       * summaries that reference what was requested (e.g. "Sent to #foo"). */
      input?: unknown
    },
  ): React.ReactNode
  /**
   * Flattened text of what renderToolResultMessage shows IN TRANSCRIPT
   * MODE (verbose=true, isTranscriptMode=true). For transcript search
   * indexing: the index counts occurrences in this string, the highlight
   * overlay scans the actual screen buffer. For count ≡ highlight, this
   * must return the text that ends up visible — not the model-facing
   * serialization from mapToolResultToToolResultBlockParam (which adds
   * system-reminders, persisted-output wrappers).
   *
   * Chrome can be skipped (under-count is fine). "Found 3 files in 12ms"
   * isn't worth indexing. Phantoms are not fine — text that's claimed
   * here but doesn't render is a count≠highlight bug.
   *
   * Optional: omitted → field-name heuristic in transcriptSearch.ts.
   * Drift caught by test/utils/transcriptSearch.renderFidelity.test.tsx
   * which renders sample outputs and flags text that's indexed-but-not-
   * rendered (phantom) or rendered-but-not-indexed (under-count warning).
   */
  extractSearchText?(out: Output): string
  /**
   * Render the tool use message. Note that `input` is partial because we render
   * the message as soon as possible, possibly before tool parameters have fully
   * streamed in.
   */
  renderToolUseMessage(
    input: Partial<z.infer<Input>>,
    options: { theme: ThemeName; verbose: boolean; commands?: Command[] },
  ): React.ReactNode
  /**
   * Returns true when the non-verbose rendering of this output is truncated
   * (i.e., clicking to expand would reveal more content). Gates
   * click-to-expand in fullscreen — only messages where verbose actually
   * shows more get a hover/click affordance. Unset means never truncated.
   */
  isResultTruncated?(output: Output): boolean
  /**
   * Renders an optional tag to display after the tool use message.
   * Used for additional metadata like timeout, model, resume ID, etc.
   * Returns null to not display anything.
   */
  renderToolUseTag?(input: Partial<z.infer<Input>>): React.ReactNode
  /**
   * Optional. When omitted, no progress UI is shown while the tool runs.
   */
  renderToolUseProgressMessage?(
    progressMessagesForMessage: ProgressMessage<P>[],
    options: {
      tools: Tools
      verbose: boolean
      terminalSize?: { columns: number; rows: number }
      inProgressToolCallCount?: number
      isTranscriptMode?: boolean
    },
  ): React.ReactNode
  renderToolUseQueuedMessage?(): React.ReactNode
  /**
   * Optional. When omitted, falls back to <FallbackToolUseRejectedMessage />.
   * Only define this for tools that need custom rejection UI (e.g., file edits
   * that show the rejected diff).
   */
  renderToolUseRejectedMessage?(
    input: z.infer<Input>,
    options: {
      columns: number
      messages: Message[]
      style?: 'condensed'
      theme: ThemeName
      tools: Tools
      verbose: boolean
      progressMessagesForMessage: ProgressMessage<P>[]
      isTranscriptMode?: boolean
    },
  ): React.ReactNode
  /**
   * Optional. When omitted, falls back to <FallbackToolUseErrorMessage />.
   * Only define this for tools that need custom error UI (e.g., search tools
   * that show "File not found" instead of the raw error).
   */
  renderToolUseErrorMessage?(
    result: ToolResultBlockParam['content'],
    options: {
      progressMessagesForMessage: ProgressMessage<P>[]
      tools: Tools
      verbose: boolean
      isTranscriptMode?: boolean
    },
  ): React.ReactNode

  /**
   * Renders multiple parallel uses of this tool as a group (non-verbose mode
   * only). In verbose mode, individual tool uses render at their original
   * positions.
   * @returns React node to render, or null to fall back to individual rendering
   */
  renderGroupedToolUse?(
    toolUses: Array<{
      param: ToolUseBlockParam
      isResolved: boolean
      isError: boolean
      isInProgress: boolean
      progressMessages: ProgressMessage<P>[]
      result?: {
        param: ToolResultBlockParam
        output: unknown
      }
    }>,
    options: {
      shouldAnimate: boolean
      tools: Tools
    },
  ): React.ReactNode | null
}
|
||||
|
||||
/**
 * A collection of tools. Use this type instead of `Tool[]` to make it easier
 * to track where tool sets are assembled, passed, and filtered across the codebase.
 */
export type Tools = readonly Tool[]
|
||||
|
||||
/**
 * Methods that `buildTool` supplies a default for. A `ToolDef` may omit these;
 * the resulting `Tool` always has them. Keep this list in sync with the keys
 * of `TOOL_DEFAULTS` below.
 */
type DefaultableToolKeys =
  | 'isEnabled'
  | 'isConcurrencySafe'
  | 'isReadOnly'
  | 'isDestructive'
  | 'checkPermissions'
  | 'toAutoClassifierInput'
  | 'userFacingName'
|
||||
/**
 * Tool definition accepted by `buildTool`. Same shape as `Tool` but with the
 * defaultable methods optional — `buildTool` fills them in so callers always
 * see a complete `Tool`.
 */
export type ToolDef<
  Input extends AnyObject = AnyObject,
  Output = unknown,
  P extends ToolProgressData = ToolProgressData,
  // Omit strips the defaultable keys; Partial<Pick<...>> re-adds them optional.
> = Omit<Tool<Input, Output, P>, DefaultableToolKeys> &
  Partial<Pick<Tool<Input, Output, P>, DefaultableToolKeys>>
|
||||
|
||||
/**
 * Type-level spread mirroring `{ ...TOOL_DEFAULTS, ...def }`. For each
 * defaultable key: if D provides it (required), D's type wins; if D omits
 * it or has it optional (inherited from Partial<> in the constraint), the
 * default fills in. All other keys come from D verbatim — preserving arity,
 * optional presence, and literal types exactly as `satisfies Tool` did.
 */
type BuiltTool<D> = Omit<D, DefaultableToolKeys> & {
  [K in DefaultableToolKeys]-?: K extends keyof D
    ? undefined extends D[K]
      ? ToolDefaults[K] // key present but optional in D → default's type
      : D[K] // key required in D → D's type wins
    : ToolDefaults[K] // key absent from D → default's type
}
|
||||
|
||||
/**
 * Build a complete `Tool` from a partial definition, filling in safe defaults
 * for the commonly-stubbed methods. All tool exports should go through this so
 * that defaults live in one place and callers never need `?.() ?? default`.
 *
 * Defaults (fail-closed where it matters):
 * - `isEnabled` → `true`
 * - `isConcurrencySafe` → `false` (assume not safe)
 * - `isReadOnly` → `false` (assume writes)
 * - `isDestructive` → `false`
 * - `checkPermissions` → `{ behavior: 'allow', updatedInput }` (defer to general permission system)
 * - `toAutoClassifierInput` → `''` (skip classifier — security-relevant tools must override)
 * - `userFacingName` → `name`
 */
const TOOL_DEFAULTS = {
  isEnabled: () => true,
  isConcurrencySafe: (_input?: unknown) => false,
  isReadOnly: (_input?: unknown) => false,
  isDestructive: (_input?: unknown) => false,
  // Allow here means "no tool-specific veto"; the general permission system
  // still runs downstream with the (unmodified) input.
  checkPermissions: (
    input: { [key: string]: unknown },
    _ctx?: ToolUseContext,
  ): Promise<PermissionResult> =>
    Promise.resolve({ behavior: 'allow', updatedInput: input }),
  toAutoClassifierInput: (_input?: unknown) => '',
  // Placeholder only: buildTool layers `userFacingName: () => def.name` on top
  // of these defaults, so this '' never becomes the effective default — it
  // exists to give ToolDefaults (and thus BuiltTool) the right key/arity.
  userFacingName: (_input?: unknown) => '',
}
|
||||
|
||||
// The defaults type is the ACTUAL shape of TOOL_DEFAULTS (optional params so
// both 0-arg and full-arg call sites type-check — stubs varied in arity and
// tests relied on that), not the interface's strict signatures.
type ToolDefaults = typeof TOOL_DEFAULTS
|
||||
|
||||
// D infers the concrete object-literal type from the call site. The
// constraint provides contextual typing for method parameters; `any` in
// constraint position is structural and never leaks into the return type.
// BuiltTool<D> mirrors runtime `{...TOOL_DEFAULTS, ...def}` at the type level.
// eslint-disable-next-line @typescript-eslint/no-explicit-any
type AnyToolDef = ToolDef<any, any, any>
|
||||
|
||||
export function buildTool<D extends AnyToolDef>(def: D): BuiltTool<D> {
|
||||
// The runtime spread is straightforward; the `as` bridges the gap between
|
||||
// the structural-any constraint and the precise BuiltTool<D> return. The
|
||||
// type semantics are proven by the 0-error typecheck across all 60+ tools.
|
||||
return {
|
||||
...TOOL_DEFAULTS,
|
||||
userFacingName: () => def.name,
|
||||
...def,
|
||||
} as BuiltTool<D>
|
||||
}
|
||||
14
src/assistant/index.ts
Normal file
14
src/assistant/index.ts
Normal file
@ -0,0 +1,14 @@
|
||||
function readAssistantModeFlag(): boolean {
|
||||
return (
|
||||
process.env.CLAUDE_CODE_ASSISTANT_MODE === '1' ||
|
||||
process.env.CLAUDE_CODE_ASSISTANT_MODE === 'true'
|
||||
)
|
||||
}
|
||||
|
||||
export function isAssistantMode(): boolean {
|
||||
return readAssistantModeFlag()
|
||||
}
|
||||
|
||||
export function isAssistantModeEnabled(): boolean {
|
||||
return readAssistantModeFlag()
|
||||
}
|
||||
3
src/assistant/sessionDiscovery.ts
Normal file
3
src/assistant/sessionDiscovery.ts
Normal file
@ -0,0 +1,3 @@
|
||||
export async function discoverAssistantSessions() {
|
||||
return []
|
||||
}
|
||||
87
src/assistant/sessionHistory.ts
Normal file
87
src/assistant/sessionHistory.ts
Normal file
@ -0,0 +1,87 @@
|
||||
import axios from 'axios'
|
||||
import { getOauthConfig } from '../constants/oauth.js'
|
||||
import type { SDKMessage } from '../entrypoints/agentSdkTypes.js'
|
||||
import { logForDebugging } from '../utils/debug.js'
|
||||
import { getOAuthHeaders, prepareApiRequest } from '../utils/teleport/api.js'
|
||||
|
||||
/** Default number of session events requested per history page. */
export const HISTORY_PAGE_SIZE = 100
|
||||
|
||||
/** One page of session history, as consumed by the UI. */
export type HistoryPage = {
  /** Chronological order within the page. */
  events: SDKMessage[]
  /** Oldest event ID in this page → before_id cursor for next-older page. */
  firstId: string | null
  /** true = older events exist. */
  hasMore: boolean
}
|
||||
|
||||
/** Wire shape of the session-events endpoint (cursor-paginated list). */
type SessionEventsResponse = {
  data: SDKMessage[]
  has_more: boolean
  first_id: string | null
  last_id: string | null
}
|
||||
|
||||
/** Resolved auth context, prepared once and reused across page fetches. */
export type HistoryAuthCtx = {
  /** Fully-qualified events endpoint for one session. */
  baseUrl: string
  /** OAuth + beta + organization headers for that endpoint. */
  headers: Record<string, string>
}
|
||||
|
||||
/** Prepare auth + headers + base URL once, reuse across pages. */
|
||||
export async function createHistoryAuthCtx(
|
||||
sessionId: string,
|
||||
): Promise<HistoryAuthCtx> {
|
||||
const { accessToken, orgUUID } = await prepareApiRequest()
|
||||
return {
|
||||
baseUrl: `${getOauthConfig().BASE_API_URL}/v1/sessions/${sessionId}/events`,
|
||||
headers: {
|
||||
...getOAuthHeaders(accessToken),
|
||||
'anthropic-beta': 'ccr-byoc-2025-07-29',
|
||||
'x-organization-uuid': orgUUID,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
async function fetchPage(
|
||||
ctx: HistoryAuthCtx,
|
||||
params: Record<string, string | number | boolean>,
|
||||
label: string,
|
||||
): Promise<HistoryPage | null> {
|
||||
const resp = await axios
|
||||
.get<SessionEventsResponse>(ctx.baseUrl, {
|
||||
headers: ctx.headers,
|
||||
params,
|
||||
timeout: 15000,
|
||||
validateStatus: () => true,
|
||||
})
|
||||
.catch(() => null)
|
||||
if (!resp || resp.status !== 200) {
|
||||
logForDebugging(`[${label}] HTTP ${resp?.status ?? 'error'}`)
|
||||
return null
|
||||
}
|
||||
return {
|
||||
events: Array.isArray(resp.data.data) ? resp.data.data : [],
|
||||
firstId: resp.data.first_id,
|
||||
hasMore: resp.data.has_more,
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Newest page: last `limit` events, chronological, via anchor_to_latest.
|
||||
* has_more=true means older events exist.
|
||||
*/
|
||||
export async function fetchLatestEvents(
|
||||
ctx: HistoryAuthCtx,
|
||||
limit = HISTORY_PAGE_SIZE,
|
||||
): Promise<HistoryPage | null> {
|
||||
return fetchPage(ctx, { limit, anchor_to_latest: true }, 'fetchLatestEvents')
|
||||
}
|
||||
|
||||
/** Older page: events immediately before `beforeId` cursor. */
|
||||
export async function fetchOlderEvents(
|
||||
ctx: HistoryAuthCtx,
|
||||
beforeId: string,
|
||||
limit = HISTORY_PAGE_SIZE,
|
||||
): Promise<HistoryPage | null> {
|
||||
return fetchPage(ctx, { limit, before_id: beforeId }, 'fetchOlderEvents')
|
||||
}
|
||||
1758
src/bootstrap/state.ts
Normal file
1758
src/bootstrap/state.ts
Normal file
File diff suppressed because it is too large
Load Diff
539
src/bridge/bridgeApi.ts
Normal file
539
src/bridge/bridgeApi.ts
Normal file
@ -0,0 +1,539 @@
|
||||
import axios from 'axios'
|
||||
|
||||
import { debugBody, extractErrorDetail } from './debugUtils.js'
|
||||
import {
|
||||
BRIDGE_LOGIN_INSTRUCTION,
|
||||
type BridgeApiClient,
|
||||
type BridgeConfig,
|
||||
type PermissionResponseEvent,
|
||||
type WorkResponse,
|
||||
} from './types.js'
|
||||
|
||||
/** Injected dependencies for `createBridgeApiClient`. */
type BridgeApiDeps = {
  /** API origin; interpolated as `${baseUrl}/v1/...` on every call. */
  baseUrl: string
  /** Current OAuth access token, or undefined when not logged in. */
  getAccessToken: () => string | undefined
  /** Sent as x-environment-runner-version on every request. */
  runnerVersion: string
  /** Optional sink for `[bridge:api]` debug lines. */
  onDebug?: (msg: string) => void
  /**
   * Called on 401 to attempt OAuth token refresh. Returns true if refreshed,
   * in which case the request is retried once. Injected because
   * handleOAuth401Error from utils/auth.ts transitively pulls in config.ts →
   * file.ts → permissions/filesystem.ts → sessionStorage.ts → commands.ts
   * (~1300 modules). Daemon callers using env-var tokens omit this — their
   * tokens don't refresh, so 401 goes straight to BridgeFatalError.
   */
  onAuth401?: (staleAccessToken: string) => Promise<boolean>
  /**
   * Returns the trusted device token to send as X-Trusted-Device-Token on
   * bridge API calls. Bridge sessions have SecurityTier=ELEVATED on the
   * server (CCR v2); when the server's enforcement flag is on,
   * ConnectBridgeWorker requires a trusted device at JWT-issuance.
   * Optional — when absent or returning undefined, the header is omitted
   * and the server falls through to its flag-off/no-op path. The CLI-side
   * gate is tengu_sessions_elevated_auth_enforcement (see trustedDevice.ts).
   */
  getTrustedDeviceToken?: () => string | undefined
}
|
||||
|
||||
// anthropic-beta header value sent on every bridge/environments API call.
const BETA_HEADER = 'environments-2025-11-01'
|
||||
|
||||
/** Allowlist pattern for server-provided IDs used in URL path segments. */
|
||||
const SAFE_ID_PATTERN = /^[a-zA-Z0-9_-]+$/
|
||||
|
||||
/**
|
||||
* Validate that a server-provided ID is safe to interpolate into a URL path.
|
||||
* Prevents path traversal (e.g. `../../admin`) and injection via IDs that
|
||||
* contain slashes, dots, or other special characters.
|
||||
*/
|
||||
export function validateBridgeId(id: string, label: string): string {
|
||||
if (!id || !SAFE_ID_PATTERN.test(id)) {
|
||||
throw new Error(`Invalid ${label}: contains unsafe characters`)
|
||||
}
|
||||
return id
|
||||
}
|
||||
|
||||
/** Fatal bridge errors that should not be retried (e.g. auth failures). */
|
||||
export class BridgeFatalError extends Error {
|
||||
readonly status: number
|
||||
/** Server-provided error type, e.g. "environment_expired". */
|
||||
readonly errorType: string | undefined
|
||||
constructor(message: string, status: number, errorType?: string) {
|
||||
super(message)
|
||||
this.name = 'BridgeFatalError'
|
||||
this.status = status
|
||||
this.errorType = errorType
|
||||
}
|
||||
}
|
||||
|
||||
/**
 * Construct the bridge API client used by Remote Control.
 *
 * Every endpoint shares the same header set (bearer credential, anthropic
 * version + beta, runner version, optional trusted-device token) and the same
 * error funnel: responses go through `handleErrorStatus`, which throws
 * `BridgeFatalError` for non-retryable statuses.
 *
 * OAuth-authenticated endpoints (register/stop/deregister/archive/reconnect)
 * additionally get one retry-after-refresh on 401 via `withOAuthRetry`;
 * secret-authenticated endpoints (poll/ack/heartbeat/events) do not.
 */
export function createBridgeApiClient(deps: BridgeApiDeps): BridgeApiClient {
  // Forward to the injected sink when present; otherwise a no-op.
  function debug(msg: string): void {
    deps.onDebug?.(msg)
  }

  // Counts back-to-back empty poll responses so the debug log shows roughly
  // one line per EMPTY_POLL_LOG_INTERVAL empty polls instead of one per poll.
  let consecutiveEmptyPolls = 0
  const EMPTY_POLL_LOG_INTERVAL = 100

  // Common headers. `accessToken` is whichever bearer credential the endpoint
  // uses: OAuth token, environment secret, or session token.
  function getHeaders(accessToken: string): Record<string, string> {
    const headers: Record<string, string> = {
      Authorization: `Bearer ${accessToken}`,
      'Content-Type': 'application/json',
      'anthropic-version': '2023-06-01',
      'anthropic-beta': BETA_HEADER,
      'x-environment-runner-version': deps.runnerVersion,
    }
    const deviceToken = deps.getTrustedDeviceToken?.()
    if (deviceToken) {
      headers['X-Trusted-Device-Token'] = deviceToken
    }
    return headers
  }

  // Resolve the current OAuth token or fail with the login instruction.
  function resolveAuth(): string {
    const accessToken = deps.getAccessToken()
    if (!accessToken) {
      throw new Error(BRIDGE_LOGIN_INSTRUCTION)
    }
    return accessToken
  }

  /**
   * Execute an OAuth-authenticated request with a single retry on 401.
   * On 401, attempts token refresh via handleOAuth401Error (same pattern as
   * withRetry.ts for v1/messages). If refresh succeeds, retries the request
   * once with the new token. If refresh fails or the retry also returns 401,
   * the 401 response is returned for handleErrorStatus to throw BridgeFatalError.
   */
  async function withOAuthRetry<T>(
    fn: (accessToken: string) => Promise<{ status: number; data: T }>,
    context: string,
  ): Promise<{ status: number; data: T }> {
    const accessToken = resolveAuth()
    const response = await fn(accessToken)

    if (response.status !== 401) {
      return response
    }

    if (!deps.onAuth401) {
      debug(`[bridge:api] ${context}: 401 received, no refresh handler`)
      return response
    }

    // Attempt token refresh — matches the pattern in withRetry.ts
    debug(`[bridge:api] ${context}: 401 received, attempting token refresh`)
    const refreshed = await deps.onAuth401(accessToken)
    if (refreshed) {
      debug(`[bridge:api] ${context}: Token refreshed, retrying request`)
      const newToken = resolveAuth()
      const retryResponse = await fn(newToken)
      if (retryResponse.status !== 401) {
        return retryResponse
      }
      debug(`[bridge:api] ${context}: Retry after refresh also got 401`)
    } else {
      debug(`[bridge:api] ${context}: Token refresh failed`)
    }

    // Refresh failed — return 401 for handleErrorStatus to throw
    return response
  }

  return {
    // Register (or re-attach) this machine as a bridge environment.
    async registerBridgeEnvironment(
      config: BridgeConfig,
    ): Promise<{ environment_id: string; environment_secret: string }> {
      debug(
        `[bridge:api] POST /v1/environments/bridge bridgeId=${config.bridgeId}`,
      )

      const response = await withOAuthRetry(
        (token: string) =>
          axios.post<{
            environment_id: string
            environment_secret: string
          }>(
            `${deps.baseUrl}/v1/environments/bridge`,
            {
              machine_name: config.machineName,
              directory: config.dir,
              branch: config.branch,
              git_repo_url: config.gitRepoUrl,
              // Advertise session capacity so claude.ai/code can show
              // "2/4 sessions" badges and only block the picker when
              // actually at capacity. Backends that don't yet accept
              // this field will silently ignore it.
              max_sessions: config.maxSessions,
              // worker_type lets claude.ai filter environments by origin
              // (e.g. assistant picker only shows assistant-mode workers).
              // Desktop cowork app sends "cowork"; we send a distinct value.
              metadata: { worker_type: config.workerType },
              // Idempotent re-registration: if we have a backend-issued
              // environment_id from a prior session (--session-id resume),
              // send it back so the backend reattaches instead of creating
              // a new env. The backend may still hand back a fresh ID if
              // the old one expired — callers must compare the response.
              ...(config.reuseEnvironmentId && {
                environment_id: config.reuseEnvironmentId,
              }),
            },
            {
              headers: getHeaders(token),
              timeout: 15_000,
              // Let 4xx through so handleErrorStatus produces the message.
              validateStatus: status => status < 500,
            },
          ),
        'Registration',
      )

      handleErrorStatus(response.status, response.data, 'Registration')
      debug(
        `[bridge:api] POST /v1/environments/bridge -> ${response.status} environment_id=${response.data.environment_id}`,
      )
      debug(
        `[bridge:api] >>> ${debugBody({ machine_name: config.machineName, directory: config.dir, branch: config.branch, git_repo_url: config.gitRepoUrl, max_sessions: config.maxSessions, metadata: { worker_type: config.workerType } })}`,
      )
      debug(`[bridge:api] <<< ${debugBody(response.data)}`)
      return response.data
    },

    // Long-poll for the next work item. Authenticated with the environment
    // secret (not OAuth) — no 401-refresh retry.
    async pollForWork(
      environmentId: string,
      environmentSecret: string,
      signal?: AbortSignal,
      reclaimOlderThanMs?: number,
    ): Promise<WorkResponse | null> {
      validateBridgeId(environmentId, 'environmentId')

      // Save and reset so errors break the "consecutive empty" streak.
      // Restored below when the response is truly empty.
      const prevEmptyPolls = consecutiveEmptyPolls
      consecutiveEmptyPolls = 0

      const response = await axios.get<WorkResponse | null>(
        `${deps.baseUrl}/v1/environments/${environmentId}/work/poll`,
        {
          headers: getHeaders(environmentSecret),
          params:
            reclaimOlderThanMs !== undefined
              ? { reclaim_older_than_ms: reclaimOlderThanMs }
              : undefined,
          timeout: 10_000,
          signal,
          validateStatus: status => status < 500,
        },
      )

      handleErrorStatus(response.status, response.data, 'Poll')

      // Empty body or null = no work available
      if (!response.data) {
        consecutiveEmptyPolls = prevEmptyPolls + 1
        // Log the first empty poll, then every EMPTY_POLL_LOG_INTERVAL-th.
        if (
          consecutiveEmptyPolls === 1 ||
          consecutiveEmptyPolls % EMPTY_POLL_LOG_INTERVAL === 0
        ) {
          debug(
            `[bridge:api] GET .../work/poll -> ${response.status} (no work, ${consecutiveEmptyPolls} consecutive empty polls)`,
          )
        }
        return null
      }

      debug(
        `[bridge:api] GET .../work/poll -> ${response.status} workId=${response.data.id} type=${response.data.data?.type}${response.data.data?.id ? ` sessionId=${response.data.data.id}` : ''}`,
      )
      debug(`[bridge:api] <<< ${debugBody(response.data)}`)
      return response.data
    },

    // Acknowledge a claimed work item. Uses the per-work session token.
    async acknowledgeWork(
      environmentId: string,
      workId: string,
      sessionToken: string,
    ): Promise<void> {
      validateBridgeId(environmentId, 'environmentId')
      validateBridgeId(workId, 'workId')

      debug(`[bridge:api] POST .../work/${workId}/ack`)

      const response = await axios.post(
        `${deps.baseUrl}/v1/environments/${environmentId}/work/${workId}/ack`,
        {},
        {
          headers: getHeaders(sessionToken),
          timeout: 10_000,
          validateStatus: s => s < 500,
        },
      )

      handleErrorStatus(response.status, response.data, 'Acknowledge')
      debug(`[bridge:api] POST .../work/${workId}/ack -> ${response.status}`)
    },

    // Request stop of a running work item (force = hard stop). OAuth-authed.
    async stopWork(
      environmentId: string,
      workId: string,
      force: boolean,
    ): Promise<void> {
      validateBridgeId(environmentId, 'environmentId')
      validateBridgeId(workId, 'workId')

      debug(`[bridge:api] POST .../work/${workId}/stop force=${force}`)

      const response = await withOAuthRetry(
        (token: string) =>
          axios.post(
            `${deps.baseUrl}/v1/environments/${environmentId}/work/${workId}/stop`,
            { force },
            {
              headers: getHeaders(token),
              timeout: 10_000,
              validateStatus: s => s < 500,
            },
          ),
        'StopWork',
      )

      handleErrorStatus(response.status, response.data, 'StopWork')
      debug(`[bridge:api] POST .../work/${workId}/stop -> ${response.status}`)
    },

    // Remove this environment's registration. OAuth-authed.
    async deregisterEnvironment(environmentId: string): Promise<void> {
      validateBridgeId(environmentId, 'environmentId')

      debug(`[bridge:api] DELETE /v1/environments/bridge/${environmentId}`)

      const response = await withOAuthRetry(
        (token: string) =>
          axios.delete(
            `${deps.baseUrl}/v1/environments/bridge/${environmentId}`,
            {
              headers: getHeaders(token),
              timeout: 10_000,
              validateStatus: s => s < 500,
            },
          ),
        'Deregister',
      )

      handleErrorStatus(response.status, response.data, 'Deregister')
      debug(
        `[bridge:api] DELETE /v1/environments/bridge/${environmentId} -> ${response.status}`,
      )
    },

    // Archive a session. Treats 409 as success (already archived). OAuth-authed.
    async archiveSession(sessionId: string): Promise<void> {
      validateBridgeId(sessionId, 'sessionId')

      debug(`[bridge:api] POST /v1/sessions/${sessionId}/archive`)

      const response = await withOAuthRetry(
        (token: string) =>
          axios.post(
            `${deps.baseUrl}/v1/sessions/${sessionId}/archive`,
            {},
            {
              headers: getHeaders(token),
              timeout: 10_000,
              validateStatus: s => s < 500,
            },
          ),
        'ArchiveSession',
      )

      // 409 = already archived (idempotent, not an error)
      if (response.status === 409) {
        debug(
          `[bridge:api] POST /v1/sessions/${sessionId}/archive -> 409 (already archived)`,
        )
        return
      }

      handleErrorStatus(response.status, response.data, 'ArchiveSession')
      debug(
        `[bridge:api] POST /v1/sessions/${sessionId}/archive -> ${response.status}`,
      )
    },

    // Re-attach an existing session to this environment. OAuth-authed.
    async reconnectSession(
      environmentId: string,
      sessionId: string,
    ): Promise<void> {
      validateBridgeId(environmentId, 'environmentId')
      validateBridgeId(sessionId, 'sessionId')

      debug(
        `[bridge:api] POST /v1/environments/${environmentId}/bridge/reconnect session_id=${sessionId}`,
      )

      const response = await withOAuthRetry(
        (token: string) =>
          axios.post(
            `${deps.baseUrl}/v1/environments/${environmentId}/bridge/reconnect`,
            { session_id: sessionId },
            {
              headers: getHeaders(token),
              timeout: 10_000,
              validateStatus: s => s < 500,
            },
          ),
        'ReconnectSession',
      )

      handleErrorStatus(response.status, response.data, 'ReconnectSession')
      debug(`[bridge:api] POST .../bridge/reconnect -> ${response.status}`)
    },

    // Extend the lease on a claimed work item. Uses the session token.
    async heartbeatWork(
      environmentId: string,
      workId: string,
      sessionToken: string,
    ): Promise<{ lease_extended: boolean; state: string }> {
      validateBridgeId(environmentId, 'environmentId')
      validateBridgeId(workId, 'workId')

      debug(`[bridge:api] POST .../work/${workId}/heartbeat`)

      const response = await axios.post<{
        lease_extended: boolean
        state: string
        last_heartbeat: string
        ttl_seconds: number
      }>(
        `${deps.baseUrl}/v1/environments/${environmentId}/work/${workId}/heartbeat`,
        {},
        {
          headers: getHeaders(sessionToken),
          timeout: 10_000,
          validateStatus: s => s < 500,
        },
      )

      handleErrorStatus(response.status, response.data, 'Heartbeat')
      debug(
        `[bridge:api] POST .../work/${workId}/heartbeat -> ${response.status} lease_extended=${response.data.lease_extended} state=${response.data.state}`,
      )
      return response.data
    },

    // Publish a permission response back to the session event stream.
    async sendPermissionResponseEvent(
      sessionId: string,
      event: PermissionResponseEvent,
      sessionToken: string,
    ): Promise<void> {
      validateBridgeId(sessionId, 'sessionId')

      debug(
        `[bridge:api] POST /v1/sessions/${sessionId}/events type=${event.type}`,
      )

      const response = await axios.post(
        `${deps.baseUrl}/v1/sessions/${sessionId}/events`,
        { events: [event] },
        {
          headers: getHeaders(sessionToken),
          timeout: 10_000,
          validateStatus: s => s < 500,
        },
      )

      handleErrorStatus(
        response.status,
        response.data,
        'SendPermissionResponseEvent',
      )
      debug(
        `[bridge:api] POST /v1/sessions/${sessionId}/events -> ${response.status}`,
      )
      debug(`[bridge:api] >>> ${debugBody({ events: [event] })}`)
      debug(`[bridge:api] <<< ${debugBody(response.data)}`)
    },
  }
}
|
||||
|
||||
/**
 * Map a non-success HTTP status to a thrown error; returns on 200/204.
 *
 * @throws BridgeFatalError for 401/403/404/410 (non-retryable; carries the
 *   server-provided error type when present)
 * @throws Error for 429 and any other status (callers may treat as retryable)
 *
 * NOTE(review): only 200 and 204 are treated as success — any other 2xx
 * (e.g. 201) falls into the default throw. Confirm the API never returns them.
 */
function handleErrorStatus(
  status: number,
  data: unknown,
  context: string,
): void {
  if (status === 200 || status === 204) {
    return
  }
  const detail = extractErrorDetail(data)
  const errorType = extractErrorTypeFromData(data)
  switch (status) {
    case 401:
      throw new BridgeFatalError(
        `${context}: Authentication failed (401)${detail ? `: ${detail}` : ''}. ${BRIDGE_LOGIN_INSTRUCTION}`,
        401,
        errorType,
      )
    case 403:
      // An expiry-flavored 403 reads as "session expired" rather than a
      // permissions problem.
      throw new BridgeFatalError(
        isExpiredErrorType(errorType)
          ? 'Remote Control session has expired. Please restart with `claude remote-control` or /remote-control.'
          : `${context}: Access denied (403)${detail ? `: ${detail}` : ''}. Check your organization permissions.`,
        403,
        errorType,
      )
    case 404:
      throw new BridgeFatalError(
        detail ??
          `${context}: Not found (404). Remote Control may not be available for this organization.`,
        404,
        errorType,
      )
    case 410:
      // 410 Gone always means expiry; default the error type accordingly.
      throw new BridgeFatalError(
        detail ??
          'Remote Control session has expired. Please restart with `claude remote-control` or /remote-control.',
        410,
        errorType ?? 'environment_expired',
      )
    case 429:
      throw new Error(`${context}: Rate limited (429). Polling too frequently.`)
    default:
      throw new Error(
        `${context}: Failed with status ${status}${detail ? `: ${detail}` : ''}`,
      )
  }
}
|
||||
|
||||
/** Check whether an error type string indicates a session/environment expiry. */
|
||||
export function isExpiredErrorType(errorType: string | undefined): boolean {
|
||||
if (!errorType) {
|
||||
return false
|
||||
}
|
||||
return errorType.includes('expired') || errorType.includes('lifetime')
|
||||
}
|
||||
|
||||
/**
|
||||
* Check whether a BridgeFatalError is a suppressible 403 permission error.
|
||||
* These are 403 errors for scopes like 'external_poll_sessions' or operations
|
||||
* like StopWork that fail because the user's role lacks 'environments:manage'.
|
||||
* They don't affect core functionality and shouldn't be shown to users.
|
||||
*/
|
||||
export function isSuppressible403(err: BridgeFatalError): boolean {
|
||||
if (err.status !== 403) {
|
||||
return false
|
||||
}
|
||||
return (
|
||||
err.message.includes('external_poll_sessions') ||
|
||||
err.message.includes('environments:manage')
|
||||
)
|
||||
}
|
||||
|
||||
function extractErrorTypeFromData(data: unknown): string | undefined {
|
||||
if (data && typeof data === 'object') {
|
||||
if (
|
||||
'error' in data &&
|
||||
data.error &&
|
||||
typeof data.error === 'object' &&
|
||||
'type' in data.error &&
|
||||
typeof data.error.type === 'string'
|
||||
) {
|
||||
return data.error.type
|
||||
}
|
||||
}
|
||||
return undefined
|
||||
}
|
||||
48
src/bridge/bridgeConfig.ts
Normal file
48
src/bridge/bridgeConfig.ts
Normal file
@ -0,0 +1,48 @@
|
||||
/**
|
||||
* Shared bridge auth/URL resolution. Consolidates the ant-only
|
||||
* CLAUDE_BRIDGE_* dev overrides that were previously copy-pasted across
|
||||
* a dozen files — inboundAttachments, BriefTool/upload, bridgeMain,
|
||||
* initReplBridge, remoteBridgeCore, daemon workers, /rename,
|
||||
* /remote-control.
|
||||
*
|
||||
* Two layers: *Override() returns the ant-only env var (or undefined);
|
||||
* the non-Override versions fall through to the real OAuth store/config.
|
||||
* Callers that compose with a different auth source (e.g. daemon workers
|
||||
* using IPC auth) use the Override getters directly.
|
||||
*/
|
||||
|
||||
import { getOauthConfig } from '../constants/oauth.js'
|
||||
import { getClaudeAIOAuthTokens } from '../utils/auth.js'
|
||||
|
||||
/** Ant-only dev override: CLAUDE_BRIDGE_OAUTH_TOKEN, else undefined. */
|
||||
export function getBridgeTokenOverride(): string | undefined {
|
||||
return (
|
||||
(process.env.USER_TYPE === 'ant' &&
|
||||
process.env.CLAUDE_BRIDGE_OAUTH_TOKEN) ||
|
||||
undefined
|
||||
)
|
||||
}
|
||||
|
||||
/** Ant-only dev override: CLAUDE_BRIDGE_BASE_URL, else undefined. */
|
||||
export function getBridgeBaseUrlOverride(): string | undefined {
|
||||
return (
|
||||
(process.env.USER_TYPE === 'ant' && process.env.CLAUDE_BRIDGE_BASE_URL) ||
|
||||
undefined
|
||||
)
|
||||
}
|
||||
|
||||
/**
|
||||
* Access token for bridge API calls: dev override first, then the OAuth
|
||||
* keychain. Undefined means "not logged in".
|
||||
*/
|
||||
export function getBridgeAccessToken(): string | undefined {
|
||||
return getBridgeTokenOverride() ?? getClaudeAIOAuthTokens()?.accessToken
|
||||
}
|
||||
|
||||
/**
|
||||
* Base URL for bridge API calls: dev override first, then the production
|
||||
* OAuth config. Always returns a URL.
|
||||
*/
|
||||
export function getBridgeBaseUrl(): string {
|
||||
return getBridgeBaseUrlOverride() ?? getOauthConfig().BASE_API_URL
|
||||
}
|
||||
135
src/bridge/bridgeDebug.ts
Normal file
135
src/bridge/bridgeDebug.ts
Normal file
@ -0,0 +1,135 @@
|
||||
import { logForDebugging } from '../utils/debug.js'
|
||||
import { BridgeFatalError } from './bridgeApi.js'
|
||||
import type { BridgeApiClient } from './types.js'
|
||||
|
||||
/**
|
||||
* Ant-only fault injection for manually testing bridge recovery paths.
|
||||
*
|
||||
* Real failure modes this targets (BQ 2026-03-12, 7-day window):
|
||||
* poll 404 not_found_error — 147K sessions/week, dead onEnvironmentLost gate
|
||||
* ws_closed 1002/1006 — 22K sessions/week, zombie poll after close
|
||||
* register transient failure — residual: network blips during doReconnect
|
||||
*
|
||||
* Usage: /bridge-kick <subcommand> from the REPL while Remote Control is
|
||||
* connected, then tail debug.log to watch the recovery machinery react.
|
||||
*
|
||||
* Module-level state is intentional here: one bridge per REPL process, the
|
||||
* /bridge-kick slash command has no other way to reach into initBridgeCore's
|
||||
* closures, and teardown clears the slot.
|
||||
*/
|
||||
|
||||
/** One-shot fault to inject on the next matching api call. */
type BridgeFault = {
  /** Which BridgeApiClient method the fault targets. */
  method:
    | 'pollForWork'
    | 'registerBridgeEnvironment'
    | 'reconnectSession'
    | 'heartbeatWork'
  /** Fatal errors go through handleErrorStatus → BridgeFatalError. Transient
   * errors surface as plain axios rejections (5xx / network). Recovery code
   * distinguishes the two: fatal → teardown, transient → retry/backoff. */
  kind: 'fatal' | 'transient'
  /** HTTP status to report on the injected error. */
  status: number
  /** Optional server-style error type (e.g. "environment_expired"). */
  errorType?: string
  /** Remaining injections. Decremented on consume; removed at 0. */
  count: number
}
|
||||
|
||||
/** Hooks the bridge core registers so /bridge-kick can poke it from outside. */
export type BridgeDebugHandle = {
  /** Invoke the transport's permanent-close handler directly. Tests the
   * ws_closed → reconnectEnvironmentWithSession escalation (#22148). */
  fireClose: (code: number) => void
  /** Call reconnectEnvironmentWithSession() — same as SIGUSR2 but
   * reachable from the slash command. */
  forceReconnect: () => void
  /** Queue a fault for the next N calls to the named api method. */
  injectFault: (fault: BridgeFault) => void
  /** Abort the at-capacity sleep so an injected poll fault lands
   * immediately instead of up to 10min later. */
  wakePollLoop: () => void
  /** env/session IDs for the debug.log grep. */
  describe: () => string
}
|
||||
|
||||
// Single registered handle — one bridge per REPL process; teardown clears it.
let debugHandle: BridgeDebugHandle | null = null
// Pending one-shot faults, consumed by the fault-injecting api wrapper.
const faultQueue: BridgeFault[] = []
|
||||
|
||||
/** Install the debug handle for the current bridge (replaces any previous). */
export function registerBridgeDebugHandle(h: BridgeDebugHandle): void {
  debugHandle = h
}
|
||||
|
||||
/** Remove the debug handle and drop any queued faults (bridge teardown). */
export function clearBridgeDebugHandle(): void {
  debugHandle = null
  faultQueue.length = 0
}
|
||||
|
||||
/** Current debug handle, or null when no bridge is connected. */
export function getBridgeDebugHandle(): BridgeDebugHandle | null {
  return debugHandle
}
|
||||
|
||||
/** Queue a fault; the wrapped api client (wrapApiForFaultInjection) consumes
 * it on the next matching call(s). Logs the queued fault for debug.log. */
export function injectBridgeFault(fault: BridgeFault): void {
  faultQueue.push(fault)
  logForDebugging(
    `[bridge:debug] Queued fault: ${fault.method} ${fault.kind}/${fault.status}${fault.errorType ? `/${fault.errorType}` : ''} ×${fault.count}`,
  )
}
|
||||
|
||||
/**
 * Wrap a BridgeApiClient so each call first checks the fault queue. If a
 * matching fault is queued, throw the specified error instead of calling
 * through. Delegates everything else to the real client.
 *
 * Only called when USER_TYPE === 'ant' — zero overhead in external builds.
 *
 * @param api the real client; all non-faulted calls pass straight through.
 * @returns a client with the four fault-injectable methods wrapped.
 */
export function wrapApiForFaultInjection(
  api: BridgeApiClient,
): BridgeApiClient {
  // Take the first queued fault for `method` (FIFO per method), decrementing
  // its remaining count in place and removing it once exhausted.
  function consume(method: BridgeFault['method']): BridgeFault | null {
    const idx = faultQueue.findIndex(f => f.method === method)
    if (idx === -1) return null
    const fault = faultQueue[idx]!
    fault.count--
    if (fault.count <= 0) faultQueue.splice(idx, 1)
    return fault
  }

  // Throw the error shape the real client would produce for this fault kind.
  function throwFault(fault: BridgeFault, context: string): never {
    logForDebugging(
      `[bridge:debug] Injecting ${fault.kind} fault into ${context}: status=${fault.status} errorType=${fault.errorType ?? 'none'}`,
    )
    if (fault.kind === 'fatal') {
      throw new BridgeFatalError(
        `[injected] ${context} ${fault.status}`,
        fault.status,
        fault.errorType,
      )
    }
    // Transient: mimic an axios rejection (5xx / network). No .status on
    // the error itself — that's how the catch blocks distinguish.
    throw new Error(`[injected transient] ${context} ${fault.status}`)
  }

  // Spread first, then override the four injectable methods so any other
  // members of the client are delegated untouched.
  return {
    ...api,
    async pollForWork(envId, secret, signal, reclaimMs) {
      const f = consume('pollForWork')
      if (f) throwFault(f, 'Poll')
      return api.pollForWork(envId, secret, signal, reclaimMs)
    },
    async registerBridgeEnvironment(config) {
      const f = consume('registerBridgeEnvironment')
      if (f) throwFault(f, 'Registration')
      return api.registerBridgeEnvironment(config)
    },
    async reconnectSession(envId, sessionId) {
      const f = consume('reconnectSession')
      if (f) throwFault(f, 'ReconnectSession')
      return api.reconnectSession(envId, sessionId)
    },
    async heartbeatWork(envId, workId, token) {
      const f = consume('heartbeatWork')
      if (f) throwFault(f, 'Heartbeat')
      return api.heartbeatWork(envId, workId, token)
    },
  }
}
|
||||
202
src/bridge/bridgeEnabled.ts
Normal file
202
src/bridge/bridgeEnabled.ts
Normal file
@ -0,0 +1,202 @@
|
||||
import { feature } from 'bun:bundle'
|
||||
import {
|
||||
checkGate_CACHED_OR_BLOCKING,
|
||||
getDynamicConfig_CACHED_MAY_BE_STALE,
|
||||
getFeatureValue_CACHED_MAY_BE_STALE,
|
||||
} from '../services/analytics/growthbook.js'
|
||||
// Namespace import breaks the bridgeEnabled → auth → config → bridgeEnabled
|
||||
// cycle — authModule.foo is a live binding, so by the time the helpers below
|
||||
// call it, auth.js is fully loaded. Previously used require() for the same
|
||||
// deferral, but require() hits a CJS cache that diverges from the ESM
|
||||
// namespace after mock.module() (daemon/auth.test.ts), breaking spyOn.
|
||||
import * as authModule from '../utils/auth.js'
|
||||
import { isEnvTruthy } from '../utils/envUtils.js'
|
||||
import { lt } from '../utils/semver.js'
|
||||
|
||||
/**
 * Runtime check for bridge mode entitlement.
 *
 * Remote Control requires a claude.ai subscription (the bridge auths to CCR
 * with the claude.ai OAuth token). isClaudeAISubscriber() excludes
 * Bedrock/Vertex/Foundry, apiKeyHelper/gateway deployments, env-var API keys,
 * and Console API logins — none of which have the OAuth token CCR needs.
 * See github.com/deshaw/anthropic-issues/issues/24.
 *
 * The `feature('BRIDGE_MODE')` guard ensures the GrowthBook string literal
 * is only referenced when bridge mode is enabled at build time.
 *
 * @returns true only when the build flag, the subscription check, and the
 *   (possibly stale) cached GrowthBook gate all pass.
 */
export function isBridgeEnabled(): boolean {
  // Positive ternary pattern — see docs/feature-gating.md.
  // Negative pattern (if (!feature(...)) return) does not eliminate
  // inline string literals from external builds.
  return feature('BRIDGE_MODE')
    ? isClaudeAISubscriber() &&
        getFeatureValue_CACHED_MAY_BE_STALE('tengu_ccr_bridge', false)
    : false
}
|
||||
|
||||
/**
 * Blocking entitlement check for Remote Control.
 *
 * Returns cached `true` immediately (fast path). If the disk cache says
 * `false` or is missing, awaits GrowthBook init and fetches the fresh
 * server value (slow path, max ~5s), then writes it to disk.
 *
 * Use at entitlement gates where a stale `false` would unfairly block access.
 * For user-facing error paths, prefer `getBridgeDisabledReason()` which gives
 * a specific diagnostic. For render-body UI visibility checks, use
 * `isBridgeEnabled()` instead.
 *
 * @returns resolves true only when the build flag, subscription, and the
 *   (blocking) GrowthBook gate all pass.
 */
export async function isBridgeEnabledBlocking(): Promise<boolean> {
  // Same positive-ternary build-flag pattern as isBridgeEnabled.
  return feature('BRIDGE_MODE')
    ? isClaudeAISubscriber() &&
        (await checkGate_CACHED_OR_BLOCKING('tengu_ccr_bridge'))
    : false
}
|
||||
|
||||
/**
 * Diagnostic message for why Remote Control is unavailable, or null if
 * it's enabled. Call this instead of a bare `isBridgeEnabledBlocking()`
 * check when you need to show the user an actionable error.
 *
 * The GrowthBook gate targets on organizationUUID, which comes from
 * config.oauthAccount — populated by /api/oauth/profile during login.
 * That endpoint requires the user:profile scope. Tokens without it
 * (setup-token, CLAUDE_CODE_OAUTH_TOKEN env var, or pre-scope-expansion
 * logins) leave oauthAccount unpopulated, so the gate falls back to
 * false and users see a dead-end "not enabled" message with no hint
 * that re-login would fix it. See CC-1165 / gh-33105.
 *
 * @returns a user-facing reason string, or null when Remote Control is enabled.
 */
export async function getBridgeDisabledReason(): Promise<string | null> {
  if (feature('BRIDGE_MODE')) {
    // Checks are ordered most-actionable first: each earlier failure would
    // also cause the later GrowthBook gate to fail, so diagnose it directly.
    if (!isClaudeAISubscriber()) {
      return 'Remote Control requires a claude.ai subscription. Run `claude auth login` to sign in with your claude.ai account.'
    }
    if (!hasProfileScope()) {
      return 'Remote Control requires a full-scope login token. Long-lived tokens (from `claude setup-token` or CLAUDE_CODE_OAUTH_TOKEN) are limited to inference-only for security reasons. Run `claude auth login` to use Remote Control.'
    }
    if (!getOauthAccountInfo()?.organizationUuid) {
      return 'Unable to determine your organization for Remote Control eligibility. Run `claude auth login` to refresh your account information.'
    }
    // Blocking gate check so a stale cached `false` doesn't misdiagnose.
    if (!(await checkGate_CACHED_OR_BLOCKING('tengu_ccr_bridge'))) {
      return 'Remote Control is not yet enabled for your account.'
    }
    return null
  }
  return 'Remote Control is not available in this build.'
}
|
||||
|
||||
// try/catch: main.tsx:5698 calls isBridgeEnabled() while defining the Commander
// program, before enableConfigs() runs. isClaudeAISubscriber() → getGlobalConfig()
// throws "Config accessed before allowed" there. Pre-config, no OAuth token can
// exist anyway — false is correct. Same swallow getFeatureValue_CACHED_MAY_BE_STALE
// already does at growthbook.ts:775-780.
// The same rationale covers all three wrappers below.
function isClaudeAISubscriber(): boolean {
  try {
    return authModule.isClaudeAISubscriber()
  } catch {
    return false
  }
}

// Pre-config fallback: no token yet → no profile scope.
function hasProfileScope(): boolean {
  try {
    return authModule.hasProfileScope()
  } catch {
    return false
  }
}

// Pre-config fallback: no account info available yet.
function getOauthAccountInfo(): ReturnType<
  typeof authModule.getOauthAccountInfo
> {
  try {
    return authModule.getOauthAccountInfo()
  } catch {
    return undefined
  }
}
|
||||
|
||||
/**
 * Runtime check for the env-less (v2) REPL bridge path.
 * Returns true when the GrowthBook flag `tengu_bridge_repl_v2` is enabled.
 *
 * This gates which implementation initReplBridge uses — NOT whether bridge
 * is available at all (see isBridgeEnabled above). Daemon/print paths stay
 * on the env-based implementation regardless of this gate.
 */
export function isEnvLessBridgeEnabled(): boolean {
  // Positive ternary keeps the GrowthBook literal out of external builds.
  return feature('BRIDGE_MODE')
    ? getFeatureValue_CACHED_MAY_BE_STALE('tengu_bridge_repl_v2', false)
    : false
}
|
||||
|
||||
/**
 * Kill-switch for the `cse_*` → `session_*` client-side retag shim.
 *
 * The shim exists because compat/convert.go:27 validates TagSession and the
 * claude.ai frontend routes on `session_*`, while v2 worker endpoints hand out
 * `cse_*`. Once the server tags by environment_kind and the frontend accepts
 * `cse_*` directly, flip this to false to make toCompatSessionId a no-op.
 * Defaults to true — the shim stays active until explicitly disabled.
 *
 * @returns true (shim active) unless the GrowthBook kill-switch disables it;
 *   note the non-bridge build also returns true, keeping the shim on.
 */
export function isCseShimEnabled(): boolean {
  return feature('BRIDGE_MODE')
    ? getFeatureValue_CACHED_MAY_BE_STALE(
        'tengu_bridge_repl_v2_cse_shim_enabled',
        true,
      )
    : true
}
|
||||
|
||||
/**
 * Returns an error message if the current CLI version is below the
 * minimum required for the v1 (env-based) Remote Control path, or null if the
 * version is fine. The v2 (env-less) path uses checkEnvLessBridgeMinVersion()
 * in envLessBridgeConfig.ts instead — the two implementations have independent
 * version floors.
 *
 * Uses cached (non-blocking) GrowthBook config. If GrowthBook hasn't
 * loaded yet, the default '0.0.0' means the check passes — a safe fallback.
 *
 * @returns user-facing error text when too old, otherwise null.
 */
export function checkBridgeMinVersion(): string | null {
  // Positive pattern — see docs/feature-gating.md.
  // Negative pattern (if (!feature(...)) return) does not eliminate
  // inline string literals from external builds.
  if (feature('BRIDGE_MODE')) {
    const config = getDynamicConfig_CACHED_MAY_BE_STALE<{
      minVersion: string
    }>('tengu_bridge_min_version', { minVersion: '0.0.0' })
    // Empty/missing minVersion skips the check entirely.
    if (config.minVersion && lt(MACRO.VERSION, config.minVersion)) {
      return `Your version of Claude Code (${MACRO.VERSION}) is too old for Remote Control.\nVersion ${config.minVersion} or higher is required. Run \`claude update\` to update.`
    }
  }
  return null
}
|
||||
|
||||
/**
 * Default for remoteControlAtStartup when the user hasn't explicitly set it.
 * When the CCR_AUTO_CONNECT build flag is present (ant-only) and the
 * tengu_cobalt_harbor GrowthBook gate is on, all sessions connect to CCR by
 * default — the user can still opt out by setting remoteControlAtStartup=false
 * in config (explicit settings always win over this default).
 *
 * Defined here rather than in config.ts to avoid a direct
 * config.ts → growthbook.ts import cycle (growthbook.ts → user.ts → config.ts).
 */
export function getCcrAutoConnectDefault(): boolean {
  // Positive ternary keeps the GrowthBook literal out of external builds.
  return feature('CCR_AUTO_CONNECT')
    ? getFeatureValue_CACHED_MAY_BE_STALE('tengu_cobalt_harbor', false)
    : false
}
|
||||
|
||||
/**
 * Opt-in CCR mirror mode — every local session spawns an outbound-only
 * Remote Control session that receives forwarded events. Separate from
 * getCcrAutoConnectDefault (bidirectional Remote Control). Env var wins for
 * local opt-in; GrowthBook controls rollout.
 *
 * @returns true when the build flag is present AND either the
 *   CLAUDE_CODE_CCR_MIRROR env var is truthy or the gate is on.
 */
export function isCcrMirrorEnabled(): boolean {
  return feature('CCR_MIRROR')
    ? isEnvTruthy(process.env.CLAUDE_CODE_CCR_MIRROR) ||
        getFeatureValue_CACHED_MAY_BE_STALE('tengu_ccr_mirror', false)
    : false
}
|
||||
2999
src/bridge/bridgeMain.ts
Normal file
2999
src/bridge/bridgeMain.ts
Normal file
File diff suppressed because it is too large
Load Diff
461
src/bridge/bridgeMessaging.ts
Normal file
461
src/bridge/bridgeMessaging.ts
Normal file
@ -0,0 +1,461 @@
|
||||
/**
|
||||
* Shared transport-layer helpers for bridge message handling.
|
||||
*
|
||||
* Extracted from replBridge.ts so both the env-based core (initBridgeCore)
|
||||
* and the env-less core (initEnvLessBridgeCore) can use the same ingress
|
||||
* parsing, control-request handling, and echo-dedup machinery.
|
||||
*
|
||||
* Everything here is pure — no closure over bridge-specific state. All
|
||||
* collaborators (transport, sessionId, UUID sets, callbacks) are passed
|
||||
* as params.
|
||||
*/
|
||||
|
||||
import { randomUUID } from 'crypto'
|
||||
import type { SDKMessage } from '../entrypoints/agentSdkTypes.js'
|
||||
import type {
|
||||
SDKControlRequest,
|
||||
SDKControlResponse,
|
||||
} from '../entrypoints/sdk/controlTypes.js'
|
||||
import type { SDKResultSuccess } from '../entrypoints/sdk/coreTypes.js'
|
||||
import { logEvent } from '../services/analytics/index.js'
|
||||
import { EMPTY_USAGE } from '../services/api/emptyUsage.js'
|
||||
import type { Message } from '../types/message.js'
|
||||
import { normalizeControlMessageKeys } from '../utils/controlMessageCompat.js'
|
||||
import { logForDebugging } from '../utils/debug.js'
|
||||
import { stripDisplayTagsAllowEmpty } from '../utils/displayTags.js'
|
||||
import { errorMessage } from '../utils/errors.js'
|
||||
import type { PermissionMode } from '../utils/permissions/PermissionMode.js'
|
||||
import { jsonParse } from '../utils/slowOperations.js'
|
||||
import type { ReplBridgeTransport } from './replBridgeTransport.js'
|
||||
|
||||
// ─── Type guards ─────────────────────────────────────────────────────────────
|
||||
|
||||
/** Type predicate for parsed WebSocket messages. SDKMessage is a
|
||||
* discriminated union on `type` — validating the discriminant is
|
||||
* sufficient for the predicate; callers narrow further via the union. */
|
||||
export function isSDKMessage(value: unknown): value is SDKMessage {
|
||||
return (
|
||||
value !== null &&
|
||||
typeof value === 'object' &&
|
||||
'type' in value &&
|
||||
typeof value.type === 'string'
|
||||
)
|
||||
}
|
||||
|
||||
/** Type predicate for control_response messages from the server. */
|
||||
export function isSDKControlResponse(
|
||||
value: unknown,
|
||||
): value is SDKControlResponse {
|
||||
return (
|
||||
value !== null &&
|
||||
typeof value === 'object' &&
|
||||
'type' in value &&
|
||||
value.type === 'control_response' &&
|
||||
'response' in value
|
||||
)
|
||||
}
|
||||
|
||||
/** Type predicate for control_request messages from the server. */
|
||||
export function isSDKControlRequest(
|
||||
value: unknown,
|
||||
): value is SDKControlRequest {
|
||||
return (
|
||||
value !== null &&
|
||||
typeof value === 'object' &&
|
||||
'type' in value &&
|
||||
value.type === 'control_request' &&
|
||||
'request_id' in value &&
|
||||
'request' in value
|
||||
)
|
||||
}
|
||||
|
||||
/**
|
||||
* True for message types that should be forwarded to the bridge transport.
|
||||
* The server only wants user/assistant turns and slash-command system events;
|
||||
* everything else (tool_result, progress, etc.) is internal REPL chatter.
|
||||
*/
|
||||
export function isEligibleBridgeMessage(m: Message): boolean {
|
||||
// Virtual messages (REPL inner calls) are display-only — bridge/SDK
|
||||
// consumers see the REPL tool_use/result which summarizes the work.
|
||||
if ((m.type === 'user' || m.type === 'assistant') && m.isVirtual) {
|
||||
return false
|
||||
}
|
||||
return (
|
||||
m.type === 'user' ||
|
||||
m.type === 'assistant' ||
|
||||
(m.type === 'system' && m.subtype === 'local_command')
|
||||
)
|
||||
}
|
||||
|
||||
/**
|
||||
* Extract title-worthy text from a Message for onUserMessage. Returns
|
||||
* undefined for messages that shouldn't title the session: non-user, meta
|
||||
* (nudges), tool results, compact summaries, non-human origins (task
|
||||
* notifications, channel messages), or pure display-tag content
|
||||
* (<ide_opened_file>, <session-start-hook>, etc.).
|
||||
*
|
||||
* Synthetic interrupts ([Request interrupted by user]) are NOT filtered here —
|
||||
* isSyntheticMessage lives in messages.ts (heavy import, pulls command
|
||||
* registry). The initialMessages path in initReplBridge checks it; the
|
||||
* writeMessages path reaching an interrupt as the *first* message is
|
||||
* implausible (an interrupt implies a prior prompt already flowed through).
|
||||
*/
|
||||
export function extractTitleText(m: Message): string | undefined {
|
||||
if (m.type !== 'user' || m.isMeta || m.toolUseResult || m.isCompactSummary)
|
||||
return undefined
|
||||
if (m.origin && m.origin.kind !== 'human') return undefined
|
||||
const content = m.message.content
|
||||
let raw: string | undefined
|
||||
if (typeof content === 'string') {
|
||||
raw = content
|
||||
} else {
|
||||
for (const block of content) {
|
||||
if (block.type === 'text') {
|
||||
raw = block.text
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
if (!raw) return undefined
|
||||
const clean = stripDisplayTagsAllowEmpty(raw)
|
||||
return clean || undefined
|
||||
}
|
||||
|
||||
// ─── Ingress routing ─────────────────────────────────────────────────────────
|
||||
|
||||
/**
 * Parse an ingress WebSocket message and route it to the appropriate handler.
 * Ignores messages whose UUID is in recentPostedUUIDs (echoes of what we sent)
 * or in recentInboundUUIDs (re-deliveries we've already forwarded — e.g.
 * server replayed history after a transport swap lost the seq-num cursor).
 *
 * @param data raw WebSocket payload (JSON text).
 * @param recentPostedUUIDs UUIDs of messages we posted — used to drop echoes.
 * @param recentInboundUUIDs UUIDs already forwarded — used to drop re-deliveries.
 * @param onInboundMessage receives user messages; may be async (fire-and-forget).
 * @param onPermissionResponse receives control_response messages.
 * @param onControlRequest receives server-initiated control_request messages.
 */
export function handleIngressMessage(
  data: string,
  recentPostedUUIDs: BoundedUUIDSet,
  recentInboundUUIDs: BoundedUUIDSet,
  onInboundMessage: ((msg: SDKMessage) => void | Promise<void>) | undefined,
  onPermissionResponse?: ((response: SDKControlResponse) => void) | undefined,
  onControlRequest?: ((request: SDKControlRequest) => void) | undefined,
): void {
  try {
    const parsed: unknown = normalizeControlMessageKeys(jsonParse(data))

    // control_response is not an SDKMessage — check before the type guard
    if (isSDKControlResponse(parsed)) {
      logForDebugging('[bridge:repl] Ingress message type=control_response')
      onPermissionResponse?.(parsed)
      return
    }

    // control_request from the server (initialize, set_model, can_use_tool).
    // Must respond promptly or the server kills the WS (~10-14s timeout).
    if (isSDKControlRequest(parsed)) {
      logForDebugging(
        `[bridge:repl] Inbound control_request subtype=${parsed.request.subtype}`,
      )
      onControlRequest?.(parsed)
      return
    }

    if (!isSDKMessage(parsed)) return

    // Check for UUID to detect echoes of our own messages
    const uuid =
      'uuid' in parsed && typeof parsed.uuid === 'string'
        ? parsed.uuid
        : undefined

    // Echo check runs before the inbound-dedup check: a message we posted
    // ourselves must never be re-forwarded, regardless of inbound history.
    if (uuid && recentPostedUUIDs.has(uuid)) {
      logForDebugging(
        `[bridge:repl] Ignoring echo: type=${parsed.type} uuid=${uuid}`,
      )
      return
    }

    // Defensive dedup: drop inbound prompts we've already forwarded. The
    // SSE seq-num carryover (lastTransportSequenceNum) is the primary fix
    // for history-replay; this catches edge cases where that negotiation
    // fails (server ignores from_sequence_num, transport died before
    // receiving any frames, etc).
    if (uuid && recentInboundUUIDs.has(uuid)) {
      logForDebugging(
        `[bridge:repl] Ignoring re-delivered inbound: type=${parsed.type} uuid=${uuid}`,
      )
      return
    }

    logForDebugging(
      `[bridge:repl] Ingress message type=${parsed.type}${uuid ? ` uuid=${uuid}` : ''}`,
    )

    if (parsed.type === 'user') {
      // Record the UUID before dispatch so a racing re-delivery is dropped.
      if (uuid) recentInboundUUIDs.add(uuid)
      logEvent('tengu_bridge_message_received', {
        is_repl: true,
      })
      // Fire-and-forget — handler may be async (attachment resolution).
      void onInboundMessage?.(parsed)
    } else {
      logForDebugging(
        `[bridge:repl] Ignoring non-user inbound message: type=${parsed.type}`,
      )
    }
  } catch (err) {
    // Malformed payloads are logged and dropped — ingress must never throw.
    logForDebugging(
      `[bridge:repl] Failed to parse ingress message: ${errorMessage(err)}`,
    )
  }
}
|
||||
|
||||
// ─── Server-initiated control requests ───────────────────────────────────────
|
||||
|
||||
/** Collaborators for handleServerControlRequest — passed explicitly so both
 * bridge cores can share the handler without closing over core state. */
export type ServerControlRequestHandlers = {
  /** Where control_responses are written; null disables responding entirely. */
  transport: ReplBridgeTransport | null
  /** Stamped onto every outgoing control_response as session_id. */
  sessionId: string
  /**
   * When true, all mutable requests (interrupt, set_model, set_permission_mode,
   * set_max_thinking_tokens) reply with an error instead of false-success.
   * initialize still replies success — the server kills the connection otherwise.
   * Used by the outbound-only bridge mode and the SDK's /bridge subpath so claude.ai sees a
   * proper error instead of "action succeeded but nothing happened locally".
   */
  outboundOnly?: boolean
  /** Invoked for the 'interrupt' subtype before replying success. */
  onInterrupt?: () => void
  /** Invoked for 'set_model' with the requested model (or undefined). */
  onSetModel?: (model: string | undefined) => void
  /** Invoked for 'set_max_thinking_tokens'; null clears the limit. */
  onSetMaxThinkingTokens?: (maxTokens: number | null) => void
  /** Policy verdict for 'set_permission_mode' — error verdicts become
   * error control_responses (see handleServerControlRequest). */
  onSetPermissionMode?: (
    mode: PermissionMode,
  ) => { ok: true } | { ok: false; error: string }
}
|
||||
|
||||
/** Error text sent for mutable control_requests when handlers.outboundOnly is set. */
const OUTBOUND_ONLY_ERROR =
  'This session is outbound-only. Enable Remote Control locally to allow inbound control.'
|
||||
|
||||
/**
 * Respond to inbound control_request messages from the server. The server
 * sends these for session lifecycle events (initialize, set_model) and
 * for turn-level coordination (interrupt, set_max_thinking_tokens). If we
 * don't respond, the server hangs and kills the WS after ~10-14s.
 *
 * Previously a closure inside initBridgeCore's onWorkReceived; now takes
 * collaborators as params so both cores can use it.
 *
 * @param request the control_request to answer (every path writes exactly
 *   one control_response, unless transport is null).
 * @param handlers transport, session id, and the per-subtype callbacks.
 */
export function handleServerControlRequest(
  request: SDKControlRequest,
  handlers: ServerControlRequestHandlers,
): void {
  const {
    transport,
    sessionId,
    outboundOnly,
    onInterrupt,
    onSetModel,
    onSetMaxThinkingTokens,
    onSetPermissionMode,
  } = handlers
  if (!transport) {
    logForDebugging(
      '[bridge:repl] Cannot respond to control_request: transport not configured',
    )
    return
  }

  let response: SDKControlResponse

  // Outbound-only: reply error for mutable requests so claude.ai doesn't show
  // false success. initialize must still succeed (server kills the connection
  // if it doesn't — see comment above).
  if (outboundOnly && request.request.subtype !== 'initialize') {
    response = {
      type: 'control_response',
      response: {
        subtype: 'error',
        request_id: request.request_id,
        error: OUTBOUND_ONLY_ERROR,
      },
    }
    const event = { ...response, session_id: sessionId }
    void transport.write(event)
    logForDebugging(
      `[bridge:repl] Rejected ${request.request.subtype} (outbound-only) request_id=${request.request_id}`,
    )
    return
  }

  switch (request.request.subtype) {
    case 'initialize':
      // Respond with minimal capabilities — the REPL handles
      // commands, models, and account info itself.
      response = {
        type: 'control_response',
        response: {
          subtype: 'success',
          request_id: request.request_id,
          response: {
            commands: [],
            output_style: 'normal',
            available_output_styles: ['normal'],
            models: [],
            account: {},
            pid: process.pid,
          },
        },
      }
      break

    case 'set_model':
      onSetModel?.(request.request.model)
      response = {
        type: 'control_response',
        response: {
          subtype: 'success',
          request_id: request.request_id,
        },
      }
      break

    case 'set_max_thinking_tokens':
      onSetMaxThinkingTokens?.(request.request.max_thinking_tokens)
      response = {
        type: 'control_response',
        response: {
          subtype: 'success',
          request_id: request.request_id,
        },
      }
      break

    case 'set_permission_mode': {
      // The callback returns a policy verdict so we can send an error
      // control_response without importing isAutoModeGateEnabled /
      // isBypassPermissionsModeDisabled here (bootstrap-isolation). If no
      // callback is registered (daemon context, which doesn't wire this —
      // see daemonBridge.ts), return an error verdict rather than a silent
      // false-success: the mode is never actually applied in that context,
      // so success would lie to the client.
      const verdict = onSetPermissionMode?.(request.request.mode) ?? {
        ok: false,
        error:
          'set_permission_mode is not supported in this context (onSetPermissionMode callback not registered)',
      }
      if (verdict.ok) {
        response = {
          type: 'control_response',
          response: {
            subtype: 'success',
            request_id: request.request_id,
          },
        }
      } else {
        response = {
          type: 'control_response',
          response: {
            subtype: 'error',
            request_id: request.request_id,
            error: verdict.error,
          },
        }
      }
      break
    }

    case 'interrupt':
      onInterrupt?.()
      response = {
        type: 'control_response',
        response: {
          subtype: 'success',
          request_id: request.request_id,
        },
      }
      break

    default:
      // Unknown subtype — respond with error so the server doesn't
      // hang waiting for a reply that never comes.
      response = {
        type: 'control_response',
        response: {
          subtype: 'error',
          request_id: request.request_id,
          error: `REPL bridge does not handle control_request subtype: ${request.request.subtype}`,
        },
      }
  }

  const event = { ...response, session_id: sessionId }
  void transport.write(event)
  logForDebugging(
    `[bridge:repl] Sent control_response for ${request.request.subtype} request_id=${request.request_id} result=${response.response.subtype}`,
  )
}
|
||||
|
||||
// ─── Result message (for session archival on teardown) ───────────────────────
|
||||
|
||||
/**
 * Build a minimal `SDKResultSuccess` message for session archival.
 * The server needs this event before a WS close to trigger archival.
 *
 * All counters/costs are zeroed — the event's presence (not its stats)
 * is what matters to the archival path.
 *
 * @param sessionId stamped onto the result as session_id.
 */
export function makeResultMessage(sessionId: string): SDKResultSuccess {
  return {
    type: 'result',
    subtype: 'success',
    duration_ms: 0,
    duration_api_ms: 0,
    is_error: false,
    num_turns: 0,
    result: '',
    stop_reason: null,
    total_cost_usd: 0,
    usage: { ...EMPTY_USAGE },
    modelUsage: {},
    permission_denials: [],
    session_id: sessionId,
    uuid: randomUUID(),
  }
}
|
||||
|
||||
// ─── BoundedUUIDSet (echo-dedup ring buffer) ─────────────────────────────────
|
||||
|
||||
/**
|
||||
* FIFO-bounded set backed by a circular buffer. Evicts the oldest entry
|
||||
* when capacity is reached, keeping memory usage constant at O(capacity).
|
||||
*
|
||||
* Messages are added in chronological order, so evicted entries are always
|
||||
* the oldest. The caller relies on external ordering (the hook's
|
||||
* lastWrittenIndexRef) as the primary dedup — this set is a secondary
|
||||
* safety net for echo filtering and race-condition dedup.
|
||||
*/
|
||||
export class BoundedUUIDSet {
|
||||
private readonly capacity: number
|
||||
private readonly ring: (string | undefined)[]
|
||||
private readonly set = new Set<string>()
|
||||
private writeIdx = 0
|
||||
|
||||
constructor(capacity: number) {
|
||||
this.capacity = capacity
|
||||
this.ring = new Array<string | undefined>(capacity)
|
||||
}
|
||||
|
||||
add(uuid: string): void {
|
||||
if (this.set.has(uuid)) return
|
||||
// Evict the entry at the current write position (if occupied)
|
||||
const evicted = this.ring[this.writeIdx]
|
||||
if (evicted !== undefined) {
|
||||
this.set.delete(evicted)
|
||||
}
|
||||
this.ring[this.writeIdx] = uuid
|
||||
this.set.add(uuid)
|
||||
this.writeIdx = (this.writeIdx + 1) % this.capacity
|
||||
}
|
||||
|
||||
has(uuid: string): boolean {
|
||||
return this.set.has(uuid)
|
||||
}
|
||||
|
||||
clear(): void {
|
||||
this.set.clear()
|
||||
this.ring.fill(undefined)
|
||||
this.writeIdx = 0
|
||||
}
|
||||
}
|
||||
43
src/bridge/bridgePermissionCallbacks.ts
Normal file
43
src/bridge/bridgePermissionCallbacks.ts
Normal file
@ -0,0 +1,43 @@
|
||||
import type { PermissionUpdate } from '../utils/permissions/PermissionUpdateSchema.js'
|
||||
|
||||
/** Decision payload carried in a permission control_response. */
type BridgePermissionResponse = {
  /** Discriminant: whether the tool call may proceed. */
  behavior: 'allow' | 'deny'
  /** Optional replacement input for the tool call (allow path). */
  updatedInput?: Record<string, unknown>
  /** Optional permission rule updates to persist alongside the decision. */
  updatedPermissions?: PermissionUpdate[]
  /** Optional human-readable explanation (e.g. deny reason). */
  message?: string
}
|
||||
|
||||
/** Wiring between the permission prompt flow and the bridge transport. */
type BridgePermissionCallbacks = {
  /** Forward a permission prompt for a tool call to the remote side. */
  sendRequest(
    requestId: string,
    toolName: string,
    input: Record<string, unknown>,
    toolUseId: string,
    description: string,
    permissionSuggestions?: PermissionUpdate[],
    blockedPath?: string,
  ): void
  /** Send the local decision for a previously-forwarded request. */
  sendResponse(requestId: string, response: BridgePermissionResponse): void
  /** Cancel a pending control_request so the web app can dismiss its prompt. */
  cancelRequest(requestId: string): void
  /** Subscribe to the remote decision for requestId. */
  onResponse(
    requestId: string,
    handler: (response: BridgePermissionResponse) => void,
  ): () => void // returns unsubscribe
}
|
||||
|
||||
/** Type predicate for validating a parsed control_response payload
|
||||
* as a BridgePermissionResponse. Checks the required `behavior`
|
||||
* discriminant rather than using an unsafe `as` cast. */
|
||||
function isBridgePermissionResponse(
|
||||
value: unknown,
|
||||
): value is BridgePermissionResponse {
|
||||
if (!value || typeof value !== 'object') return false
|
||||
return (
|
||||
'behavior' in value &&
|
||||
(value.behavior === 'allow' || value.behavior === 'deny')
|
||||
)
|
||||
}
|
||||
|
||||
export { isBridgePermissionResponse }
|
||||
export type { BridgePermissionCallbacks, BridgePermissionResponse }
|
||||
210
src/bridge/bridgePointer.ts
Normal file
210
src/bridge/bridgePointer.ts
Normal file
@ -0,0 +1,210 @@
|
||||
import { mkdir, readFile, stat, unlink, writeFile } from 'fs/promises'
|
||||
import { dirname, join } from 'path'
|
||||
import { z } from 'zod/v4'
|
||||
import { logForDebugging } from '../utils/debug.js'
|
||||
import { isENOENT } from '../utils/errors.js'
|
||||
import { getWorktreePathsPortable } from '../utils/getWorktreePathsPortable.js'
|
||||
import { lazySchema } from '../utils/lazySchema.js'
|
||||
import {
|
||||
getProjectsDir,
|
||||
sanitizePath,
|
||||
} from '../utils/sessionStoragePortable.js'
|
||||
import { jsonParse, jsonStringify } from '../utils/slowOperations.js'
|
||||
|
||||
/**
 * Upper bound on worktree fanout. git worktree list is naturally bounded
 * (50 is a LOT), but this caps the parallel stat() burst and guards against
 * pathological setups. Above this, --continue falls back to current-dir-only.
 * Deliberately generous — typical repos have far fewer worktrees.
 */
const MAX_WORKTREE_FANOUT = 50
|
||||
|
||||
/**
|
||||
* Crash-recovery pointer for Remote Control sessions.
|
||||
*
|
||||
* Written immediately after a bridge session is created, periodically
|
||||
* refreshed during the session, and cleared on clean shutdown. If the
|
||||
* process dies unclean (crash, kill -9, terminal closed), the pointer
|
||||
* persists. On next startup, `claude remote-control` detects it and offers
|
||||
* to resume via the --session-id flow from #20460.
|
||||
*
|
||||
* Staleness is checked against the file's mtime (not an embedded timestamp)
|
||||
* so that a periodic re-write with the same content serves as a refresh —
|
||||
* matches the backend's rolling BRIDGE_LAST_POLL_TTL (4h) semantics. A
|
||||
* bridge that's been polling for 5+ hours and then crashes still has a
|
||||
* fresh pointer as long as the refresh ran within the window.
|
||||
*
|
||||
* Scoped per working directory (alongside transcript JSONL files) so two
|
||||
* concurrent bridges in different repos don't clobber each other.
|
||||
*/
|
||||
|
||||
export const BRIDGE_POINTER_TTL_MS = 4 * 60 * 60 * 1000
|
||||
|
||||
// On-disk shape of bridge-pointer.json. Wrapped in lazySchema — presumably to
// defer zod schema construction until first use (TODO confirm lazySchema semantics).
const BridgePointerSchema = lazySchema(() =>
  z.object({
    sessionId: z.string(),
    environmentId: z.string(),
    // Which flavor of bridge wrote the pointer: a standalone
    // `claude remote-control` process or an in-REPL bridge.
    source: z.enum(['standalone', 'repl']),
  }),
)

// Inferred TS type of a validated pointer record.
export type BridgePointer = z.infer<ReturnType<typeof BridgePointerSchema>>
|
||||
|
||||
export function getBridgePointerPath(dir: string): string {
|
||||
return join(getProjectsDir(), sanitizePath(dir), 'bridge-pointer.json')
|
||||
}
|
||||
|
||||
/**
|
||||
* Write the pointer. Also used to refresh mtime during long sessions —
|
||||
* calling with the same IDs is a cheap no-content-change write that bumps
|
||||
* the staleness clock. Best-effort — a crash-recovery file must never
|
||||
* itself cause a crash. Logs and swallows on error.
|
||||
*/
|
||||
export async function writeBridgePointer(
|
||||
dir: string,
|
||||
pointer: BridgePointer,
|
||||
): Promise<void> {
|
||||
const path = getBridgePointerPath(dir)
|
||||
try {
|
||||
await mkdir(dirname(path), { recursive: true })
|
||||
await writeFile(path, jsonStringify(pointer), 'utf8')
|
||||
logForDebugging(`[bridge:pointer] wrote ${path}`)
|
||||
} catch (err: unknown) {
|
||||
logForDebugging(`[bridge:pointer] write failed: ${err}`, { level: 'warn' })
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Read the pointer and its age (ms since last write). Operates directly
|
||||
* and handles errors — no existence check (CLAUDE.md TOCTOU rule). Returns
|
||||
* null on any failure: missing file, corrupted JSON, schema mismatch, or
|
||||
* stale (mtime > 4h ago). Stale/invalid pointers are deleted so they don't
|
||||
* keep re-prompting after the backend has already GC'd the env.
|
||||
*/
|
||||
export async function readBridgePointer(
|
||||
dir: string,
|
||||
): Promise<(BridgePointer & { ageMs: number }) | null> {
|
||||
const path = getBridgePointerPath(dir)
|
||||
let raw: string
|
||||
let mtimeMs: number
|
||||
try {
|
||||
// stat for mtime (staleness anchor), then read. Two syscalls, but both
|
||||
// are needed — mtime IS the data we return, not a TOCTOU guard.
|
||||
mtimeMs = (await stat(path)).mtimeMs
|
||||
raw = await readFile(path, 'utf8')
|
||||
} catch {
|
||||
return null
|
||||
}
|
||||
|
||||
const parsed = BridgePointerSchema().safeParse(safeJsonParse(raw))
|
||||
if (!parsed.success) {
|
||||
logForDebugging(`[bridge:pointer] invalid schema, clearing: ${path}`)
|
||||
await clearBridgePointer(dir)
|
||||
return null
|
||||
}
|
||||
|
||||
const ageMs = Math.max(0, Date.now() - mtimeMs)
|
||||
if (ageMs > BRIDGE_POINTER_TTL_MS) {
|
||||
logForDebugging(`[bridge:pointer] stale (>4h mtime), clearing: ${path}`)
|
||||
await clearBridgePointer(dir)
|
||||
return null
|
||||
}
|
||||
|
||||
return { ...parsed.data, ageMs }
|
||||
}
|
||||
|
||||
/**
|
||||
* Worktree-aware read for `--continue`. The REPL bridge writes its pointer
|
||||
* to `getOriginalCwd()` which EnterWorktreeTool/activeWorktreeSession can
|
||||
* mutate to a worktree path — but `claude remote-control --continue` runs
|
||||
* with `resolve('.')` = shell CWD. This fans out across git worktree
|
||||
* siblings to find the freshest pointer, matching /resume's semantics.
|
||||
*
|
||||
* Fast path: checks `dir` first. Only shells out to `git worktree list` if
|
||||
* that misses — the common case (pointer in launch dir) is one stat, zero
|
||||
* exec. Fanout reads run in parallel; capped at MAX_WORKTREE_FANOUT.
|
||||
*
|
||||
* Returns the pointer AND the dir it was found in, so the caller can clear
|
||||
* the right file on resume failure.
|
||||
*/
|
||||
export async function readBridgePointerAcrossWorktrees(
|
||||
dir: string,
|
||||
): Promise<{ pointer: BridgePointer & { ageMs: number }; dir: string } | null> {
|
||||
// Fast path: current dir. Covers standalone bridge (always matches) and
|
||||
// REPL bridge when no worktree mutation happened.
|
||||
const here = await readBridgePointer(dir)
|
||||
if (here) {
|
||||
return { pointer: here, dir }
|
||||
}
|
||||
|
||||
// Fanout: scan worktree siblings. getWorktreePathsPortable has a 5s
|
||||
// timeout and returns [] on any error (not a git repo, git not installed).
|
||||
const worktrees = await getWorktreePathsPortable(dir)
|
||||
if (worktrees.length <= 1) return null
|
||||
if (worktrees.length > MAX_WORKTREE_FANOUT) {
|
||||
logForDebugging(
|
||||
`[bridge:pointer] ${worktrees.length} worktrees exceeds fanout cap ${MAX_WORKTREE_FANOUT}, skipping`,
|
||||
)
|
||||
return null
|
||||
}
|
||||
|
||||
// Dedupe against `dir` so we don't re-stat it. sanitizePath normalizes
|
||||
// case/separators so worktree-list output matches our fast-path key even
|
||||
// on Windows where git may emit C:/ vs stored c:/.
|
||||
const dirKey = sanitizePath(dir)
|
||||
const candidates = worktrees.filter(wt => sanitizePath(wt) !== dirKey)
|
||||
|
||||
// Parallel stat+read. Each readBridgePointer is a stat() that ENOENTs
|
||||
// for worktrees with no pointer (cheap) plus a ~100-byte read for the
|
||||
// rare ones that have one. Promise.all → latency ≈ slowest single stat.
|
||||
const results = await Promise.all(
|
||||
candidates.map(async wt => {
|
||||
const p = await readBridgePointer(wt)
|
||||
return p ? { pointer: p, dir: wt } : null
|
||||
}),
|
||||
)
|
||||
|
||||
// Pick freshest (lowest ageMs). The pointer stores environmentId so
|
||||
// resume reconnects to the right env regardless of which worktree
|
||||
// --continue was invoked from.
|
||||
let freshest: {
|
||||
pointer: BridgePointer & { ageMs: number }
|
||||
dir: string
|
||||
} | null = null
|
||||
for (const r of results) {
|
||||
if (r && (!freshest || r.pointer.ageMs < freshest.pointer.ageMs)) {
|
||||
freshest = r
|
||||
}
|
||||
}
|
||||
if (freshest) {
|
||||
logForDebugging(
|
||||
`[bridge:pointer] fanout found pointer in worktree ${freshest.dir} (ageMs=${freshest.pointer.ageMs})`,
|
||||
)
|
||||
}
|
||||
return freshest
|
||||
}
|
||||
|
||||
/**
|
||||
* Delete the pointer. Idempotent — ENOENT is expected when the process
|
||||
* shut down clean previously.
|
||||
*/
|
||||
export async function clearBridgePointer(dir: string): Promise<void> {
|
||||
const path = getBridgePointerPath(dir)
|
||||
try {
|
||||
await unlink(path)
|
||||
logForDebugging(`[bridge:pointer] cleared ${path}`)
|
||||
} catch (err: unknown) {
|
||||
if (!isENOENT(err)) {
|
||||
logForDebugging(`[bridge:pointer] clear failed: ${err}`, {
|
||||
level: 'warn',
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
 * Parse JSON without throwing: returns the parsed value, or null when the
 * input is malformed. Null then fails the zod schema check in
 * readBridgePointer, which routes corrupt files into the clear-and-return path.
 */
function safeJsonParse(raw: string): unknown {
  try {
    return jsonParse(raw)
  } catch {
    return null
  }
}
|
||||
163
src/bridge/bridgeStatusUtil.ts
Normal file
163
src/bridge/bridgeStatusUtil.ts
Normal file
@ -0,0 +1,163 @@
|
||||
import {
|
||||
getClaudeAiBaseUrl,
|
||||
getRemoteSessionUrl,
|
||||
} from '../constants/product.js'
|
||||
import { stringWidth } from '../ink/stringWidth.js'
|
||||
import { formatDuration, truncateToWidth } from '../utils/format.js'
|
||||
import { getGraphemeSegmenter } from '../utils/intl.js'
|
||||
|
||||
/** Bridge status state machine states. */
export type StatusState =
  | 'idle' // no session attached; bridgeUI renders "Ready"
  | 'attached' // a session is connected; rendered as "Connected"
  | 'titled' // single-session mode once the session has a title to display
  | 'reconnecting' // transport lost, retry loop active (spinner line)
  | 'failed' // terminal error state (red indicator + failure footer)

/** How long a tool activity line stays visible after last tool_start (ms). */
export const TOOL_DISPLAY_EXPIRY_MS = 30_000

/** Interval for the shimmer animation tick (ms). */
export const SHIMMER_INTERVAL_MS = 150
|
||||
|
||||
export function timestamp(): string {
|
||||
const now = new Date()
|
||||
const h = String(now.getHours()).padStart(2, '0')
|
||||
const m = String(now.getMinutes()).padStart(2, '0')
|
||||
const s = String(now.getSeconds()).padStart(2, '0')
|
||||
return `${h}:${m}:${s}`
|
||||
}
|
||||
|
||||
// Re-export shared formatting helpers under the names the bridge renderers use.
export { formatDuration, truncateToWidth as truncatePrompt }
|
||||
|
||||
/** Abbreviate a tool activity summary for the trail display. */
|
||||
export function abbreviateActivity(summary: string): string {
|
||||
return truncateToWidth(summary, 30)
|
||||
}
|
||||
|
||||
/** Build the connect URL shown when the bridge is idle. */
|
||||
export function buildBridgeConnectUrl(
|
||||
environmentId: string,
|
||||
ingressUrl?: string,
|
||||
): string {
|
||||
const baseUrl = getClaudeAiBaseUrl(undefined, ingressUrl)
|
||||
return `${baseUrl}/code?bridge=${environmentId}`
|
||||
}
|
||||
|
||||
/**
|
||||
* Build the session URL shown when a session is attached. Delegates to
|
||||
* getRemoteSessionUrl for the cse_→session_ prefix translation, then appends
|
||||
* the v1-specific ?bridge={environmentId} query.
|
||||
*/
|
||||
export function buildBridgeSessionUrl(
|
||||
sessionId: string,
|
||||
environmentId: string,
|
||||
ingressUrl?: string,
|
||||
): string {
|
||||
return `${getRemoteSessionUrl(sessionId, ingressUrl)}?bridge=${environmentId}`
|
||||
}
|
||||
|
||||
/** Compute the glimmer index for a reverse-sweep shimmer animation. */
|
||||
export function computeGlimmerIndex(
|
||||
tick: number,
|
||||
messageWidth: number,
|
||||
): number {
|
||||
const cycleLength = messageWidth + 20
|
||||
return messageWidth + 10 - (tick % cycleLength)
|
||||
}
|
||||
|
||||
/**
|
||||
* Split text into three segments by visual column position for shimmer rendering.
|
||||
*
|
||||
* Uses grapheme segmentation and `stringWidth` so the split is correct for
|
||||
* multi-byte characters, emoji, and CJK glyphs.
|
||||
*
|
||||
* Returns `{ before, shimmer, after }` strings. Both renderers (chalk in
|
||||
* bridgeUI.ts and React/Ink in bridge.tsx) apply their own coloring to
|
||||
* these segments.
|
||||
*/
|
||||
export function computeShimmerSegments(
|
||||
text: string,
|
||||
glimmerIndex: number,
|
||||
): { before: string; shimmer: string; after: string } {
|
||||
const messageWidth = stringWidth(text)
|
||||
const shimmerStart = glimmerIndex - 1
|
||||
const shimmerEnd = glimmerIndex + 1
|
||||
|
||||
// When shimmer is offscreen, return all text as "before"
|
||||
if (shimmerStart >= messageWidth || shimmerEnd < 0) {
|
||||
return { before: text, shimmer: '', after: '' }
|
||||
}
|
||||
|
||||
// Split into at most 3 segments by visual column position
|
||||
const clampedStart = Math.max(0, shimmerStart)
|
||||
let colPos = 0
|
||||
let before = ''
|
||||
let shimmer = ''
|
||||
let after = ''
|
||||
for (const { segment } of getGraphemeSegmenter().segment(text)) {
|
||||
const segWidth = stringWidth(segment)
|
||||
if (colPos + segWidth <= clampedStart) {
|
||||
before += segment
|
||||
} else if (colPos > shimmerEnd) {
|
||||
after += segment
|
||||
} else {
|
||||
shimmer += segment
|
||||
}
|
||||
colPos += segWidth
|
||||
}
|
||||
|
||||
return { before, shimmer, after }
|
||||
}
|
||||
|
||||
/** Computed bridge status label and color from connection state. */
export type BridgeStatusInfo = {
  // Exact user-facing strings — getBridgeStatus returns one of these four.
  label:
    | 'Remote Control failed'
    | 'Remote Control reconnecting'
    | 'Remote Control active'
    | 'Remote Control connecting\u2026'
  // Semantic color token; the renderer maps it to a concrete style.
  color: 'error' | 'warning' | 'success'
}
|
||||
|
||||
/** Derive a status label and color from the bridge connection state. */
|
||||
export function getBridgeStatus({
|
||||
error,
|
||||
connected,
|
||||
sessionActive,
|
||||
reconnecting,
|
||||
}: {
|
||||
error: string | undefined
|
||||
connected: boolean
|
||||
sessionActive: boolean
|
||||
reconnecting: boolean
|
||||
}): BridgeStatusInfo {
|
||||
if (error) return { label: 'Remote Control failed', color: 'error' }
|
||||
if (reconnecting)
|
||||
return { label: 'Remote Control reconnecting', color: 'warning' }
|
||||
if (sessionActive || connected)
|
||||
return { label: 'Remote Control active', color: 'success' }
|
||||
return { label: 'Remote Control connecting\u2026', color: 'warning' }
|
||||
}
|
||||
|
||||
/** Footer text shown when bridge is idle (Ready state). */
|
||||
export function buildIdleFooterText(url: string): string {
|
||||
return `Code everywhere with the Claude app or ${url}`
|
||||
}
|
||||
|
||||
/** Footer text shown when a session is active (Connected state). */
|
||||
export function buildActiveFooterText(url: string): string {
|
||||
return `Continue coding in the Claude app or ${url}`
|
||||
}
|
||||
|
||||
/** Footer text shown when the bridge has failed. Rendered by
 * updateFailedStatus in bridgeUI.ts beneath the red failure indicator. */
export const FAILED_FOOTER_TEXT = 'Something went wrong, please try again'
|
||||
|
||||
/**
|
||||
* Wrap text in an OSC 8 terminal hyperlink. Zero visual width for layout purposes.
|
||||
* strip-ansi (used by stringWidth) correctly strips these sequences, so
|
||||
* countVisualLines in bridgeUI.ts remains accurate.
|
||||
*/
|
||||
export function wrapWithOsc8Link(text: string, url: string): string {
|
||||
return `\x1b]8;;${url}\x07${text}\x1b]8;;\x07`
|
||||
}
|
||||
530
src/bridge/bridgeUI.ts
Normal file
530
src/bridge/bridgeUI.ts
Normal file
@ -0,0 +1,530 @@
|
||||
import chalk from 'chalk'
|
||||
import { toString as qrToString } from 'qrcode'
|
||||
import {
|
||||
BRIDGE_FAILED_INDICATOR,
|
||||
BRIDGE_READY_INDICATOR,
|
||||
BRIDGE_SPINNER_FRAMES,
|
||||
} from '../constants/figures.js'
|
||||
import { stringWidth } from '../ink/stringWidth.js'
|
||||
import { logForDebugging } from '../utils/debug.js'
|
||||
import {
|
||||
buildActiveFooterText,
|
||||
buildBridgeConnectUrl,
|
||||
buildBridgeSessionUrl,
|
||||
buildIdleFooterText,
|
||||
FAILED_FOOTER_TEXT,
|
||||
formatDuration,
|
||||
type StatusState,
|
||||
TOOL_DISPLAY_EXPIRY_MS,
|
||||
timestamp,
|
||||
truncatePrompt,
|
||||
wrapWithOsc8Link,
|
||||
} from './bridgeStatusUtil.js'
|
||||
import type {
|
||||
BridgeConfig,
|
||||
BridgeLogger,
|
||||
SessionActivity,
|
||||
SpawnMode,
|
||||
} from './types.js'
|
||||
|
||||
// Rendering options passed to qrcode's toString: UTF-8 terminal output with
// the lowest error-correction level (smallest code). `small` presumably
// selects the half-block compact rendering — TODO confirm against qrcode docs.
const QR_OPTIONS = {
  type: 'utf8' as const,
  errorCorrectionLevel: 'L' as const,
  small: true,
}
|
||||
|
||||
/** Generate a QR code and return its lines. */
|
||||
async function generateQr(url: string): Promise<string[]> {
|
||||
const qr = await qrToString(url, QR_OPTIONS)
|
||||
return qr.split('\n').filter((line: string) => line.length > 0)
|
||||
}
|
||||
|
||||
/**
 * Create the plain-terminal (non-Ink) renderer for Remote Control. Returns a
 * BridgeLogger whose methods write ANSI-styled lines via `options.write`
 * (default: process.stdout). All display state lives in this closure; the
 * key invariant is `statusLineCount`, which must always equal the number of
 * visual rows currently occupied by the status block so clearStatusLines can
 * erase exactly that many rows.
 */
export function createBridgeLogger(options: {
  verbose: boolean
  write?: (s: string) => void
}): BridgeLogger {
  const write = options.write ?? ((s: string) => process.stdout.write(s))
  const verbose = options.verbose

  // Track how many status lines are currently displayed at the bottom
  let statusLineCount = 0

  // Status state machine
  let currentState: StatusState = 'idle'
  let currentStateText = 'Ready'
  let repoName = ''
  let branch = ''
  let debugLogPath = ''

  // Connect URL (built in printBanner with correct base for staging/prod)
  let connectUrl = ''
  let cachedIngressUrl = ''
  let cachedEnvironmentId = ''
  // Non-null while a single-session attachment has its own session URL.
  let activeSessionUrl: string | null = null

  // QR code lines for the current URL
  let qrLines: string[] = []
  let qrVisible = false

  // Tool activity for the second status line
  let lastToolSummary: string | null = null
  let lastToolTime = 0

  // Session count indicator (shown when multi-session mode is enabled)
  let sessionActive = 0
  let sessionMax = 1
  // Spawn mode shown in the session-count line + gates the `w` hint
  let spawnModeDisplay: 'same-dir' | 'worktree' | null = null
  let spawnMode: SpawnMode = 'single-session'

  // Per-session display info for the multi-session bullet list (keyed by compat sessionId)
  const sessionDisplayInfo = new Map<
    string,
    { title?: string; url: string; activity?: SessionActivity }
  >()

  // Connecting spinner state
  let connectingTimer: ReturnType<typeof setInterval> | null = null
  let connectingTick = 0
|
||||
|
||||
  /**
   * Count how many visual terminal rows a string occupies, accounting for
   * line wrapping. Each `\n` is one row, and content wider than the terminal
   * wraps to additional rows.
   */
  function countVisualLines(text: string): number {
    // eslint-disable-next-line custom-rules/prefer-use-terminal-size
    const cols = process.stdout.columns || 80 // non-React CLI context
    let count = 0
    // Split on newlines to get logical lines
    for (const logical of text.split('\n')) {
      if (logical.length === 0) {
        // Empty segment between consecutive \n — counts as 1 row
        count++
        continue
      }
      const width = stringWidth(logical)
      count += Math.max(1, Math.ceil(width / cols))
    }
    // The trailing \n in "line\n" produces an empty last element — don't count it
    // because the cursor sits at the start of the next line, not a new visual row.
    if (text.endsWith('\n')) {
      count--
    }
    return count
  }

  /** Write a status line and track its visual line count. */
  function writeStatus(text: string): void {
    write(text)
    statusLineCount += countVisualLines(text)
  }

  /** Clear any currently displayed status lines. */
  function clearStatusLines(): void {
    if (statusLineCount <= 0) return
    logForDebugging(`[bridge:ui] clearStatusLines count=${statusLineCount}`)
    // Move cursor up to the start of the status block, then erase everything below
    write(`\x1b[${statusLineCount}A`) // cursor up N lines
    write('\x1b[J') // erase from cursor to end of screen
    statusLineCount = 0
  }

  /** Print a permanent log line, clearing status first and restoring after. */
  function printLog(line: string): void {
    clearStatusLines()
    write(line)
  }

  /** Regenerate the QR code with the given URL. */
  // Fire-and-forget: the async generation re-renders when it lands; failures
  // are logged and leave the previous qrLines in place.
  function regenerateQr(url: string): void {
    generateQr(url)
      .then(lines => {
        qrLines = lines
        renderStatusLine()
      })
      .catch(e => {
        logForDebugging(`QR code generation failed: ${e}`, { level: 'error' })
      })
  }

  /** Render the connecting spinner line (shown before first updateIdleStatus). */
  function renderConnectingLine(): void {
    clearStatusLines()

    const frame =
      BRIDGE_SPINNER_FRAMES[connectingTick % BRIDGE_SPINNER_FRAMES.length]!
    let suffix = ''
    if (repoName) {
      suffix += chalk.dim(' \u00b7 ') + chalk.dim(repoName)
    }
    if (branch) {
      suffix += chalk.dim(' \u00b7 ') + chalk.dim(branch)
    }
    writeStatus(
      `${chalk.yellow(frame)} ${chalk.yellow('Connecting')}${suffix}\n`,
    )
  }

  /** Start the connecting spinner. Stopped by first updateIdleStatus(). */
  function startConnecting(): void {
    stopConnecting()
    renderConnectingLine()
    connectingTimer = setInterval(() => {
      connectingTick++
      renderConnectingLine()
    }, 150)
  }

  /** Stop the connecting spinner. */
  function stopConnecting(): void {
    if (connectingTimer) {
      clearInterval(connectingTimer)
      connectingTimer = null
    }
  }
|
||||
|
||||
  /** Render and write the current status lines based on state. */
  function renderStatusLine(): void {
    if (currentState === 'reconnecting' || currentState === 'failed') {
      // These states are handled separately (updateReconnectingStatus /
      // updateFailedStatus). Return before clearing so callers like toggleQr
      // and setSpawnModeDisplay don't blank the display during these states.
      return
    }

    clearStatusLines()

    const isIdle = currentState === 'idle'

    // QR code above the status line
    if (qrVisible) {
      for (const line of qrLines) {
        writeStatus(`${chalk.dim(line)}\n`)
      }
    }

    // Determine indicator and colors based on state
    const indicator = BRIDGE_READY_INDICATOR
    const indicatorColor = isIdle ? chalk.green : chalk.cyan
    const baseColor = isIdle ? chalk.green : chalk.cyan
    const stateText = baseColor(currentStateText)

    // Build the suffix with repo and branch
    let suffix = ''
    if (repoName) {
      suffix += chalk.dim(' \u00b7 ') + chalk.dim(repoName)
    }
    // In worktree mode each session gets its own branch, so showing the
    // bridge's branch would be misleading.
    if (branch && spawnMode !== 'worktree') {
      suffix += chalk.dim(' \u00b7 ') + chalk.dim(branch)
    }

    // Internal-user debug aid: surface the log file path above the status line.
    if (process.env.USER_TYPE === 'ant' && debugLogPath) {
      writeStatus(
        `${chalk.yellow('[ANT-ONLY] Logs:')} ${chalk.dim(debugLogPath)}\n`,
      )
    }
    writeStatus(`${indicatorColor(indicator)} ${stateText}${suffix}\n`)

    // Session count and per-session list (multi-session mode only)
    if (sessionMax > 1) {
      const modeHint =
        spawnMode === 'worktree'
          ? 'New sessions will be created in an isolated worktree'
          : 'New sessions will be created in the current directory'
      writeStatus(
        ` ${chalk.dim(`Capacity: ${sessionActive}/${sessionMax} \u00b7 ${modeHint}`)}\n`,
      )
      for (const [, info] of sessionDisplayInfo) {
        const titleText = info.title
          ? truncatePrompt(info.title, 35)
          : chalk.dim('Attached')
        const titleLinked = wrapWithOsc8Link(titleText, info.url)
        const act = info.activity
        // Hide terminal activity states; only in-flight activity is shown.
        const showAct = act && act.type !== 'result' && act.type !== 'error'
        const actText = showAct
          ? chalk.dim(` ${truncatePrompt(act.summary, 40)}`)
          : ''
        writeStatus(` ${titleLinked}${actText}
`)
      }
    }

    // Mode line for spawn modes with a single slot (or true single-session mode)
    if (sessionMax === 1) {
      const modeText =
        spawnMode === 'single-session'
          ? 'Single session \u00b7 exits when complete'
          : spawnMode === 'worktree'
            ? `Capacity: ${sessionActive}/1 \u00b7 New sessions will be created in an isolated worktree`
            : `Capacity: ${sessionActive}/1 \u00b7 New sessions will be created in the current directory`
      writeStatus(` ${chalk.dim(modeText)}\n`)
    }

    // Tool activity line for single-session mode
    if (
      sessionMax === 1 &&
      !isIdle &&
      lastToolSummary &&
      Date.now() - lastToolTime < TOOL_DISPLAY_EXPIRY_MS
    ) {
      writeStatus(` ${chalk.dim(truncatePrompt(lastToolSummary, 60))}\n`)
    }

    // Blank line separator before footer
    const url = activeSessionUrl ?? connectUrl
    if (url) {
      writeStatus('\n')
      const footerText = isIdle
        ? buildIdleFooterText(url)
        : buildActiveFooterText(url)
      const qrHint = qrVisible
        ? chalk.dim.italic('space to hide QR code')
        : chalk.dim.italic('space to show QR code')
      const toggleHint = spawnModeDisplay
        ? chalk.dim.italic(' \u00b7 w to toggle spawn mode')
        : ''
      writeStatus(`${chalk.dim(footerText)}\n`)
      writeStatus(`${qrHint}${toggleHint}\n`)
    }
  }
|
||||
|
||||
  // The BridgeLogger surface. log* methods print permanent lines (clearing
  // the status block first); the remaining methods mutate closure state and
  // re-render.
  return {
    /** One-time startup banner; caches URL inputs and starts the spinner. */
    printBanner(config: BridgeConfig, environmentId: string): void {
      cachedIngressUrl = config.sessionIngressUrl
      cachedEnvironmentId = environmentId
      connectUrl = buildBridgeConnectUrl(environmentId, cachedIngressUrl)
      regenerateQr(connectUrl)

      if (verbose) {
        // NOTE(review): MACRO.VERSION is not defined in this file — presumably a
        // build-time define injected by the bundler; confirm it survives restoration.
        write(chalk.dim(`Remote Control`) + ` v${MACRO.VERSION}\n`)
      }
      if (verbose) {
        if (config.spawnMode !== 'single-session') {
          write(chalk.dim(`Spawn mode: `) + `${config.spawnMode}\n`)
          write(
            chalk.dim(`Max concurrent sessions: `) + `${config.maxSessions}\n`,
          )
        }
        write(chalk.dim(`Environment ID: `) + `${environmentId}\n`)
      }
      if (config.sandbox) {
        write(chalk.dim(`Sandbox: `) + `${chalk.green('Enabled')}\n`)
      }
      write('\n')

      // Start connecting spinner — first updateIdleStatus() will stop it
      startConnecting()
    },

    /** Verbose-only: log a new session with its truncated prompt. */
    logSessionStart(sessionId: string, prompt: string): void {
      if (verbose) {
        const short = truncatePrompt(prompt, 80)
        printLog(
          chalk.dim(`[${timestamp()}]`) +
            ` Session started: ${chalk.white(`"${short}"`)} (${chalk.dim(sessionId)})\n`,
        )
      }
    },

    /** Log a completed session with its duration. */
    logSessionComplete(sessionId: string, durationMs: number): void {
      printLog(
        chalk.dim(`[${timestamp()}]`) +
          ` Session ${chalk.green('completed')} (${formatDuration(durationMs)}) ${chalk.dim(sessionId)}\n`,
      )
    },

    /** Log a failed session with its error text. */
    logSessionFailed(sessionId: string, error: string): void {
      printLog(
        chalk.dim(`[${timestamp()}]`) +
          ` Session ${chalk.red('failed')}: ${error} ${chalk.dim(sessionId)}\n`,
      )
    },

    /** Log a timestamped informational line. */
    logStatus(message: string): void {
      printLog(chalk.dim(`[${timestamp()}]`) + ` ${message}\n`)
    },

    /** Log a dimmed line, only when verbose mode is on. */
    logVerbose(message: string): void {
      if (verbose) {
        printLog(chalk.dim(`[${timestamp()}] ${message}`) + '\n')
      }
    },

    /** Log a red error line. */
    logError(message: string): void {
      printLog(chalk.red(`[${timestamp()}] Error: ${message}`) + '\n')
    },

    /** Log a successful reconnect with the outage duration. */
    logReconnected(disconnectedMs: number): void {
      printLog(
        chalk.dim(`[${timestamp()}]`) +
          ` ${chalk.green('Reconnected')} after ${formatDuration(disconnectedMs)}\n`,
      )
    },

    /** Cache repo/branch for the status-line suffix (no re-render). */
    setRepoInfo(repo: string, branchName: string): void {
      repoName = repo
      branch = branchName
    },

    /** Cache the debug log path shown on the ANT-only line (no re-render). */
    setDebugLogPath(path: string): void {
      debugLogPath = path
    },

    /** Transition to idle ("Ready"): reset session display state, re-render. */
    updateIdleStatus(): void {
      stopConnecting()

      currentState = 'idle'
      currentStateText = 'Ready'
      lastToolSummary = null
      lastToolTime = 0
      activeSessionUrl = null
      regenerateQr(connectUrl)
      renderStatusLine()
    },

    /** Transition to attached ("Connected") for a newly joined session. */
    setAttached(sessionId: string): void {
      stopConnecting()
      currentState = 'attached'
      currentStateText = 'Connected'
      lastToolSummary = null
      lastToolTime = 0
      // Multi-session: keep footer/QR on the environment connect URL so users
      // can spawn more sessions. Per-session links are in the bullet list.
      if (sessionMax <= 1) {
        activeSessionUrl = buildBridgeSessionUrl(
          sessionId,
          cachedEnvironmentId,
          cachedIngressUrl,
        )
        regenerateQr(activeSessionUrl)
      }
      renderStatusLine()
    },

    /** Render the reconnecting spinner line (bypasses renderStatusLine). */
    updateReconnectingStatus(delayStr: string, elapsedStr: string): void {
      stopConnecting()
      clearStatusLines()
      currentState = 'reconnecting'

      // QR code above the status line
      if (qrVisible) {
        for (const line of qrLines) {
          writeStatus(`${chalk.dim(line)}\n`)
        }
      }

      const frame =
        BRIDGE_SPINNER_FRAMES[connectingTick % BRIDGE_SPINNER_FRAMES.length]!
      connectingTick++
      writeStatus(
        `${chalk.yellow(frame)} ${chalk.yellow('Reconnecting')} ${chalk.dim('\u00b7')} ${chalk.dim(`retrying in ${delayStr}`)} ${chalk.dim('\u00b7')} ${chalk.dim(`disconnected ${elapsedStr}`)}\n`,
      )
    },

    /** Render the terminal failure state (bypasses renderStatusLine). */
    updateFailedStatus(error: string): void {
      stopConnecting()
      clearStatusLines()
      currentState = 'failed'

      let suffix = ''
      if (repoName) {
        suffix += chalk.dim(' \u00b7 ') + chalk.dim(repoName)
      }
      if (branch) {
        suffix += chalk.dim(' \u00b7 ') + chalk.dim(branch)
      }

      writeStatus(
        `${chalk.red(BRIDGE_FAILED_INDICATOR)} ${chalk.red('Remote Control Failed')}${suffix}\n`,
      )
      writeStatus(`${chalk.dim(FAILED_FOOTER_TEXT)}\n`)

      if (error) {
        writeStatus(`${chalk.red(error)}\n`)
      }
    },

    /** Record tool activity for the activity line, then re-render. */
    updateSessionStatus(
      _sessionId: string,
      _elapsed: string,
      activity: SessionActivity,
      _trail: string[],
    ): void {
      // Cache tool activity for the second status line
      if (activity.type === 'tool_start') {
        lastToolSummary = activity.summary
        lastToolTime = Date.now()
      }
      renderStatusLine()
    },

    /** Stop the spinner and erase the status block (used on shutdown). */
    clearStatus(): void {
      stopConnecting()
      clearStatusLines()
    },

    /** Toggle QR visibility and re-render. */
    toggleQr(): void {
      qrVisible = !qrVisible
      renderStatusLine()
    },

    /** Update capacity numbers + spawn mode; the render tick picks them up. */
    updateSessionCount(active: number, max: number, mode: SpawnMode): void {
      if (sessionActive === active && sessionMax === max && spawnMode === mode)
        return
      sessionActive = active
      sessionMax = max
      spawnMode = mode
      // Don't re-render here — the status ticker calls renderStatusLine
      // on its own cadence, and the next tick will pick up the new values.
    },

    /** Set the spawn-mode display value (drives the `w` toggle hint). */
    setSpawnModeDisplay(mode: 'same-dir' | 'worktree' | null): void {
      if (spawnModeDisplay === mode) return
      spawnModeDisplay = mode
      // Also sync the #21118-added spawnMode so the next render shows correct
      // mode hint + branch visibility. Don't render here — matches
      // updateSessionCount: called before printBanner (initial setup) and
      // again from the `w` handler (which follows with refreshDisplay).
      if (mode) spawnMode = mode
    },

    /** Register a session for the multi-session bullet list. */
    addSession(sessionId: string, url: string): void {
      sessionDisplayInfo.set(sessionId, { url })
    },

    /** Update a tracked session's activity (no re-render; ticker handles it). */
    updateSessionActivity(sessionId: string, activity: SessionActivity): void {
      const info = sessionDisplayInfo.get(sessionId)
      if (!info) return
      info.activity = activity
    },

    /** Record a session title; promotes it to the main line in single-session mode. */
    setSessionTitle(sessionId: string, title: string): void {
      const info = sessionDisplayInfo.get(sessionId)
      if (!info) return
      info.title = title
      // Guard against reconnecting/failed — renderStatusLine clears then returns
      // early for those states, which would erase the spinner/error.
      if (currentState === 'reconnecting' || currentState === 'failed') return
      if (sessionMax === 1) {
        // Single-session: show title in the main status line too.
        currentState = 'titled'
        currentStateText = truncatePrompt(title, 40)
      }
      renderStatusLine()
    },

    /** Drop a session from the bullet list. */
    removeSession(sessionId: string): void {
      sessionDisplayInfo.delete(sessionId)
    },

    /** Re-render on demand (e.g. after the `w` spawn-mode toggle). */
    refreshDisplay(): void {
      // Skip during reconnecting/failed — renderStatusLine clears then returns
      // early for those states, which would erase the spinner/error.
      if (currentState === 'reconnecting' || currentState === 'failed') return
      renderStatusLine()
    },
  }
}
|
||||
56
src/bridge/capacityWake.ts
Normal file
56
src/bridge/capacityWake.ts
Normal file
@ -0,0 +1,56 @@
|
||||
/**
|
||||
* Shared capacity-wake primitive for bridge poll loops.
|
||||
*
|
||||
* Both replBridge.ts and bridgeMain.ts need to sleep while "at capacity"
|
||||
* but wake early when either (a) the outer loop signal aborts (shutdown),
|
||||
* or (b) capacity frees up (session done / transport lost). This module
|
||||
* encapsulates the mutable wake-controller + two-signal merger that both
|
||||
* poll loops previously duplicated byte-for-byte.
|
||||
*/
|
||||
|
||||
export type CapacitySignal = { signal: AbortSignal; cleanup: () => void }
|
||||
|
||||
export type CapacityWake = {
|
||||
/**
|
||||
* Create a signal that aborts when either the outer loop signal or the
|
||||
* capacity-wake controller fires. Returns the merged signal and a cleanup
|
||||
* function that removes listeners when the sleep resolves normally
|
||||
* (without abort).
|
||||
*/
|
||||
signal(): CapacitySignal
|
||||
/**
|
||||
* Abort the current at-capacity sleep and arm a fresh controller so the
|
||||
* poll loop immediately re-checks for new work.
|
||||
*/
|
||||
wake(): void
|
||||
}
|
||||
|
||||
export function createCapacityWake(outerSignal: AbortSignal): CapacityWake {
|
||||
let wakeController = new AbortController()
|
||||
|
||||
function wake(): void {
|
||||
wakeController.abort()
|
||||
wakeController = new AbortController()
|
||||
}
|
||||
|
||||
function signal(): CapacitySignal {
|
||||
const merged = new AbortController()
|
||||
const abort = (): void => merged.abort()
|
||||
if (outerSignal.aborted || wakeController.signal.aborted) {
|
||||
merged.abort()
|
||||
return { signal: merged.signal, cleanup: () => {} }
|
||||
}
|
||||
outerSignal.addEventListener('abort', abort, { once: true })
|
||||
const capSig = wakeController.signal
|
||||
capSig.addEventListener('abort', abort, { once: true })
|
||||
return {
|
||||
signal: merged.signal,
|
||||
cleanup: () => {
|
||||
outerSignal.removeEventListener('abort', abort)
|
||||
capSig.removeEventListener('abort', abort)
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
return { signal, wake }
|
||||
}
|
||||
168
src/bridge/codeSessionApi.ts
Normal file
168
src/bridge/codeSessionApi.ts
Normal file
@ -0,0 +1,168 @@
|
||||
/**
|
||||
* Thin HTTP wrappers for the CCR v2 code-session API.
|
||||
*
|
||||
* Separate file from remoteBridgeCore.ts so the SDK /bridge subpath can
|
||||
* export createCodeSession + fetchRemoteCredentials without bundling the
|
||||
* heavy CLI tree (analytics, transport, etc.). Callers supply explicit
|
||||
* accessToken + baseUrl — no implicit auth or config reads.
|
||||
*/
|
||||
|
||||
import axios from 'axios'
|
||||
import { logForDebugging } from '../utils/debug.js'
|
||||
import { errorMessage } from '../utils/errors.js'
|
||||
import { jsonStringify } from '../utils/slowOperations.js'
|
||||
import { extractErrorDetail } from './debugUtils.js'
|
||||
|
||||
const ANTHROPIC_VERSION = '2023-06-01'
|
||||
|
||||
function oauthHeaders(accessToken: string): Record<string, string> {
|
||||
return {
|
||||
Authorization: `Bearer ${accessToken}`,
|
||||
'Content-Type': 'application/json',
|
||||
'anthropic-version': ANTHROPIC_VERSION,
|
||||
}
|
||||
}
|
||||
|
||||
export async function createCodeSession(
|
||||
baseUrl: string,
|
||||
accessToken: string,
|
||||
title: string,
|
||||
timeoutMs: number,
|
||||
tags?: string[],
|
||||
): Promise<string | null> {
|
||||
const url = `${baseUrl}/v1/code/sessions`
|
||||
let response
|
||||
try {
|
||||
response = await axios.post(
|
||||
url,
|
||||
// bridge: {} is the positive signal for the oneof runner — omitting it
|
||||
// (or sending environment_id: "") now 400s. BridgeRunner is an empty
|
||||
// message today; it's a placeholder for future bridge-specific options.
|
||||
{ title, bridge: {}, ...(tags?.length ? { tags } : {}) },
|
||||
{
|
||||
headers: oauthHeaders(accessToken),
|
||||
timeout: timeoutMs,
|
||||
validateStatus: s => s < 500,
|
||||
},
|
||||
)
|
||||
} catch (err: unknown) {
|
||||
logForDebugging(
|
||||
`[code-session] Session create request failed: ${errorMessage(err)}`,
|
||||
)
|
||||
return null
|
||||
}
|
||||
|
||||
if (response.status !== 200 && response.status !== 201) {
|
||||
const detail = extractErrorDetail(response.data)
|
||||
logForDebugging(
|
||||
`[code-session] Session create failed ${response.status}${detail ? `: ${detail}` : ''}`,
|
||||
)
|
||||
return null
|
||||
}
|
||||
|
||||
const data: unknown = response.data
|
||||
if (
|
||||
!data ||
|
||||
typeof data !== 'object' ||
|
||||
!('session' in data) ||
|
||||
!data.session ||
|
||||
typeof data.session !== 'object' ||
|
||||
!('id' in data.session) ||
|
||||
typeof data.session.id !== 'string' ||
|
||||
!data.session.id.startsWith('cse_')
|
||||
) {
|
||||
logForDebugging(
|
||||
`[code-session] No session.id (cse_*) in response: ${jsonStringify(data).slice(0, 200)}`,
|
||||
)
|
||||
return null
|
||||
}
|
||||
return data.session.id
|
||||
}
|
||||
|
||||
/**
 * Credentials from POST /bridge. JWT is opaque — do not decode.
 * Each /bridge call bumps worker_epoch server-side (it IS the register).
 */
export type RemoteCredentials = {
  worker_jwt: string
  api_base_url: string
  expires_in: number
  worker_epoch: number
}

/**
 * Register this worker for a code session via
 * POST /v1/code/sessions/{sessionId}/bridge.
 *
 * Returns the worker credentials, or null on any failure (network error,
 * non-200 status, or a malformed/unsafe response body). Never throws.
 */
export async function fetchRemoteCredentials(
  sessionId: string,
  baseUrl: string,
  accessToken: string,
  timeoutMs: number,
  trustedDeviceToken?: string,
): Promise<RemoteCredentials | null> {
  const url = `${baseUrl}/v1/code/sessions/${sessionId}/bridge`
  const headers = oauthHeaders(accessToken)
  if (trustedDeviceToken) {
    headers['X-Trusted-Device-Token'] = trustedDeviceToken
  }
  let response
  try {
    response = await axios.post(
      url,
      {},
      {
        headers,
        timeout: timeoutMs,
        // 4xx resolves normally (handled below); only 5xx/network throws.
        validateStatus: s => s < 500,
      },
    )
  } catch (err: unknown) {
    logForDebugging(
      `[code-session] /bridge request failed: ${errorMessage(err)}`,
    )
    return null
  }

  if (response.status !== 200) {
    const detail = extractErrorDetail(response.data)
    logForDebugging(
      `[code-session] /bridge failed ${response.status}${detail ? `: ${detail}` : ''}`,
    )
    return null
  }

  // Structural validation — reject the whole body if any required field is
  // missing or mistyped.
  const data: unknown = response.data
  if (
    data === null ||
    typeof data !== 'object' ||
    !('worker_jwt' in data) ||
    typeof data.worker_jwt !== 'string' ||
    !('expires_in' in data) ||
    typeof data.expires_in !== 'number' ||
    !('api_base_url' in data) ||
    typeof data.api_base_url !== 'string' ||
    !('worker_epoch' in data)
  ) {
    logForDebugging(
      `[code-session] /bridge response malformed (need worker_jwt, expires_in, api_base_url, worker_epoch): ${jsonStringify(data).slice(0, 200)}`,
    )
    return null
  }
  // protojson serializes int64 as a string to avoid JS precision loss;
  // Go may also return a number depending on encoder settings.
  const rawEpoch = data.worker_epoch
  const epoch = typeof rawEpoch === 'string' ? Number(rawEpoch) : rawEpoch
  if (
    typeof epoch !== 'number' ||
    !Number.isFinite(epoch) ||
    !Number.isSafeInteger(epoch)
  ) {
    logForDebugging(
      `[code-session] /bridge worker_epoch invalid: ${jsonStringify(rawEpoch)}`,
    )
    return null
  }
  return {
    worker_jwt: data.worker_jwt,
    api_base_url: data.api_base_url,
    expires_in: data.expires_in,
    worker_epoch: epoch,
  }
}
|
||||
384
src/bridge/createSession.ts
Normal file
384
src/bridge/createSession.ts
Normal file
@ -0,0 +1,384 @@
|
||||
import type { SDKMessage } from '../entrypoints/agentSdkTypes.js'
|
||||
import { logForDebugging } from '../utils/debug.js'
|
||||
import { errorMessage } from '../utils/errors.js'
|
||||
import { extractErrorDetail } from './debugUtils.js'
|
||||
import { toCompatSessionId } from './sessionIdCompat.js'
|
||||
|
||||
// Source descriptor placed in session_context.sources of the create request.
type GitSource = {
  type: 'git_repository'
  url: string
  revision?: string
}

// Outcome descriptor placed in session_context.outcomes of the create request.
type GitOutcome = {
  type: 'git_repository'
  git_info: { type: 'github'; repo: string; branches: string[] }
}

// Events must be wrapped in { type: 'event', data: <sdk_message> } for the
// POST /v1/sessions endpoint (discriminated union format).
type SessionEvent = {
  type: 'event'
  data: SDKMessage
}
|
||||
|
||||
/**
 * Create a session on a bridge environment via POST /v1/sessions.
 *
 * Used by both `claude remote-control` (empty session so the user has somewhere to
 * type immediately) and `/remote-control` (session pre-populated with conversation
 * history).
 *
 * Returns the session ID on success, or null if creation fails (non-fatal).
 */
export async function createBridgeSession({
  environmentId,
  title,
  events,
  gitRepoUrl,
  branch,
  signal,
  baseUrl: baseUrlOverride,
  getAccessToken,
  permissionMode,
}: {
  environmentId: string
  title?: string
  events: SessionEvent[]
  gitRepoUrl: string | null
  branch: string
  signal: AbortSignal
  baseUrl?: string
  getAccessToken?: () => string | undefined
  permissionMode?: string
}): Promise<string | null> {
  // NOTE(review): dependencies are imported lazily — presumably to keep this
  // module light for consumers that never create sessions; confirm.
  const { getClaudeAIOAuthTokens } = await import('../utils/auth.js')
  const { getOrganizationUUID } = await import('../services/oauth/client.js')
  const { getOauthConfig } = await import('../constants/oauth.js')
  const { getOAuthHeaders } = await import('../utils/teleport/api.js')
  const { parseGitHubRepository } = await import('../utils/detectRepository.js')
  const { getDefaultBranch } = await import('../utils/git.js')
  const { getMainLoopModel } = await import('../utils/model/model.js')
  const { default: axios } = await import('axios')

  // Explicit token injection (getAccessToken) wins over the stored OAuth token.
  const accessToken =
    getAccessToken?.() ?? getClaudeAIOAuthTokens()?.accessToken
  if (!accessToken) {
    logForDebugging('[bridge] No access token for session creation')
    return null
  }

  const orgUUID = await getOrganizationUUID()
  if (!orgUUID) {
    logForDebugging('[bridge] No org UUID for session creation')
    return null
  }

  // Build git source and outcome context
  let gitSource: GitSource | null = null
  let gitOutcome: GitOutcome | null = null

  if (gitRepoUrl) {
    // Preferred path: full remote URL parsing (host/owner/name).
    const { parseGitRemote } = await import('../utils/detectRepository.js')
    const parsed = parseGitRemote(gitRepoUrl)
    if (parsed) {
      const { host, owner, name } = parsed
      // Prefer the explicit branch; fall back to the repo's default branch.
      const revision = branch || (await getDefaultBranch()) || undefined
      gitSource = {
        type: 'git_repository',
        url: `https://${host}/${owner}/${name}`,
        revision,
      }
      gitOutcome = {
        type: 'git_repository',
        git_info: {
          type: 'github',
          repo: `${owner}/${name}`,
          branches: [`claude/${branch || 'task'}`],
        },
      }
    } else {
      // Fallback: try parseGitHubRepository for owner/repo format
      const ownerRepo = parseGitHubRepository(gitRepoUrl)
      if (ownerRepo) {
        const [owner, name] = ownerRepo.split('/')
        if (owner && name) {
          const revision = branch || (await getDefaultBranch()) || undefined
          gitSource = {
            type: 'git_repository',
            url: `https://github.com/${owner}/${name}`,
            revision,
          }
          gitOutcome = {
            type: 'git_repository',
            git_info: {
              type: 'github',
              repo: `${owner}/${name}`,
              branches: [`claude/${branch || 'task'}`],
            },
          }
        }
      }
    }
  }

  const requestBody = {
    // Omit `title` entirely when undefined (vs sending title: undefined).
    ...(title !== undefined && { title }),
    events,
    session_context: {
      sources: gitSource ? [gitSource] : [],
      outcomes: gitOutcome ? [gitOutcome] : [],
      model: getMainLoopModel(),
    },
    environment_id: environmentId,
    source: 'remote-control',
    ...(permissionMode && { permission_mode: permissionMode }),
  }

  const headers = {
    ...getOAuthHeaders(accessToken),
    'anthropic-beta': 'ccr-byoc-2025-07-29',
    'x-organization-uuid': orgUUID,
  }

  const url = `${baseUrlOverride ?? getOauthConfig().BASE_API_URL}/v1/sessions`
  let response
  try {
    response = await axios.post(url, requestBody, {
      headers,
      signal,
      // 4xx resolves normally (handled below); only 5xx/network throws.
      validateStatus: s => s < 500,
    })
  } catch (err: unknown) {
    logForDebugging(
      `[bridge] Session creation request failed: ${errorMessage(err)}`,
    )
    return null
  }
  const isSuccess = response.status === 200 || response.status === 201

  if (!isSuccess) {
    const detail = extractErrorDetail(response.data)
    logForDebugging(
      `[bridge] Session creation failed with status ${response.status}${detail ? `: ${detail}` : ''}`,
    )
    return null
  }

  // Shape-check the body before trusting it: expect { id: string }.
  const sessionData: unknown = response.data
  if (
    !sessionData ||
    typeof sessionData !== 'object' ||
    !('id' in sessionData) ||
    typeof sessionData.id !== 'string'
  ) {
    logForDebugging('[bridge] No session ID in response')
    return null
  }

  return sessionData.id
}
|
||||
|
||||
/**
|
||||
* Fetch a bridge session via GET /v1/sessions/{id}.
|
||||
*
|
||||
* Returns the session's environment_id (for `--session-id` resume) and title.
|
||||
* Uses the same org-scoped headers as create/archive — the environments-level
|
||||
* client in bridgeApi.ts uses a different beta header and no org UUID, which
|
||||
* makes the Sessions API return 404.
|
||||
*/
|
||||
export async function getBridgeSession(
|
||||
sessionId: string,
|
||||
opts?: { baseUrl?: string; getAccessToken?: () => string | undefined },
|
||||
): Promise<{ environment_id?: string; title?: string } | null> {
|
||||
const { getClaudeAIOAuthTokens } = await import('../utils/auth.js')
|
||||
const { getOrganizationUUID } = await import('../services/oauth/client.js')
|
||||
const { getOauthConfig } = await import('../constants/oauth.js')
|
||||
const { getOAuthHeaders } = await import('../utils/teleport/api.js')
|
||||
const { default: axios } = await import('axios')
|
||||
|
||||
const accessToken =
|
||||
opts?.getAccessToken?.() ?? getClaudeAIOAuthTokens()?.accessToken
|
||||
if (!accessToken) {
|
||||
logForDebugging('[bridge] No access token for session fetch')
|
||||
return null
|
||||
}
|
||||
|
||||
const orgUUID = await getOrganizationUUID()
|
||||
if (!orgUUID) {
|
||||
logForDebugging('[bridge] No org UUID for session fetch')
|
||||
return null
|
||||
}
|
||||
|
||||
const headers = {
|
||||
...getOAuthHeaders(accessToken),
|
||||
'anthropic-beta': 'ccr-byoc-2025-07-29',
|
||||
'x-organization-uuid': orgUUID,
|
||||
}
|
||||
|
||||
const url = `${opts?.baseUrl ?? getOauthConfig().BASE_API_URL}/v1/sessions/${sessionId}`
|
||||
logForDebugging(`[bridge] Fetching session ${sessionId}`)
|
||||
|
||||
let response
|
||||
try {
|
||||
response = await axios.get<{ environment_id?: string; title?: string }>(
|
||||
url,
|
||||
{ headers, timeout: 10_000, validateStatus: s => s < 500 },
|
||||
)
|
||||
} catch (err: unknown) {
|
||||
logForDebugging(
|
||||
`[bridge] Session fetch request failed: ${errorMessage(err)}`,
|
||||
)
|
||||
return null
|
||||
}
|
||||
|
||||
if (response.status !== 200) {
|
||||
const detail = extractErrorDetail(response.data)
|
||||
logForDebugging(
|
||||
`[bridge] Session fetch failed with status ${response.status}${detail ? `: ${detail}` : ''}`,
|
||||
)
|
||||
return null
|
||||
}
|
||||
|
||||
return response.data
|
||||
}
|
||||
|
||||
/**
|
||||
* Archive a bridge session via POST /v1/sessions/{id}/archive.
|
||||
*
|
||||
* The CCR server never auto-archives sessions — archival is always an
|
||||
* explicit client action. Both `claude remote-control` (standalone bridge) and the
|
||||
* always-on `/remote-control` REPL bridge call this during shutdown to archive any
|
||||
* sessions that are still alive.
|
||||
*
|
||||
* The archive endpoint accepts sessions in any status (running, idle,
|
||||
* requires_action, pending) and returns 409 if already archived, making
|
||||
* it safe to call even if the server-side runner already archived the
|
||||
* session.
|
||||
*
|
||||
* Callers must handle errors — this function has no try/catch; 5xx,
|
||||
* timeouts, and network errors throw. Archival is best-effort during
|
||||
* cleanup; call sites wrap with .catch().
|
||||
*/
|
||||
export async function archiveBridgeSession(
|
||||
sessionId: string,
|
||||
opts?: {
|
||||
baseUrl?: string
|
||||
getAccessToken?: () => string | undefined
|
||||
timeoutMs?: number
|
||||
},
|
||||
): Promise<void> {
|
||||
const { getClaudeAIOAuthTokens } = await import('../utils/auth.js')
|
||||
const { getOrganizationUUID } = await import('../services/oauth/client.js')
|
||||
const { getOauthConfig } = await import('../constants/oauth.js')
|
||||
const { getOAuthHeaders } = await import('../utils/teleport/api.js')
|
||||
const { default: axios } = await import('axios')
|
||||
|
||||
const accessToken =
|
||||
opts?.getAccessToken?.() ?? getClaudeAIOAuthTokens()?.accessToken
|
||||
if (!accessToken) {
|
||||
logForDebugging('[bridge] No access token for session archive')
|
||||
return
|
||||
}
|
||||
|
||||
const orgUUID = await getOrganizationUUID()
|
||||
if (!orgUUID) {
|
||||
logForDebugging('[bridge] No org UUID for session archive')
|
||||
return
|
||||
}
|
||||
|
||||
const headers = {
|
||||
...getOAuthHeaders(accessToken),
|
||||
'anthropic-beta': 'ccr-byoc-2025-07-29',
|
||||
'x-organization-uuid': orgUUID,
|
||||
}
|
||||
|
||||
const url = `${opts?.baseUrl ?? getOauthConfig().BASE_API_URL}/v1/sessions/${sessionId}/archive`
|
||||
logForDebugging(`[bridge] Archiving session ${sessionId}`)
|
||||
|
||||
const response = await axios.post(
|
||||
url,
|
||||
{},
|
||||
{
|
||||
headers,
|
||||
timeout: opts?.timeoutMs ?? 10_000,
|
||||
validateStatus: s => s < 500,
|
||||
},
|
||||
)
|
||||
|
||||
if (response.status === 200) {
|
||||
logForDebugging(`[bridge] Session ${sessionId} archived successfully`)
|
||||
} else {
|
||||
const detail = extractErrorDetail(response.data)
|
||||
logForDebugging(
|
||||
`[bridge] Session archive failed with status ${response.status}${detail ? `: ${detail}` : ''}`,
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Update the title of a bridge session via PATCH /v1/sessions/{id}.
|
||||
*
|
||||
* Called when the user renames a session via /rename while a bridge
|
||||
* connection is active, so the title stays in sync on claude.ai/code.
|
||||
*
|
||||
* Errors are swallowed — title sync is best-effort.
|
||||
*/
|
||||
export async function updateBridgeSessionTitle(
|
||||
sessionId: string,
|
||||
title: string,
|
||||
opts?: { baseUrl?: string; getAccessToken?: () => string | undefined },
|
||||
): Promise<void> {
|
||||
const { getClaudeAIOAuthTokens } = await import('../utils/auth.js')
|
||||
const { getOrganizationUUID } = await import('../services/oauth/client.js')
|
||||
const { getOauthConfig } = await import('../constants/oauth.js')
|
||||
const { getOAuthHeaders } = await import('../utils/teleport/api.js')
|
||||
const { default: axios } = await import('axios')
|
||||
|
||||
const accessToken =
|
||||
opts?.getAccessToken?.() ?? getClaudeAIOAuthTokens()?.accessToken
|
||||
if (!accessToken) {
|
||||
logForDebugging('[bridge] No access token for session title update')
|
||||
return
|
||||
}
|
||||
|
||||
const orgUUID = await getOrganizationUUID()
|
||||
if (!orgUUID) {
|
||||
logForDebugging('[bridge] No org UUID for session title update')
|
||||
return
|
||||
}
|
||||
|
||||
const headers = {
|
||||
...getOAuthHeaders(accessToken),
|
||||
'anthropic-beta': 'ccr-byoc-2025-07-29',
|
||||
'x-organization-uuid': orgUUID,
|
||||
}
|
||||
|
||||
// Compat gateway only accepts session_* (compat/convert.go:27). v2 callers
|
||||
// pass raw cse_*; retag here so all callers can pass whatever they hold.
|
||||
// Idempotent for v1's session_* and bridgeMain's pre-converted compatSessionId.
|
||||
const compatId = toCompatSessionId(sessionId)
|
||||
const url = `${opts?.baseUrl ?? getOauthConfig().BASE_API_URL}/v1/sessions/${compatId}`
|
||||
logForDebugging(`[bridge] Updating session title: ${compatId} → ${title}`)
|
||||
|
||||
try {
|
||||
const response = await axios.patch(
|
||||
url,
|
||||
{ title },
|
||||
{ headers, timeout: 10_000, validateStatus: s => s < 500 },
|
||||
)
|
||||
|
||||
if (response.status === 200) {
|
||||
logForDebugging(`[bridge] Session title updated successfully`)
|
||||
} else {
|
||||
const detail = extractErrorDetail(response.data)
|
||||
logForDebugging(
|
||||
`[bridge] Session title update failed with status ${response.status}${detail ? `: ${detail}` : ''}`,
|
||||
)
|
||||
}
|
||||
} catch (err: unknown) {
|
||||
logForDebugging(
|
||||
`[bridge] Session title update request failed: ${errorMessage(err)}`,
|
||||
)
|
||||
}
|
||||
}
|
||||
141
src/bridge/debugUtils.ts
Normal file
141
src/bridge/debugUtils.ts
Normal file
@ -0,0 +1,141 @@
|
||||
import {
|
||||
type AnalyticsMetadata_I_VERIFIED_THIS_IS_NOT_CODE_OR_FILEPATHS,
|
||||
logEvent,
|
||||
} from '../services/analytics/index.js'
|
||||
import { logForDebugging } from '../utils/debug.js'
|
||||
import { errorMessage } from '../utils/errors.js'
|
||||
import { jsonStringify } from '../utils/slowOperations.js'
|
||||
|
||||
const DEBUG_MSG_LIMIT = 2000
|
||||
|
||||
const SECRET_FIELD_NAMES = [
|
||||
'session_ingress_token',
|
||||
'environment_secret',
|
||||
'access_token',
|
||||
'secret',
|
||||
'token',
|
||||
]
|
||||
|
||||
const SECRET_PATTERN = new RegExp(
|
||||
`"(${SECRET_FIELD_NAMES.join('|')})"\\s*:\\s*"([^"]*)"`,
|
||||
'g',
|
||||
)
|
||||
|
||||
const REDACT_MIN_LENGTH = 16
|
||||
|
||||
export function redactSecrets(s: string): string {
|
||||
return s.replace(SECRET_PATTERN, (_match, field: string, value: string) => {
|
||||
if (value.length < REDACT_MIN_LENGTH) {
|
||||
return `"${field}":"[REDACTED]"`
|
||||
}
|
||||
const redacted = `${value.slice(0, 8)}...${value.slice(-4)}`
|
||||
return `"${field}":"${redacted}"`
|
||||
})
|
||||
}
|
||||
|
||||
/** Truncate a string for debug logging, collapsing newlines. */
|
||||
export function debugTruncate(s: string): string {
|
||||
const flat = s.replace(/\n/g, '\\n')
|
||||
if (flat.length <= DEBUG_MSG_LIMIT) {
|
||||
return flat
|
||||
}
|
||||
return flat.slice(0, DEBUG_MSG_LIMIT) + `... (${flat.length} chars)`
|
||||
}
|
||||
|
||||
/** Truncate a JSON-serializable value for debug logging. */
|
||||
export function debugBody(data: unknown): string {
|
||||
const raw = typeof data === 'string' ? data : jsonStringify(data)
|
||||
const s = redactSecrets(raw)
|
||||
if (s.length <= DEBUG_MSG_LIMIT) {
|
||||
return s
|
||||
}
|
||||
return s.slice(0, DEBUG_MSG_LIMIT) + `... (${s.length} chars)`
|
||||
}
|
||||
|
||||
/**
|
||||
* Extract a descriptive error message from an axios error (or any error).
|
||||
* For HTTP errors, appends the server's response body message if available,
|
||||
* since axios's default message only includes the status code.
|
||||
*/
|
||||
export function describeAxiosError(err: unknown): string {
|
||||
const msg = errorMessage(err)
|
||||
if (err && typeof err === 'object' && 'response' in err) {
|
||||
const response = (err as { response?: { data?: unknown } }).response
|
||||
if (response?.data && typeof response.data === 'object') {
|
||||
const data = response.data as Record<string, unknown>
|
||||
const detail =
|
||||
typeof data.message === 'string'
|
||||
? data.message
|
||||
: typeof data.error === 'object' &&
|
||||
data.error &&
|
||||
'message' in data.error &&
|
||||
typeof (data.error as Record<string, unknown>).message ===
|
||||
'string'
|
||||
? (data.error as Record<string, unknown>).message
|
||||
: undefined
|
||||
if (detail) {
|
||||
return `${msg}: ${detail}`
|
||||
}
|
||||
}
|
||||
}
|
||||
return msg
|
||||
}
|
||||
|
||||
/**
|
||||
* Extract the HTTP status code from an axios error, if present.
|
||||
* Returns undefined for non-HTTP errors (e.g. network failures).
|
||||
*/
|
||||
export function extractHttpStatus(err: unknown): number | undefined {
|
||||
if (
|
||||
err &&
|
||||
typeof err === 'object' &&
|
||||
'response' in err &&
|
||||
(err as { response?: { status?: unknown } }).response &&
|
||||
typeof (err as { response: { status?: unknown } }).response.status ===
|
||||
'number'
|
||||
) {
|
||||
return (err as { response: { status: number } }).response.status
|
||||
}
|
||||
return undefined
|
||||
}
|
||||
|
||||
/**
|
||||
* Pull a human-readable message out of an API error response body.
|
||||
* Checks `data.message` first, then `data.error.message`.
|
||||
*/
|
||||
export function extractErrorDetail(data: unknown): string | undefined {
|
||||
if (!data || typeof data !== 'object') return undefined
|
||||
if ('message' in data && typeof data.message === 'string') {
|
||||
return data.message
|
||||
}
|
||||
if (
|
||||
'error' in data &&
|
||||
data.error !== null &&
|
||||
typeof data.error === 'object' &&
|
||||
'message' in data.error &&
|
||||
typeof data.error.message === 'string'
|
||||
) {
|
||||
return data.error.message
|
||||
}
|
||||
return undefined
|
||||
}
|
||||
|
||||
/**
|
||||
* Log a bridge init skip — debug message + `tengu_bridge_repl_skipped`
|
||||
* analytics event. Centralizes the event name and the AnalyticsMetadata
|
||||
* cast so call sites don't each repeat the 5-line boilerplate.
|
||||
*/
|
||||
export function logBridgeSkip(
|
||||
reason: string,
|
||||
debugMsg?: string,
|
||||
v2?: boolean,
|
||||
): void {
|
||||
if (debugMsg) {
|
||||
logForDebugging(debugMsg)
|
||||
}
|
||||
logEvent('tengu_bridge_repl_skipped', {
|
||||
reason:
|
||||
reason as AnalyticsMetadata_I_VERIFIED_THIS_IS_NOT_CODE_OR_FILEPATHS,
|
||||
...(v2 !== undefined && { v2 }),
|
||||
})
|
||||
}
|
||||
165
src/bridge/envLessBridgeConfig.ts
Normal file
165
src/bridge/envLessBridgeConfig.ts
Normal file
@ -0,0 +1,165 @@
|
||||
import { z } from 'zod/v4'
|
||||
import { getFeatureValue_DEPRECATED } from '../services/analytics/growthbook.js'
|
||||
import { lazySchema } from '../utils/lazySchema.js'
|
||||
import { lt } from '../utils/semver.js'
|
||||
import { isEnvLessBridgeEnabled } from './bridgeEnabled.js'
|
||||
|
||||
/** GrowthBook-tunable timings/flags for the env-less (v2) bridge path. */
export type EnvLessBridgeConfig = {
  // withRetry — init-phase backoff (createSession, POST /bridge, recovery /bridge)
  init_retry_max_attempts: number
  init_retry_base_delay_ms: number
  init_retry_jitter_fraction: number
  init_retry_max_delay_ms: number
  // axios timeout for POST /sessions, POST /bridge, POST /archive
  http_timeout_ms: number
  // BoundedUUIDSet ring size (echo + re-delivery dedup)
  uuid_dedup_buffer_size: number
  // CCRClient worker heartbeat cadence. Server TTL is 60s — 20s gives 3× margin.
  heartbeat_interval_ms: number
  // ±fraction of interval — per-beat jitter to spread fleet load.
  heartbeat_jitter_fraction: number
  // Fire proactive JWT refresh this long before expires_in. Larger buffer =
  // more frequent refresh (refresh cadence ≈ expires_in - buffer).
  token_refresh_buffer_ms: number
  // Archive POST timeout in teardown(). Distinct from http_timeout_ms because
  // gracefulShutdown races runCleanupFunctions() against a 2s cap — a 10s
  // axios timeout on a slow/stalled archive burns the whole budget on a
  // request that forceExit will kill anyway.
  teardown_archive_timeout_ms: number
  // Deadline for onConnect after transport.connect(). If neither onConnect
  // nor onClose fires before this, emit tengu_bridge_repl_connect_timeout
  // — the only telemetry for the ~1% of sessions that emit `started` then
  // go silent (no error, no event, just nothing).
  connect_timeout_ms: number
  // Semver floor for the env-less bridge path. Separate from the v1
  // tengu_bridge_min_version config so a v2-specific bug can force upgrades
  // without blocking v1 (env-based) clients, and vice versa.
  min_version: string
  // When true, tell users their claude.ai app may be too old to see v2
  // sessions — lets us roll the v2 bridge before the app ships the new
  // session-list query.
  should_show_app_upgrade_message: boolean
}

/** Fallback used when GrowthBook is unreachable or returns an invalid object. */
export const DEFAULT_ENV_LESS_BRIDGE_CONFIG: EnvLessBridgeConfig = {
  init_retry_max_attempts: 3,
  init_retry_base_delay_ms: 500,
  init_retry_jitter_fraction: 0.25,
  init_retry_max_delay_ms: 4000,
  http_timeout_ms: 10_000,
  uuid_dedup_buffer_size: 2000,
  heartbeat_interval_ms: 20_000,
  heartbeat_jitter_fraction: 0.1,
  token_refresh_buffer_ms: 300_000,
  teardown_archive_timeout_ms: 1500,
  connect_timeout_ms: 15_000,
  min_version: '0.0.0',
  should_show_app_upgrade_message: false,
}

// Floors reject the whole object on violation (fall back to DEFAULT) rather
// than partially trusting — same defense-in-depth as pollConfig.ts.
const envLessBridgeConfigSchema = lazySchema(() =>
  z.object({
    init_retry_max_attempts: z.number().int().min(1).max(10).default(3),
    init_retry_base_delay_ms: z.number().int().min(100).default(500),
    init_retry_jitter_fraction: z.number().min(0).max(1).default(0.25),
    init_retry_max_delay_ms: z.number().int().min(500).default(4000),
    http_timeout_ms: z.number().int().min(2000).default(10_000),
    uuid_dedup_buffer_size: z.number().int().min(100).max(50_000).default(2000),
    // Server TTL is 60s. Floor 5s prevents thrash; cap 30s keeps ≥2× margin.
    heartbeat_interval_ms: z
      .number()
      .int()
      .min(5000)
      .max(30_000)
      .default(20_000),
    // ±fraction per beat. Cap 0.5: at max interval (30s) × 1.5 = 45s worst case,
    // still under the 60s TTL.
    heartbeat_jitter_fraction: z.number().min(0).max(0.5).default(0.1),
    // Floor 30s prevents tight-looping. Cap 30min rejects buffer-vs-delay
    // semantic inversion: ops entering expires_in-5min (the *delay until
    // refresh*) instead of 5min (the *buffer before expiry*) yields
    // delayMs = expires_in - buffer ≈ 5min instead of ≈4h. Both are positive
    // durations so .min() alone can't distinguish; .max() catches the
    // inverted value since buffer ≥ 30min is nonsensical for a multi-hour JWT.
    token_refresh_buffer_ms: z
      .number()
      .int()
      .min(30_000)
      .max(1_800_000)
      .default(300_000),
    // Cap 2000 keeps this under gracefulShutdown's 2s cleanup race — a higher
    // timeout just lies to axios since forceExit kills the socket regardless.
    teardown_archive_timeout_ms: z
      .number()
      .int()
      .min(500)
      .max(2000)
      .default(1500),
    // Observed p99 connect is ~2-3s; 15s is ~5× headroom. Floor 5s bounds
    // false-positive rate under transient slowness; cap 60s bounds how long
    // a truly-stalled session stays dark.
    connect_timeout_ms: z.number().int().min(5_000).max(60_000).default(15_000),
    min_version: z
      .string()
      .refine(v => {
        // lt() is used purely as a parse probe — presumably it throws on
        // malformed semver input; TODO confirm against utils/semver.
        try {
          lt(v, '0.0.0')
          return true
        } catch {
          return false
        }
      })
      .default('0.0.0'),
    should_show_app_upgrade_message: z.boolean().default(false),
  }),
)
|
||||
|
||||
/**
|
||||
* Fetch the env-less bridge timing config from GrowthBook. Read once per
|
||||
* initEnvLessBridgeCore call — config is fixed for the lifetime of a bridge
|
||||
* session.
|
||||
*
|
||||
* Uses the blocking getter (not _CACHED_MAY_BE_STALE) because /remote-control
|
||||
* runs well after GrowthBook init — initializeGrowthBook() resolves instantly,
|
||||
* so there's no startup penalty, and we get the fresh in-memory remoteEval
|
||||
* value instead of the stale-on-first-read disk cache. The _DEPRECATED suffix
|
||||
* warns against startup-path usage, which this isn't.
|
||||
*/
|
||||
export async function getEnvLessBridgeConfig(): Promise<EnvLessBridgeConfig> {
|
||||
const raw = await getFeatureValue_DEPRECATED<unknown>(
|
||||
'tengu_bridge_repl_v2_config',
|
||||
DEFAULT_ENV_LESS_BRIDGE_CONFIG,
|
||||
)
|
||||
const parsed = envLessBridgeConfigSchema().safeParse(raw)
|
||||
return parsed.success ? parsed.data : DEFAULT_ENV_LESS_BRIDGE_CONFIG
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns an error message if the current CLI version is below the minimum
|
||||
* required for the env-less (v2) bridge path, or null if the version is fine.
|
||||
*
|
||||
* v2 analogue of checkBridgeMinVersion() — reads from tengu_bridge_repl_v2_config
|
||||
* instead of tengu_bridge_min_version so the two implementations can enforce
|
||||
* independent floors.
|
||||
*/
|
||||
export async function checkEnvLessBridgeMinVersion(): Promise<string | null> {
|
||||
const cfg = await getEnvLessBridgeConfig()
|
||||
if (cfg.min_version && lt(MACRO.VERSION, cfg.min_version)) {
|
||||
return `Your version of Claude Code (${MACRO.VERSION}) is too old for Remote Control.\nVersion ${cfg.min_version} or higher is required. Run \`claude update\` to update.`
|
||||
}
|
||||
return null
|
||||
}
|
||||
|
||||
/**
|
||||
* Whether to nudge users toward upgrading their claude.ai app when a
|
||||
* Remote Control session starts. True only when the v2 bridge is active
|
||||
* AND the should_show_app_upgrade_message config bit is set — lets us
|
||||
* roll the v2 bridge before the app ships the new session-list query.
|
||||
*/
|
||||
export async function shouldShowAppUpgradeMessage(): Promise<boolean> {
|
||||
if (!isEnvLessBridgeEnabled()) return false
|
||||
const cfg = await getEnvLessBridgeConfig()
|
||||
return cfg.should_show_app_upgrade_message
|
||||
}
|
||||
71
src/bridge/flushGate.ts
Normal file
71
src/bridge/flushGate.ts
Normal file
@ -0,0 +1,71 @@
|
||||
/**
|
||||
* State machine for gating message writes during an initial flush.
|
||||
*
|
||||
* When a bridge session starts, historical messages are flushed to the
|
||||
* server via a single HTTP POST. During that flush, new messages must
|
||||
* be queued to prevent them from arriving at the server interleaved
|
||||
* with the historical messages.
|
||||
*
|
||||
* Lifecycle:
|
||||
* start() → enqueue() returns true, items are queued
|
||||
* end() → returns queued items for draining, enqueue() returns false
|
||||
* drop() → discards queued items (permanent transport close)
|
||||
* deactivate() → clears active flag without dropping items
|
||||
* (transport replacement — new transport will drain)
|
||||
*/
|
||||
export class FlushGate<T> {
|
||||
private _active = false
|
||||
private _pending: T[] = []
|
||||
|
||||
get active(): boolean {
|
||||
return this._active
|
||||
}
|
||||
|
||||
get pendingCount(): number {
|
||||
return this._pending.length
|
||||
}
|
||||
|
||||
/** Mark flush as in-progress. enqueue() will start queuing items. */
|
||||
start(): void {
|
||||
this._active = true
|
||||
}
|
||||
|
||||
/**
|
||||
* End the flush and return any queued items for draining.
|
||||
* Caller is responsible for sending the returned items.
|
||||
*/
|
||||
end(): T[] {
|
||||
this._active = false
|
||||
return this._pending.splice(0)
|
||||
}
|
||||
|
||||
/**
|
||||
* If flush is active, queue the items and return true.
|
||||
* If flush is not active, return false (caller should send directly).
|
||||
*/
|
||||
enqueue(...items: T[]): boolean {
|
||||
if (!this._active) return false
|
||||
this._pending.push(...items)
|
||||
return true
|
||||
}
|
||||
|
||||
/**
|
||||
* Discard all queued items (permanent transport close).
|
||||
* Returns the number of items dropped.
|
||||
*/
|
||||
drop(): number {
|
||||
this._active = false
|
||||
const count = this._pending.length
|
||||
this._pending.length = 0
|
||||
return count
|
||||
}
|
||||
|
||||
/**
|
||||
* Clear the active flag without dropping queued items.
|
||||
* Used when the transport is replaced (onWorkReceived) — the new
|
||||
* transport's flush will drain the pending items.
|
||||
*/
|
||||
deactivate(): void {
|
||||
this._active = false
|
||||
}
|
||||
}
|
||||
175
src/bridge/inboundAttachments.ts
Normal file
175
src/bridge/inboundAttachments.ts
Normal file
@ -0,0 +1,175 @@
|
||||
/**
|
||||
* Resolve file_uuid attachments on inbound bridge user messages.
|
||||
*
|
||||
* Web composer uploads via cookie-authed /api/{org}/upload, sends file_uuid
|
||||
* alongside the message. Here we fetch each via GET /api/oauth/files/{uuid}/content
|
||||
* (oauth-authed, same store), write to ~/.claude/uploads/{sessionId}/, and
|
||||
* return @path refs to prepend. Claude's Read tool takes it from there.
|
||||
*
|
||||
* Best-effort: any failure (no token, network, non-2xx, disk) logs debug and
|
||||
* skips that attachment. The message still reaches Claude, just without @path.
|
||||
*/
|
||||
|
||||
import type { ContentBlockParam } from '@anthropic-ai/sdk/resources/messages.mjs'
|
||||
import axios from 'axios'
|
||||
import { randomUUID } from 'crypto'
|
||||
import { mkdir, writeFile } from 'fs/promises'
|
||||
import { basename, join } from 'path'
|
||||
import { z } from 'zod/v4'
|
||||
import { getSessionId } from '../bootstrap/state.js'
|
||||
import { logForDebugging } from '../utils/debug.js'
|
||||
import { getClaudeConfigHomeDir } from '../utils/envUtils.js'
|
||||
import { lazySchema } from '../utils/lazySchema.js'
|
||||
import { getBridgeAccessToken, getBridgeBaseUrl } from './bridgeConfig.js'
|
||||
|
||||
// Per-attachment HTTP download timeout, passed to axios.get in resolveOne.
const DOWNLOAD_TIMEOUT_MS = 30_000
|
||||
|
||||
function debug(msg: string): void {
|
||||
logForDebugging(`[bridge:inbound-attach] ${msg}`)
|
||||
}
|
||||
|
||||
const attachmentSchema = lazySchema(() =>
|
||||
z.object({
|
||||
file_uuid: z.string(),
|
||||
file_name: z.string(),
|
||||
}),
|
||||
)
|
||||
const attachmentsArraySchema = lazySchema(() => z.array(attachmentSchema()))
|
||||
|
||||
export type InboundAttachment = z.infer<ReturnType<typeof attachmentSchema>>
|
||||
|
||||
/** Pull file_attachments off a loosely-typed inbound message. */
|
||||
export function extractInboundAttachments(msg: unknown): InboundAttachment[] {
|
||||
if (typeof msg !== 'object' || msg === null || !('file_attachments' in msg)) {
|
||||
return []
|
||||
}
|
||||
const parsed = attachmentsArraySchema().safeParse(msg.file_attachments)
|
||||
return parsed.success ? parsed.data : []
|
||||
}
|
||||
|
||||
/**
|
||||
* Strip path components and keep only filename-safe chars. file_name comes
|
||||
* from the network (web composer), so treat it as untrusted even though the
|
||||
* composer controls it.
|
||||
*/
|
||||
function sanitizeFileName(name: string): string {
|
||||
const base = basename(name).replace(/[^a-zA-Z0-9._-]/g, '_')
|
||||
return base || 'attachment'
|
||||
}
|
||||
|
||||
function uploadsDir(): string {
|
||||
return join(getClaudeConfigHomeDir(), 'uploads', getSessionId())
|
||||
}
|
||||
|
||||
/**
|
||||
* Fetch + write one attachment. Returns the absolute path on success,
|
||||
* undefined on any failure.
|
||||
*/
|
||||
async function resolveOne(att: InboundAttachment): Promise<string | undefined> {
|
||||
const token = getBridgeAccessToken()
|
||||
if (!token) {
|
||||
debug('skip: no oauth token')
|
||||
return undefined
|
||||
}
|
||||
|
||||
let data: Buffer
|
||||
try {
|
||||
// getOauthConfig() (via getBridgeBaseUrl) throws on a non-allowlisted
|
||||
// CLAUDE_CODE_CUSTOM_OAUTH_URL — keep it inside the try so a bad
|
||||
// FedStart URL degrades to "no @path" instead of crashing print.ts's
|
||||
// reader loop (which has no catch around the await).
|
||||
const url = `${getBridgeBaseUrl()}/api/oauth/files/${encodeURIComponent(att.file_uuid)}/content`
|
||||
const response = await axios.get(url, {
|
||||
headers: { Authorization: `Bearer ${token}` },
|
||||
responseType: 'arraybuffer',
|
||||
timeout: DOWNLOAD_TIMEOUT_MS,
|
||||
validateStatus: () => true,
|
||||
})
|
||||
if (response.status !== 200) {
|
||||
debug(`fetch ${att.file_uuid} failed: status=${response.status}`)
|
||||
return undefined
|
||||
}
|
||||
data = Buffer.from(response.data)
|
||||
} catch (e) {
|
||||
debug(`fetch ${att.file_uuid} threw: ${e}`)
|
||||
return undefined
|
||||
}
|
||||
|
||||
// uuid-prefix makes collisions impossible across messages and within one
|
||||
// (same filename, different files). 8 chars is enough — this isn't security.
|
||||
const safeName = sanitizeFileName(att.file_name)
|
||||
const prefix = (
|
||||
att.file_uuid.slice(0, 8) || randomUUID().slice(0, 8)
|
||||
).replace(/[^a-zA-Z0-9_-]/g, '_')
|
||||
const dir = uploadsDir()
|
||||
const outPath = join(dir, `${prefix}-${safeName}`)
|
||||
|
||||
try {
|
||||
await mkdir(dir, { recursive: true })
|
||||
await writeFile(outPath, data)
|
||||
} catch (e) {
|
||||
debug(`write ${outPath} failed: ${e}`)
|
||||
return undefined
|
||||
}
|
||||
|
||||
debug(`resolved ${att.file_uuid} → ${outPath} (${data.length} bytes)`)
|
||||
return outPath
|
||||
}
|
||||
|
||||
/**
|
||||
* Resolve all attachments on an inbound message to a prefix string of
|
||||
* @path refs. Empty string if none resolved.
|
||||
*/
|
||||
export async function resolveInboundAttachments(
|
||||
attachments: InboundAttachment[],
|
||||
): Promise<string> {
|
||||
if (attachments.length === 0) return ''
|
||||
debug(`resolving ${attachments.length} attachment(s)`)
|
||||
const paths = await Promise.all(attachments.map(resolveOne))
|
||||
const ok = paths.filter((p): p is string => p !== undefined)
|
||||
if (ok.length === 0) return ''
|
||||
// Quoted form — extractAtMentionedFiles truncates unquoted @refs at the
|
||||
// first space, which breaks any home dir with spaces (/Users/John Smith/).
|
||||
return ok.map(p => `@"${p}"`).join(' ') + ' '
|
||||
}
|
||||
|
||||
/**
|
||||
* Prepend @path refs to content, whichever form it's in.
|
||||
* Targets the LAST text block — processUserInputBase reads inputString
|
||||
* from processedBlocks[processedBlocks.length - 1], so putting refs in
|
||||
* block[0] means they're silently ignored for [text, image] content.
|
||||
*/
|
||||
export function prependPathRefs(
|
||||
content: string | Array<ContentBlockParam>,
|
||||
prefix: string,
|
||||
): string | Array<ContentBlockParam> {
|
||||
if (!prefix) return content
|
||||
if (typeof content === 'string') return prefix + content
|
||||
const i = content.findLastIndex(b => b.type === 'text')
|
||||
if (i !== -1) {
|
||||
const b = content[i]!
|
||||
if (b.type === 'text') {
|
||||
return [
|
||||
...content.slice(0, i),
|
||||
{ ...b, text: prefix + b.text },
|
||||
...content.slice(i + 1),
|
||||
]
|
||||
}
|
||||
}
|
||||
// No text block — append one at the end so it's last.
|
||||
return [...content, { type: 'text', text: prefix.trimEnd() }]
|
||||
}
|
||||
|
||||
/**
|
||||
* Convenience: extract + resolve + prepend. No-op when the message has no
|
||||
* file_attachments field (fast path — no network, returns same reference).
|
||||
*/
|
||||
export async function resolveAndPrepend(
|
||||
msg: unknown,
|
||||
content: string | Array<ContentBlockParam>,
|
||||
): Promise<string | Array<ContentBlockParam>> {
|
||||
const attachments = extractInboundAttachments(msg)
|
||||
if (attachments.length === 0) return content
|
||||
const prefix = await resolveInboundAttachments(attachments)
|
||||
return prependPathRefs(content, prefix)
|
||||
}
|
||||
80
src/bridge/inboundMessages.ts
Normal file
80
src/bridge/inboundMessages.ts
Normal file
@ -0,0 +1,80 @@
|
||||
import type {
|
||||
Base64ImageSource,
|
||||
ContentBlockParam,
|
||||
ImageBlockParam,
|
||||
} from '@anthropic-ai/sdk/resources/messages.mjs'
|
||||
import type { UUID } from 'crypto'
|
||||
import type { SDKMessage } from '../entrypoints/agentSdkTypes.js'
|
||||
import { detectImageFormatFromBase64 } from '../utils/imageResizer.js'
|
||||
|
||||
/**
|
||||
* Process an inbound user message from the bridge, extracting content
|
||||
* and UUID for enqueueing. Supports both string content and
|
||||
* ContentBlockParam[] (e.g. messages containing images).
|
||||
*
|
||||
* Normalizes image blocks from bridge clients that may use camelCase
|
||||
* `mediaType` instead of snake_case `media_type` (mobile-apps#5825).
|
||||
*
|
||||
* Returns the extracted fields, or undefined if the message should be
|
||||
* skipped (non-user type, missing/empty content).
|
||||
*/
|
||||
export function extractInboundMessageFields(
|
||||
msg: SDKMessage,
|
||||
):
|
||||
| { content: string | Array<ContentBlockParam>; uuid: UUID | undefined }
|
||||
| undefined {
|
||||
if (msg.type !== 'user') return undefined
|
||||
const content = msg.message?.content
|
||||
if (!content) return undefined
|
||||
if (Array.isArray(content) && content.length === 0) return undefined
|
||||
|
||||
const uuid =
|
||||
'uuid' in msg && typeof msg.uuid === 'string'
|
||||
? (msg.uuid as UUID)
|
||||
: undefined
|
||||
|
||||
return {
|
||||
content: Array.isArray(content) ? normalizeImageBlocks(content) : content,
|
||||
uuid,
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Normalize image content blocks from bridge clients. iOS/web clients may
|
||||
* send `mediaType` (camelCase) instead of `media_type` (snake_case), or
|
||||
* omit the field entirely. Without normalization, the bad block poisons
|
||||
* the session — every subsequent API call fails with
|
||||
* "media_type: Field required".
|
||||
*
|
||||
* Fast-path scan returns the original array reference when no
|
||||
* normalization is needed (zero allocation on the happy path).
|
||||
*/
|
||||
export function normalizeImageBlocks(
|
||||
blocks: Array<ContentBlockParam>,
|
||||
): Array<ContentBlockParam> {
|
||||
if (!blocks.some(isMalformedBase64Image)) return blocks
|
||||
|
||||
return blocks.map(block => {
|
||||
if (!isMalformedBase64Image(block)) return block
|
||||
const src = block.source as unknown as Record<string, unknown>
|
||||
const mediaType =
|
||||
typeof src.mediaType === 'string' && src.mediaType
|
||||
? src.mediaType
|
||||
: detectImageFormatFromBase64(block.source.data)
|
||||
return {
|
||||
...block,
|
||||
source: {
|
||||
type: 'base64' as const,
|
||||
media_type: mediaType as Base64ImageSource['media_type'],
|
||||
data: block.source.data,
|
||||
},
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
function isMalformedBase64Image(
|
||||
block: ContentBlockParam,
|
||||
): block is ImageBlockParam & { source: Base64ImageSource } {
|
||||
if (block.type !== 'image' || block.source?.type !== 'base64') return false
|
||||
return !(block.source as unknown as Record<string, unknown>).media_type
|
||||
}
|
||||
569
src/bridge/initReplBridge.ts
Normal file
569
src/bridge/initReplBridge.ts
Normal file
@ -0,0 +1,569 @@
|
||||
/**
|
||||
* REPL-specific wrapper around initBridgeCore. Owns the parts that read
|
||||
* bootstrap state — gates, cwd, session ID, git context, OAuth, title
|
||||
* derivation — then delegates to the bootstrap-free core.
|
||||
*
|
||||
* Split out of replBridge.ts because the sessionStorage import
|
||||
* (getCurrentSessionTitle) transitively pulls in src/commands.ts → the
|
||||
* entire slash command + React component tree (~1300 modules). Keeping
|
||||
* initBridgeCore in a file that doesn't touch sessionStorage lets
|
||||
* daemonBridge.ts import the core without bloating the Agent SDK bundle.
|
||||
*
|
||||
* Called via dynamic import by useReplBridge (auto-start) and print.ts
|
||||
* (SDK -p mode via query.enableRemoteControl).
|
||||
*/
|
||||
|
||||
import { feature } from 'bun:bundle'
|
||||
import { hostname } from 'os'
|
||||
import { getOriginalCwd, getSessionId } from '../bootstrap/state.js'
|
||||
import type { SDKMessage } from '../entrypoints/agentSdkTypes.js'
|
||||
import type { SDKControlResponse } from '../entrypoints/sdk/controlTypes.js'
|
||||
import { getFeatureValue_CACHED_WITH_REFRESH } from '../services/analytics/growthbook.js'
|
||||
import { getOrganizationUUID } from '../services/oauth/client.js'
|
||||
import {
|
||||
isPolicyAllowed,
|
||||
waitForPolicyLimitsToLoad,
|
||||
} from '../services/policyLimits/index.js'
|
||||
import type { Message } from '../types/message.js'
|
||||
import {
|
||||
checkAndRefreshOAuthTokenIfNeeded,
|
||||
getClaudeAIOAuthTokens,
|
||||
handleOAuth401Error,
|
||||
} from '../utils/auth.js'
|
||||
import { getGlobalConfig, saveGlobalConfig } from '../utils/config.js'
|
||||
import { logForDebugging } from '../utils/debug.js'
|
||||
import { stripDisplayTagsAllowEmpty } from '../utils/displayTags.js'
|
||||
import { errorMessage } from '../utils/errors.js'
|
||||
import { getBranch, getRemoteUrl } from '../utils/git.js'
|
||||
import { toSDKMessages } from '../utils/messages/mappers.js'
|
||||
import {
|
||||
getContentText,
|
||||
getMessagesAfterCompactBoundary,
|
||||
isSyntheticMessage,
|
||||
} from '../utils/messages.js'
|
||||
import type { PermissionMode } from '../utils/permissions/PermissionMode.js'
|
||||
import { getCurrentSessionTitle } from '../utils/sessionStorage.js'
|
||||
import {
|
||||
extractConversationText,
|
||||
generateSessionTitle,
|
||||
} from '../utils/sessionTitle.js'
|
||||
import { generateShortWordSlug } from '../utils/words.js'
|
||||
import {
|
||||
getBridgeAccessToken,
|
||||
getBridgeBaseUrl,
|
||||
getBridgeTokenOverride,
|
||||
} from './bridgeConfig.js'
|
||||
import {
|
||||
checkBridgeMinVersion,
|
||||
isBridgeEnabledBlocking,
|
||||
isCseShimEnabled,
|
||||
isEnvLessBridgeEnabled,
|
||||
} from './bridgeEnabled.js'
|
||||
import {
|
||||
archiveBridgeSession,
|
||||
createBridgeSession,
|
||||
updateBridgeSessionTitle,
|
||||
} from './createSession.js'
|
||||
import { logBridgeSkip } from './debugUtils.js'
|
||||
import { checkEnvLessBridgeMinVersion } from './envLessBridgeConfig.js'
|
||||
import { getPollIntervalConfig } from './pollConfig.js'
|
||||
import type { BridgeState, ReplBridgeHandle } from './replBridge.js'
|
||||
import { initBridgeCore } from './replBridge.js'
|
||||
import { setCseShimGate } from './sessionIdCompat.js'
|
||||
import type { BridgeWorkerType } from './types.js'
|
||||
|
||||
/**
 * Options for initReplBridge(). All fields are optional; callbacks wire
 * bridge events back into the caller (REPL or SDK -p mode).
 */
export type InitBridgeOptions = {
  onInboundMessage?: (msg: SDKMessage) => void | Promise<void>
  onPermissionResponse?: (response: SDKControlResponse) => void
  onInterrupt?: () => void
  onSetModel?: (model: string | undefined) => void
  onSetMaxThinkingTokens?: (maxTokens: number | null) => void
  // Returns ok:false with an error string when the mode switch is rejected.
  onSetPermissionMode?: (
    mode: PermissionMode,
  ) => { ok: true } | { ok: false; error: string }
  onStateChange?: (state: BridgeState, detail?: string) => void
  initialMessages?: Message[]
  // Explicit session name from `/remote-control <name>`. When set, overrides
  // the title derived from the conversation or /rename.
  initialName?: string
  // Fresh view of the full conversation at call time. Used by onUserMessage's
  // count-3 derivation to call generateSessionTitle over the full conversation.
  // Optional — print.ts's SDK enableRemoteControl path has no REPL message
  // array; count-3 falls back to the single message text when absent.
  getMessages?: () => Message[]
  // UUIDs already flushed in a prior bridge session. Messages with these
  // UUIDs are excluded from the initial flush to avoid poisoning the
  // server (duplicate UUIDs across sessions cause the WS to be killed).
  // Mutated in place — newly flushed UUIDs are added after each flush.
  previouslyFlushedUUIDs?: Set<string>
  /** See BridgeCoreParams.perpetual. */
  perpetual?: boolean
  /**
   * When true, the bridge only forwards events outbound (no SSE inbound
   * stream). Used by CCR mirror mode — local sessions visible on claude.ai
   * without enabling inbound control.
   */
  outboundOnly?: boolean
  tags?: string[]
}
|
||||
|
||||
export async function initReplBridge(
|
||||
options?: InitBridgeOptions,
|
||||
): Promise<ReplBridgeHandle | null> {
|
||||
const {
|
||||
onInboundMessage,
|
||||
onPermissionResponse,
|
||||
onInterrupt,
|
||||
onSetModel,
|
||||
onSetMaxThinkingTokens,
|
||||
onSetPermissionMode,
|
||||
onStateChange,
|
||||
initialMessages,
|
||||
getMessages,
|
||||
previouslyFlushedUUIDs,
|
||||
initialName,
|
||||
perpetual,
|
||||
outboundOnly,
|
||||
tags,
|
||||
} = options ?? {}
|
||||
|
||||
// Wire the cse_ shim kill switch so toCompatSessionId respects the
|
||||
// GrowthBook gate. Daemon/SDK paths skip this — shim defaults to active.
|
||||
setCseShimGate(isCseShimEnabled)
|
||||
|
||||
// 1. Runtime gate
|
||||
if (!(await isBridgeEnabledBlocking())) {
|
||||
logBridgeSkip('not_enabled', '[bridge:repl] Skipping: bridge not enabled')
|
||||
return null
|
||||
}
|
||||
|
||||
// 1b. Minimum version check — deferred to after the v1/v2 branch below,
|
||||
// since each implementation has its own floor (tengu_bridge_min_version
|
||||
// for v1, tengu_bridge_repl_v2_config.min_version for v2).
|
||||
|
||||
// 2. Check OAuth — must be signed in with claude.ai. Runs before the
|
||||
// policy check so console-auth users get the actionable "/login" hint
|
||||
// instead of a misleading policy error from a stale/wrong-org cache.
|
||||
if (!getBridgeAccessToken()) {
|
||||
logBridgeSkip('no_oauth', '[bridge:repl] Skipping: no OAuth tokens')
|
||||
onStateChange?.('failed', '/login')
|
||||
return null
|
||||
}
|
||||
|
||||
// 3. Check organization policy — remote control may be disabled
|
||||
await waitForPolicyLimitsToLoad()
|
||||
if (!isPolicyAllowed('allow_remote_control')) {
|
||||
logBridgeSkip(
|
||||
'policy_denied',
|
||||
'[bridge:repl] Skipping: allow_remote_control policy not allowed',
|
||||
)
|
||||
onStateChange?.('failed', "disabled by your organization's policy")
|
||||
return null
|
||||
}
|
||||
|
||||
// When CLAUDE_BRIDGE_OAUTH_TOKEN is set (ant-only local dev), the bridge
|
||||
// uses that token directly via getBridgeAccessToken() — keychain state is
|
||||
// irrelevant. Skip 2b/2c to preserve that decoupling: an expired keychain
|
||||
// token shouldn't block a bridge connection that doesn't use it.
|
||||
if (!getBridgeTokenOverride()) {
|
||||
// 2a. Cross-process backoff. If N prior processes already saw this exact
|
||||
// dead token (matched by expiresAt), skip silently — no event, no refresh
|
||||
// attempt. The count threshold tolerates transient refresh failures (auth
|
||||
// server 5xx, lockfile errors per auth.ts:1437/1444/1485): each process
|
||||
// independently retries until 3 consecutive failures prove the token dead.
|
||||
// Mirrors useReplBridge's MAX_CONSECUTIVE_INIT_FAILURES for in-process.
|
||||
// The expiresAt key is content-addressed: /login → new token → new expiresAt
|
||||
// → this stops matching without any explicit clear.
|
||||
const cfg = getGlobalConfig()
|
||||
if (
|
||||
cfg.bridgeOauthDeadExpiresAt != null &&
|
||||
(cfg.bridgeOauthDeadFailCount ?? 0) >= 3 &&
|
||||
getClaudeAIOAuthTokens()?.expiresAt === cfg.bridgeOauthDeadExpiresAt
|
||||
) {
|
||||
logForDebugging(
|
||||
`[bridge:repl] Skipping: cross-process backoff (dead token seen ${cfg.bridgeOauthDeadFailCount} times)`,
|
||||
)
|
||||
return null
|
||||
}
|
||||
|
||||
// 2b. Proactively refresh if expired. Mirrors bridgeMain.ts:2096 — the REPL
|
||||
// bridge fires at useEffect mount BEFORE any v1/messages call, making this
|
||||
// usually the first OAuth request of the session. Without this, ~9% of
|
||||
// registrations hit the server with a >8h-expired token → 401 → withOAuthRetry
|
||||
// recovers, but the server logs a 401 we can avoid. VPN egress IPs observed
|
||||
// at 30:1 401:200 when many unrelated users cluster at the 8h TTL boundary.
|
||||
//
|
||||
// Fresh-token cost: one memoized read + one Date.now() comparison (~µs).
|
||||
// checkAndRefreshOAuthTokenIfNeeded clears its own cache in every path that
|
||||
// touches the keychain (refresh success, lockfile race, throw), so no
|
||||
// explicit clearOAuthTokenCache() here — that would force a blocking
|
||||
// keychain spawn on the 91%+ fresh-token path.
|
||||
await checkAndRefreshOAuthTokenIfNeeded()
|
||||
|
||||
// 2c. Skip if token is still expired post-refresh-attempt. Env-var / FD
|
||||
// tokens (auth.ts:894-917) have expiresAt=null → never trip this. But a
|
||||
// keychain token whose refresh token is dead (password change, org left,
|
||||
// token GC'd) has expiresAt<now AND refresh just failed — the client would
|
||||
// otherwise loop 401 forever: withOAuthRetry → handleOAuth401Error →
|
||||
// refresh fails again → retry with same stale token → 401 again.
|
||||
// Datadog 2026-03-08: single IPs generating 2,879 such 401s/day. Skip the
|
||||
// guaranteed-fail API call; useReplBridge surfaces the failure.
|
||||
//
|
||||
// Intentionally NOT using isOAuthTokenExpired here — that has a 5-minute
|
||||
// proactive-refresh buffer, which is the right heuristic for "should
|
||||
// refresh soon" but wrong for "provably unusable". A token with 3min left
|
||||
// + transient refresh endpoint blip (5xx/timeout/wifi-reconnect) would
|
||||
// falsely trip a buffered check; the still-valid token would connect fine.
|
||||
// Check actual expiry instead: past-expiry AND refresh-failed → truly dead.
|
||||
const tokens = getClaudeAIOAuthTokens()
|
||||
if (tokens && tokens.expiresAt !== null && tokens.expiresAt <= Date.now()) {
|
||||
logBridgeSkip(
|
||||
'oauth_expired_unrefreshable',
|
||||
'[bridge:repl] Skipping: OAuth token expired and refresh failed (re-login required)',
|
||||
)
|
||||
onStateChange?.('failed', '/login')
|
||||
// Persist for the next process. Increments failCount when re-discovering
|
||||
// the same dead token (matched by expiresAt); resets to 1 for a different
|
||||
// token. Once count reaches 3, step 2a's early-return fires and this path
|
||||
// is never reached again — writes are capped at 3 per dead token.
|
||||
// Local const captures the narrowed type (closure loses !==null narrowing).
|
||||
const deadExpiresAt = tokens.expiresAt
|
||||
saveGlobalConfig(c => ({
|
||||
...c,
|
||||
bridgeOauthDeadExpiresAt: deadExpiresAt,
|
||||
bridgeOauthDeadFailCount:
|
||||
c.bridgeOauthDeadExpiresAt === deadExpiresAt
|
||||
? (c.bridgeOauthDeadFailCount ?? 0) + 1
|
||||
: 1,
|
||||
}))
|
||||
return null
|
||||
}
|
||||
}
|
||||
|
||||
// 4. Compute baseUrl — needed by both v1 (env-based) and v2 (env-less)
|
||||
// paths. Hoisted above the v2 gate so both can use it.
|
||||
const baseUrl = getBridgeBaseUrl()
|
||||
|
||||
// 5. Derive session title. Precedence: explicit initialName → /rename
|
||||
// (session storage) → last meaningful user message → generated slug.
|
||||
// Cosmetic only (claude.ai session list); the model never sees it.
|
||||
// Two flags: `hasExplicitTitle` (initialName or /rename — never auto-
|
||||
// overwrite) vs. `hasTitle` (any title, including auto-derived — blocks
|
||||
// the count-1 re-derivation but not count-3). The onUserMessage callback
|
||||
// (wired to both v1 and v2 below) derives from the 1st prompt and again
|
||||
// from the 3rd so mobile/web show a title that reflects more context.
|
||||
// The slug fallback (e.g. "remote-control-graceful-unicorn") makes
|
||||
// auto-started sessions distinguishable in the claude.ai list before the
|
||||
// first prompt.
|
||||
let title = `remote-control-${generateShortWordSlug()}`
|
||||
let hasTitle = false
|
||||
let hasExplicitTitle = false
|
||||
if (initialName) {
|
||||
title = initialName
|
||||
hasTitle = true
|
||||
hasExplicitTitle = true
|
||||
} else {
|
||||
const sessionId = getSessionId()
|
||||
const customTitle = sessionId
|
||||
? getCurrentSessionTitle(sessionId)
|
||||
: undefined
|
||||
if (customTitle) {
|
||||
title = customTitle
|
||||
hasTitle = true
|
||||
hasExplicitTitle = true
|
||||
} else if (initialMessages && initialMessages.length > 0) {
|
||||
// Find the last user message that has meaningful content. Skip meta
|
||||
// (nudges), tool results, compact summaries ("This session is being
|
||||
// continued…"), non-human origins (task notifications, channel pushes),
|
||||
// and synthetic interrupts ([Request interrupted by user]) — none are
|
||||
// human-authored. Same filter as extractTitleText + isSyntheticMessage.
|
||||
for (let i = initialMessages.length - 1; i >= 0; i--) {
|
||||
const msg = initialMessages[i]!
|
||||
if (
|
||||
msg.type !== 'user' ||
|
||||
msg.isMeta ||
|
||||
msg.toolUseResult ||
|
||||
msg.isCompactSummary ||
|
||||
(msg.origin && msg.origin.kind !== 'human') ||
|
||||
isSyntheticMessage(msg)
|
||||
)
|
||||
continue
|
||||
const rawContent = getContentText(msg.message.content)
|
||||
if (!rawContent) continue
|
||||
const derived = deriveTitle(rawContent)
|
||||
if (!derived) continue
|
||||
title = derived
|
||||
hasTitle = true
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Shared by both v1 and v2 — fires on every title-worthy user message until
|
||||
// it returns true. At count 1: deriveTitle placeholder immediately, then
|
||||
// generateSessionTitle (Haiku, sentence-case) fire-and-forget upgrade. At
|
||||
// count 3: re-generate over the full conversation. Skips entirely if the
|
||||
// title is explicit (/remote-control <name> or /rename) — re-checks
|
||||
// sessionStorage at call time so /rename between messages isn't clobbered.
|
||||
// Skips count 1 if initialMessages already derived (that title is fresh);
|
||||
// still refreshes at count 3. v2 passes cse_*; updateBridgeSessionTitle
|
||||
// retags internally.
|
||||
let userMessageCount = 0
|
||||
let lastBridgeSessionId: string | undefined
|
||||
let genSeq = 0
|
||||
const patch = (
|
||||
derived: string,
|
||||
bridgeSessionId: string,
|
||||
atCount: number,
|
||||
): void => {
|
||||
hasTitle = true
|
||||
title = derived
|
||||
logForDebugging(
|
||||
`[bridge:repl] derived title from message ${atCount}: ${derived}`,
|
||||
)
|
||||
void updateBridgeSessionTitle(bridgeSessionId, derived, {
|
||||
baseUrl,
|
||||
getAccessToken: getBridgeAccessToken,
|
||||
}).catch(() => {})
|
||||
}
|
||||
// Fire-and-forget Haiku generation with post-await guards. Re-checks /rename
|
||||
// (sessionStorage), v1 env-lost (lastBridgeSessionId), and same-session
|
||||
// out-of-order resolution (genSeq — count-1's Haiku resolving after count-3
|
||||
// would clobber the richer title). generateSessionTitle never rejects.
|
||||
const generateAndPatch = (input: string, bridgeSessionId: string): void => {
  // Snapshot the generation counter and message count BEFORE the async call
  // so the post-resolution guards can detect supersession.
  const gen = ++genSeq
  const atCount = userMessageCount
  // 15s timeout bounds the Haiku call; generateSessionTitle never rejects.
  void generateSessionTitle(input, AbortSignal.timeout(15_000)).then(
    generated => {
      // Guards: non-empty result; still the newest generation request
      // (an older count-1 result must not clobber a count-3 one); same
      // bridge session (v1 env-lost swaps IDs); and no explicit /rename
      // landed in sessionStorage while the generation was in flight.
      if (
        generated &&
        gen === genSeq &&
        lastBridgeSessionId === bridgeSessionId &&
        !getCurrentSessionTitle(getSessionId())
      ) {
        patch(generated, bridgeSessionId, atCount)
      }
    },
  )
}
|
||||
// Per-user-message title hook shared by the v1 and v2 bridge paths.
// Returns true once no further title work will ever be needed for this
// session (explicit title set, or the count-3 derivation has fired).
const onUserMessage = (text: string, bridgeSessionId: string): boolean => {
  if (hasExplicitTitle || getCurrentSessionTitle(getSessionId())) {
    return true
  }
  // v1 env-lost re-creates the session with a new ID. Reset the count so
  // the new session gets its own count-3 derivation; hasTitle stays true
  // (new session was created via getCurrentTitle(), which reads the count-1
  // title from this closure), so count-1 of the fresh cycle correctly skips.
  if (
    lastBridgeSessionId !== undefined &&
    lastBridgeSessionId !== bridgeSessionId
  ) {
    userMessageCount = 0
  }
  lastBridgeSessionId = bridgeSessionId
  userMessageCount++
  if (userMessageCount === 1 && !hasTitle) {
    // Cheap synchronous placeholder first, then the async Haiku upgrade.
    const placeholder = deriveTitle(text)
    if (placeholder) patch(placeholder, bridgeSessionId, userMessageCount)
    generateAndPatch(text, bridgeSessionId)
  } else if (userMessageCount === 3) {
    // Re-derive over the full (post-compact-boundary) conversation for a
    // richer title; falls back to just this message when getMessages is
    // unavailable.
    const msgs = getMessages?.()
    const input = msgs
      ? extractConversationText(getMessagesAfterCompactBoundary(msgs))
      : text
    generateAndPatch(input, bridgeSessionId)
  }
  // Also re-latches if v1 env-lost resets the transport's done flag past 3.
  return userMessageCount >= 3
}
|
||||
|
||||
const initialHistoryCap = getFeatureValue_CACHED_WITH_REFRESH(
|
||||
'tengu_bridge_initial_history_cap',
|
||||
200,
|
||||
5 * 60 * 1000,
|
||||
)
|
||||
|
||||
// Fetch orgUUID before the v1/v2 branch — both paths need it. v1 for
|
||||
// environment registration; v2 for archive (which lives at the compat
|
||||
// /v1/sessions/{id}/archive, not /v1/code/sessions). Without it, v2
|
||||
// archive 404s and sessions stay alive in CCR after /exit.
|
||||
const orgUUID = await getOrganizationUUID()
|
||||
if (!orgUUID) {
|
||||
logBridgeSkip('no_org_uuid', '[bridge:repl] Skipping: no org UUID')
|
||||
onStateChange?.('failed', '/login')
|
||||
return null
|
||||
}
|
||||
|
||||
// ── GrowthBook gate: env-less bridge ──────────────────────────────────
|
||||
// When enabled, skips the Environments API layer entirely (no register/
|
||||
// poll/ack/heartbeat) and connects directly via POST /bridge → worker_jwt.
|
||||
// See server PR #292605 (renamed in #293280). REPL-only — daemon/print stay
|
||||
// on env-based.
|
||||
//
|
||||
// NAMING: "env-less" is distinct from "CCR v2" (the /worker/* transport).
|
||||
// The env-based path below can ALSO use CCR v2 via CLAUDE_CODE_USE_CCR_V2.
|
||||
// tengu_bridge_repl_v2 gates env-less (no poll loop), not transport version.
|
||||
//
|
||||
// perpetual (assistant-mode session continuity via bridge-pointer.json) is
|
||||
// env-coupled and not yet implemented here — fall back to env-based when set
|
||||
// so KAIROS users don't silently lose cross-restart continuity.
|
||||
if (isEnvLessBridgeEnabled() && !perpetual) {
|
||||
const versionError = await checkEnvLessBridgeMinVersion()
|
||||
if (versionError) {
|
||||
logBridgeSkip(
|
||||
'version_too_old',
|
||||
`[bridge:repl] Skipping: ${versionError}`,
|
||||
true,
|
||||
)
|
||||
onStateChange?.('failed', 'run `claude update` to upgrade')
|
||||
return null
|
||||
}
|
||||
logForDebugging(
|
||||
'[bridge:repl] Using env-less bridge path (tengu_bridge_repl_v2)',
|
||||
)
|
||||
const { initEnvLessBridgeCore } = await import('./remoteBridgeCore.js')
|
||||
return initEnvLessBridgeCore({
|
||||
baseUrl,
|
||||
orgUUID,
|
||||
title,
|
||||
getAccessToken: getBridgeAccessToken,
|
||||
onAuth401: handleOAuth401Error,
|
||||
toSDKMessages,
|
||||
initialHistoryCap,
|
||||
initialMessages,
|
||||
// v2 always creates a fresh server session (new cse_* id), so
|
||||
// previouslyFlushedUUIDs is not passed — there's no cross-session
|
||||
// UUID collision risk, and the ref persists across enable→disable→
|
||||
// re-enable cycles which would cause the new session to receive zero
|
||||
// history (all UUIDs already in the set from the prior enable).
|
||||
// v1 handles this by calling previouslyFlushedUUIDs.clear() on fresh
|
||||
// session creation (replBridge.ts:768); v2 skips the param entirely.
|
||||
onInboundMessage,
|
||||
onUserMessage,
|
||||
onPermissionResponse,
|
||||
onInterrupt,
|
||||
onSetModel,
|
||||
onSetMaxThinkingTokens,
|
||||
onSetPermissionMode,
|
||||
onStateChange,
|
||||
outboundOnly,
|
||||
tags,
|
||||
})
|
||||
}
|
||||
|
||||
// ── v1 path: env-based (register/poll/ack/heartbeat) ──────────────────
|
||||
|
||||
const versionError = checkBridgeMinVersion()
|
||||
if (versionError) {
|
||||
logBridgeSkip('version_too_old', `[bridge:repl] Skipping: ${versionError}`)
|
||||
onStateChange?.('failed', 'run `claude update` to upgrade')
|
||||
return null
|
||||
}
|
||||
|
||||
// Gather git context — this is the bootstrap-read boundary.
|
||||
// Everything from here down is passed explicitly to bridgeCore.
|
||||
const branch = await getBranch()
|
||||
const gitRepoUrl = await getRemoteUrl()
|
||||
const sessionIngressUrl =
|
||||
process.env.USER_TYPE === 'ant' &&
|
||||
process.env.CLAUDE_BRIDGE_SESSION_INGRESS_URL
|
||||
? process.env.CLAUDE_BRIDGE_SESSION_INGRESS_URL
|
||||
: baseUrl
|
||||
|
||||
// Assistant-mode sessions advertise a distinct worker_type so the web UI
|
||||
// can filter them into a dedicated picker. KAIROS guard keeps the
|
||||
// assistant module out of external builds entirely.
|
||||
let workerType: BridgeWorkerType = 'claude_code'
|
||||
if (feature('KAIROS')) {
|
||||
/* eslint-disable @typescript-eslint/no-require-imports */
|
||||
const { isAssistantMode } =
|
||||
require('../assistant/index.js') as typeof import('../assistant/index.js')
|
||||
/* eslint-enable @typescript-eslint/no-require-imports */
|
||||
if (isAssistantMode()) {
|
||||
workerType = 'claude_code_assistant'
|
||||
}
|
||||
}
|
||||
|
||||
// 6. Delegate. BridgeCoreHandle is a structural superset of
|
||||
// ReplBridgeHandle (adds writeSdkMessages which REPL callers don't use),
|
||||
// so no adapter needed — just the narrower type on the way out.
|
||||
return initBridgeCore({
|
||||
dir: getOriginalCwd(),
|
||||
machineName: hostname(),
|
||||
branch,
|
||||
gitRepoUrl,
|
||||
title,
|
||||
baseUrl,
|
||||
sessionIngressUrl,
|
||||
workerType,
|
||||
getAccessToken: getBridgeAccessToken,
|
||||
createSession: opts =>
|
||||
createBridgeSession({
|
||||
...opts,
|
||||
events: [],
|
||||
baseUrl,
|
||||
getAccessToken: getBridgeAccessToken,
|
||||
}),
|
||||
archiveSession: sessionId =>
|
||||
archiveBridgeSession(sessionId, {
|
||||
baseUrl,
|
||||
getAccessToken: getBridgeAccessToken,
|
||||
// gracefulShutdown.ts:407 races runCleanupFunctions against 2s.
|
||||
// Teardown also does stopWork (parallel) + deregister (sequential),
|
||||
// so archive can't have the full budget. 1.5s matches v2's
|
||||
// teardown_archive_timeout_ms default.
|
||||
timeoutMs: 1500,
|
||||
}).catch((err: unknown) => {
|
||||
// archiveBridgeSession has no try/catch — 5xx/timeout/network throw
|
||||
// straight through. Previously swallowed silently, making archive
|
||||
// failures BQ-invisible and undiagnosable from debug logs.
|
||||
logForDebugging(
|
||||
`[bridge:repl] archiveBridgeSession threw: ${errorMessage(err)}`,
|
||||
{ level: 'error' },
|
||||
)
|
||||
}),
|
||||
// getCurrentTitle is read on reconnect-after-env-lost to re-title the new
|
||||
// session. /rename writes to session storage; onUserMessage mutates
|
||||
// `title` directly — both paths are picked up here.
|
||||
getCurrentTitle: () => getCurrentSessionTitle(getSessionId()) ?? title,
|
||||
onUserMessage,
|
||||
toSDKMessages,
|
||||
onAuth401: handleOAuth401Error,
|
||||
getPollIntervalConfig,
|
||||
initialHistoryCap,
|
||||
initialMessages,
|
||||
previouslyFlushedUUIDs,
|
||||
onInboundMessage,
|
||||
onPermissionResponse,
|
||||
onInterrupt,
|
||||
onSetModel,
|
||||
onSetMaxThinkingTokens,
|
||||
onSetPermissionMode,
|
||||
onStateChange,
|
||||
perpetual,
|
||||
})
|
||||
}
|
||||
|
||||
const TITLE_MAX_LEN = 50
|
||||
|
||||
/**
|
||||
* Quick placeholder title: strip display tags, take the first sentence,
|
||||
* collapse whitespace, truncate to 50 chars. Returns undefined if the result
|
||||
* is empty (e.g. message was only <local-command-stdout>). Replaced by
|
||||
* generateSessionTitle once Haiku resolves (~1-15s).
|
||||
*/
|
||||
function deriveTitle(raw: string): string | undefined {
|
||||
// Strip <ide_opened_file>, <session-start-hook>, etc. — these appear in
|
||||
// user messages when IDE/hooks inject context. stripDisplayTagsAllowEmpty
|
||||
// returns '' (not the original) so pure-tag messages are skipped.
|
||||
const clean = stripDisplayTagsAllowEmpty(raw)
|
||||
// First sentence is usually the intent; rest is often context/detail.
|
||||
// Capture group instead of lookbehind — keeps YARR JIT happy.
|
||||
const firstSentence = /^(.*?[.!?])\s/.exec(clean)?.[1] ?? clean
|
||||
// Collapse newlines/tabs — titles are single-line in the claude.ai list.
|
||||
const flat = firstSentence.replace(/\s+/g, ' ').trim()
|
||||
if (!flat) return undefined
|
||||
return flat.length > TITLE_MAX_LEN
|
||||
? flat.slice(0, TITLE_MAX_LEN - 1) + '\u2026'
|
||||
: flat
|
||||
}
|
||||
256
src/bridge/jwtUtils.ts
Normal file
256
src/bridge/jwtUtils.ts
Normal file
@ -0,0 +1,256 @@
|
||||
import { logEvent } from '../services/analytics/index.js'
|
||||
import { logForDebugging } from '../utils/debug.js'
|
||||
import { logForDiagnosticsNoPII } from '../utils/diagLogs.js'
|
||||
import { errorMessage } from '../utils/errors.js'
|
||||
import { jsonParse } from '../utils/slowOperations.js'
|
||||
|
||||
/** Format a millisecond duration as a human-readable string (e.g. "5m 30s"). */
|
||||
function formatDuration(ms: number): string {
|
||||
if (ms < 60_000) return `${Math.round(ms / 1000)}s`
|
||||
const m = Math.floor(ms / 60_000)
|
||||
const s = Math.round((ms % 60_000) / 1000)
|
||||
return s > 0 ? `${m}m ${s}s` : `${m}m`
|
||||
}
|
||||
|
||||
/**
|
||||
* Decode a JWT's payload segment without verifying the signature.
|
||||
* Strips the `sk-ant-si-` session-ingress prefix if present.
|
||||
* Returns the parsed JSON payload as `unknown`, or `null` if the
|
||||
* token is malformed or the payload is not valid JSON.
|
||||
*/
|
||||
export function decodeJwtPayload(token: string): unknown | null {
|
||||
const jwt = token.startsWith('sk-ant-si-')
|
||||
? token.slice('sk-ant-si-'.length)
|
||||
: token
|
||||
const parts = jwt.split('.')
|
||||
if (parts.length !== 3 || !parts[1]) return null
|
||||
try {
|
||||
return jsonParse(Buffer.from(parts[1], 'base64url').toString('utf8'))
|
||||
} catch {
|
||||
return null
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Decode the `exp` (expiry) claim from a JWT without verifying the signature.
|
||||
* @returns The `exp` value in Unix seconds, or `null` if unparseable
|
||||
*/
|
||||
export function decodeJwtExpiry(token: string): number | null {
|
||||
const payload = decodeJwtPayload(token)
|
||||
if (
|
||||
payload !== null &&
|
||||
typeof payload === 'object' &&
|
||||
'exp' in payload &&
|
||||
typeof payload.exp === 'number'
|
||||
) {
|
||||
return payload.exp
|
||||
}
|
||||
return null
|
||||
}
|
||||
|
||||
/** Refresh buffer: request a new token before expiry. */
|
||||
const TOKEN_REFRESH_BUFFER_MS = 5 * 60 * 1000
|
||||
|
||||
/** Fallback refresh interval when the new token's expiry is unknown. */
|
||||
const FALLBACK_REFRESH_INTERVAL_MS = 30 * 60 * 1000 // 30 minutes
|
||||
|
||||
/** Max consecutive failures before giving up on the refresh chain. */
|
||||
const MAX_REFRESH_FAILURES = 3
|
||||
|
||||
/** Retry delay when getAccessToken returns undefined. */
|
||||
const REFRESH_RETRY_DELAY_MS = 60_000
|
||||
|
||||
/**
|
||||
* Creates a token refresh scheduler that proactively refreshes session tokens
|
||||
* before they expire. Used by both the standalone bridge and the REPL bridge.
|
||||
*
|
||||
* When a token is about to expire, the scheduler calls `onRefresh` with the
|
||||
* session ID and the bridge's OAuth access token. The caller is responsible
|
||||
* for delivering the token to the appropriate transport (child process stdin
|
||||
* for standalone bridge, WebSocket reconnect for REPL bridge).
|
||||
*/
|
||||
export function createTokenRefreshScheduler({
  getAccessToken,
  onRefresh,
  label,
  refreshBufferMs = TOKEN_REFRESH_BUFFER_MS,
}: {
  getAccessToken: () => string | undefined | Promise<string | undefined>
  onRefresh: (sessionId: string, oauthToken: string) => void
  label: string
  /** How long before expiry to fire refresh. Defaults to 5 min. */
  refreshBufferMs?: number
}): {
  schedule: (sessionId: string, token: string) => void
  scheduleFromExpiresIn: (sessionId: string, expiresInSeconds: number) => void
  cancel: (sessionId: string) => void
  cancelAll: () => void
} {
  // At most one pending refresh timer per session (initial, retry, or
  // follow-up — whichever was set last).
  const timers = new Map<string, ReturnType<typeof setTimeout>>()
  // Consecutive "getAccessToken returned undefined" failures per session.
  const failureCounts = new Map<string, number>()
  // Generation counter per session — incremented by schedule() and cancel()
  // so that in-flight async doRefresh() calls can detect when they've been
  // superseded and should skip setting follow-up timers.
  const generations = new Map<string, number>()

  function nextGeneration(sessionId: string): number {
    const gen = (generations.get(sessionId) ?? 0) + 1
    generations.set(sessionId, gen)
    return gen
  }

  function schedule(sessionId: string, token: string): void {
    const expiry = decodeJwtExpiry(token)
    if (!expiry) {
      // Token is not a decodable JWT (e.g. an OAuth token passed from the
      // REPL bridge WebSocket open handler). Preserve any existing timer
      // (such as the follow-up refresh set by doRefresh) so the refresh
      // chain is not broken.
      logForDebugging(
        `[${label}:token] Could not decode JWT expiry for sessionId=${sessionId}, token prefix=${token.slice(0, 15)}…, keeping existing timer`,
      )
      return
    }

    // Clear any existing refresh timer — we have a concrete expiry to replace it.
    const existing = timers.get(sessionId)
    if (existing) {
      clearTimeout(existing)
    }

    // Bump generation to invalidate any in-flight async doRefresh.
    const gen = nextGeneration(sessionId)

    const expiryDate = new Date(expiry * 1000).toISOString()
    const delayMs = expiry * 1000 - Date.now() - refreshBufferMs
    if (delayMs <= 0) {
      logForDebugging(
        `[${label}:token] Token for sessionId=${sessionId} expires=${expiryDate} (past or within buffer), refreshing immediately`,
      )
      void doRefresh(sessionId, gen)
      return
    }

    logForDebugging(
      `[${label}:token] Scheduled token refresh for sessionId=${sessionId} in ${formatDuration(delayMs)} (expires=${expiryDate}, buffer=${refreshBufferMs / 1000}s)`,
    )

    // setTimeout's extra arguments are forwarded to doRefresh on fire.
    const timer = setTimeout(doRefresh, delayMs, sessionId, gen)
    timers.set(sessionId, timer)
  }

  /**
   * Schedule refresh using an explicit TTL (seconds until expiry) rather
   * than decoding a JWT's exp claim. Used by callers whose JWT is opaque
   * (e.g. POST /v1/code/sessions/{id}/bridge returns expires_in directly).
   */
  function scheduleFromExpiresIn(
    sessionId: string,
    expiresInSeconds: number,
  ): void {
    const existing = timers.get(sessionId)
    if (existing) clearTimeout(existing)
    const gen = nextGeneration(sessionId)
    // Clamp to 30s floor — if refreshBufferMs exceeds the server's expires_in
    // (e.g. very large buffer for frequent-refresh testing, or server shortens
    // expires_in unexpectedly), unclamped delayMs ≤ 0 would tight-loop.
    const delayMs = Math.max(expiresInSeconds * 1000 - refreshBufferMs, 30_000)
    logForDebugging(
      `[${label}:token] Scheduled token refresh for sessionId=${sessionId} in ${formatDuration(delayMs)} (expires_in=${expiresInSeconds}s, buffer=${refreshBufferMs / 1000}s)`,
    )
    const timer = setTimeout(doRefresh, delayMs, sessionId, gen)
    timers.set(sessionId, timer)
  }

  // Fetch a fresh OAuth token and deliver it via onRefresh, then chain the
  // next refresh. `gen` is the generation captured at scheduling time; any
  // mismatch after the await means this call was superseded.
  async function doRefresh(sessionId: string, gen: number): Promise<void> {
    let oauthToken: string | undefined
    try {
      oauthToken = await getAccessToken()
    } catch (err) {
      // A throwing getAccessToken is treated the same as "no token":
      // fall through to the failure-count/retry path below.
      logForDebugging(
        `[${label}:token] getAccessToken threw for sessionId=${sessionId}: ${errorMessage(err)}`,
        { level: 'error' },
      )
    }

    // If the session was cancelled or rescheduled while we were awaiting,
    // the generation will have changed — bail out to avoid orphaned timers.
    if (generations.get(sessionId) !== gen) {
      logForDebugging(
        `[${label}:token] doRefresh for sessionId=${sessionId} stale (gen ${gen} vs ${generations.get(sessionId)}), skipping`,
      )
      return
    }

    if (!oauthToken) {
      const failures = (failureCounts.get(sessionId) ?? 0) + 1
      failureCounts.set(sessionId, failures)
      logForDebugging(
        `[${label}:token] No OAuth token available for refresh, sessionId=${sessionId} (failure ${failures}/${MAX_REFRESH_FAILURES})`,
        { level: 'error' },
      )
      logForDiagnosticsNoPII('error', 'bridge_token_refresh_no_oauth')
      // Schedule a retry so the refresh chain can recover if the token
      // becomes available again (e.g. transient cache clear during refresh).
      // Cap retries to avoid spamming on genuine failures.
      if (failures < MAX_REFRESH_FAILURES) {
        const retryTimer = setTimeout(
          doRefresh,
          REFRESH_RETRY_DELAY_MS,
          sessionId,
          gen,
        )
        timers.set(sessionId, retryTimer)
      }
      return
    }

    // Reset failure counter on successful token retrieval
    failureCounts.delete(sessionId)

    logForDebugging(
      `[${label}:token] Refreshing token for sessionId=${sessionId}: new token prefix=${oauthToken.slice(0, 15)}…`,
    )
    logEvent('tengu_bridge_token_refreshed', {})
    onRefresh(sessionId, oauthToken)

    // Schedule a follow-up refresh so long-running sessions stay authenticated.
    // Without this, the initial one-shot timer leaves the session vulnerable
    // to token expiry if it runs past the first refresh window.
    const timer = setTimeout(
      doRefresh,
      FALLBACK_REFRESH_INTERVAL_MS,
      sessionId,
      gen,
    )
    timers.set(sessionId, timer)
    logForDebugging(
      `[${label}:token] Scheduled follow-up refresh for sessionId=${sessionId} in ${formatDuration(FALLBACK_REFRESH_INTERVAL_MS)}`,
    )
  }

  function cancel(sessionId: string): void {
    // Bump generation to invalidate any in-flight async doRefresh.
    nextGeneration(sessionId)
    const timer = timers.get(sessionId)
    if (timer) {
      clearTimeout(timer)
      timers.delete(sessionId)
    }
    failureCounts.delete(sessionId)
  }

  function cancelAll(): void {
    // Bump all generations so in-flight doRefresh calls are invalidated.
    for (const sessionId of generations.keys()) {
      nextGeneration(sessionId)
    }
    for (const timer of timers.values()) {
      clearTimeout(timer)
    }
    timers.clear()
    failureCounts.clear()
  }

  return { schedule, scheduleFromExpiresIn, cancel, cancelAll }
}
|
||||
3
src/bridge/peerSessions.ts
Normal file
3
src/bridge/peerSessions.ts
Normal file
@ -0,0 +1,3 @@
|
||||
/**
 * Restoration-time stub: always reports no peer sessions.
 * NOTE(review): the original module body was not recoverable from source
 * maps; callers degrade gracefully (empty peer list) instead of crashing.
 */
export function listPeerSessions() {
  return []
}
|
||||
110
src/bridge/pollConfig.ts
Normal file
110
src/bridge/pollConfig.ts
Normal file
@ -0,0 +1,110 @@
|
||||
import { z } from 'zod/v4'
|
||||
import { getFeatureValue_CACHED_WITH_REFRESH } from '../services/analytics/growthbook.js'
|
||||
import { lazySchema } from '../utils/lazySchema.js'
|
||||
import {
|
||||
DEFAULT_POLL_CONFIG,
|
||||
type PollIntervalConfig,
|
||||
} from './pollConfigDefaults.js'
|
||||
|
||||
// .min(100) on the seek-work intervals restores the old Math.max(..., 100)
|
||||
// defense-in-depth floor against fat-fingered GrowthBook values. Unlike a
|
||||
// clamp, Zod rejects the whole object on violation — a config with one bad
|
||||
// field falls back to DEFAULT_POLL_CONFIG entirely rather than being
|
||||
// partially trusted.
|
||||
//
|
||||
// The at_capacity intervals use a 0-or-≥100 refinement: 0 means "disabled"
|
||||
// (heartbeat-only mode), ≥100 is the fat-finger floor. Values 1–99 are
|
||||
// rejected so unit confusion (ops thinks seconds, enters 10) doesn't poll
|
||||
// every 10ms against the VerifyEnvironmentSecretAuth DB path.
|
||||
//
|
||||
// The object-level refines require at least one at-capacity liveness
|
||||
// mechanism enabled: heartbeat OR the relevant poll interval. Without this,
|
||||
// the hb=0, atCapMs=0 drift config (ops disables heartbeat without
|
||||
// restoring at_capacity) falls through every throttle site with no sleep —
|
||||
// tight-looping /poll at HTTP-round-trip speed.
|
||||
// Shared refinement params for the "0 = disabled, otherwise ≥100ms" fields.
const zeroOrAtLeast100 = {
  message: 'must be 0 (disabled) or ≥100ms',
}
// lazySchema defers building the zod object until first use.
const pollIntervalConfigSchema = lazySchema(() =>
  z
    .object({
      // Seek-work interval: hard ≥100ms floor (see header comment above).
      poll_interval_ms_not_at_capacity: z.number().int().min(100),
      // 0 = no at-capacity polling. Independent of heartbeat — both can be
      // enabled (heartbeat runs, periodically breaks out to poll).
      poll_interval_ms_at_capacity: z
        .number()
        .int()
        .refine(v => v === 0 || v >= 100, zeroOrAtLeast100),
      // 0 = disabled; positive value = heartbeat at this interval while at
      // capacity. Runs alongside at-capacity polling, not instead of it.
      // Named non_exclusive to distinguish from the old heartbeat_interval_ms
      // (either-or semantics in pre-#22145 clients). .default(0) so existing
      // GrowthBook configs without this field parse successfully.
      non_exclusive_heartbeat_interval_ms: z.number().int().min(0).default(0),
      // Multisession (bridgeMain.ts) intervals. Defaults match the
      // single-session values so existing configs without these fields
      // preserve current behavior.
      multisession_poll_interval_ms_not_at_capacity: z
        .number()
        .int()
        .min(100)
        .default(
          DEFAULT_POLL_CONFIG.multisession_poll_interval_ms_not_at_capacity,
        ),
      multisession_poll_interval_ms_partial_capacity: z
        .number()
        .int()
        .min(100)
        .default(
          DEFAULT_POLL_CONFIG.multisession_poll_interval_ms_partial_capacity,
        ),
      multisession_poll_interval_ms_at_capacity: z
        .number()
        .int()
        .refine(v => v === 0 || v >= 100, zeroOrAtLeast100)
        .default(DEFAULT_POLL_CONFIG.multisession_poll_interval_ms_at_capacity),
      // .min(1) matches the server's ge=1 constraint (work_v1.py:230).
      reclaim_older_than_ms: z.number().int().min(1).default(5000),
      session_keepalive_interval_v2_ms: z
        .number()
        .int()
        .min(0)
        .default(120_000),
    })
    // Object-level refinements: require at least one at-capacity liveness
    // mechanism (heartbeat OR the relevant poll interval) — see the header
    // comment for the hb=0 / atCapMs=0 tight-loop hazard this prevents.
    .refine(
      cfg =>
        cfg.non_exclusive_heartbeat_interval_ms > 0 ||
        cfg.poll_interval_ms_at_capacity > 0,
      {
        message:
          'at-capacity liveness requires non_exclusive_heartbeat_interval_ms > 0 or poll_interval_ms_at_capacity > 0',
      },
    )
    .refine(
      cfg =>
        cfg.non_exclusive_heartbeat_interval_ms > 0 ||
        cfg.multisession_poll_interval_ms_at_capacity > 0,
      {
        message:
          'at-capacity liveness requires non_exclusive_heartbeat_interval_ms > 0 or multisession_poll_interval_ms_at_capacity > 0',
      },
    ),
)
|
||||
|
||||
/**
|
||||
* Fetch the bridge poll interval config from GrowthBook with a 5-minute
|
||||
* refresh window. Validates the served JSON against the schema; falls back
|
||||
* to defaults if the flag is absent, malformed, or partially-specified.
|
||||
*
|
||||
* Shared by bridgeMain.ts (standalone) and replBridge.ts (REPL) so ops
|
||||
* can tune both poll rates fleet-wide with a single config push.
|
||||
*/
|
||||
export function getPollIntervalConfig(): PollIntervalConfig {
|
||||
const raw = getFeatureValue_CACHED_WITH_REFRESH<unknown>(
|
||||
'tengu_bridge_poll_interval_config',
|
||||
DEFAULT_POLL_CONFIG,
|
||||
5 * 60 * 1000,
|
||||
)
|
||||
const parsed = pollIntervalConfigSchema().safeParse(raw)
|
||||
return parsed.success ? parsed.data : DEFAULT_POLL_CONFIG
|
||||
}
|
||||
82
src/bridge/pollConfigDefaults.ts
Normal file
82
src/bridge/pollConfigDefaults.ts
Normal file
@ -0,0 +1,82 @@
|
||||
/**
|
||||
* Bridge poll interval defaults. Extracted from pollConfig.ts so callers
|
||||
* that don't need live GrowthBook tuning (daemon via Agent SDK) can avoid
|
||||
* the growthbook.ts → config.ts → file.ts → sessionStorage.ts → commands.ts
|
||||
* transitive dependency chain.
|
||||
*/
|
||||
|
||||
/**
|
||||
* Poll interval when actively seeking work (no transport / below maxSessions).
|
||||
* Governs user-visible "connecting…" latency on initial work pickup and
|
||||
* recovery speed after the server re-dispatches a work item.
|
||||
*/
|
||||
const POLL_INTERVAL_MS_NOT_AT_CAPACITY = 2000
|
||||
|
||||
/**
|
||||
* Poll interval when the transport is connected. Runs independently of
|
||||
* heartbeat — when both are enabled, the heartbeat loop breaks out to poll
|
||||
* at this interval. Set to 0 to disable at-capacity polling entirely.
|
||||
*
|
||||
* Server-side constraints that bound this value:
|
||||
* - BRIDGE_LAST_POLL_TTL = 4h (Redis key expiry → environment auto-archived)
|
||||
* - max_poll_stale_seconds = 24h (session-creation health gate, currently disabled)
|
||||
*
|
||||
* 10 minutes gives 24× headroom on the Redis TTL while still picking up
|
||||
* server-initiated token-rotation redispatches within one poll cycle.
|
||||
* The transport auto-reconnects internally for 10 minutes on transient WS
|
||||
* failures, so poll is not the recovery path — it's strictly a liveness
|
||||
* signal plus a backstop for permanent close.
|
||||
*/
|
||||
const POLL_INTERVAL_MS_AT_CAPACITY = 600_000
|
||||
|
||||
/**
|
||||
* Multisession bridge (bridgeMain.ts) poll intervals. Defaults match the
|
||||
* single-session values so existing GrowthBook configs without these fields
|
||||
* preserve current behavior. Ops can tune these independently via the
|
||||
* tengu_bridge_poll_interval_config GB flag.
|
||||
*/
|
||||
const MULTISESSION_POLL_INTERVAL_MS_NOT_AT_CAPACITY =
|
||||
POLL_INTERVAL_MS_NOT_AT_CAPACITY
|
||||
const MULTISESSION_POLL_INTERVAL_MS_PARTIAL_CAPACITY =
|
||||
POLL_INTERVAL_MS_NOT_AT_CAPACITY
|
||||
const MULTISESSION_POLL_INTERVAL_MS_AT_CAPACITY = POLL_INTERVAL_MS_AT_CAPACITY
|
||||
|
||||
export type PollIntervalConfig = {
  // Seek-work poll interval (ms) while below capacity.
  poll_interval_ms_not_at_capacity: number
  // Poll interval (ms) while the transport is connected; 0 disables.
  poll_interval_ms_at_capacity: number
  // At-capacity heartbeat interval (ms); 0 disables.
  non_exclusive_heartbeat_interval_ms: number
  // Multisession (bridgeMain.ts) equivalents of the intervals above.
  multisession_poll_interval_ms_not_at_capacity: number
  multisession_poll_interval_ms_partial_capacity: number
  multisession_poll_interval_ms_at_capacity: number
  // Poll query param: reclaim unacknowledged work items older than this (ms).
  reclaim_older_than_ms: number
  // Interval (ms) for silent keep_alive frames to session-ingress; 0 disables.
  session_keepalive_interval_v2_ms: number
}
|
||||
|
||||
/** Defaults used when the GrowthBook flag is absent, malformed, or rejected. */
export const DEFAULT_POLL_CONFIG: PollIntervalConfig = {
  poll_interval_ms_not_at_capacity: POLL_INTERVAL_MS_NOT_AT_CAPACITY,
  poll_interval_ms_at_capacity: POLL_INTERVAL_MS_AT_CAPACITY,
  // 0 = disabled. When > 0, at-capacity loops send per-work-item heartbeats
  // at this interval. Independent of poll_interval_ms_at_capacity — both may
  // run (heartbeat periodically yields to poll). 60s gives 5× headroom under
  // the server's 300s heartbeat TTL. Named non_exclusive to distinguish from
  // the old heartbeat_interval_ms field (either-or semantics in pre-#22145
  // clients — heartbeat suppressed poll). Old clients ignore this key; ops
  // can set both fields during rollout.
  non_exclusive_heartbeat_interval_ms: 0,
  multisession_poll_interval_ms_not_at_capacity:
    MULTISESSION_POLL_INTERVAL_MS_NOT_AT_CAPACITY,
  multisession_poll_interval_ms_partial_capacity:
    MULTISESSION_POLL_INTERVAL_MS_PARTIAL_CAPACITY,
  multisession_poll_interval_ms_at_capacity:
    MULTISESSION_POLL_INTERVAL_MS_AT_CAPACITY,
  // Poll query param: reclaim unacknowledged work items older than this.
  // Matches the server's DEFAULT_RECLAIM_OLDER_THAN_MS (work_service.py:24).
  // Enables picking up stale-pending work after JWT expiry, when the prior
  // ack failed because the session_ingress_token was already stale.
  reclaim_older_than_ms: 5000,
  // 0 = disabled. When > 0, push a silent {type:'keep_alive'} frame to
  // session-ingress at this interval so upstream proxies don't GC an idle
  // remote-control session. 2 min is the default. _v2: bridge-only gate
  // (pre-v2 clients read the old key, new clients ignore it).
  session_keepalive_interval_v2_ms: 120_000,
}
|
||||
1008
src/bridge/remoteBridgeCore.ts
Normal file
1008
src/bridge/remoteBridgeCore.ts
Normal file
File diff suppressed because it is too large
Load Diff
2406
src/bridge/replBridge.ts
Normal file
2406
src/bridge/replBridge.ts
Normal file
File diff suppressed because it is too large
Load Diff
36
src/bridge/replBridgeHandle.ts
Normal file
36
src/bridge/replBridgeHandle.ts
Normal file
@ -0,0 +1,36 @@
|
||||
import { updateSessionBridgeId } from '../utils/concurrentSessions.js'
|
||||
import type { ReplBridgeHandle } from './replBridge.js'
|
||||
import { toCompatSessionId } from './sessionIdCompat.js'
|
||||
|
||||
/**
|
||||
* Global pointer to the active REPL bridge handle, so callers outside
|
||||
* useReplBridge's React tree (tools, slash commands) can invoke handle methods
|
||||
* like subscribePR. Same one-bridge-per-process justification as bridgeDebug.ts
|
||||
* — the handle's closure captures the sessionId and getAccessToken that created
|
||||
* the session, and re-deriving those independently (BriefTool/upload.ts pattern)
|
||||
* risks staging/prod token divergence.
|
||||
*
|
||||
* Set from useReplBridge.tsx when init completes; cleared on teardown.
|
||||
*/
|
||||
|
||||
let handle: ReplBridgeHandle | null = null
|
||||
|
||||
/** Install (or clear, with null) the process-global REPL bridge handle. */
export function setReplBridgeHandle(h: ReplBridgeHandle | null): void {
  // Assign BEFORE the publish below — getSelfBridgeCompatId() reads `handle`.
  handle = h
  // Publish (or clear) our bridge session ID in the session record so other
  // local peers can dedup us out of their bridge list — local is preferred.
  // Best-effort: failures updating the session record are swallowed.
  void updateSessionBridgeId(getSelfBridgeCompatId() ?? null).catch(() => {})
}
|
||||
|
||||
/** The active REPL bridge handle, or null when no bridge is connected. */
export function getReplBridgeHandle(): ReplBridgeHandle | null {
  return handle
}
|
||||
|
||||
/**
|
||||
* Our own bridge session ID in the session_* compat format the API returns
|
||||
* in /v1/sessions responses — or undefined if bridge isn't connected.
|
||||
*/
|
||||
export function getSelfBridgeCompatId(): string | undefined {
|
||||
const h = getReplBridgeHandle()
|
||||
return h ? toCompatSessionId(h.bridgeSessionId) : undefined
|
||||
}
|
||||
370
src/bridge/replBridgeTransport.ts
Normal file
370
src/bridge/replBridgeTransport.ts
Normal file
@ -0,0 +1,370 @@
|
||||
import type { StdoutMessage } from 'src/entrypoints/sdk/controlTypes.js'
|
||||
import { CCRClient } from '../cli/transports/ccrClient.js'
|
||||
import type { HybridTransport } from '../cli/transports/HybridTransport.js'
|
||||
import { SSETransport } from '../cli/transports/SSETransport.js'
|
||||
import { logForDebugging } from '../utils/debug.js'
|
||||
import { errorMessage } from '../utils/errors.js'
|
||||
import { updateSessionIngressAuthToken } from '../utils/sessionIngressAuth.js'
|
||||
import type { SessionState } from '../utils/sessionState.js'
|
||||
import { registerWorker } from './workSecret.js'
|
||||
|
||||
/**
|
||||
* Transport abstraction for replBridge. Covers exactly the surface that
|
||||
* replBridge.ts uses against HybridTransport so the v1/v2 choice is
|
||||
* confined to the construction site.
|
||||
*
|
||||
* - v1: HybridTransport (WS reads + POST writes to Session-Ingress)
|
||||
* - v2: SSETransport (reads) + CCRClient (writes to CCR v2 /worker/*)
|
||||
*
|
||||
* The v2 write path goes through CCRClient.writeEvent → SerialBatchEventUploader,
|
||||
* NOT through SSETransport.write() — SSETransport.write() targets the
|
||||
* Session-Ingress POST URL shape, which is wrong for CCR v2.
|
||||
*/
|
||||
export type ReplBridgeTransport = {
  /** Send one message over the transport's write path. */
  write(message: StdoutMessage): Promise<void>
  /** Send an ordered batch of messages over the write path. */
  writeBatch(messages: StdoutMessage[]): Promise<void>
  /** Tear down both the read and write sides of the transport. */
  close(): void
  /** Whether the write path is ready to accept messages. */
  isConnectedStatus(): boolean
  /** Human-readable connection state; used only for debug logging. */
  getStateLabel(): string
  /** Register the callback invoked with each inbound data frame. */
  setOnData(callback: (data: string) => void): void
  /** Register the callback invoked when the read stream closes (optional close code). */
  setOnClose(callback: (closeCode?: number) => void): void
  /** Register the callback invoked once the transport becomes ready for writes. */
  setOnConnect(callback: () => void): void
  /** Start the transport. Wire the setOn* callbacks before calling this. */
  connect(): void
  /**
   * High-water mark of the underlying read stream's event sequence numbers.
   * replBridge reads this before swapping transports so the new one can
   * resume from where the old one left off (otherwise the server replays
   * the entire session history from seq 0).
   *
   * v1 returns 0 — Session-Ingress WS doesn't use SSE sequence numbers;
   * replay-on-reconnect is handled by the server-side message cursor.
   */
  getLastSequenceNum(): number
  /**
   * Monotonic count of batches dropped via maxConsecutiveFailures.
   * Snapshot before writeBatch() and compare after to detect silent drops
   * (writeBatch() resolves normally even when batches were dropped).
   * v2 returns 0 — the v2 write path doesn't set maxConsecutiveFailures.
   */
  readonly droppedBatchCount: number
  /**
   * PUT /worker state (v2 only; v1 is a no-op). `requires_action` tells
   * the backend a permission prompt is pending — claude.ai shows the
   * "waiting for input" indicator. REPL/daemon callers don't need this
   * (user watches the REPL locally); multi-session worker callers do.
   */
  reportState(state: SessionState): void
  /** PUT /worker external_metadata (v2 only; v1 is a no-op). */
  reportMetadata(metadata: Record<string, unknown>): void
  /**
   * POST /worker/events/{id}/delivery (v2 only; v1 is a no-op). Populates
   * CCR's processing_at/processed_at columns. `received` is auto-fired by
   * CCRClient on every SSE frame and is not exposed here.
   */
  reportDelivery(eventId: string, status: 'processing' | 'processed'): void
  /**
   * Drain the write queue before close() (v2 only; v1 resolves
   * immediately — HybridTransport POSTs are already awaited per-write).
   */
  flush(): Promise<void>
}
|
||||
|
||||
/**
|
||||
* v1 adapter: HybridTransport already has the full surface (it extends
|
||||
* WebSocketTransport which has setOnConnect + getStateLabel). This is a
|
||||
* no-op wrapper that exists only so replBridge's `transport` variable
|
||||
* has a single type.
|
||||
*/
|
||||
export function createV1ReplTransport(
|
||||
hybrid: HybridTransport,
|
||||
): ReplBridgeTransport {
|
||||
return {
|
||||
write: msg => hybrid.write(msg),
|
||||
writeBatch: msgs => hybrid.writeBatch(msgs),
|
||||
close: () => hybrid.close(),
|
||||
isConnectedStatus: () => hybrid.isConnectedStatus(),
|
||||
getStateLabel: () => hybrid.getStateLabel(),
|
||||
setOnData: cb => hybrid.setOnData(cb),
|
||||
setOnClose: cb => hybrid.setOnClose(cb),
|
||||
setOnConnect: cb => hybrid.setOnConnect(cb),
|
||||
connect: () => void hybrid.connect(),
|
||||
// v1 Session-Ingress WS doesn't use SSE sequence numbers; replay
|
||||
// semantics are different. Always return 0 so the seq-num carryover
|
||||
// logic in replBridge is a no-op for v1.
|
||||
getLastSequenceNum: () => 0,
|
||||
get droppedBatchCount() {
|
||||
return hybrid.droppedBatchCount
|
||||
},
|
||||
reportState: () => {},
|
||||
reportMetadata: () => {},
|
||||
reportDelivery: () => {},
|
||||
flush: () => Promise.resolve(),
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* v2 adapter: wrap SSETransport (reads) + CCRClient (writes, heartbeat,
|
||||
* state, delivery tracking).
|
||||
*
|
||||
* Auth: v2 endpoints validate the JWT's session_id claim (register_worker.go:32)
|
||||
* and worker role (environment_auth.py:856). OAuth tokens have neither.
|
||||
* This is the inverse of the v1 replBridge path, which deliberately uses OAuth.
|
||||
* The JWT is refreshed when the poll loop re-dispatches work — the caller
|
||||
* invokes createV2ReplTransport again with the fresh token.
|
||||
*
|
||||
* Registration happens here (not in the caller) so the entire v2 handshake
|
||||
* is one async step. registerWorker failure propagates — replBridge will
|
||||
* catch it and stay on the poll loop.
|
||||
*/
|
||||
export async function createV2ReplTransport(opts: {
  /** http(s) base URL of the session — used for registerWorker, the SSE stream URL, and CCRClient. */
  sessionUrl: string
  /** Session-Ingress auth token; published to the process-wide env var when getAuthToken is omitted. */
  ingressToken: string
  /** Session ID passed to SSETransport and used in debug log lines. */
  sessionId: string
  /**
   * SSE sequence-number high-water mark from the previous transport.
   * Passed to the new SSETransport so its first connect() sends
   * from_sequence_num / Last-Event-ID and the server resumes from where
   * the old stream left off. Without this, every transport swap asks the
   * server to replay the entire session history from seq 0.
   */
  initialSequenceNum?: number
  /**
   * Worker epoch from POST /bridge response. When provided, the server
   * already bumped epoch (the /bridge call IS the register — see server
   * PR #293280). When omitted (v1 CCR-v2 path via replBridge.ts poll loop),
   * call registerWorker as before.
   */
  epoch?: number
  /** CCRClient heartbeat interval. Defaults to 20s when omitted. */
  heartbeatIntervalMs?: number
  /** ±fraction per-beat jitter. Defaults to 0 (no jitter) when omitted. */
  heartbeatJitterFraction?: number
  /**
   * When true, skip opening the SSE read stream — only the CCRClient write
   * path is activated. Use for mirror-mode attachments that forward events
   * but never receive inbound prompts or control requests.
   */
  outboundOnly?: boolean
  /**
   * Per-instance auth header source. When provided, CCRClient + SSETransport
   * read auth from this closure instead of the process-wide
   * CLAUDE_CODE_SESSION_ACCESS_TOKEN env var. Required for callers managing
   * multiple concurrent sessions — the env-var path stomps across sessions.
   * When omitted, falls back to the env var (single-session callers).
   */
  getAuthToken?: () => string | undefined
}): Promise<ReplBridgeTransport> {
  const {
    sessionUrl,
    ingressToken,
    sessionId,
    initialSequenceNum,
    getAuthToken,
  } = opts

  // Auth header builder. If getAuthToken is provided, read from it
  // (per-instance, multi-session safe). Otherwise write ingressToken to
  // the process-wide env var (legacy single-session path — CCRClient's
  // default getAuthHeaders reads it via getSessionIngressAuthHeaders).
  let getAuthHeaders: (() => Record<string, string>) | undefined
  if (getAuthToken) {
    getAuthHeaders = (): Record<string, string> => {
      const token = getAuthToken()
      // No token available right now → send no Authorization header.
      if (!token) return {}
      return { Authorization: `Bearer ${token}` }
    }
  } else {
    // CCRClient.request() and SSETransport.connect() both read auth via
    // getSessionIngressAuthHeaders() → this env var. Set it before either
    // touches the network.
    updateSessionIngressAuthToken(ingressToken)
  }

  const epoch = opts.epoch ?? (await registerWorker(sessionUrl, ingressToken))
  logForDebugging(
    `[bridge:repl] CCR v2: worker sessionId=${sessionId} epoch=${epoch}${opts.epoch !== undefined ? ' (from /bridge)' : ' (via registerWorker)'}`,
  )

  // Derive SSE stream URL. Same logic as transportUtils.ts:26-33 but
  // starting from an http(s) base instead of a --sdk-url that might be ws://.
  const sseUrl = new URL(sessionUrl)
  sseUrl.pathname = sseUrl.pathname.replace(/\/$/, '') + '/worker/events/stream'

  const sse = new SSETransport(
    sseUrl,
    {},
    sessionId,
    undefined,
    initialSequenceNum,
    getAuthHeaders,
  )
  // Captured by onEpochMismatch and the init-failure path below; assigned in setOnClose.
  let onCloseCb: ((closeCode?: number) => void) | undefined
  const ccr = new CCRClient(sse, new URL(sessionUrl), {
    getAuthHeaders,
    heartbeatIntervalMs: opts.heartbeatIntervalMs,
    heartbeatJitterFraction: opts.heartbeatJitterFraction,
    // Default is process.exit(1) — correct for spawn-mode children. In-process,
    // that kills the REPL. Close instead: replBridge's onClose wakes the poll
    // loop, which picks up the server's re-dispatch (with fresh epoch).
    onEpochMismatch: () => {
      logForDebugging(
        '[bridge:repl] CCR v2: epoch superseded (409) — closing for poll-loop recovery',
      )
      // Close resources in a try block so the throw always executes.
      // If ccr.close() or sse.close() throw, we still need to unwind
      // the caller (request()) — otherwise handleEpochMismatch's `never`
      // return type is violated at runtime and control falls through.
      try {
        ccr.close()
        sse.close()
        onCloseCb?.(4090)
      } catch (closeErr: unknown) {
        logForDebugging(
          `[bridge:repl] CCR v2: error during epoch-mismatch cleanup: ${errorMessage(closeErr)}`,
          { level: 'error' },
        )
      }
      // Don't return — the calling request() code continues after the 409
      // branch, so callers see the logged warning and a false return. We
      // throw to unwind; the uploaders catch it as a send failure.
      throw new Error('epoch superseded')
    },
  })

  // CCRClient's constructor wired sse.setOnEvent → reportDelivery('received').
  // remoteIO.ts additionally sends 'processing'/'processed' via
  // setCommandLifecycleListener, which the in-process query loop fires. This
  // transport's only caller (replBridge/daemonBridge) has no such wiring — the
  // daemon's agent child is a separate process (ProcessTransport), and its
  // notifyCommandLifecycle calls fire with listener=null in its own module
  // scope. So events stay at 'received' forever, and reconnectSession re-queues
  // them on every daemon restart (observed: 21→24→25 phantom prompts as
  // "user sent a new message while you were working" system-reminders).
  //
  // Fix: ACK 'processed' immediately alongside 'received'. The window between
  // SSE receipt and transcript-write is narrow (queue → SDK → child stdin →
  // model); a crash there loses one prompt vs. the observed N-prompt flood on
  // every restart. Overwrite the constructor's wiring to do both — setOnEvent
  // replaces, not appends (SSETransport.ts:658).
  sse.setOnEvent(event => {
    ccr.reportDelivery(event.event_id, 'received')
    ccr.reportDelivery(event.event_id, 'processed')
  })

  // Both sse.connect() and ccr.initialize() are deferred to connect() below.
  // replBridge's calling order is newTransport → setOnConnect → setOnData →
  // setOnClose → connect(), and both calls need those callbacks wired first:
  // sse.connect() opens the stream (events flow to onData/onClose immediately),
  // and ccr.initialize().then() fires onConnectCb.
  //
  // onConnect fires once ccr.initialize() resolves. Writes go via
  // CCRClient HTTP POST (SerialBatchEventUploader), not SSE, so the
  // write path is ready the moment workerEpoch is set. SSE.connect()
  // awaits its read loop and never resolves — don't gate on it.
  // The SSE stream opens in parallel (~30ms) and starts delivering
  // inbound events via setOnData; outbound doesn't need to wait for it.
  let onConnectCb: (() => void) | undefined
  let ccrInitialized = false
  // Set by close(); writeBatch checks it between messages to stop after teardown.
  let closed = false

  return {
    write(msg) {
      return ccr.writeEvent(msg)
    },
    async writeBatch(msgs) {
      // SerialBatchEventUploader already batches internally (maxBatchSize=100);
      // sequential enqueue preserves order and the uploader coalesces.
      // Check closed between writes to avoid sending partial batches after
      // transport teardown (epoch mismatch, SSE drop).
      for (const m of msgs) {
        if (closed) break
        await ccr.writeEvent(m)
      }
    },
    close() {
      closed = true
      ccr.close()
      sse.close()
    },
    isConnectedStatus() {
      // Write-readiness, not read-readiness — replBridge checks this
      // before calling writeBatch. SSE open state is orthogonal.
      return ccrInitialized
    },
    getStateLabel() {
      // SSETransport doesn't expose its state string; synthesize from
      // what we can observe. replBridge only uses this for debug logging.
      if (sse.isClosedStatus()) return 'closed'
      if (sse.isConnectedStatus()) return ccrInitialized ? 'connected' : 'init'
      return 'connecting'
    },
    setOnData(cb) {
      sse.setOnData(cb)
    },
    setOnClose(cb) {
      onCloseCb = cb
      // SSE reconnect-budget exhaustion fires onClose(undefined) — map to
      // 4092 so ws_closed telemetry can distinguish it from HTTP-status
      // closes (SSETransport:280 passes response.status). Stop CCRClient's
      // heartbeat timer before notifying replBridge. (sse.close() doesn't
      // invoke this, so the epoch-mismatch path above isn't double-firing.)
      sse.setOnClose(code => {
        ccr.close()
        cb(code ?? 4092)
      })
    },
    setOnConnect(cb) {
      onConnectCb = cb
    },
    getLastSequenceNum() {
      return sse.getLastSequenceNum()
    },
    // v2 write path (CCRClient) doesn't set maxConsecutiveFailures — no drops.
    droppedBatchCount: 0,
    reportState(state) {
      ccr.reportState(state)
    },
    reportMetadata(metadata) {
      ccr.reportMetadata(metadata)
    },
    reportDelivery(eventId, status) {
      ccr.reportDelivery(eventId, status)
    },
    flush() {
      return ccr.flush()
    },
    connect() {
      // Outbound-only: skip the SSE read stream entirely — no inbound
      // events to receive, no delivery ACKs to send. Only the CCRClient
      // write path (POST /worker/events) and heartbeat are needed.
      if (!opts.outboundOnly) {
        // Fire-and-forget — SSETransport.connect() awaits readStream()
        // (the read loop) and only resolves on stream close/error. The
        // spawn-mode path in remoteIO.ts does the same void discard.
        void sse.connect()
      }
      void ccr.initialize(epoch).then(
        () => {
          ccrInitialized = true
          logForDebugging(
            `[bridge:repl] v2 transport ready for writes (epoch=${epoch}, sse=${sse.isConnectedStatus() ? 'open' : 'opening'})`,
          )
          onConnectCb?.()
        },
        (err: unknown) => {
          logForDebugging(
            `[bridge:repl] CCR v2 initialize failed: ${errorMessage(err)}`,
            { level: 'error' },
          )
          // Close transport resources and notify replBridge via onClose
          // so the poll loop can retry on the next work dispatch.
          // Without this callback, replBridge never learns the transport
          // failed to initialize and sits with transport === null forever.
          ccr.close()
          sse.close()
          onCloseCb?.(4091) // 4091 = init failure, distinguishable from 4090 epoch mismatch
        },
      )
    },
  }
}
|
||||
57
src/bridge/sessionIdCompat.ts
Normal file
57
src/bridge/sessionIdCompat.ts
Normal file
@ -0,0 +1,57 @@
|
||||
/**
|
||||
* Session ID tag translation helpers for the CCR v2 compat layer.
|
||||
*
|
||||
* Lives in its own file (rather than workSecret.ts) so that sessionHandle.ts
|
||||
* and replBridgeTransport.ts (bridge.mjs entry points) can import from
|
||||
* workSecret.ts without pulling in these retag functions.
|
||||
*
|
||||
* The isCseShimEnabled kill switch is injected via setCseShimGate() to avoid
|
||||
* a static import of bridgeEnabled.ts → growthbook.ts → config.ts — all
|
||||
* banned from the sdk.mjs bundle (scripts/build-agent-sdk.sh). Callers that
|
||||
* already import bridgeEnabled.ts register the gate; the SDK path never does,
|
||||
* so the shim defaults to active (matching isCseShimEnabled()'s own default).
|
||||
*/
|
||||
|
||||
let _isCseShimEnabled: (() => boolean) | undefined
|
||||
|
||||
/**
|
||||
* Register the GrowthBook gate for the cse_ shim. Called from bridge
|
||||
* init code that already imports bridgeEnabled.ts.
|
||||
*/
|
||||
export function setCseShimGate(gate: () => boolean): void {
|
||||
_isCseShimEnabled = gate
|
||||
}
|
||||
|
||||
/**
|
||||
* Re-tag a `cse_*` session ID to `session_*` for use with the v1 compat API.
|
||||
*
|
||||
* Worker endpoints (/v1/code/sessions/{id}/worker/*) want `cse_*`; that's
|
||||
* what the work poll delivers. Client-facing compat endpoints
|
||||
* (/v1/sessions/{id}, /v1/sessions/{id}/archive, /v1/sessions/{id}/events)
|
||||
* want `session_*` — compat/convert.go:27 validates TagSession. Same UUID,
|
||||
* different costume. No-op for IDs that aren't `cse_*`.
|
||||
*
|
||||
* bridgeMain holds one sessionId variable for both worker registration and
|
||||
* session-management calls. It arrives as `cse_*` from the work poll under
|
||||
* the compat gate, so archiveSession/fetchSessionTitle need this re-tag.
|
||||
*/
|
||||
export function toCompatSessionId(id: string): string {
|
||||
if (!id.startsWith('cse_')) return id
|
||||
if (_isCseShimEnabled && !_isCseShimEnabled()) return id
|
||||
return 'session_' + id.slice('cse_'.length)
|
||||
}
|
||||
|
||||
/**
|
||||
* Re-tag a `session_*` session ID to `cse_*` for infrastructure-layer calls.
|
||||
*
|
||||
* Inverse of toCompatSessionId. POST /v1/environments/{id}/bridge/reconnect
|
||||
* lives below the compat layer: once ccr_v2_compat_enabled is on server-side,
|
||||
* it looks sessions up by their infra tag (`cse_*`). createBridgeSession still
|
||||
* returns `session_*` (compat/convert.go:41) and that's what bridge-pointer
|
||||
* stores — so perpetual reconnect passes the wrong costume and gets "Session
|
||||
* not found" back. Same UUID, wrong tag. No-op for IDs that aren't `session_*`.
|
||||
*/
|
||||
export function toInfraSessionId(id: string): string {
|
||||
if (!id.startsWith('session_')) return id
|
||||
return 'cse_' + id.slice('session_'.length)
|
||||
}
|
||||
550
src/bridge/sessionRunner.ts
Normal file
550
src/bridge/sessionRunner.ts
Normal file
@ -0,0 +1,550 @@
|
||||
import { type ChildProcess, spawn } from 'child_process'
|
||||
import { createWriteStream, type WriteStream } from 'fs'
|
||||
import { tmpdir } from 'os'
|
||||
import { dirname, join } from 'path'
|
||||
import { createInterface } from 'readline'
|
||||
import { jsonParse, jsonStringify } from '../utils/slowOperations.js'
|
||||
import { debugTruncate } from './debugUtils.js'
|
||||
import type {
|
||||
SessionActivity,
|
||||
SessionDoneStatus,
|
||||
SessionHandle,
|
||||
SessionSpawner,
|
||||
SessionSpawnOpts,
|
||||
} from './types.js'
|
||||
|
||||
// Cap for the per-session activity ring buffer (oldest entries shifted out).
const MAX_ACTIVITIES = 10
// Cap for the retained-stderr ring buffer used in error diagnostics.
const MAX_STDERR_LINES = 10
||||
|
||||
/**
|
||||
* Sanitize a session ID for use in file names.
|
||||
* Strips any characters that could cause path traversal (e.g. `../`, `/`)
|
||||
* or other filesystem issues, replacing them with underscores.
|
||||
*/
|
||||
export function safeFilenameId(id: string): string {
|
||||
return id.replace(/[^a-zA-Z0-9_-]/g, '_')
|
||||
}
|
||||
|
||||
/**
|
||||
* A control_request emitted by the child CLI when it needs permission to
|
||||
* execute a **specific** tool invocation (not a general capability check).
|
||||
* The bridge forwards this to the server so the user can approve/deny.
|
||||
*/
|
||||
export type PermissionRequest = {
  /** NDJSON discriminator on the child's stdout stream. */
  type: 'control_request'
  // NOTE(review): presumably echoed back in the matching control_response —
  // confirm against the bridge's response path (not visible in this file chunk).
  request_id: string
  request: {
    /** Per-invocation permission check — "may I run this tool with these inputs?" */
    subtype: 'can_use_tool'
    /** Name of the tool the child wants to run (e.g. Bash, Edit). */
    tool_name: string
    /** The exact tool input awaiting approval. */
    input: Record<string, unknown>
    /** ID of the pending tool_use content block this request refers to. */
    tool_use_id: string
  }
}
|
||||
|
||||
type SessionSpawnerDeps = {
  /** Executable to spawn for each child session (node runtime or compiled binary). */
  execPath: string
  /**
   * Arguments that must precede the CLI flags when spawning. Empty for
   * compiled binaries (where execPath is the claude binary itself); contains
   * the script path (process.argv[1]) for npm installs where execPath is the
   * node runtime. Without this, node sees --sdk-url as a node option and
   * exits with "bad option: --sdk-url" (see anthropics/claude-code#28334).
   */
  scriptArgs: string[]
  /** Base environment for the child; augmented per-session in spawn(). */
  env: NodeJS.ProcessEnv
  /** When true, forwards child stdout/stderr to the bridge's stderr and passes --verbose. */
  verbose: boolean
  /** When true, sets CLAUDE_CODE_FORCE_SANDBOX=1 in the child's environment. */
  sandbox: boolean
  /** Optional debug-log path; spawn() appends a per-session suffix for uniqueness. */
  debugFile?: string
  /** Forwarded to the child as --permission-mode when set. */
  permissionMode?: string
  /** Sink for bridge debug log lines. */
  onDebug: (msg: string) => void
  /** Fired for each activity extracted from the child's stdout NDJSON. */
  onActivity?: (sessionId: string, activity: SessionActivity) => void
  // Fired when the child emits a can_use_tool control_request so the bridge
  // can surface the permission prompt (wiring continues past this chunk).
  onPermissionRequest?: (
    sessionId: string,
    request: PermissionRequest,
    accessToken: string,
  ) => void
}
|
||||
|
||||
/** Map tool names to human-readable verbs for the status display. */
|
||||
const TOOL_VERBS: Record<string, string> = {
|
||||
Read: 'Reading',
|
||||
Write: 'Writing',
|
||||
Edit: 'Editing',
|
||||
MultiEdit: 'Editing',
|
||||
Bash: 'Running',
|
||||
Glob: 'Searching',
|
||||
Grep: 'Searching',
|
||||
WebFetch: 'Fetching',
|
||||
WebSearch: 'Searching',
|
||||
Task: 'Running task',
|
||||
FileReadTool: 'Reading',
|
||||
FileWriteTool: 'Writing',
|
||||
FileEditTool: 'Editing',
|
||||
GlobTool: 'Searching',
|
||||
GrepTool: 'Searching',
|
||||
BashTool: 'Running',
|
||||
NotebookEditTool: 'Editing notebook',
|
||||
LSP: 'LSP',
|
||||
}
|
||||
|
||||
function toolSummary(name: string, input: Record<string, unknown>): string {
|
||||
const verb = TOOL_VERBS[name] ?? name
|
||||
const target =
|
||||
(input.file_path as string) ??
|
||||
(input.filePath as string) ??
|
||||
(input.pattern as string) ??
|
||||
(input.command as string | undefined)?.slice(0, 60) ??
|
||||
(input.url as string) ??
|
||||
(input.query as string) ??
|
||||
''
|
||||
if (target) {
|
||||
return `${verb} ${target}`
|
||||
}
|
||||
return verb
|
||||
}
|
||||
|
||||
function extractActivities(
|
||||
line: string,
|
||||
sessionId: string,
|
||||
onDebug: (msg: string) => void,
|
||||
): SessionActivity[] {
|
||||
let parsed: unknown
|
||||
try {
|
||||
parsed = jsonParse(line)
|
||||
} catch {
|
||||
return []
|
||||
}
|
||||
|
||||
if (!parsed || typeof parsed !== 'object') {
|
||||
return []
|
||||
}
|
||||
|
||||
const msg = parsed as Record<string, unknown>
|
||||
const activities: SessionActivity[] = []
|
||||
const now = Date.now()
|
||||
|
||||
switch (msg.type) {
|
||||
case 'assistant': {
|
||||
const message = msg.message as Record<string, unknown> | undefined
|
||||
if (!message) break
|
||||
const content = message.content
|
||||
if (!Array.isArray(content)) break
|
||||
|
||||
for (const block of content) {
|
||||
if (!block || typeof block !== 'object') continue
|
||||
const b = block as Record<string, unknown>
|
||||
|
||||
if (b.type === 'tool_use') {
|
||||
const name = (b.name as string) ?? 'Tool'
|
||||
const input = (b.input as Record<string, unknown>) ?? {}
|
||||
const summary = toolSummary(name, input)
|
||||
activities.push({
|
||||
type: 'tool_start',
|
||||
summary,
|
||||
timestamp: now,
|
||||
})
|
||||
onDebug(
|
||||
`[bridge:activity] sessionId=${sessionId} tool_use name=${name} ${inputPreview(input)}`,
|
||||
)
|
||||
} else if (b.type === 'text') {
|
||||
const text = (b.text as string) ?? ''
|
||||
if (text.length > 0) {
|
||||
activities.push({
|
||||
type: 'text',
|
||||
summary: text.slice(0, 80),
|
||||
timestamp: now,
|
||||
})
|
||||
onDebug(
|
||||
`[bridge:activity] sessionId=${sessionId} text "${text.slice(0, 100)}"`,
|
||||
)
|
||||
}
|
||||
}
|
||||
}
|
||||
break
|
||||
}
|
||||
case 'result': {
|
||||
const subtype = msg.subtype as string | undefined
|
||||
if (subtype === 'success') {
|
||||
activities.push({
|
||||
type: 'result',
|
||||
summary: 'Session completed',
|
||||
timestamp: now,
|
||||
})
|
||||
onDebug(
|
||||
`[bridge:activity] sessionId=${sessionId} result subtype=success`,
|
||||
)
|
||||
} else if (subtype) {
|
||||
const errors = msg.errors as string[] | undefined
|
||||
const errorSummary = errors?.[0] ?? `Error: ${subtype}`
|
||||
activities.push({
|
||||
type: 'error',
|
||||
summary: errorSummary,
|
||||
timestamp: now,
|
||||
})
|
||||
onDebug(
|
||||
`[bridge:activity] sessionId=${sessionId} result subtype=${subtype} error="${errorSummary}"`,
|
||||
)
|
||||
} else {
|
||||
onDebug(
|
||||
`[bridge:activity] sessionId=${sessionId} result subtype=undefined`,
|
||||
)
|
||||
}
|
||||
break
|
||||
}
|
||||
default:
|
||||
break
|
||||
}
|
||||
|
||||
return activities
|
||||
}
|
||||
|
||||
/**
|
||||
* Extract plain text from a replayed SDKUserMessage NDJSON line. Returns the
|
||||
* trimmed text if this looks like a real human-authored message, otherwise
|
||||
* undefined so the caller keeps waiting for the first real message.
|
||||
*/
|
||||
function extractUserMessageText(
|
||||
msg: Record<string, unknown>,
|
||||
): string | undefined {
|
||||
// Skip tool-result user messages (wrapped subagent results) and synthetic
|
||||
// caveat messages — neither is human-authored.
|
||||
if (msg.parent_tool_use_id != null || msg.isSynthetic || msg.isReplay)
|
||||
return undefined
|
||||
|
||||
const message = msg.message as Record<string, unknown> | undefined
|
||||
const content = message?.content
|
||||
let text: string | undefined
|
||||
if (typeof content === 'string') {
|
||||
text = content
|
||||
} else if (Array.isArray(content)) {
|
||||
for (const block of content) {
|
||||
if (
|
||||
block &&
|
||||
typeof block === 'object' &&
|
||||
(block as Record<string, unknown>).type === 'text'
|
||||
) {
|
||||
text = (block as Record<string, unknown>).text as string | undefined
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
text = text?.trim()
|
||||
return text ? text : undefined
|
||||
}
|
||||
|
||||
/** Build a short preview of tool input for debug logging. */
|
||||
function inputPreview(input: Record<string, unknown>): string {
|
||||
const parts: string[] = []
|
||||
for (const [key, val] of Object.entries(input)) {
|
||||
if (typeof val === 'string') {
|
||||
parts.push(`${key}="${val.slice(0, 100)}"`)
|
||||
}
|
||||
if (parts.length >= 3) break
|
||||
}
|
||||
return parts.join(' ')
|
||||
}
|
||||
|
||||
export function createSessionSpawner(deps: SessionSpawnerDeps): SessionSpawner {
|
||||
return {
|
||||
spawn(opts: SessionSpawnOpts, dir: string): SessionHandle {
|
||||
// Debug file resolution:
|
||||
// 1. If deps.debugFile is provided, use it with session ID suffix for uniqueness
|
||||
// 2. If verbose or ant build, auto-generate a temp file path
|
||||
// 3. Otherwise, no debug file
|
||||
const safeId = safeFilenameId(opts.sessionId)
|
||||
let debugFile: string | undefined
|
||||
if (deps.debugFile) {
|
||||
const ext = deps.debugFile.lastIndexOf('.')
|
||||
if (ext > 0) {
|
||||
debugFile = `${deps.debugFile.slice(0, ext)}-${safeId}${deps.debugFile.slice(ext)}`
|
||||
} else {
|
||||
debugFile = `${deps.debugFile}-${safeId}`
|
||||
}
|
||||
} else if (deps.verbose || process.env.USER_TYPE === 'ant') {
|
||||
debugFile = join(tmpdir(), 'claude', `bridge-session-${safeId}.log`)
|
||||
}
|
||||
|
||||
// Transcript file: write raw NDJSON lines for post-hoc analysis.
|
||||
// Placed alongside the debug file when one is configured.
|
||||
let transcriptStream: WriteStream | null = null
|
||||
let transcriptPath: string | undefined
|
||||
if (deps.debugFile) {
|
||||
transcriptPath = join(
|
||||
dirname(deps.debugFile),
|
||||
`bridge-transcript-${safeId}.jsonl`,
|
||||
)
|
||||
transcriptStream = createWriteStream(transcriptPath, { flags: 'a' })
|
||||
transcriptStream.on('error', err => {
|
||||
deps.onDebug(
|
||||
`[bridge:session] Transcript write error: ${err.message}`,
|
||||
)
|
||||
transcriptStream = null
|
||||
})
|
||||
deps.onDebug(`[bridge:session] Transcript log: ${transcriptPath}`)
|
||||
}
|
||||
|
||||
const args = [
|
||||
...deps.scriptArgs,
|
||||
'--print',
|
||||
'--sdk-url',
|
||||
opts.sdkUrl,
|
||||
'--session-id',
|
||||
opts.sessionId,
|
||||
'--input-format',
|
||||
'stream-json',
|
||||
'--output-format',
|
||||
'stream-json',
|
||||
'--replay-user-messages',
|
||||
...(deps.verbose ? ['--verbose'] : []),
|
||||
...(debugFile ? ['--debug-file', debugFile] : []),
|
||||
...(deps.permissionMode
|
||||
? ['--permission-mode', deps.permissionMode]
|
||||
: []),
|
||||
]
|
||||
|
||||
const env: NodeJS.ProcessEnv = {
|
||||
...deps.env,
|
||||
// Strip the bridge's OAuth token so the child CC process uses
|
||||
// the session access token for inference instead.
|
||||
CLAUDE_CODE_OAUTH_TOKEN: undefined,
|
||||
CLAUDE_CODE_ENVIRONMENT_KIND: 'bridge',
|
||||
...(deps.sandbox && { CLAUDE_CODE_FORCE_SANDBOX: '1' }),
|
||||
CLAUDE_CODE_SESSION_ACCESS_TOKEN: opts.accessToken,
|
||||
// v1: HybridTransport (WS reads + POST writes) to Session-Ingress.
|
||||
// Harmless in v2 mode — transportUtils checks CLAUDE_CODE_USE_CCR_V2 first.
|
||||
CLAUDE_CODE_POST_FOR_SESSION_INGRESS_V2: '1',
|
||||
// v2: SSETransport + CCRClient to CCR's /v1/code/sessions/* endpoints.
|
||||
// Same env vars environment-manager sets in the container path.
|
||||
...(opts.useCcrV2 && {
|
||||
CLAUDE_CODE_USE_CCR_V2: '1',
|
||||
CLAUDE_CODE_WORKER_EPOCH: String(opts.workerEpoch),
|
||||
}),
|
||||
}
|
||||
|
||||
deps.onDebug(
|
||||
`[bridge:session] Spawning sessionId=${opts.sessionId} sdkUrl=${opts.sdkUrl} accessToken=${opts.accessToken ? 'present' : 'MISSING'}`,
|
||||
)
|
||||
deps.onDebug(`[bridge:session] Child args: ${args.join(' ')}`)
|
||||
if (debugFile) {
|
||||
deps.onDebug(`[bridge:session] Debug log: ${debugFile}`)
|
||||
}
|
||||
|
||||
// Pipe all three streams: stdin for control, stdout for NDJSON parsing,
|
||||
// stderr for error capture and diagnostics.
|
||||
const child: ChildProcess = spawn(deps.execPath, args, {
|
||||
cwd: dir,
|
||||
stdio: ['pipe', 'pipe', 'pipe'],
|
||||
env,
|
||||
windowsHide: true,
|
||||
})
|
||||
|
||||
deps.onDebug(
|
||||
`[bridge:session] sessionId=${opts.sessionId} pid=${child.pid}`,
|
||||
)
|
||||
|
||||
const activities: SessionActivity[] = []
|
||||
let currentActivity: SessionActivity | null = null
|
||||
const lastStderr: string[] = []
|
||||
let sigkillSent = false
|
||||
let firstUserMessageSeen = false
|
||||
|
||||
// Buffer stderr for error diagnostics
|
||||
if (child.stderr) {
|
||||
const stderrRl = createInterface({ input: child.stderr })
|
||||
stderrRl.on('line', line => {
|
||||
// Forward stderr to bridge's stderr in verbose mode
|
||||
if (deps.verbose) {
|
||||
process.stderr.write(line + '\n')
|
||||
}
|
||||
// Ring buffer of last N lines
|
||||
if (lastStderr.length >= MAX_STDERR_LINES) {
|
||||
lastStderr.shift()
|
||||
}
|
||||
lastStderr.push(line)
|
||||
})
|
||||
}
|
||||
|
||||
// Parse NDJSON from child stdout
|
||||
if (child.stdout) {
|
||||
const rl = createInterface({ input: child.stdout })
|
||||
rl.on('line', line => {
|
||||
// Write raw NDJSON to transcript file
|
||||
if (transcriptStream) {
|
||||
transcriptStream.write(line + '\n')
|
||||
}
|
||||
|
||||
// Log all messages flowing from the child CLI to the bridge
|
||||
deps.onDebug(
|
||||
`[bridge:ws] sessionId=${opts.sessionId} <<< ${debugTruncate(line)}`,
|
||||
)
|
||||
|
||||
// In verbose mode, forward raw output to stderr
|
||||
if (deps.verbose) {
|
||||
process.stderr.write(line + '\n')
|
||||
}
|
||||
|
||||
const extracted = extractActivities(
|
||||
line,
|
||||
opts.sessionId,
|
||||
deps.onDebug,
|
||||
)
|
||||
for (const activity of extracted) {
|
||||
// Maintain ring buffer
|
||||
if (activities.length >= MAX_ACTIVITIES) {
|
||||
activities.shift()
|
||||
}
|
||||
activities.push(activity)
|
||||
currentActivity = activity
|
||||
|
||||
deps.onActivity?.(opts.sessionId, activity)
|
||||
}
|
||||
|
||||
// Detect control_request and replayed user messages.
|
||||
// extractActivities parses the same line but swallows parse errors
|
||||
// and skips 'user' type — re-parse here is cheap (NDJSON lines are
|
||||
// small) and keeps each path self-contained.
|
||||
{
|
||||
let parsed: unknown
|
||||
try {
|
||||
parsed = jsonParse(line)
|
||||
} catch {
|
||||
// Non-JSON line, skip detection
|
||||
}
|
||||
if (parsed && typeof parsed === 'object') {
|
||||
const msg = parsed as Record<string, unknown>
|
||||
|
||||
if (msg.type === 'control_request') {
|
||||
const request = msg.request as
|
||||
| Record<string, unknown>
|
||||
| undefined
|
||||
if (
|
||||
request?.subtype === 'can_use_tool' &&
|
||||
deps.onPermissionRequest
|
||||
) {
|
||||
deps.onPermissionRequest(
|
||||
opts.sessionId,
|
||||
parsed as PermissionRequest,
|
||||
opts.accessToken,
|
||||
)
|
||||
}
|
||||
// interrupt is turn-level; the child handles it internally (print.ts)
|
||||
} else if (
|
||||
msg.type === 'user' &&
|
||||
!firstUserMessageSeen &&
|
||||
opts.onFirstUserMessage
|
||||
) {
|
||||
const text = extractUserMessageText(msg)
|
||||
if (text) {
|
||||
firstUserMessageSeen = true
|
||||
opts.onFirstUserMessage(text)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
const done = new Promise<SessionDoneStatus>(resolve => {
|
||||
child.on('close', (code, signal) => {
|
||||
// Close transcript stream on exit
|
||||
if (transcriptStream) {
|
||||
transcriptStream.end()
|
||||
transcriptStream = null
|
||||
}
|
||||
|
||||
if (signal === 'SIGTERM' || signal === 'SIGINT') {
|
||||
deps.onDebug(
|
||||
`[bridge:session] sessionId=${opts.sessionId} interrupted signal=${signal} pid=${child.pid}`,
|
||||
)
|
||||
resolve('interrupted')
|
||||
} else if (code === 0) {
|
||||
deps.onDebug(
|
||||
`[bridge:session] sessionId=${opts.sessionId} completed exit_code=0 pid=${child.pid}`,
|
||||
)
|
||||
resolve('completed')
|
||||
} else {
|
||||
deps.onDebug(
|
||||
`[bridge:session] sessionId=${opts.sessionId} failed exit_code=${code} pid=${child.pid}`,
|
||||
)
|
||||
resolve('failed')
|
||||
}
|
||||
})
|
||||
|
||||
child.on('error', err => {
|
||||
deps.onDebug(
|
||||
`[bridge:session] sessionId=${opts.sessionId} spawn error: ${err.message}`,
|
||||
)
|
||||
resolve('failed')
|
||||
})
|
||||
})
|
||||
|
||||
const handle: SessionHandle = {
|
||||
sessionId: opts.sessionId,
|
||||
done,
|
||||
activities,
|
||||
accessToken: opts.accessToken,
|
||||
lastStderr,
|
||||
get currentActivity(): SessionActivity | null {
|
||||
return currentActivity
|
||||
},
|
||||
kill(): void {
|
||||
if (!child.killed) {
|
||||
deps.onDebug(
|
||||
`[bridge:session] Sending SIGTERM to sessionId=${opts.sessionId} pid=${child.pid}`,
|
||||
)
|
||||
// On Windows, child.kill('SIGTERM') throws; use default signal.
|
||||
if (process.platform === 'win32') {
|
||||
child.kill()
|
||||
} else {
|
||||
child.kill('SIGTERM')
|
||||
}
|
||||
}
|
||||
},
|
||||
forceKill(): void {
|
||||
// Use separate flag because child.killed is set when kill() is called,
|
||||
// not when the process exits. We need to send SIGKILL even after SIGTERM.
|
||||
if (!sigkillSent && child.pid) {
|
||||
sigkillSent = true
|
||||
deps.onDebug(
|
||||
`[bridge:session] Sending SIGKILL to sessionId=${opts.sessionId} pid=${child.pid}`,
|
||||
)
|
||||
if (process.platform === 'win32') {
|
||||
child.kill()
|
||||
} else {
|
||||
child.kill('SIGKILL')
|
||||
}
|
||||
}
|
||||
},
|
||||
writeStdin(data: string): void {
|
||||
if (child.stdin && !child.stdin.destroyed) {
|
||||
deps.onDebug(
|
||||
`[bridge:ws] sessionId=${opts.sessionId} >>> ${debugTruncate(data)}`,
|
||||
)
|
||||
child.stdin.write(data)
|
||||
}
|
||||
},
|
||||
updateAccessToken(token: string): void {
|
||||
handle.accessToken = token
|
||||
// Send the fresh token to the child process via stdin. The child's
|
||||
// StructuredIO handles update_environment_variables messages by
|
||||
// setting process.env directly, so getSessionIngressAuthToken()
|
||||
// picks up the new token on the next refreshHeaders call.
|
||||
handle.writeStdin(
|
||||
jsonStringify({
|
||||
type: 'update_environment_variables',
|
||||
variables: { CLAUDE_CODE_SESSION_ACCESS_TOKEN: token },
|
||||
}) + '\n',
|
||||
)
|
||||
deps.onDebug(
|
||||
`[bridge:session] Sent token refresh via stdin for sessionId=${opts.sessionId}`,
|
||||
)
|
||||
},
|
||||
}
|
||||
|
||||
return handle
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
export { extractActivities as _extractActivitiesForTesting }
|
||||
210
src/bridge/trustedDevice.ts
Normal file
210
src/bridge/trustedDevice.ts
Normal file
@ -0,0 +1,210 @@
|
||||
import axios from 'axios'
|
||||
import memoize from 'lodash-es/memoize.js'
|
||||
import { hostname } from 'os'
|
||||
import { getOauthConfig } from '../constants/oauth.js'
|
||||
import {
|
||||
checkGate_CACHED_OR_BLOCKING,
|
||||
getFeatureValue_CACHED_MAY_BE_STALE,
|
||||
} from '../services/analytics/growthbook.js'
|
||||
import { logForDebugging } from '../utils/debug.js'
|
||||
import { errorMessage } from '../utils/errors.js'
|
||||
import { isEssentialTrafficOnly } from '../utils/privacyLevel.js'
|
||||
import { getSecureStorage } from '../utils/secureStorage/index.js'
|
||||
import { jsonStringify } from '../utils/slowOperations.js'
|
||||
|
||||
/**
|
||||
* Trusted device token source for bridge (remote-control) sessions.
|
||||
*
|
||||
* Bridge sessions have SecurityTier=ELEVATED on the server (CCR v2).
|
||||
* The server gates ConnectBridgeWorker on its own flag
|
||||
* (sessions_elevated_auth_enforcement in Anthropic Main); this CLI-side
|
||||
* flag controls whether the CLI sends X-Trusted-Device-Token at all.
|
||||
* Two flags so rollout can be staged: flip CLI-side first (headers
|
||||
* start flowing, server still no-ops), then flip server-side.
|
||||
*
|
||||
* Enrollment (POST /auth/trusted_devices) is gated server-side by
|
||||
* account_session.created_at < 10min, so it must happen during /login.
|
||||
* Token is persistent (90d rolling expiry) and stored in keychain.
|
||||
*
|
||||
* See anthropics/anthropic#274559 (spec), #310375 (B1b tenant RPCs),
|
||||
* #295987 (B2 Python routes), #307150 (C1' CCR v2 gate).
|
||||
*/
|
||||
|
||||
const TRUSTED_DEVICE_GATE = 'tengu_sessions_elevated_auth_enforcement'
|
||||
|
||||
function isGateEnabled(): boolean {
|
||||
return getFeatureValue_CACHED_MAY_BE_STALE(TRUSTED_DEVICE_GATE, false)
|
||||
}
|
||||
|
||||
// Memoized — secureStorage.read() spawns a macOS `security` subprocess (~40ms).
|
||||
// bridgeApi.ts calls this from getHeaders() on every poll/heartbeat/ack.
|
||||
// Cache cleared after enrollment (below) and on logout (clearAuthRelatedCaches).
|
||||
//
|
||||
// Only the storage read is memoized — the GrowthBook gate is checked live so
|
||||
// that a gate flip after GrowthBook refresh takes effect without a restart.
|
||||
const readStoredToken = memoize((): string | undefined => {
|
||||
// Env var takes precedence for testing/canary.
|
||||
const envToken = process.env.CLAUDE_TRUSTED_DEVICE_TOKEN
|
||||
if (envToken) {
|
||||
return envToken
|
||||
}
|
||||
return getSecureStorage().read()?.trustedDeviceToken
|
||||
})
|
||||
|
||||
export function getTrustedDeviceToken(): string | undefined {
|
||||
if (!isGateEnabled()) {
|
||||
return undefined
|
||||
}
|
||||
return readStoredToken()
|
||||
}
|
||||
|
||||
/**
 * Drop the memoized keychain read so the next getTrustedDeviceToken() call
 * re-reads secure storage (cleared after enrollment and on logout).
 */
export function clearTrustedDeviceTokenCache(): void {
  readStoredToken.cache?.clear?.()
}
|
||||
|
||||
/**
|
||||
* Clear the stored trusted device token from secure storage and the memo cache.
|
||||
* Called before enrollTrustedDevice() during /login so a stale token from the
|
||||
* previous account isn't sent as X-Trusted-Device-Token while enrollment is
|
||||
* in-flight (enrollTrustedDevice is async — bridge API calls between login and
|
||||
* enrollment completion would otherwise still read the old cached token).
|
||||
*/
|
||||
export function clearTrustedDeviceToken(): void {
|
||||
if (!isGateEnabled()) {
|
||||
return
|
||||
}
|
||||
const secureStorage = getSecureStorage()
|
||||
try {
|
||||
const data = secureStorage.read()
|
||||
if (data?.trustedDeviceToken) {
|
||||
delete data.trustedDeviceToken
|
||||
secureStorage.update(data)
|
||||
}
|
||||
} catch {
|
||||
// Best-effort — don't block login if storage is inaccessible
|
||||
}
|
||||
readStoredToken.cache?.clear?.()
|
||||
}
|
||||
|
||||
/**
|
||||
* Enroll this device via POST /auth/trusted_devices and persist the token
|
||||
* to keychain. Best-effort — logs and returns on failure so callers
|
||||
* (post-login hooks) don't block the login flow.
|
||||
*
|
||||
* The server gates enrollment on account_session.created_at < 10min, so
|
||||
* this must be called immediately after a fresh /login. Calling it later
|
||||
* (e.g. lazy enrollment on /bridge 403) will fail with 403 stale_session.
|
||||
*/
|
||||
export async function enrollTrustedDevice(): Promise<void> {
  try {
    // checkGate_CACHED_OR_BLOCKING awaits any in-flight GrowthBook re-init
    // (triggered by refreshGrowthBookAfterAuthChange in login.tsx) before
    // reading the gate, so we get the post-refresh value.
    if (!(await checkGate_CACHED_OR_BLOCKING(TRUSTED_DEVICE_GATE))) {
      logForDebugging(
        `[trusted-device] Gate ${TRUSTED_DEVICE_GATE} is off, skipping enrollment`,
      )
      return
    }
    // If CLAUDE_TRUSTED_DEVICE_TOKEN is set (e.g. by an enterprise wrapper),
    // skip enrollment — the env var takes precedence in readStoredToken() so
    // any enrolled token would be shadowed and never used.
    if (process.env.CLAUDE_TRUSTED_DEVICE_TOKEN) {
      logForDebugging(
        '[trusted-device] CLAUDE_TRUSTED_DEVICE_TOKEN env var is set, skipping enrollment (env var takes precedence)',
      )
      return
    }
    // Lazy require — utils/auth.ts transitively pulls ~1300 modules
    // (config → file → permissions → sessionStorage → commands). Daemon callers
    // of getTrustedDeviceToken() don't need this; only /login does.
    /* eslint-disable @typescript-eslint/no-require-imports */
    const { getClaudeAIOAuthTokens } =
      require('../utils/auth.js') as typeof import('../utils/auth.js')
    /* eslint-enable @typescript-eslint/no-require-imports */
    const accessToken = getClaudeAIOAuthTokens()?.accessToken
    if (!accessToken) {
      logForDebugging('[trusted-device] No OAuth token, skipping enrollment')
      return
    }
    // Always re-enroll on /login — the existing token may belong to a
    // different account (account-switch without /logout). Skipping enrollment
    // would send the old account's token on the new account's bridge calls.
    const secureStorage = getSecureStorage()

    if (isEssentialTrafficOnly()) {
      logForDebugging(
        '[trusted-device] Essential traffic only, skipping enrollment',
      )
      return
    }

    const baseUrl = getOauthConfig().BASE_API_URL
    let response
    try {
      // validateStatus < 500: 4xx responses resolve and are handled by the
      // status check below; only 5xx (and network errors) reject into catch.
      response = await axios.post<{
        device_token?: string
        device_id?: string
      }>(
        `${baseUrl}/api/auth/trusted_devices`,
        { display_name: `Claude Code on ${hostname()} · ${process.platform}` },
        {
          headers: {
            Authorization: `Bearer ${accessToken}`,
            'Content-Type': 'application/json',
          },
          timeout: 10_000,
          validateStatus: s => s < 500,
        },
      )
    } catch (err: unknown) {
      logForDebugging(
        `[trusted-device] Enrollment request failed: ${errorMessage(err)}`,
      )
      return
    }

    // Non-2xx here is a resolved 4xx (e.g. 403 stale_session when called
    // more than ~10min after login) — log and bail, best-effort.
    if (response.status !== 200 && response.status !== 201) {
      logForDebugging(
        `[trusted-device] Enrollment failed ${response.status}: ${jsonStringify(response.data).slice(0, 200)}`,
      )
      return
    }

    const token = response.data?.device_token
    if (!token || typeof token !== 'string') {
      logForDebugging(
        '[trusted-device] Enrollment response missing device_token field',
      )
      return
    }

    // Persist the token to secure storage (keychain-backed on macOS).
    try {
      const storageData = secureStorage.read()
      if (!storageData) {
        logForDebugging(
          '[trusted-device] Cannot read storage, skipping token persist',
        )
        return
      }
      storageData.trustedDeviceToken = token
      const result = secureStorage.update(storageData)
      if (!result.success) {
        logForDebugging(
          `[trusted-device] Failed to persist token: ${result.warning ?? 'unknown'}`,
        )
        return
      }
      // Invalidate the memoized read so the fresh token is served immediately.
      readStoredToken.cache?.clear?.()
      logForDebugging(
        `[trusted-device] Enrolled device_id=${response.data.device_id ?? 'unknown'}`,
      )
    } catch (err: unknown) {
      logForDebugging(
        `[trusted-device] Storage write failed: ${errorMessage(err)}`,
      )
    }
  } catch (err: unknown) {
    // Outer catch keeps enrollment strictly best-effort: never fail /login.
    logForDebugging(`[trusted-device] Enrollment error: ${errorMessage(err)}`)
  }
}
|
||||
262
src/bridge/types.ts
Normal file
262
src/bridge/types.ts
Normal file
@ -0,0 +1,262 @@
|
||||
/** Default per-session timeout (24 hours). */
|
||||
export const DEFAULT_SESSION_TIMEOUT_MS = 24 * 60 * 60 * 1000
|
||||
|
||||
/** Reusable login guidance appended to bridge auth errors. */
|
||||
export const BRIDGE_LOGIN_INSTRUCTION =
|
||||
'Remote Control is only available with claude.ai subscriptions. Please use `/login` to sign in with your claude.ai account.'
|
||||
|
||||
/** Full error printed when `claude remote-control` is run without auth. */
|
||||
export const BRIDGE_LOGIN_ERROR =
|
||||
'Error: You must be logged in to use Remote Control.\n\n' +
|
||||
BRIDGE_LOGIN_INSTRUCTION
|
||||
|
||||
/** Shown when the user disconnects Remote Control (via /remote-control or ultraplan launch). */
|
||||
export const REMOTE_CONTROL_DISCONNECTED_MSG = 'Remote Control disconnected.'
|
||||
|
||||
// --- Protocol types for the environments API ---
|
||||
|
||||
/** Minimal descriptor of a queued work item: its kind and identifier. */
export type WorkData = {
  type: 'session' | 'healthcheck'
  id: string
}

/** A work item as returned by the environments poll endpoint (pollForWork). */
export type WorkResponse = {
  id: string
  type: 'work'
  environment_id: string
  // Server-side work state; treated as an opaque string by this client.
  state: string
  data: WorkData
  secret: string // base64url-encoded JSON
  created_at: string
}

/** Decoded payload of WorkResponse.secret (see decodeWorkSecret). */
export type WorkSecret = {
  // Only version 1 is accepted by decodeWorkSecret.
  version: number
  session_ingress_token: string
  api_base_url: string
  sources: Array<{
    type: string
    git_info?: { type: string; repo: string; ref?: string; token?: string }
  }>
  auth: Array<{ type: string; token: string }>
  claude_code_args?: Record<string, string> | null
  mcp_config?: unknown | null
  environment_variables?: Record<string, string> | null
  /**
   * Server-driven CCR v2 selector. Set by prepare_work_secret() when the
   * session was created via the v2 compat layer (ccr_v2_compat_enabled).
   * Same field the BYOC runner reads at environment-runner/sessionExecutor.ts.
   */
  use_code_sessions?: boolean
}
|
||||
|
||||
/** Terminal outcome of a spawned session's child process. */
export type SessionDoneStatus = 'completed' | 'failed' | 'interrupted'

export type SessionActivityType = 'tool_start' | 'text' | 'result' | 'error'

/** One entry in a session's recent-activity ring buffer. */
export type SessionActivity = {
  type: SessionActivityType
  summary: string // e.g. "Editing src/foo.ts", "Reading package.json"
  // NOTE(review): presumed epoch milliseconds (Date.now()) — confirm at the producer.
  timestamp: number
}
|
||||
|
||||
/**
|
||||
* How `claude remote-control` chooses session working directories.
|
||||
* - `single-session`: one session in cwd, bridge tears down when it ends
|
||||
* - `worktree`: persistent server, every session gets an isolated git worktree
|
||||
* - `same-dir`: persistent server, every session shares cwd (can stomp each other)
|
||||
*/
|
||||
export type SpawnMode = 'single-session' | 'worktree' | 'same-dir'
|
||||
|
||||
/**
|
||||
* Well-known worker_type values THIS codebase produces. Sent as
|
||||
* `metadata.worker_type` at environment registration so claude.ai can filter
|
||||
* the session picker by origin (e.g. assistant tab only shows assistant
|
||||
* workers). The backend treats this as an opaque string — desktop cowork
|
||||
* sends `"cowork"`, which isn't in this union. REPL code uses this narrow
|
||||
* type for its own exhaustiveness; wire-level fields accept any string.
|
||||
*/
|
||||
export type BridgeWorkerType = 'claude_code' | 'claude_code_assistant'
|
||||
|
||||
export type BridgeConfig = {
|
||||
dir: string
|
||||
machineName: string
|
||||
branch: string
|
||||
gitRepoUrl: string | null
|
||||
maxSessions: number
|
||||
spawnMode: SpawnMode
|
||||
verbose: boolean
|
||||
sandbox: boolean
|
||||
/** Client-generated UUID identifying this bridge instance. */
|
||||
bridgeId: string
|
||||
/**
|
||||
* Sent as metadata.worker_type so web clients can filter by origin.
|
||||
* Backend treats this as opaque — any string, not just BridgeWorkerType.
|
||||
*/
|
||||
workerType: string
|
||||
/** Client-generated UUID for idempotent environment registration. */
|
||||
environmentId: string
|
||||
/**
|
||||
* Backend-issued environment_id to reuse on re-register. When set, the
|
||||
* backend treats registration as a reconnect to the existing environment
|
||||
* instead of creating a new one. Used by `claude remote-control
|
||||
* --session-id` resume. Must be a backend-format ID — client UUIDs are
|
||||
* rejected with 400.
|
||||
*/
|
||||
reuseEnvironmentId?: string
|
||||
/** API base URL the bridge is connected to (used for polling). */
|
||||
apiBaseUrl: string
|
||||
/** Session ingress base URL for WebSocket connections (may differ from apiBaseUrl locally). */
|
||||
sessionIngressUrl: string
|
||||
/** Debug file path passed via --debug-file. */
|
||||
debugFile?: string
|
||||
/** Per-session timeout in milliseconds. Sessions exceeding this are killed. */
|
||||
sessionTimeoutMs?: number
|
||||
}
|
||||
|
||||
// --- Dependency interfaces (for testability) ---
|
||||
|
||||
/**
|
||||
* A control_response event sent back to a session (e.g. a permission decision).
|
||||
* The `subtype` is `'success'` per the SDK protocol; the inner `response`
|
||||
* carries the permission decision payload (e.g. `{ behavior: 'allow' }`).
|
||||
*/
|
||||
export type PermissionResponseEvent = {
|
||||
type: 'control_response'
|
||||
response: {
|
||||
subtype: 'success'
|
||||
request_id: string
|
||||
response: Record<string, unknown>
|
||||
}
|
||||
}
|
||||
|
||||
export type BridgeApiClient = {
|
||||
registerBridgeEnvironment(config: BridgeConfig): Promise<{
|
||||
environment_id: string
|
||||
environment_secret: string
|
||||
}>
|
||||
pollForWork(
|
||||
environmentId: string,
|
||||
environmentSecret: string,
|
||||
signal?: AbortSignal,
|
||||
reclaimOlderThanMs?: number,
|
||||
): Promise<WorkResponse | null>
|
||||
acknowledgeWork(
|
||||
environmentId: string,
|
||||
workId: string,
|
||||
sessionToken: string,
|
||||
): Promise<void>
|
||||
/** Stop a work item via the environments API. */
|
||||
stopWork(environmentId: string, workId: string, force: boolean): Promise<void>
|
||||
/** Deregister/delete the bridge environment on graceful shutdown. */
|
||||
deregisterEnvironment(environmentId: string): Promise<void>
|
||||
/** Send a permission response (control_response) to a session via the session events API. */
|
||||
sendPermissionResponseEvent(
|
||||
sessionId: string,
|
||||
event: PermissionResponseEvent,
|
||||
sessionToken: string,
|
||||
): Promise<void>
|
||||
/** Archive a session so it no longer appears as active on the server. */
|
||||
archiveSession(sessionId: string): Promise<void>
|
||||
/**
|
||||
* Force-stop stale worker instances and re-queue a session on an environment.
|
||||
* Used by `--session-id` to resume a session after the original bridge died.
|
||||
*/
|
||||
reconnectSession(environmentId: string, sessionId: string): Promise<void>
|
||||
/**
|
||||
* Send a lightweight heartbeat for an active work item, extending its lease.
|
||||
* Uses SessionIngressAuth (JWT, no DB hit) instead of EnvironmentSecretAuth.
|
||||
* Returns the server's response with lease status.
|
||||
*/
|
||||
heartbeatWork(
|
||||
environmentId: string,
|
||||
workId: string,
|
||||
sessionToken: string,
|
||||
): Promise<{ lease_extended: boolean; state: string }>
|
||||
}
|
||||
|
||||
export type SessionHandle = {
|
||||
sessionId: string
|
||||
done: Promise<SessionDoneStatus>
|
||||
kill(): void
|
||||
forceKill(): void
|
||||
activities: SessionActivity[] // ring buffer of recent activities (last ~10)
|
||||
currentActivity: SessionActivity | null // most recent
|
||||
accessToken: string // session_ingress_token for API calls
|
||||
lastStderr: string[] // ring buffer of last stderr lines
|
||||
writeStdin(data: string): void // write directly to child stdin
|
||||
/** Update the access token for a running session (e.g. after token refresh). */
|
||||
updateAccessToken(token: string): void
|
||||
}
|
||||
|
||||
export type SessionSpawnOpts = {
|
||||
sessionId: string
|
||||
sdkUrl: string
|
||||
accessToken: string
|
||||
/** When true, spawn the child with CCR v2 env vars (SSE transport + CCRClient). */
|
||||
useCcrV2?: boolean
|
||||
/** Required when useCcrV2 is true. Obtained from POST /worker/register. */
|
||||
workerEpoch?: number
|
||||
/**
|
||||
* Fires once with the text of the first real user message seen on the
|
||||
* child's stdout (via --replay-user-messages). Lets the caller derive a
|
||||
* session title when none exists yet. Tool-result and synthetic user
|
||||
* messages are skipped.
|
||||
*/
|
||||
onFirstUserMessage?: (text: string) => void
|
||||
}
|
||||
|
||||
export type SessionSpawner = {
|
||||
spawn(opts: SessionSpawnOpts, dir: string): SessionHandle
|
||||
}
|
||||
|
||||
export type BridgeLogger = {
|
||||
printBanner(config: BridgeConfig, environmentId: string): void
|
||||
logSessionStart(sessionId: string, prompt: string): void
|
||||
logSessionComplete(sessionId: string, durationMs: number): void
|
||||
logSessionFailed(sessionId: string, error: string): void
|
||||
logStatus(message: string): void
|
||||
logVerbose(message: string): void
|
||||
logError(message: string): void
|
||||
/** Log a reconnection success event after recovering from connection errors. */
|
||||
logReconnected(disconnectedMs: number): void
|
||||
/** Show idle status with repo/branch info and shimmer animation. */
|
||||
updateIdleStatus(): void
|
||||
/** Show reconnecting status in the live display. */
|
||||
updateReconnectingStatus(delayStr: string, elapsedStr: string): void
|
||||
updateSessionStatus(
|
||||
sessionId: string,
|
||||
elapsed: string,
|
||||
activity: SessionActivity,
|
||||
trail: string[],
|
||||
): void
|
||||
clearStatus(): void
|
||||
/** Set repository info for status line display. */
|
||||
setRepoInfo(repoName: string, branch: string): void
|
||||
/** Set debug log glob shown above the status line (ant users). */
|
||||
setDebugLogPath(path: string): void
|
||||
/** Transition to "Attached" state when a session starts. */
|
||||
setAttached(sessionId: string): void
|
||||
/** Show failed status in the live display. */
|
||||
updateFailedStatus(error: string): void
|
||||
/** Toggle QR code visibility. */
|
||||
toggleQr(): void
|
||||
/** Update the "<n> of <m> sessions" indicator and spawn mode hint. */
|
||||
updateSessionCount(active: number, max: number, mode: SpawnMode): void
|
||||
/** Update the spawn mode shown in the session-count line. Pass null to hide (single-session or toggle unavailable). */
|
||||
setSpawnModeDisplay(mode: 'same-dir' | 'worktree' | null): void
|
||||
/** Register a new session for multi-session display (called after spawn succeeds). */
|
||||
addSession(sessionId: string, url: string): void
|
||||
/** Update the per-session activity summary (tool being run) in the multi-session list. */
|
||||
updateSessionActivity(sessionId: string, activity: SessionActivity): void
|
||||
/**
|
||||
* Set a session's display title. In multi-session mode, updates the bullet list
|
||||
* entry. In single-session mode, also shows the title in the main status line.
|
||||
* Triggers a render (guarded against reconnecting/failed states).
|
||||
*/
|
||||
setSessionTitle(sessionId: string, title: string): void
|
||||
/** Remove a session from the multi-session display when it ends. */
|
||||
removeSession(sessionId: string): void
|
||||
/** Force a re-render of the status display (for multi-session activity refresh). */
|
||||
refreshDisplay(): void
|
||||
}
|
||||
3
src/bridge/webhookSanitizer.ts
Normal file
3
src/bridge/webhookSanitizer.ts
Normal file
@ -0,0 +1,3 @@
|
||||
// Restoration-time shim: the original implementation could not be recovered,
// so this is an identity pass-through.
// NOTE(review): webhook payloads are NOT actually sanitized here — confirm
// what the original scrubbed (secrets/PII?) before relying on this in
// production.
export function sanitizeWebhookPayload<T>(value: T): T {
  return value
}
|
||||
127
src/bridge/workSecret.ts
Normal file
127
src/bridge/workSecret.ts
Normal file
@ -0,0 +1,127 @@
|
||||
import axios from 'axios'
|
||||
import { jsonParse, jsonStringify } from '../utils/slowOperations.js'
|
||||
import type { WorkSecret } from './types.js'
|
||||
|
||||
/** Decode a base64url-encoded work secret and validate its version. */
|
||||
export function decodeWorkSecret(secret: string): WorkSecret {
|
||||
const json = Buffer.from(secret, 'base64url').toString('utf-8')
|
||||
const parsed: unknown = jsonParse(json)
|
||||
if (
|
||||
!parsed ||
|
||||
typeof parsed !== 'object' ||
|
||||
!('version' in parsed) ||
|
||||
parsed.version !== 1
|
||||
) {
|
||||
throw new Error(
|
||||
`Unsupported work secret version: ${parsed && typeof parsed === 'object' && 'version' in parsed ? parsed.version : 'unknown'}`,
|
||||
)
|
||||
}
|
||||
const obj = parsed as Record<string, unknown>
|
||||
if (
|
||||
typeof obj.session_ingress_token !== 'string' ||
|
||||
obj.session_ingress_token.length === 0
|
||||
) {
|
||||
throw new Error(
|
||||
'Invalid work secret: missing or empty session_ingress_token',
|
||||
)
|
||||
}
|
||||
if (typeof obj.api_base_url !== 'string') {
|
||||
throw new Error('Invalid work secret: missing api_base_url')
|
||||
}
|
||||
return parsed as WorkSecret
|
||||
}
|
||||
|
||||
/**
|
||||
* Build a WebSocket SDK URL from the API base URL and session ID.
|
||||
* Strips the HTTP(S) protocol and constructs a ws(s):// ingress URL.
|
||||
*
|
||||
* Uses /v2/ for localhost (direct to session-ingress, no Envoy rewrite)
|
||||
* and /v1/ for production (Envoy rewrites /v1/ → /v2/).
|
||||
*/
|
||||
export function buildSdkUrl(apiBaseUrl: string, sessionId: string): string {
|
||||
const isLocalhost =
|
||||
apiBaseUrl.includes('localhost') || apiBaseUrl.includes('127.0.0.1')
|
||||
const protocol = isLocalhost ? 'ws' : 'wss'
|
||||
const version = isLocalhost ? 'v2' : 'v1'
|
||||
const host = apiBaseUrl.replace(/^https?:\/\//, '').replace(/\/+$/, '')
|
||||
return `${protocol}://${host}/${version}/session_ingress/ws/${sessionId}`
|
||||
}
|
||||
|
||||
/**
|
||||
* Compare two session IDs regardless of their tagged-ID prefix.
|
||||
*
|
||||
* Tagged IDs have the form {tag}_{body} or {tag}_staging_{body}, where the
|
||||
* body encodes a UUID. CCR v2's compat layer returns `session_*` to v1 API
|
||||
* clients (compat/convert.go:41) but the infrastructure layer (sandbox-gateway
|
||||
* work queue, work poll response) uses `cse_*` (compat/CLAUDE.md:13). Both
|
||||
* have the same underlying UUID.
|
||||
*
|
||||
* Without this, replBridge rejects its own session as "foreign" at the
|
||||
* work-received check when the ccr_v2_compat_enabled gate is on.
|
||||
*/
|
||||
export function sameSessionId(a: string, b: string): boolean {
|
||||
if (a === b) return true
|
||||
// The body is everything after the last underscore — this handles both
|
||||
// `{tag}_{body}` and `{tag}_staging_{body}`.
|
||||
const aBody = a.slice(a.lastIndexOf('_') + 1)
|
||||
const bBody = b.slice(b.lastIndexOf('_') + 1)
|
||||
// Guard against IDs with no underscore (bare UUIDs): lastIndexOf returns -1,
|
||||
// slice(0) returns the whole string, and we already checked a === b above.
|
||||
// Require a minimum length to avoid accidental matches on short suffixes
|
||||
// (e.g. single-char tag remnants from malformed IDs).
|
||||
return aBody.length >= 4 && aBody === bBody
|
||||
}
|
||||
|
||||
/**
|
||||
* Build a CCR v2 session URL from the API base URL and session ID.
|
||||
* Unlike buildSdkUrl, this returns an HTTP(S) URL (not ws://) and points at
|
||||
* /v1/code/sessions/{id} — the child CC will derive the SSE stream path
|
||||
* and worker endpoints from this base.
|
||||
*/
|
||||
export function buildCCRv2SdkUrl(
|
||||
apiBaseUrl: string,
|
||||
sessionId: string,
|
||||
): string {
|
||||
const base = apiBaseUrl.replace(/\/+$/, '')
|
||||
return `${base}/v1/code/sessions/${sessionId}`
|
||||
}
|
||||
|
||||
/**
|
||||
* Register this bridge as the worker for a CCR v2 session.
|
||||
* Returns the worker_epoch, which must be passed to the child CC process
|
||||
* so its CCRClient can include it in every heartbeat/state/event request.
|
||||
*
|
||||
* Mirrors what environment-manager does in the container path
|
||||
* (api-go/environment-manager/cmd/cmd_task_run.go RegisterWorker).
|
||||
*/
|
||||
export async function registerWorker(
|
||||
sessionUrl: string,
|
||||
accessToken: string,
|
||||
): Promise<number> {
|
||||
const response = await axios.post(
|
||||
`${sessionUrl}/worker/register`,
|
||||
{},
|
||||
{
|
||||
headers: {
|
||||
Authorization: `Bearer ${accessToken}`,
|
||||
'Content-Type': 'application/json',
|
||||
'anthropic-version': '2023-06-01',
|
||||
},
|
||||
timeout: 10_000,
|
||||
},
|
||||
)
|
||||
// protojson serializes int64 as a string to avoid JS number precision loss;
|
||||
// the Go side may also return a number depending on encoder settings.
|
||||
const raw = response.data?.worker_epoch
|
||||
const epoch = typeof raw === 'string' ? Number(raw) : raw
|
||||
if (
|
||||
typeof epoch !== 'number' ||
|
||||
!Number.isFinite(epoch) ||
|
||||
!Number.isSafeInteger(epoch)
|
||||
) {
|
||||
throw new Error(
|
||||
`registerWorker: invalid worker_epoch in response: ${jsonStringify(response.data)}`,
|
||||
)
|
||||
}
|
||||
return epoch
|
||||
}
|
||||
371
src/buddy/CompanionSprite.tsx
Normal file
371
src/buddy/CompanionSprite.tsx
Normal file
File diff suppressed because one or more lines are too long
133
src/buddy/companion.ts
Normal file
133
src/buddy/companion.ts
Normal file
@ -0,0 +1,133 @@
|
||||
import { getGlobalConfig } from '../utils/config.js'
|
||||
import {
|
||||
type Companion,
|
||||
type CompanionBones,
|
||||
EYES,
|
||||
HATS,
|
||||
RARITIES,
|
||||
RARITY_WEIGHTS,
|
||||
type Rarity,
|
||||
SPECIES,
|
||||
STAT_NAMES,
|
||||
type StatName,
|
||||
} from './types.js'
|
||||
|
||||
// Mulberry32 — tiny seeded PRNG, good enough for picking ducks
|
||||
function mulberry32(seed: number): () => number {
|
||||
let a = seed >>> 0
|
||||
return function () {
|
||||
a |= 0
|
||||
a = (a + 0x6d2b79f5) | 0
|
||||
let t = Math.imul(a ^ (a >>> 15), 1 | a)
|
||||
t = (t + Math.imul(t ^ (t >>> 7), 61 | t)) ^ t
|
||||
return ((t ^ (t >>> 14)) >>> 0) / 4294967296
|
||||
}
|
||||
}
|
||||
|
||||
function hashString(s: string): number {
|
||||
if (typeof Bun !== 'undefined') {
|
||||
return Number(BigInt(Bun.hash(s)) & 0xffffffffn)
|
||||
}
|
||||
let h = 2166136261
|
||||
for (let i = 0; i < s.length; i++) {
|
||||
h ^= s.charCodeAt(i)
|
||||
h = Math.imul(h, 16777619)
|
||||
}
|
||||
return h >>> 0
|
||||
}
|
||||
|
||||
function pick<T>(rng: () => number, arr: readonly T[]): T {
|
||||
return arr[Math.floor(rng() * arr.length)]!
|
||||
}
|
||||
|
||||
function rollRarity(rng: () => number): Rarity {
|
||||
const total = Object.values(RARITY_WEIGHTS).reduce((a, b) => a + b, 0)
|
||||
let roll = rng() * total
|
||||
for (const rarity of RARITIES) {
|
||||
roll -= RARITY_WEIGHTS[rarity]
|
||||
if (roll < 0) return rarity
|
||||
}
|
||||
return 'common'
|
||||
}
|
||||
|
||||
const RARITY_FLOOR: Record<Rarity, number> = {
|
||||
common: 5,
|
||||
uncommon: 15,
|
||||
rare: 25,
|
||||
epic: 35,
|
||||
legendary: 50,
|
||||
}
|
||||
|
||||
// One peak stat, one dump stat, rest scattered. Rarity bumps the floor.
|
||||
function rollStats(
|
||||
rng: () => number,
|
||||
rarity: Rarity,
|
||||
): Record<StatName, number> {
|
||||
const floor = RARITY_FLOOR[rarity]
|
||||
const peak = pick(rng, STAT_NAMES)
|
||||
let dump = pick(rng, STAT_NAMES)
|
||||
while (dump === peak) dump = pick(rng, STAT_NAMES)
|
||||
|
||||
const stats = {} as Record<StatName, number>
|
||||
for (const name of STAT_NAMES) {
|
||||
if (name === peak) {
|
||||
stats[name] = Math.min(100, floor + 50 + Math.floor(rng() * 30))
|
||||
} else if (name === dump) {
|
||||
stats[name] = Math.max(1, floor - 10 + Math.floor(rng() * 15))
|
||||
} else {
|
||||
stats[name] = floor + Math.floor(rng() * 40)
|
||||
}
|
||||
}
|
||||
return stats
|
||||
}
|
||||
|
||||
// Mixed into the userId before hashing; bumping it rerolls every companion.
const SALT = 'friend-2026-401'

// A complete deterministic roll: the physical traits plus a seed that can be
// used for downstream flavor generation.
export type Roll = {
  bones: CompanionBones
  inspirationSeed: number
}
|
||||
|
||||
function rollFrom(rng: () => number): Roll {
|
||||
const rarity = rollRarity(rng)
|
||||
const bones: CompanionBones = {
|
||||
rarity,
|
||||
species: pick(rng, SPECIES),
|
||||
eye: pick(rng, EYES),
|
||||
hat: rarity === 'common' ? 'none' : pick(rng, HATS),
|
||||
shiny: rng() < 0.01,
|
||||
stats: rollStats(rng, rarity),
|
||||
}
|
||||
return { bones, inspirationSeed: Math.floor(rng() * 1e9) }
|
||||
}
|
||||
|
||||
// Called from three hot paths (500ms sprite tick, per-keystroke PromptInput,
|
||||
// per-turn observer) with the same userId → cache the deterministic result.
|
||||
let rollCache: { key: string; value: Roll } | undefined
|
||||
export function roll(userId: string): Roll {
|
||||
const key = userId + SALT
|
||||
if (rollCache?.key === key) return rollCache.value
|
||||
const value = rollFrom(mulberry32(hashString(key)))
|
||||
rollCache = { key, value }
|
||||
return value
|
||||
}
|
||||
|
||||
export function rollWithSeed(seed: string): Roll {
|
||||
return rollFrom(mulberry32(hashString(seed)))
|
||||
}
|
||||
|
||||
export function companionUserId(): string {
|
||||
const config = getGlobalConfig()
|
||||
return config.oauthAccount?.accountUuid ?? config.userID ?? 'anon'
|
||||
}
|
||||
|
||||
// Regenerate bones from userId, merge with stored soul. Bones never persist
|
||||
// so species renames and SPECIES-array edits can't break stored companions,
|
||||
// and editing config.companion can't fake a rarity.
|
||||
export function getCompanion(): Companion | undefined {
|
||||
const stored = getGlobalConfig().companion
|
||||
if (!stored) return undefined
|
||||
const { bones } = roll(companionUserId())
|
||||
// bones last so stale bones fields in old-format configs get overridden
|
||||
return { ...stored, ...bones }
|
||||
}
|
||||
36
src/buddy/prompt.ts
Normal file
36
src/buddy/prompt.ts
Normal file
@ -0,0 +1,36 @@
|
||||
import { feature } from 'bun:bundle'
|
||||
import type { Message } from '../types/message.js'
|
||||
import type { Attachment } from '../utils/attachments.js'
|
||||
import { getGlobalConfig } from '../utils/config.js'
|
||||
import { getCompanion } from './companion.js'
|
||||
|
||||
export function companionIntroText(name: string, species: string): string {
|
||||
return `# Companion
|
||||
|
||||
A small ${species} named ${name} sits beside the user's input box and occasionally comments in a speech bubble. You're not ${name} — it's a separate watcher.
|
||||
|
||||
When the user addresses ${name} directly (by name), its bubble will answer. Your job in that moment is to stay out of the way: respond in ONE line or less, or just answer any part of the message meant for you. Don't explain that you're not ${name} — they know. Don't narrate what ${name} might say — the bubble handles that.`
|
||||
}
|
||||
|
||||
export function getCompanionIntroAttachment(
|
||||
messages: Message[] | undefined,
|
||||
): Attachment[] {
|
||||
if (!feature('BUDDY')) return []
|
||||
const companion = getCompanion()
|
||||
if (!companion || getGlobalConfig().companionMuted) return []
|
||||
|
||||
// Skip if already announced for this companion.
|
||||
for (const msg of messages ?? []) {
|
||||
if (msg.type !== 'attachment') continue
|
||||
if (msg.attachment.type !== 'companion_intro') continue
|
||||
if (msg.attachment.name === companion.name) return []
|
||||
}
|
||||
|
||||
return [
|
||||
{
|
||||
type: 'companion_intro',
|
||||
name: companion.name,
|
||||
species: companion.species,
|
||||
},
|
||||
]
|
||||
}
|
||||
514
src/buddy/sprites.ts
Normal file
514
src/buddy/sprites.ts
Normal file
@ -0,0 +1,514 @@
|
||||
import type { CompanionBones, Eye, Hat, Species } from './types.js'
|
||||
import {
|
||||
axolotl,
|
||||
blob,
|
||||
cactus,
|
||||
capybara,
|
||||
cat,
|
||||
chonk,
|
||||
dragon,
|
||||
duck,
|
||||
ghost,
|
||||
goose,
|
||||
mushroom,
|
||||
octopus,
|
||||
owl,
|
||||
penguin,
|
||||
rabbit,
|
||||
robot,
|
||||
snail,
|
||||
turtle,
|
||||
} from './types.js'
|
||||
|
||||
// Each sprite is 5 lines tall, 12 wide (after {E}→1char substitution).
// Multiple frames per species for idle fidget animation.
// Line 0 is the hat slot — must be blank in frames 0-1; frame 2 may use it.
// '{E}' placeholders are substituted with the companion's eye glyph at
// render time (see renderSprite); keep them verbatim in the art.
// NOTE(review): interior spacing in these strings may have been collapsed
// during source recovery — verify art alignment against a rendered sprite.
const BODIES: Record<Species, string[][]> = {
  // duck
  [duck]: [
    [
      ' ',
      ' __ ',
      ' <({E} )___ ',
      ' ( ._> ',
      ' `--´ ',
    ],
    [
      ' ',
      ' __ ',
      ' <({E} )___ ',
      ' ( ._> ',
      ' `--´~ ',
    ],
    [
      ' ',
      ' __ ',
      ' <({E} )___ ',
      ' ( .__> ',
      ' `--´ ',
    ],
  ],
  // goose
  [goose]: [
    [
      ' ',
      ' ({E}> ',
      ' || ',
      ' _(__)_ ',
      ' ^^^^ ',
    ],
    [
      ' ',
      ' ({E}> ',
      ' || ',
      ' _(__)_ ',
      ' ^^^^ ',
    ],
    [
      ' ',
      ' ({E}>> ',
      ' || ',
      ' _(__)_ ',
      ' ^^^^ ',
    ],
  ],
  // blob
  [blob]: [
    [
      ' ',
      ' .----. ',
      ' ( {E} {E} ) ',
      ' ( ) ',
      ' `----´ ',
    ],
    [
      ' ',
      ' .------. ',
      ' ( {E} {E} ) ',
      ' ( ) ',
      ' `------´ ',
    ],
    [
      ' ',
      ' .--. ',
      ' ({E} {E}) ',
      ' ( ) ',
      ' `--´ ',
    ],
  ],
  // cat
  [cat]: [
    [
      ' ',
      ' /\\_/\\ ',
      ' ( {E} {E}) ',
      ' ( ω ) ',
      ' (")_(") ',
    ],
    [
      ' ',
      ' /\\_/\\ ',
      ' ( {E} {E}) ',
      ' ( ω ) ',
      ' (")_(")~ ',
    ],
    [
      ' ',
      ' /\\-/\\ ',
      ' ( {E} {E}) ',
      ' ( ω ) ',
      ' (")_(") ',
    ],
  ],
  // dragon — frame 2 uses the hat slot for smoke
  [dragon]: [
    [
      ' ',
      ' /^\\ /^\\ ',
      ' < {E} {E} > ',
      ' ( ~~ ) ',
      ' `-vvvv-´ ',
    ],
    [
      ' ',
      ' /^\\ /^\\ ',
      ' < {E} {E} > ',
      ' ( ) ',
      ' `-vvvv-´ ',
    ],
    [
      ' ~ ~ ',
      ' /^\\ /^\\ ',
      ' < {E} {E} > ',
      ' ( ~~ ) ',
      ' `-vvvv-´ ',
    ],
  ],
  // octopus — frame 2 blows a bubble in the hat slot
  [octopus]: [
    [
      ' ',
      ' .----. ',
      ' ( {E} {E} ) ',
      ' (______) ',
      ' /\\/\\/\\/\\ ',
    ],
    [
      ' ',
      ' .----. ',
      ' ( {E} {E} ) ',
      ' (______) ',
      ' \\/\\/\\/\\/ ',
    ],
    [
      ' o ',
      ' .----. ',
      ' ( {E} {E} ) ',
      ' (______) ',
      ' /\\/\\/\\/\\ ',
    ],
  ],
  // owl — frame 2 winks one eye
  [owl]: [
    [
      ' ',
      ' /\\ /\\ ',
      ' (({E})({E})) ',
      ' ( >< ) ',
      ' `----´ ',
    ],
    [
      ' ',
      ' /\\ /\\ ',
      ' (({E})({E})) ',
      ' ( >< ) ',
      ' .----. ',
    ],
    [
      ' ',
      ' /\\ /\\ ',
      ' (({E})(-)) ',
      ' ( >< ) ',
      ' `----´ ',
    ],
  ],
  // penguin — frame 2 shifts the body up a row (non-blank line 0)
  [penguin]: [
    [
      ' ',
      ' .---. ',
      ' ({E}>{E}) ',
      ' /( )\\ ',
      ' `---´ ',
    ],
    [
      ' ',
      ' .---. ',
      ' ({E}>{E}) ',
      ' |( )| ',
      ' `---´ ',
    ],
    [
      ' .---. ',
      ' ({E}>{E}) ',
      ' /( )\\ ',
      ' `---´ ',
      ' ~ ~ ',
    ],
  ],
  // turtle
  [turtle]: [
    [
      ' ',
      ' _,--._ ',
      ' ( {E} {E} ) ',
      ' /[______]\\ ',
      ' `` `` ',
    ],
    [
      ' ',
      ' _,--._ ',
      ' ( {E} {E} ) ',
      ' /[______]\\ ',
      ' `` `` ',
    ],
    [
      ' ',
      ' _,--._ ',
      ' ( {E} {E} ) ',
      ' /[======]\\ ',
      ' `` `` ',
    ],
  ],
  // snail
  [snail]: [
    [
      ' ',
      ' {E} .--. ',
      ' \\ ( @ ) ',
      ' \\_`--´ ',
      ' ~~~~~~~ ',
    ],
    [
      ' ',
      ' {E} .--. ',
      ' | ( @ ) ',
      ' \\_`--´ ',
      ' ~~~~~~~ ',
    ],
    [
      ' ',
      ' {E} .--. ',
      ' \\ ( @ ) ',
      ' \\_`--´ ',
      ' ~~~~~~ ',
    ],
  ],
  // ghost — frame 2 has wisps in the hat slot
  [ghost]: [
    [
      ' ',
      ' .----. ',
      ' / {E} {E} \\ ',
      ' | | ',
      ' ~`~``~`~ ',
    ],
    [
      ' ',
      ' .----. ',
      ' / {E} {E} \\ ',
      ' | | ',
      ' `~`~~`~` ',
    ],
    [
      ' ~ ~ ',
      ' .----. ',
      ' / {E} {E} \\ ',
      ' | | ',
      ' ~~`~~`~~ ',
    ],
  ],
  // axolotl — full-width gill fronds, no hat-slot padding on body rows
  [axolotl]: [
    [
      ' ',
      '}~(______)~{',
      '}~({E} .. {E})~{',
      ' ( .--. ) ',
      ' (_/ \\_) ',
    ],
    [
      ' ',
      '~}(______){~',
      '~}({E} .. {E}){~',
      ' ( .--. ) ',
      ' (_/ \\_) ',
    ],
    [
      ' ',
      '}~(______)~{',
      '}~({E} .. {E})~{',
      ' ( -- ) ',
      ' ~_/ \\_~ ',
    ],
  ],
  // capybara
  [capybara]: [
    [
      ' ',
      ' n______n ',
      ' ( {E} {E} ) ',
      ' ( oo ) ',
      ' `------´ ',
    ],
    [
      ' ',
      ' n______n ',
      ' ( {E} {E} ) ',
      ' ( Oo ) ',
      ' `------´ ',
    ],
    [
      ' ~ ~ ',
      ' u______n ',
      ' ( {E} {E} ) ',
      ' ( oo ) ',
      ' `------´ ',
    ],
  ],
  // cactus — arms move between frames
  [cactus]: [
    [
      ' ',
      ' n ____ n ',
      ' | |{E} {E}| | ',
      ' |_| |_| ',
      ' | | ',
    ],
    [
      ' ',
      ' ____ ',
      ' n |{E} {E}| n ',
      ' |_| |_| ',
      ' | | ',
    ],
    [
      ' n n ',
      ' | ____ | ',
      ' | |{E} {E}| | ',
      ' |_| |_| ',
      ' | | ',
    ],
  ],
  // robot — frame 2 sparks in the hat slot
  [robot]: [
    [
      ' ',
      ' .[||]. ',
      ' [ {E} {E} ] ',
      ' [ ==== ] ',
      ' `------´ ',
    ],
    [
      ' ',
      ' .[||]. ',
      ' [ {E} {E} ] ',
      ' [ -==- ] ',
      ' `------´ ',
    ],
    [
      ' * ',
      ' .[||]. ',
      ' [ {E} {E} ] ',
      ' [ ==== ] ',
      ' `------´ ',
    ],
  ],
  // rabbit
  [rabbit]: [
    [
      ' ',
      ' (\\__/) ',
      ' ( {E} {E} ) ',
      ' =( .. )= ',
      ' (")__(") ',
    ],
    [
      ' ',
      ' (|__/) ',
      ' ( {E} {E} ) ',
      ' =( .. )= ',
      ' (")__(") ',
    ],
    [
      ' ',
      ' (\\__/) ',
      ' ( {E} {E} ) ',
      ' =( . . )= ',
      ' (")__(") ',
    ],
  ],
  // mushroom — frame 2 releases spores into the hat slot
  [mushroom]: [
    [
      ' ',
      ' .-o-OO-o-. ',
      '(__________)',
      ' |{E} {E}| ',
      ' |____| ',
    ],
    [
      ' ',
      ' .-O-oo-O-. ',
      '(__________)',
      ' |{E} {E}| ',
      ' |____| ',
    ],
    [
      ' . o . ',
      ' .-o-OO-o-. ',
      '(__________)',
      ' |{E} {E}| ',
      ' |____| ',
    ],
  ],
  // chonk
  [chonk]: [
    [
      ' ',
      ' /\\ /\\ ',
      ' ( {E} {E} ) ',
      ' ( .. ) ',
      ' `------´ ',
    ],
    [
      ' ',
      ' /\\ /| ',
      ' ( {E} {E} ) ',
      ' ( .. ) ',
      ' `------´ ',
    ],
    [
      ' ',
      ' /\\ /\\ ',
      ' ( {E} {E} ) ',
      ' ( .. ) ',
      ' `------´~ ',
    ],
  ],
}
|
||||
|
||||
// Hat overlay drawn into a sprite's line-0 slot by renderSprite.
// 'none' draws nothing (renderSprite skips the replacement entirely).
const HAT_LINES: Record<Hat, string> = {
  none: '',
  crown: ' \\^^^/ ',
  tophat: ' [___] ',
  propeller: ' -+- ',
  halo: ' ( ) ',
  wizard: ' /^\\ ',
  beanie: ' (___) ',
  tinyduck: ' ,> ',
}
|
||||
|
||||
export function renderSprite(bones: CompanionBones, frame = 0): string[] {
|
||||
const frames = BODIES[bones.species]
|
||||
const body = frames[frame % frames.length]!.map(line =>
|
||||
line.replaceAll('{E}', bones.eye),
|
||||
)
|
||||
const lines = [...body]
|
||||
// Only replace with hat if line 0 is empty (some fidget frames use it for smoke etc)
|
||||
if (bones.hat !== 'none' && !lines[0]!.trim()) {
|
||||
lines[0] = HAT_LINES[bones.hat]
|
||||
}
|
||||
// Drop blank hat slot — wastes a row in the Card and ambient sprite when
|
||||
// there's no hat and the frame isn't using it for smoke/antenna/etc.
|
||||
// Only safe when ALL frames have blank line 0; otherwise heights oscillate.
|
||||
if (!lines[0]!.trim() && frames.every(f => !f[0]!.trim())) lines.shift()
|
||||
return lines
|
||||
}
|
||||
|
||||
/** Number of idle-animation frames available for the given species. */
export function spriteFrameCount(species: Species): number {
  return BODIES[species].length
}
|
||||
|
||||
export function renderFace(bones: CompanionBones): string {
|
||||
const eye: Eye = bones.eye
|
||||
switch (bones.species) {
|
||||
case duck:
|
||||
case goose:
|
||||
return `(${eye}>`
|
||||
case blob:
|
||||
return `(${eye}${eye})`
|
||||
case cat:
|
||||
return `=${eye}ω${eye}=`
|
||||
case dragon:
|
||||
return `<${eye}~${eye}>`
|
||||
case octopus:
|
||||
return `~(${eye}${eye})~`
|
||||
case owl:
|
||||
return `(${eye})(${eye})`
|
||||
case penguin:
|
||||
return `(${eye}>)`
|
||||
case turtle:
|
||||
return `[${eye}_${eye}]`
|
||||
case snail:
|
||||
return `${eye}(@)`
|
||||
case ghost:
|
||||
return `/${eye}${eye}\\`
|
||||
case axolotl:
|
||||
return `}${eye}.${eye}{`
|
||||
case capybara:
|
||||
return `(${eye}oo${eye})`
|
||||
case cactus:
|
||||
return `|${eye} ${eye}|`
|
||||
case robot:
|
||||
return `[${eye}${eye}]`
|
||||
case rabbit:
|
||||
return `(${eye}..${eye})`
|
||||
case mushroom:
|
||||
return `|${eye} ${eye}|`
|
||||
case chonk:
|
||||
return `(${eye}.${eye})`
|
||||
}
|
||||
}
|
||||
148
src/buddy/types.ts
Normal file
148
src/buddy/types.ts
Normal file
@ -0,0 +1,148 @@
|
||||
// Rarity tiers, ordered most → least common (see RARITY_WEIGHTS).
// rollRarity walks this order when spending the weighted roll.
export const RARITIES = [
  'common',
  'uncommon',
  'rare',
  'epic',
  'legendary',
] as const
export type Rarity = (typeof RARITIES)[number]
|
||||
|
||||
// One species name collides with a model-codename canary in excluded-strings.txt.
// The check greps build output (not source), so runtime-constructing the value keeps
// the literal out of the bundle while the check stays armed for the actual codename.
// All species encoded uniformly; `as` casts are type-position only (erased pre-bundle).
// Do NOT replace these with plain string literals — that would re-arm the canary.
const c = String.fromCharCode
// biome-ignore format: keep the species list compact

export const duck = c(0x64,0x75,0x63,0x6b) as 'duck'
export const goose = c(0x67, 0x6f, 0x6f, 0x73, 0x65) as 'goose'
export const blob = c(0x62, 0x6c, 0x6f, 0x62) as 'blob'
export const cat = c(0x63, 0x61, 0x74) as 'cat'
export const dragon = c(0x64, 0x72, 0x61, 0x67, 0x6f, 0x6e) as 'dragon'
export const octopus = c(0x6f, 0x63, 0x74, 0x6f, 0x70, 0x75, 0x73) as 'octopus'
export const owl = c(0x6f, 0x77, 0x6c) as 'owl'
export const penguin = c(0x70, 0x65, 0x6e, 0x67, 0x75, 0x69, 0x6e) as 'penguin'
export const turtle = c(0x74, 0x75, 0x72, 0x74, 0x6c, 0x65) as 'turtle'
export const snail = c(0x73, 0x6e, 0x61, 0x69, 0x6c) as 'snail'
export const ghost = c(0x67, 0x68, 0x6f, 0x73, 0x74) as 'ghost'
export const axolotl = c(0x61, 0x78, 0x6f, 0x6c, 0x6f, 0x74, 0x6c) as 'axolotl'
export const capybara = c(
  0x63,
  0x61,
  0x70,
  0x79,
  0x62,
  0x61,
  0x72,
  0x61,
) as 'capybara'
export const cactus = c(0x63, 0x61, 0x63, 0x74, 0x75, 0x73) as 'cactus'
export const robot = c(0x72, 0x6f, 0x62, 0x6f, 0x74) as 'robot'
export const rabbit = c(0x72, 0x61, 0x62, 0x62, 0x69, 0x74) as 'rabbit'
export const mushroom = c(
  0x6d,
  0x75,
  0x73,
  0x68,
  0x72,
  0x6f,
  0x6f,
  0x6d,
) as 'mushroom'
export const chonk = c(0x63, 0x68, 0x6f, 0x6e, 0x6b) as 'chonk'
|
||||
|
||||
// All rollable species. Order matters for determinism: pick() indexes into
// this array, so reordering or inserting entries rerolls existing users.
export const SPECIES = [
  duck,
  goose,
  blob,
  cat,
  dragon,
  octopus,
  owl,
  penguin,
  turtle,
  snail,
  ghost,
  axolotl,
  capybara,
  cactus,
  robot,
  rabbit,
  mushroom,
  chonk,
] as const
export type Species = (typeof SPECIES)[number] // biome-ignore format: keep compact
|
||||
|
||||
// Glyphs substituted for the '{E}' eye placeholder in sprite art.
export const EYES = ['·', '✦', '×', '◉', '@', '°'] as const
export type Eye = (typeof EYES)[number]

// Hat overlays for the sprite's line-0 slot; 'none' leaves it empty.
// Commons are forced to 'none' at roll time (see rollFrom).
export const HATS = [
  'none',
  'crown',
  'tophat',
  'propeller',
  'halo',
  'wizard',
  'beanie',
  'tinyduck',
] as const
export type Hat = (typeof HATS)[number]

// Companion stat-sheet labels; values are rolled per-stat in rollStats.
export const STAT_NAMES = [
  'DEBUGGING',
  'PATIENCE',
  'CHAOS',
  'WISDOM',
  'SNARK',
] as const
export type StatName = (typeof STAT_NAMES)[number]
|
||||
|
||||
// Deterministic parts — derived from hash(userId); never persisted to config.
export type CompanionBones = {
  rarity: Rarity
  species: Species
  eye: Eye
  hat: Hat
  shiny: boolean
  stats: Record<StatName, number>
}

// Model-generated soul — stored in config after first hatch.
export type CompanionSoul = {
  name: string
  personality: string
}

// Full runtime companion: regenerated bones merged with the stored soul.
export type Companion = CompanionBones &
  CompanionSoul & {
    hatchedAt: number
  }

// What actually persists in config. Bones are regenerated from hash(userId)
// on every read so species renames don't break stored companions and users
// can't edit their way to a legendary.
export type StoredCompanion = CompanionSoul & { hatchedAt: number }
|
||||
|
||||
// Relative drop weights consumed by rollRarity (summed dynamically there).
export const RARITY_WEIGHTS = {
  common: 60,
  uncommon: 25,
  rare: 10,
  epic: 4,
  legendary: 1,
} as const satisfies Record<Rarity, number>

// Star-count display string per rarity tier.
export const RARITY_STARS = {
  common: '★',
  uncommon: '★★',
  rare: '★★★',
  epic: '★★★★',
  legendary: '★★★★★',
} as const satisfies Record<Rarity, string>

// Theme key used to color each rarity in the UI (maps into utils/theme Theme).
export const RARITY_COLORS = {
  common: 'inactive',
  uncommon: 'success',
  rare: 'permission',
  epic: 'autoAccept',
  legendary: 'warning',
} as const satisfies Record<Rarity, keyof import('../utils/theme.js').Theme>
|
||||
98
src/buddy/useBuddyNotification.tsx
Normal file
98
src/buddy/useBuddyNotification.tsx
Normal file
File diff suppressed because one or more lines are too long
31
src/cli/exit.ts
Normal file
31
src/cli/exit.ts
Normal file
@ -0,0 +1,31 @@
|
||||
/**
|
||||
* CLI exit helpers for subcommand handlers.
|
||||
*
|
||||
* Consolidates the 4-5 line "print + lint-suppress + exit" block that was
|
||||
* copy-pasted ~60 times across `claude mcp *` / `claude plugin *` handlers.
|
||||
* The `: never` return type lets TypeScript narrow control flow at call sites
|
||||
* without a trailing `return`.
|
||||
*/
|
||||
/* eslint-disable custom-rules/no-process-exit -- centralized CLI exit point */
|
||||
|
||||
// `return undefined as never` (not a post-exit throw) — tests spy on
|
||||
// process.exit and let it return. Call sites write `return cliError(...)`
|
||||
// where subsequent code would dereference narrowed-away values under mock.
|
||||
// cliError uses console.error (tests spy on console.error); cliOk uses
|
||||
// process.stdout.write (tests spy on process.stdout.write — Bun's console.log
|
||||
// doesn't route through a spied process.stdout.write).
|
||||
|
||||
/** Write an error message to stderr (if given) and exit with code 1. */
|
||||
export function cliError(msg?: string): never {
|
||||
// biome-ignore lint/suspicious/noConsole: centralized CLI error output
|
||||
if (msg) console.error(msg)
|
||||
process.exit(1)
|
||||
return undefined as never
|
||||
}
|
||||
|
||||
/** Write a message to stdout (if given) and exit with code 0. */
|
||||
export function cliOk(msg?: string): never {
|
||||
if (msg) process.stdout.write(msg + '\n')
|
||||
process.exit(0)
|
||||
return undefined as never
|
||||
}
|
||||
70
src/cli/handlers/agents.ts
Normal file
70
src/cli/handlers/agents.ts
Normal file
@ -0,0 +1,70 @@
|
||||
/**
|
||||
* Agents subcommand handler — prints the list of configured agents.
|
||||
* Dynamically imported only when `claude agents` runs.
|
||||
*/
|
||||
|
||||
import {
|
||||
AGENT_SOURCE_GROUPS,
|
||||
compareAgentsByName,
|
||||
getOverrideSourceLabel,
|
||||
type ResolvedAgent,
|
||||
resolveAgentModelDisplay,
|
||||
resolveAgentOverrides,
|
||||
} from '../../tools/AgentTool/agentDisplay.js'
|
||||
import {
|
||||
getActiveAgentsFromList,
|
||||
getAgentDefinitionsWithOverrides,
|
||||
} from '../../tools/AgentTool/loadAgentsDir.js'
|
||||
import { getCwd } from '../../utils/cwd.js'
|
||||
|
||||
function formatAgent(agent: ResolvedAgent): string {
|
||||
const model = resolveAgentModelDisplay(agent)
|
||||
const parts = [agent.agentType]
|
||||
if (model) {
|
||||
parts.push(model)
|
||||
}
|
||||
if (agent.memory) {
|
||||
parts.push(`${agent.memory} memory`)
|
||||
}
|
||||
return parts.join(' · ')
|
||||
}
|
||||
|
||||
/** `claude agents` entrypoint: groups agents by source and prints a summary. */
export async function agentsHandler(): Promise<void> {
  const cwd = getCwd()
  const { allAgents } = await getAgentDefinitionsWithOverrides(cwd)
  const activeAgents = getActiveAgentsFromList(allAgents)
  const resolvedAgents = resolveAgentOverrides(allAgents, activeAgents)

  const lines: string[] = []
  let totalActive = 0

  for (const { label, source } of AGENT_SOURCE_GROUPS) {
    // sort() mutates, but only the fresh array produced by filter().
    const groupAgents = resolvedAgents
      .filter(a => a.source === source)
      .sort(compareAgentsByName)

    if (groupAgents.length === 0) continue

    lines.push(`${label}:`)
    for (const agent of groupAgents) {
      // Shadowed agents are listed but excluded from the active count.
      if (agent.overriddenBy) {
        const winnerSource = getOverrideSourceLabel(agent.overriddenBy)
        lines.push(` (shadowed by ${winnerSource}) ${formatAgent(agent)}`)
      } else {
        lines.push(` ${formatAgent(agent)}`)
        totalActive++
      }
    }
    lines.push('')
  }

  if (lines.length === 0) {
    // biome-ignore lint/suspicious/noConsole: intentional console output
    console.log('No agents found.')
  } else {
    // biome-ignore lint/suspicious/noConsole: intentional console output
    console.log(`${totalActive} active agents\n`)
    // biome-ignore lint/suspicious/noConsole: intentional console output
    console.log(lines.join('\n').trimEnd())
  }
}
|
||||
330
src/cli/handlers/auth.ts
Normal file
330
src/cli/handlers/auth.ts
Normal file
@ -0,0 +1,330 @@
|
||||
/* eslint-disable custom-rules/no-process-exit -- CLI subcommand handler intentionally exits */
|
||||
|
||||
import {
|
||||
clearAuthRelatedCaches,
|
||||
performLogout,
|
||||
} from '../../commands/logout/logout.js'
|
||||
import {
|
||||
type AnalyticsMetadata_I_VERIFIED_THIS_IS_NOT_CODE_OR_FILEPATHS,
|
||||
logEvent,
|
||||
} from '../../services/analytics/index.js'
|
||||
import { getSSLErrorHint } from '../../services/api/errorUtils.js'
|
||||
import { fetchAndStoreClaudeCodeFirstTokenDate } from '../../services/api/firstTokenDate.js'
|
||||
import {
|
||||
createAndStoreApiKey,
|
||||
fetchAndStoreUserRoles,
|
||||
refreshOAuthToken,
|
||||
shouldUseClaudeAIAuth,
|
||||
storeOAuthAccountInfo,
|
||||
} from '../../services/oauth/client.js'
|
||||
import { getOauthProfileFromOauthToken } from '../../services/oauth/getOauthProfile.js'
|
||||
import { OAuthService } from '../../services/oauth/index.js'
|
||||
import type { OAuthTokens } from '../../services/oauth/types.js'
|
||||
import {
|
||||
clearOAuthTokenCache,
|
||||
getAnthropicApiKeyWithSource,
|
||||
getAuthTokenSource,
|
||||
getOauthAccountInfo,
|
||||
getSubscriptionType,
|
||||
isUsing3PServices,
|
||||
saveOAuthTokensIfNeeded,
|
||||
validateForceLoginOrg,
|
||||
} from '../../utils/auth.js'
|
||||
import { saveGlobalConfig } from '../../utils/config.js'
|
||||
import { logForDebugging } from '../../utils/debug.js'
|
||||
import { isRunningOnHomespace } from '../../utils/envUtils.js'
|
||||
import { errorMessage } from '../../utils/errors.js'
|
||||
import { logError } from '../../utils/log.js'
|
||||
import { getAPIProvider } from '../../utils/model/providers.js'
|
||||
import { getInitialSettings } from '../../utils/settings/settings.js'
|
||||
import { jsonStringify } from '../../utils/slowOperations.js'
|
||||
import {
|
||||
buildAccountProperties,
|
||||
buildAPIProviderProperties,
|
||||
} from '../../utils/status.js'
|
||||
|
||||
/**
 * Shared post-token-acquisition logic. Saves tokens, fetches profile/roles,
 * and sets up the local auth state.
 *
 * Step order matters: logout (clear stale state) → store account info →
 * persist tokens → clear the token cache → best-effort enrichment.
 *
 * @throws Error when Console-path API key creation returns no key; may also
 *   rethrow from createAndStoreApiKey itself.
 */
export async function installOAuthTokens(tokens: OAuthTokens): Promise<void> {
  // Clear old state before saving new credentials
  await performLogout({ clearOnboarding: false })

  // Reuse pre-fetched profile if available, otherwise fetch fresh
  const profile =
    tokens.profile ?? (await getOauthProfileFromOauthToken(tokens.accessToken))
  if (profile) {
    storeOAuthAccountInfo({
      accountUuid: profile.account.uuid,
      emailAddress: profile.account.email,
      organizationUuid: profile.organization.uuid,
      displayName: profile.account.display_name || undefined,
      hasExtraUsageEnabled:
        profile.organization.has_extra_usage_enabled ?? undefined,
      billingType: profile.organization.billing_type ?? undefined,
      subscriptionCreatedAt:
        profile.organization.subscription_created_at ?? undefined,
      accountCreatedAt: profile.account.created_at,
    })
  } else if (tokens.tokenAccount) {
    // Fallback to token exchange account data when profile endpoint fails
    storeOAuthAccountInfo({
      accountUuid: tokens.tokenAccount.uuid,
      emailAddress: tokens.tokenAccount.emailAddress,
      organizationUuid: tokens.tokenAccount.organizationUuid,
    })
  }

  // Persist tokens, then drop any cached copy so readers see the new ones.
  const storageResult = saveOAuthTokensIfNeeded(tokens)
  clearOAuthTokenCache()

  if (storageResult.warning) {
    logEvent('tengu_oauth_storage_warning', {
      warning:
        storageResult.warning as AnalyticsMetadata_I_VERIFIED_THIS_IS_NOT_CODE_OR_FILEPATHS,
    })
  }

  // Roles and first-token-date may fail for limited-scope tokens (e.g.
  // inference-only from setup-token). They're not required for core auth.
  await fetchAndStoreUserRoles(tokens.accessToken).catch(err =>
    logForDebugging(String(err), { level: 'error' }),
  )

  if (shouldUseClaudeAIAuth(tokens.scopes)) {
    await fetchAndStoreClaudeCodeFirstTokenDate().catch(err =>
      logForDebugging(String(err), { level: 'error' }),
    )
  } else {
    // API key creation is critical for Console users — let it throw.
    const apiKey = await createAndStoreApiKey(tokens.accessToken)
    if (!apiKey) {
      throw new Error(
        'Unable to create API key. The server accepted the request but did not return a key.',
      )
    }
  }

  await clearAuthRelatedCaches()
}
|
||||
|
||||
/**
 * `claude auth login` handler. Two paths:
 *  1. Env-var fast path — CLAUDE_CODE_OAUTH_REFRESH_TOKEN (plus required
 *     CLAUDE_CODE_OAUTH_SCOPES) is exchanged directly, skipping the browser.
 *  2. Interactive browser OAuth flow via OAuthService.
 * Exits the process on both success (0) and failure (1); never returns
 * normally after a completed login attempt.
 */
export async function authLogin({
  email,
  sso,
  console: useConsole,
  claudeai,
}: {
  email?: string
  sso?: boolean
  console?: boolean
  claudeai?: boolean
}): Promise<void> {
  // Mutually exclusive target flags.
  if (useConsole && claudeai) {
    process.stderr.write(
      'Error: --console and --claudeai cannot be used together.\n',
    )
    process.exit(1)
  }

  const settings = getInitialSettings()
  // forceLoginMethod is a hard constraint (enterprise setting) — matches ConsoleOAuthFlow behavior.
  // Without it, --console selects Console; --claudeai (or no flag) selects claude.ai.
  const loginWithClaudeAi = settings.forceLoginMethod
    ? settings.forceLoginMethod === 'claudeai'
    : !useConsole
  const orgUUID = settings.forceLoginOrgUUID

  // Fast path: if a refresh token is provided via env var, skip the browser
  // OAuth flow and exchange it directly for tokens.
  const envRefreshToken = process.env.CLAUDE_CODE_OAUTH_REFRESH_TOKEN
  if (envRefreshToken) {
    const envScopes = process.env.CLAUDE_CODE_OAUTH_SCOPES
    if (!envScopes) {
      process.stderr.write(
        'CLAUDE_CODE_OAUTH_SCOPES is required when using CLAUDE_CODE_OAUTH_REFRESH_TOKEN.\n' +
          'Set it to the space-separated scopes the refresh token was issued with\n' +
          '(e.g. "user:inference" or "user:profile user:inference user:sessions:claude_code user:mcp_servers").\n',
      )
      process.exit(1)
    }

    const scopes = envScopes.split(/\s+/).filter(Boolean)

    try {
      logEvent('tengu_login_from_refresh_token', {})

      const tokens = await refreshOAuthToken(envRefreshToken, { scopes })
      await installOAuthTokens(tokens)

      // Enterprise org pinning: reject logins into the wrong organization.
      const orgResult = await validateForceLoginOrg()
      if (!orgResult.valid) {
        process.stderr.write(orgResult.message + '\n')
        process.exit(1)
      }

      // Mark onboarding complete — interactive paths handle this via
      // the Onboarding component, but the env var path skips it.
      saveGlobalConfig(current => {
        if (current.hasCompletedOnboarding) return current
        return { ...current, hasCompletedOnboarding: true }
      })

      logEvent('tengu_oauth_success', {
        loginWithClaudeAi: shouldUseClaudeAIAuth(tokens.scopes),
      })
      process.stdout.write('Login successful.\n')
      process.exit(0)
    } catch (err) {
      logError(err)
      const sslHint = getSSLErrorHint(err)
      process.stderr.write(
        `Login failed: ${errorMessage(err)}\n${sslHint ? sslHint + '\n' : ''}`,
      )
      process.exit(1)
    }
  }

  const resolvedLoginMethod = sso ? 'sso' : undefined

  const oauthService = new OAuthService()

  try {
    logEvent('tengu_oauth_flow_start', { loginWithClaudeAi })

    const result = await oauthService.startOAuthFlow(
      async url => {
        process.stdout.write('Opening browser to sign in…\n')
        process.stdout.write(`If the browser didn't open, visit: ${url}\n`)
      },
      {
        loginWithClaudeAi,
        loginHint: email,
        loginMethod: resolvedLoginMethod,
        orgUUID,
      },
    )

    await installOAuthTokens(result)

    // Same org pinning check as the env-var path above.
    const orgResult = await validateForceLoginOrg()
    if (!orgResult.valid) {
      process.stderr.write(orgResult.message + '\n')
      process.exit(1)
    }

    logEvent('tengu_oauth_success', { loginWithClaudeAi })

    process.stdout.write('Login successful.\n')
    process.exit(0)
  } catch (err) {
    logError(err)
    const sslHint = getSSLErrorHint(err)
    process.stderr.write(
      `Login failed: ${errorMessage(err)}\n${sslHint ? sslHint + '\n' : ''}`,
    )
    process.exit(1)
  } finally {
    // Runs even after process.exit is mocked in tests; tears down the local
    // OAuth callback listener.
    oauthService.cleanup()
  }
}
|
||||
|
||||
/**
 * `claude auth status` handler — reports how the CLI is currently
 * authenticated, then exits with code 0 when logged in and 1 otherwise.
 *
 * Output:
 * - `opts.text`: human-readable "Label: value" lines built from the
 *   account/API-provider property builders.
 * - otherwise: a pretty-printed JSON object with loggedIn, authMethod and
 *   apiProvider, plus apiKeySource and claude.ai account fields when set.
 *   NOTE(review): `opts.json` is accepted but never read here — JSON is
 *   simply the non-text default; confirm that is intentional.
 */
export async function authStatus(opts: {
  json?: boolean
  text?: boolean
}): Promise<void> {
  // Gather every auth signal up front: OAuth token source, stored API key,
  // env var, OAuth account metadata, subscription, third-party services.
  const { source: authTokenSource, hasToken } = getAuthTokenSource()
  const { source: apiKeySource } = getAnthropicApiKeyWithSource()
  // The ANTHROPIC_API_KEY env var is deliberately ignored on Homespace.
  const hasApiKeyEnvVar =
    !!process.env.ANTHROPIC_API_KEY && !isRunningOnHomespace()
  const oauthAccount = getOauthAccountInfo()
  const subscriptionType = getSubscriptionType()
  const using3P = isUsing3PServices()
  const loggedIn =
    hasToken || apiKeySource !== 'none' || hasApiKeyEnvVar || using3P

  // Determine auth method — branch order matters: third-party services win,
  // then token sources, then API keys.
  let authMethod: string = 'none'
  if (using3P) {
    authMethod = 'third_party'
  } else if (authTokenSource === 'claude.ai') {
    authMethod = 'claude.ai'
  } else if (authTokenSource === 'apiKeyHelper') {
    authMethod = 'api_key_helper'
  } else if (authTokenSource !== 'none') {
    authMethod = 'oauth_token'
  } else if (apiKeySource === 'ANTHROPIC_API_KEY' || hasApiKeyEnvVar) {
    authMethod = 'api_key'
  } else if (apiKeySource === '/login managed key') {
    // A key provisioned via /login is reported as claude.ai auth.
    authMethod = 'claude.ai'
  }

  if (opts.text) {
    // Human-readable path: print each non-empty account/provider property.
    const properties = [
      ...buildAccountProperties(),
      ...buildAPIProviderProperties(),
    ]
    let hasAuthProperty = false
    for (const prop of properties) {
      // A property value may be a string or a string list; lists are
      // joined with ', '; anything else is skipped.
      const value =
        typeof prop.value === 'string'
          ? prop.value
          : Array.isArray(prop.value)
            ? prop.value.join(', ')
            : null
      if (value === null || value === 'none') {
        continue
      }
      hasAuthProperty = true
      if (prop.label) {
        process.stdout.write(`${prop.label}: ${value}\n`)
      } else {
        process.stdout.write(`${value}\n`)
      }
    }
    // Mention the env var explicitly when no property builder surfaced it.
    if (!hasAuthProperty && hasApiKeyEnvVar) {
      process.stdout.write('API key: ANTHROPIC_API_KEY\n')
    }
    if (!loggedIn) {
      process.stdout.write(
        'Not logged in. Run claude auth login to authenticate.\n',
      )
    }
  } else {
    // JSON path (default).
    const apiProvider = getAPIProvider()
    // Prefer the resolved key source; fall back to the raw env var.
    const resolvedApiKeySource =
      apiKeySource !== 'none'
        ? apiKeySource
        : hasApiKeyEnvVar
          ? 'ANTHROPIC_API_KEY'
          : null
    const output: Record<string, string | boolean | null> = {
      loggedIn,
      authMethod,
      apiProvider,
    }
    if (resolvedApiKeySource) {
      output.apiKeySource = resolvedApiKeySource
    }
    // Account details are only attached for claude.ai auth.
    if (authMethod === 'claude.ai') {
      output.email = oauthAccount?.emailAddress ?? null
      output.orgId = oauthAccount?.organizationUuid ?? null
      output.orgName = oauthAccount?.organizationName ?? null
      output.subscriptionType = subscriptionType ?? null
    }

    process.stdout.write(jsonStringify(output, null, 2) + '\n')
  }
  // Exit code doubles as a scriptable logged-in check.
  process.exit(loggedIn ? 0 : 1)
}
|
||||
|
||||
export async function authLogout(): Promise<void> {
|
||||
try {
|
||||
await performLogout({ clearOnboarding: false })
|
||||
} catch {
|
||||
process.stderr.write('Failed to log out.\n')
|
||||
process.exit(1)
|
||||
}
|
||||
process.stdout.write('Successfully logged out from your Anthropic account.\n')
|
||||
process.exit(0)
|
||||
}
|
||||
170
src/cli/handlers/autoMode.ts
Normal file
170
src/cli/handlers/autoMode.ts
Normal file
@ -0,0 +1,170 @@
|
||||
/**
|
||||
* Auto mode subcommand handlers — dump default/merged classifier rules and
|
||||
* critique user-written rules. Dynamically imported when `claude auto-mode ...` runs.
|
||||
*/
|
||||
|
||||
import { errorMessage } from '../../utils/errors.js'
|
||||
import {
|
||||
getMainLoopModel,
|
||||
parseUserSpecifiedModel,
|
||||
} from '../../utils/model/model.js'
|
||||
import {
|
||||
type AutoModeRules,
|
||||
buildDefaultExternalSystemPrompt,
|
||||
getDefaultExternalAutoModeRules,
|
||||
} from '../../utils/permissions/yoloClassifier.js'
|
||||
import { getAutoModeConfig } from '../../utils/settings/settings.js'
|
||||
import { sideQuery } from '../../utils/sideQuery.js'
|
||||
import { jsonStringify } from '../../utils/slowOperations.js'
|
||||
|
||||
function writeRules(rules: AutoModeRules): void {
|
||||
process.stdout.write(jsonStringify(rules, null, 2) + '\n')
|
||||
}
|
||||
|
||||
export function autoModeDefaultsHandler(): void {
|
||||
writeRules(getDefaultExternalAutoModeRules())
|
||||
}
|
||||
|
||||
/**
|
||||
* Dump the effective auto mode config: user settings where provided, external
|
||||
* defaults otherwise. Per-section REPLACE semantics — matches how
|
||||
* buildYoloSystemPrompt resolves the external template (a non-empty user
|
||||
* section replaces that section's defaults entirely; an empty/absent section
|
||||
* falls through to defaults).
|
||||
*/
|
||||
export function autoModeConfigHandler(): void {
|
||||
const config = getAutoModeConfig()
|
||||
const defaults = getDefaultExternalAutoModeRules()
|
||||
writeRules({
|
||||
allow: config?.allow?.length ? config.allow : defaults.allow,
|
||||
soft_deny: config?.soft_deny?.length
|
||||
? config.soft_deny
|
||||
: defaults.soft_deny,
|
||||
environment: config?.environment?.length
|
||||
? config.environment
|
||||
: defaults.environment,
|
||||
})
|
||||
}
|
||||
|
||||
// System prompt for the `claude auto-mode critique` sideQuery: positions the
// model as a reviewer of user-written auto mode classifier rules and asks it
// to evaluate each rule for clarity, completeness, conflicts and
// actionability. Runtime string — do not reword or reformat.
const CRITIQUE_SYSTEM_PROMPT =
  'You are an expert reviewer of auto mode classifier rules for Claude Code.\n' +
  '\n' +
  'Claude Code has an "auto mode" that uses an AI classifier to decide whether ' +
  'tool calls should be auto-approved or require user confirmation. Users can ' +
  'write custom rules in three categories:\n' +
  '\n' +
  '- **allow**: Actions the classifier should auto-approve\n' +
  '- **soft_deny**: Actions the classifier should block (require user confirmation)\n' +
  "- **environment**: Context about the user's setup that helps the classifier make decisions\n" +
  '\n' +
  "Your job is to critique the user's custom rules for clarity, completeness, " +
  'and potential issues. The classifier is an LLM that reads these rules as ' +
  'part of its system prompt.\n' +
  '\n' +
  'For each rule, evaluate:\n' +
  '1. **Clarity**: Is the rule unambiguous? Could the classifier misinterpret it?\n' +
  "2. **Completeness**: Are there gaps or edge cases the rule doesn't cover?\n" +
  '3. **Conflicts**: Do any of the rules conflict with each other?\n' +
  '4. **Actionability**: Is the rule specific enough for the classifier to act on?\n' +
  '\n' +
  'Be concise and constructive. Only comment on rules that could be improved. ' +
  'If all rules look good, say so.'
|
||||
|
||||
/**
 * `claude auto-mode critique` handler — sends the user's custom auto mode
 * rules, together with the full default classifier system prompt for
 * context, to a side model and prints its critique.
 *
 * @param options.model - optional model override; falls back to the main
 *   loop model when absent.
 *
 * Sets `process.exitCode = 1` (without hard-exiting) when the side query
 * fails; returns normally otherwise.
 */
export async function autoModeCritiqueHandler(options: {
  model?: string
}): Promise<void> {
  const config = getAutoModeConfig()
  // Any non-empty section counts as "has custom rules".
  const hasCustomRules =
    (config?.allow?.length ?? 0) > 0 ||
    (config?.soft_deny?.length ?? 0) > 0 ||
    (config?.environment?.length ?? 0) > 0

  if (!hasCustomRules) {
    // Nothing to critique — point the user at where rules live and exit.
    process.stdout.write(
      'No custom auto mode rules found.\n\n' +
        'Add rules to your settings file under autoMode.{allow, soft_deny, environment}.\n' +
        'Run `claude auto-mode defaults` to see the default rules for reference.\n',
    )
    return
  }

  const model = options.model
    ? parseUserSpecifiedModel(options.model)
    : getMainLoopModel()

  const defaults = getDefaultExternalAutoModeRules()
  const classifierPrompt = buildDefaultExternalSystemPrompt()

  // Render each customized section alongside the defaults it replaces;
  // sections with no custom rules render as '' and are omitted.
  const userRulesSummary =
    formatRulesForCritique('allow', config?.allow ?? [], defaults.allow) +
    formatRulesForCritique(
      'soft_deny',
      config?.soft_deny ?? [],
      defaults.soft_deny,
    ) +
    formatRulesForCritique(
      'environment',
      config?.environment ?? [],
      defaults.environment,
    )

  process.stdout.write('Analyzing your auto mode rules…\n\n')

  let response
  try {
    response = await sideQuery({
      querySource: 'auto_mode_critique',
      model,
      system: CRITIQUE_SYSTEM_PROMPT,
      skipSystemPromptPrefix: true,
      max_tokens: 4096,
      messages: [
        {
          role: 'user',
          content:
            'Here is the full classifier system prompt that the auto mode classifier receives:\n\n' +
            '<classifier_system_prompt>\n' +
            classifierPrompt +
            '\n</classifier_system_prompt>\n\n' +
            "Here are the user's custom rules that REPLACE the corresponding default sections:\n\n" +
            userRulesSummary +
            '\nPlease critique these custom rules.',
        },
      ],
    })
  } catch (error) {
    process.stderr.write(
      'Failed to analyze rules: ' + errorMessage(error) + '\n',
    )
    // Soft failure: mark the exit code but let the process unwind normally.
    process.exitCode = 1
    return
  }

  // Print the first text block of the response, if any.
  const textBlock = response.content.find(block => block.type === 'text')
  if (textBlock?.type === 'text') {
    process.stdout.write(textBlock.text + '\n')
  } else {
    process.stdout.write('No critique was generated. Please try again.\n')
  }
}
|
||||
|
||||
function formatRulesForCritique(
|
||||
section: string,
|
||||
userRules: string[],
|
||||
defaultRules: string[],
|
||||
): string {
|
||||
if (userRules.length === 0) return ''
|
||||
const customLines = userRules.map(r => '- ' + r).join('\n')
|
||||
const defaultLines = defaultRules.map(r => '- ' + r).join('\n')
|
||||
return (
|
||||
'## ' +
|
||||
section +
|
||||
' (custom rules replacing defaults)\n' +
|
||||
'Custom:\n' +
|
||||
customLines +
|
||||
'\n\n' +
|
||||
'Defaults being replaced:\n' +
|
||||
defaultLines +
|
||||
'\n\n'
|
||||
)
|
||||
}
|
||||
362
src/cli/handlers/mcp.tsx
Normal file
362
src/cli/handlers/mcp.tsx
Normal file
File diff suppressed because one or more lines are too long
878
src/cli/handlers/plugins.ts
Normal file
878
src/cli/handlers/plugins.ts
Normal file
@ -0,0 +1,878 @@
|
||||
/**
|
||||
* Plugin and marketplace subcommand handlers — extracted from main.tsx for lazy loading.
|
||||
* These are dynamically imported only when `claude plugin *` or `claude plugin marketplace *` runs.
|
||||
*/
|
||||
/* eslint-disable custom-rules/no-process-exit -- CLI subcommand handlers intentionally exit */
|
||||
import figures from 'figures'
|
||||
import { basename, dirname } from 'path'
|
||||
import { setUseCoworkPlugins } from '../../bootstrap/state.js'
|
||||
import {
|
||||
type AnalyticsMetadata_I_VERIFIED_THIS_IS_NOT_CODE_OR_FILEPATHS,
|
||||
type AnalyticsMetadata_I_VERIFIED_THIS_IS_PII_TAGGED,
|
||||
logEvent,
|
||||
} from '../../services/analytics/index.js'
|
||||
import {
|
||||
disableAllPlugins,
|
||||
disablePlugin,
|
||||
enablePlugin,
|
||||
installPlugin,
|
||||
uninstallPlugin,
|
||||
updatePluginCli,
|
||||
VALID_INSTALLABLE_SCOPES,
|
||||
VALID_UPDATE_SCOPES,
|
||||
} from '../../services/plugins/pluginCliCommands.js'
|
||||
import { getPluginErrorMessage } from '../../types/plugin.js'
|
||||
import { errorMessage } from '../../utils/errors.js'
|
||||
import { logError } from '../../utils/log.js'
|
||||
import { clearAllCaches } from '../../utils/plugins/cacheUtils.js'
|
||||
import { getInstallCounts } from '../../utils/plugins/installCounts.js'
|
||||
import {
|
||||
isPluginInstalled,
|
||||
loadInstalledPluginsV2,
|
||||
} from '../../utils/plugins/installedPluginsManager.js'
|
||||
import {
|
||||
createPluginId,
|
||||
loadMarketplacesWithGracefulDegradation,
|
||||
} from '../../utils/plugins/marketplaceHelpers.js'
|
||||
import {
|
||||
addMarketplaceSource,
|
||||
loadKnownMarketplacesConfig,
|
||||
refreshAllMarketplaces,
|
||||
refreshMarketplace,
|
||||
removeMarketplaceSource,
|
||||
saveMarketplaceToSettings,
|
||||
} from '../../utils/plugins/marketplaceManager.js'
|
||||
import { loadPluginMcpServers } from '../../utils/plugins/mcpPluginIntegration.js'
|
||||
import { parseMarketplaceInput } from '../../utils/plugins/parseMarketplaceInput.js'
|
||||
import {
|
||||
parsePluginIdentifier,
|
||||
scopeToSettingSource,
|
||||
} from '../../utils/plugins/pluginIdentifier.js'
|
||||
import { loadAllPlugins } from '../../utils/plugins/pluginLoader.js'
|
||||
import type { PluginSource } from '../../utils/plugins/schemas.js'
|
||||
import {
|
||||
type ValidationResult,
|
||||
validateManifest,
|
||||
validatePluginContents,
|
||||
} from '../../utils/plugins/validatePlugin.js'
|
||||
import { jsonStringify } from '../../utils/slowOperations.js'
|
||||
import { plural } from '../../utils/stringUtils.js'
|
||||
import { cliError, cliOk } from '../exit.js'
|
||||
|
||||
// Re-export for main.tsx to reference in option definitions
|
||||
export { VALID_INSTALLABLE_SCOPES, VALID_UPDATE_SCOPES }
|
||||
|
||||
/**
|
||||
* Helper function to handle marketplace command errors consistently.
|
||||
*/
|
||||
export function handleMarketplaceError(error: unknown, action: string): never {
|
||||
logError(error)
|
||||
cliError(`${figures.cross} Failed to ${action}: ${errorMessage(error)}`)
|
||||
}
|
||||
|
||||
/**
 * Print a validation result's errors and then warnings to the console:
 * each non-empty section gets a counted header, one "path: message" bullet
 * per entry, and a trailing blank line. Empty sections are omitted.
 */
function printValidationResult(result: ValidationResult): void {
  if (result.errors.length > 0) {
    // biome-ignore lint/suspicious/noConsole:: intentional console output
    console.log(
      `${figures.cross} Found ${result.errors.length} ${plural(result.errors.length, 'error')}:\n`,
    )
    result.errors.forEach(error => {
      // biome-ignore lint/suspicious/noConsole:: intentional console output
      console.log(`  ${figures.pointer} ${error.path}: ${error.message}`)
    })
    // biome-ignore lint/suspicious/noConsole:: intentional console output
    console.log('')
  }
  if (result.warnings.length > 0) {
    // biome-ignore lint/suspicious/noConsole:: intentional console output
    console.log(
      `${figures.warning} Found ${result.warnings.length} ${plural(result.warnings.length, 'warning')}:\n`,
    )
    result.warnings.forEach(warning => {
      // biome-ignore lint/suspicious/noConsole:: intentional console output
      console.log(`  ${figures.pointer} ${warning.path}: ${warning.message}`)
    })
    // biome-ignore lint/suspicious/noConsole:: intentional console output
    console.log('')
  }
}
|
||||
|
||||
// plugin validate
/**
 * `claude plugin validate <path>` handler. Validates a plugin or marketplace
 * manifest and, when the manifest lives inside a `.claude-plugin/` directory,
 * the plugin's content files (skills, agents, commands, hooks) as well.
 *
 * Exit codes: 0 = valid (warnings allowed), 1 = validation failed,
 * 2 = unexpected error while validating.
 */
export async function pluginValidateHandler(
  manifestPath: string,
  options: { cowork?: boolean },
): Promise<void> {
  if (options.cowork) setUseCoworkPlugins(true)
  try {
    const result = await validateManifest(manifestPath)

    // biome-ignore lint/suspicious/noConsole:: intentional console output
    console.log(`Validating ${result.fileType} manifest: ${result.filePath}\n`)
    printValidationResult(result)

    // If this is a plugin manifest located inside a .claude-plugin directory,
    // also validate the plugin's content files (skills, agents, commands,
    // hooks). Works whether the user passed a directory or the plugin.json
    // path directly.
    let contentResults: ValidationResult[] = []
    if (result.fileType === 'plugin') {
      const manifestDir = dirname(result.filePath)
      if (basename(manifestDir) === '.claude-plugin') {
        contentResults = await validatePluginContents(dirname(manifestDir))
        for (const r of contentResults) {
          // biome-ignore lint/suspicious/noConsole:: intentional console output
          console.log(`Validating ${r.fileType}: ${r.filePath}\n`)
          printValidationResult(r)
        }
      }
    }

    // Overall success requires the manifest AND every content file to pass;
    // warnings anywhere only change the success message, not the exit code.
    const allSuccess = result.success && contentResults.every(r => r.success)
    const hasWarnings =
      result.warnings.length > 0 ||
      contentResults.some(r => r.warnings.length > 0)

    if (allSuccess) {
      cliOk(
        hasWarnings
          ? `${figures.tick} Validation passed with warnings`
          : `${figures.tick} Validation passed`,
      )
    } else {
      // biome-ignore lint/suspicious/noConsole:: intentional console output
      console.log(`${figures.cross} Validation failed`)
      process.exit(1)
    }
  } catch (error) {
    logError(error)
    // biome-ignore lint/suspicious/noConsole:: intentional console output
    console.error(
      `${figures.cross} Unexpected error during validation: ${errorMessage(error)}`,
    )
    process.exit(2)
  }
}
|
||||
|
||||
// plugin list (lines 5217–5416)
/**
 * `claude plugin list` handler. Lists installed plugins (from the V2
 * install bookkeeping) plus session-only `--plugin-dir` plugins, either as
 * JSON (`--json`, optionally augmented with not-yet-installed marketplace
 * entries via `--available`) or as human-readable console output.
 *
 * NOTE(review): the JSON branch ends in cliOk(...) without a return — this
 * assumes cliOk terminates the process (CLI exit helper); confirm against
 * ../exit.js, otherwise the human path would also run after --json output.
 */
export async function pluginListHandler(options: {
  json?: boolean
  available?: boolean
  cowork?: boolean
}): Promise<void> {
  if (options.cowork) setUseCoworkPlugins(true)
  logEvent('tengu_plugin_list_command', {})

  const installedData = loadInstalledPluginsV2()
  // Lazy import keeps the startup-check module out of the CLI's cold path.
  const { getPluginEditableScopes } = await import(
    '../../utils/plugins/pluginStartupCheck.js'
  )
  const enabledPlugins = getPluginEditableScopes()

  const pluginIds = Object.keys(installedData.plugins)

  // Load all plugins once. The JSON and human paths both need:
  // - loadErrors (to show load failures per plugin)
  // - inline plugins (session-only via --plugin-dir, source='name@inline')
  //   which are NOT in installedData.plugins (V2 bookkeeping) — they must
  //   be surfaced separately or `plugin list` silently ignores --plugin-dir.
  const {
    enabled: loadedEnabled,
    disabled: loadedDisabled,
    errors: loadErrors,
  } = await loadAllPlugins()
  const allLoadedPlugins = [...loadedEnabled, ...loadedDisabled]
  const inlinePlugins = allLoadedPlugins.filter(p =>
    p.source.endsWith('@inline'),
  )
  // Path-level inline failures (dir doesn't exist, parse error before
  // manifest is read) use source='inline[N]'. Plugin-level errors after
  // manifest read use source='name@inline'. Collect both for the session
  // section — these are otherwise invisible since they have no pluginId.
  const inlineLoadErrors = loadErrors.filter(
    e => e.source.endsWith('@inline') || e.source.startsWith('inline['),
  )

  if (options.json) {
    // Create a map of plugin source to loaded plugin for quick lookup
    const loadedPluginMap = new Map(allLoadedPlugins.map(p => [p.source, p]))

    // One entry per installation (a plugin can be installed at several
    // scopes), accumulated across installed, inline, and failed-inline.
    const plugins: Array<{
      id: string
      version: string
      scope: string
      enabled: boolean
      installPath: string
      installedAt?: string
      lastUpdated?: string
      projectPath?: string
      mcpServers?: Record<string, unknown>
      errors?: string[]
    }> = []

    for (const pluginId of pluginIds.sort()) {
      const installations = installedData.plugins[pluginId]
      if (!installations || installations.length === 0) continue

      // Find loading errors for this plugin
      const pluginName = parsePluginIdentifier(pluginId).name
      const pluginErrors = loadErrors
        .filter(
          e =>
            e.source === pluginId || ('plugin' in e && e.plugin === pluginName),
        )
        .map(getPluginErrorMessage)

      for (const installation of installations) {
        // Try to find the loaded plugin to get MCP servers
        const loadedPlugin = loadedPluginMap.get(pluginId)
        let mcpServers: Record<string, unknown> | undefined

        if (loadedPlugin) {
          // Load MCP servers if not already cached
          const servers =
            loadedPlugin.mcpServers ||
            (await loadPluginMcpServers(loadedPlugin))
          if (servers && Object.keys(servers).length > 0) {
            mcpServers = servers
          }
        }

        plugins.push({
          id: pluginId,
          version: installation.version || 'unknown',
          scope: installation.scope,
          enabled: enabledPlugins.has(pluginId),
          installPath: installation.installPath,
          installedAt: installation.installedAt,
          lastUpdated: installation.lastUpdated,
          projectPath: installation.projectPath,
          mcpServers,
          errors: pluginErrors.length > 0 ? pluginErrors : undefined,
        })
      }
    }

    // Session-only plugins: scope='session', no install metadata.
    // Filter from inlineLoadErrors (not loadErrors) so an installed plugin
    // with the same manifest name doesn't cross-contaminate via e.plugin.
    // The e.plugin fallback catches the dirName≠manifestName case:
    // createPluginFromPath tags errors with `${dirName}@inline` but
    // plugin.source is reassigned to `${manifest.name}@inline` afterward
    // (pluginLoader.ts loadInlinePlugins), so e.source !== p.source when
    // a dev checkout dir like ~/code/my-fork/ has manifest name 'cool-plugin'.
    for (const p of inlinePlugins) {
      const servers = p.mcpServers || (await loadPluginMcpServers(p))
      const pErrors = inlineLoadErrors
        .filter(
          e => e.source === p.source || ('plugin' in e && e.plugin === p.name),
        )
        .map(getPluginErrorMessage)
      plugins.push({
        id: p.source,
        version: p.manifest.version ?? 'unknown',
        scope: 'session',
        enabled: p.enabled !== false,
        installPath: p.path,
        mcpServers:
          servers && Object.keys(servers).length > 0 ? servers : undefined,
        errors: pErrors.length > 0 ? pErrors : undefined,
      })
    }
    // Path-level inline failures (--plugin-dir /nonexistent): no LoadedPlugin
    // exists so the loop above can't surface them. Mirror the human-path
    // handling so JSON consumers see the failure instead of silent omission.
    for (const e of inlineLoadErrors.filter(e =>
      e.source.startsWith('inline['),
    )) {
      plugins.push({
        id: e.source,
        version: 'unknown',
        scope: 'session',
        enabled: false,
        installPath: 'path' in e ? e.path : '',
        errors: [getPluginErrorMessage(e)],
      })
    }

    // If --available is set, also load available plugins from marketplaces
    if (options.available) {
      const available: Array<{
        pluginId: string
        name: string
        description?: string
        marketplaceName: string
        version?: string
        source: PluginSource
        installCount?: number
      }> = []

      try {
        // Marketplace config and install counts are independent — fetch
        // them in parallel.
        const [config, installCounts] = await Promise.all([
          loadKnownMarketplacesConfig(),
          getInstallCounts(),
        ])
        const { marketplaces } =
          await loadMarketplacesWithGracefulDegradation(config)

        for (const {
          name: marketplaceName,
          data: marketplace,
        } of marketplaces) {
          if (marketplace) {
            for (const entry of marketplace.plugins) {
              const pluginId = createPluginId(entry.name, marketplaceName)
              // Only include plugins that are not already installed
              if (!isPluginInstalled(pluginId)) {
                available.push({
                  pluginId,
                  name: entry.name,
                  description: entry.description,
                  marketplaceName,
                  version: entry.version,
                  source: entry.source,
                  installCount: installCounts?.get(pluginId),
                })
              }
            }
          }
        }
      } catch {
        // Silently ignore marketplace loading errors
      }

      cliOk(jsonStringify({ installed: plugins, available }, null, 2))
    } else {
      cliOk(jsonStringify(plugins, null, 2))
    }
  }

  // ---- Human-readable path from here on. ----
  if (pluginIds.length === 0 && inlinePlugins.length === 0) {
    // inlineLoadErrors can exist with zero inline plugins (e.g. --plugin-dir
    // points at a nonexistent path). Don't early-exit over them — fall
    // through to the session section so the failure is visible.
    if (inlineLoadErrors.length === 0) {
      cliOk(
        'No plugins installed. Use `claude plugin install` to install a plugin.',
      )
    }
  }

  if (pluginIds.length > 0) {
    // biome-ignore lint/suspicious/noConsole:: intentional console output
    console.log('Installed plugins:\n')
  }

  for (const pluginId of pluginIds.sort()) {
    const installations = installedData.plugins[pluginId]
    if (!installations || installations.length === 0) continue

    // Find loading errors for this plugin
    const pluginName = parsePluginIdentifier(pluginId).name
    const pluginErrors = loadErrors.filter(
      e => e.source === pluginId || ('plugin' in e && e.plugin === pluginName),
    )

    for (const installation of installations) {
      const isEnabled = enabledPlugins.has(pluginId)
      // Load failures trump the enabled/disabled flag in the status line.
      const status =
        pluginErrors.length > 0
          ? `${figures.cross} failed to load`
          : isEnabled
            ? `${figures.tick} enabled`
            : `${figures.cross} disabled`
      const version = installation.version || 'unknown'
      const scope = installation.scope

      // biome-ignore lint/suspicious/noConsole:: intentional console output
      console.log(`  ${figures.pointer} ${pluginId}`)
      // biome-ignore lint/suspicious/noConsole:: intentional console output
      console.log(`    Version: ${version}`)
      // biome-ignore lint/suspicious/noConsole:: intentional console output
      console.log(`    Scope: ${scope}`)
      // biome-ignore lint/suspicious/noConsole:: intentional console output
      console.log(`    Status: ${status}`)
      for (const error of pluginErrors) {
        // biome-ignore lint/suspicious/noConsole:: intentional console output
        console.log(`    Error: ${getPluginErrorMessage(error)}`)
      }
      // biome-ignore lint/suspicious/noConsole:: intentional console output
      console.log('')
    }
  }

  if (inlinePlugins.length > 0 || inlineLoadErrors.length > 0) {
    // biome-ignore lint/suspicious/noConsole:: intentional console output
    console.log('Session-only plugins (--plugin-dir):\n')
    for (const p of inlinePlugins) {
      // Same dirName≠manifestName fallback as the JSON path above — error
      // sources use the dir basename but p.source uses the manifest name.
      const pErrors = inlineLoadErrors.filter(
        e => e.source === p.source || ('plugin' in e && e.plugin === p.name),
      )
      const status =
        pErrors.length > 0
          ? `${figures.cross} loaded with errors`
          : `${figures.tick} loaded`
      // biome-ignore lint/suspicious/noConsole:: intentional console output
      console.log(`  ${figures.pointer} ${p.source}`)
      // biome-ignore lint/suspicious/noConsole:: intentional console output
      console.log(`    Version: ${p.manifest.version ?? 'unknown'}`)
      // biome-ignore lint/suspicious/noConsole:: intentional console output
      console.log(`    Path: ${p.path}`)
      // biome-ignore lint/suspicious/noConsole:: intentional console output
      console.log(`    Status: ${status}`)
      for (const e of pErrors) {
        // biome-ignore lint/suspicious/noConsole:: intentional console output
        console.log(`    Error: ${getPluginErrorMessage(e)}`)
      }
      // biome-ignore lint/suspicious/noConsole:: intentional console output
      console.log('')
    }
    // Path-level failures: no LoadedPlugin object exists. Show them so
    // `--plugin-dir /typo` doesn't just silently produce nothing.
    for (const e of inlineLoadErrors.filter(e =>
      e.source.startsWith('inline['),
    )) {
      // biome-ignore lint/suspicious/noConsole:: intentional console output
      console.log(
        `  ${figures.pointer} ${e.source}: ${figures.cross} ${getPluginErrorMessage(e)}\n`,
      )
    }
  }

  cliOk()
}
|
||||
|
||||
// marketplace add (lines 5433–5487)
/**
 * `claude plugin marketplace add <source>` handler. Parses the source
 * (owner/repo, URL, or local path), validates --scope, applies --sparse
 * (github/git sources only), materializes the marketplace on disk, records
 * it in settings at the requested scope, and clears plugin caches.
 *
 * NOTE(review): the code after each cliError(...) call assumes cliError
 * never returns (it is typed `never` in handleMarketplaceError above), so
 * `parsed` is safely narrowed past the guards — confirm against ../exit.js.
 */
export async function marketplaceAddHandler(
  source: string,
  options: { cowork?: boolean; sparse?: string[]; scope?: string },
): Promise<void> {
  if (options.cowork) setUseCoworkPlugins(true)
  try {
    const parsed = await parseMarketplaceInput(source)

    if (!parsed) {
      cliError(
        `${figures.cross} Invalid marketplace source format. Try: owner/repo, https://..., or ./path`,
      )
    }

    // Parser can also return a structured error instead of a source.
    if ('error' in parsed) {
      cliError(`${figures.cross} ${parsed.error}`)
    }

    // Validate scope
    const scope = options.scope ?? 'user'
    if (scope !== 'user' && scope !== 'project' && scope !== 'local') {
      cliError(
        `${figures.cross} Invalid scope '${scope}'. Use: user, project, or local`,
      )
    }
    const settingSource = scopeToSettingSource(scope)

    let marketplaceSource = parsed

    // --sparse only makes sense for checkout-based sources (github/git).
    if (options.sparse && options.sparse.length > 0) {
      if (
        marketplaceSource.source === 'github' ||
        marketplaceSource.source === 'git'
      ) {
        marketplaceSource = {
          ...marketplaceSource,
          sparsePaths: options.sparse,
        }
      } else {
        cliError(
          `${figures.cross} --sparse is only supported for github and git marketplace sources (got: ${marketplaceSource.source})`,
        )
      }
    }

    // biome-ignore lint/suspicious/noConsole:: intentional console output
    console.log('Adding marketplace...')

    // Materialize the marketplace; progress messages are echoed as they come.
    const { name, alreadyMaterialized, resolvedSource } =
      await addMarketplaceSource(marketplaceSource, message => {
        // biome-ignore lint/suspicious/noConsole:: intentional console output
        console.log(message)
      })

    // Write intent to settings at the requested scope
    saveMarketplaceToSettings(name, { source: resolvedSource }, settingSource)

    clearAllCaches()

    // Analytics: report the repo for github sources, the source kind otherwise.
    let sourceType = marketplaceSource.source
    if (marketplaceSource.source === 'github') {
      sourceType =
        marketplaceSource.repo as AnalyticsMetadata_I_VERIFIED_THIS_IS_NOT_CODE_OR_FILEPATHS
    }
    logEvent('tengu_marketplace_added', {
      source_type:
        sourceType as AnalyticsMetadata_I_VERIFIED_THIS_IS_NOT_CODE_OR_FILEPATHS,
    })

    cliOk(
      alreadyMaterialized
        ? `${figures.tick} Marketplace '${name}' already on disk — declared in ${scope} settings`
        : `${figures.tick} Successfully added marketplace: ${name} (declared in ${scope} settings)`,
    )
  } catch (error) {
    handleMarketplaceError(error, 'add marketplace')
  }
}
|
||||
|
||||
// marketplace list (lines 5497–5565)
/**
 * `claude marketplace list` handler. Prints the configured marketplaces
 * either as pretty JSON (--json) or as a human-readable listing.
 *
 * NOTE(review): `cliOk` appears to terminate the command (the code after
 * each call would otherwise also run, e.g. the --json branch would fall
 * through into the human-readable listing) — TODO confirm cliOk's
 * termination semantics against its definition.
 *
 * @param options.json   Emit machine-readable JSON instead of text.
 * @param options.cowork Route plugin config through cowork settings.
 */
export async function marketplaceListHandler(options: {
  json?: boolean
  cowork?: boolean
}): Promise<void> {
  if (options.cowork) setUseCoworkPlugins(true)
  try {
    const config = await loadKnownMarketplacesConfig()
    const names = Object.keys(config)

    if (options.json) {
      // names.sort() mutates the array in place, so if execution continued
      // past cliOk below, the text listing would also be sorted.
      const marketplaces = names.sort().map(name => {
        const marketplace = config[name]
        const source = marketplace?.source
        // Only the field relevant to the source kind is included; the
        // conditional spreads keep absent kinds out of the JSON entirely.
        return {
          name,
          source: source?.source,
          ...(source?.source === 'github' && { repo: source.repo }),
          ...(source?.source === 'git' && { url: source.url }),
          ...(source?.source === 'url' && { url: source.url }),
          ...(source?.source === 'directory' && { path: source.path }),
          ...(source?.source === 'file' && { path: source.path }),
          installLocation: marketplace?.installLocation,
        }
      })
      cliOk(jsonStringify(marketplaces, null, 2))
    }

    if (names.length === 0) {
      cliOk('No marketplaces configured')
    }

    // biome-ignore lint/suspicious/noConsole:: intentional console output
    console.log('Configured marketplaces:\n')
    names.forEach(name => {
      const marketplace = config[name]
      // biome-ignore lint/suspicious/noConsole:: intentional console output
      console.log(`  ${figures.pointer} ${name}`)

      // One "Source:" line per marketplace, formatted per source kind.
      if (marketplace?.source) {
        const src = marketplace.source
        if (src.source === 'github') {
          // biome-ignore lint/suspicious/noConsole:: intentional console output
          console.log(`    Source: GitHub (${src.repo})`)
        } else if (src.source === 'git') {
          // biome-ignore lint/suspicious/noConsole:: intentional console output
          console.log(`    Source: Git (${src.url})`)
        } else if (src.source === 'url') {
          // biome-ignore lint/suspicious/noConsole:: intentional console output
          console.log(`    Source: URL (${src.url})`)
        } else if (src.source === 'directory') {
          // biome-ignore lint/suspicious/noConsole:: intentional console output
          console.log(`    Source: Directory (${src.path})`)
        } else if (src.source === 'file') {
          // biome-ignore lint/suspicious/noConsole:: intentional console output
          console.log(`    Source: File (${src.path})`)
        }
      }
      // biome-ignore lint/suspicious/noConsole:: intentional console output
      console.log('')
    })

    cliOk()
  } catch (error) {
    handleMarketplaceError(error, 'list marketplaces')
  }
}
|
||||
|
||||
// marketplace remove (lines 5576–5598)
/**
 * `claude marketplace remove` handler. Removes a marketplace by name,
 * invalidates caches, logs telemetry, and reports success.
 *
 * @param name           Marketplace name to remove.
 * @param options.cowork Route plugin config through cowork settings.
 */
export async function marketplaceRemoveHandler(
  name: string,
  options: { cowork?: boolean },
): Promise<void> {
  if (options.cowork) setUseCoworkPlugins(true)
  try {
    // Remove first, then clear caches so later commands don't see stale
    // marketplace/plugin metadata.
    await removeMarketplaceSource(name)
    clearAllCaches()

    logEvent('tengu_marketplace_removed', {
      marketplace_name:
        name as AnalyticsMetadata_I_VERIFIED_THIS_IS_NOT_CODE_OR_FILEPATHS,
    })

    cliOk(`${figures.tick} Successfully removed marketplace: ${name}`)
  } catch (error) {
    handleMarketplaceError(error, 'remove marketplace')
  }
}
|
||||
|
||||
// marketplace update (lines 5609–5672)
|
||||
export async function marketplaceUpdateHandler(
|
||||
name: string | undefined,
|
||||
options: { cowork?: boolean },
|
||||
): Promise<void> {
|
||||
if (options.cowork) setUseCoworkPlugins(true)
|
||||
try {
|
||||
if (name) {
|
||||
// biome-ignore lint/suspicious/noConsole:: intentional console output
|
||||
console.log(`Updating marketplace: ${name}...`)
|
||||
|
||||
await refreshMarketplace(name, message => {
|
||||
// biome-ignore lint/suspicious/noConsole:: intentional console output
|
||||
console.log(message)
|
||||
})
|
||||
|
||||
clearAllCaches()
|
||||
|
||||
logEvent('tengu_marketplace_updated', {
|
||||
marketplace_name:
|
||||
name as AnalyticsMetadata_I_VERIFIED_THIS_IS_NOT_CODE_OR_FILEPATHS,
|
||||
})
|
||||
|
||||
cliOk(`${figures.tick} Successfully updated marketplace: ${name}`)
|
||||
} else {
|
||||
const config = await loadKnownMarketplacesConfig()
|
||||
const marketplaceNames = Object.keys(config)
|
||||
|
||||
if (marketplaceNames.length === 0) {
|
||||
cliOk('No marketplaces configured')
|
||||
}
|
||||
|
||||
// biome-ignore lint/suspicious/noConsole:: intentional console output
|
||||
console.log(`Updating ${marketplaceNames.length} marketplace(s)...`)
|
||||
|
||||
await refreshAllMarketplaces()
|
||||
clearAllCaches()
|
||||
|
||||
logEvent('tengu_marketplace_updated_all', {
|
||||
count:
|
||||
marketplaceNames.length as AnalyticsMetadata_I_VERIFIED_THIS_IS_NOT_CODE_OR_FILEPATHS,
|
||||
})
|
||||
|
||||
cliOk(
|
||||
`${figures.tick} Successfully updated ${marketplaceNames.length} marketplace(s)`,
|
||||
)
|
||||
}
|
||||
} catch (error) {
|
||||
handleMarketplaceError(error, 'update marketplace(s)')
|
||||
}
|
||||
}
|
||||
|
||||
// plugin install (lines 5690–5721)
|
||||
export async function pluginInstallHandler(
|
||||
plugin: string,
|
||||
options: { scope?: string; cowork?: boolean },
|
||||
): Promise<void> {
|
||||
if (options.cowork) setUseCoworkPlugins(true)
|
||||
const scope = options.scope || 'user'
|
||||
if (options.cowork && scope !== 'user') {
|
||||
cliError('--cowork can only be used with user scope')
|
||||
}
|
||||
if (
|
||||
!VALID_INSTALLABLE_SCOPES.includes(
|
||||
scope as (typeof VALID_INSTALLABLE_SCOPES)[number],
|
||||
)
|
||||
) {
|
||||
cliError(
|
||||
`Invalid scope: ${scope}. Must be one of: ${VALID_INSTALLABLE_SCOPES.join(', ')}.`,
|
||||
)
|
||||
}
|
||||
// _PROTO_* routes to PII-tagged plugin_name/marketplace_name BQ columns.
|
||||
// Unredacted plugin arg was previously logged to general-access
|
||||
// additional_metadata for all users — dropped in favor of the privileged
|
||||
// column route. marketplace may be undefined (fires before resolution).
|
||||
const { name, marketplace } = parsePluginIdentifier(plugin)
|
||||
logEvent('tengu_plugin_install_command', {
|
||||
_PROTO_plugin_name: name as AnalyticsMetadata_I_VERIFIED_THIS_IS_PII_TAGGED,
|
||||
...(marketplace && {
|
||||
_PROTO_marketplace_name:
|
||||
marketplace as AnalyticsMetadata_I_VERIFIED_THIS_IS_PII_TAGGED,
|
||||
}),
|
||||
scope: scope as AnalyticsMetadata_I_VERIFIED_THIS_IS_NOT_CODE_OR_FILEPATHS,
|
||||
})
|
||||
|
||||
await installPlugin(plugin, scope as 'user' | 'project' | 'local')
|
||||
}
|
||||
|
||||
// plugin uninstall (lines 5738–5769)
|
||||
export async function pluginUninstallHandler(
|
||||
plugin: string,
|
||||
options: { scope?: string; cowork?: boolean; keepData?: boolean },
|
||||
): Promise<void> {
|
||||
if (options.cowork) setUseCoworkPlugins(true)
|
||||
const scope = options.scope || 'user'
|
||||
if (options.cowork && scope !== 'user') {
|
||||
cliError('--cowork can only be used with user scope')
|
||||
}
|
||||
if (
|
||||
!VALID_INSTALLABLE_SCOPES.includes(
|
||||
scope as (typeof VALID_INSTALLABLE_SCOPES)[number],
|
||||
)
|
||||
) {
|
||||
cliError(
|
||||
`Invalid scope: ${scope}. Must be one of: ${VALID_INSTALLABLE_SCOPES.join(', ')}.`,
|
||||
)
|
||||
}
|
||||
const { name, marketplace } = parsePluginIdentifier(plugin)
|
||||
logEvent('tengu_plugin_uninstall_command', {
|
||||
_PROTO_plugin_name: name as AnalyticsMetadata_I_VERIFIED_THIS_IS_PII_TAGGED,
|
||||
...(marketplace && {
|
||||
_PROTO_marketplace_name:
|
||||
marketplace as AnalyticsMetadata_I_VERIFIED_THIS_IS_PII_TAGGED,
|
||||
}),
|
||||
scope: scope as AnalyticsMetadata_I_VERIFIED_THIS_IS_NOT_CODE_OR_FILEPATHS,
|
||||
})
|
||||
|
||||
await uninstallPlugin(
|
||||
plugin,
|
||||
scope as 'user' | 'project' | 'local',
|
||||
options.keepData,
|
||||
)
|
||||
}
|
||||
|
||||
// plugin enable (lines 5783–5818)
|
||||
export async function pluginEnableHandler(
|
||||
plugin: string,
|
||||
options: { scope?: string; cowork?: boolean },
|
||||
): Promise<void> {
|
||||
if (options.cowork) setUseCoworkPlugins(true)
|
||||
let scope: (typeof VALID_INSTALLABLE_SCOPES)[number] | undefined
|
||||
if (options.scope) {
|
||||
if (
|
||||
!VALID_INSTALLABLE_SCOPES.includes(
|
||||
options.scope as (typeof VALID_INSTALLABLE_SCOPES)[number],
|
||||
)
|
||||
) {
|
||||
cliError(
|
||||
`Invalid scope "${options.scope}". Valid scopes: ${VALID_INSTALLABLE_SCOPES.join(', ')}`,
|
||||
)
|
||||
}
|
||||
scope = options.scope as (typeof VALID_INSTALLABLE_SCOPES)[number]
|
||||
}
|
||||
if (options.cowork && scope !== undefined && scope !== 'user') {
|
||||
cliError('--cowork can only be used with user scope')
|
||||
}
|
||||
|
||||
// --cowork always operates at user scope
|
||||
if (options.cowork && scope === undefined) {
|
||||
scope = 'user'
|
||||
}
|
||||
|
||||
const { name, marketplace } = parsePluginIdentifier(plugin)
|
||||
logEvent('tengu_plugin_enable_command', {
|
||||
_PROTO_plugin_name: name as AnalyticsMetadata_I_VERIFIED_THIS_IS_PII_TAGGED,
|
||||
...(marketplace && {
|
||||
_PROTO_marketplace_name:
|
||||
marketplace as AnalyticsMetadata_I_VERIFIED_THIS_IS_PII_TAGGED,
|
||||
}),
|
||||
scope: (scope ??
|
||||
'auto') as AnalyticsMetadata_I_VERIFIED_THIS_IS_NOT_CODE_OR_FILEPATHS,
|
||||
})
|
||||
|
||||
await enablePlugin(plugin, scope)
|
||||
}
|
||||
|
||||
// plugin disable (lines 5833–5902)
|
||||
export async function pluginDisableHandler(
|
||||
plugin: string | undefined,
|
||||
options: { scope?: string; cowork?: boolean; all?: boolean },
|
||||
): Promise<void> {
|
||||
if (options.all && plugin) {
|
||||
cliError('Cannot use --all with a specific plugin')
|
||||
}
|
||||
|
||||
if (!options.all && !plugin) {
|
||||
cliError('Please specify a plugin name or use --all to disable all plugins')
|
||||
}
|
||||
|
||||
if (options.cowork) setUseCoworkPlugins(true)
|
||||
|
||||
if (options.all) {
|
||||
if (options.scope) {
|
||||
cliError('Cannot use --scope with --all')
|
||||
}
|
||||
|
||||
// No _PROTO_plugin_name here — --all disables all plugins.
|
||||
// Distinguishable from the specific-plugin branch by plugin_name IS NULL.
|
||||
logEvent('tengu_plugin_disable_command', {})
|
||||
|
||||
await disableAllPlugins()
|
||||
return
|
||||
}
|
||||
|
||||
let scope: (typeof VALID_INSTALLABLE_SCOPES)[number] | undefined
|
||||
if (options.scope) {
|
||||
if (
|
||||
!VALID_INSTALLABLE_SCOPES.includes(
|
||||
options.scope as (typeof VALID_INSTALLABLE_SCOPES)[number],
|
||||
)
|
||||
) {
|
||||
cliError(
|
||||
`Invalid scope "${options.scope}". Valid scopes: ${VALID_INSTALLABLE_SCOPES.join(', ')}`,
|
||||
)
|
||||
}
|
||||
scope = options.scope as (typeof VALID_INSTALLABLE_SCOPES)[number]
|
||||
}
|
||||
if (options.cowork && scope !== undefined && scope !== 'user') {
|
||||
cliError('--cowork can only be used with user scope')
|
||||
}
|
||||
|
||||
// --cowork always operates at user scope
|
||||
if (options.cowork && scope === undefined) {
|
||||
scope = 'user'
|
||||
}
|
||||
|
||||
const { name, marketplace } = parsePluginIdentifier(plugin!)
|
||||
logEvent('tengu_plugin_disable_command', {
|
||||
_PROTO_plugin_name: name as AnalyticsMetadata_I_VERIFIED_THIS_IS_PII_TAGGED,
|
||||
...(marketplace && {
|
||||
_PROTO_marketplace_name:
|
||||
marketplace as AnalyticsMetadata_I_VERIFIED_THIS_IS_PII_TAGGED,
|
||||
}),
|
||||
scope: (scope ??
|
||||
'auto') as AnalyticsMetadata_I_VERIFIED_THIS_IS_NOT_CODE_OR_FILEPATHS,
|
||||
})
|
||||
|
||||
await disablePlugin(plugin!, scope)
|
||||
}
|
||||
|
||||
// plugin update (lines 5918–5948)
|
||||
export async function pluginUpdateHandler(
|
||||
plugin: string,
|
||||
options: { scope?: string; cowork?: boolean },
|
||||
): Promise<void> {
|
||||
if (options.cowork) setUseCoworkPlugins(true)
|
||||
const { name, marketplace } = parsePluginIdentifier(plugin)
|
||||
logEvent('tengu_plugin_update_command', {
|
||||
_PROTO_plugin_name: name as AnalyticsMetadata_I_VERIFIED_THIS_IS_PII_TAGGED,
|
||||
...(marketplace && {
|
||||
_PROTO_marketplace_name:
|
||||
marketplace as AnalyticsMetadata_I_VERIFIED_THIS_IS_PII_TAGGED,
|
||||
}),
|
||||
})
|
||||
|
||||
let scope: (typeof VALID_UPDATE_SCOPES)[number] = 'user'
|
||||
if (options.scope) {
|
||||
if (
|
||||
!VALID_UPDATE_SCOPES.includes(
|
||||
options.scope as (typeof VALID_UPDATE_SCOPES)[number],
|
||||
)
|
||||
) {
|
||||
cliError(
|
||||
`Invalid scope "${options.scope}". Valid scopes: ${VALID_UPDATE_SCOPES.join(', ')}`,
|
||||
)
|
||||
}
|
||||
scope = options.scope as (typeof VALID_UPDATE_SCOPES)[number]
|
||||
}
|
||||
if (options.cowork && scope !== 'user') {
|
||||
cliError('--cowork can only be used with user scope')
|
||||
}
|
||||
|
||||
await updatePluginCli(plugin, scope)
|
||||
}
|
||||
110
src/cli/handlers/util.tsx
Normal file
110
src/cli/handlers/util.tsx
Normal file
File diff suppressed because one or more lines are too long
32
src/cli/ndjsonSafeStringify.ts
Normal file
32
src/cli/ndjsonSafeStringify.ts
Normal file
@ -0,0 +1,32 @@
|
||||
import { jsonStringify } from '../utils/slowOperations.js'
|
||||
|
||||
// JSON.stringify emits U+2028/U+2029 raw (valid per ECMA-404). When the
|
||||
// output is a single NDJSON line, any receiver that uses JavaScript
|
||||
// line-terminator semantics (ECMA-262 §11.3 — \n \r U+2028 U+2029) to
|
||||
// split the stream will cut the JSON mid-string. ProcessTransport now
|
||||
// silently skips non-JSON lines rather than crashing (gh-28405), but
|
||||
// the truncated fragment is still lost — the message is silently dropped.
|
||||
//
|
||||
// The \uXXXX form is equivalent JSON (parses to the same string) but
|
||||
// can never be mistaken for a line terminator by ANY receiver. This is
|
||||
// what ES2019's "Subsume JSON" proposal and Node's util.inspect do.
|
||||
//
|
||||
// Single regex with alternation: the callback's one dispatch per match
|
||||
// is cheaper than two full-string scans.
|
||||
const JS_LINE_TERMINATORS = /\u2028|\u2029/g
|
||||
|
||||
function escapeJsLineTerminators(json: string): string {
|
||||
return json.replace(JS_LINE_TERMINATORS, c =>
|
||||
c === '\u2028' ? '\\u2028' : '\\u2029',
|
||||
)
|
||||
}
|
||||
|
||||
/**
|
||||
* JSON.stringify for one-message-per-line transports. Escapes U+2028
|
||||
* LINE SEPARATOR and U+2029 PARAGRAPH SEPARATOR so the serialized output
|
||||
* cannot be broken by a line-splitting receiver. Output is still valid
|
||||
* JSON and parses to the same value.
|
||||
*/
|
||||
export function ndjsonSafeStringify(value: unknown): string {
|
||||
return escapeJsLineTerminators(jsonStringify(value))
|
||||
}
|
||||
5594
src/cli/print.ts
Normal file
5594
src/cli/print.ts
Normal file
File diff suppressed because it is too large
Load Diff
255
src/cli/remoteIO.ts
Normal file
255
src/cli/remoteIO.ts
Normal file
@ -0,0 +1,255 @@
|
||||
import type { StdoutMessage } from 'src/entrypoints/sdk/controlTypes.js'
|
||||
import { PassThrough } from 'stream'
|
||||
import { URL } from 'url'
|
||||
import { getSessionId } from '../bootstrap/state.js'
|
||||
import { getPollIntervalConfig } from '../bridge/pollConfig.js'
|
||||
import { registerCleanup } from '../utils/cleanupRegistry.js'
|
||||
import { setCommandLifecycleListener } from '../utils/commandLifecycle.js'
|
||||
import { isDebugMode, logForDebugging } from '../utils/debug.js'
|
||||
import { logForDiagnosticsNoPII } from '../utils/diagLogs.js'
|
||||
import { isEnvTruthy } from '../utils/envUtils.js'
|
||||
import { errorMessage } from '../utils/errors.js'
|
||||
import { gracefulShutdown } from '../utils/gracefulShutdown.js'
|
||||
import { logError } from '../utils/log.js'
|
||||
import { writeToStdout } from '../utils/process.js'
|
||||
import { getSessionIngressAuthToken } from '../utils/sessionIngressAuth.js'
|
||||
import {
|
||||
setSessionMetadataChangedListener,
|
||||
setSessionStateChangedListener,
|
||||
} from '../utils/sessionState.js'
|
||||
import {
|
||||
setInternalEventReader,
|
||||
setInternalEventWriter,
|
||||
} from '../utils/sessionStorage.js'
|
||||
import { ndjsonSafeStringify } from './ndjsonSafeStringify.js'
|
||||
import { StructuredIO } from './structuredIO.js'
|
||||
import { CCRClient, CCRInitError } from './transports/ccrClient.js'
|
||||
import { SSETransport } from './transports/SSETransport.js'
|
||||
import type { Transport } from './transports/Transport.js'
|
||||
import { getTransportForUrl } from './transports/transportUtils.js'
|
||||
|
||||
/**
 * Bidirectional streaming for SDK mode with session tracking
 * Supports WebSocket transport
 *
 * Data from the remote transport is piped into a local PassThrough stream,
 * which StructuredIO (the base class) consumes line-by-line; outbound
 * messages go through write(), which routes to either the CCR v2 client or
 * the raw transport.
 */
export class RemoteIO extends StructuredIO {
  /** Parsed form of the streamUrl given to the constructor. */
  private url: URL
  /** Protocol-specific connection chosen by getTransportForUrl(). */
  private transport: Transport
  /** Local pipe feeding StructuredIO; transport data is written into it. */
  private inputStream: PassThrough
  /** True when CLAUDE_CODE_ENVIRONMENT_KIND === 'bridge'. */
  private readonly isBridge: boolean = false
  private readonly isDebug: boolean = false
  /** CCR v2 client; non-null only when CLAUDE_CODE_USE_CCR_V2 is truthy. */
  private ccrClient: CCRClient | null = null
  /** Bridge-only keep_alive interval handle; null when disabled/stopped. */
  private keepAliveTimer: ReturnType<typeof setInterval> | null = null

  /**
   * @param streamUrl Remote endpoint; its protocol selects the transport.
   * @param initialPrompt Optional async chunks (e.g. piped stdin) fed into
   *   the input stream after construction.
   * @param replayUserMessages Forwarded to StructuredIO — presumably
   *   controls echoing of user messages back out; TODO confirm against
   *   StructuredIO's handling.
   */
  constructor(
    streamUrl: string,
    initialPrompt?: AsyncIterable<string>,
    replayUserMessages?: boolean,
  ) {
    const inputStream = new PassThrough({ encoding: 'utf8' })
    super(inputStream, replayUserMessages)
    this.inputStream = inputStream
    this.url = new URL(streamUrl)

    // Prepare headers with session token if available
    const headers: Record<string, string> = {}
    const sessionToken = getSessionIngressAuthToken()
    if (sessionToken) {
      headers['Authorization'] = `Bearer ${sessionToken}`
    } else {
      logForDebugging('[remote-io] No session ingress token available', {
        level: 'error',
      })
    }

    // Add environment runner version if available (set by Environment Manager)
    const erVersion = process.env.CLAUDE_CODE_ENVIRONMENT_RUNNER_VERSION
    if (erVersion) {
      headers['x-environment-runner-version'] = erVersion
    }

    // Provide a callback that re-reads the session token dynamically.
    // When the parent process refreshes the token (via token file or env var),
    // the transport can pick it up on reconnection.
    const refreshHeaders = (): Record<string, string> => {
      const h: Record<string, string> = {}
      const freshToken = getSessionIngressAuthToken()
      if (freshToken) {
        h['Authorization'] = `Bearer ${freshToken}`
      }
      const freshErVersion = process.env.CLAUDE_CODE_ENVIRONMENT_RUNNER_VERSION
      if (freshErVersion) {
        h['x-environment-runner-version'] = freshErVersion
      }
      return h
    }

    // Get appropriate transport based on URL protocol
    this.transport = getTransportForUrl(
      this.url,
      headers,
      getSessionId(),
      refreshHeaders,
    )

    // Set up data callback
    this.isBridge = process.env.CLAUDE_CODE_ENVIRONMENT_KIND === 'bridge'
    this.isDebug = isDebugMode()
    this.transport.setOnData((data: string) => {
      this.inputStream.write(data)
      // Bridge + debug: mirror raw inbound data to stdout for inspection.
      if (this.isBridge && this.isDebug) {
        writeToStdout(data.endsWith('\n') ? data : data + '\n')
      }
    })

    // Set up close callback to handle connection failures
    this.transport.setOnClose(() => {
      // End the input stream to trigger graceful shutdown
      this.inputStream.end()
    })

    // Initialize CCR v2 client (heartbeats, epoch, state reporting, event writes).
    // The CCRClient constructor wires the SSE received-ack handler
    // synchronously, so new CCRClient() MUST run before transport.connect() —
    // otherwise early SSE frames hit an unwired onEventCallback and their
    // 'received' delivery acks are silently dropped.
    if (isEnvTruthy(process.env.CLAUDE_CODE_USE_CCR_V2)) {
      // CCR v2 is SSE+POST by definition. getTransportForUrl returns
      // SSETransport under the same env var, but the two checks live in
      // different files — assert the invariant so a future decoupling
      // fails loudly here instead of confusingly inside CCRClient.
      if (!(this.transport instanceof SSETransport)) {
        throw new Error(
          'CCR v2 requires SSETransport; check getTransportForUrl',
        )
      }
      this.ccrClient = new CCRClient(this.transport, this.url)
      const init = this.ccrClient.initialize()
      // restoredWorkerState resolves to null on init failure so awaiting
      // callers don't reject; the failure itself is handled just below.
      this.restoredWorkerState = init.catch(() => null)
      init.catch((error: unknown) => {
        logForDiagnosticsNoPII('error', 'cli_worker_lifecycle_init_failed', {
          reason: error instanceof CCRInitError ? error.reason : 'unknown',
        })
        logError(
          new Error(`CCRClient initialization failed: ${errorMessage(error)}`),
        )
        void gracefulShutdown(1, 'other')
      })
      registerCleanup(async () => this.ccrClient?.close())

      // Register internal event writer for transcript persistence.
      // When set, sessionStorage writes transcript messages as CCR v2
      // internal events instead of v1 Session Ingress.
      setInternalEventWriter((eventType, payload, options) =>
        this.ccrClient!.writeInternalEvent(eventType, payload, options),
      )

      // Register internal event readers for session resume.
      // When set, hydrateFromCCRv2InternalEvents() can fetch foreground
      // and subagent internal events to reconstruct conversation state.
      setInternalEventReader(
        () => this.ccrClient!.readInternalEvents(),
        () => this.ccrClient!.readSubagentInternalEvents(),
      )

      // Map command lifecycle states onto the CCR delivery-status vocabulary.
      const LIFECYCLE_TO_DELIVERY = {
        started: 'processing',
        completed: 'processed',
      } as const
      setCommandLifecycleListener((uuid, state) => {
        this.ccrClient?.reportDelivery(uuid, LIFECYCLE_TO_DELIVERY[state])
      })
      setSessionStateChangedListener((state, details) => {
        this.ccrClient?.reportState(state, details)
      })
      setSessionMetadataChangedListener(metadata => {
        this.ccrClient?.reportMetadata(metadata)
      })
    }

    // Start connection only after all callbacks are wired (setOnData above,
    // setOnEvent inside new CCRClient() when CCR v2 is enabled).
    void this.transport.connect()

    // Push a silent keep_alive frame on a fixed interval so upstream
    // proxies and the session-ingress layer don't GC an otherwise-idle
    // remote control session. The keep_alive type is filtered before
    // reaching any client UI (Query.ts drops it; structuredIO.ts drops it;
    // web/iOS/Android never see it in their message loop). Interval comes
    // from GrowthBook (tengu_bridge_poll_interval_config
    // session_keepalive_interval_v2_ms, default 120s); 0 = disabled.
    // Bridge-only: fixes Envoy idle timeout on bridge-topology sessions
    // (#21931). byoc workers ran without this before #21931 and do not
    // need it — different network path.
    const keepAliveIntervalMs =
      getPollIntervalConfig().session_keepalive_interval_v2_ms
    if (this.isBridge && keepAliveIntervalMs > 0) {
      this.keepAliveTimer = setInterval(() => {
        logForDebugging('[remote-io] keep_alive sent')
        void this.write({ type: 'keep_alive' }).catch(err => {
          logForDebugging(
            `[remote-io] keep_alive write failed: ${errorMessage(err)}`,
          )
        })
      }, keepAliveIntervalMs)
      // Don't let the keep-alive timer alone keep the process alive
      // (optional-chained in case the runtime's timer lacks unref).
      this.keepAliveTimer.unref?.()
    }

    // Register for graceful shutdown cleanup
    registerCleanup(async () => this.close())

    // If initial prompt is provided, send it through the input stream
    if (initialPrompt) {
      // Convert the initial prompt to the input stream format.
      // Chunks from stdin may already contain trailing newlines, so strip
      // them before appending our own to avoid double-newline issues that
      // cause structuredIO to parse empty lines. String() handles both
      // string chunks and Buffer objects from process.stdin.
      const stream = this.inputStream
      void (async () => {
        for await (const chunk of initialPrompt) {
          stream.write(String(chunk).replace(/\n$/, '') + '\n')
        }
      })()
    }
  }

  /** Flush buffered CCR v2 internal events; resolves immediately without a client. */
  override flushInternalEvents(): Promise<void> {
    return this.ccrClient?.flushInternalEvents() ?? Promise.resolve()
  }

  /** Depth of the CCR v2 internal-event queue; 0 without a client. */
  override get internalEventsPending(): number {
    return this.ccrClient?.internalEventsPending ?? 0
  }

  /**
   * Send output to the transport.
   * In bridge mode, control_request messages are always echoed to stdout so the
   * bridge parent can detect permission requests. Other messages are echoed only
   * in debug mode.
   */
  async write(message: StdoutMessage): Promise<void> {
    // CCR v2 takes precedence over the raw transport when enabled.
    if (this.ccrClient) {
      await this.ccrClient.writeEvent(message)
    } else {
      await this.transport.write(message)
    }
    if (this.isBridge) {
      if (message.type === 'control_request' || this.isDebug) {
        writeToStdout(ndjsonSafeStringify(message) + '\n')
      }
    }
  }

  /**
   * Clean up connections gracefully
   */
  close(): void {
    if (this.keepAliveTimer) {
      clearInterval(this.keepAliveTimer)
      this.keepAliveTimer = null
    }
    this.transport.close()
    this.inputStream.end()
  }
}
|
||||
859
src/cli/structuredIO.ts
Normal file
859
src/cli/structuredIO.ts
Normal file
@ -0,0 +1,859 @@
|
||||
import { feature } from 'bun:bundle'
|
||||
import type {
|
||||
ElicitResult,
|
||||
JSONRPCMessage,
|
||||
} from '@modelcontextprotocol/sdk/types.js'
|
||||
import { randomUUID } from 'crypto'
|
||||
import type { AssistantMessage } from 'src//types/message.js'
|
||||
import type {
|
||||
HookInput,
|
||||
HookJSONOutput,
|
||||
PermissionUpdate,
|
||||
SDKMessage,
|
||||
SDKUserMessage,
|
||||
} from 'src/entrypoints/agentSdkTypes.js'
|
||||
import { SDKControlElicitationResponseSchema } from 'src/entrypoints/sdk/controlSchemas.js'
|
||||
import type {
|
||||
SDKControlRequest,
|
||||
SDKControlResponse,
|
||||
StdinMessage,
|
||||
StdoutMessage,
|
||||
} from 'src/entrypoints/sdk/controlTypes.js'
|
||||
import type { CanUseToolFn } from 'src/hooks/useCanUseTool.js'
|
||||
import type { Tool, ToolUseContext } from 'src/Tool.js'
|
||||
import { type HookCallback, hookJSONOutputSchema } from 'src/types/hooks.js'
|
||||
import { logForDebugging } from 'src/utils/debug.js'
|
||||
import { logForDiagnosticsNoPII } from 'src/utils/diagLogs.js'
|
||||
import { AbortError } from 'src/utils/errors.js'
|
||||
import {
|
||||
type Output as PermissionToolOutput,
|
||||
permissionPromptToolResultToPermissionDecision,
|
||||
outputSchema as permissionToolOutputSchema,
|
||||
} from 'src/utils/permissions/PermissionPromptToolResultSchema.js'
|
||||
import type {
|
||||
PermissionDecision,
|
||||
PermissionDecisionReason,
|
||||
} from 'src/utils/permissions/PermissionResult.js'
|
||||
import { hasPermissionsToUseTool } from 'src/utils/permissions/permissions.js'
|
||||
import { writeToStdout } from 'src/utils/process.js'
|
||||
import { jsonStringify } from 'src/utils/slowOperations.js'
|
||||
import { z } from 'zod/v4'
|
||||
import { notifyCommandLifecycle } from '../utils/commandLifecycle.js'
|
||||
import { normalizeControlMessageKeys } from '../utils/controlMessageCompat.js'
|
||||
import { executePermissionRequestHooks } from '../utils/hooks.js'
|
||||
import {
|
||||
applyPermissionUpdates,
|
||||
persistPermissionUpdates,
|
||||
} from '../utils/permissions/PermissionUpdate.js'
|
||||
import {
|
||||
notifySessionStateChanged,
|
||||
type RequiresActionDetails,
|
||||
type SessionExternalMetadata,
|
||||
} from '../utils/sessionState.js'
|
||||
import { jsonParse } from '../utils/slowOperations.js'
|
||||
import { Stream } from '../utils/stream.js'
|
||||
import { ndjsonSafeStringify } from './ndjsonSafeStringify.js'
|
||||
|
||||
/**
 * Synthetic tool name used when forwarding sandbox network permission
 * requests via the can_use_tool control_request protocol. SDK hosts
 * see this as a normal tool permission prompt — no dedicated message
 * type exists for sandbox network access.
 */
export const SANDBOX_NETWORK_ACCESS_TOOL_NAME = 'SandboxNetworkAccess'
|
||||
|
||||
/**
 * Maps an internal PermissionDecisionReason onto the optional free-text
 * reason string exposed over the control protocol.
 *
 * Rule/mode/subcommand/permission-prompt-tool reasons are deliberately
 * withheld (return undefined); hook/agent/sandbox/etc. reasons forward
 * their human-readable text. 'classifier' reasons are only surfaced when
 * the classifier feature flags are on — `feature()` comes from
 * 'bun:bundle' and is presumably a build-time flag, so the call sites are
 * kept literal (no hoisting into variables) — TODO confirm that this is
 * required for dead-code elimination.
 *
 * NOTE(review): when the flags are off and reason.type === 'classifier',
 * the switch matches no case and the function implicitly returns undefined.
 */
function serializeDecisionReason(
  reason: PermissionDecisionReason | undefined,
): string | undefined {
  if (!reason) {
    return undefined
  }

  if (
    (feature('BASH_CLASSIFIER') || feature('TRANSCRIPT_CLASSIFIER')) &&
    reason.type === 'classifier'
  ) {
    return reason.reason
  }
  switch (reason.type) {
    case 'rule':
    case 'mode':
    case 'subcommandResults':
    case 'permissionPromptTool':
      return undefined
    case 'hook':
    case 'asyncAgent':
    case 'sandboxOverride':
    case 'workingDir':
    case 'safetyCheck':
    case 'other':
      return reason.reason
  }
}
|
||||
|
||||
function buildRequiresActionDetails(
|
||||
tool: Tool,
|
||||
input: Record<string, unknown>,
|
||||
toolUseID: string,
|
||||
requestId: string,
|
||||
): RequiresActionDetails {
|
||||
// Per-tool summary methods may throw on malformed input; permission
|
||||
// handling must not break because of a bad description.
|
||||
let description: string
|
||||
try {
|
||||
description =
|
||||
tool.getActivityDescription?.(input) ??
|
||||
tool.getToolUseSummary?.(input) ??
|
||||
tool.userFacingName(input)
|
||||
} catch {
|
||||
description = tool.name
|
||||
}
|
||||
return {
|
||||
tool_name: tool.name,
|
||||
action_description: description,
|
||||
tool_use_id: toolUseID,
|
||||
request_id: requestId,
|
||||
input,
|
||||
}
|
||||
}
|
||||
|
||||
/**
 * Bookkeeping for an in-flight control_request awaiting its
 * control_response: the promise endpoints to settle, an optional zod
 * schema used to validate the response payload, and the original request
 * (kept so late/duplicate responses can be correlated).
 */
type PendingRequest<T> = {
  resolve: (result: T) => void
  reject: (error: unknown) => void
  schema?: z.Schema
  request: SDKControlRequest
}
|
||||
|
||||
// Maximum number of resolved tool_use IDs to track. Once exceeded, the oldest
// entry is evicted. This bounds memory in very long sessions while keeping
// enough history to catch duplicate control_response deliveries.
const MAX_RESOLVED_TOOL_USE_IDS = 1000

/**
 * Provides a structured way to read and write SDK messages from stdio,
 * capturing the SDK protocol.
 */
export class StructuredIO {
  // Parsed message stream produced by read(); consumers iterate this
  // instead of touching the raw input.
  readonly structuredInput: AsyncGenerator<StdinMessage | SDKMessage>
  // In-flight control_requests keyed by request id.
  private readonly pendingRequests = new Map<string, PendingRequest<unknown>>()

  // CCR external_metadata read back on worker start; null when the
  // transport doesn't restore. Assigned by RemoteIO.
  restoredWorkerState: Promise<SessionExternalMetadata | null> =
    Promise.resolve(null)

  // Set once the underlying input iterable finishes.
  private inputClosed = false
  // Invoked for control_responses with no matching pending request.
  private unexpectedResponseCallback?: (
    response: SDKControlResponse,
  ) => Promise<void>

  // Tracks tool_use IDs that have been resolved through the normal permission
  // flow (or aborted by a hook). When a duplicate control_response arrives
  // after the original was already handled, this Set prevents the orphan
  // handler from re-processing it — which would push duplicate assistant
  // messages into mutableMessages and cause a 400 "tool_use ids must be unique"
  // error from the API.
  private readonly resolvedToolUseIds = new Set<string>()
  // Serialized user turns queued ahead of the next input line (see
  // prependUserMessage).
  private prependedLines: string[] = []
  private onControlRequestSent?: (request: SDKControlRequest) => void
  private onControlRequestResolved?: (requestId: string) => void

  // sendRequest() and print.ts both enqueue here; the drain loop is the
  // only writer. Prevents control_request from overtaking queued stream_events.
  readonly outbound = new Stream<StdoutMessage>()
|
||||
|
||||
constructor(
|
||||
private readonly input: AsyncIterable<string>,
|
||||
private readonly replayUserMessages?: boolean,
|
||||
) {
|
||||
this.input = input
|
||||
this.structuredInput = this.read()
|
||||
}
|
||||
|
||||
/**
|
||||
* Records a tool_use ID as resolved so that late/duplicate control_response
|
||||
* messages for the same tool are ignored by the orphan handler.
|
||||
*/
|
||||
private trackResolvedToolUseId(request: SDKControlRequest): void {
|
||||
if (request.request.subtype === 'can_use_tool') {
|
||||
this.resolvedToolUseIds.add(request.request.tool_use_id)
|
||||
if (this.resolvedToolUseIds.size > MAX_RESOLVED_TOOL_USE_IDS) {
|
||||
// Evict the oldest entry (Sets iterate in insertion order)
|
||||
const first = this.resolvedToolUseIds.values().next().value
|
||||
if (first !== undefined) {
|
||||
this.resolvedToolUseIds.delete(first)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
  /**
   * Flush pending internal events. No-op for non-remote IO (returns an
   * already-resolved promise). Overridden by RemoteIO with a real flush.
   */
  flushInternalEvents(): Promise<void> {
    return Promise.resolve()
  }
|
||||
|
||||
  /**
   * Internal-event queue depth. Overridden by RemoteIO to report its real
   * queue; the base implementation has no queue, so it is always zero.
   */
  get internalEventsPending(): number {
    return 0
  }
|
||||
|
||||
/**
|
||||
* Queue a user turn to be yielded before the next message from this.input.
|
||||
* Works before iteration starts and mid-stream — read() re-checks
|
||||
* prependedLines between each yielded message.
|
||||
*/
|
||||
prependUserMessage(content: string): void {
|
||||
this.prependedLines.push(
|
||||
jsonStringify({
|
||||
type: 'user',
|
||||
session_id: '',
|
||||
message: { role: 'user', content },
|
||||
parent_tool_use_id: null,
|
||||
} satisfies SDKUserMessage) + '\n',
|
||||
)
|
||||
}
|
||||
|
||||
  /**
   * Async generator backing structuredInput: buffers raw input chunks,
   * splits on newlines, parses each line via processLine, and yields the
   * resulting messages. Prepended user messages are spliced in ahead of
   * buffered input. On stream end, any trailing partial line is processed
   * and all still-pending control_requests are rejected.
   */
  private async *read() {
    let content = ''

    // Called once before for-await (an empty this.input otherwise skips the
    // loop body entirely), then again per block. prependedLines re-check is
    // inside the while so a prepend pushed between two messages in the SAME
    // block still lands first.
    const splitAndProcess = async function* (this: StructuredIO) {
      for (;;) {
        if (this.prependedLines.length > 0) {
          content = this.prependedLines.join('') + content
          this.prependedLines = []
        }
        const newline = content.indexOf('\n')
        if (newline === -1) break
        const line = content.slice(0, newline)
        content = content.slice(newline + 1)
        const message = await this.processLine(line)
        if (message) {
          logForDiagnosticsNoPII('info', 'cli_stdin_message_parsed', {
            type: message.type,
          })
          yield message
        }
      }
    }.bind(this)

    yield* splitAndProcess()

    for await (const block of this.input) {
      content += block
      yield* splitAndProcess()
    }
    // Trailing content with no final newline is still a valid last message.
    if (content) {
      const message = await this.processLine(content)
      if (message) {
        yield message
      }
    }
    this.inputClosed = true
    for (const request of this.pendingRequests.values()) {
      // Reject all pending requests when the input stream closes before
      // their control_responses arrived — callers must not hang forever.
      request.reject(
        new Error('Tool permission stream closed before response received'),
      )
    }
  }
|
||||
|
||||
getPendingPermissionRequests() {
|
||||
return Array.from(this.pendingRequests.values())
|
||||
.map(entry => entry.request)
|
||||
.filter(pr => pr.request.subtype === 'can_use_tool')
|
||||
}
|
||||
|
||||
  /**
   * Register a handler for control_responses whose request_id is unknown
   * (orphans). Invoked from processLine after duplicate-toolUseID filtering.
   */
  setUnexpectedResponseCallback(
    callback: (response: SDKControlResponse) => Promise<void>,
  ): void {
    this.unexpectedResponseCallback = callback
  }
|
||||
|
||||
/**
|
||||
* Inject a control_response message to resolve a pending permission request.
|
||||
* Used by the bridge to feed permission responses from claude.ai into the
|
||||
* SDK permission flow.
|
||||
*
|
||||
* Also sends a control_cancel_request to the SDK consumer so its canUseTool
|
||||
* callback is aborted via the signal — otherwise the callback hangs.
|
||||
*/
|
||||
injectControlResponse(response: SDKControlResponse): void {
|
||||
const requestId = response.response?.request_id
|
||||
if (!requestId) return
|
||||
const request = this.pendingRequests.get(requestId)
|
||||
if (!request) return
|
||||
this.trackResolvedToolUseId(request.request)
|
||||
this.pendingRequests.delete(requestId)
|
||||
// Cancel the SDK consumer's canUseTool callback — the bridge won.
|
||||
void this.write({
|
||||
type: 'control_cancel_request',
|
||||
request_id: requestId,
|
||||
})
|
||||
if (response.response.subtype === 'error') {
|
||||
request.reject(new Error(response.response.error))
|
||||
} else {
|
||||
const result = response.response.response
|
||||
if (request.schema) {
|
||||
try {
|
||||
request.resolve(request.schema.parse(result))
|
||||
} catch (error) {
|
||||
request.reject(error)
|
||||
}
|
||||
} else {
|
||||
request.resolve({})
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
  /**
   * Register a callback invoked whenever a can_use_tool control_request
   * is written to stdout. Used by the bridge to forward permission
   * requests to claude.ai. Pass undefined to clear.
   */
  setOnControlRequestSent(
    callback: ((request: SDKControlRequest) => void) | undefined,
  ): void {
    this.onControlRequestSent = callback
  }
|
||||
|
||||
  /**
   * Register a callback invoked when a can_use_tool control_response arrives
   * from the SDK consumer (via stdin). Used by the bridge to cancel the
   * stale permission prompt on claude.ai when the SDK consumer wins the race.
   * Pass undefined to clear.
   */
  setOnControlRequestResolved(
    callback: ((requestId: string) => void) | undefined,
  ): void {
    this.onControlRequestResolved = callback
  }
|
||||
|
||||
  /**
   * Parse one NDJSON line from stdin and dispatch it.
   *
   * Returns the message when the caller's main loop should see it (user /
   * assistant / system / control_request, plus control_response when
   * replayUserMessages is set); returns undefined for messages handled
   * internally (keep_alive, env-var updates, control_responses). Exits the
   * process on unparseable input or protocol violations.
   */
  private async processLine(
    line: string,
  ): Promise<StdinMessage | SDKMessage | undefined> {
    // Skip empty lines (e.g. from double newlines in piped stdin)
    if (!line) {
      return undefined
    }
    try {
      const message = normalizeControlMessageKeys(jsonParse(line)) as
        | StdinMessage
        | SDKMessage
      if (message.type === 'keep_alive') {
        // Silently ignore keep-alive messages
        return undefined
      }
      if (message.type === 'update_environment_variables') {
        // Apply environment variable updates directly to process.env.
        // Used by bridge session runner for auth token refresh
        // (CLAUDE_CODE_SESSION_ACCESS_TOKEN) which must be readable
        // by the REPL process itself, not just child Bash commands.
        const keys = Object.keys(message.variables)
        for (const [key, value] of Object.entries(message.variables)) {
          process.env[key] = value
        }
        logForDebugging(
          `[structuredIO] applied update_environment_variables: ${keys.join(', ')}`,
        )
        return undefined
      }
      if (message.type === 'control_response') {
        // Close lifecycle for every control_response, including duplicates
        // and orphans — orphans don't yield to print.ts's main loop, so this
        // is the only path that sees them. uuid is server-injected into the
        // payload.
        const uuid =
          'uuid' in message && typeof message.uuid === 'string'
            ? message.uuid
            : undefined
        if (uuid) {
          notifyCommandLifecycle(uuid, 'completed')
        }
        const request = this.pendingRequests.get(message.response.request_id)
        if (!request) {
          // Check if this tool_use was already resolved through the normal
          // permission flow. Duplicate control_response deliveries (e.g. from
          // WebSocket reconnects) arrive after the original was handled, and
          // re-processing them would push duplicate assistant messages into
          // the conversation, causing API 400 errors.
          const responsePayload =
            message.response.subtype === 'success'
              ? message.response.response
              : undefined
          const toolUseID = responsePayload?.toolUseID
          if (
            typeof toolUseID === 'string' &&
            this.resolvedToolUseIds.has(toolUseID)
          ) {
            logForDebugging(
              `Ignoring duplicate control_response for already-resolved toolUseID=${toolUseID} request_id=${message.response.request_id}`,
            )
            return undefined
          }
          // Truly unexpected orphan — hand it to the registered callback.
          if (this.unexpectedResponseCallback) {
            await this.unexpectedResponseCallback(message)
          }
          return undefined // Ignore responses for requests we don't know about
        }
        // Known request: mark resolved BEFORE deleting so any later duplicate
        // delivery is caught by the orphan path above.
        this.trackResolvedToolUseId(request.request)
        this.pendingRequests.delete(message.response.request_id)
        // Notify the bridge when the SDK consumer resolves a can_use_tool
        // request, so it can cancel the stale permission prompt on claude.ai.
        if (
          request.request.request.subtype === 'can_use_tool' &&
          this.onControlRequestResolved
        ) {
          this.onControlRequestResolved(message.response.request_id)
        }

        if (message.response.subtype === 'error') {
          request.reject(new Error(message.response.error))
          return undefined
        }
        const result = message.response.response
        if (request.schema) {
          // Schema validation failure rejects the original sendRequest()
          // promise rather than crashing the read loop.
          try {
            request.resolve(request.schema.parse(result))
          } catch (error) {
            request.reject(error)
          }
        } else {
          request.resolve({})
        }
        // Propagate control responses when replay is enabled
        if (this.replayUserMessages) {
          return message
        }
        return undefined
      }
      if (
        message.type !== 'user' &&
        message.type !== 'control_request' &&
        message.type !== 'assistant' &&
        message.type !== 'system'
      ) {
        // Unknown types are warned about and dropped rather than fatal, so
        // newer SDK consumers can send messages this CLI doesn't understand.
        logForDebugging(`Ignoring unknown message type: ${message.type}`, {
          level: 'warn',
        })
        return undefined
      }
      if (message.type === 'control_request') {
        if (!message.request) {
          exitWithMessage(`Error: Missing request on control_request`)
        }
        return message
      }
      if (message.type === 'assistant' || message.type === 'system') {
        return message
      }
      // Only 'user' remains; enforce the role invariant before yielding.
      if (message.message.role !== 'user') {
        exitWithMessage(
          `Error: Expected message role 'user', got '${message.message.role}'`,
        )
      }
      return message
    } catch (error) {
      // Malformed JSON on stdin is unrecoverable for a protocol stream.
      // biome-ignore lint/suspicious/noConsole:: intentional console output
      console.error(`Error parsing streaming input line: ${line}: ${error}`)
      // eslint-disable-next-line custom-rules/no-process-exit
      process.exit(1)
    }
  }
|
||||
|
||||
async write(message: StdoutMessage): Promise<void> {
|
||||
writeToStdout(ndjsonSafeStringify(message) + '\n')
|
||||
}
|
||||
|
||||
  /**
   * Send a control_request to the SDK consumer and await its
   * control_response (settled by processLine / injectControlResponse).
   *
   * @param request payload placed inside the control_request envelope
   * @param schema zod schema used to validate the response payload
   * @param signal optional abort signal; aborting sends a
   *   control_cancel_request and rejects immediately with AbortError
   * @param requestId correlation id; defaults to a fresh UUID, but
   *   createCanUseTool passes its own so the prompt callback can match
   * @throws Error when the input stream is already closed or the signal is
   *   already aborted before the request is enqueued
   */
  private async sendRequest<Response>(
    request: SDKControlRequest['request'],
    schema: z.Schema,
    signal?: AbortSignal,
    requestId: string = randomUUID(),
  ): Promise<Response> {
    const message: SDKControlRequest = {
      type: 'control_request',
      request_id: requestId,
      request,
    }
    if (this.inputClosed) {
      throw new Error('Stream closed')
    }
    if (signal?.aborted) {
      throw new Error('Request aborted')
    }
    // Enqueue through outbound so control_requests can't overtake queued
    // stream_events (single drain-loop writer).
    this.outbound.enqueue(message)
    if (request.subtype === 'can_use_tool' && this.onControlRequestSent) {
      this.onControlRequestSent(message)
    }
    const aborted = () => {
      this.outbound.enqueue({
        type: 'control_cancel_request',
        request_id: requestId,
      })
      // Immediately reject the outstanding promise, without
      // waiting for the host to acknowledge the cancellation.
      const request = this.pendingRequests.get(requestId)
      if (request) {
        // Track the tool_use ID as resolved before rejecting, so that a
        // late response from the host is ignored by the orphan handler.
        this.trackResolvedToolUseId(request.request)
        request.reject(new AbortError())
      }
    }
    if (signal) {
      signal.addEventListener('abort', aborted, {
        once: true,
      })
    }
    try {
      // The promise executor runs synchronously, so the pending entry is
      // registered before any response could possibly be processed.
      return await new Promise<Response>((resolve, reject) => {
        this.pendingRequests.set(requestId, {
          request: {
            type: 'control_request',
            request_id: requestId,
            request,
          },
          resolve: result => {
            resolve(result as Response)
          },
          reject,
          schema,
        })
      })
    } finally {
      // Clean up listener and pending entry regardless of outcome; deleting
      // an already-removed entry is a harmless no-op.
      if (signal) {
        signal.removeEventListener('abort', aborted)
      }
      this.pendingRequests.delete(requestId)
    }
  }
|
||||
|
||||
  /**
   * Build the CanUseToolFn used for SDK-hosted permission prompts.
   *
   * First consults local permission rules (or the supplied forceDecision);
   * when that yields 'ask', it races PermissionRequest hooks against a
   * can_use_tool control_request to the SDK host. On any failure the tool
   * is denied.
   *
   * @param onPermissionPrompt optional notifier fired just before the SDK
   *   host is asked, carrying prompt details and the correlation requestId
   */
  createCanUseTool(
    onPermissionPrompt?: (details: RequiresActionDetails) => void,
  ): CanUseToolFn {
    return async (
      tool: Tool,
      input: { [key: string]: unknown },
      toolUseContext: ToolUseContext,
      assistantMessage: AssistantMessage,
      toolUseID: string,
      forceDecision?: PermissionDecision,
    ): Promise<PermissionDecision> => {
      const mainPermissionResult =
        forceDecision ??
        (await hasPermissionsToUseTool(
          tool,
          input,
          toolUseContext,
          assistantMessage,
          toolUseID,
        ))
      // If the tool is allowed or denied, return the result
      if (
        mainPermissionResult.behavior === 'allow' ||
        mainPermissionResult.behavior === 'deny'
      ) {
        return mainPermissionResult
      }

      // Run PermissionRequest hooks in parallel with the SDK permission
      // prompt. In the terminal CLI, hooks race against the interactive
      // prompt so that e.g. a hook with --delay 20 doesn't block the UI.
      // We need the same behavior here: the SDK host (VS Code, etc.) shows
      // its permission dialog immediately while hooks run in the background.
      // Whichever resolves first wins; the loser is cancelled/ignored.

      // AbortController used to cancel the SDK request if a hook decides first
      const hookAbortController = new AbortController()
      const parentSignal = toolUseContext.abortController.signal
      // Forward parent abort to our local controller
      const onParentAbort = () => hookAbortController.abort()
      parentSignal.addEventListener('abort', onParentAbort, { once: true })

      try {
        // Start the hook evaluation (runs in background)
        const hookPromise = executePermissionRequestHooksForSDK(
          tool.name,
          toolUseID,
          input,
          toolUseContext,
          mainPermissionResult.suggestions,
        ).then(decision => ({ source: 'hook' as const, decision }))

        // Start the SDK permission prompt immediately (don't wait for hooks)
        const requestId = randomUUID()
        onPermissionPrompt?.(
          buildRequiresActionDetails(tool, input, toolUseID, requestId),
        )
        const sdkPromise = this.sendRequest<PermissionToolOutput>(
          {
            subtype: 'can_use_tool',
            tool_name: tool.name,
            input,
            permission_suggestions: mainPermissionResult.suggestions,
            blocked_path: mainPermissionResult.blockedPath,
            decision_reason: serializeDecisionReason(
              mainPermissionResult.decisionReason,
            ),
            tool_use_id: toolUseID,
            agent_id: toolUseContext.agentId,
          },
          permissionToolOutputSchema(),
          hookAbortController.signal,
          requestId,
        ).then(result => ({ source: 'sdk' as const, result }))

        // Race: hook completion vs SDK prompt response.
        // The hook promise always resolves (never rejects), returning
        // undefined if no hook made a decision.
        const winner = await Promise.race([hookPromise, sdkPromise])

        if (winner.source === 'hook') {
          if (winner.decision) {
            // Hook decided — abort the pending SDK request.
            // Suppress the expected AbortError rejection from sdkPromise.
            sdkPromise.catch(() => {})
            hookAbortController.abort()
            return winner.decision
          }
          // Hook passed through (no decision) — wait for the SDK prompt
          const sdkResult = await sdkPromise
          return permissionPromptToolResultToPermissionDecision(
            sdkResult.result,
            tool,
            input,
            toolUseContext,
          )
        }

        // SDK prompt responded first — use its result (hook still running
        // in background but its result will be ignored)
        return permissionPromptToolResultToPermissionDecision(
          winner.result,
          tool,
          input,
          toolUseContext,
        )
      } catch (error) {
        // Any failure in the race (stream closed, abort, schema mismatch)
        // degrades to an explicit deny with the error in the message.
        return permissionPromptToolResultToPermissionDecision(
          {
            behavior: 'deny',
            message: `Tool permission request failed: ${error}`,
            toolUseID,
          },
          tool,
          input,
          toolUseContext,
        )
      } finally {
        // Only transition back to 'running' if no other permission prompts
        // are pending (concurrent tool execution can have multiple in-flight).
        if (this.getPendingPermissionRequests().length === 0) {
          notifySessionStateChanged('running')
        }
        parentSignal.removeEventListener('abort', onParentAbort)
      }
    }
  }
|
||||
|
||||
createHookCallback(callbackId: string, timeout?: number): HookCallback {
|
||||
return {
|
||||
type: 'callback',
|
||||
timeout,
|
||||
callback: async (
|
||||
input: HookInput,
|
||||
toolUseID: string | null,
|
||||
abort: AbortSignal | undefined,
|
||||
): Promise<HookJSONOutput> => {
|
||||
try {
|
||||
const result = await this.sendRequest<HookJSONOutput>(
|
||||
{
|
||||
subtype: 'hook_callback',
|
||||
callback_id: callbackId,
|
||||
input,
|
||||
tool_use_id: toolUseID || undefined,
|
||||
},
|
||||
hookJSONOutputSchema(),
|
||||
abort,
|
||||
)
|
||||
return result
|
||||
} catch (error) {
|
||||
// biome-ignore lint/suspicious/noConsole:: intentional console output
|
||||
console.error(`Error in hook callback ${callbackId}:`, error)
|
||||
return {}
|
||||
}
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Sends an elicitation request to the SDK consumer and returns the response.
|
||||
*/
|
||||
async handleElicitation(
|
||||
serverName: string,
|
||||
message: string,
|
||||
requestedSchema?: Record<string, unknown>,
|
||||
signal?: AbortSignal,
|
||||
mode?: 'form' | 'url',
|
||||
url?: string,
|
||||
elicitationId?: string,
|
||||
): Promise<ElicitResult> {
|
||||
try {
|
||||
const result = await this.sendRequest<ElicitResult>(
|
||||
{
|
||||
subtype: 'elicitation',
|
||||
mcp_server_name: serverName,
|
||||
message,
|
||||
mode,
|
||||
url,
|
||||
elicitation_id: elicitationId,
|
||||
requested_schema: requestedSchema,
|
||||
},
|
||||
SDKControlElicitationResponseSchema(),
|
||||
signal,
|
||||
)
|
||||
return result
|
||||
} catch {
|
||||
return { action: 'cancel' as const }
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Creates a SandboxAskCallback that forwards sandbox network permission
|
||||
* requests to the SDK host as can_use_tool control_requests.
|
||||
*
|
||||
* This piggybacks on the existing can_use_tool protocol with a synthetic
|
||||
* tool name so that SDK hosts (VS Code, CCR, etc.) can prompt the user
|
||||
* for network access without requiring a new protocol subtype.
|
||||
*/
|
||||
createSandboxAskCallback(): (hostPattern: {
|
||||
host: string
|
||||
port?: number
|
||||
}) => Promise<boolean> {
|
||||
return async (hostPattern): Promise<boolean> => {
|
||||
try {
|
||||
const result = await this.sendRequest<PermissionToolOutput>(
|
||||
{
|
||||
subtype: 'can_use_tool',
|
||||
tool_name: SANDBOX_NETWORK_ACCESS_TOOL_NAME,
|
||||
input: { host: hostPattern.host },
|
||||
tool_use_id: randomUUID(),
|
||||
description: `Allow network connection to ${hostPattern.host}?`,
|
||||
},
|
||||
permissionToolOutputSchema(),
|
||||
)
|
||||
return result.behavior === 'allow'
|
||||
} catch {
|
||||
// If the request fails (stream closed, abort, etc.), deny the connection
|
||||
return false
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Sends an MCP message to an SDK server and waits for the response
|
||||
*/
|
||||
async sendMcpMessage(
|
||||
serverName: string,
|
||||
message: JSONRPCMessage,
|
||||
): Promise<JSONRPCMessage> {
|
||||
const response = await this.sendRequest<{ mcp_response: JSONRPCMessage }>(
|
||||
{
|
||||
subtype: 'mcp_message',
|
||||
server_name: serverName,
|
||||
message,
|
||||
},
|
||||
z.object({
|
||||
mcp_response: z.any() as z.Schema<JSONRPCMessage>,
|
||||
}),
|
||||
)
|
||||
return response.mcp_response
|
||||
}
|
||||
}
|
||||
|
||||
/**
 * Print a fatal protocol error to stderr and terminate the process with
 * exit code 1. Typed `never` so callers can use it in expression position.
 */
function exitWithMessage(message: string): never {
  // biome-ignore lint/suspicious/noConsole:: intentional console output
  console.error(message)
  // eslint-disable-next-line custom-rules/no-process-exit
  process.exit(1)
}
|
||||
|
||||
/**
 * Execute PermissionRequest hooks and return a decision if one is made.
 * Returns undefined if no hook made a decision (pass-through), letting the
 * SDK permission prompt decide instead.
 *
 * On an allow decision, any permission updates supplied by the hook
 * ("always allow") are persisted and applied to the app state before
 * returning.
 */
async function executePermissionRequestHooksForSDK(
  toolName: string,
  toolUseID: string,
  input: Record<string, unknown>,
  toolUseContext: ToolUseContext,
  suggestions: PermissionUpdate[] | undefined,
): Promise<PermissionDecision | undefined> {
  const appState = toolUseContext.getAppState()
  const permissionMode = appState.toolPermissionContext.mode

  // Iterate directly over the generator instead of using `all` — the first
  // decisive hook result short-circuits via the returns below.
  const hookGenerator = executePermissionRequestHooks(
    toolName,
    toolUseID,
    input,
    toolUseContext,
    permissionMode,
    suggestions,
    toolUseContext.abortController.signal,
  )

  for await (const hookResult of hookGenerator) {
    if (
      hookResult.permissionRequestResult &&
      (hookResult.permissionRequestResult.behavior === 'allow' ||
        hookResult.permissionRequestResult.behavior === 'deny')
    ) {
      const decision = hookResult.permissionRequestResult
      if (decision.behavior === 'allow') {
        const finalInput = decision.updatedInput || input

        // Apply permission updates if provided by hook ("always allow")
        const permissionUpdates = decision.updatedPermissions ?? []
        if (permissionUpdates.length > 0) {
          persistPermissionUpdates(permissionUpdates)
          const currentAppState = toolUseContext.getAppState()
          const updatedContext = applyPermissionUpdates(
            currentAppState.toolPermissionContext,
            permissionUpdates,
          )
          // Update permission context via setAppState; skip the state write
          // when the context is referentially unchanged.
          toolUseContext.setAppState(prev => {
            if (prev.toolPermissionContext === updatedContext) return prev
            return { ...prev, toolPermissionContext: updatedContext }
          })
        }

        return {
          behavior: 'allow',
          updatedInput: finalInput,
          userModified: false,
          decisionReason: {
            type: 'hook',
            hookName: 'PermissionRequest',
          },
        }
      } else {
        // Hook denied the permission
        return {
          behavior: 'deny',
          message:
            decision.message || 'Permission denied by PermissionRequest hook',
          decisionReason: {
            type: 'hook',
            hookName: 'PermissionRequest',
          },
        }
      }
    }
  }

  // No hook produced a decisive result.
  return undefined
}
|
||||
282
src/cli/transports/HybridTransport.ts
Normal file
282
src/cli/transports/HybridTransport.ts
Normal file
@ -0,0 +1,282 @@
|
||||
import axios, { type AxiosError } from 'axios'
|
||||
import type { StdoutMessage } from 'src/entrypoints/sdk/controlTypes.js'
|
||||
import { logForDebugging } from '../../utils/debug.js'
|
||||
import { logForDiagnosticsNoPII } from '../../utils/diagLogs.js'
|
||||
import { getSessionIngressAuthToken } from '../../utils/sessionIngressAuth.js'
|
||||
import { SerialBatchEventUploader } from './SerialBatchEventUploader.js'
|
||||
import {
|
||||
WebSocketTransport,
|
||||
type WebSocketTransportOptions,
|
||||
} from './WebSocketTransport.js'
|
||||
|
||||
// How long stream_event messages accumulate in the delay buffer before being
// enqueued for POST (see HybridTransport.write).
const BATCH_FLUSH_INTERVAL_MS = 100
// Per-attempt POST timeout. Bounds how long a single stuck POST can block
// the serialized queue. Without this, a hung connection stalls all writes.
const POST_TIMEOUT_MS = 15_000
// Grace period for queued writes on close(). Covers a healthy POST (~100ms)
// plus headroom; best-effort, not a delivery guarantee under degraded network.
// Void-ed (nothing awaits it) so this is a last resort — replBridge teardown
// now closes AFTER archive so archive latency is the primary drain window.
// NOTE: gracefulShutdown's cleanup budget is 2s (not the 5s outer failsafe);
// 3s here exceeds it, but the process lives ~2s longer for hooks+analytics.
const CLOSE_GRACE_MS = 3000
|
||||
|
||||
/**
|
||||
* Hybrid transport: WebSocket for reads, HTTP POST for writes.
|
||||
*
|
||||
* Write flow:
|
||||
*
|
||||
* write(stream_event) ─┐
|
||||
* │ (100ms timer)
|
||||
* │
|
||||
* ▼
|
||||
* write(other) ────► uploader.enqueue() (SerialBatchEventUploader)
|
||||
* ▲ │
|
||||
* writeBatch() ────────┘ │ serial, batched, retries indefinitely,
|
||||
* │ backpressure at maxQueueSize
|
||||
* ▼
|
||||
* postOnce() (single HTTP POST, throws on retryable)
|
||||
*
|
||||
* stream_event messages accumulate in streamEventBuffer for up to 100ms
|
||||
* before enqueue (reduces POST count for high-volume content deltas). A
|
||||
* non-stream write flushes any buffered stream_events first to preserve order.
|
||||
*
|
||||
* Serialization + retry + backpressure are delegated to SerialBatchEventUploader
|
||||
* (same primitive CCR uses). At most one POST in-flight; events arriving during
|
||||
* a POST batch into the next one. On failure, the uploader re-queues and retries
|
||||
* with exponential backoff + jitter. If the queue fills past maxQueueSize,
|
||||
* enqueue() blocks — giving awaiting callers backpressure.
|
||||
*
|
||||
* Why serialize? Bridge mode fires writes via `void transport.write()`
|
||||
* (fire-and-forget). Without this, concurrent POSTs → concurrent Firestore
|
||||
* writes to the same document → collisions → retry storms → pages oncall.
|
||||
*/
|
||||
export class HybridTransport extends WebSocketTransport {
  // HTTP endpoint derived from the WebSocket URL; all writes POST here.
  private postUrl: string
  // Serializes, batches, and retries outbound POSTs (at most one in flight).
  private uploader: SerialBatchEventUploader<StdoutMessage>

  // stream_event delay buffer — accumulates content deltas for up to
  // BATCH_FLUSH_INTERVAL_MS before enqueueing (reduces POST count)
  private streamEventBuffer: StdoutMessage[] = []
  private streamEventTimer: ReturnType<typeof setTimeout> | null = null

  /**
   * @param url WebSocket URL; also converted to the POST endpoint
   * @param headers initial connection headers (passed to WebSocketTransport)
   * @param sessionId optional session identifier for the base transport
   * @param refreshHeaders optional header refresher for reconnects
   * @param options base transport options plus uploader failure tuning
   */
  constructor(
    url: URL,
    headers: Record<string, string> = {},
    sessionId?: string,
    refreshHeaders?: () => Record<string, string>,
    options?: WebSocketTransportOptions & {
      maxConsecutiveFailures?: number
      onBatchDropped?: (batchSize: number, failures: number) => void
    },
  ) {
    super(url, headers, sessionId, refreshHeaders, options)
    const { maxConsecutiveFailures, onBatchDropped } = options ?? {}
    this.postUrl = convertWsUrlToPostUrl(url)
    this.uploader = new SerialBatchEventUploader<StdoutMessage>({
      // Large cap — session-ingress accepts arbitrary batch sizes. Events
      // naturally batch during in-flight POSTs; this just bounds the payload.
      maxBatchSize: 500,
      // Bridge callers use `void transport.write()` — backpressure doesn't
      // apply (they don't await). A batch >maxQueueSize deadlocks (see
      // SerialBatchEventUploader backpressure check). So set it high enough
      // to be a memory bound only. Wire real backpressure in a follow-up
      // once callers await.
      maxQueueSize: 100_000,
      baseDelayMs: 500,
      maxDelayMs: 8000,
      jitterMs: 1000,
      // Optional cap so a persistently-failing server can't pin the drain
      // loop for the lifetime of the process. Undefined = indefinite retry.
      // replBridge sets this; the 1P transportUtils path does not.
      maxConsecutiveFailures,
      onBatchDropped: (batchSize, failures) => {
        // Surface the drop in diagnostics before forwarding to the caller.
        logForDiagnosticsNoPII(
          'error',
          'cli_hybrid_batch_dropped_max_failures',
          {
            batchSize,
            failures,
          },
        )
        onBatchDropped?.(batchSize, failures)
      },
      send: batch => this.postOnce(batch),
    })
    logForDebugging(`HybridTransport: POST URL = ${this.postUrl}`)
    logForDiagnosticsNoPII('info', 'cli_hybrid_transport_initialized')
  }
|
||||
|
||||
/**
|
||||
* Enqueue a message and wait for the queue to drain. Returning flush()
|
||||
* preserves the contract that `await write()` resolves after the event is
|
||||
* POSTed (relied on by tests and replBridge's initial flush). Fire-and-forget
|
||||
* callers (`void transport.write()`) are unaffected — they don't await,
|
||||
* so the later resolution doesn't add latency.
|
||||
*/
|
||||
override async write(message: StdoutMessage): Promise<void> {
|
||||
if (message.type === 'stream_event') {
|
||||
// Delay: accumulate stream_events briefly before enqueueing.
|
||||
// Promise resolves immediately — callers don't await stream_events.
|
||||
this.streamEventBuffer.push(message)
|
||||
if (!this.streamEventTimer) {
|
||||
this.streamEventTimer = setTimeout(
|
||||
() => this.flushStreamEvents(),
|
||||
BATCH_FLUSH_INTERVAL_MS,
|
||||
)
|
||||
}
|
||||
return
|
||||
}
|
||||
// Immediate: flush any buffered stream_events (ordering), then this event.
|
||||
await this.uploader.enqueue([...this.takeStreamEvents(), message])
|
||||
return this.uploader.flush()
|
||||
}
|
||||
|
||||
async writeBatch(messages: StdoutMessage[]): Promise<void> {
|
||||
await this.uploader.enqueue([...this.takeStreamEvents(), ...messages])
|
||||
return this.uploader.flush()
|
||||
}
|
||||
|
||||
  /**
   * Snapshot before/after writeBatch() to detect silent drops — the uploader
   * discards a batch after maxConsecutiveFailures, incrementing this count.
   */
  get droppedBatchCount(): number {
    return this.uploader.droppedBatchCount
  }
|
||||
|
||||
  /**
   * Block until all pending events are POSTed. Used by bridge's initial
   * history flush so onStateChange('connected') fires after persistence.
   */
  flush(): Promise<void> {
    // enqueue() is void-ed: its promise only resolves on backpressure relief.
    // NOTE(review): this assumes enqueue adds to the queue synchronously so
    // the subsequent flush() covers these events — confirm against
    // SerialBatchEventUploader.
    void this.uploader.enqueue(this.takeStreamEvents())
    return this.uploader.flush()
  }
|
||||
|
||||
/** Take ownership of buffered stream_events and clear the delay timer. */
|
||||
private takeStreamEvents(): StdoutMessage[] {
|
||||
if (this.streamEventTimer) {
|
||||
clearTimeout(this.streamEventTimer)
|
||||
this.streamEventTimer = null
|
||||
}
|
||||
const buffered = this.streamEventBuffer
|
||||
this.streamEventBuffer = []
|
||||
return buffered
|
||||
}
|
||||
|
||||
/** Delay timer fired — enqueue accumulated stream_events. */
|
||||
private flushStreamEvents(): void {
|
||||
this.streamEventTimer = null
|
||||
void this.uploader.enqueue(this.takeStreamEvents())
|
||||
}
|
||||
|
||||
  /**
   * Synchronous close: discards buffered stream_events, gives already-queued
   * uploads a best-effort grace window to drain in the background, then
   * closes the underlying WebSocket immediately.
   */
  override close(): void {
    if (this.streamEventTimer) {
      clearTimeout(this.streamEventTimer)
      this.streamEventTimer = null
    }
    // NOTE(review): buffered (not-yet-enqueued) stream_events are dropped
    // here rather than flushed — presumably deltas are expendable at
    // teardown; confirm this is intentional.
    this.streamEventBuffer = []
    // Grace period for queued writes — fallback. replBridge teardown now
    // awaits archive between write and close (see CLOSE_GRACE_MS), so
    // archive latency is the primary drain window and this is a last
    // resort. Keep close() sync (returns immediately) but defer
    // uploader.close() so any remaining queue gets a chance to finish.
    const uploader = this.uploader
    let graceTimer: ReturnType<typeof setTimeout> | undefined
    void Promise.race([
      uploader.flush(),
      new Promise<void>(r => {
        // eslint-disable-next-line no-restricted-syntax -- need timer ref for clearTimeout
        graceTimer = setTimeout(r, CLOSE_GRACE_MS)
      }),
    ]).finally(() => {
      // Whichever side wins, cancel the other and shut the uploader down.
      clearTimeout(graceTimer)
      uploader.close()
    })
    super.close()
  }
|
||||
|
||||
/**
|
||||
* Single-attempt POST. Throws on retryable failures (429, 5xx, network)
|
||||
* so SerialBatchEventUploader re-queues and retries. Returns on success
|
||||
* and on permanent failures (4xx non-429, no token) so the uploader moves on.
|
||||
*/
|
||||
private async postOnce(events: StdoutMessage[]): Promise<void> {
|
||||
const sessionToken = getSessionIngressAuthToken()
|
||||
if (!sessionToken) {
|
||||
logForDebugging('HybridTransport: No session token available for POST')
|
||||
logForDiagnosticsNoPII('warn', 'cli_hybrid_post_no_token')
|
||||
return
|
||||
}
|
||||
|
||||
const headers: Record<string, string> = {
|
||||
Authorization: `Bearer ${sessionToken}`,
|
||||
'Content-Type': 'application/json',
|
||||
}
|
||||
|
||||
let response
|
||||
try {
|
||||
response = await axios.post(
|
||||
this.postUrl,
|
||||
{ events },
|
||||
{
|
||||
headers,
|
||||
validateStatus: () => true,
|
||||
timeout: POST_TIMEOUT_MS,
|
||||
},
|
||||
)
|
||||
} catch (error) {
|
||||
const axiosError = error as AxiosError
|
||||
logForDebugging(`HybridTransport: POST error: ${axiosError.message}`)
|
||||
logForDiagnosticsNoPII('warn', 'cli_hybrid_post_network_error')
|
||||
throw error
|
||||
}
|
||||
|
||||
if (response.status >= 200 && response.status < 300) {
|
||||
logForDebugging(`HybridTransport: POST success count=${events.length}`)
|
||||
return
|
||||
}
|
||||
|
||||
// 4xx (except 429) are permanent — drop, don't retry.
|
||||
if (
|
||||
response.status >= 400 &&
|
||||
response.status < 500 &&
|
||||
response.status !== 429
|
||||
) {
|
||||
logForDebugging(
|
||||
`HybridTransport: POST returned ${response.status} (permanent), dropping`,
|
||||
)
|
||||
logForDiagnosticsNoPII('warn', 'cli_hybrid_post_client_error', {
|
||||
status: response.status,
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
// 429 / 5xx — retryable. Throw so uploader re-queues and backs off.
|
||||
logForDebugging(
|
||||
`HybridTransport: POST returned ${response.status} (retryable)`,
|
||||
)
|
||||
logForDiagnosticsNoPII('warn', 'cli_hybrid_post_retryable_error', {
|
||||
status: response.status,
|
||||
})
|
||||
throw new Error(`POST failed with ${response.status}`)
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Convert a WebSocket URL to the HTTP POST endpoint URL.
|
||||
* From: wss://api.example.com/v2/session_ingress/ws/<session_id>
|
||||
* To: https://api.example.com/v2/session_ingress/session/<session_id>/events
|
||||
*/
|
||||
function convertWsUrlToPostUrl(wsUrl: URL): string {
|
||||
const protocol = wsUrl.protocol === 'wss:' ? 'https:' : 'http:'
|
||||
|
||||
// Replace /ws/ with /session/ and append /events
|
||||
let pathname = wsUrl.pathname
|
||||
pathname = pathname.replace('/ws/', '/session/')
|
||||
if (!pathname.endsWith('/events')) {
|
||||
pathname = pathname.endsWith('/')
|
||||
? pathname + 'events'
|
||||
: pathname + '/events'
|
||||
}
|
||||
|
||||
return `${protocol}//${wsUrl.host}${pathname}${wsUrl.search}`
|
||||
}
|
||||
711
src/cli/transports/SSETransport.ts
Normal file
711
src/cli/transports/SSETransport.ts
Normal file
@ -0,0 +1,711 @@
|
||||
import axios, { type AxiosError } from 'axios'
|
||||
import type { StdoutMessage } from 'src/entrypoints/sdk/controlTypes.js'
|
||||
import { logForDebugging } from '../../utils/debug.js'
|
||||
import { logForDiagnosticsNoPII } from '../../utils/diagLogs.js'
|
||||
import { errorMessage } from '../../utils/errors.js'
|
||||
import { getSessionIngressAuthHeaders } from '../../utils/sessionIngressAuth.js'
|
||||
import { sleep } from '../../utils/sleep.js'
|
||||
import { jsonParse, jsonStringify } from '../../utils/slowOperations.js'
|
||||
import { getClaudeCodeUserAgent } from '../../utils/userAgent.js'
|
||||
import type { Transport } from './Transport.js'
|
||||
|
||||
// ---------------------------------------------------------------------------
// Configuration
// ---------------------------------------------------------------------------

/** First reconnect delay; doubled per attempt in handleConnectionError. */
const RECONNECT_BASE_DELAY_MS = 1000
/** Ceiling for the exponential reconnect backoff. */
const RECONNECT_MAX_DELAY_MS = 30_000
/** Time budget for reconnection attempts before giving up (10 minutes). */
const RECONNECT_GIVE_UP_MS = 600_000
/** Server sends keepalives every 15s; treat connection as dead after 45s of silence. */
const LIVENESS_TIMEOUT_MS = 45_000

/**
 * HTTP status codes that indicate a permanent server-side rejection.
 * The transport transitions to 'closed' immediately without retrying.
 */
const PERMANENT_HTTP_CODES = new Set([401, 403, 404])

// POST retry configuration (matches HybridTransport)
/** Upper bound on write() POST attempts before giving up on the message. */
const POST_MAX_RETRIES = 10
/** First POST retry delay; doubled per attempt. */
const POST_BASE_DELAY_MS = 500
/** Ceiling for the POST retry backoff. */
const POST_MAX_DELAY_MS = 8000

/** Hoisted TextDecoder options to avoid per-chunk allocation in readStream. */
const STREAM_DECODE_OPTS: TextDecodeOptions = { stream: true }
|
||||
|
||||
/** Hoisted axios validateStatus callback to avoid per-request closure allocation. */
|
||||
function alwaysValidStatus(): boolean {
|
||||
return true
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// SSE Frame Parser
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
type SSEFrame = {
|
||||
event?: string
|
||||
id?: string
|
||||
data?: string
|
||||
}
|
||||
|
||||
/**
|
||||
* Incrementally parse SSE frames from a text buffer.
|
||||
* Returns parsed frames and the remaining (incomplete) buffer.
|
||||
*
|
||||
* @internal exported for testing
|
||||
*/
|
||||
export function parseSSEFrames(buffer: string): {
|
||||
frames: SSEFrame[]
|
||||
remaining: string
|
||||
} {
|
||||
const frames: SSEFrame[] = []
|
||||
let pos = 0
|
||||
|
||||
// SSE frames are delimited by double newlines
|
||||
let idx: number
|
||||
while ((idx = buffer.indexOf('\n\n', pos)) !== -1) {
|
||||
const rawFrame = buffer.slice(pos, idx)
|
||||
pos = idx + 2
|
||||
|
||||
// Skip empty frames
|
||||
if (!rawFrame.trim()) continue
|
||||
|
||||
const frame: SSEFrame = {}
|
||||
let isComment = false
|
||||
|
||||
for (const line of rawFrame.split('\n')) {
|
||||
if (line.startsWith(':')) {
|
||||
// SSE comment (e.g., `:keepalive`)
|
||||
isComment = true
|
||||
continue
|
||||
}
|
||||
|
||||
const colonIdx = line.indexOf(':')
|
||||
if (colonIdx === -1) continue
|
||||
|
||||
const field = line.slice(0, colonIdx)
|
||||
// Per SSE spec, strip one leading space after colon if present
|
||||
const value =
|
||||
line[colonIdx + 1] === ' '
|
||||
? line.slice(colonIdx + 2)
|
||||
: line.slice(colonIdx + 1)
|
||||
|
||||
switch (field) {
|
||||
case 'event':
|
||||
frame.event = value
|
||||
break
|
||||
case 'id':
|
||||
frame.id = value
|
||||
break
|
||||
case 'data':
|
||||
// Per SSE spec, multiple data: lines are concatenated with \n
|
||||
frame.data = frame.data ? frame.data + '\n' + value : value
|
||||
break
|
||||
// Ignore other fields (retry:, etc.)
|
||||
}
|
||||
}
|
||||
|
||||
// Only emit frames that have data (or are pure comments which reset liveness)
|
||||
if (frame.data || isComment) {
|
||||
frames.push(frame)
|
||||
}
|
||||
}
|
||||
|
||||
return { frames, remaining: buffer.slice(pos) }
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Types
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/**
 * Lifecycle of the transport. connect() enters 'reconnecting' for both the
 * initial attempt and post-failure backoff; 'closed' is terminal.
 */
type SSETransportState =
  | 'idle'
  | 'connected'
  | 'reconnecting'
  | 'closing'
  | 'closed'

/**
 * Payload for `event: client_event` frames, matching the StreamClientEvent
 * proto message in session_stream.proto. This is the only event type sent
 * to worker subscribers — delivery_update, session_update, ephemeral_event,
 * and catch_up_truncated are client-channel-only (see notifier.go and
 * event_stream.go SubscriberClient guard).
 */
export type StreamClientEvent = {
  event_id: string
  sequence_num: number
  event_type: string
  source: string
  payload: Record<string, unknown>
  created_at: string
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// SSETransport
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/**
 * Transport that uses SSE for reading and HTTP POST for writing.
 *
 * Reads events via Server-Sent Events from the CCR v2 event stream endpoint.
 * Writes events via HTTP POST with retry logic (same pattern as HybridTransport).
 *
 * Each `event: client_event` frame carries a StreamClientEvent proto JSON
 * directly in `data:`. The transport extracts `payload` and passes it to
 * `onData` as newline-delimited JSON for StructuredIO consumers.
 *
 * Supports automatic reconnection with exponential backoff and Last-Event-ID
 * for resumption after disconnection.
 */
export class SSETransport implements Transport {
  private state: SSETransportState = 'idle'
  private onData?: (data: string) => void
  private onCloseCallback?: (closeCode?: number) => void
  private onEventCallback?: (event: StreamClientEvent) => void
  private headers: Record<string, string>
  private sessionId?: string
  private refreshHeaders?: () => Record<string, string>
  private readonly getAuthHeaders: () => Record<string, string>

  // SSE connection state
  private abortController: AbortController | null = null
  private lastSequenceNum = 0
  private seenSequenceNums = new Set<number>()

  // Reconnection state
  private reconnectAttempts = 0
  private reconnectStartTime: number | null = null
  private reconnectTimer: NodeJS.Timeout | null = null

  // Liveness detection
  private livenessTimer: NodeJS.Timeout | null = null

  // POST URL (derived from SSE URL)
  private postUrl: string

  // Runtime epoch for CCR v2 event format
  // NOTE(review): the comment above has no matching field — looks like a
  // leftover from restoration; confirm against the original source.

  constructor(
    private readonly url: URL,
    headers: Record<string, string> = {},
    sessionId?: string,
    refreshHeaders?: () => Record<string, string>,
    initialSequenceNum?: number,
    /**
     * Per-instance auth header source. Omit to read the process-wide
     * CLAUDE_CODE_SESSION_ACCESS_TOKEN (single-session callers). Required
     * for concurrent multi-session callers — the env-var path is a process
     * global and would stomp across sessions.
     */
    getAuthHeaders?: () => Record<string, string>,
  ) {
    this.headers = headers
    this.sessionId = sessionId
    this.refreshHeaders = refreshHeaders
    this.getAuthHeaders = getAuthHeaders ?? getSessionIngressAuthHeaders
    this.postUrl = convertSSEUrlToPostUrl(url)
    // Seed with a caller-provided high-water mark so the first connect()
    // sends from_sequence_num / Last-Event-ID. Without this, a fresh
    // SSETransport always asks the server to replay from sequence 0 —
    // the entire session history on every transport swap.
    if (initialSequenceNum !== undefined && initialSequenceNum > 0) {
      this.lastSequenceNum = initialSequenceNum
    }
    logForDebugging(`SSETransport: SSE URL = ${url.href}`)
    logForDebugging(`SSETransport: POST URL = ${this.postUrl}`)
    logForDiagnosticsNoPII('info', 'cli_sse_transport_initialized')
  }

  /**
   * High-water mark of sequence numbers seen on this stream. Callers that
   * recreate the transport (e.g. replBridge onWorkReceived) read this before
   * close() and pass it as `initialSequenceNum` to the next instance so the
   * server resumes from the right point instead of replaying everything.
   */
  getLastSequenceNum(): number {
    return this.lastSequenceNum
  }

  /**
   * Open (or re-open) the SSE stream. Only legal from 'idle' or
   * 'reconnecting' — any other state logs and returns. Sets state to
   * 'reconnecting' while the request is in flight and resolves after the
   * stream ends (or a permanent HTTP rejection closes the transport).
   */
  async connect(): Promise<void> {
    if (this.state !== 'idle' && this.state !== 'reconnecting') {
      logForDebugging(
        `SSETransport: Cannot connect, current state is ${this.state}`,
        { level: 'error' },
      )
      logForDiagnosticsNoPII('error', 'cli_sse_connect_failed')
      return
    }

    this.state = 'reconnecting'
    const connectStartTime = Date.now()

    // Build SSE URL with sequence number for resumption
    const sseUrl = new URL(this.url.href)
    if (this.lastSequenceNum > 0) {
      sseUrl.searchParams.set('from_sequence_num', String(this.lastSequenceNum))
    }

    // Build headers -- use fresh auth headers (supports Cookie for session keys).
    // Remove stale Authorization header from this.headers when Cookie auth is used,
    // since sending both confuses the auth interceptor.
    const authHeaders = this.getAuthHeaders()
    const headers: Record<string, string> = {
      ...this.headers,
      ...authHeaders,
      Accept: 'text/event-stream',
      'anthropic-version': '2023-06-01',
      'User-Agent': getClaudeCodeUserAgent(),
    }
    if (authHeaders['Cookie']) {
      delete headers['Authorization']
    }
    if (this.lastSequenceNum > 0) {
      headers['Last-Event-ID'] = String(this.lastSequenceNum)
    }

    logForDebugging(`SSETransport: Opening ${sseUrl.href}`)
    logForDiagnosticsNoPII('info', 'cli_sse_connect_opening')

    this.abortController = new AbortController()

    try {
      // eslint-disable-next-line eslint-plugin-n/no-unsupported-features/node-builtins
      const response = await fetch(sseUrl.href, {
        headers,
        signal: this.abortController.signal,
      })

      if (!response.ok) {
        const isPermanent = PERMANENT_HTTP_CODES.has(response.status)
        logForDebugging(
          `SSETransport: HTTP ${response.status}${isPermanent ? ' (permanent)' : ''}`,
          { level: 'error' },
        )
        logForDiagnosticsNoPII('error', 'cli_sse_connect_http_error', {
          status: response.status,
        })

        // 401/403/404 are not retried — close and surface the status code.
        if (isPermanent) {
          this.state = 'closed'
          this.onCloseCallback?.(response.status)
          return
        }

        this.handleConnectionError()
        return
      }

      if (!response.body) {
        logForDebugging('SSETransport: No response body')
        this.handleConnectionError()
        return
      }

      // Successfully connected
      const connectDuration = Date.now() - connectStartTime
      logForDebugging('SSETransport: Connected')
      logForDiagnosticsNoPII('info', 'cli_sse_connect_connected', {
        duration_ms: connectDuration,
      })

      // Reset reconnect bookkeeping so the next failure gets a fresh budget.
      this.state = 'connected'
      this.reconnectAttempts = 0
      this.reconnectStartTime = null
      this.resetLivenessTimer()

      // Read the SSE stream
      await this.readStream(response.body)
    } catch (error) {
      if (this.abortController?.signal.aborted) {
        // Intentional close
        return
      }

      logForDebugging(
        `SSETransport: Connection error: ${errorMessage(error)}`,
        { level: 'error' },
      )
      logForDiagnosticsNoPII('error', 'cli_sse_connect_error')
      this.handleConnectionError()
    }
  }

  /**
   * Read and process the SSE stream body.
   */
  // eslint-disable-next-line eslint-plugin-n/no-unsupported-features/node-builtins
  private async readStream(body: ReadableStream<Uint8Array>): Promise<void> {
    const reader = body.getReader()
    const decoder = new TextDecoder()
    let buffer = ''

    try {
      while (true) {
        const { done, value } = await reader.read()
        if (done) break

        // Accumulate decoded text; parseSSEFrames returns complete frames
        // and hands back the unparsed tail for the next chunk.
        buffer += decoder.decode(value, STREAM_DECODE_OPTS)
        const { frames, remaining } = parseSSEFrames(buffer)
        buffer = remaining

        for (const frame of frames) {
          // Any frame (including keepalive comments) proves the connection is alive
          this.resetLivenessTimer()

          if (frame.id) {
            const seqNum = parseInt(frame.id, 10)
            if (!isNaN(seqNum)) {
              if (this.seenSequenceNums.has(seqNum)) {
                logForDebugging(
                  `SSETransport: DUPLICATE frame seq=${seqNum} (lastSequenceNum=${this.lastSequenceNum}, seenCount=${this.seenSequenceNums.size})`,
                  { level: 'warn' },
                )
                logForDiagnosticsNoPII('warn', 'cli_sse_duplicate_sequence')
              } else {
                this.seenSequenceNums.add(seqNum)
                // Prevent unbounded growth: once we have many entries, prune
                // old sequence numbers that are well below the high-water mark.
                // Only sequence numbers near lastSequenceNum matter for dedup.
                if (this.seenSequenceNums.size > 1000) {
                  const threshold = this.lastSequenceNum - 200
                  for (const s of this.seenSequenceNums) {
                    if (s < threshold) {
                      this.seenSequenceNums.delete(s)
                    }
                  }
                }
              }
              if (seqNum > this.lastSequenceNum) {
                this.lastSequenceNum = seqNum
              }
            }
          }

          if (frame.event && frame.data) {
            this.handleSSEFrame(frame.event, frame.data)
          } else if (frame.data) {
            // data: without event: — server is emitting the old envelope format
            // or a bug. Log so incidents show as a signal instead of silent drops.
            logForDebugging(
              'SSETransport: Frame has data: but no event: field — dropped',
              { level: 'warn' },
            )
            logForDiagnosticsNoPII('warn', 'cli_sse_frame_missing_event_field')
          }
        }
      }
    } catch (error) {
      if (this.abortController?.signal.aborted) return
      logForDebugging(
        `SSETransport: Stream read error: ${errorMessage(error)}`,
        { level: 'error' },
      )
      logForDiagnosticsNoPII('error', 'cli_sse_stream_read_error')
    } finally {
      reader.releaseLock()
    }

    // Stream ended — reconnect unless we're closing
    if (this.state !== 'closing' && this.state !== 'closed') {
      logForDebugging('SSETransport: Stream ended, reconnecting')
      this.handleConnectionError()
    }
  }

  /**
   * Handle a single SSE frame. The event: field names the variant; data:
   * carries the inner proto JSON directly (no envelope).
   *
   * Worker subscribers only receive client_event frames (see notifier.go) —
   * any other event type indicates a server-side change that CC doesn't yet
   * understand. Log a diagnostic so we notice in telemetry.
   */
  private handleSSEFrame(eventType: string, data: string): void {
    if (eventType !== 'client_event') {
      logForDebugging(
        `SSETransport: Unexpected SSE event type '${eventType}' on worker stream`,
        { level: 'warn' },
      )
      logForDiagnosticsNoPII('warn', 'cli_sse_unexpected_event_type', {
        event_type: eventType,
      })
      return
    }

    let ev: StreamClientEvent
    try {
      ev = jsonParse(data) as StreamClientEvent
    } catch (error) {
      logForDebugging(
        `SSETransport: Failed to parse client_event data: ${errorMessage(error)}`,
        { level: 'error' },
      )
      return
    }

    const payload = ev.payload
    if (payload && typeof payload === 'object' && 'type' in payload) {
      const sessionLabel = this.sessionId ? ` session=${this.sessionId}` : ''
      logForDebugging(
        `SSETransport: Event seq=${ev.sequence_num} event_id=${ev.event_id} event_type=${ev.event_type} payload_type=${String(payload.type)}${sessionLabel}`,
      )
      logForDiagnosticsNoPII('info', 'cli_sse_message_received')
      // Pass the unwrapped payload as newline-delimited JSON,
      // matching the format that StructuredIO/WebSocketTransport consumers expect
      this.onData?.(jsonStringify(payload) + '\n')
    } else {
      logForDebugging(
        `SSETransport: Ignoring client_event with no type in payload: event_id=${ev.event_id}`,
      )
    }

    // Raw-event subscribers (setOnEvent) hear every parsed client_event,
    // even ones whose payload lacked a type and was not forwarded to onData.
    this.onEventCallback?.(ev)
  }

  /**
   * Handle connection errors with exponential backoff and time budget.
   */
  private handleConnectionError(): void {
    this.clearLivenessTimer()

    if (this.state === 'closing' || this.state === 'closed') return

    // Abort any in-flight SSE fetch
    this.abortController?.abort()
    this.abortController = null

    const now = Date.now()
    if (!this.reconnectStartTime) {
      this.reconnectStartTime = now
    }

    const elapsed = now - this.reconnectStartTime
    if (elapsed < RECONNECT_GIVE_UP_MS) {
      // Clear any existing timer
      if (this.reconnectTimer) {
        clearTimeout(this.reconnectTimer)
        this.reconnectTimer = null
      }

      // Refresh headers before reconnecting
      if (this.refreshHeaders) {
        const freshHeaders = this.refreshHeaders()
        Object.assign(this.headers, freshHeaders)
        logForDebugging('SSETransport: Refreshed headers for reconnect')
      }

      this.state = 'reconnecting'
      this.reconnectAttempts++

      const baseDelay = Math.min(
        RECONNECT_BASE_DELAY_MS * Math.pow(2, this.reconnectAttempts - 1),
        RECONNECT_MAX_DELAY_MS,
      )
      // Add ±25% jitter
      const delay = Math.max(
        0,
        baseDelay + baseDelay * 0.25 * (2 * Math.random() - 1),
      )

      logForDebugging(
        `SSETransport: Reconnecting in ${Math.round(delay)}ms (attempt ${this.reconnectAttempts}, ${Math.round(elapsed / 1000)}s elapsed)`,
      )
      logForDiagnosticsNoPII('error', 'cli_sse_reconnect_attempt', {
        reconnectAttempts: this.reconnectAttempts,
      })

      this.reconnectTimer = setTimeout(() => {
        this.reconnectTimer = null
        void this.connect()
      }, delay)
    } else {
      logForDebugging(
        `SSETransport: Reconnection time budget exhausted after ${Math.round(elapsed / 1000)}s`,
        { level: 'error' },
      )
      logForDiagnosticsNoPII('error', 'cli_sse_reconnect_exhausted', {
        reconnectAttempts: this.reconnectAttempts,
        elapsedMs: elapsed,
      })
      this.state = 'closed'
      this.onCloseCallback?.()
    }
  }

  /**
   * Bound timeout callback. Hoisted from an inline closure so that
   * resetLivenessTimer (called per-frame) does not allocate a new closure
   * on every SSE frame.
   */
  private readonly onLivenessTimeout = (): void => {
    this.livenessTimer = null
    logForDebugging('SSETransport: Liveness timeout, reconnecting', {
      level: 'error',
    })
    logForDiagnosticsNoPII('error', 'cli_sse_liveness_timeout')
    this.abortController?.abort()
    this.handleConnectionError()
  }

  /**
   * Reset the liveness timer. If no SSE frame arrives within the timeout,
   * treat the connection as dead and reconnect.
   */
  private resetLivenessTimer(): void {
    this.clearLivenessTimer()
    this.livenessTimer = setTimeout(this.onLivenessTimeout, LIVENESS_TIMEOUT_MS)
  }

  private clearLivenessTimer(): void {
    if (this.livenessTimer) {
      clearTimeout(this.livenessTimer)
      this.livenessTimer = null
    }
  }

  // -----------------------------------------------------------------------
  // Write (HTTP POST) — same pattern as HybridTransport
  // -----------------------------------------------------------------------

  /**
   * POST one message to the events endpoint with bounded retries.
   * Non-429 4xx responses and exhausted retries return without throwing;
   * 429/5xx and network errors back off exponentially between attempts.
   */
  async write(message: StdoutMessage): Promise<void> {
    const authHeaders = this.getAuthHeaders()
    if (Object.keys(authHeaders).length === 0) {
      logForDebugging('SSETransport: No session token available for POST')
      logForDiagnosticsNoPII('warn', 'cli_sse_post_no_token')
      return
    }

    const headers: Record<string, string> = {
      ...authHeaders,
      'Content-Type': 'application/json',
      'anthropic-version': '2023-06-01',
      'User-Agent': getClaudeCodeUserAgent(),
    }

    logForDebugging(
      `SSETransport: POST body keys=${Object.keys(message as Record<string, unknown>).join(',')}`,
    )

    for (let attempt = 1; attempt <= POST_MAX_RETRIES; attempt++) {
      try {
        const response = await axios.post(this.postUrl, message, {
          headers,
          validateStatus: alwaysValidStatus,
        })

        if (response.status === 200 || response.status === 201) {
          logForDebugging(`SSETransport: POST success type=${message.type}`)
          return
        }

        logForDebugging(
          `SSETransport: POST ${response.status} body=${jsonStringify(response.data).slice(0, 200)}`,
        )
        // 4xx errors (except 429) are permanent - don't retry
        if (
          response.status >= 400 &&
          response.status < 500 &&
          response.status !== 429
        ) {
          logForDebugging(
            `SSETransport: POST returned ${response.status} (client error), not retrying`,
          )
          logForDiagnosticsNoPII('warn', 'cli_sse_post_client_error', {
            status: response.status,
          })
          return
        }

        // 429 or 5xx - retry
        logForDebugging(
          `SSETransport: POST returned ${response.status}, attempt ${attempt}/${POST_MAX_RETRIES}`,
        )
        logForDiagnosticsNoPII('warn', 'cli_sse_post_retryable_error', {
          status: response.status,
          attempt,
        })
      } catch (error) {
        const axiosError = error as AxiosError
        logForDebugging(
          `SSETransport: POST error: ${axiosError.message}, attempt ${attempt}/${POST_MAX_RETRIES}`,
        )
        logForDiagnosticsNoPII('warn', 'cli_sse_post_network_error', {
          attempt,
        })
      }

      if (attempt === POST_MAX_RETRIES) {
        logForDebugging(
          `SSETransport: POST failed after ${POST_MAX_RETRIES} attempts, continuing`,
        )
        logForDiagnosticsNoPII('warn', 'cli_sse_post_retries_exhausted')
        return
      }

      const delayMs = Math.min(
        POST_BASE_DELAY_MS * Math.pow(2, attempt - 1),
        POST_MAX_DELAY_MS,
      )
      await sleep(delayMs)
    }
  }

  // -----------------------------------------------------------------------
  // Transport interface
  // -----------------------------------------------------------------------

  isConnectedStatus(): boolean {
    return this.state === 'connected'
  }

  isClosedStatus(): boolean {
    return this.state === 'closed'
  }

  setOnData(callback: (data: string) => void): void {
    this.onData = callback
  }

  setOnClose(callback: (closeCode?: number) => void): void {
    this.onCloseCallback = callback
  }

  setOnEvent(callback: (event: StreamClientEvent) => void): void {
    this.onEventCallback = callback
  }

  /** Stop all timers, abort the in-flight SSE fetch, and enter 'closing'. */
  close(): void {
    if (this.reconnectTimer) {
      clearTimeout(this.reconnectTimer)
      this.reconnectTimer = null
    }
    this.clearLivenessTimer()

    this.state = 'closing'
    this.abortController?.abort()
    this.abortController = null
  }
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// URL Conversion
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/**
|
||||
* Convert an SSE URL to the HTTP POST endpoint URL.
|
||||
* The SSE stream URL and POST URL share the same base; the POST endpoint
|
||||
* is at `/events` (without `/stream`).
|
||||
*
|
||||
* From: https://api.example.com/v2/session_ingress/session/<session_id>/events/stream
|
||||
* To: https://api.example.com/v2/session_ingress/session/<session_id>/events
|
||||
*/
|
||||
function convertSSEUrlToPostUrl(sseUrl: URL): string {
|
||||
let pathname = sseUrl.pathname
|
||||
// Remove /stream suffix to get the POST events endpoint
|
||||
if (pathname.endsWith('/stream')) {
|
||||
pathname = pathname.slice(0, -'/stream'.length)
|
||||
}
|
||||
return `${sseUrl.protocol}//${sseUrl.host}${pathname}`
|
||||
}
|
||||
275
src/cli/transports/SerialBatchEventUploader.ts
Normal file
275
src/cli/transports/SerialBatchEventUploader.ts
Normal file
@ -0,0 +1,275 @@
|
||||
import { jsonStringify } from '../../utils/slowOperations.js'
|
||||
|
||||
/**
|
||||
* Serial ordered event uploader with batching, retry, and backpressure.
|
||||
*
|
||||
* - enqueue() adds events to a pending buffer
|
||||
* - At most 1 POST in-flight at a time
|
||||
* - Drains up to maxBatchSize items per POST
|
||||
* - New events accumulate while in-flight
|
||||
* - On failure: exponential backoff (clamped), retries indefinitely
|
||||
* until success or close() — unless maxConsecutiveFailures is set,
|
||||
* in which case the failing batch is dropped and drain advances
|
||||
* - flush() blocks until pending is empty and kicks drain if needed
|
||||
* - Backpressure: enqueue() blocks when maxQueueSize is reached
|
||||
*/
|
||||
|
||||
/**
|
||||
* Throw from config.send() to make the uploader wait a server-supplied
|
||||
* duration before retrying (e.g. 429 with Retry-After). When retryAfterMs
|
||||
* is set, it overrides exponential backoff for that attempt — clamped to
|
||||
* [baseDelayMs, maxDelayMs] and jittered so a misbehaving server can
|
||||
* neither hot-loop nor stall the client, and many sessions sharing a rate
|
||||
* limit don't all pounce at the same instant. Without retryAfterMs, behaves
|
||||
* like any other thrown error (exponential backoff).
|
||||
*/
|
||||
export class RetryableError extends Error {
|
||||
constructor(
|
||||
message: string,
|
||||
readonly retryAfterMs?: number,
|
||||
) {
|
||||
super(message)
|
||||
}
|
||||
}
|
||||
|
||||
/** Construction-time configuration for SerialBatchEventUploader. */
type SerialBatchEventUploaderConfig<T> = {
  /** Max items per POST (1 = no batching) */
  maxBatchSize: number
  /**
   * Max serialized bytes per POST. First item always goes in regardless of
   * size; subsequent items only if cumulative JSON bytes stay under this.
   * Undefined = no byte limit (count-only batching).
   */
  maxBatchBytes?: number
  /** Max pending items before enqueue() blocks */
  maxQueueSize: number
  /** The actual HTTP call — caller controls payload format */
  send: (batch: T[]) => Promise<void>
  /** Base delay for exponential backoff (ms) */
  baseDelayMs: number
  /** Max delay cap (ms) */
  maxDelayMs: number
  /** Random jitter range added to retry delay (ms) */
  jitterMs: number
  /**
   * After this many consecutive send() failures, drop the failing batch
   * and move on to the next pending item with a fresh failure budget.
   * Undefined = retry indefinitely (default).
   */
  maxConsecutiveFailures?: number
  /** Called when a batch is dropped for hitting maxConsecutiveFailures. */
  onBatchDropped?: (batchSize: number, failures: number) => void
}
|
||||
|
||||
export class SerialBatchEventUploader<T> {
|
||||
private pending: T[] = []
|
||||
private pendingAtClose = 0
|
||||
private draining = false
|
||||
private closed = false
|
||||
private backpressureResolvers: Array<() => void> = []
|
||||
private sleepResolve: (() => void) | null = null
|
||||
private flushResolvers: Array<() => void> = []
|
||||
private droppedBatches = 0
|
||||
private readonly config: SerialBatchEventUploaderConfig<T>
|
||||
|
||||
constructor(config: SerialBatchEventUploaderConfig<T>) {
|
||||
this.config = config
|
||||
}
|
||||
|
||||
/**
|
||||
* Monotonic count of batches dropped via maxConsecutiveFailures. Callers
|
||||
* can snapshot before flush() and compare after to detect silent drops
|
||||
* (flush() resolves normally even when batches were dropped).
|
||||
*/
|
||||
get droppedBatchCount(): number {
|
||||
return this.droppedBatches
|
||||
}
|
||||
|
||||
/**
|
||||
* Pending queue depth. After close(), returns the count at close time —
|
||||
* close() clears the queue but shutdown diagnostics may read this after.
|
||||
*/
|
||||
get pendingCount(): number {
|
||||
return this.closed ? this.pendingAtClose : this.pending.length
|
||||
}
|
||||
|
||||
/**
|
||||
* Add events to the pending buffer. Returns immediately if space is
|
||||
* available. Blocks (awaits) if the buffer is full — caller pauses
|
||||
* until drain frees space.
|
||||
*/
|
||||
async enqueue(events: T | T[]): Promise<void> {
|
||||
if (this.closed) return
|
||||
const items = Array.isArray(events) ? events : [events]
|
||||
if (items.length === 0) return
|
||||
|
||||
// Backpressure: wait until there's space
|
||||
while (
|
||||
this.pending.length + items.length > this.config.maxQueueSize &&
|
||||
!this.closed
|
||||
) {
|
||||
await new Promise<void>(resolve => {
|
||||
this.backpressureResolvers.push(resolve)
|
||||
})
|
||||
}
|
||||
|
||||
if (this.closed) return
|
||||
this.pending.push(...items)
|
||||
void this.drain()
|
||||
}
|
||||
|
||||
/**
|
||||
* Block until all pending events have been sent.
|
||||
* Used at turn boundaries and graceful shutdown.
|
||||
*/
|
||||
flush(): Promise<void> {
|
||||
if (this.pending.length === 0 && !this.draining) {
|
||||
return Promise.resolve()
|
||||
}
|
||||
void this.drain()
|
||||
return new Promise<void>(resolve => {
|
||||
this.flushResolvers.push(resolve)
|
||||
})
|
||||
}
|
||||
|
||||
/**
|
||||
* Drop pending events and stop processing.
|
||||
* Resolves any blocked enqueue() and flush() callers.
|
||||
*/
|
||||
close(): void {
|
||||
if (this.closed) return
|
||||
this.closed = true
|
||||
this.pendingAtClose = this.pending.length
|
||||
this.pending = []
|
||||
this.sleepResolve?.()
|
||||
this.sleepResolve = null
|
||||
for (const resolve of this.backpressureResolvers) resolve()
|
||||
this.backpressureResolvers = []
|
||||
for (const resolve of this.flushResolvers) resolve()
|
||||
this.flushResolvers = []
|
||||
}
|
||||
|
||||
/**
|
||||
* Drain loop. At most one instance runs at a time (guarded by this.draining).
|
||||
* Sends batches serially. On failure, backs off and retries indefinitely.
|
||||
*/
|
||||
private async drain(): Promise<void> {
|
||||
if (this.draining || this.closed) return
|
||||
this.draining = true
|
||||
let failures = 0
|
||||
|
||||
try {
|
||||
while (this.pending.length > 0 && !this.closed) {
|
||||
const batch = this.takeBatch()
|
||||
if (batch.length === 0) continue
|
||||
|
||||
try {
|
||||
await this.config.send(batch)
|
||||
failures = 0
|
||||
} catch (err) {
|
||||
failures++
|
||||
if (
|
||||
this.config.maxConsecutiveFailures !== undefined &&
|
||||
failures >= this.config.maxConsecutiveFailures
|
||||
) {
|
||||
this.droppedBatches++
|
||||
this.config.onBatchDropped?.(batch.length, failures)
|
||||
failures = 0
|
||||
this.releaseBackpressure()
|
||||
continue
|
||||
}
|
||||
// Re-queue the failed batch at the front. Use concat (single
|
||||
// allocation) instead of unshift(...batch) which shifts every
|
||||
// pending item batch.length times. Only hit on failure path.
|
||||
this.pending = batch.concat(this.pending)
|
||||
const retryAfterMs =
|
||||
err instanceof RetryableError ? err.retryAfterMs : undefined
|
||||
await this.sleep(this.retryDelay(failures, retryAfterMs))
|
||||
continue
|
||||
}
|
||||
|
||||
// Release backpressure waiters if space opened up
|
||||
this.releaseBackpressure()
|
||||
}
|
||||
} finally {
|
||||
this.draining = false
|
||||
// Notify flush waiters if queue is empty
|
||||
if (this.pending.length === 0) {
|
||||
for (const resolve of this.flushResolvers) resolve()
|
||||
this.flushResolvers = []
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Pull the next batch from pending. Respects both maxBatchSize and
|
||||
* maxBatchBytes. The first item is always taken; subsequent items only
|
||||
* if adding them keeps the cumulative JSON size under maxBatchBytes.
|
||||
*
|
||||
* Un-serializable items (BigInt, circular refs, throwing toJSON) are
|
||||
* dropped in place — they can never be sent and leaving them at
|
||||
* pending[0] would poison the queue and hang flush() forever.
|
||||
*/
|
||||
private takeBatch(): T[] {
|
||||
const { maxBatchSize, maxBatchBytes } = this.config
|
||||
if (maxBatchBytes === undefined) {
|
||||
return this.pending.splice(0, maxBatchSize)
|
||||
}
|
||||
let bytes = 0
|
||||
let count = 0
|
||||
while (count < this.pending.length && count < maxBatchSize) {
|
||||
let itemBytes: number
|
||||
try {
|
||||
itemBytes = Buffer.byteLength(jsonStringify(this.pending[count]))
|
||||
} catch {
|
||||
this.pending.splice(count, 1)
|
||||
continue
|
||||
}
|
||||
if (count > 0 && bytes + itemBytes > maxBatchBytes) break
|
||||
bytes += itemBytes
|
||||
count++
|
||||
}
|
||||
return this.pending.splice(0, count)
|
||||
}
|
||||
|
||||
private retryDelay(failures: number, retryAfterMs?: number): number {
|
||||
const jitter = Math.random() * this.config.jitterMs
|
||||
if (retryAfterMs !== undefined) {
|
||||
// Jitter on top of the server's hint prevents thundering herd when
|
||||
// many sessions share a rate limit and all receive the same
|
||||
// Retry-After. Clamp first, then spread — same shape as the
|
||||
// exponential path (effective ceiling is maxDelayMs + jitterMs).
|
||||
const clamped = Math.max(
|
||||
this.config.baseDelayMs,
|
||||
Math.min(retryAfterMs, this.config.maxDelayMs),
|
||||
)
|
||||
return clamped + jitter
|
||||
}
|
||||
const exponential = Math.min(
|
||||
this.config.baseDelayMs * 2 ** (failures - 1),
|
||||
this.config.maxDelayMs,
|
||||
)
|
||||
return exponential + jitter
|
||||
}
|
||||
|
||||
private releaseBackpressure(): void {
|
||||
const resolvers = this.backpressureResolvers
|
||||
this.backpressureResolvers = []
|
||||
for (const resolve of resolvers) resolve()
|
||||
}
|
||||
|
||||
private sleep(ms: number): Promise<void> {
|
||||
return new Promise(resolve => {
|
||||
this.sleepResolve = resolve
|
||||
setTimeout(
|
||||
(self, resolve) => {
|
||||
self.sleepResolve = null
|
||||
resolve()
|
||||
},
|
||||
ms,
|
||||
this,
|
||||
resolve,
|
||||
)
|
||||
})
|
||||
}
|
||||
}
|
||||
7
src/cli/transports/Transport.ts
Normal file
7
src/cli/transports/Transport.ts
Normal file
@ -0,0 +1,7 @@
|
||||
/**
 * Minimal transport contract for CLI connections. Every member is
 * optional so partial implementations can satisfy the interface.
 */
export interface Transport {
  /** Establish the underlying connection. */
  connect?(): Promise<void>
  /** Tear down the connection; may complete synchronously or asynchronously. */
  close?(): void | Promise<void>
  /** Send one raw string payload. */
  send?(data: string): Promise<void>
  /** Register a handler for inbound raw payloads. */
  onData?(handler: (data: string) => void): void
  /** Register a handler invoked on close; closeCode is supplied when known. */
  onClose?(handler: (closeCode?: number) => void): void
}
|
||||
800
src/cli/transports/WebSocketTransport.ts
Normal file
800
src/cli/transports/WebSocketTransport.ts
Normal file
@ -0,0 +1,800 @@
|
||||
import type { StdoutMessage } from 'src/entrypoints/sdk/controlTypes.js'
|
||||
import type WsWebSocket from 'ws'
|
||||
import { logEvent } from '../../services/analytics/index.js'
|
||||
import { CircularBuffer } from '../../utils/CircularBuffer.js'
|
||||
import { logForDebugging } from '../../utils/debug.js'
|
||||
import { logForDiagnosticsNoPII } from '../../utils/diagLogs.js'
|
||||
import { isEnvTruthy } from '../../utils/envUtils.js'
|
||||
import { getWebSocketTLSOptions } from '../../utils/mtls.js'
|
||||
import {
|
||||
getWebSocketProxyAgent,
|
||||
getWebSocketProxyUrl,
|
||||
} from '../../utils/proxy.js'
|
||||
import {
|
||||
registerSessionActivityCallback,
|
||||
unregisterSessionActivityCallback,
|
||||
} from '../../utils/sessionActivity.js'
|
||||
import { jsonStringify } from '../../utils/slowOperations.js'
|
||||
import type { Transport } from './Transport.js'
|
||||
|
||||
// Newline-delimited keep-alive data frame. Presumably written on the
// keep-alive interval to reset proxy idle timers — the sender is outside
// this view; confirm against startKeepaliveInterval().
const KEEP_ALIVE_FRAME = '{"type":"keep_alive"}\n'

// Capacity of the replay CircularBuffer (messages retained for resend).
const DEFAULT_MAX_BUFFER_SIZE = 1000
// Exponential reconnect backoff: 1s base, capped at 30s per attempt.
const DEFAULT_BASE_RECONNECT_DELAY = 1000
const DEFAULT_MAX_RECONNECT_DELAY = 30000
/** Time budget for reconnection attempts before giving up (10 minutes). */
const DEFAULT_RECONNECT_GIVE_UP_MS = 600_000
// Health-check ping cadence (10s); consumer is outside this view.
const DEFAULT_PING_INTERVAL = 10000
const DEFAULT_KEEPALIVE_INTERVAL = 300_000 // 5 minutes

/**
 * Threshold for detecting system sleep/wake. If the gap between consecutive
 * reconnection attempts exceeds this, the machine likely slept. We reset
 * the reconnection budget and retry — the server will reject with permanent
 * close codes (4001/1002) if the session was reaped during sleep.
 */
const SLEEP_DETECTION_THRESHOLD_MS = DEFAULT_MAX_RECONNECT_DELAY * 2 // 60s

/**
 * WebSocket close codes that indicate a permanent server-side rejection.
 * The transport transitions to 'closed' immediately without retrying
 * (except 4003, which may retry once after a header refresh — see
 * handleConnectionError).
 */
const PERMANENT_CLOSE_CODES = new Set([
  1002, // protocol error — server rejected handshake (e.g. session reaped)
  4001, // session expired / not found
  4003, // unauthorized
])
|
||||
|
||||
/** Behavioral toggles for WebSocketTransport construction. */
export type WebSocketTransportOptions = {
  /** When false, the transport does not attempt automatic reconnection on
   * disconnect. Use this when the caller has its own recovery mechanism
   * (e.g. the REPL bridge poll loop). Defaults to true. */
  autoReconnect?: boolean
  /** Gates the tengu_ws_transport_* telemetry events. Set true at the
   * REPL-bridge construction site so only Remote Control sessions (the
   * Cloudflare-idle-timeout population) emit; print-mode workers stay
   * silent. Defaults to false. */
  isBridge?: boolean
}
|
||||
|
||||
/**
 * Transport lifecycle states. 'reconnecting' doubles as the in-progress
 * state during the very first connect() (see connect's state guard);
 * 'closing' marks a caller-initiated close(); 'closed' is terminal.
 */
type WebSocketTransportState =
  | 'idle'
  | 'connected'
  | 'reconnecting'
  | 'closing'
  | 'closed'

// Common interface between globalThis.WebSocket and ws.WebSocket
type WebSocketLike = {
  close(): void
  send(data: string): void
  ping?(): void // Bun & ws both support this
}
|
||||
|
||||
export class WebSocketTransport implements Transport {
|
||||
private ws: WebSocketLike | null = null
// UUID of the most recent buffered outbound message. Sent back to the
// server as the X-Last-Request-Id upgrade header in connect() so it can
// tell us (or we can infer) what to replay.
private lastSentId: string | null = null
protected url: URL
protected state: WebSocketTransportState = 'idle'
// Inbound raw-frame handler, installed via setOnData().
protected onData?: (data: string) => void
private onCloseCallback?: (closeCode?: number) => void
private onConnectCallback?: () => void
// Upgrade-request headers; mutated in place (Object.assign) whenever
// refreshHeaders() supplies fresh ones.
private headers: Record<string, string>
// Used only for labelling debug logs in write() within this file.
private sessionId?: string
private autoReconnect: boolean
private isBridge: boolean

// Reconnection state
private reconnectAttempts = 0
// Wall-clock when the current reconnect storm began; null while connected.
private reconnectStartTime: number | null = null
private reconnectTimer: NodeJS.Timeout | null = null
// Wall-clock of the previous reconnect attempt — used for sleep detection.
private lastReconnectAttemptTime: number | null = null
// Wall-clock of last WS data-frame activity (inbound message or outbound
// ws.send). Used to compute idle time at close — the signal for diagnosing
// proxy idle-timeout RSTs (e.g. Cloudflare 5-min). Excludes ping/pong
// control frames (proxies don't count those).
private lastActivityTime = 0

// Ping interval for connection health checks
private pingInterval: NodeJS.Timeout | null = null
// Set true by onPong; presumably consumed by the ping loop
// (startPingInterval is outside this view) — TODO confirm.
private pongReceived = true

// Periodic keep_alive data frames to reset proxy idle timers
private keepAliveInterval: NodeJS.Timeout | null = null

// Message buffering for replay on reconnection
private messageBuffer: CircularBuffer<StdoutMessage>
// Track which runtime's WS we're using so we can detach listeners
// with the matching API (removeEventListener vs. off).
private isBunWs = false

// Captured at connect() time for handleOpenEvent timing. Stored as an
// instance field so the onOpen handler can be a stable class-property
// arrow function (removable in doDisconnect) instead of a closure over
// a local variable.
private connectStartTime = 0

// Optional hook that mints fresh upgrade headers (e.g. a new auth token);
// invoked before reconnect attempts and on 4003 closes.
private refreshHeaders?: () => Record<string, string>

/**
 * @param url WebSocket endpoint to connect to.
 * @param headers Extra upgrade-request headers.
 * @param sessionId Session identifier (debug-log labelling only here).
 * @param refreshHeaders Hook returning fresh headers for reconnects.
 * @param options Reconnect/telemetry toggles; see WebSocketTransportOptions.
 */
constructor(
  url: URL,
  headers: Record<string, string> = {},
  sessionId?: string,
  refreshHeaders?: () => Record<string, string>,
  options?: WebSocketTransportOptions,
) {
  this.url = url
  this.headers = headers
  this.sessionId = sessionId
  this.refreshHeaders = refreshHeaders
  this.autoReconnect = options?.autoReconnect ?? true
  this.isBridge = options?.isBridge ?? false
  this.messageBuffer = new CircularBuffer(DEFAULT_MAX_BUFFER_SIZE)
}
|
||||
|
||||
/**
 * Open the WebSocket. Picks the runtime-native client: Bun's built-in
 * WebSocket when running under Bun, the `ws` package under Node — the two
 * have different option shapes and event APIs, hence the two branches.
 * Only legal from 'idle' or 'reconnecting'; otherwise logs and returns.
 * Completion is signalled via the open/close handlers, not this promise.
 */
public async connect(): Promise<void> {
  if (this.state !== 'idle' && this.state !== 'reconnecting') {
    logForDebugging(
      `WebSocketTransport: Cannot connect, current state is ${this.state}`,
      { level: 'error' },
    )
    logForDiagnosticsNoPII('error', 'cli_websocket_connect_failed')
    return
  }
  this.state = 'reconnecting'

  this.connectStartTime = Date.now()
  logForDebugging(`WebSocketTransport: Opening ${this.url.href}`)
  logForDiagnosticsNoPII('info', 'cli_websocket_connect_opening')

  // Start with provided headers and add runtime headers
  const headers = { ...this.headers }
  // Tell the server the last message we believe we delivered, so it can
  // drive replay/dedup on reconnect.
  if (this.lastSentId) {
    headers['X-Last-Request-Id'] = this.lastSentId
    logForDebugging(
      `WebSocketTransport: Adding X-Last-Request-Id header: ${this.lastSentId}`,
    )
  }

  if (typeof Bun !== 'undefined') {
    // Bun's WebSocket supports headers/proxy options but the DOM typings don't
    // eslint-disable-next-line eslint-plugin-n/no-unsupported-features/node-builtins
    const ws = new globalThis.WebSocket(this.url.href, {
      headers,
      proxy: getWebSocketProxyUrl(this.url.href),
      tls: getWebSocketTLSOptions() || undefined,
    } as unknown as string[])
    this.ws = ws
    this.isBunWs = true

    ws.addEventListener('open', this.onBunOpen)
    ws.addEventListener('message', this.onBunMessage)
    ws.addEventListener('error', this.onBunError)
    // eslint-disable-next-line eslint-plugin-n/no-unsupported-features/node-builtins
    ws.addEventListener('close', this.onBunClose)
    // 'pong' is Bun-specific — not in DOM typings.
    ws.addEventListener('pong', this.onPong)
  } else {
    const { default: WS } = await import('ws')
    const ws = new WS(this.url.href, {
      headers,
      agent: getWebSocketProxyAgent(this.url.href),
      ...getWebSocketTLSOptions(),
    })
    this.ws = ws
    this.isBunWs = false

    ws.on('open', this.onNodeOpen)
    ws.on('message', this.onNodeMessage)
    ws.on('error', this.onNodeError)
    ws.on('close', this.onNodeClose)
    ws.on('pong', this.onPong)
  }
}
|
||||
|
||||
// --- Bun (native WebSocket) event handlers ---
|
||||
// Stored as class-property arrow functions so they can be removed in
|
||||
// doDisconnect(). Without removal, each reconnect orphans the old WS
|
||||
// object + its 5 closures until GC, which accumulates under network
|
||||
// instability. Mirrors the pattern in src/utils/mcpWebSocketTransport.ts.
|
||||
|
||||
private onBunOpen = () => {
  this.handleOpenEvent()
  if (!this.lastSentId) return
  // Bun's native WebSocket exposes no upgrade-response headers, so we
  // can't learn which messages the server already received. Replay the
  // whole buffer; the server deduplicates by message UUID.
  this.replayBufferedMessages('')
}
|
||||
|
||||
private onBunMessage = (event: MessageEvent) => {
  const { data } = event
  // Coerce non-string payloads (e.g. binary frames) to string.
  const message = typeof data === 'string' ? data : String(data)
  this.lastActivityTime = Date.now()
  logForDiagnosticsNoPII('info', 'cli_websocket_message_received', {
    length: message.length,
  })
  this.onData?.(message)
}
|
||||
|
||||
private onBunError = () => {
  // Only record the error here: the close event always follows an error,
  // and that's where handleConnectionError runs.
  logForDebugging('WebSocketTransport: Error', { level: 'error' })
  logForDiagnosticsNoPII('error', 'cli_websocket_connect_error')
}
|
||||
|
||||
// eslint-disable-next-line eslint-plugin-n/no-unsupported-features/node-builtins
|
||||
private onBunClose = (event: CloseEvent) => {
|
||||
const isClean = event.code === 1000 || event.code === 1001
|
||||
logForDebugging(
|
||||
`WebSocketTransport: Closed: ${event.code}`,
|
||||
isClean ? undefined : { level: 'error' },
|
||||
)
|
||||
logForDiagnosticsNoPII('error', 'cli_websocket_connect_closed')
|
||||
this.handleConnectionError(event.code)
|
||||
}
|
||||
|
||||
// --- Node (ws package) event handlers ---
|
||||
|
||||
/**
 * Node ('ws' package) open handler. Unlike Bun, the ws client can expose
 * the upgrade request/response, letting us replay only the messages the
 * server has not confirmed.
 */
private onNodeOpen = () => {
  // Capture ws before handleOpenEvent() invokes onConnectCallback — if the
  // callback synchronously closes the transport, this.ws becomes null.
  // The old inline-closure code had this safety implicitly via closure capture.
  const ws = this.ws
  this.handleOpenEvent()
  if (!ws) return
  // Check for last-id in upgrade response headers (ws package only)
  const nws = ws as unknown as WsWebSocket & {
    upgradeReq?: { headers?: Record<string, string> }
  }
  const upgradeResponse = nws.upgradeReq
  if (upgradeResponse?.headers?.['x-last-request-id']) {
    const serverLastId = upgradeResponse.headers['x-last-request-id']
    this.replayBufferedMessages(serverLastId)
  }
}
|
||||
|
||||
private onNodeMessage = (data: Buffer) => {
  // ws delivers Buffers; decode with the default utf-8 encoding.
  const message = String(data)
  this.lastActivityTime = Date.now()
  logForDiagnosticsNoPII('info', 'cli_websocket_message_received', {
    length: message.length,
  })
  this.onData?.(message)
}
|
||||
|
||||
private onNodeError = (err: Error) => {
  // Only record the error here: the close event always follows an error,
  // and that's where handleConnectionError runs.
  logForDebugging(`WebSocketTransport: Error: ${err.message}`, {
    level: 'error',
  })
  logForDiagnosticsNoPII('error', 'cli_websocket_connect_error')
}
|
||||
|
||||
private onNodeClose = (code: number, _reason: Buffer) => {
  // 1000 (normal) and 1001 (going away) are clean shutdowns — log at
  // default level; anything else is an error-worthy drop.
  const clean = code === 1000 || code === 1001
  logForDebugging(
    `WebSocketTransport: Closed: ${code}`,
    clean ? undefined : { level: 'error' },
  )
  logForDiagnosticsNoPII('error', 'cli_websocket_connect_closed')
  this.handleConnectionError(code)
}
|
||||
|
||||
// --- Shared handlers ---
|
||||
|
||||
/** Pong received: mark the connection as responsive to the last ping. */
private onPong = () => {
  this.pongReceived = true
}
|
||||
|
||||
/**
 * Shared open handler (Bun + Node paths): records telemetry for a
 * successful (re)connect, resets reconnection bookkeeping, transitions to
 * 'connected', fires onConnectCallback, and starts the ping/keep-alive
 * timers plus the session-activity hook.
 */
private handleOpenEvent(): void {
  const connectDuration = Date.now() - this.connectStartTime
  logForDebugging('WebSocketTransport: Connected')
  logForDiagnosticsNoPII('info', 'cli_websocket_connect_connected', {
    duration_ms: connectDuration,
  })

  // Reconnect success — capture attempt count + downtime before resetting.
  // reconnectStartTime is null on first connect, non-null on reopen.
  if (this.isBridge && this.reconnectStartTime !== null) {
    logEvent('tengu_ws_transport_reconnected', {
      attempts: this.reconnectAttempts,
      downtimeMs: Date.now() - this.reconnectStartTime,
    })
  }

  this.reconnectAttempts = 0
  this.reconnectStartTime = null
  this.lastReconnectAttemptTime = null
  this.lastActivityTime = Date.now()
  this.state = 'connected'
  this.onConnectCallback?.()

  // Start periodic pings to detect dead connections
  this.startPingInterval()

  // Start periodic keep_alive data frames to reset proxy idle timers
  this.startKeepaliveInterval()

  // Register callback for session activity signals
  registerSessionActivityCallback(() => {
    void this.write({ type: 'keep_alive' })
  })
}
|
||||
|
||||
/**
 * Low-level single-frame send. Returns true on success, false when not
 * connected or when the send throws (in which case the connection-error
 * path is triggered).
 */
protected sendLine(line: string): boolean {
  const socket = this.ws
  if (socket === null || this.state !== 'connected') {
    logForDebugging('WebSocketTransport: Not connected')
    logForDiagnosticsNoPII('info', 'cli_websocket_send_not_connected')
    return false
  }

  try {
    socket.send(line)
    this.lastActivityTime = Date.now()
    return true
  } catch (error) {
    logForDebugging(`WebSocketTransport: Failed to send: ${error}`, {
      level: 'error',
    })
    logForDiagnosticsNoPII('error', 'cli_websocket_send_error')
    // Don't null this.ws here — let doDisconnect() (via handleConnectionError)
    // handle cleanup so listeners are removed before the WS is released.
    this.handleConnectionError()
    return false
  }
}
|
||||
|
||||
/**
 * Remove all listeners attached in connect() for the given WebSocket.
 * Without this, each reconnect orphans the old WS object + its closures
 * until GC — these accumulate under network instability. Mirrors the
 * pattern in src/utils/mcpWebSocketTransport.ts.
 *
 * Uses isBunWs (set in connect()) to pick the matching detach API:
 * DOM-style removeEventListener for Bun, EventEmitter-style off for ws.
 */
private removeWsListeners(ws: WebSocketLike): void {
  if (this.isBunWs) {
    const nws = ws as unknown as globalThis.WebSocket
    nws.removeEventListener('open', this.onBunOpen)
    nws.removeEventListener('message', this.onBunMessage)
    nws.removeEventListener('error', this.onBunError)
    // eslint-disable-next-line eslint-plugin-n/no-unsupported-features/node-builtins
    nws.removeEventListener('close', this.onBunClose)
    // 'pong' is Bun-specific — not in DOM typings
    nws.removeEventListener('pong' as 'message', this.onPong)
  } else {
    const nws = ws as unknown as WsWebSocket
    nws.off('open', this.onNodeOpen)
    nws.off('message', this.onNodeMessage)
    nws.off('error', this.onNodeError)
    nws.off('close', this.onNodeClose)
    nws.off('pong', this.onPong)
  }
}
|
||||
|
||||
protected doDisconnect(): void {
|
||||
// Stop pinging and keepalive when disconnecting
|
||||
this.stopPingInterval()
|
||||
this.stopKeepaliveInterval()
|
||||
|
||||
// Unregister session activity callback
|
||||
unregisterSessionActivityCallback()
|
||||
|
||||
if (this.ws) {
|
||||
// Remove listeners BEFORE close() so the old WS + closures can be
|
||||
// GC'd promptly instead of lingering until the next mark-and-sweep.
|
||||
this.removeWsListeners(this.ws)
|
||||
this.ws.close()
|
||||
this.ws = null
|
||||
}
|
||||
}
|
||||
|
||||
/**
 * Central disconnect/failure handler. In order: emits telemetry, tears
 * down the socket, then decides the next state — permanent close codes
 * (with a 4003 token-refresh escape hatch) or autoReconnect=false go to
 * 'closed'; otherwise schedules a jittered exponential-backoff reconnect
 * under a 10-minute budget, with system-sleep detection resetting that
 * budget. Statement order is load-bearing (telemetry reads pre-close
 * state; doDisconnect runs before the state transition).
 */
private handleConnectionError(closeCode?: number): void {
  logForDebugging(
    `WebSocketTransport: Disconnected from ${this.url.href}` +
      (closeCode != null ? ` (code ${closeCode})` : ''),
  )
  logForDiagnosticsNoPII('info', 'cli_websocket_disconnected')
  if (this.isBridge) {
    // Fire on every close — including intermediate ones during a reconnect
    // storm (those never surface to the onCloseCallback consumer). For the
    // Cloudflare-5min-idle hypothesis: cluster msSinceLastActivity; if the
    // peak sits at ~300s with closeCode 1006, that's the proxy RST.
    logEvent('tengu_ws_transport_closed', {
      closeCode,
      msSinceLastActivity:
        this.lastActivityTime > 0 ? Date.now() - this.lastActivityTime : -1,
      // 'connected' = healthy drop (the Cloudflare case); 'reconnecting' =
      // connect-rejection mid-storm. State isn't mutated until the branches
      // below, so this reads the pre-close value.
      wasConnected: this.state === 'connected',
      reconnectAttempts: this.reconnectAttempts,
    })
  }
  this.doDisconnect()

  // Caller-initiated or already-terminal close: nothing more to do.
  if (this.state === 'closing' || this.state === 'closed') return

  // Permanent codes: don't retry — server has definitively ended the session.
  // Exception: 4003 (unauthorized) can be retried when refreshHeaders is
  // available and returns a new token (e.g. after the parent process mints
  // a fresh session ingress token during reconnection).
  let headersRefreshed = false
  if (closeCode === 4003 && this.refreshHeaders) {
    const freshHeaders = this.refreshHeaders()
    // Only treat the refresh as meaningful if the token actually changed;
    // otherwise 4003 stays permanent below.
    if (freshHeaders.Authorization !== this.headers.Authorization) {
      Object.assign(this.headers, freshHeaders)
      headersRefreshed = true
      logForDebugging(
        'WebSocketTransport: 4003 received but headers refreshed, scheduling reconnect',
      )
      logForDiagnosticsNoPII('info', 'cli_websocket_4003_token_refreshed')
    }
  }

  if (
    closeCode != null &&
    PERMANENT_CLOSE_CODES.has(closeCode) &&
    !headersRefreshed
  ) {
    logForDebugging(
      `WebSocketTransport: Permanent close code ${closeCode}, not reconnecting`,
      { level: 'error' },
    )
    logForDiagnosticsNoPII('error', 'cli_websocket_permanent_close', {
      closeCode,
    })
    this.state = 'closed'
    this.onCloseCallback?.(closeCode)
    return
  }

  // When autoReconnect is disabled, go straight to closed state.
  // The caller (e.g. REPL bridge poll loop) handles recovery.
  if (!this.autoReconnect) {
    this.state = 'closed'
    this.onCloseCallback?.(closeCode)
    return
  }

  // Schedule reconnection with exponential backoff and time budget
  const now = Date.now()
  if (!this.reconnectStartTime) {
    this.reconnectStartTime = now
  }

  // Detect system sleep/wake: if the gap since our last reconnection
  // attempt greatly exceeds the max delay, the machine likely slept
  // (e.g. laptop lid closed). Reset the budget and retry from scratch —
  // the server will reject with permanent close codes (4001/1002) if
  // the session was reaped while we were asleep.
  if (
    this.lastReconnectAttemptTime !== null &&
    now - this.lastReconnectAttemptTime > SLEEP_DETECTION_THRESHOLD_MS
  ) {
    logForDebugging(
      `WebSocketTransport: Detected system sleep (${Math.round((now - this.lastReconnectAttemptTime) / 1000)}s gap), resetting reconnection budget`,
    )
    logForDiagnosticsNoPII('info', 'cli_websocket_sleep_detected', {
      gapMs: now - this.lastReconnectAttemptTime,
    })
    this.reconnectStartTime = now
    this.reconnectAttempts = 0
  }
  this.lastReconnectAttemptTime = now

  const elapsed = now - this.reconnectStartTime
  if (elapsed < DEFAULT_RECONNECT_GIVE_UP_MS) {
    // Clear any existing reconnection timer to avoid duplicates
    if (this.reconnectTimer) {
      clearTimeout(this.reconnectTimer)
      this.reconnectTimer = null
    }

    // Refresh headers before reconnecting (e.g. to pick up a new session token).
    // Skip if already refreshed by the 4003 path above.
    if (!headersRefreshed && this.refreshHeaders) {
      const freshHeaders = this.refreshHeaders()
      Object.assign(this.headers, freshHeaders)
      logForDebugging('WebSocketTransport: Refreshed headers for reconnect')
    }

    this.state = 'reconnecting'
    this.reconnectAttempts++

    const baseDelay = Math.min(
      DEFAULT_BASE_RECONNECT_DELAY * Math.pow(2, this.reconnectAttempts - 1),
      DEFAULT_MAX_RECONNECT_DELAY,
    )
    // Add ±25% jitter to avoid thundering herd
    const delay = Math.max(
      0,
      baseDelay + baseDelay * 0.25 * (2 * Math.random() - 1),
    )

    logForDebugging(
      `WebSocketTransport: Reconnecting in ${Math.round(delay)}ms (attempt ${this.reconnectAttempts}, ${Math.round(elapsed / 1000)}s elapsed)`,
    )
    logForDiagnosticsNoPII('error', 'cli_websocket_reconnect_attempt', {
      reconnectAttempts: this.reconnectAttempts,
    })
    if (this.isBridge) {
      logEvent('tengu_ws_transport_reconnecting', {
        attempt: this.reconnectAttempts,
        elapsedMs: elapsed,
        delayMs: Math.round(delay),
      })
    }

    this.reconnectTimer = setTimeout(() => {
      this.reconnectTimer = null
      void this.connect()
    }, delay)
  } else {
    logForDebugging(
      `WebSocketTransport: Reconnection time budget exhausted after ${Math.round(elapsed / 1000)}s for ${this.url.href}`,
      { level: 'error' },
    )
    logForDiagnosticsNoPII('error', 'cli_websocket_reconnect_exhausted', {
      reconnectAttempts: this.reconnectAttempts,
      elapsedMs: elapsed,
    })
    this.state = 'closed'

    // Notify close callback
    if (this.onCloseCallback) {
      this.onCloseCallback(closeCode)
    }
  }
}
|
||||
|
||||
close(): void {
|
||||
// Clear any pending reconnection timer
|
||||
if (this.reconnectTimer) {
|
||||
clearTimeout(this.reconnectTimer)
|
||||
this.reconnectTimer = null
|
||||
}
|
||||
|
||||
// Clear ping and keepalive intervals
|
||||
this.stopPingInterval()
|
||||
this.stopKeepaliveInterval()
|
||||
|
||||
// Unregister session activity callback
|
||||
unregisterSessionActivityCallback()
|
||||
|
||||
this.state = 'closing'
|
||||
this.doDisconnect()
|
||||
}
|
||||
|
||||
/**
 * Resend buffered messages after a reconnect. When the server supplies
 * the UUID of the last message it received (lastId), messages up to and
 * including it are evicted from the buffer and only the remainder is
 * replayed; with an empty lastId (Bun path) everything is replayed and
 * the server deduplicates.
 */
private replayBufferedMessages(lastId: string): void {
  const messages = this.messageBuffer.toArray()
  if (messages.length === 0) return

  // Find where to start replay based on server's last received message
  let startIndex = 0
  if (lastId) {
    const lastConfirmedIndex = messages.findIndex(
      message => 'uuid' in message && message.uuid === lastId,
    )
    if (lastConfirmedIndex >= 0) {
      // Server confirmed messages up to lastConfirmedIndex — evict them
      startIndex = lastConfirmedIndex + 1
      // Rebuild the buffer with only unconfirmed messages
      const remaining = messages.slice(startIndex)
      this.messageBuffer.clear()
      this.messageBuffer.addAll(remaining)
      if (remaining.length === 0) {
        this.lastSentId = null
      }
      logForDebugging(
        `WebSocketTransport: Evicted ${startIndex} confirmed messages, ${remaining.length} remaining`,
      )
      logForDiagnosticsNoPII(
        'info',
        'cli_websocket_evicted_confirmed_messages',
        {
          evicted: startIndex,
          remaining: remaining.length,
        },
      )
    }
  }

  const messagesToReplay = messages.slice(startIndex)
  if (messagesToReplay.length === 0) {
    logForDebugging('WebSocketTransport: No new messages to replay')
    logForDiagnosticsNoPII('info', 'cli_websocket_no_messages_to_replay')
    return
  }

  logForDebugging(
    `WebSocketTransport: Replaying ${messagesToReplay.length} buffered messages`,
  )
  logForDiagnosticsNoPII('info', 'cli_websocket_messages_to_replay', {
    count: messagesToReplay.length,
  })

  for (const message of messagesToReplay) {
    const line = jsonStringify(message) + '\n'
    const success = this.sendLine(line)
    if (!success) {
      // Connection died mid-replay — abort; the remaining messages stay
      // buffered for the next reconnect.
      this.handleConnectionError()
      break
    }
  }
  // Do NOT clear the buffer after replay — messages remain buffered until
  // the server confirms receipt on the next reconnection. This prevents
  // message loss if the connection drops after replay but before the server
  // processes the messages.
}
|
||||
|
||||
/** True while the transport is in the 'connected' lifecycle state. */
isConnectedStatus(): boolean {
  const { state } = this
  return state === 'connected'
}
|
||||
|
||||
/** True once the transport has reached the terminal 'closed' state. */
isClosedStatus(): boolean {
  const { state } = this
  return state === 'closed'
}
|
||||
|
||||
/** Register the handler invoked with each inbound data chunk. */
setOnData(callback: (data: string) => void): void {
  this.onData = callback
}
|
||||
|
||||
/** Register the handler invoked when the transport (re)connects. */
setOnConnect(callback: () => void): void {
  this.onConnectCallback = callback
}
|
||||
|
||||
/**
 * Register the handler invoked when the transport closes; receives the
 * WebSocket close code when one is available.
 */
setOnClose(callback: (closeCode?: number) => void): void {
  this.onCloseCallback = callback
}
|
||||
|
||||
/** Current lifecycle state as a plain string (for logging/diagnostics). */
getStateLabel(): string {
  return this.state
}
|
||||
|
||||
async write(message: StdoutMessage): Promise<void> {
|
||||
if ('uuid' in message && typeof message.uuid === 'string') {
|
||||
this.messageBuffer.add(message)
|
||||
this.lastSentId = message.uuid
|
||||
}
|
||||
|
||||
const line = jsonStringify(message) + '\n'
|
||||
|
||||
if (this.state !== 'connected') {
|
||||
// Message buffered for replay when connected (if it has a UUID)
|
||||
return
|
||||
}
|
||||
|
||||
const sessionLabel = this.sessionId ? ` session=${this.sessionId}` : ''
|
||||
const detailLabel = this.getControlMessageDetailLabel(message)
|
||||
|
||||
logForDebugging(
|
||||
`WebSocketTransport: Sending message type=${message.type}${sessionLabel}${detailLabel}`,
|
||||
)
|
||||
|
||||
this.sendLine(line)
|
||||
}
|
||||
|
||||
/**
 * Build a log-line suffix describing a control message: subtype, request
 * id, and (for can_use_tool requests) the tool name. Empty string for
 * non-control messages.
 */
private getControlMessageDetailLabel(message: StdoutMessage): string {
  switch (message.type) {
    case 'control_request': {
      const { request_id, request } = message
      const tool =
        request.subtype === 'can_use_tool' ? request.tool_name : ''
      return ` subtype=${request.subtype} request_id=${request_id}${tool ? ` tool=${tool}` : ''}`
    }
    case 'control_response': {
      const { subtype, request_id } = message.response
      return ` subtype=${subtype} request_id=${request_id}`
    }
    default:
      return ''
  }
}
|
||||
|
||||
/**
 * Start the periodic ping/pong liveness timer (any existing timer is
 * cleared first). Each tick also measures the wall-clock gap since the
 * previous tick so process suspension (sleep/SIGSTOP/VM pause) can be
 * detected and the connection proactively torn down and rebuilt.
 */
private startPingInterval(): void {
  // Clear any existing interval
  this.stopPingInterval()

  // Assume the connection is live until a ping goes unanswered.
  this.pongReceived = true
  let lastTickTime = Date.now()

  // Send ping periodically to detect dead connections.
  // If the previous ping got no pong, treat the connection as dead.
  this.pingInterval = setInterval(() => {
    if (this.state === 'connected' && this.ws) {
      const now = Date.now()
      const gap = now - lastTickTime
      lastTickTime = now

      // Process-suspension detector. If the wall-clock gap between ticks
      // greatly exceeds the 10s interval, the process was suspended
      // (laptop lid, SIGSTOP, VM pause). setInterval does not queue
      // missed ticks — it coalesces — so on wake this callback fires
      // once with a huge gap. The socket is almost certainly dead:
      // NAT mappings drop in 30s–5min, and the server has been
      // retransmitting into the void. Don't wait for a ping/pong
      // round-trip to confirm (ws.ping() on a dead socket returns
      // immediately with no error — bytes go into the kernel send
      // buffer). Assume dead and reconnect now. A spurious reconnect
      // after a short sleep is cheap — replayBufferedMessages() handles
      // it and the server dedups by UUID.
      if (gap > SLEEP_DETECTION_THRESHOLD_MS) {
        logForDebugging(
          `WebSocketTransport: ${Math.round(gap / 1000)}s tick gap detected — process was suspended, forcing reconnect`,
        )
        logForDiagnosticsNoPII(
          'info',
          'cli_websocket_sleep_detected_on_ping',
          { gapMs: gap },
        )
        this.handleConnectionError()
        return
      }

      // The previous ping was never answered — declare the link dead.
      if (!this.pongReceived) {
        logForDebugging(
          'WebSocketTransport: No pong received, connection appears dead',
          { level: 'error' },
        )
        logForDiagnosticsNoPII('error', 'cli_websocket_pong_timeout')
        this.handleConnectionError()
        return
      }

      this.pongReceived = false
      try {
        // Optional call — not every socket implementation exposes ping().
        this.ws.ping?.()
      } catch (error) {
        logForDebugging(`WebSocketTransport: Ping failed: ${error}`, {
          level: 'error',
        })
        logForDiagnosticsNoPII('error', 'cli_websocket_ping_failed')
      }
    }
  }, DEFAULT_PING_INTERVAL)
}
|
||||
|
||||
private stopPingInterval(): void {
|
||||
if (this.pingInterval) {
|
||||
clearInterval(this.pingInterval)
|
||||
this.pingInterval = null
|
||||
}
|
||||
}
|
||||
|
||||
private startKeepaliveInterval(): void {
|
||||
this.stopKeepaliveInterval()
|
||||
|
||||
// In CCR sessions, session activity heartbeats handle keep-alives
|
||||
if (isEnvTruthy(process.env.CLAUDE_CODE_REMOTE)) {
|
||||
return
|
||||
}
|
||||
|
||||
this.keepAliveInterval = setInterval(() => {
|
||||
if (this.state === 'connected' && this.ws) {
|
||||
try {
|
||||
this.ws.send(KEEP_ALIVE_FRAME)
|
||||
this.lastActivityTime = Date.now()
|
||||
logForDebugging(
|
||||
'WebSocketTransport: Sent periodic keep_alive data frame',
|
||||
)
|
||||
} catch (error) {
|
||||
logForDebugging(
|
||||
`WebSocketTransport: Periodic keep_alive failed: ${error}`,
|
||||
{ level: 'error' },
|
||||
)
|
||||
logForDiagnosticsNoPII('error', 'cli_websocket_keepalive_failed')
|
||||
}
|
||||
}
|
||||
}, DEFAULT_KEEPALIVE_INTERVAL)
|
||||
}
|
||||
|
||||
private stopKeepaliveInterval(): void {
|
||||
if (this.keepAliveInterval) {
|
||||
clearInterval(this.keepAliveInterval)
|
||||
this.keepAliveInterval = null
|
||||
}
|
||||
}
|
||||
}
|
||||
131
src/cli/transports/WorkerStateUploader.ts
Normal file
131
src/cli/transports/WorkerStateUploader.ts
Normal file
@ -0,0 +1,131 @@
|
||||
import { sleep } from '../../utils/sleep.js'
|
||||
|
||||
/**
|
||||
* Coalescing uploader for PUT /worker (session state + metadata).
|
||||
*
|
||||
* - 1 in-flight PUT + 1 pending patch
|
||||
* - New calls coalesce into pending (never grows beyond 1 slot)
|
||||
* - On success: send pending if exists
|
||||
* - On failure: exponential backoff (clamped), retries indefinitely
|
||||
* until success or close(). Absorbs any pending patches before each retry.
|
||||
* - No backpressure needed — naturally bounded at 2 slots
|
||||
*
|
||||
* Coalescing rules:
|
||||
* - Top-level keys (worker_status, external_metadata) — last value wins
|
||||
* - Inside external_metadata / internal_metadata — RFC 7396 merge:
|
||||
* keys are added/overwritten, null values preserved (server deletes)
|
||||
*/
|
||||
|
||||
type WorkerStateUploaderConfig = {
|
||||
send: (body: Record<string, unknown>) => Promise<boolean>
|
||||
/** Base delay for exponential backoff (ms) */
|
||||
baseDelayMs: number
|
||||
/** Max delay cap (ms) */
|
||||
maxDelayMs: number
|
||||
/** Random jitter range added to retry delay (ms) */
|
||||
jitterMs: number
|
||||
}
|
||||
|
||||
export class WorkerStateUploader {
|
||||
private inflight: Promise<void> | null = null
|
||||
private pending: Record<string, unknown> | null = null
|
||||
private closed = false
|
||||
private readonly config: WorkerStateUploaderConfig
|
||||
|
||||
constructor(config: WorkerStateUploaderConfig) {
|
||||
this.config = config
|
||||
}
|
||||
|
||||
/**
|
||||
* Enqueue a patch to PUT /worker. Coalesces with any existing pending
|
||||
* patch. Fire-and-forget — callers don't need to await.
|
||||
*/
|
||||
enqueue(patch: Record<string, unknown>): void {
|
||||
if (this.closed) return
|
||||
this.pending = this.pending ? coalescePatches(this.pending, patch) : patch
|
||||
void this.drain()
|
||||
}
|
||||
|
||||
close(): void {
|
||||
this.closed = true
|
||||
this.pending = null
|
||||
}
|
||||
|
||||
private async drain(): Promise<void> {
|
||||
if (this.inflight || this.closed) return
|
||||
if (!this.pending) return
|
||||
|
||||
const payload = this.pending
|
||||
this.pending = null
|
||||
|
||||
this.inflight = this.sendWithRetry(payload).then(() => {
|
||||
this.inflight = null
|
||||
if (this.pending && !this.closed) {
|
||||
void this.drain()
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
/** Retries indefinitely with exponential backoff until success or close(). */
|
||||
private async sendWithRetry(payload: Record<string, unknown>): Promise<void> {
|
||||
let current = payload
|
||||
let failures = 0
|
||||
while (!this.closed) {
|
||||
const ok = await this.config.send(current)
|
||||
if (ok) return
|
||||
|
||||
failures++
|
||||
await sleep(this.retryDelay(failures))
|
||||
|
||||
// Absorb any patches that arrived during the retry
|
||||
if (this.pending && !this.closed) {
|
||||
current = coalescePatches(current, this.pending)
|
||||
this.pending = null
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
private retryDelay(failures: number): number {
|
||||
const exponential = Math.min(
|
||||
this.config.baseDelayMs * 2 ** (failures - 1),
|
||||
this.config.maxDelayMs,
|
||||
)
|
||||
const jitter = Math.random() * this.config.jitterMs
|
||||
return exponential + jitter
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Coalesce two patches for PUT /worker.
|
||||
*
|
||||
* Top-level keys: overlay replaces base (last value wins).
|
||||
* Metadata keys (external_metadata, internal_metadata): RFC 7396 merge
|
||||
* one level deep — overlay keys are added/overwritten, null values
|
||||
* preserved for server-side delete.
|
||||
*/
|
||||
function coalescePatches(
|
||||
base: Record<string, unknown>,
|
||||
overlay: Record<string, unknown>,
|
||||
): Record<string, unknown> {
|
||||
const merged = { ...base }
|
||||
|
||||
for (const [key, value] of Object.entries(overlay)) {
|
||||
if (
|
||||
(key === 'external_metadata' || key === 'internal_metadata') &&
|
||||
merged[key] &&
|
||||
typeof merged[key] === 'object' &&
|
||||
typeof value === 'object' &&
|
||||
value !== null
|
||||
) {
|
||||
// RFC 7396 merge — overlay keys win, nulls preserved for server
|
||||
merged[key] = {
|
||||
...(merged[key] as Record<string, unknown>),
|
||||
...(value as Record<string, unknown>),
|
||||
}
|
||||
} else {
|
||||
merged[key] = value
|
||||
}
|
||||
}
|
||||
|
||||
return merged
|
||||
}
|
||||
998
src/cli/transports/ccrClient.ts
Normal file
998
src/cli/transports/ccrClient.ts
Normal file
@ -0,0 +1,998 @@
|
||||
import { randomUUID } from 'crypto'
|
||||
import type {
|
||||
SDKPartialAssistantMessage,
|
||||
StdoutMessage,
|
||||
} from 'src/entrypoints/sdk/controlTypes.js'
|
||||
import { decodeJwtExpiry } from '../../bridge/jwtUtils.js'
|
||||
import { logForDebugging } from '../../utils/debug.js'
|
||||
import { logForDiagnosticsNoPII } from '../../utils/diagLogs.js'
|
||||
import { errorMessage, getErrnoCode } from '../../utils/errors.js'
|
||||
import { createAxiosInstance } from '../../utils/proxy.js'
|
||||
import {
|
||||
registerSessionActivityCallback,
|
||||
unregisterSessionActivityCallback,
|
||||
} from '../../utils/sessionActivity.js'
|
||||
import {
|
||||
getSessionIngressAuthHeaders,
|
||||
getSessionIngressAuthToken,
|
||||
} from '../../utils/sessionIngressAuth.js'
|
||||
import type {
|
||||
RequiresActionDetails,
|
||||
SessionState,
|
||||
} from '../../utils/sessionState.js'
|
||||
import { sleep } from '../../utils/sleep.js'
|
||||
import { getClaudeCodeUserAgent } from '../../utils/userAgent.js'
|
||||
import {
|
||||
RetryableError,
|
||||
SerialBatchEventUploader,
|
||||
} from './SerialBatchEventUploader.js'
|
||||
import type { SSETransport, StreamClientEvent } from './SSETransport.js'
|
||||
import { WorkerStateUploader } from './WorkerStateUploader.js'
|
||||
|
||||
/** Default interval between heartbeat events (20s; server TTL is 60s). */
|
||||
const DEFAULT_HEARTBEAT_INTERVAL_MS = 20_000
|
||||
|
||||
/**
|
||||
* stream_event messages accumulate in a delay buffer for up to this many ms
|
||||
* before enqueue. Mirrors HybridTransport's batching window. text_delta
|
||||
* events for the same content block accumulate into a single full-so-far
|
||||
* snapshot per flush — each emitted event is self-contained so a client
|
||||
* connecting mid-stream sees complete text, not a fragment.
|
||||
*/
|
||||
const STREAM_EVENT_FLUSH_INTERVAL_MS = 100
|
||||
|
||||
/** Hoisted axios validateStatus callback to avoid per-request closure allocation. */
|
||||
// Returning true for every status keeps axios from throwing on non-2xx;
// request() inspects response.status itself and maps it to RequestResult.
function alwaysValidStatus(): boolean {
  return true
}
|
||||
|
||||
export type CCRInitFailReason =
|
||||
| 'no_auth_headers'
|
||||
| 'missing_epoch'
|
||||
| 'worker_register_failed'
|
||||
|
||||
/** Thrown by initialize(); carries a typed reason for the diag classifier. */
|
||||
export class CCRInitError extends Error {
|
||||
constructor(readonly reason: CCRInitFailReason) {
|
||||
super(`CCRClient init failed: ${reason}`)
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Consecutive 401/403 with a VALID-LOOKING token before giving up. An
|
||||
* expired JWT short-circuits this (exits immediately — deterministic,
|
||||
* retry is futile). This threshold is for the uncertain case: token's
|
||||
* exp is in the future but server says 401 (userauth down, KMS hiccup,
|
||||
* clock skew). 10 × 20s heartbeat ≈ 200s to ride it out.
|
||||
*/
|
||||
const MAX_CONSECUTIVE_AUTH_FAILURES = 10
|
||||
|
||||
type EventPayload = {
|
||||
uuid: string
|
||||
type: string
|
||||
[key: string]: unknown
|
||||
}
|
||||
|
||||
type ClientEvent = {
|
||||
payload: EventPayload
|
||||
ephemeral?: boolean
|
||||
}
|
||||
|
||||
/**
|
||||
* Structural subset of a stream_event carrying a text_delta. Not a narrowing
|
||||
* of SDKPartialAssistantMessage — RawMessageStreamEvent's delta is a union and
|
||||
* narrowing through two levels defeats the discriminant.
|
||||
*/
|
||||
type CoalescedStreamEvent = {
|
||||
type: 'stream_event'
|
||||
uuid: string
|
||||
session_id: string
|
||||
parent_tool_use_id: string | null
|
||||
event: {
|
||||
type: 'content_block_delta'
|
||||
index: number
|
||||
delta: { type: 'text_delta'; text: string }
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Accumulator state for text_delta coalescing. Keyed by API message ID so
|
||||
* lifetime is tied to the assistant message — cleared when the complete
|
||||
* SDKAssistantMessage arrives (writeEvent), which is reliable even when
|
||||
* abort/error paths skip content_block_stop/message_stop delivery.
|
||||
*/
|
||||
export type StreamAccumulatorState = {
|
||||
/** API message ID (msg_...) → blocks[blockIndex] → chunk array. */
|
||||
byMessage: Map<string, string[][]>
|
||||
/**
|
||||
* {session_id}:{parent_tool_use_id} → active message ID.
|
||||
* content_block_delta events don't carry the message ID (only
|
||||
* message_start does), so we track which message is currently streaming
|
||||
* for each scope. At most one message streams per scope at a time.
|
||||
*/
|
||||
scopeToMessage: Map<string, string>
|
||||
}
|
||||
|
||||
export function createStreamAccumulator(): StreamAccumulatorState {
|
||||
return { byMessage: new Map(), scopeToMessage: new Map() }
|
||||
}
|
||||
|
||||
function scopeKey(m: {
|
||||
session_id: string
|
||||
parent_tool_use_id: string | null
|
||||
}): string {
|
||||
return `${m.session_id}:${m.parent_tool_use_id ?? ''}`
|
||||
}
|
||||
|
||||
/**
|
||||
* Accumulate text_delta stream_events into full-so-far snapshots per content
|
||||
* block. Each flush emits ONE event per touched block containing the FULL
|
||||
* accumulated text from the start of the block — a client connecting
|
||||
* mid-stream receives a self-contained snapshot, not a fragment.
|
||||
*
|
||||
* Non-text-delta events pass through unchanged. message_start records the
|
||||
* active message ID for the scope; content_block_delta appends chunks;
|
||||
* the snapshot event reuses the first text_delta UUID seen for that block in
|
||||
* this flush so server-side idempotency remains stable across retries.
|
||||
*
|
||||
* Cleanup happens in writeEvent when the complete assistant message arrives
|
||||
* (reliable), not here on stop events (abort/error paths skip those).
|
||||
*/
|
||||
export function accumulateStreamEvents(
|
||||
buffer: SDKPartialAssistantMessage[],
|
||||
state: StreamAccumulatorState,
|
||||
): EventPayload[] {
|
||||
const out: EventPayload[] = []
|
||||
// chunks[] → snapshot already in `out` this flush. Keyed by the chunks
|
||||
// array reference (stable per {messageId, index}) so subsequent deltas
|
||||
// rewrite the same entry instead of emitting one event per delta.
|
||||
const touched = new Map<string[], CoalescedStreamEvent>()
|
||||
for (const msg of buffer) {
|
||||
switch (msg.event.type) {
|
||||
case 'message_start': {
|
||||
const id = msg.event.message.id
|
||||
const prevId = state.scopeToMessage.get(scopeKey(msg))
|
||||
if (prevId) state.byMessage.delete(prevId)
|
||||
state.scopeToMessage.set(scopeKey(msg), id)
|
||||
state.byMessage.set(id, [])
|
||||
out.push(msg)
|
||||
break
|
||||
}
|
||||
case 'content_block_delta': {
|
||||
if (msg.event.delta.type !== 'text_delta') {
|
||||
out.push(msg)
|
||||
break
|
||||
}
|
||||
const messageId = state.scopeToMessage.get(scopeKey(msg))
|
||||
const blocks = messageId ? state.byMessage.get(messageId) : undefined
|
||||
if (!blocks) {
|
||||
// Delta without a preceding message_start (reconnect mid-stream,
|
||||
// or message_start was in a prior buffer that got dropped). Pass
|
||||
// through raw — can't produce a full-so-far snapshot without the
|
||||
// prior chunks anyway.
|
||||
out.push(msg)
|
||||
break
|
||||
}
|
||||
const chunks = (blocks[msg.event.index] ??= [])
|
||||
chunks.push(msg.event.delta.text)
|
||||
const existing = touched.get(chunks)
|
||||
if (existing) {
|
||||
existing.event.delta.text = chunks.join('')
|
||||
break
|
||||
}
|
||||
const snapshot: CoalescedStreamEvent = {
|
||||
type: 'stream_event',
|
||||
uuid: msg.uuid,
|
||||
session_id: msg.session_id,
|
||||
parent_tool_use_id: msg.parent_tool_use_id,
|
||||
event: {
|
||||
type: 'content_block_delta',
|
||||
index: msg.event.index,
|
||||
delta: { type: 'text_delta', text: chunks.join('') },
|
||||
},
|
||||
}
|
||||
touched.set(chunks, snapshot)
|
||||
out.push(snapshot)
|
||||
break
|
||||
}
|
||||
default:
|
||||
out.push(msg)
|
||||
}
|
||||
}
|
||||
return out
|
||||
}
|
||||
|
||||
/**
|
||||
* Clear accumulator entries for a completed assistant message. Called from
|
||||
* writeEvent when the SDKAssistantMessage arrives — the reliable end-of-stream
|
||||
* signal that fires even when abort/interrupt/error skip SSE stop events.
|
||||
*/
|
||||
export function clearStreamAccumulatorForMessage(
|
||||
state: StreamAccumulatorState,
|
||||
assistant: {
|
||||
session_id: string
|
||||
parent_tool_use_id: string | null
|
||||
message: { id: string }
|
||||
},
|
||||
): void {
|
||||
state.byMessage.delete(assistant.message.id)
|
||||
const scope = scopeKey(assistant)
|
||||
if (state.scopeToMessage.get(scope) === assistant.message.id) {
|
||||
state.scopeToMessage.delete(scope)
|
||||
}
|
||||
}
|
||||
|
||||
type RequestResult = { ok: true } | { ok: false; retryAfterMs?: number }
|
||||
|
||||
type WorkerEvent = {
|
||||
payload: EventPayload
|
||||
is_compaction?: boolean
|
||||
agent_id?: string
|
||||
}
|
||||
|
||||
export type InternalEvent = {
|
||||
event_id: string
|
||||
event_type: string
|
||||
payload: Record<string, unknown>
|
||||
event_metadata?: Record<string, unknown> | null
|
||||
is_compaction: boolean
|
||||
created_at: string
|
||||
agent_id?: string
|
||||
}
|
||||
|
||||
type ListInternalEventsResponse = {
|
||||
data: InternalEvent[]
|
||||
next_cursor?: string
|
||||
}
|
||||
|
||||
type WorkerStateResponse = {
|
||||
worker?: {
|
||||
external_metadata?: Record<string, unknown>
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Manages the worker lifecycle protocol with CCR v2:
|
||||
* - Epoch management: reads worker_epoch from CLAUDE_CODE_WORKER_EPOCH env var
|
||||
* - Runtime state reporting: PUT /sessions/{id}/worker
|
||||
* - Heartbeat: POST /sessions/{id}/worker/heartbeat for liveness detection
|
||||
*
|
||||
* All writes go through this.request().
|
||||
*/
|
||||
export class CCRClient {
|
||||
private workerEpoch = 0
|
||||
private readonly heartbeatIntervalMs: number
|
||||
private readonly heartbeatJitterFraction: number
|
||||
private heartbeatTimer: NodeJS.Timeout | null = null
|
||||
private heartbeatInFlight = false
|
||||
private closed = false
|
||||
private consecutiveAuthFailures = 0
|
||||
private currentState: SessionState | null = null
|
||||
private readonly sessionBaseUrl: string
|
||||
private readonly sessionId: string
|
||||
private readonly http = createAxiosInstance({ keepAlive: true })
|
||||
|
||||
// stream_event delay buffer — accumulates content deltas for up to
|
||||
// STREAM_EVENT_FLUSH_INTERVAL_MS before enqueueing (reduces POST count
|
||||
// and enables text_delta coalescing). Mirrors HybridTransport's pattern.
|
||||
private streamEventBuffer: SDKPartialAssistantMessage[] = []
|
||||
private streamEventTimer: ReturnType<typeof setTimeout> | null = null
|
||||
// Full-so-far text accumulator. Persists across flushes so each emitted
|
||||
// text_delta event carries the complete text from the start of the block —
|
||||
// mid-stream reconnects see a self-contained snapshot. Keyed by API message
|
||||
// ID; cleared in writeEvent when the complete assistant message arrives.
|
||||
private streamTextAccumulator = createStreamAccumulator()
|
||||
|
||||
private readonly workerState: WorkerStateUploader
|
||||
private readonly eventUploader: SerialBatchEventUploader<ClientEvent>
|
||||
private readonly internalEventUploader: SerialBatchEventUploader<WorkerEvent>
|
||||
private readonly deliveryUploader: SerialBatchEventUploader<{
|
||||
eventId: string
|
||||
status: 'received' | 'processing' | 'processed'
|
||||
}>
|
||||
|
||||
/**
|
||||
* Called when the server returns 409 (a newer worker epoch superseded ours).
|
||||
* Default: process.exit(1) — correct for spawn-mode children where the
|
||||
* parent bridge re-spawns. In-process callers (replBridge) MUST override
|
||||
* this to close gracefully instead; exit would kill the user's REPL.
|
||||
*/
|
||||
private readonly onEpochMismatch: () => never
|
||||
|
||||
/**
|
||||
* Auth header source. Defaults to the process-wide session-ingress token
|
||||
* (CLAUDE_CODE_SESSION_ACCESS_TOKEN env var). Callers managing multiple
|
||||
* concurrent sessions with distinct JWTs MUST inject this — the env-var
|
||||
* path is a process global and would stomp across sessions.
|
||||
*/
|
||||
private readonly getAuthHeaders: () => Record<string, string>
|
||||
|
||||
/**
 * Wires the uploaders and the SSE delivery-ack callback. Performs no I/O —
 * network interaction starts in initialize().
 *
 * @param transport SSE transport whose onEvent callback is wired here so
 *   it is registered before the caller can invoke transport.connect().
 * @param sessionUrl Full session URL: https://host/v1/code/sessions/{id}.
 * @param opts Optional overrides for epoch-mismatch handling, heartbeat
 *   timing, and per-instance auth headers.
 * @throws Error when sessionUrl is not an http(s) URL.
 */
constructor(
  transport: SSETransport,
  sessionUrl: URL,
  opts?: {
    onEpochMismatch?: () => never
    heartbeatIntervalMs?: number
    heartbeatJitterFraction?: number
    /**
     * Per-instance auth header source. Omit to read the process-wide
     * CLAUDE_CODE_SESSION_ACCESS_TOKEN (single-session callers — REPL,
     * daemon). Required for concurrent multi-session callers.
     */
    getAuthHeaders?: () => Record<string, string>
  },
) {
  this.onEpochMismatch =
    opts?.onEpochMismatch ??
    (() => {
      // eslint-disable-next-line custom-rules/no-process-exit
      process.exit(1)
    })
  this.heartbeatIntervalMs =
    opts?.heartbeatIntervalMs ?? DEFAULT_HEARTBEAT_INTERVAL_MS
  this.heartbeatJitterFraction = opts?.heartbeatJitterFraction ?? 0
  this.getAuthHeaders = opts?.getAuthHeaders ?? getSessionIngressAuthHeaders
  // Session URL: https://host/v1/code/sessions/{id}
  if (sessionUrl.protocol !== 'http:' && sessionUrl.protocol !== 'https:') {
    throw new Error(
      `CCRClient: Expected http(s) URL, got ${sessionUrl.protocol}`,
    )
  }
  const pathname = sessionUrl.pathname.replace(/\/$/, '')
  this.sessionBaseUrl = `${sessionUrl.protocol}//${sessionUrl.host}${pathname}`
  // Extract session ID from the URL path (last segment)
  this.sessionId = pathname.split('/').pop() || ''

  // Coalescing uploader for worker runtime state (PUT /worker).
  this.workerState = new WorkerStateUploader({
    send: body =>
      this.request(
        'put',
        '/worker',
        { worker_epoch: this.workerEpoch, ...body },
        'PUT worker',
      ).then(r => r.ok),
    baseDelayMs: 500,
    maxDelayMs: 30_000,
    jitterMs: 500,
  })

  // Serial uploader for client-visible session events (POST /worker/events).
  this.eventUploader = new SerialBatchEventUploader<ClientEvent>({
    maxBatchSize: 100,
    maxBatchBytes: 10 * 1024 * 1024,
    // flushStreamEventBuffer() enqueues a full 100ms window of accumulated
    // stream_events in one call. A burst of mixed delta types that don't
    // fold into a single snapshot could exceed the old cap (50) and deadlock
    // on the SerialBatchEventUploader backpressure check. Match
    // HybridTransport's bound — high enough to be memory-only.
    maxQueueSize: 100_000,
    send: async batch => {
      const result = await this.request(
        'post',
        '/worker/events',
        { worker_epoch: this.workerEpoch, events: batch },
        'client events',
      )
      if (!result.ok) {
        throw new RetryableError(
          'client event POST failed',
          result.retryAfterMs,
        )
      }
    },
    baseDelayMs: 500,
    maxDelayMs: 30_000,
    jitterMs: 500,
  })

  // Serial uploader for internal bookkeeping events
  // (POST /worker/internal-events).
  this.internalEventUploader = new SerialBatchEventUploader<WorkerEvent>({
    maxBatchSize: 100,
    maxBatchBytes: 10 * 1024 * 1024,
    maxQueueSize: 200,
    send: async batch => {
      const result = await this.request(
        'post',
        '/worker/internal-events',
        { worker_epoch: this.workerEpoch, events: batch },
        'internal events',
      )
      if (!result.ok) {
        throw new RetryableError(
          'internal event POST failed',
          result.retryAfterMs,
        )
      }
    },
    baseDelayMs: 500,
    maxDelayMs: 30_000,
    jitterMs: 500,
  })

  // Serial uploader for delivery-status acks
  // (POST /worker/events/delivery).
  this.deliveryUploader = new SerialBatchEventUploader<{
    eventId: string
    status: 'received' | 'processing' | 'processed'
  }>({
    maxBatchSize: 64,
    maxQueueSize: 64,
    send: async batch => {
      const result = await this.request(
        'post',
        '/worker/events/delivery',
        {
          worker_epoch: this.workerEpoch,
          updates: batch.map(d => ({
            event_id: d.eventId,
            status: d.status,
          })),
        },
        'delivery batch',
      )
      if (!result.ok) {
        throw new RetryableError('delivery POST failed', result.retryAfterMs)
      }
    },
    baseDelayMs: 500,
    maxDelayMs: 30_000,
    jitterMs: 500,
  })

  // Ack each received client_event so CCR can track delivery status.
  // Wired here (not in initialize()) so the callback is registered the
  // moment new CCRClient() returns — remoteIO must be free to call
  // transport.connect() immediately after without racing the first
  // SSE catch-up frame against an unwired onEventCallback.
  transport.setOnEvent((event: StreamClientEvent) => {
    this.reportDelivery(event.event_id, 'received')
  })
}
|
||||
|
||||
/**
|
||||
* Initialize the session worker:
|
||||
* 1. Take worker_epoch from the argument, or fall back to
|
||||
* CLAUDE_CODE_WORKER_EPOCH (set by env-manager / bridge spawner)
|
||||
* 2. Report state as 'idle'
|
||||
* 3. Start heartbeat timer
|
||||
*
|
||||
* In-process callers (replBridge) pass the epoch directly — they
|
||||
* registered the worker themselves and there is no parent process
|
||||
* setting env vars.
|
||||
*/
|
||||
async initialize(epoch?: number): Promise<Record<string, unknown> | null> {
  const startMs = Date.now()
  if (Object.keys(this.getAuthHeaders()).length === 0) {
    throw new CCRInitError('no_auth_headers')
  }
  // Fall back to the epoch set by the env-manager / bridge spawner when
  // the caller did not supply one directly.
  if (epoch === undefined) {
    const rawEpoch = process.env.CLAUDE_CODE_WORKER_EPOCH
    epoch = rawEpoch ? parseInt(rawEpoch, 10) : NaN
  }
  if (isNaN(epoch)) {
    throw new CCRInitError('missing_epoch')
  }
  this.workerEpoch = epoch

  // Concurrent with the init PUT — neither depends on the other.
  const restoredPromise = this.getWorkerState()

  const result = await this.request(
    'put',
    '/worker',
    {
      worker_status: 'idle',
      worker_epoch: this.workerEpoch,
      // Clear stale pending_action/task_summary left by a prior
      // worker crash — the in-session clears don't survive process restart.
      external_metadata: {
        pending_action: null,
        task_summary: null,
      },
    },
    'PUT worker (init)',
  )
  if (!result.ok) {
    // 409 → onEpochMismatch may throw, but request() catches it and returns
    // false. Without this check we'd continue to startHeartbeat(), leaking a
    // 20s timer against a dead epoch. Throw so connect()'s rejection handler
    // fires instead of the success path.
    throw new CCRInitError('worker_register_failed')
  }
  this.currentState = 'idle'
  this.startHeartbeat()

  // sessionActivity's refcount-gated timer fires while an API call or tool
  // is in-flight; without a write the container lease can expire mid-wait.
  // v1 wires this in WebSocketTransport per-connection.
  registerSessionActivityCallback(() => {
    void this.writeEvent({ type: 'keep_alive' })
  })

  logForDebugging(`CCRClient: initialized, epoch=${this.workerEpoch}`)
  logForDiagnosticsNoPII('info', 'cli_worker_lifecycle_initialized', {
    epoch: this.workerEpoch,
    duration_ms: Date.now() - startMs,
  })

  // Await the concurrent GET and log state_restored here, after the PUT
  // has succeeded — logging inside getWorkerState() raced: if the GET
  // resolved before the PUT failed, diagnostics showed both init_failed
  // and state_restored for the same session.
  const { metadata, durationMs } = await restoredPromise
  if (!this.closed) {
    logForDiagnosticsNoPII('info', 'cli_worker_state_restored', {
      duration_ms: durationMs,
      had_state: metadata !== null,
    })
  }
  return metadata
}
|
||||
|
||||
// Control_requests are marked processed and not re-delivered on
|
||||
// restart, so read back what the prior worker wrote.
|
||||
private async getWorkerState(): Promise<{
|
||||
metadata: Record<string, unknown> | null
|
||||
durationMs: number
|
||||
}> {
|
||||
const startMs = Date.now()
|
||||
const authHeaders = this.getAuthHeaders()
|
||||
if (Object.keys(authHeaders).length === 0) {
|
||||
return { metadata: null, durationMs: 0 }
|
||||
}
|
||||
const data = await this.getWithRetry<WorkerStateResponse>(
|
||||
`${this.sessionBaseUrl}/worker`,
|
||||
authHeaders,
|
||||
'worker_state',
|
||||
)
|
||||
return {
|
||||
metadata: data?.worker?.external_metadata ?? null,
|
||||
durationMs: Date.now() - startMs,
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Send an authenticated HTTP request to CCR. Handles auth headers,
|
||||
* 409 epoch mismatch, and error logging. Returns { ok: true } on 2xx.
|
||||
* On 429, reads Retry-After (integer seconds) so the uploader can honor
|
||||
* the server's backoff hint instead of blindly exponentiating.
|
||||
*/
|
||||
private async request(
|
||||
method: 'post' | 'put',
|
||||
path: string,
|
||||
body: unknown,
|
||||
label: string,
|
||||
{ timeout = 10_000 }: { timeout?: number } = {},
|
||||
): Promise<RequestResult> {
|
||||
const authHeaders = this.getAuthHeaders()
|
||||
if (Object.keys(authHeaders).length === 0) return { ok: false }
|
||||
|
||||
try {
|
||||
const response = await this.http[method](
|
||||
`${this.sessionBaseUrl}${path}`,
|
||||
body,
|
||||
{
|
||||
headers: {
|
||||
...authHeaders,
|
||||
'Content-Type': 'application/json',
|
||||
'anthropic-version': '2023-06-01',
|
||||
'User-Agent': getClaudeCodeUserAgent(),
|
||||
},
|
||||
validateStatus: alwaysValidStatus,
|
||||
timeout,
|
||||
},
|
||||
)
|
||||
|
||||
if (response.status >= 200 && response.status < 300) {
|
||||
this.consecutiveAuthFailures = 0
|
||||
return { ok: true }
|
||||
}
|
||||
if (response.status === 409) {
|
||||
this.handleEpochMismatch()
|
||||
}
|
||||
if (response.status === 401 || response.status === 403) {
|
||||
// A 401 with an expired JWT is deterministic — no retry will
|
||||
// ever succeed. Check the token's own exp before burning
|
||||
// wall-clock on the threshold loop.
|
||||
const tok = getSessionIngressAuthToken()
|
||||
const exp = tok ? decodeJwtExpiry(tok) : null
|
||||
if (exp !== null && exp * 1000 < Date.now()) {
|
||||
logForDebugging(
|
||||
`CCRClient: session_token expired (exp=${new Date(exp * 1000).toISOString()}) — no refresh was delivered, exiting`,
|
||||
{ level: 'error' },
|
||||
)
|
||||
logForDiagnosticsNoPII('error', 'cli_worker_token_expired_no_refresh')
|
||||
this.onEpochMismatch()
|
||||
}
|
||||
// Token looks valid but server says 401 — possible server-side
|
||||
// blip (userauth down, KMS hiccup). Count toward threshold.
|
||||
this.consecutiveAuthFailures++
|
||||
if (this.consecutiveAuthFailures >= MAX_CONSECUTIVE_AUTH_FAILURES) {
|
||||
logForDebugging(
|
||||
`CCRClient: ${this.consecutiveAuthFailures} consecutive auth failures with a valid-looking token — server-side auth unrecoverable, exiting`,
|
||||
{ level: 'error' },
|
||||
)
|
||||
logForDiagnosticsNoPII('error', 'cli_worker_auth_failures_exhausted')
|
||||
this.onEpochMismatch()
|
||||
}
|
||||
}
|
||||
logForDebugging(`CCRClient: ${label} returned ${response.status}`, {
|
||||
level: 'warn',
|
||||
})
|
||||
logForDiagnosticsNoPII('warn', 'cli_worker_request_failed', {
|
||||
method,
|
||||
path,
|
||||
status: response.status,
|
||||
})
|
||||
if (response.status === 429) {
|
||||
const raw = response.headers?.['retry-after']
|
||||
const seconds = typeof raw === 'string' ? parseInt(raw, 10) : NaN
|
||||
if (!isNaN(seconds) && seconds >= 0) {
|
||||
return { ok: false, retryAfterMs: seconds * 1000 }
|
||||
}
|
||||
}
|
||||
return { ok: false }
|
||||
} catch (error) {
|
||||
logForDebugging(`CCRClient: ${label} failed: ${errorMessage(error)}`, {
|
||||
level: 'warn',
|
||||
})
|
||||
logForDiagnosticsNoPII('warn', 'cli_worker_request_error', {
|
||||
method,
|
||||
path,
|
||||
error_code: getErrnoCode(error),
|
||||
})
|
||||
return { ok: false }
|
||||
}
|
||||
}
|
||||
|
||||
/** Report worker state to CCR via PUT /sessions/{id}/worker. */
|
||||
reportState(state: SessionState, details?: RequiresActionDetails): void {
|
||||
if (state === this.currentState && !details) return
|
||||
this.currentState = state
|
||||
this.workerState.enqueue({
|
||||
worker_status: state,
|
||||
requires_action_details: details
|
||||
? {
|
||||
tool_name: details.tool_name,
|
||||
action_description: details.action_description,
|
||||
request_id: details.request_id,
|
||||
}
|
||||
: null,
|
||||
})
|
||||
}
|
||||
|
||||
/** Report external metadata to CCR via PUT /worker. */
|
||||
reportMetadata(metadata: Record<string, unknown>): void {
|
||||
this.workerState.enqueue({ external_metadata: metadata })
|
||||
}
|
||||
|
||||
/**
|
||||
* Handle epoch mismatch (409 Conflict). A newer CC instance has replaced
|
||||
* this one — exit immediately.
|
||||
*/
|
||||
private handleEpochMismatch(): never {
|
||||
logForDebugging('CCRClient: Epoch mismatch (409), shutting down', {
|
||||
level: 'error',
|
||||
})
|
||||
logForDiagnosticsNoPII('error', 'cli_worker_epoch_mismatch')
|
||||
this.onEpochMismatch()
|
||||
}
|
||||
|
||||
/** Start periodic heartbeat. */
|
||||
private startHeartbeat(): void {
|
||||
this.stopHeartbeat()
|
||||
const schedule = (): void => {
|
||||
const jitter =
|
||||
this.heartbeatIntervalMs *
|
||||
this.heartbeatJitterFraction *
|
||||
(2 * Math.random() - 1)
|
||||
this.heartbeatTimer = setTimeout(tick, this.heartbeatIntervalMs + jitter)
|
||||
}
|
||||
const tick = (): void => {
|
||||
void this.sendHeartbeat()
|
||||
// stopHeartbeat nulls the timer; check after the fire-and-forget send
|
||||
// but before rescheduling so close() during sendHeartbeat is honored.
|
||||
if (this.heartbeatTimer === null) return
|
||||
schedule()
|
||||
}
|
||||
schedule()
|
||||
}
|
||||
|
||||
/** Stop heartbeat timer. */
|
||||
private stopHeartbeat(): void {
|
||||
if (this.heartbeatTimer) {
|
||||
clearTimeout(this.heartbeatTimer)
|
||||
this.heartbeatTimer = null
|
||||
}
|
||||
}
|
||||
|
||||
/** Send a heartbeat via POST /sessions/{id}/worker/heartbeat. */
|
||||
private async sendHeartbeat(): Promise<void> {
|
||||
if (this.heartbeatInFlight) return
|
||||
this.heartbeatInFlight = true
|
||||
try {
|
||||
const result = await this.request(
|
||||
'post',
|
||||
'/worker/heartbeat',
|
||||
{ session_id: this.sessionId, worker_epoch: this.workerEpoch },
|
||||
'Heartbeat',
|
||||
{ timeout: 5_000 },
|
||||
)
|
||||
if (result.ok) {
|
||||
logForDebugging('CCRClient: Heartbeat sent')
|
||||
}
|
||||
} finally {
|
||||
this.heartbeatInFlight = false
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Write a StdoutMessage as a client event via POST /sessions/{id}/worker/events.
|
||||
* These events are visible to frontend clients via the SSE stream.
|
||||
* Injects a UUID if missing to ensure server-side idempotency on retry.
|
||||
*
|
||||
* stream_event messages are held in a 100ms delay buffer and accumulated
|
||||
* (text_deltas for the same content block emit a full-so-far snapshot per
|
||||
* flush). A non-stream_event write flushes the buffer first so downstream
|
||||
* ordering is preserved.
|
||||
*/
|
||||
async writeEvent(message: StdoutMessage): Promise<void> {
|
||||
if (message.type === 'stream_event') {
|
||||
this.streamEventBuffer.push(message)
|
||||
if (!this.streamEventTimer) {
|
||||
this.streamEventTimer = setTimeout(
|
||||
() => void this.flushStreamEventBuffer(),
|
||||
STREAM_EVENT_FLUSH_INTERVAL_MS,
|
||||
)
|
||||
}
|
||||
return
|
||||
}
|
||||
await this.flushStreamEventBuffer()
|
||||
if (message.type === 'assistant') {
|
||||
clearStreamAccumulatorForMessage(this.streamTextAccumulator, message)
|
||||
}
|
||||
await this.eventUploader.enqueue(this.toClientEvent(message))
|
||||
}
|
||||
|
||||
/** Wrap a StdoutMessage as a ClientEvent, injecting a UUID if missing. */
|
||||
private toClientEvent(message: StdoutMessage): ClientEvent {
|
||||
const msg = message as unknown as Record<string, unknown>
|
||||
return {
|
||||
payload: {
|
||||
...msg,
|
||||
uuid: typeof msg.uuid === 'string' ? msg.uuid : randomUUID(),
|
||||
} as EventPayload,
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Drain the stream_event delay buffer: accumulate text_deltas into
|
||||
* full-so-far snapshots, clear the timer, enqueue the resulting events.
|
||||
* Called from the timer, from writeEvent on a non-stream message, and from
|
||||
* flush(). close() drops the buffer — call flush() first if you need
|
||||
* delivery.
|
||||
*/
|
||||
private async flushStreamEventBuffer(): Promise<void> {
|
||||
if (this.streamEventTimer) {
|
||||
clearTimeout(this.streamEventTimer)
|
||||
this.streamEventTimer = null
|
||||
}
|
||||
if (this.streamEventBuffer.length === 0) return
|
||||
const buffered = this.streamEventBuffer
|
||||
this.streamEventBuffer = []
|
||||
const payloads = accumulateStreamEvents(
|
||||
buffered,
|
||||
this.streamTextAccumulator,
|
||||
)
|
||||
await this.eventUploader.enqueue(
|
||||
payloads.map(payload => ({ payload, ephemeral: true })),
|
||||
)
|
||||
}
|
||||
|
||||
/**
|
||||
* Write an internal worker event via POST /sessions/{id}/worker/internal-events.
|
||||
* These events are NOT visible to frontend clients — they store worker-internal
|
||||
* state (transcript messages, compaction markers) needed for session resume.
|
||||
*/
|
||||
async writeInternalEvent(
|
||||
eventType: string,
|
||||
payload: Record<string, unknown>,
|
||||
{
|
||||
isCompaction = false,
|
||||
agentId,
|
||||
}: {
|
||||
isCompaction?: boolean
|
||||
agentId?: string
|
||||
} = {},
|
||||
): Promise<void> {
|
||||
const event: WorkerEvent = {
|
||||
payload: {
|
||||
type: eventType,
|
||||
...payload,
|
||||
uuid: typeof payload.uuid === 'string' ? payload.uuid : randomUUID(),
|
||||
} as EventPayload,
|
||||
...(isCompaction && { is_compaction: true }),
|
||||
...(agentId && { agent_id: agentId }),
|
||||
}
|
||||
await this.internalEventUploader.enqueue(event)
|
||||
}
|
||||
|
||||
/**
|
||||
* Flush pending internal events. Call between turns and on shutdown
|
||||
* to ensure transcript entries are persisted.
|
||||
*/
|
||||
flushInternalEvents(): Promise<void> {
|
||||
return this.internalEventUploader.flush()
|
||||
}
|
||||
|
||||
/**
|
||||
* Flush pending client events (writeEvent queue). Call before close()
|
||||
* when the caller needs delivery confirmation — close() abandons the
|
||||
* queue. Resolves once the uploader drains or rejects; returns
|
||||
* regardless of whether individual POSTs succeeded (check server state
|
||||
* separately if that matters).
|
||||
*/
|
||||
async flush(): Promise<void> {
|
||||
await this.flushStreamEventBuffer()
|
||||
return this.eventUploader.flush()
|
||||
}
|
||||
|
||||
/**
|
||||
* Read foreground agent internal events from
|
||||
* GET /sessions/{id}/worker/internal-events.
|
||||
* Returns transcript entries from the last compaction boundary, or null on failure.
|
||||
* Used for session resume.
|
||||
*/
|
||||
async readInternalEvents(): Promise<InternalEvent[] | null> {
|
||||
return this.paginatedGet('/worker/internal-events', {}, 'internal_events')
|
||||
}
|
||||
|
||||
/**
|
||||
* Read all subagent internal events from
|
||||
* GET /sessions/{id}/worker/internal-events?subagents=true.
|
||||
* Returns a merged stream across all non-foreground agents, each from its
|
||||
* compaction point. Used for session resume.
|
||||
*/
|
||||
async readSubagentInternalEvents(): Promise<InternalEvent[] | null> {
|
||||
return this.paginatedGet(
|
||||
'/worker/internal-events',
|
||||
{ subagents: 'true' },
|
||||
'subagent_events',
|
||||
)
|
||||
}
|
||||
|
||||
/**
|
||||
* Paginated GET with retry. Fetches all pages from a list endpoint,
|
||||
* retrying each page on failure with exponential backoff + jitter.
|
||||
*/
|
||||
private async paginatedGet(
|
||||
path: string,
|
||||
params: Record<string, string>,
|
||||
context: string,
|
||||
): Promise<InternalEvent[] | null> {
|
||||
const authHeaders = this.getAuthHeaders()
|
||||
if (Object.keys(authHeaders).length === 0) return null
|
||||
|
||||
const allEvents: InternalEvent[] = []
|
||||
let cursor: string | undefined
|
||||
|
||||
do {
|
||||
const url = new URL(`${this.sessionBaseUrl}${path}`)
|
||||
for (const [k, v] of Object.entries(params)) {
|
||||
url.searchParams.set(k, v)
|
||||
}
|
||||
if (cursor) {
|
||||
url.searchParams.set('cursor', cursor)
|
||||
}
|
||||
|
||||
const page = await this.getWithRetry<ListInternalEventsResponse>(
|
||||
url.toString(),
|
||||
authHeaders,
|
||||
context,
|
||||
)
|
||||
if (!page) return null
|
||||
|
||||
allEvents.push(...(page.data ?? []))
|
||||
cursor = page.next_cursor
|
||||
} while (cursor)
|
||||
|
||||
logForDebugging(
|
||||
`CCRClient: Read ${allEvents.length} internal events from ${path}${params.subagents ? ' (subagents)' : ''}`,
|
||||
)
|
||||
return allEvents
|
||||
}
|
||||
|
||||
/**
|
||||
* Single GET request with retry. Returns the parsed response body
|
||||
* on success, null if all retries are exhausted.
|
||||
*/
|
||||
private async getWithRetry<T>(
|
||||
url: string,
|
||||
authHeaders: Record<string, string>,
|
||||
context: string,
|
||||
): Promise<T | null> {
|
||||
for (let attempt = 1; attempt <= 10; attempt++) {
|
||||
let response
|
||||
try {
|
||||
response = await this.http.get<T>(url, {
|
||||
headers: {
|
||||
...authHeaders,
|
||||
'anthropic-version': '2023-06-01',
|
||||
'User-Agent': getClaudeCodeUserAgent(),
|
||||
},
|
||||
validateStatus: alwaysValidStatus,
|
||||
timeout: 30_000,
|
||||
})
|
||||
} catch (error) {
|
||||
logForDebugging(
|
||||
`CCRClient: GET ${url} failed (attempt ${attempt}/10): ${errorMessage(error)}`,
|
||||
{ level: 'warn' },
|
||||
)
|
||||
if (attempt < 10) {
|
||||
const delay =
|
||||
Math.min(500 * 2 ** (attempt - 1), 30_000) + Math.random() * 500
|
||||
await sleep(delay)
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
if (response.status >= 200 && response.status < 300) {
|
||||
return response.data
|
||||
}
|
||||
if (response.status === 409) {
|
||||
this.handleEpochMismatch()
|
||||
}
|
||||
logForDebugging(
|
||||
`CCRClient: GET ${url} returned ${response.status} (attempt ${attempt}/10)`,
|
||||
{ level: 'warn' },
|
||||
)
|
||||
|
||||
if (attempt < 10) {
|
||||
const delay =
|
||||
Math.min(500 * 2 ** (attempt - 1), 30_000) + Math.random() * 500
|
||||
await sleep(delay)
|
||||
}
|
||||
}
|
||||
|
||||
logForDebugging('CCRClient: GET retries exhausted', { level: 'error' })
|
||||
logForDiagnosticsNoPII('error', 'cli_worker_get_retries_exhausted', {
|
||||
context,
|
||||
})
|
||||
return null
|
||||
}
|
||||
|
||||
/**
|
||||
* Report delivery status for a client-to-worker event.
|
||||
* POST /v1/code/sessions/{id}/worker/events/delivery (batch endpoint)
|
||||
*/
|
||||
reportDelivery(
|
||||
eventId: string,
|
||||
status: 'received' | 'processing' | 'processed',
|
||||
): void {
|
||||
void this.deliveryUploader.enqueue({ eventId, status })
|
||||
}
|
||||
|
||||
/** Get the current epoch (for external use). */
|
||||
getWorkerEpoch(): number {
|
||||
return this.workerEpoch
|
||||
}
|
||||
|
||||
/** Internal-event queue depth — shutdown-snapshot backpressure signal. */
|
||||
get internalEventsPending(): number {
|
||||
return this.internalEventUploader.pendingCount
|
||||
}
|
||||
|
||||
/** Clean up uploaders and timers. */
|
||||
close(): void {
|
||||
this.closed = true
|
||||
this.stopHeartbeat()
|
||||
unregisterSessionActivityCallback()
|
||||
if (this.streamEventTimer) {
|
||||
clearTimeout(this.streamEventTimer)
|
||||
this.streamEventTimer = null
|
||||
}
|
||||
this.streamEventBuffer = []
|
||||
this.streamTextAccumulator.byMessage.clear()
|
||||
this.streamTextAccumulator.scopeToMessage.clear()
|
||||
this.workerState.close()
|
||||
this.eventUploader.close()
|
||||
this.internalEventUploader.close()
|
||||
this.deliveryUploader.close()
|
||||
}
|
||||
}
|
||||
45
src/cli/transports/transportUtils.ts
Normal file
45
src/cli/transports/transportUtils.ts
Normal file
@ -0,0 +1,45 @@
|
||||
import { URL } from 'url'
|
||||
import { isEnvTruthy } from '../../utils/envUtils.js'
|
||||
import { HybridTransport } from './HybridTransport.js'
|
||||
import { SSETransport } from './SSETransport.js'
|
||||
import type { Transport } from './Transport.js'
|
||||
import { WebSocketTransport } from './WebSocketTransport.js'
|
||||
|
||||
/**
|
||||
* Helper function to get the appropriate transport for a URL.
|
||||
*
|
||||
* Transport selection priority:
|
||||
* 1. SSETransport (SSE reads + POST writes) when CLAUDE_CODE_USE_CCR_V2 is set
|
||||
* 2. HybridTransport (WS reads + POST writes) when CLAUDE_CODE_POST_FOR_SESSION_INGRESS_V2 is set
|
||||
* 3. WebSocketTransport (WS reads + WS writes) — default
|
||||
*/
|
||||
export function getTransportForUrl(
|
||||
url: URL,
|
||||
headers: Record<string, string> = {},
|
||||
sessionId?: string,
|
||||
refreshHeaders?: () => Record<string, string>,
|
||||
): Transport {
|
||||
if (isEnvTruthy(process.env.CLAUDE_CODE_USE_CCR_V2)) {
|
||||
// v2: SSE for reads, HTTP POST for writes
|
||||
// --sdk-url is the session URL (.../sessions/{id});
|
||||
// derive the SSE stream URL by appending /worker/events/stream
|
||||
const sseUrl = new URL(url.href)
|
||||
if (sseUrl.protocol === 'wss:') {
|
||||
sseUrl.protocol = 'https:'
|
||||
} else if (sseUrl.protocol === 'ws:') {
|
||||
sseUrl.protocol = 'http:'
|
||||
}
|
||||
sseUrl.pathname =
|
||||
sseUrl.pathname.replace(/\/$/, '') + '/worker/events/stream'
|
||||
return new SSETransport(sseUrl, headers, sessionId, refreshHeaders)
|
||||
}
|
||||
|
||||
if (url.protocol === 'ws:' || url.protocol === 'wss:') {
|
||||
if (isEnvTruthy(process.env.CLAUDE_CODE_POST_FOR_SESSION_INGRESS_V2)) {
|
||||
return new HybridTransport(url, headers, sessionId, refreshHeaders)
|
||||
}
|
||||
return new WebSocketTransport(url, headers, sessionId, refreshHeaders)
|
||||
} else {
|
||||
throw new Error(`Unsupported protocol: ${url.protocol}`)
|
||||
}
|
||||
}
|
||||
422
src/cli/update.ts
Normal file
422
src/cli/update.ts
Normal file
@ -0,0 +1,422 @@
|
||||
import chalk from 'chalk'
|
||||
import { logEvent } from 'src/services/analytics/index.js'
|
||||
import {
|
||||
getLatestVersion,
|
||||
type InstallStatus,
|
||||
installGlobalPackage,
|
||||
} from 'src/utils/autoUpdater.js'
|
||||
import { regenerateCompletionCache } from 'src/utils/completionCache.js'
|
||||
import {
|
||||
getGlobalConfig,
|
||||
type InstallMethod,
|
||||
saveGlobalConfig,
|
||||
} from 'src/utils/config.js'
|
||||
import { logForDebugging } from 'src/utils/debug.js'
|
||||
import { getDoctorDiagnostic } from 'src/utils/doctorDiagnostic.js'
|
||||
import { gracefulShutdown } from 'src/utils/gracefulShutdown.js'
|
||||
import {
|
||||
installOrUpdateClaudePackage,
|
||||
localInstallationExists,
|
||||
} from 'src/utils/localInstaller.js'
|
||||
import {
|
||||
installLatest as installLatestNative,
|
||||
removeInstalledSymlink,
|
||||
} from 'src/utils/nativeInstaller/index.js'
|
||||
import { getPackageManager } from 'src/utils/nativeInstaller/packageManagers.js'
|
||||
import { writeToStdout } from 'src/utils/process.js'
|
||||
import { gte } from 'src/utils/semver.js'
|
||||
import { getInitialSettings } from 'src/utils/settings/settings.js'
|
||||
|
||||
export async function update() {
|
||||
logEvent('tengu_update_check', {})
|
||||
writeToStdout(`Current version: ${MACRO.VERSION}\n`)
|
||||
|
||||
const channel = getInitialSettings()?.autoUpdatesChannel ?? 'latest'
|
||||
writeToStdout(`Checking for updates to ${channel} version...\n`)
|
||||
|
||||
logForDebugging('update: Starting update check')
|
||||
|
||||
// Run diagnostic to detect potential issues
|
||||
logForDebugging('update: Running diagnostic')
|
||||
const diagnostic = await getDoctorDiagnostic()
|
||||
logForDebugging(`update: Installation type: ${diagnostic.installationType}`)
|
||||
logForDebugging(
|
||||
`update: Config install method: ${diagnostic.configInstallMethod}`,
|
||||
)
|
||||
|
||||
// Check for multiple installations
|
||||
if (diagnostic.multipleInstallations.length > 1) {
|
||||
writeToStdout('\n')
|
||||
writeToStdout(chalk.yellow('Warning: Multiple installations found') + '\n')
|
||||
for (const install of diagnostic.multipleInstallations) {
|
||||
const current =
|
||||
diagnostic.installationType === install.type
|
||||
? ' (currently running)'
|
||||
: ''
|
||||
writeToStdout(`- ${install.type} at ${install.path}${current}\n`)
|
||||
}
|
||||
}
|
||||
|
||||
// Display warnings if any exist
|
||||
if (diagnostic.warnings.length > 0) {
|
||||
writeToStdout('\n')
|
||||
for (const warning of diagnostic.warnings) {
|
||||
logForDebugging(`update: Warning detected: ${warning.issue}`)
|
||||
|
||||
// Don't skip PATH warnings - they're always relevant
|
||||
// The user needs to know that 'which claude' points elsewhere
|
||||
logForDebugging(`update: Showing warning: ${warning.issue}`)
|
||||
|
||||
writeToStdout(chalk.yellow(`Warning: ${warning.issue}\n`))
|
||||
|
||||
writeToStdout(chalk.bold(`Fix: ${warning.fix}\n`))
|
||||
}
|
||||
}
|
||||
|
||||
// Update config if installMethod is not set (but skip for package managers)
|
||||
const config = getGlobalConfig()
|
||||
if (
|
||||
!config.installMethod &&
|
||||
diagnostic.installationType !== 'package-manager'
|
||||
) {
|
||||
writeToStdout('\n')
|
||||
writeToStdout('Updating configuration to track installation method...\n')
|
||||
let detectedMethod: 'local' | 'native' | 'global' | 'unknown' = 'unknown'
|
||||
|
||||
// Map diagnostic installation type to config install method
|
||||
switch (diagnostic.installationType) {
|
||||
case 'npm-local':
|
||||
detectedMethod = 'local'
|
||||
break
|
||||
case 'native':
|
||||
detectedMethod = 'native'
|
||||
break
|
||||
case 'npm-global':
|
||||
detectedMethod = 'global'
|
||||
break
|
||||
default:
|
||||
detectedMethod = 'unknown'
|
||||
}
|
||||
|
||||
saveGlobalConfig(current => ({
|
||||
...current,
|
||||
installMethod: detectedMethod,
|
||||
}))
|
||||
writeToStdout(`Installation method set to: ${detectedMethod}\n`)
|
||||
}
|
||||
|
||||
// Check if running from development build
|
||||
if (diagnostic.installationType === 'development') {
|
||||
writeToStdout('\n')
|
||||
writeToStdout(
|
||||
chalk.yellow('Warning: Cannot update development build') + '\n',
|
||||
)
|
||||
await gracefulShutdown(1)
|
||||
}
|
||||
|
||||
// Check if running from a package manager
|
||||
if (diagnostic.installationType === 'package-manager') {
|
||||
const packageManager = await getPackageManager()
|
||||
writeToStdout('\n')
|
||||
|
||||
if (packageManager === 'homebrew') {
|
||||
writeToStdout('Claude is managed by Homebrew.\n')
|
||||
const latest = await getLatestVersion(channel)
|
||||
if (latest && !gte(MACRO.VERSION, latest)) {
|
||||
writeToStdout(`Update available: ${MACRO.VERSION} → ${latest}\n`)
|
||||
writeToStdout('\n')
|
||||
writeToStdout('To update, run:\n')
|
||||
writeToStdout(chalk.bold(' brew upgrade claude-code') + '\n')
|
||||
} else {
|
||||
writeToStdout('Claude is up to date!\n')
|
||||
}
|
||||
} else if (packageManager === 'winget') {
|
||||
writeToStdout('Claude is managed by winget.\n')
|
||||
const latest = await getLatestVersion(channel)
|
||||
if (latest && !gte(MACRO.VERSION, latest)) {
|
||||
writeToStdout(`Update available: ${MACRO.VERSION} → ${latest}\n`)
|
||||
writeToStdout('\n')
|
||||
writeToStdout('To update, run:\n')
|
||||
writeToStdout(
|
||||
chalk.bold(' winget upgrade Anthropic.ClaudeCode') + '\n',
|
||||
)
|
||||
} else {
|
||||
writeToStdout('Claude is up to date!\n')
|
||||
}
|
||||
} else if (packageManager === 'apk') {
|
||||
writeToStdout('Claude is managed by apk.\n')
|
||||
const latest = await getLatestVersion(channel)
|
||||
if (latest && !gte(MACRO.VERSION, latest)) {
|
||||
writeToStdout(`Update available: ${MACRO.VERSION} → ${latest}\n`)
|
||||
writeToStdout('\n')
|
||||
writeToStdout('To update, run:\n')
|
||||
writeToStdout(chalk.bold(' apk upgrade claude-code') + '\n')
|
||||
} else {
|
||||
writeToStdout('Claude is up to date!\n')
|
||||
}
|
||||
} else {
|
||||
// pacman, deb, and rpm don't get specific commands because they each have
|
||||
// multiple frontends (pacman: yay/paru/makepkg, deb: apt/apt-get/aptitude/nala,
|
||||
// rpm: dnf/yum/zypper)
|
||||
writeToStdout('Claude is managed by a package manager.\n')
|
||||
writeToStdout('Please use your package manager to update.\n')
|
||||
}
|
||||
|
||||
await gracefulShutdown(0)
|
||||
}
|
||||
|
||||
// Check for config/reality mismatch (skip for package-manager installs)
|
||||
if (
|
||||
config.installMethod &&
|
||||
diagnostic.configInstallMethod !== 'not set' &&
|
||||
diagnostic.installationType !== 'package-manager'
|
||||
) {
|
||||
const runningType = diagnostic.installationType
|
||||
const configExpects = diagnostic.configInstallMethod
|
||||
|
||||
// Map installation types for comparison
|
||||
const typeMapping: Record<string, string> = {
|
||||
'npm-local': 'local',
|
||||
'npm-global': 'global',
|
||||
native: 'native',
|
||||
development: 'development',
|
||||
unknown: 'unknown',
|
||||
}
|
||||
|
||||
const normalizedRunningType = typeMapping[runningType] || runningType
|
||||
|
||||
if (
|
||||
normalizedRunningType !== configExpects &&
|
||||
configExpects !== 'unknown'
|
||||
) {
|
||||
writeToStdout('\n')
|
||||
writeToStdout(chalk.yellow('Warning: Configuration mismatch') + '\n')
|
||||
writeToStdout(`Config expects: ${configExpects} installation\n`)
|
||||
writeToStdout(`Currently running: ${runningType}\n`)
|
||||
writeToStdout(
|
||||
chalk.yellow(
|
||||
`Updating the ${runningType} installation you are currently using`,
|
||||
) + '\n',
|
||||
)
|
||||
|
||||
// Update config to match reality
|
||||
saveGlobalConfig(current => ({
|
||||
...current,
|
||||
installMethod: normalizedRunningType as InstallMethod,
|
||||
}))
|
||||
writeToStdout(
|
||||
`Config updated to reflect current installation method: ${normalizedRunningType}\n`,
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
// Handle native installation updates first
|
||||
if (diagnostic.installationType === 'native') {
|
||||
logForDebugging(
|
||||
'update: Detected native installation, using native updater',
|
||||
)
|
||||
try {
|
||||
const result = await installLatestNative(channel, true)
|
||||
|
||||
// Handle lock contention gracefully
|
||||
if (result.lockFailed) {
|
||||
const pidInfo = result.lockHolderPid
|
||||
? ` (PID ${result.lockHolderPid})`
|
||||
: ''
|
||||
writeToStdout(
|
||||
chalk.yellow(
|
||||
`Another Claude process${pidInfo} is currently running. Please try again in a moment.`,
|
||||
) + '\n',
|
||||
)
|
||||
await gracefulShutdown(0)
|
||||
}
|
||||
|
||||
if (!result.latestVersion) {
|
||||
process.stderr.write('Failed to check for updates\n')
|
||||
await gracefulShutdown(1)
|
||||
}
|
||||
|
||||
if (result.latestVersion === MACRO.VERSION) {
|
||||
writeToStdout(
|
||||
chalk.green(`Claude Code is up to date (${MACRO.VERSION})`) + '\n',
|
||||
)
|
||||
} else {
|
||||
writeToStdout(
|
||||
chalk.green(
|
||||
`Successfully updated from ${MACRO.VERSION} to version ${result.latestVersion}`,
|
||||
) + '\n',
|
||||
)
|
||||
await regenerateCompletionCache()
|
||||
}
|
||||
await gracefulShutdown(0)
|
||||
} catch (error) {
|
||||
process.stderr.write('Error: Failed to install native update\n')
|
||||
process.stderr.write(String(error) + '\n')
|
||||
process.stderr.write('Try running "claude doctor" for diagnostics\n')
|
||||
await gracefulShutdown(1)
|
||||
}
|
||||
}
|
||||
|
||||
// Fallback to existing JS/npm-based update logic
|
||||
// Remove native installer symlink since we're not using native installation
|
||||
// But only if user hasn't migrated to native installation
|
||||
if (config.installMethod !== 'native') {
|
||||
await removeInstalledSymlink()
|
||||
}
|
||||
|
||||
logForDebugging('update: Checking npm registry for latest version')
|
||||
logForDebugging(`update: Package URL: ${MACRO.PACKAGE_URL}`)
|
||||
const npmTag = channel === 'stable' ? 'stable' : 'latest'
|
||||
const npmCommand = `npm view ${MACRO.PACKAGE_URL}@${npmTag} version`
|
||||
logForDebugging(`update: Running: ${npmCommand}`)
|
||||
const latestVersion = await getLatestVersion(channel)
|
||||
logForDebugging(
|
||||
`update: Latest version from npm: ${latestVersion || 'FAILED'}`,
|
||||
)
|
||||
|
||||
if (!latestVersion) {
|
||||
logForDebugging('update: Failed to get latest version from npm registry')
|
||||
process.stderr.write(chalk.red('Failed to check for updates') + '\n')
|
||||
process.stderr.write('Unable to fetch latest version from npm registry\n')
|
||||
process.stderr.write('\n')
|
||||
process.stderr.write('Possible causes:\n')
|
||||
process.stderr.write(' • Network connectivity issues\n')
|
||||
process.stderr.write(' • npm registry is unreachable\n')
|
||||
process.stderr.write(' • Corporate proxy/firewall blocking npm\n')
|
||||
if (MACRO.PACKAGE_URL && !MACRO.PACKAGE_URL.startsWith('@anthropic')) {
|
||||
process.stderr.write(
|
||||
' • Internal/development build not published to npm\n',
|
||||
)
|
||||
}
|
||||
process.stderr.write('\n')
|
||||
process.stderr.write('Try:\n')
|
||||
process.stderr.write(' • Check your internet connection\n')
|
||||
process.stderr.write(' • Run with --debug flag for more details\n')
|
||||
const packageName =
|
||||
MACRO.PACKAGE_URL ||
|
||||
(process.env.USER_TYPE === 'ant'
|
||||
? '@anthropic-ai/claude-cli'
|
||||
: '@anthropic-ai/claude-code')
|
||||
process.stderr.write(
|
||||
` • Manually check: npm view ${packageName} version\n`,
|
||||
)
|
||||
|
||||
process.stderr.write(' • Check if you need to login: npm whoami\n')
|
||||
await gracefulShutdown(1)
|
||||
}
|
||||
|
||||
// Check if versions match exactly, including any build metadata (like SHA)
|
||||
if (latestVersion === MACRO.VERSION) {
|
||||
writeToStdout(
|
||||
chalk.green(`Claude Code is up to date (${MACRO.VERSION})`) + '\n',
|
||||
)
|
||||
await gracefulShutdown(0)
|
||||
}
|
||||
|
||||
writeToStdout(
|
||||
`New version available: ${latestVersion} (current: ${MACRO.VERSION})\n`,
|
||||
)
|
||||
writeToStdout('Installing update...\n')
|
||||
|
||||
// Determine update method based on what's actually running
|
||||
let useLocalUpdate = false
|
||||
let updateMethodName = ''
|
||||
|
||||
switch (diagnostic.installationType) {
|
||||
case 'npm-local':
|
||||
useLocalUpdate = true
|
||||
updateMethodName = 'local'
|
||||
break
|
||||
case 'npm-global':
|
||||
useLocalUpdate = false
|
||||
updateMethodName = 'global'
|
||||
break
|
||||
case 'unknown': {
|
||||
// Fallback to detection if we can't determine installation type
|
||||
const isLocal = await localInstallationExists()
|
||||
useLocalUpdate = isLocal
|
||||
updateMethodName = isLocal ? 'local' : 'global'
|
||||
writeToStdout(
|
||||
chalk.yellow('Warning: Could not determine installation type') + '\n',
|
||||
)
|
||||
writeToStdout(
|
||||
`Attempting ${updateMethodName} update based on file detection...\n`,
|
||||
)
|
||||
break
|
||||
}
|
||||
default:
|
||||
process.stderr.write(
|
||||
`Error: Cannot update ${diagnostic.installationType} installation\n`,
|
||||
)
|
||||
await gracefulShutdown(1)
|
||||
}
|
||||
|
||||
writeToStdout(`Using ${updateMethodName} installation update method...\n`)
|
||||
|
||||
logForDebugging(`update: Update method determined: ${updateMethodName}`)
|
||||
logForDebugging(`update: useLocalUpdate: ${useLocalUpdate}`)
|
||||
|
||||
let status: InstallStatus
|
||||
|
||||
if (useLocalUpdate) {
|
||||
logForDebugging(
|
||||
'update: Calling installOrUpdateClaudePackage() for local update',
|
||||
)
|
||||
status = await installOrUpdateClaudePackage(channel)
|
||||
} else {
|
||||
logForDebugging('update: Calling installGlobalPackage() for global update')
|
||||
status = await installGlobalPackage()
|
||||
}
|
||||
|
||||
logForDebugging(`update: Installation status: ${status}`)
|
||||
|
||||
switch (status) {
|
||||
case 'success':
|
||||
writeToStdout(
|
||||
chalk.green(
|
||||
`Successfully updated from ${MACRO.VERSION} to version ${latestVersion}`,
|
||||
) + '\n',
|
||||
)
|
||||
await regenerateCompletionCache()
|
||||
break
|
||||
case 'no_permissions':
|
||||
process.stderr.write(
|
||||
'Error: Insufficient permissions to install update\n',
|
||||
)
|
||||
if (useLocalUpdate) {
|
||||
process.stderr.write('Try manually updating with:\n')
|
||||
process.stderr.write(
|
||||
` cd ~/.claude/local && npm update ${MACRO.PACKAGE_URL}\n`,
|
||||
)
|
||||
} else {
|
||||
process.stderr.write('Try running with sudo or fix npm permissions\n')
|
||||
process.stderr.write(
|
||||
'Or consider using native installation with: claude install\n',
|
||||
)
|
||||
}
|
||||
await gracefulShutdown(1)
|
||||
break
|
||||
case 'install_failed':
|
||||
process.stderr.write('Error: Failed to install update\n')
|
||||
if (useLocalUpdate) {
|
||||
process.stderr.write('Try manually updating with:\n')
|
||||
process.stderr.write(
|
||||
` cd ~/.claude/local && npm update ${MACRO.PACKAGE_URL}\n`,
|
||||
)
|
||||
} else {
|
||||
process.stderr.write(
|
||||
'Or consider using native installation with: claude install\n',
|
||||
)
|
||||
}
|
||||
await gracefulShutdown(1)
|
||||
break
|
||||
case 'in_progress':
|
||||
process.stderr.write(
|
||||
'Error: Another instance is currently performing an update\n',
|
||||
)
|
||||
process.stderr.write('Please wait and try again later\n')
|
||||
await gracefulShutdown(1)
|
||||
break
|
||||
}
|
||||
await gracefulShutdown(0)
|
||||
}
|
||||
754
src/commands.ts
Normal file
754
src/commands.ts
Normal file
@ -0,0 +1,754 @@
|
||||
// biome-ignore-all assist/source/organizeImports: ANT-ONLY import markers must not be reordered
|
||||
import addDir from './commands/add-dir/index.js'
|
||||
import autofixPr from './commands/autofix-pr/index.js'
|
||||
import backfillSessions from './commands/backfill-sessions/index.js'
|
||||
import btw from './commands/btw/index.js'
|
||||
import goodClaude from './commands/good-claude/index.js'
|
||||
import issue from './commands/issue/index.js'
|
||||
import feedback from './commands/feedback/index.js'
|
||||
import clear from './commands/clear/index.js'
|
||||
import color from './commands/color/index.js'
|
||||
import commit from './commands/commit.js'
|
||||
import copy from './commands/copy/index.js'
|
||||
import desktop from './commands/desktop/index.js'
|
||||
import commitPushPr from './commands/commit-push-pr.js'
|
||||
import compact from './commands/compact/index.js'
|
||||
import config from './commands/config/index.js'
|
||||
import { context, contextNonInteractive } from './commands/context/index.js'
|
||||
import cost from './commands/cost/index.js'
|
||||
import diff from './commands/diff/index.js'
|
||||
import ctx_viz from './commands/ctx_viz/index.js'
|
||||
import doctor from './commands/doctor/index.js'
|
||||
import memory from './commands/memory/index.js'
|
||||
import help from './commands/help/index.js'
|
||||
import ide from './commands/ide/index.js'
|
||||
import init from './commands/init.js'
|
||||
import initVerifiers from './commands/init-verifiers.js'
|
||||
import keybindings from './commands/keybindings/index.js'
|
||||
import login from './commands/login/index.js'
|
||||
import logout from './commands/logout/index.js'
|
||||
import installGitHubApp from './commands/install-github-app/index.js'
|
||||
import installSlackApp from './commands/install-slack-app/index.js'
|
||||
import breakCache from './commands/break-cache/index.js'
|
||||
import mcp from './commands/mcp/index.js'
|
||||
import mobile from './commands/mobile/index.js'
|
||||
import onboarding from './commands/onboarding/index.js'
|
||||
import pr_comments from './commands/pr_comments/index.js'
|
||||
import releaseNotes from './commands/release-notes/index.js'
|
||||
import rename from './commands/rename/index.js'
|
||||
import resume from './commands/resume/index.js'
|
||||
import review, { ultrareview } from './commands/review.js'
|
||||
import session from './commands/session/index.js'
|
||||
import share from './commands/share/index.js'
|
||||
import skills from './commands/skills/index.js'
|
||||
import status from './commands/status/index.js'
|
||||
import tasks from './commands/tasks/index.js'
|
||||
import teleport from './commands/teleport/index.js'
|
||||
/* eslint-disable @typescript-eslint/no-require-imports */
// Lazily require the agents-platform command so its module is only evaluated
// for the 'ant' (internal) user type; for everyone else this stays null and
// the entry is dropped by INTERNAL_ONLY_COMMANDS' trailing filter(Boolean).
const agentsPlatform =
  process.env.USER_TYPE === 'ant'
    ? require('./commands/agents-platform/index.js').default
    : null
/* eslint-enable @typescript-eslint/no-require-imports */
|
||||
import securityReview from './commands/security-review.js'
|
||||
import bughunter from './commands/bughunter/index.js'
|
||||
import terminalSetup from './commands/terminalSetup/index.js'
|
||||
import usage from './commands/usage/index.js'
|
||||
import theme from './commands/theme/index.js'
|
||||
import vim from './commands/vim/index.js'
|
||||
import { feature } from 'bun:bundle'
|
||||
// Dead code elimination: conditional imports
// Each command below is gated on a feature() flag from 'bun:bundle'. When a
// flag is statically false, the require() branch is unreachable, so the
// bundler can drop the module entirely. A disabled command resolves to null
// and is conditionally spread into the command lists further down.
/* eslint-disable @typescript-eslint/no-require-imports */
const proactive =
  feature('PROACTIVE') || feature('KAIROS')
    ? require('./commands/proactive.js').default
    : null
const briefCommand =
  feature('KAIROS') || feature('KAIROS_BRIEF')
    ? require('./commands/brief.js').default
    : null
const assistantCommand = feature('KAIROS')
  ? require('./commands/assistant/index.js').default
  : null
const bridge = feature('BRIDGE_MODE')
  ? require('./commands/bridge/index.js').default
  : null
// Requires BOTH flags: the remote-control server only runs as a daemon in
// bridge mode.
const remoteControlServerCommand =
  feature('DAEMON') && feature('BRIDGE_MODE')
    ? require('./commands/remoteControlServer/index.js').default
    : null
const voiceCommand = feature('VOICE_MODE')
  ? require('./commands/voice/index.js').default
  : null
const forceSnip = feature('HISTORY_SNIP')
  ? require('./commands/force-snip.js').default
  : null
// The `as typeof import(...)` casts below recover static typing for the
// dynamically-required modules.
const workflowsCmd = feature('WORKFLOW_SCRIPTS')
  ? (
      require('./commands/workflows/index.js') as typeof import('./commands/workflows/index.js')
    ).default
  : null
const webCmd = feature('CCR_REMOTE_SETUP')
  ? (
      require('./commands/remote-setup/index.js') as typeof import('./commands/remote-setup/index.js')
    ).default
  : null
// Not a command: a cache-clearing hook used by clearCommandMemoizationCaches().
const clearSkillIndexCache = feature('EXPERIMENTAL_SKILL_SEARCH')
  ? (
      require('./services/skillSearch/localSearch.js') as typeof import('./services/skillSearch/localSearch.js')
    ).clearSkillIndexCache
  : null
const subscribePr = feature('KAIROS_GITHUB_WEBHOOKS')
  ? require('./commands/subscribe-pr.js').default
  : null
const ultraplan = feature('ULTRAPLAN')
  ? require('./commands/ultraplan.js').default
  : null
const torch = feature('TORCH') ? require('./commands/torch.js').default : null
const peersCmd = feature('UDS_INBOX')
  ? (
      require('./commands/peers/index.js') as typeof import('./commands/peers/index.js')
    ).default
  : null
const forkCmd = feature('FORK_SUBAGENT')
  ? (
      require('./commands/fork/index.js') as typeof import('./commands/fork/index.js')
    ).default
  : null
const buddy = feature('BUDDY')
  ? (
      require('./commands/buddy/index.js') as typeof import('./commands/buddy/index.js')
    ).default
  : null
/* eslint-enable @typescript-eslint/no-require-imports */
|
||||
import thinkback from './commands/thinkback/index.js'
|
||||
import thinkbackPlay from './commands/thinkback-play/index.js'
|
||||
import permissions from './commands/permissions/index.js'
|
||||
import plan from './commands/plan/index.js'
|
||||
import fast from './commands/fast/index.js'
|
||||
import passes from './commands/passes/index.js'
|
||||
import privacySettings from './commands/privacy-settings/index.js'
|
||||
import hooks from './commands/hooks/index.js'
|
||||
import files from './commands/files/index.js'
|
||||
import branch from './commands/branch/index.js'
|
||||
import agents from './commands/agents/index.js'
|
||||
import plugin from './commands/plugin/index.js'
|
||||
import reloadPlugins from './commands/reload-plugins/index.js'
|
||||
import rewind from './commands/rewind/index.js'
|
||||
import heapDump from './commands/heapdump/index.js'
|
||||
import mockLimits from './commands/mock-limits/index.js'
|
||||
import bridgeKick from './commands/bridge-kick.js'
|
||||
import version from './commands/version.js'
|
||||
import summary from './commands/summary/index.js'
|
||||
import {
|
||||
resetLimits,
|
||||
resetLimitsNonInteractive,
|
||||
} from './commands/reset-limits/index.js'
|
||||
import antTrace from './commands/ant-trace/index.js'
|
||||
import perfIssue from './commands/perf-issue/index.js'
|
||||
import sandboxToggle from './commands/sandbox-toggle/index.js'
|
||||
import chrome from './commands/chrome/index.js'
|
||||
import stickers from './commands/stickers/index.js'
|
||||
import advisor from './commands/advisor.js'
|
||||
import { logError } from './utils/log.js'
|
||||
import { toError } from './utils/errors.js'
|
||||
import { logForDebugging } from './utils/debug.js'
|
||||
import {
|
||||
getSkillDirCommands,
|
||||
clearSkillCaches,
|
||||
getDynamicSkills,
|
||||
} from './skills/loadSkillsDir.js'
|
||||
import { getBundledSkills } from './skills/bundledSkills.js'
|
||||
import { getBuiltinPluginSkillCommands } from './plugins/builtinPlugins.js'
|
||||
import {
|
||||
getPluginCommands,
|
||||
clearPluginCommandCache,
|
||||
getPluginSkills,
|
||||
clearPluginSkillsCache,
|
||||
} from './utils/plugins/loadPluginCommands.js'
|
||||
import memoize from 'lodash-es/memoize.js'
|
||||
import { isUsing3PServices, isClaudeAISubscriber } from './utils/auth.js'
|
||||
import { isFirstPartyAnthropicBaseUrl } from './utils/model/providers.js'
|
||||
import env from './commands/env/index.js'
|
||||
import exit from './commands/exit/index.js'
|
||||
import exportCommand from './commands/export/index.js'
|
||||
import model from './commands/model/index.js'
|
||||
import tag from './commands/tag/index.js'
|
||||
import outputStyle from './commands/output-style/index.js'
|
||||
import remoteEnv from './commands/remote-env/index.js'
|
||||
import upgrade from './commands/upgrade/index.js'
|
||||
import {
|
||||
extraUsage,
|
||||
extraUsageNonInteractive,
|
||||
} from './commands/extra-usage/index.js'
|
||||
import rateLimitOptions from './commands/rate-limit-options/index.js'
|
||||
import statusline from './commands/statusline.js'
|
||||
import effort from './commands/effort/index.js'
|
||||
import stats from './commands/stats/index.js'
|
||||
// insights.ts is 113KB (3200 lines, includes diffLines/html rendering). Lazy
// shim defers the heavy module until /insights is actually invoked.
const usageReport: Command = {
  type: 'prompt',
  name: 'insights',
  description: 'Generate a report analyzing your Claude Code sessions',
  contentLength: 0,
  progressMessage: 'analyzing your sessions',
  source: 'builtin',
  // Delegates to the real command's prompt builder, imported on demand.
  async getPromptForCommand(args, context) {
    const real = (await import('./commands/insights.js')).default
    // The real /insights command is expected to be prompt-typed; anything
    // else would indicate a build or restoration error.
    if (real.type !== 'prompt') throw new Error('unreachable')
    return real.getPromptForCommand(args, context)
  },
}
|
||||
import oauthRefresh from './commands/oauth-refresh/index.js'
|
||||
import debugToolCall from './commands/debug-tool-call/index.js'
|
||||
import { getSettingSourceName } from './utils/settings/constants.js'
|
||||
import {
|
||||
type Command,
|
||||
getCommandName,
|
||||
isCommandEnabled,
|
||||
} from './types/command.js'
|
||||
|
||||
// Re-export types from the centralized location
|
||||
export type {
|
||||
Command,
|
||||
CommandBase,
|
||||
CommandResultDisplay,
|
||||
LocalCommandResult,
|
||||
LocalJSXCommandContext,
|
||||
PromptCommand,
|
||||
ResumeEntrypoint,
|
||||
} from './types/command.js'
|
||||
export { getCommandName, isCommandEnabled } from './types/command.js'
|
||||
|
||||
// Commands that get eliminated from the external build
// Only spread into COMMANDS() when USER_TYPE === 'ant' and not a demo.
// Feature-gated entries (forceSnip, ultraplan, subscribePr) are spread in
// only when their flag resolved them to a non-null command; the trailing
// filter(Boolean) additionally drops agentsPlatform when its USER_TYPE gate
// left it null.
export const INTERNAL_ONLY_COMMANDS = [
  backfillSessions,
  breakCache,
  bughunter,
  commit,
  commitPushPr,
  ctx_viz,
  goodClaude,
  issue,
  initVerifiers,
  ...(forceSnip ? [forceSnip] : []),
  mockLimits,
  bridgeKick,
  version,
  ...(ultraplan ? [ultraplan] : []),
  ...(subscribePr ? [subscribePr] : []),
  resetLimits,
  resetLimitsNonInteractive,
  onboarding,
  share,
  summary,
  teleport,
  antTrace,
  perfIssue,
  env,
  oauthRefresh,
  debugToolCall,
  agentsPlatform,
  autofixPr,
].filter(Boolean)
|
||||
|
||||
// Declared as a function so that we don't run this until getCommands is called,
// since underlying functions read from config, which can't be read at module initialization time
// Memoized: the list is assembled once per process. Entry order matters —
// getCommands() positions dynamic skills relative to these names.
const COMMANDS = memoize((): Command[] => [
  addDir,
  advisor,
  agents,
  branch,
  btw,
  chrome,
  clear,
  color,
  compact,
  config,
  copy,
  desktop,
  context,
  contextNonInteractive,
  cost,
  diff,
  doctor,
  effort,
  exit,
  fast,
  files,
  heapDump,
  help,
  ide,
  init,
  keybindings,
  installGitHubApp,
  installSlackApp,
  mcp,
  memory,
  mobile,
  model,
  outputStyle,
  remoteEnv,
  plugin,
  pr_comments,
  releaseNotes,
  reloadPlugins,
  rename,
  resume,
  session,
  skills,
  stats,
  status,
  statusline,
  stickers,
  tag,
  theme,
  feedback,
  review,
  ultrareview,
  rewind,
  securityReview,
  terminalSetup,
  upgrade,
  extraUsage,
  extraUsageNonInteractive,
  rateLimitOptions,
  usage,
  usageReport,
  vim,
  // Feature-gated commands: null when their flag is off, so each is spread
  // conditionally.
  ...(webCmd ? [webCmd] : []),
  ...(forkCmd ? [forkCmd] : []),
  ...(buddy ? [buddy] : []),
  ...(proactive ? [proactive] : []),
  ...(briefCommand ? [briefCommand] : []),
  ...(assistantCommand ? [assistantCommand] : []),
  ...(bridge ? [bridge] : []),
  ...(remoteControlServerCommand ? [remoteControlServerCommand] : []),
  ...(voiceCommand ? [voiceCommand] : []),
  thinkback,
  thinkbackPlay,
  permissions,
  plan,
  privacySettings,
  hooks,
  exportCommand,
  sandboxToggle,
  // /login and /logout only make sense for first-party auth.
  ...(!isUsing3PServices() ? [logout, login()] : []),
  passes,
  ...(peersCmd ? [peersCmd] : []),
  tasks,
  ...(workflowsCmd ? [workflowsCmd] : []),
  ...(torch ? [torch] : []),
  // Internal-only commands: 'ant' user type, excluded from demo builds.
  ...(process.env.USER_TYPE === 'ant' && !process.env.IS_DEMO
    ? INTERNAL_ONLY_COMMANDS
    : []),
])
|
||||
|
||||
export const builtInCommandNames = memoize(
|
||||
(): Set<string> =>
|
||||
new Set(COMMANDS().flatMap(_ => [_.name, ...(_.aliases ?? [])])),
|
||||
)
|
||||
|
||||
async function getSkills(cwd: string): Promise<{
|
||||
skillDirCommands: Command[]
|
||||
pluginSkills: Command[]
|
||||
bundledSkills: Command[]
|
||||
builtinPluginSkills: Command[]
|
||||
}> {
|
||||
try {
|
||||
const [skillDirCommands, pluginSkills] = await Promise.all([
|
||||
getSkillDirCommands(cwd).catch(err => {
|
||||
logError(toError(err))
|
||||
logForDebugging(
|
||||
'Skill directory commands failed to load, continuing without them',
|
||||
)
|
||||
return []
|
||||
}),
|
||||
getPluginSkills().catch(err => {
|
||||
logError(toError(err))
|
||||
logForDebugging('Plugin skills failed to load, continuing without them')
|
||||
return []
|
||||
}),
|
||||
])
|
||||
// Bundled skills are registered synchronously at startup
|
||||
const bundledSkills = getBundledSkills()
|
||||
// Built-in plugin skills come from enabled built-in plugins
|
||||
const builtinPluginSkills = getBuiltinPluginSkillCommands()
|
||||
logForDebugging(
|
||||
`getSkills returning: ${skillDirCommands.length} skill dir commands, ${pluginSkills.length} plugin skills, ${bundledSkills.length} bundled skills, ${builtinPluginSkills.length} builtin plugin skills`,
|
||||
)
|
||||
return {
|
||||
skillDirCommands,
|
||||
pluginSkills,
|
||||
bundledSkills,
|
||||
builtinPluginSkills,
|
||||
}
|
||||
} catch (err) {
|
||||
// This should never happen since we catch at the Promise level, but defensive
|
||||
logError(toError(err))
|
||||
logForDebugging('Unexpected error in getSkills, returning empty')
|
||||
return {
|
||||
skillDirCommands: [],
|
||||
pluginSkills: [],
|
||||
bundledSkills: [],
|
||||
builtinPluginSkills: [],
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/* eslint-disable @typescript-eslint/no-require-imports */
// Workflow command loader, gated on the WORKFLOW_SCRIPTS feature flag.
// Loaded via require() like the other feature-gated modules above so the
// module can be dropped when the flag is statically false; the cast
// restores static typing for the dynamically-required module.
const getWorkflowCommands = feature('WORKFLOW_SCRIPTS')
  ? (
      require('./tools/WorkflowTool/createWorkflowCommand.js') as typeof import('./tools/WorkflowTool/createWorkflowCommand.js')
    ).getWorkflowCommands
  : null
/* eslint-enable @typescript-eslint/no-require-imports */
|
||||
|
||||
/**
|
||||
* Filters commands by their declared `availability` (auth/provider requirement).
|
||||
* Commands without `availability` are treated as universal.
|
||||
* This runs before `isEnabled()` so that provider-gated commands are hidden
|
||||
* regardless of feature-flag state.
|
||||
*
|
||||
* Not memoized — auth state can change mid-session (e.g. after /login),
|
||||
* so this must be re-evaluated on every getCommands() call.
|
||||
*/
|
||||
export function meetsAvailabilityRequirement(cmd: Command): boolean {
|
||||
if (!cmd.availability) return true
|
||||
for (const a of cmd.availability) {
|
||||
switch (a) {
|
||||
case 'claude-ai':
|
||||
if (isClaudeAISubscriber()) return true
|
||||
break
|
||||
case 'console':
|
||||
// Console API key user = direct 1P API customer (not 3P, not claude.ai).
|
||||
// Excludes 3P (Bedrock/Vertex/Foundry) who don't set ANTHROPIC_BASE_URL
|
||||
// and gateway users who proxy through a custom base URL.
|
||||
if (
|
||||
!isClaudeAISubscriber() &&
|
||||
!isUsing3PServices() &&
|
||||
isFirstPartyAnthropicBaseUrl()
|
||||
)
|
||||
return true
|
||||
break
|
||||
default: {
|
||||
const _exhaustive: never = a
|
||||
void _exhaustive
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
/**
|
||||
* Loads all command sources (skills, plugins, workflows). Memoized by cwd
|
||||
* because loading is expensive (disk I/O, dynamic imports).
|
||||
*/
|
||||
const loadAllCommands = memoize(async (cwd: string): Promise<Command[]> => {
|
||||
const [
|
||||
{ skillDirCommands, pluginSkills, bundledSkills, builtinPluginSkills },
|
||||
pluginCommands,
|
||||
workflowCommands,
|
||||
] = await Promise.all([
|
||||
getSkills(cwd),
|
||||
getPluginCommands(),
|
||||
getWorkflowCommands ? getWorkflowCommands(cwd) : Promise.resolve([]),
|
||||
])
|
||||
|
||||
return [
|
||||
...bundledSkills,
|
||||
...builtinPluginSkills,
|
||||
...skillDirCommands,
|
||||
...workflowCommands,
|
||||
...pluginCommands,
|
||||
...pluginSkills,
|
||||
...COMMANDS(),
|
||||
]
|
||||
})
|
||||
|
||||
/**
 * Returns commands available to the current user. The expensive loading is
 * memoized, but availability and isEnabled checks run fresh every call so
 * auth changes (e.g. /login) take effect immediately.
 */
export async function getCommands(cwd: string): Promise<Command[]> {
  const allCommands = await loadAllCommands(cwd)

  // Get dynamic skills discovered during file operations
  const dynamicSkills = getDynamicSkills()

  // Build base commands without dynamic skills.
  // Both gates run per-call (not memoized) — see the function doc above.
  const baseCommands = allCommands.filter(
    _ => meetsAvailabilityRequirement(_) && isCommandEnabled(_),
  )

  if (dynamicSkills.length === 0) {
    return baseCommands
  }

  // Dedupe dynamic skills - only add if not already present.
  // Dynamic skills are subject to the same availability/enablement gates.
  const baseCommandNames = new Set(baseCommands.map(c => c.name))
  const uniqueDynamicSkills = dynamicSkills.filter(
    s =>
      !baseCommandNames.has(s.name) &&
      meetsAvailabilityRequirement(s) &&
      isCommandEnabled(s),
  )

  if (uniqueDynamicSkills.length === 0) {
    return baseCommands
  }

  // Insert dynamic skills after plugin skills but before built-in commands.
  // loadAllCommands() appends COMMANDS() last, so the first built-in name
  // found marks the boundary.
  const builtInNames = new Set(COMMANDS().map(c => c.name))
  const insertIndex = baseCommands.findIndex(c => builtInNames.has(c.name))

  // No built-in survived filtering: just append the dynamic skills.
  if (insertIndex === -1) {
    return [...baseCommands, ...uniqueDynamicSkills]
  }

  return [
    ...baseCommands.slice(0, insertIndex),
    ...uniqueDynamicSkills,
    ...baseCommands.slice(insertIndex),
  ]
}
|
||||
|
||||
/**
|
||||
* Clears only the memoization caches for commands, WITHOUT clearing skill caches.
|
||||
* Use this when dynamic skills are added to invalidate cached command lists.
|
||||
*/
|
||||
export function clearCommandMemoizationCaches(): void {
|
||||
loadAllCommands.cache?.clear?.()
|
||||
getSkillToolCommands.cache?.clear?.()
|
||||
getSlashCommandToolSkills.cache?.clear?.()
|
||||
// getSkillIndex in skillSearch/localSearch.ts is a separate memoization layer
|
||||
// built ON TOP of getSkillToolCommands/getCommands. Clearing only the inner
|
||||
// caches is a no-op for the outer — lodash memoize returns the cached result
|
||||
// without ever reaching the cleared inners. Must clear it explicitly.
|
||||
clearSkillIndexCache?.()
|
||||
}
|
||||
|
||||
/**
 * Full reset of every command-related cache: the memoized command lists plus
 * the plugin-command, plugin-skill, and skill-directory caches. Use
 * clearCommandMemoizationCaches() instead when only the memoized lists need
 * invalidating.
 */
export function clearCommandsCache(): void {
  clearCommandMemoizationCaches()
  clearPluginCommandCache()
  clearPluginSkillsCache()
  clearSkillCaches()
}
|
||||
|
||||
/**
|
||||
* Filter AppState.mcp.commands to MCP-provided skills (prompt-type,
|
||||
* model-invocable, loaded from MCP). These live outside getCommands() so
|
||||
* callers that need MCP skills in their skill index thread them through
|
||||
* separately.
|
||||
*/
|
||||
export function getMcpSkillCommands(
|
||||
mcpCommands: readonly Command[],
|
||||
): readonly Command[] {
|
||||
if (feature('MCP_SKILLS')) {
|
||||
return mcpCommands.filter(
|
||||
cmd =>
|
||||
cmd.type === 'prompt' &&
|
||||
cmd.loadedFrom === 'mcp' &&
|
||||
!cmd.disableModelInvocation,
|
||||
)
|
||||
}
|
||||
return []
|
||||
}
|
||||
|
||||
// SkillTool shows ALL prompt-based commands that the model can invoke
|
||||
// This includes both skills (from /skills/) and commands (from /commands/)
|
||||
export const getSkillToolCommands = memoize(
|
||||
async (cwd: string): Promise<Command[]> => {
|
||||
const allCommands = await getCommands(cwd)
|
||||
return allCommands.filter(
|
||||
cmd =>
|
||||
cmd.type === 'prompt' &&
|
||||
!cmd.disableModelInvocation &&
|
||||
cmd.source !== 'builtin' &&
|
||||
// Always include skills from /skills/ dirs, bundled skills, and legacy /commands/ entries
|
||||
// (they all get an auto-derived description from the first line if frontmatter is missing).
|
||||
// Plugin/MCP commands still require an explicit description to appear in the listing.
|
||||
(cmd.loadedFrom === 'bundled' ||
|
||||
cmd.loadedFrom === 'skills' ||
|
||||
cmd.loadedFrom === 'commands_DEPRECATED' ||
|
||||
cmd.hasUserSpecifiedDescription ||
|
||||
cmd.whenToUse),
|
||||
)
|
||||
},
|
||||
)
|
||||
|
||||
// Filters commands to include only skills. Skills are commands that provide
|
||||
// specialized capabilities for the model to use. They are identified by
|
||||
// loadedFrom being 'skills', 'plugin', or 'bundled', or having disableModelInvocation set.
|
||||
export const getSlashCommandToolSkills = memoize(
|
||||
async (cwd: string): Promise<Command[]> => {
|
||||
try {
|
||||
const allCommands = await getCommands(cwd)
|
||||
return allCommands.filter(
|
||||
cmd =>
|
||||
cmd.type === 'prompt' &&
|
||||
cmd.source !== 'builtin' &&
|
||||
(cmd.hasUserSpecifiedDescription || cmd.whenToUse) &&
|
||||
(cmd.loadedFrom === 'skills' ||
|
||||
cmd.loadedFrom === 'plugin' ||
|
||||
cmd.loadedFrom === 'bundled' ||
|
||||
cmd.disableModelInvocation),
|
||||
)
|
||||
} catch (error) {
|
||||
logError(toError(error))
|
||||
// Return empty array rather than throwing - skills are non-critical
|
||||
// This prevents skill loading failures from breaking the entire system
|
||||
logForDebugging('Returning empty skills array due to load failure')
|
||||
return []
|
||||
}
|
||||
},
|
||||
)
|
||||
|
||||
/**
 * Commands that are safe to use in remote mode (--remote).
 * These only affect local TUI state and don't depend on local filesystem,
 * git, shell, IDE, MCP, or other local execution context.
 *
 * Membership is by object identity: the Set holds references to the same
 * imported command singletons that COMMANDS() uses.
 *
 * Used in two places:
 * 1. Pre-filtering commands in main.tsx before REPL renders (prevents race with CCR init)
 * 2. Preserving local-only commands in REPL's handleRemoteInit after CCR filters
 */
export const REMOTE_SAFE_COMMANDS: Set<Command> = new Set([
  session, // Shows QR code / URL for remote session
  exit, // Exit the TUI
  clear, // Clear screen
  help, // Show help
  theme, // Change terminal theme
  color, // Change agent color
  vim, // Toggle vim mode
  cost, // Show session cost (local cost tracking)
  usage, // Show usage info
  copy, // Copy last message
  btw, // Quick note
  feedback, // Send feedback
  plan, // Plan mode toggle
  keybindings, // Keybinding management
  statusline, // Status line toggle
  stickers, // Stickers
  mobile, // Mobile QR code
])
|
||||
|
||||
/**
|
||||
* Builtin commands of type 'local' that ARE safe to execute when received
|
||||
* over the Remote Control bridge. These produce text output that streams
|
||||
* back to the mobile/web client and have no terminal-only side effects.
|
||||
*
|
||||
* 'local-jsx' commands are blocked by type (they render Ink UI) and
|
||||
* 'prompt' commands are allowed by type (they expand to text sent to the
|
||||
* model) — this set only gates 'local' commands.
|
||||
*
|
||||
* When adding a new 'local' command that should work from mobile, add it
|
||||
* here. Default is blocked.
|
||||
*/
|
||||
export const BRIDGE_SAFE_COMMANDS: Set<Command> = new Set(
|
||||
[
|
||||
compact, // Shrink context — useful mid-session from a phone
|
||||
clear, // Wipe transcript
|
||||
cost, // Show session cost
|
||||
summary, // Summarize conversation
|
||||
releaseNotes, // Show changelog
|
||||
files, // List tracked files
|
||||
].filter((c): c is Command => c !== null),
|
||||
)
|
||||
|
||||
/**
|
||||
* Whether a slash command is safe to execute when its input arrived over the
|
||||
* Remote Control bridge (mobile/web client).
|
||||
*
|
||||
* PR #19134 blanket-blocked all slash commands from bridge inbound because
|
||||
* `/model` from iOS was popping the local Ink picker. This predicate relaxes
|
||||
* that with an explicit allowlist: 'prompt' commands (skills) expand to text
|
||||
* and are safe by construction; 'local' commands need an explicit opt-in via
|
||||
* BRIDGE_SAFE_COMMANDS; 'local-jsx' commands render Ink UI and stay blocked.
|
||||
*/
|
||||
export function isBridgeSafeCommand(cmd: Command): boolean {
|
||||
if (cmd.type === 'local-jsx') return false
|
||||
if (cmd.type === 'prompt') return true
|
||||
return BRIDGE_SAFE_COMMANDS.has(cmd)
|
||||
}
|
||||
|
||||
/**
|
||||
* Filter commands to only include those safe for remote mode.
|
||||
* Used to pre-filter commands when rendering the REPL in --remote mode,
|
||||
* preventing local-only commands from being briefly available before
|
||||
* the CCR init message arrives.
|
||||
*/
|
||||
export function filterCommandsForRemoteMode(commands: Command[]): Command[] {
|
||||
return commands.filter(cmd => REMOTE_SAFE_COMMANDS.has(cmd))
|
||||
}
|
||||
|
||||
export function findCommand(
|
||||
commandName: string,
|
||||
commands: Command[],
|
||||
): Command | undefined {
|
||||
return commands.find(
|
||||
_ =>
|
||||
_.name === commandName ||
|
||||
getCommandName(_) === commandName ||
|
||||
_.aliases?.includes(commandName),
|
||||
)
|
||||
}
|
||||
|
||||
export function hasCommand(commandName: string, commands: Command[]): boolean {
|
||||
return findCommand(commandName, commands) !== undefined
|
||||
}
|
||||
|
||||
export function getCommand(commandName: string, commands: Command[]): Command {
|
||||
const command = findCommand(commandName, commands)
|
||||
if (!command) {
|
||||
throw ReferenceError(
|
||||
`Command ${commandName} not found. Available commands: ${commands
|
||||
.map(_ => {
|
||||
const name = getCommandName(_)
|
||||
return _.aliases ? `${name} (aliases: ${_.aliases.join(', ')})` : name
|
||||
})
|
||||
.sort((a, b) => a.localeCompare(b))
|
||||
.join(', ')}`,
|
||||
)
|
||||
}
|
||||
|
||||
return command
|
||||
}
|
||||
|
||||
/**
|
||||
* Formats a command's description with its source annotation for user-facing UI.
|
||||
* Use this in typeahead, help screens, and other places where users need to see
|
||||
* where a command comes from.
|
||||
*
|
||||
* For model-facing prompts (like SkillTool), use cmd.description directly.
|
||||
*/
|
||||
export function formatDescriptionWithSource(cmd: Command): string {
|
||||
if (cmd.type !== 'prompt') {
|
||||
return cmd.description
|
||||
}
|
||||
|
||||
if (cmd.kind === 'workflow') {
|
||||
return `${cmd.description} (workflow)`
|
||||
}
|
||||
|
||||
if (cmd.source === 'plugin') {
|
||||
const pluginName = cmd.pluginInfo?.pluginManifest.name
|
||||
if (pluginName) {
|
||||
return `(${pluginName}) ${cmd.description}`
|
||||
}
|
||||
return `${cmd.description} (plugin)`
|
||||
}
|
||||
|
||||
if (cmd.source === 'builtin' || cmd.source === 'mcp') {
|
||||
return cmd.description
|
||||
}
|
||||
|
||||
if (cmd.source === 'bundled') {
|
||||
return `${cmd.description} (bundled)`
|
||||
}
|
||||
|
||||
return `${cmd.description} (${getSettingSourceName(cmd.source)})`
|
||||
}
|
||||
126
src/commands/add-dir/add-dir.tsx
Normal file
126
src/commands/add-dir/add-dir.tsx
Normal file
File diff suppressed because one or more lines are too long
11
src/commands/add-dir/index.ts
Normal file
11
src/commands/add-dir/index.ts
Normal file
@ -0,0 +1,11 @@
|
||||
import type { Command } from '../../commands.js'
|
||||
|
||||
// Slash command declaration for /add-dir. The interactive UI module is
// loaded lazily via load() so it is only imported when the command runs.
const addDir = {
  type: 'local-jsx',
  name: 'add-dir',
  description: 'Add a new working directory',
  argumentHint: '<path>',
  load: () => import('./add-dir.js'),
} satisfies Command

export default addDir
|
||||
110
src/commands/add-dir/validation.ts
Normal file
110
src/commands/add-dir/validation.ts
Normal file
@ -0,0 +1,110 @@
|
||||
import chalk from 'chalk'
|
||||
import { stat } from 'fs/promises'
|
||||
import { dirname, resolve } from 'path'
|
||||
import type { ToolPermissionContext } from '../../Tool.js'
|
||||
import { getErrnoCode } from '../../utils/errors.js'
|
||||
import { expandPath } from '../../utils/path.js'
|
||||
import {
|
||||
allWorkingDirectories,
|
||||
pathInWorkingPath,
|
||||
} from '../../utils/permissions/filesystem.js'
|
||||
|
||||
// Discriminated result of validateDirectoryForWorkspace(), keyed on
// `resultType`. addDirHelpMessage() renders one user-facing message per
// variant.
export type AddDirectoryResult =
  // The path exists, is a directory, and is not already covered by a
  // working directory.
  | {
      resultType: 'success'
      absolutePath: string
    }
  // Caller passed an empty path string.
  | {
      resultType: 'emptyPath'
    }
  // Path missing/inaccessible, or it exists but is not a directory.
  | {
      resultType: 'pathNotFound' | 'notADirectory'
      directoryPath: string
      absolutePath: string
    }
  // Path is already reachable inside the existing `workingDir`.
  | {
      resultType: 'alreadyInWorkingDirectory'
      directoryPath: string
      workingDir: string
    }
||||
|
||||
export async function validateDirectoryForWorkspace(
|
||||
directoryPath: string,
|
||||
permissionContext: ToolPermissionContext,
|
||||
): Promise<AddDirectoryResult> {
|
||||
if (!directoryPath) {
|
||||
return {
|
||||
resultType: 'emptyPath',
|
||||
}
|
||||
}
|
||||
|
||||
// resolve() strips the trailing slash expandPath can leave on absolute
|
||||
// inputs, so /foo and /foo/ map to the same storage key (CC-33).
|
||||
const absolutePath = resolve(expandPath(directoryPath))
|
||||
|
||||
// Check if path exists and is a directory (single syscall)
|
||||
try {
|
||||
const stats = await stat(absolutePath)
|
||||
if (!stats.isDirectory()) {
|
||||
return {
|
||||
resultType: 'notADirectory',
|
||||
directoryPath,
|
||||
absolutePath,
|
||||
}
|
||||
}
|
||||
} catch (e: unknown) {
|
||||
const code = getErrnoCode(e)
|
||||
// Match prior existsSync() semantics: treat any of these as "not found"
|
||||
// rather than re-throwing. EACCES/EPERM in particular must not crash
|
||||
// startup when a settings-configured additional directory is inaccessible.
|
||||
if (
|
||||
code === 'ENOENT' ||
|
||||
code === 'ENOTDIR' ||
|
||||
code === 'EACCES' ||
|
||||
code === 'EPERM'
|
||||
) {
|
||||
return {
|
||||
resultType: 'pathNotFound',
|
||||
directoryPath,
|
||||
absolutePath,
|
||||
}
|
||||
}
|
||||
throw e
|
||||
}
|
||||
|
||||
// Get current permission context
|
||||
const currentWorkingDirs = allWorkingDirectories(permissionContext)
|
||||
|
||||
// Check if already within an existing working directory
|
||||
for (const workingDir of currentWorkingDirs) {
|
||||
if (pathInWorkingPath(absolutePath, workingDir)) {
|
||||
return {
|
||||
resultType: 'alreadyInWorkingDirectory',
|
||||
directoryPath,
|
||||
workingDir,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return {
|
||||
resultType: 'success',
|
||||
absolutePath,
|
||||
}
|
||||
}
|
||||
|
||||
export function addDirHelpMessage(result: AddDirectoryResult): string {
|
||||
switch (result.resultType) {
|
||||
case 'emptyPath':
|
||||
return 'Please provide a directory path.'
|
||||
case 'pathNotFound':
|
||||
return `Path ${chalk.bold(result.absolutePath)} was not found.`
|
||||
case 'notADirectory': {
|
||||
const parentDir = dirname(result.absolutePath)
|
||||
return `${chalk.bold(result.directoryPath)} is not a directory. Did you mean to add the parent directory ${chalk.bold(parentDir)}?`
|
||||
}
|
||||
case 'alreadyInWorkingDirectory':
|
||||
return `${chalk.bold(result.directoryPath)} is already accessible within the existing working directory ${chalk.bold(result.workingDir)}.`
|
||||
case 'success':
|
||||
return `Added ${chalk.bold(result.absolutePath)} as a working directory.`
|
||||
}
|
||||
}
|
||||
109
src/commands/advisor.ts
Normal file
109
src/commands/advisor.ts
Normal file
@ -0,0 +1,109 @@
|
||||
import type { Command } from '../commands.js'
|
||||
import type { LocalCommandCall } from '../types/command.js'
|
||||
import {
|
||||
canUserConfigureAdvisor,
|
||||
isValidAdvisorModel,
|
||||
modelSupportsAdvisor,
|
||||
} from '../utils/advisor.js'
|
||||
import {
|
||||
getDefaultMainLoopModelSetting,
|
||||
normalizeModelStringForAPI,
|
||||
parseUserSpecifiedModel,
|
||||
} from '../utils/model/model.js'
|
||||
import { validateModel } from '../utils/model/validateModel.js'
|
||||
import { updateSettingsForSource } from '../utils/settings/settings.js'
|
||||
|
||||
const call: LocalCommandCall = async (args, context) => {
|
||||
const arg = args.trim().toLowerCase()
|
||||
const baseModel = parseUserSpecifiedModel(
|
||||
context.getAppState().mainLoopModel ?? getDefaultMainLoopModelSetting(),
|
||||
)
|
||||
|
||||
if (!arg) {
|
||||
const current = context.getAppState().advisorModel
|
||||
if (!current) {
|
||||
return {
|
||||
type: 'text',
|
||||
value:
|
||||
'Advisor: not set\nUse "/advisor <model>" to enable (e.g. "/advisor opus").',
|
||||
}
|
||||
}
|
||||
if (!modelSupportsAdvisor(baseModel)) {
|
||||
return {
|
||||
type: 'text',
|
||||
value: `Advisor: ${current} (inactive)\nThe current model (${baseModel}) does not support advisors.`,
|
||||
}
|
||||
}
|
||||
return {
|
||||
type: 'text',
|
||||
value: `Advisor: ${current}\nUse "/advisor unset" to disable or "/advisor <model>" to change.`,
|
||||
}
|
||||
}
|
||||
|
||||
if (arg === 'unset' || arg === 'off') {
|
||||
const prev = context.getAppState().advisorModel
|
||||
context.setAppState(s => {
|
||||
if (s.advisorModel === undefined) return s
|
||||
return { ...s, advisorModel: undefined }
|
||||
})
|
||||
updateSettingsForSource('userSettings', { advisorModel: undefined })
|
||||
return {
|
||||
type: 'text',
|
||||
value: prev
|
||||
? `Advisor disabled (was ${prev}).`
|
||||
: 'Advisor already unset.',
|
||||
}
|
||||
}
|
||||
|
||||
const normalizedModel = normalizeModelStringForAPI(arg)
|
||||
const resolvedModel = parseUserSpecifiedModel(arg)
|
||||
const { valid, error } = await validateModel(resolvedModel)
|
||||
if (!valid) {
|
||||
return {
|
||||
type: 'text',
|
||||
value: error
|
||||
? `Invalid advisor model: ${error}`
|
||||
: `Unknown model: ${arg} (${resolvedModel})`,
|
||||
}
|
||||
}
|
||||
|
||||
if (!isValidAdvisorModel(resolvedModel)) {
|
||||
return {
|
||||
type: 'text',
|
||||
value: `The model ${arg} (${resolvedModel}) cannot be used as an advisor`,
|
||||
}
|
||||
}
|
||||
|
||||
context.setAppState(s => {
|
||||
if (s.advisorModel === normalizedModel) return s
|
||||
return { ...s, advisorModel: normalizedModel }
|
||||
})
|
||||
updateSettingsForSource('userSettings', { advisorModel: normalizedModel })
|
||||
|
||||
if (!modelSupportsAdvisor(baseModel)) {
|
||||
return {
|
||||
type: 'text',
|
||||
value: `Advisor set to ${normalizedModel}.\nNote: Your current model (${baseModel}) does not support advisors. Switch to a supported model to use the advisor.`,
|
||||
}
|
||||
}
|
||||
|
||||
return {
|
||||
type: 'text',
|
||||
value: `Advisor set to ${normalizedModel}.`,
|
||||
}
|
||||
}
|
||||
|
||||
// Command descriptor for `/advisor`. Registration metadata only; the handler
// lives in `call` above and is loaded lazily via `load`.
const advisor = {
  type: 'local',
  name: 'advisor',
  description: 'Configure the advisor model',
  argumentHint: '[<model>|off]',
  // Only offered when the current configuration permits advisor setup.
  isEnabled: () => canUserConfigureAdvisor(),
  // Getter so hidden-ness is re-evaluated whenever the command list renders.
  get isHidden() {
    return !canUserConfigureAdvisor()
  },
  supportsNonInteractive: true,
  load: () => Promise.resolve({ call }),
} satisfies Command

export default advisor
|
||||
13
src/commands/agents-platform/index.ts
Normal file
13
src/commands/agents-platform/index.ts
Normal file
@ -0,0 +1,13 @@
|
||||
const agentsPlatform = {
|
||||
name: 'agents-platform',
|
||||
type: 'local',
|
||||
description: 'Unavailable in restored development build.',
|
||||
supportsNonInteractive: true,
|
||||
load: async () => ({
|
||||
async call() {
|
||||
return { type: 'skip' as const }
|
||||
},
|
||||
}),
|
||||
}
|
||||
|
||||
export default agentsPlatform
|
||||
12
src/commands/agents/agents.tsx
Normal file
12
src/commands/agents/agents.tsx
Normal file
@ -0,0 +1,12 @@
|
||||
import * as React from 'react';
import { AgentsMenu } from '../../components/agents/AgentsMenu.js';
import type { ToolUseContext } from '../../Tool.js';
import { getTools } from '../../tools.js';
import type { LocalJSXCommandOnDone } from '../../types/command.js';

/**
 * Entry point for the `/agents` local-JSX command: renders the interactive
 * agents management menu.
 *
 * @param onDone Callback invoked when the user exits the menu.
 * @param context Tool-use context providing the current app state.
 * @returns The menu element to mount in the TUI.
 */
export async function call(
  onDone: LocalJSXCommandOnDone,
  context: ToolUseContext,
): Promise<React.ReactNode> {
  const appState = context.getAppState();
  const permissionContext = appState.toolPermissionContext;
  // Tool list is permission-filtered so the menu only offers usable tools.
  const tools = getTools(permissionContext);
  return <AgentsMenu tools={tools} onExit={onDone} />;
}
|
||||
10
src/commands/agents/index.ts
Normal file
10
src/commands/agents/index.ts
Normal file
@ -0,0 +1,10 @@
|
||||
import type { Command } from '../../commands.js'
|
||||
|
||||
const agents = {
|
||||
type: 'local-jsx',
|
||||
name: 'agents',
|
||||
description: 'Manage agent configurations',
|
||||
load: () => import('./agents.js'),
|
||||
} satisfies Command
|
||||
|
||||
export default agents
|
||||
1
src/commands/ant-trace/index.js
Normal file
1
src/commands/ant-trace/index.js
Normal file
@ -0,0 +1 @@
|
||||
// Restoration stub: the original command module was unrecoverable from source
// maps. Exposes a disabled, hidden placeholder so the command registry loads.
export default { isEnabled: () => false, isHidden: true, name: 'stub' };
|
||||
1
src/commands/autofix-pr/index.js
Normal file
1
src/commands/autofix-pr/index.js
Normal file
@ -0,0 +1 @@
|
||||
// Restoration stub: the original command module was unrecoverable from source
// maps. Exposes a disabled, hidden placeholder so the command registry loads.
export default { isEnabled: () => false, isHidden: true, name: 'stub' };
|
||||
1
src/commands/backfill-sessions/index.js
Normal file
1
src/commands/backfill-sessions/index.js
Normal file
@ -0,0 +1 @@
|
||||
// Restoration stub: the original command module was unrecoverable from source
// maps. Exposes a disabled, hidden placeholder so the command registry loads.
export default { isEnabled: () => false, isHidden: true, name: 'stub' };
|
||||
296
src/commands/branch/branch.ts
Normal file
296
src/commands/branch/branch.ts
Normal file
@ -0,0 +1,296 @@
|
||||
import { randomUUID, type UUID } from 'crypto'
|
||||
import { mkdir, readFile, writeFile } from 'fs/promises'
|
||||
import { getOriginalCwd, getSessionId } from '../../bootstrap/state.js'
|
||||
import type { LocalJSXCommandContext } from '../../commands.js'
|
||||
import { logEvent } from '../../services/analytics/index.js'
|
||||
import type { LocalJSXCommandOnDone } from '../../types/command.js'
|
||||
import type {
|
||||
ContentReplacementEntry,
|
||||
Entry,
|
||||
LogOption,
|
||||
SerializedMessage,
|
||||
TranscriptMessage,
|
||||
} from '../../types/logs.js'
|
||||
import { parseJSONL } from '../../utils/json.js'
|
||||
import {
|
||||
getProjectDir,
|
||||
getTranscriptPath,
|
||||
getTranscriptPathForSession,
|
||||
isTranscriptMessage,
|
||||
saveCustomTitle,
|
||||
searchSessionsByCustomTitle,
|
||||
} from '../../utils/sessionStorage.js'
|
||||
import { jsonStringify } from '../../utils/slowOperations.js'
|
||||
import { escapeRegExp } from '../../utils/stringUtils.js'
|
||||
|
||||
// A transcript message augmented with branch provenance: each entry copied
// into a fork records which original session/message it came from, keeping
// the fork traceable back to its source conversation.
type TranscriptEntry = TranscriptMessage & {
  forkedFrom?: {
    // Session the entry was copied from.
    sessionId: string
    // UUID of the corresponding message in the source session.
    messageUuid: UUID
  }
}
|
||||
|
||||
/**
|
||||
* Derive a single-line title base from the first user message.
|
||||
* Collapses whitespace — multiline first messages (pasted stacks, code)
|
||||
* otherwise flow into the saved title and break the resume hint.
|
||||
*/
|
||||
export function deriveFirstPrompt(
|
||||
firstUserMessage: Extract<SerializedMessage, { type: 'user' }> | undefined,
|
||||
): string {
|
||||
const content = firstUserMessage?.message?.content
|
||||
if (!content) return 'Branched conversation'
|
||||
const raw =
|
||||
typeof content === 'string'
|
||||
? content
|
||||
: content.find(
|
||||
(block): block is { type: 'text'; text: string } =>
|
||||
block.type === 'text',
|
||||
)?.text
|
||||
if (!raw) return 'Branched conversation'
|
||||
return (
|
||||
raw.replace(/\s+/g, ' ').trim().slice(0, 100) || 'Branched conversation'
|
||||
)
|
||||
}
|
||||
|
||||
/**
 * Creates a fork of the current conversation by copying from the transcript file.
 * Preserves all original metadata (timestamps, gitBranch, etc.) while updating
 * sessionId and adding forkedFrom traceability.
 *
 * @param customTitle Optional user-supplied title carried through to the result.
 * @returns The fork's session id, the (possibly undefined) title, the fork's
 *   JSONL path, the serialized main-conversation messages, and any carried-over
 *   content-replacement records.
 * @throws Error('No conversation to branch') when the transcript is missing or empty.
 * @throws Error('No messages to branch') when no main-conversation entries exist.
 */
async function createFork(customTitle?: string): Promise<{
  sessionId: UUID
  title: string | undefined
  forkPath: string
  serializedMessages: SerializedMessage[]
  contentReplacementRecords: ContentReplacementEntry['replacements']
}> {
  const forkSessionId = randomUUID() as UUID
  const originalSessionId = getSessionId()
  const projectDir = getProjectDir(getOriginalCwd())
  const forkSessionPath = getTranscriptPathForSession(forkSessionId)
  const currentTranscriptPath = getTranscriptPath()

  // Ensure project directory exists
  await mkdir(projectDir, { recursive: true, mode: 0o700 })

  // Read current transcript file
  let transcriptContent: Buffer
  try {
    transcriptContent = await readFile(currentTranscriptPath)
  } catch {
    throw new Error('No conversation to branch')
  }

  if (transcriptContent.length === 0) {
    throw new Error('No conversation to branch')
  }

  // Parse all transcript entries (messages + metadata entries like content-replacement)
  const entries = parseJSONL<Entry>(transcriptContent)

  // Filter to only main conversation messages (exclude sidechains and non-message entries)
  const mainConversationEntries = entries.filter(
    (entry): entry is TranscriptMessage =>
      isTranscriptMessage(entry) && !entry.isSidechain,
  )

  // Content-replacement entries for the original session. These record which
  // tool_result blocks were replaced with previews by the per-message budget.
  // Without them in the fork JSONL, `claude -r {forkId}` reconstructs state
  // with an empty replacements Map → previously-replaced results are classified
  // as FROZEN and sent as full content (prompt cache miss + permanent overage).
  // sessionId must be rewritten since loadTranscriptFile keys lookup by the
  // session's messages' sessionId.
  const contentReplacementRecords = entries
    .filter(
      (entry): entry is ContentReplacementEntry =>
        entry.type === 'content-replacement' &&
        entry.sessionId === originalSessionId,
    )
    .flatMap(entry => entry.replacements)

  if (mainConversationEntries.length === 0) {
    throw new Error('No messages to branch')
  }

  // Build forked entries with new sessionId and preserved metadata
  let parentUuid: UUID | null = null
  const lines: string[] = []
  const serializedMessages: SerializedMessage[] = []

  for (const entry of mainConversationEntries) {
    // Create forked transcript entry preserving all original metadata
    const forkedEntry: TranscriptEntry = {
      ...entry,
      sessionId: forkSessionId,
      parentUuid,
      isSidechain: false,
      forkedFrom: {
        sessionId: originalSessionId,
        messageUuid: entry.uuid,
      },
    }

    // Build serialized message for LogOption
    const serialized: SerializedMessage = {
      ...entry,
      sessionId: forkSessionId,
    }

    serializedMessages.push(serialized)
    lines.push(jsonStringify(forkedEntry))
    // Progress entries are not conversation turns; they must not become the
    // parent of the next message in the rebuilt parentUuid chain.
    if (entry.type !== 'progress') {
      parentUuid = entry.uuid
    }
  }

  // Append content-replacement entry (if any) with the fork's sessionId.
  // Written as a SINGLE entry (same shape as insertContentReplacement) so
  // loadTranscriptFile's content-replacement branch picks it up.
  if (contentReplacementRecords.length > 0) {
    const forkedReplacementEntry: ContentReplacementEntry = {
      type: 'content-replacement',
      sessionId: forkSessionId,
      replacements: contentReplacementRecords,
    }
    lines.push(jsonStringify(forkedReplacementEntry))
  }

  // Write the fork session file
  await writeFile(forkSessionPath, lines.join('\n') + '\n', {
    encoding: 'utf8',
    mode: 0o600,
  })

  return {
    sessionId: forkSessionId,
    title: customTitle,
    forkPath: forkSessionPath,
    serializedMessages,
    contentReplacementRecords,
  }
}
|
||||
|
||||
/**
|
||||
* Generates a unique fork name by checking for collisions with existing session names.
|
||||
* If "baseName (Branch)" already exists, tries "baseName (Branch 2)", "baseName (Branch 3)", etc.
|
||||
*/
|
||||
async function getUniqueForkName(baseName: string): Promise<string> {
|
||||
const candidateName = `${baseName} (Branch)`
|
||||
|
||||
// Check if this exact name already exists
|
||||
const existingWithExactName = await searchSessionsByCustomTitle(
|
||||
candidateName,
|
||||
{ exact: true },
|
||||
)
|
||||
|
||||
if (existingWithExactName.length === 0) {
|
||||
return candidateName
|
||||
}
|
||||
|
||||
// Name collision - find a unique numbered suffix
|
||||
// Search for all sessions that start with the base pattern
|
||||
const existingForks = await searchSessionsByCustomTitle(`${baseName} (Branch`)
|
||||
|
||||
// Extract existing fork numbers to find the next available
|
||||
const usedNumbers = new Set<number>([1]) // Consider " (Branch)" as number 1
|
||||
const forkNumberPattern = new RegExp(
|
||||
`^${escapeRegExp(baseName)} \\(Branch(?: (\\d+))?\\)$`,
|
||||
)
|
||||
|
||||
for (const session of existingForks) {
|
||||
const match = session.customTitle?.match(forkNumberPattern)
|
||||
if (match) {
|
||||
if (match[1]) {
|
||||
usedNumbers.add(parseInt(match[1], 10))
|
||||
} else {
|
||||
usedNumbers.add(1) // " (Branch)" without number is treated as 1
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Find the next available number
|
||||
let nextNumber = 2
|
||||
while (usedNumbers.has(nextNumber)) {
|
||||
nextNumber++
|
||||
}
|
||||
|
||||
return `${baseName} (Branch ${nextNumber})`
|
||||
}
|
||||
|
||||
/**
 * `/branch` command entry point: forks the current conversation into a new
 * session file, saves a unique "(Branch)" title for it, and resumes into the
 * fork when the UI supports it.
 *
 * @param onDone Completion callback reporting success/failure text to the UI.
 * @param context Command context; `context.resume` (when present) switches the
 *   UI into the forked session.
 * @param args Optional custom title for the branched session.
 * @returns Always null — all output flows through `onDone`/`context.resume`.
 */
export async function call(
  onDone: LocalJSXCommandOnDone,
  context: LocalJSXCommandContext,
  args: string,
): Promise<React.ReactNode> {
  const customTitle = args?.trim() || undefined

  const originalSessionId = getSessionId()

  try {
    const {
      sessionId,
      title,
      forkPath,
      serializedMessages,
      contentReplacementRecords,
    } = await createFork(customTitle)

    // Build LogOption for resume
    const now = new Date()
    const firstPrompt = deriveFirstPrompt(
      serializedMessages.find(m => m.type === 'user'),
    )

    // Save custom title - use provided title or firstPrompt as default
    // This ensures /status and /resume show the same session name
    // Always add " (Branch)" suffix to make it clear this is a branched session
    // Handle collisions by adding a number suffix (e.g., " (Branch 2)", " (Branch 3)")
    const baseName = title ?? firstPrompt
    const effectiveTitle = await getUniqueForkName(baseName)
    await saveCustomTitle(sessionId, effectiveTitle, forkPath)

    logEvent('tengu_conversation_forked', {
      message_count: serializedMessages.length,
      has_custom_title: !!title,
    })

    const forkLog: LogOption = {
      date: now.toISOString().split('T')[0]!,
      messages: serializedMessages,
      fullPath: forkPath,
      value: now.getTime(),
      created: now,
      modified: now,
      firstPrompt,
      messageCount: serializedMessages.length,
      isSidechain: false,
      sessionId,
      customTitle: effectiveTitle,
      contentReplacements: contentReplacementRecords,
    }

    // Resume into the fork
    const titleInfo = title ? ` "${title}"` : ''
    const resumeHint = `\nTo resume the original: claude -r ${originalSessionId}`
    const successMessage = `Branched conversation${titleInfo}. You are now in the branch.${resumeHint}`

    if (context.resume) {
      await context.resume(sessionId, forkLog, 'fork')
      onDone(successMessage, { display: 'system' })
    } else {
      // Fallback if resume not available
      onDone(
        `Branched conversation${titleInfo}. Resume with: /resume ${sessionId}`,
      )
    }

    return null
  } catch (error) {
    const message =
      error instanceof Error ? error.message : 'Unknown error occurred'
    onDone(`Failed to branch conversation: ${message}`)
    return null
  }
}
|
||||
Some files were not shown because too many files have changed in this diff Show More
Loading…
x
Reference in New Issue
Block a user