init claude-code

This commit is contained in:
2026-04-01 17:32:37 +02:00
commit 73b208c009
1902 changed files with 513237 additions and 0 deletions
+455
View File
@@ -0,0 +1,455 @@
/* eslint-disable eslint-plugin-n/no-unsupported-features/node-builtins */
/**
* CONNECT-over-WebSocket relay for CCR upstreamproxy.
*
* Listens on localhost TCP, accepts HTTP CONNECT from curl/gh/kubectl/etc,
* and tunnels bytes over WebSocket to the CCR upstreamproxy endpoint.
* The CCR server-side terminates the tunnel, MITMs TLS, injects org-configured
* credentials (e.g. DD-API-KEY), and forwards to the real upstream.
*
* WHY WebSocket and not raw CONNECT: CCR ingress is GKE L7 with path-prefix
* routing; there's no connect_matcher in cdk-constructs. The session-ingress
* tunnel (sessions/tunnel/v1alpha/tunnel.proto) already uses this pattern.
*
* Protocol: bytes are wrapped in UpstreamProxyChunk protobuf messages
* (`message UpstreamProxyChunk { bytes data = 1; }`) for compatibility with
* gateway.NewWebSocketStreamAdapter on the server side.
*/
import { createServer, type Socket as NodeSocket } from 'node:net'
import { logForDebugging } from '../utils/debug.js'
import { getWebSocketTLSOptions } from '../utils/mtls.js'
import { getWebSocketProxyAgent, getWebSocketProxyUrl } from '../utils/proxy.js'
// The CCR container runs behind an egress gateway — direct outbound is
// blocked, so the WS upgrade must go through the same HTTP CONNECT proxy
// everything else uses. undici's globalThis.WebSocket does not consult
// the global dispatcher for the upgrade, so under Node we use the ws package
// with an explicit agent (same pattern as SessionsWebSocket). Bun's native
// WebSocket takes a proxy URL directly. Preloaded in startNodeRelay so
// openTunnel stays synchronous and the CONNECT state machine doesn't race.
// Constructor type of the `ws` package's default export (Node path only).
type WSCtor = typeof import('ws').default
// Populated by startNodeRelay before any connection is accepted, so
// openTunnel can construct sockets synchronously.
let nodeWSCtor: WSCtor | undefined
// Intersection of the surface openTunnel touches. Both undici's
// globalThis.WebSocket and the ws package satisfy this via property-style
// onX handlers.
type WebSocketLike = Pick<
  WebSocket,
  | 'onopen'
  | 'onmessage'
  | 'onerror'
  | 'onclose'
  | 'send'
  | 'close'
  | 'readyState'
  | 'binaryType'
>
// Envoy per-request buffer cap. Week-1 Datadog payloads won't hit this, but
// design for it so git-push doesn't need a relay rewrite.
const MAX_CHUNK_BYTES = 512 * 1024
// Keepalive period (ms). Sidecar idle timeout is 50s; ping well inside that.
const PING_INTERVAL_MS = 30_000
/**
 * Encode an UpstreamProxyChunk protobuf message by hand.
 *
 * For `message UpstreamProxyChunk { bytes data = 1; }` the wire format is a
 * single length-delimited field: tag byte 0x0a ((1 << 3) | wire type 2),
 * a varint payload length, then the payload bytes themselves.
 *
 * protobufjs would be the general answer; for a single-field bytes message
 * the hand encoding is a few lines and avoids a runtime dep in the hot path.
 */
export function encodeChunk(data: Uint8Array): Uint8Array {
  // Build the varint length prefix: little-endian 7-bit groups with the
  // continuation bit set on every group except the last.
  const lengthBytes: number[] = []
  let remaining = data.length
  while (remaining > 0x7f) {
    lengthBytes.push((remaining & 0x7f) | 0x80)
    remaining >>>= 7
  }
  lengthBytes.push(remaining)
  // Layout: tag byte, varint length, payload.
  const encoded = new Uint8Array(1 + lengthBytes.length + data.length)
  encoded[0] = 0x0a
  for (let j = 0; j < lengthBytes.length; j++) {
    encoded[1 + j] = lengthBytes[j]!
  }
  encoded.set(data, 1 + lengthBytes.length)
  return encoded
}
/**
 * Decode an UpstreamProxyChunk. Returns the data field, or null if malformed.
 * Tolerates the server sending a zero-length chunk (keepalive semantics).
 *
 * Malformed inputs rejected with null: wrong tag byte, a truncated or
 * over-wide length varint, a length that overflows to negative, or a length
 * that overruns the buffer.
 */
export function decodeChunk(buf: Uint8Array): Uint8Array | null {
  if (buf.length === 0) return new Uint8Array(0)
  if (buf[0] !== 0x0a) return null
  let len = 0
  let shift = 0
  let i = 1
  let terminated = false
  while (i < buf.length) {
    const b = buf[i]!
    len |= (b & 0x7f) << shift
    i++
    if ((b & 0x80) === 0) {
      terminated = true
      break
    }
    shift += 7
    if (shift > 28) return null
  }
  // Continuation bit still set at end-of-buffer means the length varint
  // itself is truncated. The old `i + len > buf.length` check alone missed
  // this and decoded e.g. [0x0a, 0x80] as a valid empty chunk.
  if (!terminated) return null
  // A 5-byte varint can set bit 31 via the 32-bit `<<`, flipping len
  // negative; subarray(i, i + len) would then wrap around from the end and
  // return garbage instead of signaling malformed input.
  if (len < 0) return null
  if (i + len > buf.length) return null
  return buf.subarray(i, i + len)
}
/** Handle returned by the relay starters: bound localhost port + teardown. */
export type UpstreamProxyRelay = {
  port: number
  stop: () => void
}
/** Per-client-connection state shared by the Bun and Node socket paths. */
type ConnState = {
  // Tunnel WS; unset while the CONNECT request is still being parsed.
  ws?: WebSocketLike
  // Accumulates the CONNECT request head until CRLF CRLF is seen.
  connectBuf: Buffer
  // Keepalive timer started in ws.onopen; cleared by cleanupConn.
  pinger?: ReturnType<typeof setInterval>
  // Bytes that arrived after the CONNECT header but before ws.onopen fired.
  // TCP can coalesce CONNECT + ClientHello into one packet, and the socket's
  // data callback can fire again while the WS handshake is still in flight.
  // Both cases would silently drop bytes without this buffer.
  pending: Buffer[]
  wsOpen: boolean
  // Set once the server's 200 Connection Established has been forwarded and
  // the tunnel is carrying TLS. After that, writing a plaintext 502 would
  // corrupt the client's TLS stream — just close instead.
  established: boolean
  // WS onerror is always followed by onclose; without a guard the second
  // handler would sock.end() an already-ended socket. First caller wins.
  closed: boolean
}
/**
 * Minimal socket abstraction so the CONNECT parser and WS tunnel plumbing
 * are runtime-agnostic. Implementations handle write backpressure internally:
 * Bun's sock.write() does partial writes and needs explicit tail-queueing;
 * Node's net.Socket buffers unconditionally and never drops bytes.
 */
type ClientSocket = {
  write: (data: Uint8Array | string) => void
  end: () => void
}
/** Fresh per-connection state: empty buffers, all phase flags cleared. */
function newConnState(): ConnState {
  const initial: ConnState = {
    closed: false,
    established: false,
    wsOpen: false,
    pending: [],
    connectBuf: Buffer.alloc(0),
  }
  return initial
}
/**
 * Start the relay. Returns the ephemeral port it bound and a stop function.
 * Uses Bun.listen when available, otherwise Node's net.createServer — the CCR
 * container runs the CLI under Node, not Bun.
 */
export async function startUpstreamProxyRelay(opts: {
  wsUrl: string
  sessionId: string
  token: string
}): Promise<UpstreamProxyRelay> {
  // Basic credentials ride inside the tunneled CONNECT as Proxy-Authorization.
  const basicCreds = Buffer.from(`${opts.sessionId}:${opts.token}`).toString(
    'base64',
  )
  const authHeader = `Basic ${basicCreds}`
  // WS upgrade itself is auth-gated (proto authn: PRIVATE_API) — the gateway
  // wants the session-ingress JWT on the upgrade request, separate from the
  // Proxy-Authorization that rides inside the tunneled CONNECT.
  const wsAuthHeader = `Bearer ${opts.token}`
  let relay: UpstreamProxyRelay
  if (typeof Bun !== 'undefined') {
    relay = startBunRelay(opts.wsUrl, authHeader, wsAuthHeader)
  } else {
    relay = await startNodeRelay(opts.wsUrl, authHeader, wsAuthHeader)
  }
  logForDebugging(`[upstreamproxy] relay listening on 127.0.0.1:${relay.port}`)
  return relay
}
function startBunRelay(
  wsUrl: string,
  authHeader: string,
  wsAuthHeader: string,
): UpstreamProxyRelay {
  // Bun TCP sockets don't auto-buffer partial writes: sock.write() returns
  // how many bytes the kernel actually accepted and silently drops the rest.
  // Any unwritten tail is queued per-socket and flushed by the drain handler.
  // Per-socket because the adapter closure outlives individual handler calls.
  type BunState = ConnState & { writeBuf: Uint8Array[] }
  // eslint-disable-next-line custom-rules/require-bun-typeof-guard -- caller dispatches on typeof Bun
  const server = Bun.listen<BunState>({
    hostname: '127.0.0.1',
    port: 0,
    socket: {
      open(sock) {
        sock.data = { ...newConnState(), writeBuf: [] }
      },
      data(sock, data) {
        const state = sock.data
        const adapter: ClientSocket = {
          write: payload => {
            const bytes =
              typeof payload === 'string'
                ? Buffer.from(payload, 'utf8')
                : payload
            // While a tail is already queued, new writes must queue behind
            // it to preserve byte order.
            if (state.writeBuf.length > 0) {
              state.writeBuf.push(bytes)
              return
            }
            const written = sock.write(bytes)
            if (written < bytes.length) {
              state.writeBuf.push(bytes.subarray(written))
            }
          },
          end: () => sock.end(),
        }
        handleData(adapter, state, data, wsUrl, authHeader, wsAuthHeader)
      },
      drain(sock) {
        // Flush queued tails in order; stop again on the first partial write.
        const state = sock.data
        while (state.writeBuf.length > 0) {
          const head = state.writeBuf[0]!
          const written = sock.write(head)
          if (written < head.length) {
            state.writeBuf[0] = head.subarray(written)
            return
          }
          state.writeBuf.shift()
        }
      },
      close(sock) {
        cleanupConn(sock.data)
      },
      error(sock, err) {
        logForDebugging(`[upstreamproxy] client socket error: ${err.message}`)
        cleanupConn(sock.data)
      },
    },
  })
  return {
    port: server.port,
    stop: () => server.stop(true),
  }
}
// Exported so tests can exercise the Node path directly — the test runner is
// Bun, so the runtime dispatch in startUpstreamProxyRelay always picks Bun.
export async function startNodeRelay(
  wsUrl: string,
  authHeader: string,
  wsAuthHeader: string,
): Promise<UpstreamProxyRelay> {
  // Preload the ws constructor so openTunnel can stay synchronous.
  nodeWSCtor = (await import('ws')).default
  const states = new WeakMap<NodeSocket, ConnState>()
  const server = createServer(conn => {
    const st = newConnState()
    states.set(conn, st)
    // Node's net.Socket.write() buffers internally — a false return only
    // signals backpressure; the bytes are already queued, so no tail-tracking
    // is needed for correctness. Week-1 payloads won't stress the buffer.
    const adapter: ClientSocket = {
      write: payload => {
        conn.write(typeof payload === 'string' ? payload : Buffer.from(payload))
      },
      end: () => conn.end(),
    }
    conn.on('data', chunk =>
      handleData(adapter, st, chunk, wsUrl, authHeader, wsAuthHeader),
    )
    conn.on('close', () => cleanupConn(states.get(conn)))
    conn.on('error', err => {
      logForDebugging(`[upstreamproxy] client socket error: ${err.message}`)
      cleanupConn(states.get(conn))
    })
  })
  return new Promise((resolve, reject) => {
    server.once('error', reject)
    server.listen(0, '127.0.0.1', () => {
      const addr = server.address()
      if (addr !== null && typeof addr === 'object') {
        resolve({ port: addr.port, stop: () => server.close() })
      } else {
        reject(new Error('upstreamproxy: server has no TCP address'))
      }
    })
  })
}
/**
 * Shared per-connection data handler. Phase 1 accumulates the CONNECT request;
 * phase 2 forwards client bytes over the WS tunnel.
 */
function handleData(
  sock: ClientSocket,
  st: ConnState,
  data: Buffer,
  wsUrl: string,
  authHeader: string,
  wsAuthHeader: string,
): void {
  if (st.ws) {
    // Phase 2: the tunnel exists. Until ws.onopen fires, buffer client
    // bytes; once open, pump them straight through in bounded chunks.
    if (st.wsOpen) {
      forwardToWs(st.ws, data)
    } else {
      st.pending.push(Buffer.from(data))
    }
    return
  }
  // Phase 1: accumulate until the full CONNECT request (terminated by
  // CRLF CRLF) has arrived. curl/gh send it in one packet, but don't
  // assume that.
  st.connectBuf = Buffer.concat([st.connectBuf, data])
  const headerEnd = st.connectBuf.indexOf('\r\n\r\n')
  if (headerEnd === -1) {
    // Guard against a client that never terminates the request head.
    if (st.connectBuf.length > 8192) {
      sock.write('HTTP/1.1 400 Bad Request\r\n\r\n')
      sock.end()
    }
    return
  }
  const requestHead = st.connectBuf.subarray(0, headerEnd).toString('utf8')
  const requestLine = requestHead.split('\r\n')[0] ?? ''
  if (!/^CONNECT\s+(\S+)\s+HTTP\/1\.[01]$/i.test(requestLine)) {
    sock.write('HTTP/1.1 405 Method Not Allowed\r\n\r\n')
    sock.end()
    return
  }
  // Bytes past the CONNECT header (e.g. a coalesced TLS ClientHello) must
  // survive until openTunnel can flush them once the WS is open.
  const trailing = st.connectBuf.subarray(headerEnd + 4)
  if (trailing.length > 0) {
    st.pending.push(Buffer.from(trailing))
  }
  st.connectBuf = Buffer.alloc(0)
  openTunnel(sock, st, requestLine, wsUrl, authHeader, wsAuthHeader)
}
/**
 * Dial the CCR upstreamproxy WS endpoint for one client connection and splice
 * it to the client socket: the CONNECT head (plus Proxy-Authorization) goes
 * out as the first chunk, then bytes are piped both ways until either side
 * closes.
 */
function openTunnel(
  sock: ClientSocket,
  st: ConnState,
  connectLine: string,
  wsUrl: string,
  authHeader: string,
  wsAuthHeader: string,
): void {
  // core/websocket/stream.go picks JSON vs binary-proto from the upgrade
  // request's Content-Type header (defaults to JSON). Without application/proto
  // the server protojson.Unmarshals our hand-encoded binary chunks and fails
  // silently with EOF.
  const headers = {
    'Content-Type': 'application/proto',
    Authorization: wsAuthHeader,
  }
  let ws: WebSocketLike
  if (nodeWSCtor) {
    // Node path: ws package with an explicit proxy agent + TLS options
    // (undici's global WebSocket ignores the global dispatcher on upgrade —
    // see the preload comment at the top of the file).
    ws = new nodeWSCtor(wsUrl, {
      headers,
      agent: getWebSocketProxyAgent(wsUrl),
      ...getWebSocketTLSOptions(),
    }) as unknown as WebSocketLike
  } else {
    // Bun path: native WebSocket takes the proxy URL and TLS options directly.
    ws = new globalThis.WebSocket(wsUrl, {
      // @ts-expect-error — Bun extension; not in lib.dom WebSocket types
      headers,
      proxy: getWebSocketProxyUrl(wsUrl),
      tls: getWebSocketTLSOptions() || undefined,
    })
  }
  // ArrayBuffer frames keep onmessage free of Blob handling on both runtimes.
  ws.binaryType = 'arraybuffer'
  st.ws = ws
  ws.onopen = () => {
    // First chunk carries the CONNECT line plus Proxy-Authorization so the
    // server can auth the tunnel and know the target host:port. Server
    // responds with its own "HTTP/1.1 200" over the tunnel; we just pipe it.
    const head =
      `${connectLine}\r\n` + `Proxy-Authorization: ${authHeader}\r\n` + `\r\n`
    ws.send(encodeChunk(Buffer.from(head, 'utf8')))
    // Flush anything that arrived while the WS handshake was in flight —
    // trailing bytes from the CONNECT packet and any data() callbacks that
    // fired before onopen. Flag must flip before the flush so handleData
    // stops re-buffering.
    st.wsOpen = true
    for (const buf of st.pending) {
      forwardToWs(ws, buf)
    }
    st.pending = []
    // Not all WS implementations expose ping(); empty chunk works as an
    // application-level keepalive the server can ignore.
    st.pinger = setInterval(sendKeepalive, PING_INTERVAL_MS, ws)
  }
  ws.onmessage = ev => {
    // Normalize to Uint8Array; non-ArrayBuffer payloads go through Buffer.
    const raw =
      ev.data instanceof ArrayBuffer
        ? new Uint8Array(ev.data)
        : new Uint8Array(Buffer.from(ev.data))
    const payload = decodeChunk(raw)
    // Zero-length chunks are server keepalives: they carry no client bytes
    // and must not mark the tunnel as established.
    if (payload && payload.length > 0) {
      st.established = true
      sock.write(payload)
    }
  }
  ws.onerror = ev => {
    // ws's ErrorEvent carries .message; a DOM Event does not — hence the guard.
    const msg = 'message' in ev ? String(ev.message) : 'websocket error'
    logForDebugging(`[upstreamproxy] ws error: ${msg}`)
    if (st.closed) return
    st.closed = true
    // A plaintext 502 is only safe before the tunnel carries TLS bytes.
    if (!st.established) {
      sock.write('HTTP/1.1 502 Bad Gateway\r\n\r\n')
    }
    sock.end()
    cleanupConn(st)
  }
  ws.onclose = () => {
    // onerror may have already torn this connection down; first caller wins.
    if (st.closed) return
    st.closed = true
    sock.end()
    cleanupConn(st)
  }
}
/**
 * Application-level keepalive: an empty UpstreamProxyChunk the server can
 * ignore (not all WS implementations expose ping()).
 *
 * readyState is compared against the literal WHATWG value (OPEN === 1)
 * instead of `WebSocket.OPEN`: on the Node/ws path the global WebSocket
 * constructor is not guaranteed to exist, and referencing it would throw
 * a ReferenceError from inside the interval callback.
 */
function sendKeepalive(ws: WebSocketLike): void {
  const OPEN = 1 // WHATWG WebSocket.OPEN
  if (ws.readyState === OPEN) {
    ws.send(encodeChunk(new Uint8Array(0)))
  }
}
/**
 * Pump client bytes to the WS in MAX_CHUNK_BYTES slices so a single large
 * socket read can't exceed the per-request buffer cap. Silently drops data
 * when the WS is not OPEN — the onerror/onclose handlers tear the whole
 * connection down, so there is nothing useful to do with the bytes here.
 *
 * readyState is compared against the literal WHATWG value (OPEN === 1)
 * rather than `WebSocket.OPEN`, because on the Node/ws path the global
 * WebSocket constructor may not exist and referencing it would throw.
 */
function forwardToWs(ws: WebSocketLike, data: Buffer): void {
  const OPEN = 1 // WHATWG WebSocket.OPEN
  if (ws.readyState !== OPEN) return
  for (let off = 0; off < data.length; off += MAX_CHUNK_BYTES) {
    const slice = data.subarray(off, off + MAX_CHUNK_BYTES)
    ws.send(encodeChunk(slice))
  }
}
/**
 * Tear down per-connection resources: stop the keepalive timer and close the
 * WS if it is still CONNECTING or OPEN. Idempotent, and tolerant of a state
 * that never got as far as opening a WS.
 *
 * WHATWG readyState: CONNECTING=0, OPEN=1, CLOSING=2, CLOSED=3. Compared
 * numerically — on the Node/ws path the global WebSocket constructor (and
 * its static OPEN property) may not exist, and referencing it here would
 * throw during cleanup.
 */
function cleanupConn(st: ConnState | undefined): void {
  if (!st) return
  if (st.pinger) clearInterval(st.pinger)
  if (st.ws && st.ws.readyState <= 1 /* OPEN */) {
    try {
      st.ws.close()
    } catch {
      // already closing
    }
  }
  st.ws = undefined
}
+285
View File
@@ -0,0 +1,285 @@
/**
* CCR upstreamproxy — container-side wiring.
*
* When running inside a CCR session container with upstreamproxy configured,
* this module:
* 1. Reads the session token from /run/ccr/session_token
* 2. Sets prctl(PR_SET_DUMPABLE, 0) to block same-UID ptrace of the heap
* 3. Downloads the upstreamproxy CA cert and concatenates it with the
* system bundle so curl/gh/python trust the MITM proxy
* 4. Starts a local CONNECT→WebSocket relay (see relay.ts)
* 5. Unlinks the token file (token stays heap-only; file is gone before
* the agent loop can see it, but only after the relay is confirmed up
* so a supervisor restart can retry)
* 6. Exposes HTTPS_PROXY / SSL_CERT_FILE env vars for all agent subprocesses
*
* Every step fails open: any error logs a warning and disables the proxy.
* A broken proxy setup must never break an otherwise-working session.
*
* Design doc: api-go/ccr/docs/plans/CCR_AUTH_DESIGN.md § "Week-1 pilot scope".
*/
import { mkdir, readFile, unlink, writeFile } from 'fs/promises'
import { homedir } from 'os'
import { dirname, join } from 'path'
import { registerCleanup } from '../utils/cleanupRegistry.js'
import { logForDebugging } from '../utils/debug.js'
import { isEnvTruthy } from '../utils/envUtils.js'
import { isENOENT } from '../utils/errors.js'
import { startUpstreamProxyRelay } from './relay.js'
/** Path where the CCR supervisor drops the per-session token file. */
export const SESSION_TOKEN_PATH = '/run/ccr/session_token'
/** System CA bundle that gets concatenated with the CCR MITM CA. */
const SYSTEM_CA_BUNDLE = '/etc/ssl/certs/ca-certificates.crt'
// Hosts the proxy must NOT intercept. Covers loopback, RFC1918, the IMDS
// range, and the package registries + GitHub that CCR containers already
// reach directly. Mirrors airlock/scripts/sandbox-shell-ccr.sh.
const NO_PROXY_LIST = [
  'localhost',
  '127.0.0.1',
  '::1',
  '169.254.0.0/16',
  '10.0.0.0/8',
  '172.16.0.0/12',
  '192.168.0.0/16',
  // Anthropic API: no upstream route will ever match, and the MITM breaks
  // non-Bun runtimes (Python httpx/certifi doesn't trust the forged CA).
  // Three forms because NO_PROXY parsing differs across runtimes:
  //   *.anthropic.com — Bun, curl, Go (glob match)
  //   .anthropic.com  — Python urllib/httpx (suffix match, strips leading dot)
  //   anthropic.com   — apex domain fallback
  'anthropic.com',
  '.anthropic.com',
  '*.anthropic.com',
  'github.com',
  'api.github.com',
  '*.github.com',
  '*.githubusercontent.com',
  'registry.npmjs.org',
  'pypi.org',
  'files.pythonhosted.org',
  'index.crates.io',
  'proxy.golang.org',
].join(',')
/** Proxy status published by initUpstreamProxy, read by getUpstreamProxyEnv. */
type UpstreamProxyState = {
  enabled: boolean
  port?: number
  caBundlePath?: string
}
let state: UpstreamProxyState = { enabled: false }
/**
 * Initialize upstreamproxy. Called once from init.ts. Safe to call when the
 * feature is off or the token file is absent — returns {enabled: false}.
 *
 * Every failure path logs and leaves the proxy disabled (fail open): a broken
 * proxy setup must never break an otherwise-working session.
 *
 * Overridable paths are for tests; production uses the defaults.
 */
export async function initUpstreamProxy(opts?: {
  tokenPath?: string
  systemCaPath?: string
  caBundlePath?: string
  ccrBaseUrl?: string
}): Promise<UpstreamProxyState> {
  // Only meaningful inside a CCR session container.
  if (!isEnvTruthy(process.env.CLAUDE_CODE_REMOTE)) {
    return state
  }
  // CCR evaluates ccr_upstream_proxy_enabled server-side (where GrowthBook is
  // warm) and injects this env var via StartupContext.EnvironmentVariables.
  // Every CCR session is a fresh container with no GB cache, so a client-side
  // GB check here always returned the default (false).
  if (!isEnvTruthy(process.env.CCR_UPSTREAM_PROXY_ENABLED)) {
    return state
  }
  const sessionId = process.env.CLAUDE_CODE_REMOTE_SESSION_ID
  if (!sessionId) {
    logForDebugging(
      '[upstreamproxy] CLAUDE_CODE_REMOTE_SESSION_ID unset; proxy disabled',
      { level: 'warn' },
    )
    return state
  }
  const tokenPath = opts?.tokenPath ?? SESSION_TOKEN_PATH
  const token = await readToken(tokenPath)
  if (!token) {
    logForDebugging('[upstreamproxy] no session token file; proxy disabled')
    return state
  }
  // The token is now heap-only; block same-UID ptrace before going further.
  setNonDumpable()
  // CCR injects ANTHROPIC_BASE_URL via StartupContext (sessionExecutor.ts /
  // sessionHandler.ts). getOauthConfig() is wrong here: it keys off
  // USER_TYPE + USE_{LOCAL,STAGING}_OAUTH, none of which the container sets,
  // so it always returned the prod URL and the CA fetch 404'd.
  const baseUrl =
    opts?.ccrBaseUrl ??
    process.env.ANTHROPIC_BASE_URL ??
    'https://api.anthropic.com'
  const caBundlePath =
    opts?.caBundlePath ?? join(homedir(), '.ccr', 'ca-bundle.crt')
  const caOk = await downloadCaBundle(
    baseUrl,
    opts?.systemCaPath ?? SYSTEM_CA_BUNDLE,
    caBundlePath,
  )
  if (!caOk) return state
  try {
    // http(s):// → ws(s)://; the relay dials this WS endpoint per CONNECT.
    const wsUrl = baseUrl.replace(/^http/, 'ws') + '/v1/code/upstreamproxy/ws'
    const relay = await startUpstreamProxyRelay({ wsUrl, sessionId, token })
    registerCleanup(async () => relay.stop())
    state = { enabled: true, port: relay.port, caBundlePath }
    logForDebugging(`[upstreamproxy] enabled on 127.0.0.1:${relay.port}`)
    // Only unlink after the listener is up: if CA download or listen()
    // fails, a supervisor restart can retry with the token still on disk.
    await unlink(tokenPath).catch(() => {
      logForDebugging('[upstreamproxy] token file unlink failed', {
        level: 'warn',
      })
    })
  } catch (err) {
    logForDebugging(
      `[upstreamproxy] relay start failed: ${err instanceof Error ? err.message : String(err)}; proxy disabled`,
      { level: 'warn' },
    )
  }
  return state
}
/**
 * Env vars to merge into every agent subprocess. Empty when the proxy is
 * disabled. Called from subprocessEnv() so Bash/MCP/LSP/hooks all inherit
 * the same recipe.
 */
export function getUpstreamProxyEnv(): Record<string, string> {
  if (!state.enabled || !state.port || !state.caBundlePath) {
    // Child CLI processes can't re-initialize the relay (token file was
    // unlinked by the parent), but the parent's relay is still running and
    // reachable at 127.0.0.1:<port>. If we inherited proxy vars from the
    // parent (HTTPS_PROXY + SSL_CERT_FILE both set), pass them through so
    // our subprocesses also route through the parent's relay.
    if (process.env.HTTPS_PROXY && process.env.SSL_CERT_FILE) {
      const inherited: Record<string, string> = {}
      for (const key of [
        'HTTPS_PROXY',
        'https_proxy',
        'NO_PROXY',
        'no_proxy',
        'SSL_CERT_FILE',
        'NODE_EXTRA_CA_CERTS',
        'REQUESTS_CA_BUNDLE',
        'CURL_CA_BUNDLE',
      ]) {
        // Read once into a local: avoids the double env lookup and keeps the
        // value narrowed to string (the old `inherited[key] = process.env[key]`
        // assigned `string | undefined` into a Record<string, string>).
        const value = process.env[key]
        if (value) inherited[key] = value
      }
      return inherited
    }
    return {}
  }
  const proxyUrl = `http://127.0.0.1:${state.port}`
  // HTTPS only: the relay handles CONNECT and nothing else. Plain HTTP has
  // no credentials to inject, so routing it through the relay would just
  // break the request with a 405.
  return {
    HTTPS_PROXY: proxyUrl,
    https_proxy: proxyUrl,
    NO_PROXY: NO_PROXY_LIST,
    no_proxy: NO_PROXY_LIST,
    SSL_CERT_FILE: state.caBundlePath,
    NODE_EXTRA_CA_CERTS: state.caBundlePath,
    REQUESTS_CA_BUNDLE: state.caBundlePath,
    CURL_CA_BUNDLE: state.caBundlePath,
  }
}
/**
 * Test-only: reset module state between test cases. Only clears the status
 * record read by getUpstreamProxyEnv(); it does not stop a relay that
 * initUpstreamProxy may have started.
 */
export function resetUpstreamProxyForTests(): void {
  state = { enabled: false }
}
/**
 * Read and trim the session token file. Returns null when the file is missing
 * (feature off, or token already consumed) or effectively empty. Any other
 * read failure logs a warning and also returns null — fail open.
 */
async function readToken(path: string): Promise<string | null> {
  let raw: string
  try {
    raw = await readFile(path, 'utf8')
  } catch (err) {
    if (!isENOENT(err)) {
      logForDebugging(
        `[upstreamproxy] token read failed: ${err instanceof Error ? err.message : String(err)}`,
        { level: 'warn' },
      )
    }
    return null
  }
  const token = raw.trim()
  return token === '' ? null : token
}
/**
 * prctl(PR_SET_DUMPABLE, 0) via libc FFI. Blocks same-UID ptrace of this
 * process, so a prompt-injected `gdb -p $PPID` can't scrape the token from
 * the heap. Linux-only and Bun-only (needs bun:ffi); silently no-ops
 * elsewhere. Best-effort: a failed or unavailable prctl only logs a warning.
 */
function setNonDumpable(): void {
  if (process.platform !== 'linux' || typeof Bun === 'undefined') return
  try {
    // eslint-disable-next-line @typescript-eslint/no-require-imports
    const ffi = require('bun:ffi') as typeof import('bun:ffi')
    // int prctl(int option, unsigned long arg2, ..., unsigned long arg5)
    const lib = ffi.dlopen('libc.so.6', {
      prctl: {
        args: ['int', 'u64', 'u64', 'u64', 'u64'],
        returns: 'int',
      },
    } as const)
    const PR_SET_DUMPABLE = 4 // from linux/prctl.h
    const rc = lib.symbols.prctl(PR_SET_DUMPABLE, 0n, 0n, 0n, 0n)
    if (rc !== 0) {
      logForDebugging(
        '[upstreamproxy] prctl(PR_SET_DUMPABLE,0) returned nonzero',
        {
          level: 'warn',
        },
      )
    }
  } catch (err) {
    // dlopen/require can fail on musl or hardened images; stay best-effort.
    logForDebugging(
      `[upstreamproxy] prctl unavailable: ${err instanceof Error ? err.message : String(err)}`,
      { level: 'warn' },
    )
  }
}
async function downloadCaBundle(
baseUrl: string,
systemCaPath: string,
outPath: string,
): Promise<boolean> {
try {
// eslint-disable-next-line eslint-plugin-n/no-unsupported-features/node-builtins
const resp = await fetch(`${baseUrl}/v1/code/upstreamproxy/ca-cert`, {
// Bun has no default fetch timeout — a hung endpoint would block CLI
// startup forever. 5s is generous for a small PEM.
signal: AbortSignal.timeout(5000),
})
if (!resp.ok) {
logForDebugging(
`[upstreamproxy] ca-cert fetch ${resp.status}; proxy disabled`,
{ level: 'warn' },
)
return false
}
const ccrCa = await resp.text()
const systemCa = await readFile(systemCaPath, 'utf8').catch(() => '')
await mkdir(join(outPath, '..'), { recursive: true })
await writeFile(outPath, systemCa + '\n' + ccrCa, 'utf8')
return true
} catch (err) {
logForDebugging(
`[upstreamproxy] ca-cert download failed: ${err instanceof Error ? err.message : String(err)}; proxy disabled`,
{ level: 'warn' },
)
return false
}
}