init claude-code

This commit is contained in:
2026-04-01 17:32:37 +02:00
commit 73b208c009
1902 changed files with 513237 additions and 0 deletions
+390
View File
@@ -0,0 +1,390 @@
import { unlink } from 'fs/promises'
import { CircularBuffer } from '../CircularBuffer.js'
import { logForDebugging } from '../debug.js'
import { readFileRange, tailFile } from '../fsOperations.js'
import { getMaxOutputLength } from '../shell/outputLimits.js'
import { safeJoinLines } from '../stringUtils.js'
import { DiskTaskOutput, getTaskOutputPath } from './diskOutput.js'
// In-memory cap for pipe-mode buffering before output spills to disk.
const DEFAULT_MAX_MEMORY = 8 * 1024 * 1024 // 8MB
// Cadence of the shared file-tail poller (file mode).
const POLL_INTERVAL_MS = 1000
// Bytes of file tail sampled per poll tick.
const PROGRESS_TAIL_BYTES = 4096

/**
 * Progress snapshot callback.
 * `lastLines` is roughly the last 5 lines, `allLines` roughly the last 100,
 * `totalLines`/`totalBytes` are running (possibly extrapolated) totals, and
 * `isIncomplete` signals the shown output is only a partial tail (file larger
 * than the sampled window, or in-memory buffers already spilled to disk).
 */
type ProgressCallback = (
  lastLines: string,
  allLines: string,
  totalLines: number,
  totalBytes: number,
  isIncomplete: boolean,
) => void
/**
 * Single source of truth for a shell command's output.
 *
 * For bash commands (file mode): both stdout and stderr go directly to
 * a file via stdio fds — neither enters JS. Progress is extracted by
 * polling the file tail. getStderr() returns '' since stderr is
 * interleaved in the output file.
 *
 * For hooks (pipe mode): data flows through writeStdout()/writeStderr()
 * and is buffered in memory, spilling to disk if it exceeds the limit.
 */
export class TaskOutput {
  readonly taskId: string
  /** Output file path for this task (from getTaskOutputPath). */
  readonly path: string
  /** True when stdout goes to a file fd (bypassing JS). False for pipe mode (hooks). */
  readonly stdoutToFile: boolean

  // Pipe-mode in-memory buffers; emptied once output spills to disk.
  #stdoutBuffer = ''
  #stderrBuffer = ''
  /** Non-null once buffered output has overflowed #maxMemory (pipe mode). */
  #disk: DiskTaskOutput | null = null
  /** Ring of the most recent output lines, used for pipe-mode progress/tail. */
  #recentLines = new CircularBuffer<string>(1000)
  #totalLines = 0
  #totalBytes = 0
  /** In-memory cap before spilling to disk (pipe mode). */
  #maxMemory: number
  #onProgress: ProgressCallback | null
  /** Set by getStdout() — true when the file was fully read (≤ maxOutputLength). */
  #outputFileRedundant = false
  /** Set by getStdout() — total file size in bytes. */
  #outputFileSize = 0

  // --- Shared poller state ---
  /** Registry of all file-mode TaskOutput instances with onProgress callbacks. */
  static #registry = new Map<string, TaskOutput>()
  /** Subset of #registry currently being polled (visibility-driven by React). */
  static #activePolling = new Map<string, TaskOutput>()
  static #pollInterval: ReturnType<typeof setInterval> | null = null

  constructor(
    taskId: string,
    onProgress: ProgressCallback | null,
    stdoutToFile = false,
    maxMemory: number = DEFAULT_MAX_MEMORY,
  ) {
    this.taskId = taskId
    this.path = getTaskOutputPath(taskId)
    this.stdoutToFile = stdoutToFile
    this.#maxMemory = maxMemory
    this.#onProgress = onProgress
    // Register for polling when stdout goes to a file and progress is needed.
    // Actual polling is started/stopped by React via startPolling/stopPolling.
    if (stdoutToFile && onProgress) {
      TaskOutput.#registry.set(taskId, this)
    }
  }

  /**
   * Begin polling the output file for progress. Called from React
   * useEffect when the progress component mounts.
   */
  static startPolling(taskId: string): void {
    const instance = TaskOutput.#registry.get(taskId)
    if (!instance || !instance.#onProgress) {
      return
    }
    TaskOutput.#activePolling.set(taskId, instance)
    if (!TaskOutput.#pollInterval) {
      TaskOutput.#pollInterval = setInterval(TaskOutput.#tick, POLL_INTERVAL_MS)
      // Don't keep the process alive just for progress polling.
      TaskOutput.#pollInterval.unref()
    }
  }

  /**
   * Stop polling the output file. Called from React useEffect cleanup
   * when the progress component unmounts.
   */
  static stopPolling(taskId: string): void {
    TaskOutput.#activePolling.delete(taskId)
    if (TaskOutput.#activePolling.size === 0 && TaskOutput.#pollInterval) {
      clearInterval(TaskOutput.#pollInterval)
      TaskOutput.#pollInterval = null
    }
  }

  /**
   * Shared tick: reads the file tail for every actively-polled task.
   * Non-async body (.then) to avoid stacking if I/O is slow.
   */
  static #tick(): void {
    for (const [, entry] of TaskOutput.#activePolling) {
      if (!entry.#onProgress) {
        continue
      }
      void tailFile(entry.path, PROGRESS_TAIL_BYTES).then(
        ({ content, bytesRead, bytesTotal }) => {
          if (!entry.#onProgress) {
            return
          }
          // Always call onProgress even when content is empty, so the
          // progress loop wakes up and can check for backgrounding.
          // Commands like `git log -S` produce no output for long periods.
          if (!content) {
            entry.#onProgress('', '', entry.#totalLines, bytesTotal, false)
            return
          }
          // Count all newlines in the tail and capture slice points for the
          // last 5 and last 100 lines. Uncapped so extrapolation stays accurate
          // for dense output (short lines → >100 newlines in 4KB).
          // n5/n100 become the byte offsets where the last-5-line and
          // last-100-line windows start (0 = whole sampled tail).
          let pos = content.length
          let n5 = 0
          let n100 = 0
          let lineCount = 0
          while (pos > 0) {
            pos = content.lastIndexOf('\n', pos - 1)
            lineCount++
            if (lineCount === 5) n5 = pos <= 0 ? 0 : pos + 1
            if (lineCount === 100) n100 = pos <= 0 ? 0 : pos + 1
          }
          // lineCount is exact when the whole file fits in PROGRESS_TAIL_BYTES.
          // Otherwise extrapolate from the tail sample; monotone max keeps the
          // counter from going backwards when the tail has longer lines on one tick.
          const totalLines =
            bytesRead >= bytesTotal
              ? lineCount
              : Math.max(
                  entry.#totalLines,
                  Math.round((bytesTotal / bytesRead) * lineCount),
                )
          entry.#totalLines = totalLines
          entry.#totalBytes = bytesTotal
          entry.#onProgress(
            content.slice(n5),
            content.slice(n100),
            totalLines,
            bytesTotal,
            bytesRead < bytesTotal,
          )
        },
        () => {
          // File may not exist yet
        },
      )
    }
  }

  /** Write stdout data (pipe mode only — used by hooks). */
  writeStdout(data: string): void {
    this.#writeBuffered(data, false)
  }

  /** Write stderr data (always piped). */
  writeStderr(data: string): void {
    this.#writeBuffered(data, true)
  }

  /**
   * Buffer a chunk in memory, routing to disk once overflowed.
   * Stderr chunks are tagged so they stay distinguishable in the spill file.
   */
  #writeBuffered(data: string, isStderr: boolean): void {
    this.#totalBytes += data.length
    this.#updateProgress(data)
    // Write to disk if already overflowed
    if (this.#disk) {
      this.#disk.append(isStderr ? `[stderr] ${data}` : data)
      return
    }
    // Check if this chunk would exceed the in-memory limit
    const totalMem =
      this.#stdoutBuffer.length + this.#stderrBuffer.length + data.length
    if (totalMem > this.#maxMemory) {
      this.#spillToDisk(isStderr ? data : null, isStderr ? null : data)
      return
    }
    if (isStderr) {
      this.#stderrBuffer += data
    } else {
      this.#stdoutBuffer += data
    }
  }

  /**
   * Single backward pass: count all newlines (for totalLines) and extract
   * the last few lines as flat copies (for the CircularBuffer / progress).
   * Only used in pipe mode (hooks). File mode uses the shared poller.
   */
  #updateProgress(data: string): void {
    const MAX_PROGRESS_BYTES = 4096
    const MAX_PROGRESS_LINES = 100
    let lineCount = 0
    const lines: string[] = []
    let extractedBytes = 0
    let pos = data.length
    while (pos > 0) {
      const prev = data.lastIndexOf('\n', pos - 1)
      if (prev === -1) {
        break
      }
      lineCount++
      if (
        lines.length < MAX_PROGRESS_LINES &&
        extractedBytes < MAX_PROGRESS_BYTES
      ) {
        const lineLen = pos - prev - 1
        if (lineLen > 0 && lineLen <= MAX_PROGRESS_BYTES - extractedBytes) {
          const line = data.slice(prev + 1, pos)
          if (line.trim()) {
            // Round-trip through Buffer to force a flat string copy, so the
            // retained line doesn't pin the (possibly large) parent chunk.
            lines.push(Buffer.from(line).toString())
            extractedBytes += lineLen
          }
        }
      }
      pos = prev
    }
    this.#totalLines += lineCount
    // Lines were collected back-to-front; re-add oldest-first.
    for (let i = lines.length - 1; i >= 0; i--) {
      this.#recentLines.add(lines[i]!)
    }
    if (this.#onProgress && lines.length > 0) {
      const recent = this.#recentLines.getRecent(5)
      this.#onProgress(
        safeJoinLines(recent, '\n'),
        safeJoinLines(this.#recentLines.getRecent(100), '\n'),
        this.#totalLines,
        this.#totalBytes,
        this.#disk !== null,
      )
    }
  }

  /**
   * Move buffered output to disk and write the overflow-triggering chunk
   * (if any) after it, preserving order.
   */
  #spillToDisk(stderrChunk: string | null, stdoutChunk: string | null): void {
    this.#disk = new DiskTaskOutput(this.taskId)
    // Flush existing buffers
    if (this.#stdoutBuffer) {
      this.#disk.append(this.#stdoutBuffer)
      this.#stdoutBuffer = ''
    }
    if (this.#stderrBuffer) {
      this.#disk.append(`[stderr] ${this.#stderrBuffer}`)
      this.#stderrBuffer = ''
    }
    // Write the chunk that triggered overflow
    if (stdoutChunk) {
      this.#disk.append(stdoutChunk)
    }
    if (stderrChunk) {
      this.#disk.append(`[stderr] ${stderrChunk}`)
    }
  }

  /**
   * Get stdout. In file mode, reads from the output file.
   * In pipe mode, returns the in-memory buffer or tail from CircularBuffer.
   */
  async getStdout(): Promise<string> {
    if (this.stdoutToFile) {
      return this.#readStdoutFromFile()
    }
    // Pipe mode (hooks) — use in-memory data
    if (this.#disk) {
      const recent = this.#recentLines.getRecent(5)
      const tail = safeJoinLines(recent, '\n')
      const sizeKB = Math.round(this.#totalBytes / 1024)
      const notice = `\nOutput truncated (${sizeKB}KB total). Full output saved to: ${this.path}`
      // trimStart drops the leading newline when there is no tail to show.
      return tail ? tail + notice : notice.trimStart()
    }
    return this.#stdoutBuffer
  }

  /**
   * File-mode stdout read: up to maxOutputLength bytes from the front of
   * the output file. Also records redundancy/size flags for callers.
   */
  async #readStdoutFromFile(): Promise<string> {
    const maxBytes = getMaxOutputLength()
    try {
      const result = await readFileRange(this.path, 0, maxBytes)
      if (!result) {
        this.#outputFileRedundant = true
        return ''
      }
      const { content, bytesRead, bytesTotal } = result
      // If the file fits, it's fully captured inline and can be deleted.
      // If not, return what we read — processToolResultBlock handles
      // the <persisted-output> formatting and persistence downstream.
      this.#outputFileSize = bytesTotal
      this.#outputFileRedundant = bytesTotal <= bytesRead
      return content
    } catch (err) {
      // Surface the error instead of silently returning empty. An ENOENT here
      // means the output file was deleted while the command was running
      // (historically: cross-session startup cleanup in the same project dir).
      // Returning a diagnostic string keeps the tool_result non-empty, which
      // avoids reminder-only-at-tail confusion downstream and tells the model
      // (and us, via the transcript) what actually happened.
      const code =
        err instanceof Error && 'code' in err ? String(err.code) : 'unknown'
      logForDebugging(
        `TaskOutput.#readStdoutFromFile: failed to read ${this.path} (${code}): ${err}`,
      )
      return `<bash output unavailable: output file ${this.path} could not be read (${code}). This usually means another Claude Code process in the same project deleted it during startup cleanup.>`
    }
  }

  /** Sync getter for ExecResult.stderr */
  getStderr(): string {
    // After spill, stderr is interleaved into the disk file and tagged —
    // there is no longer a separate stderr stream to return.
    if (this.#disk) {
      return ''
    }
    return this.#stderrBuffer
  }

  /** True once pipe-mode output has spilled to disk. */
  get isOverflowed(): boolean {
    return this.#disk !== null
  }

  get totalLines(): number {
    return this.#totalLines
  }

  get totalBytes(): number {
    return this.#totalBytes
  }

  /**
   * True after getStdout() when the output file was fully read.
   * The file content is redundant (fully in ExecResult.stdout) and can be deleted.
   */
  get outputFileRedundant(): boolean {
    return this.#outputFileRedundant
  }

  /** Total file size in bytes, set after getStdout() reads the file. */
  get outputFileSize(): number {
    return this.#outputFileSize
  }

  /** Force all buffered content to disk. Call when backgrounding. */
  spillToDisk(): void {
    if (!this.#disk) {
      this.#spillToDisk(null, null)
    }
  }

  /** Wait for any pending disk writes to land. */
  async flush(): Promise<void> {
    await this.#disk?.flush()
  }

  /** Delete the output file (fire-and-forget safe). */
  async deleteOutputFile(): Promise<void> {
    try {
      await unlink(this.path)
    } catch {
      // File may already be deleted or not exist
    }
  }

  /** Release buffers, cancel disk writes, and deregister from the poller. */
  clear(): void {
    this.#stdoutBuffer = ''
    this.#stderrBuffer = ''
    this.#recentLines.clear()
    this.#onProgress = null
    this.#disk?.cancel()
    TaskOutput.stopPolling(this.taskId)
    TaskOutput.#registry.delete(this.taskId)
  }
}
+451
View File
@@ -0,0 +1,451 @@
import { constants as fsConstants } from 'fs'
import {
type FileHandle,
mkdir,
open,
stat,
symlink,
unlink,
} from 'fs/promises'
import { join } from 'path'
import { getSessionId } from '../../bootstrap/state.js'
import { getErrnoCode } from '../errors.js'
import { readFileRange, tailFile } from '../fsOperations.js'
import { logError } from '../log.js'
import { getProjectTempDir } from '../permissions/filesystem.js'
// SECURITY: O_NOFOLLOW prevents following symlinks when opening task output files.
// Without this, an attacker in the sandbox could create symlinks in the tasks directory
// pointing to arbitrary files, causing Claude Code on the host to write to those files.
// O_NOFOLLOW is not available on Windows, but the sandbox attack vector is Unix-only.
// (?? 0 makes the flag a no-op where the constant is undefined.)
const O_NOFOLLOW = fsConstants.O_NOFOLLOW ?? 0

// Default byte cap for single reads of a task output file.
const DEFAULT_MAX_READ_BYTES = 8 * 1024 * 1024 // 8MB

/**
 * Disk cap for task output files. In file mode (bash), a watchdog polls
 * file size and kills the process. In pipe mode (hooks), DiskTaskOutput
 * drops chunks past this limit. Shared so both caps stay in sync.
 */
export const MAX_TASK_OUTPUT_BYTES = 5 * 1024 * 1024 * 1024
// Human-readable form of MAX_TASK_OUTPUT_BYTES for truncation notices.
export const MAX_TASK_OUTPUT_BYTES_DISPLAY = '5GB'
/**
* Get the task output directory for this session.
* Uses project temp directory so reads are auto-allowed by checkReadableInternalPath.
*
* The session ID is included so concurrent sessions in the same project don't
* clobber each other's output files. Startup cleanup in one session previously
* unlinked in-flight output files from other sessions — the writing process's fd
* keeps the inode alive but reads via path fail ENOENT, and getStdout() returned
* empty string (inc-4586 / boris-20260309-060423).
*
* The session ID is captured at FIRST CALL, not re-read on every invocation.
* /clear calls regenerateSessionId(), which would otherwise cause
* ensureOutputDir() to create a new-session path while existing TaskOutput
* instances still hold old-session paths — open() would ENOENT. Background
* bash tasks surviving /clear need their output files to stay reachable.
*/
let _taskOutputDir: string | undefined

/** Per-session task output directory — memoized at first call (see rationale above). */
export function getTaskOutputDir(): string {
  _taskOutputDir ??= join(getProjectTempDir(), getSessionId(), 'tasks')
  return _taskOutputDir
}

/** Test helper — clears the memoized dir. */
export function _resetTaskOutputDirForTest(): void {
  _taskOutputDir = undefined
}
/** Create the task output directory (and any missing parents). */
async function ensureOutputDir(): Promise<void> {
  const dir = getTaskOutputDir()
  await mkdir(dir, { recursive: true })
}
/** Absolute path of the output file for the given task. */
export function getTaskOutputPath(taskId: string): string {
  const fileName = `${taskId}.output`
  return join(getTaskOutputDir(), fileName)
}
// Tracks fire-and-forget promises (initTaskOutput, initTaskOutputAsSymlink,
// evictTaskOutput, #drain) so tests can drain before teardown. Prevents the
// async-ENOENT-after-teardown flake class (#24957, #25065): a voided async
// resumes after preload's afterEach nuked the temp dir → ENOENT → unhandled
// rejection → flaky test failure. allSettled so a rejection doesn't short-
// circuit the drain and leave other ops racing the rmSync.
const _pendingOps = new Set<Promise<unknown>>()

/** Register a fire-and-forget promise; it deregisters itself on settle. */
function track<T>(p: Promise<T>): Promise<T> {
  _pendingOps.add(p)
  const deregister = (): void => {
    _pendingOps.delete(p)
  }
  // Observe on a side branch (rejection handled there) and hand the
  // original promise — including any rejection — back to the caller.
  void p.then(deregister, deregister)
  return p
}
/**
 * Encapsulates async disk writes for a single task's output.
 *
 * Uses a flat array as a write queue processed by a single drain loop,
 * so each chunk can be GC'd immediately after its write completes.
 * This avoids the memory retention problem of chained .then() closures
 * where every reaction captures its data until the whole chain resolves.
 */
export class DiskTaskOutput {
  #path: string
  /** Open append handle while a drain pass is running; null otherwise. */
  #fileHandle: FileHandle | null = null
  /** Pending chunks, appended in order; emptied by the drain loop. */
  #queue: string[] = []
  #bytesWritten = 0
  /** True once the disk cap was hit — further appends are dropped. */
  #capped = false
  // Flush promise lifecycle: created on the first append of a burst,
  // resolved when the drain loop fully empties the queue.
  #flushPromise: Promise<void> | null = null
  #flushResolve: (() => void) | null = null

  constructor(taskId: string) {
    this.#path = getTaskOutputPath(taskId)
  }

  /** Enqueue a chunk for async append; starts a drain loop if idle. */
  append(content: string): void {
    if (this.#capped) {
      return
    }
    // content.length (UTF-16 code units) undercounts UTF-8 bytes by at most ~3×.
    // Acceptable for a coarse disk-fill guard — avoids re-scanning every chunk.
    this.#bytesWritten += content.length
    if (this.#bytesWritten > MAX_TASK_OUTPUT_BYTES) {
      this.#capped = true
      this.#queue.push(
        `\n[output truncated: exceeded ${MAX_TASK_OUTPUT_BYTES_DISPLAY} disk cap]\n`,
      )
    } else {
      this.#queue.push(content)
    }
    if (!this.#flushPromise) {
      this.#flushPromise = new Promise<void>(resolve => {
        this.#flushResolve = resolve
      })
      void track(this.#drain())
    }
  }

  /** Resolves when all currently-queued chunks are on disk. */
  flush(): Promise<void> {
    return this.#flushPromise ?? Promise.resolve()
  }

  /** Drop queued chunks. An in-flight write may still complete. */
  cancel(): void {
    this.#queue.length = 0
  }

  /**
   * Open the file, write until the queue is empty, then close the handle.
   * The outer loop re-opens and continues if an append landed while the
   * handle was closing.
   */
  async #drainAllChunks(): Promise<void> {
    while (true) {
      try {
        if (!this.#fileHandle) {
          await ensureOutputDir()
          this.#fileHandle = await open(
            this.#path,
            process.platform === 'win32'
              ? 'a'
              : fsConstants.O_WRONLY |
                  fsConstants.O_APPEND |
                  fsConstants.O_CREAT |
                  O_NOFOLLOW,
          )
        }
        while (true) {
          await this.#writeAllChunks()
          if (this.#queue.length === 0) {
            break
          }
        }
      } finally {
        // Always release the fd, even if open/write threw.
        if (this.#fileHandle) {
          const fileHandle = this.#fileHandle
          this.#fileHandle = null
          await fileHandle.close()
        }
      }
      // you could have another .append() while we're waiting for the file to close, so we check the queue again before fully exiting
      if (this.#queue.length) {
        continue
      }
      break
    }
  }

  #writeAllChunks(): Promise<void> {
    // This code is extremely precise.
    // You **must not** add an await here!! That will cause memory to balloon as the queue grows.
    // It's okay to add an `await` to the caller of this method (e.g. #drainAllChunks) because that won't cause Buffer[] to be kept alive in memory.
    return this.#fileHandle!.appendFile(
      // This variable needs to get GC'd ASAP.
      this.#queueToBuffers(),
    )
  }

  /** Keep this in a separate method so that GC doesn't keep it alive for any longer than it should. */
  #queueToBuffers(): Buffer {
    // Use .splice to in-place mutate the array, informing the GC it can free it.
    const queue = this.#queue.splice(0, this.#queue.length)
    let totalLength = 0
    for (const str of queue) {
      // byteLength, not .length — UTF-8 bytes may exceed UTF-16 code units.
      totalLength += Buffer.byteLength(str, 'utf8')
    }
    const buffer = Buffer.allocUnsafe(totalLength)
    let offset = 0
    for (const str of queue) {
      offset += buffer.write(str, offset, 'utf8')
    }
    return buffer
  }

  /** Drain wrapper: one retry on failure, then resolve the flush promise. */
  async #drain(): Promise<void> {
    try {
      await this.#drainAllChunks()
    } catch (e) {
      // Transient fs errors (EMFILE on busy CI, EPERM on Windows pending-
      // delete) previously rode up through `void this.#drain()` as an
      // unhandled rejection while the flush promise resolved anyway — callers
      // saw an empty file with no error. Retry once for the transient case
      // (queue is intact if open() failed), then log and give up.
      logError(e)
      if (this.#queue.length > 0) {
        try {
          await this.#drainAllChunks()
        } catch (e2) {
          logError(e2)
        }
      }
    } finally {
      // Resolve the flush promise last, after clearing state, so a new
      // append after this point starts a fresh drain cycle.
      const resolve = this.#flushResolve!
      this.#flushPromise = null
      this.#flushResolve = null
      resolve()
    }
  }
}
const outputs = new Map<string, DiskTaskOutput>()

/**
 * Test helper — cancel pending writes, await in-flight ops, clear the map.
 * backgroundShells.test.ts and other task tests spawn real shells that
 * write through this module without afterEach cleanup; their entries
 * leak into diskOutput.test.ts on the same shard.
 *
 * Awaits all tracked promises until the set stabilizes — a settling promise
 * may spawn another (initTaskOutputAsSymlink's catch → initTaskOutput).
 * Call this in afterEach BEFORE rmSync to avoid async-ENOENT-after-teardown.
 */
export async function _clearOutputsForTest(): Promise<void> {
  outputs.forEach(output => output.cancel())
  while (_pendingOps.size > 0) {
    await Promise.allSettled(Array.from(_pendingOps))
  }
  outputs.clear()
}
function getOrCreateOutput(taskId: string): DiskTaskOutput {
let output = outputs.get(taskId)
if (!output) {
output = new DiskTaskOutput(taskId)
outputs.set(taskId, output)
}
return output
}
/**
 * Append output to a task's disk file asynchronously.
 * Creates the file if it doesn't exist.
 */
export function appendTaskOutput(taskId: string, content: string): void {
  const writer = getOrCreateOutput(taskId)
  writer.append(content)
}
/**
* Wait for all pending writes for a task to complete.
* Useful before reading output to ensure all data is flushed.
*/
export async function flushTaskOutput(taskId: string): Promise<void> {
const output = outputs.get(taskId)
if (output) {
await output.flush()
}
}
/**
 * Evict a task's DiskTaskOutput from the in-memory map after flushing.
 * Unlike cleanupTaskOutput, this does not delete the output file on disk.
 * Call this when a task completes and its output has been consumed.
 */
export function evictTaskOutput(taskId: string): Promise<void> {
  const run = async (): Promise<void> => {
    const output = outputs.get(taskId)
    if (!output) {
      return
    }
    await output.flush()
    outputs.delete(taskId)
  }
  return track(run())
}
/**
 * Get delta (new content) since last read.
 * Reads only from the byte offset, up to maxBytes — never loads the full file.
 */
export async function getTaskOutputDelta(
  taskId: string,
  fromOffset: number,
  maxBytes: number = DEFAULT_MAX_READ_BYTES,
): Promise<{ content: string; newOffset: number }> {
  const unchanged = { content: '', newOffset: fromOffset }
  try {
    const range = await readFileRange(
      getTaskOutputPath(taskId),
      fromOffset,
      maxBytes,
    )
    if (!range) {
      return unchanged
    }
    return {
      content: range.content,
      newOffset: fromOffset + range.bytesRead,
    }
  } catch (e) {
    // ENOENT just means the task hasn't produced a file yet.
    if (getErrnoCode(e) !== 'ENOENT') {
      logError(e)
    }
    return unchanged
  }
}
/**
 * Get output for a task, reading the tail of the file.
 * Caps at maxBytes to avoid loading multi-GB files into memory.
 */
export async function getTaskOutput(
  taskId: string,
  maxBytes: number = DEFAULT_MAX_READ_BYTES,
): Promise<string> {
  try {
    const tail = await tailFile(getTaskOutputPath(taskId), maxBytes)
    const omitted = tail.bytesTotal - tail.bytesRead
    if (omitted > 0) {
      // Prefix a note so callers know the tail is partial.
      return `[${Math.round(omitted / 1024)}KB of earlier output omitted]\n${tail.content}`
    }
    return tail.content
  } catch (e) {
    // Missing file = no output yet; anything else is logged.
    if (getErrnoCode(e) !== 'ENOENT') {
      logError(e)
    }
    return ''
  }
}
/**
 * Get the current size (offset) of a task's output file.
 */
export async function getTaskOutputSize(taskId: string): Promise<number> {
  try {
    const info = await stat(getTaskOutputPath(taskId))
    return info.size
  } catch (e) {
    // Missing file = zero bytes written; anything else is logged.
    if (getErrnoCode(e) !== 'ENOENT') {
      logError(e)
    }
    return 0
  }
}
/**
 * Clean up a task's output file and write queue.
 */
export async function cleanupTaskOutput(taskId: string): Promise<void> {
  const output = outputs.get(taskId)
  if (output) {
    output.cancel()
    outputs.delete(taskId)
  }
  try {
    await unlink(getTaskOutputPath(taskId))
  } catch (e) {
    // Already gone is fine; log anything else.
    if (getErrnoCode(e) !== 'ENOENT') {
      logError(e)
    }
  }
}
/**
 * Initialize output file for a new task.
 * Creates an empty file to ensure the path exists.
 */
export function initTaskOutput(taskId: string): Promise<string> {
  const create = async (): Promise<string> => {
    await ensureOutputDir()
    const outputPath = getTaskOutputPath(taskId)
    // SECURITY: O_NOFOLLOW prevents symlink-following attacks from the sandbox.
    // O_EXCL ensures we create a new file and fail if something already exists at this path.
    // On Windows, use string flags — numeric O_EXCL can produce EINVAL through libuv.
    const flags =
      process.platform === 'win32'
        ? 'wx'
        : fsConstants.O_WRONLY |
          fsConstants.O_CREAT |
          fsConstants.O_EXCL |
          O_NOFOLLOW
    const fh = await open(outputPath, flags)
    await fh.close()
    return outputPath
  }
  return track(create())
}
/**
 * Initialize output file as a symlink to another file (e.g., agent transcript).
 * Tries to create the symlink first; if a file already exists, removes it and retries.
 */
export function initTaskOutputAsSymlink(
  taskId: string,
  targetPath: string,
): Promise<string> {
  const run = async (): Promise<string> => {
    try {
      await ensureOutputDir()
      const outputPath = getTaskOutputPath(taskId)
      try {
        await symlink(targetPath, outputPath)
      } catch {
        // Something already occupies the path — replace it and retry once.
        await unlink(outputPath)
        await symlink(targetPath, outputPath)
      }
      return outputPath
    } catch (error) {
      // Fall back to a plain empty output file if symlinking fails.
      logError(error)
      return initTaskOutput(taskId)
    }
  }
  return track(run())
}
+308
View File
@@ -0,0 +1,308 @@
import {
OUTPUT_FILE_TAG,
STATUS_TAG,
SUMMARY_TAG,
TASK_ID_TAG,
TASK_NOTIFICATION_TAG,
TASK_TYPE_TAG,
TOOL_USE_ID_TAG,
} from '../../constants/xml.js'
import type { AppState } from '../../state/AppState.js'
import {
isTerminalTaskStatus,
type TaskStatus,
type TaskType,
} from '../../Task.js'
import type { TaskState } from '../../tasks/types.js'
import { enqueuePendingNotification } from '../messageQueueManager.js'
import { enqueueSdkEvent } from '../sdkEventQueue.js'
import { getTaskOutputDelta, getTaskOutputPath } from './diskOutput.js'
// Standard polling interval for all tasks
export const POLL_INTERVAL_MS = 1000
// Duration to display killed tasks before eviction
export const STOPPED_DISPLAY_MS = 3_000
// Grace period for terminal local_agent tasks in the coordinator panel
export const PANEL_GRACE_MS = 30_000

// Attachment type for task status updates
export type TaskAttachment = {
  type: 'task_status'
  taskId: string
  // Optional — present when the task is tied to a specific tool_use.
  toolUseId?: string
  taskType: TaskType
  status: TaskStatus
  description: string
  deltaSummary: string | null // New output since last attachment
}

// Functional state setter used throughout this module (React-style updater).
type SetAppState = (updater: (prev: AppState) => AppState) => void
/**
 * Update a task's state in AppState.
 * Helper function for task implementations.
 * Generic to allow type-safe updates for specific task types.
 */
export function updateTaskState<T extends TaskState>(
  taskId: string,
  setAppState: SetAppState,
  updater: (task: T) => T,
): void {
  setAppState(prev => {
    const current = prev.tasks?.[taskId] as T | undefined
    if (!current) {
      return prev
    }
    const next = updater(current)
    // Identity return means "no change" — keep the same state object so
    // s.tasks subscribers don't re-render on unchanged state.
    if (next === current) {
      return prev
    }
    return {
      ...prev,
      tasks: {
        ...prev.tasks,
        [taskId]: next,
      },
    }
  })
}
/**
 * Register a new task in AppState.
 */
export function registerTask(task: TaskState, setAppState: SetAppState): void {
  let replacedExisting = false
  setAppState(prev => {
    const prior = prev.tasks[task.id]
    replacedExisting = prior !== undefined
    // Carry forward UI-held state on re-register (resumeAgentBackground
    // replaces the task; user's retain shouldn't reset). startTime keeps
    // the panel sort stable; messages + diskLoaded preserve the viewed
    // transcript across the replace (the user's just-appended prompt lives
    // in messages and isn't on disk yet).
    const next =
      prior && 'retain' in prior
        ? {
            ...task,
            retain: prior.retain,
            startTime: prior.startTime,
            messages: prior.messages,
            diskLoaded: prior.diskLoaded,
            pendingMessages: prior.pendingMessages,
          }
        : task
    return { ...prev, tasks: { ...prev.tasks, [task.id]: next } }
  })
  if (replacedExisting) {
    // Replacement (resume) — not a new start. Skip to avoid double-emit.
    return
  }
  enqueueSdkEvent({
    type: 'system',
    subtype: 'task_started',
    task_id: task.id,
    tool_use_id: task.toolUseId,
    description: task.description,
    task_type: task.type,
    workflow_name:
      'workflowName' in task
        ? (task.workflowName as string | undefined)
        : undefined,
    prompt: 'prompt' in task ? (task.prompt as string) : undefined,
  })
}
/**
 * Eagerly evict a terminal task from AppState.
 * The task must be in a terminal state (completed/failed/killed) with notified=true.
 * This allows memory to be freed without waiting for the next query loop iteration.
 * The lazy GC in generateTaskAttachments() remains as a safety net.
 */
export function evictTerminalTask(
  taskId: string,
  setAppState: SetAppState,
): void {
  setAppState(prev => {
    const task = prev.tasks?.[taskId]
    // Only fully-consumed terminal tasks are eligible.
    if (!task || !isTerminalTaskStatus(task.status) || !task.notified) {
      return prev
    }
    // Panel grace period — blocks eviction until deadline passes.
    // 'retain' in task narrows to LocalAgentTaskState (the only type with
    // that field); evictAfter is optional so 'evictAfter' in task would
    // miss tasks that haven't had it set yet.
    if ('retain' in task && (task.evictAfter ?? Infinity) > Date.now()) {
      return prev
    }
    const remaining = { ...prev.tasks }
    delete remaining[taskId]
    return { ...prev, tasks: remaining }
  })
}
/**
 * Get all running tasks.
 */
export function getRunningTasks(state: AppState): TaskState[] {
  const running: TaskState[] = []
  for (const task of Object.values(state.tasks ?? {})) {
    if (task.status === 'running') {
      running.push(task)
    }
  }
  return running
}
/**
* Generate attachments for tasks with new output or status changes.
* Called by the framework to create push notifications.
*/
export async function generateTaskAttachments(state: AppState): Promise<{
attachments: TaskAttachment[]
// Only the offset patch — NOT the full task. The task may transition to
// completed during getTaskOutputDelta's async disk read, and spreading the
// full stale snapshot would clobber that transition (zombifying the task).
updatedTaskOffsets: Record<string, number>
evictedTaskIds: string[]
}> {
const attachments: TaskAttachment[] = []
const updatedTaskOffsets: Record<string, number> = {}
const evictedTaskIds: string[] = []
const tasks = state.tasks ?? {}
for (const taskState of Object.values(tasks)) {
if (taskState.notified) {
switch (taskState.status) {
case 'completed':
case 'failed':
case 'killed':
// Evict terminal tasks — they've been consumed and can be GC'd
evictedTaskIds.push(taskState.id)
continue
case 'pending':
// Keep in map — hasn't run yet, but parent already knows about it
continue
case 'running':
// Fall through to running logic below
break
}
}
if (taskState.status === 'running') {
const delta = await getTaskOutputDelta(
taskState.id,
taskState.outputOffset,
)
if (delta.content) {
updatedTaskOffsets[taskState.id] = delta.newOffset
}
}
// Completed tasks are NOT notified here — each task type handles its own
// completion notification via enqueuePendingNotification(). Generating
// attachments here would race with those per-type callbacks, causing
// dual delivery (one inline attachment + one separate API turn).
}
return { attachments, updatedTaskOffsets, evictedTaskIds }
}
/**
 * Apply the outputOffset patches and evictions from generateTaskAttachments.
 * Merges patches against FRESH prev.tasks (not the stale pre-await snapshot),
 * so concurrent status transitions aren't clobbered.
 */
export function applyTaskOffsetsAndEvictions(
  setAppState: SetAppState,
  updatedTaskOffsets: Record<string, number>,
  evictedTaskIds: string[],
): void {
  const offsetEntries = Object.entries(updatedTaskOffsets)
  if (offsetEntries.length === 0 && evictedTaskIds.length === 0) {
    return
  }
  setAppState(prev => {
    const nextTasks = { ...prev.tasks }
    let dirty = false
    for (const [id, offset] of offsetEntries) {
      const fresh = nextTasks[id]
      // Re-check status on fresh state — task may have completed during the
      // await. If it's no longer running, the offset update is moot.
      if (fresh?.status !== 'running') {
        continue
      }
      nextTasks[id] = { ...fresh, outputOffset: offset }
      dirty = true
    }
    for (const id of evictedTaskIds) {
      const fresh = nextTasks[id]
      // Re-check terminal+notified on fresh state (TOCTOU: resume may have
      // replaced the task during the generateTaskAttachments await)
      if (!fresh || !isTerminalTaskStatus(fresh.status) || !fresh.notified) {
        continue
      }
      if ('retain' in fresh && (fresh.evictAfter ?? Infinity) > Date.now()) {
        continue
      }
      delete nextTasks[id]
      dirty = true
    }
    return dirty ? { ...prev, tasks: nextTasks } : prev
  })
}
/**
 * Poll all running tasks and check for updates.
 * This is the main polling loop called by the framework.
 */
export async function pollTasks(
  getAppState: () => AppState,
  setAppState: SetAppState,
): Promise<void> {
  const snapshot = getAppState()
  const result = await generateTaskAttachments(snapshot)
  applyTaskOffsetsAndEvictions(
    setAppState,
    result.updatedTaskOffsets,
    result.evictedTaskIds,
  )
  // Send notifications for completed tasks
  for (const attachment of result.attachments) {
    enqueueTaskNotification(attachment)
  }
}
/**
 * Enqueue a task notification to the message queue.
 * Renders the attachment as an XML-tagged block for the model.
 */
function enqueueTaskNotification(attachment: TaskAttachment): void {
  const { taskId, toolUseId, taskType, status, description } = attachment
  const statusText = getStatusText(status)
  const outputPath = getTaskOutputPath(taskId)
  // Optional line — only emitted when the task is tied to a tool_use.
  const toolUseIdLine = toolUseId
    ? `\n<${TOOL_USE_ID_TAG}>${toolUseId}</${TOOL_USE_ID_TAG}>`
    : ''
  const message = `<${TASK_NOTIFICATION_TAG}>
<${TASK_ID_TAG}>${taskId}</${TASK_ID_TAG}>${toolUseIdLine}
<${TASK_TYPE_TAG}>${taskType}</${TASK_TYPE_TAG}>
<${OUTPUT_FILE_TAG}>${outputPath}</${OUTPUT_FILE_TAG}>
<${STATUS_TAG}>${status}</${STATUS_TAG}>
<${SUMMARY_TAG}>Task "${description}" ${statusText}</${SUMMARY_TAG}>
</${TASK_NOTIFICATION_TAG}>`
  enqueuePendingNotification({ value: message, mode: 'task-notification' })
}
/**
 * Get human-readable status text.
 */
function getStatusText(status: TaskStatus): string {
  // Total lookup table — one entry per TaskStatus member.
  const text: Record<TaskStatus, string> = {
    completed: 'completed successfully',
    failed: 'failed',
    killed: 'was stopped',
    running: 'is running',
    pending: 'is pending',
  }
  return text[status]
}
+38
View File
@@ -0,0 +1,38 @@
import { validateBoundedIntEnvVar } from '../envValidation.js'
import { getTaskOutputPath } from './diskOutput.js'
// Hard ceiling and default for task output returned to the API.
export const TASK_MAX_OUTPUT_UPPER_LIMIT = 160_000
export const TASK_MAX_OUTPUT_DEFAULT = 32_000

/** Effective max output length, honoring the TASK_MAX_OUTPUT_LENGTH env override. */
export function getMaxTaskOutputLength(): number {
  const { effective } = validateBoundedIntEnvVar(
    'TASK_MAX_OUTPUT_LENGTH',
    process.env.TASK_MAX_OUTPUT_LENGTH,
    TASK_MAX_OUTPUT_DEFAULT,
    TASK_MAX_OUTPUT_UPPER_LIMIT,
  )
  return effective
}
/**
 * Format task output for API consumption, truncating if too large.
 * When truncated, includes a header with the file path and returns
 * the last N characters that fit within the limit.
 */
export function formatTaskOutput(
  output: string,
  taskId: string,
): { content: string; wasTruncated: boolean } {
  const maxLen = getMaxTaskOutputLength()
  const wasTruncated = output.length > maxLen
  if (!wasTruncated) {
    return { content: output, wasTruncated }
  }
  const header = `[Truncated. Full output: ${getTaskOutputPath(taskId)}]\n\n`
  // Keep the tail: header.length - maxLen is negative, so this slices
  // the final (maxLen - header.length) characters.
  const tail = output.slice(header.length - maxLen)
  return { content: header + tail, wasTruncated }
}
+36
View File
@@ -0,0 +1,36 @@
import type { SdkWorkflowProgress } from '../../types/tools.js'
import { enqueueSdkEvent } from '../sdkEventQueue.js'
/**
 * Emit a `task_progress` SDK event. Shared by background agents (per tool_use
 * in runAsyncAgentLifecycle) and workflows (per flushProgress batch). Accepts
 * already-computed primitives so callers can derive them from their own state
 * shapes (ProgressTracker for agents, LocalWorkflowTaskState for workflows).
 */
export function emitTaskProgress(params: {
  taskId: string
  toolUseId: string | undefined
  description: string
  startTime: number
  totalTokens: number
  toolUses: number
  lastToolName?: string
  summary?: string
  workflowProgress?: SdkWorkflowProgress[]
}): void {
  const {
    taskId,
    toolUseId,
    description,
    startTime,
    totalTokens,
    toolUses,
    lastToolName,
    summary,
    workflowProgress,
  } = params
  enqueueSdkEvent({
    type: 'system',
    subtype: 'task_progress',
    task_id: taskId,
    tool_use_id: toolUseId,
    description,
    usage: {
      total_tokens: totalTokens,
      tool_uses: toolUses,
      // Elapsed wall-clock time since the task started.
      duration_ms: Date.now() - startTime,
    },
    last_tool_name: lastToolName,
    summary,
    workflow_progress: workflowProgress,
  })
}