init claude-code
This commit is contained in:
@@ -0,0 +1,811 @@
|
||||
import { statSync } from 'fs'
|
||||
import ignore from 'ignore'
|
||||
import * as path from 'path'
|
||||
import {
|
||||
CLAUDE_CONFIG_DIRECTORIES,
|
||||
loadMarkdownFilesForSubdir,
|
||||
} from 'src/utils/markdownConfigLoader.js'
|
||||
import type { SuggestionItem } from '../components/PromptInput/PromptInputFooterSuggestions.js'
|
||||
import {
|
||||
CHUNK_MS,
|
||||
FileIndex,
|
||||
yieldToEventLoop,
|
||||
} from '../native-ts/file-index/index.js'
|
||||
import { logEvent } from '../services/analytics/index.js'
|
||||
import type { FileSuggestionCommandInput } from '../types/fileSuggestion.js'
|
||||
import { getGlobalConfig } from '../utils/config.js'
|
||||
import { getCwd } from '../utils/cwd.js'
|
||||
import { logForDebugging } from '../utils/debug.js'
|
||||
import { errorMessage } from '../utils/errors.js'
|
||||
import { execFileNoThrowWithCwd } from '../utils/execFileNoThrow.js'
|
||||
import { getFsImplementation } from '../utils/fsOperations.js'
|
||||
import { findGitRoot, gitExe } from '../utils/git.js'
|
||||
import {
|
||||
createBaseHookInput,
|
||||
executeFileSuggestionCommand,
|
||||
} from '../utils/hooks.js'
|
||||
import { logError } from '../utils/log.js'
|
||||
import { expandPath } from '../utils/path.js'
|
||||
import { ripGrep } from '../utils/ripgrep.js'
|
||||
import { getInitialSettings } from '../utils/settings/settings.js'
|
||||
import { createSignal } from '../utils/signal.js'
|
||||
|
||||
// Lazily constructed singleton
|
||||
let fileIndex: FileIndex | null = null
|
||||
|
||||
function getFileIndex(): FileIndex {
|
||||
if (!fileIndex) {
|
||||
fileIndex = new FileIndex()
|
||||
}
|
||||
return fileIndex
|
||||
}
|
||||
|
||||
// In-flight cache refresh started by startBackgroundCacheRefresh; null when idle.
let fileListRefreshPromise: Promise<FileIndex> | null = null
// Signal fired when an in-progress index build completes. Lets the
// typeahead UI re-run its last search so partial results upgrade to full.
const indexBuildComplete = createSignal()
export const onIndexBuildComplete = indexBuildComplete.subscribe
// Monotonic generation counter. Async work captures it at start and compares
// on completion so results from before a clearFileSuggestionCaches() call
// are discarded instead of overwriting fresh state.
let cacheGeneration = 0

// Background fetch for untracked files; null when no fetch is in flight.
let untrackedFetchPromise: Promise<void> | null = null

// Store tracked files so we can rebuild index with untracked
let cachedTrackedFiles: string[] = []
// Store config files so mergeUntrackedIntoNormalizedCache preserves them
let cachedConfigFiles: string[] = []
// Store tracked directories so mergeUntrackedIntoNormalizedCache doesn't
// recompute ~270k path.dirname() calls on each merge
let cachedTrackedDirs: string[] = []

// Cache for .ignore/.rgignore patterns (keyed by repoRoot:cwd)
let ignorePatternsCache: ReturnType<typeof ignore> | null = null
let ignorePatternsCacheKey: string | null = null

// Throttle state for background refresh. .git/index mtime triggers an
// immediate refresh when tracked files change (add/checkout/commit/rm).
// The time floor still refreshes every 5s to pick up untracked files,
// which don't bump the index.
let lastRefreshMs = 0
let lastGitIndexMtime: number | null = null

// Signatures of the path lists loaded into the Rust index. Two separate
// signatures because the two loadFromFileList call sites use differently
// structured arrays — a shared signature would ping-pong and never match.
// Skips nucleo.restart() when git ls-files returns an unchanged list
// (e.g. `git add` of an already-tracked file bumps index mtime but not the list).
let loadedTrackedSignature: string | null = null
let loadedMergedSignature: string | null = null
|
||||
|
||||
/**
|
||||
* Clear all file suggestion caches.
|
||||
* Call this when resuming a session to ensure fresh file discovery.
|
||||
*/
|
||||
export function clearFileSuggestionCaches(): void {
|
||||
fileIndex = null
|
||||
fileListRefreshPromise = null
|
||||
cacheGeneration++
|
||||
untrackedFetchPromise = null
|
||||
cachedTrackedFiles = []
|
||||
cachedConfigFiles = []
|
||||
cachedTrackedDirs = []
|
||||
indexBuildComplete.clear()
|
||||
ignorePatternsCache = null
|
||||
ignorePatternsCacheKey = null
|
||||
lastRefreshMs = 0
|
||||
lastGitIndexMtime = null
|
||||
loadedTrackedSignature = null
|
||||
loadedMergedSignature = null
|
||||
}
|
||||
|
||||
/**
|
||||
* Content hash of a path list. A length|first|last sample misses renames of
|
||||
* middle files (same length, same endpoints → stale entry stuck in nucleo).
|
||||
*
|
||||
* Samples every Nth path (plus length). On a 346k-path list this hashes ~700
|
||||
* paths instead of 14MB — enough to catch git operations (checkout, rebase,
|
||||
* add/rm) while running in <1ms. A single mid-list rename that happens to
|
||||
* fall between samples will miss the rebuild, but the 5s refresh floor picks
|
||||
* it up on the next cycle.
|
||||
*/
|
||||
export function pathListSignature(paths: string[]): string {
|
||||
const n = paths.length
|
||||
const stride = Math.max(1, Math.floor(n / 500))
|
||||
let h = 0x811c9dc5 | 0
|
||||
for (let i = 0; i < n; i += stride) {
|
||||
const p = paths[i]!
|
||||
for (let j = 0; j < p.length; j++) {
|
||||
h = ((h ^ p.charCodeAt(j)) * 0x01000193) | 0
|
||||
}
|
||||
h = (h * 0x01000193) | 0
|
||||
}
|
||||
// Stride starts at 0 (first path always hashed); explicitly include last
|
||||
// so single-file add/rm at the tail is caught
|
||||
if (n > 0) {
|
||||
const last = paths[n - 1]!
|
||||
for (let j = 0; j < last.length; j++) {
|
||||
h = ((h ^ last.charCodeAt(j)) * 0x01000193) | 0
|
||||
}
|
||||
}
|
||||
return `${n}:${(h >>> 0).toString(16)}`
|
||||
}
|
||||
|
||||
/**
|
||||
* Stat .git/index to detect git state changes without spawning git ls-files.
|
||||
* Returns null for worktrees (.git is a file → ENOTDIR), fresh repos with no
|
||||
* index yet (ENOENT), and non-git dirs — caller falls back to time throttle.
|
||||
*/
|
||||
function getGitIndexMtime(): number | null {
|
||||
const repoRoot = findGitRoot(getCwd())
|
||||
if (!repoRoot) return null
|
||||
try {
|
||||
// eslint-disable-next-line custom-rules/no-sync-fs -- mtimeMs is the operation here, not a pre-check. findGitRoot above already stat-walks synchronously; one more stat is marginal vs spawning git ls-files on every keystroke. Async would force startBackgroundCacheRefresh to become async, breaking the synchronous fileListRefreshPromise contract at the cold-start await site.
|
||||
return statSync(path.join(repoRoot, '.git', 'index')).mtimeMs
|
||||
} catch {
|
||||
return null
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Normalize git paths relative to originalCwd
|
||||
*/
|
||||
function normalizeGitPaths(
|
||||
files: string[],
|
||||
repoRoot: string,
|
||||
originalCwd: string,
|
||||
): string[] {
|
||||
if (originalCwd === repoRoot) {
|
||||
return files
|
||||
}
|
||||
return files.map(f => {
|
||||
const absolutePath = path.join(repoRoot, f)
|
||||
return path.relative(originalCwd, absolutePath)
|
||||
})
|
||||
}
|
||||
|
||||
/**
 * Merge already-normalized untracked files into the cache.
 *
 * Rebuilds the shared FileIndex from the cached tracked files/config files/
 * directories plus the given untracked files, unless the combined list's
 * signature matches what is already loaded.
 *
 * @param normalizedUntracked Untracked file paths, already normalized
 *   relative to the cwd (see normalizeGitPaths).
 */
async function mergeUntrackedIntoNormalizedCache(
  normalizedUntracked: string[],
): Promise<void> {
  // Nothing to merge.
  if (normalizedUntracked.length === 0) return
  // No base index to merge into — a clear happened or the tracked load
  // hasn't populated the caches yet.
  if (!fileIndex || cachedTrackedFiles.length === 0) return

  // Directories for the untracked files; tracked directories are reused
  // from cachedTrackedDirs to avoid recomputing them on every merge.
  const untrackedDirs = await getDirectoryNamesAsync(normalizedUntracked)
  const allPaths = [
    ...cachedTrackedFiles,
    ...cachedConfigFiles,
    ...cachedTrackedDirs,
    ...normalizedUntracked,
    ...untrackedDirs,
  ]
  // Skip the expensive index rebuild when the merged list is unchanged.
  const sig = pathListSignature(allPaths)
  if (sig === loadedMergedSignature) {
    logForDebugging(
      `[FileIndex] skipped index rebuild — merged paths unchanged`,
    )
    return
  }
  await fileIndex.loadFromFileListAsync(allPaths).done
  loadedMergedSignature = sig
  logForDebugging(
    `[FileIndex] rebuilt index with ${cachedTrackedFiles.length} tracked + ${normalizedUntracked.length} untracked files`,
  )
}
|
||||
|
||||
/**
|
||||
* Load ripgrep-specific ignore patterns from .ignore or .rgignore files
|
||||
* Returns an ignore instance if patterns were found, null otherwise
|
||||
* Results are cached per repoRoot:cwd combination
|
||||
*/
|
||||
async function loadRipgrepIgnorePatterns(
|
||||
repoRoot: string,
|
||||
cwd: string,
|
||||
): Promise<ReturnType<typeof ignore> | null> {
|
||||
const cacheKey = `${repoRoot}:${cwd}`
|
||||
|
||||
// Return cached result if available
|
||||
if (ignorePatternsCacheKey === cacheKey) {
|
||||
return ignorePatternsCache
|
||||
}
|
||||
|
||||
const fs = getFsImplementation()
|
||||
const ignoreFiles = ['.ignore', '.rgignore']
|
||||
const directories = [...new Set([repoRoot, cwd])]
|
||||
|
||||
const ig = ignore()
|
||||
let hasPatterns = false
|
||||
|
||||
const paths = directories.flatMap(dir =>
|
||||
ignoreFiles.map(f => path.join(dir, f)),
|
||||
)
|
||||
const contents = await Promise.all(
|
||||
paths.map(p => fs.readFile(p, { encoding: 'utf8' }).catch(() => null)),
|
||||
)
|
||||
for (const [i, content] of contents.entries()) {
|
||||
if (content === null) continue
|
||||
ig.add(content)
|
||||
hasPatterns = true
|
||||
logForDebugging(`[FileIndex] loaded ignore patterns from ${paths[i]}`)
|
||||
}
|
||||
|
||||
const result = hasPatterns ? ig : null
|
||||
ignorePatternsCache = result
|
||||
ignorePatternsCacheKey = cacheKey
|
||||
|
||||
return result
|
||||
}
|
||||
|
||||
/**
 * Get files using git ls-files (much faster than ripgrep for git repos)
 * Returns tracked files immediately, fetches untracked in background
 * @param abortSignal Aborts the foreground tracked-files git invocation
 * @param respectGitignore If true, excludes gitignored files from untracked results
 * @returns Normalized tracked file paths, or null to tell the caller to
 *   fall back to ripgrep (not a git repo, or git failed)
 *
 * Note: Unlike ripgrep --follow, git ls-files doesn't follow symlinks.
 * This is intentional as git tracks symlinks as symlinks.
 */
async function getFilesUsingGit(
  abortSignal: AbortSignal,
  respectGitignore: boolean,
): Promise<string[] | null> {
  const startTime = Date.now()
  logForDebugging(`[FileIndex] getFilesUsingGit called`)

  // Check if we're in a git repo. findGitRoot is LRU-memoized per path.
  const repoRoot = findGitRoot(getCwd())
  if (!repoRoot) {
    logForDebugging(`[FileIndex] not a git repo, returning null`)
    return null
  }

  try {
    const cwd = getCwd()

    // Get tracked files (fast - reads from git index)
    // Run from repoRoot so paths are relative to repo root, not CWD
    const lsFilesStart = Date.now()
    const trackedResult = await execFileNoThrowWithCwd(
      gitExe(),
      ['-c', 'core.quotepath=false', 'ls-files', '--recurse-submodules'],
      { timeout: 5000, abortSignal, cwd: repoRoot },
    )
    logForDebugging(
      `[FileIndex] git ls-files (tracked) took ${Date.now() - lsFilesStart}ms`,
    )

    // Non-zero exit → let the caller fall back to ripgrep.
    if (trackedResult.code !== 0) {
      logForDebugging(
        `[FileIndex] git ls-files failed (code=${trackedResult.code}, stderr=${trackedResult.stderr}), falling back to ripgrep`,
      )
      return null
    }

    const trackedFiles = trackedResult.stdout.trim().split('\n').filter(Boolean)

    // Normalize paths relative to the current working directory
    let normalizedTracked = normalizeGitPaths(trackedFiles, repoRoot, cwd)

    // Apply .ignore/.rgignore patterns if present (faster than falling back to ripgrep)
    const ignorePatterns = await loadRipgrepIgnorePatterns(repoRoot, cwd)
    if (ignorePatterns) {
      const beforeCount = normalizedTracked.length
      normalizedTracked = ignorePatterns.filter(normalizedTracked)
      logForDebugging(
        `[FileIndex] applied ignore patterns: ${beforeCount} -> ${normalizedTracked.length} files`,
      )
    }

    // Cache tracked files for later merge with untracked
    cachedTrackedFiles = normalizedTracked

    const duration = Date.now() - startTime
    logForDebugging(
      `[FileIndex] git ls-files: ${normalizedTracked.length} tracked files in ${duration}ms`,
    )

    logEvent('tengu_file_suggestions_git_ls_files', {
      file_count: normalizedTracked.length,
      tracked_count: normalizedTracked.length,
      untracked_count: 0,
      duration_ms: duration,
    })

    // Start background fetch for untracked files (don't await)
    if (!untrackedFetchPromise) {
      // --exclude-standard honors .gitignore; without it all others are listed.
      const untrackedArgs = respectGitignore
        ? [
            '-c',
            'core.quotepath=false',
            'ls-files',
            '--others',
            '--exclude-standard',
          ]
        : ['-c', 'core.quotepath=false', 'ls-files', '--others']

      // Capture the generation so a cache clear during the fetch invalidates
      // the merge below.
      const generation = cacheGeneration
      untrackedFetchPromise = execFileNoThrowWithCwd(gitExe(), untrackedArgs, {
        timeout: 10000,
        cwd: repoRoot,
      })
        .then(async untrackedResult => {
          if (generation !== cacheGeneration) {
            return // Cache was cleared; don't merge stale untracked files
          }
          if (untrackedResult.code === 0) {
            const rawUntrackedFiles = untrackedResult.stdout
              .trim()
              .split('\n')
              .filter(Boolean)

            // Normalize paths BEFORE applying ignore patterns (consistent with tracked files)
            let normalizedUntracked = normalizeGitPaths(
              rawUntrackedFiles,
              repoRoot,
              cwd,
            )

            // Apply .ignore/.rgignore patterns to normalized untracked files
            const ignorePatterns = await loadRipgrepIgnorePatterns(
              repoRoot,
              cwd,
            )
            if (ignorePatterns && normalizedUntracked.length > 0) {
              const beforeCount = normalizedUntracked.length
              normalizedUntracked = ignorePatterns.filter(normalizedUntracked)
              logForDebugging(
                `[FileIndex] applied ignore patterns to untracked: ${beforeCount} -> ${normalizedUntracked.length} files`,
              )
            }

            logForDebugging(
              `[FileIndex] background untracked fetch: ${normalizedUntracked.length} files`,
            )
            // Pass already-normalized files directly to merge function
            void mergeUntrackedIntoNormalizedCache(normalizedUntracked)
          }
        })
        .catch(error => {
          // Best-effort: untracked files just won't appear until next refresh.
          logForDebugging(
            `[FileIndex] background untracked fetch failed: ${error}`,
          )
        })
        .finally(() => {
          untrackedFetchPromise = null
        })
    }

    return normalizedTracked
  } catch (error) {
    logForDebugging(`[FileIndex] git ls-files error: ${errorMessage(error)}`)
    return null
  }
}
|
||||
|
||||
/**
|
||||
* This function collects all parent directories for each file path
|
||||
* and returns a list of unique directory names with a trailing separator.
|
||||
* For example, if the input is ['src/index.js', 'src/utils/helpers.js'],
|
||||
* the output will be ['src/', 'src/utils/'].
|
||||
* @param files An array of file paths
|
||||
* @returns An array of unique directory names with a trailing separator
|
||||
*/
|
||||
export function getDirectoryNames(files: string[]): string[] {
|
||||
const directoryNames = new Set<string>()
|
||||
collectDirectoryNames(files, 0, files.length, directoryNames)
|
||||
return [...directoryNames].map(d => d + path.sep)
|
||||
}
|
||||
|
||||
/**
|
||||
* Async variant: yields every ~10k files so 270k+ file lists don't block
|
||||
* the main thread for >10ms at a time.
|
||||
*/
|
||||
export async function getDirectoryNamesAsync(
|
||||
files: string[],
|
||||
): Promise<string[]> {
|
||||
const directoryNames = new Set<string>()
|
||||
// Time-based chunking: yield after CHUNK_MS of work so slow machines get
|
||||
// smaller chunks and stay responsive.
|
||||
let chunkStart = performance.now()
|
||||
for (let i = 0; i < files.length; i++) {
|
||||
collectDirectoryNames(files, i, i + 1, directoryNames)
|
||||
if ((i & 0xff) === 0xff && performance.now() - chunkStart > CHUNK_MS) {
|
||||
await yieldToEventLoop()
|
||||
chunkStart = performance.now()
|
||||
}
|
||||
}
|
||||
return [...directoryNames].map(d => d + path.sep)
|
||||
}
|
||||
|
||||
function collectDirectoryNames(
|
||||
files: string[],
|
||||
start: number,
|
||||
end: number,
|
||||
out: Set<string>,
|
||||
): void {
|
||||
for (let i = start; i < end; i++) {
|
||||
let currentDir = path.dirname(files[i]!)
|
||||
// Early exit if we've already processed this directory and all its parents.
|
||||
// Root detection: path.dirname returns its input at the root (fixed point),
|
||||
// so we stop when dirname stops changing. Checking this before add() keeps
|
||||
// the root out of the result set (matching the old path.parse().root guard).
|
||||
// This avoids path.parse() which allocates a 5-field object per file.
|
||||
while (currentDir !== '.' && !out.has(currentDir)) {
|
||||
const parent = path.dirname(currentDir)
|
||||
if (parent === currentDir) break
|
||||
out.add(currentDir)
|
||||
currentDir = parent
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Gets additional files from Claude config directories
|
||||
*/
|
||||
async function getClaudeConfigFiles(cwd: string): Promise<string[]> {
|
||||
const markdownFileArrays = await Promise.all(
|
||||
CLAUDE_CONFIG_DIRECTORIES.map(subdir =>
|
||||
loadMarkdownFilesForSubdir(subdir, cwd),
|
||||
),
|
||||
)
|
||||
return markdownFileArrays.flatMap(markdownFiles =>
|
||||
markdownFiles.map(f => f.filePath),
|
||||
)
|
||||
}
|
||||
|
||||
/**
|
||||
* Gets project files using git ls-files (fast) or ripgrep (fallback)
|
||||
*/
|
||||
async function getProjectFiles(
|
||||
abortSignal: AbortSignal,
|
||||
respectGitignore: boolean,
|
||||
): Promise<string[]> {
|
||||
logForDebugging(
|
||||
`[FileIndex] getProjectFiles called, respectGitignore=${respectGitignore}`,
|
||||
)
|
||||
|
||||
// Try git ls-files first (much faster for git repos)
|
||||
const gitFiles = await getFilesUsingGit(abortSignal, respectGitignore)
|
||||
if (gitFiles !== null) {
|
||||
logForDebugging(
|
||||
`[FileIndex] using git ls-files result (${gitFiles.length} files)`,
|
||||
)
|
||||
return gitFiles
|
||||
}
|
||||
|
||||
// Fall back to ripgrep
|
||||
logForDebugging(
|
||||
`[FileIndex] git ls-files returned null, falling back to ripgrep`,
|
||||
)
|
||||
const startTime = Date.now()
|
||||
const rgArgs = [
|
||||
'--files',
|
||||
'--follow',
|
||||
'--hidden',
|
||||
'--glob',
|
||||
'!.git/',
|
||||
'--glob',
|
||||
'!.svn/',
|
||||
'--glob',
|
||||
'!.hg/',
|
||||
'--glob',
|
||||
'!.bzr/',
|
||||
'--glob',
|
||||
'!.jj/',
|
||||
'--glob',
|
||||
'!.sl/',
|
||||
]
|
||||
if (!respectGitignore) {
|
||||
rgArgs.push('--no-ignore-vcs')
|
||||
}
|
||||
|
||||
const files = await ripGrep(rgArgs, '.', abortSignal)
|
||||
const relativePaths = files.map(f => path.relative(getCwd(), f))
|
||||
|
||||
const duration = Date.now() - startTime
|
||||
logForDebugging(
|
||||
`[FileIndex] ripgrep: ${relativePaths.length} files in ${duration}ms`,
|
||||
)
|
||||
|
||||
logEvent('tengu_file_suggestions_ripgrep', {
|
||||
file_count: relativePaths.length,
|
||||
duration_ms: duration,
|
||||
})
|
||||
|
||||
return relativePaths
|
||||
}
|
||||
|
||||
/**
 * Gets both files and their directory paths for providing path suggestions
 * Uses git ls-files for git repos (fast) or ripgrep as fallback
 * Returns a FileIndex populated for fast fuzzy search
 * @returns The shared FileIndex singleton; on error it is returned as-is
 *   (possibly stale or empty) after logging.
 */
export async function getPathsForSuggestions(): Promise<FileIndex> {
  // Hard cap on file discovery so a wedged git/ripgrep can't hang the UI.
  const signal = AbortSignal.timeout(10_000)
  const index = getFileIndex()

  try {
    // Check project settings first, then fall back to global config
    const projectSettings = getInitialSettings()
    const globalConfig = getGlobalConfig()
    const respectGitignore =
      projectSettings.respectGitignore ?? globalConfig.respectGitignore ?? true

    const cwd = getCwd()
    // File discovery and config-file loading are independent; run in parallel.
    const [projectFiles, configFiles] = await Promise.all([
      getProjectFiles(signal, respectGitignore),
      getClaudeConfigFiles(cwd),
    ])

    // Cache for mergeUntrackedIntoNormalizedCache
    cachedConfigFiles = configFiles

    const allFiles = [...projectFiles, ...configFiles]
    const directories = await getDirectoryNamesAsync(allFiles)
    cachedTrackedDirs = directories
    const allPathsList = [...directories, ...allFiles]

    // Skip rebuild when the list is unchanged. This is the common case
    // during a typing session — git ls-files returns the same output.
    const sig = pathListSignature(allPathsList)
    if (sig !== loadedTrackedSignature) {
      // Await the full build so cold-start returns complete results. The
      // build yields every ~4ms so the UI stays responsive — user can keep
      // typing during the ~120ms wait without input lag.
      await index.loadFromFileListAsync(allPathsList).done
      loadedTrackedSignature = sig
      // We just replaced the merged index with tracked-only data. Force
      // the next untracked merge to rebuild even if its own sig matches.
      loadedMergedSignature = null
    } else {
      logForDebugging(
        `[FileIndex] skipped index rebuild — tracked paths unchanged`,
      )
    }
  } catch (error) {
    // Best-effort: log and return whatever index state we have.
    logError(error)
  }

  return index
}
|
||||
|
||||
/**
|
||||
* Finds the common prefix between two strings
|
||||
*/
|
||||
function findCommonPrefix(a: string, b: string): string {
|
||||
const minLength = Math.min(a.length, b.length)
|
||||
let i = 0
|
||||
while (i < minLength && a[i] === b[i]) {
|
||||
i++
|
||||
}
|
||||
return a.substring(0, i)
|
||||
}
|
||||
|
||||
/**
|
||||
* Finds the longest common prefix among an array of suggestion items
|
||||
*/
|
||||
export function findLongestCommonPrefix(suggestions: SuggestionItem[]): string {
|
||||
if (suggestions.length === 0) return ''
|
||||
|
||||
const strings = suggestions.map(item => item.displayText)
|
||||
let prefix = strings[0]!
|
||||
for (let i = 1; i < strings.length; i++) {
|
||||
const currentString = strings[i]!
|
||||
prefix = findCommonPrefix(prefix, currentString)
|
||||
if (prefix === '') return ''
|
||||
}
|
||||
return prefix
|
||||
}
|
||||
|
||||
/**
|
||||
* Creates a file suggestion item
|
||||
*/
|
||||
function createFileSuggestionItem(
|
||||
filePath: string,
|
||||
score?: number,
|
||||
): SuggestionItem {
|
||||
return {
|
||||
id: `file-${filePath}`,
|
||||
displayText: filePath,
|
||||
metadata: score !== undefined ? { score } : undefined,
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Find matching files and folders for a given query using the TS file index
|
||||
*/
|
||||
const MAX_SUGGESTIONS = 15
|
||||
function findMatchingFiles(
|
||||
fileIndex: FileIndex,
|
||||
partialPath: string,
|
||||
): SuggestionItem[] {
|
||||
const results = fileIndex.search(partialPath, MAX_SUGGESTIONS)
|
||||
return results.map(result =>
|
||||
createFileSuggestionItem(result.path, result.score),
|
||||
)
|
||||
}
|
||||
|
||||
/**
 * Starts a background refresh of the file index cache if not already in progress.
 *
 * Throttled: when a cache already exists, we skip the refresh unless git state
 * has actually changed. This prevents every keystroke from spawning git ls-files
 * and rebuilding the nucleo index.
 */
const REFRESH_THROTTLE_MS = 5_000
export function startBackgroundCacheRefresh(): void {
  // A refresh is already running; it will publish results when done.
  if (fileListRefreshPromise) return

  // Throttle only when a cache exists — cold start must always populate.
  // Refresh immediately when .git/index mtime changed (tracked files).
  // Otherwise refresh at most once per 5s — this floor picks up new UNTRACKED
  // files, which don't bump .git/index. The signature checks downstream skip
  // the rebuild when the 5s refresh finds nothing actually changed.
  const indexMtime = getGitIndexMtime()
  if (fileIndex) {
    const gitStateChanged =
      indexMtime !== null && indexMtime !== lastGitIndexMtime
    if (!gitStateChanged && Date.now() - lastRefreshMs < REFRESH_THROTTLE_MS) {
      return
    }
  }

  // Capture the generation so a cache clear during the refresh makes us
  // drop the stale result instead of publishing it.
  const generation = cacheGeneration
  const refreshStart = Date.now()
  // Ensure the FileIndex singleton exists — it's progressively queryable
  // via readyCount while the build runs. Callers searching early get partial
  // results; indexBuildComplete fires after .done so they can re-search.
  getFileIndex()
  fileListRefreshPromise = getPathsForSuggestions()
    .then(result => {
      if (generation !== cacheGeneration) {
        return result // Cache was cleared; don't overwrite with stale data
      }
      fileListRefreshPromise = null
      indexBuildComplete.emit()
      // Commit the start-time mtime observation on success. If git state
      // changed mid-refresh, the next call will see the newer mtime and
      // correctly refresh again.
      lastGitIndexMtime = indexMtime
      lastRefreshMs = Date.now()
      logForDebugging(
        `[FileIndex] cache refresh completed in ${Date.now() - refreshStart}ms`,
      )
      return result
    })
    .catch(error => {
      logForDebugging(
        `[FileIndex] Cache refresh failed: ${errorMessage(error)}`,
      )
      logError(error)
      if (generation === cacheGeneration) {
        fileListRefreshPromise = null // Allow retry on next call
      }
      return getFileIndex()
    })
}
|
||||
|
||||
/**
|
||||
* Gets the top-level files and directories in the current working directory
|
||||
* @returns Array of file/directory paths in the current directory
|
||||
*/
|
||||
async function getTopLevelPaths(): Promise<string[]> {
|
||||
const fs = getFsImplementation()
|
||||
const cwd = getCwd()
|
||||
|
||||
try {
|
||||
const entries = await fs.readdir(cwd)
|
||||
return entries.map(entry => {
|
||||
const fullPath = path.join(cwd, entry.name)
|
||||
const relativePath = path.relative(cwd, fullPath)
|
||||
// Add trailing separator for directories
|
||||
return entry.isDirectory() ? relativePath + path.sep : relativePath
|
||||
})
|
||||
} catch (error) {
|
||||
logError(error as Error)
|
||||
return []
|
||||
}
|
||||
}
|
||||
|
||||
/**
 * Generate file suggestions for the current input and cursor position
 * @param partialPath The partial file path to match
 * @param showOnEmpty Whether to show suggestions even if partialPath is empty (used for @ symbol)
 * @returns Up to MAX_SUGGESTIONS items; [] on empty input (unless showOnEmpty)
 *   or on error.
 */
export async function generateFileSuggestions(
  partialPath: string,
  showOnEmpty = false,
): Promise<SuggestionItem[]> {
  // If input is empty and we don't want to show suggestions on empty, return nothing
  if (!partialPath && !showOnEmpty) {
    return []
  }

  // Use custom command directly if configured. We don't mix in our config files
  // because the command returns pre-ranked results using its own search logic.
  if (getInitialSettings().fileSuggestion?.type === 'command') {
    const input: FileSuggestionCommandInput = {
      ...createBaseHookInput(),
      query: partialPath,
    }
    const results = await executeFileSuggestionCommand(input)
    return results.slice(0, MAX_SUGGESTIONS).map(createFileSuggestionItem)
  }

  // If the partial path is empty or just a dot, return current directory suggestions
  if (partialPath === '' || partialPath === '.' || partialPath === './') {
    const topLevelPaths = await getTopLevelPaths()
    // Warm the index in the background for subsequent, longer queries.
    startBackgroundCacheRefresh()
    return topLevelPaths.slice(0, MAX_SUGGESTIONS).map(createFileSuggestionItem)
  }

  const startTime = Date.now()

  try {
    // Kick a background refresh. The index is progressively queryable —
    // searches during build return partial results from ready chunks, and
    // the typeahead callback (setOnIndexBuildComplete) re-fires the search
    // when the build finishes to upgrade partial → full.
    // Note: wasBuilding is sampled BEFORE the call so the log/analytics
    // below reflect whether this query ran against a partial index.
    const wasBuilding = fileListRefreshPromise !== null
    startBackgroundCacheRefresh()

    // Handle both './' and '.\'
    let normalizedPath = partialPath
    const currentDirPrefix = '.' + path.sep
    if (partialPath.startsWith(currentDirPrefix)) {
      normalizedPath = partialPath.substring(2)
    }

    // Handle tilde expansion for home directory
    if (normalizedPath.startsWith('~')) {
      normalizedPath = expandPath(normalizedPath)
    }

    // No index yet (cold start, first keystrokes) → no matches this round.
    const matches = fileIndex
      ? findMatchingFiles(fileIndex, normalizedPath)
      : []

    const duration = Date.now() - startTime
    logForDebugging(
      `[FileIndex] generateFileSuggestions: ${matches.length} results in ${duration}ms (${wasBuilding ? 'partial' : 'full'} index)`,
    )
    logEvent('tengu_file_suggestions_query', {
      duration_ms: duration,
      cache_hit: !wasBuilding,
      result_count: matches.length,
      query_length: partialPath.length,
    })

    return matches
  } catch (error) {
    logError(error)
    return []
  }
}
|
||||
|
||||
/**
|
||||
* Apply a file suggestion to the input
|
||||
*/
|
||||
export function applyFileSuggestion(
|
||||
suggestion: string | SuggestionItem,
|
||||
input: string,
|
||||
partialPath: string,
|
||||
startPos: number,
|
||||
onInputChange: (value: string) => void,
|
||||
setCursorOffset: (offset: number) => void,
|
||||
): void {
|
||||
// Extract suggestion text from string or SuggestionItem
|
||||
const suggestionText =
|
||||
typeof suggestion === 'string' ? suggestion : suggestion.displayText
|
||||
|
||||
// Replace the partial path with the selected file path
|
||||
const newInput =
|
||||
input.substring(0, startPos) +
|
||||
suggestionText +
|
||||
input.substring(startPos + partialPath.length)
|
||||
onInputChange(newInput)
|
||||
|
||||
// Move cursor to end of the file path
|
||||
const newCursorPos = startPos + suggestionText.length
|
||||
setCursorOffset(newCursorPos)
|
||||
}
|
||||
@@ -0,0 +1,56 @@
|
||||
import { feature } from 'bun:bundle'
|
||||
import { useEffect, useRef } from 'react'
|
||||
import { useNotifications } from 'src/context/notifications.js'
|
||||
import { getIsRemoteMode } from '../../bootstrap/state.js'
|
||||
import { useAppState } from '../../state/AppState.js'
|
||||
import type { PermissionMode } from '../../utils/permissions/PermissionMode.js'
|
||||
import {
|
||||
getAutoModeUnavailableNotification,
|
||||
getAutoModeUnavailableReason,
|
||||
} from '../../utils/permissions/permissionSetup.js'
|
||||
import { hasAutoModeOptIn } from '../../utils/settings/settings.js'
|
||||
|
||||
/**
 * Shows a one-shot notification when the shift-tab carousel wraps past where
 * auto mode would have been. Covers all reasons (settings, circuit-breaker,
 * org-allowlist). The startup case (defaultMode: auto silently downgraded) is
 * handled by verifyAutoModeGateAccess → checkAndDisableAutoModeIfNeeded.
 */
export function useAutoModeUnavailableNotification(): void {
  const { addNotification } = useNotifications()
  const mode = useAppState(s => s.toolPermissionContext.mode)
  const isAutoModeAvailable = useAppState(
    s => s.toolPermissionContext.isAutoModeAvailable,
  )
  // One-shot latch: set after the notification is shown once per mount.
  const shownRef = useRef(false)
  // Previous mode, for detecting the *transition* into 'default'.
  const prevModeRef = useRef<PermissionMode>(mode)

  useEffect(() => {
    // Record the transition BEFORE any early return, so prevModeRef stays
    // accurate for the next render even when the guards below bail out.
    const prevMode = prevModeRef.current
    prevModeRef.current = mode

    if (!feature('TRANSCRIPT_CLASSIFIER')) return
    if (getIsRemoteMode()) return
    if (shownRef.current) return

    // "Wrapped past the auto slot": landed on 'default' from some mode other
    // than 'default'/'auto', while auto mode is opted-in but unavailable.
    const wrappedPastAutoSlot =
      mode === 'default' &&
      prevMode !== 'default' &&
      prevMode !== 'auto' &&
      !isAutoModeAvailable &&
      hasAutoModeOptIn()

    if (!wrappedPastAutoSlot) return

    const reason = getAutoModeUnavailableReason()
    if (!reason) return

    shownRef.current = true
    addNotification({
      key: 'auto-mode-unavailable',
      text: getAutoModeUnavailableNotification(reason),
      color: 'warning',
      priority: 'medium',
    })
  }, [mode, isAutoModeAvailable, addNotification])
}
|
||||
File diff suppressed because one or more lines are too long
@@ -0,0 +1,44 @@
|
||||
import { c as _c } from "react/compiler-runtime";
|
||||
import { useEffect, useRef } from 'react';
|
||||
import { useNotifications } from 'src/context/notifications.js';
|
||||
import { getModelDeprecationWarning } from 'src/utils/model/deprecation.js';
|
||||
import { getIsRemoteMode } from '../../bootstrap/state.js';
|
||||
// React Compiler output (memo-cache form of useDeprecationWarningNotification).
// NOTE(review): do not hand-edit the `$` slot reads/writes — correctness
// depends on their exact order; regenerate from the .tsx source instead.
export function useDeprecationWarningNotification(model) {
  const $ = _c(4); // 4-slot memo cache
  const {
    addNotification
  } = useNotifications();
  // Last warning text already shown, so an identical warning is not repeated.
  const lastWarningRef = useRef(null);
  let t0;
  let t1;
  // Rebuild the effect callback and its dep array only when inputs changed.
  if ($[0] !== addNotification || $[1] !== model) {
    t0 = () => {
      if (getIsRemoteMode()) {
        return;
      }
      const deprecationWarning = getModelDeprecationWarning(model);
      // Fire the notification once per distinct warning text.
      if (deprecationWarning && deprecationWarning !== lastWarningRef.current) {
        lastWarningRef.current = deprecationWarning;
        addNotification({
          key: "model-deprecation-warning",
          text: deprecationWarning,
          color: "warning",
          priority: "high"
        });
      }
      // Reset tracking when the model is no longer deprecated.
      if (!deprecationWarning) {
        lastWarningRef.current = null;
      }
    };
    t1 = [model, addNotification];
    $[0] = addNotification;
    $[1] = model;
    $[2] = t0;
    $[3] = t1;
  } else {
    t0 = $[2];
    t1 = $[3];
  }
  useEffect(t0, t1);
}
|
||||
//# sourceMappingURL=data:application/json;charset=utf-8;base64,eyJ2ZXJzaW9uIjozLCJuYW1lcyI6WyJ1c2VFZmZlY3QiLCJ1c2VSZWYiLCJ1c2VOb3RpZmljYXRpb25zIiwiZ2V0TW9kZWxEZXByZWNhdGlvbldhcm5pbmciLCJnZXRJc1JlbW90ZU1vZGUiLCJ1c2VEZXByZWNhdGlvbldhcm5pbmdOb3RpZmljYXRpb24iLCJtb2RlbCIsIiQiLCJfYyIsImFkZE5vdGlmaWNhdGlvbiIsImxhc3RXYXJuaW5nUmVmIiwidDAiLCJ0MSIsImRlcHJlY2F0aW9uV2FybmluZyIsImN1cnJlbnQiLCJrZXkiLCJ0ZXh0IiwiY29sb3IiLCJwcmlvcml0eSJdLCJzb3VyY2VzIjpbInVzZURlcHJlY2F0aW9uV2FybmluZ05vdGlmaWNhdGlvbi50c3giXSwic291cmNlc0NvbnRlbnQiOlsiaW1wb3J0IHsgdXNlRWZmZWN0LCB1c2VSZWYgfSBmcm9tICdyZWFjdCdcbmltcG9ydCB7IHVzZU5vdGlmaWNhdGlvbnMgfSBmcm9tICdzcmMvY29udGV4dC9ub3RpZmljYXRpb25zLmpzJ1xuaW1wb3J0IHsgZ2V0TW9kZWxEZXByZWNhdGlvbldhcm5pbmcgfSBmcm9tICdzcmMvdXRpbHMvbW9kZWwvZGVwcmVjYXRpb24uanMnXG5pbXBvcnQgeyBnZXRJc1JlbW90ZU1vZGUgfSBmcm9tICcuLi8uLi9ib290c3RyYXAvc3RhdGUuanMnXG5cbmV4cG9ydCBmdW5jdGlvbiB1c2VEZXByZWNhdGlvbldhcm5pbmdOb3RpZmljYXRpb24obW9kZWw6IHN0cmluZyk6IHZvaWQge1xuICBjb25zdCB7IGFkZE5vdGlmaWNhdGlvbiB9ID0gdXNlTm90aWZpY2F0aW9ucygpXG4gIGNvbnN0IGxhc3RXYXJuaW5nUmVmID0gdXNlUmVmPHN0cmluZyB8IG51bGw+KG51bGwpXG5cbiAgdXNlRWZmZWN0KCgpID0+IHtcbiAgICBpZiAoZ2V0SXNSZW1vdGVNb2RlKCkpIHJldHVyblxuICAgIGNvbnN0IGRlcHJlY2F0aW9uV2FybmluZyA9IGdldE1vZGVsRGVwcmVjYXRpb25XYXJuaW5nKG1vZGVsKVxuXG4gICAgLy8gU2hvdyB3YXJuaW5nIGlmIG1vZGVsIGlzIGRlcHJlY2F0ZWQgYW5kIHdlIGhhdmVuJ3Qgc2hvd24gdGhpcyBleGFjdCB3YXJuaW5nIHlldFxuICAgIGlmIChkZXByZWNhdGlvbldhcm5pbmcgJiYgZGVwcmVjYXRpb25XYXJuaW5nICE9PSBsYXN0V2FybmluZ1JlZi5jdXJyZW50KSB7XG4gICAgICBsYXN0V2FybmluZ1JlZi5jdXJyZW50ID0gZGVwcmVjYXRpb25XYXJuaW5nXG4gICAgICBhZGROb3RpZmljYXRpb24oe1xuICAgICAgICBrZXk6ICdtb2RlbC1kZXByZWNhdGlvbi13YXJuaW5nJyxcbiAgICAgICAgdGV4dDogZGVwcmVjYXRpb25XYXJuaW5nLFxuICAgICAgICBjb2xvcjogJ3dhcm5pbmcnLFxuICAgICAgICBwcmlvcml0eTogJ2hpZ2gnLFxuICAgICAgfSlcbiAgICB9XG5cbiAgICAvLyBSZXNldCB0cmFja2luZyBpZiBtb2RlbCBjaGFuZ2VzIHRvIG5vbi1kZXByZWNhdGVkXG4gICAgaWYgKCFkZXByZWNhdGlvbldhcm5pbmcpIHtcbiAgICAgIGxhc3RXYXJuaW5nUmVmLmN1cnJlbnQgPSBudWxsXG4gICAgfVxuICB9LCBbbW9kZWwsIGFkZE5vdGlmaWNhdGlvbl0p
XG59XG4iXSwibWFwcGluZ3MiOiI7QUFBQSxTQUFTQSxTQUFTLEVBQUVDLE1BQU0sUUFBUSxPQUFPO0FBQ3pDLFNBQVNDLGdCQUFnQixRQUFRLDhCQUE4QjtBQUMvRCxTQUFTQywwQkFBMEIsUUFBUSxnQ0FBZ0M7QUFDM0UsU0FBU0MsZUFBZSxRQUFRLDBCQUEwQjtBQUUxRCxPQUFPLFNBQUFDLGtDQUFBQyxLQUFBO0VBQUEsTUFBQUMsQ0FBQSxHQUFBQyxFQUFBO0VBQ0w7SUFBQUM7RUFBQSxJQUE0QlAsZ0JBQWdCLENBQUMsQ0FBQztFQUM5QyxNQUFBUSxjQUFBLEdBQXVCVCxNQUFNLENBQWdCLElBQUksQ0FBQztFQUFBLElBQUFVLEVBQUE7RUFBQSxJQUFBQyxFQUFBO0VBQUEsSUFBQUwsQ0FBQSxRQUFBRSxlQUFBLElBQUFGLENBQUEsUUFBQUQsS0FBQTtJQUV4Q0ssRUFBQSxHQUFBQSxDQUFBO01BQ1IsSUFBSVAsZUFBZSxDQUFDLENBQUM7UUFBQTtNQUFBO01BQ3JCLE1BQUFTLGtCQUFBLEdBQTJCViwwQkFBMEIsQ0FBQ0csS0FBSyxDQUFDO01BRzVELElBQUlPLGtCQUFtRSxJQUE3Q0Esa0JBQWtCLEtBQUtILGNBQWMsQ0FBQUksT0FBUTtRQUNyRUosY0FBYyxDQUFBSSxPQUFBLEdBQVdELGtCQUFIO1FBQ3RCSixlQUFlLENBQUM7VUFBQU0sR0FBQSxFQUNULDJCQUEyQjtVQUFBQyxJQUFBLEVBQzFCSCxrQkFBa0I7VUFBQUksS0FBQSxFQUNqQixTQUFTO1VBQUFDLFFBQUEsRUFDTjtRQUNaLENBQUMsQ0FBQztNQUFBO01BSUosSUFBSSxDQUFDTCxrQkFBa0I7UUFDckJILGNBQWMsQ0FBQUksT0FBQSxHQUFXLElBQUg7TUFBQTtJQUN2QixDQUNGO0lBQUVGLEVBQUEsSUFBQ04sS0FBSyxFQUFFRyxlQUFlLENBQUM7SUFBQUYsQ0FBQSxNQUFBRSxlQUFBO0lBQUFGLENBQUEsTUFBQUQsS0FBQTtJQUFBQyxDQUFBLE1BQUFJLEVBQUE7SUFBQUosQ0FBQSxNQUFBSyxFQUFBO0VBQUE7SUFBQUQsRUFBQSxHQUFBSixDQUFBO0lBQUFLLEVBQUEsR0FBQUwsQ0FBQTtFQUFBO0VBbkIzQlAsU0FBUyxDQUFDVyxFQW1CVCxFQUFFQyxFQUF3QixDQUFDO0FBQUEiLCJpZ25vcmVMaXN0IjpbXX0=
|
||||
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
@@ -0,0 +1,26 @@
|
||||
import { checkInstall } from 'src/utils/nativeInstaller/index.js';
|
||||
import { useStartupNotification } from './useStartupNotification.js';
|
||||
// Fires installer check results (from checkInstall) as one-shot startup
// notifications; _temp2 does the async collection and mapping.
export function useInstallMessages() {
  useStartupNotification(_temp2);
}
|
||||
// Collects installer check results and converts each into a notification
// descriptor via _temp.
async function _temp2() {
  const installMessages = await checkInstall();
  return installMessages.map(_temp);
}
|
||||
// Maps one installer message to a notification descriptor.
// Priority: errors or user-action-required → high; path/alias issues →
// medium; everything else → low. Color mirrors the error/non-error split.
function _temp(message, index) {
  const needsAttention = message.type === "error" || message.userActionRequired;
  const isPathIssue = message.type === "path" || message.type === "alias";
  const priority = needsAttention ? "high" : isPathIssue ? "medium" : "low";
  const color = message.type === "error" ? "error" : "warning";
  return {
    key: `install-message-${index}-${message.type}`,
    text: message.message,
    priority,
    color
  };
}
|
||||
//# sourceMappingURL=data:application/json;charset=utf-8;base64,eyJ2ZXJzaW9uIjozLCJuYW1lcyI6WyJjaGVja0luc3RhbGwiLCJ1c2VTdGFydHVwTm90aWZpY2F0aW9uIiwidXNlSW5zdGFsbE1lc3NhZ2VzIiwiX3RlbXAyIiwibWVzc2FnZXMiLCJtYXAiLCJfdGVtcCIsIm1lc3NhZ2UiLCJpbmRleCIsInByaW9yaXR5IiwidHlwZSIsInVzZXJBY3Rpb25SZXF1aXJlZCIsImtleSIsInRleHQiLCJjb2xvciJdLCJzb3VyY2VzIjpbInVzZUluc3RhbGxNZXNzYWdlcy50c3giXSwic291cmNlc0NvbnRlbnQiOlsiaW1wb3J0IHsgY2hlY2tJbnN0YWxsIH0gZnJvbSAnc3JjL3V0aWxzL25hdGl2ZUluc3RhbGxlci9pbmRleC5qcydcbmltcG9ydCB7IHVzZVN0YXJ0dXBOb3RpZmljYXRpb24gfSBmcm9tICcuL3VzZVN0YXJ0dXBOb3RpZmljYXRpb24uanMnXG5cbmV4cG9ydCBmdW5jdGlvbiB1c2VJbnN0YWxsTWVzc2FnZXMoKTogdm9pZCB7XG4gIHVzZVN0YXJ0dXBOb3RpZmljYXRpb24oYXN5bmMgKCkgPT4ge1xuICAgIGNvbnN0IG1lc3NhZ2VzID0gYXdhaXQgY2hlY2tJbnN0YWxsKClcbiAgICByZXR1cm4gbWVzc2FnZXMubWFwKChtZXNzYWdlLCBpbmRleCkgPT4ge1xuICAgICAgbGV0IHByaW9yaXR5OiAnbG93JyB8ICdtZWRpdW0nIHwgJ2hpZ2gnIHwgJ2ltbWVkaWF0ZScgPSAnbG93J1xuICAgICAgaWYgKG1lc3NhZ2UudHlwZSA9PT0gJ2Vycm9yJyB8fCBtZXNzYWdlLnVzZXJBY3Rpb25SZXF1aXJlZCkge1xuICAgICAgICBwcmlvcml0eSA9ICdoaWdoJ1xuICAgICAgfSBlbHNlIGlmIChtZXNzYWdlLnR5cGUgPT09ICdwYXRoJyB8fCBtZXNzYWdlLnR5cGUgPT09ICdhbGlhcycpIHtcbiAgICAgICAgcHJpb3JpdHkgPSAnbWVkaXVtJ1xuICAgICAgfVxuICAgICAgcmV0dXJuIHtcbiAgICAgICAga2V5OiBgaW5zdGFsbC1tZXNzYWdlLSR7aW5kZXh9LSR7bWVzc2FnZS50eXBlfWAsXG4gICAgICAgIHRleHQ6IG1lc3NhZ2UubWVzc2FnZSxcbiAgICAgICAgcHJpb3JpdHksXG4gICAgICAgIGNvbG9yOiBtZXNzYWdlLnR5cGUgPT09ICdlcnJvcicgPyAnZXJyb3InIDogJ3dhcm5pbmcnLFxuICAgICAgfVxuICAgIH0pXG4gIH0pXG59XG4iXSwibWFwcGluZ3MiOiJBQUFBLFNBQVNBLFlBQVksUUFBUSxvQ0FBb0M7QUFDakUsU0FBU0Msc0JBQXNCLFFBQVEsNkJBQTZCO0FBRXBFLE9BQU8sU0FBQUMsbUJBQUE7RUFDTEQsc0JBQXNCLENBQUNFLE1BZ0J0QixDQUFDO0FBQUE7QUFqQkcsZUFBQUEsT0FBQTtFQUVILE1BQUFDLFFBQUEsR0FBaUIsTUFBTUosWUFBWSxDQUFDLENBQUM7RUFBQSxPQUM5QkksUUFBUSxDQUFBQyxHQUFJLENBQUNDLEtBYW5CLENBQUM7QUFBQTtBQWhCQyxTQUFBQSxNQUFBQyxPQUFBLEVBQUFDLEtBQUE7RUFJRCxJQUFBQyxRQUFBLEdBQXdELEtBQUs7RUFDN0QsSUFBSUYsT0FBTyxDQUFBRyxJQUFLLEtBQUssT0FBcUMsSUFBMUJILE9BQU8sQ0FBQUksa0JBQW1CO0lBQ3hERixRQUFBLENBQUFBLENBQUEsQ0FB
V0EsTUFBTTtFQUFUO0lBQ0gsSUFBSUYsT0FBTyxDQUFBRyxJQUFLLEtBQUssTUFBa0MsSUFBeEJILE9BQU8sQ0FBQUcsSUFBSyxLQUFLLE9BQU87TUFDNURELFFBQUEsQ0FBQUEsQ0FBQSxDQUFXQSxRQUFRO0lBQVg7RUFDVDtFQUFBLE9BQ007SUFBQUcsR0FBQSxFQUNBLG1CQUFtQkosS0FBSyxJQUFJRCxPQUFPLENBQUFHLElBQUssRUFBRTtJQUFBRyxJQUFBLEVBQ3pDTixPQUFPLENBQUFBLE9BQVE7SUFBQUUsUUFBQTtJQUFBSyxLQUFBLEVBRWRQLE9BQU8sQ0FBQUcsSUFBSyxLQUFLLE9BQTZCLEdBQTlDLE9BQThDLEdBQTlDO0VBQ1QsQ0FBQztBQUFBIiwiaWdub3JlTGlzdCI6W119
|
||||
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
@@ -0,0 +1,25 @@
|
||||
import { isInBundledMode } from 'src/utils/bundledMode.js';
|
||||
import { getCurrentInstallationType } from 'src/utils/doctorDiagnostic.js';
|
||||
import { isEnvTruthy } from 'src/utils/envUtils.js';
|
||||
import { useStartupNotification } from './useStartupNotification.js';
|
||||
const NPM_DEPRECATION_MESSAGE = 'Claude Code has switched from npm to native installer. Run `claude install` or see https://docs.anthropic.com/en/docs/claude-code/getting-started for more options.';
|
||||
// Warns npm-installed users to migrate to the native installer, once at
// startup; _temp decides whether the warning applies.
export function useNpmDeprecationNotification() {
  useStartupNotification(_temp);
}
|
||||
// Startup check: returns the npm-deprecation notification, or null when the
// check is disabled via env var, running in bundled mode, or this is a
// development install.
async function _temp() {
  const checksDisabled =
    isInBundledMode() || isEnvTruthy(process.env.DISABLE_INSTALLATION_CHECKS);
  if (checksDisabled) {
    return null;
  }
  if ((await getCurrentInstallationType()) === "development") {
    return null;
  }
  return {
    timeoutMs: 15000,
    key: "npm-deprecation-warning",
    text: NPM_DEPRECATION_MESSAGE,
    color: "warning",
    priority: "high"
  };
}
|
||||
//# sourceMappingURL=data:application/json;charset=utf-8;base64,eyJ2ZXJzaW9uIjozLCJuYW1lcyI6WyJpc0luQnVuZGxlZE1vZGUiLCJnZXRDdXJyZW50SW5zdGFsbGF0aW9uVHlwZSIsImlzRW52VHJ1dGh5IiwidXNlU3RhcnR1cE5vdGlmaWNhdGlvbiIsIk5QTV9ERVBSRUNBVElPTl9NRVNTQUdFIiwidXNlTnBtRGVwcmVjYXRpb25Ob3RpZmljYXRpb24iLCJfdGVtcCIsInByb2Nlc3MiLCJlbnYiLCJESVNBQkxFX0lOU1RBTExBVElPTl9DSEVDS1MiLCJpbnN0YWxsYXRpb25UeXBlIiwidGltZW91dE1zIiwia2V5IiwidGV4dCIsImNvbG9yIiwicHJpb3JpdHkiXSwic291cmNlcyI6WyJ1c2VOcG1EZXByZWNhdGlvbk5vdGlmaWNhdGlvbi50c3giXSwic291cmNlc0NvbnRlbnQiOlsiaW1wb3J0IHsgaXNJbkJ1bmRsZWRNb2RlIH0gZnJvbSAnc3JjL3V0aWxzL2J1bmRsZWRNb2RlLmpzJ1xuaW1wb3J0IHsgZ2V0Q3VycmVudEluc3RhbGxhdGlvblR5cGUgfSBmcm9tICdzcmMvdXRpbHMvZG9jdG9yRGlhZ25vc3RpYy5qcydcbmltcG9ydCB7IGlzRW52VHJ1dGh5IH0gZnJvbSAnc3JjL3V0aWxzL2VudlV0aWxzLmpzJ1xuaW1wb3J0IHsgdXNlU3RhcnR1cE5vdGlmaWNhdGlvbiB9IGZyb20gJy4vdXNlU3RhcnR1cE5vdGlmaWNhdGlvbi5qcydcblxuY29uc3QgTlBNX0RFUFJFQ0FUSU9OX01FU1NBR0UgPVxuICAnQ2xhdWRlIENvZGUgaGFzIHN3aXRjaGVkIGZyb20gbnBtIHRvIG5hdGl2ZSBpbnN0YWxsZXIuIFJ1biBgY2xhdWRlIGluc3RhbGxgIG9yIHNlZSBodHRwczovL2RvY3MuYW50aHJvcGljLmNvbS9lbi9kb2NzL2NsYXVkZS1jb2RlL2dldHRpbmctc3RhcnRlZCBmb3IgbW9yZSBvcHRpb25zLidcblxuZXhwb3J0IGZ1bmN0aW9uIHVzZU5wbURlcHJlY2F0aW9uTm90aWZpY2F0aW9uKCk6IHZvaWQge1xuICB1c2VTdGFydHVwTm90aWZpY2F0aW9uKGFzeW5jICgpID0+IHtcbiAgICBpZiAoXG4gICAgICBpc0luQnVuZGxlZE1vZGUoKSB8fFxuICAgICAgaXNFbnZUcnV0aHkocHJvY2Vzcy5lbnYuRElTQUJMRV9JTlNUQUxMQVRJT05fQ0hFQ0tTKVxuICAgICkge1xuICAgICAgcmV0dXJuIG51bGxcbiAgICB9XG4gICAgY29uc3QgaW5zdGFsbGF0aW9uVHlwZSA9IGF3YWl0IGdldEN1cnJlbnRJbnN0YWxsYXRpb25UeXBlKClcbiAgICBpZiAoaW5zdGFsbGF0aW9uVHlwZSA9PT0gJ2RldmVsb3BtZW50JykgcmV0dXJuIG51bGxcbiAgICByZXR1cm4ge1xuICAgICAgdGltZW91dE1zOiAxNTAwMCxcbiAgICAgIGtleTogJ25wbS1kZXByZWNhdGlvbi13YXJuaW5nJyxcbiAgICAgIHRleHQ6IE5QTV9ERVBSRUNBVElPTl9NRVNTQUdFLFxuICAgICAgY29sb3I6ICd3YXJuaW5nJyxcbiAgICAgIHByaW9yaXR5OiAnaGlnaCcsXG4gICAgfVxuICB9KVxufVxuIl0sIm1hcHBpbmdzIjoiQUFBQSxTQUFTQSxlQUFlLFFBQVEsMEJBQTBCO0FBQzFELFNBQVNDLDBCQUEwQixRQUFRLCtCQUErQjtBQUMxRSxTQUFTQyxXQUFXLFFBQVEsdUJBQXVC
O0FBQ25ELFNBQVNDLHNCQUFzQixRQUFRLDZCQUE2QjtBQUVwRSxNQUFNQyx1QkFBdUIsR0FDM0IscUtBQXFLO0FBRXZLLE9BQU8sU0FBQUMsOEJBQUE7RUFDTEYsc0JBQXNCLENBQUNHLEtBZ0J0QixDQUFDO0FBQUE7QUFqQkcsZUFBQUEsTUFBQTtFQUVILElBQ0VOLGVBQWUsQ0FDb0MsQ0FBQyxJQUFwREUsV0FBVyxDQUFDSyxPQUFPLENBQUFDLEdBQUksQ0FBQUMsMkJBQTRCLENBQUM7SUFBQSxPQUU3QyxJQUFJO0VBQUE7RUFFYixNQUFBQyxnQkFBQSxHQUF5QixNQUFNVCwwQkFBMEIsQ0FBQyxDQUFDO0VBQzNELElBQUlTLGdCQUFnQixLQUFLLGFBQWE7SUFBQSxPQUFTLElBQUk7RUFBQTtFQUFBLE9BQzVDO0lBQUFDLFNBQUEsRUFDTSxLQUFLO0lBQUFDLEdBQUEsRUFDWCx5QkFBeUI7SUFBQUMsSUFBQSxFQUN4QlQsdUJBQXVCO0lBQUFVLEtBQUEsRUFDdEIsU0FBUztJQUFBQyxRQUFBLEVBQ047RUFDWixDQUFDO0FBQUEiLCJpZ25vcmVMaXN0IjpbXX0=
|
||||
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
@@ -0,0 +1,69 @@
|
||||
import { c as _c } from "react/compiler-runtime";
|
||||
import { useCallback, useEffect, useState } from 'react';
|
||||
import { useNotifications } from 'src/context/notifications.js';
|
||||
import { getIsRemoteMode } from '../../bootstrap/state.js';
|
||||
import { getSettingsWithAllErrors } from '../../utils/settings/allErrors.js';
|
||||
import type { ValidationError } from '../../utils/settings/validation.js';
|
||||
import { useSettingsChange } from '../useSettingsChange.js';
|
||||
const SETTINGS_ERRORS_NOTIFICATION_KEY = 'settings-errors';
|
||||
// React Compiler output (memo-cache form of useSettingsErrors).
// NOTE(review): do not hand-edit the `$` slot reads/writes — correctness
// depends on their exact order; regenerate from the .tsx source instead.
export function useSettingsErrors() {
  const $ = _c(6); // 6-slot memo cache
  const {
    addNotification,
    removeNotification
  } = useNotifications();
  // Current settings validation errors; _temp reads them synchronously once.
  const [errors_0, setErrors] = useState(_temp);
  let t0;
  // Stable (deps-free) settings-change handler, cached behind the sentinel.
  if ($[0] === Symbol.for("react.memo_cache_sentinel")) {
    t0 = () => {
      const {
        errors: errors_1
      } = getSettingsWithAllErrors();
      setErrors(errors_1);
    };
    $[0] = t0;
  } else {
    t0 = $[0];
  }
  const handleSettingsChange = t0;
  useSettingsChange(handleSettingsChange);
  let t1;
  let t2;
  // Rebuild the effect + dep array only when its inputs changed.
  if ($[1] !== addNotification || $[2] !== errors_0 || $[3] !== removeNotification) {
    t1 = () => {
      if (getIsRemoteMode()) {
        return;
      }
      if (errors_0.length > 0) {
        const message = `Found ${errors_0.length} settings ${errors_0.length === 1 ? "issue" : "issues"} · /doctor for details`;
        addNotification({
          key: SETTINGS_ERRORS_NOTIFICATION_KEY,
          text: message,
          color: "warning",
          priority: "high",
          timeoutMs: 60000
        });
      } else {
        // No errors left: clear any previously shown settings-error banner.
        removeNotification(SETTINGS_ERRORS_NOTIFICATION_KEY);
      }
    };
    t2 = [errors_0, addNotification, removeNotification];
    $[1] = addNotification;
    $[2] = errors_0;
    $[3] = removeNotification;
    $[4] = t1;
    $[5] = t2;
  } else {
    t1 = $[4];
    t2 = $[5];
  }
  useEffect(t1, t2);
  return errors_0;
}
|
||||
// useState initializer: read the current settings validation errors
// synchronously so the first render already has them.
function _temp() {
  const result = getSettingsWithAllErrors();
  return result.errors;
}
|
||||
//# sourceMappingURL=data:application/json;charset=utf-8;base64,eyJ2ZXJzaW9uIjozLCJuYW1lcyI6WyJ1c2VDYWxsYmFjayIsInVzZUVmZmVjdCIsInVzZVN0YXRlIiwidXNlTm90aWZpY2F0aW9ucyIsImdldElzUmVtb3RlTW9kZSIsImdldFNldHRpbmdzV2l0aEFsbEVycm9ycyIsIlZhbGlkYXRpb25FcnJvciIsInVzZVNldHRpbmdzQ2hhbmdlIiwiU0VUVElOR1NfRVJST1JTX05PVElGSUNBVElPTl9LRVkiLCJ1c2VTZXR0aW5nc0Vycm9ycyIsIiQiLCJfYyIsImFkZE5vdGlmaWNhdGlvbiIsInJlbW92ZU5vdGlmaWNhdGlvbiIsImVycm9yc18wIiwic2V0RXJyb3JzIiwiX3RlbXAiLCJ0MCIsIlN5bWJvbCIsImZvciIsImVycm9ycyIsImVycm9yc18xIiwiaGFuZGxlU2V0dGluZ3NDaGFuZ2UiLCJ0MSIsInQyIiwibGVuZ3RoIiwibWVzc2FnZSIsImtleSIsInRleHQiLCJjb2xvciIsInByaW9yaXR5IiwidGltZW91dE1zIl0sInNvdXJjZXMiOlsidXNlU2V0dGluZ3NFcnJvcnMudHN4Il0sInNvdXJjZXNDb250ZW50IjpbImltcG9ydCB7IHVzZUNhbGxiYWNrLCB1c2VFZmZlY3QsIHVzZVN0YXRlIH0gZnJvbSAncmVhY3QnXG5pbXBvcnQgeyB1c2VOb3RpZmljYXRpb25zIH0gZnJvbSAnc3JjL2NvbnRleHQvbm90aWZpY2F0aW9ucy5qcydcbmltcG9ydCB7IGdldElzUmVtb3RlTW9kZSB9IGZyb20gJy4uLy4uL2Jvb3RzdHJhcC9zdGF0ZS5qcydcbmltcG9ydCB7IGdldFNldHRpbmdzV2l0aEFsbEVycm9ycyB9IGZyb20gJy4uLy4uL3V0aWxzL3NldHRpbmdzL2FsbEVycm9ycy5qcydcbmltcG9ydCB0eXBlIHsgVmFsaWRhdGlvbkVycm9yIH0gZnJvbSAnLi4vLi4vdXRpbHMvc2V0dGluZ3MvdmFsaWRhdGlvbi5qcydcbmltcG9ydCB7IHVzZVNldHRpbmdzQ2hhbmdlIH0gZnJvbSAnLi4vdXNlU2V0dGluZ3NDaGFuZ2UuanMnXG5cbmNvbnN0IFNFVFRJTkdTX0VSUk9SU19OT1RJRklDQVRJT05fS0VZID0gJ3NldHRpbmdzLWVycm9ycydcblxuZXhwb3J0IGZ1bmN0aW9uIHVzZVNldHRpbmdzRXJyb3JzKCk6IFZhbGlkYXRpb25FcnJvcltdIHtcbiAgY29uc3QgeyBhZGROb3RpZmljYXRpb24sIHJlbW92ZU5vdGlmaWNhdGlvbiB9ID0gdXNlTm90aWZpY2F0aW9ucygpXG4gIGNvbnN0IFtlcnJvcnMsIHNldEVycm9yc10gPSB1c2VTdGF0ZTxWYWxpZGF0aW9uRXJyb3JbXT4oKCkgPT4ge1xuICAgIGNvbnN0IHsgZXJyb3JzIH0gPSBnZXRTZXR0aW5nc1dpdGhBbGxFcnJvcnMoKVxuICAgIHJldHVybiBlcnJvcnNcbiAgfSlcblxuICBjb25zdCBoYW5kbGVTZXR0aW5nc0NoYW5nZSA9IHVzZUNhbGxiYWNrKCgpID0+IHtcbiAgICBjb25zdCB7IGVycm9ycyB9ID0gZ2V0U2V0dGluZ3NXaXRoQWxsRXJyb3JzKClcbiAgICBzZXRFcnJvcnMoZXJyb3JzKVxuICB9LCBbXSlcblxuICB1c2VTZXR0aW5nc0NoYW5nZShoYW5kbGVTZXR0aW5nc0NoYW5nZSlcblxuICB1c2VFZmZlY3QoKCkgPT4ge1xuICAgIGlmIChnZXRJc1JlbW90ZU1vZGUoKSkg
cmV0dXJuXG4gICAgaWYgKGVycm9ycy5sZW5ndGggPiAwKSB7XG4gICAgICBjb25zdCBtZXNzYWdlID0gYEZvdW5kICR7ZXJyb3JzLmxlbmd0aH0gc2V0dGluZ3MgJHtlcnJvcnMubGVuZ3RoID09PSAxID8gJ2lzc3VlJyA6ICdpc3N1ZXMnfSDCtyAvZG9jdG9yIGZvciBkZXRhaWxzYFxuICAgICAgYWRkTm90aWZpY2F0aW9uKHtcbiAgICAgICAga2V5OiBTRVRUSU5HU19FUlJPUlNfTk9USUZJQ0FUSU9OX0tFWSxcbiAgICAgICAgdGV4dDogbWVzc2FnZSxcbiAgICAgICAgY29sb3I6ICd3YXJuaW5nJyxcbiAgICAgICAgcHJpb3JpdHk6ICdoaWdoJyxcbiAgICAgICAgdGltZW91dE1zOiA2MDAwMCxcbiAgICAgIH0pXG4gICAgfSBlbHNlIHtcbiAgICAgIHJlbW92ZU5vdGlmaWNhdGlvbihTRVRUSU5HU19FUlJPUlNfTk9USUZJQ0FUSU9OX0tFWSlcbiAgICB9XG4gIH0sIFtlcnJvcnMsIGFkZE5vdGlmaWNhdGlvbiwgcmVtb3ZlTm90aWZpY2F0aW9uXSlcblxuICByZXR1cm4gZXJyb3JzXG59XG4iXSwibWFwcGluZ3MiOiI7QUFBQSxTQUFTQSxXQUFXLEVBQUVDLFNBQVMsRUFBRUMsUUFBUSxRQUFRLE9BQU87QUFDeEQsU0FBU0MsZ0JBQWdCLFFBQVEsOEJBQThCO0FBQy9ELFNBQVNDLGVBQWUsUUFBUSwwQkFBMEI7QUFDMUQsU0FBU0Msd0JBQXdCLFFBQVEsbUNBQW1DO0FBQzVFLGNBQWNDLGVBQWUsUUFBUSxvQ0FBb0M7QUFDekUsU0FBU0MsaUJBQWlCLFFBQVEseUJBQXlCO0FBRTNELE1BQU1DLGdDQUFnQyxHQUFHLGlCQUFpQjtBQUUxRCxPQUFPLFNBQUFDLGtCQUFBO0VBQUEsTUFBQUMsQ0FBQSxHQUFBQyxFQUFBO0VBQ0w7SUFBQUMsZUFBQTtJQUFBQztFQUFBLElBQWdEVixnQkFBZ0IsQ0FBQyxDQUFDO0VBQ2xFLE9BQUFXLFFBQUEsRUFBQUMsU0FBQSxJQUE0QmIsUUFBUSxDQUFvQmMsS0FHdkQsQ0FBQztFQUFBLElBQUFDLEVBQUE7RUFBQSxJQUFBUCxDQUFBLFFBQUFRLE1BQUEsQ0FBQUMsR0FBQTtJQUV1Q0YsRUFBQSxHQUFBQSxDQUFBO01BQ3ZDO1FBQUFHLE1BQUEsRUFBQUM7TUFBQSxJQUFtQmhCLHdCQUF3QixDQUFDLENBQUM7TUFDN0NVLFNBQVMsQ0FBQ0ssUUFBTSxDQUFDO0lBQUEsQ0FDbEI7SUFBQVYsQ0FBQSxNQUFBTyxFQUFBO0VBQUE7SUFBQUEsRUFBQSxHQUFBUCxDQUFBO0VBQUE7RUFIRCxNQUFBWSxvQkFBQSxHQUE2QkwsRUFHdkI7RUFFTlYsaUJBQWlCLENBQUNlLG9CQUFvQixDQUFDO0VBQUEsSUFBQUMsRUFBQTtFQUFBLElBQUFDLEVBQUE7RUFBQSxJQUFBZCxDQUFBLFFBQUFFLGVBQUEsSUFBQUYsQ0FBQSxRQUFBSSxRQUFBLElBQUFKLENBQUEsUUFBQUcsa0JBQUE7SUFFN0JVLEVBQUEsR0FBQUEsQ0FBQTtNQUNSLElBQUluQixlQUFlLENBQUMsQ0FBQztRQUFBO01BQUE7TUFDckIsSUFBSWdCLFFBQU0sQ0FBQUssTUFBTyxHQUFHLENBQUM7UUFDbkIsTUFBQUMsT0FBQSxHQUFnQixTQUFTTixRQUFNLENBQUFLLE1BQU8sYUFBYUwsUUFBTSxDQUFBSyxNQUFPLEtBQUssQ0FBc0IsR0FBeEMsT0FBd0MsR0FBeEMsUUFBd0Ms
d0JBQXdCO1FBQ25IYixlQUFlLENBQUM7VUFBQWUsR0FBQSxFQUNUbkIsZ0NBQWdDO1VBQUFvQixJQUFBLEVBQy9CRixPQUFPO1VBQUFHLEtBQUEsRUFDTixTQUFTO1VBQUFDLFFBQUEsRUFDTixNQUFNO1VBQUFDLFNBQUEsRUFDTDtRQUNiLENBQUMsQ0FBQztNQUFBO1FBRUZsQixrQkFBa0IsQ0FBQ0wsZ0NBQWdDLENBQUM7TUFBQTtJQUNyRCxDQUNGO0lBQUVnQixFQUFBLElBQUNKLFFBQU0sRUFBRVIsZUFBZSxFQUFFQyxrQkFBa0IsQ0FBQztJQUFBSCxDQUFBLE1BQUFFLGVBQUE7SUFBQUYsQ0FBQSxNQUFBSSxRQUFBO0lBQUFKLENBQUEsTUFBQUcsa0JBQUE7SUFBQUgsQ0FBQSxNQUFBYSxFQUFBO0lBQUFiLENBQUEsTUFBQWMsRUFBQTtFQUFBO0lBQUFELEVBQUEsR0FBQWIsQ0FBQTtJQUFBYyxFQUFBLEdBQUFkLENBQUE7RUFBQTtFQWRoRFQsU0FBUyxDQUFDc0IsRUFjVCxFQUFFQyxFQUE2QyxDQUFDO0VBQUEsT0FFMUNKLFFBQU07QUFBQTtBQTlCUixTQUFBSixNQUFBO0VBR0g7SUFBQUk7RUFBQSxJQUFtQmYsd0JBQXdCLENBQUMsQ0FBQztFQUFBLE9BQ3RDZSxNQUFNO0FBQUEiLCJpZ25vcmVMaXN0IjpbXX0=
|
||||
@@ -0,0 +1,41 @@
|
||||
import { useEffect, useRef } from 'react'
|
||||
import { getIsRemoteMode } from '../../bootstrap/state.js'
|
||||
import {
|
||||
type Notification,
|
||||
useNotifications,
|
||||
} from '../../context/notifications.js'
|
||||
import { logError } from '../../utils/log.js'
|
||||
|
||||
type Result = Notification | Notification[] | null
|
||||
|
||||
/**
|
||||
* Fires notification(s) once on mount. Encapsulates the remote-mode gate and
|
||||
* once-per-session ref guard that was hand-rolled across 10+ notifs/ hooks.
|
||||
*
|
||||
* The compute fn runs exactly once on first effect. Return null to skip,
|
||||
* a Notification to fire one, or an array to fire several. Sync or async.
|
||||
* Rejections are routed to logError.
|
||||
*/
|
||||
export function useStartupNotification(
|
||||
compute: () => Result | Promise<Result>,
|
||||
): void {
|
||||
const { addNotification } = useNotifications()
|
||||
const hasRunRef = useRef(false)
|
||||
const computeRef = useRef(compute)
|
||||
computeRef.current = compute
|
||||
|
||||
useEffect(() => {
|
||||
if (getIsRemoteMode() || hasRunRef.current) return
|
||||
hasRunRef.current = true
|
||||
|
||||
void Promise.resolve()
|
||||
.then(() => computeRef.current())
|
||||
.then(result => {
|
||||
if (!result) return
|
||||
for (const n of Array.isArray(result) ? result : [result]) {
|
||||
addNotification(n)
|
||||
}
|
||||
})
|
||||
.catch(logError)
|
||||
}, [addNotification])
|
||||
}
|
||||
@@ -0,0 +1,78 @@
|
||||
import { useEffect, useRef } from 'react'
|
||||
import { getIsRemoteMode } from '../../bootstrap/state.js'
|
||||
import {
|
||||
type Notification,
|
||||
useNotifications,
|
||||
} from '../../context/notifications.js'
|
||||
import { useAppState } from '../../state/AppState.js'
|
||||
import { isInProcessTeammateTask } from '../../tasks/InProcessTeammateTask/types.js'
|
||||
|
||||
function parseCount(notif: Notification): number {
|
||||
if (!('text' in notif)) {
|
||||
return 1
|
||||
}
|
||||
const match = notif.text.match(/^(\d+)/)
|
||||
return match?.[1] ? parseInt(match[1], 10) : 1
|
||||
}
|
||||
|
||||
function foldSpawn(acc: Notification, _incoming: Notification): Notification {
|
||||
return makeSpawnNotif(parseCount(acc) + 1)
|
||||
}
|
||||
|
||||
function makeSpawnNotif(count: number): Notification {
|
||||
return {
|
||||
key: 'teammate-spawn',
|
||||
text: count === 1 ? '1 agent spawned' : `${count} agents spawned`,
|
||||
priority: 'low',
|
||||
timeoutMs: 5000,
|
||||
fold: foldSpawn,
|
||||
}
|
||||
}
|
||||
|
||||
function foldShutdown(
|
||||
acc: Notification,
|
||||
_incoming: Notification,
|
||||
): Notification {
|
||||
return makeShutdownNotif(parseCount(acc) + 1)
|
||||
}
|
||||
|
||||
function makeShutdownNotif(count: number): Notification {
|
||||
return {
|
||||
key: 'teammate-shutdown',
|
||||
text: count === 1 ? '1 agent shut down' : `${count} agents shut down`,
|
||||
priority: 'low',
|
||||
timeoutMs: 5000,
|
||||
fold: foldShutdown,
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Fires batched notifications when in-process teammates spawn or shut down.
|
||||
* Uses fold() to combine repeated events into a single notification
|
||||
* like "3 agents spawned" or "2 agents shut down".
|
||||
*/
|
||||
export function useTeammateLifecycleNotification(): void {
|
||||
const tasks = useAppState(s => s.tasks)
|
||||
const { addNotification } = useNotifications()
|
||||
const seenRunningRef = useRef<Set<string>>(new Set())
|
||||
const seenCompletedRef = useRef<Set<string>>(new Set())
|
||||
|
||||
useEffect(() => {
|
||||
if (getIsRemoteMode()) return
|
||||
for (const [id, task] of Object.entries(tasks)) {
|
||||
if (!isInProcessTeammateTask(task)) {
|
||||
continue
|
||||
}
|
||||
|
||||
if (task.status === 'running' && !seenRunningRef.current.has(id)) {
|
||||
seenRunningRef.current.add(id)
|
||||
addNotification(makeSpawnNotif(1))
|
||||
}
|
||||
|
||||
if (task.status === 'completed' && !seenCompletedRef.current.has(id)) {
|
||||
seenCompletedRef.current.add(id)
|
||||
addNotification(makeShutdownNotif(1))
|
||||
}
|
||||
}
|
||||
}, [tasks, addNotification])
|
||||
}
|
||||
@@ -0,0 +1,51 @@
|
||||
import chalk from 'chalk'
|
||||
|
||||
type PlaceholderRendererProps = {
|
||||
placeholder?: string
|
||||
value: string
|
||||
showCursor?: boolean
|
||||
focus?: boolean
|
||||
terminalFocus: boolean
|
||||
invert?: (text: string) => string
|
||||
hidePlaceholderText?: boolean
|
||||
}
|
||||
|
||||
export function renderPlaceholder({
|
||||
placeholder,
|
||||
value,
|
||||
showCursor,
|
||||
focus,
|
||||
terminalFocus = true,
|
||||
invert = chalk.inverse,
|
||||
hidePlaceholderText = false,
|
||||
}: PlaceholderRendererProps): {
|
||||
renderedPlaceholder: string | undefined
|
||||
showPlaceholder: boolean
|
||||
} {
|
||||
let renderedPlaceholder: string | undefined = undefined
|
||||
|
||||
if (placeholder) {
|
||||
if (hidePlaceholderText) {
|
||||
// Voice recording: show only the cursor, no placeholder text
|
||||
renderedPlaceholder =
|
||||
showCursor && focus && terminalFocus ? invert(' ') : ''
|
||||
} else {
|
||||
renderedPlaceholder = chalk.dim(placeholder)
|
||||
|
||||
// Show inverse cursor only when both input and terminal are focused
|
||||
if (showCursor && focus && terminalFocus) {
|
||||
renderedPlaceholder =
|
||||
placeholder.length > 0
|
||||
? invert(placeholder[0]!) + chalk.dim(placeholder.slice(1))
|
||||
: invert(' ')
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
const showPlaceholder = value.length === 0 && Boolean(placeholder)
|
||||
|
||||
return {
|
||||
renderedPlaceholder,
|
||||
showPlaceholder,
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,388 @@
|
||||
import { feature } from 'bun:bundle'
|
||||
import type { ContentBlockParam } from '@anthropic-ai/sdk/resources/messages.mjs'
|
||||
import {
|
||||
type AnalyticsMetadata_I_VERIFIED_THIS_IS_NOT_CODE_OR_FILEPATHS,
|
||||
logEvent,
|
||||
} from 'src/services/analytics/index.js'
|
||||
import { sanitizeToolNameForAnalytics } from 'src/services/analytics/metadata.js'
|
||||
import type { ToolUseConfirm } from '../../components/permissions/PermissionRequest.js'
|
||||
import type {
|
||||
ToolPermissionContext,
|
||||
Tool as ToolType,
|
||||
ToolUseContext,
|
||||
} from '../../Tool.js'
|
||||
import { awaitClassifierAutoApproval } from '../../tools/BashTool/bashPermissions.js'
|
||||
import { BASH_TOOL_NAME } from '../../tools/BashTool/toolName.js'
|
||||
import type { AssistantMessage } from '../../types/message.js'
|
||||
import type {
|
||||
PendingClassifierCheck,
|
||||
PermissionAllowDecision,
|
||||
PermissionDecisionReason,
|
||||
PermissionDenyDecision,
|
||||
} from '../../types/permissions.js'
|
||||
import { setClassifierApproval } from '../../utils/classifierApprovals.js'
|
||||
import { logForDebugging } from '../../utils/debug.js'
|
||||
import { executePermissionRequestHooks } from '../../utils/hooks.js'
|
||||
import {
|
||||
REJECT_MESSAGE,
|
||||
REJECT_MESSAGE_WITH_REASON_PREFIX,
|
||||
SUBAGENT_REJECT_MESSAGE,
|
||||
SUBAGENT_REJECT_MESSAGE_WITH_REASON_PREFIX,
|
||||
withMemoryCorrectionHint,
|
||||
} from '../../utils/messages.js'
|
||||
import type { PermissionDecision } from '../../utils/permissions/PermissionResult.js'
|
||||
import {
|
||||
applyPermissionUpdates,
|
||||
persistPermissionUpdates,
|
||||
supportsPersistence,
|
||||
} from '../../utils/permissions/PermissionUpdate.js'
|
||||
import type { PermissionUpdate } from '../../utils/permissions/PermissionUpdateSchema.js'
|
||||
import {
|
||||
logPermissionDecision,
|
||||
type PermissionDecisionArgs,
|
||||
} from './permissionLogging.js'
|
||||
|
||||
// Provenance of a permission approval: a hook decision (optionally marked
// permanent), an explicit user decision, or a classifier auto-approval.
type PermissionApprovalSource =
  | { type: 'hook'; permanent?: boolean }
  | { type: 'user'; permanent: boolean }
  | { type: 'classifier' }

// Provenance of a rejection; user_reject records whether feedback text was given.
type PermissionRejectionSource =
  | { type: 'hook' }
  | { type: 'user_abort' }
  | { type: 'user_reject'; hasFeedback: boolean }

// Generic interface for permission queue operations, decoupled from React.
// In the REPL, these are backed by React state.
type PermissionQueueOps = {
  push(item: ToolUseConfirm): void
  remove(toolUseID: string): void
  update(toolUseID: string, patch: Partial<ToolUseConfirm>): void
}
|
||||
|
||||
type ResolveOnce<T> = {
|
||||
resolve(value: T): void
|
||||
isResolved(): boolean
|
||||
/**
|
||||
* Atomically check-and-mark as resolved. Returns true if this caller
|
||||
* won the race (nobody else has resolved yet), false otherwise.
|
||||
* Use this in async callbacks BEFORE awaiting, to close the window
|
||||
* between the `isResolved()` check and the actual `resolve()` call.
|
||||
*/
|
||||
claim(): boolean
|
||||
}
|
||||
|
||||
function createResolveOnce<T>(resolve: (value: T) => void): ResolveOnce<T> {
|
||||
let claimed = false
|
||||
let delivered = false
|
||||
return {
|
||||
resolve(value: T) {
|
||||
if (delivered) return
|
||||
delivered = true
|
||||
claimed = true
|
||||
resolve(value)
|
||||
},
|
||||
isResolved() {
|
||||
return claimed
|
||||
},
|
||||
claim() {
|
||||
if (claimed) return false
|
||||
claimed = true
|
||||
return true
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
function createPermissionContext(
|
||||
tool: ToolType,
|
||||
input: Record<string, unknown>,
|
||||
toolUseContext: ToolUseContext,
|
||||
assistantMessage: AssistantMessage,
|
||||
toolUseID: string,
|
||||
setToolPermissionContext: (context: ToolPermissionContext) => void,
|
||||
queueOps?: PermissionQueueOps,
|
||||
) {
|
||||
const messageId = assistantMessage.message.id
|
||||
const ctx = {
|
||||
tool,
|
||||
input,
|
||||
toolUseContext,
|
||||
assistantMessage,
|
||||
messageId,
|
||||
toolUseID,
|
||||
logDecision(
|
||||
args: PermissionDecisionArgs,
|
||||
opts?: {
|
||||
input?: Record<string, unknown>
|
||||
permissionPromptStartTimeMs?: number
|
||||
},
|
||||
) {
|
||||
logPermissionDecision(
|
||||
{
|
||||
tool,
|
||||
input: opts?.input ?? input,
|
||||
toolUseContext,
|
||||
messageId,
|
||||
toolUseID,
|
||||
},
|
||||
args,
|
||||
opts?.permissionPromptStartTimeMs,
|
||||
)
|
||||
},
|
||||
logCancelled() {
|
||||
logEvent('tengu_tool_use_cancelled', {
|
||||
messageID:
|
||||
messageId as AnalyticsMetadata_I_VERIFIED_THIS_IS_NOT_CODE_OR_FILEPATHS,
|
||||
toolName: sanitizeToolNameForAnalytics(tool.name),
|
||||
})
|
||||
},
|
||||
async persistPermissions(updates: PermissionUpdate[]) {
|
||||
if (updates.length === 0) return false
|
||||
persistPermissionUpdates(updates)
|
||||
const appState = toolUseContext.getAppState()
|
||||
setToolPermissionContext(
|
||||
applyPermissionUpdates(appState.toolPermissionContext, updates),
|
||||
)
|
||||
return updates.some(update => supportsPersistence(update.destination))
|
||||
},
|
||||
resolveIfAborted(resolve: (decision: PermissionDecision) => void) {
|
||||
if (!toolUseContext.abortController.signal.aborted) return false
|
||||
this.logCancelled()
|
||||
resolve(this.cancelAndAbort(undefined, true))
|
||||
return true
|
||||
},
|
||||
cancelAndAbort(
|
||||
feedback?: string,
|
||||
isAbort?: boolean,
|
||||
contentBlocks?: ContentBlockParam[],
|
||||
): PermissionDecision {
|
||||
const sub = !!toolUseContext.agentId
|
||||
const baseMessage = feedback
|
||||
? `${sub ? SUBAGENT_REJECT_MESSAGE_WITH_REASON_PREFIX : REJECT_MESSAGE_WITH_REASON_PREFIX}${feedback}`
|
||||
: sub
|
||||
? SUBAGENT_REJECT_MESSAGE
|
||||
: REJECT_MESSAGE
|
||||
const message = sub ? baseMessage : withMemoryCorrectionHint(baseMessage)
|
||||
if (isAbort || (!feedback && !contentBlocks?.length && !sub)) {
|
||||
logForDebugging(
|
||||
`Aborting: tool=${tool.name} isAbort=${isAbort} hasFeedback=${!!feedback} isSubagent=${sub}`,
|
||||
)
|
||||
toolUseContext.abortController.abort()
|
||||
}
|
||||
return { behavior: 'ask', message, contentBlocks }
|
||||
},
|
||||
...(feature('BASH_CLASSIFIER')
|
||||
? {
|
||||
async tryClassifier(
|
||||
pendingClassifierCheck: PendingClassifierCheck | undefined,
|
||||
updatedInput: Record<string, unknown> | undefined,
|
||||
): Promise<PermissionDecision | null> {
|
||||
if (tool.name !== BASH_TOOL_NAME || !pendingClassifierCheck) {
|
||||
return null
|
||||
}
|
||||
const classifierDecision = await awaitClassifierAutoApproval(
|
||||
pendingClassifierCheck,
|
||||
toolUseContext.abortController.signal,
|
||||
toolUseContext.options.isNonInteractiveSession,
|
||||
)
|
||||
if (!classifierDecision) {
|
||||
return null
|
||||
}
|
||||
if (
|
||||
feature('TRANSCRIPT_CLASSIFIER') &&
|
||||
classifierDecision.type === 'classifier'
|
||||
) {
|
||||
const matchedRule = classifierDecision.reason.match(
|
||||
/^Allowed by prompt rule: "(.+)"$/,
|
||||
)?.[1]
|
||||
if (matchedRule) {
|
||||
setClassifierApproval(toolUseID, matchedRule)
|
||||
}
|
||||
}
|
||||
logPermissionDecision(
|
||||
{ tool, input, toolUseContext, messageId, toolUseID },
|
||||
{ decision: 'accept', source: { type: 'classifier' } },
|
||||
undefined,
|
||||
)
|
||||
return {
|
||||
behavior: 'allow' as const,
|
||||
updatedInput: updatedInput ?? input,
|
||||
userModified: false,
|
||||
decisionReason: classifierDecision,
|
||||
}
|
||||
},
|
||||
}
|
||||
: {}),
|
||||
async runHooks(
|
||||
permissionMode: string | undefined,
|
||||
suggestions: PermissionUpdate[] | undefined,
|
||||
updatedInput?: Record<string, unknown>,
|
||||
permissionPromptStartTimeMs?: number,
|
||||
): Promise<PermissionDecision | null> {
|
||||
for await (const hookResult of executePermissionRequestHooks(
|
||||
tool.name,
|
||||
toolUseID,
|
||||
input,
|
||||
toolUseContext,
|
||||
permissionMode,
|
||||
suggestions,
|
||||
toolUseContext.abortController.signal,
|
||||
)) {
|
||||
if (hookResult.permissionRequestResult) {
|
||||
const decision = hookResult.permissionRequestResult
|
||||
if (decision.behavior === 'allow') {
|
||||
const finalInput = decision.updatedInput ?? updatedInput ?? input
|
||||
return await this.handleHookAllow(
|
||||
finalInput,
|
||||
decision.updatedPermissions ?? [],
|
||||
permissionPromptStartTimeMs,
|
||||
)
|
||||
} else if (decision.behavior === 'deny') {
|
||||
this.logDecision(
|
||||
{ decision: 'reject', source: { type: 'hook' } },
|
||||
{ permissionPromptStartTimeMs },
|
||||
)
|
||||
if (decision.interrupt) {
|
||||
logForDebugging(
|
||||
`Hook interrupt: tool=${tool.name} hookMessage=${decision.message}`,
|
||||
)
|
||||
toolUseContext.abortController.abort()
|
||||
}
|
||||
return this.buildDeny(
|
||||
decision.message || 'Permission denied by hook',
|
||||
{
|
||||
type: 'hook',
|
||||
hookName: 'PermissionRequest',
|
||||
reason: decision.message,
|
||||
},
|
||||
)
|
||||
}
|
||||
}
|
||||
}
|
||||
return null
|
||||
},
|
||||
buildAllow(
|
||||
updatedInput: Record<string, unknown>,
|
||||
opts?: {
|
||||
userModified?: boolean
|
||||
decisionReason?: PermissionDecisionReason
|
||||
acceptFeedback?: string
|
||||
contentBlocks?: ContentBlockParam[]
|
||||
},
|
||||
): PermissionAllowDecision {
|
||||
return {
|
||||
behavior: 'allow' as const,
|
||||
updatedInput,
|
||||
userModified: opts?.userModified ?? false,
|
||||
...(opts?.decisionReason && { decisionReason: opts.decisionReason }),
|
||||
...(opts?.acceptFeedback && { acceptFeedback: opts.acceptFeedback }),
|
||||
...(opts?.contentBlocks &&
|
||||
opts.contentBlocks.length > 0 && {
|
||||
contentBlocks: opts.contentBlocks,
|
||||
}),
|
||||
}
|
||||
},
|
||||
buildDeny(
|
||||
message: string,
|
||||
decisionReason: PermissionDecisionReason,
|
||||
): PermissionDenyDecision {
|
||||
return { behavior: 'deny' as const, message, decisionReason }
|
||||
},
|
||||
async handleUserAllow(
|
||||
updatedInput: Record<string, unknown>,
|
||||
permissionUpdates: PermissionUpdate[],
|
||||
feedback?: string,
|
||||
permissionPromptStartTimeMs?: number,
|
||||
contentBlocks?: ContentBlockParam[],
|
||||
decisionReason?: PermissionDecisionReason,
|
||||
): Promise<PermissionAllowDecision> {
|
||||
const acceptedPermanentUpdates =
|
||||
await this.persistPermissions(permissionUpdates)
|
||||
this.logDecision(
|
||||
{
|
||||
decision: 'accept',
|
||||
source: { type: 'user', permanent: acceptedPermanentUpdates },
|
||||
},
|
||||
{ input: updatedInput, permissionPromptStartTimeMs },
|
||||
)
|
||||
const userModified = tool.inputsEquivalent
|
||||
? !tool.inputsEquivalent(input, updatedInput)
|
||||
: false
|
||||
const trimmedFeedback = feedback?.trim()
|
||||
return this.buildAllow(updatedInput, {
|
||||
userModified,
|
||||
decisionReason,
|
||||
acceptFeedback: trimmedFeedback || undefined,
|
||||
contentBlocks,
|
||||
})
|
||||
},
|
||||
async handleHookAllow(
|
||||
finalInput: Record<string, unknown>,
|
||||
permissionUpdates: PermissionUpdate[],
|
||||
permissionPromptStartTimeMs?: number,
|
||||
): Promise<PermissionAllowDecision> {
|
||||
const acceptedPermanentUpdates =
|
||||
await this.persistPermissions(permissionUpdates)
|
||||
this.logDecision(
|
||||
{
|
||||
decision: 'accept',
|
||||
source: { type: 'hook', permanent: acceptedPermanentUpdates },
|
||||
},
|
||||
{ input: finalInput, permissionPromptStartTimeMs },
|
||||
)
|
||||
return this.buildAllow(finalInput, {
|
||||
decisionReason: { type: 'hook', hookName: 'PermissionRequest' },
|
||||
})
|
||||
},
|
||||
pushToQueue(item: ToolUseConfirm) {
|
||||
queueOps?.push(item)
|
||||
},
|
||||
removeFromQueue() {
|
||||
queueOps?.remove(toolUseID)
|
||||
},
|
||||
updateQueueItem(patch: Partial<ToolUseConfirm>) {
|
||||
queueOps?.update(toolUseID, patch)
|
||||
},
|
||||
}
|
||||
return Object.freeze(ctx)
|
||||
}
|
||||
|
||||
type PermissionContext = ReturnType<typeof createPermissionContext>
|
||||
|
||||
/**
|
||||
* Create a PermissionQueueOps backed by a React state setter.
|
||||
* This is the bridge between React's `setToolUseConfirmQueue` and the
|
||||
* generic queue interface used by PermissionContext.
|
||||
*/
|
||||
function createPermissionQueueOps(
|
||||
setToolUseConfirmQueue: React.Dispatch<
|
||||
React.SetStateAction<ToolUseConfirm[]>
|
||||
>,
|
||||
): PermissionQueueOps {
|
||||
return {
|
||||
push(item: ToolUseConfirm) {
|
||||
setToolUseConfirmQueue(queue => [...queue, item])
|
||||
},
|
||||
remove(toolUseID: string) {
|
||||
setToolUseConfirmQueue(queue =>
|
||||
queue.filter(item => item.toolUseID !== toolUseID),
|
||||
)
|
||||
},
|
||||
update(toolUseID: string, patch: Partial<ToolUseConfirm>) {
|
||||
setToolUseConfirmQueue(queue =>
|
||||
queue.map(item =>
|
||||
item.toolUseID === toolUseID ? { ...item, ...patch } : item,
|
||||
),
|
||||
)
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
export { createPermissionContext, createPermissionQueueOps, createResolveOnce }
|
||||
export type {
|
||||
PermissionContext,
|
||||
PermissionApprovalSource,
|
||||
PermissionQueueOps,
|
||||
PermissionRejectionSource,
|
||||
ResolveOnce,
|
||||
}
|
||||
@@ -0,0 +1,65 @@
|
||||
import { feature } from 'bun:bundle'
|
||||
import type { PendingClassifierCheck } from '../../../types/permissions.js'
|
||||
import { logError } from '../../../utils/log.js'
|
||||
import type { PermissionDecision } from '../../../utils/permissions/PermissionResult.js'
|
||||
import type { PermissionUpdate } from '../../../utils/permissions/PermissionUpdateSchema.js'
|
||||
import type { PermissionContext } from '../PermissionContext.js'
|
||||
|
||||
type CoordinatorPermissionParams = {
|
||||
ctx: PermissionContext
|
||||
pendingClassifierCheck?: PendingClassifierCheck | undefined
|
||||
updatedInput: Record<string, unknown> | undefined
|
||||
suggestions: PermissionUpdate[] | undefined
|
||||
permissionMode: string | undefined
|
||||
}
|
||||
|
||||
/**
|
||||
* Handles the coordinator worker permission flow.
|
||||
*
|
||||
* For coordinator workers, automated checks (hooks and classifier) are
|
||||
* awaited sequentially before falling through to the interactive dialog.
|
||||
*
|
||||
* Returns a PermissionDecision if the automated checks resolved the
|
||||
* permission, or null if the caller should fall through to the
|
||||
* interactive dialog.
|
||||
*/
|
||||
async function handleCoordinatorPermission(
|
||||
params: CoordinatorPermissionParams,
|
||||
): Promise<PermissionDecision | null> {
|
||||
const { ctx, updatedInput, suggestions, permissionMode } = params
|
||||
|
||||
try {
|
||||
// 1. Try permission hooks first (fast, local)
|
||||
const hookResult = await ctx.runHooks(
|
||||
permissionMode,
|
||||
suggestions,
|
||||
updatedInput,
|
||||
)
|
||||
if (hookResult) return hookResult
|
||||
|
||||
// 2. Try classifier (slow, inference -- bash only)
|
||||
const classifierResult = feature('BASH_CLASSIFIER')
|
||||
? await ctx.tryClassifier?.(params.pendingClassifierCheck, updatedInput)
|
||||
: null
|
||||
if (classifierResult) {
|
||||
return classifierResult
|
||||
}
|
||||
} catch (error) {
|
||||
// If automated checks fail unexpectedly, fall through to show the dialog
|
||||
// so the user can decide manually. Non-Error throws get a context prefix
|
||||
// so the log is traceable — intentionally NOT toError(), which would drop
|
||||
// the prefix.
|
||||
if (error instanceof Error) {
|
||||
logError(error)
|
||||
} else {
|
||||
logError(new Error(`Automated permission check failed: ${String(error)}`))
|
||||
}
|
||||
}
|
||||
|
||||
// 3. Neither resolved (or checks failed) -- fall through to dialog below.
|
||||
// Hooks already ran, classifier already consumed.
|
||||
return null
|
||||
}
|
||||
|
||||
export { handleCoordinatorPermission }
|
||||
export type { CoordinatorPermissionParams }
|
||||
@@ -0,0 +1,536 @@
|
||||
import { feature } from 'bun:bundle'
|
||||
import type { ContentBlockParam } from '@anthropic-ai/sdk/resources/messages.mjs'
|
||||
import { randomUUID } from 'crypto'
|
||||
import { logForDebugging } from 'src/utils/debug.js'
|
||||
import { getAllowedChannels } from '../../../bootstrap/state.js'
|
||||
import type { BridgePermissionCallbacks } from '../../../bridge/bridgePermissionCallbacks.js'
|
||||
import { getTerminalFocused } from '../../../ink/terminal-focus-state.js'
|
||||
import {
|
||||
CHANNEL_PERMISSION_REQUEST_METHOD,
|
||||
type ChannelPermissionRequestParams,
|
||||
findChannelEntry,
|
||||
} from '../../../services/mcp/channelNotification.js'
|
||||
import type { ChannelPermissionCallbacks } from '../../../services/mcp/channelPermissions.js'
|
||||
import {
|
||||
filterPermissionRelayClients,
|
||||
shortRequestId,
|
||||
truncateForPreview,
|
||||
} from '../../../services/mcp/channelPermissions.js'
|
||||
import { executeAsyncClassifierCheck } from '../../../tools/BashTool/bashPermissions.js'
|
||||
import { BASH_TOOL_NAME } from '../../../tools/BashTool/toolName.js'
|
||||
import {
|
||||
clearClassifierChecking,
|
||||
setClassifierApproval,
|
||||
setClassifierChecking,
|
||||
setYoloClassifierApproval,
|
||||
} from '../../../utils/classifierApprovals.js'
|
||||
import { errorMessage } from '../../../utils/errors.js'
|
||||
import type { PermissionDecision } from '../../../utils/permissions/PermissionResult.js'
|
||||
import type { PermissionUpdate } from '../../../utils/permissions/PermissionUpdateSchema.js'
|
||||
import { hasPermissionsToUseTool } from '../../../utils/permissions/permissions.js'
|
||||
import type { PermissionContext } from '../PermissionContext.js'
|
||||
import { createResolveOnce } from '../PermissionContext.js'
|
||||
|
||||
type InteractivePermissionParams = {
|
||||
ctx: PermissionContext
|
||||
description: string
|
||||
result: PermissionDecision & { behavior: 'ask' }
|
||||
awaitAutomatedChecksBeforeDialog: boolean | undefined
|
||||
bridgeCallbacks?: BridgePermissionCallbacks
|
||||
channelCallbacks?: ChannelPermissionCallbacks
|
||||
}
|
||||
|
||||
/**
 * Handles the interactive (main-agent) permission flow.
 *
 * Pushes a ToolUseConfirm entry to the confirm queue with callbacks:
 * onAbort, onAllow, onReject, recheckPermission, onUserInteraction.
 *
 * Runs permission hooks and bash classifier checks asynchronously in the
 * background, racing them against user interaction. Uses a resolve-once
 * guard and `userInteracted` flag to prevent multiple resolutions.
 *
 * Racers set up by this function: the local dialog callbacks, the bridge
 * (CCR) response, the channel relay response, the async hook run, and the
 * async classifier check. Whichever calls `claim()` first wins; every
 * winner is responsible for tearing down the other racers it can reach.
 *
 * This function does NOT return a Promise -- it sets up callbacks that
 * eventually call `resolve()` to resolve the outer promise owned by
 * the caller.
 *
 * @param params - permission context, dialog description, the pending
 *   'ask' result, the coordinator-already-ran flag, and optional
 *   bridge/channel relay callbacks.
 * @param resolve - settles the caller-owned promise; wrapped by
 *   createResolveOnce so only the first racer resolves.
 */
function handleInteractivePermission(
  params: InteractivePermissionParams,
  resolve: (decision: PermissionDecision) => void,
): void {
  const {
    ctx,
    description,
    result,
    awaitAutomatedChecksBeforeDialog,
    bridgeCallbacks,
    channelCallbacks,
  } = params

  // claim() is the atomic check-and-mark used by every racer; isResolved()
  // is the non-claiming peek used before starting async work.
  const { resolve: resolveOnce, isResolved, claim } = createResolveOnce(resolve)
  let userInteracted = false
  let checkmarkTransitionTimer: ReturnType<typeof setTimeout> | undefined
  // Hoisted so onDismissCheckmark (Esc during checkmark window) can also
  // remove the abort listener — not just the timer callback.
  let checkmarkAbortHandler: (() => void) | undefined
  const bridgeRequestId = bridgeCallbacks ? randomUUID() : undefined
  // Hoisted so local/hook/classifier wins can remove the pending channel
  // entry. No "tell remote to dismiss" equivalent — the text sits in your
  // phone, and a stale "yes abc123" after local-resolve falls through
  // tryConsumeReply (entry gone) and gets enqueued as normal chat.
  let channelUnsubscribe: (() => void) | undefined

  const permissionPromptStartTimeMs = Date.now()
  // Prefer the (possibly hook-rewritten) input for everything user-facing.
  const displayInput = result.updatedInput ?? ctx.input

  // Clears the spinner on the queued dialog item; compiled out entirely
  // when the classifier feature flag is off.
  function clearClassifierIndicator(): void {
    if (feature('BASH_CLASSIFIER')) {
      ctx.updateQueueItem({ classifierCheckInProgress: false })
    }
  }

  // Racer: the local interactive dialog. The callbacks below fire from UI
  // interaction and race against bridge/channel/hook/classifier resolution.
  ctx.pushToQueue({
    assistantMessage: ctx.assistantMessage,
    tool: ctx.tool,
    description,
    input: displayInput,
    toolUseContext: ctx.toolUseContext,
    toolUseID: ctx.toolUseID,
    permissionResult: result,
    permissionPromptStartTimeMs,
    ...(feature('BASH_CLASSIFIER')
      ? {
          classifierCheckInProgress:
            !!result.pendingClassifierCheck &&
            !awaitAutomatedChecksBeforeDialog,
        }
      : {}),
    onUserInteraction() {
      // Called when user starts interacting with the permission dialog
      // (e.g., arrow keys, tab, typing feedback)
      // Hide the classifier indicator since auto-approve is no longer possible
      //
      // Grace period: ignore interactions in the first 200ms to prevent
      // accidental keypresses from canceling the classifier prematurely
      const GRACE_PERIOD_MS = 200
      if (Date.now() - permissionPromptStartTimeMs < GRACE_PERIOD_MS) {
        return
      }
      userInteracted = true
      clearClassifierChecking(ctx.toolUseID)
      clearClassifierIndicator()
    },
    onDismissCheckmark() {
      // Esc during the post-approval checkmark window: cancel the pending
      // removal timer, detach its abort listener, and drop the dialog now.
      if (checkmarkTransitionTimer) {
        clearTimeout(checkmarkTransitionTimer)
        checkmarkTransitionTimer = undefined
        if (checkmarkAbortHandler) {
          ctx.toolUseContext.abortController.signal.removeEventListener(
            'abort',
            checkmarkAbortHandler,
          )
          checkmarkAbortHandler = undefined
        }
        ctx.removeFromQueue()
      }
    },
    onAbort() {
      if (!claim()) return
      // Tell the remote (CCR) the request is over before resolving locally.
      if (bridgeCallbacks && bridgeRequestId) {
        bridgeCallbacks.sendResponse(bridgeRequestId, {
          behavior: 'deny',
          message: 'User aborted',
        })
        bridgeCallbacks.cancelRequest(bridgeRequestId)
      }
      channelUnsubscribe?.()
      ctx.logCancelled()
      ctx.logDecision(
        { decision: 'reject', source: { type: 'user_abort' } },
        { permissionPromptStartTimeMs },
      )
      resolveOnce(ctx.cancelAndAbort(undefined, true))
    },
    async onAllow(
      updatedInput,
      permissionUpdates: PermissionUpdate[],
      feedback?: string,
      contentBlocks?: ContentBlockParam[],
    ) {
      if (!claim()) return // atomic check-and-mark before await

      if (bridgeCallbacks && bridgeRequestId) {
        bridgeCallbacks.sendResponse(bridgeRequestId, {
          behavior: 'allow',
          updatedInput,
          updatedPermissions: permissionUpdates,
        })
        bridgeCallbacks.cancelRequest(bridgeRequestId)
      }
      channelUnsubscribe?.()

      resolveOnce(
        await ctx.handleUserAllow(
          updatedInput,
          permissionUpdates,
          feedback,
          permissionPromptStartTimeMs,
          contentBlocks,
          result.decisionReason,
        ),
      )
    },
    onReject(feedback?: string, contentBlocks?: ContentBlockParam[]) {
      if (!claim()) return

      if (bridgeCallbacks && bridgeRequestId) {
        bridgeCallbacks.sendResponse(bridgeRequestId, {
          behavior: 'deny',
          message: feedback ?? 'User denied permission',
        })
        bridgeCallbacks.cancelRequest(bridgeRequestId)
      }
      channelUnsubscribe?.()

      ctx.logDecision(
        {
          decision: 'reject',
          source: { type: 'user_reject', hasFeedback: !!feedback },
        },
        { permissionPromptStartTimeMs },
      )
      resolveOnce(ctx.cancelAndAbort(feedback, undefined, contentBlocks))
    },
    async recheckPermission() {
      if (isResolved()) return
      const freshResult = await hasPermissionsToUseTool(
        ctx.tool,
        ctx.input,
        ctx.toolUseContext,
        ctx.assistantMessage,
        ctx.toolUseID,
      )
      if (freshResult.behavior === 'allow') {
        // claim() (atomic check-and-mark), not isResolved() — the async
        // hasPermissionsToUseTool call above opens a window where CCR
        // could have responded in flight. Matches onAllow/onReject/hook
        // paths. cancelRequest tells CCR to dismiss its prompt — without
        // it, the web UI shows a stale prompt for a tool that's already
        // executing (particularly visible when recheck is triggered by
        // a CCR-initiated mode switch, the very case this callback exists
        // for after useReplBridge started calling it).
        if (!claim()) return
        if (bridgeCallbacks && bridgeRequestId) {
          bridgeCallbacks.cancelRequest(bridgeRequestId)
        }
        channelUnsubscribe?.()
        ctx.removeFromQueue()
        ctx.logDecision({ decision: 'accept', source: 'config' })
        resolveOnce(ctx.buildAllow(freshResult.updatedInput ?? ctx.input))
      }
    },
  })

  // Race 4: Bridge permission response from CCR (claude.ai)
  // When the bridge is connected, send the permission request to CCR and
  // subscribe for a response. Whichever side (CLI or CCR) responds first
  // wins via claim().
  //
  // All tools are forwarded — CCR's generic allow/deny modal handles any
  // tool, and can return `updatedInput` when it has a dedicated renderer
  // (e.g. plan edit). Tools whose local dialog injects fields (ReviewArtifact
  // `selected`, AskUserQuestion `answers`) tolerate the field being missing
  // so generic remote approval degrades gracefully instead of throwing.
  if (bridgeCallbacks && bridgeRequestId) {
    bridgeCallbacks.sendRequest(
      bridgeRequestId,
      ctx.tool.name,
      displayInput,
      ctx.toolUseID,
      description,
      result.suggestions,
      result.blockedPath,
    )

    const signal = ctx.toolUseContext.abortController.signal
    const unsubscribe = bridgeCallbacks.onResponse(
      bridgeRequestId,
      response => {
        if (!claim()) return // Local user/hook/classifier already responded
        // Detach the abort-time unsubscribe: this response IS the teardown.
        signal.removeEventListener('abort', unsubscribe)
        clearClassifierChecking(ctx.toolUseID)
        clearClassifierIndicator()
        ctx.removeFromQueue()
        channelUnsubscribe?.()

        if (response.behavior === 'allow') {
          if (response.updatedPermissions?.length) {
            // Fire-and-forget: persistence failure shouldn't block the allow.
            void ctx.persistPermissions(response.updatedPermissions)
          }
          ctx.logDecision(
            {
              decision: 'accept',
              source: {
                type: 'user',
                permanent: !!response.updatedPermissions?.length,
              },
            },
            { permissionPromptStartTimeMs },
          )
          resolveOnce(ctx.buildAllow(response.updatedInput ?? displayInput))
        } else {
          ctx.logDecision(
            {
              decision: 'reject',
              source: {
                type: 'user_reject',
                hasFeedback: !!response.message,
              },
            },
            { permissionPromptStartTimeMs },
          )
          resolveOnce(ctx.cancelAndAbort(response.message))
        }
      },
    )

    signal.addEventListener('abort', unsubscribe, { once: true })
  }

  // Channel permission relay — races alongside the bridge block above. Send a
  // permission prompt to every active channel (Telegram, iMessage, etc.) via
  // its MCP send_message tool, then race the reply against local/bridge/hook/
  // classifier. The inbound "yes abc123" is intercepted in the notification
  // handler (useManageMCPConnections.ts) BEFORE enqueue, so it never reaches
  // Claude as a conversation turn.
  //
  // Unlike the bridge block, this still guards on `requiresUserInteraction` —
  // channel replies are pure yes/no with no `updatedInput` path. In practice
  // the guard is dead code today: all three `requiresUserInteraction` tools
  // (ExitPlanMode, AskUserQuestion, ReviewArtifact) return `isEnabled()===false`
  // when channels are configured, so they never reach this handler.
  //
  // Fire-and-forget send: if callTool fails (channel down, tool missing),
  // the subscription never fires and another racer wins. Graceful degradation
  // — the local dialog is always there as the floor.
  if (
    (feature('KAIROS') || feature('KAIROS_CHANNELS')) &&
    channelCallbacks &&
    !ctx.tool.requiresUserInteraction?.()
  ) {
    const channelRequestId = shortRequestId(ctx.toolUseID)
    const allowedChannels = getAllowedChannels()
    const channelClients = filterPermissionRelayClients(
      ctx.toolUseContext.getAppState().mcp.clients,
      name => findChannelEntry(name, allowedChannels) !== undefined,
    )

    if (channelClients.length > 0) {
      // Outbound is structured too (Kenneth's symmetry ask) — server owns
      // message formatting for its platform (Telegram markdown, iMessage
      // rich text, Discord embed). CC sends the RAW parts; server composes.
      // The old callTool('send_message', {text,content,message}) triple-key
      // hack is gone — no more guessing which arg name each plugin takes.
      // NOTE: this `params` shadows the function's `params` argument, which
      // was already fully destructured above.
      const params: ChannelPermissionRequestParams = {
        request_id: channelRequestId,
        tool_name: ctx.tool.name,
        description,
        input_preview: truncateForPreview(displayInput),
      }

      for (const client of channelClients) {
        if (client.type !== 'connected') continue // refine for TS
        void client.client
          .notification({
            method: CHANNEL_PERMISSION_REQUEST_METHOD,
            params,
          })
          .catch(e => {
            logForDebugging(
              `Channel permission_request failed for ${client.name}: ${errorMessage(e)}`,
              { level: 'error' },
            )
          })
      }

      const channelSignal = ctx.toolUseContext.abortController.signal
      // Wrap so BOTH the map delete AND the abort-listener teardown happen
      // at every call site. The 6 channelUnsubscribe?.() sites after local/
      // hook/classifier wins previously only deleted the map entry — the
      // dead closure stayed registered on the session-scoped abort signal
      // until the session ended. Not a functional bug (Map.delete is
      // idempotent), but it held the closure alive.
      const mapUnsub = channelCallbacks.onResponse(
        channelRequestId,
        response => {
          if (!claim()) return // Another racer won
          channelUnsubscribe?.() // both: map delete + listener remove
          clearClassifierChecking(ctx.toolUseID)
          clearClassifierIndicator()
          ctx.removeFromQueue()
          // Bridge is the other remote — tell it we're done.
          if (bridgeCallbacks && bridgeRequestId) {
            bridgeCallbacks.cancelRequest(bridgeRequestId)
          }

          if (response.behavior === 'allow') {
            ctx.logDecision(
              {
                decision: 'accept',
                source: { type: 'user', permanent: false },
              },
              { permissionPromptStartTimeMs },
            )
            resolveOnce(ctx.buildAllow(displayInput))
          } else {
            ctx.logDecision(
              {
                decision: 'reject',
                source: { type: 'user_reject', hasFeedback: false },
              },
              { permissionPromptStartTimeMs },
            )
            resolveOnce(
              ctx.cancelAndAbort(`Denied via channel ${response.fromServer}`),
            )
          }
        },
      )
      channelUnsubscribe = () => {
        mapUnsub()
        channelSignal.removeEventListener('abort', channelUnsubscribe!)
      }

      channelSignal.addEventListener('abort', channelUnsubscribe, {
        once: true,
      })
    }
  }

  // Skip hooks if they were already awaited in the coordinator branch above
  if (!awaitAutomatedChecksBeforeDialog) {
    // Execute PermissionRequest hooks asynchronously
    // If hook returns a decision before user responds, apply it
    void (async () => {
      if (isResolved()) return
      const currentAppState = ctx.toolUseContext.getAppState()
      const hookDecision = await ctx.runHooks(
        currentAppState.toolPermissionContext.mode,
        result.suggestions,
        result.updatedInput,
        permissionPromptStartTimeMs,
      )
      // claim() after the await — a racer may have won while hooks ran.
      if (!hookDecision || !claim()) return
      if (bridgeCallbacks && bridgeRequestId) {
        bridgeCallbacks.cancelRequest(bridgeRequestId)
      }
      channelUnsubscribe?.()
      ctx.removeFromQueue()
      resolveOnce(hookDecision)
    })()
  }

  // Execute bash classifier check asynchronously (if applicable)
  if (
    feature('BASH_CLASSIFIER') &&
    result.pendingClassifierCheck &&
    ctx.tool.name === BASH_TOOL_NAME &&
    !awaitAutomatedChecksBeforeDialog
  ) {
    // UI indicator for "classifier running" — set here (not in
    // toolExecution.ts) so commands that auto-allow via prefix rules
    // don't flash the indicator for a split second before allow returns.
    setClassifierChecking(ctx.toolUseID)
    void executeAsyncClassifierCheck(
      result.pendingClassifierCheck,
      ctx.toolUseContext.abortController.signal,
      ctx.toolUseContext.options.isNonInteractiveSession,
      {
        shouldContinue: () => !isResolved() && !userInteracted,
        onComplete: () => {
          clearClassifierChecking(ctx.toolUseID)
          clearClassifierIndicator()
        },
        onAllow: decisionReason => {
          if (!claim()) return
          if (bridgeCallbacks && bridgeRequestId) {
            bridgeCallbacks.cancelRequest(bridgeRequestId)
          }
          channelUnsubscribe?.()
          clearClassifierChecking(ctx.toolUseID)

          // Extract the quoted rule text from the classifier's reason
          // string; fall back to the whole reason when it doesn't match.
          const matchedRule =
            decisionReason.type === 'classifier'
              ? (decisionReason.reason.match(
                  /^Allowed by prompt rule: "(.+)"$/,
                )?.[1] ?? decisionReason.reason)
              : undefined

          // Show auto-approved transition with dimmed options
          if (feature('TRANSCRIPT_CLASSIFIER')) {
            ctx.updateQueueItem({
              classifierCheckInProgress: false,
              classifierAutoApproved: true,
              classifierMatchedRule: matchedRule,
            })
          }

          if (
            feature('TRANSCRIPT_CLASSIFIER') &&
            decisionReason.type === 'classifier'
          ) {
            if (decisionReason.classifier === 'auto-mode') {
              setYoloClassifierApproval(ctx.toolUseID, decisionReason.reason)
            } else if (matchedRule) {
              setClassifierApproval(ctx.toolUseID, matchedRule)
            }
          }

          ctx.logDecision(
            { decision: 'accept', source: { type: 'classifier' } },
            { permissionPromptStartTimeMs },
          )
          resolveOnce(ctx.buildAllow(ctx.input, { decisionReason }))

          // Keep checkmark visible, then remove dialog.
          // 3s if terminal is focused (user can see it), 1s if not.
          // User can dismiss early with Esc via onDismissCheckmark.
          const signal = ctx.toolUseContext.abortController.signal
          checkmarkAbortHandler = () => {
            if (checkmarkTransitionTimer) {
              clearTimeout(checkmarkTransitionTimer)
              checkmarkTransitionTimer = undefined
              // Sibling Bash error can fire this (StreamingToolExecutor
              // cascades via siblingAbortController) — must drop the
              // cosmetic ✓ dialog or it blocks the next queued item.
              ctx.removeFromQueue()
            }
          }
          const checkmarkMs = getTerminalFocused() ? 3000 : 1000
          checkmarkTransitionTimer = setTimeout(() => {
            checkmarkTransitionTimer = undefined
            if (checkmarkAbortHandler) {
              signal.removeEventListener('abort', checkmarkAbortHandler)
              checkmarkAbortHandler = undefined
            }
            ctx.removeFromQueue()
          }, checkmarkMs)
          signal.addEventListener('abort', checkmarkAbortHandler, {
            once: true,
          })
        },
      },
    ).catch(error => {
      // Log classifier API errors for debugging but don't propagate them as interruptions
      // These errors can be network failures, rate limits, or model issues - not user cancellations
      logForDebugging(`Async classifier check failed: ${errorMessage(error)}`, {
        level: 'error',
      })
    })
  }
}
|
||||
|
||||
// --
|
||||
|
||||
export { handleInteractivePermission }
|
||||
export type { InteractivePermissionParams }
|
||||
@@ -0,0 +1,159 @@
|
||||
import { feature } from 'bun:bundle'
|
||||
import type { ContentBlockParam } from '@anthropic-ai/sdk/resources/messages.mjs'
|
||||
import type { PendingClassifierCheck } from '../../../types/permissions.js'
|
||||
import { isAgentSwarmsEnabled } from '../../../utils/agentSwarmsEnabled.js'
|
||||
import { toError } from '../../../utils/errors.js'
|
||||
import { logError } from '../../../utils/log.js'
|
||||
import type { PermissionDecision } from '../../../utils/permissions/PermissionResult.js'
|
||||
import type { PermissionUpdate } from '../../../utils/permissions/PermissionUpdateSchema.js'
|
||||
import {
|
||||
createPermissionRequest,
|
||||
isSwarmWorker,
|
||||
sendPermissionRequestViaMailbox,
|
||||
} from '../../../utils/swarm/permissionSync.js'
|
||||
import { registerPermissionCallback } from '../../useSwarmPermissionPoller.js'
|
||||
import type { PermissionContext } from '../PermissionContext.js'
|
||||
import { createResolveOnce } from '../PermissionContext.js'
|
||||
|
||||
/** Inputs for the swarm worker permission flow. */
type SwarmWorkerPermissionParams = {
  // Permission context carrying the tool, its input, and decision helpers.
  ctx: PermissionContext
  // Human-readable description included in the request sent to the leader.
  description: string
  // Optional pending classifier check; passed to ctx.tryClassifier for
  // auto-approval before the request is forwarded to the leader.
  pendingClassifierCheck?: PendingClassifierCheck | undefined
  // Input as modified by earlier permission stages; given to the classifier.
  updatedInput: Record<string, unknown> | undefined
  // Permission rule updates attached to the request as suggestions.
  suggestions: PermissionUpdate[] | undefined
}
|
||||
|
||||
/**
 * Handles the swarm worker permission flow.
 *
 * When running as a swarm worker:
 * 1. Tries classifier auto-approval for bash commands
 * 2. Forwards the permission request to the leader via mailbox
 * 3. Registers callbacks for when the leader responds
 * 4. Sets the pending indicator while waiting
 *
 * Returns a PermissionDecision if the classifier auto-approves,
 * or a Promise that resolves when the leader responds.
 * Returns null if swarms are not enabled or this is not a swarm worker,
 * so the caller can fall through to interactive handling.
 */
async function handleSwarmWorkerPermission(
  params: SwarmWorkerPermissionParams,
): Promise<PermissionDecision | null> {
  if (!isAgentSwarmsEnabled() || !isSwarmWorker()) {
    return null
  }

  const { ctx, description, updatedInput, suggestions } = params

  // For bash commands, try classifier auto-approval before forwarding to
  // the leader. Agents await the classifier result (rather than racing it
  // against user interaction like the main agent).
  const classifierResult = feature('BASH_CLASSIFIER')
    ? await ctx.tryClassifier?.(params.pendingClassifierCheck, updatedInput)
    : null
  if (classifierResult) {
    return classifierResult
  }

  // Forward permission request to the leader via mailbox
  try {
    // Clears the "waiting for leader" indicator from app state.
    const clearPendingRequest = (): void =>
      ctx.toolUseContext.setAppState(prev => ({
        ...prev,
        pendingWorkerRequest: null,
      }))

    const decision = await new Promise<PermissionDecision>(resolve => {
      // claim() atomically marks the promise as settled-in-progress so that
      // exactly one of onAllow / onReject / abort wins the race.
      const { resolve: resolveOnce, claim } = createResolveOnce(resolve)

      // Create the permission request
      const request = createPermissionRequest({
        toolName: ctx.tool.name,
        toolUseId: ctx.toolUseID,
        input: ctx.input,
        description,
        permissionSuggestions: suggestions,
      })

      // Register callback BEFORE sending the request to avoid race condition
      // where leader responds before callback is registered
      registerPermissionCallback({
        requestId: request.id,
        toolUseId: ctx.toolUseID,
        async onAllow(
          allowedInput: Record<string, unknown> | undefined,
          permissionUpdates: PermissionUpdate[],
          feedback?: string,
          contentBlocks?: ContentBlockParam[],
        ) {
          if (!claim()) return // atomic check-and-mark before await
          clearPendingRequest()

          // Merge the updated input with the original input. An empty object
          // from the leader is treated the same as "no updated input".
          const finalInput =
            allowedInput && Object.keys(allowedInput).length > 0
              ? allowedInput
              : ctx.input

          resolveOnce(
            await ctx.handleUserAllow(
              finalInput,
              permissionUpdates,
              feedback,
              undefined,
              contentBlocks,
            ),
          )
        },
        onReject(feedback?: string, contentBlocks?: ContentBlockParam[]) {
          if (!claim()) return
          clearPendingRequest()

          ctx.logDecision({
            decision: 'reject',
            source: { type: 'user_reject', hasFeedback: !!feedback },
          })

          resolveOnce(ctx.cancelAndAbort(feedback, undefined, contentBlocks))
        },
      })

      // Now that callback is registered, send the request to the leader
      void sendPermissionRequestViaMailbox(request)

      // Show visual indicator that we're waiting for leader approval
      ctx.toolUseContext.setAppState(prev => ({
        ...prev,
        pendingWorkerRequest: {
          toolName: ctx.tool.name,
          toolUseId: ctx.toolUseID,
          description,
        },
      }))

      // If the abort signal fires while waiting for the leader response,
      // resolve the promise with a cancel decision so it does not hang.
      // NOTE(review): this listener is not removed when the leader responds
      // first; { once: true } plus the claim() guard make a later abort a
      // no-op, but the listener lingers on the signal until it fires.
      ctx.toolUseContext.abortController.signal.addEventListener(
        'abort',
        () => {
          if (!claim()) return
          clearPendingRequest()
          ctx.logCancelled()
          resolveOnce(ctx.cancelAndAbort(undefined, true))
        },
        { once: true },
      )
    })

    return decision
  } catch (error) {
    // If swarm permission submission fails, fall back to local handling
    logError(toError(error))
    // Continue to local UI handling below
    return null
  }
}

// --

export { handleSwarmWorkerPermission }
export type { SwarmWorkerPermissionParams }
|
||||
@@ -0,0 +1,238 @@
|
||||
// Centralized analytics/telemetry logging for tool permission decisions.
|
||||
// All permission approve/reject events flow through logPermissionDecision(),
|
||||
// which fans out to Statsig analytics, OTel telemetry, and code-edit metrics.
|
||||
import { feature } from 'bun:bundle'
|
||||
import {
|
||||
type AnalyticsMetadata_I_VERIFIED_THIS_IS_NOT_CODE_OR_FILEPATHS,
|
||||
logEvent,
|
||||
} from 'src/services/analytics/index.js'
|
||||
import { sanitizeToolNameForAnalytics } from 'src/services/analytics/metadata.js'
|
||||
import { getCodeEditToolDecisionCounter } from '../../bootstrap/state.js'
|
||||
import type { Tool as ToolType, ToolUseContext } from '../../Tool.js'
|
||||
import { getLanguageName } from '../../utils/cliHighlight.js'
|
||||
import { SandboxManager } from '../../utils/sandbox/sandbox-adapter.js'
|
||||
import { logOTelEvent } from '../../utils/telemetry/events.js'
|
||||
import type {
|
||||
PermissionApprovalSource,
|
||||
PermissionRejectionSource,
|
||||
} from './PermissionContext.js'
|
||||
|
||||
// Everything needed to attribute a permission decision to one tool use.
type PermissionLogContext = {
  tool: ToolType
  // Raw tool input; only parsed (via the tool's schema) for code-edit metrics.
  input: unknown
  // Mutated by logPermissionDecision: the decision is stored on toolDecisions.
  toolUseContext: ToolUseContext
  messageId: string
  toolUseID: string
}

// Discriminated union: 'accept' pairs with approval sources, 'reject' with rejection sources.
// 'config' stands in for settings allowlist/denylist decisions, which carry
// no structured source object.
type PermissionDecisionArgs =
  | { decision: 'accept'; source: PermissionApprovalSource | 'config' }
  | { decision: 'reject'; source: PermissionRejectionSource | 'config' }
|
||||
|
||||
const CODE_EDITING_TOOLS = ['Edit', 'Write', 'NotebookEdit']
|
||||
|
||||
function isCodeEditingTool(toolName: string): boolean {
|
||||
return CODE_EDITING_TOOLS.includes(toolName)
|
||||
}
|
||||
|
||||
// Builds OTel counter attributes for code editing tools, enriching with
|
||||
// language when the tool's target file path can be extracted from input
|
||||
async function buildCodeEditToolAttributes(
|
||||
tool: ToolType,
|
||||
input: unknown,
|
||||
decision: 'accept' | 'reject',
|
||||
source: string,
|
||||
): Promise<Record<string, string>> {
|
||||
// Derive language from file path if the tool exposes one (e.g., Edit, Write)
|
||||
let language: string | undefined
|
||||
if (tool.getPath && input) {
|
||||
const parseResult = tool.inputSchema.safeParse(input)
|
||||
if (parseResult.success) {
|
||||
const filePath = tool.getPath(parseResult.data)
|
||||
if (filePath) {
|
||||
language = await getLanguageName(filePath)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return {
|
||||
decision,
|
||||
source,
|
||||
tool_name: tool.name,
|
||||
...(language && { language }),
|
||||
}
|
||||
}
|
||||
|
||||
// Flattens structured source into a string label for analytics/OTel events
|
||||
function sourceToString(
|
||||
source: PermissionApprovalSource | PermissionRejectionSource,
|
||||
): string {
|
||||
if (
|
||||
(feature('BASH_CLASSIFIER') || feature('TRANSCRIPT_CLASSIFIER')) &&
|
||||
source.type === 'classifier'
|
||||
) {
|
||||
return 'classifier'
|
||||
}
|
||||
switch (source.type) {
|
||||
case 'hook':
|
||||
return 'hook'
|
||||
case 'user':
|
||||
return source.permanent ? 'user_permanent' : 'user_temporary'
|
||||
case 'user_abort':
|
||||
return 'user_abort'
|
||||
case 'user_reject':
|
||||
return 'user_reject'
|
||||
default:
|
||||
return 'unknown'
|
||||
}
|
||||
}
|
||||
|
||||
function baseMetadata(
|
||||
messageId: string,
|
||||
toolName: string,
|
||||
waitMs: number | undefined,
|
||||
): { [key: string]: boolean | number | undefined } {
|
||||
return {
|
||||
messageID:
|
||||
messageId as AnalyticsMetadata_I_VERIFIED_THIS_IS_NOT_CODE_OR_FILEPATHS,
|
||||
toolName: sanitizeToolNameForAnalytics(toolName),
|
||||
sandboxEnabled: SandboxManager.isSandboxingEnabled(),
|
||||
// Only include wait time when the user was actually prompted (not auto-approved)
|
||||
...(waitMs !== undefined && { waiting_for_user_permission_ms: waitMs }),
|
||||
}
|
||||
}
|
||||
|
||||
// Emits a distinct analytics event name per approval source for funnel analysis
|
||||
function logApprovalEvent(
|
||||
tool: ToolType,
|
||||
messageId: string,
|
||||
source: PermissionApprovalSource | 'config',
|
||||
waitMs: number | undefined,
|
||||
): void {
|
||||
if (source === 'config') {
|
||||
// Auto-approved by allowlist in settings -- no user wait time
|
||||
logEvent(
|
||||
'tengu_tool_use_granted_in_config',
|
||||
baseMetadata(messageId, tool.name, undefined),
|
||||
)
|
||||
return
|
||||
}
|
||||
if (
|
||||
(feature('BASH_CLASSIFIER') || feature('TRANSCRIPT_CLASSIFIER')) &&
|
||||
source.type === 'classifier'
|
||||
) {
|
||||
logEvent(
|
||||
'tengu_tool_use_granted_by_classifier',
|
||||
baseMetadata(messageId, tool.name, waitMs),
|
||||
)
|
||||
return
|
||||
}
|
||||
switch (source.type) {
|
||||
case 'user':
|
||||
logEvent(
|
||||
source.permanent
|
||||
? 'tengu_tool_use_granted_in_prompt_permanent'
|
||||
: 'tengu_tool_use_granted_in_prompt_temporary',
|
||||
baseMetadata(messageId, tool.name, waitMs),
|
||||
)
|
||||
break
|
||||
case 'hook':
|
||||
logEvent('tengu_tool_use_granted_by_permission_hook', {
|
||||
...baseMetadata(messageId, tool.name, waitMs),
|
||||
permanent: source.permanent ?? false,
|
||||
})
|
||||
break
|
||||
default:
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
// Rejections share a single event name, differentiated by metadata fields
|
||||
function logRejectionEvent(
|
||||
tool: ToolType,
|
||||
messageId: string,
|
||||
source: PermissionRejectionSource | 'config',
|
||||
waitMs: number | undefined,
|
||||
): void {
|
||||
if (source === 'config') {
|
||||
// Denied by denylist in settings
|
||||
logEvent(
|
||||
'tengu_tool_use_denied_in_config',
|
||||
baseMetadata(messageId, tool.name, undefined),
|
||||
)
|
||||
return
|
||||
}
|
||||
logEvent('tengu_tool_use_rejected_in_prompt', {
|
||||
...baseMetadata(messageId, tool.name, waitMs),
|
||||
// Distinguish hook rejections from user rejections via separate fields
|
||||
...(source.type === 'hook'
|
||||
? { isHook: true }
|
||||
: {
|
||||
hasFeedback:
|
||||
source.type === 'user_reject' ? source.hasFeedback : false,
|
||||
}),
|
||||
})
|
||||
}
|
||||
|
||||
// Single entry point for all permission decision logging. Called by permission
// handlers after every approve/reject. Fans out to: analytics events, OTel
// telemetry, code-edit OTel counters, and toolUseContext decision storage.
//
// `permissionPromptStartTimeMs` is passed only when a prompt was actually
// shown to the user; when absent, the decision was automatic and no wait
// time is reported.
function logPermissionDecision(
  ctx: PermissionLogContext,
  args: PermissionDecisionArgs,
  permissionPromptStartTimeMs?: number,
): void {
  const { tool, input, toolUseContext, messageId, toolUseID } = ctx
  const { decision, source } = args

  // Wall-clock time the user spent on the prompt; undefined for auto decisions.
  const waiting_for_user_permission_ms =
    permissionPromptStartTimeMs !== undefined
      ? Date.now() - permissionPromptStartTimeMs
      : undefined

  // Log the analytics event. Branch on args.decision (not the destructured
  // copy) so TypeScript narrows args.source to the matching source union.
  if (args.decision === 'accept') {
    logApprovalEvent(
      tool,
      messageId,
      args.source,
      waiting_for_user_permission_ms,
    )
  } else {
    logRejectionEvent(
      tool,
      messageId,
      args.source,
      waiting_for_user_permission_ms,
    )
  }

  const sourceString = source === 'config' ? 'config' : sourceToString(source)

  // Track code editing tool metrics. Fire-and-forget: attribute building is
  // async because it may derive the target file's language from its path.
  if (isCodeEditingTool(tool.name)) {
    void buildCodeEditToolAttributes(tool, input, decision, sourceString).then(
      attributes => getCodeEditToolDecisionCounter()?.add(1, attributes),
    )
  }

  // Persist decision on the context so downstream code can inspect what happened
  if (!toolUseContext.toolDecisions) {
    toolUseContext.toolDecisions = new Map()
  }
  toolUseContext.toolDecisions.set(toolUseID, {
    source: sourceString,
    decision,
    timestamp: Date.now(),
  })

  void logOTelEvent('tool_decision', {
    decision,
    source: sourceString,
    tool_name: sanitizeToolNameForAnalytics(tool.name),
  })
}

export { isCodeEditingTool, buildCodeEditToolAttributes, logPermissionDecision }
export type { PermissionLogContext, PermissionDecisionArgs }
|
||||
@@ -0,0 +1,202 @@
|
||||
import Fuse from 'fuse.js'
|
||||
import { basename } from 'path'
|
||||
import type { SuggestionItem } from 'src/components/PromptInput/PromptInputFooterSuggestions.js'
|
||||
import { generateFileSuggestions } from 'src/hooks/fileSuggestions.js'
|
||||
import type { ServerResource } from 'src/services/mcp/types.js'
|
||||
import { getAgentColor } from 'src/tools/AgentTool/agentColorManager.js'
|
||||
import type { AgentDefinition } from 'src/tools/AgentTool/loadAgentsDir.js'
|
||||
import { truncateToWidth } from 'src/utils/format.js'
|
||||
import { logError } from 'src/utils/log.js'
|
||||
import type { Theme } from 'src/utils/theme.js'
|
||||
|
||||
// A file path suggestion (scored upstream by the native file index).
type FileSuggestionSource = {
  type: 'file'
  displayText: string
  description?: string
  path: string
  filename: string
  // Match score in 0-1, lower is better (see generateUnifiedSuggestions).
  score?: number
}

// An MCP server resource, displayed as `server:uri`.
type McpResourceSuggestionSource = {
  type: 'mcp_resource'
  displayText: string
  description: string
  server: string
  uri: string
  name: string
}

// A configured agent, suggested by its agentType.
type AgentSuggestionSource = {
  type: 'agent'
  displayText: string
  description: string
  agentType: string
  color?: keyof Theme
}

// Discriminated union over everything the unified typeahead can suggest.
type SuggestionSource =
  | FileSuggestionSource
  | McpResourceSuggestionSource
  | AgentSuggestionSource
|
||||
|
||||
/**
|
||||
* Creates a unified suggestion item from a source
|
||||
*/
|
||||
function createSuggestionFromSource(source: SuggestionSource): SuggestionItem {
|
||||
switch (source.type) {
|
||||
case 'file':
|
||||
return {
|
||||
id: `file-${source.path}`,
|
||||
displayText: source.displayText,
|
||||
description: source.description,
|
||||
}
|
||||
case 'mcp_resource':
|
||||
return {
|
||||
id: `mcp-resource-${source.server}__${source.uri}`,
|
||||
displayText: source.displayText,
|
||||
description: source.description,
|
||||
}
|
||||
case 'agent':
|
||||
return {
|
||||
id: `agent-${source.agentType}`,
|
||||
displayText: source.displayText,
|
||||
description: source.description,
|
||||
color: source.color,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Cap on how many merged suggestions are returned at once.
const MAX_UNIFIED_SUGGESTIONS = 15
// Display-width budget for a suggestion's description column.
const DESCRIPTION_MAX_LENGTH = 60

// Clamp a description to the display budget via truncateToWidth.
function truncateDescription(description: string): string {
  return truncateToWidth(description, DESCRIPTION_MAX_LENGTH)
}
|
||||
|
||||
function generateAgentSuggestions(
|
||||
agents: AgentDefinition[],
|
||||
query: string,
|
||||
showOnEmpty = false,
|
||||
): AgentSuggestionSource[] {
|
||||
if (!query && !showOnEmpty) {
|
||||
return []
|
||||
}
|
||||
|
||||
try {
|
||||
const agentSources: AgentSuggestionSource[] = agents.map(agent => ({
|
||||
type: 'agent' as const,
|
||||
displayText: `${agent.agentType} (agent)`,
|
||||
description: truncateDescription(agent.whenToUse),
|
||||
agentType: agent.agentType,
|
||||
color: getAgentColor(agent.agentType),
|
||||
}))
|
||||
|
||||
if (!query) {
|
||||
return agentSources
|
||||
}
|
||||
|
||||
const queryLower = query.toLowerCase()
|
||||
return agentSources.filter(
|
||||
agent =>
|
||||
agent.agentType.toLowerCase().includes(queryLower) ||
|
||||
agent.displayText.toLowerCase().includes(queryLower),
|
||||
)
|
||||
} catch (error) {
|
||||
logError(error as Error)
|
||||
return []
|
||||
}
|
||||
}
|
||||
|
||||
/**
 * Merges file, MCP resource, and agent suggestions into one ranked list.
 *
 * Files arrive pre-scored (0-1, lower is better — see the nucleo comment
 * below); MCP resources and agents are scored here with Fuse.js, whose
 * scores are on the same 0-1 lower-is-better scale, so all sources can be
 * sorted together.
 */
export async function generateUnifiedSuggestions(
  query: string,
  mcpResources: Record<string, ServerResource[]>,
  agents: AgentDefinition[],
  showOnEmpty = false,
): Promise<SuggestionItem[]> {
  if (!query && !showOnEmpty) {
    return []
  }

  const [fileSuggestions, agentSources] = await Promise.all([
    generateFileSuggestions(query, showOnEmpty),
    Promise.resolve(generateAgentSuggestions(agents, query, showOnEmpty)),
  ])

  const fileSources: FileSuggestionSource[] = fileSuggestions.map(
    suggestion => ({
      type: 'file' as const,
      displayText: suggestion.displayText,
      description: suggestion.description,
      path: suggestion.displayText, // Use displayText as path for files
      filename: basename(suggestion.displayText),
      score: (suggestion.metadata as { score?: number } | undefined)?.score,
    }),
  )

  const mcpSources: McpResourceSuggestionSource[] = Object.values(mcpResources)
    .flat()
    .map(resource => ({
      type: 'mcp_resource' as const,
      displayText: `${resource.server}:${resource.uri}`,
      description: truncateDescription(
        resource.description || resource.name || resource.uri,
      ),
      server: resource.server,
      uri: resource.uri,
      name: resource.name || resource.uri,
    }))

  // Empty query (showOnEmpty path): no ranking signal, return in source order.
  if (!query) {
    const allSources = [...fileSources, ...mcpSources, ...agentSources]
    return allSources
      .slice(0, MAX_UNIFIED_SUGGESTIONS)
      .map(createSuggestionFromSource)
  }

  const nonFileSources: SuggestionSource[] = [...mcpSources, ...agentSources]

  // Score non-file sources with Fuse.js
  // File sources are already scored by Rust/nucleo
  type ScoredSource = { source: SuggestionSource; score: number }
  const scoredResults: ScoredSource[] = []

  // Add file sources with their nucleo scores (already 0-1, lower is better)
  for (const fileSource of fileSources) {
    scoredResults.push({
      source: fileSource,
      score: fileSource.score ?? 0.5, // Default to middle score if missing
    })
  }

  // Score non-file sources with Fuse.js and add them
  if (nonFileSources.length > 0) {
    const fuse = new Fuse(nonFileSources, {
      includeScore: true,
      threshold: 0.6, // Allow more matches through, we'll sort by score
      keys: [
        { name: 'displayText', weight: 2 },
        { name: 'name', weight: 3 },
        { name: 'server', weight: 1 },
        { name: 'description', weight: 1 },
        { name: 'agentType', weight: 3 },
      ],
    })

    const fuseResults = fuse.search(query, { limit: MAX_UNIFIED_SUGGESTIONS })
    for (const result of fuseResults) {
      scoredResults.push({
        source: result.item,
        score: result.score ?? 0.5,
      })
    }
  }

  // Sort all results by score (lower is better) and return top results
  scoredResults.sort((a, b) => a.score - b.score)

  return scoredResults
    .slice(0, MAX_UNIFIED_SUGGESTIONS)
    .map(r => r.source)
    .map(createSuggestionFromSource)
}
|
||||
@@ -0,0 +1,17 @@
|
||||
import { useEffect } from 'react'
|
||||
import { isEnvTruthy } from '../utils/envUtils.js'
|
||||
|
||||
export function useAfterFirstRender(): void {
|
||||
useEffect(() => {
|
||||
if (
|
||||
process.env.USER_TYPE === 'ant' &&
|
||||
isEnvTruthy(process.env.CLAUDE_CODE_EXIT_AFTER_FIRST_RENDER)
|
||||
) {
|
||||
process.stderr.write(
|
||||
`\nStartup time: ${Math.round(process.uptime() * 1000)}ms\n`,
|
||||
)
|
||||
// eslint-disable-next-line custom-rules/no-process-exit
|
||||
process.exit(0)
|
||||
}
|
||||
}, [])
|
||||
}
|
||||
@@ -0,0 +1,84 @@
|
||||
import { useCallback, useState } from 'react'
|
||||
import { getIsNonInteractiveSession } from '../bootstrap/state.js'
|
||||
import { verifyApiKey } from '../services/api/claude.js'
|
||||
import {
|
||||
getAnthropicApiKeyWithSource,
|
||||
getApiKeyFromApiKeyHelper,
|
||||
isAnthropicAuthEnabled,
|
||||
isClaudeAISubscriber,
|
||||
} from '../utils/auth.js'
|
||||
|
||||
// Lifecycle states for API key verification.
export type VerificationStatus =
  | 'loading' // a key (or a configured apiKeyHelper) exists; not yet verified
  | 'valid'
  | 'invalid' // verifyApiKey reported the key as not valid
  | 'missing' // no key available from any source
  | 'error' // verification failed for another reason; see `error`

export type ApiKeyVerificationResult = {
  status: VerificationStatus
  // Re-runs verification (e.g. after the user fixes their key).
  reverify: () => Promise<void>
  // Set when status is 'error' (API failure or helper returned no key).
  error: Error | null
}
|
||||
|
||||
export function useApiKeyVerification(): ApiKeyVerificationResult {
|
||||
const [status, setStatus] = useState<VerificationStatus>(() => {
|
||||
if (!isAnthropicAuthEnabled() || isClaudeAISubscriber()) {
|
||||
return 'valid'
|
||||
}
|
||||
// Use skipRetrievingKeyFromApiKeyHelper to avoid executing apiKeyHelper
|
||||
// before trust dialog is shown (security: prevents RCE via settings.json)
|
||||
const { key, source } = getAnthropicApiKeyWithSource({
|
||||
skipRetrievingKeyFromApiKeyHelper: true,
|
||||
})
|
||||
// If apiKeyHelper is configured, we have a key source even though we
|
||||
// haven't executed it yet - return 'loading' to indicate we'll verify later
|
||||
if (key || source === 'apiKeyHelper') {
|
||||
return 'loading'
|
||||
}
|
||||
return 'missing'
|
||||
})
|
||||
const [error, setError] = useState<Error | null>(null)
|
||||
|
||||
const verify = useCallback(async (): Promise<void> => {
|
||||
if (!isAnthropicAuthEnabled() || isClaudeAISubscriber()) {
|
||||
setStatus('valid')
|
||||
return
|
||||
}
|
||||
// Warm the apiKeyHelper cache (no-op if not configured), then read from
|
||||
// all sources. getAnthropicApiKeyWithSource() reads the now-warm cache.
|
||||
await getApiKeyFromApiKeyHelper(getIsNonInteractiveSession())
|
||||
const { key: apiKey, source } = getAnthropicApiKeyWithSource()
|
||||
if (!apiKey) {
|
||||
if (source === 'apiKeyHelper') {
|
||||
setStatus('error')
|
||||
setError(new Error('API key helper did not return a valid key'))
|
||||
return
|
||||
}
|
||||
const newStatus = 'missing'
|
||||
setStatus(newStatus)
|
||||
return
|
||||
}
|
||||
|
||||
try {
|
||||
const isValid = await verifyApiKey(apiKey, false)
|
||||
const newStatus = isValid ? 'valid' : 'invalid'
|
||||
setStatus(newStatus)
|
||||
return
|
||||
} catch (error) {
|
||||
// This happens when there an error response from the API but it's not an invalid API key error
|
||||
// In this case, we still mark the API key as invalid - but we also log the error so we can
|
||||
// display it to the user to be more helpful
|
||||
setError(error as Error)
|
||||
const newStatus = 'error'
|
||||
setStatus(newStatus)
|
||||
return
|
||||
}
|
||||
}, [])
|
||||
|
||||
return {
|
||||
status,
|
||||
reverify: verify,
|
||||
error,
|
||||
}
|
||||
}
|
||||
File diff suppressed because one or more lines are too long
@@ -0,0 +1,250 @@
|
||||
import { randomUUID } from 'crypto'
|
||||
import {
|
||||
type RefObject,
|
||||
useCallback,
|
||||
useEffect,
|
||||
useLayoutEffect,
|
||||
useRef,
|
||||
} from 'react'
|
||||
import {
|
||||
createHistoryAuthCtx,
|
||||
fetchLatestEvents,
|
||||
fetchOlderEvents,
|
||||
type HistoryAuthCtx,
|
||||
type HistoryPage,
|
||||
} from '../assistant/sessionHistory.js'
|
||||
import type { ScrollBoxHandle } from '../ink/components/ScrollBox.js'
|
||||
import type { RemoteSessionConfig } from '../remote/RemoteSessionManager.js'
|
||||
import { convertSDKMessage } from '../remote/sdkMessageAdapter.js'
|
||||
import type { Message, SystemInformationalMessage } from '../types/message.js'
|
||||
import { logForDebugging } from '../utils/debug.js'
|
||||
|
||||
type Props = {
  /** Gated on viewerOnly — non-viewer sessions have no remote history to page. */
  config: RemoteSessionConfig | undefined
  /** REPL message-list state setter; fetched pages are prepended to it. */
  setMessages: React.Dispatch<React.SetStateAction<Message[]>>
  /** Handle to the scroll container; used for anchoring and viewport checks. */
  scrollRef: RefObject<ScrollBoxHandle | null>
  /** Called after prepend from the layout effect with message count + height
   * delta. Lets useUnseenDivider shift dividerIndex + dividerYRef. */
  onPrepend?: (indexDelta: number, heightDelta: number) => void
}

type Result = {
  /** Trigger for ScrollKeybindingHandler's onScroll composition. */
  maybeLoadOlder: (handle: ScrollBoxHandle) => void
}
|
||||
|
||||
/** Fire loadOlder when scrolled within this many rows of the top. */
const PREFETCH_THRESHOLD_ROWS = 40

/** Max chained page loads to fill the viewport on mount. Bounds the loop if
 * events convert to zero visible messages (everything filtered). */
const MAX_FILL_PAGES = 10

// User-visible texts for the sentinel system message rendered at index 0.
const SENTINEL_LOADING = 'loading older messages…'
const SENTINEL_LOADING_FAILED =
  'failed to load older messages — scroll up to retry'
const SENTINEL_START = 'start of session'
|
||||
|
||||
/** Convert a HistoryPage to REPL Message[] using the same opts as viewer mode. */
|
||||
function pageToMessages(page: HistoryPage): Message[] {
|
||||
const out: Message[] = []
|
||||
for (const ev of page.events) {
|
||||
const c = convertSDKMessage(ev, {
|
||||
convertUserTextMessages: true,
|
||||
convertToolResults: true,
|
||||
})
|
||||
if (c.type === 'message') out.push(c.message)
|
||||
}
|
||||
return out
|
||||
}
|
||||
|
||||
/**
 * Lazy-load `claude assistant` history on scroll-up.
 *
 * On mount: fetch newest page via anchor_to_latest, prepend to messages.
 * On scroll-up near top: fetch next-older page via before_id, prepend with
 * scroll anchoring (viewport stays put).
 *
 * No-op unless config.viewerOnly. REPL only calls this hook inside a
 * feature('KAIROS') gate, so build-time elimination is handled there.
 */
export function useAssistantHistory({
  config,
  setMessages,
  scrollRef,
  onPrepend,
}: Props): Result {
  const enabled = config?.viewerOnly === true

  // Cursor state: ref-only (no re-render on cursor change). `null` = no
  // older pages. `undefined` = initial page not fetched yet.
  const cursorRef = useRef<string | null | undefined>(undefined)
  // Auth context for the history API; populated once by the mount effect.
  const ctxRef = useRef<HistoryAuthCtx | null>(null)
  // Guards against concurrent page loads.
  const inflightRef = useRef(false)

  // Scroll-anchor: snapshot height + prepended count before setMessages;
  // compensate in useLayoutEffect after React commits. getFreshScrollHeight
  // reads Yoga directly so the value is correct post-commit.
  const anchorRef = useRef<{ beforeHeight: number; count: number } | null>(null)

  // Fill-viewport chaining: after the initial page commits, if content doesn't
  // fill the viewport yet, load another page. Self-chains via the layout effect
  // until filled or the budget runs out. Budget set once on initial load; user
  // scroll-ups don't need it (maybeLoadOlder re-fires on next wheel event).
  const fillBudgetRef = useRef(0)

  // Stable sentinel UUID — reused across swaps so virtual-scroll treats it
  // as one item (text-only mutation, not remove+insert).
  const sentinelUuidRef = useRef(randomUUID())

  // Builds the index-0 system message that shows paging status.
  function mkSentinel(text: string): SystemInformationalMessage {
    return {
      type: 'system',
      subtype: 'informational',
      content: text,
      isMeta: false,
      timestamp: new Date().toISOString(),
      uuid: sentinelUuidRef.current,
      level: 'info',
    }
  }

  /** Prepend a page at the front, with scroll-anchor snapshot for non-initial.
   * Replaces the sentinel (always at index 0 when present) in-place. */
  const prepend = useCallback(
    (page: HistoryPage, isInitial: boolean) => {
      const msgs = pageToMessages(page)
      // firstId becomes the before_id cursor for the next older fetch.
      cursorRef.current = page.hasMore ? page.firstId : null

      if (!isInitial) {
        const s = scrollRef.current
        anchorRef.current = s
          ? { beforeHeight: s.getFreshScrollHeight(), count: msgs.length }
          : null
      }

      // When there are no more pages, pin a "start of session" sentinel.
      const sentinel = page.hasMore ? null : mkSentinel(SENTINEL_START)
      setMessages(prev => {
        // Drop existing sentinel (index 0, known stable UUID — O(1)).
        const base =
          prev[0]?.uuid === sentinelUuidRef.current ? prev.slice(1) : prev
        return sentinel ? [sentinel, ...msgs, ...base] : [...msgs, ...base]
      })

      logForDebugging(
        `[useAssistantHistory] ${isInitial ? 'initial' : 'older'} page: ${msgs.length} msgs (raw ${page.events.length}), hasMore=${page.hasMore}`,
      )
    },
    // eslint-disable-next-line react-hooks/exhaustive-deps -- scrollRef is a stable ref; mkSentinel reads refs only
    [setMessages],
  )

  // Initial fetch on mount — best-effort (auth failure silently disables paging).
  useEffect(() => {
    if (!enabled || !config) return
    let cancelled = false
    void (async () => {
      const ctx = await createHistoryAuthCtx(config.sessionId).catch(() => null)
      if (!ctx || cancelled) return
      ctxRef.current = ctx
      const page = await fetchLatestEvents(ctx)
      if (cancelled || !page) return
      fillBudgetRef.current = MAX_FILL_PAGES
      prepend(page, true)
    })()
    return () => {
      cancelled = true
    }
    // config identity is stable (created once in main.tsx, never recreated)
    // eslint-disable-next-line react-hooks/exhaustive-deps
  }, [enabled])

  // Fetches the next-older page and prepends it; no-op while a load is
  // in flight or once the history is exhausted.
  const loadOlder = useCallback(async () => {
    if (!enabled || inflightRef.current) return
    const cursor = cursorRef.current
    const ctx = ctxRef.current
    if (!cursor || !ctx) return // null=exhausted, undefined=initial pending
    inflightRef.current = true
    // Swap sentinel to "loading…" — O(1) slice since sentinel is at index 0.
    setMessages(prev => {
      const base =
        prev[0]?.uuid === sentinelUuidRef.current ? prev.slice(1) : prev
      return [mkSentinel(SENTINEL_LOADING), ...base]
    })
    try {
      const page = await fetchOlderEvents(ctx, cursor)
      if (!page) {
        // Fetch failed — revert sentinel back to a retry placeholder so the user
        // can retry on next scroll-up. Cursor is preserved (not nulled out).
        setMessages(prev => {
          const base =
            prev[0]?.uuid === sentinelUuidRef.current ? prev.slice(1) : prev
          return [mkSentinel(SENTINEL_LOADING_FAILED), ...base]
        })
        return
      }
      prepend(page, false)
    } finally {
      inflightRef.current = false
    }
    // eslint-disable-next-line react-hooks/exhaustive-deps -- mkSentinel reads refs only
  }, [enabled, prepend, setMessages])

  // Scroll-anchor compensation — after React commits the prepended items,
  // shift scrollTop by the height delta so the viewport stays put. Also
  // fire onPrepend here (not in prepend()) so dividerIndex + baseline ref
  // are shifted with the ACTUAL height delta, not an estimate.
  // No deps: runs every render; cheap no-op when anchorRef is null.
  useLayoutEffect(() => {
    const anchor = anchorRef.current
    if (anchor === null) return
    anchorRef.current = null
    const s = scrollRef.current
    if (!s || s.isSticky()) return // sticky = pinned bottom; prepend is invisible
    const delta = s.getFreshScrollHeight() - anchor.beforeHeight
    if (delta > 0) s.scrollBy(delta)
    onPrepend?.(anchor.count, delta)
  })

  // Fill-viewport chain: after paint, if content doesn't exceed the viewport,
  // load another page. Runs as useEffect (not layout effect) so Ink has
  // painted and scrollViewportHeight is populated. Self-chains via next
  // render's effect; budget caps the chain.
  //
  // The ScrollBox content wrapper has flexGrow:1 flexShrink:0 — it's clamped
  // to ≥ viewport. So `content < viewport` is never true; `<=` detects "no
  // overflow yet" correctly. Stops once there's at least something to scroll.
  useEffect(() => {
    if (
      fillBudgetRef.current <= 0 ||
      !cursorRef.current ||
      inflightRef.current
    ) {
      return
    }
    const s = scrollRef.current
    if (!s) return
    const contentH = s.getFreshScrollHeight()
    const viewH = s.getViewportHeight()
    logForDebugging(
      `[useAssistantHistory] fill-check: content=${contentH} viewport=${viewH} budget=${fillBudgetRef.current}`,
    )
    if (contentH <= viewH) {
      fillBudgetRef.current--
      void loadOlder()
    } else {
      fillBudgetRef.current = 0
    }
  })

  // Trigger wrapper for onScroll composition in REPL.
  const maybeLoadOlder = useCallback(
    (handle: ScrollBoxHandle) => {
      if (handle.getScrollTop() < PREFETCH_THRESHOLD_ROWS) void loadOlder()
    },
    [loadOlder],
  )

  return { maybeLoadOlder }
}
|
||||
@@ -0,0 +1,125 @@
|
||||
import { feature } from 'bun:bundle'
|
||||
import { useEffect, useRef } from 'react'
|
||||
import {
|
||||
getTerminalFocusState,
|
||||
subscribeTerminalFocus,
|
||||
} from '../ink/terminal-focus-state.js'
|
||||
import { getFeatureValue_CACHED_MAY_BE_STALE } from '../services/analytics/growthbook.js'
|
||||
import { generateAwaySummary } from '../services/awaySummary.js'
|
||||
import type { Message } from '../types/message.js'
|
||||
import { createAwaySummaryMessage } from '../utils/messages.js'
|
||||
|
||||
const BLUR_DELAY_MS = 5 * 60_000
|
||||
|
||||
type SetMessages = (updater: (prev: Message[]) => Message[]) => void
|
||||
|
||||
function hasSummarySinceLastUserTurn(messages: readonly Message[]): boolean {
|
||||
for (let i = messages.length - 1; i >= 0; i--) {
|
||||
const m = messages[i]!
|
||||
if (m.type === 'user' && !m.isMeta && !m.isCompactSummary) return false
|
||||
if (m.type === 'system' && m.subtype === 'away_summary') return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
/**
 * Appends a "while you were away" summary message after the terminal has been
 * blurred for 5 minutes. Fires only when (a) 5min since blur, (b) no turn in
 * progress, and (c) no existing away_summary since the last user message.
 *
 * Focus state 'unknown' (terminal doesn't support DECSET 1004) is a no-op.
 */
export function useAwaySummary(
  messages: readonly Message[],
  setMessages: SetMessages,
  isLoading: boolean,
): void {
  // Armed blur timer; null when no timer is pending.
  const timerRef = useRef<ReturnType<typeof setTimeout> | null>(null)
  // AbortController for an in-flight summary generation; aborted on refocus.
  const abortRef = useRef<AbortController | null>(null)
  // Latest props mirrored into refs so the effect's closures never go stale.
  const messagesRef = useRef(messages)
  const isLoadingRef = useRef(isLoading)
  // Set when the blur timer fired mid-turn; drained by the isLoading effect below.
  const pendingRef = useRef(false)
  // Bridge so the isLoading effect can invoke the generate() closure that is
  // created inside the subscription effect.
  const generateRef = useRef<(() => Promise<void>) | null>(null)

  messagesRef.current = messages
  isLoadingRef.current = isLoading

  // 3P default: false
  const gbEnabled = getFeatureValue_CACHED_MAY_BE_STALE(
    'tengu_sedge_lantern',
    false,
  )

  useEffect(() => {
    // Both the build-time feature flag and the remote flag must be enabled.
    if (!feature('AWAY_SUMMARY')) return
    if (!gbEnabled) return

    // Cancel the armed blur timer, if any.
    function clearTimer(): void {
      if (timerRef.current !== null) {
        clearTimeout(timerRef.current)
        timerRef.current = null
      }
    }

    // Abort any in-flight summary generation.
    function abortInFlight(): void {
      abortRef.current?.abort()
      abortRef.current = null
    }

    // Generate and append the away summary. Skipped when a summary already
    // exists since the last user turn; the result is dropped if the request
    // was aborted or generation returned null.
    async function generate(): Promise<void> {
      pendingRef.current = false
      if (hasSummarySinceLastUserTurn(messagesRef.current)) return
      abortInFlight()
      const controller = new AbortController()
      abortRef.current = controller
      const text = await generateAwaySummary(
        messagesRef.current,
        controller.signal,
      )
      if (controller.signal.aborted || text === null) return
      setMessages(prev => [...prev, createAwaySummaryMessage(text)])
    }

    // Blur timer elapsed. If a turn is still running, defer via pendingRef;
    // otherwise generate immediately.
    function onBlurTimerFire(): void {
      timerRef.current = null
      if (isLoadingRef.current) {
        pendingRef.current = true
        return
      }
      void generate()
    }

    // Arm the timer on blur; on focus, cancel everything (timer, in-flight
    // generation, and the deferred pending flag).
    function onFocusChange(): void {
      const state = getTerminalFocusState()
      if (state === 'blurred') {
        clearTimer()
        timerRef.current = setTimeout(onBlurTimerFire, BLUR_DELAY_MS)
      } else if (state === 'focused') {
        clearTimer()
        abortInFlight()
        pendingRef.current = false
      }
      // 'unknown' → no-op
    }

    const unsubscribe = subscribeTerminalFocus(onFocusChange)
    // Handle the case where we're already blurred when the effect mounts
    onFocusChange()
    generateRef.current = generate

    return () => {
      unsubscribe()
      clearTimer()
      abortInFlight()
      generateRef.current = null
    }
  }, [gbEnabled, setMessages])

  // Timer fired mid-turn → fire when turn ends (if still blurred)
  useEffect(() => {
    if (isLoading) return
    if (!pendingRef.current) return
    if (getTerminalFocusState() !== 'blurred') return
    void generateRef.current?.()
  }, [isLoading])
}
|
||||
@@ -0,0 +1,251 @@
|
||||
import { useEffect, useRef } from 'react'
|
||||
import { KeyboardEvent } from '../ink/events/keyboard-event.js'
|
||||
// eslint-disable-next-line custom-rules/prefer-use-keybindings -- backward-compat bridge until REPL wires handleKeyDown to <Box onKeyDown>
|
||||
import { useInput } from '../ink.js'
|
||||
import {
|
||||
type AppState,
|
||||
useAppState,
|
||||
useSetAppState,
|
||||
} from '../state/AppState.js'
|
||||
import {
|
||||
enterTeammateView,
|
||||
exitTeammateView,
|
||||
} from '../state/teammateViewHelpers.js'
|
||||
import {
|
||||
getRunningTeammatesSorted,
|
||||
InProcessTeammateTask,
|
||||
} from '../tasks/InProcessTeammateTask/InProcessTeammateTask.js'
|
||||
import {
|
||||
type InProcessTeammateTaskState,
|
||||
isInProcessTeammateTask,
|
||||
} from '../tasks/InProcessTeammateTask/types.js'
|
||||
import { isBackgroundTask } from '../tasks/types.js'
|
||||
|
||||
// Step teammate selection by delta, wrapping across leader(-1)..teammates(0..n-1)..hide(n).
|
||||
// First step from a collapsed tree expands it and parks on leader.
|
||||
function stepTeammateSelection(
|
||||
delta: 1 | -1,
|
||||
setAppState: (updater: (prev: AppState) => AppState) => void,
|
||||
): void {
|
||||
setAppState(prev => {
|
||||
const currentCount = getRunningTeammatesSorted(prev.tasks).length
|
||||
if (currentCount === 0) return prev
|
||||
|
||||
if (prev.expandedView !== 'teammates') {
|
||||
return {
|
||||
...prev,
|
||||
expandedView: 'teammates' as const,
|
||||
viewSelectionMode: 'selecting-agent',
|
||||
selectedIPAgentIndex: -1,
|
||||
}
|
||||
}
|
||||
|
||||
const maxIdx = currentCount // hide row
|
||||
const cur = prev.selectedIPAgentIndex
|
||||
const next =
|
||||
delta === 1
|
||||
? cur >= maxIdx
|
||||
? -1
|
||||
: cur + 1
|
||||
: cur <= -1
|
||||
? maxIdx
|
||||
: cur - 1
|
||||
return {
|
||||
...prev,
|
||||
selectedIPAgentIndex: next,
|
||||
viewSelectionMode: 'selecting-agent',
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
/**
 * Custom hook that handles Shift+Up/Down keyboard navigation for background tasks.
 * When teammates (swarm) are present, navigates between leader and teammates.
 * When only non-teammate background tasks exist, opens the background tasks dialog.
 * Also handles Enter to confirm selection, 'f' to view transcript, and 'k' to kill.
 */
export function useBackgroundTaskNavigation(options?: {
  onOpenBackgroundTasks?: () => void
}): { handleKeyDown: (e: KeyboardEvent) => void } {
  const tasks = useAppState(s => s.tasks)
  const viewSelectionMode = useAppState(s => s.viewSelectionMode)
  const viewingAgentTaskId = useAppState(s => s.viewingAgentTaskId)
  const selectedIPAgentIndex = useAppState(s => s.selectedIPAgentIndex)
  const setAppState = useSetAppState()

  // Filter to running teammates and sort alphabetically to match TeammateSpinnerTree display
  const teammateTasks = getRunningTeammatesSorted(tasks)
  const teammateCount = teammateTasks.length

  // Check for non-teammate background tasks (local_agent, local_bash, etc.)
  const hasNonTeammateBackgroundTasks = Object.values(tasks).some(
    t => isBackgroundTask(t) && t.type !== 'in_process_teammate',
  )

  // Track previous teammate count to detect when teammates are removed
  const prevTeammateCountRef = useRef<number>(teammateCount)

  // Clamp selection index if teammates are removed or reset when count becomes 0
  useEffect(() => {
    const prevCount = prevTeammateCountRef.current
    prevTeammateCountRef.current = teammateCount

    // Recompute the count from the freshest state inside the updater rather
    // than closing over teammateCount, which may lag behind prev.tasks.
    setAppState(prev => {
      const currentTeammates = getRunningTeammatesSorted(prev.tasks)
      const currentCount = currentTeammates.length

      // When teammates are removed (count goes from >0 to 0), reset selection
      // Only reset if we previously had teammates (not on initial mount with 0)
      // Don't clobber viewSelectionMode if actively viewing a teammate transcript —
      // the user may be reviewing a completed teammate and needs escape to exit
      if (
        currentCount === 0 &&
        prevCount > 0 &&
        prev.selectedIPAgentIndex !== -1
      ) {
        if (prev.viewSelectionMode === 'viewing-agent') {
          return {
            ...prev,
            selectedIPAgentIndex: -1,
          }
        }
        return {
          ...prev,
          selectedIPAgentIndex: -1,
          viewSelectionMode: 'none',
        }
      }

      // Clamp if index is out of bounds
      // Max valid index is currentCount (the "hide" row) when spinner tree is shown
      const maxIndex =
        prev.expandedView === 'teammates' ? currentCount : currentCount - 1
      if (currentCount > 0 && prev.selectedIPAgentIndex > maxIndex) {
        return {
          ...prev,
          selectedIPAgentIndex: maxIndex,
        }
      }

      // No change needed — return the same object so subscribers don't re-render.
      return prev
    })
  }, [teammateCount, setAppState])

  // Get the selected teammate's task info.
  // Returns null for the leader row (-1) and the "hide" row (== count),
  // since teammateTasks[-1] / teammateTasks[count] are both undefined.
  const getSelectedTeammate = (): {
    taskId: string
    task: InProcessTeammateTaskState
  } | null => {
    if (teammateCount === 0) return null
    const selectedIndex = selectedIPAgentIndex
    const task = teammateTasks[selectedIndex]
    if (!task) return null

    return { taskId: task.id, task }
  }

  const handleKeyDown = (e: KeyboardEvent): void => {
    // Escape in viewing mode:
    // - If teammate is running: abort current work only (stops current turn, teammate stays alive)
    // - If teammate is not running (completed/killed/failed): exit the view back to leader
    if (e.key === 'escape' && viewSelectionMode === 'viewing-agent') {
      e.preventDefault()
      const taskId = viewingAgentTaskId
      if (taskId) {
        const task = tasks[taskId]
        if (isInProcessTeammateTask(task) && task.status === 'running') {
          // Abort currentWorkAbortController (stops current turn) NOT abortController (kills teammate)
          task.currentWorkAbortController?.abort()
          return
        }
      }
      // Teammate is not running or task doesn't exist — exit the view
      exitTeammateView(setAppState)
      return
    }

    // Escape in selection mode: exit selection without aborting leader
    if (e.key === 'escape' && viewSelectionMode === 'selecting-agent') {
      e.preventDefault()
      setAppState(prev => ({
        ...prev,
        viewSelectionMode: 'none',
        selectedIPAgentIndex: -1,
      }))
      return
    }

    // Shift+Up/Down for teammate transcript switching (with wrapping)
    // Index -1 represents the leader, 0+ are teammates
    // When showSpinnerTree is true, index === teammateCount is the "hide" row
    if (e.shift && (e.key === 'up' || e.key === 'down')) {
      e.preventDefault()
      if (teammateCount > 0) {
        stepTeammateSelection(e.key === 'down' ? 1 : -1, setAppState)
      } else if (hasNonTeammateBackgroundTasks) {
        options?.onOpenBackgroundTasks?.()
      }
      return
    }

    // 'f' to view selected teammate's transcript (only in selecting mode)
    if (
      e.key === 'f' &&
      viewSelectionMode === 'selecting-agent' &&
      teammateCount > 0
    ) {
      e.preventDefault()
      const selected = getSelectedTeammate()
      if (selected) {
        enterTeammateView(selected.taskId, setAppState)
      }
      return
    }

    // Enter to confirm selection (only when in selecting mode)
    if (e.key === 'return' && viewSelectionMode === 'selecting-agent') {
      e.preventDefault()
      if (selectedIPAgentIndex === -1) {
        // Leader row — return to the leader's view
        exitTeammateView(setAppState)
      } else if (selectedIPAgentIndex >= teammateCount) {
        // "Hide" row selected - collapse the spinner tree
        setAppState(prev => ({
          ...prev,
          expandedView: 'none' as const,
          viewSelectionMode: 'none',
          selectedIPAgentIndex: -1,
        }))
      } else {
        const selected = getSelectedTeammate()
        if (selected) {
          enterTeammateView(selected.taskId, setAppState)
        }
      }
      return
    }

    // k to kill selected teammate (only in selecting mode)
    if (
      e.key === 'k' &&
      viewSelectionMode === 'selecting-agent' &&
      selectedIPAgentIndex >= 0
    ) {
      e.preventDefault()
      const selected = getSelectedTeammate()
      if (selected && selected.task.status === 'running') {
        void InProcessTeammateTask.kill(selected.taskId, setAppState)
      }
      return
    }
  }

  // Backward-compat bridge: REPL.tsx doesn't yet wire handleKeyDown to
  // <Box onKeyDown>. Subscribe via useInput and adapt InputEvent →
  // KeyboardEvent until the consumer is migrated (separate PR).
  // TODO(onKeyDown-migration): remove once REPL passes handleKeyDown.
  useInput((_input, _key, event) => {
    handleKeyDown(new KeyboardEvent(event.keypress))
  })

  return { handleKeyDown }
}
|
||||
@@ -0,0 +1,34 @@
|
||||
import { type DOMElement, useAnimationFrame, useTerminalFocus } from '../ink.js'
|
||||
|
||||
const BLINK_INTERVAL_MS = 600
|
||||
|
||||
/**
|
||||
* Hook for synchronized blinking animations that pause when offscreen.
|
||||
*
|
||||
* Returns a ref to attach to the animated element and the current blink state.
|
||||
* All instances blink together because they derive state from the same
|
||||
* animation clock. The clock only runs when at least one subscriber is visible.
|
||||
* Pauses when the terminal is blurred.
|
||||
*
|
||||
* @param enabled - Whether blinking is active
|
||||
* @returns [ref, isVisible] - Ref to attach to element, true when visible in blink cycle
|
||||
*
|
||||
* @example
|
||||
* function BlinkingDot({ shouldAnimate }) {
|
||||
* const [ref, isVisible] = useBlink(shouldAnimate)
|
||||
* return <Box ref={ref}>{isVisible ? '●' : ' '}</Box>
|
||||
* }
|
||||
*/
|
||||
export function useBlink(
|
||||
enabled: boolean,
|
||||
intervalMs: number = BLINK_INTERVAL_MS,
|
||||
): [ref: (element: DOMElement | null) => void, isVisible: boolean] {
|
||||
const focused = useTerminalFocus()
|
||||
const [ref, time] = useAnimationFrame(enabled && focused ? intervalMs : null)
|
||||
|
||||
if (!enabled || !focused) return [ref, true]
|
||||
|
||||
// Derive blink state from time - all instances see the same time so they sync
|
||||
const isVisible = Math.floor(time / intervalMs) % 2 === 0
|
||||
return [ref, isVisible]
|
||||
}
|
||||
File diff suppressed because one or more lines are too long
@@ -0,0 +1,276 @@
|
||||
/**
|
||||
* CancelRequestHandler component for handling cancel/escape keybinding.
|
||||
*
|
||||
* Must be rendered inside KeybindingSetup to have access to the keybinding context.
|
||||
* This component renders nothing - it just registers the cancel keybinding handler.
|
||||
*/
|
||||
import { useCallback, useRef } from 'react'
|
||||
import { logEvent } from 'src/services/analytics/index.js'
|
||||
import type { AnalyticsMetadata_I_VERIFIED_THIS_IS_NOT_CODE_OR_FILEPATHS } from 'src/services/analytics/metadata.js'
|
||||
import {
|
||||
useAppState,
|
||||
useAppStateStore,
|
||||
useSetAppState,
|
||||
} from 'src/state/AppState.js'
|
||||
import { isVimModeEnabled } from '../components/PromptInput/utils.js'
|
||||
import type { ToolUseConfirm } from '../components/permissions/PermissionRequest.js'
|
||||
import type { SpinnerMode } from '../components/Spinner/types.js'
|
||||
import { useNotifications } from '../context/notifications.js'
|
||||
import { useIsOverlayActive } from '../context/overlayContext.js'
|
||||
import { useCommandQueue } from '../hooks/useCommandQueue.js'
|
||||
import { getShortcutDisplay } from '../keybindings/shortcutFormat.js'
|
||||
import { useKeybinding } from '../keybindings/useKeybinding.js'
|
||||
import type { Screen } from '../screens/REPL.js'
|
||||
import { exitTeammateView } from '../state/teammateViewHelpers.js'
|
||||
import {
|
||||
killAllRunningAgentTasks,
|
||||
markAgentsNotified,
|
||||
} from '../tasks/LocalAgentTask/LocalAgentTask.js'
|
||||
import type { PromptInputMode, VimMode } from '../types/textInputTypes.js'
|
||||
import {
|
||||
clearCommandQueue,
|
||||
enqueuePendingNotification,
|
||||
hasCommandsInQueue,
|
||||
} from '../utils/messageQueueManager.js'
|
||||
import { emitTaskTerminatedSdk } from '../utils/sdkEventQueue.js'
|
||||
|
||||
/** Time window in ms during which a second press kills all background agents. */
|
||||
const KILL_AGENTS_CONFIRM_WINDOW_MS = 3000
|
||||
|
||||
/** Props for CancelRequestHandler. */
type CancelRequestHandlerProps = {
  /** Functional updater for the queue of pending tool-use confirmations (cleared on cancel). */
  setToolUseConfirmQueue: (
    f: (toolUseConfirmQueue: ToolUseConfirm[]) => ToolUseConfirm[],
  ) => void
  /** Invoked to cancel the current request/turn. */
  onCancel: () => void
  /** Invoked after background agents have been killed. */
  onAgentsKilled: () => void
  /** True while the message selector is visible; cancel is inactive then. */
  isMessageSelectorVisible: boolean
  /** Current REPL screen; cancel is inactive on the transcript screen. */
  screen: Screen
  /** Abort signal of the running task; unaborted means there is a task to cancel. */
  abortSignal?: AbortSignal
  /** Pops one command off the queue when Claude is idle. */
  popCommandFromQueue?: () => void
  /** Current vim mode; cancel is inactive while vim is enabled and in INSERT. */
  vimMode?: VimMode
  /** True while a local JSX command handles its own input. */
  isLocalJSXCommand?: boolean
  /** True while history search owns escape handling. */
  isSearchingHistory?: boolean
  /** True while the help overlay is open. */
  isHelpOpen?: boolean
  /** Prompt input mode; in non-'prompt' modes with empty input, escape exits the mode instead. */
  inputMode?: PromptInputMode
  /** Current input text; combined with inputMode to detect empty special-mode input. */
  inputValue?: string
  /** Spinner mode, forwarded as analytics metadata on cancel events. */
  streamMode?: SpinnerMode
}
|
||||
|
||||
/**
 * Component that handles cancel requests via keybinding.
 * Renders null but registers the 'chat:cancel' keybinding handler.
 */
export function CancelRequestHandler(props: CancelRequestHandlerProps): null {
  const {
    setToolUseConfirmQueue,
    onCancel,
    onAgentsKilled,
    isMessageSelectorVisible,
    screen,
    abortSignal,
    popCommandFromQueue,
    vimMode,
    isLocalJSXCommand,
    isSearchingHistory,
    isHelpOpen,
    inputMode,
    inputValue,
    streamMode,
  } = props
  // Store handle is used for imperative reads of tasks inside callbacks,
  // avoiding stale closures over subscribed state.
  const store = useAppStateStore()
  const setAppState = useSetAppState()
  const queuedCommandsLength = useCommandQueue().length
  const { addNotification, removeNotification } = useNotifications()
  // Timestamp of the last chat:killAgents press, for the two-press confirm window.
  const lastKillAgentsPressRef = useRef<number>(0)
  const viewSelectionMode = useAppState(s => s.viewSelectionMode)

  // Cancel the running task if there is one; otherwise pop the queue; otherwise
  // fall through to a plain cancel.
  const handleCancel = useCallback(() => {
    const cancelProps = {
      source:
        'escape' as AnalyticsMetadata_I_VERIFIED_THIS_IS_NOT_CODE_OR_FILEPATHS,
      streamMode:
        streamMode as AnalyticsMetadata_I_VERIFIED_THIS_IS_NOT_CODE_OR_FILEPATHS,
    }

    // Priority 1: If there's an active task running, cancel it first
    // This takes precedence over queue management so users can always interrupt Claude
    if (abortSignal !== undefined && !abortSignal.aborted) {
      logEvent('tengu_cancel', cancelProps)
      setToolUseConfirmQueue(() => [])
      onCancel()
      return
    }

    // Priority 2: Pop queue when Claude is idle (no running task to cancel)
    if (hasCommandsInQueue()) {
      if (popCommandFromQueue) {
        popCommandFromQueue()
        return
      }
    }

    // Fallback: nothing to cancel or pop (shouldn't reach here if isActive is correct)
    logEvent('tengu_cancel', cancelProps)
    setToolUseConfirmQueue(() => [])
    onCancel()
  }, [
    abortSignal,
    popCommandFromQueue,
    setToolUseConfirmQueue,
    onCancel,
    streamMode,
  ])

  // Determine if this handler should be active
  // Other contexts (Transcript, HistorySearch, Help) have their own escape handlers
  // Overlays (ModelPicker, ThinkingToggle, etc.) register themselves via useRegisterOverlay
  // Local JSX commands (like /model, /btw) handle their own input
  const isOverlayActive = useIsOverlayActive()
  const canCancelRunningTask = abortSignal !== undefined && !abortSignal.aborted
  const hasQueuedCommands = queuedCommandsLength > 0
  // When in bash/background mode with empty input, escape should exit the mode
  // rather than cancel the request. Let PromptInput handle mode exit.
  // This only applies to Escape, not Ctrl+C which should always cancel.
  const isInSpecialModeWithEmptyInput =
    inputMode !== undefined && inputMode !== 'prompt' && !inputValue
  // When viewing a teammate's transcript, let useBackgroundTaskNavigation handle Escape
  const isViewingTeammate = viewSelectionMode === 'viewing-agent'
  // Context guards: other screens/overlays handle their own cancel
  const isContextActive =
    screen !== 'transcript' &&
    !isSearchingHistory &&
    !isMessageSelectorVisible &&
    !isLocalJSXCommand &&
    !isHelpOpen &&
    !isOverlayActive &&
    !(isVimModeEnabled() && vimMode === 'INSERT')

  // Escape (chat:cancel) defers to mode-exit when in special mode with empty
  // input, and to useBackgroundTaskNavigation when viewing a teammate
  const isEscapeActive =
    isContextActive &&
    (canCancelRunningTask || hasQueuedCommands) &&
    !isInSpecialModeWithEmptyInput &&
    !isViewingTeammate

  // Ctrl+C (app:interrupt): when viewing a teammate, stops everything and
  // returns to main thread. Otherwise just handleCancel. Must NOT claim
  // ctrl+c when main is idle at the prompt — that blocks the copy-selection
  // handler and double-press-to-exit from ever seeing the keypress.
  const isCtrlCActive =
    isContextActive &&
    (canCancelRunningTask || hasQueuedCommands || isViewingTeammate)

  useKeybinding('chat:cancel', handleCancel, {
    context: 'Chat',
    isActive: isEscapeActive,
  })

  // Shared kill path: stop all agents, suppress per-agent notifications,
  // emit SDK events, enqueue a single aggregate model-facing notification.
  // Returns true if anything was killed.
  const killAllAgentsAndNotify = useCallback((): boolean => {
    const tasks = store.getState().tasks
    const running = Object.entries(tasks).filter(
      ([, t]) => t.type === 'local_agent' && t.status === 'running',
    )
    if (running.length === 0) return false
    killAllRunningAgentTasks(tasks, setAppState)
    const descriptions: string[] = []
    for (const [taskId, task] of running) {
      markAgentsNotified(taskId, setAppState)
      descriptions.push(task.description)
      emitTaskTerminatedSdk(taskId, 'stopped', {
        toolUseId: task.toolUseId,
        summary: task.description,
      })
    }
    // One aggregate notification for the model instead of one per agent.
    const summary =
      descriptions.length === 1
        ? `Background agent "${descriptions[0]}" was stopped by the user.`
        : `${descriptions.length} background agents were stopped by the user: ${descriptions.map(d => `"${d}"`).join(', ')}.`
    enqueuePendingNotification({ value: summary, mode: 'task-notification' })
    onAgentsKilled()
    return true
  }, [store, setAppState, onAgentsKilled])

  // Ctrl+C (app:interrupt). Scoped to teammate-view: killing agents from the
  // main prompt stays a deliberate gesture (chat:killAgents), not a
  // side-effect of cancelling a turn.
  const handleInterrupt = useCallback(() => {
    if (isViewingTeammate) {
      killAllAgentsAndNotify()
      exitTeammateView(setAppState)
    }
    if (canCancelRunningTask || hasQueuedCommands) {
      handleCancel()
    }
  }, [
    isViewingTeammate,
    killAllAgentsAndNotify,
    setAppState,
    canCancelRunningTask,
    hasQueuedCommands,
    handleCancel,
  ])

  useKeybinding('app:interrupt', handleInterrupt, {
    context: 'Global',
    isActive: isCtrlCActive,
  })

  // chat:killAgents uses a two-press pattern: first press shows a
  // confirmation hint, second press within the window actually kills all
  // agents. Reads tasks from the store directly to avoid stale closures.
  const handleKillAgents = useCallback(() => {
    const tasks = store.getState().tasks
    const hasRunningAgents = Object.values(tasks).some(
      t => t.type === 'local_agent' && t.status === 'running',
    )
    if (!hasRunningAgents) {
      addNotification({
        key: 'kill-agents-none',
        text: 'No background agents running',
        priority: 'immediate',
        timeoutMs: 2000,
      })
      return
    }
    const now = Date.now()
    const elapsed = now - lastKillAgentsPressRef.current
    if (elapsed <= KILL_AGENTS_CONFIRM_WINDOW_MS) {
      // Second press within window -- kill all background agents
      lastKillAgentsPressRef.current = 0
      removeNotification('kill-agents-confirm')
      logEvent('tengu_cancel', {
        source:
          'kill_agents' as AnalyticsMetadata_I_VERIFIED_THIS_IS_NOT_CODE_OR_FILEPATHS,
      })
      clearCommandQueue()
      killAllAgentsAndNotify()
      return
    }
    // First press -- show confirmation hint in status bar
    lastKillAgentsPressRef.current = now
    const shortcut = getShortcutDisplay(
      'chat:killAgents',
      'Chat',
      'ctrl+x ctrl+k',
    )
    addNotification({
      key: 'kill-agents-confirm',
      text: `Press ${shortcut} again to stop background agents`,
      priority: 'immediate',
      timeoutMs: KILL_AGENTS_CONFIRM_WINDOW_MS,
    })
  }, [store, addNotification, removeNotification, killAllAgentsAndNotify])

  // Must stay always-active: ctrl+x is consumed as a chord prefix regardless
  // of isActive (because ctrl+x ctrl+e is always live), so an inactive handler
  // here would leak ctrl+k to readline kill-line. Handler gates internally.
  useKeybinding('chat:killAgents', handleKillAgents, {
    context: 'Chat',
  })

  return null
}
|
||||
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
@@ -0,0 +1,77 @@
|
||||
import { useEffect, useRef } from 'react'
|
||||
import { useNotifications } from '../context/notifications.js'
|
||||
import { getShortcutDisplay } from '../keybindings/shortcutFormat.js'
|
||||
import { hasImageInClipboard } from '../utils/imagePaste.js'
|
||||
|
||||
const NOTIFICATION_KEY = 'clipboard-image-hint'
|
||||
// Small debounce to batch rapid focus changes
|
||||
const FOCUS_CHECK_DEBOUNCE_MS = 1000
|
||||
// Don't show the hint more than once per this interval
|
||||
const HINT_COOLDOWN_MS = 30000
|
||||
|
||||
/**
|
||||
* Hook that shows a notification when the terminal regains focus
|
||||
* and the clipboard contains an image.
|
||||
*
|
||||
* @param isFocused - Whether the terminal is currently focused
|
||||
* @param enabled - Whether image paste is enabled (onImagePaste is defined)
|
||||
*/
|
||||
export function useClipboardImageHint(
|
||||
isFocused: boolean,
|
||||
enabled: boolean,
|
||||
): void {
|
||||
const { addNotification } = useNotifications()
|
||||
const lastFocusedRef = useRef(isFocused)
|
||||
const lastHintTimeRef = useRef(0)
|
||||
const checkTimeoutRef = useRef<NodeJS.Timeout | null>(null)
|
||||
|
||||
useEffect(() => {
|
||||
// Only trigger on focus regain (was unfocused, now focused)
|
||||
const wasFocused = lastFocusedRef.current
|
||||
lastFocusedRef.current = isFocused
|
||||
|
||||
if (!enabled || !isFocused || wasFocused) {
|
||||
return
|
||||
}
|
||||
|
||||
// Clear any pending check
|
||||
if (checkTimeoutRef.current) {
|
||||
clearTimeout(checkTimeoutRef.current)
|
||||
}
|
||||
|
||||
// Small debounce to batch rapid focus changes
|
||||
checkTimeoutRef.current = setTimeout(
|
||||
async (checkTimeoutRef, lastHintTimeRef, addNotification) => {
|
||||
checkTimeoutRef.current = null
|
||||
|
||||
// Check cooldown to avoid spamming the user
|
||||
const now = Date.now()
|
||||
if (now - lastHintTimeRef.current < HINT_COOLDOWN_MS) {
|
||||
return
|
||||
}
|
||||
|
||||
// Check if clipboard has an image (async osascript call)
|
||||
if (await hasImageInClipboard()) {
|
||||
lastHintTimeRef.current = now
|
||||
addNotification({
|
||||
key: NOTIFICATION_KEY,
|
||||
text: `Image in clipboard · ${getShortcutDisplay('chat:imagePaste', 'Chat', 'ctrl+v')} to paste`,
|
||||
priority: 'immediate',
|
||||
timeoutMs: 8000,
|
||||
})
|
||||
}
|
||||
},
|
||||
FOCUS_CHECK_DEBOUNCE_MS,
|
||||
checkTimeoutRef,
|
||||
lastHintTimeRef,
|
||||
addNotification,
|
||||
)
|
||||
|
||||
return () => {
|
||||
if (checkTimeoutRef.current) {
|
||||
clearTimeout(checkTimeoutRef.current)
|
||||
checkTimeoutRef.current = null
|
||||
}
|
||||
}
|
||||
}, [isFocused, enabled, addNotification])
|
||||
}
|
||||
File diff suppressed because one or more lines are too long
@@ -0,0 +1,15 @@
|
||||
import { useSyncExternalStore } from 'react'
|
||||
import type { QueuedCommand } from '../types/textInputTypes.js'
|
||||
import {
|
||||
getCommandQueueSnapshot,
|
||||
subscribeToCommandQueue,
|
||||
} from '../utils/messageQueueManager.js'
|
||||
|
||||
/**
|
||||
* React hook to subscribe to the unified command queue.
|
||||
* Returns a frozen array that only changes reference on mutation.
|
||||
* Components re-render only when the queue changes.
|
||||
*/
|
||||
export function useCommandQueue(): readonly QueuedCommand[] {
|
||||
return useSyncExternalStore(subscribeToCommandQueue, getCommandQueueSnapshot)
|
||||
}
|
||||
@@ -0,0 +1,98 @@
|
||||
import { useEffect, useRef } from 'react'
|
||||
import { useTheme } from '../components/design-system/ThemeProvider.js'
|
||||
import type { useSelection } from '../ink/hooks/use-selection.js'
|
||||
import { getGlobalConfig } from '../utils/config.js'
|
||||
import { getTheme } from '../utils/theme.js'
|
||||
|
||||
type Selection = ReturnType<typeof useSelection>
|
||||
|
||||
/**
|
||||
* Auto-copy the selection to the clipboard when the user finishes dragging
|
||||
* (mouse-up with a non-empty selection) or multi-clicks to select a word/line.
|
||||
* Mirrors iTerm2's "Copy to pasteboard on selection" — the highlight is left
|
||||
* intact so the user can see what was copied. Only fires in alt-screen mode
|
||||
* (selection state is ink-instance-owned; outside alt-screen, the native
|
||||
* terminal handles selection and this hook is a no-op via the ink stub).
|
||||
*
|
||||
* selection.subscribe fires on every mutation (start/update/finish/clear/
|
||||
* multiclick). Both char drags and multi-clicks set isDragging=true while
|
||||
* pressed, so a selection appearing with isDragging=false is always a
|
||||
* drag-finish. copiedRef guards against double-firing on spurious notifies.
|
||||
*
|
||||
* onCopied is optional — when omitted, copy is silent (clipboard is written
|
||||
* but no toast/notification fires). FleetView uses this silent mode; the
|
||||
* fullscreen REPL passes showCopiedToast for user feedback.
|
||||
*/
|
||||
export function useCopyOnSelect(
  selection: Selection, // ink-owned selection state (alt-screen only)
  isActive: boolean, // gate: when false, no subscription is installed
  onCopied?: (text: string) => void, // optional feedback callback; omit for silent copy
): void {
  // Tracks whether the *previous* notification had a visible selection with
  // isDragging=false (i.e., we already auto-copied it). Without this, the
  // finish→clear transition would look like a fresh selection-gone-idle
  // event and we'd toast twice for a single drag.
  const copiedRef = useRef(false)
  // onCopied is a fresh closure each render; read through a ref so the
  // effect doesn't re-subscribe (which would reset copiedRef via unmount).
  const onCopiedRef = useRef(onCopied)
  onCopiedRef.current = onCopied

  useEffect(() => {
    if (!isActive) return

    const unsubscribe = selection.subscribe(() => {
      const sel = selection.getState()
      const has = selection.hasSelection()
      // Drag in progress — wait for finish. Reset copied flag so a new drag
      // that ends on the same range still triggers a fresh copy.
      if (sel?.isDragging) {
        copiedRef.current = false
        return
      }
      // No selection (cleared, or click-without-drag) — reset.
      if (!has) {
        copiedRef.current = false
        return
      }
      // Selection settled (drag finished OR multi-click). Already copied
      // this one — the only way to get here again without going through
      // isDragging or !has is a spurious notify (shouldn't happen, but safe).
      if (copiedRef.current) return

      // Default true: macOS users expect cmd+c to work. It can't — the
      // terminal's Edit > Copy intercepts it before the pty sees it, and
      // finds no native selection (mouse tracking disabled it). Auto-copy
      // on mouse-up makes cmd+c a no-op that leaves the clipboard intact
      // with the right content, so paste works as expected.
      const enabled = getGlobalConfig().copyOnSelect ?? true
      if (!enabled) return

      const text = selection.copySelectionNoClear()
      // Whitespace-only (e.g., blank-line multi-click) — not worth a
      // clipboard write or toast. Still set copiedRef so we don't retry.
      if (!text || !text.trim()) {
        copiedRef.current = true
        return
      }
      copiedRef.current = true
      onCopiedRef.current?.(text)
    })
    return unsubscribe
  }, [isActive, selection])
}
|
||||
|
||||
/**
|
||||
* Pipe the theme's selectionBg color into the Ink StylePool so the
|
||||
* selection overlay renders a solid blue bg instead of SGR-7 inverse.
|
||||
* Ink is theme-agnostic (layering: colorize.ts "theme resolution happens
|
||||
* at component layer, not here") — this is the bridge. Fires on mount
|
||||
* (before any mouse input is possible) and again whenever /theme flips,
|
||||
* so the selection color tracks the theme live.
|
||||
*/
|
||||
export function useSelectionBgColor(selection: Selection): void {
|
||||
const [themeName] = useTheme()
|
||||
useEffect(() => {
|
||||
selection.setSelectionBgColor(getTheme(themeName).selectionBg)
|
||||
}, [selection, themeName])
|
||||
}
|
||||
@@ -0,0 +1,46 @@
|
||||
import { useCallback, useEffect, useRef } from 'react'
|
||||
import type { HookResultMessage, Message } from '../types/message.js'
|
||||
|
||||
/**
|
||||
* Manages deferred SessionStart hook messages so the REPL can render
|
||||
* immediately instead of blocking on hook execution (~500ms).
|
||||
*
|
||||
* Hook messages are injected asynchronously when the promise resolves.
|
||||
* Returns a callback that onSubmit should call before the first API
|
||||
* request to ensure the model always sees hook context.
|
||||
*/
|
||||
export function useDeferredHookMessages(
  pendingHookMessages: Promise<HookResultMessage[]> | undefined,
  setMessages: (action: React.SetStateAction<Message[]>) => void,
): () => Promise<void> {
  // Promise captured once on first render; cleared after injection so
  // neither path can inject the same messages twice.
  const pendingRef = useRef(pendingHookMessages ?? null)
  // True once injection happened (or when there was nothing to wait for).
  const resolvedRef = useRef(!pendingHookMessages)

  // Passive path: inject the hook messages as soon as the promise resolves.
  useEffect(() => {
    const promise = pendingRef.current
    if (!promise) return
    let cancelled = false
    promise.then(msgs => {
      if (cancelled) return
      resolvedRef.current = true
      pendingRef.current = null
      if (msgs.length > 0) {
        // Prepend so hook context sits before any conversation messages.
        setMessages(prev => [...msgs, ...prev])
      }
    })
    return () => {
      cancelled = true
    }
  }, [setMessages])

  // Active path: onSubmit calls this before the first API request so the
  // model is guaranteed to see hook context even if the passive path has
  // not fired yet.
  return useCallback(async () => {
    if (resolvedRef.current || !pendingRef.current) return
    const msgs = await pendingRef.current
    // Re-check: the effect's .then may have completed while we awaited.
    if (resolvedRef.current) return
    resolvedRef.current = true
    pendingRef.current = null
    if (msgs.length > 0) {
      setMessages(prev => [...msgs, ...prev])
    }
  }, [setMessages])
}
|
||||
@@ -0,0 +1,110 @@
|
||||
import type { StructuredPatchHunk } from 'diff'
|
||||
import { useEffect, useMemo, useState } from 'react'
|
||||
import {
|
||||
fetchGitDiff,
|
||||
fetchGitDiffHunks,
|
||||
type GitDiffResult,
|
||||
type GitDiffStats,
|
||||
} from '../utils/gitDiff.js'
|
||||
|
||||
// Per-file line budget; files whose diff exceeds this are marked truncated.
const MAX_LINES_PER_FILE = 400

export type DiffFile = {
  path: string
  linesAdded: number
  linesRemoved: number
  // File reported as binary (no text hunks available).
  isBinary: boolean
  // Present in perFileStats but with no hunks fetched (skipped as too large).
  isLargeFile: boolean
  // Total added+removed lines exceed MAX_LINES_PER_FILE.
  isTruncated: boolean
  isNewFile?: boolean
  isUntracked?: boolean
}

export type DiffData = {
  // Aggregate diff stats; null until the first fetch completes.
  stats: GitDiffStats | null
  files: DiffFile[]
  // Structured patch hunks keyed by file path.
  hunks: Map<string, StructuredPatchHunk[]>
  loading: boolean
}
|
||||
|
||||
/**
|
||||
* Hook to fetch current git diff data on demand.
|
||||
* Fetches both stats and hunks when component mounts.
|
||||
*/
|
||||
export function useDiffData(): DiffData {
  const [diffResult, setDiffResult] = useState<GitDiffResult | null>(null)
  const [hunks, setHunks] = useState<Map<string, StructuredPatchHunk[]>>(
    new Map(),
  )
  const [loading, setLoading] = useState(true)

  // Fetch diff data on mount
  useEffect(() => {
    let cancelled = false

    async function loadDiffData() {
      try {
        // Fetch both stats and hunks
        const [statsResult, hunksResult] = await Promise.all([
          fetchGitDiff(),
          fetchGitDiffHunks(),
        ])

        if (!cancelled) {
          setDiffResult(statsResult)
          setHunks(hunksResult)
          setLoading(false)
        }
      } catch (_error) {
        // Any fetch failure settles into an empty, non-loading state.
        if (!cancelled) {
          setDiffResult(null)
          setHunks(new Map())
          setLoading(false)
        }
      }
    }

    void loadDiffData()

    return () => {
      cancelled = true
    }
  }, [])

  return useMemo(() => {
    if (!diffResult) {
      return { stats: null, files: [], hunks: new Map(), loading }
    }

    const { stats, perFileStats } = diffResult
    const files: DiffFile[] = []

    // Iterate over perFileStats to get all files including large/skipped ones
    for (const [path, fileStats] of perFileStats) {
      const fileHunks = hunks.get(path)
      const isUntracked = fileStats.isUntracked ?? false

      // Detect large file (in perFileStats but not in hunks, and not binary/untracked)
      const isLargeFile = !fileStats.isBinary && !isUntracked && !fileHunks

      // Detect truncated file (total > limit means we truncated)
      const totalLines = fileStats.added + fileStats.removed
      const isTruncated =
        !isLargeFile && !fileStats.isBinary && totalLines > MAX_LINES_PER_FILE

      files.push({
        path,
        linesAdded: fileStats.added,
        linesRemoved: fileStats.removed,
        isBinary: fileStats.isBinary,
        isLargeFile,
        isTruncated,
        isUntracked,
      })
    }

    // Stable alphabetical presentation order.
    files.sort((a, b) => a.path.localeCompare(b.path))

    // diffResult existing implies the fetch finished, so loading is false here.
    return { stats, files, hunks, loading: false }
  }, [diffResult, hunks, loading])
}
|
||||
@@ -0,0 +1,379 @@
|
||||
import { randomUUID } from 'crypto'
|
||||
import { basename } from 'path'
|
||||
import { useEffect, useMemo, useRef, useState } from 'react'
|
||||
import { logEvent } from 'src/services/analytics/index.js'
|
||||
import { readFileSync } from 'src/utils/fileRead.js'
|
||||
import { expandPath } from 'src/utils/path.js'
|
||||
import type { PermissionOption } from '../components/permissions/FilePermissionDialog/permissionOptions.js'
|
||||
import type {
|
||||
MCPServerConnection,
|
||||
McpSSEIDEServerConfig,
|
||||
McpWebSocketIDEServerConfig,
|
||||
} from '../services/mcp/types.js'
|
||||
import type { ToolUseContext } from '../Tool.js'
|
||||
import type { FileEdit } from '../tools/FileEditTool/types.js'
|
||||
import {
|
||||
getEditsForPatch,
|
||||
getPatchForEdits,
|
||||
} from '../tools/FileEditTool/utils.js'
|
||||
import { getGlobalConfig } from '../utils/config.js'
|
||||
import { getPatchFromContents } from '../utils/diff.js'
|
||||
import { isENOENT } from '../utils/errors.js'
|
||||
import {
|
||||
callIdeRpc,
|
||||
getConnectedIdeClient,
|
||||
getConnectedIdeName,
|
||||
hasAccessToIDEExtensionDiffFeature,
|
||||
} from '../utils/ide.js'
|
||||
import { WindowsToWSLConverter } from '../utils/idePathConversion.js'
|
||||
import { logError } from '../utils/log.js'
|
||||
import { getPlatform } from '../utils/platform.js'
|
||||
|
||||
type Props = {
|
||||
onChange(
|
||||
option: PermissionOption,
|
||||
input: {
|
||||
file_path: string
|
||||
edits: FileEdit[]
|
||||
},
|
||||
): void
|
||||
toolUseContext: ToolUseContext
|
||||
filePath: string
|
||||
edits: FileEdit[]
|
||||
editMode: 'single' | 'multiple'
|
||||
}
|
||||
|
||||
/**
 * Drives the "show this edit as a diff in the connected IDE" flow: opens
 * the diff tab on mount, waits for the user's save/close/reject, then
 * reports the outcome through onChange. Falls back (showingDiffInIDE
 * false) when the IDE diff feature is unavailable or an error occurs.
 */
export function useDiffInIDE({
  onChange,
  toolUseContext,
  filePath,
  edits,
  editMode,
}: Props): {
  closeTabInIDE: () => void
  showingDiffInIDE: boolean
  ideName: string
  hasError: boolean
} {
  const isUnmounted = useRef(false)
  const [hasError, setHasError] = useState(false)

  // Short random suffix so concurrent diffs of the same file get distinct tabs.
  const sha = useMemo(() => randomUUID().slice(0, 6), [])
  const tabName = useMemo(
    () => `✻ [Claude Code] ${basename(filePath)} (${sha}) ⧉`,
    [filePath, sha],
  )

  const shouldShowDiffInIDE =
    hasAccessToIDEExtensionDiffFeature(toolUseContext.options.mcpClients) &&
    getGlobalConfig().diffTool === 'auto' &&
    // Diffs should only be for file edits.
    // File writes may come through here but are not supported for diffs.
    !filePath.endsWith('.ipynb')

  const ideName =
    getConnectedIdeName(toolUseContext.options.mcpClients) ?? 'IDE'

  // Opens the diff in the IDE and translates the outcome into onChange calls.
  async function showDiff(): Promise<void> {
    if (!shouldShowDiffInIDE) {
      return
    }

    try {
      logEvent('tengu_ext_will_show_diff', {})

      const { oldContent, newContent } = await showDiffInIDE(
        filePath,
        edits,
        toolUseContext,
        tabName,
      )
      // Skip if component has been unmounted
      if (isUnmounted.current) {
        return
      }

      logEvent('tengu_ext_diff_accepted', {})

      // Re-derive edits from the final IDE contents so any hand-edits the
      // user made in the diff view are included.
      const newEdits = computeEditsFromContents(
        filePath,
        oldContent,
        newContent,
        editMode,
      )

      if (newEdits.length === 0) {
        // No changes -- edit was rejected (eg. reverted)
        logEvent('tengu_ext_diff_rejected', {})
        // We close the tab here because 'no' no longer auto-closes
        const ideClient = getConnectedIdeClient(
          toolUseContext.options.mcpClients,
        )
        if (ideClient) {
          // Close the tab in the IDE
          await closeTabInIDE(tabName, ideClient)
        }
        onChange(
          { type: 'reject' },
          {
            file_path: filePath,
            edits: edits,
          },
        )
        return
      }

      // File was modified - edit was accepted
      onChange(
        { type: 'accept-once' },
        {
          file_path: filePath,
          edits: newEdits,
        },
      )
    } catch (error) {
      logError(error as Error)
      setHasError(true)
    }
  }

  useEffect(() => {
    void showDiff()

    // Set flag on unmount
    return () => {
      isUnmounted.current = true
    }
    // eslint-disable-next-line react-hooks/exhaustive-deps
  }, [])

  return {
    // Declared () => void but returns a Promise; callers ignore the result.
    closeTabInIDE() {
      const ideClient = getConnectedIdeClient(toolUseContext.options.mcpClients)

      if (!ideClient) {
        return Promise.resolve()
      }

      return closeTabInIDE(tabName, ideClient)
    },
    showingDiffInIDE: shouldShowDiffInIDE && !hasError,
    ideName: ideName,
    hasError,
  }
}
|
||||
|
||||
/**
|
||||
* Re-computes the edits from the old and new contents. This is necessary
|
||||
* to apply any edits the user may have made to the new contents.
|
||||
*/
|
||||
export function computeEditsFromContents(
|
||||
filePath: string,
|
||||
oldContent: string,
|
||||
newContent: string,
|
||||
editMode: 'single' | 'multiple',
|
||||
): FileEdit[] {
|
||||
// Use unformatted patches, otherwise the edits will be formatted.
|
||||
const singleHunk = editMode === 'single'
|
||||
const patch = getPatchFromContents({
|
||||
filePath,
|
||||
oldContent,
|
||||
newContent,
|
||||
singleHunk,
|
||||
})
|
||||
|
||||
if (patch.length === 0) {
|
||||
return []
|
||||
}
|
||||
|
||||
// For single edit mode, verify we only got one hunk
|
||||
if (singleHunk && patch.length > 1) {
|
||||
logError(
|
||||
new Error(
|
||||
`Unexpected number of hunks: ${patch.length}. Expected 1 hunk.`,
|
||||
),
|
||||
)
|
||||
}
|
||||
|
||||
// Re-compute the edits to match the patch
|
||||
return getEditsForPatch(patch)
|
||||
}
|
||||
|
||||
/**
 * Done if:
 *
 * 1. Tab is closed in IDE
 * 2. Tab is saved in IDE (we then close the tab)
 * 3. User selected an option in IDE
 * 4. User selected an option in terminal (or hit esc)
 *
 * Resolves with the new file content.
 *
 * TODO: Time out after 5 mins of inactivity?
 * TODO: Update auto-approval UI when IDE exits
 * TODO: Close the IDE tab when the approval prompt is unmounted
 */
async function showDiffInIDE(
  file_path: string,
  edits: FileEdit[],
  toolUseContext: ToolUseContext,
  tabName: string,
): Promise<{ oldContent: string; newContent: string }> {
  let isCleanedUp = false

  const oldFilePath = expandPath(file_path)
  let oldContent = ''
  try {
    oldContent = readFileSync(oldFilePath)
  } catch (e: unknown) {
    // Missing file is fine (new-file diff); anything else is a real error.
    if (!isENOENT(e)) {
      throw e
    }
  }

  async function cleanup() {
    // Careful to avoid race conditions, since this
    // function can be called from multiple places.
    if (isCleanedUp) {
      return
    }
    isCleanedUp = true

    // Don't fail if this fails
    try {
      // NOTE(review): `ideClient` is declared after this function; this is
      // TDZ-safe only because nothing can invoke cleanup before the
      // synchronous code below reaches that declaration — verify if the
      // listener registrations ever move.
      await closeTabInIDE(tabName, ideClient)
    } catch (e) {
      logError(e as Error)
    }

    process.off('beforeExit', cleanup)
    toolUseContext.abortController.signal.removeEventListener('abort', cleanup)
  }

  // Cleanup if the user hits esc to cancel the tool call - or on exit
  toolUseContext.abortController.signal.addEventListener('abort', cleanup)
  process.on('beforeExit', cleanup)

  // Open the diff in the IDE
  const ideClient = getConnectedIdeClient(toolUseContext.options.mcpClients)
  try {
    const { updatedFile } = getPatchForEdits({
      filePath: oldFilePath,
      fileContents: oldContent,
      edits,
    })

    if (!ideClient || ideClient.type !== 'connected') {
      throw new Error('IDE client not available')
    }
    let ideOldPath = oldFilePath

    // Only convert paths if we're in WSL and IDE is on Windows
    const ideRunningInWindows =
      (ideClient.config as McpSSEIDEServerConfig | McpWebSocketIDEServerConfig)
        .ideRunningInWindows === true
    if (
      getPlatform() === 'wsl' &&
      ideRunningInWindows &&
      process.env.WSL_DISTRO_NAME
    ) {
      const converter = new WindowsToWSLConverter(process.env.WSL_DISTRO_NAME)
      ideOldPath = converter.toIDEPath(oldFilePath)
    }

    const rpcResult = await callIdeRpc(
      'openDiff',
      {
        old_file_path: ideOldPath,
        new_file_path: ideOldPath,
        new_file_contents: updatedFile,
        tab_name: tabName,
      },
      ideClient,
    )

    // Convert the raw RPC result to a ToolCallResponse format
    const data = Array.isArray(rpcResult) ? rpcResult : [rpcResult]

    // If the user saved the file then take the new contents and resolve with that.
    if (isSaveMessage(data)) {
      void cleanup()
      return {
        oldContent: oldContent,
        newContent: data[1].text,
      }
    } else if (isClosedMessage(data)) {
      // Tab closed without saving — resolve with the proposed contents.
      void cleanup()
      return {
        oldContent: oldContent,
        newContent: updatedFile,
      }
    } else if (isRejectedMessage(data)) {
      // Explicit rejection — resolve with the contents unchanged.
      void cleanup()
      return {
        oldContent: oldContent,
        newContent: oldContent,
      }
    }

    // Indicates that the tool call completed with none of the expected
    // results. Did the user close the IDE?
    throw new Error('Not accepted')
  } catch (error) {
    logError(error as Error)
    void cleanup()
    throw error
  }
}
|
||||
|
||||
async function closeTabInIDE(
|
||||
tabName: string,
|
||||
ideClient?: MCPServerConnection | undefined,
|
||||
): Promise<void> {
|
||||
try {
|
||||
if (!ideClient || ideClient.type !== 'connected') {
|
||||
throw new Error('IDE client not available')
|
||||
}
|
||||
|
||||
// Use direct RPC to close the tab
|
||||
await callIdeRpc('close_tab', { tab_name: tabName }, ideClient)
|
||||
} catch (error) {
|
||||
logError(error as Error)
|
||||
// Don't throw - this is a cleanup operation
|
||||
}
|
||||
}
|
||||
|
||||
function isClosedMessage(data: unknown): data is { text: 'TAB_CLOSED' } {
|
||||
return (
|
||||
Array.isArray(data) &&
|
||||
typeof data[0] === 'object' &&
|
||||
data[0] !== null &&
|
||||
'type' in data[0] &&
|
||||
data[0].type === 'text' &&
|
||||
'text' in data[0] &&
|
||||
data[0].text === 'TAB_CLOSED'
|
||||
)
|
||||
}
|
||||
|
||||
function isRejectedMessage(data: unknown): data is { text: 'DIFF_REJECTED' } {
|
||||
return (
|
||||
Array.isArray(data) &&
|
||||
typeof data[0] === 'object' &&
|
||||
data[0] !== null &&
|
||||
'type' in data[0] &&
|
||||
data[0].type === 'text' &&
|
||||
'text' in data[0] &&
|
||||
data[0].text === 'DIFF_REJECTED'
|
||||
)
|
||||
}
|
||||
|
||||
function isSaveMessage(
|
||||
data: unknown,
|
||||
): data is [{ text: 'FILE_SAVED' }, { text: string }] {
|
||||
return (
|
||||
Array.isArray(data) &&
|
||||
data[0]?.type === 'text' &&
|
||||
data[0].text === 'FILE_SAVED' &&
|
||||
typeof data[1].text === 'string'
|
||||
)
|
||||
}
|
||||
@@ -0,0 +1,229 @@
|
||||
import { useCallback, useEffect, useMemo, useRef } from 'react'
|
||||
import type { ToolUseConfirm } from '../components/permissions/PermissionRequest.js'
|
||||
import type { RemotePermissionResponse } from '../remote/RemoteSessionManager.js'
|
||||
import {
|
||||
createSyntheticAssistantMessage,
|
||||
createToolStub,
|
||||
} from '../remote/remotePermissionBridge.js'
|
||||
import {
|
||||
convertSDKMessage,
|
||||
isSessionEndMessage,
|
||||
} from '../remote/sdkMessageAdapter.js'
|
||||
import {
|
||||
type DirectConnectConfig,
|
||||
DirectConnectSessionManager,
|
||||
} from '../server/directConnectManager.js'
|
||||
import type { Tool } from '../Tool.js'
|
||||
import { findToolByName } from '../Tool.js'
|
||||
import type { Message as MessageType } from '../types/message.js'
|
||||
import type { PermissionAskDecision } from '../types/permissions.js'
|
||||
import { logForDebugging } from '../utils/debug.js'
|
||||
import { gracefulShutdown } from '../utils/gracefulShutdown.js'
|
||||
import type { RemoteMessageContent } from '../utils/teleport/api.js'
|
||||
|
||||
type UseDirectConnectResult = {
|
||||
isRemoteMode: boolean
|
||||
sendMessage: (content: RemoteMessageContent) => Promise<boolean>
|
||||
cancelRequest: () => void
|
||||
disconnect: () => void
|
||||
}
|
||||
|
||||
type UseDirectConnectProps = {
|
||||
config: DirectConnectConfig | undefined
|
||||
setMessages: React.Dispatch<React.SetStateAction<MessageType[]>>
|
||||
setIsLoading: (loading: boolean) => void
|
||||
setToolUseConfirmQueue: React.Dispatch<React.SetStateAction<ToolUseConfirm[]>>
|
||||
tools: Tool[]
|
||||
}
|
||||
|
||||
/**
 * Bridges a remote "direct connect" session into the local REPL state:
 * maintains a DirectConnectSessionManager for the configured WebSocket,
 * converts incoming SDK messages into local messages, and surfaces remote
 * permission requests as local ToolUseConfirm entries whose responses are
 * sent back over the socket. When config is undefined the hook is inert
 * and isRemoteMode is false.
 */
export function useDirectConnect({
  config,
  setMessages,
  setIsLoading,
  setToolUseConfirmQueue,
  tools,
}: UseDirectConnectProps): UseDirectConnectResult {
  const isRemoteMode = !!config

  const managerRef = useRef<DirectConnectSessionManager | null>(null)
  const hasReceivedInitRef = useRef(false)
  const isConnectedRef = useRef(false)

  // Keep a ref to tools so the WebSocket callback doesn't go stale
  const toolsRef = useRef(tools)
  useEffect(() => {
    toolsRef.current = tools
  }, [tools])

  useEffect(() => {
    if (!config) {
      return
    }

    hasReceivedInitRef.current = false
    logForDebugging(`[useDirectConnect] Connecting to ${config.wsUrl}`)

    const manager = new DirectConnectSessionManager(config, {
      onMessage: sdkMessage => {
        // A session-end message means the current turn finished.
        if (isSessionEndMessage(sdkMessage)) {
          setIsLoading(false)
        }

        // Skip duplicate init messages (server sends one per turn)
        if (sdkMessage.type === 'system' && sdkMessage.subtype === 'init') {
          if (hasReceivedInitRef.current) {
            return
          }
          hasReceivedInitRef.current = true
        }

        const converted = convertSDKMessage(sdkMessage, {
          convertToolResults: true,
        })
        if (converted.type === 'message') {
          setMessages(prev => [...prev, converted.message])
        }
      },
      onPermissionRequest: (request, requestId) => {
        logForDebugging(
          `[useDirectConnect] Permission request for tool: ${request.tool_name}`,
        )

        // Resolve the named tool locally; fall back to a stub so unknown
        // remote tools still render a permission prompt.
        const tool =
          findToolByName(toolsRef.current, request.tool_name) ??
          createToolStub(request.tool_name)

        const syntheticMessage = createSyntheticAssistantMessage(
          request,
          requestId,
        )

        const permissionResult: PermissionAskDecision = {
          behavior: 'ask',
          message:
            request.description ?? `${request.tool_name} requires permission`,
          suggestions: request.permission_suggestions,
          blockedPath: request.blocked_path,
        }

        const toolUseConfirm: ToolUseConfirm = {
          assistantMessage: syntheticMessage,
          tool,
          description:
            request.description ?? `${request.tool_name} requires permission`,
          input: request.input,
          // NOTE(review): empty object cast to the context type — remote
          // prompts evidently never read it; confirm before relying on it.
          toolUseContext: {} as ToolUseConfirm['toolUseContext'],
          toolUseID: request.tool_use_id,
          permissionResult,
          permissionPromptStartTimeMs: Date.now(),
          onUserInteraction() {
            // No-op for remote
          },
          onAbort() {
            const response: RemotePermissionResponse = {
              behavior: 'deny',
              message: 'User aborted',
            }
            manager.respondToPermissionRequest(requestId, response)
            setToolUseConfirmQueue(queue =>
              queue.filter(item => item.toolUseID !== request.tool_use_id),
            )
          },
          onAllow(updatedInput, _permissionUpdates, _feedback) {
            const response: RemotePermissionResponse = {
              behavior: 'allow',
              updatedInput,
            }
            manager.respondToPermissionRequest(requestId, response)
            setToolUseConfirmQueue(queue =>
              queue.filter(item => item.toolUseID !== request.tool_use_id),
            )
            // Server resumes the turn after an allow.
            setIsLoading(true)
          },
          onReject(feedback?: string) {
            const response: RemotePermissionResponse = {
              behavior: 'deny',
              message: feedback ?? 'User denied permission',
            }
            manager.respondToPermissionRequest(requestId, response)
            setToolUseConfirmQueue(queue =>
              queue.filter(item => item.toolUseID !== request.tool_use_id),
            )
          },
          async recheckPermission() {
            // No-op for remote
          },
        }

        setToolUseConfirmQueue(queue => [...queue, toolUseConfirm])
        setIsLoading(false)
      },
      onConnected: () => {
        logForDebugging('[useDirectConnect] Connected')
        isConnectedRef.current = true
      },
      onDisconnected: () => {
        logForDebugging('[useDirectConnect] Disconnected')
        if (!isConnectedRef.current) {
          // Never connected — connection failure (e.g. auth rejected)
          process.stderr.write(
            `\nFailed to connect to server at ${config.wsUrl}\n`,
          )
        } else {
          // Was connected then lost — server process exited or network dropped
          process.stderr.write('\nServer disconnected.\n')
        }
        isConnectedRef.current = false
        // Disconnect is fatal for the remote REPL: shut down with exit code 1.
        void gracefulShutdown(1)
        setIsLoading(false)
      },
      onError: error => {
        logForDebugging(`[useDirectConnect] Error: ${error.message}`)
      },
    })

    managerRef.current = manager
    manager.connect()

    return () => {
      logForDebugging('[useDirectConnect] Cleanup - disconnecting')
      manager.disconnect()
      managerRef.current = null
    }
  }, [config, setMessages, setIsLoading, setToolUseConfirmQueue])

  const sendMessage = useCallback(
    async (content: RemoteMessageContent): Promise<boolean> => {
      const manager = managerRef.current
      if (!manager) {
        return false
      }

      setIsLoading(true)

      return manager.sendMessage(content)
    },
    [setIsLoading],
  )

  // Cancel the current request
  const cancelRequest = useCallback(() => {
    // Send interrupt signal to the server
    managerRef.current?.sendInterrupt()

    setIsLoading(false)
  }, [setIsLoading])

  const disconnect = useCallback(() => {
    managerRef.current?.disconnect()
    managerRef.current = null
    isConnectedRef.current = false
  }, [])

  // Same stability concern as useRemoteSession — memoize so consumers
  // that depend on the result object don't see a fresh reference per render.
  return useMemo(
    () => ({ isRemoteMode, sendMessage, cancelRequest, disconnect }),
    [isRemoteMode, sendMessage, cancelRequest, disconnect],
  )
}
|
||||
@@ -0,0 +1,62 @@
|
||||
// Creates a function that calls one function on the first call and another
|
||||
// function on the second call within a certain timeout
|
||||
|
||||
import { useCallback, useEffect, useRef } from 'react'
|
||||
|
||||
export const DOUBLE_PRESS_TIMEOUT_MS = 800
|
||||
|
||||
export function useDoublePress(
|
||||
setPending: (pending: boolean) => void,
|
||||
onDoublePress: () => void,
|
||||
onFirstPress?: () => void,
|
||||
): () => void {
|
||||
const lastPressRef = useRef<number>(0)
|
||||
const timeoutRef = useRef<NodeJS.Timeout | undefined>(undefined)
|
||||
|
||||
const clearTimeoutSafe = useCallback(() => {
|
||||
if (timeoutRef.current) {
|
||||
clearTimeout(timeoutRef.current)
|
||||
timeoutRef.current = undefined
|
||||
}
|
||||
}, [])
|
||||
|
||||
// Cleanup timeout on unmount
|
||||
useEffect(() => {
|
||||
return () => {
|
||||
clearTimeoutSafe()
|
||||
}
|
||||
}, [clearTimeoutSafe])
|
||||
|
||||
return useCallback(() => {
|
||||
const now = Date.now()
|
||||
const timeSinceLastPress = now - lastPressRef.current
|
||||
const isDoublePress =
|
||||
timeSinceLastPress <= DOUBLE_PRESS_TIMEOUT_MS &&
|
||||
timeoutRef.current !== undefined
|
||||
|
||||
if (isDoublePress) {
|
||||
// Double press detected
|
||||
clearTimeoutSafe()
|
||||
setPending(false)
|
||||
onDoublePress()
|
||||
} else {
|
||||
// First press
|
||||
onFirstPress?.()
|
||||
setPending(true)
|
||||
|
||||
// Clear any existing timeout and set new one
|
||||
clearTimeoutSafe()
|
||||
timeoutRef.current = setTimeout(
|
||||
(setPending, timeoutRef) => {
|
||||
setPending(false)
|
||||
timeoutRef.current = undefined
|
||||
},
|
||||
DOUBLE_PRESS_TIMEOUT_MS,
|
||||
setPending,
|
||||
timeoutRef,
|
||||
)
|
||||
}
|
||||
|
||||
lastPressRef.current = now
|
||||
}, [setPending, onDoublePress, onFirstPress, clearTimeoutSafe])
|
||||
}
|
||||
@@ -0,0 +1,22 @@
|
||||
import React from 'react'
|
||||
import { getDynamicConfig_BLOCKS_ON_INIT } from '../services/analytics/growthbook.js'
|
||||
|
||||
/**
|
||||
* React hook for dynamic config values.
|
||||
* Returns the default value initially, then updates when the config is fetched.
|
||||
*/
|
||||
export function useDynamicConfig<T>(configName: string, defaultValue: T): T {
|
||||
const [configValue, setConfigValue] = React.useState<T>(defaultValue)
|
||||
|
||||
React.useEffect(() => {
|
||||
if (process.env.NODE_ENV === 'test') {
|
||||
// Prevents a test hang when using this hook in tests
|
||||
return
|
||||
}
|
||||
void getDynamicConfig_BLOCKS_ON_INIT<T>(configName, defaultValue).then(
|
||||
setConfigValue,
|
||||
)
|
||||
}, [configName, defaultValue])
|
||||
|
||||
return configValue
|
||||
}
|
||||
@@ -0,0 +1,37 @@
|
||||
import { useCallback, useSyncExternalStore } from 'react'
|
||||
import { formatDuration } from '../utils/format.js'
|
||||
|
||||
/**
|
||||
* Hook that returns formatted elapsed time since startTime.
|
||||
* Uses useSyncExternalStore with interval-based updates for efficiency.
|
||||
*
|
||||
* @param startTime - Unix timestamp in ms
|
||||
* @param isRunning - Whether to actively update the timer
|
||||
* @param ms - How often should we trigger updates?
|
||||
* @param pausedMs - Total paused duration to subtract
|
||||
* @param endTime - If set, freezes the duration at this timestamp (for
|
||||
* terminal tasks). Without this, viewing a 2-min task 30 min after
|
||||
* completion would show "32m".
|
||||
* @returns Formatted duration string (e.g., "1m 23s")
|
||||
*/
|
||||
export function useElapsedTime(
|
||||
startTime: number,
|
||||
isRunning: boolean,
|
||||
ms: number = 1000,
|
||||
pausedMs: number = 0,
|
||||
endTime?: number,
|
||||
): string {
|
||||
const get = () =>
|
||||
formatDuration(Math.max(0, (endTime ?? Date.now()) - startTime - pausedMs))
|
||||
|
||||
const subscribe = useCallback(
|
||||
(notify: () => void) => {
|
||||
if (!isRunning) return () => {}
|
||||
const interval = setInterval(notify, ms)
|
||||
return () => clearInterval(interval)
|
||||
},
|
||||
[isRunning, ms],
|
||||
)
|
||||
|
||||
return useSyncExternalStore(subscribe, get, get)
|
||||
}
|
||||
@@ -0,0 +1,95 @@
|
||||
import { useCallback, useMemo, useState } from 'react'
|
||||
import useApp from '../ink/hooks/use-app.js'
|
||||
import type { KeybindingContextName } from '../keybindings/types.js'
|
||||
import { useDoublePress } from './useDoublePress.js'
|
||||
|
||||
export type ExitState = {
|
||||
pending: boolean
|
||||
keyName: 'Ctrl-C' | 'Ctrl-D' | null
|
||||
}
|
||||
|
||||
type KeybindingOptions = {
|
||||
context?: KeybindingContextName
|
||||
isActive?: boolean
|
||||
}
|
||||
|
||||
type UseKeybindingsHook = (
|
||||
handlers: Record<string, () => void>,
|
||||
options?: KeybindingOptions,
|
||||
) => void
|
||||
|
||||
/**
|
||||
* Handle ctrl+c and ctrl+d for exiting the application.
|
||||
*
|
||||
* Uses a time-based double-press mechanism:
|
||||
* - First press: Shows "Press X again to exit" message
|
||||
* - Second press within timeout: Exits the application
|
||||
*
|
||||
* Note: We use time-based double-press rather than the chord system because
|
||||
* we want the first ctrl+c to also trigger interrupt (handled elsewhere).
|
||||
* The chord system would prevent the first press from firing any action.
|
||||
*
|
||||
* These keys are hardcoded and cannot be rebound via keybindings.json.
|
||||
*
|
||||
* @param useKeybindingsHook - The useKeybindings hook to use for registering handlers
|
||||
* (dependency injection to avoid import cycles)
|
||||
* @param onInterrupt - Optional callback for features to handle interrupt (ctrl+c).
|
||||
* Return true if handled, false to fall through to double-press exit.
|
||||
* @param onExit - Optional custom exit handler
|
||||
* @param isActive - Whether the keybinding is active (default true). Set false
|
||||
* while an embedded TextInput is focused — TextInput's own
|
||||
* ctrl+c/d handlers will manage cancel/exit, and Dialog's
|
||||
* handler would otherwise double-fire (child useInput runs
|
||||
* before parent useKeybindings, so both see every keypress).
|
||||
*/
|
||||
export function useExitOnCtrlCD(
|
||||
useKeybindingsHook: UseKeybindingsHook,
|
||||
onInterrupt?: () => boolean,
|
||||
onExit?: () => void,
|
||||
isActive = true,
|
||||
): ExitState {
|
||||
const { exit } = useApp()
|
||||
const [exitState, setExitState] = useState<ExitState>({
|
||||
pending: false,
|
||||
keyName: null,
|
||||
})
|
||||
|
||||
const exitFn = useMemo(() => onExit ?? exit, [onExit, exit])
|
||||
|
||||
// Double-press handler for ctrl+c
|
||||
const handleCtrlCDoublePress = useDoublePress(
|
||||
pending => setExitState({ pending, keyName: 'Ctrl-C' }),
|
||||
exitFn,
|
||||
)
|
||||
|
||||
// Double-press handler for ctrl+d
|
||||
const handleCtrlDDoublePress = useDoublePress(
|
||||
pending => setExitState({ pending, keyName: 'Ctrl-D' }),
|
||||
exitFn,
|
||||
)
|
||||
|
||||
// Handler for app:interrupt (ctrl+c by default)
|
||||
// Let features handle interrupt first via callback
|
||||
const handleInterrupt = useCallback(() => {
|
||||
if (onInterrupt?.()) return // Feature handled it
|
||||
handleCtrlCDoublePress()
|
||||
}, [handleCtrlCDoublePress, onInterrupt])
|
||||
|
||||
// Handler for app:exit (ctrl+d by default)
|
||||
// This also uses double-press to confirm exit
|
||||
const handleExit = useCallback(() => {
|
||||
handleCtrlDDoublePress()
|
||||
}, [handleCtrlDDoublePress])
|
||||
|
||||
const handlers = useMemo(
|
||||
() => ({
|
||||
'app:interrupt': handleInterrupt,
|
||||
'app:exit': handleExit,
|
||||
}),
|
||||
[handleInterrupt, handleExit],
|
||||
)
|
||||
|
||||
useKeybindingsHook(handlers, { context: 'Global', isActive })
|
||||
|
||||
return exitState
|
||||
}
|
||||
@@ -0,0 +1,24 @@
|
||||
import { useKeybindings } from '../keybindings/useKeybinding.js'
|
||||
import { type ExitState, useExitOnCtrlCD } from './useExitOnCtrlCD.js'
|
||||
|
||||
export type { ExitState }
|
||||
|
||||
/**
|
||||
* Convenience hook that wires up useExitOnCtrlCD with useKeybindings.
|
||||
*
|
||||
* This is the standard way to use useExitOnCtrlCD in components.
|
||||
* The separation exists to avoid import cycles - useExitOnCtrlCD.ts
|
||||
* doesn't import from the keybindings module directly.
|
||||
*
|
||||
* @param onExit - Optional custom exit handler
|
||||
* @param onInterrupt - Optional callback for features to handle interrupt (ctrl+c).
|
||||
* Return true if handled, false to fall through to double-press exit.
|
||||
* @param isActive - Whether the keybinding is active (default true).
|
||||
*/
|
||||
export function useExitOnCtrlCDWithKeybindings(
|
||||
onExit?: () => void,
|
||||
onInterrupt?: () => boolean,
|
||||
isActive?: boolean,
|
||||
): ExitState {
|
||||
return useExitOnCtrlCD(useKeybindings, onInterrupt, onExit, isActive)
|
||||
}
|
||||
@@ -0,0 +1,25 @@
|
||||
import { useEffect, useRef } from 'react'
|
||||
import {
|
||||
type FileHistorySnapshot,
|
||||
type FileHistoryState,
|
||||
fileHistoryEnabled,
|
||||
fileHistoryRestoreStateFromLog,
|
||||
} from '../utils/fileHistory.js'
|
||||
|
||||
export function useFileHistorySnapshotInit(
|
||||
initialFileHistorySnapshots: FileHistorySnapshot[] | undefined,
|
||||
fileHistoryState: FileHistoryState,
|
||||
onUpdateState: (newState: FileHistoryState) => void,
|
||||
): void {
|
||||
const initialized = useRef(false)
|
||||
|
||||
useEffect(() => {
|
||||
if (!fileHistoryEnabled() || initialized.current) {
|
||||
return
|
||||
}
|
||||
initialized.current = true
|
||||
if (initialFileHistorySnapshots) {
|
||||
fileHistoryRestoreStateFromLog(initialFileHistorySnapshots, onUpdateState)
|
||||
}
|
||||
}, [fileHistoryState, initialFileHistorySnapshots, onUpdateState])
|
||||
}
|
||||
File diff suppressed because one or more lines are too long
@@ -0,0 +1,303 @@
|
||||
import { feature } from 'bun:bundle'
|
||||
import { useCallback, useEffect, useMemo, useRef, useState } from 'react'
|
||||
import {
|
||||
getModeFromInput,
|
||||
getValueFromInput,
|
||||
} from '../components/PromptInput/inputModes.js'
|
||||
import { makeHistoryReader } from '../history.js'
|
||||
import { KeyboardEvent } from '../ink/events/keyboard-event.js'
|
||||
// eslint-disable-next-line custom-rules/prefer-use-keybindings -- backward-compat bridge until consumers wire handleKeyDown to <Box onKeyDown>
|
||||
import { useInput } from '../ink.js'
|
||||
import { useKeybinding, useKeybindings } from '../keybindings/useKeybinding.js'
|
||||
import type { PromptInputMode } from '../types/textInputTypes.js'
|
||||
import type { HistoryEntry } from '../utils/config.js'
|
||||
|
||||
/**
 * Reverse-incremental history search (ctrl+r style) for the prompt input.
 *
 * While searching, the query is matched against history entries streamed
 * from `makeHistoryReader()` (an async generator); the latest match is
 * previewed directly into the input via the `on*Change` callbacks. The
 * pre-search input/cursor/mode/pastes are snapshotted so cancel can restore
 * them exactly.
 *
 * @param onAcceptHistory - Submits an entry as a new turn (execute action).
 * @param currentInput - Live input text, snapshotted when search starts.
 * @param onInputChange - Writes preview/restored text into the input.
 * @param onCursorChange - Moves the input cursor.
 * @param currentCursorOffset - Live cursor offset, snapshotted on start.
 * @param onModeChange - Switches the prompt input mode.
 * @param currentMode - Live mode, snapshotted when search starts.
 * @param isSearching - Whether search UI is active (owned by the caller).
 * @param setIsSearching - Toggles the search UI state.
 * @param setPastedContents - Writes pasted-content attachments.
 * @param currentPastedContents - Live pastes, snapshotted on start.
 * @returns Query state, the current match (plus a failed-match flag), and a
 *   keydown handler for the empty-query backspace-cancel behavior.
 */
export function useHistorySearch(
  onAcceptHistory: (entry: HistoryEntry) => void,
  currentInput: string,
  onInputChange: (input: string) => void,
  onCursorChange: (cursorOffset: number) => void,
  currentCursorOffset: number,
  onModeChange: (mode: PromptInputMode) => void,
  currentMode: PromptInputMode,
  isSearching: boolean,
  setIsSearching: (isSearching: boolean) => void,
  setPastedContents: (pastedContents: HistoryEntry['pastedContents']) => void,
  currentPastedContents: HistoryEntry['pastedContents'],
): {
  historyQuery: string
  setHistoryQuery: (query: string) => void
  historyMatch: HistoryEntry | undefined
  historyFailedMatch: boolean
  handleKeyDown: (e: KeyboardEvent) => void
} {
  const [historyQuery, setHistoryQuery] = useState('')
  const [historyFailedMatch, setHistoryFailedMatch] = useState(false)
  // Snapshot of the pre-search input state, restored on cancel.
  const [originalInput, setOriginalInput] = useState('')
  const [originalCursorOffset, setOriginalCursorOffset] = useState(0)
  const [originalMode, setOriginalMode] = useState<PromptInputMode>('prompt')
  const [originalPastedContents, setOriginalPastedContents] = useState<
    HistoryEntry['pastedContents']
  >({})
  const [historyMatch, setHistoryMatch] = useState<HistoryEntry | undefined>(
    undefined,
  )
  // Async generator streaming history entries newest-first; kept in a ref so
  // a repeated "next match" can resume from where the last scan stopped.
  const historyReader = useRef<AsyncGenerator<HistoryEntry> | undefined>(
    undefined,
  )
  // Entries already shown for the current query, so "next match" skips them.
  const seenPrompts = useRef<Set<string>>(new Set())
  const searchAbortController = useRef<AbortController | null>(null)

  const closeHistoryReader = useCallback((): void => {
    if (historyReader.current) {
      // Must explicitly call .return() to trigger the finally block in readLinesReverse,
      // which closes the file handle. Without this, file descriptors leak.
      void historyReader.current.return(undefined)
      historyReader.current = undefined
    }
  }, [])

  // Drops all search state back to its initial values and releases the
  // reader's file handle.
  const reset = useCallback((): void => {
    setIsSearching(false)
    setHistoryQuery('')
    setHistoryFailedMatch(false)
    setOriginalInput('')
    setOriginalCursorOffset(0)
    setOriginalMode('prompt')
    setOriginalPastedContents({})
    setHistoryMatch(undefined)
    closeHistoryReader()
    seenPrompts.current.clear()
  }, [setIsSearching, closeHistoryReader])

  // Scans history for the next entry containing historyQuery and previews it
  // into the input. resume=true continues from the current reader position
  // (repeat ctrl+r); resume=false restarts from the newest entry.
  const searchHistory = useCallback(
    async (resume: boolean, signal?: AbortSignal): Promise<void> => {
      if (!isSearching) {
        return
      }

      // Empty query: restore the snapshotted pre-search input.
      if (historyQuery.length === 0) {
        closeHistoryReader()
        seenPrompts.current.clear()
        setHistoryMatch(undefined)
        setHistoryFailedMatch(false)
        onInputChange(originalInput)
        onCursorChange(originalCursorOffset)
        onModeChange(originalMode)
        setPastedContents(originalPastedContents)
        return
      }

      if (!resume) {
        closeHistoryReader()
        historyReader.current = makeHistoryReader()
        seenPrompts.current.clear()
      }

      if (!historyReader.current) {
        return
      }

      while (true) {
        // The abort signal lets a newer query cancel this scan mid-stream.
        if (signal?.aborted) {
          return
        }

        const item = await historyReader.current.next()
        if (item.done) {
          // No match found - keep last match but mark as failed
          setHistoryFailedMatch(true)
          return
        }

        const display = item.value.display

        const matchPosition = display.lastIndexOf(historyQuery)
        if (matchPosition !== -1 && !seenPrompts.current.has(display)) {
          seenPrompts.current.add(display)
          setHistoryMatch(item.value)
          setHistoryFailedMatch(false)
          const mode = getModeFromInput(display)
          onModeChange(mode)
          onInputChange(display)
          setPastedContents(item.value.pastedContents)

          // Position cursor relative to the clean value, not the display
          const value = getValueFromInput(display)
          const cleanMatchPosition = value.lastIndexOf(historyQuery)
          onCursorChange(
            cleanMatchPosition !== -1 ? cleanMatchPosition : matchPosition,
          )
          return
        }
      }
    },
    [
      isSearching,
      historyQuery,
      closeHistoryReader,
      onInputChange,
      onCursorChange,
      onModeChange,
      setPastedContents,
      originalInput,
      originalCursorOffset,
      originalMode,
      originalPastedContents,
    ],
  )

  // Handler: Start history search (when not searching)
  // Snapshots the current input state so cancel can restore it.
  const handleStartSearch = useCallback(() => {
    setIsSearching(true)
    setOriginalInput(currentInput)
    setOriginalCursorOffset(currentCursorOffset)
    setOriginalMode(currentMode)
    setOriginalPastedContents(currentPastedContents)
    historyReader.current = makeHistoryReader()
    seenPrompts.current.clear()
  }, [
    setIsSearching,
    currentInput,
    currentCursorOffset,
    currentMode,
    currentPastedContents,
  ])

  // Handler: Find next match (when searching)
  const handleNextMatch = useCallback(() => {
    void searchHistory(true)
  }, [searchHistory])

  // Handler: Accept current match and exit search
  const handleAccept = useCallback(() => {
    if (historyMatch) {
      const mode = getModeFromInput(historyMatch.display)
      const value = getValueFromInput(historyMatch.display)
      onInputChange(value)
      onModeChange(mode)
      setPastedContents(historyMatch.pastedContents)
    } else {
      // No match - restore original pasted contents
      setPastedContents(originalPastedContents)
    }
    reset()
  }, [
    historyMatch,
    onInputChange,
    onModeChange,
    setPastedContents,
    originalPastedContents,
    reset,
  ])

  // Handler: Cancel search and restore original input
  const handleCancel = useCallback(() => {
    onInputChange(originalInput)
    onCursorChange(originalCursorOffset)
    setPastedContents(originalPastedContents)
    reset()
  }, [
    onInputChange,
    onCursorChange,
    setPastedContents,
    originalInput,
    originalCursorOffset,
    originalPastedContents,
    reset,
  ])

  // Handler: Execute (accept and submit)
  // Empty query submits the snapshotted original input instead of a match.
  const handleExecute = useCallback(() => {
    if (historyQuery.length === 0) {
      onAcceptHistory({
        display: originalInput,
        pastedContents: originalPastedContents,
      })
    } else if (historyMatch) {
      const mode = getModeFromInput(historyMatch.display)
      const value = getValueFromInput(historyMatch.display)
      onModeChange(mode)
      onAcceptHistory({
        display: value,
        pastedContents: historyMatch.pastedContents,
      })
    }
    reset()
  }, [
    historyQuery,
    historyMatch,
    onAcceptHistory,
    onModeChange,
    originalInput,
    originalPastedContents,
    reset,
  ])

  // Gated off under HISTORY_PICKER — the modal dialog owns ctrl+r there.
  useKeybinding('history:search', handleStartSearch, {
    context: 'Global',
    isActive: feature('HISTORY_PICKER') ? false : !isSearching,
  })

  // History search context keybindings (only active when searching)
  const historySearchHandlers = useMemo(
    () => ({
      'historySearch:next': handleNextMatch,
      'historySearch:accept': handleAccept,
      'historySearch:cancel': handleCancel,
      'historySearch:execute': handleExecute,
    }),
    [handleNextMatch, handleAccept, handleCancel, handleExecute],
  )

  useKeybindings(historySearchHandlers, {
    context: 'HistorySearch',
    isActive: isSearching,
  })

  // Handle backspace when query is empty (cancels search)
  // This is a conditional behavior that doesn't fit the keybinding model
  // well (backspace only cancels when query is empty)
  const handleKeyDown = (e: KeyboardEvent): void => {
    if (!isSearching) return
    if (e.key === 'backspace' && historyQuery === '') {
      e.preventDefault()
      handleCancel()
    }
  }

  // Backward-compat bridge: PromptInput doesn't yet wire handleKeyDown to
  // <Box onKeyDown>. Subscribe via useInput and adapt InputEvent →
  // KeyboardEvent until the consumer is migrated (separate PR).
  // TODO(onKeyDown-migration): remove once PromptInput passes handleKeyDown.
  useInput(
    (_input, _key, event) => {
      handleKeyDown(new KeyboardEvent(event.keypress))
    },
    { isActive: isSearching },
  )

  // Keep a ref to searchHistory to avoid it being a dependency of useEffect
  const searchHistoryRef = useRef(searchHistory)
  searchHistoryRef.current = searchHistory

  // Reset history search when query changes
  // Each query edit aborts the previous in-flight scan and starts a new one.
  useEffect(() => {
    searchAbortController.current?.abort()
    const controller = new AbortController()
    searchAbortController.current = controller
    void searchHistoryRef.current(false, controller.signal)
    return () => {
      controller.abort()
    }
  }, [historyQuery])

  return {
    historyQuery,
    setHistoryQuery,
    historyMatch,
    historyFailedMatch,
    handleKeyDown,
  }
}
|
||||
File diff suppressed because one or more lines are too long
@@ -0,0 +1,76 @@
|
||||
import { useEffect, useRef } from 'react'
|
||||
import { logError } from 'src/utils/log.js'
|
||||
import { z } from 'zod/v4'
|
||||
import type {
|
||||
ConnectedMCPServer,
|
||||
MCPServerConnection,
|
||||
} from '../services/mcp/types.js'
|
||||
import { getConnectedIdeClient } from '../utils/ide.js'
|
||||
import { lazySchema } from '../utils/lazySchema.js'
|
||||
export type IDEAtMentioned = {
|
||||
filePath: string
|
||||
lineStart?: number
|
||||
lineEnd?: number
|
||||
}
|
||||
|
||||
const NOTIFICATION_METHOD = 'at_mentioned'
|
||||
|
||||
const AtMentionedSchema = lazySchema(() =>
|
||||
z.object({
|
||||
method: z.literal(NOTIFICATION_METHOD),
|
||||
params: z.object({
|
||||
filePath: z.string(),
|
||||
lineStart: z.number().optional(),
|
||||
lineEnd: z.number().optional(),
|
||||
}),
|
||||
}),
|
||||
)
|
||||
|
||||
/**
|
||||
* A hook that tracks IDE at-mention notifications by directly registering
|
||||
* with MCP client notification handlers,
|
||||
*/
|
||||
export function useIdeAtMentioned(
|
||||
mcpClients: MCPServerConnection[],
|
||||
onAtMentioned: (atMentioned: IDEAtMentioned) => void,
|
||||
): void {
|
||||
const ideClientRef = useRef<ConnectedMCPServer | undefined>(undefined)
|
||||
|
||||
useEffect(() => {
|
||||
// Find the IDE client from the MCP clients list
|
||||
const ideClient = getConnectedIdeClient(mcpClients)
|
||||
|
||||
if (ideClientRef.current !== ideClient) {
|
||||
ideClientRef.current = ideClient
|
||||
}
|
||||
|
||||
// If we found a connected IDE client, register our handler
|
||||
if (ideClient) {
|
||||
ideClient.client.setNotificationHandler(
|
||||
AtMentionedSchema(),
|
||||
notification => {
|
||||
if (ideClientRef.current !== ideClient) {
|
||||
return
|
||||
}
|
||||
try {
|
||||
const data = notification.params
|
||||
// Adjust line numbers to be 1-based instead of 0-based
|
||||
const lineStart =
|
||||
data.lineStart !== undefined ? data.lineStart + 1 : undefined
|
||||
const lineEnd =
|
||||
data.lineEnd !== undefined ? data.lineEnd + 1 : undefined
|
||||
onAtMentioned({
|
||||
filePath: data.filePath,
|
||||
lineStart: lineStart,
|
||||
lineEnd: lineEnd,
|
||||
})
|
||||
} catch (error) {
|
||||
logError(error as Error)
|
||||
}
|
||||
},
|
||||
)
|
||||
}
|
||||
|
||||
// No cleanup needed as MCP clients manage their own lifecycle
|
||||
}, [mcpClients, onAtMentioned])
|
||||
}
|
||||
@@ -0,0 +1,33 @@
|
||||
import { useMemo } from 'react'
|
||||
import type { MCPServerConnection } from '../services/mcp/types.js'
|
||||
|
||||
export type IdeStatus = 'connected' | 'disconnected' | 'pending' | null
|
||||
|
||||
type IdeConnectionResult = {
|
||||
status: IdeStatus
|
||||
ideName: string | null
|
||||
}
|
||||
|
||||
export function useIdeConnectionStatus(
|
||||
mcpClients?: MCPServerConnection[],
|
||||
): IdeConnectionResult {
|
||||
return useMemo(() => {
|
||||
const ideClient = mcpClients?.find(client => client.name === 'ide')
|
||||
if (!ideClient) {
|
||||
return { status: null, ideName: null }
|
||||
}
|
||||
// Extract IDE name from config if available
|
||||
const config = ideClient.config
|
||||
const ideName =
|
||||
config.type === 'sse-ide' || config.type === 'ws-ide'
|
||||
? config.ideName
|
||||
: null
|
||||
if (ideClient.type === 'connected') {
|
||||
return { status: 'connected', ideName }
|
||||
}
|
||||
if (ideClient.type === 'pending') {
|
||||
return { status: 'pending', ideName }
|
||||
}
|
||||
return { status: 'disconnected', ideName }
|
||||
}, [mcpClients])
|
||||
}
|
||||
@@ -0,0 +1,41 @@
|
||||
import { useEffect } from 'react'
|
||||
import { logEvent } from 'src/services/analytics/index.js'
|
||||
import { z } from 'zod/v4'
|
||||
import type { MCPServerConnection } from '../services/mcp/types.js'
|
||||
import { getConnectedIdeClient } from '../utils/ide.js'
|
||||
import { lazySchema } from '../utils/lazySchema.js'
|
||||
|
||||
const LogEventSchema = lazySchema(() =>
|
||||
z.object({
|
||||
method: z.literal('log_event'),
|
||||
params: z.object({
|
||||
eventName: z.string(),
|
||||
eventData: z.object({}).passthrough(),
|
||||
}),
|
||||
}),
|
||||
)
|
||||
|
||||
export function useIdeLogging(mcpClients: MCPServerConnection[]): void {
|
||||
useEffect(() => {
|
||||
// Skip if there are no clients
|
||||
if (!mcpClients.length) {
|
||||
return
|
||||
}
|
||||
|
||||
// Find the IDE client from the MCP clients list
|
||||
const ideClient = getConnectedIdeClient(mcpClients)
|
||||
if (ideClient) {
|
||||
// Register the log event handler
|
||||
ideClient.client.setNotificationHandler(
|
||||
LogEventSchema(),
|
||||
notification => {
|
||||
const { eventName, eventData } = notification.params
|
||||
logEvent(
|
||||
`tengu_ide_${eventName}`,
|
||||
eventData as { [key: string]: boolean | number | undefined },
|
||||
)
|
||||
},
|
||||
)
|
||||
}
|
||||
}, [mcpClients])
|
||||
}
|
||||
@@ -0,0 +1,150 @@
|
||||
import { useEffect, useRef } from 'react'
|
||||
import { logError } from 'src/utils/log.js'
|
||||
import { z } from 'zod/v4'
|
||||
import type {
|
||||
ConnectedMCPServer,
|
||||
MCPServerConnection,
|
||||
} from '../services/mcp/types.js'
|
||||
import { getConnectedIdeClient } from '../utils/ide.js'
|
||||
import { lazySchema } from '../utils/lazySchema.js'
|
||||
export type SelectionPoint = {
|
||||
line: number
|
||||
character: number
|
||||
}
|
||||
|
||||
export type SelectionData = {
|
||||
selection: {
|
||||
start: SelectionPoint
|
||||
end: SelectionPoint
|
||||
} | null
|
||||
text?: string
|
||||
filePath?: string
|
||||
}
|
||||
|
||||
export type IDESelection = {
|
||||
lineCount: number
|
||||
lineStart?: number
|
||||
text?: string
|
||||
filePath?: string
|
||||
}
|
||||
|
||||
// Define the selection changed notification schema
|
||||
const SelectionChangedSchema = lazySchema(() =>
|
||||
z.object({
|
||||
method: z.literal('selection_changed'),
|
||||
params: z.object({
|
||||
selection: z
|
||||
.object({
|
||||
start: z.object({
|
||||
line: z.number(),
|
||||
character: z.number(),
|
||||
}),
|
||||
end: z.object({
|
||||
line: z.number(),
|
||||
character: z.number(),
|
||||
}),
|
||||
})
|
||||
.nullable()
|
||||
.optional(),
|
||||
text: z.string().optional(),
|
||||
filePath: z.string().optional(),
|
||||
}),
|
||||
}),
|
||||
)
|
||||
|
||||
/**
 * A hook that tracks IDE text selection information by directly registering
 * with MCP client notification handlers.
 *
 * Re-registers (and resets the reported selection) whenever the connected
 * IDE client instance changes; the registered-flag/ref pair ensures the
 * handler is installed at most once per client.
 *
 * @param mcpClients - All current MCP server connections.
 * @param onSelect - Receives the derived selection summary (line count,
 *   start line, text, file path) on each selection_changed notification.
 */
export function useIdeSelection(
  mcpClients: MCPServerConnection[],
  onSelect: (selection: IDESelection) => void,
): void {
  // True once the handler is installed for the current IDE client.
  const handlersRegistered = useRef(false)
  // The client the installed handler belongs to; stale handlers compare
  // against this and bail out.
  const currentIDERef = useRef<ConnectedMCPServer | null>(null)

  useEffect(() => {
    // Find the IDE client from the MCP clients list
    const ideClient = getConnectedIdeClient(mcpClients)

    // If the IDE client changed, we need to re-register handlers.
    // Normalize undefined to null so the initial ref value (null) matches
    // "no IDE found" (undefined), avoiding spurious resets on every MCP update.
    if (currentIDERef.current !== (ideClient ?? null)) {
      handlersRegistered.current = false
      currentIDERef.current = ideClient || null
      // Reset the selection when the IDE client changes.
      onSelect({
        lineCount: 0,
        lineStart: undefined,
        text: undefined,
        filePath: undefined,
      })
    }

    // Skip if we've already registered handlers for the current IDE or if there's no IDE client
    if (handlersRegistered.current || !ideClient) {
      return
    }

    // Handler function for selection changes
    // NOTE(review): this only calls onSelect when a non-null selection with
    // both endpoints is present; the `selection: null` invocation below is
    // therefore a no-op — confirm whether empty selections should clear the
    // reported state instead.
    const selectionChangeHandler = (data: SelectionData) => {
      if (data.selection?.start && data.selection?.end) {
        const { start, end } = data.selection
        let lineCount = end.line - start.line + 1
        // If on the first character of the line, do not count the line
        // as being selected.
        if (end.character === 0) {
          lineCount--
        }
        const selection = {
          lineCount,
          lineStart: start.line,
          text: data.text,
          filePath: data.filePath,
        }

        onSelect(selection)
      }
    }

    // Register notification handler for selection_changed events
    ideClient.client.setNotificationHandler(
      SelectionChangedSchema(),
      notification => {
        // Ignore events from a client we've since replaced.
        if (currentIDERef.current !== ideClient) {
          return
        }

        try {
          // Get the selection data from the notification params
          const selectionData = notification.params

          // Process selection data - validate it has required properties
          if (
            selectionData.selection &&
            selectionData.selection.start &&
            selectionData.selection.end
          ) {
            // Handle selection changes
            selectionChangeHandler(selectionData as SelectionData)
          } else if (selectionData.text !== undefined) {
            // Handle empty selection (when text is empty string)
            selectionChangeHandler({
              selection: null,
              text: selectionData.text,
              filePath: selectionData.filePath,
            })
          }
        } catch (error) {
          logError(error as Error)
        }
      },
    )

    // Mark that we've registered handlers
    handlersRegistered.current = true

    // No cleanup needed as MCP clients manage their own lifecycle
  }, [mcpClients, onSelect])
}
|
||||
@@ -0,0 +1,969 @@
|
||||
import { randomUUID } from 'crypto'
|
||||
import { useCallback, useEffect, useRef } from 'react'
|
||||
import { useInterval } from 'usehooks-ts'
|
||||
import type { ToolUseConfirm } from '../components/permissions/PermissionRequest.js'
|
||||
import { TEAMMATE_MESSAGE_TAG } from '../constants/xml.js'
|
||||
import { useTerminalNotification } from '../ink/useTerminalNotification.js'
|
||||
import { sendNotification } from '../services/notifier.js'
|
||||
import {
|
||||
type AppState,
|
||||
useAppState,
|
||||
useAppStateStore,
|
||||
useSetAppState,
|
||||
} from '../state/AppState.js'
|
||||
import { findToolByName } from '../Tool.js'
|
||||
import { isInProcessTeammateTask } from '../tasks/InProcessTeammateTask/types.js'
|
||||
import { getAllBaseTools } from '../tools.js'
|
||||
import type { PermissionUpdate } from '../types/permissions.js'
|
||||
import { logForDebugging } from '../utils/debug.js'
|
||||
import {
|
||||
findInProcessTeammateTaskId,
|
||||
handlePlanApprovalResponse,
|
||||
} from '../utils/inProcessTeammateHelpers.js'
|
||||
import { createAssistantMessage } from '../utils/messages.js'
|
||||
import {
|
||||
permissionModeFromString,
|
||||
toExternalPermissionMode,
|
||||
} from '../utils/permissions/PermissionMode.js'
|
||||
import { applyPermissionUpdate } from '../utils/permissions/PermissionUpdate.js'
|
||||
import { jsonStringify } from '../utils/slowOperations.js'
|
||||
import { isInsideTmux } from '../utils/swarm/backends/detection.js'
|
||||
import {
|
||||
ensureBackendsRegistered,
|
||||
getBackendByType,
|
||||
} from '../utils/swarm/backends/registry.js'
|
||||
import type { PaneBackendType } from '../utils/swarm/backends/types.js'
|
||||
import { TEAM_LEAD_NAME } from '../utils/swarm/constants.js'
|
||||
import { getLeaderToolUseConfirmQueue } from '../utils/swarm/leaderPermissionBridge.js'
|
||||
import { sendPermissionResponseViaMailbox } from '../utils/swarm/permissionSync.js'
|
||||
import {
|
||||
removeTeammateFromTeamFile,
|
||||
setMemberMode,
|
||||
} from '../utils/swarm/teamHelpers.js'
|
||||
import { unassignTeammateTasks } from '../utils/tasks.js'
|
||||
import {
|
||||
getAgentName,
|
||||
isPlanModeRequired,
|
||||
isTeamLead,
|
||||
isTeammate,
|
||||
} from '../utils/teammate.js'
|
||||
import { isInProcessTeammate } from '../utils/teammateContext.js'
|
||||
import {
|
||||
isModeSetRequest,
|
||||
isPermissionRequest,
|
||||
isPermissionResponse,
|
||||
isPlanApprovalRequest,
|
||||
isPlanApprovalResponse,
|
||||
isSandboxPermissionRequest,
|
||||
isSandboxPermissionResponse,
|
||||
isShutdownApproved,
|
||||
isShutdownRequest,
|
||||
isTeamPermissionUpdate,
|
||||
markMessagesAsRead,
|
||||
readUnreadMessages,
|
||||
type TeammateMessage,
|
||||
writeToMailbox,
|
||||
} from '../utils/teammateMailbox.js'
|
||||
import {
|
||||
hasPermissionCallback,
|
||||
hasSandboxPermissionCallback,
|
||||
processMailboxPermissionResponse,
|
||||
processSandboxPermissionResponse,
|
||||
} from './useSwarmPermissionPoller.js'
|
||||
|
||||
/**
|
||||
* Get the agent name to poll for messages.
|
||||
* - In-process teammates return undefined (they use waitForNextPromptOrShutdown instead)
|
||||
* - Process-based teammates use their CLAUDE_CODE_AGENT_NAME
|
||||
* - Team leads use their name from teamContext.teammates
|
||||
* - Standalone sessions return undefined
|
||||
*/
|
||||
function getAgentNameToPoll(appState: AppState): string | undefined {
|
||||
// In-process teammates should NOT use useInboxPoller - they have their own
|
||||
// polling mechanism via waitForNextPromptOrShutdown() in inProcessRunner.ts.
|
||||
// Using useInboxPoller would cause message routing issues since in-process
|
||||
// teammates share the same React context and AppState with the leader.
|
||||
//
|
||||
// Note: This can be called when the leader's REPL re-renders while an
|
||||
// in-process teammate's AsyncLocalStorage context is active (due to shared
|
||||
// setAppState). We return undefined to gracefully skip polling rather than
|
||||
// throwing, since this is a normal occurrence during concurrent execution.
|
||||
if (isInProcessTeammate()) {
|
||||
return undefined
|
||||
}
|
||||
if (isTeammate()) {
|
||||
return getAgentName()
|
||||
}
|
||||
// Team lead polls using their agent name (not ID)
|
||||
if (isTeamLead(appState.teamContext)) {
|
||||
const leadAgentId = appState.teamContext!.leadAgentId
|
||||
// Look up the lead's name from teammates map
|
||||
const leadName = appState.teamContext!.teammates[leadAgentId]?.name
|
||||
return leadName || 'team-lead'
|
||||
}
|
||||
return undefined
|
||||
}
|
||||
|
||||
const INBOX_POLL_INTERVAL_MS = 1000
|
||||
|
||||
type Props = {
|
||||
enabled: boolean
|
||||
isLoading: boolean
|
||||
focusedInputDialog: string | undefined
|
||||
// Returns true if submission succeeded, false if rejected (e.g., query already running)
|
||||
// Dead code elimination: parameter named onSubmitMessage to avoid "teammate" string in external builds
|
||||
onSubmitMessage: (formatted: string) => boolean
|
||||
}
|
||||
|
||||
/**
 * Polls the teammate inbox for new messages and submits them as turns.
 *
 * This hook:
 * 1. Polls every INBOX_POLL_INTERVAL_MS (1s) for unread messages (teammates or team leads)
 * 2. When idle: submits messages immediately as a new turn
 * 3. When busy: queues messages in AppState.inbox for UI display, delivers when turn ends
 *
 * Control-protocol messages (permission requests/responses, sandbox permission
 * traffic, team permission updates, mode changes, plan approvals, shutdown
 * handshakes) are intercepted and handled here; only "regular" messages are
 * forwarded to the model.
 */
export function useInboxPoller({
  enabled,
  isLoading,
  focusedInputDialog,
  onSubmitMessage,
}: Props): void {
  // Assign to original name for clarity within the function
  const onSubmitTeammateMessage = onSubmitMessage
  const store = useAppStateStore()
  const setAppState = useSetAppState()
  // Subscribing to the count (not the array) keeps the idle-delivery effect
  // re-running when messages arrive without depending on the whole appState.
  const inboxMessageCount = useAppState(s => s.inbox.messages.length)
  const terminal = useTerminalNotification()

  const poll = useCallback(async () => {
    if (!enabled) return

    // Use ref to avoid dependency on appState object (prevents infinite loop)
    const currentAppState = store.getState()
    const agentName = getAgentNameToPoll(currentAppState)
    if (!agentName) return

    const unread = await readUnreadMessages(
      agentName,
      currentAppState.teamContext?.teamName,
    )

    if (unread.length === 0) return

    logForDebugging(`[InboxPoller] Found ${unread.length} unread message(s)`)

    // Check for plan approval responses and transition out of plan mode if approved
    // Security: Only accept approval responses from the team lead
    if (isTeammate() && isPlanModeRequired()) {
      for (const msg of unread) {
        const approvalResponse = isPlanApprovalResponse(msg.text)
        // Verify the message is from the team lead to prevent teammates from forging approvals
        if (approvalResponse && msg.from === 'team-lead') {
          logForDebugging(
            `[InboxPoller] Received plan approval response from team-lead: approved=${approvalResponse.approved}`,
          )
          if (approvalResponse.approved) {
            // Use leader's permission mode if provided, otherwise default
            const targetMode = approvalResponse.permissionMode ?? 'default'

            // Transition out of plan mode
            setAppState(prev => ({
              ...prev,
              toolPermissionContext: applyPermissionUpdate(
                prev.toolPermissionContext,
                {
                  type: 'setMode',
                  mode: toExternalPermissionMode(targetMode),
                  destination: 'session',
                },
              ),
            }))
            logForDebugging(
              `[InboxPoller] Plan approved by team lead, exited plan mode to ${targetMode}`,
            )
          } else {
            logForDebugging(
              `[InboxPoller] Plan rejected by team lead: ${approvalResponse.feedback || 'No feedback provided'}`,
            )
          }
        } else if (approvalResponse) {
          logForDebugging(
            `[InboxPoller] Ignoring plan approval response from non-team-lead: ${msg.from}`,
          )
        }
      }
    }

    // Helper to mark messages as read in the inbox file.
    // Called after messages are successfully delivered or reliably queued.
    const markRead = () => {
      void markMessagesAsRead(agentName, currentAppState.teamContext?.teamName)
    }

    // Separate permission messages from regular teammate messages
    const permissionRequests: TeammateMessage[] = []
    const permissionResponses: TeammateMessage[] = []
    const sandboxPermissionRequests: TeammateMessage[] = []
    const sandboxPermissionResponses: TeammateMessage[] = []
    const shutdownRequests: TeammateMessage[] = []
    const shutdownApprovals: TeammateMessage[] = []
    const teamPermissionUpdates: TeammateMessage[] = []
    const modeSetRequests: TeammateMessage[] = []
    const planApprovalRequests: TeammateMessage[] = []
    const regularMessages: TeammateMessage[] = []

    // Classify each unread message exactly once; first matching category wins.
    for (const m of unread) {
      const permReq = isPermissionRequest(m.text)
      const permResp = isPermissionResponse(m.text)
      const sandboxReq = isSandboxPermissionRequest(m.text)
      const sandboxResp = isSandboxPermissionResponse(m.text)
      const shutdownReq = isShutdownRequest(m.text)
      const shutdownApproval = isShutdownApproved(m.text)
      const teamPermUpdate = isTeamPermissionUpdate(m.text)
      const modeSetReq = isModeSetRequest(m.text)
      const planApprovalReq = isPlanApprovalRequest(m.text)

      if (permReq) {
        permissionRequests.push(m)
      } else if (permResp) {
        permissionResponses.push(m)
      } else if (sandboxReq) {
        sandboxPermissionRequests.push(m)
      } else if (sandboxResp) {
        sandboxPermissionResponses.push(m)
      } else if (shutdownReq) {
        shutdownRequests.push(m)
      } else if (shutdownApproval) {
        shutdownApprovals.push(m)
      } else if (teamPermUpdate) {
        teamPermissionUpdates.push(m)
      } else if (modeSetReq) {
        modeSetRequests.push(m)
      } else if (planApprovalReq) {
        planApprovalRequests.push(m)
      } else {
        regularMessages.push(m)
      }
    }

    // Handle permission requests (leader side) - route to ToolUseConfirmQueue
    if (
      permissionRequests.length > 0 &&
      isTeamLead(currentAppState.teamContext)
    ) {
      logForDebugging(
        `[InboxPoller] Found ${permissionRequests.length} permission request(s)`,
      )

      const setToolUseConfirmQueue = getLeaderToolUseConfirmQueue()
      const teamName = currentAppState.teamContext?.teamName

      for (const m of permissionRequests) {
        // Re-parse here (classification above only kept the raw message).
        const parsed = isPermissionRequest(m.text)
        if (!parsed) continue

        if (setToolUseConfirmQueue) {
          // Route through the standard ToolUseConfirmQueue so tmux workers
          // get the same tool-specific UI (BashPermissionRequest, FileEditToolDiff, etc.)
          // as in-process teammates.
          const tool = findToolByName(getAllBaseTools(), parsed.tool_name)
          if (!tool) {
            logForDebugging(
              `[InboxPoller] Unknown tool ${parsed.tool_name}, skipping permission request`,
            )
            continue
          }

          const entry: ToolUseConfirm = {
            assistantMessage: createAssistantMessage({ content: '' }),
            tool,
            description: parsed.description,
            input: parsed.input,
            // NOTE(review): empty object cast to the context type — downstream
            // consumers must not rely on any of its fields for mailbox-routed
            // requests. Confirm against ToolUseConfirm handling.
            toolUseContext: {} as ToolUseConfirm['toolUseContext'],
            toolUseID: parsed.tool_use_id,
            permissionResult: {
              behavior: 'ask',
              message: parsed.description,
            },
            permissionPromptStartTimeMs: Date.now(),
            workerBadge: {
              name: parsed.agent_id,
              color: 'cyan',
            },
            onUserInteraction() {
              // No-op for tmux workers (no classifier auto-approval)
            },
            onAbort() {
              // Aborting the prompt counts as a rejection for the worker.
              void sendPermissionResponseViaMailbox(
                parsed.agent_id,
                { decision: 'rejected', resolvedBy: 'leader' },
                parsed.request_id,
                teamName,
              )
            },
            onAllow(
              updatedInput: Record<string, unknown>,
              permissionUpdates: PermissionUpdate[],
            ) {
              void sendPermissionResponseViaMailbox(
                parsed.agent_id,
                {
                  decision: 'approved',
                  resolvedBy: 'leader',
                  updatedInput,
                  permissionUpdates,
                },
                parsed.request_id,
                teamName,
              )
            },
            onReject(feedback?: string) {
              void sendPermissionResponseViaMailbox(
                parsed.agent_id,
                {
                  decision: 'rejected',
                  resolvedBy: 'leader',
                  feedback,
                },
                parsed.request_id,
                teamName,
              )
            },
            async recheckPermission() {
              // No-op for tmux workers — permission state is on the worker side
            },
          }

          // Deduplicate: if markMessagesAsRead failed on a prior poll,
          // the same message will be re-read — skip if already queued.
          setToolUseConfirmQueue(queue => {
            if (queue.some(q => q.toolUseID === parsed.tool_use_id)) {
              return queue
            }
            return [...queue, entry]
          })
        } else {
          logForDebugging(
            `[InboxPoller] ToolUseConfirmQueue unavailable, dropping permission request from ${parsed.agent_id}`,
          )
        }
      }

      // Send desktop notification for the first request
      const firstParsed = isPermissionRequest(permissionRequests[0]?.text ?? '')
      if (firstParsed && !isLoading && !focusedInputDialog) {
        void sendNotification(
          {
            message: `${firstParsed.agent_id} needs permission for ${firstParsed.tool_name}`,
            notificationType: 'worker_permission_prompt',
          },
          terminal,
        )
      }
    }

    // Handle permission responses (worker side) - invoke registered callbacks
    if (permissionResponses.length > 0 && isTeammate()) {
      logForDebugging(
        `[InboxPoller] Found ${permissionResponses.length} permission response(s)`,
      )

      for (const m of permissionResponses) {
        const parsed = isPermissionResponse(m.text)
        if (!parsed) continue

        // Responses with no registered callback (e.g. stale request ids) are
        // silently skipped; markRead() below still clears them.
        if (hasPermissionCallback(parsed.request_id)) {
          logForDebugging(
            `[InboxPoller] Processing permission response for ${parsed.request_id}: ${parsed.subtype}`,
          )

          if (parsed.subtype === 'success') {
            processMailboxPermissionResponse({
              requestId: parsed.request_id,
              decision: 'approved',
              updatedInput: parsed.response?.updated_input,
              permissionUpdates: parsed.response?.permission_updates,
            })
          } else {
            processMailboxPermissionResponse({
              requestId: parsed.request_id,
              decision: 'rejected',
              feedback: parsed.error,
            })
          }
        }
      }
    }

    // Handle sandbox permission requests (leader side) - add to workerSandboxPermissions queue
    if (
      sandboxPermissionRequests.length > 0 &&
      isTeamLead(currentAppState.teamContext)
    ) {
      logForDebugging(
        `[InboxPoller] Found ${sandboxPermissionRequests.length} sandbox permission request(s)`,
      )

      const newSandboxRequests: Array<{
        requestId: string
        workerId: string
        workerName: string
        workerColor?: string
        host: string
        createdAt: number
      }> = []

      for (const m of sandboxPermissionRequests) {
        const parsed = isSandboxPermissionRequest(m.text)
        if (!parsed) continue

        // Validate required nested fields to prevent crashes from malformed messages
        if (!parsed.hostPattern?.host) {
          logForDebugging(
            `[InboxPoller] Invalid sandbox permission request: missing hostPattern.host`,
          )
          continue
        }

        newSandboxRequests.push({
          requestId: parsed.requestId,
          workerId: parsed.workerId,
          workerName: parsed.workerName,
          workerColor: parsed.workerColor,
          host: parsed.hostPattern.host,
          createdAt: parsed.createdAt,
        })
      }

      if (newSandboxRequests.length > 0) {
        setAppState(prev => ({
          ...prev,
          workerSandboxPermissions: {
            ...prev.workerSandboxPermissions,
            queue: [
              ...prev.workerSandboxPermissions.queue,
              ...newSandboxRequests,
            ],
          },
        }))

        // Send desktop notification for the first new request
        const firstRequest = newSandboxRequests[0]
        if (firstRequest && !isLoading && !focusedInputDialog) {
          void sendNotification(
            {
              message: `${firstRequest.workerName} needs network access to ${firstRequest.host}`,
              notificationType: 'worker_permission_prompt',
            },
            terminal,
          )
        }
      }
    }

    // Handle sandbox permission responses (worker side) - invoke registered callbacks
    if (sandboxPermissionResponses.length > 0 && isTeammate()) {
      logForDebugging(
        `[InboxPoller] Found ${sandboxPermissionResponses.length} sandbox permission response(s)`,
      )

      for (const m of sandboxPermissionResponses) {
        const parsed = isSandboxPermissionResponse(m.text)
        if (!parsed) continue

        // Check if we have a registered callback for this request
        if (hasSandboxPermissionCallback(parsed.requestId)) {
          logForDebugging(
            `[InboxPoller] Processing sandbox permission response for ${parsed.requestId}: allow=${parsed.allow}`,
          )

          // Process the response using the exported function
          processSandboxPermissionResponse({
            requestId: parsed.requestId,
            host: parsed.host,
            allow: parsed.allow,
          })

          // Clear the pending sandbox request indicator
          setAppState(prev => ({
            ...prev,
            pendingSandboxRequest: null,
          }))
        }
      }
    }

    // Handle team permission updates (teammate side) - apply permission to context
    if (teamPermissionUpdates.length > 0 && isTeammate()) {
      logForDebugging(
        `[InboxPoller] Found ${teamPermissionUpdates.length} team permission update(s)`,
      )

      for (const m of teamPermissionUpdates) {
        const parsed = isTeamPermissionUpdate(m.text)
        if (!parsed) {
          logForDebugging(
            `[InboxPoller] Failed to parse team permission update: ${m.text.substring(0, 100)}`,
          )
          continue
        }

        // Validate required nested fields to prevent crashes from malformed messages
        if (
          !parsed.permissionUpdate?.rules ||
          !parsed.permissionUpdate?.behavior
        ) {
          logForDebugging(
            `[InboxPoller] Invalid team permission update: missing permissionUpdate.rules or permissionUpdate.behavior`,
          )
          continue
        }

        // Apply the permission update to the teammate's context
        logForDebugging(
          `[InboxPoller] Applying team permission update: ${parsed.toolName} allowed in ${parsed.directoryPath}`,
        )
        logForDebugging(
          `[InboxPoller] Permission update rules: ${jsonStringify(parsed.permissionUpdate.rules)}`,
        )

        setAppState(prev => {
          const updated = applyPermissionUpdate(prev.toolPermissionContext, {
            type: 'addRules',
            rules: parsed.permissionUpdate.rules,
            behavior: parsed.permissionUpdate.behavior,
            destination: 'session',
          })
          logForDebugging(
            `[InboxPoller] Updated session allow rules: ${jsonStringify(updated.alwaysAllowRules.session)}`,
          )
          return {
            ...prev,
            toolPermissionContext: updated,
          }
        })
      }
    }

    // Handle mode set requests (teammate side) - team lead changing teammate's mode
    if (modeSetRequests.length > 0 && isTeammate()) {
      logForDebugging(
        `[InboxPoller] Found ${modeSetRequests.length} mode set request(s)`,
      )

      for (const m of modeSetRequests) {
        // Only accept mode changes from team-lead
        if (m.from !== 'team-lead') {
          logForDebugging(
            `[InboxPoller] Ignoring mode set request from non-team-lead: ${m.from}`,
          )
          continue
        }

        const parsed = isModeSetRequest(m.text)
        if (!parsed) {
          logForDebugging(
            `[InboxPoller] Failed to parse mode set request: ${m.text.substring(0, 100)}`,
          )
          continue
        }

        const targetMode = permissionModeFromString(parsed.mode)
        logForDebugging(
          `[InboxPoller] Applying mode change from team-lead: ${targetMode}`,
        )

        // Update local permission context
        setAppState(prev => ({
          ...prev,
          toolPermissionContext: applyPermissionUpdate(
            prev.toolPermissionContext,
            {
              type: 'setMode',
              mode: toExternalPermissionMode(targetMode),
              destination: 'session',
            },
          ),
        }))

        // Update config.json so team lead can see the new mode
        const teamName = currentAppState.teamContext?.teamName
        const agentName = getAgentName()
        if (teamName && agentName) {
          setMemberMode(teamName, agentName, targetMode)
        }
      }
    }

    // Handle plan approval requests (leader side) - auto-approve and write response to teammate inbox
    if (
      planApprovalRequests.length > 0 &&
      isTeamLead(currentAppState.teamContext)
    ) {
      logForDebugging(
        `[InboxPoller] Found ${planApprovalRequests.length} plan approval request(s), auto-approving`,
      )

      const teamName = currentAppState.teamContext?.teamName
      const leaderExternalMode = toExternalPermissionMode(
        currentAppState.toolPermissionContext.mode,
      )
      // Never propagate 'plan' mode to the teammate — fall back to 'default'
      // so the approved teammate can actually execute.
      const modeToInherit =
        leaderExternalMode === 'plan' ? 'default' : leaderExternalMode

      for (const m of planApprovalRequests) {
        const parsed = isPlanApprovalRequest(m.text)
        if (!parsed) continue

        // Write approval response to teammate's inbox
        const approvalResponse = {
          type: 'plan_approval_response',
          requestId: parsed.requestId,
          approved: true,
          timestamp: new Date().toISOString(),
          permissionMode: modeToInherit,
        }

        void writeToMailbox(
          m.from,
          {
            from: TEAM_LEAD_NAME,
            text: jsonStringify(approvalResponse),
            timestamp: new Date().toISOString(),
          },
          teamName,
        )

        // Update in-process teammate task state if applicable
        const taskId = findInProcessTeammateTaskId(m.from, currentAppState)
        if (taskId) {
          handlePlanApprovalResponse(
            taskId,
            {
              type: 'plan_approval_response',
              requestId: parsed.requestId,
              approved: true,
              timestamp: new Date().toISOString(),
              permissionMode: modeToInherit,
            },
            setAppState,
          )
        }

        logForDebugging(
          `[InboxPoller] Auto-approved plan from ${m.from} (request ${parsed.requestId})`,
        )

        // Still pass through as a regular message so the model has context
        // about what the teammate is doing, but the approval is already sent
        regularMessages.push(m)
      }
    }

    // Handle shutdown requests (teammate side) - preserve JSON for UI rendering
    if (shutdownRequests.length > 0 && isTeammate()) {
      logForDebugging(
        `[InboxPoller] Found ${shutdownRequests.length} shutdown request(s)`,
      )

      // Pass through shutdown requests - the UI component will render them nicely
      // and the model will receive instructions via the tool prompt documentation
      for (const m of shutdownRequests) {
        regularMessages.push(m)
      }
    }

    // Handle shutdown approvals (leader side) - kill the teammate's pane
    if (
      shutdownApprovals.length > 0 &&
      isTeamLead(currentAppState.teamContext)
    ) {
      logForDebugging(
        `[InboxPoller] Found ${shutdownApprovals.length} shutdown approval(s)`,
      )

      for (const m of shutdownApprovals) {
        const parsed = isShutdownApproved(m.text)
        if (!parsed) continue

        // Kill the pane if we have the info (pane-based teammates)
        if (parsed.paneId && parsed.backendType) {
          // Fire-and-forget: pane teardown must not block the poll loop.
          void (async () => {
            try {
              // Ensure backend classes are imported (no subprocess probes)
              await ensureBackendsRegistered()
              const insideTmux = await isInsideTmux()
              const backend = getBackendByType(
                parsed.backendType as PaneBackendType,
              )
              const success = await backend?.killPane(
                parsed.paneId!,
                !insideTmux,
              )
              logForDebugging(
                `[InboxPoller] Killed pane ${parsed.paneId} for ${parsed.from}: ${success}`,
              )
            } catch (error) {
              logForDebugging(
                `[InboxPoller] Failed to kill pane for ${parsed.from}: ${error}`,
              )
            }
          })()
        }

        // Remove the teammate from teamContext.teammates so the count is accurate
        const teammateToRemove = parsed.from
        if (teammateToRemove && currentAppState.teamContext?.teammates) {
          // Find the teammate ID by name
          const teammateId = Object.entries(
            currentAppState.teamContext.teammates,
          ).find(([, t]) => t.name === teammateToRemove)?.[0]

          if (teammateId) {
            // Remove from team file (leader owns team file mutations)
            const teamName = currentAppState.teamContext?.teamName
            if (teamName) {
              removeTeammateFromTeamFile(teamName, {
                agentId: teammateId,
                name: teammateToRemove,
              })
            }

            // Unassign tasks and build notification message
            const { notificationMessage } = teamName
              ? await unassignTeammateTasks(
                  teamName,
                  teammateId,
                  teammateToRemove,
                  'shutdown',
                )
              : { notificationMessage: `${teammateToRemove} has shut down.` }

            setAppState(prev => {
              if (!prev.teamContext?.teammates) return prev
              if (!(teammateId in prev.teamContext.teammates)) return prev
              const { [teammateId]: _, ...remainingTeammates } =
                prev.teamContext.teammates

              // Mark the teammate's task as completed so hasRunningTeammates
              // becomes false and the spinner stops. Without this, out-of-process
              // (tmux) teammate tasks stay status:'running' forever because
              // only in-process teammates have a runner that sets 'completed'.
              const updatedTasks = { ...prev.tasks }
              for (const [tid, task] of Object.entries(updatedTasks)) {
                if (
                  isInProcessTeammateTask(task) &&
                  task.identity.agentId === teammateId
                ) {
                  updatedTasks[tid] = {
                    ...task,
                    status: 'completed' as const,
                    endTime: Date.now(),
                  }
                }
              }

              return {
                ...prev,
                tasks: updatedTasks,
                teamContext: {
                  ...prev.teamContext,
                  teammates: remainingTeammates,
                },
                inbox: {
                  messages: [
                    ...prev.inbox.messages,
                    {
                      id: randomUUID(),
                      from: 'system',
                      text: jsonStringify({
                        type: 'teammate_terminated',
                        message: notificationMessage,
                      }),
                      timestamp: new Date().toISOString(),
                      status: 'pending' as const,
                    },
                  ],
                },
              }
            })
            logForDebugging(
              `[InboxPoller] Removed ${teammateToRemove} (${teammateId}) from teamContext`,
            )
          }
        }

        // Pass through for UI rendering - the component will render it nicely
        regularMessages.push(m)
      }
    }

    // Process regular teammate messages (existing logic)
    if (regularMessages.length === 0) {
      // No regular messages, but we may have processed non-regular messages
      // (permissions, shutdown requests, etc.) above — mark those as read.
      markRead()
      return
    }

    // Format messages with XML wrapper for Claude (include color if available)
    // Transform plan approval requests to include instructions for Claude
    const formatted = regularMessages
      .map(m => {
        const colorAttr = m.color ? ` color="${m.color}"` : ''
        const summaryAttr = m.summary ? ` summary="${m.summary}"` : ''
        const messageContent = m.text

        return `<${TEAMMATE_MESSAGE_TAG} teammate_id="${m.from}"${colorAttr}${summaryAttr}>\n${messageContent}\n</${TEAMMATE_MESSAGE_TAG}>`
      })
      .join('\n\n')

    // Helper to queue messages in AppState for later delivery
    const queueMessages = () => {
      setAppState(prev => ({
        ...prev,
        inbox: {
          messages: [
            ...prev.inbox.messages,
            ...regularMessages.map(m => ({
              id: randomUUID(),
              from: m.from,
              text: m.text,
              timestamp: m.timestamp,
              status: 'pending' as const,
              color: m.color,
              summary: m.summary,
            })),
          ],
        },
      }))
    }

    if (!isLoading && !focusedInputDialog) {
      // IDLE: Submit as new turn immediately
      logForDebugging(`[InboxPoller] Session idle, submitting immediately`)
      const submitted = onSubmitTeammateMessage(formatted)
      if (!submitted) {
        // Submission rejected (query already running), queue for later
        logForDebugging(
          `[InboxPoller] Submission rejected, queuing for later delivery`,
        )
        queueMessages()
      }
    } else {
      // BUSY: Add to inbox queue for UI display + later delivery
      logForDebugging(`[InboxPoller] Session busy, queuing for later delivery`)
      queueMessages()
    }

    // Mark messages as read only after they have been successfully delivered
    // or reliably queued in AppState. This prevents permanent message loss
    // when the session is busy — if we crash before this point, the messages
    // will be re-read on the next poll cycle instead of being silently dropped.
    markRead()
  }, [
    enabled,
    isLoading,
    focusedInputDialog,
    onSubmitTeammateMessage,
    setAppState,
    terminal,
    store,
  ])

  // When session becomes idle, deliver any pending messages and clean up processed ones
  useEffect(() => {
    if (!enabled) return

    // Skip if busy or in a dialog
    if (isLoading || focusedInputDialog) {
      return
    }

    // Use ref to avoid dependency on appState object (prevents infinite loop)
    const currentAppState = store.getState()
    const agentName = getAgentNameToPoll(currentAppState)
    if (!agentName) return

    const pendingMessages = currentAppState.inbox.messages.filter(
      m => m.status === 'pending',
    )
    const processedMessages = currentAppState.inbox.messages.filter(
      m => m.status === 'processed',
    )

    // Clean up processed messages (they were already delivered mid-turn as attachments)
    if (processedMessages.length > 0) {
      logForDebugging(
        `[InboxPoller] Cleaning up ${processedMessages.length} processed message(s) that were delivered mid-turn`,
      )
      const processedIds = new Set(processedMessages.map(m => m.id))
      setAppState(prev => ({
        ...prev,
        inbox: {
          messages: prev.inbox.messages.filter(m => !processedIds.has(m.id)),
        },
      }))
    }

    // No pending messages to deliver
    if (pendingMessages.length === 0) return

    logForDebugging(
      `[InboxPoller] Session idle, delivering ${pendingMessages.length} pending message(s)`,
    )

    // Format messages with XML wrapper for Claude (include color if available)
    const formatted = pendingMessages
      .map(m => {
        const colorAttr = m.color ? ` color="${m.color}"` : ''
        const summaryAttr = m.summary ? ` summary="${m.summary}"` : ''
        return `<${TEAMMATE_MESSAGE_TAG} teammate_id="${m.from}"${colorAttr}${summaryAttr}>\n${m.text}\n</${TEAMMATE_MESSAGE_TAG}>`
      })
      .join('\n\n')

    // Try to submit - only clear messages if successful
    const submitted = onSubmitTeammateMessage(formatted)
    if (submitted) {
      // Clear the specific messages we just submitted by their IDs
      const submittedIds = new Set(pendingMessages.map(m => m.id))
      setAppState(prev => ({
        ...prev,
        inbox: {
          messages: prev.inbox.messages.filter(m => !submittedIds.has(m.id)),
        },
      }))
    } else {
      logForDebugging(
        `[InboxPoller] Submission rejected, keeping messages queued`,
      )
    }
  }, [
    enabled,
    isLoading,
    focusedInputDialog,
    onSubmitTeammateMessage,
    setAppState,
    inboxMessageCount,
    store,
  ])

  // Poll if running as a teammate or as a team lead
  const shouldPoll = enabled && !!getAgentNameToPoll(store.getState())
  useInterval(() => void poll(), shouldPoll ? INBOX_POLL_INTERVAL_MS : null)

  // Initial poll on mount (only once)
  const hasDoneInitialPollRef = useRef(false)
  useEffect(() => {
    if (!enabled) return
    if (hasDoneInitialPollRef.current) return
    // Use store.getState() to avoid dependency on appState object
    if (getAgentNameToPoll(store.getState())) {
      hasDoneInitialPollRef.current = true
      void poll()
    }
    // Note: poll uses store.getState() (not appState) so it won't re-run on appState changes
    // The ref guard is a safety measure to ensure initial poll only happens once
  }, [enabled, poll, store])
}
|
||||
@@ -0,0 +1,132 @@
|
||||
import { useCallback, useRef, useState } from 'react'
|
||||
import type { PastedContent } from '../utils/config.js'
|
||||
|
||||
// One undo-history snapshot of the prompt input.
export type BufferEntry = {
  // Full input text at the time of the snapshot.
  text: string
  // Cursor position (character offset) within `text`.
  cursorOffset: number
  // Attached pasted contents, keyed by number — presumably the paste
  // placeholder id embedded in the text; confirm against PastedContent usage.
  pastedContents: Record<number, PastedContent>
  // Date.now() when the snapshot was taken.
  timestamp: number
}
|
||||
|
||||
export type UseInputBufferProps = {
  // Maximum number of snapshots retained; oldest entries are dropped beyond this.
  maxBufferSize: number
  // Minimum interval (ms) between accepted snapshots; faster edits are coalesced.
  debounceMs: number
}
|
||||
|
||||
export type UseInputBufferResult = {
  // Record a snapshot of the input (debounced; exact-duplicate text is skipped).
  pushToBuffer: (
    text: string,
    cursorOffset: number,
    pastedContents?: Record<number, PastedContent>,
  ) => void
  // Step back one snapshot and return it, or undefined if history is empty.
  undo: () => BufferEntry | undefined
  // True when there is at least one earlier snapshot to step back to.
  canUndo: boolean
  // Discard all history and cancel any pending debounced push.
  clearBuffer: () => void
}
|
||||
|
||||
export function useInputBuffer({
|
||||
maxBufferSize,
|
||||
debounceMs,
|
||||
}: UseInputBufferProps): UseInputBufferResult {
|
||||
const [buffer, setBuffer] = useState<BufferEntry[]>([])
|
||||
const [currentIndex, setCurrentIndex] = useState(-1)
|
||||
const lastPushTime = useRef<number>(0)
|
||||
const pendingPush = useRef<ReturnType<typeof setTimeout> | null>(null)
|
||||
|
||||
const pushToBuffer = useCallback(
|
||||
(
|
||||
text: string,
|
||||
cursorOffset: number,
|
||||
pastedContents: Record<number, PastedContent> = {},
|
||||
) => {
|
||||
const now = Date.now()
|
||||
|
||||
// Clear any pending push
|
||||
if (pendingPush.current) {
|
||||
clearTimeout(pendingPush.current)
|
||||
pendingPush.current = null
|
||||
}
|
||||
|
||||
// Debounce rapid changes
|
||||
if (now - lastPushTime.current < debounceMs) {
|
||||
pendingPush.current = setTimeout(
|
||||
pushToBuffer,
|
||||
debounceMs,
|
||||
text,
|
||||
cursorOffset,
|
||||
pastedContents,
|
||||
)
|
||||
return
|
||||
}
|
||||
|
||||
lastPushTime.current = now
|
||||
|
||||
setBuffer(prevBuffer => {
|
||||
// If we're not at the end of the buffer, truncate everything after current position
|
||||
const newBuffer =
|
||||
currentIndex >= 0 ? prevBuffer.slice(0, currentIndex + 1) : prevBuffer
|
||||
|
||||
// Don't add if it's the same as the last entry
|
||||
const lastEntry = newBuffer[newBuffer.length - 1]
|
||||
if (lastEntry && lastEntry.text === text) {
|
||||
return newBuffer
|
||||
}
|
||||
|
||||
// Add new entry
|
||||
const updatedBuffer = [
|
||||
...newBuffer,
|
||||
{ text, cursorOffset, pastedContents, timestamp: now },
|
||||
]
|
||||
|
||||
// Limit buffer size
|
||||
if (updatedBuffer.length > maxBufferSize) {
|
||||
return updatedBuffer.slice(-maxBufferSize)
|
||||
}
|
||||
|
||||
return updatedBuffer
|
||||
})
|
||||
|
||||
// Update current index to point to the new entry
|
||||
setCurrentIndex(prev => {
|
||||
const newIndex = prev >= 0 ? prev + 1 : buffer.length
|
||||
return Math.min(newIndex, maxBufferSize - 1)
|
||||
})
|
||||
},
|
||||
[debounceMs, maxBufferSize, currentIndex, buffer.length],
|
||||
)
|
||||
|
||||
const undo = useCallback((): BufferEntry | undefined => {
|
||||
if (currentIndex < 0 || buffer.length === 0) {
|
||||
return undefined
|
||||
}
|
||||
|
||||
const targetIndex = Math.max(0, currentIndex - 1)
|
||||
const entry = buffer[targetIndex]
|
||||
|
||||
if (entry) {
|
||||
setCurrentIndex(targetIndex)
|
||||
return entry
|
||||
}
|
||||
|
||||
return undefined
|
||||
}, [buffer, currentIndex])
|
||||
|
||||
const clearBuffer = useCallback(() => {
|
||||
setBuffer([])
|
||||
setCurrentIndex(-1)
|
||||
lastPushTime.current = 0
|
||||
if (pendingPush.current) {
|
||||
clearTimeout(pendingPush.current)
|
||||
pendingPush.current = null
|
||||
}
|
||||
}, [lastPushTime, pendingPush])
|
||||
|
||||
const canUndo = currentIndex > 0 && buffer.length > 1
|
||||
|
||||
return {
|
||||
pushToBuffer,
|
||||
undo,
|
||||
canUndo,
|
||||
clearBuffer,
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,133 @@
|
||||
import { useMemo, useRef } from 'react'
|
||||
import { BASH_TOOL_NAME } from '../tools/BashTool/toolName.js'
|
||||
import type { Message } from '../types/message.js'
|
||||
import { getUserMessageText } from '../utils/messages.js'
|
||||
|
||||
// Regexes matching Bash commands that reach outside the local machine:
// network clients, cloud CLIs, remote git operations. A session whose Bash
// tool calls match any of these is treated as NOT container-compatible
// (see isSessionContainerCompatible below).
// \b word boundaries prevent substring hits (e.g. "curly" vs /\bcurl\b/).
const EXTERNAL_COMMAND_PATTERNS = [
  /\bcurl\b/,
  /\bwget\b/,
  /\bssh\b/,
  /\bkubectl\b/,
  /\bsrun\b/,
  /\bdocker\b/,
  /\bbq\b/,
  /\bgsutil\b/,
  /\bgcloud\b/,
  /\baws\b/,
  /\bgit\s+push\b/,
  /\bgit\s+pull\b/,
  /\bgit\s+fetch\b/,
  /\bgh\s+(pr|issue)\b/,
  /\bnc\b/,
  /\bncat\b/,
  /\btelnet\b/,
  /\bftp\b/,
]
|
||||
|
||||
// Regexes that detect user friction — the user correcting, contradicting,
// or asking for a redo. Applied (case-insensitively) to the most recent
// user message only; see hasFrictionSignal below.
const FRICTION_PATTERNS = [
  // "No," or "No!" at start — comma/exclamation implies correction tone
  // (avoids "No problem", "No thanks", "No I think we should...")
  /^no[,!]\s/i,
  // Direct corrections about Claude's output
  /\bthat'?s (wrong|incorrect|not (what|right|correct))\b/i,
  /\bnot what I (asked|wanted|meant|said)\b/i,
  // Referencing prior instructions Claude missed
  /\bI (said|asked|wanted|told you|already said)\b/i,
  // Questioning Claude's actions
  /\bwhy did you\b/i,
  /\byou should(n'?t| not)? have\b/i,
  /\byou were supposed to\b/i,
  // Explicit retry/revert of Claude's work
  /\btry again\b/i,
  /\b(undo|revert) (that|this|it|what you)\b/i,
]
|
||||
|
||||
export function isSessionContainerCompatible(messages: Message[]): boolean {
|
||||
for (const msg of messages) {
|
||||
if (msg.type !== 'assistant') {
|
||||
continue
|
||||
}
|
||||
const content = msg.message.content
|
||||
if (!Array.isArray(content)) {
|
||||
continue
|
||||
}
|
||||
for (const block of content) {
|
||||
if (block.type !== 'tool_use' || !('name' in block)) {
|
||||
continue
|
||||
}
|
||||
const toolName = block.name as string
|
||||
if (toolName.startsWith('mcp__')) {
|
||||
return false
|
||||
}
|
||||
if (toolName === BASH_TOOL_NAME) {
|
||||
const input = (block as { input?: Record<string, unknown> }).input
|
||||
const command = (input?.command as string) || ''
|
||||
if (EXTERNAL_COMMAND_PATTERNS.some(p => p.test(command))) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
export function hasFrictionSignal(messages: Message[]): boolean {
|
||||
for (let i = messages.length - 1; i >= 0; i--) {
|
||||
const msg = messages[i]!
|
||||
if (msg.type !== 'user') {
|
||||
continue
|
||||
}
|
||||
const text = getUserMessageText(msg)
|
||||
if (!text) {
|
||||
continue
|
||||
}
|
||||
return FRICTION_PATTERNS.some(p => p.test(text))
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// Minimum number of user submissions before the banner may trigger.
const MIN_SUBMIT_COUNT = 3
// Minimum gap between two banner triggers (30 minutes).
const COOLDOWN_MS = 30 * 60 * 1000

/**
 * Ant-only (USER_TYPE === 'ant'): decides whether to show the issue-flag
 * banner. Triggers when the session looks container-compatible AND the
 * latest user message shows friction, subject to MIN_SUBMIT_COUNT and a
 * COOLDOWN_MS cooldown. Once triggered, stays true until the user submits
 * another message (submitCount changes).
 *
 * @param messages Current conversation messages
 * @param submitCount Number of user submissions so far this session
 * @returns true while the banner should be visible
 */
export function useIssueFlagBanner(
  messages: Message[],
  submitCount: number,
): boolean {
  // Early return is safe re: hook rules only because USER_TYPE cannot
  // change within a process — see the biome-ignore notes below.
  if (process.env.USER_TYPE !== 'ant') {
    return false
  }

  // biome-ignore lint/correctness/useHookAtTopLevel: process.env.USER_TYPE is a compile-time constant
  const lastTriggeredAtRef = useRef(0)
  // biome-ignore lint/correctness/useHookAtTopLevel: process.env.USER_TYPE is a compile-time constant
  const activeForSubmitRef = useRef(-1)

  // Memoize the O(messages) scans. This hook runs on every REPL render
  // (including every keystroke), but messages is stable during typing.
  // isSessionContainerCompatible walks all messages + regex-tests each
  // bash command — by far the heaviest work here.
  // biome-ignore lint/correctness/useHookAtTopLevel: process.env.USER_TYPE is a compile-time constant
  const shouldTrigger = useMemo(
    () => isSessionContainerCompatible(messages) && hasFrictionSignal(messages),
    [messages],
  )

  // Keep showing the banner until the user submits another message
  if (activeForSubmitRef.current === submitCount) {
    return true
  }

  // Gate: cooldown since last trigger, minimum submits, and a live signal.
  if (Date.now() - lastTriggeredAtRef.current < COOLDOWN_MS) {
    return false
  }
  if (submitCount < MIN_SUBMIT_COUNT) {
    return false
  }
  if (!shouldTrigger) {
    return false
  }

  // Record trigger time (for cooldown) and the submit generation this
  // banner belongs to (so it persists across re-renders until next submit).
  lastTriggeredAtRef.current = Date.now()
  activeForSubmitRef.current = submitCount
  return true
}
|
||||
@@ -0,0 +1,119 @@
|
||||
import type { UUID } from 'crypto'
|
||||
import { useEffect, useRef } from 'react'
|
||||
import { useAppState } from '../state/AppState.js'
|
||||
import type { Message } from '../types/message.js'
|
||||
import { isAgentSwarmsEnabled } from '../utils/agentSwarmsEnabled.js'
|
||||
import {
|
||||
cleanMessagesForLogging,
|
||||
isChainParticipant,
|
||||
recordTranscript,
|
||||
} from '../utils/sessionStorage.js'
|
||||
|
||||
/**
 * Hook that incrementally records conversation messages to the on-disk
 * transcript as they arrive, tracking how much has already been written so
 * each effect run only hands recordTranscript the new tail.
 *
 * @param messages The current conversation messages
 * @param ignore When true, messages will not be recorded to the transcript
 */
export function useLogMessages(messages: Message[], ignore: boolean = false) {
  const teamContext = useAppState(s => s.teamContext)

  // messages is append-only between compactions, so track where we left off
  // and only pass the new tail to recordTranscript. Avoids O(n) filter+scan
  // on every setMessages (~20x/turn, so n=3000 was ~120k wasted iterations).
  const lastRecordedLengthRef = useRef(0)
  const lastParentUuidRef = useRef<UUID | undefined>(undefined)
  // First-uuid change = compaction or /clear rebuilt the array; length alone
  // can't detect this since post-compact [CB,summary,...keep,new] may be longer.
  const firstMessageUuidRef = useRef<UUID | undefined>(undefined)
  // Guard against stale async .then() overwriting a fresher sync update when
  // an incremental render fires before the compaction .then() resolves.
  const callSeqRef = useRef(0)

  useEffect(() => {
    if (ignore) return

    const currentFirstUuid = messages[0]?.uuid as UUID | undefined
    const prevLength = lastRecordedLengthRef.current

    // First-render: firstMessageUuidRef is undefined. Compaction: first uuid changes.
    // Both are !isIncremental, but first-render sync-walk is safe (no messagesToKeep).
    const wasFirstRender = firstMessageUuidRef.current === undefined
    const isIncremental =
      currentFirstUuid !== undefined &&
      !wasFirstRender &&
      currentFirstUuid === firstMessageUuidRef.current &&
      prevLength <= messages.length
    // Same-head shrink: tombstone filter, rewind, snip, partial-compact.
    // Distinguished from compaction (first uuid changes) because the tail
    // is either an existing on-disk message or a fresh message that this
    // same effect's recordTranscript(fullArray) will write — see sync-walk
    // guard below.
    const isSameHeadShrink =
      currentFirstUuid !== undefined &&
      !wasFirstRender &&
      currentFirstUuid === firstMessageUuidRef.current &&
      prevLength > messages.length

    const startIndex = isIncremental ? prevLength : 0
    // Nothing new to record.
    if (startIndex === messages.length) return

    // Full array on first call + after compaction: recordTranscript's own
    // O(n) dedup loop handles messagesToKeep interleaving correctly there.
    const slice = startIndex === 0 ? messages : messages.slice(startIndex)
    const parentHint = isIncremental ? lastParentUuidRef.current : undefined

    // Fire and forget - we don't want to block the UI.
    const seq = ++callSeqRef.current
    void recordTranscript(
      slice,
      isAgentSwarmsEnabled()
        ? {
            teamName: teamContext?.teamName,
            agentName: teamContext?.selfAgentName,
          }
        : {},
      parentHint,
      messages,
    ).then(lastRecordedUuid => {
      // For compaction/full array case (!isIncremental): use the async return
      // value. After compaction, messagesToKeep in the array are skipped
      // (already in transcript), so the sync loop would find a wrong UUID.
      // Skip if a newer effect already ran (stale closure would overwrite the
      // fresher sync update from the subsequent incremental render).
      if (seq !== callSeqRef.current) return
      if (lastRecordedUuid && !isIncremental) {
        lastParentUuidRef.current = lastRecordedUuid
      }
    })

    // Sync-walk safe for: incremental (pure new-tail slice), first-render
    // (no messagesToKeep interleaving), and same-head shrink. Shrink is the
    // subtle one: the picked uuid is either already on disk (tombstone/rewind
    // — survivors were written before) or is being written by THIS effect's
    // recordTranscript(fullArray) call (snip boundary / partial-compact tail
    // — enqueueWrite ordering guarantees it lands before any later write that
    // chains to it). Without this, the ref stays stale at a tombstoned uuid:
    // the async .then() correction is raced out by the next effect's seq bump
    // on large sessions where recordTranscript(fullArray) is slow. Only the
    // compaction case (first uuid changed) remains unsafe — tail may be
    // messagesToKeep whose last-actually-recorded uuid differs.
    if (isIncremental || wasFirstRender || isSameHeadShrink) {
      // Match EXACTLY what recordTranscript persists: cleanMessagesForLogging
      // applies both the isLoggableMessage filter and (for external users) the
      // REPL-strip + isVirtual-promote transform. Using the raw predicate here
      // would pick a UUID that the transform drops, leaving the parent hint
      // pointing at a message that never reached disk. Pass full messages as
      // replId context — REPL tool_use and its tool_result land in separate
      // render cycles, so the slice alone can't pair them.
      const last = cleanMessagesForLogging(slice, messages).findLast(
        isChainParticipant,
      )
      if (last) lastParentUuidRef.current = last.uuid as UUID
    }

    lastRecordedLengthRef.current = messages.length
    firstMessageUuidRef.current = currentFirstUuid
  }, [messages, ignore, teamContext?.teamName, teamContext?.selfAgentName])
}
|
||||
File diff suppressed because one or more lines are too long
@@ -0,0 +1,21 @@
|
||||
import { useCallback, useEffect, useMemo, useSyncExternalStore } from 'react'
|
||||
import { useMailbox } from '../context/mailbox.js'
|
||||
|
||||
// Inputs for useMailboxBridge.
type Props = {
  // Whether the REPL is currently processing; polling pauses while true.
  isLoading: boolean
  // Submits message content into the REPL; the boolean result is ignored here.
  onSubmitMessage: (content: string) => boolean
}

/**
 * Bridges the mailbox into the REPL: subscribes to mailbox revision changes
 * via useSyncExternalStore and, whenever the REPL is idle (!isLoading),
 * polls one message and submits it.
 */
export function useMailboxBridge({ isLoading, onSubmitMessage }: Props): void {
  const mailbox = useMailbox()

  // Stable subscribe/getSnapshot identities so useSyncExternalStore doesn't
  // resubscribe on every render; a revision bump signals new mail.
  const subscribe = useMemo(() => mailbox.subscribe.bind(mailbox), [mailbox])
  const getSnapshot = useCallback(() => mailbox.revision, [mailbox])
  const revision = useSyncExternalStore(subscribe, getSnapshot)

  // Re-runs on every revision change or loading transition; drains at most
  // one message per run.
  useEffect(() => {
    if (isLoading) return
    const msg = mailbox.poll()
    if (msg) onSubmitMessage(msg.content)
  }, [isLoading, revision, mailbox, onSubmitMessage])
}
|
||||
@@ -0,0 +1,34 @@
|
||||
import { useEffect, useReducer } from 'react'
|
||||
import { onGrowthBookRefresh } from '../services/analytics/growthbook.js'
|
||||
import { useAppState } from '../state/AppState.js'
|
||||
import {
|
||||
getDefaultMainLoopModelSetting,
|
||||
type ModelName,
|
||||
parseUserSpecifiedModel,
|
||||
} from '../utils/model/model.js'
|
||||
|
||||
// The value of the selector is a full model name that can be used directly in
// API calls. Use this over getMainLoopModel() when the component needs to
// update upon a model config change.
export function useMainLoopModel(): ModelName {
  const mainLoopModel = useAppState(s => s.mainLoopModel)
  const mainLoopModelForSession = useAppState(s => s.mainLoopModelForSession)

  // parseUserSpecifiedModel reads tengu_ant_model_override via
  // _CACHED_MAY_BE_STALE (in resolveAntModel). Until GB init completes,
  // that's the stale disk cache; after, it's the in-memory remoteEval map.
  // AppState doesn't change when GB init finishes, so we subscribe to the
  // refresh signal and force a re-render to re-resolve with fresh values.
  // Without this, the alias resolution is frozen until something else
  // happens to re-render the component — the API would sample one model
  // while /model (which also re-resolves) displays another.
  const [, forceRerender] = useReducer(x => x + 1, 0)
  useEffect(() => onGrowthBookRefresh(forceRerender), [])

  // Resolution order: session override → persisted setting → default.
  const model = parseUserSpecifiedModel(
    mainLoopModelForSession ??
      mainLoopModel ??
      getDefaultMainLoopModelSetting(),
  )
  return model
}
|
||||
@@ -0,0 +1,304 @@
|
||||
import { useCallback, useEffect } from 'react'
|
||||
import type { Command } from '../commands.js'
|
||||
import { useNotifications } from '../context/notifications.js'
|
||||
import {
|
||||
type AnalyticsMetadata_I_VERIFIED_THIS_IS_NOT_CODE_OR_FILEPATHS,
|
||||
logEvent,
|
||||
} from '../services/analytics/index.js'
|
||||
import { reinitializeLspServerManager } from '../services/lsp/manager.js'
|
||||
import { useAppState, useSetAppState } from '../state/AppState.js'
|
||||
import type { AgentDefinition } from '../tools/AgentTool/loadAgentsDir.js'
|
||||
import { count } from '../utils/array.js'
|
||||
import { logForDebugging } from '../utils/debug.js'
|
||||
import { logForDiagnosticsNoPII } from '../utils/diagLogs.js'
|
||||
import { toError } from '../utils/errors.js'
|
||||
import { logError } from '../utils/log.js'
|
||||
import { loadPluginAgents } from '../utils/plugins/loadPluginAgents.js'
|
||||
import { getPluginCommands } from '../utils/plugins/loadPluginCommands.js'
|
||||
import { loadPluginHooks } from '../utils/plugins/loadPluginHooks.js'
|
||||
import { loadPluginLspServers } from '../utils/plugins/lspPluginIntegration.js'
|
||||
import { loadPluginMcpServers } from '../utils/plugins/mcpPluginIntegration.js'
|
||||
import { detectAndUninstallDelistedPlugins } from '../utils/plugins/pluginBlocklist.js'
|
||||
import { getFlaggedPlugins } from '../utils/plugins/pluginFlagging.js'
|
||||
import { loadAllPlugins } from '../utils/plugins/pluginLoader.js'
|
||||
|
||||
/**
 * Hook to manage plugin state and synchronize with AppState.
 *
 * On mount: loads all plugins, runs delisting enforcement, surfaces flagged-
 * plugin notifications, populates AppState.plugins. This is the initial
 * Layer-3 load — subsequent refresh goes through /reload-plugins.
 *
 * On needsRefresh: shows a notification directing the user to /reload-plugins.
 * Does NOT auto-refresh. All Layer-3 swap (commands, agents, hooks, MCP)
 * goes through refreshActivePlugins() via /reload-plugins for one consistent
 * mental model. See Outline: declarative-settings-hXHBMDIf4b PR 5c.
 *
 * @param enabled When false, neither the initial load nor the needsRefresh
 *   notification runs.
 */
export function useManagePlugins({
  enabled = true,
}: {
  enabled?: boolean
} = {}) {
  const setAppState = useSetAppState()
  const needsRefresh = useAppState(s => s.plugins.needsRefresh)
  const { addNotification } = useNotifications()

  // Initial plugin load. Runs once on mount. NOT used for refresh — all
  // post-mount refresh goes through /reload-plugins → refreshActivePlugins().
  // Unlike refreshActivePlugins, this also runs delisting enforcement and
  // flagged-plugin notifications (session-start concerns), and does NOT bump
  // mcp.pluginReconnectKey (MCP effects fire on their own mount).
  // Returns a telemetry metrics object in both the success and failure paths.
  const initialPluginLoad = useCallback(async () => {
    try {
      // Load all plugins - capture errors array.
      // NOTE(review): this destructured `enabled` (the loaded-plugin list)
      // shadows the hook's `enabled` option for the rest of this callback.
      const { enabled, disabled, errors } = await loadAllPlugins()

      // Detect delisted plugins, auto-uninstall them, and record as flagged.
      await detectAndUninstallDelistedPlugins()

      // Notify if there are flagged plugins pending dismissal
      const flagged = getFlaggedPlugins()
      if (Object.keys(flagged).length > 0) {
        addNotification({
          key: 'plugin-delisted-flagged',
          text: 'Plugins flagged. Check /plugins',
          color: 'warning',
          priority: 'high',
        })
      }

      // Load commands, agents, and hooks with individual error handling
      // Errors are added to the errors array for user visibility in Doctor UI
      let commands: Command[] = []
      let agents: AgentDefinition[] = []

      try {
        commands = await getPluginCommands()
      } catch (error) {
        const errorMessage =
          error instanceof Error ? error.message : String(error)
        errors.push({
          type: 'generic-error',
          source: 'plugin-commands',
          error: `Failed to load plugin commands: ${errorMessage}`,
        })
      }

      try {
        agents = await loadPluginAgents()
      } catch (error) {
        const errorMessage =
          error instanceof Error ? error.message : String(error)
        errors.push({
          type: 'generic-error',
          source: 'plugin-agents',
          error: `Failed to load plugin agents: ${errorMessage}`,
        })
      }

      try {
        await loadPluginHooks()
      } catch (error) {
        const errorMessage =
          error instanceof Error ? error.message : String(error)
        errors.push({
          type: 'generic-error',
          source: 'plugin-hooks',
          error: `Failed to load plugin hooks: ${errorMessage}`,
        })
      }

      // Load MCP server configs per plugin to get an accurate count.
      // LoadedPlugin.mcpServers is not populated by loadAllPlugins — it's a
      // cache slot that extractMcpServersFromPlugins fills later, which races
      // with this metric. Calling loadPluginMcpServers directly (as
      // cli/handlers/plugins.ts does) gives the correct count and also
      // warms the cache for the MCP connection manager.
      //
      // Runs BEFORE setAppState so any errors pushed by these loaders make it
      // into AppState.plugins.errors (Doctor UI), not just telemetry.
      const mcpServerCounts = await Promise.all(
        enabled.map(async p => {
          if (p.mcpServers) return Object.keys(p.mcpServers).length
          const servers = await loadPluginMcpServers(p, errors)
          if (servers) p.mcpServers = servers
          return servers ? Object.keys(servers).length : 0
        }),
      )
      const mcp_count = mcpServerCounts.reduce((sum, n) => sum + n, 0)

      // LSP: the primary fix for issue #15521 is in refresh.ts (via
      // performBackgroundPluginInstallations → refreshActivePlugins, which
      // clears caches first). This reinit is defensive — it reads the same
      // memoized loadAllPlugins() result as the original init unless a cache
      // invalidation happened between main.tsx:3203 and REPL mount (e.g.
      // seed marketplace registration or policySettings hot-reload).
      const lspServerCounts = await Promise.all(
        enabled.map(async p => {
          if (p.lspServers) return Object.keys(p.lspServers).length
          const servers = await loadPluginLspServers(p, errors)
          if (servers) p.lspServers = servers
          return servers ? Object.keys(servers).length : 0
        }),
      )
      const lsp_count = lspServerCounts.reduce((sum, n) => sum + n, 0)
      reinitializeLspServerManager()

      // Update AppState - merge errors to preserve LSP errors
      setAppState(prevState => {
        // Keep existing LSP/non-plugin-loading errors (source 'lsp-manager' or 'plugin:*')
        const existingLspErrors = prevState.plugins.errors.filter(
          e => e.source === 'lsp-manager' || e.source.startsWith('plugin:'),
        )
        // Deduplicate: remove existing LSP errors that are also in new errors
        const newErrorKeys = new Set(
          errors.map(e =>
            e.type === 'generic-error'
              ? `generic-error:${e.source}:${e.error}`
              : `${e.type}:${e.source}`,
          ),
        )
        const filteredExisting = existingLspErrors.filter(e => {
          const key =
            e.type === 'generic-error'
              ? `generic-error:${e.source}:${e.error}`
              : `${e.type}:${e.source}`
          return !newErrorKeys.has(key)
        })
        const mergedErrors = [...filteredExisting, ...errors]

        return {
          ...prevState,
          plugins: {
            ...prevState.plugins,
            enabled,
            disabled,
            commands,
            errors: mergedErrors,
          },
        }
      })

      logForDebugging(
        `Loaded plugins - Enabled: ${enabled.length}, Disabled: ${disabled.length}, Commands: ${commands.length}, Agents: ${agents.length}, Errors: ${errors.length}`,
      )

      // Count component types across enabled plugins
      const hook_count = enabled.reduce((sum, p) => {
        if (!p.hooksConfig) return sum
        return (
          sum +
          Object.values(p.hooksConfig).reduce(
            (s, matchers) =>
              s + (matchers?.reduce((h, m) => h + m.hooks.length, 0) ?? 0),
            0,
          )
        )
      }, 0)

      // Telemetry payload consumed by the mount effect below.
      return {
        enabled_count: enabled.length,
        disabled_count: disabled.length,
        inline_count: count(enabled, p => p.source.endsWith('@inline')),
        marketplace_count: count(enabled, p => !p.source.endsWith('@inline')),
        error_count: errors.length,
        skill_count: commands.length,
        agent_count: agents.length,
        hook_count,
        mcp_count,
        lsp_count,
        // Ant-only: which plugins are enabled, to correlate with RSS/FPS.
        // Kept separate from base metrics so it doesn't flow into
        // logForDiagnosticsNoPII.
        ant_enabled_names:
          process.env.USER_TYPE === 'ant' && enabled.length > 0
            ? (enabled
                .map(p => p.name)
                .sort()
                .join(
                  ',',
                ) as AnalyticsMetadata_I_VERIFIED_THIS_IS_NOT_CODE_OR_FILEPATHS)
            : undefined,
      }
    } catch (error) {
      // Only plugin loading errors should reach here - log for monitoring
      const errorObj = toError(error)
      logError(errorObj)
      logForDebugging(`Error loading plugins: ${error}`)
      // Set empty state on error, but preserve LSP errors and add the new error
      setAppState(prevState => {
        // Keep existing LSP/non-plugin-loading errors
        const existingLspErrors = prevState.plugins.errors.filter(
          e => e.source === 'lsp-manager' || e.source.startsWith('plugin:'),
        )
        const newError = {
          type: 'generic-error' as const,
          source: 'plugin-system',
          error: errorObj.message,
        }
        return {
          ...prevState,
          plugins: {
            ...prevState.plugins,
            enabled: [],
            disabled: [],
            commands: [],
            errors: [...existingLspErrors, newError],
          },
        }
      })

      // Zeroed metrics so telemetry still fires with load_failed set.
      return {
        enabled_count: 0,
        disabled_count: 0,
        inline_count: 0,
        marketplace_count: 0,
        error_count: 1,
        skill_count: 0,
        agent_count: 0,
        hook_count: 0,
        mcp_count: 0,
        lsp_count: 0,
        load_failed: true,
        ant_enabled_names: undefined,
      }
    }
  }, [setAppState, addNotification])

  // Load plugins on mount and emit telemetry
  useEffect(() => {
    if (!enabled) return
    void initialPluginLoad().then(metrics => {
      const { ant_enabled_names, ...baseMetrics } = metrics
      const allMetrics = {
        ...baseMetrics,
        has_custom_plugin_cache_dir: !!process.env.CLAUDE_CODE_PLUGIN_CACHE_DIR,
      }
      logEvent('tengu_plugins_loaded', {
        ...allMetrics,
        ...(ant_enabled_names !== undefined && {
          enabled_names: ant_enabled_names,
        }),
      })
      logForDiagnosticsNoPII('info', 'tengu_plugins_loaded', allMetrics)
    })
  }, [initialPluginLoad, enabled])

  // Plugin state changed on disk (background reconcile, /plugin menu,
  // external settings edit). Show a notification; user runs /reload-plugins
  // to apply. The previous auto-refresh here had a stale-cache bug (only
  // cleared loadAllPlugins, downstream memoized loaders returned old data)
  // and was incomplete (no MCP, no agentDefinitions). /reload-plugins
  // handles all of that correctly via refreshActivePlugins().
  useEffect(() => {
    if (!enabled || !needsRefresh) return
    addNotification({
      key: 'plugin-reload-pending',
      text: 'Plugins changed. Run /reload-plugins to activate.',
      color: 'suggestion',
      priority: 'low',
    })
    // Do NOT auto-refresh. Do NOT reset needsRefresh — /reload-plugins
    // consumes it via refreshActivePlugins().
  }, [enabled, needsRefresh, addNotification])
}
|
||||
@@ -0,0 +1,39 @@
|
||||
import { useState } from 'react'
|
||||
import { useInterval } from 'usehooks-ts'
|
||||
|
||||
// Coarse memory-pressure level derived from the heap thresholds below.
export type MemoryUsageStatus = 'normal' | 'high' | 'critical'

// Snapshot of process heap usage plus its classified status.
export type MemoryUsageInfo = {
  // Bytes, from process.memoryUsage().heapUsed.
  heapUsed: number
  status: MemoryUsageStatus
}

const HIGH_MEMORY_THRESHOLD = 1.5 * 1024 * 1024 * 1024 // 1.5GB in bytes
const CRITICAL_MEMORY_THRESHOLD = 2.5 * 1024 * 1024 * 1024 // 2.5GB in bytes
|
||||
|
||||
/**
|
||||
* Hook to monitor Node.js process memory usage.
|
||||
* Polls every 10 seconds; returns null while status is 'normal'.
|
||||
*/
|
||||
export function useMemoryUsage(): MemoryUsageInfo | null {
|
||||
const [memoryUsage, setMemoryUsage] = useState<MemoryUsageInfo | null>(null)
|
||||
|
||||
useInterval(() => {
|
||||
const heapUsed = process.memoryUsage().heapUsed
|
||||
const status: MemoryUsageStatus =
|
||||
heapUsed >= CRITICAL_MEMORY_THRESHOLD
|
||||
? 'critical'
|
||||
: heapUsed >= HIGH_MEMORY_THRESHOLD
|
||||
? 'high'
|
||||
: 'normal'
|
||||
setMemoryUsage(prev => {
|
||||
// Bail when status is 'normal' — nothing is shown, so heapUsed is
|
||||
// irrelevant and we avoid re-rendering the whole Notifications subtree
|
||||
// every 10 seconds for the 99%+ of users who never reach 1.5GB.
|
||||
if (status === 'normal') return prev === null ? prev : null
|
||||
return { heapUsed, status }
|
||||
})
|
||||
}, 10_000)
|
||||
|
||||
return memoryUsage
|
||||
}
|
||||
@@ -0,0 +1,23 @@
|
||||
import uniqBy from 'lodash-es/uniqBy.js'
|
||||
import { useMemo } from 'react'
|
||||
import type { MCPServerConnection } from '../services/mcp/types.js'
|
||||
|
||||
export function mergeClients(
|
||||
initialClients: MCPServerConnection[] | undefined,
|
||||
mcpClients: readonly MCPServerConnection[] | undefined,
|
||||
): MCPServerConnection[] {
|
||||
if (initialClients && mcpClients && mcpClients.length > 0) {
|
||||
return uniqBy([...initialClients, ...mcpClients], 'name')
|
||||
}
|
||||
return initialClients || []
|
||||
}
|
||||
|
||||
/**
 * Memoized wrapper around mergeClients: combines startup MCP connections
 * with dynamically discovered ones, deduplicated by connection name.
 * Recomputes only when either input array identity changes.
 */
export function useMergedClients(
  initialClients: MCPServerConnection[] | undefined,
  mcpClients: MCPServerConnection[] | undefined,
): MCPServerConnection[] {
  return useMemo(
    () => mergeClients(initialClients, mcpClients),
    [initialClients, mcpClients],
  )
}
|
||||
@@ -0,0 +1,15 @@
|
||||
import uniqBy from 'lodash-es/uniqBy.js'
|
||||
import { useMemo } from 'react'
|
||||
import type { Command } from '../commands.js'
|
||||
|
||||
/**
 * Merges the initial command list with dynamically discovered MCP commands,
 * deduplicating by command name (initialCommands win on a conflict — uniqBy
 * keeps the first occurrence).
 */
export function useMergedCommands(
  initialCommands: Command[],
  mcpCommands: Command[],
): Command[] {
  return useMemo(() => {
    if (mcpCommands.length > 0) {
      return uniqBy([...initialCommands, ...mcpCommands], 'name')
    }
    // No MCP commands: return the original array identity so downstream
    // memoization stays stable.
    return initialCommands
  }, [initialCommands, mcpCommands])
}
|
||||
@@ -0,0 +1,44 @@
|
||||
// biome-ignore-all assist/source/organizeImports: ANT-ONLY import markers must not be reordered
|
||||
import { useMemo } from 'react'
|
||||
import type { Tools, ToolPermissionContext } from '../Tool.js'
|
||||
import { assembleToolPool } from '../tools.js'
|
||||
import { useAppState } from '../state/AppState.js'
|
||||
import { mergeAndFilterTools } from '../utils/toolPool.js'
|
||||
|
||||
/**
 * React hook that assembles the full tool pool for the REPL.
 *
 * Uses assembleToolPool() (the shared pure function used by both REPL and runAgent)
 * to combine built-in tools with MCP tools, applying deny rules and deduplication.
 * Any extra initialTools are merged on top.
 *
 * @param initialTools - Extra tools to include (built-in + startup MCP from props).
 *   These are merged with the assembled pool and take precedence in deduplication.
 * @param mcpTools - MCP tools discovered dynamically (from mcp state)
 * @param toolPermissionContext - Permission context for filtering
 */
export function useMergedTools(
  initialTools: Tools,
  mcpTools: Tools,
  toolPermissionContext: ToolPermissionContext,
): Tools {
  // NOTE(review): these locals are always false and never reassigned here,
  // so as memo deps they never invalidate the memo. Presumably placeholders
  // for ANT-only code removed at build time (see the ANT-ONLY import-marker
  // comment at the top of this file) — confirm with the strip tooling before
  // deleting them.
  let replBridgeEnabled = false
  let replBridgeOutboundOnly = false
  return useMemo(() => {
    // assembleToolPool is the shared function that both REPL and runAgent use.
    // It handles: getTools() + MCP deny-rule filtering + dedup + MCP CLI exclusion.
    const assembled = assembleToolPool(toolPermissionContext, mcpTools)

    return mergeAndFilterTools(
      initialTools,
      assembled,
      toolPermissionContext.mode,
    )
  }, [
    initialTools,
    mcpTools,
    toolPermissionContext,
    replBridgeEnabled,
    replBridgeOutboundOnly,
  ])
}
|
||||
@@ -0,0 +1,35 @@
|
||||
import { useEffect, useRef, useState } from 'react'
|
||||
|
||||
/**
|
||||
* Throttles a value so each distinct value stays visible for at least `minMs`.
|
||||
* Prevents fast-cycling progress text from flickering past before it's readable.
|
||||
*
|
||||
* Unlike debounce (wait for quiet) or throttle (limit rate), this guarantees
|
||||
* each value gets its minimum screen time before being replaced.
|
||||
*/
|
||||
export function useMinDisplayTime<T>(value: T, minMs: number): T {
|
||||
const [displayed, setDisplayed] = useState(value)
|
||||
const lastShownAtRef = useRef(0)
|
||||
|
||||
useEffect(() => {
|
||||
const elapsed = Date.now() - lastShownAtRef.current
|
||||
if (elapsed >= minMs) {
|
||||
lastShownAtRef.current = Date.now()
|
||||
setDisplayed(value)
|
||||
return
|
||||
}
|
||||
const timer = setTimeout(
|
||||
(shownAtRef, setFn, v) => {
|
||||
shownAtRef.current = Date.now()
|
||||
setFn(v)
|
||||
},
|
||||
minMs - elapsed,
|
||||
lastShownAtRef,
|
||||
setDisplayed,
|
||||
value,
|
||||
)
|
||||
return () => clearTimeout(timer)
|
||||
}, [value, minMs])
|
||||
|
||||
return displayed
|
||||
}
|
||||
@@ -0,0 +1,65 @@
|
||||
import { useEffect } from 'react'
|
||||
import {
|
||||
getLastInteractionTime,
|
||||
updateLastInteractionTime,
|
||||
} from '../bootstrap/state.js'
|
||||
import { useTerminalNotification } from '../ink/useTerminalNotification.js'
|
||||
import { sendNotification } from '../services/notifier.js'
|
||||
// How long (ms) since the last user interaction before the app counts as
// idle (6 seconds): interactions within this window suppress notifications.
export const DEFAULT_INTERACTION_THRESHOLD_MS = 6000
|
||||
|
||||
function getTimeSinceLastInteraction(): number {
|
||||
return Date.now() - getLastInteractionTime()
|
||||
}
|
||||
|
||||
function hasRecentInteraction(threshold: number): boolean {
|
||||
return getTimeSinceLastInteraction() < threshold
|
||||
}
|
||||
|
||||
function shouldNotify(threshold: number): boolean {
|
||||
return process.env.NODE_ENV !== 'test' && !hasRecentInteraction(threshold)
|
||||
}
|
||||
|
||||
// NOTE: User interaction tracking is now done in App.tsx's processKeysInBatch
// function, which calls updateLastInteractionTime() when any input is received.
// This avoids having a separate stdin 'data' listener that would compete with
// the main 'readable' listener and cause dropped input characters.

/**
 * Hook that fires a desktop/terminal notification once the user has been
 * idle for DEFAULT_INTERACTION_THRESHOLD_MS (6 seconds).
 *
 * An interval polls every 6s; on the first tick where no interaction has
 * occurred within the threshold, it sends exactly one notification and stops
 * polling. Notifications are suppressed when NODE_ENV === 'test' (see
 * shouldNotify). The mount effect resets the interaction timestamp so a
 * long-running request that just completed does not notify immediately.
 *
 * @param message - The notification message to display
 * @param notificationType - Category string forwarded to sendNotification
 */
export function useNotifyAfterTimeout(
  message: string,
  notificationType: string,
): void {
  const terminal = useTerminalNotification()

  // Reset interaction time when hook is called to make sure that requests
  // that took a long time to complete don't pop up a notification right away.
  // Must be immediate because useEffect runs after Ink's render cycle has
  // already flushed; without it the timestamp stays stale and a premature
  // notification fires if the user is idle (no subsequent renders to flush).
  useEffect(() => {
    updateLastInteractionTime(true)
  }, [])

  useEffect(() => {
    // Defensive latch: guarantees at most one notification per effect run
    // even if a tick was already queued when clearInterval runs.
    let hasNotified = false
    const timer = setInterval(() => {
      if (shouldNotify(DEFAULT_INTERACTION_THRESHOLD_MS) && !hasNotified) {
        hasNotified = true
        clearInterval(timer)
        // Fire-and-forget: notification failures are not surfaced here.
        void sendNotification({ message, notificationType }, terminal)
      }
    }, DEFAULT_INTERACTION_THRESHOLD_MS)

    return () => clearInterval(timer)
  }, [message, notificationType, terminal])
}
|
||||
File diff suppressed because one or more lines are too long
@@ -0,0 +1,285 @@
|
||||
import { basename } from 'path'
|
||||
import React from 'react'
|
||||
import { logError } from 'src/utils/log.js'
|
||||
import { useDebounceCallback } from 'usehooks-ts'
|
||||
import type { InputEvent, Key } from '../ink.js'
|
||||
import {
|
||||
getImageFromClipboard,
|
||||
isImageFilePath,
|
||||
PASTE_THRESHOLD,
|
||||
tryReadImageFromPath,
|
||||
} from '../utils/imagePaste.js'
|
||||
import type { ImageDimensions } from '../utils/imageResizer.js'
|
||||
import { getPlatform } from '../utils/platform.js'
|
||||
|
||||
// Debounce before probing the system clipboard for an image; collapses a
// burst of empty-paste events into a single clipboard read.
const CLIPBOARD_CHECK_DEBOUNCE_MS = 50
// Quiet period after the last paste chunk before the accumulated chunks are
// treated as one complete paste and dispatched.
const PASTE_COMPLETION_TIMEOUT_MS = 100

type PasteHandlerProps = {
  // Receives the full pasted text once a paste completes.
  onPaste?: (text: string) => void
  // Fallback for ordinary (non-paste) keystrokes.
  onInput: (input: string, key: Key) => void
  // Called once per pasted/dropped image with its decoded contents.
  onImagePaste?: (
    base64Image: string,
    mediaType?: string,
    filename?: string,
    dimensions?: ImageDimensions,
    sourcePath?: string,
  ) => void
}
|
||||
|
||||
/**
 * Detects paste input (bracketed paste, oversized chunks, dragged file
 * paths) and routes it to onPaste / onImagePaste, falling back to onInput
 * for ordinary keystrokes.
 *
 * Paste content may arrive split across multiple stdin chunks; chunks are
 * buffered in pasteState and flushed after PASTE_COMPLETION_TIMEOUT_MS of
 * quiet. An empty bracketed paste on macOS triggers a clipboard-image check.
 *
 * @returns wrappedOnInput to install as the input handler, plus the raw
 *   pasteState buffer and an isPasting flag for UI feedback.
 */
export function usePasteHandler({
  onPaste,
  onInput,
  onImagePaste,
}: PasteHandlerProps): {
  wrappedOnInput: (input: string, key: Key, event: InputEvent) => void
  pasteState: {
    chunks: string[]
    timeoutId: ReturnType<typeof setTimeout> | null
  }
  isPasting: boolean
} {
  const [pasteState, setPasteState] = React.useState<{
    chunks: string[]
    timeoutId: ReturnType<typeof setTimeout> | null
  }>({ chunks: [], timeoutId: null })
  const [isPasting, setIsPasting] = React.useState(false)
  // Tracks unmount so async clipboard callbacks don't touch state afterwards.
  const isMountedRef = React.useRef(true)
  // Mirrors pasteState.timeoutId but updated synchronously. When paste + a
  // keystroke arrive in the same stdin chunk, both wrappedOnInput calls run
  // in the same discreteUpdates batch before React commits — the second call
  // reads stale pasteState.timeoutId (null) and takes the onInput path. If
  // that key is Enter, it submits the old input and the paste is lost.
  const pastePendingRef = React.useRef(false)

  const isMacOS = React.useMemo(() => getPlatform() === 'macos', [])

  React.useEffect(() => {
    return () => {
      isMountedRef.current = false
    }
  }, [])

  // Reads an image off the clipboard and forwards it to onImagePaste;
  // clears isPasting when done regardless of outcome.
  const checkClipboardForImageImpl = React.useCallback(() => {
    if (!onImagePaste || !isMountedRef.current) return

    void getImageFromClipboard()
      .then(imageData => {
        if (imageData && isMountedRef.current) {
          onImagePaste(
            imageData.base64,
            imageData.mediaType,
            undefined, // no filename for clipboard images
            imageData.dimensions,
          )
        }
      })
      .catch(error => {
        if (isMountedRef.current) {
          logError(error as Error)
        }
      })
      .finally(() => {
        if (isMountedRef.current) {
          setIsPasting(false)
        }
      })
  }, [onImagePaste])

  const checkClipboardForImage = useDebounceCallback(
    checkClipboardForImageImpl,
    CLIPBOARD_CHECK_DEBOUNCE_MS,
  )

  // (Re)arms the paste-completion timer. When it fires, buffered chunks are
  // joined and dispatched as one paste (text and/or images). Dependencies
  // are passed as setTimeout arguments rather than closed over.
  const resetPasteTimeout = React.useCallback(
    (currentTimeoutId: ReturnType<typeof setTimeout> | null) => {
      if (currentTimeoutId) {
        clearTimeout(currentTimeoutId)
      }
      return setTimeout(
        (
          setPasteState,
          onImagePaste,
          onPaste,
          setIsPasting,
          checkClipboardForImage,
          isMacOS,
          pastePendingRef,
        ) => {
          pastePendingRef.current = false
          setPasteState(({ chunks }) => {
            // Join chunks and filter out orphaned focus sequences
            // These can appear when focus events split during paste
            const pastedText = chunks
              .join('')
              .replace(/\[I$/, '')
              .replace(/\[O$/, '')

            // Check if the pasted text contains image file paths
            // When dragging multiple images, they may come as:
            // 1. Newline-separated paths (common in some terminals)
            // 2. Space-separated paths (common when dragging from Finder)
            // For space-separated paths, we split on spaces that precede absolute paths:
            // - Unix: space followed by `/` (e.g., `/Users/...`)
            // - Windows: space followed by drive letter and `:\` (e.g., `C:\Users\...`)
            // This works because spaces within paths are escaped (e.g., `file\ name.png`)
            const lines = pastedText
              .split(/ (?=\/|[A-Za-z]:\\)/)
              .flatMap(part => part.split('\n'))
              .filter(line => line.trim())
            const imagePaths = lines.filter(line => isImageFilePath(line))

            if (onImagePaste && imagePaths.length > 0) {
              const isTempScreenshot =
                /\/TemporaryItems\/.*screencaptureui.*\/Screenshot/i.test(
                  pastedText,
                )

              // Process all image paths
              void Promise.all(
                imagePaths.map(imagePath => tryReadImageFromPath(imagePath)),
              ).then(results => {
                const validImages = results.filter(
                  (r): r is NonNullable<typeof r> => r !== null,
                )

                if (validImages.length > 0) {
                  // Successfully read at least one image
                  for (const imageData of validImages) {
                    const filename = basename(imageData.path)
                    onImagePaste(
                      imageData.base64,
                      imageData.mediaType,
                      filename,
                      imageData.dimensions,
                      imageData.path,
                    )
                  }
                  // If some paths weren't images, paste them as text
                  const nonImageLines = lines.filter(
                    line => !isImageFilePath(line),
                  )
                  if (nonImageLines.length > 0 && onPaste) {
                    onPaste(nonImageLines.join('\n'))
                  }
                  setIsPasting(false)
                } else if (isTempScreenshot && isMacOS) {
                  // For temporary screenshot files that no longer exist, try clipboard
                  checkClipboardForImage()
                } else {
                  if (onPaste) {
                    onPaste(pastedText)
                  }
                  setIsPasting(false)
                }
              })
              return { chunks: [], timeoutId: null }
            }

            // If paste is empty (common when trying to paste images with Cmd+V),
            // check if clipboard has an image (macOS only)
            if (isMacOS && onImagePaste && pastedText.length === 0) {
              checkClipboardForImage()
              return { chunks: [], timeoutId: null }
            }

            // Handle regular paste
            if (onPaste) {
              onPaste(pastedText)
            }
            // Reset isPasting state after paste is complete
            setIsPasting(false)
            return { chunks: [], timeoutId: null }
          })
        },
        PASTE_COMPLETION_TIMEOUT_MS,
        setPasteState,
        onImagePaste,
        onPaste,
        setIsPasting,
        checkClipboardForImage,
        isMacOS,
        pastePendingRef,
      )
    },
    [checkClipboardForImage, isMacOS, onImagePaste, onPaste],
  )

  // Paste detection is now done via the InputEvent's keypress.isPasted flag,
  // which is set by the keypress parser when it detects bracketed paste mode.
  // This avoids the race condition caused by having multiple listeners on stdin.
  // Previously, we had a stdin.on('data') listener here which competed with
  // the 'readable' listener in App.tsx, causing dropped characters.

  const wrappedOnInput = (input: string, key: Key, event: InputEvent): void => {
    // Detect paste from the parsed keypress event.
    // The keypress parser sets isPasted=true for content within bracketed paste.
    const isFromPaste = event.keypress.isPasted

    // If this is pasted content, set isPasting state for UI feedback
    if (isFromPaste) {
      setIsPasting(true)
    }

    // Handle large pastes (>PASTE_THRESHOLD chars)
    // Usually we get one or two input characters at a time. If we
    // get more than the threshold, the user has probably pasted.
    // Unfortunately node batches long pastes, so it's possible
    // that we would see e.g. 1024 characters and then just a few
    // more in the next frame that belong with the original paste.
    // This batching number is not consistent.

    // Handle potential image filenames (even if they're shorter than paste threshold)
    // When dragging multiple images, they may come as newline-separated or
    // space-separated paths. Split on spaces preceding absolute paths:
    // - Unix: ` /` - Windows: ` C:\` etc.
    const hasImageFilePath = input
      .split(/ (?=\/|[A-Za-z]:\\)/)
      .flatMap(part => part.split('\n'))
      .some(line => isImageFilePath(line.trim()))

    // Handle empty paste (clipboard image on macOS)
    // When the user pastes an image with Cmd+V, the terminal sends an empty
    // bracketed paste sequence. The keypress parser emits this as isPasted=true
    // with empty input.
    if (isFromPaste && input.length === 0 && isMacOS && onImagePaste) {
      checkClipboardForImage()
      // Reset isPasting since there's no text content to process
      setIsPasting(false)
      return
    }

    // Check if we should handle as paste (from bracketed paste, large input, or continuation)
    const shouldHandleAsPaste =
      onPaste &&
      (input.length > PASTE_THRESHOLD ||
        pastePendingRef.current ||
        hasImageFilePath ||
        isFromPaste)

    if (shouldHandleAsPaste) {
      pastePendingRef.current = true
      setPasteState(({ chunks, timeoutId }) => {
        return {
          chunks: [...chunks, input],
          timeoutId: resetPasteTimeout(timeoutId),
        }
      })
      return
    }
    onInput(input, key)
    if (input.length > 10) {
      // Ensure that setIsPasting is turned off on any other multicharacter
      // input, because the stdin buffer may chunk at arbitrary points and split
      // the closing escape sequence if the input length is too long for the
      // stdin buffer.
      setIsPasting(false)
    }
  }

  return {
    wrappedOnInput,
    pasteState,
    isPasting,
  }
}
|
||||
File diff suppressed because one or more lines are too long
@@ -0,0 +1,106 @@
|
||||
import { useEffect, useRef, useState } from 'react'
|
||||
import { getLastInteractionTime } from '../bootstrap/state.js'
|
||||
import { fetchPrStatus, type PrReviewState } from '../utils/ghPrStatus.js'
|
||||
|
||||
// Poll `gh` for PR status at most once per minute.
const POLL_INTERVAL_MS = 60_000
// If a single fetch takes longer than this, polling is disabled permanently.
const SLOW_GH_THRESHOLD_MS = 4_000
const IDLE_STOP_MS = 60 * 60_000 // stop polling after 60 min idle

// Latest-known PR review status for the current session.
export type PrStatusState = {
  number: number | null
  url: string | null
  reviewState: PrReviewState | null
  // Wall-clock time of the last state change; 0 means "never updated".
  lastUpdated: number
}

// No PR known yet.
const INITIAL_STATE: PrStatusState = {
  number: null,
  url: null,
  reviewState: null,
  lastUpdated: 0,
}
|
||||
|
||||
/**
 * Polls PR review status every 60s while the session is active.
 * When no interaction is detected for 60 minutes, the loop stops — no
 * timers remain. React re-runs the effect when isLoading changes
 * (turn starts/ends), restarting the loop. Effect setup schedules
 * the next poll relative to the last fetch time so turn boundaries
 * don't spawn `gh` more than once per interval. Disables permanently
 * if a fetch exceeds 4s.
 *
 * Pass `enabled: false` to skip polling entirely (hook still must be
 * called unconditionally to satisfy the rules of hooks).
 */
export function usePrStatus(isLoading: boolean, enabled = true): PrStatusState {
  const [prStatus, setPrStatus] = useState<PrStatusState>(INITIAL_STATE)
  const timeoutRef = useRef<ReturnType<typeof setTimeout> | null>(null)
  // Latched true once a fetch exceeds SLOW_GH_THRESHOLD_MS; never polls again.
  const disabledRef = useRef(false)
  // Start time of the most recent fetch. Persists across effect re-runs so
  // a turn boundary does not trigger an extra fetch within the interval.
  const lastFetchRef = useRef(0)

  useEffect(() => {
    if (!enabled) return
    if (disabledRef.current) return

    let cancelled = false
    let lastSeenInteractionTime = -1
    let lastActivityTimestamp = Date.now()

    async function poll() {
      if (cancelled) return

      // Idle detection: compare the global last-interaction timestamp to
      // the value seen on the previous tick; after IDLE_STOP_MS with no
      // change, return without rescheduling (loop ends).
      const currentInteractionTime = getLastInteractionTime()
      if (lastSeenInteractionTime !== currentInteractionTime) {
        lastSeenInteractionTime = currentInteractionTime
        lastActivityTimestamp = Date.now()
      } else if (Date.now() - lastActivityTimestamp >= IDLE_STOP_MS) {
        return
      }

      const start = Date.now()
      const result = await fetchPrStatus()
      if (cancelled) return
      lastFetchRef.current = start

      // Only produce a new state object when number/reviewState actually
      // changed, so consumers don't re-render every minute.
      setPrStatus(prev => {
        const newNumber = result?.number ?? null
        const newReviewState = result?.reviewState ?? null
        if (prev.number === newNumber && prev.reviewState === newReviewState) {
          return prev
        }
        return {
          number: newNumber,
          url: result?.url ?? null,
          reviewState: newReviewState,
          lastUpdated: Date.now(),
        }
      })

      // A slow `gh` invocation disables polling for the rest of the session.
      if (Date.now() - start > SLOW_GH_THRESHOLD_MS) {
        disabledRef.current = true
        return
      }

      if (!cancelled) {
        timeoutRef.current = setTimeout(poll, POLL_INTERVAL_MS)
      }
    }

    // Schedule relative to the last fetch so effect re-runs (turn
    // boundaries) fetch at most once per interval.
    const elapsed = Date.now() - lastFetchRef.current
    if (elapsed >= POLL_INTERVAL_MS) {
      void poll()
    } else {
      timeoutRef.current = setTimeout(poll, POLL_INTERVAL_MS - elapsed)
    }

    return () => {
      cancelled = true
      if (timeoutRef.current) {
        clearTimeout(timeoutRef.current)
        timeoutRef.current = null
      }
    }
  }, [isLoading, enabled])

  return prStatus
}
|
||||
@@ -0,0 +1,177 @@
|
||||
import { useCallback, useRef } from 'react'
|
||||
import { useTerminalFocus } from '../ink/hooks/use-terminal-focus.js'
|
||||
import {
|
||||
type AnalyticsMetadata_I_VERIFIED_THIS_IS_NOT_CODE_OR_FILEPATHS,
|
||||
logEvent,
|
||||
} from '../services/analytics/index.js'
|
||||
import { abortSpeculation } from '../services/PromptSuggestion/speculation.js'
|
||||
import { useAppState, useSetAppState } from '../state/AppState.js'
|
||||
|
||||
type Props = {
  // Current contents of the prompt input box.
  inputValue: string
  // True while the assistant is producing a response.
  isAssistantResponding: boolean
}
|
||||
|
||||
/**
 * Surfaces the current prompt suggestion (hidden while the assistant is
 * responding or while the user is typing) and records telemetry about how
 * the user engaged with it: when it was shown, whether it was accepted
 * (Tab or matching Enter) or ignored, and time-to-first-keystroke.
 *
 * Suggestion state lives in AppState.promptSuggestion; this hook reads it
 * and exposes markShown / markAccepted / logOutcomeAtSubmission mutators.
 */
export function usePromptSuggestion({
  inputValue,
  isAssistantResponding,
}: Props): {
  suggestion: string | null
  markAccepted: () => void
  markShown: () => void
  logOutcomeAtSubmission: (
    finalInput: string,
    opts?: { skipReset: boolean },
  ) => void
} {
  const promptSuggestion = useAppState(s => s.promptSuggestion)
  const setAppState = useSetAppState()
  const isTerminalFocused = useTerminalFocus()
  const {
    text: suggestionText,
    promptId,
    shownAt,
    acceptedAt,
    generationRequestId,
  } = promptSuggestion

  // Hide the suggestion while the assistant is responding or the user types.
  const suggestion =
    isAssistantResponding || inputValue.length > 0 ? null : suggestionText

  const isValidSuggestion = suggestionText && shownAt > 0

  // Track engagement depth for telemetry
  const firstKeystrokeAt = useRef<number>(0)
  const wasFocusedWhenShown = useRef<boolean>(true)
  const prevShownAt = useRef<number>(0)

  // Capture focus state when a new suggestion appears (shownAt changes).
  // NOTE(review): these refs are mutated during render, not in an effect —
  // presumably deliberate so the values are current within this same render.
  if (shownAt > 0 && shownAt !== prevShownAt.current) {
    prevShownAt.current = shownAt
    wasFocusedWhenShown.current = isTerminalFocused
    firstKeystrokeAt.current = 0
  } else if (shownAt === 0) {
    prevShownAt.current = 0
  }

  // Record first keystroke while suggestion is visible
  if (
    inputValue.length > 0 &&
    firstKeystrokeAt.current === 0 &&
    isValidSuggestion
  ) {
    firstKeystrokeAt.current = Date.now()
  }

  // Clears the suggestion and aborts any in-flight speculation request.
  const resetSuggestion = useCallback(() => {
    abortSpeculation(setAppState)

    setAppState(prev => ({
      ...prev,
      promptSuggestion: {
        text: null,
        promptId: null,
        shownAt: 0,
        acceptedAt: 0,
        generationRequestId: null,
      },
    }))
  }, [setAppState])

  // Records the moment the user accepted (e.g. pressed Tab on) the suggestion.
  const markAccepted = useCallback(() => {
    if (!isValidSuggestion) return
    setAppState(prev => ({
      ...prev,
      promptSuggestion: {
        ...prev.promptSuggestion,
        acceptedAt: Date.now(),
      },
    }))
  }, [isValidSuggestion, setAppState])

  const markShown = useCallback(() => {
    // Check shownAt inside setAppState callback to avoid depending on it
    // (depending on shownAt causes infinite loop when this callback is called)
    setAppState(prev => {
      // Only mark shown if not already shown and suggestion exists
      if (prev.promptSuggestion.shownAt !== 0 || !prev.promptSuggestion.text) {
        return prev
      }
      return {
        ...prev,
        promptSuggestion: {
          ...prev.promptSuggestion,
          shownAt: Date.now(),
        },
      }
    })
  }, [setAppState])

  // Emits the tengu_prompt_suggestion analytics event at submission time,
  // then (unless skipReset) clears the suggestion state.
  const logOutcomeAtSubmission = useCallback(
    (finalInput: string, opts?: { skipReset: boolean }) => {
      if (!isValidSuggestion) return

      // Determine if accepted: either Tab was pressed (acceptedAt set) OR
      // final input matches suggestion (empty Enter case)
      const tabWasPressed = acceptedAt > shownAt
      const wasAccepted = tabWasPressed || finalInput === suggestionText
      const timeMs = wasAccepted ? acceptedAt || Date.now() : Date.now()

      logEvent('tengu_prompt_suggestion', {
        source:
          'cli' as AnalyticsMetadata_I_VERIFIED_THIS_IS_NOT_CODE_OR_FILEPATHS,
        outcome: (wasAccepted
          ? 'accepted'
          : 'ignored') as AnalyticsMetadata_I_VERIFIED_THIS_IS_NOT_CODE_OR_FILEPATHS,
        prompt_id:
          promptId as AnalyticsMetadata_I_VERIFIED_THIS_IS_NOT_CODE_OR_FILEPATHS,
        ...(generationRequestId && {
          generationRequestId:
            generationRequestId as AnalyticsMetadata_I_VERIFIED_THIS_IS_NOT_CODE_OR_FILEPATHS,
        }),
        ...(wasAccepted && {
          acceptMethod: (tabWasPressed
            ? 'tab'
            : 'enter') as AnalyticsMetadata_I_VERIFIED_THIS_IS_NOT_CODE_OR_FILEPATHS,
        }),
        ...(wasAccepted && {
          timeToAcceptMs: timeMs - shownAt,
        }),
        ...(!wasAccepted && {
          timeToIgnoreMs: timeMs - shownAt,
        }),
        ...(firstKeystrokeAt.current > 0 && {
          timeToFirstKeystrokeMs: firstKeystrokeAt.current - shownAt,
        }),
        wasFocusedWhenShown: wasFocusedWhenShown.current,
        // Length ratio of final input vs suggestion, rounded to 2 decimals.
        similarity:
          Math.round(
            (finalInput.length / (suggestionText?.length || 1)) * 100,
          ) / 100,
        // Raw text is only logged for internal ('ant') users.
        ...(process.env.USER_TYPE === 'ant' && {
          suggestion:
            suggestionText as AnalyticsMetadata_I_VERIFIED_THIS_IS_NOT_CODE_OR_FILEPATHS,
          userInput:
            finalInput as AnalyticsMetadata_I_VERIFIED_THIS_IS_NOT_CODE_OR_FILEPATHS,
        }),
      })
      if (!opts?.skipReset) resetSuggestion()
    },
    [
      isValidSuggestion,
      acceptedAt,
      shownAt,
      suggestionText,
      promptId,
      generationRequestId,
      resetSuggestion,
    ],
  )

  return {
    suggestion,
    markAccepted,
    markShown,
    logOutcomeAtSubmission,
  }
}
|
||||
File diff suppressed because one or more lines are too long
@@ -0,0 +1,68 @@
|
||||
import { useEffect, useSyncExternalStore } from 'react'
|
||||
import type { QueuedCommand } from '../types/textInputTypes.js'
|
||||
import {
|
||||
getCommandQueueSnapshot,
|
||||
subscribeToCommandQueue,
|
||||
} from '../utils/messageQueueManager.js'
|
||||
import type { QueryGuard } from '../utils/QueryGuard.js'
|
||||
import { processQueueIfReady } from '../utils/queueProcessor.js'
|
||||
|
||||
type UseQueueProcessorParams = {
  // Drains the given commands; invoked when the queue is ready to process.
  executeQueuedInput: (commands: QueuedCommand[]) => Promise<void>
  // True when a local JSX UI (e.g. a dialog) is currently capturing input.
  hasActiveLocalJsxUI: boolean
  // Guard tracking whether a query is active or being dispatched.
  queryGuard: QueryGuard
}
|
||||
|
||||
/**
|
||||
* Hook that processes queued commands when conditions are met.
|
||||
*
|
||||
* Uses a single unified command queue (module-level store). Priority determines
|
||||
* processing order: 'now' > 'next' (user input) > 'later' (task notifications).
|
||||
* The dequeue() function handles priority ordering automatically.
|
||||
*
|
||||
* Processing triggers when:
|
||||
* - No query active (queryGuard — reactive via useSyncExternalStore)
|
||||
* - Queue has items
|
||||
* - No active local JSX UI blocking input
|
||||
*/
|
||||
export function useQueueProcessor({
|
||||
executeQueuedInput,
|
||||
hasActiveLocalJsxUI,
|
||||
queryGuard,
|
||||
}: UseQueueProcessorParams): void {
|
||||
// Subscribe to the query guard. Re-renders when a query starts or ends
|
||||
// (or when reserve/cancelReservation transitions dispatching state).
|
||||
const isQueryActive = useSyncExternalStore(
|
||||
queryGuard.subscribe,
|
||||
queryGuard.getSnapshot,
|
||||
)
|
||||
|
||||
// Subscribe to the unified command queue via useSyncExternalStore.
|
||||
// This guarantees re-render when the store changes, bypassing
|
||||
// React context propagation delays that cause missed notifications in Ink.
|
||||
const queueSnapshot = useSyncExternalStore(
|
||||
subscribeToCommandQueue,
|
||||
getCommandQueueSnapshot,
|
||||
)
|
||||
|
||||
useEffect(() => {
|
||||
if (isQueryActive) return
|
||||
if (hasActiveLocalJsxUI) return
|
||||
if (queueSnapshot.length === 0) return
|
||||
|
||||
// Reservation is now owned by handlePromptSubmit (inside executeUserInput's
|
||||
// try block). The sync chain executeQueuedInput → handlePromptSubmit →
|
||||
// executeUserInput → queryGuard.reserve() runs before the first real await,
|
||||
// so by the time React re-runs this effect (due to the dequeue-triggered
|
||||
// snapshot change), isQueryActive is already true (dispatching) and the
|
||||
// guard above returns early. handlePromptSubmit's finally releases the
|
||||
// reservation via cancelReservation() (no-op if onQuery already ran end()).
|
||||
processQueueIfReady({ executeInput: executeQueuedInput })
|
||||
}, [
|
||||
queueSnapshot,
|
||||
isQueryActive,
|
||||
executeQueuedInput,
|
||||
hasActiveLocalJsxUI,
|
||||
queryGuard,
|
||||
])
|
||||
}
|
||||
@@ -0,0 +1,605 @@
|
||||
import { useCallback, useEffect, useMemo, useRef } from 'react'
|
||||
import { BoundedUUIDSet } from '../bridge/bridgeMessaging.js'
|
||||
import type { ToolUseConfirm } from '../components/permissions/PermissionRequest.js'
|
||||
import type { SpinnerMode } from '../components/Spinner/types.js'
|
||||
import {
|
||||
type RemotePermissionResponse,
|
||||
type RemoteSessionConfig,
|
||||
RemoteSessionManager,
|
||||
} from '../remote/RemoteSessionManager.js'
|
||||
import {
|
||||
createSyntheticAssistantMessage,
|
||||
createToolStub,
|
||||
} from '../remote/remotePermissionBridge.js'
|
||||
import {
|
||||
convertSDKMessage,
|
||||
isSessionEndMessage,
|
||||
} from '../remote/sdkMessageAdapter.js'
|
||||
import { useSetAppState } from '../state/AppState.js'
|
||||
import type { AppState } from '../state/AppStateStore.js'
|
||||
import type { Tool } from '../Tool.js'
|
||||
import { findToolByName } from '../Tool.js'
|
||||
import type { Message as MessageType } from '../types/message.js'
|
||||
import type { PermissionAskDecision } from '../types/permissions.js'
|
||||
import { logForDebugging } from '../utils/debug.js'
|
||||
import { truncateToWidth } from '../utils/format.js'
|
||||
import {
|
||||
createSystemMessage,
|
||||
extractTextContent,
|
||||
handleMessageFromStream,
|
||||
type StreamingToolUse,
|
||||
} from '../utils/messages.js'
|
||||
import { generateSessionTitle } from '../utils/sessionTitle.js'
|
||||
import type { RemoteMessageContent } from '../utils/teleport/api.js'
|
||||
import { updateSessionTitle } from '../utils/teleport/api.js'
|
||||
|
||||
// How long to wait for a response before showing a warning
const RESPONSE_TIMEOUT_MS = 60000 // 60 seconds
// Extended timeout during compaction — compact API calls take 5-30s and
// block other SDK messages, so the normal 60s timeout isn't enough when
// compaction itself runs close to the edge.
const COMPACTION_TIMEOUT_MS = 180000 // 3 minutes

// Inputs for useRemoteSession. config === undefined means "not remote":
// the hook is still called (rules of hooks) but does nothing.
type UseRemoteSessionProps = {
  config: RemoteSessionConfig | undefined
  // REPL message list setter; converted SDK messages are appended here.
  setMessages: React.Dispatch<React.SetStateAction<MessageType[]>>
  setIsLoading: (loading: boolean) => void
  // Invoked with the remote session's available slash commands on init.
  onInit?: (slashCommands: string[]) => void
  // Permission requests are surfaced through the existing confirm queue.
  setToolUseConfirmQueue: React.Dispatch<React.SetStateAction<ToolUseConfirm[]>>
  tools: Tool[]
  setStreamingToolUses?: React.Dispatch<
    React.SetStateAction<StreamingToolUse[]>
  >
  setStreamMode?: React.Dispatch<React.SetStateAction<SpinnerMode>>
  setInProgressToolUseIDs?: (f: (prev: Set<string>) => Set<string>) => void
}

// What the hook returns to the REPL for driving the remote session.
type UseRemoteSessionResult = {
  isRemoteMode: boolean
  // Resolves true on successful POST of the message to the remote session.
  sendMessage: (
    content: RemoteMessageContent,
    opts?: { uuid?: string },
  ) => Promise<boolean>
  cancelRequest: () => void
  disconnect: () => void
}
|
||||
|
||||
/**
|
||||
* Hook for managing a remote CCR session in the REPL.
|
||||
*
|
||||
* Handles:
|
||||
* - WebSocket connection to CCR
|
||||
* - Converting SDK messages to REPL messages
|
||||
* - Sending user input to CCR via HTTP POST
|
||||
* - Permission request/response flow via existing ToolUseConfirm queue
|
||||
*/
|
||||
export function useRemoteSession({
|
||||
config,
|
||||
setMessages,
|
||||
setIsLoading,
|
||||
onInit,
|
||||
setToolUseConfirmQueue,
|
||||
tools,
|
||||
setStreamingToolUses,
|
||||
setStreamMode,
|
||||
setInProgressToolUseIDs,
|
||||
}: UseRemoteSessionProps): UseRemoteSessionResult {
|
||||
const isRemoteMode = !!config
|
||||
|
||||
const setAppState = useSetAppState()
|
||||
const setConnStatus = useCallback(
|
||||
(s: AppState['remoteConnectionStatus']) =>
|
||||
setAppState(prev =>
|
||||
prev.remoteConnectionStatus === s
|
||||
? prev
|
||||
: { ...prev, remoteConnectionStatus: s },
|
||||
),
|
||||
[setAppState],
|
||||
)
|
||||
|
||||
// Event-sourced count of subagents running inside the remote daemon child.
|
||||
// The viewer's own AppState.tasks is empty — tasks live in a different
|
||||
// process. task_started/task_notification reach us via the bridge WS.
|
||||
const runningTaskIdsRef = useRef(new Set<string>())
|
||||
const writeTaskCount = useCallback(() => {
|
||||
const n = runningTaskIdsRef.current.size
|
||||
setAppState(prev =>
|
||||
prev.remoteBackgroundTaskCount === n
|
||||
? prev
|
||||
: { ...prev, remoteBackgroundTaskCount: n },
|
||||
)
|
||||
}, [setAppState])
|
||||
|
||||
// Timer for detecting stuck sessions
|
||||
const responseTimeoutRef = useRef<NodeJS.Timeout | null>(null)
|
||||
|
||||
// Track whether the remote session is compacting. During compaction the
|
||||
// CLI worker is busy with an API call and won't emit messages for a while;
|
||||
// use a longer timeout and suppress spurious "unresponsive" warnings.
|
||||
const isCompactingRef = useRef(false)
|
||||
|
||||
const managerRef = useRef<RemoteSessionManager | null>(null)
|
||||
|
||||
// Track whether we've already updated the session title (for no-initial-prompt sessions)
|
||||
const hasUpdatedTitleRef = useRef(false)
|
||||
|
||||
// UUIDs of user messages we POSTed locally — the WS echoes them back and
|
||||
// we must filter them out when convertUserTextMessages is on, or the viewer
|
||||
// sees every typed message twice (once from local createUserMessage, once
|
||||
// from the echo). A single POST can echo MULTIPLE times with the same uuid:
|
||||
// the server may broadcast the POST directly to /subscribe, AND the worker
|
||||
// (cowork desktop / CLI daemon) echoes it again on its write path. A
|
||||
// delete-on-first-match Set would let the second echo through — use a
|
||||
// bounded ring instead. Cap is generous: users don't type 50 messages
|
||||
// faster than echoes arrive.
|
||||
// NOTE: this does NOT dedup history-vs-live overlap at attach time (nothing
|
||||
// seeds the set from history UUIDs; only sendMessage populates it).
|
||||
const sentUUIDsRef = useRef(new BoundedUUIDSet(50))
|
||||
|
||||
// Keep a ref to tools so the WebSocket callback doesn't go stale
|
||||
const toolsRef = useRef(tools)
|
||||
useEffect(() => {
|
||||
toolsRef.current = tools
|
||||
}, [tools])
|
||||
|
||||
// Initialize and connect to remote session
|
||||
useEffect(() => {
|
||||
// Skip if not in remote mode
|
||||
if (!config) {
|
||||
return
|
||||
}
|
||||
|
||||
logForDebugging(
|
||||
`[useRemoteSession] Initializing for session ${config.sessionId}`,
|
||||
)
|
||||
|
||||
const manager = new RemoteSessionManager(config, {
|
||||
onMessage: sdkMessage => {
|
||||
const parts = [`type=${sdkMessage.type}`]
|
||||
if ('subtype' in sdkMessage) parts.push(`subtype=${sdkMessage.subtype}`)
|
||||
if (sdkMessage.type === 'user') {
|
||||
const c = sdkMessage.message?.content
|
||||
parts.push(
|
||||
`content=${Array.isArray(c) ? c.map(b => b.type).join(',') : typeof c}`,
|
||||
)
|
||||
}
|
||||
logForDebugging(`[useRemoteSession] Received ${parts.join(' ')}`)
|
||||
|
||||
// Clear response timeout on any message received — including the WS
|
||||
// echo of our own POST, which acts as a heartbeat. This must run
|
||||
// BEFORE the echo filter, or slow-to-stream agents (compaction, cold
|
||||
// start) spuriously trip the 60s unresponsive warning + reconnect.
|
||||
if (responseTimeoutRef.current) {
|
||||
clearTimeout(responseTimeoutRef.current)
|
||||
responseTimeoutRef.current = null
|
||||
}
|
||||
|
||||
// Echo filter: drop user messages we already added locally before POST.
|
||||
// The server and/or worker round-trip our own send back on the WS with
|
||||
// the same uuid we passed to sendEventToRemoteSession. DO NOT delete on
|
||||
// match — the same uuid can echo more than once (server broadcast +
|
||||
// worker echo), and BoundedUUIDSet already caps growth via its ring.
|
||||
if (
|
||||
sdkMessage.type === 'user' &&
|
||||
sdkMessage.uuid &&
|
||||
sentUUIDsRef.current.has(sdkMessage.uuid)
|
||||
) {
|
||||
logForDebugging(
|
||||
`[useRemoteSession] Dropping echoed user message ${sdkMessage.uuid}`,
|
||||
)
|
||||
return
|
||||
}
|
||||
// Handle init message - extract available slash commands
|
||||
if (
|
||||
sdkMessage.type === 'system' &&
|
||||
sdkMessage.subtype === 'init' &&
|
||||
onInit
|
||||
) {
|
||||
logForDebugging(
|
||||
`[useRemoteSession] Init received with ${sdkMessage.slash_commands.length} slash commands`,
|
||||
)
|
||||
onInit(sdkMessage.slash_commands)
|
||||
}
|
||||
|
||||
// Track remote subagent lifecycle for the "N in background" counter.
|
||||
// All task types (Agent/teammate/workflow/bash) flow through
|
||||
// registerTask() → task_started, and complete via task_notification.
|
||||
// Return early — these are status signals, not renderable messages.
|
||||
if (sdkMessage.type === 'system') {
|
||||
if (sdkMessage.subtype === 'task_started') {
|
||||
runningTaskIdsRef.current.add(sdkMessage.task_id)
|
||||
writeTaskCount()
|
||||
return
|
||||
}
|
||||
if (sdkMessage.subtype === 'task_notification') {
|
||||
runningTaskIdsRef.current.delete(sdkMessage.task_id)
|
||||
writeTaskCount()
|
||||
return
|
||||
}
|
||||
if (sdkMessage.subtype === 'task_progress') {
|
||||
return
|
||||
}
|
||||
// Track compaction state. The CLI emits status='compacting' at
|
||||
// the start and status=null when done; compact_boundary also
|
||||
// signals completion. Repeated 'compacting' status messages
|
||||
// (keep-alive ticks) update the ref but don't append to messages.
|
||||
if (sdkMessage.subtype === 'status') {
|
||||
const wasCompacting = isCompactingRef.current
|
||||
isCompactingRef.current = sdkMessage.status === 'compacting'
|
||||
if (wasCompacting && isCompactingRef.current) {
|
||||
return
|
||||
}
|
||||
}
|
||||
if (sdkMessage.subtype === 'compact_boundary') {
|
||||
isCompactingRef.current = false
|
||||
}
|
||||
}
|
||||
|
||||
// Check if session ended
|
||||
if (isSessionEndMessage(sdkMessage)) {
|
||||
isCompactingRef.current = false
|
||||
setIsLoading(false)
|
||||
}
|
||||
|
||||
// Clear in-progress tool_use IDs when their tool_result arrives.
|
||||
// Must read the RAW sdkMessage: in non-viewerOnly mode,
|
||||
// convertSDKMessage returns {type:'ignored'} for user messages, so the
|
||||
// delete would never fire post-conversion. Mirrors the add site below
|
||||
// and inProcessRunner.ts; without this the set grows unbounded for the
|
||||
// session lifetime (BQ: CCR cohort shows 5.2x higher RSS slope).
|
||||
if (setInProgressToolUseIDs && sdkMessage.type === 'user') {
|
||||
const content = sdkMessage.message?.content
|
||||
if (Array.isArray(content)) {
|
||||
const resultIds: string[] = []
|
||||
for (const block of content) {
|
||||
if (block.type === 'tool_result') {
|
||||
resultIds.push(block.tool_use_id)
|
||||
}
|
||||
}
|
||||
if (resultIds.length > 0) {
|
||||
setInProgressToolUseIDs(prev => {
|
||||
const next = new Set(prev)
|
||||
for (const id of resultIds) next.delete(id)
|
||||
return next.size === prev.size ? prev : next
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Convert SDK message to REPL message. In viewerOnly mode, the
|
||||
// remote agent runs BriefTool (SendUserMessage) — its tool_use block
|
||||
// renders empty (userFacingName() === ''), actual content is in the
|
||||
// tool_result. So we must convert tool_results to render them.
|
||||
const converted = convertSDKMessage(
|
||||
sdkMessage,
|
||||
config.viewerOnly
|
||||
? { convertToolResults: true, convertUserTextMessages: true }
|
||||
: undefined,
|
||||
)
|
||||
|
||||
if (converted.type === 'message') {
|
||||
// When we receive a complete message, clear streaming tool uses
|
||||
// since the complete message replaces the partial streaming state
|
||||
setStreamingToolUses?.(prev => (prev.length > 0 ? [] : prev))
|
||||
|
||||
// Mark tool_use blocks as in-progress so the UI shows the correct
|
||||
// spinner state instead of "Waiting…" (queued). In local sessions,
|
||||
// toolOrchestration.ts handles this, but remote sessions receive
|
||||
// pre-built assistant messages without running local tool execution.
|
||||
if (
|
||||
setInProgressToolUseIDs &&
|
||||
converted.message.type === 'assistant'
|
||||
) {
|
||||
const toolUseIds = converted.message.message.content
|
||||
.filter(block => block.type === 'tool_use')
|
||||
.map(block => block.id)
|
||||
if (toolUseIds.length > 0) {
|
||||
setInProgressToolUseIDs(prev => {
|
||||
const next = new Set(prev)
|
||||
for (const id of toolUseIds) {
|
||||
next.add(id)
|
||||
}
|
||||
return next
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
setMessages(prev => [...prev, converted.message])
|
||||
// Note: Don't stop loading on assistant messages - the agent may still be
|
||||
// working (tool use loops). Loading stops only on session end or permission request.
|
||||
} else if (converted.type === 'stream_event') {
|
||||
// Process streaming events to update UI in real-time
|
||||
if (setStreamingToolUses && setStreamMode) {
|
||||
handleMessageFromStream(
|
||||
converted.event,
|
||||
message => setMessages(prev => [...prev, message]),
|
||||
() => {
|
||||
// No-op for response length - remote sessions don't track this
|
||||
},
|
||||
setStreamMode,
|
||||
setStreamingToolUses,
|
||||
)
|
||||
} else {
|
||||
logForDebugging(
|
||||
`[useRemoteSession] Stream event received but streaming callbacks not provided`,
|
||||
)
|
||||
}
|
||||
}
|
||||
// 'ignored' messages are silently dropped
|
||||
},
|
||||
onPermissionRequest: (request, requestId) => {
|
||||
logForDebugging(
|
||||
`[useRemoteSession] Permission request for tool: ${request.tool_name}`,
|
||||
)
|
||||
|
||||
// Look up the Tool object by name, or create a stub for unknown tools
|
||||
const tool =
|
||||
findToolByName(toolsRef.current, request.tool_name) ??
|
||||
createToolStub(request.tool_name)
|
||||
|
||||
const syntheticMessage = createSyntheticAssistantMessage(
|
||||
request,
|
||||
requestId,
|
||||
)
|
||||
|
||||
const permissionResult: PermissionAskDecision = {
|
||||
behavior: 'ask',
|
||||
message:
|
||||
request.description ?? `${request.tool_name} requires permission`,
|
||||
suggestions: request.permission_suggestions,
|
||||
blockedPath: request.blocked_path,
|
||||
}
|
||||
|
||||
const toolUseConfirm: ToolUseConfirm = {
|
||||
assistantMessage: syntheticMessage,
|
||||
tool,
|
||||
description:
|
||||
request.description ?? `${request.tool_name} requires permission`,
|
||||
input: request.input,
|
||||
toolUseContext: {} as ToolUseConfirm['toolUseContext'],
|
||||
toolUseID: request.tool_use_id,
|
||||
permissionResult,
|
||||
permissionPromptStartTimeMs: Date.now(),
|
||||
onUserInteraction() {
|
||||
// No-op for remote — classifier runs on the container
|
||||
},
|
||||
onAbort() {
|
||||
const response: RemotePermissionResponse = {
|
||||
behavior: 'deny',
|
||||
message: 'User aborted',
|
||||
}
|
||||
manager.respondToPermissionRequest(requestId, response)
|
||||
setToolUseConfirmQueue(queue =>
|
||||
queue.filter(item => item.toolUseID !== request.tool_use_id),
|
||||
)
|
||||
},
|
||||
onAllow(updatedInput, _permissionUpdates, _feedback) {
|
||||
const response: RemotePermissionResponse = {
|
||||
behavior: 'allow',
|
||||
updatedInput,
|
||||
}
|
||||
manager.respondToPermissionRequest(requestId, response)
|
||||
setToolUseConfirmQueue(queue =>
|
||||
queue.filter(item => item.toolUseID !== request.tool_use_id),
|
||||
)
|
||||
// Resume loading indicator after approving
|
||||
setIsLoading(true)
|
||||
},
|
||||
onReject(feedback?: string) {
|
||||
const response: RemotePermissionResponse = {
|
||||
behavior: 'deny',
|
||||
message: feedback ?? 'User denied permission',
|
||||
}
|
||||
manager.respondToPermissionRequest(requestId, response)
|
||||
setToolUseConfirmQueue(queue =>
|
||||
queue.filter(item => item.toolUseID !== request.tool_use_id),
|
||||
)
|
||||
},
|
||||
async recheckPermission() {
|
||||
// No-op for remote — permission state is on the container
|
||||
},
|
||||
}
|
||||
|
||||
setToolUseConfirmQueue(queue => [...queue, toolUseConfirm])
|
||||
// Pause loading indicator while waiting for permission
|
||||
setIsLoading(false)
|
||||
},
|
||||
onPermissionCancelled: (requestId, toolUseId) => {
|
||||
logForDebugging(
|
||||
`[useRemoteSession] Permission request cancelled: ${requestId}`,
|
||||
)
|
||||
const idToRemove = toolUseId ?? requestId
|
||||
setToolUseConfirmQueue(queue =>
|
||||
queue.filter(item => item.toolUseID !== idToRemove),
|
||||
)
|
||||
setIsLoading(true)
|
||||
},
|
||||
onConnected: () => {
|
||||
logForDebugging('[useRemoteSession] Connected')
|
||||
setConnStatus('connected')
|
||||
},
|
||||
onReconnecting: () => {
|
||||
logForDebugging('[useRemoteSession] Reconnecting')
|
||||
setConnStatus('reconnecting')
|
||||
// WS gap = we may miss task_notification events. Clear rather than
|
||||
// drift high forever. Undercounts tasks that span the gap; accepted.
|
||||
runningTaskIdsRef.current.clear()
|
||||
writeTaskCount()
|
||||
// Same for tool_use IDs: missed tool_result during the gap would
|
||||
// leave stale spinner state forever.
|
||||
setInProgressToolUseIDs?.(prev => (prev.size > 0 ? new Set() : prev))
|
||||
},
|
||||
onDisconnected: () => {
|
||||
logForDebugging('[useRemoteSession] Disconnected')
|
||||
setConnStatus('disconnected')
|
||||
setIsLoading(false)
|
||||
runningTaskIdsRef.current.clear()
|
||||
writeTaskCount()
|
||||
setInProgressToolUseIDs?.(prev => (prev.size > 0 ? new Set() : prev))
|
||||
},
|
||||
onError: error => {
|
||||
logForDebugging(`[useRemoteSession] Error: ${error.message}`)
|
||||
},
|
||||
})
|
||||
|
||||
managerRef.current = manager
|
||||
manager.connect()
|
||||
|
||||
return () => {
|
||||
logForDebugging('[useRemoteSession] Cleanup - disconnecting')
|
||||
// Clear any pending timeout
|
||||
if (responseTimeoutRef.current) {
|
||||
clearTimeout(responseTimeoutRef.current)
|
||||
responseTimeoutRef.current = null
|
||||
}
|
||||
manager.disconnect()
|
||||
managerRef.current = null
|
||||
}
|
||||
}, [
|
||||
config,
|
||||
setMessages,
|
||||
setIsLoading,
|
||||
onInit,
|
||||
setToolUseConfirmQueue,
|
||||
setStreamingToolUses,
|
||||
setStreamMode,
|
||||
setInProgressToolUseIDs,
|
||||
setConnStatus,
|
||||
writeTaskCount,
|
||||
])
|
||||
|
||||
// Send a user message to the remote session
|
||||
const sendMessage = useCallback(
|
||||
async (
|
||||
content: RemoteMessageContent,
|
||||
opts?: { uuid?: string },
|
||||
): Promise<boolean> => {
|
||||
const manager = managerRef.current
|
||||
if (!manager) {
|
||||
logForDebugging('[useRemoteSession] Cannot send - no manager')
|
||||
return false
|
||||
}
|
||||
|
||||
// Clear any existing timeout
|
||||
if (responseTimeoutRef.current) {
|
||||
clearTimeout(responseTimeoutRef.current)
|
||||
}
|
||||
|
||||
setIsLoading(true)
|
||||
|
||||
// Track locally-added message UUIDs so the WS echo can be filtered.
|
||||
// Must record BEFORE the POST to close the race where the echo arrives
|
||||
// before the POST promise resolves.
|
||||
if (opts?.uuid) sentUUIDsRef.current.add(opts.uuid)
|
||||
|
||||
const success = await manager.sendMessage(content, opts)
|
||||
|
||||
if (!success) {
|
||||
// No need to undo the pre-POST add — BoundedUUIDSet's ring evicts it.
|
||||
setIsLoading(false)
|
||||
return false
|
||||
}
|
||||
|
||||
// Update the session title after the first message when no initial prompt was provided.
|
||||
// This gives the session a meaningful title on claude.ai instead of "Background task".
|
||||
// Skip in viewerOnly mode — the remote agent owns the session title.
|
||||
if (
|
||||
!hasUpdatedTitleRef.current &&
|
||||
config &&
|
||||
!config.hasInitialPrompt &&
|
||||
!config.viewerOnly
|
||||
) {
|
||||
hasUpdatedTitleRef.current = true
|
||||
const sessionId = config.sessionId
|
||||
// Extract plain text from content (may be string or content block array)
|
||||
const description =
|
||||
typeof content === 'string'
|
||||
? content
|
||||
: extractTextContent(content, ' ')
|
||||
if (description) {
|
||||
// generateSessionTitle never rejects (wraps body in try/catch,
|
||||
// returns null on failure), so no .catch needed on this chain.
|
||||
void generateSessionTitle(
|
||||
description,
|
||||
new AbortController().signal,
|
||||
).then(title => {
|
||||
void updateSessionTitle(
|
||||
sessionId,
|
||||
title ?? truncateToWidth(description, 75),
|
||||
)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// Start timeout to detect stuck sessions. Skip in viewerOnly mode —
|
||||
// the remote agent may be idle-shut and take >60s to respawn.
|
||||
// Use a longer timeout when the remote session is compacting, since
|
||||
// the CLI worker is busy with an API call and won't emit messages.
|
||||
if (!config?.viewerOnly) {
|
||||
const timeoutMs = isCompactingRef.current
|
||||
? COMPACTION_TIMEOUT_MS
|
||||
: RESPONSE_TIMEOUT_MS
|
||||
responseTimeoutRef.current = setTimeout(
|
||||
(setMessages, manager) => {
|
||||
logForDebugging(
|
||||
'[useRemoteSession] Response timeout - attempting reconnect',
|
||||
)
|
||||
// Add a warning message to the conversation
|
||||
const warningMessage = createSystemMessage(
|
||||
'Remote session may be unresponsive. Attempting to reconnect…',
|
||||
'warning',
|
||||
)
|
||||
setMessages(prev => [...prev, warningMessage])
|
||||
|
||||
// Attempt to reconnect the WebSocket - the subscription may have become stale
|
||||
manager.reconnect()
|
||||
},
|
||||
timeoutMs,
|
||||
setMessages,
|
||||
manager,
|
||||
)
|
||||
}
|
||||
|
||||
return success
|
||||
},
|
||||
[config, setIsLoading, setMessages],
|
||||
)
|
||||
|
||||
// Cancel the current request on the remote session
|
||||
const cancelRequest = useCallback(() => {
|
||||
// Clear any pending timeout
|
||||
if (responseTimeoutRef.current) {
|
||||
clearTimeout(responseTimeoutRef.current)
|
||||
responseTimeoutRef.current = null
|
||||
}
|
||||
|
||||
// Send interrupt signal to CCR. Skip in viewerOnly mode — Ctrl+C
|
||||
// should never interrupt the remote agent.
|
||||
if (!config?.viewerOnly) {
|
||||
managerRef.current?.cancelSession()
|
||||
}
|
||||
|
||||
setIsLoading(false)
|
||||
}, [config, setIsLoading])
|
||||
|
||||
// Disconnect from the session
|
||||
const disconnect = useCallback(() => {
|
||||
// Clear any pending timeout
|
||||
if (responseTimeoutRef.current) {
|
||||
clearTimeout(responseTimeoutRef.current)
|
||||
responseTimeoutRef.current = null
|
||||
}
|
||||
managerRef.current?.disconnect()
|
||||
managerRef.current = null
|
||||
}, [])
|
||||
|
||||
// All four fields are already stable (boolean derived from a prop that
|
||||
// doesn't change mid-session, three useCallbacks with stable deps). The
|
||||
// result object is consumed by REPL's onSubmit useCallback deps — without
|
||||
// memoization the fresh literal invalidates onSubmit on every REPL render,
|
||||
// which in turn churns PromptInput's props and downstream memoization.
|
||||
return useMemo(
|
||||
() => ({ isRemoteMode, sendMessage, cancelRequest, disconnect }),
|
||||
[isRemoteMode, sendMessage, cancelRequest, disconnect],
|
||||
)
|
||||
}
|
||||
File diff suppressed because one or more lines are too long
@@ -0,0 +1,241 @@
|
||||
/**
|
||||
* REPL integration hook for `claude ssh` sessions.
|
||||
*
|
||||
* Sibling to useDirectConnect — same shape (isRemoteMode/sendMessage/
|
||||
* cancelRequest/disconnect), same REPL wiring, but drives an SSH child
|
||||
* process instead of a WebSocket. Kept separate rather than generalizing
|
||||
* useDirectConnect because the lifecycle differs: the ssh process and auth
|
||||
* proxy are created BEFORE this hook runs (during startup, in main.tsx) and
|
||||
* handed in; useDirectConnect creates its WebSocket inside the effect.
|
||||
*/
|
||||
|
||||
import { randomUUID } from 'crypto'
|
||||
import { useCallback, useEffect, useMemo, useRef } from 'react'
|
||||
import type { ToolUseConfirm } from '../components/permissions/PermissionRequest.js'
|
||||
import {
|
||||
createSyntheticAssistantMessage,
|
||||
createToolStub,
|
||||
} from '../remote/remotePermissionBridge.js'
|
||||
import {
|
||||
convertSDKMessage,
|
||||
isSessionEndMessage,
|
||||
} from '../remote/sdkMessageAdapter.js'
|
||||
import type { SSHSession } from '../ssh/createSSHSession.js'
|
||||
import type { SSHSessionManager } from '../ssh/SSHSessionManager.js'
|
||||
import type { Tool } from '../Tool.js'
|
||||
import { findToolByName } from '../Tool.js'
|
||||
import type { Message as MessageType } from '../types/message.js'
|
||||
import type { PermissionAskDecision } from '../types/permissions.js'
|
||||
import { logForDebugging } from '../utils/debug.js'
|
||||
import { gracefulShutdown } from '../utils/gracefulShutdown.js'
|
||||
import type { RemoteMessageContent } from '../utils/teleport/api.js'
|
||||
|
||||
// Result surface handed back to the REPL. Per the header comment, this is
// deliberately the same shape as useDirectConnect's result so the REPL can
// consume either hook interchangeably.
type UseSSHSessionResult = {
  // True whenever an SSH session object was supplied; the hook is inert otherwise.
  isRemoteMode: boolean
  // Sends a user message to the remote process; resolves false when no
  // manager is wired up or the underlying send fails.
  sendMessage: (content: RemoteMessageContent) => Promise<boolean>
  // Interrupts the in-flight remote request (Ctrl+C path).
  cancelRequest: () => void
  // Tears down the manager without exiting the process.
  disconnect: () => void
}

type UseSSHSessionProps = {
  // Created during startup in main.tsx and handed in; undefined when this
  // REPL is not running under `claude ssh`.
  session: SSHSession | undefined
  // Appends converted SDK messages (and reconnect notices) to the transcript.
  setMessages: React.Dispatch<React.SetStateAction<MessageType[]>>
  // Drives the REPL spinner; toggled on send / session end / disconnect.
  setIsLoading: (loading: boolean) => void
  // Queue of pending permission prompts rendered by the permissions UI.
  setToolUseConfirmQueue: React.Dispatch<React.SetStateAction<ToolUseConfirm[]>>
  // Tool registry used to resolve permission requests to concrete Tool objects.
  tools: Tool[]
}
|
||||
|
||||
export function useSSHSession({
|
||||
session,
|
||||
setMessages,
|
||||
setIsLoading,
|
||||
setToolUseConfirmQueue,
|
||||
tools,
|
||||
}: UseSSHSessionProps): UseSSHSessionResult {
|
||||
const isRemoteMode = !!session
|
||||
|
||||
const managerRef = useRef<SSHSessionManager | null>(null)
|
||||
const hasReceivedInitRef = useRef(false)
|
||||
const isConnectedRef = useRef(false)
|
||||
|
||||
const toolsRef = useRef(tools)
|
||||
useEffect(() => {
|
||||
toolsRef.current = tools
|
||||
}, [tools])
|
||||
|
||||
useEffect(() => {
|
||||
if (!session) return
|
||||
|
||||
hasReceivedInitRef.current = false
|
||||
logForDebugging('[useSSHSession] wiring SSH session manager')
|
||||
|
||||
const manager = session.createManager({
|
||||
onMessage: sdkMessage => {
|
||||
if (isSessionEndMessage(sdkMessage)) {
|
||||
setIsLoading(false)
|
||||
}
|
||||
|
||||
// Skip duplicate init messages (one per turn from stream-json mode).
|
||||
if (sdkMessage.type === 'system' && sdkMessage.subtype === 'init') {
|
||||
if (hasReceivedInitRef.current) return
|
||||
hasReceivedInitRef.current = true
|
||||
}
|
||||
|
||||
const converted = convertSDKMessage(sdkMessage, {
|
||||
convertToolResults: true,
|
||||
})
|
||||
if (converted.type === 'message') {
|
||||
setMessages(prev => [...prev, converted.message])
|
||||
}
|
||||
},
|
||||
onPermissionRequest: (request, requestId) => {
|
||||
logForDebugging(
|
||||
`[useSSHSession] permission request: ${request.tool_name}`,
|
||||
)
|
||||
|
||||
const tool =
|
||||
findToolByName(toolsRef.current, request.tool_name) ??
|
||||
createToolStub(request.tool_name)
|
||||
|
||||
const syntheticMessage = createSyntheticAssistantMessage(
|
||||
request,
|
||||
requestId,
|
||||
)
|
||||
|
||||
const permissionResult: PermissionAskDecision = {
|
||||
behavior: 'ask',
|
||||
message:
|
||||
request.description ?? `${request.tool_name} requires permission`,
|
||||
suggestions: request.permission_suggestions,
|
||||
blockedPath: request.blocked_path,
|
||||
}
|
||||
|
||||
const toolUseConfirm: ToolUseConfirm = {
|
||||
assistantMessage: syntheticMessage,
|
||||
tool,
|
||||
description:
|
||||
request.description ?? `${request.tool_name} requires permission`,
|
||||
input: request.input,
|
||||
toolUseContext: {} as ToolUseConfirm['toolUseContext'],
|
||||
toolUseID: request.tool_use_id,
|
||||
permissionResult,
|
||||
permissionPromptStartTimeMs: Date.now(),
|
||||
onUserInteraction() {},
|
||||
onAbort() {
|
||||
manager.respondToPermissionRequest(requestId, {
|
||||
behavior: 'deny',
|
||||
message: 'User aborted',
|
||||
})
|
||||
setToolUseConfirmQueue(q =>
|
||||
q.filter(i => i.toolUseID !== request.tool_use_id),
|
||||
)
|
||||
},
|
||||
onAllow(updatedInput) {
|
||||
manager.respondToPermissionRequest(requestId, {
|
||||
behavior: 'allow',
|
||||
updatedInput,
|
||||
})
|
||||
setToolUseConfirmQueue(q =>
|
||||
q.filter(i => i.toolUseID !== request.tool_use_id),
|
||||
)
|
||||
setIsLoading(true)
|
||||
},
|
||||
onReject(feedback) {
|
||||
manager.respondToPermissionRequest(requestId, {
|
||||
behavior: 'deny',
|
||||
message: feedback ?? 'User denied permission',
|
||||
})
|
||||
setToolUseConfirmQueue(q =>
|
||||
q.filter(i => i.toolUseID !== request.tool_use_id),
|
||||
)
|
||||
},
|
||||
async recheckPermission() {},
|
||||
}
|
||||
|
||||
setToolUseConfirmQueue(q => [...q, toolUseConfirm])
|
||||
setIsLoading(false)
|
||||
},
|
||||
onConnected: () => {
|
||||
logForDebugging('[useSSHSession] connected')
|
||||
isConnectedRef.current = true
|
||||
},
|
||||
onReconnecting: (attempt, max) => {
|
||||
logForDebugging(
|
||||
`[useSSHSession] ssh dropped, reconnecting (${attempt}/${max})`,
|
||||
)
|
||||
isConnectedRef.current = false
|
||||
// Surface a transient system message in the transcript so the user
|
||||
// knows what's happening — the next onConnected clears the state.
|
||||
// Any in-flight request is lost; the remote's --continue reloads
|
||||
// history but there's no turn in progress to resume.
|
||||
setIsLoading(false)
|
||||
const msg: MessageType = {
|
||||
type: 'system',
|
||||
subtype: 'informational',
|
||||
content: `SSH connection dropped — reconnecting (attempt ${attempt}/${max})...`,
|
||||
timestamp: new Date().toISOString(),
|
||||
uuid: randomUUID(),
|
||||
level: 'warning',
|
||||
}
|
||||
setMessages(prev => [...prev, msg])
|
||||
},
|
||||
onDisconnected: () => {
|
||||
logForDebugging('[useSSHSession] ssh process exited (giving up)')
|
||||
const stderr = session.getStderrTail().trim()
|
||||
const connected = isConnectedRef.current
|
||||
const exitCode = session.proc.exitCode
|
||||
isConnectedRef.current = false
|
||||
setIsLoading(false)
|
||||
|
||||
let msg = connected
|
||||
? 'Remote session ended.'
|
||||
: 'SSH session failed before connecting.'
|
||||
// Surface remote stderr if it looks like an error (pre-connect always,
|
||||
// post-connect only on nonzero exit — normal --verbose noise otherwise).
|
||||
if (stderr && (!connected || exitCode !== 0)) {
|
||||
msg += `\nRemote stderr (exit ${exitCode ?? 'signal ' + session.proc.signalCode}):\n${stderr}`
|
||||
}
|
||||
void gracefulShutdown(1, 'other', { finalMessage: msg })
|
||||
},
|
||||
onError: error => {
|
||||
logForDebugging(`[useSSHSession] error: ${error.message}`)
|
||||
},
|
||||
})
|
||||
|
||||
managerRef.current = manager
|
||||
manager.connect()
|
||||
|
||||
return () => {
|
||||
logForDebugging('[useSSHSession] cleanup')
|
||||
manager.disconnect()
|
||||
session.proxy.stop()
|
||||
managerRef.current = null
|
||||
}
|
||||
}, [session, setMessages, setIsLoading, setToolUseConfirmQueue])
|
||||
|
||||
const sendMessage = useCallback(
|
||||
async (content: RemoteMessageContent): Promise<boolean> => {
|
||||
const m = managerRef.current
|
||||
if (!m) return false
|
||||
setIsLoading(true)
|
||||
return m.sendMessage(content)
|
||||
},
|
||||
[setIsLoading],
|
||||
)
|
||||
|
||||
const cancelRequest = useCallback(() => {
|
||||
managerRef.current?.sendInterrupt()
|
||||
setIsLoading(false)
|
||||
}, [setIsLoading])
|
||||
|
||||
const disconnect = useCallback(() => {
|
||||
managerRef.current?.disconnect()
|
||||
managerRef.current = null
|
||||
isConnectedRef.current = false
|
||||
}, [])
|
||||
|
||||
return useMemo(
|
||||
() => ({ isRemoteMode, sendMessage, cancelRequest, disconnect }),
|
||||
[isRemoteMode, sendMessage, cancelRequest, disconnect],
|
||||
)
|
||||
}
|
||||
@@ -0,0 +1,139 @@
|
||||
import { useEffect, useRef } from 'react'
|
||||
import { useAppStateStore, useSetAppState } from '../state/AppState.js'
|
||||
import { isTerminalTaskStatus } from '../Task.js'
|
||||
import {
|
||||
findTeammateTaskByAgentId,
|
||||
injectUserMessageToTeammate,
|
||||
} from '../tasks/InProcessTeammateTask/InProcessTeammateTask.js'
|
||||
import { isKairosCronEnabled } from '../tools/ScheduleCronTool/prompt.js'
|
||||
import type { Message } from '../types/message.js'
|
||||
import { getCronJitterConfig } from '../utils/cronJitterConfig.js'
|
||||
import { createCronScheduler } from '../utils/cronScheduler.js'
|
||||
import { removeCronTasks } from '../utils/cronTasks.js'
|
||||
import { logForDebugging } from '../utils/debug.js'
|
||||
import { enqueuePendingNotification } from '../utils/messageQueueManager.js'
|
||||
import { createScheduledTaskFireMessage } from '../utils/messages.js'
|
||||
import { WORKLOAD_CRON } from '../utils/workloadContext.js'
|
||||
|
||||
type Props = {
  // True while a query/turn is in flight; gates when fired tasks may enqueue.
  isLoading: boolean
  /**
   * When true, bypasses the isLoading gate so tasks can enqueue while a
   * query is streaming rather than deferring to the next 1s check tick
   * after the turn ends. Assistant mode no longer forces --proactive
   * (#20425) so isLoading drops between turns like a normal REPL — this
   * bypass is now a latency nicety, not a starvation fix. The prompt is
   * enqueued at 'later' priority either way and drains between turns.
   */
  assistantMode?: boolean
  // Appends the "Running scheduled task…" transcript line on normal fires.
  setMessages: React.Dispatch<React.SetStateAction<Message[]>>
}
|
||||
|
||||
/**
|
||||
* REPL wrapper for the cron scheduler. Mounts the scheduler once and tears
|
||||
* it down on unmount. Fired prompts go into the command queue as 'later'
|
||||
* priority, which the REPL drains via useCommandQueue between turns.
|
||||
*
|
||||
* Scheduler core (timer, file watcher, fire logic) lives in cronScheduler.ts
|
||||
* so SDK/-p mode can share it — see print.ts for the headless wiring.
|
||||
*/
|
||||
export function useScheduledTasks({
  isLoading,
  assistantMode = false,
  setMessages,
}: Props): void {
  // Latest-value ref so the scheduler's isLoading() getter doesn't capture
  // a stale closure. The effect mounts once; isLoading changes every turn.
  const isLoadingRef = useRef(isLoading)
  isLoadingRef.current = isLoading

  const store = useAppStateStore()
  const setAppState = useSetAppState()

  useEffect(() => {
    // Runtime gate checked here (not at the hook call site) so the hook
    // stays unconditionally mounted — rules-of-hooks forbid wrapping the
    // call in a dynamic condition. getFeatureValue_CACHED_WITH_REFRESH
    // reads from disk; the 5-min TTL fires a background refetch but the
    // effect won't re-run on value flip (assistantMode is the only dep),
    // so this guard alone is launch-grain. The mid-session killswitch is
    // the isKilled option below — check() polls it every tick.
    if (!isKairosCronEnabled()) return

    // System-generated — hidden from queue preview and transcript UI.
    // In brief mode, executeForkedSlashCommand runs as a background
    // subagent and returns no visible messages. In normal mode,
    // isMeta is only propagated for plain-text prompts (via
    // processTextPrompt); slash commands like /context:fork do not
    // forward isMeta, so their messages remain visible in the
    // transcript. This is acceptable since normal mode is not the
    // primary use case for scheduled tasks.
    const enqueueForLead = (prompt: string) =>
      enqueuePendingNotification({
        value: prompt,
        mode: 'prompt',
        priority: 'later',
        isMeta: true,
        // Threaded through to cc_workload= in the billing-header
        // attribution block so the API can serve cron-initiated requests
        // at lower QoS when capacity is tight. No human is actively
        // waiting on this response.
        workload: WORKLOAD_CRON,
      })

    const scheduler = createCronScheduler({
      // Missed-task surfacing (onFire fallback). Teammate crons are always
      // session-only (durable:false) so they never appear in the missed list,
      // which is populated from disk at scheduler startup — this path only
      // handles team-lead durable crons.
      onFire: enqueueForLead,
      // Normal fires receive the full CronTask so we can route by agentId.
      onFireTask: task => {
        if (task.agentId) {
          // Teammate-targeted cron: inject the prompt directly into the
          // live teammate task rather than the lead's command queue.
          const teammate = findTeammateTaskByAgentId(
            task.agentId,
            store.getState().tasks,
          )
          if (teammate && !isTerminalTaskStatus(teammate.status)) {
            injectUserMessageToTeammate(teammate.id, task.prompt, setAppState)
            return
          }
          // Teammate is gone — clean up the orphaned cron so it doesn't keep
          // firing into nowhere every tick. One-shots would auto-delete on
          // fire anyway, but recurring crons would loop until auto-expiry.
          logForDebugging(
            `[ScheduledTasks] teammate ${task.agentId} gone, removing orphaned cron ${task.id}`,
          )
          void removeCronTasks([task.id])
          return
        }
        // Lead-targeted cron: surface a transcript line, then enqueue the
        // prompt at 'later' priority for the REPL to drain between turns.
        const msg = createScheduledTaskFireMessage(
          `Running scheduled task (${formatCronFireTime(new Date())})`,
        )
        setMessages(prev => [...prev, msg])
        enqueueForLead(task.prompt)
      },
      // Read through the ref so each tick sees the current turn state.
      isLoading: () => isLoadingRef.current,
      assistantMode,
      getJitterConfig: getCronJitterConfig,
      // Mid-session killswitch — polled by the scheduler every tick.
      isKilled: () => !isKairosCronEnabled(),
    })
    scheduler.start()
    return () => scheduler.stop()
    // assistantMode is stable for the session lifetime; store/setAppState are
    // stable refs from useSyncExternalStore; setMessages is a stable useCallback.
    // eslint-disable-next-line react-hooks/exhaustive-deps
  }, [assistantMode])
}
|
||||
|
||||
function formatCronFireTime(d: Date): string {
|
||||
return d
|
||||
.toLocaleString('en-US', {
|
||||
month: 'short',
|
||||
day: 'numeric',
|
||||
hour: 'numeric',
|
||||
minute: '2-digit',
|
||||
})
|
||||
.replace(/,? at |, /, ' ')
|
||||
.replace(/ ([AP]M)/, (_, ampm) => ampm.toLowerCase())
|
||||
}
|
||||
@@ -0,0 +1,364 @@
|
||||
import { useCallback, useState } from 'react'
|
||||
import { KeyboardEvent } from '../ink/events/keyboard-event.js'
|
||||
// eslint-disable-next-line custom-rules/prefer-use-keybindings -- backward-compat bridge until consumers wire handleKeyDown to <Box onKeyDown>
|
||||
import { useInput } from '../ink.js'
|
||||
import {
|
||||
Cursor,
|
||||
getLastKill,
|
||||
pushToKillRing,
|
||||
recordYank,
|
||||
resetKillAccumulation,
|
||||
resetYankState,
|
||||
updateYankLength,
|
||||
yankPop,
|
||||
} from '../utils/Cursor.js'
|
||||
import { useTerminalSize } from './useTerminalSize.js'
|
||||
|
||||
type UseSearchInputOptions = {
|
||||
isActive: boolean
|
||||
onExit: () => void
|
||||
/** Esc + Ctrl+C abandon (distinct from onExit = Enter commit). When
|
||||
* provided: single-Esc calls this directly (no clear-first-then-exit
|
||||
* two-press). When absent: current behavior — Esc clears non-empty
|
||||
* query, exits on empty; Ctrl+C silently swallowed (no switch case). */
|
||||
onCancel?: () => void
|
||||
onExitUp?: () => void
|
||||
columns?: number
|
||||
passthroughCtrlKeys?: string[]
|
||||
initialQuery?: string
|
||||
/** Backspace (and ctrl+h) on empty query calls onCancel ?? onExit — the
|
||||
* less/vim "delete past the /" convention. Dialogs that want Esc-only
|
||||
* cancel set this false so a held backspace doesn't eject the user. */
|
||||
backspaceExitsOnEmpty?: boolean
|
||||
}
|
||||
|
||||
// Value returned by useSearchInput: controlled query state plus the
// keyboard handler for consumers that wire <Box onKeyDown> directly.
type UseSearchInputReturn = {
  // Current search text.
  query: string
  // Replace the query; moves the cursor to the end of the new text.
  setQuery: (q: string) => void
  // Caret position within `query` (0..query.length).
  cursorOffset: number
  // Keyboard handler; no-op while isActive is false.
  handleKeyDown: (e: KeyboardEvent) => void
}
|
||||
|
||||
function isKillKey(e: KeyboardEvent): boolean {
|
||||
if (e.ctrl && (e.key === 'k' || e.key === 'u' || e.key === 'w')) {
|
||||
return true
|
||||
}
|
||||
if (e.meta && e.key === 'backspace') {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
function isYankKey(e: KeyboardEvent): boolean {
|
||||
return (e.ctrl || e.meta) && e.key === 'y'
|
||||
}
|
||||
|
||||
// Special key names that fall through the explicit handlers above the
|
||||
// text-input branch (return/escape/arrows/home/end/tab/backspace/delete
|
||||
// all early-return). Reject these so e.g. PageUp doesn't leak 'pageup'
|
||||
// as literal text. The length>=1 check below is intentionally loose —
|
||||
// batched input like stdin.write('abc') arrives as one multi-char e.key,
|
||||
// matching the old useInput(input) behavior where cursor.insert(input)
|
||||
// inserted the full chunk.
|
||||
const UNHANDLED_SPECIAL_KEYS = new Set([
|
||||
'pageup',
|
||||
'pagedown',
|
||||
'insert',
|
||||
'wheelup',
|
||||
'wheeldown',
|
||||
'mouse',
|
||||
'f1',
|
||||
'f2',
|
||||
'f3',
|
||||
'f4',
|
||||
'f5',
|
||||
'f6',
|
||||
'f7',
|
||||
'f8',
|
||||
'f9',
|
||||
'f10',
|
||||
'f11',
|
||||
'f12',
|
||||
])
|
||||
|
||||
/**
 * Emacs-style line editing for a single-line search field.
 *
 * Returns controlled `query`/`cursorOffset` state plus `handleKeyDown`,
 * which implements: Enter/Down commit (onExit), Up → onExitUp, Esc
 * cancel-or-clear, kill/yank ring (ctrl+k/u/w, meta+backspace, ctrl+y,
 * meta+y), word motion (ctrl/meta+arrows, meta+b/f), and plain character
 * insertion. A temporary useInput bridge feeds keystrokes to
 * handleKeyDown until all consumers wire <Box onKeyDown> themselves.
 */
export function useSearchInput({
  isActive,
  onExit,
  onCancel,
  onExitUp,
  columns,
  passthroughCtrlKeys = [],
  initialQuery = '',
  backspaceExitsOnEmpty = true,
}: UseSearchInputOptions): UseSearchInputReturn {
  const { columns: terminalColumns } = useTerminalSize()
  const effectiveColumns = columns ?? terminalColumns
  const [query, setQueryState] = useState(initialQuery)
  const [cursorOffset, setCursorOffset] = useState(initialQuery.length)

  // External setter pins the cursor to the end of the new text.
  const setQuery = useCallback((q: string) => {
    setQueryState(q)
    setCursorOffset(q.length)
  }, [])

  const handleKeyDown = (e: KeyboardEvent): void => {
    if (!isActive) return

    // Rebuild the cursor value from current state on every keystroke.
    const cursor = Cursor.fromText(query, effectiveColumns, cursorOffset)

    // Check passthrough ctrl keys — leave these for the parent to handle.
    if (e.ctrl && passthroughCtrlKeys.includes(e.key.toLowerCase())) {
      return
    }

    // Reset kill accumulation for non-kill keys
    if (!isKillKey(e)) {
      resetKillAccumulation()
    }

    // Reset yank state for non-yank keys
    if (!isYankKey(e)) {
      resetYankState()
    }

    // Exit conditions
    if (e.key === 'return' || e.key === 'down') {
      e.preventDefault()
      onExit()
      return
    }
    if (e.key === 'up') {
      e.preventDefault()
      if (onExitUp) {
        onExitUp()
      }
      return
    }
    if (e.key === 'escape') {
      // With onCancel: single-press abandon. Without: clear first,
      // exit on second press (when already empty).
      e.preventDefault()
      if (onCancel) {
        onCancel()
      } else if (query.length > 0) {
        setQueryState('')
        setCursorOffset(0)
      } else {
        onExit()
      }
      return
    }

    // Backspace/Delete
    if (e.key === 'backspace') {
      e.preventDefault()
      if (e.meta) {
        // Meta+Backspace: kill word before
        const { cursor: newCursor, killed } = cursor.deleteWordBefore()
        pushToKillRing(killed, 'prepend')
        setQueryState(newCursor.text)
        setCursorOffset(newCursor.offset)
        return
      }
      if (query.length === 0) {
        // Backspace past the / — cancel (clear + snap back), not commit.
        // less: same. vim: deletes the / and exits command mode.
        if (backspaceExitsOnEmpty) (onCancel ?? onExit)()
        return
      }
      const newCursor = cursor.backspace()
      setQueryState(newCursor.text)
      setCursorOffset(newCursor.offset)
      return
    }

    if (e.key === 'delete') {
      e.preventDefault()
      const newCursor = cursor.del()
      setQueryState(newCursor.text)
      setCursorOffset(newCursor.offset)
      return
    }

    // Arrow keys with modifiers (word jump)
    if (e.key === 'left' && (e.ctrl || e.meta || e.fn)) {
      e.preventDefault()
      const newCursor = cursor.prevWord()
      setCursorOffset(newCursor.offset)
      return
    }
    if (e.key === 'right' && (e.ctrl || e.meta || e.fn)) {
      e.preventDefault()
      const newCursor = cursor.nextWord()
      setCursorOffset(newCursor.offset)
      return
    }

    // Plain arrow keys
    if (e.key === 'left') {
      e.preventDefault()
      const newCursor = cursor.left()
      setCursorOffset(newCursor.offset)
      return
    }
    if (e.key === 'right') {
      e.preventDefault()
      const newCursor = cursor.right()
      setCursorOffset(newCursor.offset)
      return
    }

    // Home/End
    if (e.key === 'home') {
      e.preventDefault()
      setCursorOffset(0)
      return
    }
    if (e.key === 'end') {
      e.preventDefault()
      setCursorOffset(query.length)
      return
    }

    // Ctrl key bindings (readline-style). Any unlisted ctrl chord is
    // swallowed by the trailing return after the switch.
    if (e.ctrl) {
      e.preventDefault()
      switch (e.key.toLowerCase()) {
        case 'a': // beginning of line
          setCursorOffset(0)
          return
        case 'e': // end of line
          setCursorOffset(query.length)
          return
        case 'b': // back one char
          setCursorOffset(cursor.left().offset)
          return
        case 'f': // forward one char
          setCursorOffset(cursor.right().offset)
          return
        case 'd': {
          // Delete forward; on empty query acts as EOF-style cancel/exit.
          if (query.length === 0) {
            ;(onCancel ?? onExit)()
            return
          }
          const newCursor = cursor.del()
          setQueryState(newCursor.text)
          setCursorOffset(newCursor.offset)
          return
        }
        case 'h': {
          // ctrl+h is backspace; mirrors the backspace branch above.
          if (query.length === 0) {
            if (backspaceExitsOnEmpty) (onCancel ?? onExit)()
            return
          }
          const newCursor = cursor.backspace()
          setQueryState(newCursor.text)
          setCursorOffset(newCursor.offset)
          return
        }
        case 'k': {
          // Kill to end of line; appends to the current kill-ring entry.
          const { cursor: newCursor, killed } = cursor.deleteToLineEnd()
          pushToKillRing(killed, 'append')
          setQueryState(newCursor.text)
          setCursorOffset(newCursor.offset)
          return
        }
        case 'u': {
          // Kill to start of line; prepends to the current kill-ring entry.
          const { cursor: newCursor, killed } = cursor.deleteToLineStart()
          pushToKillRing(killed, 'prepend')
          setQueryState(newCursor.text)
          setCursorOffset(newCursor.offset)
          return
        }
        case 'w': {
          // Kill word before cursor.
          const { cursor: newCursor, killed } = cursor.deleteWordBefore()
          pushToKillRing(killed, 'prepend')
          setQueryState(newCursor.text)
          setCursorOffset(newCursor.offset)
          return
        }
        case 'y': {
          // Yank: insert the most recent kill and record position so a
          // following meta+y (yankPop) can replace it.
          const text = getLastKill()
          if (text.length > 0) {
            const startOffset = cursor.offset
            const newCursor = cursor.insert(text)
            recordYank(startOffset, text.length)
            setQueryState(newCursor.text)
            setCursorOffset(newCursor.offset)
          }
          return
        }
        case 'g':
        case 'c':
          // Cancel (abandon search). ctrl+g is less's cancel key. Only
          // fires if onCancel provided — otherwise falls through and
          // returns silently (11 call sites, most expect ctrl+c to no-op).
          if (onCancel) {
            onCancel()
            return
          }
      }
      return
    }

    // Meta key bindings
    if (e.meta) {
      e.preventDefault()
      switch (e.key.toLowerCase()) {
        case 'b': // back one word
          setCursorOffset(cursor.prevWord().offset)
          return
        case 'f': // forward one word
          setCursorOffset(cursor.nextWord().offset)
          return
        case 'd': {
          // Delete word after cursor (no kill-ring push here).
          const newCursor = cursor.deleteWordAfter()
          setQueryState(newCursor.text)
          setCursorOffset(newCursor.offset)
          return
        }
        case 'y': {
          // yank-pop: replace the just-yanked span with the next entry
          // in the kill ring. Only valid right after ctrl+y / meta+y.
          const popResult = yankPop()
          if (popResult) {
            const { text, start, length } = popResult
            const before = query.slice(0, start)
            const after = query.slice(start + length)
            const newText = before + text + after
            const newOffset = start + text.length
            updateYankLength(text.length)
            setQueryState(newText)
            setCursorOffset(newOffset)
          }
          return
        }
      }
      return
    }

    // Tab: ignore
    if (e.key === 'tab') {
      return
    }

    // Regular character input. Accepts multi-char e.key so batched writes
    // (stdin.write('abc') in tests, or paste outside bracketed-paste mode)
    // insert the full chunk — matching the old useInput behavior.
    if (e.key.length >= 1 && !UNHANDLED_SPECIAL_KEYS.has(e.key)) {
      e.preventDefault()
      const newCursor = cursor.insert(e.key)
      setQueryState(newCursor.text)
      setCursorOffset(newCursor.offset)
    }
  }

  // Backward-compat bridge: existing consumers don't yet wire handleKeyDown
  // to <Box onKeyDown>. Subscribe via useInput and adapt InputEvent →
  // KeyboardEvent until all 11 call sites are migrated (separate PRs).
  // TODO(onKeyDown-migration): remove once all consumers pass handleKeyDown.
  useInput(
    (_input, _key, event) => {
      handleKeyDown(new KeyboardEvent(event.keypress))
    },
    { isActive },
  )

  return { query, setQuery, cursorOffset, handleKeyDown }
}
|
||||
@@ -0,0 +1,158 @@
|
||||
/**
|
||||
* Hook for managing session backgrounding (Ctrl+B to background/foreground sessions).
|
||||
*
|
||||
* Handles:
|
||||
* - Calling onBackgroundQuery to spawn a background task for the current query
|
||||
* - Re-backgrounding foregrounded tasks
|
||||
* - Syncing foregrounded task messages/state to main view
|
||||
*/
|
||||
|
||||
import { useCallback, useEffect, useRef } from 'react'
|
||||
import { useAppState, useSetAppState } from '../state/AppState.js'
|
||||
import type { Message } from '../types/message.js'
|
||||
|
||||
// Dependencies injected by the main view so the hook can swap what the
// primary transcript shows and what Escape aborts.
type UseSessionBackgroundingProps = {
  // Replaces (or functionally updates) the main view's message list.
  setMessages: (messages: Message[] | ((prev: Message[]) => Message[])) => void
  // Toggles the main view's loading indicator.
  setIsLoading: (loading: boolean) => void
  // Clears all loading-related state in one call.
  resetLoadingState: () => void
  // Swaps which AbortController the Escape key cancels.
  setAbortController: (controller: AbortController | null) => void
  // Spawns a background task for the current query (fresh Ctrl+B).
  onBackgroundQuery: () => void
}

type UseSessionBackgroundingResult = {
  /** Call when user wants to background (Ctrl+B) */
  handleBackgroundSession: () => void
}
|
||||
|
||||
/**
 * Manages session backgrounding (Ctrl+B).
 *
 * Two responsibilities:
 * 1. handleBackgroundSession — if a task is currently foregrounded,
 *    re-background it (mark isBackgrounded, clear the main view);
 *    otherwise delegate to onBackgroundQuery for a fresh spawn.
 * 2. An effect that mirrors the foregrounded task's messages and
 *    loading/abort state into the main view, and tears the foregrounded
 *    state down when the task is aborted, completes, or disappears.
 */
export function useSessionBackgrounding({
  setMessages,
  setIsLoading,
  resetLoadingState,
  setAbortController,
  onBackgroundQuery,
}: UseSessionBackgroundingProps): UseSessionBackgroundingResult {
  const foregroundedTaskId = useAppState(s => s.foregroundedTaskId)
  const foregroundedTask = useAppState(s =>
    s.foregroundedTaskId ? s.tasks[s.foregroundedTaskId] : undefined,
  )
  const setAppState = useSetAppState()
  // Length of the last message array synced into the main view; used as a
  // cheap change check to avoid redundant setMessages calls.
  const lastSyncedMessagesLengthRef = useRef<number>(0)

  const handleBackgroundSession = useCallback(() => {
    if (foregroundedTaskId) {
      // Re-background the foregrounded task
      setAppState(prev => {
        // Re-read from prev rather than the captured id: state may have
        // changed between render and this updater running.
        const taskId = prev.foregroundedTaskId
        if (!taskId) return prev
        const task = prev.tasks[taskId]
        if (!task) {
          return { ...prev, foregroundedTaskId: undefined }
        }
        return {
          ...prev,
          foregroundedTaskId: undefined,
          tasks: {
            ...prev.tasks,
            [taskId]: { ...task, isBackgrounded: true },
          },
        }
      })
      // Clear the main view — the task keeps running in the background.
      setMessages([])
      resetLoadingState()
      setAbortController(null)
      return
    }

    // No foregrounded task: background the current query instead.
    onBackgroundQuery()
  }, [
    foregroundedTaskId,
    setAppState,
    setMessages,
    resetLoadingState,
    setAbortController,
    onBackgroundQuery,
  ])

  // Sync foregrounded task's messages and loading state to the main view
  useEffect(() => {
    if (!foregroundedTaskId) {
      // Reset when no foregrounded task
      lastSyncedMessagesLengthRef.current = 0
      return
    }

    // Task vanished or isn't a local agent: drop the foregrounded state.
    if (!foregroundedTask || foregroundedTask.type !== 'local_agent') {
      setAppState(prev => ({ ...prev, foregroundedTaskId: undefined }))
      resetLoadingState()
      lastSyncedMessagesLengthRef.current = 0
      return
    }

    // Sync messages from background task to main view
    // Only update if messages have actually changed to avoid redundant renders
    // NOTE(review): length comparison assumes the task only ever appends
    // messages — in-place edits at equal length would be missed; confirm.
    const taskMessages = foregroundedTask.messages ?? []
    if (taskMessages.length !== lastSyncedMessagesLengthRef.current) {
      lastSyncedMessagesLengthRef.current = taskMessages.length
      setMessages([...taskMessages])
    }

    if (foregroundedTask.status === 'running') {
      // Check if the task was aborted (user pressed Escape)
      const taskAbortController = foregroundedTask.abortController
      if (taskAbortController?.signal.aborted) {
        // Task was aborted - clear foregrounded state immediately
        setAppState(prev => {
          if (!prev.foregroundedTaskId) return prev
          const task = prev.tasks[prev.foregroundedTaskId]
          if (!task) return { ...prev, foregroundedTaskId: undefined }
          return {
            ...prev,
            foregroundedTaskId: undefined,
            tasks: {
              ...prev.tasks,
              [prev.foregroundedTaskId]: { ...task, isBackgrounded: true },
            },
          }
        })
        resetLoadingState()
        setAbortController(null)
        lastSyncedMessagesLengthRef.current = 0
        return
      }

      setIsLoading(true)
      // Set abort controller to the foregrounded task's controller for Escape handling
      if (taskAbortController) {
        setAbortController(taskAbortController)
      }
    } else {
      // Task completed - restore to background and clear foregrounded view
      setAppState(prev => {
        const taskId = prev.foregroundedTaskId
        if (!taskId) return prev
        const task = prev.tasks[taskId]
        if (!task) return { ...prev, foregroundedTaskId: undefined }
        return {
          ...prev,
          foregroundedTaskId: undefined,
          tasks: { ...prev.tasks, [taskId]: { ...task, isBackgrounded: true } },
        }
      })
      resetLoadingState()
      setAbortController(null)
      lastSyncedMessagesLengthRef.current = 0
    }
  }, [
    foregroundedTaskId,
    foregroundedTask,
    setAppState,
    setMessages,
    setIsLoading,
    resetLoadingState,
    setAbortController,
  ])

  return {
    handleBackgroundSession,
  }
}
|
||||
@@ -0,0 +1,17 @@
|
||||
import { type AppState, useAppState } from '../state/AppState.js'
|
||||
|
||||
/**
 * Settings type as stored in AppState (DeepImmutable wrapped).
 * Use this type when you need to annotate variables that hold settings from useSettings().
 *
 * Derived via indexed access so it always tracks AppState's shape.
 */
export type ReadonlySettings = AppState['settings']
|
||||
|
||||
/**
|
||||
* React hook to access current settings from AppState.
|
||||
* Settings automatically update when files change on disk via settingsChangeDetector.
|
||||
*
|
||||
* Use this instead of getSettings_DEPRECATED() in React components for reactive updates.
|
||||
*/
|
||||
export function useSettings(): ReadonlySettings {
|
||||
return useAppState(s => s.settings)
|
||||
}
|
||||
@@ -0,0 +1,25 @@
|
||||
import { useCallback, useEffect } from 'react'
|
||||
import { settingsChangeDetector } from '../utils/settings/changeDetector.js'
|
||||
import type { SettingSource } from '../utils/settings/constants.js'
|
||||
import { getSettings_DEPRECATED } from '../utils/settings/settings.js'
|
||||
import type { SettingsJson } from '../utils/settings/types.js'
|
||||
|
||||
export function useSettingsChange(
|
||||
onChange: (source: SettingSource, settings: SettingsJson) => void,
|
||||
): void {
|
||||
const handleChange = useCallback(
|
||||
(source: SettingSource) => {
|
||||
// Cache is already reset by the notifier (changeDetector.fanOut) —
|
||||
// resetting here caused N-way thrashing with N subscribers: each
|
||||
// cleared the cache, re-read from disk, then the next cleared again.
|
||||
const newSettings = getSettings_DEPRECATED()
|
||||
onChange(source, newSettings)
|
||||
},
|
||||
[onChange],
|
||||
)
|
||||
|
||||
useEffect(
|
||||
() => settingsChangeDetector.subscribe(handleChange),
|
||||
[handleChange],
|
||||
)
|
||||
}
|
||||
@@ -0,0 +1,105 @@
|
||||
import { useCallback, useRef, useState } from 'react'
|
||||
import type { FeedbackSurveyResponse } from '../components/FeedbackSurvey/utils.js'
|
||||
import {
|
||||
type AnalyticsMetadata_I_VERIFIED_THIS_IS_NOT_CODE_OR_FILEPATHS,
|
||||
type AnalyticsMetadata_I_VERIFIED_THIS_IS_PII_TAGGED,
|
||||
logEvent,
|
||||
} from '../services/analytics/index.js'
|
||||
import { useAppState, useSetAppState } from '../state/AppState.js'
|
||||
import type { Message } from '../types/message.js'
|
||||
import type { SkillUpdate } from '../utils/hooks/skillImprovement.js'
|
||||
import { applySkillImprovement } from '../utils/hooks/skillImprovement.js'
|
||||
import { createSystemMessage } from '../utils/messages.js'
|
||||
|
||||
// Suggestion payload surfaced by the survey: which skill to change and
// the concrete edits to apply if the user accepts.
type SkillImprovementSuggestion = {
  // Unredacted skill name (PII-tagged when logged to analytics).
  skillName: string
  // Edits passed to applySkillImprovement on acceptance.
  updates: SkillUpdate[]
}

// Functional-update setter for the main view's message list.
type SetMessages = (fn: (prev: Message[]) => Message[]) => void
|
||||
|
||||
/**
 * Drives the skill-improvement feedback survey.
 *
 * Opens when AppState carries a suggestion, logs appearance/response
 * analytics, applies the suggested skill updates on acceptance (posting a
 * confirmation system message), and clears the suggestion from AppState
 * when the user responds.
 */
export function useSkillImprovementSurvey(setMessages: SetMessages): {
  isOpen: boolean
  suggestion: SkillImprovementSuggestion | null
  handleSelect: (selected: FeedbackSurveyResponse) => void
} {
  const suggestion = useAppState(s => s.skillImprovement.suggestion)
  const setAppState = useSetAppState()
  const [isOpen, setIsOpen] = useState(false)
  // Snapshot of the last non-null suggestion so the survey UI can keep
  // rendering it after AppState is cleared on response.
  const lastSuggestionRef = useRef(suggestion)
  // Guards the 'appeared' analytics event against duplicate renders.
  const loggedAppearanceRef = useRef(false)

  // Track the suggestion for display even after clearing AppState
  if (suggestion) {
    lastSuggestionRef.current = suggestion
  }

  // Open when a new suggestion arrives.
  // NOTE(review): render-phase setState — relies on React re-running the
  // component immediately with the adjusted state.
  if (suggestion && !isOpen) {
    setIsOpen(true)
    if (!loggedAppearanceRef.current) {
      loggedAppearanceRef.current = true
      logEvent('tengu_skill_improvement_survey', {
        event_type:
          'appeared' as AnalyticsMetadata_I_VERIFIED_THIS_IS_NOT_CODE_OR_FILEPATHS,
        // _PROTO_skill_name routes to the privileged skill_name BQ column.
        // Unredacted names don't go in additional_metadata.
        _PROTO_skill_name: (suggestion.skillName ??
          'unknown') as AnalyticsMetadata_I_VERIFIED_THIS_IS_PII_TAGGED,
      })
    }
  }

  const handleSelect = useCallback(
    (selected: FeedbackSurveyResponse) => {
      const current = lastSuggestionRef.current
      if (!current) return

      // Any non-dismissal response counts as accepting the improvement.
      const applied = selected !== 'dismissed'

      logEvent('tengu_skill_improvement_survey', {
        event_type:
          'responded' as AnalyticsMetadata_I_VERIFIED_THIS_IS_NOT_CODE_OR_FILEPATHS,
        response: (applied
          ? 'applied'
          : 'dismissed') as AnalyticsMetadata_I_VERIFIED_THIS_IS_NOT_CODE_OR_FILEPATHS,
        // _PROTO_skill_name routes to the privileged skill_name BQ column.
        // Unredacted names don't go in additional_metadata.
        _PROTO_skill_name:
          current.skillName as AnalyticsMetadata_I_VERIFIED_THIS_IS_PII_TAGGED,
      })

      if (applied) {
        // Fire-and-forget: apply the updates, then confirm in-transcript.
        // NOTE(review): no .catch here — a rejected apply is silently
        // dropped; confirm that's intended.
        void applySkillImprovement(current.skillName, current.updates).then(
          () => {
            setMessages(prev => [
              ...prev,
              createSystemMessage(
                `Skill "${current.skillName}" updated with improvements.`,
                'suggestion',
              ),
            ])
          },
        )
      }

      // Close and clear
      setIsOpen(false)
      loggedAppearanceRef.current = false
      setAppState(prev => {
        if (!prev.skillImprovement.suggestion) return prev
        return {
          ...prev,
          skillImprovement: { suggestion: null },
        }
      })
    },
    [setAppState, setMessages],
  )

  return {
    isOpen,
    // Serve the snapshot so the UI stays populated while closing.
    suggestion: lastSuggestionRef.current,
    handleSelect,
  }
}
|
||||
@@ -0,0 +1,62 @@
|
||||
import { useCallback, useEffect } from 'react'
|
||||
import type { Command } from '../commands.js'
|
||||
import {
|
||||
clearCommandMemoizationCaches,
|
||||
clearCommandsCache,
|
||||
getCommands,
|
||||
} from '../commands.js'
|
||||
import { onGrowthBookRefresh } from '../services/analytics/growthbook.js'
|
||||
import { logError } from '../utils/log.js'
|
||||
import { skillChangeDetector } from '../utils/skills/skillChangeDetector.js'
|
||||
|
||||
/**
|
||||
* Keep the commands list fresh across two triggers:
|
||||
*
|
||||
* 1. Skill file changes (watcher) — full cache clear + disk re-scan, since
|
||||
* skill content changed on disk.
|
||||
* 2. GrowthBook init/refresh — memo-only clear, since only `isEnabled()`
|
||||
* predicates may have changed. Handles commands like /btw whose gate
|
||||
* reads a flag that isn't in the disk cache yet on first session after
|
||||
* a flag rename: getCommands() runs before GB init (main.tsx:2855 vs
|
||||
* showSetupScreens at :3106), so the memoized list is baked with the
|
||||
* default. Once init populates remoteEvalFeatureValues, re-filter.
|
||||
*/
|
||||
export function useSkillsChange(
|
||||
cwd: string | undefined,
|
||||
onCommandsChange: (commands: Command[]) => void,
|
||||
): void {
|
||||
const handleChange = useCallback(async () => {
|
||||
if (!cwd) return
|
||||
try {
|
||||
// Clear all command caches to ensure fresh load
|
||||
clearCommandsCache()
|
||||
const commands = await getCommands(cwd)
|
||||
onCommandsChange(commands)
|
||||
} catch (error) {
|
||||
// Errors during reload are non-fatal - log and continue
|
||||
if (error instanceof Error) {
|
||||
logError(error)
|
||||
}
|
||||
}
|
||||
}, [cwd, onCommandsChange])
|
||||
|
||||
useEffect(() => skillChangeDetector.subscribe(handleChange), [handleChange])
|
||||
|
||||
const handleGrowthBookRefresh = useCallback(async () => {
|
||||
if (!cwd) return
|
||||
try {
|
||||
clearCommandMemoizationCaches()
|
||||
const commands = await getCommands(cwd)
|
||||
onCommandsChange(commands)
|
||||
} catch (error) {
|
||||
if (error instanceof Error) {
|
||||
logError(error)
|
||||
}
|
||||
}
|
||||
}, [cwd, onCommandsChange])
|
||||
|
||||
useEffect(
|
||||
() => onGrowthBookRefresh(handleGrowthBookRefresh),
|
||||
[handleGrowthBookRefresh],
|
||||
)
|
||||
}
|
||||
@@ -0,0 +1,81 @@
|
||||
/**
|
||||
* Swarm Initialization Hook
|
||||
*
|
||||
* Initializes swarm features: teammate hooks and context.
|
||||
* Handles both fresh spawns and resumed teammate sessions.
|
||||
*
|
||||
* This hook is conditionally loaded to allow dead code elimination when swarms are disabled.
|
||||
*/
|
||||
|
||||
import { useEffect } from 'react'
|
||||
import { getSessionId } from '../bootstrap/state.js'
|
||||
import type { AppState } from '../state/AppState.js'
|
||||
import type { Message } from '../types/message.js'
|
||||
import { isAgentSwarmsEnabled } from '../utils/agentSwarmsEnabled.js'
|
||||
import { initializeTeammateContextFromSession } from '../utils/swarm/reconnection.js'
|
||||
import { readTeamFile } from '../utils/swarm/teamHelpers.js'
|
||||
import { initializeTeammateHooks } from '../utils/swarm/teammateInit.js'
|
||||
import { getDynamicTeamContext } from '../utils/teammate.js'
|
||||
|
||||
// Functional-updater setter for AppState (matches useSetAppState's shape).
type SetAppState = (f: (prevState: AppState) => AppState) => void

/**
 * Hook that initializes swarm features when ENABLE_AGENT_SWARMS is true.
 *
 * Handles both:
 * - Resumed teammate sessions (from --resume or /resume) where teamName/agentName
 *   are stored in transcript messages
 * - Fresh spawns where context is read from environment variables
 */
export function useSwarmInitialization(
  setAppState: SetAppState,
  initialMessages: Message[] | undefined,
  { enabled = true }: { enabled?: boolean } = {},
): void {
  useEffect(() => {
    if (!enabled) return
    if (isAgentSwarmsEnabled()) {
      // Check if this is a resumed agent session (from --resume or /resume)
      // Resumed sessions have teamName/agentName stored in transcript messages
      // NOTE(review): the `in` probes + casts assume the first transcript
      // message may carry extra string fields — confirm against the
      // transcript writer.
      const firstMessage = initialMessages?.[0]
      const teamName =
        firstMessage && 'teamName' in firstMessage
          ? (firstMessage.teamName as string | undefined)
          : undefined
      const agentName =
        firstMessage && 'agentName' in firstMessage
          ? (firstMessage.agentName as string | undefined)
          : undefined

      if (teamName && agentName) {
        // Resumed agent session - set up team context from stored info
        initializeTeammateContextFromSession(setAppState, teamName, agentName)

        // Get agentId from team file for hook initialization
        const teamFile = readTeamFile(teamName)
        const member = teamFile?.members.find(
          (m: { name: string }) => m.name === agentName,
        )
        // Silently skip hook init if the member is no longer in the team file.
        if (member) {
          initializeTeammateHooks(setAppState, getSessionId(), {
            teamName,
            agentId: member.agentId,
            agentName,
          })
        }
      } else {
        // Fresh spawn or standalone session
        // teamContext is already computed in main.tsx via computeInitialTeamContext()
        // and included in initialState, so we only need to initialize hooks here
        const context = getDynamicTeamContext?.()
        if (context?.teamName && context?.agentId && context?.agentName) {
          initializeTeammateHooks(setAppState, getSessionId(), {
            teamName: context.teamName,
            agentId: context.agentId,
            agentName: context.agentName,
          })
        }
      }
    }
  }, [setAppState, initialMessages, enabled])
}
|
||||
@@ -0,0 +1,330 @@
|
||||
/**
|
||||
* Swarm Permission Poller Hook
|
||||
*
|
||||
* This hook polls for permission responses from the team leader when running
|
||||
* as a worker agent in a swarm. When a response is received, it calls the
|
||||
* appropriate callback (onAllow/onReject) to continue execution.
|
||||
*
|
||||
* This hook should be used in conjunction with the worker-side integration
|
||||
* in useCanUseTool.ts, which creates pending requests that this hook monitors.
|
||||
*/
|
||||
|
||||
import { useCallback, useEffect, useRef } from 'react'
|
||||
import { useInterval } from 'usehooks-ts'
|
||||
import { logForDebugging } from '../utils/debug.js'
|
||||
import { errorMessage } from '../utils/errors.js'
|
||||
import {
|
||||
type PermissionUpdate,
|
||||
permissionUpdateSchema,
|
||||
} from '../utils/permissions/PermissionUpdateSchema.js'
|
||||
import {
|
||||
isSwarmWorker,
|
||||
type PermissionResponse,
|
||||
pollForResponse,
|
||||
removeWorkerResponse,
|
||||
} from '../utils/swarm/permissionSync.js'
|
||||
import { getAgentName, getTeamName } from '../utils/teammate.js'
|
||||
|
||||
// Polling cadence (ms) for checking the leader's permission responses.
// NOTE(review): presumably consumed by a useInterval below this chunk — confirm.
const POLL_INTERVAL_MS = 500
|
||||
|
||||
/**
|
||||
* Validate permissionUpdates from external sources (mailbox IPC, disk polling).
|
||||
* Malformed entries from buggy/old teammate processes are filtered out rather
|
||||
* than propagated unchecked into callback.onAllow().
|
||||
*/
|
||||
function parsePermissionUpdates(raw: unknown): PermissionUpdate[] {
|
||||
if (!Array.isArray(raw)) {
|
||||
return []
|
||||
}
|
||||
const schema = permissionUpdateSchema()
|
||||
const valid: PermissionUpdate[] = []
|
||||
for (const entry of raw) {
|
||||
const result = schema.safeParse(entry)
|
||||
if (result.success) {
|
||||
valid.push(result.data)
|
||||
} else {
|
||||
logForDebugging(
|
||||
`[SwarmPermissionPoller] Dropping malformed permissionUpdate entry: ${result.error.message}`,
|
||||
{ level: 'warn' },
|
||||
)
|
||||
}
|
||||
}
|
||||
return valid
|
||||
}
|
||||
|
||||
/**
 * Callback signature for handling permission responses
 */
export type PermissionResponseCallback = {
  // Correlates this callback with the original permission request.
  requestId: string
  // The tool invocation awaiting the decision.
  toolUseId: string
  // Invoked on approval. NOTE(review): updatedInput appears to be a
  // leader-edited tool input — confirm against the response producer.
  onAllow: (
    updatedInput: Record<string, unknown> | undefined,
    permissionUpdates: PermissionUpdate[],
    feedback?: string,
  ) => void
  // Invoked on rejection, with optional reviewer feedback.
  onReject: (feedback?: string) => void
}

/**
 * Registry for pending permission request callbacks
 * This allows the poller to find and invoke the right callbacks when responses arrive
 */
type PendingCallbackRegistry = Map<string, PermissionResponseCallback>

// Module-level registry that persists across renders
const pendingCallbacks: PendingCallbackRegistry = new Map()
|
||||
|
||||
/**
|
||||
* Register a callback for a pending permission request
|
||||
* Called by useCanUseTool when a worker submits a permission request
|
||||
*/
|
||||
export function registerPermissionCallback(
|
||||
callback: PermissionResponseCallback,
|
||||
): void {
|
||||
pendingCallbacks.set(callback.requestId, callback)
|
||||
logForDebugging(
|
||||
`[SwarmPermissionPoller] Registered callback for request ${callback.requestId}`,
|
||||
)
|
||||
}
|
||||
|
||||
/**
|
||||
* Unregister a callback (e.g., when the request is resolved locally or times out)
|
||||
*/
|
||||
export function unregisterPermissionCallback(requestId: string): void {
|
||||
pendingCallbacks.delete(requestId)
|
||||
logForDebugging(
|
||||
`[SwarmPermissionPoller] Unregistered callback for request ${requestId}`,
|
||||
)
|
||||
}
|
||||
|
||||
/**
|
||||
* Check if a request has a registered callback
|
||||
*/
|
||||
export function hasPermissionCallback(requestId: string): boolean {
|
||||
return pendingCallbacks.has(requestId)
|
||||
}
|
||||
|
||||
/**
|
||||
* Clear all pending callbacks (both permission and sandbox).
|
||||
* Called from clearSessionCaches() on /clear to reset stale state,
|
||||
* and also used in tests for isolation.
|
||||
*/
|
||||
export function clearAllPendingCallbacks(): void {
|
||||
pendingCallbacks.clear()
|
||||
pendingSandboxCallbacks.clear()
|
||||
}
|
||||
|
||||
/**
|
||||
* Process a permission response from a mailbox message.
|
||||
* This is called by the inbox poller when it detects a permission_response message.
|
||||
*
|
||||
* @returns true if the response was processed, false if no callback was registered
|
||||
*/
|
||||
export function processMailboxPermissionResponse(params: {
|
||||
requestId: string
|
||||
decision: 'approved' | 'rejected'
|
||||
feedback?: string
|
||||
updatedInput?: Record<string, unknown>
|
||||
permissionUpdates?: unknown
|
||||
}): boolean {
|
||||
const callback = pendingCallbacks.get(params.requestId)
|
||||
|
||||
if (!callback) {
|
||||
logForDebugging(
|
||||
`[SwarmPermissionPoller] No callback registered for mailbox response ${params.requestId}`,
|
||||
)
|
||||
return false
|
||||
}
|
||||
|
||||
logForDebugging(
|
||||
`[SwarmPermissionPoller] Processing mailbox response for request ${params.requestId}: ${params.decision}`,
|
||||
)
|
||||
|
||||
// Remove from registry before invoking callback
|
||||
pendingCallbacks.delete(params.requestId)
|
||||
|
||||
if (params.decision === 'approved') {
|
||||
const permissionUpdates = parsePermissionUpdates(params.permissionUpdates)
|
||||
const updatedInput = params.updatedInput
|
||||
callback.onAllow(updatedInput, permissionUpdates)
|
||||
} else {
|
||||
callback.onReject(params.feedback)
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
// ============================================================================
|
||||
// Sandbox Permission Callback Registry
|
||||
// ============================================================================
|
||||
|
||||
/**
 * Callback signature for handling sandbox permission responses.
 * `resolve` settles the decision the requesting side is waiting on.
 */
export type SandboxPermissionResponseCallback = {
  requestId: string
  host: string
  resolve: (allow: boolean) => void
}

// Module-level registry for sandbox permission callbacks, keyed by requestId.
const pendingSandboxCallbacks: Map<string, SandboxPermissionResponseCallback> =
  new Map()
|
||||
|
||||
/**
|
||||
* Register a callback for a pending sandbox permission request
|
||||
* Called when a worker sends a sandbox permission request to the leader
|
||||
*/
|
||||
export function registerSandboxPermissionCallback(
|
||||
callback: SandboxPermissionResponseCallback,
|
||||
): void {
|
||||
pendingSandboxCallbacks.set(callback.requestId, callback)
|
||||
logForDebugging(
|
||||
`[SwarmPermissionPoller] Registered sandbox callback for request ${callback.requestId}`,
|
||||
)
|
||||
}
|
||||
|
||||
/**
|
||||
* Check if a sandbox request has a registered callback
|
||||
*/
|
||||
export function hasSandboxPermissionCallback(requestId: string): boolean {
|
||||
return pendingSandboxCallbacks.has(requestId)
|
||||
}
|
||||
|
||||
/**
|
||||
* Process a sandbox permission response from a mailbox message.
|
||||
* Called by the inbox poller when it detects a sandbox_permission_response message.
|
||||
*
|
||||
* @returns true if the response was processed, false if no callback was registered
|
||||
*/
|
||||
export function processSandboxPermissionResponse(params: {
|
||||
requestId: string
|
||||
host: string
|
||||
allow: boolean
|
||||
}): boolean {
|
||||
const callback = pendingSandboxCallbacks.get(params.requestId)
|
||||
|
||||
if (!callback) {
|
||||
logForDebugging(
|
||||
`[SwarmPermissionPoller] No sandbox callback registered for request ${params.requestId}`,
|
||||
)
|
||||
return false
|
||||
}
|
||||
|
||||
logForDebugging(
|
||||
`[SwarmPermissionPoller] Processing sandbox response for request ${params.requestId}: allow=${params.allow}`,
|
||||
)
|
||||
|
||||
// Remove from registry before invoking callback
|
||||
pendingSandboxCallbacks.delete(params.requestId)
|
||||
|
||||
// Resolve the promise with the allow decision
|
||||
callback.resolve(params.allow)
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
/**
|
||||
* Process a permission response by invoking the registered callback
|
||||
*/
|
||||
function processResponse(response: PermissionResponse): boolean {
|
||||
const callback = pendingCallbacks.get(response.requestId)
|
||||
|
||||
if (!callback) {
|
||||
logForDebugging(
|
||||
`[SwarmPermissionPoller] No callback registered for request ${response.requestId}`,
|
||||
)
|
||||
return false
|
||||
}
|
||||
|
||||
logForDebugging(
|
||||
`[SwarmPermissionPoller] Processing response for request ${response.requestId}: ${response.decision}`,
|
||||
)
|
||||
|
||||
// Remove from registry before invoking callback
|
||||
pendingCallbacks.delete(response.requestId)
|
||||
|
||||
if (response.decision === 'approved') {
|
||||
const permissionUpdates = parsePermissionUpdates(response.permissionUpdates)
|
||||
const updatedInput = response.updatedInput
|
||||
callback.onAllow(updatedInput, permissionUpdates)
|
||||
} else {
|
||||
callback.onReject(response.feedback)
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
/**
 * Hook that polls for permission responses when running as a swarm worker.
 *
 * This hook:
 * 1. Only activates when isSwarmWorker() returns true
 * 2. Polls every 500ms for responses
 * 3. When a response is found, invokes the registered callback
 * 4. Cleans up the response file after processing
 */
export function useSwarmPermissionPoller(): void {
  // True while an async poll pass is in flight; prevents overlapping passes
  // when the interval fires faster than a pass completes.
  const isProcessingRef = useRef(false)

  // Empty deps: poll reads only refs and module-level state, so a single
  // stable identity is safe and keeps the mount effect from re-running.
  const poll = useCallback(async () => {
    // Don't poll if not a swarm worker
    if (!isSwarmWorker()) {
      return
    }

    // Prevent concurrent polling
    if (isProcessingRef.current) {
      return
    }

    // Don't poll if no callbacks are registered
    if (pendingCallbacks.size === 0) {
      return
    }

    isProcessingRef.current = true

    try {
      const agentName = getAgentName()
      const teamName = getTeamName()

      if (!agentName || !teamName) {
        return
      }

      // Check each pending request for a response
      for (const [requestId, _callback] of pendingCallbacks) {
        const response = await pollForResponse(requestId, agentName, teamName)

        if (response) {
          // Process the response
          const processed = processResponse(response)

          if (processed) {
            // Clean up the response from the worker's inbox
            await removeWorkerResponse(requestId, agentName, teamName)
          }
        }
      }
    } catch (error) {
      logForDebugging(
        `[SwarmPermissionPoller] Error during poll: ${errorMessage(error)}`,
      )
    } finally {
      // Always release the in-flight guard, even on error.
      isProcessingRef.current = false
    }
  }, [])

  // Only poll if we're a swarm worker. Note: isSwarmWorker() is read during
  // render here — assumes its result is stable for the component's lifetime;
  // TODO confirm against its implementation.
  const shouldPoll = isSwarmWorker()
  useInterval(() => void poll(), shouldPoll ? POLL_INTERVAL_MS : null)

  // Initial poll on mount
  useEffect(() => {
    if (isSwarmWorker()) {
      void poll()
    }
  }, [poll])
}
|
||||
@@ -0,0 +1,221 @@
|
||||
import { type FSWatcher, watch } from 'fs'
|
||||
import { useEffect, useRef } from 'react'
|
||||
import { logForDebugging } from '../utils/debug.js'
|
||||
import {
|
||||
claimTask,
|
||||
DEFAULT_TASKS_MODE_TASK_LIST_ID,
|
||||
ensureTasksDir,
|
||||
getTasksDir,
|
||||
listTasks,
|
||||
type Task,
|
||||
updateTask,
|
||||
} from '../utils/tasks.js'
|
||||
|
||||
// Collapse bursts of fs events into a single task check.
const DEBOUNCE_MS = 1000

type Props = {
  /** When undefined, the hook does nothing. The task list id is also used as the agent ID. */
  taskListId?: string
  // True while a turn is in progress; new tasks are not picked up mid-turn.
  isLoading: boolean
  /**
   * Called when a task is ready to be worked on.
   * Returns true if submission succeeded, false if rejected.
   */
  onSubmitTask: (prompt: string) => boolean
}
|
||||
|
||||
/**
 * Hook that watches a task list directory and automatically picks up
 * open, unowned tasks to work on.
 *
 * This enables "tasks mode" where Claude watches for externally-created
 * tasks and processes them one at a time.
 */
export function useTaskListWatcher({
  taskListId,
  isLoading,
  onSubmitTask,
}: Props): void {
  // ID of the task we currently own, or null when idle.
  const currentTaskRef = useRef<string | null>(null)
  const debounceTimerRef = useRef<ReturnType<typeof setTimeout> | null>(null)

  // Stabilize unstable props via refs so the watcher effect doesn't depend on
  // them. isLoading flips every turn, and onSubmitTask's identity changes
  // whenever onQuery's deps change. Without this, the watcher effect re-runs
  // on every turn, calling watcher.close() + watch() each time — which is a
  // trigger for Bun's PathWatcherManager deadlock (oven-sh/bun#27469).
  const isLoadingRef = useRef(isLoading)
  isLoadingRef.current = isLoading
  const onSubmitTaskRef = useRef(onSubmitTask)
  onSubmitTaskRef.current = onSubmitTask

  const enabled = taskListId !== undefined
  const agentId = taskListId ?? DEFAULT_TASKS_MODE_TASK_LIST_ID

  // checkForTasks reads isLoading and onSubmitTask from refs — always
  // up-to-date, no stale closure, and doesn't force a new function identity
  // per render. Stored in a ref so the watcher effect can call it without
  // depending on it.
  const checkForTasksRef = useRef<() => Promise<void>>(async () => {})
  checkForTasksRef.current = async () => {
    if (!enabled) {
      return
    }

    // Don't need to submit new tasks if we are already working
    if (isLoadingRef.current) {
      return
    }

    const tasks = await listTasks(taskListId)

    // If we have a current task, check if it's been resolved
    if (currentTaskRef.current !== null) {
      const currentTask = tasks.find(t => t.id === currentTaskRef.current)
      if (!currentTask || currentTask.status === 'completed') {
        logForDebugging(
          `[TaskListWatcher] Task #${currentTaskRef.current} is marked complete, ready for next task`,
        )
        currentTaskRef.current = null
      } else {
        // Still working on current task
        return
      }
    }

    // Find an open task with no owner that isn't blocked
    const availableTask = findAvailableTask(tasks)

    if (!availableTask) {
      return
    }

    logForDebugging(
      `[TaskListWatcher] Found available task #${availableTask.id}: ${availableTask.subject}`,
    )

    // Claim the task using the task list's agent ID. claimTask presumably
    // rejects if another agent claimed it first — verify against its impl.
    const result = await claimTask(taskListId, availableTask.id, agentId)

    if (!result.success) {
      logForDebugging(
        `[TaskListWatcher] Failed to claim task #${availableTask.id}: ${result.reason}`,
      )
      return
    }

    currentTaskRef.current = availableTask.id

    // Format the task as a prompt
    const prompt = formatTaskAsPrompt(availableTask)

    logForDebugging(
      `[TaskListWatcher] Submitting task #${availableTask.id} as prompt`,
    )

    const submitted = onSubmitTaskRef.current(prompt)
    if (!submitted) {
      logForDebugging(
        `[TaskListWatcher] Failed to submit task #${availableTask.id}, releasing claim`,
      )
      // Release the claim so another agent (or a later check) can take it.
      await updateTask(taskListId, availableTask.id, { owner: undefined })
      currentTaskRef.current = null
    }
  }

  // -- Watcher setup

  // Schedules a check after DEBOUNCE_MS, collapsing rapid fs events.
  // Shared between the watcher callback and the idle-trigger effect below.
  const scheduleCheckRef = useRef<() => void>(() => {})

  useEffect(() => {
    if (!enabled) return

    // NOTE(review): ensureTasksDir is fire-and-forget, so watch() below can
    // race it on first run — the catch block handles that case.
    void ensureTasksDir(taskListId)
    const tasksDir = getTasksDir(taskListId)

    let watcher: FSWatcher | null = null

    const debouncedCheck = (): void => {
      if (debounceTimerRef.current) {
        clearTimeout(debounceTimerRef.current)
      }
      // Pass the ref (not ref.current) as the timer arg so the freshest
      // checkForTasks closure runs when the timer fires.
      debounceTimerRef.current = setTimeout(
        ref => void ref.current(),
        DEBOUNCE_MS,
        checkForTasksRef,
      )
    }
    scheduleCheckRef.current = debouncedCheck

    try {
      watcher = watch(tasksDir, debouncedCheck)
      watcher.unref()
      logForDebugging(`[TaskListWatcher] Watching for tasks in ${tasksDir}`)
    } catch (error) {
      // fs.watch throws synchronously on ENOENT — ensureTasksDir should have
      // created the dir, but handle the race gracefully
      logForDebugging(`[TaskListWatcher] Failed to watch ${tasksDir}: ${error}`)
    }

    // Initial check
    debouncedCheck()

    return () => {
      // This cleanup only fires when taskListId changes or on unmount —
      // never per-turn. That keeps watcher.close() out of the Bun
      // PathWatcherManager deadlock window.
      scheduleCheckRef.current = () => {}
      if (watcher) {
        watcher.close()
      }
      if (debounceTimerRef.current) {
        clearTimeout(debounceTimerRef.current)
      }
    }
  }, [enabled, taskListId])

  // Previously, the watcher effect depended on checkForTasks (and transitively
  // isLoading), so going idle triggered a re-setup whose initial debouncedCheck
  // would pick up the next task. Preserve that behavior explicitly: when
  // isLoading drops, schedule a check.
  useEffect(() => {
    if (!enabled) return
    if (isLoading) return
    scheduleCheckRef.current()
  }, [enabled, isLoading])
}
|
||||
|
||||
/**
|
||||
* Find an available task that can be worked on:
|
||||
* - Status is 'pending'
|
||||
* - No owner assigned
|
||||
* - Not blocked by any unresolved tasks
|
||||
*/
|
||||
function findAvailableTask(tasks: Task[]): Task | undefined {
|
||||
const unresolvedTaskIds = new Set(
|
||||
tasks.filter(t => t.status !== 'completed').map(t => t.id),
|
||||
)
|
||||
|
||||
return tasks.find(task => {
|
||||
if (task.status !== 'pending') return false
|
||||
if (task.owner) return false
|
||||
// Check all blockers are completed
|
||||
return task.blockedBy.every(id => !unresolvedTaskIds.has(id))
|
||||
})
|
||||
}
|
||||
|
||||
/**
|
||||
* Format a task as a prompt for Claude to work on.
|
||||
*/
|
||||
function formatTaskAsPrompt(task: Task): string {
|
||||
let prompt = `Complete all open tasks. Start with task #${task.id}: \n\n ${task.subject}`
|
||||
|
||||
if (task.description) {
|
||||
prompt += `\n\n${task.description}`
|
||||
}
|
||||
|
||||
return prompt
|
||||
}
|
||||
@@ -0,0 +1,250 @@
|
||||
import { type FSWatcher, watch } from 'fs'
|
||||
import { useEffect, useSyncExternalStore } from 'react'
|
||||
import { useAppState, useSetAppState } from '../state/AppState.js'
|
||||
import { createSignal } from '../utils/signal.js'
|
||||
import type { Task } from '../utils/tasks.js'
|
||||
import {
|
||||
getTaskListId,
|
||||
getTasksDir,
|
||||
isTodoV2Enabled,
|
||||
listTasks,
|
||||
onTasksUpdated,
|
||||
resetTaskList,
|
||||
} from '../utils/tasks.js'
|
||||
import { isTeamLead } from '../utils/teammate.js'
|
||||
|
||||
// How long a fully-completed task list stays visible before being hidden.
const HIDE_DELAY_MS = 5000
// Collapse bursts of fs events / update notifications into one fetch.
const DEBOUNCE_MS = 50
const FALLBACK_POLL_MS = 5000 // Fallback in case fs.watch misses events
|
||||
|
||||
/**
 * Singleton store for the TodoV2 task list. Owns the file watcher, timers,
 * and cached task list. Multiple hook instances (REPL, Spinner,
 * PromptInputFooterLeftSide) subscribe to one shared store instead of each
 * setting up their own fs.watch on the same directory. The Spinner mounts/
 * unmounts every turn — per-hook watchers caused constant watch/unwatch churn.
 *
 * Implements the useSyncExternalStore contract: subscribe/getSnapshot.
 */
class TasksV2Store {
  /** Stable array reference; replaced only on fetch. undefined until started. */
  #tasks: Task[] | undefined = undefined
  /**
   * Set when the hide timer has elapsed (all tasks completed for >5s), or
   * when the task list is empty. Starts false so the first fetch runs the
   * "all completed → schedule 5s hide" path (matches original behavior:
   * resuming a session with completed tasks shows them briefly).
   */
  #hidden = false
  // fs.watch handle and the directory it currently watches (null until set up).
  #watcher: FSWatcher | null = null
  #watchedDir: string | null = null
  // Timers: hide-after-completion, fetch debounce, and fallback polling.
  #hideTimer: ReturnType<typeof setTimeout> | null = null
  #debounceTimer: ReturnType<typeof setTimeout> | null = null
  #pollTimer: ReturnType<typeof setTimeout> | null = null
  #unsubscribeTasksUpdated: (() => void) | null = null
  // Signal used to notify all subscribed hook instances of a state change.
  #changed = createSignal()
  #subscriberCount = 0
  #started = false

  /**
   * useSyncExternalStore snapshot. Returns the same Task[] reference between
   * updates (required for Object.is stability). Returns undefined when hidden.
   */
  getSnapshot = (): Task[] | undefined => {
    return this.#hidden ? undefined : this.#tasks
  }

  subscribe = (fn: () => void): (() => void) => {
    // Lazy init on first subscriber. useSyncExternalStore calls this
    // post-commit, so I/O here is safe (no render-phase side effects).
    // REPL.tsx keeps a subscription alive for the whole session, so
    // Spinner mount/unmount churn never drives the count to zero.
    const unsubscribe = this.#changed.subscribe(fn)
    this.#subscriberCount++
    if (!this.#started) {
      this.#started = true
      this.#unsubscribeTasksUpdated = onTasksUpdated(this.#debouncedFetch)
      // Fire-and-forget: subscribe is called post-commit (not in render),
      // and the store notifies subscribers when the fetch resolves.
      void this.#fetch()
    }
    // Guard against double-invocation of the returned unsubscribe, which
    // would otherwise decrement the count twice.
    let unsubscribed = false
    return () => {
      if (unsubscribed) return
      unsubscribed = true
      unsubscribe()
      this.#subscriberCount--
      if (this.#subscriberCount === 0) this.#stop()
    }
  }

  // Notify every subscribed hook instance that the snapshot changed.
  #notify(): void {
    this.#changed.emit()
  }

  /**
   * Point the file watcher at the current tasks directory. Called on start
   * and whenever #fetch detects the task list ID has changed (e.g. when
   * TeamCreateTool sets leaderTeamName mid-session).
   */
  #rewatch(dir: string): void {
    // Retry even on same dir if the previous watch attempt failed (dir
    // didn't exist yet). Once the watcher is established, same-dir is a no-op.
    if (dir === this.#watchedDir && this.#watcher !== null) return
    this.#watcher?.close()
    this.#watcher = null
    this.#watchedDir = dir
    try {
      this.#watcher = watch(dir, this.#debouncedFetch)
      this.#watcher.unref()
    } catch {
      // Directory may not exist yet (ensureTasksDir is called by writers).
      // Not critical — onTasksUpdated covers in-process updates and the
      // poll timer covers cross-process updates.
    }
  }

  // Coalesce rapid change events into a single #fetch after DEBOUNCE_MS.
  #debouncedFetch = (): void => {
    if (this.#debounceTimer) clearTimeout(this.#debounceTimer)
    this.#debounceTimer = setTimeout(() => void this.#fetch(), DEBOUNCE_MS)
    this.#debounceTimer.unref()
  }

  #fetch = async (): Promise<void> => {
    const taskListId = getTaskListId()
    // Task list ID can change mid-session (TeamCreateTool sets
    // leaderTeamName) — point the watcher at the current dir.
    this.#rewatch(getTasksDir(taskListId))
    // Internal bookkeeping tasks are never shown in the UI.
    const current = (await listTasks(taskListId)).filter(
      t => !t.metadata?._internal,
    )
    this.#tasks = current

    const hasIncomplete = current.some(t => t.status !== 'completed')

    if (hasIncomplete || current.length === 0) {
      // Has unresolved tasks (open/in_progress) or empty — reset hide state
      this.#hidden = current.length === 0
      this.#clearHideTimer()
    } else if (this.#hideTimer === null && !this.#hidden) {
      // All tasks just became completed — schedule clear
      this.#hideTimer = setTimeout(
        this.#onHideTimerFired.bind(this, taskListId),
        HIDE_DELAY_MS,
      )
      this.#hideTimer.unref()
    }

    this.#notify()

    // Schedule fallback poll only when there are incomplete tasks that
    // need monitoring. When all tasks are completed (or there are none),
    // the fs.watch watcher and onTasksUpdated callback are sufficient to
    // detect new activity — no need to keep polling and re-rendering.
    if (this.#pollTimer) {
      clearTimeout(this.#pollTimer)
      this.#pollTimer = null
    }
    if (hasIncomplete) {
      this.#pollTimer = setTimeout(this.#debouncedFetch, FALLBACK_POLL_MS)
      this.#pollTimer.unref()
    }
  }

  // Runs HIDE_DELAY_MS after the whole list first became completed.
  #onHideTimerFired(scheduledForTaskListId: string): void {
    this.#hideTimer = null
    // Bail if the task list ID changed since scheduling (team created/deleted
    // during the 5s window) — don't reset the wrong list.
    const currentId = getTaskListId()
    if (currentId !== scheduledForTaskListId) return
    // Verify all tasks are still completed before clearing
    void listTasks(currentId).then(async tasksToCheck => {
      const allStillCompleted =
        tasksToCheck.length > 0 &&
        tasksToCheck.every(t => t.status === 'completed')
      if (allStillCompleted) {
        await resetTaskList(currentId)
        this.#tasks = []
        this.#hidden = true
      }
      this.#notify()
    })
  }

  #clearHideTimer(): void {
    if (this.#hideTimer) {
      clearTimeout(this.#hideTimer)
      this.#hideTimer = null
    }
  }

  /**
   * Tear down the watcher, timers, and in-process subscription. Called when
   * the last subscriber unsubscribes. Preserves #tasks/#hidden cache so a
   * subsequent re-subscribe renders the last known state immediately.
   */
  #stop(): void {
    this.#watcher?.close()
    this.#watcher = null
    this.#watchedDir = null
    this.#unsubscribeTasksUpdated?.()
    this.#unsubscribeTasksUpdated = null
    this.#clearHideTimer()
    if (this.#debounceTimer) clearTimeout(this.#debounceTimer)
    if (this.#pollTimer) clearTimeout(this.#pollTimer)
    this.#debounceTimer = null
    this.#pollTimer = null
    this.#started = false
  }
}
|
||||
|
||||
let _store: TasksV2Store | null = null
|
||||
function getStore(): TasksV2Store {
|
||||
return (_store ??= new TasksV2Store())
|
||||
}
|
||||
|
||||
// Stable no-ops for the disabled path so useSyncExternalStore doesn't
// churn its subscription on every render.
const NOOP = (): void => {}
// Subscribe that registers nothing and returns a no-op unsubscribe.
const NOOP_SUBSCRIBE = (): (() => void) => NOOP
// Snapshot for the disabled path: always "no tasks to show".
const NOOP_SNAPSHOT = (): undefined => undefined
|
||||
|
||||
/**
 * Hook to get the current task list for the persistent UI display.
 * Returns tasks when TodoV2 is enabled, otherwise returns undefined.
 * All hook instances share a single file watcher via TasksV2Store.
 * Hides the list after 5 seconds if there are no open tasks.
 */
export function useTasksV2(): Task[] | undefined {
  const teamContext = useAppState(s => s.teamContext)

  // Only render the list outside a team, or when this session is the lead.
  const enabled = isTodoV2Enabled() && (!teamContext || isTeamLead(teamContext))

  const store = enabled ? getStore() : null

  // The NOOP fallbacks are module-level constants, so the disabled path
  // never causes useSyncExternalStore to resubscribe across renders.
  return useSyncExternalStore(
    store ? store.subscribe : NOOP_SUBSCRIBE,
    store ? store.getSnapshot : NOOP_SNAPSHOT,
  )
}
|
||||
|
||||
/**
|
||||
* Same as useTasksV2, plus collapses the expanded task view when the list
|
||||
* becomes hidden. Call this from exactly one always-mounted component (REPL)
|
||||
* so the collapse effect runs once instead of N× per consumer.
|
||||
*/
|
||||
export function useTasksV2WithCollapseEffect(): Task[] | undefined {
|
||||
const tasks = useTasksV2()
|
||||
const setAppState = useSetAppState()
|
||||
|
||||
const hidden = tasks === undefined
|
||||
useEffect(() => {
|
||||
if (!hidden) return
|
||||
setAppState(prev => {
|
||||
if (prev.expandedView !== 'tasks') return prev
|
||||
return { ...prev, expandedView: 'none' as const }
|
||||
})
|
||||
}, [hidden, setAppState])
|
||||
|
||||
return tasks
|
||||
}
|
||||
@@ -0,0 +1,63 @@
|
||||
import { useEffect } from 'react'
|
||||
import { useAppState, useSetAppState } from '../state/AppState.js'
|
||||
import { exitTeammateView } from '../state/teammateViewHelpers.js'
|
||||
import { isInProcessTeammateTask } from '../tasks/InProcessTeammateTask/types.js'
|
||||
|
||||
/**
 * Auto-exits teammate viewing mode when the viewed teammate
 * is killed or encounters an error. Users stay viewing completed
 * teammates so they can review the full transcript.
 */
export function useTeammateViewAutoExit(): void {
  const setAppState = useSetAppState()
  const viewingAgentTaskId = useAppState(s => s.viewingAgentTaskId)
  // Select only the viewed task, not the full tasks map — otherwise every
  // streaming update from any teammate re-renders this hook.
  const task = useAppState(s =>
    s.viewingAgentTaskId ? s.tasks[s.viewingAgentTaskId] : undefined,
  )

  // Narrow to a teammate task; local_agent tasks stay undefined here.
  const viewedTask = task && isInProcessTeammateTask(task) ? task : undefined
  // Extracted as primitives so the effect's deps compare by value.
  const viewedStatus = viewedTask?.status
  const viewedError = viewedTask?.error
  const taskExists = task !== undefined

  useEffect(() => {
    // Not viewing any teammate
    if (!viewingAgentTaskId) {
      return
    }

    // Task no longer exists in the map — evicted out from under us.
    // Check raw `task` not teammate-narrowed `viewedTask`; local_agent
    // tasks exist but narrow to undefined, which would eject immediately.
    if (!taskExists) {
      exitTeammateView(setAppState)
      return
    }
    // Status checks below are teammate-only (viewedTask is teammate-narrowed).
    // For local_agent, viewedStatus is undefined → all checks falsy → no eject.
    if (!viewedTask) return

    // Auto-exit if teammate is killed, stopped, has error, or is no longer running
    // This handles shutdown scenarios where teammate becomes inactive
    if (
      viewedStatus === 'killed' ||
      viewedStatus === 'failed' ||
      viewedError ||
      (viewedStatus !== 'running' &&
        viewedStatus !== 'completed' &&
        viewedStatus !== 'pending')
    ) {
      exitTeammateView(setAppState)
      return
    }
  }, [
    viewingAgentTaskId,
    taskExists,
    viewedTask,
    viewedStatus,
    viewedError,
    setAppState,
  ])
}
|
||||
File diff suppressed because one or more lines are too long
@@ -0,0 +1,15 @@
|
||||
import { useContext } from 'react'
|
||||
import {
|
||||
type TerminalSize,
|
||||
TerminalSizeContext,
|
||||
} from 'src/ink/components/TerminalSizeContext.js'
|
||||
|
||||
export function useTerminalSize(): TerminalSize {
|
||||
const size = useContext(TerminalSizeContext)
|
||||
|
||||
if (!size) {
|
||||
throw new Error('useTerminalSize must be used within an Ink App component')
|
||||
}
|
||||
|
||||
return size
|
||||
}
|
||||
@@ -0,0 +1,529 @@
|
||||
import { isInputModeCharacter } from 'src/components/PromptInput/inputModes.js'
|
||||
import { useNotifications } from 'src/context/notifications.js'
|
||||
import stripAnsi from 'strip-ansi'
|
||||
import { markBackslashReturnUsed } from '../commands/terminalSetup/terminalSetup.js'
|
||||
import { addToHistory } from '../history.js'
|
||||
import type { Key } from '../ink.js'
|
||||
import type {
|
||||
InlineGhostText,
|
||||
TextInputState,
|
||||
} from '../types/textInputTypes.js'
|
||||
import {
|
||||
Cursor,
|
||||
getLastKill,
|
||||
pushToKillRing,
|
||||
recordYank,
|
||||
resetKillAccumulation,
|
||||
resetYankState,
|
||||
updateYankLength,
|
||||
yankPop,
|
||||
} from '../utils/Cursor.js'
|
||||
import { env } from '../utils/env.js'
|
||||
import { isFullscreenEnvEnabled } from '../utils/fullscreen.js'
|
||||
import type { ImageDimensions } from '../utils/imageResizer.js'
|
||||
import { isModifierPressed, prewarmModifiers } from '../utils/modifiers.js'
|
||||
import { useDoublePress } from './useDoublePress.js'
|
||||
|
||||
type MaybeCursor = void | Cursor
|
||||
type InputHandler = (input: string) => MaybeCursor
|
||||
type InputMapper = (input: string) => MaybeCursor
|
||||
const NOOP_HANDLER: InputHandler = () => {}
|
||||
function mapInput(input_map: Array<[string, InputHandler]>): InputMapper {
|
||||
const map = new Map(input_map)
|
||||
return function (input: string): MaybeCursor {
|
||||
return (map.get(input) ?? NOOP_HANDLER)(input)
|
||||
}
|
||||
}
|
||||
|
||||
export type UseTextInputProps = {
  /** Controlled input value. */
  value: string
  onChange: (value: string) => void
  onSubmit?: (value: string) => void
  onExit?: () => void
  /** Toggles the exit hint; `key` names the triggering chord (e.g. 'Ctrl-C'). */
  onExitMessage?: (show: boolean, key?: string) => void
  onHistoryUp?: () => void
  onHistoryDown?: () => void
  onHistoryReset?: () => void
  onClearInput?: () => void
  focus?: boolean
  // Presumably replaces rendered characters (password-style) — confirm in
  // the rendering code.
  mask?: string
  multiline?: boolean
  cursorChar: string
  highlightPastedText?: boolean
  /** Renders text with inverted colors (used for the cursor cell). */
  invert: (text: string) => string
  themeText: (text: string) => string
  /** Terminal width used for cursor/line-wrapping math. */
  columns: number
  onImagePaste?: (
    base64Image: string,
    mediaType?: string,
    filename?: string,
    dimensions?: ImageDimensions,
    sourcePath?: string,
  ) => void
  disableCursorMovementForUpDownKeys?: boolean
  disableEscapeDoublePress?: boolean
  maxVisibleLines?: number
  /** Cursor offset is owned by the parent (controlled), paired with onOffsetChange. */
  externalOffset: number
  onOffsetChange: (offset: number) => void
  /** Pre-processes raw input before it is applied. */
  inputFilter?: (input: string, key: Key) => string
  inlineGhostText?: InlineGhostText
  dim?: (text: string) => string
}
|
||||
|
||||
export function useTextInput({
|
||||
value: originalValue,
|
||||
onChange,
|
||||
onSubmit,
|
||||
onExit,
|
||||
onExitMessage,
|
||||
onHistoryUp,
|
||||
onHistoryDown,
|
||||
onHistoryReset,
|
||||
onClearInput,
|
||||
mask = '',
|
||||
multiline = false,
|
||||
cursorChar,
|
||||
invert,
|
||||
columns,
|
||||
onImagePaste: _onImagePaste,
|
||||
disableCursorMovementForUpDownKeys = false,
|
||||
disableEscapeDoublePress = false,
|
||||
maxVisibleLines,
|
||||
externalOffset,
|
||||
onOffsetChange,
|
||||
inputFilter,
|
||||
inlineGhostText,
|
||||
dim,
|
||||
}: UseTextInputProps): TextInputState {
|
||||
// Pre-warm the modifiers module for Apple Terminal (has internal guard, safe to call multiple times)
|
||||
if (env.terminal === 'Apple_Terminal') {
|
||||
prewarmModifiers()
|
||||
}
|
||||
|
||||
const offset = externalOffset
|
||||
const setOffset = onOffsetChange
|
||||
const cursor = Cursor.fromText(originalValue, columns, offset)
|
||||
const { addNotification, removeNotification } = useNotifications()
|
||||
|
||||
const handleCtrlC = useDoublePress(
|
||||
show => {
|
||||
onExitMessage?.(show, 'Ctrl-C')
|
||||
},
|
||||
() => onExit?.(),
|
||||
() => {
|
||||
if (originalValue) {
|
||||
onChange('')
|
||||
setOffset(0)
|
||||
onHistoryReset?.()
|
||||
}
|
||||
},
|
||||
)
|
||||
|
||||
// NOTE(keybindings): This escape handler is intentionally NOT migrated to the keybindings system.
|
||||
// It's a text-level double-press escape for clearing input, not an action-level keybinding.
|
||||
// Double-press Esc clears the input and saves to history - this is text editing behavior,
|
||||
// not dialog dismissal, and needs the double-press safety mechanism.
|
||||
const handleEscape = useDoublePress(
|
||||
(show: boolean) => {
|
||||
if (!originalValue || !show) {
|
||||
return
|
||||
}
|
||||
addNotification({
|
||||
key: 'escape-again-to-clear',
|
||||
text: 'Esc again to clear',
|
||||
priority: 'immediate',
|
||||
timeoutMs: 1000,
|
||||
})
|
||||
},
|
||||
() => {
|
||||
// Remove the "Esc again to clear" notification immediately
|
||||
removeNotification('escape-again-to-clear')
|
||||
onClearInput?.()
|
||||
if (originalValue) {
|
||||
// Track double-escape usage for feature discovery
|
||||
// Save to history before clearing
|
||||
if (originalValue.trim() !== '') {
|
||||
addToHistory(originalValue)
|
||||
}
|
||||
onChange('')
|
||||
setOffset(0)
|
||||
onHistoryReset?.()
|
||||
}
|
||||
},
|
||||
)
|
||||
|
||||
const handleEmptyCtrlD = useDoublePress(
|
||||
show => {
|
||||
if (originalValue !== '') {
|
||||
return
|
||||
}
|
||||
onExitMessage?.(show, 'Ctrl-D')
|
||||
},
|
||||
() => {
|
||||
if (originalValue !== '') {
|
||||
return
|
||||
}
|
||||
onExit?.()
|
||||
},
|
||||
)
|
||||
|
||||
function handleCtrlD(): MaybeCursor {
|
||||
if (cursor.text === '') {
|
||||
// When input is empty, handle double-press
|
||||
handleEmptyCtrlD()
|
||||
return cursor
|
||||
}
|
||||
// When input is not empty, delete forward like iPython
|
||||
return cursor.del()
|
||||
}
|
||||
|
||||
function killToLineEnd(): Cursor {
|
||||
const { cursor: newCursor, killed } = cursor.deleteToLineEnd()
|
||||
pushToKillRing(killed, 'append')
|
||||
return newCursor
|
||||
}
|
||||
|
||||
function killToLineStart(): Cursor {
|
||||
const { cursor: newCursor, killed } = cursor.deleteToLineStart()
|
||||
pushToKillRing(killed, 'prepend')
|
||||
return newCursor
|
||||
}
|
||||
|
||||
function killWordBefore(): Cursor {
|
||||
const { cursor: newCursor, killed } = cursor.deleteWordBefore()
|
||||
pushToKillRing(killed, 'prepend')
|
||||
return newCursor
|
||||
}
|
||||
|
||||
function yank(): Cursor {
|
||||
const text = getLastKill()
|
||||
if (text.length > 0) {
|
||||
const startOffset = cursor.offset
|
||||
const newCursor = cursor.insert(text)
|
||||
recordYank(startOffset, text.length)
|
||||
return newCursor
|
||||
}
|
||||
return cursor
|
||||
}
|
||||
|
||||
function handleYankPop(): Cursor {
|
||||
const popResult = yankPop()
|
||||
if (!popResult) {
|
||||
return cursor
|
||||
}
|
||||
const { text, start, length } = popResult
|
||||
// Replace the previously yanked text with the new one
|
||||
const before = cursor.text.slice(0, start)
|
||||
const after = cursor.text.slice(start + length)
|
||||
const newText = before + text + after
|
||||
const newOffset = start + text.length
|
||||
updateYankLength(text.length)
|
||||
return Cursor.fromText(newText, columns, newOffset)
|
||||
}
|
||||
|
||||
// Emacs-style Ctrl-key bindings: maps the plain character of a Ctrl chord
// to its handler. Chords not listed here fall through to mapInput's
// default behavior.
const handleCtrl = mapInput([
  ['a', () => cursor.startOfLine()],
  ['b', () => cursor.left()],
  ['c', handleCtrlC],
  ['d', handleCtrlD], // forward-delete, or double-press exit when empty
  ['e', () => cursor.endOfLine()],
  ['f', () => cursor.right()],
  ['h', () => cursor.deleteTokenBefore() ?? cursor.backspace()], // Ctrl-H acts as backspace
  ['k', killToLineEnd],
  ['n', () => downOrHistoryDown()],
  ['p', () => upOrHistoryUp()],
  ['u', killToLineStart],
  ['w', killWordBefore],
  ['y', yank],
])
|
||||
|
||||
// Emacs-style Meta-key bindings: word motion, forward word delete, and
// yank-pop (cycling the kill ring after a yank).
const handleMeta = mapInput([
  ['b', () => cursor.prevWord()],
  ['f', () => cursor.nextWord()],
  ['d', () => cursor.deleteWordAfter()],
  ['y', handleYankPop],
])
|
||||
|
||||
// Enter handling: a trailing backslash or a Meta/Shift modifier inserts a
// literal newline (multiline editing); otherwise the current value is
// submitted. Returns a new Cursor when a newline is inserted, and returns
// nothing on submit (onSubmit handles the rest).
function handleEnter(key: Key) {
  if (
    multiline &&
    cursor.offset > 0 &&
    cursor.text[cursor.offset - 1] === '\\'
  ) {
    // Track that the user has used backslash+return
    markBackslashReturnUsed()
    // Replace the escaping backslash with the newline it requested.
    return cursor.backspace().insert('\n')
  }
  // Meta+Enter or Shift+Enter inserts a newline
  if (key.meta || key.shift) {
    return cursor.insert('\n')
  }
  // Apple Terminal doesn't support custom Shift+Enter keybindings,
  // so we use native macOS modifier detection to check if Shift is held
  if (env.terminal === 'Apple_Terminal' && isModifierPressed('shift')) {
    return cursor.insert('\n')
  }
  onSubmit?.(originalValue)
}
|
||||
|
||||
// Up-arrow / Ctrl-P: move up one visual (wrapped) line, then — in
// multiline input — one logical line; when the cursor cannot move at all,
// fall back to history navigation. Returns the (possibly unchanged) cursor.
function upOrHistoryUp() {
  // Caller opted out of cursor movement: arrows always navigate history.
  if (disableCursorMovementForUpDownKeys) {
    onHistoryUp?.()
    return cursor
  }
  // Try to move by wrapped lines first
  const cursorUp = cursor.up()
  if (!cursorUp.equals(cursor)) {
    return cursorUp
  }

  // If we can't move by wrapped lines and this is multiline input,
  // try to move by logical lines (to handle paragraph boundaries)
  if (multiline) {
    const cursorUpLogical = cursor.upLogicalLine()
    if (!cursorUpLogical.equals(cursor)) {
      return cursorUpLogical
    }
  }

  // Can't move up at all - trigger history navigation
  onHistoryUp?.()
  return cursor
}
|
||||
// Down-arrow / Ctrl-N: mirror of upOrHistoryUp — visual line first, then
// logical line in multiline input, then history navigation as a fallback.
function downOrHistoryDown() {
  // Caller opted out of cursor movement: arrows always navigate history.
  if (disableCursorMovementForUpDownKeys) {
    onHistoryDown?.()
    return cursor
  }
  // Try to move by wrapped lines first
  const cursorDown = cursor.down()
  if (!cursorDown.equals(cursor)) {
    return cursorDown
  }

  // If we can't move by wrapped lines and this is multiline input,
  // try to move by logical lines (to handle paragraph boundaries)
  if (multiline) {
    const cursorDownLogical = cursor.downLogicalLine()
    if (!cursorDownLogical.equals(cursor)) {
      return cursorDownLogical
    }
  }

  // Can't move down at all - trigger history navigation
  onHistoryDown?.()
  return cursor
}
|
||||
|
||||
// Resolve a parsed keypress to its input handler. The switch(true) cases
// are checked top-to-bottom, so ORDER IS SIGNIFICANT (e.g. key.return
// must precede key.meta so Option+Return inserts a newline). The default
// case handles plain text plus a few raw escape sequences parseKeypress
// does not classify.
function mapKey(key: Key): InputMapper {
  switch (true) {
    case key.escape:
      return () => {
        // Skip when a keybinding context (e.g. Autocomplete) owns escape.
        // useKeybindings can't shield us via stopImmediatePropagation —
        // BaseTextInput's useInput registers first (child effects fire
        // before parent effects), so this handler has already run by the
        // time the keybinding's handler stops propagation.
        if (disableEscapeDoublePress) return cursor
        handleEscape()
        // Return the current cursor unchanged - handleEscape manages state internally
        return cursor
      }
    case key.leftArrow && (key.ctrl || key.meta || key.fn):
      return () => cursor.prevWord()
    case key.rightArrow && (key.ctrl || key.meta || key.fn):
      return () => cursor.nextWord()
    case key.backspace:
      return key.meta || key.ctrl
        ? killWordBefore
        : () => cursor.deleteTokenBefore() ?? cursor.backspace()
    case key.delete:
      return key.meta ? killToLineEnd : () => cursor.del()
    case key.ctrl:
      return handleCtrl
    case key.home:
      return () => cursor.startOfLine()
    case key.end:
      return () => cursor.endOfLine()
    case key.pageDown:
      // In fullscreen mode, PgUp/PgDn scroll the message viewport instead
      // of moving the cursor — no-op here, ScrollKeybindingHandler handles it.
      if (isFullscreenEnvEnabled()) {
        return NOOP_HANDLER
      }
      return () => cursor.endOfLine()
    case key.pageUp:
      if (isFullscreenEnvEnabled()) {
        return NOOP_HANDLER
      }
      return () => cursor.startOfLine()
    case key.wheelUp:
    case key.wheelDown:
      // Mouse wheel events only exist when fullscreen mouse tracking is on.
      // ScrollKeybindingHandler handles them; no-op here to avoid inserting
      // the raw SGR sequence as text.
      return NOOP_HANDLER
    case key.return:
      // Must come before key.meta so Option+Return inserts newline
      return () => handleEnter(key)
    case key.meta:
      return handleMeta
    case key.tab:
      // Tab is intentionally a no-op here (completion lives elsewhere).
      return () => cursor
    case key.upArrow && !key.shift:
      return upOrHistoryUp
    case key.downArrow && !key.shift:
      return downOrHistoryDown
    case key.leftArrow:
      return () => cursor.left()
    case key.rightArrow:
      return () => cursor.right()
    default: {
      // Plain text (and unclassified escape sequences) land here.
      return function (input: string) {
        switch (true) {
          // Home key
          case input === '\x1b[H' || input === '\x1b[1~':
            return cursor.startOfLine()
          // End key
          case input === '\x1b[F' || input === '\x1b[4~':
            return cursor.endOfLine()
          default: {
            // Trailing \r after text is SSH-coalesced Enter ("o\r") —
            // strip it so the Enter isn't inserted as content. Lone \r
            // here is Alt+Enter leaking through (META_KEY_CODE_RE doesn't
            // match \x1b\r) — leave it for the \r→\n below. Embedded \r
            // is multi-line paste from a terminal without bracketed
            // paste — convert to \n. Backslash+\r is a stale VS Code
            // Shift+Enter binding (pre-#8991 /terminal-setup wrote
            // args.text "\\\r\n" to keybindings.json); keep the \r so
            // it becomes \n below (anthropics/claude-code#31316).
            const text = stripAnsi(input)
              // eslint-disable-next-line custom-rules/no-lookbehind-regex -- .replace(re, str) on 1-2 char keystrokes: no-match returns same string (Object.is), regex never runs
              .replace(/(?<=[^\\\r\n])\r$/, '')
              .replace(/\r/g, '\n')
            if (cursor.isAtStart() && isInputModeCharacter(input)) {
              return cursor.insert(text).left()
            }
            return cursor.insert(text)
          }
        }
      }
    }
  }
}
|
||||
|
||||
// Check if this is a kill command (Ctrl+K, Ctrl+U, Ctrl+W, or Meta+Backspace/Delete)
|
||||
function isKillKey(key: Key, input: string): boolean {
|
||||
if (key.ctrl && (input === 'k' || input === 'u' || input === 'w')) {
|
||||
return true
|
||||
}
|
||||
if (key.meta && (key.backspace || key.delete)) {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// Check if this is a yank command (Ctrl+Y or Alt+Y)
|
||||
function isYankKey(key: Key, input: string): boolean {
|
||||
return (key.ctrl || key.meta) && input === 'y'
|
||||
}
|
||||
|
||||
// Main keystroke entry point: filters the raw input, works around SSH/tmux
// DEL-character duplication, resets kill/yank chains on unrelated keys,
// dispatches through mapKey, and commits any resulting text/offset change.
// Also detects SSH-coalesced "text + Enter" chunks and submits them.
function onInput(input: string, key: Key): void {
  // Note: Image paste shortcut (chat:imagePaste) is handled via useKeybindings in PromptInput

  // Apply filter if provided
  const filteredInput = inputFilter ? inputFilter(input, key) : input

  // If the input was filtered out, do nothing
  if (filteredInput === '' && input !== '') {
    return
  }

  // Fix Issue #1853: Filter DEL characters that interfere with backspace in SSH/tmux
  // In SSH/tmux environments, backspace generates both key events and raw DEL chars
  if (!key.backspace && !key.delete && input.includes('\x7f')) {
    const delCount = (input.match(/\x7f/g) || []).length

    // Apply all DEL characters as backspace operations synchronously
    // Try to delete tokens first, fall back to character backspace
    let currentCursor = cursor
    for (let i = 0; i < delCount; i++) {
      currentCursor =
        currentCursor.deleteTokenBefore() ?? currentCursor.backspace()
    }

    // Update state once with the final result
    if (!cursor.equals(currentCursor)) {
      if (cursor.text !== currentCursor.text) {
        onChange(currentCursor.text)
      }
      setOffset(currentCursor.offset)
    }
    resetKillAccumulation()
    resetYankState()
    return
  }

  // Reset kill accumulation for non-kill keys
  if (!isKillKey(key, filteredInput)) {
    resetKillAccumulation()
  }

  // Reset yank state for non-yank keys (breaks yank-pop chain)
  if (!isYankKey(key, filteredInput)) {
    resetYankState()
  }

  // Dispatch: resolve the handler for this key, then apply it to the input.
  const nextCursor = mapKey(key)(filteredInput)
  if (nextCursor) {
    if (!cursor.equals(nextCursor)) {
      if (cursor.text !== nextCursor.text) {
        onChange(nextCursor.text)
      }
      setOffset(nextCursor.offset)
    }
    // SSH-coalesced Enter: on slow links, "o" + Enter can arrive as one
    // chunk "o\r". parseKeypress only matches s === '\r', so it hit the
    // default handler above (which stripped the trailing \r). Text with
    // exactly one trailing \r is coalesced Enter; lone \r is Alt+Enter
    // (newline); embedded \r is multi-line paste.
    if (
      filteredInput.length > 1 &&
      filteredInput.endsWith('\r') &&
      !filteredInput.slice(0, -1).includes('\r') &&
      // Backslash+CR is a stale VS Code Shift+Enter binding, not
      // coalesced Enter. See default handler above.
      filteredInput[filteredInput.length - 2] !== '\\'
    ) {
      onSubmit?.(nextCursor.text)
    }
  }
}
|
||||
|
||||
// Prepare ghost text for rendering - validate insertPosition matches current
// cursor offset to prevent stale ghost text from a previous keystroke causing
// a one-frame jitter (ghost text state is updated via useEffect after render)
const ghostTextForRender =
  inlineGhostText && dim && inlineGhostText.insertPosition === offset
    ? { text: inlineGhostText.text, dim }
    : undefined

const cursorPos = cursor.getPosition()

// Public surface of the hook: the input handler, the rendered string, and
// cursor/viewport geometry for the caller's layout math.
return {
  onInput,
  renderedValue: cursor.render(
    cursorChar,
    mask,
    invert,
    ghostTextForRender,
    maxVisibleLines,
  ),
  offset,
  setOffset,
  // Cursor line relative to the visible viewport, not the whole buffer.
  cursorLine: cursorPos.line - cursor.getViewportStartLine(maxVisibleLines),
  cursorColumn: cursorPos.column,
  viewportCharOffset: cursor.getViewportCharOffset(maxVisibleLines),
  viewportCharEnd: cursor.getViewportCharEnd(maxVisibleLines),
}
}
|
||||
@@ -0,0 +1,14 @@
|
||||
import { useEffect, useState } from 'react'
|
||||
|
||||
export function useTimeout(delay: number, resetTrigger?: number): boolean {
|
||||
const [isElapsed, setIsElapsed] = useState(false)
|
||||
|
||||
useEffect(() => {
|
||||
setIsElapsed(false)
|
||||
const timer = setTimeout(setIsElapsed, delay, true)
|
||||
|
||||
return () => clearTimeout(timer)
|
||||
}, [delay, resetTrigger])
|
||||
|
||||
return isElapsed
|
||||
}
|
||||
@@ -0,0 +1,213 @@
|
||||
import type { StructuredPatchHunk } from 'diff'
|
||||
import { useMemo, useRef } from 'react'
|
||||
import type { FileEditOutput } from '../tools/FileEditTool/types.js'
|
||||
import type { Output as FileWriteOutput } from '../tools/FileWriteTool/FileWriteTool.js'
|
||||
import type { Message } from '../types/message.js'
|
||||
|
||||
// Aggregated diff for a single file within one conversation turn.
export type TurnFileDiff = {
  filePath: string
  hunks: StructuredPatchHunk[]
  // True when the file was created this turn (FileWriteTool type='create').
  isNewFile: boolean
  linesAdded: number
  linesRemoved: number
}

// All file edits made during one turn (a user prompt plus the assistant
// responses / tool results that follow), keyed by file path, with
// precomputed summary stats.
export type TurnDiff = {
  turnIndex: number
  userPromptPreview: string
  timestamp: string
  files: Map<string, TurnFileDiff>
  stats: {
    filesChanged: number
    linesAdded: number
    linesRemoved: number
  }
}

// Tool results that can carry a file diff: FileEditTool or FileWriteTool.
type FileEditResult = FileEditOutput | FileWriteOutput

// Mutable accumulator kept across renders so useTurnDiffs only has to
// process messages added since its previous invocation.
type TurnDiffCache = {
  completedTurns: TurnDiff[]
  currentTurn: TurnDiff | null
  lastProcessedIndex: number
  lastTurnIndex: number
}
|
||||
|
||||
function isFileEditResult(result: unknown): result is FileEditResult {
|
||||
if (!result || typeof result !== 'object') return false
|
||||
const r = result as Record<string, unknown>
|
||||
// FileEditTool: has structuredPatch with content
|
||||
// FileWriteTool (update): has structuredPatch with content
|
||||
// FileWriteTool (create): has type='create' and content (structuredPatch is empty)
|
||||
const hasFilePath = typeof r.filePath === 'string'
|
||||
const hasStructuredPatch =
|
||||
Array.isArray(r.structuredPatch) && r.structuredPatch.length > 0
|
||||
const isNewFile = r.type === 'create' && typeof r.content === 'string'
|
||||
return hasFilePath && (hasStructuredPatch || isNewFile)
|
||||
}
|
||||
|
||||
function isFileWriteOutput(result: FileEditResult): result is FileWriteOutput {
|
||||
return (
|
||||
'type' in result && (result.type === 'create' || result.type === 'update')
|
||||
)
|
||||
}
|
||||
|
||||
function countHunkLines(hunks: StructuredPatchHunk[]): {
|
||||
added: number
|
||||
removed: number
|
||||
} {
|
||||
let added = 0
|
||||
let removed = 0
|
||||
for (const hunk of hunks) {
|
||||
for (const line of hunk.lines) {
|
||||
if (line.startsWith('+')) added++
|
||||
else if (line.startsWith('-')) removed++
|
||||
}
|
||||
}
|
||||
return { added, removed }
|
||||
}
|
||||
|
||||
function getUserPromptPreview(message: Message): string {
|
||||
if (message.type !== 'user') return ''
|
||||
const content = message.message.content
|
||||
const text = typeof content === 'string' ? content : ''
|
||||
// Truncate to ~30 chars
|
||||
if (text.length <= 30) return text
|
||||
return text.slice(0, 29) + '…'
|
||||
}
|
||||
|
||||
function computeTurnStats(turn: TurnDiff): void {
|
||||
let totalAdded = 0
|
||||
let totalRemoved = 0
|
||||
for (const file of turn.files.values()) {
|
||||
totalAdded += file.linesAdded
|
||||
totalRemoved += file.linesRemoved
|
||||
}
|
||||
turn.stats = {
|
||||
filesChanged: turn.files.size,
|
||||
linesAdded: totalAdded,
|
||||
linesRemoved: totalRemoved,
|
||||
}
|
||||
}
|
||||
|
||||
/**
 * Extract turn-based diffs from messages.
 * A turn is defined as a user prompt followed by assistant responses and tool results.
 * Each turn with file edits is included in the result.
 *
 * Uses incremental accumulation - only processes new messages since last render.
 *
 * @param messages Full conversation message list (only the tail beyond the
 *   last processed index is scanned each render).
 * @returns Turns that contain at least one file edit, most recent first.
 */
export function useTurnDiffs(messages: Message[]): TurnDiff[] {
  // Mutable accumulator that survives re-renders; the memo below only
  // scans messages appended since the previous call.
  const cache = useRef<TurnDiffCache>({
    completedTurns: [],
    currentTurn: null,
    lastProcessedIndex: 0,
    lastTurnIndex: 0,
  })

  return useMemo(() => {
    const c = cache.current

    // Reset if messages shrunk (user rewound conversation)
    if (messages.length < c.lastProcessedIndex) {
      c.completedTurns = []
      c.currentTurn = null
      c.lastProcessedIndex = 0
      c.lastTurnIndex = 0
    }

    // Process only new messages
    for (let i = c.lastProcessedIndex; i < messages.length; i++) {
      const message = messages[i]
      if (!message || message.type !== 'user') continue

      // Check if this is a user prompt (not a tool result)
      const isToolResult =
        message.toolUseResult ||
        (Array.isArray(message.message.content) &&
          message.message.content[0]?.type === 'tool_result')

      if (!isToolResult && !message.isMeta) {
        // Start a new turn on user prompt
        // Flush the previous turn first, but only if it touched any files.
        if (c.currentTurn && c.currentTurn.files.size > 0) {
          computeTurnStats(c.currentTurn)
          c.completedTurns.push(c.currentTurn)
        }

        c.lastTurnIndex++
        c.currentTurn = {
          turnIndex: c.lastTurnIndex,
          userPromptPreview: getUserPromptPreview(message),
          timestamp: message.timestamp,
          files: new Map(),
          stats: { filesChanged: 0, linesAdded: 0, linesRemoved: 0 },
        }
      } else if (c.currentTurn && message.toolUseResult) {
        // Collect file edits from tool results
        const result = message.toolUseResult
        if (isFileEditResult(result)) {
          const { filePath, structuredPatch } = result
          const isNewFile = 'type' in result && result.type === 'create'

          // Get or create file entry
          let fileEntry = c.currentTurn.files.get(filePath)
          if (!fileEntry) {
            fileEntry = {
              filePath,
              hunks: [],
              isNewFile,
              linesAdded: 0,
              linesRemoved: 0,
            }
            c.currentTurn.files.set(filePath, fileEntry)
          }

          // For new files, generate synthetic hunk from content
          if (
            isNewFile &&
            structuredPatch.length === 0 &&
            isFileWriteOutput(result)
          ) {
            const content = result.content
            const lines = content.split('\n')
            // Whole file presented as one all-additions hunk.
            const syntheticHunk: StructuredPatchHunk = {
              oldStart: 0,
              oldLines: 0,
              newStart: 1,
              newLines: lines.length,
              lines: lines.map(l => '+' + l),
            }
            fileEntry.hunks.push(syntheticHunk)
            fileEntry.linesAdded += lines.length
          } else {
            // Append hunks (same file may be edited multiple times in a turn)
            fileEntry.hunks.push(...structuredPatch)

            // Update line counts
            const { added, removed } = countHunkLines(structuredPatch)
            fileEntry.linesAdded += added
            fileEntry.linesRemoved += removed
          }

          // If file was created and then edited, it's still a new file
          if (isNewFile) {
            fileEntry.isNewFile = true
          }
        }
      }
    }

    c.lastProcessedIndex = messages.length

    // Build result: completed turns + current turn if it has files
    const result = [...c.completedTurns]
    if (c.currentTurn && c.currentTurn.files.size > 0) {
      // Compute stats for current turn before including
      computeTurnStats(c.currentTurn)
      result.push(c.currentTurn)
    }

    // Return in reverse order (most recent first)
    return result.reverse()
  }, [messages])
}
|
||||
File diff suppressed because one or more lines are too long
@@ -0,0 +1,34 @@
|
||||
import { useState } from 'react'
|
||||
import { major, minor, patch } from 'semver'
|
||||
|
||||
export function getSemverPart(version: string): string {
|
||||
return `${major(version, { loose: true })}.${minor(version, { loose: true })}.${patch(version, { loose: true })}`
|
||||
}
|
||||
|
||||
export function shouldShowUpdateNotification(
|
||||
updatedVersion: string,
|
||||
lastNotifiedSemver: string | null,
|
||||
): boolean {
|
||||
const updatedSemver = getSemverPart(updatedVersion)
|
||||
return updatedSemver !== lastNotifiedSemver
|
||||
}
|
||||
|
||||
// Returns the normalized semver of a newly-available update exactly once
// per distinct version, and null otherwise. Seeds the "already notified"
// state with the running version so the current version never triggers.
// Uses React's adjust-state-during-render pattern: the conditional
// setState below schedules an immediate re-render in which this returns
// null again for the same version.
export function useUpdateNotification(
  updatedVersion: string | null | undefined,
  initialVersion: string = MACRO.VERSION,
): string | null {
  const [lastNotifiedSemver, setLastNotifiedSemver] = useState<string | null>(
    () => getSemverPart(initialVersion),
  )

  // No update known yet — nothing to announce.
  if (!updatedVersion) {
    return null
  }

  const updatedSemver = getSemverPart(updatedVersion)
  if (updatedSemver !== lastNotifiedSemver) {
    setLastNotifiedSemver(updatedSemver)
    return updatedSemver
  }
  return null
}
|
||||
@@ -0,0 +1,316 @@
|
||||
import React, { useCallback, useState } from 'react'
|
||||
import type { Key } from '../ink.js'
|
||||
import type { VimInputState, VimMode } from '../types/textInputTypes.js'
|
||||
import { Cursor } from '../utils/Cursor.js'
|
||||
import { lastGrapheme } from '../utils/intl.js'
|
||||
import {
|
||||
executeIndent,
|
||||
executeJoin,
|
||||
executeOpenLine,
|
||||
executeOperatorFind,
|
||||
executeOperatorMotion,
|
||||
executeOperatorTextObj,
|
||||
executeReplace,
|
||||
executeToggleCase,
|
||||
executeX,
|
||||
type OperatorContext,
|
||||
} from '../vim/operators.js'
|
||||
import { type TransitionContext, transition } from '../vim/transitions.js'
|
||||
import {
|
||||
createInitialPersistentState,
|
||||
createInitialVimState,
|
||||
type PersistentState,
|
||||
type RecordedChange,
|
||||
type VimState,
|
||||
} from '../vim/types.js'
|
||||
import { type UseTextInputProps, useTextInput } from './useTextInput.js'
|
||||
|
||||
// Props for useVimInput: everything useTextInput accepts plus vim-mode
// callbacks. inputFilter is stripped via Omit and re-declared because
// useVimInput applies the filter itself before delegating to useTextInput
// (see the comment inside useVimInput).
type UseVimInputProps = Omit<UseTextInputProps, 'inputFilter'> & {
  onModeChange?: (mode: VimMode) => void
  onUndo?: () => void
  inputFilter?: UseTextInputProps['inputFilter']
}
|
||||
|
||||
/**
 * Vim emulation layered over useTextInput. Wraps the base text-input hook
 * and intercepts keystrokes: INSERT mode mostly delegates to the base
 * handler (while recording inserted text for dot-repeat), NORMAL mode
 * feeds keys through the vim command state machine (`transition`).
 * Persistent vim state (register, last find, last change) lives in a ref
 * and survives mode switches.
 */
export function useVimInput(props: UseVimInputProps): VimInputState {
  // Current command-machine state; a ref so handlers see the live value.
  const vimStateRef = React.useRef<VimState>(createInitialVimState())
  // Mirrors vimStateRef.mode in React state so the UI re-renders on change.
  const [mode, setMode] = useState<VimMode>('INSERT')

  // Register, last find, last change — persists across mode transitions.
  const persistentRef = React.useRef<PersistentState>(
    createInitialPersistentState(),
  )

  // inputFilter is applied once at the top of handleVimInput (not here) so
  // vim-handled paths that return without calling textInput.onInput still
  // run the filter — otherwise a stateful filter (e.g. lazy-space-after-
  // pill) stays armed across an Escape → NORMAL → INSERT round-trip.
  const textInput = useTextInput({ ...props, inputFilter: undefined })
  const { onModeChange, inputFilter } = props

  // Enter INSERT mode, optionally repositioning the cursor first.
  const switchToInsertMode = useCallback(
    (offset?: number): void => {
      if (offset !== undefined) {
        textInput.setOffset(offset)
      }
      vimStateRef.current = { mode: 'INSERT', insertedText: '' }
      setMode('INSERT')
      onModeChange?.('INSERT')
    },
    [textInput, onModeChange],
  )

  // Exit INSERT mode: record the insertion for dot-repeat, nudge the
  // cursor left (vim convention), and reset the command machine to idle.
  const switchToNormalMode = useCallback((): void => {
    const current = vimStateRef.current
    if (current.mode === 'INSERT' && current.insertedText) {
      persistentRef.current.lastChange = {
        type: 'insert',
        text: current.insertedText,
      }
    }

    // Vim behavior: move cursor left by 1 when exiting insert mode
    // (unless at beginning of line or at offset 0)
    const offset = textInput.offset
    if (offset > 0 && props.value[offset - 1] !== '\n') {
      textInput.setOffset(offset - 1)
    }

    vimStateRef.current = { mode: 'NORMAL', command: { type: 'idle' } }
    setMode('NORMAL')
    onModeChange?.('NORMAL')
  }, [onModeChange, textInput, props.value])

  // Bundle the callbacks vim operators need to read/mutate editor state.
  // During dot-repeat replay, recordChange is a no-op so the replay does
  // not overwrite the change being replayed.
  function createOperatorContext(
    cursor: Cursor,
    isReplay: boolean = false,
  ): OperatorContext {
    return {
      cursor,
      text: props.value,
      setText: (newText: string) => props.onChange(newText),
      setOffset: (offset: number) => textInput.setOffset(offset),
      enterInsert: (offset: number) => switchToInsertMode(offset),
      getRegister: () => persistentRef.current.register,
      setRegister: (content: string, linewise: boolean) => {
        persistentRef.current.register = content
        persistentRef.current.registerIsLinewise = linewise
      },
      getLastFind: () => persistentRef.current.lastFind,
      setLastFind: (type, char) => {
        persistentRef.current.lastFind = { type, char }
      },
      recordChange: isReplay
        ? () => {}
        : (change: RecordedChange) => {
            persistentRef.current.lastChange = change
          },
    }
  }

  // Dot-repeat ('.'): re-execute the last recorded change at the current
  // cursor position, dispatching on the recorded change's discriminant.
  function replayLastChange(): void {
    const change = persistentRef.current.lastChange
    if (!change) return

    const cursor = Cursor.fromText(props.value, props.columns, textInput.offset)
    const ctx = createOperatorContext(cursor, true)

    switch (change.type) {
      case 'insert':
        if (change.text) {
          const newCursor = cursor.insert(change.text)
          props.onChange(newCursor.text)
          textInput.setOffset(newCursor.offset)
        }
        break

      case 'x':
        executeX(change.count, ctx)
        break

      case 'replace':
        executeReplace(change.char, change.count, ctx)
        break

      case 'toggleCase':
        executeToggleCase(change.count, ctx)
        break

      case 'indent':
        executeIndent(change.dir, change.count, ctx)
        break

      case 'join':
        executeJoin(change.count, ctx)
        break

      case 'openLine':
        executeOpenLine(change.direction, ctx)
        break

      case 'operator':
        executeOperatorMotion(change.op, change.motion, change.count, ctx)
        break

      case 'operatorFind':
        executeOperatorFind(
          change.op,
          change.find,
          change.char,
          change.count,
          ctx,
        )
        break

      case 'operatorTextObj':
        executeOperatorTextObj(
          change.op,
          change.scope,
          change.objType,
          change.count,
          ctx,
        )
        break
    }
  }

  // Keystroke entry point replacing textInput.onInput. Early-return paths
  // are ordered: ctrl chords and Enter always go to the base handler;
  // Escape drives mode transitions; then INSERT vs NORMAL dispatch.
  function handleVimInput(rawInput: string, key: Key): void {
    const state = vimStateRef.current
    // Run inputFilter in all modes so stateful filters disarm on any key,
    // but only apply the transformed input in INSERT — NORMAL-mode command
    // lookups expect single chars and a prepended space would break them.
    const filtered = inputFilter ? inputFilter(rawInput, key) : rawInput
    const input = state.mode === 'INSERT' ? filtered : rawInput
    const cursor = Cursor.fromText(props.value, props.columns, textInput.offset)

    if (key.ctrl) {
      textInput.onInput(input, key)
      return
    }

    // NOTE(keybindings): This escape handler is intentionally NOT migrated to the keybindings system.
    // It's vim's standard INSERT->NORMAL mode switch - a vim-specific behavior that should not be
    // configurable via keybindings. Vim users expect Esc to always exit INSERT mode.
    if (key.escape && state.mode === 'INSERT') {
      switchToNormalMode()
      return
    }

    // Escape in NORMAL mode cancels any pending command (replace, operator, etc.)
    if (key.escape && state.mode === 'NORMAL') {
      vimStateRef.current = { mode: 'NORMAL', command: { type: 'idle' } }
      return
    }

    // Pass Enter to base handler regardless of mode (allows submission from NORMAL)
    if (key.return) {
      textInput.onInput(input, key)
      return
    }

    if (state.mode === 'INSERT') {
      // Track inserted text for dot-repeat
      if (key.backspace || key.delete) {
        if (state.insertedText.length > 0) {
          vimStateRef.current = {
            mode: 'INSERT',
            insertedText: state.insertedText.slice(
              0,
              // Drop the last grapheme (not code unit) of the tracked text.
              -(lastGrapheme(state.insertedText).length || 1),
            ),
          }
        }
      } else {
        vimStateRef.current = {
          mode: 'INSERT',
          insertedText: state.insertedText + input,
        }
      }
      textInput.onInput(input, key)
      return
    }

    if (state.mode !== 'NORMAL') {
      return
    }

    // In idle state, delegate arrow keys to base handler for cursor movement
    // and history fallback (upOrHistoryUp / downOrHistoryDown)
    if (
      state.command.type === 'idle' &&
      (key.upArrow || key.downArrow || key.leftArrow || key.rightArrow)
    ) {
      textInput.onInput(input, key)
      return
    }

    const ctx: TransitionContext = {
      ...createOperatorContext(cursor, false),
      onUndo: props.onUndo,
      onDotRepeat: replayLastChange,
    }

    // Backspace/Delete are only mapped in motion-expecting states. In
    // literal-char states (replace, find, operatorFind), mapping would turn
    // r+Backspace into "replace with h" and df+Delete into "delete to next x".
    // Delete additionally skips count state: in vim, N<Del> removes a count
    // digit rather than executing Nx; we don't implement digit removal but
    // should at least not turn a cancel into a destructive Nx.
    const expectsMotion =
      state.command.type === 'idle' ||
      state.command.type === 'count' ||
      state.command.type === 'operator' ||
      state.command.type === 'operatorCount'

    // Map arrow keys to vim motions in NORMAL mode
    let vimInput = input
    if (key.leftArrow) vimInput = 'h'
    else if (key.rightArrow) vimInput = 'l'
    else if (key.upArrow) vimInput = 'k'
    else if (key.downArrow) vimInput = 'j'
    else if (expectsMotion && key.backspace) vimInput = 'h'
    else if (expectsMotion && state.command.type !== 'count' && key.delete)
      vimInput = 'x'

    // Advance the vim command state machine with the (possibly remapped) key.
    const result = transition(state.command, vimInput, ctx)

    if (result.execute) {
      result.execute()
    }

    // Update command state (only if execute didn't switch to INSERT)
    if (vimStateRef.current.mode === 'NORMAL') {
      if (result.next) {
        vimStateRef.current = { mode: 'NORMAL', command: result.next }
      } else if (result.execute) {
        vimStateRef.current = { mode: 'NORMAL', command: { type: 'idle' } }
      }
    }

    // '?' from idle NORMAL mode seeds the input with '?' (help trigger —
    // NOTE(review): presumably handled by the surrounding prompt UI; confirm).
    if (
      input === '?' &&
      state.mode === 'NORMAL' &&
      state.command.type === 'idle'
    ) {
      props.onChange('?')
    }
  }

  // External mode setter (e.g. from UI controls): resets the command
  // machine to a clean state for the requested mode.
  const setModeExternal = useCallback(
    (newMode: VimMode) => {
      if (newMode === 'INSERT') {
        vimStateRef.current = { mode: 'INSERT', insertedText: '' }
      } else {
        vimStateRef.current = { mode: 'NORMAL', command: { type: 'idle' } }
      }
      setMode(newMode)
      onModeChange?.(newMode)
    },
    [onModeChange],
  )

  return {
    ...textInput,
    onInput: handleVimInput,
    mode,
    setMode: setModeExternal,
  }
}
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user