init claude-code

This commit is contained in:
2026-04-01 17:32:37 +02:00
commit 73b208c009
1902 changed files with 513237 additions and 0 deletions
File diff suppressed because it is too large Load Diff
+324
View File
@@ -0,0 +1,324 @@
/**
* Client-side secret scanner for team memory (PSR M22174).
*
* Scans content for credentials before upload so secrets never leave the
* user's machine. Uses a curated subset of high-confidence rules from
* gitleaks (https://github.com/gitleaks/gitleaks, MIT license) — only
* rules with distinctive prefixes that have near-zero false-positive
* rates are included. Generic keyword-context rules are omitted.
*
* Rule IDs and regexes sourced directly from the public gitleaks config:
* https://github.com/gitleaks/gitleaks/blob/master/config/gitleaks.toml
*
* JS regex notes:
* - gitleaks uses Go regex; inline (?i) and mode groups (?-i:...) are
* not portable to JS. Affected rules are rewritten with explicit
* character classes ([a-zA-Z0-9] instead of (?i)[a-z0-9]).
* - Trailing boundary alternations like (?:[\x60'"\s;]|\\[nr]|$) from
* Go regex are kept (JS $ matches end-of-string in default mode).
*/
import { capitalize } from '../../utils/stringUtils.js'
// Internal rule shape: one curated gitleaks pattern.
type SecretRule = {
  /** Gitleaks rule ID (kebab-case), used in labels and analytics */
  id: string
  /** Regex source, lazily compiled on first scan */
  source: string
  /** Optional JS regex flags (most rules are case-sensitive by default) */
  flags?: string
}
// Public match result: identifies WHICH rule fired, never the matched text.
export type SecretMatch = {
  /** Gitleaks rule ID that matched (e.g., "github-pat", "aws-access-token") */
  ruleId: string
  /** Human-readable label derived from the rule ID */
  label: string
}
// ─── Curated rules ──────────────────────────────────────────────
// High-confidence patterns from gitleaks with distinctive prefixes.
// Ordered roughly by likelihood of appearing in dev-team content.
// Anthropic API key prefix, assembled at runtime so the literal byte
// sequence isn't present in the external bundle (excluded-strings check).
// join() is not constant-folded by the minifier.
// Evaluates to the usual Anthropic key prefix ("sk" + "-ant-" + "api").
const ANT_KEY_PFX = ['sk', 'ant', 'api'].join('-')
const SECRET_RULES: SecretRule[] = [
  // — Cloud providers —
  {
    // AWS access key ID: fixed 4-char prefix + 16 base32 chars.
    id: 'aws-access-token',
    source: '\\b((?:A3T[A-Z0-9]|AKIA|ASIA|ABIA|ACCA)[A-Z2-7]{16})\\b',
  },
  {
    id: 'gcp-api-key',
    source: '\\b(AIza[\\w-]{35})(?:[\\x60\'"\\s;]|\\\\[nr]|$)',
  },
  {
    // Azure client secrets have no distinctive prefix, so this rule leans on
    // the internal "Q~" marker plus strict boundary classes on both sides.
    id: 'azure-ad-client-secret',
    source:
      '(?:^|[\\\\\'"\\x60\\s>=:(,)])([a-zA-Z0-9_~.]{3}\\dQ~[a-zA-Z0-9_~.-]{31,34})(?:$|[\\\\\'"\\x60\\s<),])',
  },
  {
    id: 'digitalocean-pat',
    source: '\\b(dop_v1_[a-f0-9]{64})(?:[\\x60\'"\\s;]|\\\\[nr]|$)',
  },
  {
    id: 'digitalocean-access-token',
    source: '\\b(doo_v1_[a-f0-9]{64})(?:[\\x60\'"\\s;]|\\\\[nr]|$)',
  },
  // — AI APIs —
  {
    // Prefix is assembled at runtime via ANT_KEY_PFX (see above).
    id: 'anthropic-api-key',
    source: `\\b(${ANT_KEY_PFX}03-[a-zA-Z0-9_\\-]{93}AA)(?:[\\x60'"\\s;]|\\\\[nr]|$)`,
  },
  {
    id: 'anthropic-admin-api-key',
    source:
      '\\b(sk-ant-admin01-[a-zA-Z0-9_\\-]{93}AA)(?:[\\x60\'"\\s;]|\\\\[nr]|$)',
  },
  {
    // Covers both the new project/service-account/admin key formats and the
    // legacy sk-... format; "T3BlbkFJ" is base64 "OpenAI".
    id: 'openai-api-key',
    source:
      '\\b(sk-(?:proj|svcacct|admin)-(?:[A-Za-z0-9_-]{74}|[A-Za-z0-9_-]{58})T3BlbkFJ(?:[A-Za-z0-9_-]{74}|[A-Za-z0-9_-]{58})\\b|sk-[a-zA-Z0-9]{20}T3BlbkFJ[a-zA-Z0-9]{20})(?:[\\x60\'"\\s;]|\\\\[nr]|$)',
  },
  {
    id: 'huggingface-access-token',
    // gitleaks: hf_(?i:[a-z]{34}) → JS: hf_[a-zA-Z]{34}
    source: '\\b(hf_[a-zA-Z]{34})(?:[\\x60\'"\\s;]|\\\\[nr]|$)',
  },
  // — Version control —
  {
    id: 'github-pat',
    source: 'ghp_[0-9a-zA-Z]{36}',
  },
  {
    id: 'github-fine-grained-pat',
    source: 'github_pat_\\w{82}',
  },
  {
    id: 'github-app-token',
    source: '(?:ghu|ghs)_[0-9a-zA-Z]{36}',
  },
  {
    id: 'github-oauth',
    source: 'gho_[0-9a-zA-Z]{36}',
  },
  {
    id: 'github-refresh-token',
    source: 'ghr_[0-9a-zA-Z]{36}',
  },
  {
    id: 'gitlab-pat',
    source: 'glpat-[\\w-]{20}',
  },
  {
    id: 'gitlab-deploy-token',
    source: 'gldt-[0-9a-zA-Z_\\-]{20}',
  },
  // — Communication —
  {
    id: 'slack-bot-token',
    source: 'xoxb-[0-9]{10,13}-[0-9]{10,13}[a-zA-Z0-9-]*',
  },
  {
    id: 'slack-user-token',
    source: 'xox[pe](?:-[0-9]{10,13}){3}-[a-zA-Z0-9-]{28,34}',
  },
  {
    // gitleaks rule is (?i); the 'i' flag makes the explicit classes moot
    // but keeps the source close to upstream.
    id: 'slack-app-token',
    source: 'xapp-\\d-[A-Z0-9]+-\\d+-[a-z0-9]+',
    flags: 'i',
  },
  {
    id: 'twilio-api-key',
    source: 'SK[0-9a-fA-F]{32}',
  },
  {
    id: 'sendgrid-api-token',
    // gitleaks: SG\.(?i)[a-z0-9=_\-\.]{66} → JS: explicit [a-zA-Z0-9…] class
    // (no flag — the SG. prefix itself stays case-sensitive, as upstream).
    source: '\\b(SG\\.[a-zA-Z0-9=_\\-.]{66})(?:[\\x60\'"\\s;]|\\\\[nr]|$)',
  },
  // — Dev tooling —
  {
    id: 'npm-access-token',
    source: '\\b(npm_[a-zA-Z0-9]{36})(?:[\\x60\'"\\s;]|\\\\[nr]|$)',
  },
  {
    // "pypi-AgEIcHlwaS5vcmc" is the fixed base64 header of PyPI tokens.
    id: 'pypi-upload-token',
    source: 'pypi-AgEIcHlwaS5vcmc[\\w-]{50,1000}',
  },
  {
    id: 'databricks-api-token',
    source: '\\b(dapi[a-f0-9]{32}(?:-\\d)?)(?:[\\x60\'"\\s;]|\\\\[nr]|$)',
  },
  {
    id: 'hashicorp-tf-api-token',
    // gitleaks: (?i)[a-z0-9]{14}\.(?-i:atlasv1)\.[a-z0-9\-_=]{60,70}
    // → JS: case-insensitive hex+alnum prefix, literal "atlasv1", case-insensitive suffix
    source: '[a-zA-Z0-9]{14}\\.atlasv1\\.[a-zA-Z0-9\\-_=]{60,70}',
  },
  {
    id: 'pulumi-api-token',
    source: '\\b(pul-[a-f0-9]{40})(?:[\\x60\'"\\s;]|\\\\[nr]|$)',
  },
  {
    id: 'postman-api-token',
    // gitleaks: PMAK-(?i)[a-f0-9]{24}\-[a-f0-9]{34} → JS: use [a-fA-F0-9]
    source:
      '\\b(PMAK-[a-fA-F0-9]{24}-[a-fA-F0-9]{34})(?:[\\x60\'"\\s;]|\\\\[nr]|$)',
  },
  // — Observability —
  {
    // Legacy Grafana API keys are base64 JSON starting with {"k":" ("eyJrIjoi").
    id: 'grafana-api-key',
    source:
      '\\b(eyJrIjoi[A-Za-z0-9+/]{70,400}={0,3})(?:[\\x60\'"\\s;]|\\\\[nr]|$)',
  },
  {
    id: 'grafana-cloud-api-token',
    source: '\\b(glc_[A-Za-z0-9+/]{32,400}={0,3})(?:[\\x60\'"\\s;]|\\\\[nr]|$)',
  },
  {
    id: 'grafana-service-account-token',
    source:
      '\\b(glsa_[A-Za-z0-9]{32}_[A-Fa-f0-9]{8})(?:[\\x60\'"\\s;]|\\\\[nr]|$)',
  },
  {
    id: 'sentry-user-token',
    source: '\\b(sntryu_[a-f0-9]{64})(?:[\\x60\'"\\s;]|\\\\[nr]|$)',
  },
  {
    // Org tokens embed a base64 JSON blob; the alternation matches the
    // "region_url" key at any of its three base64 phase alignments.
    id: 'sentry-org-token',
    source:
      '\\bsntrys_eyJpYXQiO[a-zA-Z0-9+/]{10,200}(?:LCJyZWdpb25fdXJs|InJlZ2lvbl91cmwi|cmVnaW9uX3VybCI6)[a-zA-Z0-9+/]{10,200}={0,2}_[a-zA-Z0-9+/]{43}',
  },
  // — Payment / commerce —
  {
    id: 'stripe-access-token',
    source:
      '\\b((?:sk|rk)_(?:test|live|prod)_[a-zA-Z0-9]{10,99})(?:[\\x60\'"\\s;]|\\\\[nr]|$)',
  },
  {
    id: 'shopify-access-token',
    source: 'shpat_[a-fA-F0-9]{32}',
  },
  {
    id: 'shopify-shared-secret',
    source: 'shpss_[a-fA-F0-9]{32}',
  },
  // — Crypto —
  {
    // PEM private key blocks (RSA/EC/OpenSSH/PGP, etc.); lazy body match
    // with a 64-char minimum to skip empty/truncated headers.
    id: 'private-key',
    source:
      '-----BEGIN[ A-Z0-9_-]{0,100}PRIVATE KEY(?: BLOCK)?-----[\\s\\S-]{64,}?-----END[ A-Z0-9_-]{0,100}PRIVATE KEY(?: BLOCK)?-----',
    flags: 'i',
  },
]
// Lazily compiled pattern cache — compile once on first scan.
let compiledRules: Array<{ id: string; re: RegExp }> | null = null

/**
 * Compile every SECRET_RULES entry into a RegExp, memoizing the result so
 * the (non-trivial) compilation cost is paid only on the first scan.
 */
function getCompiledRules(): Array<{ id: string; re: RegExp }> {
  if (!compiledRules) {
    const compiled: Array<{ id: string; re: RegExp }> = []
    for (const rule of SECRET_RULES) {
      compiled.push({ id: rule.id, re: new RegExp(rule.source, rule.flags) })
    }
    compiledRules = compiled
  }
  return compiledRules
}
/**
 * Convert a gitleaks rule ID (kebab-case) to a human-readable label.
 * e.g., "github-pat" → "GitHub PAT", "aws-access-token" → "AWS Access Token"
 */
function ruleIdToLabel(ruleId: string): string {
  // Words whose canonical capitalization differs from plain title case.
  const specialCase: Record<string, string> = {
    aws: 'AWS',
    gcp: 'GCP',
    api: 'API',
    pat: 'PAT',
    ad: 'AD',
    tf: 'TF',
    oauth: 'OAuth',
    npm: 'NPM',
    pypi: 'PyPI',
    jwt: 'JWT',
    github: 'GitHub',
    gitlab: 'GitLab',
    openai: 'OpenAI',
    digitalocean: 'DigitalOcean',
    huggingface: 'HuggingFace',
    hashicorp: 'HashiCorp',
    sendgrid: 'SendGrid',
  }
  const words: string[] = []
  for (const part of ruleId.split('-')) {
    words.push(specialCase[part] ?? capitalize(part))
  }
  return words.join(' ')
}
/**
 * Scan a string for potential secrets.
 *
 * Returns one match per rule that fired (deduplicated by rule ID). The
 * actual matched text is intentionally NOT returned — we never log or
 * display secret values.
 */
export function scanForSecrets(content: string): SecretMatch[] {
  const firedRuleIds = new Set<string>()
  const results: SecretMatch[] = []
  for (const { id, re } of getCompiledRules()) {
    // Skip rules that already produced a match (dedupe by rule ID).
    if (firedRuleIds.has(id)) {
      continue
    }
    if (!re.test(content)) {
      continue
    }
    firedRuleIds.add(id)
    results.push({ ruleId: id, label: ruleIdToLabel(id) })
  }
  return results
}
/**
 * Get a human-readable label for a gitleaks rule ID.
 * Falls back to kebab-to-Title conversion for unknown IDs.
 *
 * Thin public wrapper over the module-private ruleIdToLabel so callers
 * outside this module never deal with raw rule IDs in UI strings.
 */
export function getSecretLabel(ruleId: string): string {
  return ruleIdToLabel(ruleId)
}
/**
 * Redact any matched secrets in-place with [REDACTED].
 * Unlike scanForSecrets, this returns the content with spans replaced
 * so the surrounding text can still be written to disk safely.
 */
// Separate cache from scanForSecrets' — redaction needs the 'g' flag.
let redactRules: RegExp[] | null = null
export function redactSecrets(content: string): string {
  if (redactRules === null) {
    redactRules = SECRET_RULES.map(rule => {
      // Force the global flag (dedup'd in case a rule ever declares it).
      const flags = (rule.flags ?? '').replace('g', '') + 'g'
      return new RegExp(rule.source, flags)
    })
  }
  let result = content
  for (const re of redactRules) {
    result = result.replace(re, (match, firstGroup) => {
      // Only redact the captured group when one exists — several patterns
      // include boundary chars (space, quote, ;) outside the group that
      // must survive in the output. Groupless rules redact the full match.
      if (typeof firstGroup === 'string') {
        return match.replace(firstGroup, '[REDACTED]')
      }
      return '[REDACTED]'
    })
  }
  return result
}
@@ -0,0 +1,44 @@
import { feature } from 'bun:bundle'
/**
 * Check if a file write/edit to a team memory path contains secrets.
 * Returns an error message if secrets are detected, or null if safe.
 *
 * Called from FileWriteTool and FileEditTool validateInput so the model
 * cannot write secrets into team memory files (which sync to every
 * repository collaborator).
 *
 * Safe to import and call unconditionally — the feature('TEAMMEM') guard
 * below makes it a no-op when the build flag is off. The lazy require()s
 * keep secretScanner (and its runtime-assembled prefixes, see ANT_KEY_PFX)
 * out of builds that don't need it.
 */
export function checkTeamMemSecrets(
  filePath: string,
  content: string,
): string | null {
  if (!feature('TEAMMEM')) {
    return null
  }
  /* eslint-disable @typescript-eslint/no-require-imports */
  const { isTeamMemPath } =
    require('../../memdir/teamMemPaths.js') as typeof import('../../memdir/teamMemPaths.js')
  const { scanForSecrets } =
    require('./secretScanner.js') as typeof import('./secretScanner.js')
  /* eslint-enable @typescript-eslint/no-require-imports */
  if (!isTeamMemPath(filePath)) {
    return null
  }
  const matches = scanForSecrets(content)
  if (matches.length === 0) {
    return null
  }
  const labels = matches.map(m => m.label).join(', ')
  return (
    `Content contains potential secrets (${labels}) and cannot be written to team memory. ` +
    'Team memory is shared with all repository collaborators. ' +
    'Remove the sensitive content and try again.'
  )
}
+156
View File
@@ -0,0 +1,156 @@
/**
* Team Memory Sync Types
*
* Zod schemas and types for the repo-scoped team memory sync API.
* Based on the backend API contract from anthropic/anthropic#250711.
*/
import { z } from 'zod/v4'
import { lazySchema } from '../../utils/lazySchema.js'
/**
 * Content portion of team memory data - flat key-value storage.
 * Keys are file paths relative to the team memory directory (e.g. "MEMORY.md", "patterns.md").
 * Values are UTF-8 string content (typically Markdown).
 */
export const TeamMemoryContentSchema = lazySchema(() =>
  z.object({
    entries: z.record(z.string(), z.string()),
    // Per-key SHA-256 of entry content (`sha256:<hex>`). Added in
    // anthropic/anthropic#283027. Optional for forward-compat with older
    // server deployments; empty map when entries is empty.
    entryChecksums: z.record(z.string(), z.string()).optional(),
  }),
)
/**
 * Full response from GET /api/claude_code/team_memory
 *
 * `version` is the server-side revision counter used for optimistic
 * concurrency (412 on mismatch); `checksum` covers the whole content blob.
 */
export const TeamMemoryDataSchema = lazySchema(() =>
  z.object({
    organizationId: z.string(),
    repo: z.string(),
    version: z.number(),
    lastModified: z.string(), // ISO 8601 timestamp
    checksum: z.string(), // SHA256 with 'sha256:' prefix
    content: TeamMemoryContentSchema(),
  }),
)
/**
 * Structured 413 error body from the server (anthropic/anthropic#293258).
 * The server's RequestTooLargeException serializes error_code and the
 * extra_details dict flattened into error.details. We only model the
 * too-many-entries case; entry-too-large is handled via MAX_FILE_SIZE_BYTES
 * pre-check on the client side and would need a separate schema.
 */
export const TeamMemoryTooManyEntriesSchema = lazySchema(() =>
  z.object({
    error: z.object({
      details: z.object({
        // Discriminating literal — parse fails fast on any other 413 shape.
        error_code: z.literal('team_memory_too_many_entries'),
        max_entries: z.number().int().positive(),
        received_entries: z.number().int().positive(),
      }),
    }),
  }),
)
/** Parsed shape of a full team-memory GET response (see TeamMemoryDataSchema). */
export type TeamMemoryData = z.infer<ReturnType<typeof TeamMemoryDataSchema>>
/**
 * A file skipped during push because it contains a detected secret.
 * The path is relative to the team memory directory. Only the matched
 * gitleaks rule ID is recorded — never the secret value itself.
 */
export type SkippedSecretFile = {
  path: string
  /** Gitleaks rule ID (e.g., "github-pat", "aws-access-token") */
  ruleId: string
  /** Human-readable label derived from rule ID */
  label: string
}
/**
 * Result from fetching team memory.
 *
 * Exactly one of the outcome flags applies: `data` on a 200, `isEmpty`
 * on 404, `notModified` on 304; `error`/`errorType` on failure.
 */
export type TeamMemorySyncFetchResult = {
  success: boolean
  data?: TeamMemoryData
  isEmpty?: boolean // true if 404 (no data exists)
  notModified?: boolean // true if 304 (ETag matched, no changes)
  checksum?: string // ETag from response header
  error?: string
  skipRetry?: boolean // caller should not retry this failure
  errorType?: 'auth' | 'timeout' | 'network' | 'parse' | 'unknown'
  httpStatus?: number
}
/**
 * Lightweight metadata-only probe result (GET ?view=hashes).
 * Contains per-key checksums without entry bodies. Used to refresh
 * serverChecksums cheaply during 412 conflict resolution.
 */
export type TeamMemoryHashesResult = {
  success: boolean
  version?: number
  checksum?: string
  entryChecksums?: Record<string, string>
  error?: string
  errorType?: 'auth' | 'timeout' | 'network' | 'parse' | 'unknown'
  httpStatus?: number
}
/**
 * Result from uploading team memory with conflict info.
 *
 * `no_oauth` / `no_repo` are pre-request client-side failures and carry
 * no httpStatus.
 */
export type TeamMemorySyncPushResult = {
  success: boolean
  filesUploaded: number
  checksum?: string
  conflict?: boolean // true if 412 Precondition Failed
  error?: string
  /** Files skipped because they contain detected secrets (PSR M22174). */
  skippedSecrets?: SkippedSecretFile[]
  errorType?:
    | 'auth'
    | 'timeout'
    | 'network'
    | 'conflict'
    | 'unknown'
    | 'no_oauth'
    | 'no_repo'
  httpStatus?: number
}
/**
 * Result from uploading team memory (single raw upload, no merge layer).
 */
export type TeamMemorySyncUploadResult = {
  success: boolean
  checksum?: string
  lastModified?: string
  conflict?: boolean // true if 412 Precondition Failed
  error?: string
  errorType?: 'auth' | 'timeout' | 'network' | 'unknown'
  httpStatus?: number
  /**
   * Structured error_code from a parsed 413 body (anthropic/anthropic#293258).
   * Currently only 'team_memory_too_many_entries' is modelled; if the server
   * adds more (entry_too_large, total_bytes_exceeded) they'd extend this
   * union. Passed straight through to the tengu_team_mem_sync_push event
   * as a Datadog-filterable facet.
   */
  serverErrorCode?: 'team_memory_too_many_entries'
  /**
   * Server-enforced max_entries, populated when serverErrorCode is
   * team_memory_too_many_entries. Lets the caller cache the effective
   * (possibly per-org) limit for subsequent pushes.
   */
  serverMaxEntries?: number
  /**
   * How many entries the rejected push would have produced after merge.
   * Populated alongside serverMaxEntries.
   */
  serverReceivedEntries?: number
}
+387
View File
@@ -0,0 +1,387 @@
/**
* Team Memory File Watcher
*
* Watches the team memory directory for changes and triggers
* a debounced push to the server when files are modified.
* Performs an initial pull on startup, then starts a directory-level
* fs.watch so first-time writes to a fresh repo get picked up.
*/
import { feature } from 'bun:bundle'
import { type FSWatcher, watch } from 'fs'
import { mkdir, stat } from 'fs/promises'
import { join } from 'path'
import {
getTeamMemPath,
isTeamMemoryEnabled,
} from '../../memdir/teamMemPaths.js'
import { registerCleanup } from '../../utils/cleanupRegistry.js'
import { logForDebugging } from '../../utils/debug.js'
import { errorMessage } from '../../utils/errors.js'
import { getGithubRepo } from '../../utils/git.js'
import {
type AnalyticsMetadata_I_VERIFIED_THIS_IS_NOT_CODE_OR_FILEPATHS,
logEvent,
} from '../analytics/index.js'
import {
createSyncState,
isTeamMemorySyncAvailable,
pullTeamMemory,
pushTeamMemory,
type SyncState,
} from './index.js'
import type { TeamMemorySyncPushResult } from './types.js'
const DEBOUNCE_MS = 2000 // Wait 2s after last change before pushing
// ─── Watcher state ──────────────────────────────────────────
// Module-level singletons: one watcher per process.
let watcher: FSWatcher | null = null
let debounceTimer: ReturnType<typeof setTimeout> | null = null
let pushInProgress = false
// True while there are debounced-but-unpushed edits; drives the
// best-effort flush in stopTeamMemoryWatcher().
let hasPendingChanges = false
let currentPushPromise: Promise<void> | null = null
let watcherStarted = false
// Set after a push fails for a reason that can't self-heal on retry.
// Prevents watch events from other sessions' writes to the shared team
// dir driving an infinite retry loop (BQ Mar 14-16: one no_oauth device
// emitted 167K push events over 2.5 days). Cleared on unlink — file deletion
// is a recovery action for the too-many-entries case, and for no_oauth the
// suppression persisting until session restart is correct.
let pushSuppressedReason: string | null = null
/**
 * Permanent = retry without user action will fail the same way.
 * - no_oauth / no_repo: pre-request client checks, no status code
 * - 4xx except 409/429: client error (404 missing repo, 413 too many
 *   entries, 403 permission). 409 is a transient conflict — server state
 *   changed under us, a fresh push after next pull can succeed. 429 is a
 *   rate limit — watcher-driven backoff is fine.
 */
export function isPermanentFailure(r: TeamMemorySyncPushResult): boolean {
  const { errorType, httpStatus } = r
  if (errorType === 'no_oauth' || errorType === 'no_repo') {
    return true
  }
  if (httpStatus === undefined) {
    return false
  }
  const clientError = httpStatus >= 400 && httpStatus < 500
  const transient = httpStatus === 409 || httpStatus === 429
  return clientError && !transient
}
// Sync state owned by the watcher — shared across all sync operations.
// Null until startTeamMemoryWatcher() (or the test seeding helper) runs.
let syncState: SyncState | null = null
/**
 * Execute the push and track its lifecycle.
 * Push is read-only on disk (delta+probe, no merge writes), so no event
 * suppression is needed — edits arriving mid-push hit schedulePush() and
 * the debounce re-arms after this push completes.
 */
async function executePush(): Promise<void> {
  if (!syncState) {
    return
  }
  pushInProgress = true
  // Clear the pending flag BEFORE the push starts. Edits that land while
  // the push is in flight re-set it via schedulePush(), so the shutdown
  // flush in stopTeamMemoryWatcher() still sees them. (Clearing only on
  // success, after the await, wiped the flag for mid-push writes — if the
  // session ended before the re-armed debounce timer fired, those writes
  // were silently dropped.)
  hasPendingChanges = false
  try {
    const result = await pushTeamMemory(syncState)
    if (result.success && result.filesUploaded > 0) {
      logForDebugging(
        `team-memory-watcher: pushed ${result.filesUploaded} files`,
        { level: 'info' },
      )
    } else if (!result.success) {
      // Failed push: the edits are still on disk and unpushed — restore the
      // flag so a later retry/flush picks them up (suppression permitting).
      hasPendingChanges = true
      logForDebugging(`team-memory-watcher: push failed: ${result.error}`, {
        level: 'warn',
      })
      if (isPermanentFailure(result) && pushSuppressedReason === null) {
        pushSuppressedReason =
          result.httpStatus !== undefined
            ? `http_${result.httpStatus}`
            : (result.errorType ?? 'unknown')
        logForDebugging(
          `team-memory-watcher: suppressing retry until next unlink or session restart (${pushSuppressedReason})`,
          { level: 'warn' },
        )
        logEvent('tengu_team_mem_push_suppressed', {
          reason:
            pushSuppressedReason as AnalyticsMetadata_I_VERIFIED_THIS_IS_NOT_CODE_OR_FILEPATHS,
          ...(result.httpStatus && { status: result.httpStatus }),
        })
      }
    }
  } catch (e) {
    // Unexpected throw (pushTeamMemory normally reports via result.success).
    // Same as the failure branch: changes remain unpushed.
    hasPendingChanges = true
    logForDebugging(`team-memory-watcher: push error: ${errorMessage(e)}`, {
      level: 'warn',
    })
  } finally {
    pushInProgress = false
    currentPushPromise = null
  }
}
/**
 * Debounced push: waits for writes to settle, then pushes once.
 * No-op while retries are suppressed (see pushSuppressedReason).
 */
function schedulePush(): void {
  if (pushSuppressedReason !== null) {
    return
  }
  hasPendingChanges = true
  if (debounceTimer !== null) {
    clearTimeout(debounceTimer)
  }
  const onSettled = (): void => {
    if (pushInProgress) {
      // A push is already running — re-arm the debounce so these changes
      // go out in a follow-up push once it finishes.
      schedulePush()
    } else {
      currentPushPromise = executePush()
    }
  }
  debounceTimer = setTimeout(onSettled, DEBOUNCE_MS)
}
/**
 * Start watching the team memory directory for changes.
 *
 * Uses `fs.watch({recursive: true})` on the directory (not chokidar).
 * chokidar 4+ dropped fsevents, and Bun's `fs.watch` fallback uses kqueue,
 * which requires one open fd per watched file — with 500+ team memory files
 * that's 500+ permanently-held fds (confirmed via lsof + repro).
 *
 * `recursive: true` is required because team memory supports subdirs
 * (validateTeamMemKey, pushTeamMemory's walkDir). On macOS Bun uses
 * FSEvents for recursive — O(1) fds regardless of tree size (verified:
 * 2 fds for 60 files across 5 subdirs). On Linux inotify needs one watch
 * per directory — O(subdirs), still fine (team memory rarely nests).
 *
 * `fs.watch` on a directory doesn't distinguish add/change/unlink — all three
 * emit `rename`. To clear suppression on the too-many-entries recovery path
 * (user deletes files), we stat the filename on each event: ENOENT → treat as
 * unlink. For `no_oauth` suppression this is correct: no_oauth users don't
 * delete team memory files to recover, they restart with auth.
 *
 * Idempotent: the watcherStarted guard makes repeat calls no-ops.
 */
async function startFileWatcher(teamDir: string): Promise<void> {
  if (watcherStarted) {
    return
  }
  watcherStarted = true
  try {
    // pullTeamMemory returns early without creating the dir for fresh repos
    // with no server content (index.ts isEmpty path). mkdir with
    // recursive:true is idempotent — no existence check needed.
    await mkdir(teamDir, { recursive: true })
    watcher = watch(
      teamDir,
      { persistent: true, recursive: true },
      (_eventType, filename) => {
        // filename can be null on some platforms — fall back to a plain
        // schedule since we can't disambiguate the event.
        if (filename === null) {
          schedulePush()
          return
        }
        if (pushSuppressedReason !== null) {
          // Suppression is only cleared by unlink (recovery action for
          // too-many-entries). fs.watch doesn't distinguish unlink from
          // add/write — stat to disambiguate. ENOENT → file gone → clear.
          void stat(join(teamDir, filename)).catch(
            (err: NodeJS.ErrnoException) => {
              if (err.code !== 'ENOENT') return
              // Re-check: suppression may have been cleared while the
              // async stat was in flight.
              if (pushSuppressedReason !== null) {
                logForDebugging(
                  `team-memory-watcher: unlink cleared suppression (was: ${pushSuppressedReason})`,
                  { level: 'info' },
                )
                pushSuppressedReason = null
              }
              schedulePush()
            },
          )
          return
        }
        schedulePush()
      },
    )
    // Log-and-continue on watcher errors; a dead watcher still leaves the
    // explicit notifyTeamMemoryWrite() path functional.
    watcher.on('error', err => {
      logForDebugging(
        `team-memory-watcher: fs.watch error: ${errorMessage(err)}`,
        { level: 'warn' },
      )
    })
    logForDebugging(`team-memory-watcher: watching ${teamDir}`, {
      level: 'debug',
    })
  } catch (err) {
    // fs.watch throws synchronously on ENOENT (race: dir deleted between
    // mkdir and watch) or EACCES. watcherStarted is already true above,
    // so notifyTeamMemoryWrite's explicit schedulePush path still works.
    logForDebugging(
      `team-memory-watcher: failed to watch ${teamDir}: ${errorMessage(err)}`,
      { level: 'warn' },
    )
  }
  registerCleanup(async () => stopTeamMemoryWatcher())
}
/**
 * Start the team memory sync system.
 *
 * Returns early (before creating any state) if:
 * - TEAMMEM build flag is off
 * - team memory is disabled (isTeamMemoryEnabled)
 * - OAuth is not available (isTeamMemorySyncAvailable)
 * - the current repo has no github.com remote
 *
 * The early github.com check prevents a noisy failure mode where the
 * watcher starts, it fires on local edits, and every push/pull
 * logs `errorType: no_repo` forever. Team memory is GitHub-scoped on
 * the server side, so non-github.com remotes can never sync anyway.
 *
 * Pulls from server, then starts the file watcher unconditionally.
 * The watcher must start even when the server has no content yet
 * (fresh EAP repo) — otherwise Claude's first team-memory write
 * depends entirely on PostToolUse hooks firing notifyTeamMemoryWrite,
 * which is a chicken-and-egg: Claude's write rate is low enough that
 * a fresh partner can sit in the bootstrap dead zone for days.
 */
export async function startTeamMemoryWatcher(): Promise<void> {
  if (!feature('TEAMMEM')) {
    return
  }
  if (!isTeamMemoryEnabled() || !isTeamMemorySyncAvailable()) {
    return
  }
  const repoSlug = await getGithubRepo()
  if (!repoSlug) {
    logForDebugging(
      'team-memory-watcher: no github.com remote, skipping sync',
      { level: 'debug' },
    )
    return
  }
  syncState = createSyncState()
  // Initial pull from server (runs before the watcher starts, so its disk
  // writes won't trigger schedulePush)
  let pullSucceeded = false
  let filesPulled = 0
  let serverHadEntries = false
  try {
    const pullResult = await pullTeamMemory(syncState)
    pullSucceeded = pullResult.success
    serverHadEntries = pullResult.entryCount > 0
    if (pullResult.success && pullResult.filesWritten > 0) {
      filesPulled = pullResult.filesWritten
      logForDebugging(
        `team-memory-watcher: initial pull got ${pullResult.filesWritten} files`,
        { level: 'info' },
      )
    }
  } catch (e) {
    logForDebugging(
      `team-memory-watcher: initial pull failed: ${errorMessage(e)}`,
      { level: 'warn' },
    )
  }
  // Always start the watcher. Watching an empty dir is cheap,
  // and the alternative (lazy start on notifyTeamMemoryWrite) creates
  // a bootstrap dead zone for fresh repos.
  await startFileWatcher(getTeamMemPath())
  logEvent('tengu_team_mem_sync_started', {
    initial_pull_success: pullSucceeded,
    initial_files_pulled: filesPulled,
    // Kept for dashboard continuity; now always true when this event fires.
    watcher_started: true,
    server_has_content: serverHadEntries,
  })
}
/**
 * Call this when a team memory file is written (e.g. from PostToolUse hooks).
 * Schedules a push explicitly in case fs.watch misses the write —
 * a file written in the same tick the watcher starts may not fire an
 * event, and some platforms coalesce rapid successive writes.
 * If the watcher does fire, the debounce timer just resets.
 * No-op until startTeamMemoryWatcher() has initialized syncState.
 */
export async function notifyTeamMemoryWrite(): Promise<void> {
  if (syncState !== null) {
    schedulePush()
  }
}
/**
 * Stop the file watcher and flush pending changes.
 * Note: runs within the 2s graceful shutdown budget, so the flush
 * is best-effort — if the HTTP PUT doesn't complete in time,
 * process.exit() will kill it.
 */
export async function stopTeamMemoryWatcher(): Promise<void> {
  // Cancel any armed debounce first so nothing new starts mid-shutdown.
  if (debounceTimer !== null) {
    clearTimeout(debounceTimer)
    debounceTimer = null
  }
  const activeWatcher = watcher
  if (activeWatcher) {
    activeWatcher.close()
    watcher = null
  }
  // Let an in-flight push finish (errors are irrelevant during shutdown).
  const inFlight = currentPushPromise
  if (inFlight) {
    await inFlight.catch(() => {
      // Ignore errors during shutdown
    })
  }
  // Flush anything that was debounced but never pushed — unless retries
  // are suppressed for a permanent failure.
  if (hasPendingChanges && pushSuppressedReason === null && syncState) {
    try {
      await pushTeamMemory(syncState)
    } catch {
      // Best-effort — shutdown may kill this
    }
  }
}
/**
 * Test-only: reset module state and optionally seed syncState.
 * The feature('TEAMMEM') gate at the top of startTeamMemoryWatcher() is
 * always false in bun test, so tests can't set syncState through the normal
 * path. This helper lets tests drive notifyTeamMemoryWrite() /
 * stopTeamMemoryWatcher() directly.
 *
 * `skipWatcher: true` marks the watcher as already-started without actually
 * starting it. Tests that only exercise the schedulePush/flush path don't
 * need a real watcher.
 */
export function _resetWatcherStateForTesting(opts?: {
  syncState?: SyncState
  skipWatcher?: boolean
  pushSuppressedReason?: string | null
}): void {
  const {
    syncState: seededSyncState = null,
    skipWatcher = false,
    pushSuppressedReason: seededSuppression = null,
  } = opts ?? {}
  watcher = null
  debounceTimer = null
  pushInProgress = false
  hasPendingChanges = false
  currentPushPromise = null
  watcherStarted = skipWatcher
  pushSuppressedReason = seededSuppression
  syncState = seededSyncState
}
/**
 * Test-only: start the real fs.watch on a specified directory.
 * Used by the fd-count regression test — startTeamMemoryWatcher() is gated
 * by feature('TEAMMEM') which is false under bun test.
 *
 * Delegates directly to startFileWatcher, so the watcherStarted guard and
 * registerCleanup registration apply exactly as in production.
 */
export function _startFileWatcherForTesting(dir: string): Promise<void> {
  return startFileWatcher(dir)
}