chore: snapshot in-flight work (mcp graph refactor, native edit module, misc)

Co-Authored-By: Claude Opus 4.7 (1M context) <noreply@anthropic.com>
This commit is contained in:
Mikael Hugo 2026-05-02 08:31:44 +02:00
parent f4dd66d4ed
commit 5f52680285
53 changed files with 6354 additions and 2462 deletions

1
package-lock.json generated
View file

@ -16346,6 +16346,7 @@
"license": "MIT",
"dependencies": {
"@modelcontextprotocol/sdk": "^1.27.1",
"@singularity-forge/pi-agent-core": "^2.75.0",
"@singularity-forge/rpc-client": "^2.75.0",
"zod": "^4.0.0"
},

View file

@ -1,46 +1,47 @@
{
"name": "@singularity-forge/mcp-server",
"version": "2.75.0",
"description": "MCP server exposing sf-run orchestration tools for Claude Code, Cursor, and other MCP clients",
"license": "MIT",
"repository": {
"type": "git",
"url": "https://github.com/singularity-forge/sf-run.git",
"directory": "packages/mcp-server"
},
"publishConfig": {
"access": "public"
},
"type": "module",
"main": "./dist/index.js",
"types": "./dist/index.d.ts",
"exports": {
".": {
"types": "./dist/index.d.ts",
"import": "./dist/index.js"
}
},
"bin": {
"sf-mcp-server": "./dist/cli.js"
},
"scripts": {
"build": "tsc",
"test": "node --test dist/mcp-server.test.js"
},
"dependencies": {
"@modelcontextprotocol/sdk": "^1.27.1",
"@singularity-forge/rpc-client": "^2.75.0",
"zod": "^4.0.0"
},
"devDependencies": {
"@types/node": "^24.12.0",
"typescript": "^5.4.0"
},
"engines": {
"node": ">=24.15.0"
},
"files": [
"dist",
"!dist/**/*.test.*"
]
"name": "@singularity-forge/mcp-server",
"version": "2.75.0",
"description": "MCP server exposing sf-run orchestration tools for Claude Code, Cursor, and other MCP clients",
"license": "MIT",
"repository": {
"type": "git",
"url": "https://github.com/singularity-forge/sf-run.git",
"directory": "packages/mcp-server"
},
"publishConfig": {
"access": "public"
},
"type": "module",
"main": "./dist/index.js",
"types": "./dist/index.d.ts",
"exports": {
".": {
"types": "./dist/index.d.ts",
"import": "./dist/index.js"
}
},
"bin": {
"sf-mcp-server": "./dist/cli.js"
},
"scripts": {
"build": "tsc",
"test": "node --test dist/mcp-server.test.js"
},
"dependencies": {
"@modelcontextprotocol/sdk": "^1.27.1",
"@singularity-forge/pi-agent-core": "^2.75.0",
"@singularity-forge/rpc-client": "^2.75.0",
"zod": "^4.0.0"
},
"devDependencies": {
"@types/node": "^24.12.0",
"typescript": "^5.4.0"
},
"engines": {
"node": ">=24.15.0"
},
"files": [
"dist",
"!dist/**/*.test.*"
]
}

View file

@ -2,42 +2,52 @@
* @singularity-forge/mcp-server MCP server for SF orchestration and project state.
*/
export { SessionManager } from './session-manager.js';
export { createMcpServer } from './server.js';
export type {
SessionStatus,
ManagedSession,
ExecuteOptions,
PendingBlocker,
CostAccumulator,
} from './types.js';
export { MAX_EVENTS, INIT_TIMEOUT_MS } from './types.js';
// Path resolution utilities
export { resolveSFRoot } from './readers/paths.js';
// Read-only state readers (usable without a running session)
export { readProgress } from './readers/state.js';
export type { ProgressResult } from './readers/state.js';
export { readRoadmap } from './readers/roadmap.js';
export type { RoadmapResult, MilestoneInfo, SliceInfo, TaskInfo } from './readers/roadmap.js';
export { readHistory } from './readers/metrics.js';
export type { HistoryResult, MetricsUnit } from './readers/metrics.js';
export { readCaptures } from './readers/captures.js';
export type { CapturesResult, CaptureEntry } from './readers/captures.js';
export { readKnowledge } from './readers/knowledge.js';
export type { KnowledgeResult, KnowledgeEntry } from './readers/knowledge.js';
export { runDoctorLite } from './readers/doctor-lite.js';
export type { DoctorResult, DoctorIssue } from './readers/doctor-lite.js';
export { buildGraph, writeGraph, writeSnapshot, graphStatus, graphQuery, graphDiff } from './readers/graph.js';
export { resolveSFRoot } from "@singularity-forge/pi-agent-core";
export type { CaptureEntry, CapturesResult } from "./readers/captures.js";
export { readCaptures } from "./readers/captures.js";
export type { DoctorIssue, DoctorResult } from "./readers/doctor-lite.js";
export { runDoctorLite } from "./readers/doctor-lite.js";
export type {
NodeType,
EdgeType,
ConfidenceTier,
GraphNode,
GraphEdge,
KnowledgeGraph,
GraphStatusResult,
GraphQueryResult,
GraphDiffResult,
} from './readers/graph.js';
ConfidenceTier,
EdgeType,
GraphDiffResult,
GraphEdge,
GraphNode,
GraphQueryResult,
GraphStatusResult,
KnowledgeGraph,
NodeType,
} from "./readers/graph.js";
export {
buildGraph,
graphDiff,
graphQuery,
graphStatus,
writeGraph,
writeSnapshot,
} from "./readers/graph.js";
export type { KnowledgeEntry, KnowledgeResult } from "./readers/knowledge.js";
export { readKnowledge } from "./readers/knowledge.js";
export type { HistoryResult, MetricsUnit } from "./readers/metrics.js";
export { readHistory } from "./readers/metrics.js";
export type {
MilestoneInfo,
RoadmapResult,
SliceInfo,
TaskInfo,
} from "./readers/roadmap.js";
export { readRoadmap } from "./readers/roadmap.js";
export type { ProgressResult } from "./readers/state.js";
// Read-only state readers (usable without a running session)
export { readProgress } from "./readers/state.js";
export { createMcpServer } from "./server.js";
export { SessionManager } from "./session-manager.js";
export type {
CostAccumulator,
ExecuteOptions,
ManagedSession,
PendingBlocker,
SessionStatus,
} from "./types.js";
export { INIT_TIMEOUT_MS, MAX_EVENTS } from "./types.js";

File diff suppressed because it is too large Load diff

View file

@ -1,855 +1,29 @@
// SF MCP Server — knowledge graph reader
// Copyright (c) 2026 Jeremy McSpadden <jeremy@fluxlabs.net>
/**
* Knowledge Graph for SF projects.
* MCP graph reader compatibility exports.
*
* Parses .sf/ artifacts (STATE.md, milestone ROADMAPs, slice PLANs,
* KNOWLEDGE.md) into a graph of nodes and edges. Parse errors in any
single artifact are caught and never propagate — the artifact is skipped
* and the rest of the graph is returned.
* Purpose: keep MCP as a transport wrapper over the SF project graph while the
* core graph implementation lives in `@singularity-forge/pi-agent-core`.
*
* writeGraph() is atomic: writes to graph.tmp.json then renames to graph.json.
* Consumer: MCP `sf_graph` tool and older imports from `readers/graph.js`.
*/
import { readFileSync, writeFileSync, renameSync, existsSync, mkdirSync } from 'node:fs';
import { join, resolve } from 'node:path';
import { resolveSFRoot, findMilestoneIds, resolveMilestoneDir, findSliceIds, resolveSliceDir } from './paths.js';
// ---------------------------------------------------------------------------
// Types
// ---------------------------------------------------------------------------
/** Kind of entity a graph node represents. */
export type NodeType =
  | 'milestone'
  | 'slice'
  | 'task'
  | 'rule'
  | 'pattern'
  | 'lesson'
  | 'concept'
  | 'decision';
/** Kind of relationship a directed edge encodes. */
export type EdgeType =
  | 'contains'
  | 'depends_on'
  | 'relates_to'
  | 'implements';
/** Parser confidence in an extracted node or edge. */
export type ConfidenceTier = 'EXTRACTED' | 'INFERRED' | 'AMBIGUOUS';
/** One node in the knowledge graph. */
export interface GraphNode {
  id: string; // unique, e.g. "milestone:M1" or "task:M1:S1:T01"
  label: string; // human-readable display text
  type: NodeType;
  description?: string;
  confidence: ConfidenceTier;
  sourceFile?: string; // .sf/-relative path the node was parsed from
}
/** Directed edge between two node ids. */
export interface GraphEdge {
  from: string;
  to: string;
  type: EdgeType;
  confidence: ConfidenceTier;
}
/** The fully parsed graph plus its build timestamp (ISO 8601). */
export interface KnowledgeGraph {
  nodes: GraphNode[];
  edges: GraphEdge[];
  builtAt: string;
}
/** Result of graphStatus(); optional fields are set only when graph.json exists and parses. */
export interface GraphStatusResult {
  exists: boolean;
  lastBuild?: string;
  nodeCount?: number;
  edgeCount?: number;
  stale?: boolean; // true when the build is older than 24 hours
  ageHours?: number;
}
/** Result of graphQuery(): the budget-trimmed subgraph matching a search term. */
export interface GraphQueryResult {
  nodes: GraphNode[];
  edges: GraphEdge[];
  term: string;
  budget: number;
}
/** Result of graphDiff(): node/edge ids added, removed, or changed since the last snapshot. */
export interface GraphDiffResult {
  nodes: {
    added: string[];
    removed: string[];
    changed: string[];
  };
  edges: {
    added: string[];
    removed: string[];
  };
}
// ---------------------------------------------------------------------------
// Graph file paths
// ---------------------------------------------------------------------------
/** Directory holding all graph artifacts: <sfRoot>/graphs. */
function graphsDir(sfRoot: string): string {
  return join(sfRoot, 'graphs');
}
/** Canonical graph file: <sfRoot>/graphs/graph.json. */
function graphJsonPath(sfRoot: string): string {
  return join(graphsDir(sfRoot), 'graph.json');
}
/** Temp file used by writeGraph() for its atomic write-then-rename. */
function graphTmpPath(sfRoot: string): string {
  return join(graphsDir(sfRoot), 'graph.tmp.json');
}
/** Snapshot of the previous build, consumed by graphDiff(). */
function snapshotPath(sfRoot: string): string {
  return join(graphsDir(sfRoot), '.last-build-snapshot.json');
}
// ---------------------------------------------------------------------------
// Parsers — each returns nodes/edges and never throws
// ---------------------------------------------------------------------------
/**
 * Parse STATE.md for the active milestone and the current phase concept.
 * Missing or unreadable STATE.md is silently skipped.
 */
function parseStateFile(sfRoot: string, nodes: GraphNode[], _edges: GraphEdge[]): void {
  const statePath = join(sfRoot, 'STATE.md');
  if (!existsSync(statePath)) return;
  let raw: string;
  try {
    raw = readFileSync(statePath, 'utf-8');
  } catch {
    return;
  }
  // Active milestone line, e.g. "**Active Milestone:** M1: Title"
  const milestone = raw.match(/\*\*Active Milestone:\*\*\s+([A-Z]\d+):\s+(.+)/i);
  if (milestone) {
    const milestoneId = milestone[1];
    const title = milestone[2];
    const id = `milestone:${milestoneId}`;
    const alreadyPresent = nodes.some((n) => n.id === id);
    if (!alreadyPresent) {
      nodes.push({
        id,
        label: `${milestoneId}: ${title.trim()}`,
        type: 'milestone',
        description: `Active milestone: ${milestoneId}`,
        confidence: 'EXTRACTED',
        sourceFile: 'STATE.md',
      });
    }
  }
  // The current phase becomes a concept node.
  const phaseLine = raw.match(/\*\*Phase:\*\*\s+(\S+)/i);
  if (phaseLine) {
    const phase = phaseLine[1].trim();
    nodes.push({
      id: `concept:phase:${phase}`,
      label: `Phase: ${phase}`,
      type: 'concept',
      confidence: 'EXTRACTED',
      sourceFile: 'STATE.md',
    });
  }
}
/**
* Parse KNOWLEDGE.md for rules, patterns, and lessons.
*/
function parseKnowledgeFile(sfRoot: string, nodes: GraphNode[], _edges: GraphEdge[]): void {
const knowledgePath = join(sfRoot, 'KNOWLEDGE.md');
if (!existsSync(knowledgePath)) return;
let content: string;
try {
content = readFileSync(knowledgePath, 'utf-8');
} catch {
return;
}
// Parse Rules table
const rulesMatch = content.match(/## Rules\s*\n([\s\S]*?)(?=\n## |$)/i);
if (rulesMatch) {
for (const line of rulesMatch[1].split('\n')) {
if (!line.includes('|')) continue;
const cells = line.split('|').map((c) => c.trim()).filter(Boolean);
if (cells.length < 3) continue;
if (cells[0].startsWith('#') || cells[0].startsWith('-')) continue;
const id = cells[0];
if (!/^K\d+$/i.test(id)) continue;
nodes.push({
id: `rule:${id}`,
label: id,
type: 'rule',
description: cells[2] ?? '',
confidence: 'EXTRACTED',
sourceFile: 'KNOWLEDGE.md',
});
}
}
// Parse Patterns table
const patternsMatch = content.match(/## Patterns\s*\n([\s\S]*?)(?=\n## |$)/i);
if (patternsMatch) {
for (const line of patternsMatch[1].split('\n')) {
if (!line.includes('|')) continue;
const cells = line.split('|').map((c) => c.trim()).filter(Boolean);
if (cells.length < 2) continue;
if (cells[0].startsWith('#') || cells[0].startsWith('-')) continue;
const id = cells[0];
if (!/^P\d+$/i.test(id)) continue;
nodes.push({
id: `pattern:${id}`,
label: id,
type: 'pattern',
description: cells[1] ?? '',
confidence: 'EXTRACTED',
sourceFile: 'KNOWLEDGE.md',
});
}
}
// Parse Lessons Learned table
const lessonsMatch = content.match(/## Lessons Learned\s*\n([\s\S]*?)(?=\n## |$)/i);
if (lessonsMatch) {
for (const line of lessonsMatch[1].split('\n')) {
if (!line.includes('|')) continue;
const cells = line.split('|').map((c) => c.trim()).filter(Boolean);
if (cells.length < 2) continue;
if (cells[0].startsWith('#') || cells[0].startsWith('-')) continue;
const id = cells[0];
if (!/^L\d+$/i.test(id)) continue;
nodes.push({
id: `lesson:${id}`,
label: id,
type: 'lesson',
description: cells[1] ?? '',
confidence: 'EXTRACTED',
sourceFile: 'KNOWLEDGE.md',
});
}
}
}
/**
* Parse milestone ROADMAP.md files for milestones and slices.
*/
function parseMilestoneFiles(
sfRoot: string,
nodes: GraphNode[],
edges: GraphEdge[],
): void {
const milestoneIds = findMilestoneIds(sfRoot);
for (const milestoneId of milestoneIds) {
try {
parseSingleMilestone(sfRoot, milestoneId, nodes, edges);
} catch {
// Skip this milestone on any error
}
}
}
/**
 * Add one milestone node (plus its slices) to the graph.
 *
 * Reads <milestoneDir>/<id>-ROADMAP.md for the milestone title when present;
 * falls back to the bare milestone id. Each slice is parsed independently so
 * a failure in one slice does not drop the rest.
 */
function parseSingleMilestone(
  sfRoot: string,
  milestoneId: string,
  nodes: GraphNode[],
  edges: GraphEdge[],
): void {
  const mDir = resolveMilestoneDir(sfRoot, milestoneId);
  if (!mDir) return;
  const milestoneNodeId = `milestone:${milestoneId}`;
  // Try to read the roadmap file
  const roadmapPath = join(mDir, `${milestoneId}-ROADMAP.md`);
  let roadmapContent: string | null = null;
  if (existsSync(roadmapPath)) {
    try {
      roadmapContent = readFileSync(roadmapPath, 'utf-8');
    } catch {
      // Skip — the title falls back to the milestone id below
    }
  }
  // Extract milestone title from roadmap (a "# M1: Title" heading)
  let milestoneTitle = milestoneId;
  if (roadmapContent) {
    const titleMatch = roadmapContent.match(/^#\s+[A-Z]\d+:\s+(.+)/m);
    if (titleMatch) milestoneTitle = `${milestoneId}: ${titleMatch[1].trim()}`;
  }
  // Ensure milestone node exists (STATE.md parsing may have added it already)
  if (!nodes.some((n) => n.id === milestoneNodeId)) {
    nodes.push({
      id: milestoneNodeId,
      label: milestoneTitle,
      type: 'milestone',
      confidence: 'EXTRACTED',
      sourceFile: roadmapContent ? `milestones/${milestoneId}/${milestoneId}-ROADMAP.md` : undefined,
    });
  }
  // Parse slices from roadmap table or filesystem
  const sliceIds = findSliceIds(sfRoot, milestoneId);
  for (const sliceId of sliceIds) {
    try {
      parseSingleSlice(sfRoot, milestoneId, sliceId, milestoneNodeId, nodes, edges);
    } catch {
      // Skip this slice on any error
    }
  }
}
/**
 * Add one slice node under a milestone, plus its tasks.
 *
 * Reads <sliceDir>/<sliceId>-PLAN.md for the slice title ("# S1: Title"
 * heading); falls back to "<milestoneId>/<sliceId>". Always emits a
 * milestone→slice "contains" edge for the slice.
 */
function parseSingleSlice(
  sfRoot: string,
  milestoneId: string,
  sliceId: string,
  milestoneNodeId: string,
  nodes: GraphNode[],
  edges: GraphEdge[],
): void {
  const sDir = resolveSliceDir(sfRoot, milestoneId, sliceId);
  if (!sDir) return;
  const sliceNodeId = `slice:${milestoneId}:${sliceId}`;
  // Try to read the slice plan
  const planPath = join(sDir, `${sliceId}-PLAN.md`);
  let sliceTitle = `${milestoneId}/${sliceId}`;
  let planContent: string | null = null;
  if (existsSync(planPath)) {
    try {
      planContent = readFileSync(planPath, 'utf-8');
      const titleMatch = planContent.match(/^#\s+[A-Z]\d+:\s+(.+)/m);
      if (titleMatch) sliceTitle = `${sliceId}: ${titleMatch[1].trim()}`;
    } catch {
      // Use default title
    }
  }
  nodes.push({
    id: sliceNodeId,
    label: sliceTitle,
    type: 'slice',
    confidence: 'EXTRACTED',
    sourceFile: planContent ? `milestones/${milestoneId}/slices/${sliceId}/${sliceId}-PLAN.md` : undefined,
  });
  // Edge: milestone contains slice
  edges.push({
    from: milestoneNodeId,
    to: sliceNodeId,
    type: 'contains',
    confidence: 'EXTRACTED',
  });
  // Parse tasks from the slice plan
  if (planContent) {
    parseTasksFromPlan(planContent, milestoneId, sliceId, sliceNodeId, nodes, edges);
  }
}
/**
 * Extract task checkboxes from a slice PLAN.md and attach them to the slice.
 *
 * Matches lines like: "- [ ] **T01: Title** — description". Each match
 * becomes a task node plus a slice→task "contains" edge; the checkbox's
 * checked state is not recorded.
 *
 * Fix: the checkbox class now accepts "[X]" (uppercase), which is valid
 * GFM for a checked box; previously such completed tasks were dropped.
 */
function parseTasksFromPlan(
  content: string,
  milestoneId: string,
  sliceId: string,
  sliceNodeId: string,
  nodes: GraphNode[],
  edges: GraphEdge[],
): void {
  // Match lines like: - [ ] **T01: Title** — description
  const taskPattern = /[-*]\s+\[[ xX]\]\s+\*\*(T\d+):\s*([^*]+)\*\*/g;
  let match: RegExpExecArray | null;
  while ((match = taskPattern.exec(content)) !== null) {
    const [, taskId, taskTitle] = match;
    const taskNodeId = `task:${milestoneId}:${sliceId}:${taskId}`;
    nodes.push({
      id: taskNodeId,
      label: `${taskId}: ${taskTitle.trim()}`,
      type: 'task',
      confidence: 'EXTRACTED',
    });
    edges.push({
      from: sliceNodeId,
      to: taskNodeId,
      type: 'contains',
      confidence: 'EXTRACTED',
    });
  }
}
// ---------------------------------------------------------------------------
// LEARNINGS.md parser
// ---------------------------------------------------------------------------
/**
* Parse all *-LEARNINGS.md files found in milestone directories.
* Extracts Decisions, Lessons, Patterns, and Surprises as typed graph nodes.
* Surprises are mapped to the 'lesson' NodeType (no distinct type exists).
* Parse errors per file are caught the file is skipped, never rethrows.
*/
function parseLearningsFiles(sfRoot: string, nodes: GraphNode[], edges: GraphEdge[]): void {
const milestoneIds = findMilestoneIds(sfRoot);
for (const milestoneId of milestoneIds) {
try {
parseSingleLearningsFile(sfRoot, milestoneId, nodes, edges);
} catch {
// Skip this milestone's LEARNINGS.md on any error
}
}
}
/**
 * Parse one milestone's <id>-LEARNINGS.md into typed learning nodes.
 *
 * Recognized "## <Section>" headings and their mapping:
 *   Decisions → decision, Lessons → lesson, Patterns → pattern,
 *   Surprises → lesson (id-prefixed "surprise"; no distinct NodeType exists).
 * Missing file or read failure is silently skipped.
 */
function parseSingleLearningsFile(
  sfRoot: string,
  milestoneId: string,
  nodes: GraphNode[],
  edges: GraphEdge[],
): void {
  const mDir = resolveMilestoneDir(sfRoot, milestoneId);
  if (!mDir) return;
  const learningsPath = join(mDir, `${milestoneId}-LEARNINGS.md`);
  if (!existsSync(learningsPath)) return;
  let content: string;
  try {
    content = readFileSync(learningsPath, 'utf-8');
  } catch {
    return;
  }
  // Strip YAML frontmatter if present
  const withoutFrontmatter = content.replace(/^---[\s\S]*?---\n?/, '');
  const milestoneNodeId = `milestone:${milestoneId}`;
  const sourceFile = `milestones/${milestoneId}/${milestoneId}-LEARNINGS.md`;
  // Parse each section: [sectionName, nodeType, idPrefix]
  const sections: Array<[string, NodeType, string]> = [
    ['Decisions', 'decision', 'decision'],
    ['Lessons', 'lesson', 'lesson'],
    ['Patterns', 'pattern', 'pattern'],
    ['Surprises', 'lesson', 'surprise'],
  ];
  for (const [sectionName, nodeType, idPrefix] of sections) {
    // Section body runs until the next "## " heading or end of file.
    const sectionMatch = withoutFrontmatter.match(
      new RegExp(`##\\s+${sectionName}\\s*\\n([\\s\\S]*?)(?=\\n##\\s|$)`, 'i'),
    );
    if (!sectionMatch) continue;
    const sectionContent = sectionMatch[1];
    parseLearningsSection(
      sectionContent,
      milestoneId,
      idPrefix,
      nodeType,
      milestoneNodeId,
      sourceFile,
      nodes,
      edges,
    );
  }
}
/**
 * Turn the bullet items of one LEARNINGS.md section into graph nodes.
 *
 * Each item is a "- " / "* " bullet, optionally followed by an indented
 * "Source: ..." attribution line and/or indented continuation lines.
 * Node ids are sequential per section: "<idPrefix>:<milestoneId>:<n>".
 * Every node also gets a milestone→node "relates_to" edge.
 */
function parseLearningsSection(
  sectionContent: string,
  milestoneId: string,
  idPrefix: string,
  nodeType: NodeType,
  milestoneNodeId: string,
  sourceFile: string,
  nodes: GraphNode[],
  edges: GraphEdge[],
): void {
  // Each item is a bullet line starting with "- " followed by optional
  // indented "Source: ..." line.
  // We collect bullet items and their associated source attribution.
  const lines = sectionContent.split('\n');
  let itemIndex = 0;
  let currentText: string | null = null;
  let currentSource: string | null = null;
  // Emit the in-progress item (if any) as node + edge, then reset state.
  const flushItem = (): void => {
    if (!currentText) return;
    itemIndex += 1;
    const nodeId = `${idPrefix}:${milestoneId}:${itemIndex}`;
    const description = currentSource ? `${currentSource}` : undefined;
    nodes.push({
      id: nodeId,
      label: currentText,
      type: nodeType,
      description,
      confidence: 'EXTRACTED',
      sourceFile,
    });
    // Edge: milestone relates_to this learning node
    edges.push({
      from: milestoneNodeId,
      to: nodeId,
      type: 'relates_to',
      confidence: 'EXTRACTED',
    });
    currentText = null;
    currentSource = null;
  };
  for (const line of lines) {
    const bulletMatch = line.match(/^[-*]\s+(.+)/);
    if (bulletMatch) {
      // A new bullet closes the previous item.
      flushItem();
      currentText = bulletMatch[1].trim();
      continue;
    }
    // Indented source attribution: "  Source: ..."
    const sourceMatch = line.match(/^\s+Source:\s+(.+)/i);
    if (sourceMatch && currentText !== null) {
      currentSource = `Source: ${sourceMatch[1].trim()}`;
      continue;
    }
    // Continuation of current item text (indented non-source line);
    // only applies before a Source line has been seen for this item.
    const continuationMatch = line.match(/^\s{2,}(.+)/);
    if (continuationMatch && currentText !== null && currentSource === null) {
      currentText += ' ' + continuationMatch[1].trim();
    }
  }
  flushItem();
}
// ---------------------------------------------------------------------------
// buildGraph
// ---------------------------------------------------------------------------
/**
 * Build a KnowledgeGraph by parsing all .sf/ artifacts.
 *
 * Parse errors in any single artifact are caught — the artifact is skipped
 * and never causes buildGraph() to throw.
 */
export async function buildGraph(projectDir: string): Promise<KnowledgeGraph> {
  const sfRoot = resolveSFRoot(resolve(projectDir));
  const nodes: GraphNode[] = [];
  const edges: GraphEdge[] = [];
  // Each parser is wrapped so a crash in one never stops the others.
  const parsers: Array<(g: string, n: GraphNode[], e: GraphEdge[]) => void> = [
    parseStateFile,
    parseKnowledgeFile,
    parseMilestoneFiles,
    parseLearningsFiles,
  ];
  for (const parser of parsers) {
    try {
      parser(sfRoot, nodes, edges);
    } catch {
      // Parsing error — record it as an AMBIGUOUS concept node and move on.
      nodes.push({
        id: `error:${parser.name}:${Date.now()}`,
        label: `Parse error in ${parser.name}`,
        type: 'concept',
        confidence: 'AMBIGUOUS',
      });
    }
  }
  // Deduplicate nodes by id, keeping the first occurrence of each.
  const seenIds = new Set<string>();
  const uniqueNodes: GraphNode[] = [];
  for (const node of nodes) {
    if (seenIds.has(node.id)) continue;
    seenIds.add(node.id);
    uniqueNodes.push(node);
  }
  return { nodes: uniqueNodes, edges, builtAt: new Date().toISOString() };
}
// ---------------------------------------------------------------------------
// writeGraph — atomic write via tmp + rename
// ---------------------------------------------------------------------------
/**
 * Write the graph to .sf/graphs/graph.json atomically.
 *
 * Writes to graph.tmp.json first, then renames to graph.json, so readers
 * never observe a half-written file. Creates the graphs/ directory if it
 * does not exist.
 */
export async function writeGraph(sfRoot: string, graph: KnowledgeGraph): Promise<void> {
  mkdirSync(graphsDir(sfRoot), { recursive: true });
  const serialized = JSON.stringify(graph, null, 2);
  const tmpPath = graphTmpPath(sfRoot);
  writeFileSync(tmpPath, serialized, 'utf-8');
  renameSync(tmpPath, graphJsonPath(sfRoot));
}
// ---------------------------------------------------------------------------
// writeSnapshot
// ---------------------------------------------------------------------------
/**
 * Copy the current graph.json to .last-build-snapshot.json.
 * Adds a snapshotAt timestamp to the copy.
 *
 * No-ops when graph.json is missing; a corrupt graph.json is also a
 * silent no-op, leaving any previous snapshot untouched.
 */
export async function writeSnapshot(sfRoot: string): Promise<void> {
  const src = graphJsonPath(sfRoot);
  if (!existsSync(src)) return;
  const dir = graphsDir(sfRoot);
  mkdirSync(dir, { recursive: true });
  const raw = readFileSync(src, 'utf-8');
  let graph: KnowledgeGraph;
  try {
    graph = JSON.parse(raw) as KnowledgeGraph;
  } catch {
    // Unparseable graph.json — keep the previous snapshot rather than overwrite it.
    return;
  }
  const snapshot = { ...graph, snapshotAt: new Date().toISOString() };
  writeFileSync(snapshotPath(sfRoot), JSON.stringify(snapshot, null, 2), 'utf-8');
}
// ---------------------------------------------------------------------------
// graphStatus
// ---------------------------------------------------------------------------
/**
 * Return status of the graph: whether it exists, its age, and whether it is stale.
 * Stale means builtAt is older than 24 hours.
 */
export async function graphStatus(projectDir: string): Promise<GraphStatusResult> {
  const sfRoot = resolveSFRoot(resolve(projectDir));
  const graphPath = graphJsonPath(sfRoot);
  if (!existsSync(graphPath)) return { exists: false };
  try {
    const graph = JSON.parse(readFileSync(graphPath, 'utf-8')) as KnowledgeGraph;
    const ageHours = (Date.now() - new Date(graph.builtAt).getTime()) / (1000 * 60 * 60);
    return {
      exists: true,
      lastBuild: graph.builtAt,
      nodeCount: graph.nodes.length,
      edgeCount: graph.edges.length,
      stale: ageHours > 24,
      ageHours,
    };
  } catch {
    // An unreadable/corrupt graph.json is treated the same as no graph at all.
    return { exists: false };
  }
}
// ---------------------------------------------------------------------------
// applyBudget — trim edges to stay within token budget
// ---------------------------------------------------------------------------
/**
 * Given a set of seed node IDs and the full graph, apply BFS to collect
 * reachable nodes and edges. Trims AMBIGUOUS edges first, then INFERRED,
 * stopping when the estimated token count drops within budget.
 *
 * Budget is a rough token estimate: 1 node ≈ 20 tokens, 1 edge ≈ 10 tokens.
 * Note: the BFS follows outgoing edges only (from → to).
 */
function applyBudget(
  graph: KnowledgeGraph,
  seedIds: Set<string>,
  budget: number,
): { nodes: GraphNode[]; edges: GraphEdge[] } {
  // BFS to collect reachable nodes (start from seeds)
  const reachable = new Set<string>(seedIds);
  const queue = [...seedIds];
  while (queue.length > 0) {
    const current = queue.shift()!;
    for (const edge of graph.edges) {
      if (edge.from === current && !reachable.has(edge.to)) {
        reachable.add(edge.to);
        queue.push(edge.to);
      }
    }
  }
  let resultNodes = graph.nodes.filter((n) => reachable.has(n.id));
  let resultEdges = graph.edges.filter(
    (e) => reachable.has(e.from) && reachable.has(e.to),
  );
  // Estimate tokens and trim if over budget.
  // Trim AMBIGUOUS edges first, then INFERRED.
  const estimate = (): number =>
    resultNodes.length * 20 + resultEdges.length * 10;
  if (estimate() > budget) {
    resultEdges = resultEdges.filter((e) => e.confidence !== 'AMBIGUOUS');
  }
  if (estimate() > budget) {
    resultEdges = resultEdges.filter((e) => e.confidence !== 'INFERRED');
  }
  if (estimate() > budget) {
    // Hard trim — keep only seed nodes and their EXTRACTED edges
    const seedNodes = resultNodes.filter((n) => seedIds.has(n.id));
    const seedEdges = resultEdges.filter(
      (e) => seedIds.has(e.from) && e.confidence === 'EXTRACTED',
    );
    return { nodes: seedNodes, edges: seedEdges };
  }
  return { nodes: resultNodes, edges: resultEdges };
}
// ---------------------------------------------------------------------------
// graphQuery
// ---------------------------------------------------------------------------
/**
* Query the graph for nodes matching a term (case-insensitive on label + description).
* BFS from seed nodes, applying budget trimming.
*
* Reads from the pre-built graph.json. Falls back to an empty result if no
* graph exists.
*/
export async function graphQuery(
projectDir: string,
term: string,
budget = 4000,
): Promise<GraphQueryResult> {
const sfRoot = resolveSFRoot(resolve(projectDir));
const graphPath = graphJsonPath(sfRoot);
if (!existsSync(graphPath)) {
return { nodes: [], edges: [], term, budget };
}
let graph: KnowledgeGraph;
try {
const raw = readFileSync(graphPath, 'utf-8');
graph = JSON.parse(raw) as KnowledgeGraph;
} catch {
return { nodes: [], edges: [], term, budget };
}
if (!term || term.trim() === '') {
// Empty term — return empty result
return { nodes: [], edges: [], term, budget };
}
const lower = term.toLowerCase();
// Find seed nodes that match the term
const seedIds = new Set<string>(
graph.nodes
.filter((n) => {
const labelMatch = n.label.toLowerCase().includes(lower);
const descMatch = n.description?.toLowerCase().includes(lower) ?? false;
return labelMatch || descMatch;
})
.map((n) => n.id),
);
if (seedIds.size === 0) {
return { nodes: [], edges: [], term, budget };
}
const result = applyBudget(graph, seedIds, budget);
return { ...result, term, budget };
}
// ---------------------------------------------------------------------------
// graphDiff
// ---------------------------------------------------------------------------
/**
* Compare the current graph.json with .last-build-snapshot.json.
* Returns added/removed/changed nodes and added/removed edges.
*
* If no snapshot exists, returns empty diff arrays.
*/
export async function graphDiff(projectDir: string): Promise<GraphDiffResult> {
const sfRoot = resolveSFRoot(resolve(projectDir));
const empty: GraphDiffResult = {
nodes: { added: [], removed: [], changed: [] },
edges: { added: [], removed: [] },
};
const graphPath = graphJsonPath(sfRoot);
const snap = snapshotPath(sfRoot);
if (!existsSync(graphPath)) return empty;
if (!existsSync(snap)) return empty;
let current: KnowledgeGraph;
let snapshot: KnowledgeGraph;
try {
current = JSON.parse(readFileSync(graphPath, 'utf-8')) as KnowledgeGraph;
} catch {
return empty;
}
try {
snapshot = JSON.parse(readFileSync(snap, 'utf-8')) as KnowledgeGraph;
} catch {
return empty;
}
const currentNodeIds = new Set(current.nodes.map((n) => n.id));
const snapshotNodeIds = new Set(snapshot.nodes.map((n) => n.id));
const added = current.nodes.filter((n) => !snapshotNodeIds.has(n.id)).map((n) => n.id);
const removed = snapshot.nodes.filter((n) => !currentNodeIds.has(n.id)).map((n) => n.id);
// Changed: same id but different label or description
const snapshotNodeMap = new Map(snapshot.nodes.map((n) => [n.id, n]));
const changed = current.nodes
.filter((n) => {
const snap = snapshotNodeMap.get(n.id);
if (!snap) return false;
return n.label !== snap.label || n.description !== snap.description;
})
.map((n) => n.id);
// Edges — compare by string key "from->to:type"
const edgeKey = (e: GraphEdge): string => `${e.from}->${e.to}:${e.type}`;
const currentEdgeKeys = new Set(current.edges.map(edgeKey));
const snapshotEdgeKeys = new Set(snapshot.edges.map(edgeKey));
const edgesAdded = current.edges.filter((e) => !snapshotEdgeKeys.has(edgeKey(e))).map(edgeKey);
const edgesRemoved = snapshot.edges.filter((e) => !currentEdgeKeys.has(edgeKey(e))).map(edgeKey);
return {
nodes: { added, removed, changed },
edges: { added: edgesAdded, removed: edgesRemoved },
};
}
export type {
ConfidenceTier,
EdgeType,
GraphDiffResult,
GraphEdge,
GraphNode,
GraphQueryResult,
GraphStatusResult,
KnowledgeGraph,
NodeType,
} from "@singularity-forge/pi-agent-core";
export {
buildGraph,
graphDiff,
graphQuery,
graphStatus,
resolveSFRoot,
writeGraph,
writeSnapshot,
} from "@singularity-forge/pi-agent-core";

View file

@ -1,28 +1,41 @@
// SF MCP Server — readers barrel export
// Copyright (c) 2026 Jeremy McSpadden <jeremy@fluxlabs.net>
export { resolveSFRoot, resolveRootFile } from './paths.js';
export { readProgress } from './state.js';
export type { ProgressResult } from './state.js';
export { readRoadmap } from './roadmap.js';
export type { RoadmapResult, MilestoneInfo, SliceInfo, TaskInfo } from './roadmap.js';
export { readHistory } from './metrics.js';
export type { HistoryResult, MetricsUnit } from './metrics.js';
export { readCaptures } from './captures.js';
export type { CapturesResult, CaptureEntry } from './captures.js';
export { readKnowledge } from './knowledge.js';
export type { KnowledgeResult, KnowledgeEntry } from './knowledge.js';
export { runDoctorLite } from './doctor-lite.js';
export type { DoctorResult, DoctorIssue } from './doctor-lite.js';
export { buildGraph, writeGraph, writeSnapshot, graphStatus, graphQuery, graphDiff } from './graph.js';
export { resolveSFRoot } from "@singularity-forge/pi-agent-core";
export type { CaptureEntry, CapturesResult } from "./captures.js";
export { readCaptures } from "./captures.js";
export type { DoctorIssue, DoctorResult } from "./doctor-lite.js";
export { runDoctorLite } from "./doctor-lite.js";
export type {
NodeType,
EdgeType,
ConfidenceTier,
GraphNode,
GraphEdge,
KnowledgeGraph,
GraphStatusResult,
GraphQueryResult,
GraphDiffResult,
} from './graph.js';
ConfidenceTier,
EdgeType,
GraphDiffResult,
GraphEdge,
GraphNode,
GraphQueryResult,
GraphStatusResult,
KnowledgeGraph,
NodeType,
} from "./graph.js";
export {
buildGraph,
graphDiff,
graphQuery,
graphStatus,
writeGraph,
writeSnapshot,
} from "./graph.js";
export type { KnowledgeEntry, KnowledgeResult } from "./knowledge.js";
export { readKnowledge } from "./knowledge.js";
export type { HistoryResult, MetricsUnit } from "./metrics.js";
export { readHistory } from "./metrics.js";
export { resolveRootFile } from "./paths.js";
export type {
MilestoneInfo,
RoadmapResult,
SliceInfo,
TaskInfo,
} from "./roadmap.js";
export { readRoadmap } from "./roadmap.js";
export type { ProgressResult } from "./state.js";
export { readProgress } from "./state.js";

File diff suppressed because it is too large Load diff

View file

@ -9,7 +9,7 @@
"build": "tsc -p tsconfig.json",
"build:native": "node ../../rust-engine/scripts/build.js",
"build:native:dev": "node ../../rust-engine/scripts/build.js --dev",
"test": "npm run build:native:dev && node --test src/__tests__/grep.test.mjs src/__tests__/ps.test.mjs src/__tests__/glob.test.mjs src/__tests__/clipboard.test.mjs src/__tests__/highlight.test.mjs src/__tests__/html.test.mjs src/__tests__/text.test.mjs src/__tests__/fd.test.mjs src/__tests__/image.test.mjs"
"test": "npm run build:native:dev && node --test src/__tests__/grep.test.mjs src/__tests__/ps.test.mjs src/__tests__/glob.test.mjs src/__tests__/clipboard.test.mjs src/__tests__/highlight.test.mjs src/__tests__/html.test.mjs src/__tests__/text.test.mjs src/__tests__/fd.test.mjs src/__tests__/image.test.mjs src/__tests__/edit.test.mjs src/__tests__/symbol.test.mjs src/__tests__/watch.test.mjs"
},
"exports": {
".": {
@ -60,6 +60,10 @@
"types": "./dist/diff/index.d.ts",
"default": "./dist/diff/index.js"
},
"./edit": {
"types": "./dist/edit/index.d.ts",
"default": "./dist/edit/index.js"
},
"./forge-parser": {
"types": "./dist/forge-parser/index.d.ts",
"default": "./dist/forge-parser/index.js"

View file

@ -0,0 +1,95 @@
import { describe, test } from "vitest";
import assert from "node:assert/strict";
import { createRequire } from "node:module";
import * as path from "node:path";
import { fileURLToPath } from "node:url";
import * as fs from "node:fs";
import * as os from "node:os";
const __dirname = path.dirname(fileURLToPath(import.meta.url));
const require = createRequire(import.meta.url);
const addonDir = path.resolve(__dirname, "..", "..", "..", "..", "rust-engine", "addon");
const platformTag = `${process.platform}-${process.arch}`;
const candidates = [
path.join(addonDir, "forge_engine.dev.node"),
path.join(addonDir, `forge_engine.${platformTag}.node`),
];
let native;
for (const candidate of candidates) {
try {
native = require(candidate);
break;
} catch {
// try next
}
}
if (!native) {
console.error("Native addon not found. Run `npm run build:native -w @singularity-forge/native` first.");
process.exit(1);
}
function tempFile(contents) {
const dir = fs.mkdtempSync(path.join(os.tmpdir(), "sf-edit-test-"));
const file = path.join(dir, "sample.txt");
fs.writeFileSync(file, contents, "utf8");
return { dir, file };
}
const range = (startLine, startCharacter, endLine, endCharacter) => ({
start: { line: startLine, character: startCharacter },
end: { line: endLine, character: endCharacter },
});
describe("native edit: applyEdits()", () => {
test("applies LSP-style edits atomically", ({ onTestFinished }) => {
const { dir, file } = tempFile("hello world\n");
onTestFinished(() => fs.rmSync(dir, { recursive: true, force: true }));
const result = native.applyEdits(
file,
[{ range: range(0, 6, 0, 11), newText: "forge" }],
{ fsync: false },
);
assert.deepEqual(result, {
editsApplied: 1,
bytesWritten: Buffer.byteLength("hello forge\n"),
});
assert.equal(fs.readFileSync(file, "utf8"), "hello forge\n");
});
test("uses UTF-16 character offsets", ({ onTestFinished }) => {
const { dir, file } = tempFile("a😀b\n");
onTestFinished(() => fs.rmSync(dir, { recursive: true, force: true }));
native.applyEdits(
file,
[{ range: range(0, 1, 0, 3), newText: "X" }],
{ fsync: false },
);
assert.equal(fs.readFileSync(file, "utf8"), "aXb\n");
});
test("rejects overlapping edits without changing the file", ({ onTestFinished }) => {
const { dir, file } = tempFile("abcdef\n");
onTestFinished(() => fs.rmSync(dir, { recursive: true, force: true }));
assert.throws(
() =>
native.applyEdits(
file,
[
{ range: range(0, 0, 0, 4), newText: "x" },
{ range: range(0, 2, 0, 6), newText: "y" },
],
{ fsync: false },
),
/overlapping edits/,
);
assert.equal(fs.readFileSync(file, "utf8"), "abcdef\n");
});
});

View file

@ -0,0 +1,232 @@
/**
* Tests for replaceSymbol and insertAroundSymbol.
*
 * These tests require the native addon to be compiled. They are written ahead
 * of the native integration and will fail at runtime until:
* 1. The integration owner adds `mod symbol;` to `lib.rs`.
* 2. The native addon is rebuilt (`npm run build:native -w @singularity-forge/native`).
*
* Run with:
* npx vitest run packages/native/src/__tests__/symbol.test.mjs --config vitest.config.ts
*/
import { describe, test } from "vitest";
import assert from "node:assert/strict";
import { createRequire } from "node:module";
import * as path from "node:path";
import { fileURLToPath } from "node:url";
import * as fs from "node:fs";
import * as os from "node:os";
const __dirname = path.dirname(fileURLToPath(import.meta.url));
const require = createRequire(import.meta.url);
const addonDir = path.resolve(__dirname, "..", "..", "..", "..", "rust-engine", "addon");
const platformTag = `${process.platform}-${process.arch}`;
const candidates = [
path.join(addonDir, "forge_engine.dev.node"),
path.join(addonDir, `forge_engine.${platformTag}.node`),
];
let native;
for (const candidate of candidates) {
try {
native = require(candidate);
break;
} catch {
// try next
}
}
if (!native) {
console.error(
"Native addon not found. Run `npm run build:native -w @singularity-forge/native` first.",
);
process.exit(1);
}
// ── helpers ───────────────────────────────────────────────────────────────────
function tempTsFile(contents) {
const dir = fs.mkdtempSync(path.join(os.tmpdir(), "sf-symbol-test-"));
const file = path.join(dir, "sample.ts");
fs.writeFileSync(file, contents, "utf8");
return { dir, file };
}
// ── replaceSymbol ─────────────────────────────────────────────────────────────
describe("native symbol: replaceSymbol()", () => {
test("replaces a top-level TypeScript function declaration", ({ onTestFinished }) => {
const original = `function foo() { return 1; }\n`;
const { dir, file } = tempTsFile(original);
onTestFinished(() => fs.rmSync(dir, { recursive: true, force: true }));
const result = native.replaceSymbol(
file,
"foo",
"function foo() { return 42; }",
{ fsync: false },
);
assert.equal(result.matched, true);
assert.ok(typeof result.byteStart === "number", "byteStart should be set");
assert.ok(typeof result.startLine === "number", "startLine should be set");
assert.equal(result.startLine, 1, "startLine should be 1-based");
const written = fs.readFileSync(file, "utf8");
assert.ok(
written.includes("return 42"),
`Expected new body in file, got: ${written}`,
);
assert.ok(
!written.includes("return 1;"),
`Old body should be gone, got: ${written}`,
);
});
test("returns matched:false when the symbol does not exist", ({ onTestFinished }) => {
const { dir, file } = tempTsFile("function bar() { return 0; }\n");
onTestFinished(() => fs.rmSync(dir, { recursive: true, force: true }));
const result = native.replaceSymbol(
file,
"nonExistentFunction",
"function nonExistentFunction() {}",
{ fsync: false },
);
assert.equal(result.matched, false);
assert.equal(result.byteStart, undefined);
// File must be unchanged.
assert.ok(fs.readFileSync(file, "utf8").includes("function bar()"));
});
test("throws on ambiguous symbol (multiple matches)", ({ onTestFinished }) => {
// Two functions with different names but both match a loose pattern is not
// the ambiguity scenario; the ambiguity arises from an overloaded symbol.
// Simulate it by writing a file with the same function name appearing twice
// (which can happen if tree-sitter picks up both the function and an arrow
// alias with the same logical name via multiple patterns — or in practice
// when the user uses duplicate declarations).
const source = [
"function executeCommand() { return 1; }",
"const executeCommand = () => { return 2; }",
].join("\n") + "\n";
const { dir, file } = tempTsFile(source);
onTestFinished(() => fs.rmSync(dir, { recursive: true, force: true }));
assert.throws(
() =>
native.replaceSymbol(
file,
"executeCommand",
"function executeCommand() { return 99; }",
{ fsync: false },
),
/[Aa]mbiguous/,
);
// File must be unchanged.
assert.ok(fs.readFileSync(file, "utf8").includes("return 1;"));
});
test("replaces an arrow function declaration", ({ onTestFinished }) => {
const { dir, file } = tempTsFile(
"const greet = (name: string) => { return `Hello ${name}`; }\n",
);
onTestFinished(() => fs.rmSync(dir, { recursive: true, force: true }));
const result = native.replaceSymbol(
file,
"greet",
"const greet = (name: string) => { return `Hi ${name}!`; }",
{ fsync: false },
);
assert.equal(result.matched, true);
const written = fs.readFileSync(file, "utf8");
assert.ok(written.includes("Hi"), `Expected new body, got: ${written}`);
});
});
// ── insertAroundSymbol ────────────────────────────────────────────────────────
describe("native symbol: insertAroundSymbol()", () => {
test("BeforeDecl inserts code on the line immediately before the declaration", ({
onTestFinished,
}) => {
const original = "function hello() { return 'world'; }\n";
const { dir, file } = tempTsFile(original);
onTestFinished(() => fs.rmSync(dir, { recursive: true, force: true }));
const result = native.insertAroundSymbol(
file,
"hello",
"BeforeDecl",
"// AUTO-GENERATED\n",
{ fsync: false },
);
assert.equal(result.inserted, true);
assert.ok(typeof result.byteOffset === "number", "byteOffset should be set");
assert.equal(result.byteOffset, 0, "should insert at start of file for top-level decl");
const written = fs.readFileSync(file, "utf8");
assert.ok(
written.startsWith("// AUTO-GENERATED\n"),
`Expected comment at top, got: ${written}`,
);
assert.ok(written.includes("function hello()"), "Original declaration must still exist");
});
test("AfterDecl inserts code immediately after the declaration", ({ onTestFinished }) => {
const original = "function bye() { return 'cya'; }\n";
const { dir, file } = tempTsFile(original);
onTestFinished(() => fs.rmSync(dir, { recursive: true, force: true }));
const result = native.insertAroundSymbol(
file,
"bye",
"AfterDecl",
"\n// end of bye\n",
{ fsync: false },
);
assert.equal(result.inserted, true);
const written = fs.readFileSync(file, "utf8");
assert.ok(
written.includes("// end of bye"),
`Expected trailing comment, got: ${written}`,
);
});
test("returns inserted:false when symbol does not exist", ({ onTestFinished }) => {
const { dir, file } = tempTsFile("function noop() {}\n");
onTestFinished(() => fs.rmSync(dir, { recursive: true, force: true }));
const result = native.insertAroundSymbol(
file,
"phantom",
"BeforeDecl",
"// phantom\n",
{ fsync: false },
);
assert.equal(result.inserted, false);
assert.equal(result.byteOffset, undefined);
assert.equal(fs.readFileSync(file, "utf8"), "function noop() {}\n");
});
test("AtBodyStart and AtBodyEnd throw 'not yet implemented'", ({ onTestFinished }) => {
const { dir, file } = tempTsFile("function x() { return 1; }\n");
onTestFinished(() => fs.rmSync(dir, { recursive: true, force: true }));
for (const pos of ["AtBodyStart", "AtBodyEnd"]) {
assert.throws(
() => native.insertAroundSymbol(file, "x", pos, "/* code */", { fsync: false }),
/not yet implemented/i,
`Expected 'not yet implemented' error for position ${pos}`,
);
}
});
});

View file

@ -0,0 +1,219 @@
import { describe, test } from "vitest";
import assert from "node:assert/strict";
import { createRequire } from "node:module";
import * as path from "node:path";
import { fileURLToPath } from "node:url";
import * as fs from "node:fs";
import * as os from "node:os";
const __dirname = path.dirname(fileURLToPath(import.meta.url));
const require = createRequire(import.meta.url);
// ─── Load native addon ────────────────────────────────────────────────────────
const addonDir = path.resolve(__dirname, "..", "..", "..", "..", "rust-engine", "addon");
const platformTag = `${process.platform}-${process.arch}`;
const candidates = [
path.join(addonDir, `forge_engine.${platformTag}.node`),
path.join(addonDir, "forge_engine.dev.node"),
];
let native;
for (const candidate of candidates) {
try {
native = require(candidate);
break;
} catch {
// try next
}
}
if (!native) {
console.error(
"Native addon not found. Run `npm run build:native -w @singularity-forge/native` first.",
);
process.exit(1);
}
// ─── Helpers ─────────────────────────────────────────────────────────────────
/**
* Create a unique temporary directory for one test and return its path.
* The caller is responsible for cleanup via `onTestFinished`.
*/
function makeTmpDir() {
return fs.mkdtempSync(path.join(os.tmpdir(), "sf-watch-test-"));
}
/**
* Collect the first batch of events from `watchTree` that satisfy `predicate`,
* then stop the watcher and resolve with the matching event array.
*
* Rejects after `timeoutMs` if no satisfying batch arrives.
*/
function waitForEvents(root, options, predicate, timeoutMs = 3000) {
return new Promise((resolve, reject) => {
const timer = setTimeout(() => {
native.stopWatch(handle);
reject(new Error(`Timed out after ${timeoutMs}ms waiting for events in ${root}`));
}, timeoutMs);
const handle = native.watchTree(root, options ?? null, (events) => {
if (predicate(events)) {
clearTimeout(timer);
native.stopWatch(handle);
resolve(events);
}
});
});
}
// ─── Tests ───────────────────────────────────────────────────────────────────
describe("native watch: watchTree / stopWatch", () => {
// ── creation ───────────────────────────────────────────────────────────────
test("detects file creation", async ({ onTestFinished }) => {
const dir = makeTmpDir();
onTestFinished(() => fs.rmSync(dir, { recursive: true, force: true }));
const pending = waitForEvents(
dir,
{ debounceMs: 50 },
(events) => events.some((e) => e.kind === "create"),
);
// Give the watcher a moment to register before writing.
await new Promise((r) => setTimeout(r, 100));
fs.writeFileSync(path.join(dir, "hello.txt"), "hi");
const events = await pending;
const created = events.filter((e) => e.kind === "create");
assert.ok(created.length >= 1, `expected >=1 create event, got: ${JSON.stringify(events)}`);
assert.ok(
created.some((e) => e.path.endsWith("hello.txt")),
`expected hello.txt in create events, got: ${JSON.stringify(created)}`,
);
});
// ── modification ───────────────────────────────────────────────────────────
test("detects file modification", async ({ onTestFinished }) => {
const dir = makeTmpDir();
onTestFinished(() => fs.rmSync(dir, { recursive: true, force: true }));
const file = path.join(dir, "data.txt");
fs.writeFileSync(file, "initial");
// Wait for any initial create events to drain before starting the real watch.
await new Promise((r) => setTimeout(r, 200));
const pending = waitForEvents(
dir,
{ debounceMs: 50 },
(events) => events.some((e) => e.kind === "modify" && e.path.endsWith("data.txt")),
);
await new Promise((r) => setTimeout(r, 100));
fs.writeFileSync(file, "updated");
const events = await pending;
const modified = events.filter((e) => e.kind === "modify");
assert.ok(modified.length >= 1, `expected >=1 modify event, got: ${JSON.stringify(events)}`);
});
// ── removal ────────────────────────────────────────────────────────────────
test("detects file removal", async ({ onTestFinished }) => {
const dir = makeTmpDir();
onTestFinished(() => fs.rmSync(dir, { recursive: true, force: true }));
const file = path.join(dir, "todelete.txt");
fs.writeFileSync(file, "bye");
await new Promise((r) => setTimeout(r, 200));
const pending = waitForEvents(
dir,
{ debounceMs: 50 },
(events) => events.some((e) => e.kind === "remove" && e.path.endsWith("todelete.txt")),
);
await new Promise((r) => setTimeout(r, 100));
fs.unlinkSync(file);
const events = await pending;
const removed = events.filter((e) => e.kind === "remove");
assert.ok(removed.length >= 1, `expected >=1 remove event, got: ${JSON.stringify(events)}`);
});
// ── ignore patterns ────────────────────────────────────────────────────────
test("respects ignore pattern (*.log ignored, .txt not)", async ({ onTestFinished }) => {
const dir = makeTmpDir();
onTestFinished(() => fs.rmSync(dir, { recursive: true, force: true }));
// Collect ALL events for 600ms after writing both files, then inspect.
const collected = [];
let handle;
const settled = new Promise((resolve) => {
handle = native.watchTree(
dir,
{ ignore: ["*.log"], debounceMs: 50 },
(events) => {
collected.push(...events);
},
);
setTimeout(resolve, 600);
});
await new Promise((r) => setTimeout(r, 100));
fs.writeFileSync(path.join(dir, "ignored.log"), "log data");
fs.writeFileSync(path.join(dir, "kept.txt"), "text data");
await settled;
native.stopWatch(handle);
const logEvents = collected.filter((e) => e.path.endsWith("ignored.log"));
const txtEvents = collected.filter((e) => e.path.endsWith("kept.txt"));
assert.equal(logEvents.length, 0, `*.log file should be ignored, got: ${JSON.stringify(logEvents)}`);
assert.ok(txtEvents.length >= 1, `*.txt file should produce events, got: ${JSON.stringify(collected)}`);
});
// ── stop ───────────────────────────────────────────────────────────────────
test("stop() ends the watch — no further events delivered", async ({ onTestFinished }) => {
const dir = makeTmpDir();
onTestFinished(() => fs.rmSync(dir, { recursive: true, force: true }));
const received = [];
const handle = native.watchTree(dir, { debounceMs: 50 }, (events) => {
received.push(...events);
});
// Write a file, wait for the debounce to fire, then stop.
await new Promise((r) => setTimeout(r, 100));
fs.writeFileSync(path.join(dir, "before.txt"), "a");
await new Promise((r) => setTimeout(r, 300));
const stopped = native.stopWatch(handle);
assert.equal(stopped, true, "stopWatch should return true for a live handle");
const countAfterStop = received.length;
// Write another file after stopping — should NOT trigger any new events.
fs.writeFileSync(path.join(dir, "after.txt"), "b");
await new Promise((r) => setTimeout(r, 300));
assert.equal(
received.length,
countAfterStop,
`No new events should arrive after stop. Got extra: ${JSON.stringify(received.slice(countAfterStop))}`,
);
// Stopping an already-stopped handle should return false, not throw.
const stoppedAgain = native.stopWatch(handle);
assert.equal(stoppedAgain, false, "second stopWatch on same handle should return false");
});
});

View file

@ -0,0 +1,174 @@
/**
* Atomic LSP-style text edits backed by the Rust native engine.
*
* Purpose: let agents apply LSP WorkspaceEdit/TextEdit output without a
* read-splice-write loop in JavaScript.
*
* Consumer: SF agent editing flows that receive semantic edits from LSP and
* need to commit them to disk as one native operation.
*/
import { native } from "../native.js";
import { EventEmitter } from "node:events";
import type {
ApplyEditsOptions,
ApplyEditsResult,
ApplyWorkspaceEditResult,
InsertAroundSymbolOptions,
InsertAroundSymbolResult,
InsertPosition,
ReplaceSymbolOptions,
ReplaceSymbolResult,
TextDocumentEdit,
TextEdit,
WatchEvent,
WatchHandle,
WatchOptions,
WorkspaceEditLike,
} from "./types.js";
export type {
ApplyEditsOptions,
ApplyEditsResult,
ApplyWorkspaceEditResult,
InsertAroundSymbolOptions,
InsertAroundSymbolResult,
InsertPosition,
Position,
Range,
ReplaceSymbolOptions,
ReplaceSymbolResult,
TextDocumentEdit,
TextEdit,
WatchEvent,
WatchEventKind,
WatchHandle,
WatchOptions,
WorkspaceEditLike,
} from "./types.js";
/**
* Apply LSP-style TextEdit entries to one file atomically.
*
* Purpose: preserve LSP range semantics while avoiding JavaScript string
* slicing and partial writes for agent-driven edits.
*
* Consumer: agent edit tools that hand LSP rename/code-action results to the
* native module for durable file updates.
*/
export function applyEdits(
filePath: string,
edits: readonly TextEdit[],
options?: ApplyEditsOptions,
): ApplyEditsResult {
return native.applyEdits(filePath, [...edits], options) as ApplyEditsResult;
}
/**
* Apply LSP-style text edits across multiple files with native two-phase staging.
*
* Purpose: commit semantic edits from LSP rename/code-action results with one
* native call and no per-file JavaScript splice loop.
*
* Consumer: agent refactor flows that receive a WorkspaceEdit-shaped object
* from the LSP client.
*/
export function applyWorkspaceEdit(
workspaceEdit: readonly TextDocumentEdit[] | WorkspaceEditLike,
options?: ApplyEditsOptions,
): ApplyWorkspaceEditResult {
return native.applyWorkspaceEdit(normalizeWorkspaceEdit(workspaceEdit), options) as ApplyWorkspaceEditResult;
}
/**
* Replace the declaration matched by a symbol path.
*
* Purpose: let agent code target stable semantic names instead of stale line
* numbers or fragile string matches.
*
* Consumer: symbol-aware edit flows for TypeScript/JavaScript/TSX files.
*/
export function replaceSymbol(
filePath: string,
symbolPath: string,
newBody: string,
options?: ReplaceSymbolOptions,
): ReplaceSymbolResult {
return native.replaceSymbol(filePath, symbolPath, newBody, options) as ReplaceSymbolResult;
}
/**
* Insert code around a declaration matched by a symbol path.
*
* Purpose: support common agent edits such as adding adjacent declarations or
* declaration comments without regex-based placement.
*
* Consumer: symbol-aware edit flows for TypeScript/JavaScript/TSX files.
*/
export function insertAroundSymbol(
filePath: string,
symbolPath: string,
position: InsertPosition,
code: string,
options?: InsertAroundSymbolOptions,
): InsertAroundSymbolResult {
return native.insertAroundSymbol(
filePath,
symbolPath,
position,
code,
options,
) as InsertAroundSymbolResult;
}
/**
* Watch a directory tree with native notify/globset filtering.
*
* Purpose: keep file-change pressure out of the JavaScript event loop while
* still presenting a Node-friendly EventEmitter surface.
*
* Consumer: long-running agent sessions and UI flows that need debounced file
* change batches.
*/
export function watchTree(root: string, options?: WatchOptions): WatchHandle {
const emitter = new EventEmitter();
const handle = native.watchTree(root, options ?? null, (events: unknown[]) => {
emitter.emit("events", events as WatchEvent[]);
});
let stopped = false;
return {
stop() {
if (!stopped) {
stopped = true;
native.stopWatch(handle);
emitter.removeAllListeners();
}
},
on(event: "events", listener: (events: WatchEvent[]) => void) {
emitter.on(event, listener);
return this;
},
};
}
function normalizeWorkspaceEdit(
workspaceEdit: readonly TextDocumentEdit[] | WorkspaceEditLike,
): TextDocumentEdit[] {
if (Array.isArray(workspaceEdit)) {
return workspaceEdit.map((entry) => ({
filePath: entry.filePath,
edits: [...entry.edits],
}));
}
if (Array.isArray(workspaceEdit.documentChanges)) {
return workspaceEdit.documentChanges.map((entry) => ({
filePath: entry.filePath,
edits: [...entry.edits],
}));
}
return Object.entries(workspaceEdit.changes ?? {}).map(([filePath, edits]) => ({
filePath,
edits: [...edits],
}));
}

View file

@ -0,0 +1,97 @@
export interface Position {
/** 0-based line number. */
line: number;
/** 0-based UTF-16 code-unit offset from the line start, matching LSP. */
character: number;
}
export interface Range {
start: Position;
end: Position;
}
export interface TextEdit {
range: Range;
newText: string;
}
export interface ApplyEditsOptions {
/** fsync the temp file and parent directory before/after rename. Defaults to true. */
fsync?: boolean;
}
export interface ApplyEditsResult {
editsApplied: number;
bytesWritten: number;
}
export interface TextDocumentEdit {
filePath: string;
edits: TextEdit[];
}
export interface WorkspaceEditLike {
documentChanges?: TextDocumentEdit[];
changes?: Record<string, TextEdit[]>;
}
export interface WorkspaceEditFileResult {
filePath: string;
editsApplied: number;
bytesWritten: number;
}
export interface ApplyWorkspaceEditResult {
filesChanged: number;
totalEditsApplied: number;
files: WorkspaceEditFileResult[];
}
export interface ReplaceSymbolOptions {
/** Force a specific language. If absent, inferred from the file extension. */
lang?: string;
/** fsync the write. Defaults to true. */
fsync?: boolean;
}
export interface ReplaceSymbolResult {
matched: boolean;
byteStart?: number;
byteEnd?: number;
startLine?: number;
}
export type InsertPosition = "BeforeDecl" | "AfterDecl" | "AtBodyStart" | "AtBodyEnd";
export interface InsertAroundSymbolOptions {
/** Force a specific language. If absent, inferred from the file extension. */
lang?: string;
/** fsync the write. Defaults to true. */
fsync?: boolean;
}
export interface InsertAroundSymbolResult {
inserted: boolean;
byteOffset?: number;
}
export type WatchEventKind = "create" | "modify" | "remove" | "rename";
export interface WatchEvent {
kind: WatchEventKind;
path: string;
}
export interface WatchOptions {
/** Glob patterns to ignore. Bare patterns match anywhere below the root. */
ignore?: string[];
/** Coalesce events that fire within this window in milliseconds. Defaults to 50. */
debounceMs?: number;
/** Watch recursively. Defaults to true. */
recursive?: boolean;
}
export interface WatchHandle {
stop(): void;
on(event: "events", listener: (events: WatchEvent[]) => void): this;
}

View file

@ -81,6 +81,33 @@ export {
} from "./diff/index.js";
export type { FuzzyMatchResult, DiffResult } from "./diff/index.js";
export {
applyEdits,
applyWorkspaceEdit,
insertAroundSymbol,
replaceSymbol,
watchTree,
} from "./edit/index.js";
export type {
ApplyEditsOptions,
ApplyEditsResult,
ApplyWorkspaceEditResult,
InsertAroundSymbolOptions,
InsertAroundSymbolResult,
InsertPosition,
Position,
Range,
ReplaceSymbolOptions,
ReplaceSymbolResult,
TextDocumentEdit,
TextEdit,
WatchEvent,
WatchEventKind,
WatchHandle,
WatchOptions,
WorkspaceEditLike,
} from "./edit/index.js";
export { fuzzyFind } from "./fd/index.js";
export type {
FuzzyFindMatch,

View file

@ -132,6 +132,27 @@ export const native = loadNative() as {
normalizeForFuzzyMatch: (text: string) => string;
fuzzyFindText: (content: string, oldText: string) => unknown;
generateDiff: (oldContent: string, newContent: string, contextLines?: number) => unknown;
applyEdits: (filePath: string, edits: unknown[], options?: unknown) => unknown;
applyWorkspaceEdit: (documentEdits: unknown[], options?: unknown) => unknown;
replaceSymbol: (
filePath: string,
symbolName: string,
newBody: string,
options?: unknown,
) => unknown;
insertAroundSymbol: (
filePath: string,
symbolName: string,
position: string,
code: string,
options?: unknown,
) => unknown;
watchTree: (
root: string,
options: unknown,
onEvents: (events: unknown[]) => void,
) => number;
stopWatch: (handle: number) => boolean;
NativeImage: unknown;
ttsrCompileRules: (rules: unknown[]) => number;
ttsrCheckBuffer: (handle: number, buffer: string) => string[];

View file

@ -2,7 +2,11 @@
export * from "./agent.js";
// Loop functions
export * from "./agent-loop.js";
// Interactive question contract
export * from "./interactive-questions.js";
// Proxy utilities
export * from "./proxy.js";
// SF project graph
export * from "./sf-graph.js";
// Types
export * from "./types.js";

View file

@ -0,0 +1,73 @@
import assert from "node:assert/strict";
import { test } from "vitest";
import {
formatRoundResultForTool,
type Question,
roundResultFromElicitationContent,
roundResultFromRemoteAnswer,
} from "./interactive-questions.js";
const questions: Question[] = [
{
id: "choice",
header: "Choice",
question: "Pick one",
options: [
{ label: "Alpha", description: "A" },
{ label: "None of the above", description: "Other" },
],
},
{
id: "multi",
header: "Multi",
question: "Pick many",
allowMultiple: true,
options: [
{ label: "Frontend", description: "UI" },
{ label: "Backend", description: "API" },
],
},
];
test("roundResultFromElicitationContent preserves notes and multi-select arrays", () => {
const result = roundResultFromElicitationContent(questions, {
action: "accept",
content: {
choice: "None of the above",
choice__note: "Hybrid",
multi: ["Frontend"],
},
});
assert.deepEqual(result, {
endInterview: false,
answers: {
choice: { selected: "None of the above", notes: "Hybrid" },
multi: { selected: ["Frontend"], notes: "" },
},
});
});
test("roundResultFromRemoteAnswer uses question metadata to keep one multi-select as array", () => {
const result = roundResultFromRemoteAnswer(
{
answers: {
choice: { answers: ["Alpha"] },
multi: { answers: ["Backend"] },
},
},
questions,
);
assert.deepEqual(result.answers.choice.selected, "Alpha");
assert.deepEqual(result.answers.multi.selected, ["Backend"]);
assert.equal(
formatRoundResultForTool(result),
JSON.stringify({
answers: {
choice: { answers: ["Alpha"] },
multi: { answers: ["Backend"] },
},
}),
);
});

View file

@ -0,0 +1,171 @@
/**
* Shared structured-question contract for local UI, remote channels, and MCP.
*
* Purpose: keep every ask_user_questions transport on the same answer shape so
* gate hooks and LLM-facing JSON do not drift between local TUI, remote
* Slack/Discord/Telegram, and MCP elicitation paths.
*
* Consumer: SF ask_user_questions extension, remote question manager, and the
* packaged MCP server.
*/
export interface QuestionOption {
label: string;
description: string;
}
export interface Question {
id: string;
header: string;
question: string;
options: QuestionOption[];
allowMultiple?: boolean;
}
export interface RoundAnswer {
selected: string | string[];
notes: string;
}
export interface RoundResult {
/** Always false; wrap-up/exit is handled outside a single question round. */
endInterview: false;
answers: Record<string, RoundAnswer>;
}
export interface RemoteAnswerLike {
answers: Record<string, { answers?: string[]; user_note?: string }>;
}
export type ElicitationContentValue = string | number | boolean | string[];
export interface ElicitationResultLike {
action?: "accept" | "decline" | "cancel" | string;
content?: Record<string, ElicitationContentValue>;
}
export const DEFAULT_OTHER_OPTION_LABEL = "None of the above";
function normalizeNote(value: ElicitationContentValue | undefined): string {
return typeof value === "string" ? value.trim() : "";
}
function normalizeSelectedList(
value: ElicitationContentValue | undefined,
allowMultiple: boolean,
): string[] {
if (allowMultiple) {
return Array.isArray(value)
? value.filter((item): item is string => typeof item === "string")
: [];
}
return typeof value === "string" && value.length > 0 ? [value] : [];
}
/**
* Convert local/MCP elicitation form content into the canonical RoundResult.
*
* Purpose: preserve the multi-select array contract and "None of the above"
* notes consistently across transports.
*
* Consumer: MCP ask_user_questions handler and any form-based local bridge.
*/
export function roundResultFromElicitationContent(
questions: readonly Question[],
result: ElicitationResultLike,
otherOptionLabel = DEFAULT_OTHER_OPTION_LABEL,
): RoundResult {
const content = result.content ?? {};
const answers: Record<string, RoundAnswer> = {};
for (const question of questions) {
if (question.allowMultiple) {
answers[question.id] = {
selected: normalizeSelectedList(content[question.id], true),
notes: "",
};
continue;
}
const list = normalizeSelectedList(content[question.id], false);
const selected = list[0] ?? "";
const notes =
selected === otherOptionLabel
? normalizeNote(content[`${question.id}__note`])
: "";
answers[question.id] = { selected, notes };
}
return { endInterview: false, answers };
}
/**
* Convert a remote-channel answer into the canonical RoundResult.
*
* Purpose: remote adapters store answers as `{ answers: string[] }`; consumers
* need the same `selected` shape as local TUI, especially array preservation for
* multi-select questions with a single selected item.
*
* Consumer: SF remote question manager.
*/
export function roundResultFromRemoteAnswer(
answer: RemoteAnswerLike,
questions: readonly Question[],
): RoundResult {
const allowMultipleById = new Map<string, boolean>();
for (const question of questions) {
allowMultipleById.set(question.id, question.allowMultiple ?? false);
}
const answers: Record<string, RoundAnswer> = {};
for (const [id, data] of Object.entries(answer.answers)) {
const list = data.answers ?? [];
const allowMultiple = allowMultipleById.get(id) ?? false;
answers[id] = {
selected: allowMultiple ? [...list] : (list[0] ?? ""),
notes: data.user_note ?? "",
};
}
return { endInterview: false, answers };
}
/**
* Render the canonical RoundResult as the historical LLM/tool JSON payload.
*
* Purpose: keep the text response backward-compatible while structured callers
* consume RoundResult directly.
*
* Consumer: ask_user_questions local/remote/MCP handlers.
*/
export function formatRoundResultForTool(result: RoundResult): string {
const answers: Record<string, { answers: string[] }> = {};
for (const [id, answer] of Object.entries(result.answers)) {
const list = Array.isArray(answer.selected)
? [...answer.selected]
: [answer.selected];
if (answer.notes) list.push(`user_note: ${answer.notes}`);
answers[id] = { answers: list };
}
return JSON.stringify({ answers });
}
/**
* Build the structured content payload shared by MCP and extension details.
*
* Purpose: provide the same cancellation and response contract to gate hooks
* regardless of transport.
*
* Consumer: MCP ask_user_questions handler.
*/
export function buildQuestionStructuredContent(
questions: readonly Question[],
response: RoundResult | null,
cancelled: boolean,
): {
questions: readonly Question[];
response: RoundResult | null;
cancelled: boolean;
} {
return { questions, response, cancelled };
}

File diff suppressed because it is too large Load diff

View file

@ -1,5 +1,5 @@
import { describe } from 'vitest';
import assert from "node:assert/strict";
import { describe, test } from "vitest";
import { parseStreamingJson } from "../json-parse.js";
describe("parseStreamingJson — XML parameter recovery (#3751)", () => {

View file

@ -1,10 +1,11 @@
import { describe } from 'vitest';
import assert from "node:assert/strict";
import { isContextOverflow } from "../overflow.js";
import { describe, test } from "vitest";
import type { AssistantMessage } from "../../types.js";
import { isContextOverflow } from "../overflow.js";
function makeAssistantMessage(overrides: Partial<AssistantMessage> = {}): AssistantMessage {
function makeAssistantMessage(
overrides: Partial<AssistantMessage> = {},
): AssistantMessage {
return {
role: "assistant",
content: [],

View file

@ -1,11 +1,11 @@
import { describe } from 'vitest';
import assert from "node:assert/strict";
import { describe, test } from "vitest";
import {
hasTruncatedNumbers,
hasXmlParameterTags,
hasYamlBulletLists,
repairToolJson,
repairToolJsonWithReport,
hasYamlBulletLists,
hasXmlParameterTags,
hasTruncatedNumbers,
TOOL_JSON_REPAIR_PIPELINE_VERSION,
} from "../repair-tool-json.js";
@ -83,11 +83,17 @@ describe("repairToolJson — YAML bullet list repair (#2660)", () => {
assert.equal(parsed.milestoneId, "M005");
assert.equal(parsed.title, "Native Desktop Polish");
assert.ok(Array.isArray(parsed.keyDecisions), "keyDecisions should be an array");
assert.ok(
Array.isArray(parsed.keyDecisions),
"keyDecisions should be an array",
);
assert.ok(parsed.keyDecisions[0].includes("Web Notification API"));
assert.ok(Array.isArray(parsed.keyFiles), "keyFiles should be an array");
assert.ok(parsed.keyFiles[0].includes("src-tauri/src/lib.rs"));
assert.ok(Array.isArray(parsed.lessonsLearned), "lessonsLearned should be an array");
assert.ok(
Array.isArray(parsed.lessonsLearned),
"lessonsLearned should be an array",
);
assert.equal(parsed.verificationPassed, true);
});
@ -155,10 +161,7 @@ describe("repairToolJson — full YAML object fallback", () => {
assert.ok(report.repairs.includes("yaml"));
assert.deepEqual(parsed, {
title: "Done",
keyDecisions: [
"Keep semantic model aliases",
"Prefer strict validation",
],
keyDecisions: ["Keep semantic model aliases", "Prefer strict validation"],
verificationPassed: true,
});
});
@ -177,25 +180,33 @@ describe("repairToolJson — XML parameter tag stripping (#3403)", () => {
});
test("hasXmlParameterTags returns false for clean JSON", () => {
assert.equal(
hasXmlParameterTags('{"narrative": "some text"}'),
false,
);
assert.equal(hasXmlParameterTags('{"narrative": "some text"}'), false);
});
test("strips XML parameter tags from JSON values", () => {
const malformed = '{"sliceId": "S03", "narrative": <parameter name="narrative">The slice work</parameter>}';
const malformed =
'{"sliceId": "S03", "narrative": <parameter name="narrative">The slice work</parameter>}';
const repaired = repairToolJson(malformed);
// After stripping tags, the content should be parseable or at least tag-free
assert.ok(!repaired.includes("<parameter"), "should not contain <parameter tags");
assert.ok(!repaired.includes("</parameter>"), "should not contain </parameter> tags");
assert.ok(
!repaired.includes("<parameter"),
"should not contain <parameter tags",
);
assert.ok(
!repaired.includes("</parameter>"),
"should not contain </parameter> tags",
);
});
test("handles mixed XML and JSON content", () => {
const malformed = '{"oneLiner": "done", "verification": <parameter name="verification">all tests pass</parameter>}';
const malformed =
'{"oneLiner": "done", "verification": <parameter name="verification">all tests pass</parameter>}';
const repaired = repairToolJson(malformed);
assert.ok(!repaired.includes("<parameter"), "XML tags should be stripped");
assert.ok(repaired.includes("all tests pass"), "content should be preserved");
assert.ok(
repaired.includes("all tests pass"),
"content should be preserved",
);
});
test("promotes XML parameters trapped inside valid JSON string values", () => {
@ -208,7 +219,10 @@ describe("repairToolJson — XML parameter tag stripping (#3403)", () => {
assert.equal(parsed.verification, "all tests pass");
assert.deepEqual(parsed.verificationEvidence, ["npm test"]);
assert.equal(parsed.oneLiner, "done");
assert.ok(!parsed.narrative.includes("<parameter"), "narrative should not retain leaked XML");
assert.ok(
!parsed.narrative.includes("<parameter"),
"narrative should not retain leaked XML",
);
});
});
@ -230,7 +244,10 @@ describe("repairToolJson — truncated number repair (#3464)", () => {
});
test("hasTruncatedNumbers returns false for valid numbers", () => {
assert.equal(hasTruncatedNumbers('"exitCode": 0, "durationMs": 1234'), false);
assert.equal(
hasTruncatedNumbers('"exitCode": 0, "durationMs": 1234'),
false,
);
});
test("hasTruncatedNumbers returns false for negative numbers", () => {
@ -238,7 +255,8 @@ describe("repairToolJson — truncated number repair (#3464)", () => {
});
test("repairs truncated exitCode with bare comma", () => {
const malformed = '{"command": "npm test", "exitCode": , "verdict": "pass", "durationMs": 500}';
const malformed =
'{"command": "npm test", "exitCode": , "verdict": "pass", "durationMs": 500}';
const repaired = repairToolJson(malformed);
const parsed = JSON.parse(repaired);
assert.equal(parsed.exitCode, 0);
@ -246,7 +264,8 @@ describe("repairToolJson — truncated number repair (#3464)", () => {
});
test("repairs truncated exitCode with bare minus", () => {
const malformed = '{"command": "npm test", "exitCode": -, "verdict": "pass", "durationMs": 1234}';
const malformed =
'{"command": "npm test", "exitCode": -, "verdict": "pass", "durationMs": 1234}';
const repaired = repairToolJson(malformed);
const parsed = JSON.parse(repaired);
assert.equal(parsed.exitCode, 0);
@ -254,7 +273,8 @@ describe("repairToolJson — truncated number repair (#3464)", () => {
});
test("repairs truncated durationMs at end of object", () => {
const malformed = '{"command": "npm test", "exitCode": 0, "verdict": "pass", "durationMs": -}';
const malformed =
'{"command": "npm test", "exitCode": 0, "verdict": "pass", "durationMs": -}';
const repaired = repairToolJson(malformed);
const parsed = JSON.parse(repaired);
assert.equal(parsed.durationMs, 0);

View file

@ -1,5 +1,5 @@
import { describe } from 'vitest';
import assert from "node:assert/strict";
import { describe, test } from "vitest";
import { buildAuthUrlPresentation } from "../login-dialog.js";
describe("LoginDialogComponent", () => {
@ -14,8 +14,14 @@ describe("LoginDialogComponent", () => {
"https://auth.example.com/device?code=ABCD-1234&callback=oauth&state=needs-full-visibility",
"narrow terminals should still truncate the hyperlink label",
);
assert.ok(presentation.fullUrlLines.length > 1, "truncated URLs should expose wrapped full-url lines");
assert.match(presentation.fullUrlLines[0] ?? "", /https:\/\/auth\.example\.com\/device\?code=ABCD-1234&/);
assert.ok(
presentation.fullUrlLines.length > 1,
"truncated URLs should expose wrapped full-url lines",
);
assert.match(
presentation.fullUrlLines[0] ?? "",
/https:\/\/auth\.example\.com\/device\?code=ABCD-1234&/,
);
assert.match(
presentation.fullUrlLines[presentation.fullUrlLines.length - 1] ?? "",
/state=needs-full-visibility/,

View file

@ -1,6 +1,7 @@
// SF — Provider display name mapping tests
import { describe } from 'vitest';
import assert from "node:assert/strict";
import { describe, test } from "vitest";
import { providerDisplayName } from "../model-selector.js";
describe("providerDisplayName", () => {

View file

@ -1,5 +1,5 @@
import { describe } from 'vitest';
import assert from "node:assert/strict";
import { describe, test } from "vitest";
import { formatTimestamp } from "../timestamp.js";
describe("formatTimestamp", () => {
@ -28,7 +28,10 @@ describe("formatTimestamp", () => {
test("US format handles midnight as 12 AM", () => {
const midnight = new Date(2026, 2, 24, 0, 0, 0).getTime();
assert.equal(formatTimestamp(midnight, "date-time-us"), "03-24-2026 12:00 AM");
assert.equal(
formatTimestamp(midnight, "date-time-us"),
"03-24-2026 12:00 AM",
);
});
test("ISO format pads single digit months and days", () => {

View file

@ -1,8 +1,8 @@
import { describe } from 'vitest';
import assert from "node:assert/strict";
import stripAnsi from "strip-ansi";
import { ToolExecutionComponent } from "../tool-execution.js";
import { describe, test } from "vitest";
import { initTheme } from "../../theme/theme.js";
import { ToolExecutionComponent } from "../tool-execution.js";
initTheme("dark", false);
@ -37,13 +37,9 @@ function renderToolCollapsed(
details?: Record<string, unknown>;
},
): string {
const component = new ToolExecutionComponent(
toolName,
args,
{},
undefined,
{ requestRender() {} } as any,
);
const component = new ToolExecutionComponent(toolName, args, {}, undefined, {
requestRender() {},
} as any);
if (result) component.updateResult(result);
return stripAnsi(component.render(120).join("\n"));
}
@ -87,10 +83,11 @@ describe("ToolExecutionComponent", () => {
});
test("generic fallback renders compact key=value args for primitive args", () => {
const rendered = renderTool(
"some_unknown_tool",
{ count: 3, enabled: true, label: "hello" },
);
const rendered = renderTool("some_unknown_tool", {
count: 3,
enabled: true,
label: "hello",
});
assert.match(rendered, /Some Unknown Tool/);
assert.doesNotMatch(rendered, /some_unknown_tool/);
@ -101,7 +98,10 @@ describe("ToolExecutionComponent", () => {
});
test("generic fallback truncates long output when collapsed", () => {
const longOutput = Array.from({ length: 25 }, (_, i) => `line ${i + 1}`).join("\n");
const longOutput = Array.from(
{ length: 25 },
(_, i) => `line ${i + 1}`,
).join("\n");
const rendered = renderToolCollapsed(
"mcp__demo__do_thing",
{ ok: true },
@ -115,10 +115,10 @@ describe("ToolExecutionComponent", () => {
});
test("generic fallback falls back to truncated JSON for complex args", () => {
const rendered = renderTool(
"mcp__demo__nested",
{ payload: { nested: { deeply: ["a", "b", "c"] } }, name: "x" },
);
const rendered = renderTool("mcp__demo__nested", {
payload: { nested: { deeply: ["a", "b", "c"] } },
name: "x",
});
assert.match(rendered, /demo\u00b7nested/);
// Multi-line JSON dump for the complex payload

144
rust-engine/Cargo.lock generated
View file

@ -113,6 +113,12 @@ version = "0.8.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5e764a1d40d510daf35e07be9eb06e75770908c27d411ee6c92109c9840eaaf7"
[[package]]
name = "bitflags"
version = "1.3.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a"
[[package]]
name = "bitflags"
version = "2.11.0"
@ -274,7 +280,7 @@ version = "0.3.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1e0e367e4e7da84520dedcac1901e4da967309406d1e51017ae1abfb97adbd38"
dependencies = [
"bitflags",
"bitflags 2.11.0",
"objc2",
]
@ -464,6 +470,7 @@ name = "forge-engine"
version = "0.1.0"
dependencies = [
"arboard",
"ast-grep-core",
"dashmap",
"forge-ast",
"forge-grep",
@ -477,6 +484,8 @@ dependencies = [
"napi",
"napi-build",
"napi-derive",
"notify",
"notify-debouncer-mini",
"regex",
"serde_json",
"similar",
@ -507,6 +516,15 @@ dependencies = [
"percent-encoding",
]
[[package]]
name = "fsevent-sys"
version = "4.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "76ee7a02da4d231650c7cea31349b889be2f45ddb3ef3032d2ec8185f6313fd2"
dependencies = [
"libc",
]
[[package]]
name = "gethostname"
version = "1.1.0"
@ -556,7 +574,7 @@ version = "0.20.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7b88256088d75a56f8ecfa070513a775dd9107f6530ef14919dac831af9cfe2b"
dependencies = [
"bitflags",
"bitflags 2.11.0",
"libc",
"libgit2-sys",
"log",
@ -834,6 +852,26 @@ dependencies = [
"hashbrown 0.16.1",
]
[[package]]
name = "inotify"
version = "0.11.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bd5b3eaf1a28b758ac0faa5a4254e8ab2705605496f1b1f3fbbc3988ad73d199"
dependencies = [
"bitflags 2.11.0",
"inotify-sys",
"libc",
]
[[package]]
name = "inotify-sys"
version = "0.1.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e05c02b5e89bff3b946cedeca278abc628fe811e604f027c45a8aa3cf793d0eb"
dependencies = [
"libc",
]
[[package]]
name = "itoa"
version = "1.0.17"
@ -850,6 +888,26 @@ dependencies = [
"libc",
]
[[package]]
name = "kqueue"
version = "1.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "eac30106d7dce88daf4a3fcb4879ea939476d5074a9b7ddd0fb97fa4bed5596a"
dependencies = [
"kqueue-sys",
"libc",
]
[[package]]
name = "kqueue-sys"
version = "1.0.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ed9625ffda8729b85e45cf04090035ac368927b8cebc34898e7c120f52e4838b"
dependencies = [
"bitflags 1.3.2",
"libc",
]
[[package]]
name = "libc"
version = "0.2.183"
@ -962,6 +1020,18 @@ dependencies = [
"simd-adler32",
]
[[package]]
name = "mio"
version = "1.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "50b7e5b27aa02a74bac8c3f23f448f8d87ff11f92d3aac1a6ed369ee08cc56c1"
dependencies = [
"libc",
"log",
"wasi",
"windows-sys 0.61.2",
]
[[package]]
name = "moxcms"
version = "0.8.1"
@ -978,7 +1048,7 @@ version = "2.16.17"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "55740c4ae1d8696773c78fdafd5d0e5fe9bc9f1b071c7ba493ba5c413a9184f3"
dependencies = [
"bitflags",
"bitflags 2.11.0",
"ctor",
"napi-derive",
"napi-sys",
@ -1035,6 +1105,45 @@ version = "1.0.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "650eef8c711430f1a879fdd01d4745a7deea475becfb90269c06775983bbf086"
[[package]]
name = "notify"
version = "8.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4d3d07927151ff8575b7087f245456e549fea62edf0ec4e565a5ee50c8402bc3"
dependencies = [
"bitflags 2.11.0",
"fsevent-sys",
"inotify",
"kqueue",
"libc",
"log",
"mio",
"notify-types",
"walkdir",
"windows-sys 0.60.2",
]
[[package]]
name = "notify-debouncer-mini"
version = "0.7.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "17849edfaabd9a5fef1c606d99cfc615a8e99f7ac4366406d86c7942a3184cf2"
dependencies = [
"log",
"notify",
"notify-types",
"tempfile",
]
[[package]]
name = "notify-types"
version = "2.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "42b8cfee0e339a0337359f3c88165702ac6e600dc01c0cc9579a92d62b08477a"
dependencies = [
"bitflags 2.11.0",
]
[[package]]
name = "num-traits"
version = "0.2.19"
@ -1059,7 +1168,7 @@ version = "0.3.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d49e936b501e5c5bf01fda3a9452ff86dc3ea98ad5f283e1455153142d97518c"
dependencies = [
"bitflags",
"bitflags 2.11.0",
"objc2",
"objc2-core-graphics",
"objc2-foundation",
@ -1071,7 +1180,7 @@ version = "0.3.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2a180dd8642fa45cdb7dd721cd4c11b1cadd4929ce112ebd8b9f5803cc79d536"
dependencies = [
"bitflags",
"bitflags 2.11.0",
"dispatch2",
"objc2",
]
@ -1082,7 +1191,7 @@ version = "0.3.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e022c9d066895efa1345f8e33e584b9f958da2fd4cd116792e15e07e4720a807"
dependencies = [
"bitflags",
"bitflags 2.11.0",
"dispatch2",
"objc2",
"objc2-core-foundation",
@ -1101,7 +1210,7 @@ version = "0.3.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e3e0adef53c21f888deb4fa59fc59f7eb17404926ee8a6f59f5df0fd7f9f3272"
dependencies = [
"bitflags",
"bitflags 2.11.0",
"objc2",
"objc2-core-foundation",
]
@ -1112,7 +1221,7 @@ version = "0.3.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "180788110936d59bab6bd83b6060ffdfffb3b922ba1396b312ae795e1de9d81d"
dependencies = [
"bitflags",
"bitflags 2.11.0",
"objc2",
"objc2-core-foundation",
]
@ -1217,7 +1326,7 @@ version = "0.18.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "60769b8b31b2a9f263dae2776c37b1b28ae246943cf719eb6946a1db05128a61"
dependencies = [
"bitflags",
"bitflags 2.11.0",
"crc32fast",
"fdeflate",
"flate2",
@ -1301,7 +1410,7 @@ version = "0.5.18"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ed2bf2547551a7053d6fdfafda3f938979645c44812fbfcda098faae3f1a362d"
dependencies = [
"bitflags",
"bitflags 2.11.0",
]
[[package]]
@ -1339,7 +1448,7 @@ version = "1.1.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b6fe4565b9518b83ef4f91bb47ce29620ca828bd32cb7e408f0062e9930ba190"
dependencies = [
"bitflags",
"bitflags 2.11.0",
"errno",
"libc",
"linux-raw-sys",
@ -1516,6 +1625,19 @@ dependencies = [
"walkdir",
]
[[package]]
name = "tempfile"
version = "3.27.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "32497e9a4c7b38532efcdebeef879707aa9f794296a4f0244f6f69e9bc8574bd"
dependencies = [
"fastrand",
"getrandom 0.3.4",
"once_cell",
"rustix",
"windows-sys 0.61.2",
]
[[package]]
name = "tendril"
version = "0.5.0"

View file

@ -16,6 +16,7 @@ doctest = false
forge-ast = { path = "../ast" }
forge-grep = { path = "../grep" }
arboard = "3"
ast-grep-core = { version = "0.39", default-features = false, features = ["tree-sitter"] }
dashmap = "6"
globset = "0.4"
html-to-markdown-rs = { version = "2", default-features = false }
@ -29,6 +30,8 @@ image = { version = "0.25", default-features = false, features = [
] }
napi = { version = "2", features = ["napi8"] }
napi-derive = "2"
notify = "8"
notify-debouncer-mini = "0.7"
regex = "1"
serde_json = "1"
similar = "2"

View file

@ -0,0 +1,796 @@
//! Fast atomic file edits with LSP-compatible position semantics.
//!
//! Applies an array of `TextEdit { range, newText }` to a file and writes the
//! result atomically (write to temp file in same directory, fsync, rename).
//! Edits are sorted in descending order by start position so positions remain
//! valid during application - there is no rope; one allocation, one splice
//! pass, one write.
//!
//! Position semantics match LSP: `line` is 0-based, `character` is the count
//! of UTF-16 code units from the line start.
use napi::{Error, Result, Status};
use napi_derive::napi;
use std::collections::HashSet;
use std::fs::{self, File, OpenOptions};
use std::io::Write;
use std::path::{Path, PathBuf};
/// LSP-style document position (see module docs for UTF-16 semantics).
#[napi(object)]
pub struct Position {
    /// 0-based line number.
    pub line: u32,
    /// 0-based offset in UTF-16 code units from line start (LSP convention).
    pub character: u32,
}
/// Half-open [start, end) span between two positions.
#[napi(object)]
pub struct Range {
    pub start: Position,
    pub end: Position,
}
/// One replacement: delete `range` and insert `new_text` in its place.
/// An empty range is a pure insertion.
#[napi(object)]
pub struct TextEdit {
    pub range: Range,
    #[napi(js_name = "newText")]
    pub new_text: String,
}
/// Tuning knobs for the atomic write.
#[napi(object)]
pub struct ApplyEditsOptions {
    /// fsync the temp file and parent dir before/after rename. Default true.
    pub fsync: Option<bool>,
}
/// Summary returned by `applyEdits` for a single file.
#[napi(object)]
#[derive(Debug)]
pub struct ApplyEditsResult {
    /// Number of edits applied.
    #[napi(js_name = "editsApplied")]
    pub edits_applied: u32,
    /// Final file size in bytes after the write.
    #[napi(js_name = "bytesWritten")]
    pub bytes_written: u32,
}
/// One file's worth of edits inside a workspace edit.
#[napi(object)]
pub struct TextDocumentEdit {
    #[napi(js_name = "filePath")]
    pub file_path: String,
    pub edits: Vec<TextEdit>,
}
/// Per-file outcome inside an `ApplyWorkspaceEditResult`.
#[napi(object)]
#[derive(Debug)]
pub struct WorkspaceEditFileResult {
    #[napi(js_name = "filePath")]
    pub file_path: String,
    #[napi(js_name = "editsApplied")]
    pub edits_applied: u32,
    #[napi(js_name = "bytesWritten")]
    pub bytes_written: u32,
}
/// Aggregate outcome of `applyWorkspaceEdit` across all files.
#[napi(object)]
#[derive(Debug)]
pub struct ApplyWorkspaceEditResult {
    #[napi(js_name = "filesChanged")]
    pub files_changed: u32,
    #[napi(js_name = "totalEditsApplied")]
    pub total_edits_applied: u32,
    pub files: Vec<WorkspaceEditFileResult>,
}
// ─── In-memory edit pipeline ──────────────────────────────────────────────
/// Apply `edits` to `original` bytes (which must be valid UTF-8) and return
/// the resulting bytes. `context` is a human-readable label (e.g. file path)
/// used only in error messages.
///
/// LSP semantics: edits must not overlap, and multiple inserts sharing the
/// same position are applied in array order (earlier entries end up earlier
/// in the output).
///
/// Errors (`Status::InvalidArg`): invalid UTF-8, out-of-range positions,
/// start > end, or overlapping edits.
fn compute_new_bytes(original: &[u8], edits: &[TextEdit], context: &str) -> Result<Vec<u8>> {
    let content = std::str::from_utf8(original).map_err(|e| {
        Error::new(
            Status::InvalidArg,
            format!(
                "{context}: file is not valid UTF-8 at byte {}: {e}",
                e.valid_up_to()
            ),
        )
    })?;
    if edits.is_empty() {
        return Ok(original.to_vec());
    }
    let line_starts = compute_line_starts(content);
    // Resolve each edit to (start_byte, end_byte, original_index). Storing
    // the index instead of cloning `new_text` avoids a String copy per edit
    // and lets us preserve LSP array-order semantics for same-position
    // inserts (the original code reversed them during the back-to-front
    // splice).
    let mut resolved: Vec<(usize, usize, usize)> = Vec::with_capacity(edits.len());
    for (idx, e) in edits.iter().enumerate() {
        let start = position_to_byte(content, &line_starts, &e.range.start).ok_or_else(|| {
            Error::new(
                Status::InvalidArg,
                format!(
                    "{context}: edit[{idx}]: start position line {} character {} is out of range",
                    e.range.start.line, e.range.start.character
                ),
            )
        })?;
        let end = position_to_byte(content, &line_starts, &e.range.end).ok_or_else(|| {
            Error::new(
                Status::InvalidArg,
                format!(
                    "{context}: edit[{idx}]: end position line {} character {} is out of range",
                    e.range.end.line, e.range.end.character
                ),
            )
        })?;
        if start > end {
            return Err(Error::new(
                Status::InvalidArg,
                format!("{context}: edit[{idx}]: start ({start}) > end ({end}) in byte offsets"),
            ));
        }
        resolved.push((start, end, idx));
    }
    // Sort ascending by (start, index) to detect overlaps deterministically.
    resolved.sort_by_key(|&(s, _, idx)| (s, idx));
    for w in resolved.windows(2) {
        let (_, prev_end, _) = &w[0];
        let (next_start, _, _) = &w[1];
        if next_start < prev_end {
            return Err(Error::new(
                Status::InvalidArg,
                format!(
                    "{context}: overlapping edits: prev ends at {prev_end}, next starts at {next_start}"
                ),
            ));
        }
    }
    // Splice from the back so earlier byte offsets stay valid. For edits at
    // the same start position, process the LATER array entry first so that
    // the EARLIER entry's text lands first in the output, matching the LSP
    // rule for same-position inserts.
    resolved.sort_by(|a, b| (b.0, b.2).cmp(&(a.0, a.2)));
    let mut out = original.to_vec();
    for &(start, end, idx) in &resolved {
        out.splice(start..end, edits[idx].new_text.bytes());
    }
    Ok(out)
}
// ─── Atomic-write helpers ─────────────────────────────────────────────────
/// Return a sibling `.{name}.applyEdits.<pid>.<tid>` path for `final_path`.
///
/// Placing the temp file in the same directory as the target keeps the later
/// rename on one filesystem/volume. Errors with `InvalidInput` when the path
/// has no parent directory or no UTF-8 file name.
fn make_tmp_path(final_path: &Path) -> std::io::Result<PathBuf> {
    let invalid =
        |msg: String| std::io::Error::new(std::io::ErrorKind::InvalidInput, msg);
    let parent = final_path
        .parent()
        .ok_or_else(|| invalid(format!("path has no parent: {}", final_path.display())))?;
    let file_name = final_path
        .file_name()
        .and_then(|s| s.to_str())
        .ok_or_else(|| invalid(format!("path has no filename: {}", final_path.display())))?;
    Ok(parent.join(format!(
        ".{file_name}.applyEdits.{}.{}",
        std::process::id(),
        thread_id()
    )))
}
/// Write `content` to `tmp_path`, optionally fsyncing before returning.
/// Does NOT rename; the caller is responsible for cleanup on error.
fn write_tmp(tmp_path: &Path, content: &[u8], do_fsync: bool) -> std::io::Result<()> {
    // `File::create` is write+create+truncate — exactly the semantics needed
    // for a fresh staging file.
    let mut file = File::create(tmp_path)?;
    file.write_all(content)?;
    if do_fsync {
        file.sync_all()?;
    }
    Ok(())
}
/// Atomic write: temp file in same dir → fsync → rename → fsync parent.
///
/// On rename failure the staged temp file is removed (best effort) and the
/// original file is left untouched.
fn atomic_write(path: &Path, content: &[u8], do_fsync: bool) -> std::io::Result<()> {
    let parent = path.parent().ok_or_else(|| {
        std::io::Error::new(
            std::io::ErrorKind::InvalidInput,
            format!("path has no parent: {}", path.display()),
        )
    })?;
    let tmp_path = make_tmp_path(path)?;
    write_tmp(&tmp_path, content, do_fsync)?;
    // Rename is atomic on POSIX; on Windows it's atomic for files on the same
    // volume, which the same-directory placement guarantees.
    if let Err(err) = fs::rename(&tmp_path, path) {
        // Best-effort cleanup of the temp file before propagating.
        let _ = fs::remove_file(&tmp_path);
        return Err(err);
    }
    if do_fsync {
        // fsync the directory so the rename hits the disk's filesystem
        // journal, not just the page cache. Best-effort: not all platforms
        // support directory fsync (Windows ignores it).
        if let Ok(dir) = File::open(parent) {
            let _ = dir.sync_all();
        }
    }
    Ok(())
}
// ─── Public API ───────────────────────────────────────────────────────────
/// Apply LSP-style TextEdits to a file atomically.
///
/// Steps:
/// 1. Read file as UTF-8.
/// 2. Resolve every (line, character-utf16) pair to a byte offset.
/// 3. Validate that no two edits overlap.
/// 4. Sort edits in descending order by start byte.
/// 5. Splice into a single output Vec<u8>.
/// 6. Write to `<file>.tmp.<pid>`, fsync, atomic rename, fsync parent dir.
///
/// Errors:
/// - File missing or unreadable
/// - File is not valid UTF-8
/// - Any range references a line/character position that does not exist
/// - Any two edits overlap
/// - Write or rename fails
#[napi(js_name = "applyEdits")]
pub fn apply_edits(
    file_path: String,
    edits: Vec<TextEdit>,
    options: Option<ApplyEditsOptions>,
) -> Result<ApplyEditsResult> {
    // fsync defaults to on; callers opt out explicitly.
    let do_fsync = options.and_then(|o| o.fsync).unwrap_or(true);
    let path = PathBuf::from(&file_path);
    let bytes = fs::read(&path)
        .map_err(|e| Error::new(Status::GenericFailure, format!("read {file_path}: {e}")))?;
    // Nothing to do: report the current size without rewriting the file.
    if edits.is_empty() {
        return Ok(ApplyEditsResult {
            edits_applied: 0,
            bytes_written: bytes.len() as u32,
        });
    }
    let updated = compute_new_bytes(&bytes, &edits, &file_path)?;
    atomic_write(&path, &updated, do_fsync)
        .map_err(|e| Error::new(Status::GenericFailure, format!("write {file_path}: {e}")))?;
    Ok(ApplyEditsResult {
        edits_applied: edits.len() as u32,
        bytes_written: updated.len() as u32,
    })
}
/// Apply LSP-style WorkspaceEdit (multiple files) atomically using two-phase commit.
///
/// Phase 1 (validate + stage): for each file, read it, compute the new bytes,
/// write them to a sibling `.tmp` file with fsync. If ANY file fails (I/O,
/// UTF-8, overlap), all staged `.tmp` files are cleaned up and the originals
/// are left untouched.
///
/// Phase 2 (commit): rename every staged `.tmp` over its original. If a rename
/// fails partway, remaining `.tmp` files are cleaned up and an error is returned
/// that includes how many files were successfully renamed.
///
/// After all renames: fsync each unique parent directory once.
#[napi(js_name = "applyWorkspaceEdit")]
pub fn apply_workspace_edit(
document_edits: Vec<TextDocumentEdit>,
options: Option<ApplyEditsOptions>,
) -> Result<ApplyWorkspaceEditResult> {
if document_edits.is_empty() {
return Ok(ApplyWorkspaceEditResult {
files_changed: 0,
total_edits_applied: 0,
files: vec![],
});
}
let opts = options.unwrap_or(ApplyEditsOptions { fsync: None });
let do_fsync = opts.fsync.unwrap_or(true);
// ── Phase 1: validate + stage ────────────────────────────────────────
// staged[i] = (final_path, tmp_path, new_bytes, edits_applied)
let mut staged: Vec<(PathBuf, PathBuf, Vec<u8>, u32)> =
Vec::with_capacity(document_edits.len());
for doc_edit in &document_edits {
let path = PathBuf::from(&doc_edit.file_path);
let bytes = match fs::read(&path) {
Ok(b) => b,
Err(e) => {
// Cleanup already-staged tmps before returning.
for (_, tmp, _, _) in &staged {
let _ = fs::remove_file(tmp);
}
return Err(Error::new(
Status::GenericFailure,
format!("read {}: {e}", doc_edit.file_path),
));
}
};
let new_bytes = match compute_new_bytes(&bytes, &doc_edit.edits, &doc_edit.file_path) {
Ok(b) => b,
Err(e) => {
for (_, tmp, _, _) in &staged {
let _ = fs::remove_file(tmp);
}
return Err(e);
}
};
let tmp_path = match make_tmp_path(&path) {
Ok(p) => p,
Err(e) => {
for (_, tmp, _, _) in &staged {
let _ = fs::remove_file(tmp);
}
return Err(Error::new(
Status::GenericFailure,
format!("make_tmp_path {}: {e}", doc_edit.file_path),
));
}
};
if let Err(e) = write_tmp(&tmp_path, &new_bytes, do_fsync) {
let _ = fs::remove_file(&tmp_path);
for (_, tmp, _, _) in &staged {
let _ = fs::remove_file(tmp);
}
return Err(Error::new(
Status::GenericFailure,
format!("write_tmp {}: {e}", doc_edit.file_path),
));
}
let edits_applied = if doc_edit.edits.is_empty() {
0
} else {
doc_edit.edits.len() as u32
};
staged.push((path, tmp_path, new_bytes, edits_applied));
}
// ── Phase 2: commit ──────────────────────────────────────────────────
let mut file_results: Vec<WorkspaceEditFileResult> = Vec::with_capacity(staged.len());
let mut succeeded = 0usize;
for (final_path, tmp_path, new_bytes, edits_applied) in &staged {
if let Err(e) = fs::rename(tmp_path, final_path) {
// Cleanup remaining staged tmps (including this one if rename failed
// before touching the original).
let _ = fs::remove_file(tmp_path);
for (_, remaining_tmp, _, _) in staged.iter().skip(succeeded + 1) {
let _ = fs::remove_file(remaining_tmp);
}
return Err(Error::new(
Status::GenericFailure,
format!(
"rename failed for {} after {succeeded} successful renames: {e}",
final_path.display()
),
));
}
file_results.push(WorkspaceEditFileResult {
file_path: final_path.to_string_lossy().into_owned(),
edits_applied: *edits_applied,
bytes_written: new_bytes.len() as u32,
});
succeeded += 1;
}
// ── fsync parent directories (deduplicated) ──────────────────────────
if do_fsync {
let mut parents: HashSet<PathBuf> = HashSet::new();
for (final_path, _, _, _) in &staged {
if let Some(parent) = final_path.parent() {
parents.insert(parent.to_path_buf());
}
}
for parent in &parents {
if let Ok(dir_fd) = File::open(parent) {
let _ = dir_fd.sync_all();
}
}
}
let total_edits_applied: u32 = file_results.iter().map(|r| r.edits_applied).sum();
Ok(ApplyWorkspaceEditResult {
files_changed: file_results.len() as u32,
total_edits_applied,
files: file_results,
})
}
// ─── Private helpers ──────────────────────────────────────────────────────
/// Pre-compute byte offsets where each line begins (line 0 = byte 0; line N
/// begins at the byte after the (N-1)th '\n').
///
/// Scans raw bytes: '\n' (0x0A) never occurs inside a multi-byte UTF-8
/// sequence, so byte comparison is safe and avoids char decoding.
fn compute_line_starts(content: &str) -> Vec<usize> {
    // Heuristic capacity: assume ~40 bytes per line.
    let mut starts = Vec::with_capacity(content.len() / 40 + 1);
    starts.push(0);
    for (idx, byte) in content.bytes().enumerate() {
        if byte == b'\n' {
            starts.push(idx + 1);
        }
    }
    starts
}
/// Convert an LSP `Position { line, character (UTF-16) }` to a byte offset
/// into `content`. Returns `None` if the line index is past EOF or the UTF-16
/// character offset is past the line's text.
fn position_to_byte(content: &str, line_starts: &[usize], pos: &Position) -> Option<usize> {
    let line_idx = pos.line as usize;
    // Line index beyond the last recorded line start ⇒ position cannot exist.
    if line_idx >= line_starts.len() {
        return None;
    }
    let line_start = line_starts[line_idx];
    // End of this line = start of the next line, or EOF for the final line.
    let line_end = line_starts
        .get(line_idx + 1)
        .copied()
        .unwrap_or(content.len());
    let line_text = &content[line_start..line_end];
    // Walk the line accumulating UTF-16 code units until we reach `character`.
    let mut utf16_units: u32 = 0;
    for (offset_in_line, c) in line_text.char_indices() {
        if c == '\n' || c == '\r' {
            // The position may point AT the line break (end-of-line), but
            // anything past it on this line is out of range.
            return if utf16_units == pos.character {
                Some(line_start + offset_in_line)
            } else {
                None
            };
        }
        if utf16_units >= pos.character {
            // Exact match returns this char's byte offset. NOTE(review): a
            // `character` landing inside a surrogate pair (e.g. 1 within an
            // emoji) rounds UP to the next code-point boundary here — confirm
            // that matches client expectations.
            return Some(line_start + offset_in_line);
        }
        utf16_units += c.len_utf16() as u32;
    }
    // Position at the very end of a line with no trailing newline (EOF).
    if utf16_units == pos.character {
        Some(line_end)
    } else {
        None
    }
}
/// Stable-within-a-thread numeric identifier, derived by hashing the opaque
/// `ThreadId`. Used only to make temp-file names unique per thread.
fn thread_id() -> u64 {
    use std::collections::hash_map::DefaultHasher;
    use std::hash::{Hash, Hasher};
    let mut hasher = DefaultHasher::new();
    std::thread::current().id().hash(&mut hasher);
    hasher.finish()
}
// ─── Tests ────────────────────────────────────────────────────────────────
#[cfg(test)]
mod tests {
use super::*;
use std::io::Read;
use std::path::PathBuf;
    /// Create a unique temp file pre-populated with `content`; returns its path.
    fn tmp_file(content: &str) -> PathBuf {
        use std::sync::atomic::{AtomicU64, Ordering};
        // pid + thread hash + monotonic counter keeps names unique even when
        // tests run in parallel.
        static COUNTER: AtomicU64 = AtomicU64::new(0);
        let dir = std::env::temp_dir();
        let path = dir.join(format!(
            "forge-edit-test-{}-{}-{}",
            std::process::id(),
            thread_id(),
            COUNTER.fetch_add(1, Ordering::Relaxed),
        ));
        fs::write(&path, content).unwrap();
        path
    }
    /// Read the file at `path` into a String; panics on I/O or UTF-8 errors.
    fn read(path: &PathBuf) -> String {
        let mut s = String::new();
        File::open(path).unwrap().read_to_string(&mut s).unwrap();
        s
    }
    /// Shorthand constructor for an LSP-style `Position`.
    fn pos(line: u32, character: u32) -> Position {
        Position { line, character }
    }
    /// Shorthand constructor for a `Range` from start/end line+character pairs.
    fn range(s_line: u32, s_char: u32, e_line: u32, e_char: u32) -> Range {
        Range {
            start: pos(s_line, s_char),
            end: pos(e_line, e_char),
        }
    }
#[test]
fn replaces_single_word() {
let path = tmp_file("hello world\n");
let edits = vec![TextEdit {
range: range(0, 6, 0, 11),
new_text: "Earth".into(),
}];
let r = apply_edits(path.to_string_lossy().into(), edits, None).unwrap();
assert_eq!(r.edits_applied, 1);
assert_eq!(read(&path), "hello Earth\n");
fs::remove_file(&path).ok();
}
#[test]
fn applies_multiple_non_overlapping_edits() {
let path = tmp_file("aaa bbb ccc\n");
let edits = vec![
TextEdit {
range: range(0, 0, 0, 3),
new_text: "XXX".into(),
},
TextEdit {
range: range(0, 8, 0, 11),
new_text: "YYY".into(),
},
];
let r = apply_edits(path.to_string_lossy().into(), edits, None).unwrap();
assert_eq!(r.edits_applied, 2);
assert_eq!(read(&path), "XXX bbb YYY\n");
fs::remove_file(&path).ok();
}
#[test]
fn applies_multiline_edit() {
let path = tmp_file("line one\nline two\nline three\n");
let edits = vec![TextEdit {
range: range(0, 5, 2, 4),
new_text: "ONE\n_REPLACED_\nfour".into(),
}];
apply_edits(path.to_string_lossy().into(), edits, None).unwrap();
assert_eq!(read(&path), "line ONE\n_REPLACED_\nfour three\n");
fs::remove_file(&path).ok();
}
#[test]
fn rejects_overlapping_edits() {
let path = tmp_file("aaaaa\n");
let edits = vec![
TextEdit {
range: range(0, 0, 0, 3),
new_text: "X".into(),
},
TextEdit {
range: range(0, 2, 0, 5),
new_text: "Y".into(),
},
];
let err = apply_edits(path.to_string_lossy().into(), edits, None).unwrap_err();
assert!(err.reason.contains("overlapping"), "got: {}", err.reason);
// Original file untouched
assert_eq!(read(&path), "aaaaa\n");
fs::remove_file(&path).ok();
}
#[test]
fn rejects_out_of_range_line() {
let path = tmp_file("only one line\n");
let edits = vec![TextEdit {
range: range(99, 0, 99, 1),
new_text: "x".into(),
}];
let err = apply_edits(path.to_string_lossy().into(), edits, None).unwrap_err();
assert!(err.reason.contains("out of range"), "got: {}", err.reason);
fs::remove_file(&path).ok();
}
#[test]
fn handles_utf16_code_units_for_emoji() {
// 😀 is one Unicode codepoint but two UTF-16 code units (surrogate pair).
// LSP measures `character` in UTF-16 units, so character=2 should land
// just after the emoji.
let path = tmp_file("a😀b\n");
let edits = vec![TextEdit {
// Replace just the emoji: chars 1..3 (UTF-16 units, since emoji is 2)
range: range(0, 1, 0, 3),
new_text: "X".into(),
}];
apply_edits(path.to_string_lossy().into(), edits, None).unwrap();
assert_eq!(read(&path), "aXb\n");
fs::remove_file(&path).ok();
}
#[test]
fn empty_edits_no_op() {
let path = tmp_file("unchanged\n");
let r = apply_edits(path.to_string_lossy().into(), vec![], None).unwrap();
assert_eq!(r.edits_applied, 0);
assert_eq!(read(&path), "unchanged\n");
fs::remove_file(&path).ok();
}
#[test]
fn insertion_at_position() {
// Pure insertion: range start == range end
let path = tmp_file("ab\n");
let edits = vec![TextEdit {
range: range(0, 1, 0, 1),
new_text: "X".into(),
}];
apply_edits(path.to_string_lossy().into(), edits, None).unwrap();
assert_eq!(read(&path), "aXb\n");
fs::remove_file(&path).ok();
}
#[test]
fn append_after_eof() {
let path = tmp_file("first line\nsecond line");
let edits = vec![TextEdit {
range: range(1, 11, 1, 11),
new_text: "\nthird line".into(),
}];
apply_edits(path.to_string_lossy().into(), edits, None).unwrap();
assert_eq!(read(&path), "first line\nsecond line\nthird line");
fs::remove_file(&path).ok();
}
#[test]
fn allows_disabling_fsync_for_speed() {
let path = tmp_file("hi\n");
let edits = vec![TextEdit {
range: range(0, 0, 0, 2),
new_text: "yo".into(),
}];
apply_edits(
path.to_string_lossy().into(),
edits,
Some(ApplyEditsOptions { fsync: Some(false) }),
)
.unwrap();
assert_eq!(read(&path), "yo\n");
fs::remove_file(&path).ok();
}
// ── workspace_edit tests ──────────────────────────────────────────────
#[test]
fn workspace_edit_applies_to_two_files() {
let path_a = tmp_file("alpha beta\n");
let path_b = tmp_file("foo bar\n");
let doc_edits = vec![
TextDocumentEdit {
file_path: path_a.to_string_lossy().into_owned(),
edits: vec![TextEdit {
range: range(0, 6, 0, 10),
new_text: "gamma".into(),
}],
},
TextDocumentEdit {
file_path: path_b.to_string_lossy().into_owned(),
edits: vec![TextEdit {
range: range(0, 4, 0, 7),
new_text: "baz".into(),
}],
},
];
let r = apply_workspace_edit(doc_edits, None).unwrap();
assert_eq!(r.files_changed, 2);
assert_eq!(r.total_edits_applied, 2);
assert_eq!(read(&path_a), "alpha gamma\n");
assert_eq!(read(&path_b), "foo baz\n");
fs::remove_file(&path_a).ok();
fs::remove_file(&path_b).ok();
}
#[test]
fn workspace_edit_rolls_back_on_phase1_error() {
let path_a = tmp_file("good file\n");
let path_b = tmp_file("aaaaa\n");
let path_c = tmp_file("another good file\n");
// path_b has overlapping edits — should cause Phase 1 failure.
let doc_edits = vec![
TextDocumentEdit {
file_path: path_a.to_string_lossy().into_owned(),
edits: vec![TextEdit {
range: range(0, 0, 0, 4),
new_text: "nice".into(),
}],
},
TextDocumentEdit {
file_path: path_b.to_string_lossy().into_owned(),
edits: vec![
TextEdit {
range: range(0, 0, 0, 3),
new_text: "X".into(),
},
TextEdit {
range: range(0, 2, 0, 5),
new_text: "Y".into(),
},
],
},
TextDocumentEdit {
file_path: path_c.to_string_lossy().into_owned(),
edits: vec![TextEdit {
range: range(0, 8, 0, 12),
new_text: "great".into(),
}],
},
];
let err = apply_workspace_edit(doc_edits, None).unwrap_err();
assert!(err.reason.contains("overlapping"), "got: {}", err.reason);
// All originals must be untouched.
assert_eq!(read(&path_a), "good file\n");
assert_eq!(read(&path_b), "aaaaa\n");
assert_eq!(read(&path_c), "another good file\n");
// No leftover .tmp files.
let tmp_a = make_tmp_path(&path_a).unwrap();
let tmp_b = make_tmp_path(&path_b).unwrap();
let tmp_c = make_tmp_path(&path_c).unwrap();
assert!(!tmp_a.exists(), "stale tmp left behind for path_a");
assert!(!tmp_b.exists(), "stale tmp left behind for path_b");
assert!(!tmp_c.exists(), "stale tmp left behind for path_c");
fs::remove_file(&path_a).ok();
fs::remove_file(&path_b).ok();
fs::remove_file(&path_c).ok();
}
#[test]
fn workspace_edit_empty_input() {
let r = apply_workspace_edit(vec![], None).unwrap();
assert_eq!(r.files_changed, 0);
assert_eq!(r.total_edits_applied, 0);
assert!(r.files.is_empty());
}
#[test]
fn workspace_edit_handles_missing_file() {
let missing = std::env::temp_dir().join("forge-edit-test-nonexistent-99999999.txt");
// Ensure it really doesn't exist.
let _ = fs::remove_file(&missing);
let doc_edits = vec![TextDocumentEdit {
file_path: missing.to_string_lossy().into_owned(),
edits: vec![TextEdit {
range: range(0, 0, 0, 1),
new_text: "x".into(),
}],
}];
let err = apply_workspace_edit(doc_edits, None).unwrap_err();
// Error must mention the path.
assert!(
err.reason.contains("forge-edit-test-nonexistent-99999999"),
"error does not mention path: {}",
err.reason
);
}
}

View file

@ -12,20 +12,22 @@ mod ast;
mod clipboard;
mod diff;
mod fd;
mod forge_parser;
mod fs_cache;
mod git;
mod glob;
mod glob_util;
mod grep;
mod highlight;
mod html;
mod image;
mod json_parse;
mod ps;
mod stream_process;
mod symbol;
mod task;
mod text;
mod ttsr;
mod forge_parser;
mod image;
mod truncate;
mod json_parse;
mod stream_process;
mod ttsr;
mod watch;
mod xxhash;
mod git;

View file

@ -0,0 +1,451 @@
//! Symbol-level structural replace and insert.
//!
//! Exposes [`replace_symbol`] and [`insert_around_symbol`] to JavaScript via
//! napi-rs. Both functions use the same ast-grep infrastructure as
//! `forge_ast::ast` (tree-sitter pattern matching) but add a higher-level
//! "find me the declaration named X" abstraction on top.
//!
//! ## Language support (v1)
//!
//! Only **TypeScript / JavaScript / TSX** are fully supported. For all other
//! languages the functions return an `Err` asking the caller to fall back to
//! `astEdit` with a custom pattern.
//!
//! ## Replacement scope (v1)
//!
//! For simplicity `replaceSymbol` replaces the **entire matched declaration**
//! (function / arrow / method node), not just the body. `new_body` is
//! therefore expected to be the full declaration text, e.g.:
//!
//! ```text
//! function foo(x: number): number { return x + 1; }
//! ```
//!
//! `insertAroundSymbol` supports only `BeforeDecl` and `AfterDecl` in v1.
//! `AtBodyStart` / `AtBodyEnd` return `Err("not yet implemented")`.
use ast_grep_core::{matcher::Pattern, tree_sitter::LanguageExt, Language};
use forge_ast::language::SupportLang;
use napi::{Error, Result};
use napi_derive::napi;
use std::{
fs,
path::{Path, PathBuf},
};
// ─── napi types ──────────────────────────────────────────────────────────────
/// Options for [`replace_symbol`].
#[napi(object)]
pub struct ReplaceSymbolOptions {
    /// Force a specific language ("typescript", "rust", "python", …).
    /// If absent, inferred from the file extension.
    pub lang: Option<String>,
    /// fsync the write. Defaults to true.
    pub fsync: Option<bool>,
}
/// Result of [`replace_symbol`]. The offset/line fields are populated only
/// when exactly one declaration was found and replaced.
#[napi(object)]
pub struct ReplaceSymbolResult {
    pub matched: bool,
    /// Byte offset of the start of the replaced range (set only when matched).
    #[napi(js_name = "byteStart")]
    pub byte_start: Option<u32>,
    /// Byte offset of the end of the replaced range (set only when matched).
    #[napi(js_name = "byteEnd")]
    pub byte_end: Option<u32>,
    /// 1-based line number of the replacement start (set only when matched).
    #[napi(js_name = "startLine")]
    pub start_line: Option<u32>,
}
/// Where to insert code relative to a matched declaration.
/// `AtBodyStart` / `AtBodyEnd` are declared but rejected by
/// `insert_around_symbol` in v1.
#[napi(string_enum)]
pub enum InsertPosition {
    BeforeDecl,
    AfterDecl,
    AtBodyStart,
    AtBodyEnd,
}
/// Options for [`insert_around_symbol`].
#[napi(object)]
pub struct InsertAroundSymbolOptions {
    /// Force a specific language; inferred from the file extension if absent.
    pub lang: Option<String>,
    /// fsync the write. Defaults to true.
    pub fsync: Option<bool>,
}
/// Result of [`insert_around_symbol`]; `byte_offset` is populated only when a
/// unique declaration was found and the code was inserted.
#[napi(object)]
pub struct InsertAroundSymbolResult {
    pub inserted: bool,
    /// Byte offset at which the code was inserted (set only when inserted).
    #[napi(js_name = "byteOffset")]
    pub byte_offset: Option<u32>,
}
// ─── language detection (self-contained, no phf dependency) ──────────────────
/// Resolve a user-supplied language name string to a `SupportLang`.
/// Covers the same aliases as `forge_ast::ast::LANG_ALIASES` but implemented
/// as a simple match to avoid a direct `phf` dependency in this crate.
///
/// Matching is case-insensitive; unknown names produce an `Err` that names
/// the rejected value.
fn resolve_lang_from_str(value: &str) -> Result<SupportLang> {
    let l = value.to_ascii_lowercase();
    let lang = match l.as_str() {
        "bash" | "sh" => SupportLang::Bash,
        "c" => SupportLang::C,
        "cpp" | "c++" | "cc" | "cxx" => SupportLang::Cpp,
        "csharp" | "c#" | "cs" => SupportLang::CSharp,
        "css" => SupportLang::Css,
        "diff" | "patch" => SupportLang::Diff,
        "elixir" | "ex" => SupportLang::Elixir,
        "go" | "golang" => SupportLang::Go,
        "haskell" | "hs" => SupportLang::Haskell,
        "hcl" | "tf" | "tfvars" | "terraform" => SupportLang::Hcl,
        "html" | "htm" => SupportLang::Html,
        "java" => SupportLang::Java,
        "javascript" | "js" | "jsx" | "mjs" | "cjs" => SupportLang::JavaScript,
        "json" => SupportLang::Json,
        "julia" | "jl" => SupportLang::Julia,
        "kotlin" | "kt" => SupportLang::Kotlin,
        "lua" => SupportLang::Lua,
        "make" | "makefile" => SupportLang::Make,
        "markdown" | "md" | "mdx" => SupportLang::Markdown,
        "nix" => SupportLang::Nix,
        "objc" | "objective-c" => SupportLang::ObjC,
        "odin" => SupportLang::Odin,
        "php" => SupportLang::Php,
        "python" | "py" => SupportLang::Python,
        "regex" => SupportLang::Regex,
        "ruby" | "rb" => SupportLang::Ruby,
        "rust" | "rs" => SupportLang::Rust,
        "scala" => SupportLang::Scala,
        "solidity" | "sol" => SupportLang::Solidity,
        "starlark" | "star" => SupportLang::Starlark,
        "swift" => SupportLang::Swift,
        "toml" => SupportLang::Toml,
        "tsx" => SupportLang::Tsx,
        "typescript" | "ts" | "mts" | "cts" => SupportLang::TypeScript,
        "verilog" | "systemverilog" | "sv" => SupportLang::Verilog,
        "xml" | "xsl" | "svg" => SupportLang::Xml,
        "yaml" | "yml" => SupportLang::Yaml,
        "zig" => SupportLang::Zig,
        _ => {
            return Err(Error::from_reason(format!(
                "Unsupported language '{value}'"
            )))
        }
    };
    Ok(lang)
}
/// Resolve the language to use: an explicit, non-empty `lang` option wins;
/// otherwise infer from the file's extension via the `Language` trait.
fn resolve_lang(lang_opt: Option<&str>, file_path: &Path) -> Result<SupportLang> {
    let explicit = lang_opt.map(str::trim).filter(|l| !l.is_empty());
    match explicit {
        Some(name) => resolve_lang_from_str(name),
        // Use the SupportLang trait impl which calls from_extension internally.
        None => <SupportLang as Language>::from_path(file_path).ok_or_else(|| {
            Error::from_reason(format!(
                "Cannot infer language from '{}'. Specify `lang` explicitly.",
                file_path.display()
            ))
        }),
    }
}
// ─── language family check ────────────────────────────────────────────────────
/// Returns `true` for the TypeScript/JavaScript/TSX family.
fn is_ts_js(lang: SupportLang) -> bool {
    match lang {
        SupportLang::TypeScript | SupportLang::JavaScript | SupportLang::Tsx => true,
        _ => false,
    }
}
// ─── pattern building ─────────────────────────────────────────────────────────
/// Build ast-grep patterns to try for a given symbol name in a TS/JS/TSX file.
///
/// Plain name → function declaration + arrow patterns.
/// Dotted name like `"Class.method"` → class method pattern.
fn ts_patterns_for_symbol(symbol: &str) -> Result<Vec<String>> {
    match symbol.split_once('.') {
        Some((class_part, method_part)) => {
            let class_name = class_part.trim();
            let method_name = method_part.trim();
            if class_name.is_empty() || method_name.is_empty() {
                return Err(Error::from_reason(format!(
                    "Invalid symbol name '{symbol}': expected 'ClassName.methodName'"
                )));
            }
            // Method inside a named class
            Ok(vec![format!(
                "class {class_name} {{ $$$ {method_name}($$$ARGS) {{ $$$BODY }} $$$ }}"
            )])
        }
        None => Ok(vec![
            // function declaration
            format!("function {symbol}($$$ARGS) {{ $$$BODY }}"),
            // arrow with parens
            format!("const {symbol} = ($$$ARGS) => {{ $$$BODY }}"),
            // arrow without parens (single param)
            format!("const {symbol} = $ARG => {{ $$$BODY }}"),
        ]),
    }
}
// ─── matching helpers ─────────────────────────────────────────────────────────
/// Byte/line coordinates of one matched declaration within the source text.
struct SymbolMatch {
    byte_start: usize,
    byte_end: usize,
    start_line: usize, // 0-based (ast-grep convention)
}
/// Run all patterns against `source` and collect distinct matches, deduped by
/// start byte. Returns all matches found — ambiguity (more than one distinct
/// declaration) is NOT an error here; callers decide how to handle multiple
/// matches. The only error is when none of the patterns compile for `lang`.
fn find_symbol_matches(
    source: &str,
    patterns: &[String],
    lang: SupportLang,
) -> Result<Vec<SymbolMatch>> {
    let mut compiled: Vec<Pattern> = Vec::new();
    for pat_str in patterns {
        match Pattern::try_new(pat_str, lang) {
            Ok(p) => compiled.push(p),
            Err(_) => {} // skip patterns that don't compile for this lang variant
        }
    }
    if compiled.is_empty() {
        return Err(Error::from_reason(
            "No patterns compiled successfully for this symbol/language combination".to_string(),
        ));
    }
    let ast = lang.ast_grep(source);
    // BTreeMap keyed on start byte → deduplicates when multiple patterns hit
    // the same node.
    let mut by_start: std::collections::BTreeMap<usize, SymbolMatch> =
        std::collections::BTreeMap::new();
    for pattern in compiled {
        for m in ast.root().find_all(pattern) {
            let range = m.range();
            by_start.entry(range.start).or_insert(SymbolMatch {
                byte_start: range.start,
                byte_end: range.end,
                start_line: m.start_pos().line(),
            });
        }
    }
    Ok(by_start.into_values().collect())
}
// ─── atomic write ────────────────────────────────────────────────────────────
/// Atomically replace `path` with `content`: write to a hidden sibling temp
/// file, optionally fsync it, then `rename` over the target so readers never
/// observe a partially written file.
///
/// On any failure (open, write, fsync, or rename) the temp file is removed,
/// so no stale `.<name>.symbol.<pid>` files are left behind. (The original
/// only cleaned up on rename failure; a failed write leaked the temp file.)
fn atomic_write_bytes(path: &Path, content: &[u8], do_fsync: bool) -> std::io::Result<()> {
    use std::io::Write;
    let parent = path.parent().ok_or_else(|| {
        std::io::Error::new(
            std::io::ErrorKind::InvalidInput,
            format!("path has no parent: {}", path.display()),
        )
    })?;
    let file_name = path.file_name().and_then(|s| s.to_str()).ok_or_else(|| {
        std::io::Error::new(
            std::io::ErrorKind::InvalidInput,
            format!("path has no filename: {}", path.display()),
        )
    })?;
    // NOTE(review): temp name is unique per process only; two threads of one
    // process writing the same target could still collide — confirm callers.
    let tmp_name = format!(".{file_name}.symbol.{}", std::process::id());
    let tmp_path = parent.join(tmp_name);
    // Write (and optionally fsync) the temp file inside a closure so every
    // early-exit error path can share the same cleanup below.
    let write_result = (|| -> std::io::Result<()> {
        let mut f = std::fs::OpenOptions::new()
            .write(true)
            .create(true)
            .truncate(true)
            .open(&tmp_path)?;
        f.write_all(content)?;
        if do_fsync {
            f.sync_all()?;
        }
        Ok(())
    })();
    if let Err(e) = write_result {
        let _ = fs::remove_file(&tmp_path);
        return Err(e);
    }
    if let Err(e) = fs::rename(&tmp_path, path) {
        let _ = fs::remove_file(&tmp_path);
        return Err(e);
    }
    if do_fsync {
        // Persist the directory entry (the rename itself); best-effort.
        if let Ok(dir_fd) = std::fs::File::open(parent) {
            let _ = dir_fd.sync_all();
        }
    }
    Ok(())
}
// ─── public napi functions ───────────────────────────────────────────────────
/// Replace the entire declaration of the symbol identified by `symbol_name`
/// with `new_body`.
///
/// `symbol_name` is either a plain identifier (e.g. `"executeCommand"`) or a
/// dotted path (e.g. `"MyClass.myMethod"`).
///
/// **v1 scope**: only TypeScript / JavaScript / TSX are supported. For other
/// languages use `astEdit` with a custom pattern.
///
/// **v1 replacement**: the *entire* matched declaration node is replaced, not
/// just its body. `new_body` should be the complete declaration text.
///
/// Returns `matched: false` when no declaration matches. Returns an error
/// when multiple distinct declarations match (ambiguity).
#[napi(js_name = "replaceSymbol")]
pub fn replace_symbol(
file_path: String,
symbol_name: String,
new_body: String,
options: Option<ReplaceSymbolOptions>,
) -> Result<ReplaceSymbolResult> {
let opts = options.unwrap_or(ReplaceSymbolOptions {
lang: None,
fsync: None,
});
let do_fsync = opts.fsync.unwrap_or(true);
let path = PathBuf::from(&file_path);
let lang = resolve_lang(opts.lang.as_deref(), &path)?;
if !is_ts_js(lang) {
return Err(Error::from_reason(format!(
"Language '{}' is not yet supported for symbol resolution. \
Use astEdit with a custom pattern instead.",
lang.canonical_name()
)));
}
let source = fs::read_to_string(&path)
.map_err(|e| Error::from_reason(format!("read {file_path}: {e}")))?;
let patterns = ts_patterns_for_symbol(&symbol_name)?;
let matches = find_symbol_matches(&source, &patterns, lang)?;
match matches.len() {
0 => Ok(ReplaceSymbolResult {
matched: false,
byte_start: None,
byte_end: None,
start_line: None,
}),
1 => {
let m = &matches[0];
let before = &source.as_bytes()[..m.byte_start];
let after = &source.as_bytes()[m.byte_end..];
let mut out = Vec::with_capacity(before.len() + new_body.len() + after.len());
out.extend_from_slice(before);
out.extend_from_slice(new_body.as_bytes());
out.extend_from_slice(after);
atomic_write_bytes(&path, &out, do_fsync)
.map_err(|e| Error::from_reason(format!("write {file_path}: {e}")))?;
Ok(ReplaceSymbolResult {
matched: true,
byte_start: Some(m.byte_start as u32),
byte_end: Some(m.byte_end as u32),
start_line: Some((m.start_line + 1) as u32),
})
}
n => Err(Error::from_reason(format!(
"Ambiguous symbol '{symbol_name}': found {n} matching declarations in '{file_path}'. \
Qualify the name (e.g. 'ClassName.methodName') or use astEdit with a narrower pattern."
))),
}
}
/// Insert `code` before or after the declaration of the symbol identified by
/// `symbol_name`.
///
/// **v1 scope**: only TypeScript / JavaScript / TSX are supported.
///
/// **v1 positions**: only `BeforeDecl` and `AfterDecl` are implemented.
/// `AtBodyStart` / `AtBodyEnd` return `Err("not yet implemented")`.
#[napi(js_name = "insertAroundSymbol")]
pub fn insert_around_symbol(
file_path: String,
symbol_name: String,
position: InsertPosition,
code: String,
options: Option<InsertAroundSymbolOptions>,
) -> Result<InsertAroundSymbolResult> {
match position {
InsertPosition::AtBodyStart | InsertPosition::AtBodyEnd => {
return Err(Error::from_reason(
"AtBodyStart / AtBodyEnd are not yet implemented in v1. \
Use BeforeDecl or AfterDecl, or use astEdit with a custom pattern."
.to_string(),
));
}
_ => {}
}
let opts = options.unwrap_or(InsertAroundSymbolOptions {
lang: None,
fsync: None,
});
let do_fsync = opts.fsync.unwrap_or(true);
let path = PathBuf::from(&file_path);
let lang = resolve_lang(opts.lang.as_deref(), &path)?;
if !is_ts_js(lang) {
return Err(Error::from_reason(format!(
"Language '{}' is not yet supported for symbol resolution. \
Use astEdit with a custom pattern instead.",
lang.canonical_name()
)));
}
let source = fs::read_to_string(&path)
.map_err(|e| Error::from_reason(format!("read {file_path}: {e}")))?;
let patterns = ts_patterns_for_symbol(&symbol_name)?;
let matches = find_symbol_matches(&source, &patterns, lang)?;
match matches.len() {
0 => Ok(InsertAroundSymbolResult {
inserted: false,
byte_offset: None,
}),
1 => {
let m = &matches[0];
let insert_at = match position {
InsertPosition::BeforeDecl => m.byte_start,
InsertPosition::AfterDecl => m.byte_end,
_ => unreachable!(),
};
let before = &source.as_bytes()[..insert_at];
let after = &source.as_bytes()[insert_at..];
let mut out = Vec::with_capacity(before.len() + code.len() + after.len());
out.extend_from_slice(before);
out.extend_from_slice(code.as_bytes());
out.extend_from_slice(after);
atomic_write_bytes(&path, &out, do_fsync)
.map_err(|e| Error::from_reason(format!("write {file_path}: {e}")))?;
Ok(InsertAroundSymbolResult {
inserted: true,
byte_offset: Some(insert_at as u32),
})
}
n => Err(Error::from_reason(format!(
"Ambiguous symbol '{symbol_name}': found {n} matching declarations in '{file_path}'. \
Qualify the name (e.g. 'ClassName.methodName') or use astEdit with a narrower pattern."
))),
}
}

View file

@ -0,0 +1,255 @@
//! Recursive filesystem watcher exposed to JavaScript via napi-rs.
//!
//! Purpose: keep high-volume filesystem notifications out of the JavaScript
//! event loop while preserving a small, debounced batch API for agent sessions.
use dashmap::DashMap;
use globset::{GlobBuilder, GlobSet, GlobSetBuilder};
use napi::bindgen_prelude::*;
use napi::threadsafe_function::{
ErrorStrategy, ThreadSafeCallContext, ThreadsafeFunction, ThreadsafeFunctionCallMode,
};
use napi_derive::napi;
use notify::{Config, Event, EventKind, RecommendedWatcher, RecursiveMode, Watcher};
use std::path::{Path, PathBuf};
use std::sync::atomic::{AtomicBool, AtomicU32, Ordering};
use std::sync::{Arc, mpsc};
use std::thread::{self, JoinHandle};
use std::time::{Duration, Instant};
// Monotonic source of watcher handles returned to JavaScript.
static NEXT_HANDLE: AtomicU32 = AtomicU32::new(1);
// Global registry of live watchers, keyed by handle (lazily initialized).
static WATCHERS: std::sync::OnceLock<DashMap<u32, WatcherHandle>> = std::sync::OnceLock::new();
/// Accessor for the lazily-initialized global watcher registry.
fn watchers() -> &'static DashMap<u32, WatcherHandle> {
    WATCHERS.get_or_init(DashMap::new)
}
/// Owns one live watcher plus its dispatch thread. Removing it from the
/// registry drops it, which stops and joins the thread (see `Drop` impl).
struct WatcherHandle {
    /// Held only to keep the OS watcher registered; never read.
    _watcher: RecommendedWatcher,
    /// Signals the dispatch thread to exit.
    stop: Arc<AtomicBool>,
    /// Dispatch thread; `Some` until joined in `Drop`.
    thread: Option<JoinHandle<()>>,
}
impl Drop for WatcherHandle {
    // Set the stop flag, then join. The dispatch thread polls `stop` with a
    // 100ms recv timeout, so drop blocks at most briefly.
    fn drop(&mut self) {
        self.stop.store(true, Ordering::Relaxed);
        if let Some(thread) = self.thread.take() {
            let _ = thread.join();
        }
    }
}
/// Options for `watchTree`.
#[napi(object)]
pub struct WatchOptions {
    /// Glob patterns to ignore. Bare patterns match anywhere below the root.
    pub ignore: Option<Vec<String>>,
    /// Coalesce events that fire within this window in milliseconds. Default 50.
    #[napi(js_name = "debounceMs")]
    pub debounce_ms: Option<u32>,
    /// Watch recursively. Default true.
    pub recursive: Option<bool>,
}
/// A single filesystem notification delivered to the JS callback.
#[napi(object)]
#[derive(Clone)]
pub struct WatchEvent {
    /// "create" | "modify" | "remove" | "rename"
    pub kind: String,
    /// Absolute path of the affected entry.
    pub path: String,
}
/// Compile user-supplied ignore globs into a `GlobSet`.
///
/// Backslashes are normalized to `/`, and a bare pattern (no separator, not
/// already starting with `**`) is anchored as `**/<pattern>` so it matches
/// anywhere below the watch root.
fn build_ignore_set(patterns: &[String]) -> std::result::Result<GlobSet, String> {
    let mut builder = GlobSetBuilder::new();
    for pattern in patterns {
        let normalized = pattern.replace('\\', "/");
        let anchored = if normalized.contains('/') || normalized.starts_with("**") {
            normalized
        } else {
            format!("**/{normalized}")
        };
        let glob = GlobBuilder::new(&anchored)
            .literal_separator(true)
            .build()
            .map_err(|e| format!("invalid ignore pattern '{pattern}': {e}"))?;
        builder.add(glob);
    }
    builder
        .build()
        .map_err(|e| format!("failed to build ignore set: {e}"))
}
/// Map a raw `notify` event kind to the coarse string vocabulary exposed to
/// JavaScript ("create" | "modify" | "remove" | "rename").
///
/// Returns `None` for kinds the watcher deliberately drops (the catch-all
/// arm, e.g. access notifications), filtering them out of the batch.
fn event_kind(kind: &EventKind) -> Option<&'static str> {
    use notify::EventKind::*;
    use notify::event::ModifyKind;
    match kind {
        Create(_) => Some("create"),
        Remove(_) => Some("remove"),
        // Renames surface from notify as Modify(Name(_)).
        Modify(ModifyKind::Name(_)) => Some("rename"),
        Modify(_) => Some("modify"),
        _ => None,
    }
}
/// Return `true` when `path` (relative to `root`) matches the ignore set.
/// Paths outside `root` and the no-ignores case are never ignored.
fn path_is_ignored(path: &Path, root: &Path, ignore_set: &GlobSet, has_ignores: bool) -> bool {
    if !has_ignores {
        return false;
    }
    match path.strip_prefix(root) {
        Ok(relative) => ignore_set.is_match(relative),
        Err(_) => false,
    }
}
/// Starting from `first`, keep pulling events off the channel until the
/// debounce window closes or the sender hangs up, returning the whole batch.
fn drain_batch(
    receiver: &mpsc::Receiver<notify::Result<Event>>,
    first: notify::Result<Event>,
    debounce: Duration,
) -> Vec<notify::Result<Event>> {
    let deadline = Instant::now() + debounce;
    let mut collected = vec![first];
    loop {
        // None ⇒ the deadline has already passed.
        let Some(remaining) = deadline.checked_duration_since(Instant::now()) else {
            break;
        };
        if remaining.is_zero() {
            break;
        }
        match receiver.recv_timeout(remaining) {
            Ok(event) => collected.push(event),
            // Timeout and disconnect both end the batch.
            Err(_) => break,
        }
    }
    collected
}
/// Convert a drained batch of raw notify events into the `WatchEvent`s the JS
/// callback receives, dropping watcher errors, unmapped kinds, and ignored
/// paths along the way.
fn convert_batch(
    root: &Path,
    ignore_set: &GlobSet,
    has_ignores: bool,
    raw_events: Vec<notify::Result<Event>>,
) -> Vec<WatchEvent> {
    let mut converted = Vec::new();
    // `flatten` skips Err results; each surviving event may carry several paths.
    for event in raw_events.into_iter().flatten() {
        let Some(kind) = event_kind(&event.kind) else {
            continue;
        };
        for path in event.paths {
            if path_is_ignored(&path, root, ignore_set, has_ignores) {
                continue;
            }
            converted.push(WatchEvent {
                kind: kind.to_string(),
                path: path.to_string_lossy().into_owned(),
            });
        }
    }
    converted
}
/// Start watching a directory tree and return a numeric handle for `stopWatch`.
///
/// Setup order matters: the threadsafe callback wrapper is built first, then
/// the OS watcher feeds an mpsc channel, and a dedicated dispatch thread
/// debounces/filters batches before calling back into JS. The handle owns all
/// three pieces via `WatcherHandle`.
#[napi(
    js_name = "watchTree",
    ts_args_type = "root: string, options: WatchOptions | undefined | null, onEvents: (events: WatchEvent[]) => void"
)]
pub fn watch_tree(
    _env: Env,
    root: String,
    options: Option<WatchOptions>,
    on_events: JsFunction,
) -> Result<u32> {
    let opts = options.unwrap_or(WatchOptions {
        ignore: None,
        debounce_ms: None,
        recursive: None,
    });
    let debounce = Duration::from_millis(u64::from(opts.debounce_ms.unwrap_or(50)));
    let recursive = opts.recursive.unwrap_or(true);
    let root_path = PathBuf::from(&root);
    if !root_path.exists() {
        return Err(Error::new(
            Status::InvalidArg,
            format!("watch root does not exist: {root}"),
        ));
    }
    let ignore_patterns = opts.ignore.unwrap_or_default();
    let ignore_set =
        build_ignore_set(&ignore_patterns).map_err(|e| Error::new(Status::InvalidArg, e))?;
    let has_ignores = !ignore_patterns.is_empty();
    // Wrap the JS callback so the dispatch thread can call it: each batch is
    // marshalled into a JS array of `{ kind, path }` objects.
    let tsfn: ThreadsafeFunction<Vec<WatchEvent>, ErrorStrategy::CalleeHandled> = on_events
        .create_threadsafe_function(0, |ctx: ThreadSafeCallContext<Vec<WatchEvent>>| {
            let events: Vec<WatchEvent> = ctx.value;
            let env = ctx.env;
            let mut arr = env.create_array_with_length(events.len())?;
            for (i, event) in events.into_iter().enumerate() {
                let mut obj = env.create_object()?;
                obj.set_named_property("kind", env.create_string(&event.kind)?)?;
                obj.set_named_property("path", env.create_string(&event.path)?)?;
                arr.set_element(i as u32, obj)?;
            }
            Ok(vec![arr])
        })?;
    // The OS watcher pushes raw results onto this channel from its own thread.
    let (sender, receiver) = mpsc::channel();
    let mut watcher = RecommendedWatcher::new(
        move |result| {
            let _ = sender.send(result);
        },
        Config::default(),
    )
    .map_err(|e| Error::new(Status::GenericFailure, format!("failed to create watcher: {e}")))?;
    let mode = if recursive {
        RecursiveMode::Recursive
    } else {
        RecursiveMode::NonRecursive
    };
    watcher.watch(&root_path, mode).map_err(|e| {
        Error::new(
            Status::GenericFailure,
            format!("failed to watch '{root}': {e}"),
        )
    })?;
    let stop = Arc::new(AtomicBool::new(false));
    let stop_thread = Arc::clone(&stop);
    let root_thread = root_path.clone();
    let tsfn_thread = tsfn.clone();
    // Dispatch thread: poll the channel (100ms timeout so the stop flag is
    // re-checked regularly), debounce into batches, filter, and call JS.
    let thread = thread::spawn(move || {
        while !stop_thread.load(Ordering::Relaxed) {
            let first = match receiver.recv_timeout(Duration::from_millis(100)) {
                Ok(event) => event,
                Err(mpsc::RecvTimeoutError::Timeout) => continue,
                Err(mpsc::RecvTimeoutError::Disconnected) => break,
            };
            let raw = drain_batch(&receiver, first, debounce);
            let batch = convert_batch(&root_thread, &ignore_set, has_ignores, raw);
            if !batch.is_empty() {
                tsfn_thread.call(Ok(batch), ThreadsafeFunctionCallMode::NonBlocking);
            }
        }
    });
    let handle = NEXT_HANDLE.fetch_add(1, Ordering::Relaxed);
    watchers().insert(
        handle,
        WatcherHandle {
            _watcher: watcher,
            stop,
            thread: Some(thread),
        },
    );
    Ok(handle)
}
/// Stop a watcher returned by `watchTree`.
///
/// Returns `true` when the handle was live. Removing the entry drops its
/// `WatcherHandle`, which stops and joins the dispatch thread.
#[napi(js_name = "stopWatch")]
pub fn stop_watch(handle: u32) -> Result<bool> {
    let removed = watchers().remove(&handle);
    Ok(removed.is_some())
}

View file

@ -1,5 +1,6 @@
import { readFileSync } from "node:fs";
import { join } from "node:path";
import type { Api, Model } from "@singularity-forge/pi-ai";
import {
AuthStorage,
createAgentSession,
@ -13,7 +14,6 @@ import {
SessionManager,
SettingsManager,
} from "@singularity-forge/pi-coding-agent";
import type { Api, Model } from "@singularity-forge/pi-ai";
import chalk from "chalk";
import { agentDir, authFilePath, sessionsDir } from "./app-paths.js";
import {
@ -222,7 +222,7 @@ function ensureRtkBootstrap(): Promise<void> {
}
// `sf update` — update to the latest version via npm
if (cliFlags.messages[0] === 'update') {
if (cliFlags.messages[0] === "update") {
const { runUpdate } = await import("./update-cmd.js");
await runUpdate();
process.exit(0);
@ -235,12 +235,12 @@ if (cliFlags.messages[0] === "graph") {
const sub = cliFlags.messages[1];
const {
buildGraph,
writeGraph,
graphStatus,
graphQuery,
graphDiff,
resolveSFRoot,
} = await import("@singularity-forge/mcp-server");
writeGraph,
} = await import("@singularity-forge/pi-agent-core");
const projectDir = process.cwd();
const sfRoot = resolveSFRoot(projectDir);
@ -467,7 +467,9 @@ if (cliFlags.messages[0] === "sessions") {
let sessions;
if (cliFlags.allSessions) {
process.stderr.write(chalk.dim("Loading all sessions across all projects...\n"));
process.stderr.write(
chalk.dim("Loading all sessions across all projects...\n"),
);
sessions = await SessionManager.listAll();
} else {
const safePath = `--${cwd.replace(/^[/\\]/, "").replace(/[/\\:]/g, "-")}--`;
@ -477,14 +479,14 @@ if (cliFlags.messages[0] === "sessions") {
}
if (sessions.length === 0) {
process.stderr.write(
chalk.yellow("No sessions found.\n"),
);
process.stderr.write(chalk.yellow("No sessions found.\n"));
process.exit(0);
}
const label = cliFlags.allSessions ? "all projects" : cwd;
process.stderr.write(chalk.bold(`\n Sessions (${sessions.length}) for ${label}:\n\n`));
process.stderr.write(
chalk.bold(`\n Sessions (${sessions.length}) for ${label}:\n\n`),
);
const maxShow = 20;
const toShow = sessions.slice(0, maxShow);
@ -497,9 +499,8 @@ if (cliFlags.messages[0] === "sessions") {
? s.firstMessage.replace(/\n/g, " ").substring(0, 80)
: chalk.dim("(empty)");
const num = String(i + 1).padStart(3);
const projectLabel = cliFlags.allSessions && s.cwd
? ` ${chalk.yellow(`[${s.cwd}]`)}`
: "";
const projectLabel =
cliFlags.allSessions && s.cwd ? ` ${chalk.yellow(`[${s.cwd}]`)}` : "";
process.stderr.write(
` ${chalk.bold(num)}. ${chalk.green(date)} ${chalk.dim(`(${msgs} msgs)`)}${name}${projectLabel}\n`,
);
@ -873,8 +874,7 @@ if (!cliFlags.worktree && !isPrintMode) {
// which handles non-interactive output gracefully.
// ---------------------------------------------------------------------------
if (
(cliFlags.messages[0] === "auto" ||
cliFlags.messages[0] === "autonomous") &&
(cliFlags.messages[0] === "auto" || cliFlags.messages[0] === "autonomous") &&
!process.stdout.isTTY
) {
process.stderr.write(

View file

@ -10,6 +10,10 @@
*/
import { Type } from "@sinclair/typebox";
import {
formatRoundResultForTool,
type RoundResult,
} from "@singularity-forge/pi-agent-core";
import type {
ExtensionAPI,
ExtensionCommandContext,
@ -19,7 +23,6 @@ import { sanitizeError } from "./shared/sanitize.js";
import {
type Question,
type QuestionOption,
type RoundResult,
showInterviewRound,
} from "./shared/tui.js";
@ -42,7 +45,7 @@ interface RemoteResultDetails {
autoResolved?: boolean;
autoResolveStrategy?: string;
questions?: Question[];
response?: import("./remote-questions/types.js").RemoteAnswer;
response?: RoundResult;
error?: boolean;
}
@ -227,8 +230,10 @@ async function askLocalQuestionRound(
if (result !== undefined) return result;
if (signal?.aborted) return null;
const answers: Record<string, { selected: string | string[]; notes: string }> =
{};
const answers: Record<
string,
{ selected: string | string[]; notes: string }
> = {};
for (const q of questions) {
const options = q.options.map((o) => o.label);
if (!q.allowMultiple) {
@ -294,11 +299,11 @@ function logHeadlessLocalAutoResolve(result: RaceableResult): void {
)
return;
const questions = details.questions as Question[];
const response =
details.response as import("./remote-questions/types.js").RemoteAnswer;
const response = details.response as RoundResult;
const firstQuestion = questions[0];
if (!firstQuestion) return;
const firstAnswer = response.answers[firstQuestion.id]?.answers?.[0];
const selected = response.answers[firstQuestion.id]?.selected;
const firstAnswer = Array.isArray(selected) ? selected[0] : selected;
if (!firstAnswer) return;
process.stderr.write(
`[gate] auto-resolved ${gateLogId(firstQuestion.id)} → "${cleanRecommendedLabel(firstAnswer)}" (timeout, headless, no telegram)\n`,
@ -306,22 +311,7 @@ function logHeadlessLocalAutoResolve(result: RaceableResult): void {
}
/** Convert the shared RoundResult into the JSON the LLM expects. */
function formatForLLM(result: RoundResult): string {
const answers: Record<string, { answers: string[] }> = {};
for (const [id, answer] of Object.entries(result.answers)) {
const list: string[] = [];
if (Array.isArray(answer.selected)) {
list.push(...answer.selected);
} else {
list.push(answer.selected);
}
if (answer.notes) {
list.push(`user_note: ${answer.notes}`);
}
answers[id] = { answers: list };
}
return JSON.stringify({ answers });
}
const formatForLLM = formatRoundResultForTool;
// ─── Extension ────────────────────────────────────────────────────────────────
@ -393,12 +383,7 @@ export default function AskUserQuestions(pi: ExtensionAPI) {
const raceResult = await raceRemoteAndLocal(
() => tryRemoteQuestions(params.questions, raceSignal),
() =>
askLocalQuestionRound(
params.questions,
raceSignal,
ctx as any,
),
() => askLocalQuestionRound(params.questions, raceSignal, ctx as any),
raceController,
params.questions,
);
@ -500,7 +485,10 @@ export default function AskUserQuestions(pi: ExtensionAPI) {
ctx as any,
);
if (!result) {
return errorResult("ask_user_questions was cancelled", params.questions);
return errorResult(
"ask_user_questions was cancelled",
params.questions,
);
}
// Check if cancelled (empty answers = user exited)
@ -600,13 +588,13 @@ export default function AskUserQuestions(pi: ExtensionAPI) {
);
continue;
}
const answerText =
answer.answers.length > 0
? answer.answers.join(", ")
: "(custom)";
const selected = answer.selected;
const answerText = Array.isArray(selected)
? selected.join(", ")
: selected || "(custom)";
let line = `${theme.fg("success", "✓ ")}${theme.fg("accent", q.header)}: ${answerText}`;
if (answer.user_note) {
line += ` ${theme.fg("muted", `[note: ${answer.user_note}]`)}`;
if (answer.notes) {
line += ` ${theme.fg("muted", `[note: ${answer.notes}]`)}`;
}
lines.push(line);
}

View file

@ -3,6 +3,11 @@
*/
import { randomUUID } from "node:crypto";
import {
formatRoundResultForTool,
type RoundResult,
roundResultFromRemoteAnswer,
} from "@singularity-forge/pi-agent-core";
import { sanitizeError } from "../shared/sanitize.js";
import {
type ResolvedConfig,
@ -101,14 +106,10 @@ export async function tryHeadlessLocalAutoResolveQuestions(
policy.autoResolveStrategy,
);
if (!autoResolved) return null;
const resolved = resultFromRemoteAnswer(autoResolved, questions);
return {
content: [
{
type: "text",
text: JSON.stringify({ answers: formatForTool(autoResolved) }),
},
],
content: resolved.content,
details: {
remote: true,
channel: policy.channel,
@ -119,7 +120,7 @@ export async function tryHeadlessLocalAutoResolveQuestions(
localFallback: true,
unavailableReason: options.unavailableReason,
questions,
response: autoResolved,
response: resolved.response,
},
};
}
@ -190,13 +191,9 @@ export async function tryRemoteQuestions(
: null;
if (autoResolved) {
markPromptAnswered(prompt.id, autoResolved);
const resolved = resultFromRemoteAnswer(autoResolved, questions);
return {
content: [
{
type: "text",
text: JSON.stringify({ answers: formatForTool(autoResolved) }),
},
],
content: resolved.content,
details: {
remote: true,
channel: config.channel,
@ -207,7 +204,7 @@ export async function tryRemoteQuestions(
autoResolved: true,
autoResolveStrategy: config.autoResolveStrategy,
questions,
response: autoResolved,
response: resolved.response,
},
};
}
@ -246,14 +243,10 @@ export async function tryRemoteQuestions(
} catch {
/* best-effort */
}
const resolved = resultFromRemoteAnswer(pollResult.answer, questions);
return {
content: [
{
type: "text",
text: JSON.stringify({ answers: formatForTool(pollResult.answer) }),
},
],
content: resolved.content,
details: {
remote: true,
channel: config.channel,
@ -261,7 +254,7 @@ export async function tryRemoteQuestions(
promptId: prompt.id,
threadUrl: dispatch.ref.threadUrl ?? null,
questions,
response: pollResult.answer,
response: resolved.response,
status: "answered",
},
};
@ -291,6 +284,20 @@ function createPrompt(
};
}
/**
 * Convert a RemoteAnswer into both the shared RoundResult and the tool-call
 * content payload the LLM expects, so callers can populate `content` and
 * `details.response` from one place.
 */
function resultFromRemoteAnswer(
  answer: RemoteAnswer,
  questions: QuestionInput[],
): {
  content: Array<{ type: "text"; text: string }>;
  response: RoundResult;
} {
  const round = roundResultFromRemoteAnswer(answer, questions);
  const text = formatRoundResultForTool(round);
  return { content: [{ type: "text", text }], response: round };
}
function createAdapter(config: ResolvedConfig): ChannelAdapter {
if (config.channel === "slack")
return new SlackAdapter(config.token, config.channelId);
@ -350,18 +357,6 @@ function sleep(ms: number, signal?: AbortSignal): Promise<void> {
});
}
function formatForTool(
answer: RemoteAnswer,
): Record<string, { answers: string[] }> {
const out: Record<string, { answers: string[] }> = {};
for (const [id, data] of Object.entries(answer.answers)) {
const list = [...data.answers];
if (data.user_note) list.push(`user_note: ${data.user_note}`);
out[id] = { answers: list };
}
return out;
}
function errorResult(message: string, channel: string): ToolResult {
return {
content: [{ type: "text", text: sanitizeError(message) }],

View file

@ -1638,6 +1638,9 @@ export async function buildResearchMilestonePrompt(
base,
extractKeywords(midTitle),
);
const graphBlockRM = await inlineGraphSubgraph(base, `${mid} ${midTitle}`, {
budget: 3000,
});
const parts: string[] = [];
if (knowledgeInlineRM && composed) {
// Insert knowledge before the template block so the overall order is:
@ -1654,6 +1657,7 @@ export async function buildResearchMilestonePrompt(
parts.push(composed);
if (knowledgeInlineRM) parts.push(knowledgeInlineRM);
}
if (graphBlockRM) parts.push(graphBlockRM);
const inlinedContext = capPreamble(
`## Inlined Context (preloaded — do not re-read these files)\n\n${parts.join("\n\n---\n\n")}`,
@ -1741,6 +1745,10 @@ export async function buildPlanMilestonePrompt(
extractKeywords(midTitle),
);
if (knowledgeInlinePM) inlined.push(knowledgeInlinePM);
const graphBlockPM = await inlineGraphSubgraph(base, `${mid} ${midTitle}`, {
budget: 3000,
});
if (graphBlockPM) inlined.push(graphBlockPM);
inlined.push(inlineTemplate("roadmap", "Roadmap"));
if (inlineLevel === "full") {
inlined.push(inlineTemplate("decisions", "Decisions"));
@ -2537,9 +2545,15 @@ export async function buildCompleteSlicePrompt(
...extractKeywords(midTitle),
...extractKeywords(sTitle),
]);
const graphBlockCS = await inlineGraphSubgraph(base, `${sid} ${sTitle}`, {
budget: 3000,
});
let body = composed;
if (knowledgeInlineCS && body) {
const graphAwareKnowledgeInline = [knowledgeInlineCS, graphBlockCS]
.filter((block): block is string => Boolean(block))
.join("\n\n---\n\n");
if (graphAwareKnowledgeInline && body) {
// Splice knowledge right before the first "### Task Summary:" block
// to preserve pre-migration ordering. If no task summaries exist,
// append after requirements (before templates).
@ -2549,9 +2563,9 @@ export async function buildCompleteSlicePrompt(
if (spliceIdx > 0) {
const before = body.slice(0, spliceIdx).replace(/\n\n---\n\n$/, "");
const after = body.slice(spliceIdx);
body = [before, knowledgeInlineCS, after].join("\n\n---\n\n");
body = [before, graphAwareKnowledgeInline, after].join("\n\n---\n\n");
} else {
body = `${body}\n\n---\n\n${knowledgeInlineCS}`;
body = `${body}\n\n---\n\n${graphAwareKnowledgeInline}`;
}
}
@ -2691,6 +2705,10 @@ export async function buildCompleteMilestonePrompt(
extractKeywords(midTitle),
);
if (knowledgeInlineCM) inlined.push(knowledgeInlineCM);
const graphBlockCM = await inlineGraphSubgraph(base, `${mid} ${midTitle}`, {
budget: 3000,
});
if (graphBlockCM) inlined.push(graphBlockCM);
// Inline milestone context file (milestone-level, not SF root)
const contextPath = resolveMilestoneFile(base, mid, "CONTEXT");
const contextRel = relMilestoneFile(base, mid, "CONTEXT");
@ -2908,6 +2926,10 @@ export async function buildValidateMilestonePrompt(
extractKeywords(midTitle),
);
if (knowledgeInline) inlined.push(knowledgeInline);
const graphBlockVM = await inlineGraphSubgraph(base, `${mid} ${midTitle}`, {
budget: 3000,
});
if (graphBlockVM) inlined.push(graphBlockVM);
// Inline milestone context file
const contextPath = resolveMilestoneFile(base, mid, "CONTEXT");
const contextRel = relMilestoneFile(base, mid, "CONTEXT");
@ -3191,6 +3213,10 @@ export async function buildReassessRoadmapPrompt(
extractKeywords(midTitle),
);
if (knowledgeInlineRA) parts.push(knowledgeInlineRA);
const graphBlockRA = await inlineGraphSubgraph(base, `${mid} ${midTitle}`, {
budget: 3000,
});
if (graphBlockRA) parts.push(graphBlockRA);
const inlinedContext = capPreamble(
`## Inlined Context (preloaded — do not re-read these files)\n\n${parts.join("\n\n---\n\n")}`,
@ -3436,31 +3462,31 @@ export async function buildParallelResearchSlicesPrompt(
);
const subagentSections = entries.map(({ slice, guardedPrompt }) => {
return [
`### ${slice.id}: ${slice.title}`,
"",
"Task payload:",
"",
"```",
guardedPrompt,
"```",
].join("\n");
});
return [
`### ${slice.id}: ${slice.title}`,
"",
"Task payload:",
"",
"```",
guardedPrompt,
"```",
].join("\n");
});
const tasks = entries.map(({ guardedPrompt }) => {
const task: {
agent: string;
task: string;
cwd: string;
model?: string;
} = {
agent: "worker",
cwd: basePath,
task: guardedPrompt,
};
if (subagentModel) task.model = subagentModel;
return task;
});
const task: {
agent: string;
task: string;
cwd: string;
model?: string;
} = {
agent: "worker",
cwd: basePath,
task: guardedPrompt,
};
if (subagentModel) task.model = subagentModel;
return task;
});
const subagentCall = JSON.stringify({ tasks }, null, 2);
return loadPrompt("parallel-research-slices", {

View file

@ -159,14 +159,14 @@ async function resolveGraphApi(): Promise<GraphApi> {
resolvedGraphApi = true;
try {
const imported = await import("@singularity-forge/mcp-server");
const imported = await import("@singularity-forge/pi-agent-core");
if (isGraphApi(imported)) {
cachedGraphApi = imported;
return cachedGraphApi;
}
logWarning(
"prompt",
"@singularity-forge/mcp-server graph exports unavailable; using local graph fallback",
"@singularity-forge/pi-agent-core graph exports unavailable; using local graph fallback",
);
} catch {
// Fall back to local reader implementation.
@ -184,7 +184,7 @@ async function resolveGraphApi(): Promise<GraphApi> {
* the result as an inlined context block.
*
* Returns null when:
* - @singularity-forge/mcp-server fails to import
* - @singularity-forge/pi-agent-core fails to import
* - graph.json does not exist (graphQuery already handles this gracefully)
* - query returns zero nodes
*

View file

@ -6,10 +6,37 @@
// execSync calls because git2 credential handling is too complex.
import { execFileSync } from "node:child_process";
import { lstatSync } from "node:fs";
import { isAbsolute, join } from "node:path";
import { getErrorMessage } from "./error-utils.js";
import { SF_GIT_ERROR, SFError } from "./errors.js";
import { GIT_NO_PROMPT_ENV } from "./git-constants.js";
/**
 * Report whether reaching `relPath` (relative to `basePath`) requires walking
 * through a symlinked directory. Git rejects adds that traverse a symlink
 * ("beyond a symbolic link"), so callers silently skip such paths — typically
 * state directories the user deliberately symlinked out of the worktree
 * (e.g. `.sf` pointing at `~/.sf/projects/<id>/`).
 */
function isPathBeyondSymlink(basePath: string, relPath: string): boolean {
  // Absolute paths are not resolved against basePath; git handles them itself.
  if (isAbsolute(relPath)) return false;
  const parts = relPath.split(/[\\/]/).filter(Boolean);
  // Walk every directory component except the last: git only objects when the
  // traversal passes *through* a symlink, not when the leaf itself is one.
  let current = basePath;
  for (const part of parts.slice(0, -1)) {
    current = join(current, part);
    let stat;
    try {
      stat = lstatSync(current);
    } catch {
      // An intermediate component is missing — defer to git's own error.
      return false;
    }
    if (stat.isSymbolicLink()) return true;
  }
  return false;
}
// Issue #453: keep auto-mode bookkeeping on the stable git CLI path unless a
// caller explicitly opts into the native helper.
const NATIVE_SF_GIT_ENABLED = process.env.SF_ENABLE_NATIVE_SF_GIT === "1";
@ -915,12 +942,18 @@ export function nativeAddAllWithExclusions(
*/
export function nativeAddPaths(basePath: string, paths: string[]): void {
if (paths.length === 0) return;
// Drop paths that traverse a symlink out of the worktree (e.g. `.sf/...`
// when `.sf` points at `~/.sf/projects/<id>/`). Both libgit2 and the git
// CLI reject these with "beyond a symbolic link", and the user has
// intentionally placed that state outside version control.
const safe = paths.filter((p) => !isPathBeyondSymlink(basePath, p));
if (safe.length === 0) return;
const native = loadNative();
if (native) {
native.gitAddPaths(basePath, paths);
native.gitAddPaths(basePath, safe);
return;
}
gitFileExec(basePath, ["add", "--", ...paths]);
gitFileExec(basePath, ["add", "--", ...safe]);
}
/**

View file

@ -1,4 +1,4 @@
Research slice {{sliceId}} ("{{sliceTitle}}") of milestone {{milestoneId}}. Read `.sf/DECISIONS.md` if it exists — respect existing decisions, don't contradict them. Read `.sf/REQUIREMENTS.md` if it exists — identify which Active requirements this slice owns or supports and target research toward risks, unknowns, and constraints that could affect delivery of those requirements. {{skillActivation}} If a repo-intelligence MCP (e.g. Serena) is configured, prefer it for symbol lookup, references, and cross-file architecture mapping. For direct text inspection use `rg`/`find` for targeted reads, or `scout` if the area is broad or unfamiliar. If there are 2-3 independent unknowns, use a research swarm with parallel `scout`/`researcher` subagents and synthesize their findings here; do not swarm narrow sequence-dependent research. Check libraries DeepWiki-first: `ask_question` / `read_wiki_structure` / `read_wiki_contents` for any GitHub-hosted library; fall back to `resolve_library` / `get_library_docs` (Context7, capped at 1000 req/month free) for npm/pypi/crates packages DeepWiki doesn't have. Skip both for libraries already used in this codebase. Use the **Research** output template below. Call `sf_summary_save` with `milestone_id: {{milestoneId}}`, `slice_id: {{sliceId}}`, `artifact_type: "RESEARCH"`, and the research content — the tool writes the file to disk and persists to DB.
Research slice {{sliceId}} ("{{sliceTitle}}") of milestone {{milestoneId}}. Read `.sf/DECISIONS.md` if it exists — respect existing decisions, don't contradict them. Read `.sf/REQUIREMENTS.md` if it exists — identify which Active requirements this slice owns or supports and target research toward risks, unknowns, and constraints that could affect delivery of those requirements. {{skillActivation}} Use native `lsp` first for symbol lookup, references, and cross-file navigation. For direct text inspection use `rg`/`find` for targeted reads, or `scout` if the area is broad or unfamiliar. If there are 2-3 independent unknowns, use a research swarm with parallel `scout`/`researcher` subagents and synthesize their findings here; do not swarm narrow sequence-dependent research. Check libraries DeepWiki-first: `ask_question` / `read_wiki_structure` / `read_wiki_contents` for any GitHub-hosted library; fall back to `resolve_library` / `get_library_docs` (Context7, capped at 1000 req/month free) for npm/pypi/crates packages DeepWiki doesn't have. Skip both for libraries already used in this codebase. Use the **Research** output template below. Call `sf_summary_save` with `milestone_id: {{milestoneId}}`, `slice_id: {{sliceId}}`, `artifact_type: "RESEARCH"`, and the research content — the tool writes the file to disk and persists to DB.
**You are the scout.** A planner agent reads your output in a fresh context to decompose this slice into tasks. Write for the planner — surface key files, where the work divides naturally, what to build first, and how to verify. If the research doc is vague, the planner re-explores code you already read. If it's precise, the planner decomposes immediately.

View file

@ -20,7 +20,7 @@ After you finish, each slice goes through its own plan → execute cycle. Slice
Before decomposing, build your understanding:
1. **Codebase exploration.** If a repo-intelligence MCP (e.g. Serena) is configured, prefer it for symbol lookup, references, and cross-file architecture mapping. For small/familiar codebases, use `rg`, `find`, and targeted reads. For large or unfamiliar codebases, use `scout` to build a broad map efficiently before diving in.
1. **Codebase exploration.** Use native `lsp` first for symbol lookup, references, and cross-file navigation. For small/familiar codebases, use `rg`, `find`, and targeted reads. For large or unfamiliar codebases, use `scout` to build a broad map efficiently before diving in.
2. **Library docs — DeepWiki first.** Use `ask_question` / `read_wiki_structure` / `read_wiki_contents` (DeepWiki) as the default for any GitHub-hosted library. Fall back to `resolve_library` / `get_library_docs` (Context7) for npm/pypi/crates packages DeepWiki doesn't have. Context7 free tier is capped at 1000 req/month — spend those on cases DeepWiki can't cover. Skip both for libraries already used in this codebase.
3. **Skill Discovery ({{skillDiscoveryMode}}):**{{skillDiscoveryInstructions}}
4. **Requirements analysis.** If `.sf/REQUIREMENTS.md` exists, research against it. Identify which Active requirements are table stakes, likely omissions, overbuilt risks, or domain-standard behaviors.

View file

@ -26,7 +26,7 @@ Check prior slice summaries (inlined above as dependency summaries, if present).
### Explore Slice Scope
Read the code files relevant to this slice. Confirm the roadmap's description of what exists, what needs to change, and what boundaries apply. If a repo-intelligence MCP (e.g. Serena) is configured, prefer it for symbol lookup, references, and cross-file architecture mapping. Use `rg`, `find`, and targeted reads for direct text inspection.
Read the code files relevant to this slice. Confirm the roadmap's description of what exists, what needs to change, and what boundaries apply. Use native `lsp` first for symbol lookup, references, and cross-file navigation. Use `rg`, `find`, and targeted reads for direct text inspection.
### Source Files

View file

@ -18,10 +18,10 @@ Use repo docs as the product contract. Do not hardcode assumptions from another
You must inspect the codebase enough to confirm whether declared product capabilities exist. Prefer semantic or structured search when available:
- Start with MCP discovery: call `mcp_servers`, then `mcp_discover` for configured repo-intelligence servers. In our repositories, use Serena first when present for symbol search, references, and cross-file codebase mapping. If Serena is not configured or a call fails, state that and continue with AST/text search.
- Start with native code intelligence: use `lsp` for symbol search, definitions, references, and call hierarchy; use `rg` for exact text evidence. If LSP is unavailable for a language, state that and continue with AST/text search.
- Use AST-aware search (`ast-grep`, repository semantic tools, or equivalent) for implementation patterns, exported APIs, route handlers, CLI commands, service definitions, config keys, and test coverage.
- Use text search (`rg`) for docs, deployment scripts, runbooks, CI workflows, build targets, and evidence strings.
- Use available repo-intelligence MCP tools such as Serena, DeepWiki, Context7, package-intelligence, or project-specific servers when configured, especially for cross-file symbol tracing and architecture questions.
- Use external MCP tools only for external knowledge or explicitly configured project-specific services. Native `lsp` is the first-party symbol tracing path.
- Do not rely only on preloaded docs. If a required capability is declared, look for concrete implementation, tests, config, deploy, and operational evidence.
- Do not edit source files in this audit. The only write path is the `sf_product_audit` tool.

View file

@ -31,7 +31,7 @@ A milestone adding a small feature to an established codebase needs targeted res
Then research the codebase and relevant technologies. Narrate key findings and surprises as you go — what exists, what's missing, what constrains the approach.
1. {{skillActivation}}
2. **Skill Discovery ({{skillDiscoveryMode}}):**{{skillDiscoveryInstructions}}
3. Explore relevant code. If a repo-intelligence MCP (e.g. Serena) is configured, prefer it for symbol lookup, references, and cross-file architecture mapping. For small/familiar codebases, use `rg`, `find`, and targeted reads. For large or unfamiliar codebases, use `scout` to build a broad map efficiently before diving in.
3. Explore relevant code. Use native `lsp` first for symbol lookup, references, and cross-file navigation. For small/familiar codebases, use `rg`, `find`, and targeted reads. For large or unfamiliar codebases, use `scout` to build a broad map efficiently before diving in.
3a. Use research swarms when the questions fan out cleanly. If the milestone spans 2-3 independent subsystems, dispatch parallel `scout`/`researcher` subagents with separate lenses, then synthesize their findings into one research artifact. Do not swarm one tightly-coupled question; do it inline.
4. **Documentation lookup — prefer DeepWiki first.** Use `ask_question` / `read_wiki_structure` / `read_wiki_contents` (DeepWiki) as the default for any GitHub-hosted library or framework — AI-indexed, no free-tier cap. Fall back to `resolve_library` → `get_library_docs` (Context7) for npm/pypi/crates packages DeepWiki doesn't have. **Context7 free tier is capped at 1000 requests/month — spend those on cases DeepWiki can't cover.** Skip both for libraries already used in this codebase.
5. **Web search budget:** You have a limited budget of web searches (max ~15 per session). Use them strategically — try DeepWiki → Context7 → web search in that order. Do NOT repeat the same or similar queries. If a search didn't find what you need, rephrase once or move on. Target 3-5 total web searches for a typical research unit.

View file

@ -44,7 +44,7 @@ Research what this slice needs. Narrate key findings and surprises as you go —
0. If `REQUIREMENTS.md` was preloaded above, identify which Active requirements this slice owns or supports. Research should target these requirements — surfacing risks, unknowns, and implementation constraints that could affect whether the slice actually delivers them.
1. {{skillActivation}} Reference specific rules from loaded skills in your findings where they inform the implementation approach.
2. **Skill Discovery ({{skillDiscoveryMode}}):**{{skillDiscoveryInstructions}}
3. Explore relevant code for this slice's scope. If a repo-intelligence MCP (e.g. Serena) is configured, prefer it for symbol lookup, references, and cross-file architecture mapping. For direct text inspection, use `rg`, `find`, and reads. For broad or unfamiliar subsystems, use `scout` to map the relevant area first.
3. Explore relevant code for this slice's scope. Use native `lsp` first for symbol lookup, references, and cross-file navigation. For direct text inspection, use `rg`, `find`, and reads. For broad or unfamiliar subsystems, use `scout` to map the relevant area first.
3a. Use a research swarm when the slice has 2-3 independent unknowns or subsystems. Dispatch parallel `scout`/`researcher` subagents with distinct lenses, then synthesize what each found into this single RESEARCH artifact. Do not swarm a narrow, sequence-dependent investigation.
4. **Documentation lookup — prefer DeepWiki first.** Use `ask_question` / `read_wiki_structure` / `read_wiki_contents` (DeepWiki) as the default for any GitHub-hosted library or framework — AI-indexed, no free-tier cap. Fall back to `resolve_library` → `get_library_docs` (Context7) for npm/pypi/crates packages DeepWiki doesn't have. **Context7 free tier is capped at 1000 requests/month — spend those on cases DeepWiki can't cover.** Skip both for libraries already used in this codebase.
5. **Web search budget:** You have a limited budget of web searches (max ~15 per session). Use them strategically — try DeepWiki → Context7 → web search in that order. Do NOT repeat the same or similar queries. If a search didn't find what you need, rephrase once or move on. Target 3-5 total web searches for a typical research unit.

View file

@ -5,8 +5,8 @@ description: Researches codebase, project state, and external knowledge using lo
<objective>
Research a topic using four complementary information sources, in priority order:
1. **Serena MCP** (46 LSP-backed tools: symbol search, file read, find references, pattern search) — use FIRST for code exploration
2. **sift** (hybrid BM25+vector local search) — use when Serena symbol search isn't enough
1. **Native LSP tool + local search** (`lsp`, `rg`, `find`, `ls`) — use FIRST for code exploration
2. **sift** (hybrid BM25+vector local search) — use when LSP/rg is not enough
3. **SF project database** (sqlite3) — use for project state (milestones, requirements, decisions)
4. **Web search** — use for external documentation and current information
@ -34,28 +34,24 @@ If a research question genuinely requires a write to answer (e.g., "does X actua
<quick_start>
**Serena MCP (code intelligence — USE FIRST for code exploration):**
**Native code intelligence — USE FIRST for code exploration:**
```bash
# Discover Serena tools (she has 46 tools available)
mcp_servers
# Workspace symbol search
lsp action=symbols query=resolveSubagentLaunchSpec
# Get Serena's full tool list
mcp_discover server=serena
# Document symbols
lsp action=symbols file=src/resources/extensions/subagent/index.ts
# Symbol search — find where a function/type is defined
mcp_call server=serena tool=find_symbol arguments={contextLines=5,matchPattern="resolveSubagentLaunchSpec"}
# Go to definition at a line/symbol
lsp action=definition file=src/resources/extensions/subagent/index.ts line=42 symbol=resolveSubagentLaunchSpec
# Find all references to a symbol (callers, usages)
mcp_call server=serena tool=find_referencing_symbols arguments={contextLines=3,matchPattern="resolveSubagentLaunchSpec"}
# Find references/callers
lsp action=references file=src/resources/extensions/subagent/index.ts line=42 symbol=resolveSubagentLaunchSpec
lsp action=incoming_calls file=src/resources/extensions/subagent/index.ts line=42 symbol=resolveSubagentLaunchSpec
# Read a file (Serena's LSP-backed read is faster than bash cat)
mcp_call server=serena tool=read_file arguments={file_path="src/resources/extensions/subagent/index.ts"}
# Search for pattern in files
mcp_call server=serena tool=search_for_pattern arguments={pattern="codebase_search",filePattern="*.ts",contextLines=3}
# List directory
mcp_call server=serena tool=list_dir arguments={path="src/resources/extensions/sf/skills/"}
# Fast text/file search
rg -n "codebase_search|resolveSubagentLaunchSpec" src packages
rg --files src/resources/extensions/sf/skills
```
**Local code search — sift (hybrid BM25+vector search):**
@ -87,30 +83,32 @@ sqlite3 .sf/sf.db "SELECT id, title, status FROM tasks WHERE milestone_id='M001'
## Step 1: Clarify the research goal
Before searching, identify what you need to know:
- **Code exploration** (finding functions, types, references) → use Serena MCP first
- **Code exploration** (finding functions, types, references) → use native `lsp` first, then `rg`
- **Project state** (milestones, slices, tasks, requirements) → query the SF DB
- **Current external information** → use web search
- **All of the above** → combine all four sources
## Step 2: Explore code with Serena MCP (priority)
## Step 2: Explore code with native LSP (priority)
Serena is an LSP-backed code intelligence layer. Use `mcp_call` to invoke her tools:
Use the built-in `lsp` tool for symbol-aware navigation:
```bash
# Find where a function or type is defined
mcp_call server=serena tool=find_symbol arguments={matchPattern="MyFunction",contextLines=5}
lsp action=symbols query=MyFunction
lsp action=definition file=src/my-file.ts line=42 symbol=MyFunction
# Find all callers/references to a symbol
mcp_call server=serena tool=find_referencing_symbols arguments={matchPattern="MyFunction",contextLines=3}
lsp action=references file=src/my-file.ts line=42 symbol=MyFunction
lsp action=incoming_calls file=src/my-file.ts line=42 symbol=MyFunction
# Read a specific file
mcp_call server=serena tool=read_file arguments={file_path="src/my-file.ts"}
read file=src/my-file.ts
# Grep-like search across the codebase
mcp_call server=serena tool=search_for_pattern arguments={pattern="TODO.*auth",filePattern="*.ts"}
rg -n "TODO.*auth" src packages
```
## Step 3: Supplement with sift (when Serena isn't enough)
## Step 3: Supplement with sift (when LSP/rg is not enough)
Use sift when you need semantic/hybrid search across unstructured content:

View file

@ -0,0 +1,379 @@
import assert from "node:assert/strict";
import { mkdirSync } from "node:fs";
import { test } from "vitest";
import {
buildSliceFileName,
resolveMilestonePath,
resolveSliceFile,
resolveSlicePath,
} from "../paths.js";
import {
computeOverallScore,
deriveCounts,
EVAL_REVIEW_SCHEMA_VERSION,
EvalReviewFrontmatter,
extractFrontmatterRaw,
parseEvalReviewFrontmatter,
verdictForScore,
} from "../eval-review-schema.js";
import {
buildEvalReviewContext,
detectEvalReviewState,
EvalReviewArgError,
evalReviewWritePath,
findEvalReviewFile,
parseEvalReviewArgs,
planEvalReviewAction,
SLICE_ID_PATTERN,
} from "../commands-eval-review.js";
import { TOP_LEVEL_SUBCOMMANDS } from "../commands/catalog.js";
import { makeTempDir, cleanup, createFile } from "./test-utils.js";
// ─── Argument parsing ───────────────────────────────────────────────────────
// Happy path: a bare slice ID parses, with both flags defaulting to false.
test("parseEvalReviewArgs: valid slice ID only", () => {
  const args = parseEvalReviewArgs("S07");
  assert.equal(args.sliceId, "S07");
  assert.equal(args.force, false);
  assert.equal(args.show, false);
});
// --force toggles only the force flag; --show stays off.
test("parseEvalReviewArgs: with --force", () => {
  const args = parseEvalReviewArgs("S07 --force");
  assert.equal(args.sliceId, "S07");
  assert.equal(args.force, true);
  assert.equal(args.show, false);
});
// --show toggles only the show flag; --force stays off.
test("parseEvalReviewArgs: with --show", () => {
  const args = parseEvalReviewArgs("S07 --show");
  assert.equal(args.sliceId, "S07");
  assert.equal(args.force, false);
  assert.equal(args.show, true);
});
// A slice ID is mandatory — empty input or flags-only input must throw.
test("parseEvalReviewArgs: rejects missing slice ID", () => {
  assert.throws(() => parseEvalReviewArgs(""), EvalReviewArgError);
  assert.throws(() => parseEvalReviewArgs("--force"), EvalReviewArgError);
});
// IDs that fail SLICE_ID_PATTERN (including traversal attempts) are rejected.
test("parseEvalReviewArgs: rejects invalid slice ID", () => {
  assert.throws(() => parseEvalReviewArgs("../etc/passwd"), EvalReviewArgError);
  assert.throws(() => parseEvalReviewArgs("foo"), EvalReviewArgError);
  assert.throws(() => parseEvalReviewArgs("S"), EvalReviewArgError);
});
// Unrecognized flags are an error rather than being silently ignored.
test("parseEvalReviewArgs: rejects unknown flags", () => {
  assert.throws(() => parseEvalReviewArgs("S07 --wipe"), EvalReviewArgError);
});
// Exactly one slice ID is accepted per invocation.
test("parseEvalReviewArgs: rejects multiple slice IDs", () => {
  assert.throws(() => parseEvalReviewArgs("S07 S08"), EvalReviewArgError);
});
});
// ─── Slice ID pattern ───────────────────────────────────────────────────────
// Canonical IDs: uppercase S followed by 2+ digits.
test("SLICE_ID_PATTERN: accepts canonical IDs", () => {
  assert.ok(SLICE_ID_PATTERN.test("S01"));
  assert.ok(SLICE_ID_PATTERN.test("S99"));
  assert.ok(SLICE_ID_PATTERN.test("S123"));
});
// The pattern is the path-safety gate: traversal sequences must not match.
test("SLICE_ID_PATTERN: rejects path traversal", () => {
  assert.ok(!SLICE_ID_PATTERN.test("../foo"));
  assert.ok(!SLICE_ID_PATTERN.test("S01/../../etc"));
  assert.ok(!SLICE_ID_PATTERN.test("foo/bar"));
});
// Lowercase prefix, missing prefix, missing digits, and suffixes all fail.
test("SLICE_ID_PATTERN: rejects malformed IDs", () => {
  assert.ok(!SLICE_ID_PATTERN.test("S"));
  assert.ok(!SLICE_ID_PATTERN.test("s01"));
  assert.ok(!SLICE_ID_PATTERN.test("01"));
  assert.ok(!SLICE_ID_PATTERN.test("S01a"));
  assert.ok(!SLICE_ID_PATTERN.test(""));
});
// ─── State detection ────────────────────────────────────────────────────────
test("detectEvalReviewState: no-slice-dir when slice missing", () => {
  const tmp = makeTempDir();
  try {
    // Milestone layout exists, but slice S99 was never created.
    mkdirSync(`${tmp}/.sf/milestones/M001/slices`, { recursive: true });
    const state = detectEvalReviewState(parseEvalReviewArgs("S99"), tmp, "M001");
    assert.equal(state.kind, "no-slice-dir");
    if (state.kind === "no-slice-dir") {
      assert.ok(state.expectedDir.includes("S99"));
    }
  } finally {
    cleanup(tmp);
  }
});
test("detectEvalReviewState: no-summary when SUMMARY.md missing", () => {
  const tmp = makeTempDir();
  try {
    // Slice directory exists but holds no summary file.
    mkdirSync(`${tmp}/.sf/milestones/M001/slices/S01`, { recursive: true });
    const state = detectEvalReviewState(parseEvalReviewArgs("S01"), tmp, "M001");
    assert.equal(state.kind, "no-summary");
  } finally {
    cleanup(tmp);
  }
});
test("detectEvalReviewState: ready when SUMMARY.md present", () => {
  const tmp = makeTempDir();
  try {
    createFile(tmp, ".sf/milestones/M001/slices/S01/S01-SUMMARY.md", "# Summary\n");
    const state = detectEvalReviewState(parseEvalReviewArgs("S01"), tmp, "M001");
    assert.equal(state.kind, "ready");
    if (state.kind === "ready") {
      assert.ok(state.summaryPath.includes("S01-SUMMARY.md"));
    }
  } finally {
    cleanup(tmp);
  }
});
// ─── Path helpers ───────────────────────────────────────────────────────────
test("evalReviewWritePath: canonical naming", () => {
  // Output file lives inside the slice dir and uses the <slice>-EVAL-REVIEW.md name.
  const written = evalReviewWritePath("/tmp/milestone/slices/S01", "S01");
  assert.ok(written.endsWith("S01-EVAL-REVIEW.md"));
  assert.ok(written.includes("/tmp/milestone/slices/S01"));
});
test("findEvalReviewFile: returns null when absent", () => {
  const tmp = makeTempDir();
  try {
    // Empty slice directory — nothing to discover.
    mkdirSync(`${tmp}/.sf/milestones/M001/slices/S01`, { recursive: true });
    assert.equal(findEvalReviewFile(tmp, "M001", "S01"), null);
  } finally {
    cleanup(tmp);
  }
});
test("findEvalReviewFile: returns path when present", () => {
  const tmp = makeTempDir();
  try {
    createFile(tmp, ".sf/milestones/M001/slices/S01/S01-EVAL-REVIEW.md", "---\n---\n");
    const found = findEvalReviewFile(tmp, "M001", "S01");
    assert.ok(found !== null);
    assert.ok(found?.includes("S01-EVAL-REVIEW.md"));
  } finally {
    cleanup(tmp);
  }
});
// ─── Action planner ─────────────────────────────────────────────────────────
test("planEvalReviewAction: no-slice-dir → no-slice-dir", () => {
  const state = { kind: "no-slice-dir", sliceId: "S01", expectedDir: "/tmp" } as const;
  const planned = planEvalReviewAction(parseEvalReviewArgs("S01"), state, null);
  assert.equal(planned.kind, "no-slice-dir");
});
test("planEvalReviewAction: --show tolerates missing summary", () => {
  // --show works even before a summary exists; it just reports a null path.
  const state = { kind: "no-summary", sliceId: "S01", sliceDir: "/tmp", specPath: null } as const;
  const planned = planEvalReviewAction(parseEvalReviewArgs("S01 --show"), state, null);
  assert.equal(planned.kind, "show");
  assert.equal(planned.path, null);
});
test("planEvalReviewAction: no-summary without --show → no-summary", () => {
  const state = { kind: "no-summary", sliceId: "S01", sliceDir: "/tmp", specPath: null } as const;
  const planned = planEvalReviewAction(parseEvalReviewArgs("S01"), state, null);
  assert.equal(planned.kind, "no-summary");
});
test("planEvalReviewAction: existing file without --force → exists-no-force", () => {
  const state = { kind: "ready", sliceId: "S01", sliceDir: "/tmp", summaryPath: "/tmp/SUMMARY.md", specPath: null } as const;
  const planned = planEvalReviewAction(parseEvalReviewArgs("S01"), state, "/tmp/S01-EVAL-REVIEW.md");
  assert.equal(planned.kind, "exists-no-force");
});
test("planEvalReviewAction: existing file with --force → dispatch", () => {
  const state = { kind: "ready", sliceId: "S01", sliceDir: "/tmp", summaryPath: "/tmp/SUMMARY.md", specPath: null } as const;
  const planned = planEvalReviewAction(parseEvalReviewArgs("S01 --force"), state, "/tmp/S01-EVAL-REVIEW.md");
  assert.equal(planned.kind, "dispatch");
});
test("planEvalReviewAction: no existing file → dispatch", () => {
  const state = { kind: "ready", sliceId: "S01", sliceDir: "/tmp", summaryPath: "/tmp/SUMMARY.md", specPath: null } as const;
  const planned = planEvalReviewAction(parseEvalReviewArgs("S01"), state, null);
  assert.equal(planned.kind, "dispatch");
});
// ─── Context builder ────────────────────────────────────────────────────────
test("buildEvalReviewContext: reads SUMMARY.md and builds context", async () => {
  const tmp = makeTempDir();
  try {
    createFile(tmp, ".sf/milestones/M001/slices/S01/S01-SUMMARY.md", "# Summary\nDone.\n");
    const sliceDir = `${tmp}/.sf/milestones/M001/slices/S01`;
    const state = {
      kind: "ready" as const,
      sliceId: "S01",
      sliceDir,
      summaryPath: `${sliceDir}/S01-SUMMARY.md`,
      specPath: null,
    };
    // Inject a fixed clock so the generated context is deterministic.
    const ctx = await buildEvalReviewContext(state, "M001", () => new Date("2024-01-01T00:00:00Z"));
    assert.equal(ctx.milestoneId, "M001");
    assert.equal(ctx.sliceId, "S01");
    assert.ok(ctx.summary.includes("Done."));
    assert.equal(ctx.spec, null);
    assert.equal(ctx.truncated, false);
    assert.ok(ctx.outputPath.includes("S01-EVAL-REVIEW.md"));
  } finally {
    cleanup(tmp);
  }
});
test("buildEvalReviewContext: reads optional AI-SPEC.md", async () => {
  const tmp = makeTempDir();
  try {
    createFile(tmp, ".sf/milestones/M001/slices/S01/S01-SUMMARY.md", "# Summary\n");
    createFile(tmp, ".sf/milestones/M001/slices/S01/S01-AI-SPEC.md", "# Spec\n");
    const sliceDir = `${tmp}/.sf/milestones/M001/slices/S01`;
    const state = {
      kind: "ready" as const,
      sliceId: "S01",
      sliceDir,
      summaryPath: `${sliceDir}/S01-SUMMARY.md`,
      specPath: `${sliceDir}/S01-AI-SPEC.md`,
    };
    const ctx = await buildEvalReviewContext(state, "M001");
    assert.ok(ctx.spec?.includes("# Spec"));
  } finally {
    cleanup(tmp);
  }
});
// ─── Schema / frontmatter ───────────────────────────────────────────────────
test("extractFrontmatterRaw: finds YAML block", () => {
  const raw = "---\nschema: eval-review/v1\nverdict: PRODUCTION_READY\n---\n# Body\n";
  const result = extractFrontmatterRaw(raw);
  // Success shape carries a `yaml` key; the failure shape carries `error`.
  assert.ok("yaml" in result);
  if ("yaml" in result) {
    assert.ok(result.yaml.includes("schema: eval-review/v1"));
  }
});
test("extractFrontmatterRaw: rejects missing opening delimiter", () => {
  const result = extractFrontmatterRaw("no frontmatter");
  assert.ok("error" in result);
});
test("extractFrontmatterRaw: rejects missing closing delimiter", () => {
  // Opening "---" present but never closed — must be reported as an error.
  const result = extractFrontmatterRaw("---\nschema: eval-review/v1\n");
  assert.ok("error" in result);
});
test("parseEvalReviewFrontmatter: valid frontmatter parses", () => {
  // Fixture: a complete, schema-conformant eval-review document. The YAML
  // lines must stay at column 0 — frontmatter is column-sensitive.
  const raw = `---
schema: eval-review/v1
verdict: PRODUCTION_READY
coverage_score: 80
infrastructure_score: 75
overall_score: 78
generated: 2024-01-01T00:00:00Z
slice: S01
milestone: M001
gaps: []
counts:
blocker: 0
major: 0
minor: 0
---
# Body
`;
  const result = parseEvalReviewFrontmatter(raw);
  assert.ok(result.ok);
  if (result.ok) {
    assert.equal(result.data.schema, EVAL_REVIEW_SCHEMA_VERSION);
    assert.equal(result.data.verdict, "PRODUCTION_READY");
    assert.equal(result.data.coverage_score, 80);
    assert.equal(result.data.gaps.length, 0);
  }
});
test("parseEvalReviewFrontmatter: invalid schema version fails", () => {
  // Identical to the valid fixture except for the unsupported "v2" schema tag.
  const raw = `---
schema: eval-review/v2
verdict: PRODUCTION_READY
coverage_score: 80
infrastructure_score: 75
overall_score: 78
generated: 2024-01-01T00:00:00Z
slice: S01
milestone: M001
gaps: []
counts:
blocker: 0
major: 0
minor: 0
---
`;
  const result = parseEvalReviewFrontmatter(raw);
  assert.ok(!result.ok);
});
test("parseEvalReviewFrontmatter: missing frontmatter fails", () => {
  const result = parseEvalReviewFrontmatter("# Just a markdown file\n");
  assert.ok(!result.ok);
});
// ─── Derived fields ─────────────────────────────────────────────────────────
test("computeOverallScore: 60/40 weighting", () => {
  // Coverage carries 60% of the weight, infrastructure the remaining 40%.
  assert.equal(computeOverallScore(80, 75), Math.round(80 * 0.6 + 75 * 0.4));
  assert.equal(computeOverallScore(100, 0), 60);
  assert.equal(computeOverallScore(0, 100), 40);
  assert.equal(computeOverallScore(50, 50), 50);
});
test("computeOverallScore: clamps to bounds", () => {
  // Out-of-range inputs clamp to the [0, 100] band.
  assert.equal(computeOverallScore(-10, -10), 0);
  assert.equal(computeOverallScore(200, 200), 100);
});
test("deriveCounts: tallies severities correctly", () => {
  // Only id/severity vary across fixtures; the other fields are filler.
  const makeGap = (id: string, severity: string) => ({
    id,
    dimension: "tests",
    severity,
    description: "d",
    evidence: "e",
    suggested_fix: "f",
  });
  const gaps = [
    makeGap("G01", "blocker"),
    makeGap("G02", "major"),
    makeGap("G03", "major"),
    makeGap("G04", "minor"),
  ];
  const counts = deriveCounts(gaps as any);
  assert.equal(counts.blocker, 1);
  assert.equal(counts.major, 2);
  assert.equal(counts.minor, 1);
});
test("verdictForScore: maps to correct bands", () => {
  // Band edges: ≥80 PRODUCTION_READY, ≥60 NEEDS_WORK, ≥40 SIGNIFICANT_GAPS,
  // below that NOT_IMPLEMENTED. Each pair probes a boundary or its neighbor.
  const bands: Array<[number, string]> = [
    [100, "PRODUCTION_READY"],
    [80, "PRODUCTION_READY"],
    [79, "NEEDS_WORK"],
    [60, "NEEDS_WORK"],
    [59, "SIGNIFICANT_GAPS"],
    [40, "SIGNIFICANT_GAPS"],
    [39, "NOT_IMPLEMENTED"],
    [0, "NOT_IMPLEMENTED"],
  ];
  for (const [score, verdict] of bands) {
    assert.equal(verdictForScore(score), verdict);
  }
});
// ─── Catalog wiring ─────────────────────────────────────────────────────────
test("eval-review command is in catalog", () => {
  const match = TOP_LEVEL_SUBCOMMANDS.find((entry) => entry.cmd === "eval-review");
  assert.ok(match, "eval-review should be registered in TOP_LEVEL_SUBCOMMANDS");
  assert.ok(match!.desc.includes("EVAL-REVIEW"));
});

View file

@ -7,7 +7,7 @@
* Group 3: Node formatting (description, confidence, no-description)
*
* Testing strategy:
* @singularity-forge/mcp-server is dynamically imported inside inlineGraphSubgraph().
* @singularity-forge/pi-agent-core is dynamically imported inside inlineGraphSubgraph().
* Because node:test (v22) does not support mock.module() without the
* --experimental-test-module-mocks flag (not enabled in test:unit), we
* exercise the real graphQuery/graphStatus functions by controlling the
@ -24,7 +24,7 @@ import assert from "node:assert/strict";
import { mkdirSync, mkdtempSync, rmSync, writeFileSync } from "node:fs";
import { tmpdir } from "node:os";
import { join } from "node:path";
import { describe, it } from 'vitest';
import { describe, it } from "vitest";
import { inlineGraphSubgraph } from "../graph-context.ts";

View file

@ -1,5 +1,5 @@
import assert from "node:assert/strict";
import { test } from 'vitest';
import { test } from "vitest";
import { isUsableRemoteQuestionResult } from "../../ask-user-questions.ts";
import {
tryAutoResolveQuestions,
@ -119,8 +119,12 @@ test("headless local auto-resolution fires after timeout when telegram is unavai
assert.equal(result?.details?.autoResolved, true);
assert.equal(result?.details?.localFallback, true);
assert.deepEqual(result?.details?.response, {
endInterview: false,
answers: {
depth_verification_M001_confirm: { answers: ["Sufficient"] },
depth_verification_M001_confirm: {
selected: "Sufficient",
notes: "",
},
},
});
});

View file

@ -13,6 +13,7 @@ import { clearParseCache } from "../files.js";
import { getGatesForTurn } from "../gate-registry.js";
import { renderRoadmapCheckboxes } from "../markdown-renderer.js";
import { clearPathCache, resolveSlicePath } from "../paths.js";
import { checkSafeIds } from "../safety/safe-id.js";
import {
getMilestone,
getPendingGatesForTurn,
@ -25,7 +26,6 @@ import {
transaction,
updateSliceStatus,
} from "../sf-db.js";
import { checkSafeIds } from "../safety/safe-id.js";
import { invalidateStateCache } from "../state.js";
import { isClosedStatus } from "../status-guards.js";
import type { CompleteSliceParams } from "../types.js";
@ -418,7 +418,9 @@ ${params.uatContent}
export async function handleCompleteSlice(
paramsInput: CompleteSliceParams,
basePath: string,
): Promise<CompleteSliceResult | { error: string; field?: string; reason?: string }> {
): Promise<
CompleteSliceResult | { error: string; field?: string; reason?: string }
> {
// ── Path-traversal guard (validation-safe-id-path-segments) ───────────
// Checked on raw input before normalizeCompleteSliceParams so the
// structured error shape is preserved. Rejects any ID that could escape
@ -698,25 +700,25 @@ export async function handleCompleteSlice(
(async () => {
try {
const graphMod = (await import(
"@singularity-forge/mcp-server"
"@singularity-forge/pi-agent-core"
)) as unknown as Partial<{
buildGraph: (
dir: string,
) => Promise<{ nodes: unknown[]; edges: unknown[]; builtAt: string }>;
writeGraph: (sfRoot: string, graph: unknown) => Promise<void>;
resolveGsdRoot: (basePath: string) => string;
resolveSFRoot: (basePath: string) => string;
}>;
if (
typeof graphMod.buildGraph !== "function" ||
typeof graphMod.writeGraph !== "function" ||
typeof graphMod.resolveGsdRoot !== "function"
typeof graphMod.resolveSFRoot !== "function"
) {
throw new Error(
"graph helpers unavailable from @singularity-forge/mcp-server",
"graph helpers unavailable from @singularity-forge/pi-agent-core",
);
}
const g = await graphMod.buildGraph(basePath);
await graphMod.writeGraph(graphMod.resolveGsdRoot(basePath), g);
await graphMod.writeGraph(graphMod.resolveSFRoot(basePath), g);
} catch (graphErr) {
// Graph rebuild is best-effort — log at warning level but never propagate
logWarning(

View file

@ -27,7 +27,7 @@ hard-block milestone completion — actionable gaps become follow-up slices.
1. Load the audit prompt at `prompts/product-audit.md`.
2. Treat repo docs (VISION.md, README.md, docs/RUNBOOKS.md, milestone artifacts)
as the product contract. Do not invent expectations.
3. Inspect the codebase using semantic search (Serena/MCP), AST tools, and `rg`
3. Inspect the codebase using native `lsp`, AST tools, and `rg`
to confirm declared capabilities have concrete evidence (code, tests,
deployment artifacts, runbooks).
4. Score each gap with severity, confidence, and a suggested follow-up slice.

View file

@ -40,25 +40,13 @@ import { INDENT, makeUI } from "./ui.js";
// ─── Exported types ───────────────────────────────────────────────────────────
export interface QuestionOption {
label: string;
description: string;
}
export type {
Question,
QuestionOption,
RoundResult,
} from "@singularity-forge/pi-agent-core";
export interface Question {
id: string;
header: string;
question: string;
options: QuestionOption[];
/** If true, user can toggle multiple options with SPACE, confirm with ENTER */
allowMultiple?: boolean;
}
export interface RoundResult {
/** Always false — end is handled by showWrapUpScreen, not per-question */
endInterview: false;
answers: Record<string, { selected: string | string[]; notes: string }>;
}
import type { Question, RoundResult } from "@singularity-forge/pi-agent-core";
export interface WrapUpResult {
/** true = wrap up and write file, false = keep going */

View file

@ -1,11 +1,12 @@
import assert from "node:assert/strict";
import { test } from 'vitest';
import { Client } from "@modelcontextprotocol/sdk/client/index.js";
import { InMemoryTransport } from "@modelcontextprotocol/sdk/inMemory.js";
import { ElicitRequestSchema } from "@modelcontextprotocol/sdk/types.js";
import { test } from "vitest";
import {
buildAskUserQuestionsElicitRequest,
buildAskUserQuestionsRoundResult,
createMcpServer,
formatAskUserQuestionsElicitResult,
} from "../../packages/mcp-server/src/server.js";
@ -130,6 +131,32 @@ test("ask_user_questions returns the packaged answers JSON shape for form elicit
const text = result.content.find((item) => item.type === "text");
assert.ok(text && "text" in text);
assert.deepEqual(result.structuredContent, {
questions: [
{
id: "deployment",
header: "Deploy",
question: "Where will this run?",
options: [
{ label: "Cloud", description: "Managed hosting." },
{
label: "On-prem",
description: "Runs in customer infrastructure.",
},
],
},
],
response: {
endInterview: false,
answers: {
deployment: {
selected: "None of the above",
notes: "Need hybrid deployment.",
},
},
},
cancelled: false,
});
assert.equal(
text.text,
JSON.stringify({
@ -203,6 +230,21 @@ test("ask_user_questions returns the cancellation message when elicitation is de
const text = result.content.find((item) => item.type === "text");
assert.ok(text && "text" in text);
assert.deepEqual(result.structuredContent, {
questions: [
{
id: "continue",
header: "Continue",
question: "Continue?",
options: [
{ label: "Yes", description: "Proceed." },
{ label: "No", description: "Stop here." },
],
},
],
response: null,
cancelled: true,
});
assert.equal(
text.text,
"ask_user_questions was cancelled before receiving a response",
@ -234,7 +276,13 @@ test("helper formatting stays aligned with the tool contract", () => {
const formatted = formatAskUserQuestionsElicitResult(questions, {
action: "accept",
content: {
focus_areas: ["Frontend", "Backend"],
focus_areas: ["Frontend"],
},
});
const round = buildAskUserQuestionsRoundResult(questions, {
action: "accept",
content: {
focus_areas: ["Frontend"],
},
});
@ -243,9 +291,10 @@ test("helper formatting stays aligned with the tool contract", () => {
JSON.stringify({
answers: {
focus_areas: {
answers: ["Frontend", "Backend"],
answers: ["Frontend"],
},
},
}),
);
assert.deepEqual(round.answers.focus_areas.selected, ["Frontend"]);
});

View file

@ -11,113 +11,189 @@
* npx vitest # watch mode
* npx vitest run --changed # only tests affected by recent changes
*/
import { defineConfig } from "vitest/config";
import { resolve } from "node:path";
import { defineConfig } from "vitest/config";
const __dirname = import.meta.dirname;
export default defineConfig({
test: {
// ── File patterns ─────────────────────────────────────────────────────────
// Files without vitest imports (standalone test scripts that run assertions
// directly at module load time) are skipped by the old node --test
// runner and must be excluded here too to avoid "No test suite found" errors.
include: [
"src/tests/**/*.test.ts",
"src/tests/**/*.test.mjs",
"src/resources/extensions/sf/tests/**/*.test.ts",
"src/resources/extensions/sf/tests/**/*.test.mjs",
"src/resources/extensions/shared/tests/**/*.test.ts",
"src/resources/extensions/claude-code-cli/tests/**/*.test.ts",
"src/resources/extensions/github-sync/tests/**/*.test.ts",
"src/resources/extensions/universal-config/tests/**/*.test.ts",
"src/resources/extensions/voice/tests/**/*.test.ts",
"src/resources/extensions/vectordrive/tests/**/*.test.ts",
"src/resources/extensions/mcp-client/tests/**/*.test.ts",
"src/resources/extensions/async-jobs/*.test.ts",
"src/resources/extensions/browser-tools/tests/*.test.mjs",
"packages/pi-coding-agent/src/**/*.test.ts",
"packages/pi-ai/src/**/*.test.ts",
"packages/pi-agent-core/src/**/*.test.ts",
"packages/pi-tui/src/**/*.test.ts",
"packages/daemon/src/**/*.test.ts",
"packages/mcp-server/src/**/*.test.ts",
"packages/rpc-client/src/**/*.test.ts",
"packages/native/src/**/*.test.mjs",
"web/lib/**/*.test.ts",
"studio/test/**/*.test.mjs",
"scripts/*.test.mjs",
],
// ── TypeScript / module resolution ─────────────────────────────────────────
// Vitest uses esbuild for TS transform (fast, bundled). We still set up
// NodeNext module resolution and path aliases to match the project's tsconfig.
resolve: {
alias: {
"@singularity-forge/pi-coding-agent": resolve(
__dirname,
"packages/pi-coding-agent/src/index.ts",
),
"@singularity-forge/pi-ai/oauth": resolve(
__dirname,
"packages/pi-ai/src/utils/oauth/index.ts",
),
"@singularity-forge/pi-ai/bedrock-provider": resolve(
__dirname,
"packages/pi-ai/src/bedrock-provider.ts",
),
"@singularity-forge/pi-ai": resolve(
__dirname,
"packages/pi-ai/src/index.ts",
),
"@singularity-forge/pi-agent-core": resolve(
__dirname,
"packages/pi-agent-core/src/index.ts",
),
"@singularity-forge/pi-tui": resolve(
__dirname,
"packages/pi-tui/src/index.ts",
),
"@singularity-forge/native/ast": resolve(
__dirname,
"packages/native/src/ast/index.ts",
),
"@singularity-forge/native/clipboard": resolve(
__dirname,
"packages/native/src/clipboard/index.ts",
),
"@singularity-forge/native/diff": resolve(
__dirname,
"packages/native/src/diff/index.ts",
),
"@singularity-forge/native/edit": resolve(
__dirname,
"packages/native/src/edit/index.ts",
),
"@singularity-forge/native/fd": resolve(
__dirname,
"packages/native/src/fd/index.ts",
),
"@singularity-forge/native/forge-parser": resolve(
__dirname,
"packages/native/src/forge-parser/index.ts",
),
"@singularity-forge/native/glob": resolve(
__dirname,
"packages/native/src/glob/index.ts",
),
"@singularity-forge/native/grep": resolve(
__dirname,
"packages/native/src/grep/index.ts",
),
"@singularity-forge/native/highlight": resolve(
__dirname,
"packages/native/src/highlight/index.ts",
),
"@singularity-forge/native/html": resolve(
__dirname,
"packages/native/src/html/index.ts",
),
"@singularity-forge/native/image": resolve(
__dirname,
"packages/native/src/image/index.ts",
),
"@singularity-forge/native/json-parse": resolve(
__dirname,
"packages/native/src/json-parse/index.ts",
),
"@singularity-forge/native/ps": resolve(
__dirname,
"packages/native/src/ps/index.ts",
),
"@singularity-forge/native/stream-process": resolve(
__dirname,
"packages/native/src/stream-process/index.ts",
),
"@singularity-forge/native/text": resolve(
__dirname,
"packages/native/src/text/index.ts",
),
"@singularity-forge/native/truncate": resolve(
__dirname,
"packages/native/src/truncate/index.ts",
),
"@singularity-forge/native/ttsr": resolve(
__dirname,
"packages/native/src/ttsr/index.ts",
),
"@singularity-forge/native/xxhash": resolve(
__dirname,
"packages/native/src/xxhash/index.ts",
),
"@singularity-forge/native": resolve(
__dirname,
"packages/native/src/index.ts",
),
"@singularity-forge/mcp-server": resolve(
__dirname,
"packages/mcp-server/src/index.ts",
),
"@singularity-forge/rpc-client": resolve(
__dirname,
"packages/rpc-client/src/index.ts",
),
},
},
test: {
// ── File patterns ─────────────────────────────────────────────────────────
// Files without vitest imports (standalone test scripts that run assertions
// directly at module load time) are skipped by the old node --test
// runner and must be excluded here too to avoid "No test suite found" errors.
include: [
"src/tests/**/*.test.ts",
"src/tests/**/*.test.mjs",
"src/resources/extensions/sf/tests/**/*.test.ts",
"src/resources/extensions/sf/tests/**/*.test.mjs",
"src/resources/extensions/shared/tests/**/*.test.ts",
"src/resources/extensions/claude-code-cli/tests/**/*.test.ts",
"src/resources/extensions/github-sync/tests/**/*.test.ts",
"src/resources/extensions/universal-config/tests/**/*.test.ts",
"src/resources/extensions/voice/tests/**/*.test.ts",
"src/resources/extensions/vectordrive/tests/**/*.test.ts",
"src/resources/extensions/mcp-client/tests/**/*.test.ts",
"src/resources/extensions/async-jobs/*.test.ts",
"src/resources/extensions/browser-tools/tests/*.test.mjs",
"packages/pi-coding-agent/src/**/*.test.ts",
"packages/pi-ai/src/**/*.test.ts",
"packages/pi-agent-core/src/**/*.test.ts",
"packages/pi-tui/src/**/*.test.ts",
"packages/daemon/src/**/*.test.ts",
"packages/mcp-server/src/**/*.test.ts",
"packages/rpc-client/src/**/*.test.ts",
"packages/native/src/**/*.test.mjs",
"web/lib/**/*.test.ts",
"studio/test/**/*.test.mjs",
"scripts/*.test.mjs",
],
// ── Timeouts ──────────────────────────────────────────────────────────────
testTimeout: 30_000,
hookTimeout: 30_000,
// ── Timeouts ──────────────────────────────────────────────────────────────
testTimeout: 30_000,
hookTimeout: 30_000,
// ── Pool: forks = one Node process per test file (best for Node.js tests) ─
pool: "forks",
// Single worker in CI; parallel in dev for speed
singleFork: process.env.CI === "true",
// ── Pool: forks = one Node process per test file (best for Node.js tests) ─
pool: "forks",
// Single worker in CI; parallel in dev for speed
singleFork: process.env.CI === "true",
// ── Coverage ──────────────────────────────────────────────────────────────
coverage: {
provider: "v8",
reporter: ["text", "lcov"],
exclude: [
"src/resources/extensions/sf/tests/**",
"src/tests/**",
"scripts/**",
"rust-engine/**",
"node_modules/**",
"dist/**",
"dist-test/**",
"web/**",
],
thresholds: {
statements: 40,
lines: 40,
branches: 20,
functions: 20,
},
},
// ── TypeScript / module resolution ─────────────────────────────────────────
// Vitest uses esbuild for TS transform (fast, bundled). We still set up
// NodeNext module resolution and path aliases to match the project's tsconfig.
resolve: {
alias: {
"@singularity-forge/pi-coding-agent": resolve(
__dirname,
"packages/pi-coding-agent/src/index.ts",
),
"@singularity-forge/pi-ai": resolve(
__dirname,
"packages/pi-ai/src/index.ts",
),
"@singularity-forge/pi-ai/oauth": resolve(
__dirname,
"packages/pi-ai/src/utils/oauth/index.ts",
),
"@singularity-forge/pi-agent-core": resolve(
__dirname,
"packages/pi-agent-core/src/index.ts",
),
"@singularity-forge/pi-tui": resolve(
__dirname,
"packages/pi-tui/src/index.ts",
),
"@singularity-forge/native": resolve(
__dirname,
"packages/native/src/index.ts",
),
"@singularity-forge/mcp-server": resolve(
__dirname,
"packages/mcp-server/src/index.ts",
),
"@singularity-forge/rpc-client": resolve(
__dirname,
"packages/rpc-client/src/index.ts",
),
},
},
},
// ── Coverage ──────────────────────────────────────────────────────────────
coverage: {
provider: "v8",
reporter: ["text", "lcov"],
exclude: [
"src/resources/extensions/sf/tests/**",
"src/tests/**",
"scripts/**",
"rust-engine/**",
"node_modules/**",
"dist/**",
"dist-test/**",
"web/**",
],
thresholds: {
statements: 40,
lines: 40,
branches: 20,
functions: 20,
},
},
},
});