singularity-forge/src/resources/extensions/sf/sf-db.js
2026-05-08 05:51:06 +02:00

6748 lines
232 KiB
JavaScript

// SF Database Abstraction Layer
// Provides a SQLite database via node:sqlite (Node >= 26 built-in).
//
// Exposes a unified sync API for decisions and requirements storage.
// Schema is initialized on first open with WAL mode for file-backed DBs.
//
// ─── Single-writer invariant ─────────────────────────────────────────────
// This file is the ONLY place in the codebase that issues write SQL
// (INSERT / UPDATE / DELETE / REPLACE / BEGIN-COMMIT transactions) against
// the engine database at `.sf/sf.db`. All other modules must call the
// typed wrappers exported here. The structural test
// `tests/single-writer-invariant.test.ts` fails CI if a new bypass appears.
//
// `_getAdapter()` is retained for read-only SELECTs in query modules
// (context-store, memory-store queries, doctor checks, projections).
// Do NOT use it for writes — add a wrapper here instead.
//
// The separate `.sf/unit-claims.db` managed by `unit-ownership.ts` is an
// intentionally independent store for cross-worktree claim races and is
// excluded from this invariant.
import { copyFileSync, existsSync, mkdirSync, realpathSync } from "node:fs";
import { dirname } from "node:path";
import { DatabaseSync } from "node:sqlite";
import { SF_STALE_STATE, SFError } from "./errors.js";
import { getGateIdsForTurn } from "./gate-registry.js";
import { logError, logWarning } from "./workflow-logger.js";
// One-shot guard for provider initialization. node:sqlite ships with
// Node >= 26, so there is nothing to dynamically load anymore; the flag
// only preserves the historical "attempt once" contract.
let loadAttempted = false;

/**
 * Marks the sqlite provider as initialized. Idempotent: subsequent calls
 * are no-ops.
 */
function loadProvider() {
  if (!loadAttempted) {
    loadAttempted = true;
    // node:sqlite is built-in in Node >= 26
  }
}
/**
 * Normalizes a single sqlite result row. node:sqlite returns rows with a
 * null prototype; those are copied into a plain object so downstream code
 * can rely on Object.prototype methods. null/undefined rows normalize to
 * undefined.
 */
function normalizeRow(row) {
  if (row == null) return undefined;
  const proto = Object.getPrototypeOf(row);
  return proto === null ? { ...row } : row;
}
/** Applies normalizeRow to every row of a result set, preserving order. */
function normalizeRows(rows) {
  const normalized = [];
  for (const row of rows) {
    normalized.push(normalizeRow(row));
  }
  return normalized;
}
/**
 * Wraps a raw DatabaseSync handle behind the minimal adapter interface
 * (exec / prepare / close) used by the rest of this module.
 *
 * - prepare() memoizes wrapped statements per SQL string, so hot-path
 *   queries are compiled once per connection.
 * - get()/all() results are passed through normalizeRow/normalizeRows to
 *   strip node:sqlite's null-prototype row objects.
 * - close() drops the statement cache before closing the handle.
 */
function createAdapter(rawDb) {
  const db = rawDb;
  const stmtCache = new Map();
  const wrapStmt = (raw) => ({
    run: (...params) => raw.run(...params),
    get: (...params) => normalizeRow(raw.get(...params)),
    all: (...params) => normalizeRows(raw.all(...params)),
  });
  return {
    exec(sql) {
      db.exec(sql);
    },
    prepare(sql) {
      const hit = stmtCache.get(sql);
      if (hit) return hit;
      const wrapped = wrapStmt(db.prepare(sql));
      stmtCache.set(sql, wrapped);
      return wrapped;
    },
    close() {
      stmtCache.clear();
      db.close();
    },
  };
}
/** Opens (creating if necessary) a DatabaseSync handle at `path`. */
function openRawDb(path) {
  loadProvider();
  const handle = new DatabaseSync(path);
  return handle;
}
// Current schema version; written into the schema_version table on first
// init (see initSchema) and consumed by migrateSchema for upgrades.
const SCHEMA_VERSION = 43;
/**
 * Returns true when an index with the given name is present in
 * sqlite_master.
 */
function indexExists(db, name) {
  const row = db
    .prepare(
      "SELECT 1 as present FROM sqlite_master WHERE type = 'index' AND name = ?",
    )
    .get(name);
  return Boolean(row);
}
/**
 * Collapses duplicate verification_evidence rows, keeping the earliest
 * (lowest rowid) row per (task, slice, milestone, command, verdict)
 * tuple. Run before creating the unique dedup index so that index
 * creation cannot fail on pre-existing duplicates.
 */
function dedupeVerificationEvidenceRows(db) {
  const dedupeSql = `
DELETE FROM verification_evidence
WHERE rowid NOT IN (
SELECT MIN(rowid)
FROM verification_evidence
GROUP BY task_id, slice_id, milestone_id, command, verdict
)
`;
  db.exec(dedupeSql);
}
/**
 * Guarantees the unique dedup index on verification_evidence exists.
 * Existing rows are deduplicated first so the UNIQUE constraint can be
 * built; a pre-existing index short-circuits all work.
 */
function ensureVerificationEvidenceDedupIndex(db) {
  if (indexExists(db, "idx_verification_evidence_dedup")) return;
  dedupeVerificationEvidenceRows(db);
  const ddl =
    "CREATE UNIQUE INDEX IF NOT EXISTS idx_verification_evidence_dedup ON verification_evidence(task_id, slice_id, milestone_id, command, verdict)";
  db.exec(ddl);
}
/**
 * Creates the repo-profile tables (repo_profiles, repo_file_observations)
 * plus their supporting indexes. Idempotent via IF NOT EXISTS.
 */
function ensureRepoProfileTables(db) {
  const statements = [
    `
CREATE TABLE IF NOT EXISTS repo_profiles (
profile_id TEXT PRIMARY KEY,
project_hash TEXT NOT NULL,
project_root TEXT NOT NULL DEFAULT '',
head TEXT DEFAULT NULL,
branch TEXT DEFAULT NULL,
remote_hash TEXT DEFAULT NULL,
dirty INTEGER NOT NULL DEFAULT 0,
profile_json TEXT NOT NULL DEFAULT '{}',
created_at TEXT NOT NULL
)
`,
    `
CREATE TABLE IF NOT EXISTS repo_file_observations (
path TEXT PRIMARY KEY,
latest_profile_id TEXT NOT NULL,
git_status TEXT NOT NULL,
ownership TEXT NOT NULL,
language TEXT DEFAULT NULL,
size_bytes INTEGER NOT NULL DEFAULT 0,
content_hash TEXT DEFAULT NULL,
summary TEXT DEFAULT NULL,
first_seen_at TEXT NOT NULL,
last_seen_at TEXT NOT NULL,
adopted_at TEXT DEFAULT NULL,
adoption_unit_id TEXT DEFAULT NULL
)
`,
    "CREATE INDEX IF NOT EXISTS idx_repo_profiles_created ON repo_profiles(created_at DESC)",
    "CREATE INDEX IF NOT EXISTS idx_repo_file_observations_status ON repo_file_observations(git_status, ownership)",
  ];
  for (const sql of statements) db.exec(sql);
}
/**
 * Creates the backlog_items table and its status/sequence listing index.
 * Idempotent via IF NOT EXISTS.
 */
function ensureBacklogTables(db) {
  const statements = [
    `
CREATE TABLE IF NOT EXISTS backlog_items (
id TEXT PRIMARY KEY,
title TEXT NOT NULL,
status TEXT NOT NULL DEFAULT 'pending',
note TEXT NOT NULL DEFAULT '',
source TEXT NOT NULL DEFAULT '',
triage_run_id TEXT DEFAULT NULL,
sequence INTEGER NOT NULL DEFAULT 0,
created_at TEXT NOT NULL,
updated_at TEXT NOT NULL,
promoted_at TEXT DEFAULT NULL
)
`,
    "CREATE INDEX IF NOT EXISTS idx_backlog_items_status_sequence ON backlog_items(status, sequence, id)",
  ];
  for (const sql of statements) db.exec(sql);
}
/**
 * Creates schedule_entries and its lookup indexes, then backfills the
 * autonomous_dispatch column on databases that predate it. Idempotent.
 */
function ensureScheduleTables(db) {
  const statements = [
    `
CREATE TABLE IF NOT EXISTS schedule_entries (
seq INTEGER PRIMARY KEY AUTOINCREMENT,
scope TEXT NOT NULL DEFAULT 'project',
id TEXT NOT NULL,
schema_version INTEGER NOT NULL DEFAULT 1,
kind TEXT NOT NULL DEFAULT 'reminder',
status TEXT NOT NULL DEFAULT 'pending',
due_at TEXT NOT NULL DEFAULT '',
created_at TEXT NOT NULL DEFAULT '',
snoozed_at TEXT DEFAULT NULL,
payload_json TEXT NOT NULL DEFAULT '{}',
created_by TEXT NOT NULL DEFAULT 'user',
autonomous_dispatch INTEGER NOT NULL DEFAULT 0,
full_json TEXT NOT NULL DEFAULT '{}',
imported_from TEXT DEFAULT NULL
)
`,
    "CREATE INDEX IF NOT EXISTS idx_schedule_entries_scope_id_created ON schedule_entries(scope, id, created_at DESC, seq DESC)",
    "CREATE INDEX IF NOT EXISTS idx_schedule_entries_scope_due ON schedule_entries(scope, status, due_at)",
  ];
  for (const sql of statements) db.exec(sql);
  // Older DBs created the table without autonomous_dispatch; add it in place.
  ensureColumn(
    db,
    "schedule_entries",
    "autonomous_dispatch",
    "ALTER TABLE schedule_entries ADD COLUMN autonomous_dispatch INTEGER NOT NULL DEFAULT 0",
  );
}
/**
 * Creates the solver-eval run/case-result tables and their indexes.
 * Case results cascade-delete with their parent run. Idempotent.
 */
function ensureSolverEvalTables(db) {
  const statements = [
    `
CREATE TABLE IF NOT EXISTS solver_eval_runs (
run_id TEXT PRIMARY KEY,
suite_source TEXT NOT NULL DEFAULT '',
cases_count INTEGER NOT NULL DEFAULT 0,
summary_json TEXT NOT NULL DEFAULT '{}',
report_path TEXT NOT NULL DEFAULT '',
results_path TEXT NOT NULL DEFAULT '',
db_recorded INTEGER NOT NULL DEFAULT 1,
created_at TEXT NOT NULL,
updated_at TEXT NOT NULL
)
`,
    `
CREATE TABLE IF NOT EXISTS solver_eval_case_results (
run_id TEXT NOT NULL,
case_id TEXT NOT NULL,
title TEXT NOT NULL DEFAULT '',
mode TEXT NOT NULL,
passed INTEGER NOT NULL DEFAULT 0,
false_complete INTEGER NOT NULL DEFAULT 0,
duration_ms INTEGER DEFAULT NULL,
command_status INTEGER DEFAULT NULL,
solver_outcome TEXT DEFAULT NULL,
pdd_complete INTEGER DEFAULT NULL,
result_json TEXT NOT NULL DEFAULT '{}',
created_at TEXT NOT NULL,
PRIMARY KEY (run_id, case_id, mode),
FOREIGN KEY (run_id) REFERENCES solver_eval_runs(run_id) ON DELETE CASCADE
)
`,
    "CREATE INDEX IF NOT EXISTS idx_solver_eval_runs_created ON solver_eval_runs(created_at DESC)",
    "CREATE INDEX IF NOT EXISTS idx_solver_eval_case_lookup ON solver_eval_case_results(run_id, case_id)",
    "CREATE INDEX IF NOT EXISTS idx_solver_eval_case_false_complete ON solver_eval_case_results(false_complete, mode)",
  ];
  for (const sql of statements) db.exec(sql);
}
/**
 * Creates the headless_runs table and its created_at/status indexes.
 * Idempotent via IF NOT EXISTS.
 */
function ensureHeadlessRunTables(db) {
  const statements = [
    `
CREATE TABLE IF NOT EXISTS headless_runs (
run_id TEXT PRIMARY KEY,
command TEXT NOT NULL DEFAULT '',
status TEXT NOT NULL DEFAULT '',
exit_code INTEGER NOT NULL DEFAULT 0,
timed_out INTEGER NOT NULL DEFAULT 0,
interrupted INTEGER NOT NULL DEFAULT 0,
restart_count INTEGER NOT NULL DEFAULT 0,
max_restarts INTEGER NOT NULL DEFAULT 0,
duration_ms INTEGER NOT NULL DEFAULT 0,
total_events INTEGER NOT NULL DEFAULT 0,
tool_calls INTEGER NOT NULL DEFAULT 0,
solver_eval_run_id TEXT DEFAULT NULL,
solver_eval_report_path TEXT DEFAULT NULL,
details_json TEXT NOT NULL DEFAULT '{}',
created_at TEXT NOT NULL,
updated_at TEXT NOT NULL
)
`,
    "CREATE INDEX IF NOT EXISTS idx_headless_runs_created ON headless_runs(created_at DESC)",
    "CREATE INDEX IF NOT EXISTS idx_headless_runs_status ON headless_runs(status, created_at DESC)",
  ];
  for (const sql of statements) db.exec(sql);
}
/**
 * Creates the uok_messages / uok_message_reads tables plus indexes for
 * inbox, conversation, and chronological lookups. Reads cascade-delete
 * with their message. Idempotent.
 */
function ensureUokMessageTables(db) {
  const statements = [
    `
CREATE TABLE IF NOT EXISTS uok_messages (
id TEXT PRIMARY KEY,
from_agent TEXT NOT NULL,
to_agent TEXT NOT NULL,
body TEXT NOT NULL DEFAULT '',
metadata_json TEXT NOT NULL DEFAULT '{}',
sent_at TEXT NOT NULL DEFAULT '',
delivered_at TEXT DEFAULT NULL
)
`,
    `
CREATE TABLE IF NOT EXISTS uok_message_reads (
message_id TEXT NOT NULL,
agent_id TEXT NOT NULL,
read_at TEXT NOT NULL DEFAULT '',
PRIMARY KEY (message_id, agent_id),
FOREIGN KEY (message_id) REFERENCES uok_messages(id) ON DELETE CASCADE
)
`,
    "CREATE INDEX IF NOT EXISTS idx_uok_messages_to ON uok_messages(to_agent, sent_at DESC)",
    "CREATE INDEX IF NOT EXISTS idx_uok_messages_conversation ON uok_messages(from_agent, to_agent, sent_at DESC)",
    "CREATE INDEX IF NOT EXISTS idx_uok_messages_sent ON uok_messages(sent_at DESC)",
  ];
  for (const sql of statements) db.exec(sql);
}
/**
 * Creates the self_feedback table plus indexes for open-item triage
 * (resolved_at/severity/ts) and kind-based queries. Idempotent.
 */
function ensureSelfFeedbackTables(db) {
  const statements = [
    `
CREATE TABLE IF NOT EXISTS self_feedback (
id TEXT PRIMARY KEY,
ts TEXT NOT NULL,
kind TEXT NOT NULL,
severity TEXT NOT NULL,
blocking INTEGER NOT NULL DEFAULT 0,
repo_identity TEXT NOT NULL DEFAULT '',
sf_version TEXT NOT NULL DEFAULT '',
base_path TEXT NOT NULL DEFAULT '',
unit_type TEXT DEFAULT NULL,
milestone_id TEXT DEFAULT NULL,
slice_id TEXT DEFAULT NULL,
task_id TEXT DEFAULT NULL,
summary TEXT NOT NULL DEFAULT '',
evidence TEXT NOT NULL DEFAULT '',
suggested_fix TEXT NOT NULL DEFAULT '',
full_json TEXT NOT NULL,
resolved_at TEXT DEFAULT NULL,
resolved_reason TEXT DEFAULT NULL,
resolved_by_sf_version TEXT DEFAULT NULL,
resolved_evidence_json TEXT DEFAULT NULL,
resolved_criteria_json TEXT DEFAULT NULL
)
`,
    "CREATE INDEX IF NOT EXISTS idx_self_feedback_open ON self_feedback(resolved_at, severity, ts)",
    "CREATE INDEX IF NOT EXISTS idx_self_feedback_kind ON self_feedback(kind, ts)",
  ];
  for (const sql of statements) db.exec(sql);
}
/**
 * Creates the retrieval_evidence audit table and its backend/scope/status
 * recorded_at indexes. Idempotent via IF NOT EXISTS.
 */
function ensureRetrievalEvidenceTables(db) {
  const statements = [
    `
CREATE TABLE IF NOT EXISTS retrieval_evidence (
id INTEGER PRIMARY KEY AUTOINCREMENT,
backend TEXT NOT NULL,
source_kind TEXT NOT NULL DEFAULT 'code',
query TEXT NOT NULL DEFAULT '',
strategy TEXT NOT NULL DEFAULT '',
scope TEXT NOT NULL DEFAULT '',
project_root TEXT NOT NULL DEFAULT '',
git_head TEXT DEFAULT NULL,
git_branch TEXT DEFAULT NULL,
worktree_dirty INTEGER NOT NULL DEFAULT 0,
freshness TEXT NOT NULL DEFAULT 'unknown',
status TEXT NOT NULL DEFAULT 'ok',
hit_count INTEGER NOT NULL DEFAULT 0,
elapsed_ms INTEGER NOT NULL DEFAULT 0,
cache_path TEXT DEFAULT NULL,
error TEXT DEFAULT NULL,
result_json TEXT NOT NULL DEFAULT '{}',
recorded_at TEXT NOT NULL
)
`,
    "CREATE INDEX IF NOT EXISTS idx_retrieval_evidence_backend_recorded ON retrieval_evidence(backend, recorded_at DESC)",
    "CREATE INDEX IF NOT EXISTS idx_retrieval_evidence_scope_recorded ON retrieval_evidence(scope, recorded_at DESC)",
    "CREATE INDEX IF NOT EXISTS idx_retrieval_evidence_status_recorded ON retrieval_evidence(status, recorded_at DESC)",
  ];
  for (const sql of statements) db.exec(sql);
}
/**
 * Tier 1.3: Spec/Runtime/Evidence schema separation.
 *
 * Creates the six normalized spec/evidence tables for milestone, slice,
 * and task entities plus their evidence-trail indexes. Each entity type
 * has an immutable `<entity>_specs` intent record and an append-only
 * `<entity>_evidence` audit trail; runtime state lives in the existing
 * milestones/slices/tasks tables. Idempotent via IF NOT EXISTS.
 */
function ensureSpecSchemaTables(db) {
  const statements = [
    // Milestone spec: immutable record of intent, versioned by spec_version.
    `
CREATE TABLE IF NOT EXISTS milestone_specs (
id TEXT NOT NULL,
vision TEXT NOT NULL DEFAULT '',
success_criteria TEXT DEFAULT '',
key_risks TEXT DEFAULT '',
proof_strategy TEXT DEFAULT '',
verification_contract TEXT DEFAULT '',
verification_integration TEXT DEFAULT '',
verification_operational TEXT DEFAULT '',
verification_uat TEXT DEFAULT '',
definition_of_done TEXT DEFAULT '',
requirement_coverage TEXT DEFAULT '',
boundary_map_markdown TEXT DEFAULT '',
vision_meeting_json TEXT DEFAULT '',
product_research_json TEXT DEFAULT '',
spec_version INTEGER NOT NULL DEFAULT 1,
created_at TEXT NOT NULL,
PRIMARY KEY (id),
FOREIGN KEY (id) REFERENCES milestones(id)
)
`,
    // Slice spec: immutable record of intent.
    `
CREATE TABLE IF NOT EXISTS slice_specs (
milestone_id TEXT NOT NULL,
slice_id TEXT NOT NULL,
goal TEXT NOT NULL DEFAULT '',
success_criteria TEXT DEFAULT '',
proof_level TEXT DEFAULT '',
integration_closure TEXT DEFAULT '',
observability_impact TEXT DEFAULT '',
adversarial_partner TEXT DEFAULT '',
adversarial_combatant TEXT DEFAULT '',
adversarial_architect TEXT DEFAULT '',
planning_meeting_json TEXT DEFAULT '',
spec_version INTEGER NOT NULL DEFAULT 1,
created_at TEXT NOT NULL,
PRIMARY KEY (milestone_id, slice_id),
FOREIGN KEY (milestone_id) REFERENCES milestones(id),
FOREIGN KEY (milestone_id, slice_id) REFERENCES slices(milestone_id, id)
)
`,
    // Task spec: immutable record of intent.
    `
CREATE TABLE IF NOT EXISTS task_specs (
milestone_id TEXT NOT NULL,
slice_id TEXT NOT NULL,
task_id TEXT NOT NULL,
verify TEXT NOT NULL DEFAULT '',
inputs TEXT DEFAULT '',
expected_output TEXT DEFAULT '',
spec_version INTEGER NOT NULL DEFAULT 1,
created_at TEXT NOT NULL,
PRIMARY KEY (milestone_id, slice_id, task_id),
FOREIGN KEY (milestone_id, slice_id) REFERENCES slices(milestone_id, id),
FOREIGN KEY (milestone_id, slice_id, task_id) REFERENCES tasks(milestone_id, slice_id, id)
)
`,
    // Milestone evidence: append-only audit trail keyed by random evidence_id.
    `
CREATE TABLE IF NOT EXISTS milestone_evidence (
milestone_id TEXT NOT NULL,
evidence_type TEXT NOT NULL,
content TEXT NOT NULL,
recorded_at TEXT NOT NULL,
phase_name TEXT DEFAULT '',
recorded_by TEXT DEFAULT '',
evidence_id TEXT NOT NULL DEFAULT (lower(hex(randomblob(16)))),
PRIMARY KEY (milestone_id, evidence_id),
FOREIGN KEY (milestone_id) REFERENCES milestones(id)
)
`,
    // Slice evidence: append-only audit trail.
    `
CREATE TABLE IF NOT EXISTS slice_evidence (
milestone_id TEXT NOT NULL,
slice_id TEXT NOT NULL,
evidence_type TEXT NOT NULL,
content TEXT NOT NULL,
recorded_at TEXT NOT NULL,
phase_name TEXT DEFAULT '',
recorded_by TEXT DEFAULT '',
evidence_id TEXT NOT NULL DEFAULT (lower(hex(randomblob(16)))),
PRIMARY KEY (milestone_id, slice_id, evidence_id),
FOREIGN KEY (milestone_id, slice_id) REFERENCES slices(milestone_id, id)
)
`,
    // Task evidence: append-only audit trail.
    `
CREATE TABLE IF NOT EXISTS task_evidence (
milestone_id TEXT NOT NULL,
slice_id TEXT NOT NULL,
task_id TEXT NOT NULL,
evidence_type TEXT NOT NULL,
content TEXT NOT NULL,
recorded_at TEXT NOT NULL,
phase_name TEXT DEFAULT '',
recorded_by TEXT DEFAULT '',
evidence_id TEXT NOT NULL DEFAULT (lower(hex(randomblob(16)))),
PRIMARY KEY (milestone_id, slice_id, task_id, evidence_id),
FOREIGN KEY (milestone_id, slice_id, task_id) REFERENCES tasks(milestone_id, slice_id, id)
)
`,
    // Indices for efficient querying of evidence trails.
    `
CREATE INDEX IF NOT EXISTS idx_milestone_evidence_type
ON milestone_evidence(milestone_id, evidence_type, recorded_at DESC)
`,
    `
CREATE INDEX IF NOT EXISTS idx_slice_evidence_type
ON slice_evidence(milestone_id, slice_id, evidence_type, recorded_at DESC)
`,
    `
CREATE INDEX IF NOT EXISTS idx_task_evidence_type
ON task_evidence(milestone_id, slice_id, task_id, evidence_type, recorded_at DESC)
`,
  ];
  for (const sql of statements) db.exec(sql);
}
/**
 * Initializes the full engine schema on a freshly opened DatabaseSync
 * handle, then hands off to migrateSchema() for in-place upgrades.
 *
 * File-backed DBs get WAL journaling plus durability/perf pragmas;
 * in-memory DBs skip them. All base DDL runs inside a single transaction
 * so a crash cannot leave a half-initialized schema behind. Every
 * statement uses IF NOT EXISTS, so re-running against an existing DB is
 * a no-op.
 *
 * Fix over previous revision: ensureSolverEvalTables and
 * ensureRetrievalEvidenceTables were each invoked twice, and the
 * self_feedback indexes were re-created inline even though
 * ensureSelfFeedbackTables already creates them — the duplicates are
 * removed (behavior is unchanged because all DDL was idempotent).
 */
function initSchema(db, fileBacked) {
  // Pragmas tied to on-disk storage; meaningless for :memory: databases.
  if (fileBacked) db.exec("PRAGMA journal_mode=WAL");
  if (fileBacked) db.exec("PRAGMA busy_timeout = 5000");
  if (fileBacked) db.exec("PRAGMA synchronous = NORMAL");
  if (fileBacked) db.exec("PRAGMA auto_vacuum = INCREMENTAL");
  if (fileBacked) db.exec("PRAGMA cache_size = -8000"); // 8 MB page cache
  // mmap is skipped on macOS; enabled elsewhere for file-backed DBs.
  if (fileBacked && process.platform !== "darwin")
    db.exec("PRAGMA mmap_size = 67108864"); // 64 MB mmap
  db.exec("PRAGMA temp_store = MEMORY");
  db.exec("PRAGMA foreign_keys = ON");
  db.exec("BEGIN");
  try {
    // ── Core bookkeeping ─────────────────────────────────────────────
    db.exec(`
CREATE TABLE IF NOT EXISTS schema_version (
version INTEGER NOT NULL,
applied_at TEXT NOT NULL
)
`);
    db.exec(`
CREATE TABLE IF NOT EXISTS decisions (
seq INTEGER PRIMARY KEY AUTOINCREMENT,
id TEXT NOT NULL UNIQUE,
when_context TEXT NOT NULL DEFAULT '',
scope TEXT NOT NULL DEFAULT '',
decision TEXT NOT NULL DEFAULT '',
choice TEXT NOT NULL DEFAULT '',
rationale TEXT NOT NULL DEFAULT '',
revisable TEXT NOT NULL DEFAULT '',
made_by TEXT NOT NULL DEFAULT 'agent',
superseded_by TEXT DEFAULT NULL
)
`);
    db.exec(`
CREATE TABLE IF NOT EXISTS requirements (
id TEXT PRIMARY KEY,
class TEXT NOT NULL DEFAULT '',
status TEXT NOT NULL DEFAULT '',
description TEXT NOT NULL DEFAULT '',
why TEXT NOT NULL DEFAULT '',
source TEXT NOT NULL DEFAULT '',
primary_owner TEXT NOT NULL DEFAULT '',
supporting_slices TEXT NOT NULL DEFAULT '',
validation TEXT NOT NULL DEFAULT '',
notes TEXT NOT NULL DEFAULT '',
full_content TEXT NOT NULL DEFAULT '',
superseded_by TEXT DEFAULT NULL
)
`);
    db.exec(`
CREATE TABLE IF NOT EXISTS artifacts (
path TEXT PRIMARY KEY,
artifact_type TEXT NOT NULL DEFAULT '',
milestone_id TEXT DEFAULT NULL,
slice_id TEXT DEFAULT NULL,
task_id TEXT DEFAULT NULL,
full_content TEXT NOT NULL DEFAULT '',
imported_at TEXT NOT NULL DEFAULT ''
)
`);
    // ── Memory subsystem ─────────────────────────────────────────────
    db.exec(`
CREATE TABLE IF NOT EXISTS memories (
seq INTEGER PRIMARY KEY AUTOINCREMENT,
id TEXT NOT NULL UNIQUE,
category TEXT NOT NULL,
content TEXT NOT NULL,
confidence REAL NOT NULL DEFAULT 0.8,
source_unit_type TEXT,
source_unit_id TEXT,
created_at TEXT NOT NULL,
updated_at TEXT NOT NULL,
superseded_by TEXT DEFAULT NULL,
hit_count INTEGER NOT NULL DEFAULT 0,
tags TEXT NOT NULL DEFAULT '[]'
)
`);
    db.exec(`
CREATE TABLE IF NOT EXISTS memory_processed_units (
unit_key TEXT PRIMARY KEY,
activity_file TEXT,
processed_at TEXT NOT NULL
)
`);
    // memory_embeddings, memory_relations, memory_sources used to be referenced
    // by helper functions and queries (memory-embeddings.ts, memory-relations.ts,
    // memory-ingest.ts) without a corresponding CREATE TABLE — any actual write
    // would have failed with "no such table". Creating them as IF NOT EXISTS so
    // existing DBs that somehow have them survive, and fresh DBs work.
    db.exec(`
CREATE TABLE IF NOT EXISTS memory_embeddings (
memory_id TEXT PRIMARY KEY,
model TEXT NOT NULL,
dim INTEGER NOT NULL,
vector BLOB NOT NULL,
updated_at TEXT NOT NULL,
FOREIGN KEY (memory_id) REFERENCES memories(id) ON DELETE CASCADE
)
`);
    db.exec(`
CREATE TABLE IF NOT EXISTS memory_relations (
from_id TEXT NOT NULL,
to_id TEXT NOT NULL,
rel TEXT NOT NULL,
confidence REAL NOT NULL DEFAULT 0.8,
created_at TEXT NOT NULL,
PRIMARY KEY (from_id, to_id, rel),
FOREIGN KEY (from_id) REFERENCES memories(id) ON DELETE CASCADE,
FOREIGN KEY (to_id) REFERENCES memories(id) ON DELETE CASCADE
)
`);
    // PK covers from_id as leading column already; reverse lookups
    // (memory-relations.ts queries WHERE to_id = ?) need their own index
    // to avoid a full table scan as the relation count grows.
    db.exec(
      "CREATE INDEX IF NOT EXISTS idx_memory_relations_to ON memory_relations(to_id)",
    );
    db.exec(`
CREATE TABLE IF NOT EXISTS memory_sources (
id TEXT PRIMARY KEY,
kind TEXT NOT NULL,
uri TEXT,
title TEXT,
content TEXT NOT NULL,
content_hash TEXT NOT NULL,
imported_at TEXT NOT NULL,
scope TEXT NOT NULL DEFAULT 'project',
tags TEXT NOT NULL DEFAULT '[]'
)
`);
    // content_hash is queried on every insert for deduplication; without an
    // index the lookup becomes a full table scan as ingestion volume grows.
    db.exec(
      "CREATE INDEX IF NOT EXISTS idx_memory_sources_content_hash ON memory_sources(content_hash)",
    );
    // Category GROUP BY queries (e.g. /memory stats) need a covering
    // index that filters active memories and groups by category.
    db.exec(
      "CREATE INDEX IF NOT EXISTS idx_memories_category ON memories(superseded_by, category)",
    );
    db.exec(`
CREATE TABLE IF NOT EXISTS judgments (
id INTEGER PRIMARY KEY AUTOINCREMENT,
unit_id TEXT NOT NULL,
decision TEXT NOT NULL DEFAULT '',
alternatives_json TEXT NOT NULL DEFAULT '[]',
reasoning TEXT NOT NULL DEFAULT '',
confidence TEXT NOT NULL DEFAULT 'medium',
ts TEXT NOT NULL
)
`);
    db.exec(
      "CREATE INDEX IF NOT EXISTS idx_judgments_unit_id ON judgments(unit_id, ts DESC)",
    );
    // ── Work hierarchy: milestones → slices → tasks ──────────────────
    db.exec(`
CREATE TABLE IF NOT EXISTS milestones (
id TEXT PRIMARY KEY,
title TEXT NOT NULL DEFAULT '',
status TEXT NOT NULL DEFAULT 'active',
depends_on TEXT NOT NULL DEFAULT '[]',
created_at TEXT NOT NULL DEFAULT '',
completed_at TEXT DEFAULT NULL,
vision TEXT NOT NULL DEFAULT '',
success_criteria TEXT NOT NULL DEFAULT '[]',
key_risks TEXT NOT NULL DEFAULT '[]',
proof_strategy TEXT NOT NULL DEFAULT '[]',
verification_contract TEXT NOT NULL DEFAULT '',
verification_integration TEXT NOT NULL DEFAULT '',
verification_operational TEXT NOT NULL DEFAULT '',
verification_uat TEXT NOT NULL DEFAULT '',
definition_of_done TEXT NOT NULL DEFAULT '[]',
requirement_coverage TEXT NOT NULL DEFAULT '',
boundary_map_markdown TEXT NOT NULL DEFAULT '',
vision_meeting_json TEXT NOT NULL DEFAULT '',
product_research_json TEXT NOT NULL DEFAULT '',
sequence INTEGER DEFAULT 0
)
`);
    db.exec(`
CREATE TABLE IF NOT EXISTS slices (
milestone_id TEXT NOT NULL,
id TEXT NOT NULL,
title TEXT NOT NULL DEFAULT '',
status TEXT NOT NULL DEFAULT 'pending',
risk TEXT NOT NULL DEFAULT 'medium',
depends TEXT NOT NULL DEFAULT '[]',
demo TEXT NOT NULL DEFAULT '',
created_at TEXT NOT NULL DEFAULT '',
completed_at TEXT DEFAULT NULL,
full_summary_md TEXT NOT NULL DEFAULT '',
full_uat_md TEXT NOT NULL DEFAULT '',
goal TEXT NOT NULL DEFAULT '',
success_criteria TEXT NOT NULL DEFAULT '',
proof_level TEXT NOT NULL DEFAULT '',
integration_closure TEXT NOT NULL DEFAULT '',
observability_impact TEXT NOT NULL DEFAULT '',
adversarial_partner TEXT NOT NULL DEFAULT '',
adversarial_combatant TEXT NOT NULL DEFAULT '',
adversarial_architect TEXT NOT NULL DEFAULT '',
planning_meeting_json TEXT NOT NULL DEFAULT '',
sequence INTEGER DEFAULT 0, -- Ordering hint: tools may set this to control execution order
replan_triggered_at TEXT DEFAULT NULL,
is_sketch INTEGER NOT NULL DEFAULT 0, -- SF ADR-011: 1 = slice is a sketch awaiting refine-slice
sketch_scope TEXT NOT NULL DEFAULT '', -- SF ADR-011: 2-3 sentence scope hint from plan-milestone
PRIMARY KEY (milestone_id, id),
FOREIGN KEY (milestone_id) REFERENCES milestones(id)
)
`);
    db.exec(`
CREATE TABLE IF NOT EXISTS tasks (
milestone_id TEXT NOT NULL,
slice_id TEXT NOT NULL,
id TEXT NOT NULL,
title TEXT NOT NULL DEFAULT '',
status TEXT NOT NULL DEFAULT 'pending',
one_liner TEXT NOT NULL DEFAULT '',
narrative TEXT NOT NULL DEFAULT '',
verification_result TEXT NOT NULL DEFAULT '',
duration TEXT NOT NULL DEFAULT '',
completed_at TEXT DEFAULT NULL,
blocker_discovered INTEGER DEFAULT 0,
deviations TEXT NOT NULL DEFAULT '',
known_issues TEXT NOT NULL DEFAULT '',
key_files TEXT NOT NULL DEFAULT '[]',
key_decisions TEXT NOT NULL DEFAULT '[]',
full_summary_md TEXT NOT NULL DEFAULT '',
description TEXT NOT NULL DEFAULT '',
estimate TEXT NOT NULL DEFAULT '',
files TEXT NOT NULL DEFAULT '[]',
verify TEXT NOT NULL DEFAULT '',
inputs TEXT NOT NULL DEFAULT '[]',
expected_output TEXT NOT NULL DEFAULT '[]',
observability_impact TEXT NOT NULL DEFAULT '',
full_plan_md TEXT NOT NULL DEFAULT '',
created_at TEXT NOT NULL DEFAULT '',
verification_status TEXT NOT NULL DEFAULT '',
sequence INTEGER DEFAULT 0, -- Ordering hint: tools may set this to control execution order
escalation_pending INTEGER NOT NULL DEFAULT 0, -- ADR-011 P2 (SF): pause-on-escalation flag
escalation_awaiting_review INTEGER NOT NULL DEFAULT 0, -- ADR-011 P2 (SF): continueWithDefault=true marker (no pause)
escalation_override_applied INTEGER NOT NULL DEFAULT 0, -- SF ADR-011 P2: 1 once carry-forward injected into a downstream prompt
escalation_artifact_path TEXT DEFAULT NULL, -- ADR-011 P2 (SF): path to T##-ESCALATION.json
PRIMARY KEY (milestone_id, slice_id, id),
FOREIGN KEY (milestone_id, slice_id) REFERENCES slices(milestone_id, id)
)
`);
    // Guarded for DBs where migrateSchema has not yet added the column.
    if (columnExists(db, "tasks", "escalation_pending")) {
      db.exec(`
CREATE INDEX IF NOT EXISTS idx_tasks_escalation_pending ON tasks(milestone_id, slice_id, escalation_pending)
`);
    }
    db.exec(`
CREATE TABLE IF NOT EXISTS verification_evidence (
id INTEGER PRIMARY KEY AUTOINCREMENT,
task_id TEXT NOT NULL DEFAULT '',
slice_id TEXT NOT NULL DEFAULT '',
milestone_id TEXT NOT NULL DEFAULT '',
command TEXT NOT NULL DEFAULT '',
exit_code INTEGER DEFAULT 0,
verdict TEXT NOT NULL DEFAULT '',
duration_ms INTEGER DEFAULT 0,
created_at TEXT NOT NULL DEFAULT '',
FOREIGN KEY (milestone_id, slice_id, task_id) REFERENCES tasks(milestone_id, slice_id, id)
)
`);
    db.exec(`
CREATE TABLE IF NOT EXISTS replan_history (
id INTEGER PRIMARY KEY AUTOINCREMENT,
milestone_id TEXT NOT NULL DEFAULT '',
slice_id TEXT DEFAULT NULL,
task_id TEXT DEFAULT NULL,
summary TEXT NOT NULL DEFAULT '',
previous_artifact_path TEXT DEFAULT NULL,
replacement_artifact_path TEXT DEFAULT NULL,
created_at TEXT NOT NULL DEFAULT '',
FOREIGN KEY (milestone_id) REFERENCES milestones(id)
)
`);
    db.exec(`
CREATE TABLE IF NOT EXISTS assessments (
path TEXT PRIMARY KEY,
milestone_id TEXT NOT NULL DEFAULT '',
slice_id TEXT DEFAULT NULL,
task_id TEXT DEFAULT NULL,
status TEXT NOT NULL DEFAULT '',
scope TEXT NOT NULL DEFAULT '',
full_content TEXT NOT NULL DEFAULT '',
created_at TEXT NOT NULL DEFAULT '',
FOREIGN KEY (milestone_id) REFERENCES milestones(id)
)
`);
    db.exec(`
CREATE TABLE IF NOT EXISTS quality_gates (
milestone_id TEXT NOT NULL,
slice_id TEXT NOT NULL,
gate_id TEXT NOT NULL,
scope TEXT NOT NULL DEFAULT 'slice',
task_id TEXT NOT NULL DEFAULT '',
status TEXT NOT NULL DEFAULT 'pending',
verdict TEXT NOT NULL DEFAULT '',
rationale TEXT NOT NULL DEFAULT '',
findings TEXT NOT NULL DEFAULT '',
evaluated_at TEXT DEFAULT NULL,
PRIMARY KEY (milestone_id, slice_id, gate_id, task_id),
FOREIGN KEY (milestone_id, slice_id) REFERENCES slices(milestone_id, id)
)
`);
    // Slice dependency junction table (v14)
    db.exec(`
CREATE TABLE IF NOT EXISTS slice_dependencies (
milestone_id TEXT NOT NULL,
slice_id TEXT NOT NULL,
depends_on_slice_id TEXT NOT NULL,
PRIMARY KEY (milestone_id, slice_id, depends_on_slice_id),
FOREIGN KEY (milestone_id, slice_id) REFERENCES slices(milestone_id, id),
FOREIGN KEY (milestone_id, depends_on_slice_id) REFERENCES slices(milestone_id, id)
)
`);
    // ── Gate / turn / audit infrastructure ───────────────────────────
    db.exec(`
CREATE TABLE IF NOT EXISTS gate_runs (
id INTEGER PRIMARY KEY AUTOINCREMENT,
trace_id TEXT NOT NULL,
turn_id TEXT NOT NULL,
gate_id TEXT NOT NULL,
gate_type TEXT NOT NULL DEFAULT '',
unit_type TEXT DEFAULT NULL,
unit_id TEXT DEFAULT NULL,
milestone_id TEXT DEFAULT NULL,
slice_id TEXT DEFAULT NULL,
task_id TEXT DEFAULT NULL,
outcome TEXT NOT NULL DEFAULT 'pass',
failure_class TEXT NOT NULL DEFAULT 'none',
rationale TEXT NOT NULL DEFAULT '',
findings TEXT NOT NULL DEFAULT '',
attempt INTEGER NOT NULL DEFAULT 1,
max_attempts INTEGER NOT NULL DEFAULT 1,
retryable INTEGER NOT NULL DEFAULT 0,
evaluated_at TEXT NOT NULL DEFAULT '',
duration_ms INTEGER DEFAULT NULL,
cost_micro_usd INTEGER DEFAULT NULL
)
`);
    db.exec(`
CREATE TABLE IF NOT EXISTS gate_circuit_breakers (
gate_id TEXT PRIMARY KEY,
state TEXT NOT NULL DEFAULT 'closed',
failure_streak INTEGER NOT NULL DEFAULT 0,
last_failure_at TEXT DEFAULT NULL,
opened_at TEXT DEFAULT NULL,
half_open_attempts INTEGER NOT NULL DEFAULT 0,
updated_at TEXT NOT NULL DEFAULT ''
)
`);
    db.exec(`
CREATE TABLE IF NOT EXISTS turn_git_transactions (
trace_id TEXT NOT NULL,
turn_id TEXT NOT NULL,
unit_type TEXT DEFAULT NULL,
unit_id TEXT DEFAULT NULL,
stage TEXT NOT NULL DEFAULT 'turn-start',
action TEXT NOT NULL DEFAULT 'status-only',
push INTEGER NOT NULL DEFAULT 0,
status TEXT NOT NULL DEFAULT 'ok',
error TEXT DEFAULT NULL,
metadata_json TEXT NOT NULL DEFAULT '{}',
updated_at TEXT NOT NULL DEFAULT '',
PRIMARY KEY (trace_id, turn_id, stage)
)
`);
    db.exec(`
CREATE TABLE IF NOT EXISTS audit_events (
event_id TEXT PRIMARY KEY,
trace_id TEXT NOT NULL,
turn_id TEXT DEFAULT NULL,
caused_by TEXT DEFAULT NULL,
category TEXT NOT NULL,
type TEXT NOT NULL,
ts TEXT NOT NULL,
payload_json TEXT NOT NULL DEFAULT '{}'
)
`);
    db.exec(`
CREATE TABLE IF NOT EXISTS audit_turn_index (
trace_id TEXT NOT NULL,
turn_id TEXT NOT NULL,
first_ts TEXT NOT NULL,
last_ts TEXT NOT NULL,
event_count INTEGER NOT NULL DEFAULT 0,
PRIMARY KEY (trace_id, turn_id)
)
`);
    db.exec(`
CREATE TABLE IF NOT EXISTS llm_task_outcomes (
model_id TEXT NOT NULL,
provider TEXT NOT NULL,
unit_type TEXT NOT NULL,
unit_id TEXT NOT NULL,
succeeded INTEGER NOT NULL DEFAULT 0,
retries INTEGER NOT NULL DEFAULT 0,
escalated INTEGER NOT NULL DEFAULT 0,
verification_passed INTEGER DEFAULT NULL,
blocker_discovered INTEGER NOT NULL DEFAULT 0,
duration_ms INTEGER DEFAULT NULL,
tokens_total INTEGER DEFAULT NULL,
cost_usd REAL DEFAULT NULL,
recorded_at INTEGER NOT NULL
)
`);
    db.exec(`
CREATE TABLE IF NOT EXISTS uok_runs (
run_id TEXT PRIMARY KEY,
session_id TEXT DEFAULT NULL,
path TEXT NOT NULL DEFAULT '',
status TEXT NOT NULL DEFAULT 'started',
started_at TEXT NOT NULL,
ended_at TEXT DEFAULT NULL,
error TEXT DEFAULT NULL,
flags_json TEXT NOT NULL DEFAULT '{}',
updated_at TEXT NOT NULL
)
`);
    // Shared sub-schemas — each helper is idempotent and called exactly once.
    // (Previous revision called ensureSolverEvalTables and
    // ensureRetrievalEvidenceTables a second time further down; removed.)
    ensureSelfFeedbackTables(db);
    ensureSolverEvalTables(db);
    ensureRetrievalEvidenceTables(db);
    db.exec(
      "CREATE INDEX IF NOT EXISTS idx_memories_active ON memories(superseded_by)",
    );
    db.exec(
      "CREATE INDEX IF NOT EXISTS idx_replan_history_milestone ON replan_history(milestone_id, created_at)",
    );
    // v13 indexes — hot-path dispatch queries
    db.exec(
      "CREATE INDEX IF NOT EXISTS idx_tasks_active ON tasks(milestone_id, slice_id, status)",
    );
    db.exec(
      "CREATE INDEX IF NOT EXISTS idx_slices_active ON slices(milestone_id, status)",
    );
    db.exec(
      "CREATE INDEX IF NOT EXISTS idx_milestones_status ON milestones(status)",
    );
    db.exec(
      "CREATE INDEX IF NOT EXISTS idx_quality_gates_pending ON quality_gates(milestone_id, slice_id, status)",
    );
    db.exec(
      "CREATE INDEX IF NOT EXISTS idx_verification_evidence_task ON verification_evidence(milestone_id, slice_id, task_id)",
    );
    ensureVerificationEvidenceDedupIndex(db);
    // v14 index — slice dependency lookups
    db.exec(
      "CREATE INDEX IF NOT EXISTS idx_slice_deps_target ON slice_dependencies(milestone_id, depends_on_slice_id)",
    );
    db.exec(
      "CREATE INDEX IF NOT EXISTS idx_gate_runs_turn ON gate_runs(trace_id, turn_id)",
    );
    db.exec(
      "CREATE INDEX IF NOT EXISTS idx_gate_runs_lookup ON gate_runs(milestone_id, slice_id, task_id, gate_id)",
    );
    db.exec(
      "CREATE INDEX IF NOT EXISTS idx_turn_git_tx_turn ON turn_git_transactions(trace_id, turn_id)",
    );
    db.exec(
      "CREATE INDEX IF NOT EXISTS idx_audit_events_trace ON audit_events(trace_id, ts)",
    );
    db.exec(
      "CREATE INDEX IF NOT EXISTS idx_audit_events_turn ON audit_events(trace_id, turn_id, ts)",
    );
    db.exec(
      "CREATE UNIQUE INDEX IF NOT EXISTS idx_llm_task_outcomes_identity ON llm_task_outcomes(unit_type, unit_id, recorded_at)",
    );
    db.exec(
      "CREATE INDEX IF NOT EXISTS idx_llm_task_outcomes_model_unit ON llm_task_outcomes(model_id, unit_type, recorded_at DESC)",
    );
    db.exec(
      "CREATE INDEX IF NOT EXISTS idx_llm_task_outcomes_unit ON llm_task_outcomes(unit_type, recorded_at DESC)",
    );
    db.exec(
      "CREATE INDEX IF NOT EXISTS idx_llm_task_outcomes_provider ON llm_task_outcomes(provider, recorded_at DESC)",
    );
    db.exec(
      "CREATE INDEX IF NOT EXISTS idx_uok_runs_status_started ON uok_runs(status, started_at DESC)",
    );
    db.exec(
      "CREATE INDEX IF NOT EXISTS idx_uok_runs_session ON uok_runs(session_id, started_at DESC)",
    );
    // (self_feedback indexes are created inside ensureSelfFeedbackTables above;
    // the inline duplicates that used to live here were removed.)
    ensureRepoProfileTables(db);
    ensureBacklogTables(db);
    ensureScheduleTables(db);
    ensureHeadlessRunTables(db);
    ensureUokMessageTables(db);
    ensureSpecSchemaTables(db);
    // Convenience views over the supersession chains.
    db.exec(
      `CREATE VIEW IF NOT EXISTS active_decisions AS SELECT * FROM decisions WHERE superseded_by IS NULL`,
    );
    db.exec(
      `CREATE VIEW IF NOT EXISTS active_requirements AS SELECT * FROM requirements WHERE superseded_by IS NULL`,
    );
    db.exec(
      `CREATE VIEW IF NOT EXISTS active_memories AS SELECT * FROM memories WHERE superseded_by IS NULL`,
    );
    // Seed schema_version only on a truly fresh DB; migrateSchema owns bumps.
    const existing = db
      .prepare("SELECT count(*) as cnt FROM schema_version")
      .get();
    if (existing && existing["cnt"] === 0) {
      db.prepare(
        "INSERT INTO schema_version (version, applied_at) VALUES (:version, :applied_at)",
      ).run({
        ":version": SCHEMA_VERSION,
        ":applied_at": new Date().toISOString(),
      });
    }
    db.exec("COMMIT");
  } catch (err) {
    db.exec("ROLLBACK");
    throw err;
  }
  migrateSchema(db);
}
function columnExists(db, table, column) {
  // Report whether `table` already has a column named `column`.
  // Uses PRAGMA table_info; `table` is interpolated directly because every
  // caller in this file passes a fixed internal table name, never user input.
  const columns = db.prepare(`PRAGMA table_info(${table})`).all();
  for (const info of columns) {
    if (info["name"] === column) return true;
  }
  return false;
}
function ensureColumn(db, table, column, ddl) {
  // Idempotent ALTER TABLE guard: run `ddl` only when `column` is still
  // missing from `table`, so repeated migrations are safe no-ops.
  if (columnExists(db, table, column)) return;
  db.exec(ddl);
}
function hasPlanningPayload(planning = {}) {
  // A planning payload is considered non-empty when any narrative/text field
  // is truthy or any list-valued section has at least one entry.
  const textFields = [
    planning.vision,
    planning.verificationContract,
    planning.verificationIntegration,
    planning.verificationOperational,
    planning.verificationUat,
    planning.requirementCoverage,
    planning.boundaryMapMarkdown,
    planning.visionMeeting,
    planning.productResearch,
  ];
  const listFields = [
    planning.successCriteria,
    planning.keyRisks,
    planning.proofStrategy,
    planning.definitionOfDone,
  ];
  return (
    textFields.some((value) => Boolean(value)) ||
    listFields.some((list) => (list?.length ?? 0) > 0)
  );
}
function parseJsonOrFallback(raw, fallback) {
  // Lenient JSON decode: non-strings, blank/whitespace-only strings, and
  // malformed JSON all resolve to `fallback` instead of throwing.
  if (typeof raw !== "string") return fallback;
  if (raw.trim() === "") return fallback;
  try {
    return JSON.parse(raw);
  } catch {
    return fallback;
  }
}
function isEmptyMilestoneSpec(row) {
  // A milestone_specs row counts as empty when every text column is blank
  // and every JSON-array column decodes to zero entries (malformed JSON is
  // treated as empty via parseJsonOrFallback). A missing row is trivially
  // empty.
  if (!row) return true;
  const textColumns = [
    "vision",
    "verification_contract",
    "verification_integration",
    "verification_operational",
    "verification_uat",
    "requirement_coverage",
    "boundary_map_markdown",
    "vision_meeting_json",
    "product_research_json",
  ];
  const listColumns = [
    "success_criteria",
    "key_risks",
    "proof_strategy",
    "definition_of_done",
  ];
  return (
    textColumns.every((col) => (row[col] ?? "") === "") &&
    listColumns.every((col) => parseJsonOrFallback(row[col], []).length === 0)
  );
}
function ensureTaskCreatedAtColumn(db) {
  // Idempotently add tasks.created_at — databases created before this
  // column existed get it backfilled with an empty-string default.
  const ddl = `ALTER TABLE tasks ADD COLUMN created_at TEXT NOT NULL DEFAULT ''`;
  ensureColumn(db, "tasks", "created_at", ddl);
}
function migrateCostUsdToMicroUsd(db) {
  // Tier 2.7 migration: move gate_runs cost tracking from floating-point
  // cost_usd (REAL) to integer cost_micro_usd (USD × 1,000,000). Integer
  // micro-USD eliminates float drift on accumulated totals and makes budget
  // checks and cost analytics exact.
  const hasMicroColumn = columnExists(db, "gate_runs", "cost_micro_usd");
  if (!hasMicroColumn) {
    db.exec(
      `ALTER TABLE gate_runs ADD COLUMN cost_micro_usd INTEGER DEFAULT NULL`,
    );
  }
  // Backfill: NULL costs stay NULL; non-NULL values are scaled and rounded.
  // Rows that already carry a micro value are left untouched, so the
  // migration is safe to re-run.
  if (columnExists(db, "gate_runs", "cost_usd")) {
    db.prepare(`
      UPDATE gate_runs
      SET cost_micro_usd = CAST(ROUND(cost_usd * 1000000) AS INTEGER)
      WHERE cost_usd IS NOT NULL
        AND cost_micro_usd IS NULL
    `).run();
  }
  // The legacy cost_usd column is intentionally kept (deprecated, unused):
  // ALTER TABLE ... DROP COLUMN needs SQLite >= 3.35, and dropping waits
  // until every query has been confirmed to use cost_micro_usd.
}
function populateSpecTablesFromExisting(db) {
  // Tier 1.3 Phase 2 backfill: copy spec data that historically lived on the
  // runtime tables (milestones / slices / tasks) into the dedicated
  // milestone_specs / slice_specs / task_specs tables. Evidence tables are
  // deliberately not backfilled — they populate as tools record new evidence.
  //
  // Each statement pairs INSERT OR IGNORE with a NOT IN guard, so re-running
  // the migration never duplicates or overwrites existing spec rows.
  const now = new Date().toISOString();
  const backfillStatements = [
    // Milestones → milestone_specs (product_research_json starts empty).
    `
      INSERT OR IGNORE INTO milestone_specs (
        id, vision, success_criteria, key_risks, proof_strategy,
        verification_contract, verification_integration, verification_operational, verification_uat,
        definition_of_done, requirement_coverage, boundary_map_markdown, vision_meeting_json, product_research_json,
        spec_version, created_at
      )
      SELECT
        id, vision, success_criteria, key_risks, proof_strategy,
        verification_contract, verification_integration, verification_operational, verification_uat,
        definition_of_done, requirement_coverage, boundary_map_markdown, vision_meeting_json, '',
        1, COALESCE(created_at, ?)
      FROM milestones
      WHERE id NOT IN (SELECT id FROM milestone_specs)
    `,
    // Slices → slice_specs.
    `
      INSERT OR IGNORE INTO slice_specs (
        milestone_id, slice_id, goal, success_criteria, proof_level,
        integration_closure, observability_impact,
        adversarial_partner, adversarial_combatant, adversarial_architect,
        planning_meeting_json, spec_version, created_at
      )
      SELECT
        milestone_id, id, goal, success_criteria, proof_level,
        integration_closure, observability_impact,
        adversarial_partner, adversarial_combatant, adversarial_architect,
        planning_meeting_json, 1, COALESCE(created_at, ?)
      FROM slices
      WHERE (milestone_id, id) NOT IN (SELECT milestone_id, slice_id FROM slice_specs)
    `,
    // Tasks → task_specs.
    `
      INSERT OR IGNORE INTO task_specs (
        milestone_id, slice_id, task_id, verify, inputs, expected_output,
        spec_version, created_at
      )
      SELECT
        milestone_id, slice_id, id, verify, inputs, expected_output,
        1, COALESCE(created_at, ?)
      FROM tasks
      WHERE (milestone_id, slice_id, id) NOT IN (SELECT milestone_id, slice_id, task_id FROM task_specs)
    `,
  ];
  for (const sql of backfillStatements) {
    db.prepare(sql).run(now);
  }
}
function migrateSchema(db) {
const row = db.prepare("SELECT MAX(version) as v FROM schema_version").get();
const currentVersion = row ? row["v"] : 0;
if (currentVersion >= SCHEMA_VERSION) return;
// Backup database before migration so a mid-migration crash doesn't
// leave a partially-migrated DB with no recovery path.
// WAL-safe: checkpoint first to flush WAL into the main DB file, then copy.
if (currentPath && currentPath !== ":memory:" && existsSync(currentPath)) {
try {
const backupPath = `${currentPath}.backup-v${currentVersion}`;
if (!existsSync(backupPath)) {
// Flush WAL to main DB file before copying — without this, the backup
// may be missing committed data that only exists in the -wal file.
try {
db.exec("PRAGMA wal_checkpoint(TRUNCATE)");
} catch {
/* checkpoint is best-effort */
}
copyFileSync(currentPath, backupPath);
}
} catch (backupErr) {
// Log but proceed — blocking migration leaves the DB stuck at an old
// schema version permanently on read-only or full filesystems.
logWarning(
"db",
`Pre-migration backup failed: ${backupErr instanceof Error ? backupErr.message : String(backupErr)}`,
);
}
}
db.exec("BEGIN");
try {
if (currentVersion < 2) {
db.exec(`
CREATE TABLE IF NOT EXISTS artifacts (
path TEXT PRIMARY KEY,
artifact_type TEXT NOT NULL DEFAULT '',
milestone_id TEXT DEFAULT NULL,
slice_id TEXT DEFAULT NULL,
task_id TEXT DEFAULT NULL,
full_content TEXT NOT NULL DEFAULT '',
imported_at TEXT NOT NULL DEFAULT ''
)
`);
db.prepare(
"INSERT INTO schema_version (version, applied_at) VALUES (:version, :applied_at)",
).run({
":version": 2,
":applied_at": new Date().toISOString(),
});
}
if (currentVersion < 3) {
db.exec(`
CREATE TABLE IF NOT EXISTS memories (
seq INTEGER PRIMARY KEY AUTOINCREMENT,
id TEXT NOT NULL UNIQUE,
category TEXT NOT NULL,
content TEXT NOT NULL,
confidence REAL NOT NULL DEFAULT 0.8,
source_unit_type TEXT,
source_unit_id TEXT,
created_at TEXT NOT NULL,
updated_at TEXT NOT NULL,
superseded_by TEXT DEFAULT NULL,
hit_count INTEGER NOT NULL DEFAULT 0
)
`);
db.exec(`
CREATE TABLE IF NOT EXISTS memory_processed_units (
unit_key TEXT PRIMARY KEY,
activity_file TEXT,
processed_at TEXT NOT NULL
)
`);
db.exec(
"CREATE INDEX IF NOT EXISTS idx_memories_active ON memories(superseded_by)",
);
db.exec("DROP VIEW IF EXISTS active_memories");
db.exec(
"CREATE VIEW active_memories AS SELECT * FROM memories WHERE superseded_by IS NULL",
);
db.prepare(
"INSERT INTO schema_version (version, applied_at) VALUES (:version, :applied_at)",
).run({
":version": 3,
":applied_at": new Date().toISOString(),
});
}
if (currentVersion < 4) {
ensureColumn(
db,
"decisions",
"made_by",
`ALTER TABLE decisions ADD COLUMN made_by TEXT NOT NULL DEFAULT 'agent'`,
);
db.exec("DROP VIEW IF EXISTS active_decisions");
db.exec(
"CREATE VIEW active_decisions AS SELECT * FROM decisions WHERE superseded_by IS NULL",
);
db.prepare(
"INSERT INTO schema_version (version, applied_at) VALUES (:version, :applied_at)",
).run({
":version": 4,
":applied_at": new Date().toISOString(),
});
}
if (currentVersion < 5) {
db.exec(`
CREATE TABLE IF NOT EXISTS milestones (
id TEXT PRIMARY KEY,
title TEXT NOT NULL DEFAULT '',
status TEXT NOT NULL DEFAULT 'active',
created_at TEXT NOT NULL,
completed_at TEXT DEFAULT NULL
)
`);
db.exec(`
CREATE TABLE IF NOT EXISTS slices (
milestone_id TEXT NOT NULL,
id TEXT NOT NULL,
title TEXT NOT NULL DEFAULT '',
status TEXT NOT NULL DEFAULT 'pending',
risk TEXT NOT NULL DEFAULT 'medium',
created_at TEXT NOT NULL DEFAULT '',
completed_at TEXT DEFAULT NULL,
PRIMARY KEY (milestone_id, id),
FOREIGN KEY (milestone_id) REFERENCES milestones(id)
)
`);
db.exec(`
CREATE TABLE IF NOT EXISTS tasks (
milestone_id TEXT NOT NULL,
slice_id TEXT NOT NULL,
id TEXT NOT NULL,
title TEXT NOT NULL DEFAULT '',
status TEXT NOT NULL DEFAULT 'pending',
one_liner TEXT NOT NULL DEFAULT '',
narrative TEXT NOT NULL DEFAULT '',
verification_result TEXT NOT NULL DEFAULT '',
duration TEXT NOT NULL DEFAULT '',
completed_at TEXT DEFAULT NULL,
blocker_discovered INTEGER DEFAULT 0,
deviations TEXT NOT NULL DEFAULT '',
known_issues TEXT NOT NULL DEFAULT '',
key_files TEXT NOT NULL DEFAULT '[]',
key_decisions TEXT NOT NULL DEFAULT '[]',
full_summary_md TEXT NOT NULL DEFAULT '',
PRIMARY KEY (milestone_id, slice_id, id),
FOREIGN KEY (milestone_id, slice_id) REFERENCES slices(milestone_id, id)
)
`);
db.exec(`
CREATE TABLE IF NOT EXISTS verification_evidence (
id INTEGER PRIMARY KEY AUTOINCREMENT,
task_id TEXT NOT NULL DEFAULT '',
slice_id TEXT NOT NULL DEFAULT '',
milestone_id TEXT NOT NULL DEFAULT '',
command TEXT NOT NULL DEFAULT '',
exit_code INTEGER DEFAULT 0,
verdict TEXT NOT NULL DEFAULT '',
duration_ms INTEGER DEFAULT 0,
created_at TEXT NOT NULL DEFAULT '',
FOREIGN KEY (milestone_id, slice_id, task_id) REFERENCES tasks(milestone_id, slice_id, id)
)
`);
db.prepare(
"INSERT INTO schema_version (version, applied_at) VALUES (:version, :applied_at)",
).run({
":version": 5,
":applied_at": new Date().toISOString(),
});
}
if (currentVersion < 6) {
ensureColumn(
db,
"slices",
"full_summary_md",
`ALTER TABLE slices ADD COLUMN full_summary_md TEXT NOT NULL DEFAULT ''`,
);
ensureColumn(
db,
"slices",
"full_uat_md",
`ALTER TABLE slices ADD COLUMN full_uat_md TEXT NOT NULL DEFAULT ''`,
);
db.prepare(
"INSERT INTO schema_version (version, applied_at) VALUES (:version, :applied_at)",
).run({
":version": 6,
":applied_at": new Date().toISOString(),
});
}
if (currentVersion < 7) {
ensureColumn(
db,
"slices",
"depends",
`ALTER TABLE slices ADD COLUMN depends TEXT NOT NULL DEFAULT '[]'`,
);
ensureColumn(
db,
"slices",
"demo",
`ALTER TABLE slices ADD COLUMN demo TEXT NOT NULL DEFAULT ''`,
);
ensureColumn(
db,
"milestones",
"depends_on",
`ALTER TABLE milestones ADD COLUMN depends_on TEXT NOT NULL DEFAULT '[]'`,
);
db.prepare(
"INSERT INTO schema_version (version, applied_at) VALUES (:version, :applied_at)",
).run({
":version": 7,
":applied_at": new Date().toISOString(),
});
}
if (currentVersion < 8) {
ensureColumn(
db,
"milestones",
"vision",
`ALTER TABLE milestones ADD COLUMN vision TEXT NOT NULL DEFAULT ''`,
);
ensureColumn(
db,
"milestones",
"success_criteria",
`ALTER TABLE milestones ADD COLUMN success_criteria TEXT NOT NULL DEFAULT '[]'`,
);
ensureColumn(
db,
"milestones",
"key_risks",
`ALTER TABLE milestones ADD COLUMN key_risks TEXT NOT NULL DEFAULT '[]'`,
);
ensureColumn(
db,
"milestones",
"proof_strategy",
`ALTER TABLE milestones ADD COLUMN proof_strategy TEXT NOT NULL DEFAULT '[]'`,
);
ensureColumn(
db,
"milestones",
"verification_contract",
`ALTER TABLE milestones ADD COLUMN verification_contract TEXT NOT NULL DEFAULT ''`,
);
ensureColumn(
db,
"milestones",
"verification_integration",
`ALTER TABLE milestones ADD COLUMN verification_integration TEXT NOT NULL DEFAULT ''`,
);
ensureColumn(
db,
"milestones",
"verification_operational",
`ALTER TABLE milestones ADD COLUMN verification_operational TEXT NOT NULL DEFAULT ''`,
);
ensureColumn(
db,
"milestones",
"verification_uat",
`ALTER TABLE milestones ADD COLUMN verification_uat TEXT NOT NULL DEFAULT ''`,
);
ensureColumn(
db,
"milestones",
"definition_of_done",
`ALTER TABLE milestones ADD COLUMN definition_of_done TEXT NOT NULL DEFAULT '[]'`,
);
ensureColumn(
db,
"milestones",
"requirement_coverage",
`ALTER TABLE milestones ADD COLUMN requirement_coverage TEXT NOT NULL DEFAULT ''`,
);
ensureColumn(
db,
"milestones",
"boundary_map_markdown",
`ALTER TABLE milestones ADD COLUMN boundary_map_markdown TEXT NOT NULL DEFAULT ''`,
);
ensureColumn(
db,
"slices",
"goal",
`ALTER TABLE slices ADD COLUMN goal TEXT NOT NULL DEFAULT ''`,
);
ensureColumn(
db,
"slices",
"success_criteria",
`ALTER TABLE slices ADD COLUMN success_criteria TEXT NOT NULL DEFAULT ''`,
);
ensureColumn(
db,
"slices",
"proof_level",
`ALTER TABLE slices ADD COLUMN proof_level TEXT NOT NULL DEFAULT ''`,
);
ensureColumn(
db,
"slices",
"integration_closure",
`ALTER TABLE slices ADD COLUMN integration_closure TEXT NOT NULL DEFAULT ''`,
);
ensureColumn(
db,
"slices",
"observability_impact",
`ALTER TABLE slices ADD COLUMN observability_impact TEXT NOT NULL DEFAULT ''`,
);
ensureColumn(
db,
"tasks",
"description",
`ALTER TABLE tasks ADD COLUMN description TEXT NOT NULL DEFAULT ''`,
);
ensureColumn(
db,
"tasks",
"estimate",
`ALTER TABLE tasks ADD COLUMN estimate TEXT NOT NULL DEFAULT ''`,
);
ensureColumn(
db,
"tasks",
"files",
`ALTER TABLE tasks ADD COLUMN files TEXT NOT NULL DEFAULT '[]'`,
);
ensureColumn(
db,
"tasks",
"verify",
`ALTER TABLE tasks ADD COLUMN verify TEXT NOT NULL DEFAULT ''`,
);
ensureColumn(
db,
"tasks",
"inputs",
`ALTER TABLE tasks ADD COLUMN inputs TEXT NOT NULL DEFAULT '[]'`,
);
ensureColumn(
db,
"tasks",
"expected_output",
`ALTER TABLE tasks ADD COLUMN expected_output TEXT NOT NULL DEFAULT '[]'`,
);
ensureColumn(
db,
"tasks",
"observability_impact",
`ALTER TABLE tasks ADD COLUMN observability_impact TEXT NOT NULL DEFAULT ''`,
);
db.exec(`
CREATE TABLE IF NOT EXISTS replan_history (
id INTEGER PRIMARY KEY AUTOINCREMENT,
milestone_id TEXT NOT NULL DEFAULT '',
slice_id TEXT DEFAULT NULL,
task_id TEXT DEFAULT NULL,
summary TEXT NOT NULL DEFAULT '',
previous_artifact_path TEXT DEFAULT NULL,
replacement_artifact_path TEXT DEFAULT NULL,
created_at TEXT NOT NULL DEFAULT '',
FOREIGN KEY (milestone_id) REFERENCES milestones(id)
)
`);
db.exec(`
CREATE TABLE IF NOT EXISTS assessments (
path TEXT PRIMARY KEY,
milestone_id TEXT NOT NULL DEFAULT '',
slice_id TEXT DEFAULT NULL,
task_id TEXT DEFAULT NULL,
status TEXT NOT NULL DEFAULT '',
scope TEXT NOT NULL DEFAULT '',
full_content TEXT NOT NULL DEFAULT '',
created_at TEXT NOT NULL DEFAULT '',
FOREIGN KEY (milestone_id) REFERENCES milestones(id)
)
`);
db.exec(
"CREATE INDEX IF NOT EXISTS idx_replan_history_milestone ON replan_history(milestone_id, created_at)",
);
db.prepare(
"INSERT INTO schema_version (version, applied_at) VALUES (:version, :applied_at)",
).run({
":version": 8,
":applied_at": new Date().toISOString(),
});
}
if (currentVersion < 9) {
ensureColumn(
db,
"slices",
"sequence",
`ALTER TABLE slices ADD COLUMN sequence INTEGER DEFAULT 0`,
);
ensureColumn(
db,
"tasks",
"sequence",
`ALTER TABLE tasks ADD COLUMN sequence INTEGER DEFAULT 0`,
);
db.prepare(
"INSERT INTO schema_version (version, applied_at) VALUES (:version, :applied_at)",
).run({
":version": 9,
":applied_at": new Date().toISOString(),
});
}
if (currentVersion < 10) {
ensureColumn(
db,
"slices",
"replan_triggered_at",
`ALTER TABLE slices ADD COLUMN replan_triggered_at TEXT DEFAULT NULL`,
);
db.prepare(
"INSERT INTO schema_version (version, applied_at) VALUES (:version, :applied_at)",
).run({
":version": 10,
":applied_at": new Date().toISOString(),
});
}
if (currentVersion < 11) {
ensureColumn(
db,
"tasks",
"full_plan_md",
`ALTER TABLE tasks ADD COLUMN full_plan_md TEXT NOT NULL DEFAULT ''`,
);
// Add unique constraint to replan_history for idempotency:
// one replan record per blocker task per slice per milestone.
db.exec(`
CREATE UNIQUE INDEX IF NOT EXISTS idx_replan_history_unique
ON replan_history(milestone_id, slice_id, task_id)
WHERE slice_id IS NOT NULL AND task_id IS NOT NULL
`);
db.prepare(
"INSERT INTO schema_version (version, applied_at) VALUES (:version, :applied_at)",
).run({
":version": 11,
":applied_at": new Date().toISOString(),
});
}
if (currentVersion < 12) {
db.exec(`
CREATE TABLE IF NOT EXISTS quality_gates (
milestone_id TEXT NOT NULL,
slice_id TEXT NOT NULL,
gate_id TEXT NOT NULL,
scope TEXT NOT NULL DEFAULT 'slice',
task_id TEXT DEFAULT NULL,
status TEXT NOT NULL DEFAULT 'pending',
verdict TEXT NOT NULL DEFAULT '',
rationale TEXT NOT NULL DEFAULT '',
findings TEXT NOT NULL DEFAULT '',
evaluated_at TEXT DEFAULT NULL,
PRIMARY KEY (milestone_id, slice_id, gate_id, COALESCE(task_id, '')),
FOREIGN KEY (milestone_id, slice_id) REFERENCES slices(milestone_id, id)
)
`);
db.prepare(
"INSERT INTO schema_version (version, applied_at) VALUES (:version, :applied_at)",
).run({
":version": 12,
":applied_at": new Date().toISOString(),
});
}
if (currentVersion < 13) {
// Hot-path indexes for auto-loop dispatch queries
db.exec(
"CREATE INDEX IF NOT EXISTS idx_tasks_active ON tasks(milestone_id, slice_id, status)",
);
db.exec(
"CREATE INDEX IF NOT EXISTS idx_slices_active ON slices(milestone_id, status)",
);
db.exec(
"CREATE INDEX IF NOT EXISTS idx_milestones_status ON milestones(status)",
);
db.exec(
"CREATE INDEX IF NOT EXISTS idx_quality_gates_pending ON quality_gates(milestone_id, slice_id, status)",
);
db.exec(
"CREATE INDEX IF NOT EXISTS idx_verification_evidence_task ON verification_evidence(milestone_id, slice_id, task_id)",
);
ensureVerificationEvidenceDedupIndex(db);
db.prepare(
"INSERT INTO schema_version (version, applied_at) VALUES (:version, :applied_at)",
).run({
":version": 13,
":applied_at": new Date().toISOString(),
});
}
if (currentVersion < 14) {
db.exec(`
CREATE TABLE IF NOT EXISTS slice_dependencies (
milestone_id TEXT NOT NULL,
slice_id TEXT NOT NULL,
depends_on_slice_id TEXT NOT NULL,
PRIMARY KEY (milestone_id, slice_id, depends_on_slice_id),
FOREIGN KEY (milestone_id, slice_id) REFERENCES slices(milestone_id, id),
FOREIGN KEY (milestone_id, depends_on_slice_id) REFERENCES slices(milestone_id, id)
)
`);
db.exec(
"CREATE INDEX IF NOT EXISTS idx_slice_deps_target ON slice_dependencies(milestone_id, depends_on_slice_id)",
);
db.prepare(
"INSERT INTO schema_version (version, applied_at) VALUES (:version, :applied_at)",
).run({
":version": 14,
":applied_at": new Date().toISOString(),
});
}
if (currentVersion < 15) {
db.exec(`
CREATE TABLE IF NOT EXISTS gate_runs (
id INTEGER PRIMARY KEY AUTOINCREMENT,
trace_id TEXT NOT NULL,
turn_id TEXT NOT NULL,
gate_id TEXT NOT NULL,
gate_type TEXT NOT NULL DEFAULT '',
unit_type TEXT DEFAULT NULL,
unit_id TEXT DEFAULT NULL,
milestone_id TEXT DEFAULT NULL,
slice_id TEXT DEFAULT NULL,
task_id TEXT DEFAULT NULL,
outcome TEXT NOT NULL DEFAULT 'pass',
failure_class TEXT NOT NULL DEFAULT 'none',
rationale TEXT NOT NULL DEFAULT '',
findings TEXT NOT NULL DEFAULT '',
attempt INTEGER NOT NULL DEFAULT 1,
max_attempts INTEGER NOT NULL DEFAULT 1,
retryable INTEGER NOT NULL DEFAULT 0,
evaluated_at TEXT NOT NULL DEFAULT '',
duration_ms INTEGER DEFAULT NULL,
cost_micro_usd INTEGER DEFAULT NULL
)
`);
db.exec(`
CREATE TABLE IF NOT EXISTS turn_git_transactions (
trace_id TEXT NOT NULL,
turn_id TEXT NOT NULL,
unit_type TEXT DEFAULT NULL,
unit_id TEXT DEFAULT NULL,
stage TEXT NOT NULL DEFAULT 'turn-start',
action TEXT NOT NULL DEFAULT 'status-only',
push INTEGER NOT NULL DEFAULT 0,
status TEXT NOT NULL DEFAULT 'ok',
error TEXT DEFAULT NULL,
metadata_json TEXT NOT NULL DEFAULT '{}',
updated_at TEXT NOT NULL DEFAULT '',
PRIMARY KEY (trace_id, turn_id, stage)
)
`);
db.exec(`
CREATE TABLE IF NOT EXISTS audit_events (
event_id TEXT PRIMARY KEY,
trace_id TEXT NOT NULL,
turn_id TEXT DEFAULT NULL,
caused_by TEXT DEFAULT NULL,
category TEXT NOT NULL,
type TEXT NOT NULL,
ts TEXT NOT NULL,
payload_json TEXT NOT NULL DEFAULT '{}'
)
`);
db.exec(`
CREATE TABLE IF NOT EXISTS audit_turn_index (
trace_id TEXT NOT NULL,
turn_id TEXT NOT NULL,
first_ts TEXT NOT NULL,
last_ts TEXT NOT NULL,
event_count INTEGER NOT NULL DEFAULT 0,
PRIMARY KEY (trace_id, turn_id)
)
`);
db.exec(
"CREATE INDEX IF NOT EXISTS idx_gate_runs_turn ON gate_runs(trace_id, turn_id)",
);
db.exec(
"CREATE INDEX IF NOT EXISTS idx_gate_runs_lookup ON gate_runs(milestone_id, slice_id, task_id, gate_id)",
);
db.exec(
"CREATE INDEX IF NOT EXISTS idx_turn_git_tx_turn ON turn_git_transactions(trace_id, turn_id)",
);
db.exec(
"CREATE INDEX IF NOT EXISTS idx_audit_events_trace ON audit_events(trace_id, ts)",
);
db.exec(
"CREATE INDEX IF NOT EXISTS idx_audit_events_turn ON audit_events(trace_id, turn_id, ts)",
);
db.prepare(
"INSERT INTO schema_version (version, applied_at) VALUES (:version, :applied_at)",
).run({
":version": 15,
":applied_at": new Date().toISOString(),
});
}
if (currentVersion < 16) {
db.exec(`
CREATE TABLE IF NOT EXISTS llm_task_outcomes (
model_id TEXT NOT NULL,
provider TEXT NOT NULL,
unit_type TEXT NOT NULL,
unit_id TEXT NOT NULL,
succeeded INTEGER NOT NULL DEFAULT 0,
retries INTEGER NOT NULL DEFAULT 0,
escalated INTEGER NOT NULL DEFAULT 0,
verification_passed INTEGER DEFAULT NULL,
blocker_discovered INTEGER NOT NULL DEFAULT 0,
duration_ms INTEGER DEFAULT NULL,
tokens_total INTEGER DEFAULT NULL,
cost_usd REAL DEFAULT NULL,
recorded_at INTEGER NOT NULL
)
`);
db.exec(
"CREATE UNIQUE INDEX IF NOT EXISTS idx_llm_task_outcomes_identity ON llm_task_outcomes(unit_type, unit_id, recorded_at)",
);
db.exec(
"CREATE INDEX IF NOT EXISTS idx_llm_task_outcomes_model_unit ON llm_task_outcomes(model_id, unit_type, recorded_at DESC)",
);
db.exec(
"CREATE INDEX IF NOT EXISTS idx_llm_task_outcomes_unit ON llm_task_outcomes(unit_type, recorded_at DESC)",
);
db.exec(
"CREATE INDEX IF NOT EXISTS idx_llm_task_outcomes_provider ON llm_task_outcomes(provider, recorded_at DESC)",
);
db.prepare(
"INSERT INTO schema_version (version, applied_at) VALUES (:version, :applied_at)",
).run({
":version": 16,
":applied_at": new Date().toISOString(),
});
}
if (currentVersion < 17) {
ensureColumn(
db,
"tasks",
"verification_status",
`ALTER TABLE tasks ADD COLUMN verification_status TEXT NOT NULL DEFAULT ''`,
);
// Backfill verification_status from existing verification_evidence rows so the
// prior-task guard works on databases upgraded mid-project (not just new ones).
db.exec(`
UPDATE tasks
SET verification_status = CASE
WHEN (SELECT COUNT(*) FROM verification_evidence ve
WHERE ve.milestone_id = tasks.milestone_id
AND ve.slice_id = tasks.slice_id
AND ve.task_id = tasks.id) = 0
THEN ''
WHEN (SELECT COUNT(*) FROM verification_evidence ve
WHERE ve.milestone_id = tasks.milestone_id
AND ve.slice_id = tasks.slice_id
AND ve.task_id = tasks.id
AND ve.exit_code != 0) = 0
THEN 'all_pass'
WHEN (SELECT COUNT(*) FROM verification_evidence ve
WHERE ve.milestone_id = tasks.milestone_id
AND ve.slice_id = tasks.slice_id
AND ve.task_id = tasks.id
AND ve.exit_code = 0) > 0
THEN 'partial'
ELSE 'all_fail'
END
WHERE tasks.status IN ('complete', 'done')
`);
db.prepare(
"INSERT INTO schema_version (version, applied_at) VALUES (:version, :applied_at)",
).run({
":version": 17,
":applied_at": new Date().toISOString(),
});
}
if (currentVersion < 18) {
ensureColumn(
db,
"slices",
"adversarial_partner",
`ALTER TABLE slices ADD COLUMN adversarial_partner TEXT NOT NULL DEFAULT ''`,
);
ensureColumn(
db,
"slices",
"adversarial_combatant",
`ALTER TABLE slices ADD COLUMN adversarial_combatant TEXT NOT NULL DEFAULT ''`,
);
ensureColumn(
db,
"slices",
"adversarial_architect",
`ALTER TABLE slices ADD COLUMN adversarial_architect TEXT NOT NULL DEFAULT ''`,
);
db.prepare(
"INSERT INTO schema_version (version, applied_at) VALUES (:version, :applied_at)",
).run({
":version": 18,
":applied_at": new Date().toISOString(),
});
}
if (currentVersion < 19) {
ensureColumn(
db,
"slices",
"planning_meeting_json",
`ALTER TABLE slices ADD COLUMN planning_meeting_json TEXT NOT NULL DEFAULT ''`,
);
db.prepare(
"INSERT INTO schema_version (version, applied_at) VALUES (:version, :applied_at)",
).run({
":version": 19,
":applied_at": new Date().toISOString(),
});
}
if (currentVersion < 20) {
ensureColumn(
db,
"milestones",
"vision_meeting_json",
`ALTER TABLE milestones ADD COLUMN vision_meeting_json TEXT NOT NULL DEFAULT ''`,
);
db.prepare(
"INSERT INTO schema_version (version, applied_at) VALUES (:version, :applied_at)",
).run({
":version": 20,
":applied_at": new Date().toISOString(),
});
}
if (currentVersion < 21) {
ensureRepoProfileTables(db);
db.prepare(
"INSERT INTO schema_version (version, applied_at) VALUES (:version, :applied_at)",
).run({
":version": 21,
":applied_at": new Date().toISOString(),
});
}
if (currentVersion < 22) {
// SF ADR-011: progressive planning. is_sketch=1 means the slice is a 2-3
// sentence sketch awaiting refine-slice expansion; refine fills in the
// real plan and clears the flag. sketch_scope holds the milestone
// planner's stored scope hint that refine treats as a hard boundary.
ensureColumn(
db,
"slices",
"is_sketch",
`ALTER TABLE slices ADD COLUMN is_sketch INTEGER NOT NULL DEFAULT 0`,
);
ensureColumn(
db,
"slices",
"sketch_scope",
`ALTER TABLE slices ADD COLUMN sketch_scope TEXT NOT NULL DEFAULT ''`,
);
db.prepare(
"INSERT INTO schema_version (version, applied_at) VALUES (:version, :applied_at)",
).run({
":version": 22,
":applied_at": new Date().toISOString(),
});
}
if (currentVersion < 23) {
// ADR-011 Phase 2 (SF ADR): mid-execution escalation. escalation_pending=1
// marks a task that paused for a user decision; escalation_artifact_path
// points to the T##-ESCALATION.json file containing options + recommendation.
// State derivation will emit phase='escalating-task' when any task in the
// active slice has escalation_pending=1; dispatch returns 'stop' so the
// loop never bypasses a pending decision.
ensureColumn(
db,
"tasks",
"escalation_pending",
`ALTER TABLE tasks ADD COLUMN escalation_pending INTEGER NOT NULL DEFAULT 0`,
);
ensureColumn(
db,
"tasks",
"escalation_artifact_path",
`ALTER TABLE tasks ADD COLUMN escalation_artifact_path TEXT DEFAULT NULL`,
);
try {
db.exec(
"CREATE INDEX IF NOT EXISTS idx_tasks_escalation_pending ON tasks(milestone_id, slice_id, escalation_pending)",
);
} catch {
/* index creation is opportunistic — fall through if backend lacks it */
}
db.prepare(
"INSERT INTO schema_version (version, applied_at) VALUES (:version, :applied_at)",
).run({
":version": 23,
":applied_at": new Date().toISOString(),
});
}
if (currentVersion < 24) {
// ADR-011 P2 (SF ADR): the third escalation flag for the
// continueWithDefault=true case — an artifact is recorded for human
// review later, but the loop is NOT paused. Mutually exclusive with
// escalation_pending (the writer flips one or the other).
ensureColumn(
db,
"tasks",
"escalation_awaiting_review",
`ALTER TABLE tasks ADD COLUMN escalation_awaiting_review INTEGER NOT NULL DEFAULT 0`,
);
db.prepare(
"INSERT INTO schema_version (version, applied_at) VALUES (:version, :applied_at)",
).run({
":version": 24,
":applied_at": new Date().toISOString(),
});
}
if (currentVersion < 25) {
// SF ADR-011 P2 carry-forward: when an escalation is resolved, the user's
// choice should be visible to the next execute-task agent in the same
// slice. escalation_override_applied=0 marks "resolved but not yet
// injected into a downstream prompt"; the prompt builder calls
// claimEscalationOverride which atomically flips it to 1 (idempotent
// race-safe claim). Per-task granularity so multi-task slices can
// carry multiple resolved escalations forward independently.
ensureColumn(
db,
"tasks",
"escalation_override_applied",
`ALTER TABLE tasks ADD COLUMN escalation_override_applied INTEGER NOT NULL DEFAULT 0`,
);
db.prepare(
"INSERT INTO schema_version (version, applied_at) VALUES (:version, :applied_at)",
).run({
":version": 25,
":applied_at": new Date().toISOString(),
});
}
if (currentVersion < 26) {
db.exec(`
CREATE TABLE IF NOT EXISTS uok_runs (
run_id TEXT PRIMARY KEY,
session_id TEXT DEFAULT NULL,
path TEXT NOT NULL DEFAULT '',
status TEXT NOT NULL DEFAULT 'started',
started_at TEXT NOT NULL,
ended_at TEXT DEFAULT NULL,
error TEXT DEFAULT NULL,
flags_json TEXT NOT NULL DEFAULT '{}',
updated_at TEXT NOT NULL
)
`);
db.exec(
"CREATE INDEX IF NOT EXISTS idx_uok_runs_status_started ON uok_runs(status, started_at DESC)",
);
db.exec(
"CREATE INDEX IF NOT EXISTS idx_uok_runs_session ON uok_runs(session_id, started_at DESC)",
);
db.prepare(
"INSERT INTO schema_version (version, applied_at) VALUES (:version, :applied_at)",
).run({
":version": 26,
":applied_at": new Date().toISOString(),
});
}
if (currentVersion < 27) {
ensureSolverEvalTables(db);
db.prepare(
"INSERT INTO schema_version (version, applied_at) VALUES (:version, :applied_at)",
).run({
":version": 27,
":applied_at": new Date().toISOString(),
});
}
if (currentVersion < 28) {
// UOK observability: gate execution latency
ensureColumn(
db,
"gate_runs",
"duration_ms",
"ALTER TABLE gate_runs ADD COLUMN duration_ms INTEGER DEFAULT NULL",
);
// UOK circuit breaker state
db.exec(`
CREATE TABLE IF NOT EXISTS gate_circuit_breakers (
gate_id TEXT PRIMARY KEY,
state TEXT NOT NULL DEFAULT 'closed',
failure_streak INTEGER NOT NULL DEFAULT 0,
last_failure_at TEXT DEFAULT NULL,
opened_at TEXT DEFAULT NULL,
half_open_attempts INTEGER NOT NULL DEFAULT 0,
updated_at TEXT NOT NULL DEFAULT ''
)
`);
db.prepare(
"INSERT INTO schema_version (version, applied_at) VALUES (:version, :applied_at)",
).run({
":version": 28,
":applied_at": new Date().toISOString(),
});
}
if (currentVersion < 29) {
ensureHeadlessRunTables(db);
db.prepare(
"INSERT INTO schema_version (version, applied_at) VALUES (:version, :applied_at)",
).run({
":version": 29,
":applied_at": new Date().toISOString(),
});
}
if (currentVersion < 30) {
ensureSelfFeedbackTables(db);
db.prepare(
"INSERT INTO schema_version (version, applied_at) VALUES (:version, :applied_at)",
).run({
":version": 30,
":applied_at": new Date().toISOString(),
});
}
if (currentVersion < 31) {
ensureUokMessageTables(db);
db.prepare(
"INSERT INTO schema_version (version, applied_at) VALUES (:version, :applied_at)",
).run({
":version": 31,
":applied_at": new Date().toISOString(),
});
}
if (currentVersion < 32) {
ensureTaskCreatedAtColumn(db);
ensureSpecSchemaTables(db);
// Populate spec tables from existing spec columns in runtime tables
populateSpecTablesFromExisting(db);
db.prepare(
"INSERT INTO schema_version (version, applied_at) VALUES (:version, :applied_at)",
).run({
":version": 32,
":applied_at": new Date().toISOString(),
});
}
if (currentVersion < 33) {
ensureColumn(
db,
"milestones",
"sequence",
`ALTER TABLE milestones ADD COLUMN sequence INTEGER DEFAULT 0`,
);
db.prepare(
"INSERT INTO schema_version (version, applied_at) VALUES (:version, :applied_at)",
).run({
":version": 33,
":applied_at": new Date().toISOString(),
});
}
if (currentVersion < 34) {
ensureTaskCreatedAtColumn(db);
db.prepare(
"INSERT INTO schema_version (version, applied_at) VALUES (:version, :applied_at)",
).run({
":version": 34,
":applied_at": new Date().toISOString(),
});
}
if (currentVersion < 35) {
ensureBacklogTables(db);
db.prepare(
"INSERT INTO schema_version (version, applied_at) VALUES (:version, :applied_at)",
).run({
":version": 35,
":applied_at": new Date().toISOString(),
});
}
if (currentVersion < 36) {
migrateCostUsdToMicroUsd(db);
db.prepare(
"INSERT INTO schema_version (version, applied_at) VALUES (:version, :applied_at)",
).run({
":version": 36,
":applied_at": new Date().toISOString(),
});
}
if (currentVersion < 37) {
ensureScheduleTables(db);
db.prepare(
"INSERT INTO schema_version (version, applied_at) VALUES (:version, :applied_at)",
).run({
":version": 37,
":applied_at": new Date().toISOString(),
});
}
if (currentVersion < 38) {
try {
db.exec(
"ALTER TABLE memories ADD COLUMN tags TEXT NOT NULL DEFAULT '[]'",
);
} catch {
// Column may already exist on fresh DBs
}
db.prepare(
"INSERT INTO schema_version (version, applied_at) VALUES (:version, :applied_at)",
).run({
":version": 38,
":applied_at": new Date().toISOString(),
});
}
if (currentVersion < 39) {
db.exec(
"CREATE INDEX IF NOT EXISTS idx_memory_sources_content_hash ON memory_sources(content_hash)",
);
db.exec(
"CREATE INDEX IF NOT EXISTS idx_memories_category ON memories(superseded_by, category)",
);
db.prepare(
"INSERT INTO schema_version (version, applied_at) VALUES (:version, :applied_at)",
).run({
":version": 39,
":applied_at": new Date().toISOString(),
});
}
if (currentVersion < 40) {
db.exec(`
CREATE TABLE IF NOT EXISTS judgments (
id INTEGER PRIMARY KEY AUTOINCREMENT,
unit_id TEXT NOT NULL,
decision TEXT NOT NULL DEFAULT '',
alternatives_json TEXT NOT NULL DEFAULT '[]',
reasoning TEXT NOT NULL DEFAULT '',
confidence TEXT NOT NULL DEFAULT 'medium',
ts TEXT NOT NULL
)
`);
db.exec(
"CREATE INDEX IF NOT EXISTS idx_judgments_unit_id ON judgments(unit_id, ts DESC)",
);
db.prepare(
"INSERT INTO schema_version (version, applied_at) VALUES (:version, :applied_at)",
).run({
":version": 40,
":applied_at": new Date().toISOString(),
});
}
if (currentVersion < 41) {
ensureRetrievalEvidenceTables(db);
db.prepare(
"INSERT INTO schema_version (version, applied_at) VALUES (:version, :applied_at)",
).run({
":version": 41,
":applied_at": new Date().toISOString(),
});
}
if (currentVersion < 42) {
ensureColumn(
db,
"milestones",
"product_research_json",
`ALTER TABLE milestones ADD COLUMN product_research_json TEXT NOT NULL DEFAULT ''`,
);
ensureColumn(
db,
"milestone_specs",
"product_research_json",
`ALTER TABLE milestone_specs ADD COLUMN product_research_json TEXT DEFAULT ''`,
);
db.prepare(
"INSERT INTO schema_version (version, applied_at) VALUES (:version, :applied_at)",
).run({
":version": 42,
":applied_at": new Date().toISOString(),
});
}
if (currentVersion < 43) {
db.exec(`
CREATE TABLE IF NOT EXISTS session_mode_state (
id INTEGER PRIMARY KEY CHECK (id = 1),
work_mode TEXT NOT NULL DEFAULT 'chat',
run_control TEXT NOT NULL DEFAULT 'manual',
permission_profile TEXT NOT NULL DEFAULT 'restricted',
model_mode TEXT NOT NULL DEFAULT 'smart',
surface TEXT NOT NULL DEFAULT 'tui',
updated_at TEXT NOT NULL DEFAULT ''
)
`);
db.exec(`
INSERT OR IGNORE INTO session_mode_state (id, work_mode, run_control, permission_profile, model_mode, surface, updated_at)
VALUES (1, 'chat', 'manual', 'restricted', 'smart', 'tui', datetime('now'))
`);
db.prepare(
"INSERT INTO schema_version (version, applied_at) VALUES (:version, :applied_at)",
).run({
":version": 43,
":applied_at": new Date().toISOString(),
});
}
db.exec("COMMIT");
} catch (err) {
db.exec("ROLLBACK");
throw err;
}
}
// ─── Module-level connection state ───────────────────────────────────────
// The single open adapter for this process, or null when no DB is open.
let currentDb = null;
// Path the adapter was opened against (":memory:" or a file path); null when closed.
let currentPath = null;
// process.pid of the opener; 0 when no DB is open (see getDbOwnerPid()).
let currentPid = 0;
// Guard so the process "exit" close handler is registered at most once.
let _exitHandlerRegistered = false;
// True once openDatabase() has been called this session (see wasDbOpenAttempted()).
let _dbOpenAttempted = false;
/**
 * Report the SQLite provider backing this module.
 * Triggers the lazy provider probe first, then returns the fixed
 * provider name ("node:sqlite" is built in on Node >= 26).
 */
export function getDbProvider() {
  // Make sure the one-shot load attempt has run before reporting.
  loadProvider();
  const providerName = "node:sqlite";
  return providerName;
}
/**
 * Whether a database connection is currently open and queryable.
 * @returns {boolean} true when an adapter is held, false otherwise.
 */
export function isDbAvailable() {
  return currentDb != null;
}
/**
 * Whether openDatabase() has been invoked at least once this session.
 * Distinguishes "DB not yet initialized" from "DB genuinely unavailable"
 * so early callers (e.g. before_agent_start context injection) don't
 * raise a false degraded-mode warning.
 * @returns {boolean}
 */
export function wasDbOpenAttempted() {
  return _dbOpenAttempted === true;
}
/**
 * Current database adapter handle.
 * @returns {object|null} the open adapter, or null when no DB is open.
 */
export function getDatabase() {
  const adapter = currentDb;
  return adapter;
}
/**
 * Open (or reuse) the database at `path`.
 *
 * Reopening the currently-open path is a no-op returning true; opening a
 * different path first closes the current connection. Returns false when
 * the raw open fails. Schema-init failures on file-backed DBs whose error
 * message contains "malformed" are retried once after VACUUM, which can
 * rebuild a corrupt freelist (see #2519).
 *
 * @param {string} path - File path or ":memory:".
 * @returns {boolean} true when the DB is open and schema-initialized.
 */
export function openDatabase(path) {
  _dbOpenAttempted = true;
  if (currentDb && currentPath !== path) closeDatabase();
  if (currentDb && currentPath === path) return true;
  const rawDb = openRawDb(path);
  if (!rawDb) return false;
  const adapter = createAdapter(rawDb);
  const fileBacked = path !== ":memory:";
  try {
    initSchema(adapter, fileBacked);
  } catch (err) {
    // Corrupt freelist: DDL fails with "malformed" but VACUUM can rebuild.
    // Attempt VACUUM recovery before giving up (see #2519).
    if (
      fileBacked &&
      err instanceof Error &&
      err.message?.includes("malformed")
    ) {
      try {
        adapter.exec("VACUUM");
        initSchema(adapter, fileBacked);
        process.stderr.write("sf-db: recovered corrupt database via VACUUM\n");
      } catch (retryErr) {
        try {
          adapter.close();
        } catch (e) {
          logWarning("db", `close after VACUUM failed: ${e.message}`);
        }
        throw retryErr;
      }
    } else {
      try {
        adapter.close();
      } catch (e) {
        // Bug fix: this branch never attempted a VACUUM, so the previous
        // "close after VACUUM failed" text (copy-pasted from the recovery
        // branch above) was misleading in logs.
        logWarning("db", `close after schema init failure failed: ${e.message}`);
      }
      throw err;
    }
  }
  currentDb = adapter;
  currentPath = path;
  currentPid = process.pid;
  if (!_exitHandlerRegistered) {
    _exitHandlerRegistered = true;
    // Best-effort close on process exit (checkpoints WAL); register once.
    process.on("exit", () => {
      try {
        closeDatabase();
      } catch (e) {
        logWarning("db", `exit handler close failed: ${e.message}`);
      }
    });
  }
  return true;
}
/**
 * Close the database connection.
 *
 * Performs best-effort maintenance before closing — each step is wrapped
 * in its own try/catch so one failure does not prevent the others:
 *   1. WAL checkpoint (TRUNCATE) to fold the write-ahead log into the DB file.
 *   2. Incremental vacuum (up to 64 pages) to reclaim space without a
 *      full blocking VACUUM.
 *   3. close() on the adapter.
 * Finally resets all module-level connection state, including the
 * _dbOpenAttempted flag. No-op when no database is open.
 */
export function closeDatabase() {
  if (currentDb) {
    try {
      currentDb.exec("PRAGMA wal_checkpoint(TRUNCATE)");
    } catch (e) {
      logWarning("db", `WAL checkpoint failed: ${e.message}`);
    }
    try {
      // Incremental vacuum to reclaim space without blocking
      currentDb.exec("PRAGMA incremental_vacuum(64)");
    } catch (e) {
      logWarning("db", `incremental vacuum failed: ${e.message}`);
    }
    try {
      currentDb.close();
    } catch (e) {
      logWarning("db", `database close failed: ${e.message}`);
    }
    currentDb = null;
    currentPath = null;
    currentPid = 0;
    _dbOpenAttempted = false;
  }
}
/**
 * Run a full VACUUM to reclaim disk space and optimize the database file.
 * Expensive and blocking — call sparingly (e.g. after milestone
 * completion). No-op when no database is open; failures are logged as
 * warnings rather than thrown.
 */
export function vacuumDatabase() {
  if (currentDb == null) return;
  try {
    currentDb.exec("VACUUM");
  } catch (err) {
    logWarning("db", `VACUUM failed: ${err.message}`);
  }
}
// Current transaction nesting depth. >0 means a BEGIN is already open, so
// re-entrant transaction()/readTransaction() calls run their callback
// directly — SQLite does not support nested BEGIN/COMMIT.
let _txDepth = 0;
/**
 * Execute `fn` inside a BEGIN…COMMIT transaction, rolling back on error.
 *
 * Re-entrant: SQLite cannot nest BEGIN, so when a transaction is already
 * open this just runs `fn()` within it while tracking depth.
 *
 * @param {Function} fn - Callback performing the DB work; its return value
 *   is passed through.
 * @throws {SFError} when no database is open; rethrows any error from `fn`.
 */
export function transaction(fn) {
  if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open");
  const alreadyInTx = _txDepth > 0;
  _txDepth++;
  try {
    if (alreadyInTx) {
      // Nested call: run inside the enclosing transaction.
      return fn();
    }
    currentDb.exec("BEGIN");
    try {
      const value = fn();
      currentDb.exec("COMMIT");
      return value;
    } catch (err) {
      currentDb.exec("ROLLBACK");
      throw err;
    }
  } finally {
    _txDepth--;
  }
}
/**
 * Wrap a block of reads in a DEFERRED transaction so that all SELECTs
 * observe a consistent snapshot of the DB even if a concurrent writer
 * commits between them. Use this for multi-query read flows (e.g. tool
 * executors that query milestone + slices + counts and want one snapshot).
 * Re-entrant — if already inside a transaction, runs fn() without starting
 * a nested one.
 *
 * @param {Function} fn - Callback performing the reads; its return value
 *   is passed through.
 * @throws {SFError} when no database is open; rethrows any error from `fn`.
 */
export function readTransaction(fn) {
  if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open");
  if (_txDepth > 0) {
    _txDepth++;
    try {
      return fn();
    } finally {
      _txDepth--;
    }
  }
  _txDepth++;
  currentDb.exec("BEGIN DEFERRED");
  try {
    const result = fn();
    currentDb.exec("COMMIT");
    return result;
  } catch (err) {
    try {
      currentDb.exec("ROLLBACK");
    } catch (rollbackErr) {
      // A failed ROLLBACK after a failed read is a split-brain signal —
      // the transaction is in an indeterminate state. Surface it via the
      // logger instead of swallowing it.
      // Fix: the log label previously said "snapshotState", a stale name
      // from an earlier refactor; use this function's actual name so log
      // searches find the right code.
      logError("db", "readTransaction ROLLBACK failed", {
        error: rollbackErr.message,
      });
    }
    throw err;
  } finally {
    _txDepth--;
  }
}
/**
 * Insert a new decision row. `made_by` defaults to "agent" when absent.
 * @throws {SFError} when no database is open.
 */
export function insertDecision(d) {
  if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open");
  const params = {
    ":id": d.id,
    ":when_context": d.when_context,
    ":scope": d.scope,
    ":decision": d.decision,
    ":choice": d.choice,
    ":rationale": d.rationale,
    ":revisable": d.revisable,
    ":made_by": d.made_by ?? "agent",
    ":superseded_by": d.superseded_by,
  };
  const stmt = currentDb.prepare(
    `INSERT INTO decisions (id, when_context, scope, decision, choice, rationale, revisable, made_by, superseded_by)
     VALUES (:id, :when_context, :scope, :decision, :choice, :rationale, :revisable, :made_by, :superseded_by)`,
  );
  stmt.run(params);
}
/**
 * Fetch a single decision by id.
 * @returns {object|null} the decision row mapped to a plain record, or
 *   null when the DB is closed or the id is unknown.
 */
export function getDecisionById(id) {
  if (currentDb == null) return null;
  const stmt = currentDb.prepare("SELECT * FROM decisions WHERE id = ?");
  const row = stmt.get(id);
  if (row == null) return null;
  return {
    seq: row.seq,
    id: row.id,
    when_context: row.when_context,
    scope: row.scope,
    decision: row.decision,
    choice: row.choice,
    rationale: row.rationale,
    revisable: row.revisable,
    made_by: row.made_by ?? "agent",
    superseded_by: row.superseded_by ?? null,
  };
}
/**
 * List all non-superseded decisions via the active_decisions view.
 * @returns {object[]} mapped decision records (superseded_by forced null).
 */
export function getActiveDecisions() {
  if (currentDb == null) return [];
  const toRecord = (row) => ({
    seq: row.seq,
    id: row.id,
    when_context: row.when_context,
    scope: row.scope,
    decision: row.decision,
    choice: row.choice,
    rationale: row.rationale,
    revisable: row.revisable,
    made_by: row.made_by ?? "agent",
    superseded_by: null,
  });
  return currentDb
    .prepare("SELECT * FROM active_decisions")
    .all()
    .map(toRecord);
}
/**
 * Insert a new requirement row.
 * @throws {SFError} when no database is open.
 */
export function insertRequirement(r) {
  if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open");
  const params = {
    ":id": r.id,
    ":class": r.class,
    ":status": r.status,
    ":description": r.description,
    ":why": r.why,
    ":source": r.source,
    ":primary_owner": r.primary_owner,
    ":supporting_slices": r.supporting_slices,
    ":validation": r.validation,
    ":notes": r.notes,
    ":full_content": r.full_content,
    ":superseded_by": r.superseded_by,
  };
  const stmt = currentDb.prepare(
    `INSERT INTO requirements (id, class, status, description, why, source, primary_owner, supporting_slices, validation, notes, full_content, superseded_by)
     VALUES (:id, :class, :status, :description, :why, :source, :primary_owner, :supporting_slices, :validation, :notes, :full_content, :superseded_by)`,
  );
  stmt.run(params);
}
/**
 * Fetch a single requirement by id.
 * @returns {object|null} the mapped requirement record, or null when the
 *   DB is closed or the id is unknown.
 */
export function getRequirementById(id) {
  if (currentDb == null) return null;
  const stmt = currentDb.prepare("SELECT * FROM requirements WHERE id = ?");
  const row = stmt.get(id);
  if (row == null) return null;
  return {
    id: row.id,
    class: row.class,
    status: row.status,
    description: row.description,
    why: row.why,
    source: row.source,
    primary_owner: row.primary_owner,
    supporting_slices: row.supporting_slices,
    validation: row.validation,
    notes: row.notes,
    full_content: row.full_content,
    superseded_by: row.superseded_by ?? null,
  };
}
/**
 * List all non-superseded requirements via the active_requirements view.
 * @returns {object[]} mapped requirement records (superseded_by forced null).
 */
export function getActiveRequirements() {
  if (currentDb == null) return [];
  const toRecord = (row) => ({
    id: row.id,
    class: row.class,
    status: row.status,
    description: row.description,
    why: row.why,
    source: row.source,
    primary_owner: row.primary_owner,
    supporting_slices: row.supporting_slices,
    validation: row.validation,
    notes: row.notes,
    full_content: row.full_content,
    superseded_by: null,
  });
  return currentDb
    .prepare("SELECT * FROM active_requirements")
    .all()
    .map(toRecord);
}
/** PID of the process that opened the current DB; 0 when no DB is open. */
export function getDbOwnerPid() {
  return currentPid;
}
export function getDbPath() {
return currentPath;
}
/**
 * Load persisted session mode state from DB.
 *
 * Purpose: restore mode state across session restarts.
 * Consumer: AutoSession initialization.
 *
 * @returns {object|null} the saved mode state with defaults applied, or
 *   null when the DB is closed, the row is absent, or the query fails
 *   (e.g. table missing on older databases).
 */
export function loadSessionModeState() {
  if (!currentDb) return null;
  let row;
  try {
    row = currentDb
      .prepare("SELECT * FROM session_mode_state WHERE id = 1")
      .get();
  } catch {
    // Best-effort: treat a failed read the same as "no saved state".
    return null;
  }
  if (row == null) return null;
  return {
    workMode: row.work_mode ?? "chat",
    runControl: row.run_control ?? "manual",
    permissionProfile: row.permission_profile ?? "restricted",
    modelMode: row.model_mode ?? "smart",
    surface: row.surface ?? "tui",
    updatedAt: row.updated_at ?? null,
  };
}
/**
 * Persist the current session mode into the project database.
 *
 * Purpose: keep work mode, run control, permission profile, and model mode
 * stable across reload/resume without letting command handlers write SQL.
 * Consumer: AutoSession.setMode() after validated mode transitions.
 *
 * @returns {boolean} false when no database is open, true after the upsert.
 */
export function saveSessionModeState(mode) {
  if (!currentDb) return false;
  const params = {
    ":workMode": mode.workMode,
    ":runControl": mode.runControl,
    ":permissionProfile": mode.permissionProfile,
    ":modelMode": mode.modelMode,
    ":surface": mode.surface ?? "tui",
    ":updatedAt": mode.updatedAt ?? new Date().toISOString(),
  };
  // Single-row table keyed by id=1; conflicts overwrite every mode column.
  const stmt = currentDb.prepare(`
      INSERT INTO session_mode_state (id, work_mode, run_control, permission_profile, model_mode, surface, updated_at)
      VALUES (1, :workMode, :runControl, :permissionProfile, :modelMode, :surface, :updatedAt)
      ON CONFLICT(id) DO UPDATE SET
        work_mode = excluded.work_mode,
        run_control = excluded.run_control,
        permission_profile = excluded.permission_profile,
        model_mode = excluded.model_mode,
        surface = excluded.surface,
        updated_at = excluded.updated_at
    `);
  stmt.run(params);
  return true;
}
/**
 * Escape hatch returning the raw adapter for read-only SELECTs in query
 * modules (context-store, memory-store queries, doctor checks, projections).
 * Do NOT use it for writes — add a typed wrapper in this file instead
 * (single-writer invariant, see file header).
 */
export function _getAdapter() {
  return currentDb;
}
/**
 * Reset the one-shot provider load flag so the next loadProvider() call
 * re-runs its probe. Presumably a test hook — confirm before relying on it
 * in production paths.
 */
export function _resetProvider() {
  loadAttempted = false;
}
/**
 * Insert or update a decision by id.
 *
 * Uses ON CONFLICT DO UPDATE instead of INSERT OR REPLACE to preserve the
 * seq column: INSERT OR REPLACE deletes then reinserts, resetting seq and
 * corrupting decision ordering in DECISIONS.md after reconcile replay.
 *
 * @throws {SFError} when no database is open.
 */
export function upsertDecision(d) {
  if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open");
  const params = {
    ":id": d.id,
    ":when_context": d.when_context,
    ":scope": d.scope,
    ":decision": d.decision,
    ":choice": d.choice,
    ":rationale": d.rationale,
    ":revisable": d.revisable,
    ":made_by": d.made_by ?? "agent",
    ":superseded_by": d.superseded_by ?? null,
  };
  const stmt = currentDb.prepare(
    `INSERT INTO decisions (id, when_context, scope, decision, choice, rationale, revisable, made_by, superseded_by)
     VALUES (:id, :when_context, :scope, :decision, :choice, :rationale, :revisable, :made_by, :superseded_by)
     ON CONFLICT(id) DO UPDATE SET
       when_context = excluded.when_context,
       scope = excluded.scope,
       decision = excluded.decision,
       choice = excluded.choice,
       rationale = excluded.rationale,
       revisable = excluded.revisable,
       made_by = excluded.made_by,
       superseded_by = excluded.superseded_by`,
  );
  stmt.run(params);
}
/**
 * Insert or replace a requirement by id (INSERT OR REPLACE — the whole row
 * is rewritten on conflict).
 * @throws {SFError} when no database is open.
 */
export function upsertRequirement(r) {
  if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open");
  const params = {
    ":id": r.id,
    ":class": r.class,
    ":status": r.status,
    ":description": r.description,
    ":why": r.why,
    ":source": r.source,
    ":primary_owner": r.primary_owner,
    ":supporting_slices": r.supporting_slices,
    ":validation": r.validation,
    ":notes": r.notes,
    ":full_content": r.full_content,
    ":superseded_by": r.superseded_by ?? null,
  };
  const stmt = currentDb.prepare(
    `INSERT OR REPLACE INTO requirements (id, class, status, description, why, source, primary_owner, supporting_slices, validation, notes, full_content, superseded_by)
     VALUES (:id, :class, :status, :description, :why, :source, :primary_owner, :supporting_slices, :validation, :notes, :full_content, :superseded_by)`,
  );
  stmt.run(params);
}
/**
 * Delete every row from the artifacts table. Best-effort: a no-op when no
 * DB is open, and failures are logged as warnings rather than thrown.
 */
export function clearArtifacts() {
  if (currentDb == null) return;
  try {
    currentDb.exec("DELETE FROM artifacts");
  } catch (err) {
    logWarning("db", `clearArtifacts failed: ${err.message}`);
  }
}
/**
 * Insert or replace an imported artifact, keyed by path. The imported_at
 * timestamp is stamped at call time.
 * @throws {SFError} when no database is open.
 */
export function insertArtifact(a) {
  if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open");
  const params = {
    ":path": a.path,
    ":artifact_type": a.artifact_type,
    ":milestone_id": a.milestone_id,
    ":slice_id": a.slice_id,
    ":task_id": a.task_id,
    ":full_content": a.full_content,
    ":imported_at": new Date().toISOString(),
  };
  const stmt = currentDb.prepare(
    `INSERT OR REPLACE INTO artifacts (path, artifact_type, milestone_id, slice_id, task_id, full_content, imported_at)
     VALUES (:path, :artifact_type, :milestone_id, :slice_id, :task_id, :full_content, :imported_at)`,
  );
  stmt.run(params);
}
/**
 * Insert a milestone row if absent (INSERT OR IGNORE — an existing row
 * with the same id is left untouched).
 *
 * Planning fields are flattened into columns; array/object-valued fields
 * are serialized to JSON strings. When the planning payload is non-empty
 * (per hasPlanningPayload), the write-once milestone_specs snapshot is
 * also seeded via insertMilestoneSpecIfAbsent().
 *
 * @param {object} m - { id, title?, status?, depends_on?, sequence?, planning? }
 * @throws {SFError} when no database is open.
 */
export function insertMilestone(m) {
  if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open");
  currentDb
    .prepare(`INSERT OR IGNORE INTO milestones (
      id, title, status, depends_on, created_at,
      vision, success_criteria, key_risks, proof_strategy,
      verification_contract, verification_integration, verification_operational, verification_uat,
      definition_of_done, requirement_coverage, boundary_map_markdown, vision_meeting_json, product_research_json, sequence
    ) VALUES (
      :id, :title, :status, :depends_on, :created_at,
      :vision, :success_criteria, :key_risks, :proof_strategy,
      :verification_contract, :verification_integration, :verification_operational, :verification_uat,
      :definition_of_done, :requirement_coverage, :boundary_map_markdown, :vision_meeting_json, :product_research_json, :sequence
    )`)
    .run({
      ":id": m.id,
      ":title": m.title ?? "",
      // Default to "queued" — never auto-create milestones as "active" (#3380).
      // Callers that need "active" must pass it explicitly.
      ":status": m.status ?? "queued",
      ":depends_on": JSON.stringify(m.depends_on ?? []),
      ":created_at": new Date().toISOString(),
      ":vision": m.planning?.vision ?? "",
      ":success_criteria": JSON.stringify(m.planning?.successCriteria ?? []),
      ":key_risks": JSON.stringify(m.planning?.keyRisks ?? []),
      ":proof_strategy": JSON.stringify(m.planning?.proofStrategy ?? []),
      ":verification_contract": m.planning?.verificationContract ?? "",
      ":verification_integration": m.planning?.verificationIntegration ?? "",
      ":verification_operational": m.planning?.verificationOperational ?? "",
      ":verification_uat": m.planning?.verificationUat ?? "",
      ":definition_of_done": JSON.stringify(m.planning?.definitionOfDone ?? []),
      ":requirement_coverage": m.planning?.requirementCoverage ?? "",
      ":boundary_map_markdown": m.planning?.boundaryMapMarkdown ?? "",
      // visionMeeting / productResearch are stored as JSON, or "" when absent.
      ":vision_meeting_json": m.planning?.visionMeeting
        ? JSON.stringify(m.planning.visionMeeting)
        : "",
      ":product_research_json": m.planning?.productResearch
        ? JSON.stringify(m.planning.productResearch)
        : "",
      ":sequence": m.sequence ?? 0,
    });
  if (hasPlanningPayload(m.planning)) {
    insertMilestoneSpecIfAbsent(m.id, m.planning ?? {});
  }
}
/**
 * Seed (or backfill) the write-once milestone_specs snapshot for a milestone.
 *
 * Behavior:
 * - No-op when the planning payload is empty (hasPlanningPayload guard).
 * - If a spec row exists and is non-empty (per isEmptyMilestoneSpec), it is
 *   left untouched — specs are immutable once filled.
 * - If a spec row exists but is empty, it is backfilled in place via UPDATE.
 * - Otherwise a fresh row is inserted with spec_version = 1.
 *
 * NOTE: assumes currentDb is open — callers guard this; not exported.
 */
function insertMilestoneSpecIfAbsent(milestoneId, planning = {}) {
  if (!hasPlanningPayload(planning)) return;
  const existing = currentDb
    .prepare("SELECT * FROM milestone_specs WHERE id = ?")
    .get(milestoneId);
  if (existing && !isEmptyMilestoneSpec(existing)) return;
  // Named params shared by both the UPDATE (backfill) and INSERT paths.
  const params = {
    ":id": milestoneId,
    ":vision": planning.vision ?? "",
    ":success_criteria": JSON.stringify(planning.successCriteria ?? []),
    ":key_risks": JSON.stringify(planning.keyRisks ?? []),
    ":proof_strategy": JSON.stringify(planning.proofStrategy ?? []),
    ":verification_contract": planning.verificationContract ?? "",
    ":verification_integration": planning.verificationIntegration ?? "",
    ":verification_operational": planning.verificationOperational ?? "",
    ":verification_uat": planning.verificationUat ?? "",
    ":definition_of_done": JSON.stringify(planning.definitionOfDone ?? []),
    ":requirement_coverage": planning.requirementCoverage ?? "",
    ":boundary_map_markdown": planning.boundaryMapMarkdown ?? "",
    ":vision_meeting_json": planning.visionMeeting
      ? JSON.stringify(planning.visionMeeting)
      : "",
    ":product_research_json": planning.productResearch
      ? JSON.stringify(planning.productResearch)
      : "",
    ":created_at": new Date().toISOString(),
  };
  if (existing) {
    // Existing-but-empty row: backfill in place (preserves created_at).
    currentDb
      .prepare(`UPDATE milestone_specs SET
        vision = :vision,
        success_criteria = :success_criteria,
        key_risks = :key_risks,
        proof_strategy = :proof_strategy,
        verification_contract = :verification_contract,
        verification_integration = :verification_integration,
        verification_operational = :verification_operational,
        verification_uat = :verification_uat,
        definition_of_done = :definition_of_done,
        requirement_coverage = :requirement_coverage,
        boundary_map_markdown = :boundary_map_markdown,
        vision_meeting_json = :vision_meeting_json,
        product_research_json = :product_research_json
        WHERE id = :id`)
      .run(params);
    return;
  }
  currentDb
    .prepare(`INSERT OR IGNORE INTO milestone_specs (
      id, vision, success_criteria, key_risks, proof_strategy,
      verification_contract, verification_integration, verification_operational, verification_uat,
      definition_of_done, requirement_coverage, boundary_map_markdown, vision_meeting_json, product_research_json,
      spec_version, created_at
    ) VALUES (
      :id, :vision, :success_criteria, :key_risks, :proof_strategy,
      :verification_contract, :verification_integration, :verification_operational, :verification_uat,
      :definition_of_done, :requirement_coverage, :boundary_map_markdown, :vision_meeting_json, :product_research_json,
      1, :created_at
    )`)
    .run(params);
}
/**
 * Merge planning fields into an existing milestone row.
 *
 * Merge semantics: fields the caller omits arrive as NULL and COALESCE
 * keeps the existing column value; title/status additionally treat the
 * empty string as "not provided" via NULLIF. Also seeds the write-once
 * milestone_specs snapshot first (insertMilestoneSpecIfAbsent).
 *
 * @throws {SFError} when no database is open.
 */
export function upsertMilestonePlanning(milestoneId, planning) {
  if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open");
  insertMilestoneSpecIfAbsent(milestoneId, planning);
  currentDb
    .prepare(`UPDATE milestones SET
      title = COALESCE(NULLIF(:title, ''), title),
      status = COALESCE(NULLIF(:status, ''), status),
      vision = COALESCE(:vision, vision),
      success_criteria = COALESCE(:success_criteria, success_criteria),
      key_risks = COALESCE(:key_risks, key_risks),
      proof_strategy = COALESCE(:proof_strategy, proof_strategy),
      verification_contract = COALESCE(:verification_contract, verification_contract),
      verification_integration = COALESCE(:verification_integration, verification_integration),
      verification_operational = COALESCE(:verification_operational, verification_operational),
      verification_uat = COALESCE(:verification_uat, verification_uat),
      definition_of_done = COALESCE(:definition_of_done, definition_of_done),
      requirement_coverage = COALESCE(:requirement_coverage, requirement_coverage),
      boundary_map_markdown = COALESCE(:boundary_map_markdown, boundary_map_markdown),
      vision_meeting_json = COALESCE(:vision_meeting_json, vision_meeting_json),
      product_research_json = COALESCE(:product_research_json, product_research_json)
      WHERE id = :id`)
    .run({
      ":id": milestoneId,
      ":title": planning.title ?? "",
      ":status": planning.status ?? "",
      // NULL (not "") when omitted so COALESCE preserves the stored value.
      ":vision": planning.vision ?? null,
      ":success_criteria": planning.successCriteria
        ? JSON.stringify(planning.successCriteria)
        : null,
      ":key_risks": planning.keyRisks
        ? JSON.stringify(planning.keyRisks)
        : null,
      ":proof_strategy": planning.proofStrategy
        ? JSON.stringify(planning.proofStrategy)
        : null,
      ":verification_contract": planning.verificationContract ?? null,
      ":verification_integration": planning.verificationIntegration ?? null,
      ":verification_operational": planning.verificationOperational ?? null,
      ":verification_uat": planning.verificationUat ?? null,
      ":definition_of_done": planning.definitionOfDone
        ? JSON.stringify(planning.definitionOfDone)
        : null,
      ":requirement_coverage": planning.requirementCoverage ?? null,
      ":boundary_map_markdown": planning.boundaryMapMarkdown ?? null,
      ":vision_meeting_json": planning.visionMeeting
        ? JSON.stringify(planning.visionMeeting)
        : null,
      ":product_research_json": planning.productResearch
        ? JSON.stringify(planning.productResearch)
        : null,
    });
}
/**
 * Insert or merge a slice row, keyed by (milestone_id, id).
 *
 * Merge semantics on conflict: each field has a paired ":raw_*" sentinel
 * param that is NULL when the caller omitted the field; the CASE guards
 * only take the new (excluded) value when the sentinel is non-NULL, so
 * omitted fields never clobber stored values. `depends` is always taken
 * from the new row, and a slice already in status 'complete'/'done' keeps
 * that status. Also seeds the write-once slice_specs snapshot
 * (insertSliceSpecIfAbsent).
 *
 * @throws {SFError} when no database is open.
 */
export function insertSlice(s) {
  if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open");
  currentDb
    .prepare(`INSERT INTO slices (
      milestone_id, id, title, status, risk, depends, demo, created_at,
      goal, success_criteria, proof_level, integration_closure, observability_impact,
      adversarial_partner, adversarial_combatant, adversarial_architect, planning_meeting_json, sequence,
      is_sketch, sketch_scope
    ) VALUES (
      :milestone_id, :id, :title, :status, :risk, :depends, :demo, :created_at,
      :goal, :success_criteria, :proof_level, :integration_closure, :observability_impact,
      :adversarial_partner, :adversarial_combatant, :adversarial_architect, :planning_meeting_json, :sequence,
      :is_sketch, :sketch_scope
    )
    ON CONFLICT (milestone_id, id) DO UPDATE SET
      title = CASE WHEN :raw_title IS NOT NULL THEN excluded.title ELSE slices.title END,
      status = CASE WHEN slices.status IN ('complete', 'done') THEN slices.status ELSE excluded.status END,
      risk = CASE WHEN :raw_risk IS NOT NULL THEN excluded.risk ELSE slices.risk END,
      depends = excluded.depends,
      demo = CASE WHEN :raw_demo IS NOT NULL THEN excluded.demo ELSE slices.demo END,
      goal = CASE WHEN :raw_goal IS NOT NULL THEN excluded.goal ELSE slices.goal END,
      success_criteria = CASE WHEN :raw_success_criteria IS NOT NULL THEN excluded.success_criteria ELSE slices.success_criteria END,
      proof_level = CASE WHEN :raw_proof_level IS NOT NULL THEN excluded.proof_level ELSE slices.proof_level END,
      integration_closure = CASE WHEN :raw_integration_closure IS NOT NULL THEN excluded.integration_closure ELSE slices.integration_closure END,
      observability_impact = CASE WHEN :raw_observability_impact IS NOT NULL THEN excluded.observability_impact ELSE slices.observability_impact END,
      adversarial_partner = CASE WHEN :raw_adversarial_partner IS NOT NULL THEN excluded.adversarial_partner ELSE slices.adversarial_partner END,
      adversarial_combatant = CASE WHEN :raw_adversarial_combatant IS NOT NULL THEN excluded.adversarial_combatant ELSE slices.adversarial_combatant END,
      adversarial_architect = CASE WHEN :raw_adversarial_architect IS NOT NULL THEN excluded.adversarial_architect ELSE slices.adversarial_architect END,
      planning_meeting_json = CASE WHEN :raw_planning_meeting_json IS NOT NULL THEN excluded.planning_meeting_json ELSE slices.planning_meeting_json END,
      sequence = CASE WHEN :raw_sequence IS NOT NULL THEN excluded.sequence ELSE slices.sequence END,
      is_sketch = CASE WHEN :raw_is_sketch IS NOT NULL THEN excluded.is_sketch ELSE slices.is_sketch END,
      sketch_scope = CASE WHEN :raw_sketch_scope IS NOT NULL THEN excluded.sketch_scope ELSE slices.sketch_scope END`)
    .run({
      ":milestone_id": s.milestoneId,
      ":id": s.id,
      ":title": s.title ?? "",
      ":status": s.status ?? "pending",
      ":risk": s.risk ?? "medium",
      ":depends": JSON.stringify(s.depends ?? []),
      ":demo": s.demo ?? "",
      ":created_at": new Date().toISOString(),
      ":goal": s.planning?.goal ?? "",
      ":success_criteria": s.planning?.successCriteria ?? "",
      ":proof_level": s.planning?.proofLevel ?? "",
      ":integration_closure": s.planning?.integrationClosure ?? "",
      ":observability_impact": s.planning?.observabilityImpact ?? "",
      ":adversarial_partner": s.planning?.adversarialReview?.partner ?? "",
      ":adversarial_combatant": s.planning?.adversarialReview?.combatant ?? "",
      ":adversarial_architect": s.planning?.adversarialReview?.architect ?? "",
      ":planning_meeting_json": s.planning?.planningMeeting
        ? JSON.stringify(s.planning.planningMeeting)
        : "",
      ":sequence": s.sequence ?? 0,
      ":is_sketch": s.isSketch === true ? 1 : 0,
      ":sketch_scope": s.sketchScope ?? "",
      // Raw sentinel params: NULL when caller omitted the field, used in ON CONFLICT guards
      ":raw_title": s.title ?? null,
      ":raw_risk": s.risk ?? null,
      ":raw_demo": s.demo ?? null,
      ":raw_goal": s.planning?.goal ?? null,
      ":raw_success_criteria": s.planning?.successCriteria ?? null,
      ":raw_proof_level": s.planning?.proofLevel ?? null,
      ":raw_integration_closure": s.planning?.integrationClosure ?? null,
      ":raw_observability_impact": s.planning?.observabilityImpact ?? null,
      ":raw_adversarial_partner":
        s.planning?.adversarialReview?.partner ?? null,
      ":raw_adversarial_combatant":
        s.planning?.adversarialReview?.combatant ?? null,
      ":raw_adversarial_architect":
        s.planning?.adversarialReview?.architect ?? null,
      ":raw_planning_meeting_json": s.planning?.planningMeeting
        ? JSON.stringify(s.planning.planningMeeting)
        : null,
      ":raw_sequence": s.sequence ?? null,
      // Explicit undefined check: false must map to 0, not to the NULL sentinel.
      ":raw_is_sketch": s.isSketch === undefined ? null : s.isSketch ? 1 : 0,
      ":raw_sketch_scope": s.sketchScope === undefined ? null : s.sketchScope,
    });
  insertSliceSpecIfAbsent(s.milestoneId, s.id, s.planning ?? {});
}
/**
 * Seed the write-once slice_specs snapshot for (milestoneId, sliceId).
 * INSERT OR IGNORE: an existing spec row is never modified. New rows get
 * spec_version = 1 and a created_at timestamp stamped at call time.
 *
 * NOTE: assumes currentDb is open — callers guard this; not exported.
 */
function insertSliceSpecIfAbsent(milestoneId, sliceId, planning = {}) {
  currentDb
    .prepare(`INSERT OR IGNORE INTO slice_specs (
      milestone_id, slice_id, goal, success_criteria, proof_level,
      integration_closure, observability_impact,
      adversarial_partner, adversarial_combatant, adversarial_architect,
      planning_meeting_json, spec_version, created_at
    ) VALUES (
      :milestone_id, :slice_id, :goal, :success_criteria, :proof_level,
      :integration_closure, :observability_impact,
      :adversarial_partner, :adversarial_combatant, :adversarial_architect,
      :planning_meeting_json, 1, :created_at
    )`)
    .run({
      ":milestone_id": milestoneId,
      ":slice_id": sliceId,
      ":goal": planning.goal ?? "",
      ":success_criteria": planning.successCriteria ?? "",
      ":proof_level": planning.proofLevel ?? "",
      ":integration_closure": planning.integrationClosure ?? "",
      ":observability_impact": planning.observabilityImpact ?? "",
      ":adversarial_partner": planning.adversarialReview?.partner ?? "",
      ":adversarial_combatant": planning.adversarialReview?.combatant ?? "",
      ":adversarial_architect": planning.adversarialReview?.architect ?? "",
      ":planning_meeting_json": planning.planningMeeting
        ? JSON.stringify(planning.planningMeeting)
        : "",
      ":created_at": new Date().toISOString(),
    });
}
/**
 * SF ADR-011: clear the is_sketch flag after refine-slice fills in the full plan.
 * Idempotent — safe to call on already-refined slices.
 * Thin convenience wrapper over setSliceSketchFlag(..., false).
 */
export function clearSliceSketch(milestoneId, sliceId) {
  setSliceSketchFlag(milestoneId, sliceId, false);
}
/**
 * SF ADR-011: generalized sketch-flag setter — flip true or false.
 * Idempotent. Use this instead of clearSliceSketch when you also need to
 * mark a slice as a sketch (e.g., a re-plan flow that wants to revert to
 * sketch-then-refine).
 *
 * @throws {SFError} when no database is open.
 */
export function setSliceSketchFlag(milestoneId, sliceId, isSketch) {
  if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open");
  const params = {
    ":is_sketch": isSketch ? 1 : 0,
    ":mid": milestoneId,
    ":sid": sliceId,
  };
  const stmt = currentDb.prepare(
    `UPDATE slices SET is_sketch = :is_sketch WHERE milestone_id = :mid AND id = :sid`,
  );
  stmt.run(params);
}
/**
 * SF ADR-011 auto-heal: reconcile stale is_sketch=1 rows whose PLAN file
 * already exists on disk. The caller passes a predicate that uses the
 * canonical path resolver so path logic stays in one place. Safe to call
 * repeatedly — only flips rows that satisfy the predicate. No-op when no
 * database is open.
 *
 * @param {string} milestoneId
 * @param {(sliceId: string) => boolean} hasPlanFile
 */
export function autoHealSketchFlags(milestoneId, hasPlanFile) {
  if (currentDb == null) return;
  const sketchRows = currentDb
    .prepare(
      `SELECT id FROM slices WHERE milestone_id = :mid AND is_sketch = 1`,
    )
    .all({ ":mid": milestoneId });
  for (const { id } of sketchRows) {
    if (!hasPlanFile(id)) continue;
    setSliceSketchFlag(milestoneId, id, false);
  }
}
/**
 * SF ADR-011 P2: list tasks across a milestone that have an
 * escalation artifact path. By default returns only ACTIVE escalations
 * (pending OR awaiting_review); pass includeResolved=true to also return
 * resolved-but-still-recorded entries (audit trail).
 *
 * Used by `/escalate list` to enumerate cross-slice escalations.
 *
 * @returns {object[]} task records mapped via rowToTask; [] when no DB is open.
 */
export function listEscalationArtifacts(milestoneId, includeResolved = false) {
  if (currentDb == null) return [];
  // Both filter variants are fixed strings — no user input is interpolated.
  let filter;
  if (includeResolved) {
    filter = "escalation_artifact_path IS NOT NULL";
  } else {
    filter =
      "(escalation_pending = 1 OR escalation_awaiting_review = 1) AND escalation_artifact_path IS NOT NULL";
  }
  const sql = `SELECT * FROM tasks WHERE milestone_id = :mid AND ${filter} ORDER BY slice_id, sequence, id`;
  return currentDb
    .prepare(sql)
    .all({ ":mid": milestoneId })
    .map(rowToTask);
}
/**
 * Merge planning fields into an existing slice row.
 *
 * Merge semantics: omitted fields arrive as NULL and COALESCE keeps the
 * existing column value. Also seeds the write-once slice_specs snapshot
 * first (insertSliceSpecIfAbsent).
 *
 * @throws {SFError} when no database is open.
 */
export function upsertSlicePlanning(milestoneId, sliceId, planning) {
  if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open");
  insertSliceSpecIfAbsent(milestoneId, sliceId, planning);
  currentDb
    .prepare(`UPDATE slices SET
      goal = COALESCE(:goal, goal),
      success_criteria = COALESCE(:success_criteria, success_criteria),
      proof_level = COALESCE(:proof_level, proof_level),
      integration_closure = COALESCE(:integration_closure, integration_closure),
      observability_impact = COALESCE(:observability_impact, observability_impact),
      adversarial_partner = COALESCE(:adversarial_partner, adversarial_partner),
      adversarial_combatant = COALESCE(:adversarial_combatant, adversarial_combatant),
      adversarial_architect = COALESCE(:adversarial_architect, adversarial_architect),
      planning_meeting_json = COALESCE(:planning_meeting_json, planning_meeting_json)
      WHERE milestone_id = :milestone_id AND id = :id`)
    .run({
      ":milestone_id": milestoneId,
      ":id": sliceId,
      // NULL (not "") when omitted so COALESCE preserves the stored value.
      ":goal": planning.goal ?? null,
      ":success_criteria": planning.successCriteria ?? null,
      ":proof_level": planning.proofLevel ?? null,
      ":integration_closure": planning.integrationClosure ?? null,
      ":observability_impact": planning.observabilityImpact ?? null,
      ":adversarial_partner": planning.adversarialReview?.partner ?? null,
      ":adversarial_combatant": planning.adversarialReview?.combatant ?? null,
      ":adversarial_architect": planning.adversarialReview?.architect ?? null,
      ":planning_meeting_json": planning.planningMeeting
        ? JSON.stringify(planning.planningMeeting)
        : null,
    });
}
/**
 * Insert or upsert a task row keyed by (milestone_id, slice_id, id).
 *
 * Conflict semantics: execution-state columns (status, summaries,
 * verification fields, sequence) are always overwritten, while planning
 * columns (title, description, estimate, files, verify, inputs,
 * expected_output, observability_impact) only overwrite when the incoming
 * value is non-empty — the NULLIF guards keep existing planning data when
 * the caller passes '' / '[]' placeholders.
 *
 * Also records the first-seen spec via insertTaskSpecIfAbsent.
 * @param {object} t - Task payload (camelCase fields, optional `planning`).
 * @throws {SFError} when no database is open.
 */
export function insertTask(t) {
  if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open");
  currentDb
    .prepare(`INSERT INTO tasks (
      milestone_id, slice_id, id, title, status, one_liner, narrative,
      verification_result, verification_status, duration, completed_at, blocker_discovered,
      deviations, known_issues, key_files, key_decisions, full_summary_md,
      description, estimate, files, verify, inputs, expected_output, observability_impact, sequence
    ) VALUES (
      :milestone_id, :slice_id, :id, :title, :status, :one_liner, :narrative,
      :verification_result, :verification_status, :duration, :completed_at, :blocker_discovered,
      :deviations, :known_issues, :key_files, :key_decisions, :full_summary_md,
      :description, :estimate, :files, :verify, :inputs, :expected_output, :observability_impact, :sequence
    )
    ON CONFLICT(milestone_id, slice_id, id) DO UPDATE SET
      title = CASE WHEN NULLIF(:title, '') IS NOT NULL THEN :title ELSE tasks.title END,
      status = :status,
      one_liner = :one_liner,
      narrative = :narrative,
      verification_result = :verification_result,
      verification_status = :verification_status,
      duration = :duration,
      completed_at = :completed_at,
      blocker_discovered = :blocker_discovered,
      deviations = :deviations,
      known_issues = :known_issues,
      key_files = :key_files,
      key_decisions = :key_decisions,
      full_summary_md = :full_summary_md,
      description = CASE WHEN NULLIF(:description, '') IS NOT NULL THEN :description ELSE tasks.description END,
      estimate = CASE WHEN NULLIF(:estimate, '') IS NOT NULL THEN :estimate ELSE tasks.estimate END,
      files = CASE WHEN NULLIF(:files, '[]') IS NOT NULL THEN :files ELSE tasks.files END,
      verify = CASE WHEN NULLIF(:verify, '') IS NOT NULL THEN :verify ELSE tasks.verify END,
      inputs = CASE WHEN NULLIF(:inputs, '[]') IS NOT NULL THEN :inputs ELSE tasks.inputs END,
      expected_output = CASE WHEN NULLIF(:expected_output, '[]') IS NOT NULL THEN :expected_output ELSE tasks.expected_output END,
      observability_impact = CASE WHEN NULLIF(:observability_impact, '') IS NOT NULL THEN :observability_impact ELSE tasks.observability_impact END,
      sequence = :sequence`)
    .run({
      ":milestone_id": t.milestoneId,
      ":slice_id": t.sliceId,
      ":id": t.id,
      ":title": t.title ?? "",
      ":status": t.status ?? "pending",
      ":one_liner": t.oneLiner ?? "",
      ":narrative": t.narrative ?? "",
      ":verification_result": t.verificationResult ?? "",
      ":verification_status": t.verificationStatus ?? "",
      ":duration": t.duration ?? "",
      // Completion timestamp is derived from the incoming status: stamped
      // "now" for done/complete, otherwise cleared. Re-upserting a done
      // task therefore refreshes the timestamp.
      ":completed_at":
        t.status === "done" || t.status === "complete"
          ? new Date().toISOString()
          : null,
      ":blocker_discovered": t.blockerDiscovered ? 1 : 0,
      ":deviations": t.deviations ?? "",
      ":known_issues": t.knownIssues ?? "",
      // Array-valued columns are stored as JSON text.
      ":key_files": JSON.stringify(t.keyFiles ?? []),
      ":key_decisions": JSON.stringify(t.keyDecisions ?? []),
      ":full_summary_md": t.fullSummaryMd ?? "",
      ":description": t.planning?.description ?? "",
      ":estimate": t.planning?.estimate ?? "",
      ":files": JSON.stringify(t.planning?.files ?? []),
      ":verify": t.planning?.verify ?? "",
      ":inputs": JSON.stringify(t.planning?.inputs ?? []),
      ":expected_output": JSON.stringify(t.planning?.expectedOutput ?? []),
      ":observability_impact": t.planning?.observabilityImpact ?? "",
      ":sequence": t.sequence ?? 0,
    });
  insertTaskSpecIfAbsent(t.milestoneId, t.sliceId, t.id, t.planning ?? {});
}
/**
 * Record the first-seen verification spec (verify/inputs/expected_output)
 * for a task. INSERT OR IGNORE makes this write-once: later calls with the
 * same (milestone_id, slice_id, task_id) are no-ops, preserving the
 * original spec snapshot (spec_version pinned at 1).
 */
function insertTaskSpecIfAbsent(milestoneId, sliceId, taskId, planning = {}) {
  currentDb
    .prepare(`INSERT OR IGNORE INTO task_specs (
      milestone_id, slice_id, task_id, verify, inputs, expected_output,
      spec_version, created_at
    ) VALUES (
      :milestone_id, :slice_id, :task_id, :verify, :inputs, :expected_output,
      1, :created_at
    )`)
    .run({
      ":milestone_id": milestoneId,
      ":slice_id": sliceId,
      ":task_id": taskId,
      ":verify": planning.verify ?? "",
      ":inputs": JSON.stringify(planning.inputs ?? []),
      ":expected_output": JSON.stringify(planning.expectedOutput ?? []),
      ":created_at": new Date().toISOString(),
    });
}
/**
 * Set a task's status and completion timestamp.
 * @param {string|undefined} completedAt - ISO timestamp; nullish clears it.
 * @throws {SFError} when no database is open.
 */
export function updateTaskStatus(
  milestoneId,
  sliceId,
  taskId,
  status,
  completedAt,
) {
  if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open");
  const stmt = currentDb
    .prepare(`UPDATE tasks SET status = :status, completed_at = :completed_at
    WHERE milestone_id = :milestone_id AND slice_id = :slice_id AND id = :id`);
  stmt.run({
    ":milestone_id": milestoneId,
    ":slice_id": sliceId,
    ":id": taskId,
    ":status": status,
    ":completed_at": completedAt ?? null,
  });
}
/** SF ADR-011 P2: set pause-on-escalation state on a task. The two flags are
* mutually exclusive — pending=1 forces awaiting_review=0. */
/**
 * Mark a task as paused on an escalation: sets escalation_pending and the
 * artifact path, and clears escalation_awaiting_review (the two flags are
 * mutually exclusive).
 * @throws {SFError} when no database is open.
 */
export function setTaskEscalationPending(
  milestoneId,
  sliceId,
  taskId,
  artifactPath,
) {
  if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open");
  const params = {
    ":mid": milestoneId,
    ":sid": sliceId,
    ":tid": taskId,
    ":path": artifactPath,
  };
  currentDb
    .prepare(`UPDATE tasks
    SET escalation_pending = 1,
        escalation_awaiting_review = 0,
        escalation_artifact_path = :path
    WHERE milestone_id = :mid AND slice_id = :sid AND id = :tid`)
    .run(params);
}
/** SF ADR-011 P2: continueWithDefault=true marker — artifact exists but no pause.
* Mutually exclusive with escalation_pending. */
/**
 * Mark a task's escalation as awaiting review (continueWithDefault case):
 * artifact path recorded but execution not paused. Clears the mutually
 * exclusive escalation_pending flag.
 * @throws {SFError} when no database is open.
 */
export function setTaskEscalationAwaitingReview(
  milestoneId,
  sliceId,
  taskId,
  artifactPath,
) {
  if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open");
  const params = {
    ":mid": milestoneId,
    ":sid": sliceId,
    ":tid": taskId,
    ":path": artifactPath,
  };
  currentDb
    .prepare(`UPDATE tasks
    SET escalation_awaiting_review = 1,
        escalation_pending = 0,
        escalation_artifact_path = :path
    WHERE milestone_id = :mid AND slice_id = :sid AND id = :tid`)
    .run(params);
}
/** SF ADR-011 P2: clear both escalation flags (called when an escalation is
* resolved or its artifact is removed). Leaves escalation_artifact_path so
* the resolution audit trail survives. */
/**
 * Clear both escalation flags on a task while keeping
 * escalation_artifact_path, so the resolution audit trail survives.
 * @throws {SFError} when no database is open.
 */
export function clearTaskEscalationFlags(milestoneId, sliceId, taskId) {
  if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open");
  const stmt = currentDb.prepare(`UPDATE tasks
    SET escalation_pending = 0,
        escalation_awaiting_review = 0
    WHERE milestone_id = :mid AND slice_id = :sid AND id = :tid`);
  stmt.run({ ":mid": milestoneId, ":sid": sliceId, ":tid": taskId });
}
/** SF ADR-011 P2 carry-forward: find a task in this slice that has a resolved
* escalation override that has NOT yet been injected into a downstream
* prompt. Returns the first match by sequence (lowest first), or null when
* no carry-forward is pending.
*
* The match criterion: escalation_artifact_path IS NOT NULL AND
* escalation_pending=0 AND escalation_awaiting_review=0 AND
* escalation_override_applied=0. The artifact's respondedAt is checked by
* the caller (claimOverrideForInjection in escalation.ts) — keeping artifact
* schema knowledge out of the DB layer. */
/**
 * Find the first task (lowest sequence, then id) in a slice whose resolved
 * escalation override has not yet been injected downstream: artifact path
 * set, both escalation flags cleared, override not yet applied.
 * @returns {{taskId: string, artifactPath: string}|null}
 */
export function findUnappliedEscalationOverride(milestoneId, sliceId) {
  if (!currentDb) return null;
  const stmt = currentDb.prepare(`SELECT id, escalation_artifact_path
    FROM tasks
    WHERE milestone_id = :mid
      AND slice_id = :sid
      AND escalation_artifact_path IS NOT NULL
      AND escalation_pending = 0
      AND escalation_awaiting_review = 0
      AND escalation_override_applied = 0
    ORDER BY sequence ASC, id ASC
    LIMIT 1`);
  const hit = stmt.get({ ":mid": milestoneId, ":sid": sliceId });
  return hit?.escalation_artifact_path
    ? { taskId: hit.id, artifactPath: hit.escalation_artifact_path }
    : null;
}
/** SF ADR-011 P2 carry-forward: atomically claim the override for injection.
* Returns true when this caller successfully flipped 0→1 (race winner) or
* false when another caller claimed it first (race loser). Use this to
* guarantee the override is injected exactly once. */
/**
 * Atomically claim an escalation override for injection by flipping
 * escalation_override_applied 0→1.
 * @returns {boolean} true when this caller won the race (row changed),
 *   false when another caller claimed first or no database is open.
 */
export function claimEscalationOverride(milestoneId, sliceId, taskId) {
  if (!currentDb) return false;
  const outcome = currentDb
    .prepare(`UPDATE tasks
    SET escalation_override_applied = 1
    WHERE milestone_id = :mid
      AND slice_id = :sid
      AND id = :tid
      AND escalation_override_applied = 0`)
    .run({ ":tid": taskId, ":sid": sliceId, ":mid": milestoneId });
  return (outcome?.changes ?? 0) > 0;
}
/**
 * Set or clear the blocker_discovered flag on a task (stored as 0/1).
 * Silently no-ops when no database is open.
 */
export function setTaskBlockerDiscovered(
  milestoneId,
  sliceId,
  taskId,
  discovered,
) {
  if (!currentDb) return;
  const stmt = currentDb.prepare(
    `UPDATE tasks SET blocker_discovered = :discovered WHERE milestone_id = :mid AND slice_id = :sid AND id = :tid`,
  );
  stmt.run({
    ":mid": milestoneId,
    ":sid": sliceId,
    ":tid": taskId,
    ":discovered": discovered ? 1 : 0,
  });
}
/**
 * Merge planning fields into an existing task row. Records the first-seen
 * spec via insertTaskSpecIfAbsent, then applies COALESCE updates so absent
 * fields (bound as NULL) never clobber stored values.
 * @param {object} planning - Partial planning payload; array fields are
 *   JSON-encoded before storage.
 * @throws {SFError} when no database is open.
 */
export function upsertTaskPlanning(milestoneId, sliceId, taskId, planning) {
  if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open");
  insertTaskSpecIfAbsent(milestoneId, sliceId, taskId, planning);
  currentDb
    .prepare(`UPDATE tasks SET
      title = COALESCE(:title, title),
      description = COALESCE(:description, description),
      estimate = COALESCE(:estimate, estimate),
      files = COALESCE(:files, files),
      verify = COALESCE(:verify, verify),
      inputs = COALESCE(:inputs, inputs),
      expected_output = COALESCE(:expected_output, expected_output),
      observability_impact = COALESCE(:observability_impact, observability_impact),
      full_plan_md = COALESCE(:full_plan_md, full_plan_md)
    WHERE milestone_id = :milestone_id AND slice_id = :slice_id AND id = :id`)
    .run({
      ":milestone_id": milestoneId,
      ":slice_id": sliceId,
      ":id": taskId,
      ":title": planning.title ?? null,
      ":description": planning.description ?? null,
      ":estimate": planning.estimate ?? null,
      ":files": planning.files ? JSON.stringify(planning.files) : null,
      ":verify": planning.verify ?? null,
      ":inputs": planning.inputs ? JSON.stringify(planning.inputs) : null,
      ":expected_output": planning.expectedOutput
        ? JSON.stringify(planning.expectedOutput)
        : null,
      ":observability_impact": planning.observabilityImpact ?? null,
      ":full_plan_md": planning.fullPlanMd ?? null,
    });
}
/**
 * Parse the planning_meeting_json column. Returns the parsed value, or
 * null for non-strings, blank strings, and corrupt JSON.
 */
function parsePlanningMeeting(raw) {
  if (typeof raw !== "string") return null;
  if (raw.trim() === "") return null;
  try {
    return JSON.parse(raw);
  } catch {
    return null;
  }
}
/**
 * Map a slices row to its plain-object shape. Missing optional text
 * columns default to "", timestamps to null, sequence/is_sketch to 0;
 * depends is JSON-decoded and planning_meeting_json parsed leniently.
 */
function rowToSlice(row) {
  return {
    milestone_id: row.milestone_id,
    id: row.id,
    title: row.title,
    status: row.status,
    risk: row.risk,
    depends: safeParseJsonArray(row.depends),
    demo: row.demo ?? "",
    created_at: row.created_at,
    completed_at: row.completed_at ?? null,
    full_summary_md: row.full_summary_md ?? "",
    full_uat_md: row.full_uat_md ?? "",
    goal: row.goal ?? "",
    success_criteria: row.success_criteria ?? "",
    proof_level: row.proof_level ?? "",
    integration_closure: row.integration_closure ?? "",
    observability_impact: row.observability_impact ?? "",
    adversarial_partner: row.adversarial_partner ?? "",
    adversarial_combatant: row.adversarial_combatant ?? "",
    adversarial_architect: row.adversarial_architect ?? "",
    planning_meeting: parsePlanningMeeting(row.planning_meeting_json),
    sequence: row.sequence ?? 0,
    replan_triggered_at: row.replan_triggered_at ?? null,
    sketch_scope: row.sketch_scope ?? "",
    is_sketch: row.is_sketch ?? 0,
  };
}
/** Fetch one slice by (milestone, id); null when absent or no DB open. */
export function getSlice(milestoneId, sliceId) {
  if (!currentDb) return null;
  const hit = currentDb
    .prepare("SELECT * FROM slices WHERE milestone_id = :mid AND id = :sid")
    .get({ ":sid": sliceId, ":mid": milestoneId });
  return hit ? rowToSlice(hit) : null;
}
/**
 * Set a slice's status and completion timestamp (nullish clears it).
 * @throws {SFError} when no database is open.
 */
export function updateSliceStatus(milestoneId, sliceId, status, completedAt) {
  if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open");
  const stmt = currentDb
    .prepare(`UPDATE slices SET status = :status, completed_at = :completed_at
    WHERE milestone_id = :milestone_id AND id = :id`);
  stmt.run({
    ":milestone_id": milestoneId,
    ":id": sliceId,
    ":status": status,
    ":completed_at": completedAt ?? null,
  });
}
/**
 * Store the full markdown summary for a task.
 * @throws {SFError} when no database is open.
 */
export function setTaskSummaryMd(milestoneId, sliceId, taskId, md) {
  if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open");
  const stmt = currentDb.prepare(
    `UPDATE tasks SET full_summary_md = :md WHERE milestone_id = :mid AND slice_id = :sid AND id = :tid`,
  );
  stmt.run({ ":md": md, ":tid": taskId, ":sid": sliceId, ":mid": milestoneId });
}
/**
 * Store the full markdown summary and UAT document for a slice.
 * @throws {SFError} when no database is open.
 */
export function setSliceSummaryMd(milestoneId, sliceId, summaryMd, uatMd) {
  if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open");
  const params = {
    ":mid": milestoneId,
    ":sid": sliceId,
    ":summary_md": summaryMd,
    ":uat_md": uatMd,
  };
  currentDb
    .prepare(
      `UPDATE slices SET full_summary_md = :summary_md, full_uat_md = :uat_md WHERE milestone_id = :mid AND id = :sid`,
    )
    .run(params);
}
/**
 * Parse a JSON column expected to hold an array. Returns `fallback` for
 * non-strings, blank strings, corrupt JSON, or non-array JSON values.
 */
function safeParseJsonArray(raw, fallback = []) {
  if (typeof raw === "string" && raw.trim() !== "") {
    try {
      const value = JSON.parse(raw);
      if (Array.isArray(value)) return value;
    } catch {
      // corrupt JSON — fall through to fallback
    }
  }
  return fallback;
}
/**
 * Decode a task array column into string[]. Valid JSON arrays are
 * stringified element-wise; scalar JSON becomes a one-element array;
 * null/empty become []. Non-JSON content falls back to comma-splitting
 * (legacy/corrupt rows stored comma-separated strings).
 */
function parseTaskArrayColumn(raw) {
  if (typeof raw !== "string" || raw.trim() === "") return [];
  let parsed;
  try {
    parsed = JSON.parse(raw);
  } catch {
    // Older/corrupt rows may contain comma-separated strings instead of JSON.
    return raw
      .split(",")
      .map((piece) => piece.trim())
      .filter(Boolean);
  }
  if (Array.isArray(parsed)) return parsed.map((entry) => String(entry));
  if (parsed === null || parsed === undefined || parsed === "") return [];
  return [String(parsed)];
}
/**
 * Map a tasks row to its plain-object shape.
 *
 * NOTE(review): two array decoders are in play. key_files/key_decisions go
 * through parseTaskArrayColumn (coerces non-string elements via String),
 * while files/inputs/expected_output use the local parseTaskArray below,
 * which DROPS non-string elements instead. The divergence looks
 * intentional-ish but is undocumented — confirm before unifying.
 */
function rowToTask(row) {
  // Lenient string[] decoder: accepts a real array, a JSON-encoded array
  // (non-string entries filtered out), a JSON string scalar, or — for
  // legacy/corrupt rows — a raw comma-separated string.
  const parseTaskArray = (value) => {
    if (Array.isArray(value)) {
      return value.filter((entry) => typeof entry === "string");
    }
    if (typeof value !== "string") return [];
    const trimmed = value.trim();
    if (!trimmed) return [];
    try {
      const parsed = JSON.parse(trimmed);
      if (Array.isArray(parsed)) {
        return parsed.filter((entry) => typeof entry === "string");
      }
      if (typeof parsed === "string" && parsed.trim()) {
        return [parsed.trim()];
      }
    } catch {
      // Older/corrupt DB rows may contain raw comma-separated paths instead of JSON arrays.
    }
    return trimmed
      .split(",")
      .map((entry) => entry.trim())
      .filter(Boolean);
  };
  return {
    milestone_id: row["milestone_id"],
    slice_id: row["slice_id"],
    id: row["id"],
    title: row["title"],
    status: row["status"],
    one_liner: row["one_liner"],
    narrative: row["narrative"],
    verification_result: row["verification_result"],
    duration: row["duration"],
    completed_at: row["completed_at"] ?? null,
    // SQLite stores booleans as integers; only exactly 1 counts as true.
    blocker_discovered: row["blocker_discovered"] === 1,
    deviations: row["deviations"],
    known_issues: row["known_issues"],
    key_files: parseTaskArrayColumn(row["key_files"]),
    key_decisions: parseTaskArrayColumn(row["key_decisions"]),
    full_summary_md: row["full_summary_md"],
    description: row["description"] ?? "",
    estimate: row["estimate"] ?? "",
    files: parseTaskArray(row["files"]),
    verify: row["verify"] ?? "",
    inputs: parseTaskArray(row["inputs"]),
    expected_output: parseTaskArray(row["expected_output"]),
    observability_impact: row["observability_impact"] ?? "",
    full_plan_md: row["full_plan_md"] ?? "",
    sequence: row["sequence"] ?? 0,
    verification_status: row["verification_status"] ?? "",
    // Escalation flags stay as raw 0/1 integers (unlike blocker_discovered).
    escalation_pending: row["escalation_pending"] ?? 0,
    escalation_awaiting_review: row["escalation_awaiting_review"] ?? 0,
    escalation_override_applied: row["escalation_override_applied"] ?? 0,
    escalation_artifact_path: row["escalation_artifact_path"] ?? null,
  };
}
/** Fetch one task by (milestone, slice, id); null when absent or no DB open. */
export function getTask(milestoneId, sliceId, taskId) {
  if (!currentDb) return null;
  const hit = currentDb
    .prepare(
      "SELECT * FROM tasks WHERE milestone_id = :mid AND slice_id = :sid AND id = :tid",
    )
    .get({ ":tid": taskId, ":sid": sliceId, ":mid": milestoneId });
  return hit ? rowToTask(hit) : null;
}
/** List all tasks for a slice ordered by sequence, then id. */
export function getSliceTasks(milestoneId, sliceId) {
  if (!currentDb) return [];
  return currentDb
    .prepare(
      "SELECT * FROM tasks WHERE milestone_id = :mid AND slice_id = :sid ORDER BY sequence, id",
    )
    .all({ ":mid": milestoneId, ":sid": sliceId })
    .map(rowToTask);
}
/**
 * Record one verification command run (command, exit code, verdict,
 * duration) for a task. INSERT OR IGNORE keeps duplicate rows out.
 * @throws {SFError} when no database is open.
 */
export function insertVerificationEvidence(e) {
  if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open");
  const stmt = currentDb
    .prepare(`INSERT OR IGNORE INTO verification_evidence (task_id, slice_id, milestone_id, command, exit_code, verdict, duration_ms, created_at)
    VALUES (:task_id, :slice_id, :milestone_id, :command, :exit_code, :verdict, :duration_ms, :created_at)`);
  stmt.run({
    ":created_at": new Date().toISOString(),
    ":task_id": e.taskId,
    ":slice_id": e.sliceId,
    ":milestone_id": e.milestoneId,
    ":command": e.command,
    ":exit_code": e.exitCode,
    ":verdict": e.verdict,
    ":duration_ms": e.durationMs,
  });
}
/** List verification evidence rows for a task in insertion (id) order. */
export function getVerificationEvidence(milestoneId, sliceId, taskId) {
  if (!currentDb) return [];
  return currentDb
    .prepare(
      "SELECT * FROM verification_evidence WHERE milestone_id = :mid AND slice_id = :sid AND task_id = :tid ORDER BY id",
    )
    .all({ ":tid": taskId, ":sid": sliceId, ":mid": milestoneId });
}
/**
 * Rehydrate a self_feedback row into its entry object.
 *
 * The canonical payload lives in the full_json column; the resolved_*
 * columns override the JSON copy so later resolution updates win. When
 * full_json is corrupt the entry is rebuilt from the discrete columns.
 *
 * Fix: parsing of resolved_evidence_json / resolved_criteria_json is now
 * fault-tolerant. Previously a corrupt resolution column threw inside the
 * happy path (knocking a valid full_json row into the catch branch) and
 * then threw again, uncaught, from inside that catch branch — so one bad
 * column made the whole entry unreadable.
 */
function rowToSelfFeedback(row) {
  // Parse a JSON column, returning `fallback` instead of throwing on bad data.
  const safeJson = (raw, fallback) => {
    if (typeof raw !== "string" || raw === "") return fallback;
    try {
      return JSON.parse(raw);
    } catch {
      return fallback;
    }
  };
  try {
    const parsed = JSON.parse(row["full_json"]);
    return {
      ...parsed,
      resolvedAt: row["resolved_at"] ?? parsed.resolvedAt,
      resolvedReason: row["resolved_reason"] ?? parsed.resolvedReason,
      resolvedBySfVersion:
        row["resolved_by_sf_version"] ?? parsed.resolvedBySfVersion,
      resolvedEvidence: row["resolved_evidence_json"]
        ? safeJson(row["resolved_evidence_json"], parsed.resolvedEvidence)
        : parsed.resolvedEvidence,
      resolvedCriteriaMet: row["resolved_criteria_json"]
        ? safeJson(row["resolved_criteria_json"], parsed.resolvedCriteriaMet)
        : parsed.resolvedCriteriaMet,
    };
  } catch {
    // full_json missing or corrupt — rebuild from the discrete columns.
    return {
      id: row["id"],
      ts: row["ts"],
      kind: row["kind"],
      severity: row["severity"],
      blocking: row["blocking"] === 1,
      repoIdentity: row["repo_identity"],
      sfVersion: row["sf_version"],
      basePath: row["base_path"],
      occurredIn: {
        unitType: row["unit_type"] ?? undefined,
        milestone: row["milestone_id"] ?? undefined,
        slice: row["slice_id"] ?? undefined,
        task: row["task_id"] ?? undefined,
      },
      summary: row["summary"],
      evidence: row["evidence"],
      suggestedFix: row["suggested_fix"],
      resolvedAt: row["resolved_at"] ?? undefined,
      resolvedReason: row["resolved_reason"] ?? undefined,
      resolvedBySfVersion: row["resolved_by_sf_version"] ?? undefined,
      resolvedEvidence: row["resolved_evidence_json"]
        ? safeJson(row["resolved_evidence_json"], undefined)
        : undefined,
      resolvedCriteriaMet: row["resolved_criteria_json"]
        ? safeJson(row["resolved_criteria_json"], undefined)
        : undefined,
    };
  }
}
/**
 * Persist a self-feedback entry. The entire entry is stored twice: as
 * discrete queryable columns and as a full_json blob (the canonical
 * payload rowToSelfFeedback rehydrates from). ON CONFLICT(id) DO NOTHING
 * makes insertion idempotent by entry id.
 * @throws {SFError} when no database is open.
 */
export function insertSelfFeedbackEntry(entry) {
  if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open");
  const occurred = entry.occurredIn ?? {};
  currentDb
    .prepare(`INSERT INTO self_feedback (
      id, ts, kind, severity, blocking, repo_identity, sf_version, base_path,
      unit_type, milestone_id, slice_id, task_id, summary, evidence, suggested_fix, full_json,
      resolved_at, resolved_reason, resolved_by_sf_version, resolved_evidence_json, resolved_criteria_json
    ) VALUES (
      :id, :ts, :kind, :severity, :blocking, :repo_identity, :sf_version, :base_path,
      :unit_type, :milestone_id, :slice_id, :task_id, :summary, :evidence, :suggested_fix, :full_json,
      :resolved_at, :resolved_reason, :resolved_by_sf_version, :resolved_evidence_json, :resolved_criteria_json
    )
    ON CONFLICT(id) DO NOTHING`)
    .run({
      ":id": entry.id,
      ":ts": entry.ts,
      ":kind": entry.kind,
      ":severity": entry.severity,
      ":blocking": entry.blocking ? 1 : 0,
      ":repo_identity": entry.repoIdentity ?? "",
      ":sf_version": entry.sfVersion ?? "",
      ":base_path": entry.basePath ?? "",
      ":unit_type": occurred.unitType ?? null,
      ":milestone_id": occurred.milestone ?? null,
      ":slice_id": occurred.slice ?? null,
      ":task_id": occurred.task ?? null,
      ":summary": entry.summary ?? "",
      ":evidence": entry.evidence ?? "",
      ":suggested_fix": entry.suggestedFix ?? "",
      // Canonical copy of the whole entry for lossless rehydration.
      ":full_json": JSON.stringify(entry),
      ":resolved_at": entry.resolvedAt ?? null,
      ":resolved_reason": entry.resolvedReason ?? null,
      ":resolved_by_sf_version": entry.resolvedBySfVersion ?? null,
      ":resolved_evidence_json": entry.resolvedEvidence
        ? JSON.stringify(entry.resolvedEvidence)
        : null,
      ":resolved_criteria_json": entry.resolvedCriteriaMet
        ? JSON.stringify(entry.resolvedCriteriaMet)
        : null,
    });
}
/** List all self-feedback entries ordered by timestamp, then id. */
export function listSelfFeedbackEntries() {
  if (!currentDb) return [];
  return currentDb
    .prepare("SELECT * FROM self_feedback ORDER BY ts ASC, id ASC")
    .all()
    .map(rowToSelfFeedback);
}
/**
 * Mark a self-feedback entry as resolved.
 *
 * Read-modify-write: rehydrates the current entry, layers the resolution
 * fields on, and writes both the discrete resolved_* columns and a rebuilt
 * full_json so the canonical blob stays in sync. The WHERE clause's
 * `resolved_at IS NULL` guard makes resolution first-wins.
 * @returns {boolean} true when the entry existed, was unresolved, and got
 *   updated; false otherwise.
 * @throws {SFError} when no database is open.
 */
export function resolveSelfFeedbackEntry(entryId, resolution) {
  if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open");
  const existing = currentDb
    .prepare("SELECT * FROM self_feedback WHERE id = :id")
    .get({ ":id": entryId });
  // Missing or already-resolved entries are not touched.
  if (!existing || existing["resolved_at"]) return false;
  const resolvedAt = resolution.resolvedAt ?? new Date().toISOString();
  const entry = {
    ...rowToSelfFeedback(existing),
    resolvedAt,
    resolvedReason: resolution.reason,
    resolvedBySfVersion: resolution.resolvedBySfVersion ?? "",
    resolvedEvidence: resolution.evidence,
  };
  // criteriaMet is only attached when provided, keeping full_json minimal.
  if (resolution.criteriaMet)
    entry.resolvedCriteriaMet = resolution.criteriaMet;
  const result = currentDb
    .prepare(`UPDATE self_feedback SET
      full_json = :full_json,
      resolved_at = :resolved_at,
      resolved_reason = :resolved_reason,
      resolved_by_sf_version = :resolved_by_sf_version,
      resolved_evidence_json = :resolved_evidence_json,
      resolved_criteria_json = :resolved_criteria_json
    WHERE id = :id AND resolved_at IS NULL`)
    .run({
      ":id": entryId,
      ":full_json": JSON.stringify(entry),
      ":resolved_at": resolvedAt,
      ":resolved_reason": resolution.reason ?? "",
      ":resolved_by_sf_version": resolution.resolvedBySfVersion ?? "",
      ":resolved_evidence_json": resolution.evidence
        ? JSON.stringify(resolution.evidence)
        : null,
      ":resolved_criteria_json": resolution.criteriaMet
        ? JSON.stringify(resolution.criteriaMet)
        : null,
    });
  return result.changes > 0;
}
/**
 * Parse the vision_meeting_json column. Returns the parsed value, or null
 * for non-strings, blank strings, and corrupt JSON.
 */
function parseVisionMeeting(raw) {
  if (typeof raw !== "string") return null;
  if (raw.trim().length === 0) return null;
  try {
    return JSON.parse(raw);
  } catch {
    return null;
  }
}
/**
 * Parse the product_research_json column. Returns the parsed value, or
 * null for non-strings, blank strings, and corrupt JSON.
 */
function parseProductResearch(raw) {
  if (typeof raw !== "string") return null;
  if (raw.trim().length === 0) return null;
  try {
    return JSON.parse(raw);
  } catch {
    return null;
  }
}
/**
 * Map a milestones row to its plain-object shape. JSON-array columns go
 * through safeParseJsonArray (lenient, [] on corruption); the two meeting
 * blobs are parsed leniently to null; missing text columns default to "".
 */
function rowToMilestone(row) {
  return {
    id: row["id"],
    title: row["title"],
    status: row["status"],
    depends_on: safeParseJsonArray(row["depends_on"]),
    created_at: row["created_at"],
    completed_at: row["completed_at"] ?? null,
    vision: row["vision"] ?? "",
    success_criteria: safeParseJsonArray(row["success_criteria"]),
    key_risks: safeParseJsonArray(row["key_risks"]),
    proof_strategy: safeParseJsonArray(row["proof_strategy"]),
    verification_contract: row["verification_contract"] ?? "",
    verification_integration: row["verification_integration"] ?? "",
    verification_operational: row["verification_operational"] ?? "",
    verification_uat: row["verification_uat"] ?? "",
    definition_of_done: safeParseJsonArray(row["definition_of_done"]),
    requirement_coverage: row["requirement_coverage"] ?? "",
    boundary_map_markdown: row["boundary_map_markdown"] ?? "",
    vision_meeting: parseVisionMeeting(row["vision_meeting_json"]),
    product_research: parseProductResearch(row["product_research_json"]),
    sequence: row["sequence"] ?? 0,
  };
}
/**
 * Map an artifacts row to its plain-object shape; missing scope ids
 * (milestone/slice/task) normalize to null.
 */
function rowToArtifact(row) {
  const {
    path,
    artifact_type,
    milestone_id,
    slice_id,
    task_id,
    full_content,
    imported_at,
  } = row;
  return {
    path,
    artifact_type,
    milestone_id: milestone_id ?? null,
    slice_id: slice_id ?? null,
    task_id: task_id ?? null,
    full_content,
    imported_at,
  };
}
/**
 * List all milestones: explicitly sequenced ones first (by sequence),
 * then unsequenced ones (sequence <= 0) by id.
 */
export function getAllMilestones() {
  if (!currentDb) return [];
  return currentDb
    .prepare(
      "SELECT * FROM milestones ORDER BY CASE WHEN sequence > 0 THEN 0 ELSE 1 END, sequence, id",
    )
    .all()
    .map(rowToMilestone);
}
/** Fetch one milestone by id; null when absent or no DB open. */
export function getMilestone(id) {
  if (!currentDb) return null;
  const hit = currentDb
    .prepare("SELECT * FROM milestones WHERE id = :id")
    .get({ ":id": id });
  return hit ? rowToMilestone(hit) : null;
}
/**
 * Map a backlog_items row to its camelCase object shape; missing
 * note/source default to "", triage/promoted fields to null, sequence to 0.
 */
function rowToBacklogItem(row) {
  return {
    id: row.id,
    title: row.title,
    status: row.status,
    note: row.note ?? "",
    source: row.source ?? "",
    triageRunId: row.triage_run_id ?? null,
    sequence: row.sequence ?? 0,
    createdAt: row.created_at,
    updatedAt: row.updated_at,
    promotedAt: row.promoted_at ?? null,
  };
}
/**
 * List backlog items: explicitly sequenced first (by sequence), then
 * unsequenced (sequence <= 0) by id.
 */
export function listBacklogItems() {
  if (!currentDb) return [];
  const rows = currentDb
    .prepare(
      "SELECT * FROM backlog_items ORDER BY CASE WHEN sequence > 0 THEN 0 ELSE 1 END, sequence, id",
    )
    .all();
  return rows.map(rowToBacklogItem);
}
/**
 * Allocate the next backlog item id in the '999.N' namespace by taking the
 * highest existing numeric suffix and adding one (1 when none exist or the
 * suffix fails to parse).
 * @throws {SFError} when no database is open.
 */
export function nextBacklogItemId() {
  if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open");
  const row = currentDb
    .prepare(
      "SELECT id FROM backlog_items WHERE id LIKE '999.%' ORDER BY CAST(substr(id, 5) AS INTEGER) DESC LIMIT 1",
    )
    .get();
  let suffix = 1;
  if (row?.id) {
    const candidate = Number.parseInt(String(row.id).slice(4), 10) + 1;
    if (Number.isFinite(candidate)) suffix = candidate;
  }
  return `999.${suffix}`;
}
/**
 * Add or upsert a backlog item.
 *
 * When no id is supplied one is allocated via nextBacklogItemId. On id
 * conflict all caller-supplied fields are overwritten, but sequence and
 * created_at are preserved — the item keeps its original position and
 * creation time. promoted_at is stamped only when status === 'promoted'.
 * @returns {string} the item's id.
 * @throws {SFError} when no database is open.
 */
export function addBacklogItem({
  id,
  title,
  note = "",
  source = "manual",
  triageRunId = null,
  status = "pending",
}) {
  if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open");
  const itemId = id ?? nextBacklogItemId();
  const now = new Date().toISOString();
  // New items are appended after the current maximum sequence.
  const sequenceRow = currentDb
    .prepare(
      "SELECT COALESCE(MAX(sequence), 0) + 1 AS sequence FROM backlog_items",
    )
    .get();
  currentDb
    .prepare(`INSERT INTO backlog_items (
      id, title, status, note, source, triage_run_id, sequence, created_at, updated_at, promoted_at
    ) VALUES (
      :id, :title, :status, :note, :source, :triage_run_id, :sequence, :created_at, :updated_at, :promoted_at
    )
    ON CONFLICT(id) DO UPDATE SET
      title = excluded.title,
      status = excluded.status,
      note = excluded.note,
      source = excluded.source,
      triage_run_id = excluded.triage_run_id,
      updated_at = excluded.updated_at,
      promoted_at = excluded.promoted_at`)
    .run({
      ":id": itemId,
      ":title": title,
      ":status": status,
      ":note": note,
      ":source": source,
      ":triage_run_id": triageRunId,
      ":sequence": sequenceRow?.sequence ?? 1,
      ":created_at": now,
      ":updated_at": now,
      ":promoted_at": status === "promoted" ? now : null,
    });
  return itemId;
}
/**
 * Update a backlog item's status and note; promoted_at is stamped with
 * the update time when moving to 'promoted', otherwise preserved.
 * @returns {boolean} true when a row was updated.
 * @throws {SFError} when no database is open.
 */
export function updateBacklogItemStatus(id, status, note = "") {
  if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open");
  const stamp = new Date().toISOString();
  const outcome = currentDb
    .prepare(`UPDATE backlog_items
    SET status = :status,
        note = :note,
        updated_at = :updated_at,
        promoted_at = CASE WHEN :status = 'promoted' THEN :updated_at ELSE promoted_at END
    WHERE id = :id`)
    .run({ ":status": status, ":note": note, ":updated_at": stamp, ":id": id });
  return (outcome?.changes ?? 0) > 0;
}
/**
 * Delete a backlog item by id.
 * @returns {boolean} true when a row was deleted.
 * @throws {SFError} when no database is open.
 */
export function removeBacklogItem(id) {
  if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open");
  const outcome = currentDb
    .prepare("DELETE FROM backlog_items WHERE id = :id")
    .run({ ":id": id });
  return (outcome?.changes ?? 0) > 0;
}
/**
* Update a milestone's status in the database.
* Used by park/unpark to keep the DB in sync with the filesystem marker.
* See: https://github.com/singularity-forge/sf-run/issues/2694
*/
/**
 * Update a milestone's status in the database.
 * Used by park/unpark to keep the DB in sync with the filesystem marker.
 * See: https://github.com/singularity-forge/sf-run/issues/2694
 * @throws {SFError} when no database is open.
 */
export function updateMilestoneStatus(milestoneId, status, completedAt) {
  if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open");
  const stmt = currentDb.prepare(
    `UPDATE milestones SET status = :status, completed_at = :completed_at WHERE id = :id`,
  );
  stmt.run({
    ":id": milestoneId,
    ":status": status,
    ":completed_at": completedAt ?? null,
  });
}
/**
* Persist explicit milestone execution order in the structured runtime DB.
*
* Purpose: make roadmap priority/order queryable and schema-owned instead of
* relying on `.sf/QUEUE-ORDER.json` as a peer source of truth.
*
* Consumer: queue-order.js when `/queue` or rethink reorders milestones.
*/
/**
 * Persist explicit milestone execution order: each milestone id in `order`
 * gets sequence = its 1-based position, applied atomically in one
 * transaction.
 * @throws {SFError} when no database is open.
 */
export function updateMilestoneQueueOrder(order) {
  if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open");
  transaction(() => {
    const stmt = currentDb.prepare(
      "UPDATE milestones SET sequence = :sequence WHERE id = :id",
    );
    order.forEach((milestoneId, index) => {
      stmt.run({ ":sequence": index + 1, ":id": milestoneId });
    });
  });
}
/**
 * Return the first milestone that is neither complete nor parked,
 * honoring explicit queue order (sequence > 0 first), or null.
 */
export function getActiveMilestoneFromDb() {
  if (!currentDb) return null;
  const hit = currentDb
    .prepare(
      "SELECT * FROM milestones WHERE status NOT IN ('complete', 'parked') ORDER BY CASE WHEN sequence > 0 THEN 0 ELSE 1 END, sequence, id LIMIT 1",
    )
    .get();
  return hit ? rowToMilestone(hit) : null;
}
/**
 * Return the next workable slice for a milestone: the first (by sequence,
 * then id) non-complete slice whose JSON `depends` entries are all
 * complete/done/skipped, or null when none qualifies.
 */
export function getActiveSliceFromDb(milestoneId) {
  if (!currentDb) return null;
  // Single query: find the first non-complete slice whose dependencies are all satisfied.
  // Uses json_each() to expand the JSON depends array and checks each dep is complete.
  const row = currentDb
    .prepare(`SELECT s.* FROM slices s
    WHERE s.milestone_id = :mid
      AND s.status NOT IN ('complete', 'done', 'skipped')
      AND NOT EXISTS (
        SELECT 1 FROM json_each(s.depends) AS dep
        WHERE dep.value NOT IN (
          SELECT id FROM slices WHERE milestone_id = :mid AND status IN ('complete', 'done', 'skipped')
        )
      )
    ORDER BY s.sequence, s.id
    LIMIT 1`)
    .get({ ":mid": milestoneId });
  if (!row) return null;
  return rowToSlice(row);
}
/**
 * Return the first non-complete task in a slice (by sequence, then id),
 * or null when the slice is finished or no DB is open.
 */
export function getActiveTaskFromDb(milestoneId, sliceId) {
  if (!currentDb) return null;
  const hit = currentDb
    .prepare(
      "SELECT * FROM tasks WHERE milestone_id = :mid AND slice_id = :sid AND status NOT IN ('complete', 'done') ORDER BY sequence, id LIMIT 1",
    )
    .get({ ":sid": sliceId, ":mid": milestoneId });
  return hit ? rowToTask(hit) : null;
}
/** List all slices for a milestone ordered by sequence, then id. */
export function getMilestoneSlices(milestoneId) {
  if (!currentDb) return [];
  return currentDb
    .prepare(
      "SELECT * FROM slices WHERE milestone_id = :mid ORDER BY sequence, id",
    )
    .all({ ":mid": milestoneId })
    .map(rowToSlice);
}
/** Fetch one artifact by path; null when absent or no DB open. */
export function getArtifact(path) {
  if (!currentDb) return null;
  const hit = currentDb
    .prepare("SELECT * FROM artifacts WHERE path = :path")
    .get({ ":path": path });
  return hit ? rowToArtifact(hit) : null;
}
// ─── Lightweight Query Variants (hot-path optimized) ─────────────────────
/** Fast milestone status check — avoids deserializing JSON planning fields. */
/**
 * Fast milestone status check — avoids deserializing JSON planning fields.
 *
 * Fix: the ORDER BY now matches getActiveMilestoneFromDb (explicit queue
 * sequence first, then id). Previously this variant ordered by id alone,
 * so after a /queue reorder the "fast" and full lookups could disagree on
 * which milestone is active.
 * @returns {{id: string, status: string}|null}
 */
export function getActiveMilestoneIdFromDb() {
  if (!currentDb) return null;
  const row = currentDb
    .prepare(
      "SELECT id, status FROM milestones WHERE status NOT IN ('complete', 'parked') ORDER BY CASE WHEN sequence > 0 THEN 0 ELSE 1 END, sequence, id LIMIT 1",
    )
    .get();
  if (!row) return null;
  return { id: row["id"], status: row["status"] };
}
/** Fast slice status check — avoids deserializing JSON depends/planning fields. */
/** Fast slice status check — returns only {id, status} pairs in slice order. */
export function getSliceStatusSummary(milestoneId) {
  if (!currentDb) return [];
  const rows = currentDb
    .prepare(
      "SELECT id, status FROM slices WHERE milestone_id = :mid ORDER BY sequence, id",
    )
    .all({ ":mid": milestoneId });
  return rows.map((entry) => ({ id: entry["id"], status: entry["status"] }));
}
/** Fast task status check — avoids deserializing JSON arrays and large text fields. */
/**
 * Fast task status check — returns only {id, status, title} of the first
 * non-complete task in a slice, or null.
 */
export function getActiveTaskIdFromDb(milestoneId, sliceId) {
  if (!currentDb) return null;
  const hit = currentDb
    .prepare(
      "SELECT id, status, title FROM tasks WHERE milestone_id = :mid AND slice_id = :sid AND status NOT IN ('complete', 'done') ORDER BY sequence, id LIMIT 1",
    )
    .get({ ":sid": sliceId, ":mid": milestoneId });
  if (!hit) return null;
  return { id: hit["id"], status: hit["status"], title: hit["title"] };
}
/** Count tasks by status for a slice — useful for progress reporting without full row load. */
/**
 * Count a slice's tasks grouped into done vs pending without loading full
 * rows. Returns all-zero counts when no DB is open or the query misses.
 */
export function getSliceTaskCounts(milestoneId, sliceId) {
  if (!currentDb) return { total: 0, done: 0, pending: 0 };
  const row = currentDb
    .prepare(`SELECT
    COUNT(*) as total,
    SUM(CASE WHEN status IN ('complete', 'done') THEN 1 ELSE 0 END) as done,
    SUM(CASE WHEN status NOT IN ('complete', 'done') THEN 1 ELSE 0 END) as pending
    FROM tasks WHERE milestone_id = :mid AND slice_id = :sid`)
    .get({ ":sid": sliceId, ":mid": milestoneId });
  if (!row) return { total: 0, done: 0, pending: 0 };
  // SUM() yields NULL on an empty set — normalize to 0.
  return {
    total: row["total"] ?? 0,
    done: row["done"] ?? 0,
    pending: row["pending"] ?? 0,
  };
}
// ─── Slice Dependencies (junction table) ─────────────────────────────────
/** Sync the slice_dependencies junction table from a slice's JSON depends array. */
/**
 * Sync the slice_dependencies junction table from a slice's JSON depends
 * array (replace-all semantics: delete existing rows, re-insert).
 *
 * Fix: the delete + inserts now run inside one transaction so a crash
 * mid-sync cannot leave the junction table half-populated, matching the
 * atomicity pattern used by updateMilestoneQueueOrder. The INSERT is also
 * prepared once instead of once per dependency.
 */
export function syncSliceDependencies(milestoneId, sliceId, depends) {
  if (!currentDb) return;
  transaction(() => {
    currentDb
      .prepare(
        "DELETE FROM slice_dependencies WHERE milestone_id = :mid AND slice_id = :sid",
      )
      .run({ ":mid": milestoneId, ":sid": sliceId });
    const insert = currentDb.prepare(
      "INSERT OR IGNORE INTO slice_dependencies (milestone_id, slice_id, depends_on_slice_id) VALUES (:mid, :sid, :dep)",
    );
    for (const dep of depends) {
      insert.run({ ":mid": milestoneId, ":sid": sliceId, ":dep": dep });
    }
  });
}
/** Get all slices that depend on a given slice. */
/** List ids of slices (within a milestone) that depend on the given slice. */
export function getDependentSlices(milestoneId, sliceId) {
  if (!currentDb) return [];
  const rows = currentDb
    .prepare(
      "SELECT slice_id FROM slice_dependencies WHERE milestone_id = :mid AND depends_on_slice_id = :sid",
    )
    .all({ ":sid": sliceId, ":mid": milestoneId });
  return rows.map((entry) => entry["slice_id"]);
}
// ─── Worktree DB Helpers ──────────────────────────────────────────────────
/**
 * Copy a DB file from `srcDbPath` to `destDbPath`, creating the destination
 * directory as needed.
 *
 * @param {string} srcDbPath - Source database file path.
 * @param {string} destDbPath - Destination database file path.
 * @returns {boolean} true on success; false when the source is missing or
 *   the copy fails (failures are logged, never thrown).
 */
export function copyWorktreeDb(srcDbPath, destDbPath) {
  try {
    if (!existsSync(srcDbPath)) return false;
    mkdirSync(dirname(destDbPath), { recursive: true });
    copyFileSync(srcDbPath, destDbPath);
    return true;
  } catch (err) {
    // Fix: a non-Error throw (e.g. a string) previously logged `undefined`;
    // coerce safely so the log payload is always meaningful.
    logError("db", "failed to copy DB to worktree", {
      error: err instanceof Error ? err.message : String(err),
    });
    return false;
  }
}
/**
 * Merge a worktree's copy of the engine DB back into the main DB.
 *
 * Strategy: ATTACH the worktree DB as `wt`, report rows modified on both
 * sides as `conflicts` (the worktree copy still wins the merge), then
 * bulk-merge each table inside one transaction. Completed slice/task
 * statuses in the main DB are never downgraded by stale worktree rows
 * (#2558).
 *
 * @param {string} mainDbPath - Path to the main engine DB (opened on demand).
 * @param {string} worktreeDbPath - Path to the worktree DB to merge from.
 * @returns {object} Per-table merged row counts plus a `conflicts` string
 *   array; all-zero counts when the worktree DB is missing, the path is
 *   unsafe for ATTACH, the main DB cannot be opened, or the merge fails.
 */
export function reconcileWorktreeDb(mainDbPath, worktreeDbPath) {
  // Shared all-zero result used by every early-exit / failure path.
  const zero = {
    decisions: 0,
    requirements: 0,
    artifacts: 0,
    milestones: 0,
    slices: 0,
    tasks: 0,
    memories: 0,
    verification_evidence: 0,
    conflicts: [],
  };
  if (!existsSync(worktreeDbPath)) return zero;
  // Guard: bail when both paths resolve to the same physical file.
  // ATTACHing a WAL-mode DB to itself corrupts the WAL (#2823).
  try {
    if (realpathSync(mainDbPath) === realpathSync(worktreeDbPath)) return zero;
  } catch (e) {
    // Best-effort guard: if realpath fails we fall through to the
    // character allowlist below instead of aborting the merge.
    logWarning("db", `realpathSync failed: ${e.message}`);
  }
  // Sanitize path: reject any characters that could break ATTACH syntax.
  // ATTACH DATABASE doesn't support parameterized paths in all providers,
  // so we use strict allowlist validation instead.
  if (/['";\x00]/.test(worktreeDbPath)) {
    logError(
      "db",
      "worktree DB reconciliation failed: path contains unsafe characters",
    );
    return zero;
  }
  if (!currentDb) {
    const opened = openDatabase(mainDbPath);
    if (!opened) {
      logError("db", "worktree DB reconciliation failed: cannot open main DB");
      return zero;
    }
  }
  const adapter = currentDb;
  const conflicts = [];
  try {
    adapter.exec(`ATTACH DATABASE '${worktreeDbPath}' AS wt`);
    try {
      // Schema probes: older worktree DBs may predate columns added by
      // later migrations, so only reference them in SQL when present.
      const wtInfo = adapter.prepare("PRAGMA wt.table_info('decisions')").all();
      const hasMadeBy = wtInfo.some((col) => col["name"] === "made_by");
      const wtMilestoneInfo = adapter
        .prepare("PRAGMA wt.table_info('milestones')")
        .all();
      const hasProductResearch = wtMilestoneInfo.some(
        (col) => col["name"] === "product_research_json",
      );
      // Conflict detection is report-only: rows that differ on both sides
      // are listed for the caller, then the worktree copy wins below.
      // (The "'agent' != 'agent'" arm is an always-false placeholder when
      // the worktree schema lacks made_by.)
      const decConf = adapter
        .prepare(
          `SELECT m.id FROM decisions m INNER JOIN wt.decisions w ON m.id = w.id WHERE m.decision != w.decision OR m.choice != w.choice OR m.rationale != w.rationale OR ${hasMadeBy ? "m.made_by != w.made_by" : "'agent' != 'agent'"} OR m.superseded_by IS NOT w.superseded_by`,
        )
        .all();
      for (const row of decConf)
        conflicts.push(`decision ${row["id"]}: modified in both`);
      const reqConf = adapter
        .prepare(
          `SELECT m.id FROM requirements m INNER JOIN wt.requirements w ON m.id = w.id WHERE m.description != w.description OR m.status != w.status OR m.notes != w.notes OR m.superseded_by IS NOT w.superseded_by`,
        )
        .all();
      for (const row of reqConf)
        conflicts.push(`requirement ${row["id"]}: modified in both`);
      const merged = {
        decisions: 0,
        requirements: 0,
        artifacts: 0,
        milestones: 0,
        slices: 0,
        tasks: 0,
        memories: 0,
        verification_evidence: 0,
      };
      // Normalize a statement result into its `changes` count (0 when the
      // provider returns something unexpected).
      function countChanges(result) {
        return typeof result === "object" && result !== null
          ? (result.changes ?? 0)
          : 0;
      }
      // Single transaction: either every table merges or none do.
      adapter.exec("BEGIN");
      try {
        merged.decisions = countChanges(
          adapter
            .prepare(`
            INSERT OR REPLACE INTO decisions (
              id, when_context, scope, decision, choice, rationale, revisable, made_by, superseded_by
            )
            SELECT id, when_context, scope, decision, choice, rationale, revisable, ${hasMadeBy ? "made_by" : "'agent'"}, superseded_by FROM wt.decisions
          `)
            .run(),
        );
        merged.requirements = countChanges(
          adapter
            .prepare(`
            INSERT OR REPLACE INTO requirements (
              id, class, status, description, why, source, primary_owner,
              supporting_slices, validation, notes, full_content, superseded_by
            )
            SELECT id, class, status, description, why, source, primary_owner,
                   supporting_slices, validation, notes, full_content, superseded_by
            FROM wt.requirements
          `)
            .run(),
        );
        merged.artifacts = countChanges(
          adapter
            .prepare(`
            INSERT OR REPLACE INTO artifacts (
              path, artifact_type, milestone_id, slice_id, task_id, full_content, imported_at
            )
            SELECT path, artifact_type, milestone_id, slice_id, task_id, full_content, imported_at
            FROM wt.artifacts
          `)
            .run(),
        );
        // Merge milestones — worktree may have updated status/planning fields
        merged.milestones = countChanges(
          adapter
            .prepare(`
            INSERT OR REPLACE INTO milestones (
              id, title, status, depends_on, created_at, completed_at,
              vision, success_criteria, key_risks, proof_strategy,
              verification_contract, verification_integration, verification_operational, verification_uat,
              definition_of_done, requirement_coverage, boundary_map_markdown, vision_meeting_json, product_research_json
            )
            SELECT id, title, status, depends_on, created_at, completed_at,
                   vision, success_criteria, key_risks, proof_strategy,
                   verification_contract, verification_integration, verification_operational, verification_uat,
                   definition_of_done, requirement_coverage, boundary_map_markdown, vision_meeting_json, ${hasProductResearch ? "product_research_json" : "''"}
            FROM wt.milestones
          `)
            .run(),
        );
        // Merge slices — preserve worktree progress but never downgrade completed status (#2558).
        // Uses INSERT OR REPLACE with a subquery that picks the best status — if the main DB
        // already has a completed slice, keep that status even if the worktree copy is stale.
        merged.slices = countChanges(
          adapter
            .prepare(`
            INSERT OR REPLACE INTO slices (
              milestone_id, id, title, status, risk, depends, demo, created_at, completed_at,
              full_summary_md, full_uat_md, goal, success_criteria, proof_level,
              integration_closure, observability_impact, adversarial_partner, adversarial_combatant,
              adversarial_architect, planning_meeting_json, sequence, replan_triggered_at
            )
            SELECT w.milestone_id, w.id, w.title,
                   CASE
                     WHEN m.status IN ('complete', 'done') AND w.status NOT IN ('complete', 'done')
                     THEN m.status ELSE w.status
                   END,
                   w.risk, w.depends, w.demo, w.created_at,
                   CASE
                     WHEN m.status IN ('complete', 'done') AND w.status NOT IN ('complete', 'done')
                     THEN m.completed_at ELSE w.completed_at
                   END,
                   w.full_summary_md, w.full_uat_md, w.goal, w.success_criteria, w.proof_level,
                   w.integration_closure, w.observability_impact, w.adversarial_partner, w.adversarial_combatant,
                   w.adversarial_architect, w.planning_meeting_json, w.sequence, w.replan_triggered_at
            FROM wt.slices w
            LEFT JOIN slices m ON m.milestone_id = w.milestone_id AND m.id = w.id
          `)
            .run(),
        );
        // Merge tasks — preserve execution results, never downgrade completed status (#2558)
        merged.tasks = countChanges(
          adapter
            .prepare(`
            INSERT OR REPLACE INTO tasks (
              milestone_id, slice_id, id, title, status, one_liner, narrative,
              verification_result, duration, completed_at, blocker_discovered,
              deviations, known_issues, key_files, key_decisions, full_summary_md,
              description, estimate, files, verify, inputs, expected_output,
              observability_impact, full_plan_md, sequence
            )
            SELECT w.milestone_id, w.slice_id, w.id, w.title,
                   CASE
                     WHEN m.status IN ('complete', 'done') AND w.status NOT IN ('complete', 'done')
                     THEN m.status ELSE w.status
                   END,
                   w.one_liner, w.narrative,
                   w.verification_result, w.duration,
                   CASE
                     WHEN m.status IN ('complete', 'done') AND w.status NOT IN ('complete', 'done')
                     THEN m.completed_at ELSE w.completed_at
                   END,
                   w.blocker_discovered,
                   w.deviations, w.known_issues, w.key_files, w.key_decisions, w.full_summary_md,
                   w.description, w.estimate, w.files, w.verify, w.inputs, w.expected_output,
                   w.observability_impact, w.full_plan_md, w.sequence
            FROM wt.tasks w
            LEFT JOIN tasks m ON m.milestone_id = w.milestone_id AND m.slice_id = w.slice_id AND m.id = w.id
          `)
            .run(),
        );
        // Merge memories — keep worktree-learned insights
        merged.memories = countChanges(
          adapter
            .prepare(`
            INSERT OR REPLACE INTO memories (
              seq, id, category, content, confidence, source_unit_type, source_unit_id,
              created_at, updated_at, superseded_by, hit_count
            )
            SELECT seq, id, category, content, confidence, source_unit_type, source_unit_id,
                   created_at, updated_at, superseded_by, hit_count
            FROM wt.memories
          `)
            .run(),
        );
        // Merge verification evidence — append-only, use INSERT OR IGNORE to avoid duplicates
        merged.verification_evidence = countChanges(
          adapter
            .prepare(`
            INSERT OR IGNORE INTO verification_evidence (
              task_id, slice_id, milestone_id, command, exit_code, verdict, duration_ms, created_at
            )
            SELECT task_id, slice_id, milestone_id, command, exit_code, verdict, duration_ms, created_at
            FROM wt.verification_evidence
          `)
            .run(),
        );
        adapter.exec("COMMIT");
      } catch (txErr) {
        // Roll back the half-applied merge, then rethrow to the outer
        // handler so the caller gets the all-zero failure result.
        try {
          adapter.exec("ROLLBACK");
        } catch (e) {
          logWarning("db", `rollback failed: ${e.message}`);
        }
        throw txErr;
      }
      return { ...merged, conflicts };
    } finally {
      // Always DETACH, even on failure, so the connection is reusable.
      try {
        adapter.exec("DETACH DATABASE wt");
      } catch (e) {
        logWarning("db", `detach worktree DB failed: ${e.message}`);
      }
    }
  } catch (err) {
    logError("db", "worktree DB reconciliation failed", {
      error: err.message,
    });
    // Conflicts found before the failure are still reported.
    return { ...zero, conflicts };
  }
}
// ─── Replan & Assessment Helpers ──────────────────────────────────────────
/**
 * Record one replan event. INSERT OR REPLACE is idempotent on
 * (milestone_id, slice_id, task_id) via the schema v11 unique index, so
 * retrying the same replan updates the summary instead of piling up rows.
 */
export function insertReplanHistory(entry) {
  if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open");
  const params = {
    ":milestone_id": entry.milestoneId,
    ":slice_id": entry.sliceId ?? null,
    ":task_id": entry.taskId ?? null,
    ":summary": entry.summary,
    ":previous_artifact_path": entry.previousArtifactPath ?? null,
    ":replacement_artifact_path": entry.replacementArtifactPath ?? null,
    ":created_at": new Date().toISOString(),
  };
  const stmt = currentDb.prepare(`INSERT OR REPLACE INTO replan_history (milestone_id, slice_id, task_id, summary, previous_artifact_path, replacement_artifact_path, created_at)
VALUES (:milestone_id, :slice_id, :task_id, :summary, :previous_artifact_path, :replacement_artifact_path, :created_at)`);
  stmt.run(params);
}
/** Upsert an assessment row keyed by path (INSERT OR REPLACE). */
export function insertAssessment(entry) {
  if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open");
  const params = {
    ":path": entry.path,
    ":milestone_id": entry.milestoneId,
    ":slice_id": entry.sliceId ?? null,
    ":task_id": entry.taskId ?? null,
    ":status": entry.status,
    ":scope": entry.scope,
    ":full_content": entry.fullContent,
    ":created_at": new Date().toISOString(),
  };
  const stmt = currentDb.prepare(`INSERT OR REPLACE INTO assessments (path, milestone_id, slice_id, task_id, status, scope, full_content, created_at)
VALUES (:path, :milestone_id, :slice_id, :task_id, :status, :scope, :full_content, :created_at)`);
  stmt.run(params);
}
/** Delete every assessment for a milestone matching the given scope. */
export function deleteAssessmentByScope(milestoneId, scope) {
  if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open");
  const stmt = currentDb.prepare(
    `DELETE FROM assessments WHERE milestone_id = :mid AND scope = :scope`,
  );
  stmt.run({ ":mid": milestoneId, ":scope": scope });
}
/** Delete all verification evidence rows for one task. */
export function deleteVerificationEvidence(milestoneId, sliceId, taskId) {
  if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open");
  const stmt = currentDb.prepare(
    `DELETE FROM verification_evidence WHERE milestone_id = :mid AND slice_id = :sid AND task_id = :tid`,
  );
  stmt.run({ ":mid": milestoneId, ":sid": sliceId, ":tid": taskId });
}
/**
 * Delete a task and its verification evidence atomically.
 *
 * @param {string} milestoneId
 * @param {string} sliceId
 * @param {string} taskId
 * @throws {SFError} SF_STALE_STATE when no database is open.
 */
export function deleteTask(milestoneId, sliceId, taskId) {
  if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open");
  transaction(() => {
    // Must delete verification_evidence first (FK constraint).
    // Consistency: reuse the dedicated wrapper instead of duplicating its SQL.
    deleteVerificationEvidence(milestoneId, sliceId, taskId);
    currentDb
      .prepare(
        `DELETE FROM tasks WHERE milestone_id = :mid AND slice_id = :sid AND id = :tid`,
      )
      .run({ ":mid": milestoneId, ":sid": sliceId, ":tid": taskId });
  });
}
/**
 * Delete a slice and everything hanging off it, atomically.
 * Cascade-style manual deletion: evidence → tasks → dependencies → slice.
 */
export function deleteSlice(milestoneId, sliceId) {
  if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open");
  // Every statement binds only :mid/:sid, so run them in dependency order.
  const cascade = [
    `DELETE FROM verification_evidence WHERE milestone_id = :mid AND slice_id = :sid`,
    `DELETE FROM tasks WHERE milestone_id = :mid AND slice_id = :sid`,
    `DELETE FROM slice_dependencies WHERE milestone_id = :mid AND slice_id = :sid`,
    `DELETE FROM slice_dependencies WHERE milestone_id = :mid AND depends_on_slice_id = :sid`,
    `DELETE FROM slices WHERE milestone_id = :mid AND id = :sid`,
  ];
  transaction(() => {
    for (const sql of cascade) {
      currentDb.prepare(sql).run({ ":mid": milestoneId, ":sid": sliceId });
    }
  });
}
/**
 * Delete a milestone and all dependent rows atomically, children first so
 * foreign-key style constraints are respected.
 */
export function deleteMilestone(milestoneId) {
  if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open");
  // All statements bind only :mid; order matters (children before parent).
  const cascade = [
    `DELETE FROM verification_evidence WHERE milestone_id = :mid`,
    `DELETE FROM quality_gates WHERE milestone_id = :mid`,
    `DELETE FROM gate_runs WHERE milestone_id = :mid`,
    `DELETE FROM tasks WHERE milestone_id = :mid`,
    `DELETE FROM slice_dependencies WHERE milestone_id = :mid`,
    `DELETE FROM slices WHERE milestone_id = :mid`,
    `DELETE FROM replan_history WHERE milestone_id = :mid`,
    `DELETE FROM assessments WHERE milestone_id = :mid`,
    `DELETE FROM artifacts WHERE milestone_id = :mid`,
    `DELETE FROM milestones WHERE id = :mid`,
  ];
  transaction(() => {
    for (const sql of cascade) {
      currentDb.prepare(sql).run({ ":mid": milestoneId });
    }
  });
}
/**
 * Partially update a slice. Each field falls back to its current value via
 * COALESCE when the caller leaves it undefined; `depends` is stored as JSON.
 */
export function updateSliceFields(milestoneId, sliceId, fields) {
  if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open");
  const { title, risk, depends, demo } = fields;
  const params = {
    ":milestone_id": milestoneId,
    ":id": sliceId,
    ":title": title ?? null,
    ":risk": risk ?? null,
    ":depends": depends ? JSON.stringify(depends) : null,
    ":demo": demo ?? null,
  };
  const stmt = currentDb.prepare(`UPDATE slices SET
    title = COALESCE(:title, title),
    risk = COALESCE(:risk, risk),
    depends = COALESCE(:depends, depends),
    demo = COALESCE(:demo, demo)
    WHERE milestone_id = :milestone_id AND id = :id`);
  stmt.run(params);
}
/** Replan history for a milestone, optionally narrowed to one slice, newest first. */
export function getReplanHistory(milestoneId, sliceId) {
  if (!currentDb) return [];
  if (!sliceId) {
    return currentDb
      .prepare(
        `SELECT * FROM replan_history WHERE milestone_id = :mid ORDER BY created_at DESC`,
      )
      .all({ ":mid": milestoneId });
  }
  return currentDb
    .prepare(
      `SELECT * FROM replan_history WHERE milestone_id = :mid AND slice_id = :sid ORDER BY created_at DESC`,
    )
    .all({ ":mid": milestoneId, ":sid": sliceId });
}
/** Look up one assessment row by its path, or null when absent. */
export function getAssessment(path) {
  if (!currentDb) return null;
  const stmt = currentDb.prepare(`SELECT * FROM assessments WHERE path = :path`);
  const found = stmt.get({ ":path": path });
  return found ?? null;
}
/** Most recent assessment for a milestone + scope, or null when none exist. */
export function getAssessmentByScope(milestoneId, scope) {
  if (!currentDb) return null;
  const sql = `SELECT * FROM assessments
       WHERE milestone_id = :mid AND scope = :scope
       ORDER BY created_at DESC
       LIMIT 1`;
  const found = currentDb
    .prepare(sql)
    .get({ ":mid": milestoneId, ":scope": scope });
  return found ?? null;
}
export function getMilestoneValidationAssessment(milestoneId) {
return getAssessmentByScope(milestoneId, "milestone-validation");
}
// ─── Quality Gates ───────────────────────────────────────────────────────
/**
 * Map a raw quality_gates row to the plain gate object shape used by
 * callers. Nullish task_id/evaluated_at and falsy text columns are
 * normalized ('' for strings, null for evaluated_at).
 */
function rowToGate(row) {
  const orEmpty = (value) => value || "";
  const {
    milestone_id,
    slice_id,
    gate_id,
    scope,
    task_id,
    status,
    verdict,
    rationale,
    findings,
    evaluated_at,
  } = row;
  return {
    milestone_id,
    slice_id,
    gate_id,
    scope,
    task_id: task_id ?? "",
    status,
    verdict: orEmpty(verdict),
    rationale: orEmpty(rationale),
    findings: orEmpty(findings),
    evaluated_at: evaluated_at ?? null,
  };
}
/** Seed a quality gate row; INSERT OR IGNORE keeps existing rows untouched. */
export function insertGateRow(g) {
  if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open");
  const params = {
    ":mid": g.milestoneId,
    ":sid": g.sliceId,
    ":gid": g.gateId,
    ":scope": g.scope,
    ":tid": g.taskId ?? "",
    ":status": g.status ?? "pending",
  };
  const stmt = currentDb.prepare(`INSERT OR IGNORE INTO quality_gates (milestone_id, slice_id, gate_id, scope, task_id, status)
VALUES (:mid, :sid, :gid, :scope, :tid, :status)`);
  stmt.run(params);
}
/**
 * Persist a gate verdict: mark the quality_gates row complete, then append
 * a matching entry to the gate_runs ledger. Verdict 'pass' → pass,
 * 'omitted' → manual-attention, anything else → fail.
 */
export function saveGateResult(g) {
  if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open");
  const stmt = currentDb.prepare(`UPDATE quality_gates
SET status = 'complete', verdict = :verdict, rationale = :rationale,
    findings = :findings, evaluated_at = :evaluated_at
WHERE milestone_id = :mid AND slice_id = :sid AND gate_id = :gid
  AND task_id = :tid`);
  stmt.run({
    ":mid": g.milestoneId,
    ":sid": g.sliceId,
    ":gid": g.gateId,
    ":tid": g.taskId ?? "",
    ":verdict": g.verdict,
    ":rationale": g.rationale,
    ":findings": g.findings,
    ":evaluated_at": new Date().toISOString(),
  });
  // Derive the ledger outcome from the verdict.
  let outcome;
  if (g.verdict === "pass") {
    outcome = "pass";
  } else if (g.verdict === "omitted") {
    outcome = "manual-attention";
  } else {
    outcome = "fail";
  }
  let failureClass = "none";
  if (outcome === "fail") {
    failureClass = "verification";
  } else if (outcome === "manual-attention") {
    failureClass = "manual-attention";
  }
  insertGateRun({
    traceId: `quality-gate:${g.milestoneId}:${g.sliceId}`,
    turnId: `gate:${g.gateId}:${g.taskId ?? "slice"}`,
    gateId: g.gateId,
    gateType: "quality-gate",
    milestoneId: g.milestoneId,
    sliceId: g.sliceId,
    taskId: g.taskId ?? undefined,
    outcome,
    failureClass,
    rationale: g.rationale,
    findings: g.findings,
    attempt: 1,
    maxAttempts: 1,
    retryable: false,
    evaluatedAt: new Date().toISOString(),
  });
}
/** Pending quality gates for a slice, optionally filtered by scope. */
export function getPendingGates(milestoneId, sliceId, scope) {
  if (!currentDb) return [];
  const params = { ":mid": milestoneId, ":sid": sliceId };
  let sql;
  if (scope) {
    sql = `SELECT * FROM quality_gates WHERE milestone_id = :mid AND slice_id = :sid AND scope = :scope AND status = 'pending'`;
    params[":scope"] = scope;
  } else {
    sql = `SELECT * FROM quality_gates WHERE milestone_id = :mid AND slice_id = :sid AND status = 'pending'`;
  }
  const rows = currentDb.prepare(sql).all(params);
  return rows.map(rowToGate);
}
/** All quality gate rows for a slice (any status), optionally filtered by scope. */
export function getGateResults(milestoneId, sliceId, scope) {
  if (!currentDb) return [];
  const params = { ":mid": milestoneId, ":sid": sliceId };
  let sql;
  if (scope) {
    sql = `SELECT * FROM quality_gates WHERE milestone_id = :mid AND slice_id = :sid AND scope = :scope`;
    params[":scope"] = scope;
  } else {
    sql = `SELECT * FROM quality_gates WHERE milestone_id = :mid AND slice_id = :sid`;
  }
  const rows = currentDb.prepare(sql).all(params);
  return rows.map(rowToGate);
}
/** Flip every still-pending gate on a slice to 'omitted', stamped with now. */
export function markAllGatesOmitted(milestoneId, sliceId) {
  if (!currentDb) return;
  const params = {
    ":mid": milestoneId,
    ":sid": sliceId,
    ":now": new Date().toISOString(),
  };
  const stmt = currentDb.prepare(`UPDATE quality_gates SET status = 'omitted', verdict = 'omitted', evaluated_at = :now
WHERE milestone_id = :mid AND slice_id = :sid AND status = 'pending'`);
  stmt.run(params);
}
/** Number of slice-scoped gates still pending for a slice. */
export function getPendingSliceGateCount(milestoneId, sliceId) {
  if (!currentDb) return 0;
  const stmt = currentDb.prepare(`SELECT COUNT(*) as cnt FROM quality_gates
WHERE milestone_id = :mid AND slice_id = :sid AND scope = 'slice' AND status = 'pending'`);
  const found = stmt.get({ ":mid": milestoneId, ":sid": sliceId });
  if (!found) return 0;
  return found["cnt"];
}
/**
 * Return pending gate rows owned by a specific workflow turn.
 *
 * Unlike `getPendingGates(..., scope)`, this filters by the registry's
 * `ownerTurn` metadata so callers can distinguish Q3/Q4 (owned by
 * gate-evaluate) from Q8 (owned by complete-slice) even though both are
 * scope:"slice". Pass `taskId` to narrow task-scoped results to one task.
 */
export function getPendingGatesForTurn(milestoneId, sliceId, turn, taskId) {
  if (!currentDb) return [];
  const idList = [...getGateIdsForTurn(turn)];
  if (idList.length === 0) return [];
  const params = { ":mid": milestoneId, ":sid": sliceId };
  // Build one named placeholder per gate id and bind them all.
  const names = idList.map((_, i) => `:gid${i}`);
  idList.forEach((gateId, i) => {
    params[names[i]] = gateId;
  });
  let sql = `SELECT * FROM quality_gates
    WHERE milestone_id = :mid AND slice_id = :sid
    AND status = 'pending'
    AND gate_id IN (${names.join(",")})`;
  if (taskId !== undefined) {
    sql += ` AND task_id = :tid`;
    params[":tid"] = taskId;
  }
  return currentDb.prepare(sql).all(params).map(rowToGate);
}
/**
 * Count pending gates for a turn. Convenience wrapper used by state
 * derivation to decide whether a phase transition should pause.
 */
export function getPendingGateCountForTurn(milestoneId, sliceId, turn) {
  const pending = getPendingGatesForTurn(milestoneId, sliceId, turn);
  return pending.length;
}
/**
 * Append one row to the `gate_runs` ledger.
 *
 * Best-effort telemetry: silently no-ops when no DB is open. Optional
 * fields default to NULL (ids, durations, cost) or '' (rationale,
 * findings); `retryable` is stored as 0/1.
 */
export function insertGateRun(entry) {
  if (!currentDb) return;
  currentDb
    .prepare(`INSERT INTO gate_runs (
      trace_id, turn_id, gate_id, gate_type, unit_type, unit_id, milestone_id, slice_id, task_id,
      outcome, failure_class, rationale, findings, attempt, max_attempts, retryable, evaluated_at, duration_ms, cost_micro_usd
    ) VALUES (
      :trace_id, :turn_id, :gate_id, :gate_type, :unit_type, :unit_id, :milestone_id, :slice_id, :task_id,
      :outcome, :failure_class, :rationale, :findings, :attempt, :max_attempts, :retryable, :evaluated_at, :duration_ms, :cost_micro_usd
    )`)
    .run({
      ":trace_id": entry.traceId,
      ":turn_id": entry.turnId,
      ":gate_id": entry.gateId,
      ":gate_type": entry.gateType,
      ":unit_type": entry.unitType ?? null,
      ":unit_id": entry.unitId ?? null,
      ":milestone_id": entry.milestoneId ?? null,
      ":slice_id": entry.sliceId ?? null,
      ":task_id": entry.taskId ?? null,
      ":outcome": entry.outcome,
      ":failure_class": entry.failureClass,
      ":rationale": entry.rationale ?? "",
      ":findings": entry.findings ?? "",
      ":attempt": entry.attempt,
      ":max_attempts": entry.maxAttempts,
      // SQLite has no boolean type — persist as 0/1.
      ":retryable": entry.retryable ? 1 : 0,
      ":evaluated_at": entry.evaluatedAt,
      ":duration_ms": entry.durationMs ?? null,
      ":cost_micro_usd": entry.costMicroUsd ?? null,
    });
}
/**
 * Upsert the git-transaction state for a workflow turn (INSERT OR REPLACE,
 * keyed by the table's primary key). `push` is stored as 0/1 and
 * `metadata` is JSON-encoded. No-ops when no DB is open.
 */
export function upsertTurnGitTransaction(entry) {
  if (!currentDb) return;
  currentDb
    .prepare(`INSERT OR REPLACE INTO turn_git_transactions (
      trace_id, turn_id, unit_type, unit_id, stage, action, push, status, error, metadata_json, updated_at
    ) VALUES (
      :trace_id, :turn_id, :unit_type, :unit_id, :stage, :action, :push, :status, :error, :metadata_json, :updated_at
    )`)
    .run({
      ":trace_id": entry.traceId,
      ":turn_id": entry.turnId,
      ":unit_type": entry.unitType ?? null,
      ":unit_id": entry.unitId ?? null,
      ":stage": entry.stage,
      ":action": entry.action,
      ":push": entry.push ? 1 : 0,
      ":status": entry.status,
      ":error": entry.error ?? null,
      ":metadata_json": JSON.stringify(entry.metadata ?? {}),
      ":updated_at": entry.updatedAt,
    });
}
/**
 * Record the start of a UOK run (upsert keyed by run_id).
 *
 * A restarted run_id is fully reset: status back to 'started', ended_at
 * and error cleared, session/path/flags overwritten. `startedAt` defaults
 * to now. No-ops when no DB is open.
 */
export function recordUokRunStart(entry) {
  if (!currentDb) return;
  const now = entry.startedAt ?? new Date().toISOString();
  currentDb
    .prepare(`INSERT INTO uok_runs (
      run_id, session_id, path, status, started_at, ended_at, error, flags_json, updated_at
    ) VALUES (
      :run_id, :session_id, :path, 'started', :started_at, NULL, NULL, :flags_json, :updated_at
    )
    ON CONFLICT(run_id) DO UPDATE SET
      session_id = excluded.session_id,
      path = excluded.path,
      status = 'started',
      started_at = excluded.started_at,
      ended_at = NULL,
      error = NULL,
      flags_json = excluded.flags_json,
      updated_at = excluded.updated_at`)
    .run({
      ":run_id": entry.runId,
      ":session_id": entry.sessionId ?? null,
      ":path": entry.path ?? "",
      ":started_at": now,
      ":flags_json": JSON.stringify(entry.flags ?? {}),
      ":updated_at": now,
    });
}
/**
 * Record the end of a UOK run (upsert keyed by run_id).
 *
 * On conflict with an existing row, empty/absent exit-side values do not
 * clobber what the start recorded: session_id falls back via COALESCE,
 * and an empty path or '{}' flags_json keeps the stored value. status
 * defaults to 'ok'; `endedAt` defaults to now. No-ops when no DB is open.
 */
export function recordUokRunExit(entry) {
  if (!currentDb) return;
  const now = entry.endedAt ?? new Date().toISOString();
  currentDb
    .prepare(`INSERT INTO uok_runs (
      run_id, session_id, path, status, started_at, ended_at, error, flags_json, updated_at
    ) VALUES (
      :run_id, :session_id, :path, :status, :started_at, :ended_at, :error, :flags_json, :updated_at
    )
    ON CONFLICT(run_id) DO UPDATE SET
      session_id = COALESCE(excluded.session_id, uok_runs.session_id),
      path = CASE WHEN excluded.path = '' THEN uok_runs.path ELSE excluded.path END,
      status = excluded.status,
      ended_at = excluded.ended_at,
      error = excluded.error,
      flags_json = CASE WHEN excluded.flags_json = '{}' THEN uok_runs.flags_json ELSE excluded.flags_json END,
      updated_at = excluded.updated_at`)
    .run({
      ":run_id": entry.runId,
      ":session_id": entry.sessionId ?? null,
      ":path": entry.path ?? "",
      ":status": entry.status ?? "ok",
      ":started_at": entry.startedAt ?? now,
      ":ended_at": now,
      ":error": entry.error ?? null,
      ":flags_json": JSON.stringify(entry.flags ?? {}),
      ":updated_at": now,
    });
}
/**
 * List UOK runs, newest first, up to `limit` rows (default 500), mapped to
 * camelCase objects. flags_json is parsed defensively — malformed JSON
 * yields {}.
 */
export function getUokRuns(limit = 500) {
  if (!currentDb) return [];
  const parseFlags = (raw) => {
    try {
      return JSON.parse(raw || "{}");
    } catch {
      return {};
    }
  };
  const rows = currentDb
    .prepare(
      `SELECT run_id, session_id, path, status, started_at, ended_at, error, flags_json, updated_at
       FROM uok_runs
       ORDER BY started_at DESC
       LIMIT :limit`,
    )
    .all({ ":limit": limit });
  return rows.map((row) => ({
    runId: row.run_id,
    sessionId: row.session_id,
    path: row.path,
    status: row.status,
    startedAt: row.started_at,
    endedAt: row.ended_at,
    error: row.error,
    flags: parseFlags(row.flags_json),
    updatedAt: row.updated_at,
  }));
}
/**
 * Append an audit event and maintain the per-turn aggregate index, in one
 * transaction.
 *
 * The event insert is idempotent (INSERT OR IGNORE on event_id). Fix: the
 * audit_turn_index update now only runs when the event row was actually
 * inserted — previously a duplicate event_id was ignored by audit_events
 * but still bumped event_count (and could widen first_ts/last_ts),
 * silently drifting the aggregate. No-ops when no DB is open.
 */
export function insertAuditEvent(entry) {
  if (!currentDb) return;
  transaction(() => {
    const result = currentDb
      .prepare(`INSERT OR IGNORE INTO audit_events (
      event_id, trace_id, turn_id, caused_by, category, type, ts, payload_json
    ) VALUES (
      :event_id, :trace_id, :turn_id, :caused_by, :category, :type, :ts, :payload_json
    )`)
      .run({
        ":event_id": entry.eventId,
        ":trace_id": entry.traceId,
        ":turn_id": entry.turnId ?? null,
        ":caused_by": entry.causedBy ?? null,
        ":category": entry.category,
        ":type": entry.type,
        ":ts": entry.ts,
        ":payload_json": JSON.stringify(entry.payload ?? {}),
      });
    const inserted =
      typeof result === "object" && result !== null ? (result.changes ?? 0) : 0;
    // Duplicate event (OR IGNORE skipped the insert): leave the index alone.
    if (inserted === 0) return;
    if (entry.turnId) {
      const row = currentDb
        .prepare(`SELECT event_count, first_ts, last_ts
      FROM audit_turn_index
      WHERE trace_id = :trace_id AND turn_id = :turn_id`)
        .get({
          ":trace_id": entry.traceId,
          ":turn_id": entry.turnId,
        });
      if (row) {
        // Widen the [first_ts, last_ts] window and bump the count.
        currentDb
          .prepare(`UPDATE audit_turn_index
        SET first_ts = CASE WHEN :ts < first_ts THEN :ts ELSE first_ts END,
            last_ts = CASE WHEN :ts > last_ts THEN :ts ELSE last_ts END,
            event_count = event_count + 1
        WHERE trace_id = :trace_id AND turn_id = :turn_id`)
          .run({
            ":trace_id": entry.traceId,
            ":turn_id": entry.turnId,
            ":ts": entry.ts,
          });
      } else {
        currentDb
          .prepare(`INSERT INTO audit_turn_index (trace_id, turn_id, first_ts, last_ts, event_count)
        VALUES (:trace_id, :turn_id, :first_ts, :last_ts, :event_count)`)
          .run({
            ":trace_id": entry.traceId,
            ":turn_id": entry.turnId,
            ":first_ts": entry.ts,
            ":last_ts": entry.ts,
            ":event_count": 1,
          });
      }
    }
  });
}
// ─── Single-writer bypass wrappers ───────────────────────────────────────
// These wrappers exist so modules outside this file never need to call
// `_getAdapter()` for writes. Each one is a byte-equivalent replacement for
// a raw prepare/run previously issued from another module. Keep them
// minimal and direct — they exist to hold SQL text in one place, not to
// add new behavior.
/** Delete a decision row by id. Used by db-writer.ts rollback on disk-write failure. */
export function deleteDecisionById(id) {
  if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open");
  const stmt = currentDb.prepare("DELETE FROM decisions WHERE id = :id");
  stmt.run({ ":id": id });
}
/** Delete a requirement row by id. Used by db-writer.ts rollback on disk-write failure. */
export function deleteRequirementById(id) {
  if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open");
  const stmt = currentDb.prepare("DELETE FROM requirements WHERE id = :id");
  stmt.run({ ":id": id });
}
/** Delete an artifact row by path. Used by db-writer.ts rollback on disk-write failure. */
export function deleteArtifactByPath(path) {
  if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open");
  const stmt = currentDb.prepare("DELETE FROM artifacts WHERE path = :path");
  stmt.run({ ":path": path });
}
/**
 * Drop all rows from tasks/slices/milestones in dependency order inside a
 * transaction. Used by `sf recover` to rebuild engine state from markdown.
 */
export function clearEngineHierarchy() {
  if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open");
  // Children first so the wipe respects the hierarchy.
  const tables = ["tasks", "slices", "milestones"];
  transaction(() => {
    for (const table of tables) {
      currentDb.exec(`DELETE FROM ${table}`);
    }
  });
}
/**
 * INSERT OR IGNORE a slice during event replay (workflow-reconcile.ts).
 * Strict insert-or-ignore semantics are required here to avoid the
 * `insertSlice` ON CONFLICT path that could downgrade an already-completed
 * slice back to 'pending'.
 */
export function insertOrIgnoreSlice(args) {
  if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open");
  const params = {
    ":mid": args.milestoneId,
    ":sid": args.sliceId,
    ":title": args.title,
    ":ts": args.createdAt,
  };
  const stmt = currentDb.prepare(`INSERT OR IGNORE INTO slices (milestone_id, id, title, status, created_at)
VALUES (:mid, :sid, :title, 'pending', :ts)`);
  stmt.run(params);
}
/**
 * INSERT OR IGNORE a task during event replay (workflow-reconcile.ts).
 * Same rationale as `insertOrIgnoreSlice`.
 */
export function insertOrIgnoreTask(args) {
  if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open");
  const params = {
    ":mid": args.milestoneId,
    ":sid": args.sliceId,
    ":tid": args.taskId,
    ":title": args.title,
    ":ts": args.createdAt,
  };
  const stmt = currentDb.prepare(`INSERT OR IGNORE INTO tasks (milestone_id, slice_id, id, title, status, created_at)
VALUES (:mid, :sid, :tid, :title, 'pending', :ts)`);
  stmt.run(params);
}
/**
 * Stamp the `replan_triggered_at` column on a slice. Used by triage-resolution
 * when a user capture requests a replan so the dispatcher can detect the
 * trigger via DB in addition to the on-disk REPLAN-TRIGGER.md marker.
 */
export function setSliceReplanTriggeredAt(milestoneId, sliceId, ts) {
  if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open");
  const stmt = currentDb.prepare(
    "UPDATE slices SET replan_triggered_at = :ts WHERE milestone_id = :mid AND id = :sid",
  );
  stmt.run({ ":ts": ts, ":mid": milestoneId, ":sid": sliceId });
}
/** Map a tri-state boolean to SQLite storage: null/undefined → null, truthy → 1, falsy → 0. */
function boolToInt(value) {
  if (value == null) return null;
  return value ? 1 : 0;
}
/**
 * Insert or update one LLM task outcome, keyed by
 * (unit_type, unit_id, recorded_at) via the ON CONFLICT clause.
 *
 * Booleans are stored as 0/1 (NULL for the tri-state
 * verification_passed); recorded_at defaults to Date.now().
 *
 * @returns {boolean} true on success, false on any DB error — best-effort
 *   telemetry, callers must never crash on a failed write.
 */
export function insertLlmTaskOutcome(input) {
  if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open");
  try {
    currentDb
      .prepare(`INSERT INTO llm_task_outcomes (
        model_id,
        provider,
        unit_type,
        unit_id,
        succeeded,
        retries,
        escalated,
        verification_passed,
        blocker_discovered,
        duration_ms,
        tokens_total,
        cost_usd,
        recorded_at
      ) VALUES (
        :model_id,
        :provider,
        :unit_type,
        :unit_id,
        :succeeded,
        :retries,
        :escalated,
        :verification_passed,
        :blocker_discovered,
        :duration_ms,
        :tokens_total,
        :cost_usd,
        :recorded_at
      )
      ON CONFLICT(unit_type, unit_id, recorded_at) DO UPDATE SET
        model_id = excluded.model_id,
        provider = excluded.provider,
        succeeded = excluded.succeeded,
        retries = excluded.retries,
        escalated = excluded.escalated,
        verification_passed = excluded.verification_passed,
        blocker_discovered = excluded.blocker_discovered,
        duration_ms = excluded.duration_ms,
        tokens_total = excluded.tokens_total,
        cost_usd = excluded.cost_usd`)
      .run({
        ":model_id": input.modelId,
        ":provider": input.provider,
        ":unit_type": input.unitType,
        ":unit_id": input.unitId,
        ":succeeded": boolToInt(input.succeeded),
        ":retries": input.retries ?? 0,
        ":escalated": boolToInt(input.escalated ?? false),
        ":verification_passed": boolToInt(input.verification_passed ?? null),
        ":blocker_discovered": boolToInt(input.blocker_discovered ?? false),
        ":duration_ms": input.duration_ms ?? null,
        ":tokens_total": input.tokens_total ?? null,
        ":cost_usd": input.cost_usd ?? null,
        ":recorded_at": input.recorded_at ?? Date.now(),
      });
    return true;
  } catch {
    // Deliberate best-effort: swallow DB errors and report failure via the
    // return value instead of throwing.
    return false;
  }
}
/**
 * Query LLM task outcomes for a specific unit, newest first.
 *
 * Purpose: enable outcome-learning and cost-guard gates to inspect
 * historical performance of a unit type + id combination.
 *
 * Consumer: uok/outcome-learning-gate.js, uok/cost-guard-gate.js.
 */
export function getLlmTaskOutcomesByUnit(unitType, unitId, limit = 20) {
  if (!currentDb) return [];
  const sql = `SELECT
         model_id,
         provider,
         unit_type,
         unit_id,
         succeeded,
         retries,
         escalated,
         verification_passed,
         blocker_discovered,
         duration_ms,
         tokens_total,
         cost_usd,
         recorded_at
       FROM llm_task_outcomes
       WHERE unit_type = :unit_type
         AND unit_id = :unit_id
       ORDER BY recorded_at DESC
       LIMIT :limit`;
  const params = {
    ":unit_type": unitType,
    ":unit_id": unitId,
    ":limit": limit,
  };
  try {
    return currentDb.prepare(sql).all(params);
  } catch {
    // Best-effort read: an unreadable table yields an empty history.
    return [];
  }
}
/**
 * Query LLM task outcomes for a specific model, newest first.
 *
 * Purpose: enable cost-guard to detect models with high failure rates
 * or excessive cumulative spend.
 *
 * Consumer: uok/cost-guard-gate.js.
 */
export function getLlmTaskOutcomesByModel(modelId, limit = 50) {
  if (!currentDb) return [];
  const sql = `SELECT
         model_id,
         provider,
         unit_type,
         unit_id,
         succeeded,
         retries,
         escalated,
         verification_passed,
         blocker_discovered,
         duration_ms,
         tokens_total,
         cost_usd,
         recorded_at
       FROM llm_task_outcomes
       WHERE model_id = :model_id
       ORDER BY recorded_at DESC
       LIMIT :limit`;
  const params = { ":model_id": modelId, ":limit": limit };
  try {
    return currentDb.prepare(sql).all(params);
  } catch {
    // Best-effort read: an unreadable table yields an empty history.
    return [];
  }
}
/**
 * Query recent LLM task outcomes across all units.
 *
 * Purpose: provide a rolling window of outcomes for system-wide
 * health and spend analysis.
 *
 * @param {number} [hours=24] - Lookback window in hours.
 * @param {number} [limit=100] - Maximum number of rows to return.
 * @returns {Array<object>} Outcome rows, newest first; [] on no DB / error.
 *
 * Consumer: uok/diagnostic-synthesis.js, uok/cost-guard-gate.js.
 */
export function getRecentLlmTaskOutcomes(hours = 24, limit = 100) {
  if (!currentDb) return [];
  // Epoch-millisecond cutoff; recorded_at is stored as Date.now() values.
  const since = Date.now() - hours * 60 * 60 * 1000;
  try {
    const stmt = currentDb.prepare(
      `SELECT
          model_id,
          provider,
          unit_type,
          unit_id,
          succeeded,
          retries,
          escalated,
          verification_passed,
          blocker_discovered,
          duration_ms,
          tokens_total,
          cost_usd,
          recorded_at
        FROM llm_task_outcomes
        WHERE recorded_at >= :cutoff
        ORDER BY recorded_at DESC
        LIMIT :limit`,
    );
    return stmt.all({ ":cutoff": since, ":limit": limit });
  } catch {
    return [];
  }
}
/**
 * Aggregate LLM task outcome statistics for a model over a time window.
 *
 * @param {string} modelId - Model identifier to aggregate over.
 * @param {number} [windowHours=24] - Lookback window in hours.
 * @returns {{total: number, succeeded: number, failed: number,
 *   totalCostUsd: number, totalTokens: number, avgDurationMs: number}}
 *   Zeroed stats when no DB is open or the query fails.
 *
 * Consumer: uok/cost-guard-gate.js, uok/outcome-learning-gate.js.
 */
export function getLlmTaskOutcomeStats(modelId, windowHours = 24) {
  // Fresh zeroed object per call so callers may mutate safely.
  const zeroStats = () => ({
    total: 0,
    succeeded: 0,
    failed: 0,
    totalCostUsd: 0,
    totalTokens: 0,
    avgDurationMs: 0,
  });
  if (!currentDb) return zeroStats();
  const since = Date.now() - windowHours * 60 * 60 * 1000;
  try {
    const row = currentDb
      .prepare(
        `SELECT
          COUNT(*) AS total,
          COALESCE(SUM(CASE WHEN succeeded = 1 THEN 1 ELSE 0 END), 0) AS succeeded,
          COALESCE(SUM(CASE WHEN succeeded = 0 THEN 1 ELSE 0 END), 0) AS failed,
          COALESCE(SUM(cost_usd), 0) AS totalCostUsd,
          COALESCE(SUM(tokens_total), 0) AS totalTokens,
          COALESCE(AVG(duration_ms), 0) AS avgDurationMs
        FROM llm_task_outcomes
        WHERE model_id = :model_id
          AND recorded_at >= :cutoff`,
      )
      .get({ ":model_id": modelId, ":cutoff": since });
    if (!row) return zeroStats();
    return {
      total: row.total ?? 0,
      succeeded: row.succeeded ?? 0,
      failed: row.failed ?? 0,
      totalCostUsd: row.totalCostUsd ?? 0,
      totalTokens: row.totalTokens ?? 0,
      avgDurationMs: row.avgDurationMs ?? 0,
    };
  } catch {
    return zeroStats();
  }
}
/**
 * Aggregate gate run statistics for a specific gate over a time window.
 *
 * @param {string} gateId - Gate identifier in gate_runs.
 * @param {number} [windowHours=24] - Lookback window in hours.
 * @returns {{total: number, pass: number, fail: number, retry: number,
 *   manualAttention: number, lastEvaluatedAt: (string|null)}}
 *   Zeroed stats when no DB is open or the query fails.
 *
 * Consumer: uok/diagnostic-synthesis.js, uok/gate-runner.js health checks.
 */
export function getGateRunStats(gateId, windowHours = 24) {
  const zeroStats = () => ({
    total: 0,
    pass: 0,
    fail: 0,
    retry: 0,
    manualAttention: 0,
    lastEvaluatedAt: null,
  });
  if (!currentDb) return zeroStats();
  // gate_runs stores ISO timestamps, so the cutoff is ISO too.
  const since = new Date(Date.now() - windowHours * 60 * 60 * 1000).toISOString();
  try {
    const row = currentDb
      .prepare(
        `SELECT
          COUNT(*) AS total,
          COALESCE(SUM(CASE WHEN outcome = 'pass' THEN 1 ELSE 0 END), 0) AS pass,
          COALESCE(SUM(CASE WHEN outcome = 'fail' THEN 1 ELSE 0 END), 0) AS fail,
          COALESCE(SUM(CASE WHEN outcome = 'retry' THEN 1 ELSE 0 END), 0) AS retry,
          COALESCE(SUM(CASE WHEN outcome = 'manual-attention' THEN 1 ELSE 0 END), 0) AS manualAttention,
          MAX(evaluated_at) AS lastEvaluatedAt
        FROM gate_runs
        WHERE gate_id = :gate_id
          AND evaluated_at >= :cutoff`,
      )
      .get({ ":gate_id": gateId, ":cutoff": since });
    if (!row) return zeroStats();
    return {
      total: row.total ?? 0,
      pass: row.pass ?? 0,
      fail: row.fail ?? 0,
      retry: row.retry ?? 0,
      manualAttention: row.manualAttention ?? 0,
      lastEvaluatedAt: row.lastEvaluatedAt ?? null,
    };
  } catch {
    return zeroStats();
  }
}
/**
 * Read the circuit breaker state for a specific gate.
 *
 * @param {string} gateId - Gate identifier.
 * @returns {{gateId: string, state: string, failureStreak: number,
 *   lastFailureAt: (string|null), openedAt: (string|null),
 *   halfOpenAttempts: number, updatedAt: (string|null)}}
 *   A default closed state when no record exists, no DB is open, or the
 *   query fails.
 *
 * Consumer: uok/gate-runner.js before executing a gate.
 */
export function getGateCircuitBreaker(gateId) {
  // Default "closed" breaker, used for every fallback path.
  const closedState = () => ({
    gateId,
    state: "closed",
    failureStreak: 0,
    lastFailureAt: null,
    openedAt: null,
    halfOpenAttempts: 0,
    updatedAt: null,
  });
  if (!currentDb) return closedState();
  try {
    const row = currentDb
      .prepare(
        `SELECT gate_id, state, failure_streak, last_failure_at, opened_at, half_open_attempts, updated_at
        FROM gate_circuit_breakers
        WHERE gate_id = :gate_id`,
      )
      .get({ ":gate_id": gateId });
    if (!row) return closedState();
    return {
      gateId: row.gate_id,
      state: row.state,
      failureStreak: row.failure_streak ?? 0,
      lastFailureAt: row.last_failure_at ?? null,
      openedAt: row.opened_at ?? null,
      halfOpenAttempts: row.half_open_attempts ?? 0,
      updatedAt: row.updated_at ?? null,
    };
  } catch {
    return closedState();
  }
}
/**
 * Update the circuit breaker state for a specific gate.
 *
 * Upserts one row in `gate_circuit_breakers`. `last_failure_at` and
 * `opened_at` are only overwritten when the update supplies a value
 * (COALESCE keeps the previously stored timestamp otherwise).
 *
 * Fix: removed a stray `return { total, avgMs, p50Ms, p95Ms, maxMs }`
 * left over from a copy/paste of getGateLatencyStats — this is a void
 * write helper, not a stats reader.
 *
 * @param {string} gateId - Gate identifier.
 * @param {{state?: string, failureStreak?: number,
 *   lastFailureAt?: (string|null), openedAt?: (string|null),
 *   halfOpenAttempts?: number}} updates - New breaker fields.
 * @returns {void}
 *
 * Consumer: uok/gate-runner.js after executing a gate.
 */
export function updateGateCircuitBreaker(gateId, updates) {
  if (!currentDb) return;
  currentDb
    .prepare(
      `INSERT INTO gate_circuit_breakers (
        gate_id, state, failure_streak, last_failure_at, opened_at, half_open_attempts, updated_at
      ) VALUES (
        :gate_id, :state, :failure_streak, :last_failure_at, :opened_at, :half_open_attempts, :updated_at
      )
      ON CONFLICT(gate_id) DO UPDATE SET
        state = excluded.state,
        failure_streak = excluded.failure_streak,
        last_failure_at = COALESCE(excluded.last_failure_at, gate_circuit_breakers.last_failure_at),
        opened_at = COALESCE(excluded.opened_at, gate_circuit_breakers.opened_at),
        half_open_attempts = excluded.half_open_attempts,
        updated_at = excluded.updated_at`,
    )
    .run({
      ":gate_id": gateId,
      ":state": updates.state ?? "closed",
      ":failure_streak": updates.failureStreak ?? 0,
      ":last_failure_at": updates.lastFailureAt ?? null,
      ":opened_at": updates.openedAt ?? null,
      ":half_open_attempts": updates.halfOpenAttempts ?? 0,
      ":updated_at": new Date().toISOString(),
    });
}
/**
 * Latency statistics (avg/p50/p95/max) for one gate over a rolling window.
 *
 * @param {string} gateId - Gate identifier in gate_runs.
 * @param {number} [windowHours=24] - Lookback window in hours.
 * @returns {{total: number, avgMs: number, p50Ms: number, p95Ms: number,
 *   maxMs: number}} Zeroed stats when no DB is open, no runs exist, or
 *   the query fails.
 */
export function getGateLatencyStats(gateId, windowHours = 24) {
  const zeroStats = () => ({ total: 0, avgMs: 0, p50Ms: 0, p95Ms: 0, maxMs: 0 });
  if (!currentDb) return zeroStats();
  const since = new Date(Date.now() - windowHours * 60 * 60 * 1000).toISOString();
  const bind = { ":gate_id": gateId, ":cutoff": since };
  try {
    const agg = currentDb
      .prepare(
        `SELECT
          COUNT(*) AS total,
          COALESCE(AVG(duration_ms), 0) AS avgMs,
          COALESCE(MAX(duration_ms), 0) AS maxMs
        FROM gate_runs
        WHERE gate_id = :gate_id AND evaluated_at >= :cutoff`,
      )
      .get(bind);
    if (!agg || agg.total === 0) return zeroStats();
    // Percentiles require the sorted per-run samples, not just SQL aggregates.
    const samples = currentDb
      .prepare(
        `SELECT duration_ms
        FROM gate_runs
        WHERE gate_id = :gate_id AND evaluated_at >= :cutoff AND duration_ms IS NOT NULL
        ORDER BY duration_ms`,
      )
      .all(bind)
      .map((r) => r.duration_ms);
    // NOTE(review): index formula floor(len * q) matches the original; it
    // picks the element at (not below) the quantile boundary.
    const pick = (q) => samples[Math.floor(samples.length * q)] ?? 0;
    return {
      total: agg.total ?? 0,
      avgMs: Math.round(agg.avgMs ?? 0),
      p50Ms: pick(0.5),
      p95Ms: pick(0.95),
      maxMs: agg.maxMs ?? 0,
    };
  } catch {
    return zeroStats();
  }
}
/**
 * List every distinct gate id that has at least one recorded run.
 *
 * @returns {Array<string>} Non-empty gate ids; [] on no DB / error.
 */
export function getDistinctGateIds() {
  if (!currentDb) return [];
  try {
    const ids = [];
    for (const row of currentDb
      .prepare("SELECT DISTINCT gate_id FROM gate_runs")
      .all()) {
      // Skip null/empty ids so callers get usable identifiers only.
      if (row.gate_id) ids.push(row.gate_id);
    }
    return ids;
  } catch {
    return [];
  }
}
/**
 * Persist one UOK agent-to-agent message (idempotent by message id).
 *
 * @param {{id: string, from: string, to: string, body?: string,
 *   metadata?: object, sentAt: string, deliveredAt?: (string|null)}} msg
 * @returns {void} No-op when no DB is open.
 */
export function insertUokMessage(msg) {
  if (!currentDb) return;
  const bind = {
    ":id": msg.id,
    ":from_agent": msg.from,
    ":to_agent": msg.to,
    ":body": msg.body ?? "",
    ":metadata_json": JSON.stringify(msg.metadata ?? {}),
    ":sent_at": msg.sentAt,
    ":delivered_at": msg.deliveredAt ?? null,
  };
  // INSERT OR IGNORE: re-inserting the same id is a silent no-op.
  const stmt = currentDb.prepare(
    `INSERT OR IGNORE INTO uok_messages (id, from_agent, to_agent, body, metadata_json, sent_at, delivered_at)
     VALUES (:id, :from_agent, :to_agent, :body, :metadata_json, :sent_at, :delivered_at)`,
  );
  stmt.run(bind);
}
/**
 * Fetch messages addressed to an agent, oldest first, with read flags.
 *
 * @param {string} agentId - Recipient agent id.
 * @param {number} [limit=1000] - Clamped to [1, 10000].
 * @param {boolean} [unreadOnly=false] - When true, only unread messages.
 * @returns {Array<object>} Hydrated message objects; [] on no DB / error.
 */
export function getUokMessagesForAgent(
  agentId,
  limit = 1000,
  unreadOnly = false,
) {
  if (!currentDb) return [];
  try {
    const parts = [
      `SELECT m.id, m.from_agent AS "from", m.to_agent AS "to", m.body, m.metadata_json AS metadataJson, m.sent_at AS sentAt, m.delivered_at AS deliveredAt,
        CASE WHEN r.agent_id IS NOT NULL THEN 1 ELSE 0 END AS read
      FROM uok_messages m
      LEFT JOIN uok_message_reads r ON r.message_id = m.id AND r.agent_id = :agent_id
      WHERE m.to_agent = :agent_id`,
    ];
    // Unread = no matching read receipt joined for this agent.
    if (unreadOnly) parts.push(" AND r.agent_id IS NULL");
    parts.push(" ORDER BY m.sent_at ASC LIMIT :limit");
    const rows = currentDb.prepare(parts.join("")).all({
      ":agent_id": agentId,
      ":limit": Math.max(1, Math.min(10_000, Number(limit) || 1000)),
    });
    return rows.map((row) => ({
      id: row.id,
      from: row.from,
      to: row.to,
      body: row.body,
      metadata: parseJsonObject(row.metadataJson, {}),
      sentAt: row.sentAt,
      deliveredAt: row.deliveredAt,
      read: Boolean(row.read),
    }));
  } catch {
    return [];
  }
}
/**
 * Fetch the two-way conversation between two agents, newest first.
 *
 * @param {string} agentA - One participant.
 * @param {string} agentB - The other participant.
 * @param {number} [limit=1000] - Maximum rows returned.
 * @returns {Array<object>} Hydrated message objects; [] on no DB / error.
 */
export function getUokConversation(agentA, agentB, limit = 1000) {
  if (!currentDb) return [];
  try {
    return currentDb
      .prepare(
        `SELECT id, from_agent AS "from", to_agent AS "to", body, metadata_json AS metadataJson, sent_at AS sentAt, delivered_at AS deliveredAt
        FROM uok_messages
        WHERE (from_agent = :a AND to_agent = :b) OR (from_agent = :b AND to_agent = :a)
        ORDER BY sent_at DESC
        LIMIT :limit`,
      )
      .all({ ":a": agentA, ":b": agentB, ":limit": limit })
      .map((row) => ({
        id: row.id,
        from: row.from,
        to: row.to,
        body: row.body,
        metadata: parseJsonObject(row.metadataJson, {}),
        sentAt: row.sentAt,
        deliveredAt: row.deliveredAt,
      }));
  } catch {
    return [];
  }
}
/**
 * Record a read receipt for a message (idempotent per message + agent).
 *
 * @param {string} messageId - Message being acknowledged.
 * @param {string} agentId - Agent that read it.
 * @returns {boolean} true on success (including duplicate reads), false
 *   when no DB is open or the write fails.
 */
export function markUokMessageRead(messageId, agentId) {
  if (!currentDb) return false;
  const bind = {
    ":message_id": messageId,
    ":agent_id": agentId,
    ":read_at": new Date().toISOString(),
  };
  try {
    // INSERT OR IGNORE keeps the first read_at on repeated acknowledgements.
    currentDb
      .prepare(
        `INSERT OR IGNORE INTO uok_message_reads (message_id, agent_id, read_at) VALUES (:message_id, :agent_id, :read_at)`,
      )
      .run(bind);
    return true;
  } catch {
    return false;
  }
}
/**
 * Count messages addressed to an agent with no read receipt from it.
 *
 * @param {string} agentId - Recipient agent id.
 * @returns {number} Unread count; 0 on no DB / error.
 */
export function getUokMessageUnreadCount(agentId) {
  if (!currentDb) return 0;
  try {
    const result = currentDb
      .prepare(
        `SELECT COUNT(*) AS cnt FROM uok_messages m
        WHERE m.to_agent = :agent_id
          AND NOT EXISTS (
            SELECT 1 FROM uok_message_reads r
            WHERE r.message_id = m.id AND r.agent_id = :agent_id
          )`,
      )
      .get({ ":agent_id": agentId });
    return result?.cnt ?? 0;
  } catch {
    return 0;
  }
}
/**
 * Delete UOK messages older than the retention window.
 *
 * Fix: also purges read receipts in `uok_message_reads` whose message no
 * longer exists — previously compaction left orphaned receipt rows that
 * accumulated without bound across runs.
 *
 * @param {number} retentionDays - Messages with sent_at older than this
 *   many days are removed.
 * @returns {{before: number, after: number}} Message counts around the
 *   delete; zeros on no DB / error (best-effort maintenance).
 */
export function compactUokMessages(retentionDays) {
  if (!currentDb) return { before: 0, after: 0 };
  try {
    const cutoff = new Date(
      Date.now() - retentionDays * 24 * 60 * 60 * 1000,
    ).toISOString();
    const beforeRow = currentDb
      .prepare("SELECT COUNT(*) AS cnt FROM uok_messages")
      .get();
    currentDb
      .prepare("DELETE FROM uok_messages WHERE sent_at < :cutoff")
      .run({ ":cutoff": cutoff });
    // Drop read receipts orphaned by the deletion above.
    currentDb
      .prepare(
        `DELETE FROM uok_message_reads
        WHERE message_id NOT IN (SELECT id FROM uok_messages)`,
      )
      .run();
    const afterRow = currentDb
      .prepare("SELECT COUNT(*) AS cnt FROM uok_messages")
      .get();
    return { before: beforeRow?.cnt ?? 0, after: afterRow?.cnt ?? 0 };
  } catch {
    return { before: 0, after: 0 };
  }
}
/**
 * List ids of every message a given agent has acknowledged.
 *
 * @param {string} agentId - Agent whose read receipts are queried.
 * @returns {Array<string>} Message ids; [] on no DB / error.
 */
export function getUokMessageReadIds(agentId) {
  if (!currentDb) return [];
  try {
    const receipts = currentDb
      .prepare(
        "SELECT message_id FROM uok_message_reads WHERE agent_id = :agent_id",
      )
      .all({ ":agent_id": agentId });
    return receipts.map((receipt) => receipt.message_id);
  } catch {
    return [];
  }
}
/**
 * Aggregate metrics over the UOK message bus.
 *
 * @returns {{totalMessages: number, totalUnread: number,
 *   uniqueAgents: number, uniqueConversations: number}} Zeroed metrics on
 *   no DB / error. Conversations are counted as directed (from:to) pairs.
 */
export function getUokMessageBusMetrics() {
  const zeroMetrics = () => ({
    totalMessages: 0,
    totalUnread: 0,
    uniqueAgents: 0,
    uniqueConversations: 0,
  });
  if (!currentDb) return zeroMetrics();
  // Each metric is a single COUNT query; share the scalar extraction.
  const countOf = (sql) => currentDb.prepare(sql).get()?.cnt ?? 0;
  try {
    return {
      totalMessages: countOf("SELECT COUNT(*) AS cnt FROM uok_messages"),
      totalUnread: countOf(
        `SELECT COUNT(*) AS cnt FROM uok_messages m
        WHERE NOT EXISTS (
          SELECT 1 FROM uok_message_reads r
          WHERE r.message_id = m.id
            AND r.agent_id = m.to_agent
        )`,
      ),
      uniqueAgents: countOf(
        `SELECT COUNT(DISTINCT to_agent) AS cnt FROM uok_messages`,
      ),
      uniqueConversations: countOf(
        `SELECT COUNT(DISTINCT from_agent || ':' || to_agent) AS cnt FROM uok_messages`,
      ),
    };
  } catch {
    return zeroMetrics();
  }
}
/**
 * Coerce an arbitrary scope value to "global"; everything else maps to
 * the default "project" scope.
 * @param {*} scope - Candidate scope value.
 * @returns {("global"|"project")}
 */
function normalizeScheduleScope(scope) {
  if (scope === "global") return "global";
  return "project";
}
/**
 * Hydrate a schedule entry from a schedule_entries row.
 *
 * Column values win over the stored full_json snapshot for schema-owned
 * fields; any extra fields in full_json ride along via the spread.
 *
 * @param {object|null|undefined} row - Raw schedule_entries row.
 * @returns {object|null} Entry object, or null for a missing row.
 */
function scheduleEntryFromRow(row) {
  if (!row) return null;
  const snapshot = parseJsonObject(row.full_json, {});
  return {
    ...snapshot,
    schemaVersion: row.schema_version ?? snapshot.schemaVersion ?? 1,
    id: row.id,
    kind: row.kind,
    status: row.status,
    due_at: row.due_at,
    created_at: row.created_at,
    // Column may be NULL for pre-migration rows; fall back to the snapshot.
    snoozed_at: row.snoozed_at ?? snapshot.snoozed_at,
    payload: parseJsonObject(row.payload_json, snapshot.payload ?? {}),
    created_by: row.created_by,
    autonomous_dispatch: Boolean(row.autonomous_dispatch),
  };
}
/**
 * Append a schedule entry to the DB-backed schedule ledger.
 *
 * Purpose: keep time-bound reminders in structured SQLite state so status,
 * due-date, and scope queries are schema-owned instead of JSONL-owned.
 *
 * @param {string} scope - "global" or anything else (normalized to "project").
 * @param {object} entry - Schedule entry; serialized whole into full_json.
 * @param {string|null} [importedFrom=null] - Optional JSONL import source.
 * @returns {void} No-op when no DB is open.
 *
 * Consumer: schedule-store.js for /schedule and launch/auto due-item checks.
 */
export function insertScheduleEntry(scope, entry, importedFrom = null) {
  if (!currentDb) return;
  const version = entry.schemaVersion ?? 1;
  // full_json keeps a complete snapshot so future columns can backfill.
  const snapshot = { schemaVersion: version, ...entry };
  const bind = {
    ":scope": normalizeScheduleScope(scope),
    ":id": entry.id,
    ":schema_version": version,
    ":kind": entry.kind ?? "reminder",
    ":status": entry.status ?? "pending",
    ":due_at": entry.due_at ?? "",
    ":created_at": entry.created_at ?? "",
    ":snoozed_at": entry.snoozed_at ?? null,
    ":payload_json": JSON.stringify(entry.payload ?? {}),
    ":created_by": entry.created_by ?? "user",
    ":autonomous_dispatch": entry.autonomous_dispatch ? 1 : 0,
    ":full_json": JSON.stringify(snapshot),
    ":imported_from": importedFrom,
  };
  currentDb
    .prepare(
      `INSERT INTO schedule_entries (
        scope, id, schema_version, kind, status, due_at, created_at,
        snoozed_at, payload_json, created_by, autonomous_dispatch, full_json,
        imported_from
      ) VALUES (
        :scope, :id, :schema_version, :kind, :status, :due_at, :created_at,
        :snoozed_at, :payload_json, :created_by, :autonomous_dispatch, :full_json,
        :imported_from
      )`,
    )
    .run(bind);
}
/**
 * Return latest schedule entries per id for a scope.
 *
 * Purpose: preserve append-ledger semantics while serving queries from
 * SQLite — only the row with the highest seq per id is surfaced.
 *
 * @param {string} scope - Scope selector (normalized to global/project).
 * @returns {Array<object>} Hydrated entries ordered by due/created/seq;
 *   [] on no DB / error.
 *
 * Consumer: schedule-store.js readEntries/findDue/findUpcoming.
 */
export function getScheduleEntries(scope) {
  if (!currentDb) return [];
  const resolvedScope = normalizeScheduleScope(scope);
  try {
    return currentDb
      .prepare(
        `SELECT s.*
        FROM schedule_entries s
        JOIN (
          SELECT id, MAX(seq) AS max_seq
          FROM schedule_entries
          WHERE scope = :scope
          GROUP BY id
        ) latest ON latest.id = s.id AND latest.max_seq = s.seq
        WHERE s.scope = :scope
        ORDER BY s.due_at ASC, s.created_at ASC, s.seq ASC`,
      )
      .all({ ":scope": resolvedScope })
      .map(scheduleEntryFromRow)
      .filter(Boolean);
  } catch {
    return [];
  }
}
/**
 * Count all ledger rows (including superseded revisions) for a scope.
 *
 * @param {string} scope - Scope selector (normalized to global/project).
 * @returns {number} Row count; 0 on no DB / error.
 */
export function countScheduleEntries(scope) {
  if (!currentDb) return 0;
  const resolvedScope = normalizeScheduleScope(scope);
  try {
    const result = currentDb
      .prepare(
        "SELECT COUNT(*) AS cnt FROM schedule_entries WHERE scope = :scope",
      )
      .get({ ":scope": resolvedScope });
    return result?.cnt ?? 0;
  } catch {
    return 0;
  }
}
/**
 * Return the value only when it is a non-empty string; otherwise null.
 * @param {*} value - Candidate value from a DB row.
 * @returns {string|null}
 */
function asStringOrNull(value) {
  if (typeof value !== "string") return null;
  return value.length > 0 ? value : null;
}
/**
 * Persist a repository profile snapshot and update current file observations.
 *
 * Purpose: make harness evolution's read-only repo facts queryable across
 * sessions while preserving first-seen timestamps for untracked observations.
 *
 * @param {object} profile - Snapshot carrying profileId, projectHash,
 *   projectRoot, createdAt, and a `git` object with head/branch/remoteHash/
 *   dirty plus a `changedFiles` array of per-file observation records.
 * @throws {SFError} SF_STALE_STATE when no database is open — profile
 *   writes must not be silently dropped.
 *
 * Consumer: `/harness profile` and future pre-plan profile snapshots.
 */
export function recordRepoProfile(profile) {
  if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open");
  // Snapshot row + all file observations commit atomically.
  transaction(() => {
    // INSERT OR REPLACE: re-recording the same profile_id overwrites it.
    currentDb
      .prepare(`INSERT OR REPLACE INTO repo_profiles (
        profile_id, project_hash, project_root, head, branch, remote_hash,
        dirty, profile_json, created_at
      ) VALUES (
        :profile_id, :project_hash, :project_root, :head, :branch, :remote_hash,
        :dirty, :profile_json, :created_at
      )`)
      .run({
        ":profile_id": profile.profileId,
        ":project_hash": profile.projectHash,
        ":project_root": profile.projectRoot,
        ":head": profile.git.head,
        ":branch": profile.git.branch,
        ":remote_hash": profile.git.remoteHash,
        ":dirty": profile.git.dirty ? 1 : 0,
        // Full snapshot retained as JSON for fields without columns.
        ":profile_json": JSON.stringify(profile),
        ":created_at": profile.createdAt,
      });
    // Per-path upsert. On conflict the ownership CASE protects existing
    // 'sf_generated' / 'candidate_harness' classifications from being
    // overwritten, first_seen_at is pinned to its original value, and
    // adoption fields only fill in when previously NULL (COALESCE).
    const stmt = currentDb.prepare(`INSERT INTO repo_file_observations (
      path, latest_profile_id, git_status, ownership, language, size_bytes,
      content_hash, summary, first_seen_at, last_seen_at, adopted_at,
      adoption_unit_id
    ) VALUES (
      :path, :latest_profile_id, :git_status, :ownership, :language, :size_bytes,
      :content_hash, :summary, :first_seen_at, :last_seen_at, :adopted_at,
      :adoption_unit_id
    )
    ON CONFLICT(path) DO UPDATE SET
      latest_profile_id = excluded.latest_profile_id,
      git_status = excluded.git_status,
      ownership = CASE
        WHEN repo_file_observations.ownership = 'sf_generated'
          THEN repo_file_observations.ownership
        WHEN repo_file_observations.ownership = 'candidate_harness'
          THEN repo_file_observations.ownership
        ELSE excluded.ownership
      END,
      language = excluded.language,
      size_bytes = excluded.size_bytes,
      content_hash = excluded.content_hash,
      summary = excluded.summary,
      first_seen_at = repo_file_observations.first_seen_at,
      last_seen_at = excluded.last_seen_at,
      adopted_at = COALESCE(repo_file_observations.adopted_at, excluded.adopted_at),
      adoption_unit_id = COALESCE(repo_file_observations.adoption_unit_id, excluded.adoption_unit_id)`);
    for (const file of profile.git.changedFiles) {
      stmt.run({
        ":path": file.path,
        ":latest_profile_id": profile.profileId,
        ":git_status": file.gitStatus,
        ":ownership": file.ownership,
        ":language": file.language,
        ":size_bytes": file.sizeBytes,
        ":content_hash": file.contentHash,
        ":summary": file.summary,
        ":first_seen_at": file.firstSeenAt,
        ":last_seen_at": file.lastSeenAt,
        ":adopted_at": file.adoptedAt,
        ":adoption_unit_id": file.adoptionUnitId,
      });
    }
  });
}
/**
 * Return the most recently recorded repository profile.
 *
 * Purpose: let harness planning and diagnostics inspect the latest factual
 * repo snapshot without re-running the profiler.
 *
 * @returns {object|null} Profile summary (git fields normalized to
 *   non-empty-string-or-null), or null when none is recorded.
 * @throws {SFError} SF_STALE_STATE when no database is open.
 *
 * Consumer: harness status commands and future plan-phase coverage checks.
 */
export function getLatestRepoProfile() {
  if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open");
  const latest = currentDb
    .prepare(`SELECT profile_id, project_hash, project_root, head, branch, remote_hash,
        dirty, profile_json, created_at
      FROM repo_profiles
      ORDER BY created_at DESC, profile_id DESC
      LIMIT 1`)
    .get();
  if (!latest) return null;
  return {
    profileId: latest.profile_id,
    projectHash: latest.project_hash,
    projectRoot: latest.project_root,
    head: asStringOrNull(latest.head),
    branch: asStringOrNull(latest.branch),
    remoteHash: asStringOrNull(latest.remote_hash),
    dirty: latest.dirty === 1,
    profileJson: latest.profile_json ?? "{}",
    createdAt: latest.created_at,
  };
}
/**
 * Return the current file observations accumulated by repo profiling.
 *
 * Purpose: keep untracked and modified file awareness queryable without
 * treating those paths as SF-owned artifacts.
 *
 * @returns {Array<object>} One record per observed path, sorted by path.
 * @throws {SFError} SF_STALE_STATE when no database is open.
 *
 * Consumer: harness planning, diagnostics, and future drift detection.
 */
export function getRepoFileObservations() {
  if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open");
  const rows = currentDb
    .prepare(`SELECT path, latest_profile_id, git_status, ownership, language,
        size_bytes, content_hash, summary, first_seen_at, last_seen_at,
        adopted_at, adoption_unit_id
      FROM repo_file_observations
      ORDER BY path ASC`)
    .all();
  return rows.map((obs) => ({
    path: obs.path,
    latestProfileId: obs.latest_profile_id,
    gitStatus: obs.git_status,
    ownership: obs.ownership,
    language: asStringOrNull(obs.language),
    sizeBytes: obs.size_bytes ?? 0,
    contentHash: asStringOrNull(obs.content_hash),
    summary: asStringOrNull(obs.summary),
    firstSeenAt: obs.first_seen_at,
    lastSeenAt: obs.last_seen_at,
    adoptedAt: asStringOrNull(obs.adopted_at),
    adoptionUnitId: asStringOrNull(obs.adoption_unit_id),
  }));
}
/**
 * Convert a truthy/falsy value into a SQLite-friendly 1/0 integer.
 * @param {*} value - Any value; truthiness decides the result.
 * @returns {(0|1)}
 */
function intBool(value) {
  if (value) return 1;
  return 0;
}
/**
 * Parse a JSON string, returning a fallback on any parse failure.
 * Note: whatever JSON.parse yields (object, array, number, null, …) is
 * returned as-is; the fallback is used only when parsing throws.
 * @param {*} raw - JSON text (or anything JSON.parse can coerce).
 * @param {*} [fallback={}] - Value returned when parsing fails.
 * @returns {*}
 */
function parseJsonObject(raw, fallback = {}) {
  let parsed;
  try {
    parsed = JSON.parse(raw);
  } catch {
    return fallback;
  }
  return parsed;
}
/**
 * Map a solver_eval_runs row to its camelCase API shape.
 * @param {object} row - Raw solver_eval_runs row.
 * @returns {object} Run record with the summary JSON parsed.
 */
function solverEvalRunFromRow(row) {
  return {
    runId: row.run_id,
    suiteSource: row.suite_source,
    casesCount: row.cases_count ?? 0,
    summary: parseJsonObject(row.summary_json, {}),
    reportPath: row.report_path,
    resultsPath: row.results_path,
    // SQLite stores booleans as integers.
    dbRecorded: row.db_recorded === 1,
    createdAt: row.created_at,
    updatedAt: row.updated_at,
  };
}
/**
 * Map a solver_eval_case_results row to its camelCase API shape.
 * @param {object} row - Raw solver_eval_case_results row.
 * @returns {object} Case record; pddComplete is tri-state (true/false/null).
 */
function solverEvalCaseFromRow(row) {
  // pdd_complete is nullable: NULL means the signal was never reported.
  let pddComplete = null;
  if (row.pdd_complete !== null && row.pdd_complete !== undefined) {
    pddComplete = row.pdd_complete === 1;
  }
  return {
    runId: row.run_id,
    caseId: row.case_id,
    title: row.title,
    mode: row.mode,
    passed: row.passed === 1,
    falseComplete: row.false_complete === 1,
    durationMs: row.duration_ms,
    commandStatus: row.command_status,
    solverOutcome: asStringOrNull(row.solver_outcome),
    pddComplete,
    result: parseJsonObject(row.result_json, {}),
    createdAt: row.created_at,
  };
}
/**
 * Map a headless_runs row to its camelCase API shape.
 * @param {object} row - Raw headless_runs row.
 * @returns {object} Headless run record with details JSON parsed.
 */
function headlessRunFromRow(row) {
  return {
    runId: row.run_id,
    command: row.command,
    status: row.status,
    exitCode: row.exit_code,
    timedOut: row.timed_out === 1,
    interrupted: row.interrupted === 1,
    restartCount: row.restart_count ?? 0,
    maxRestarts: row.max_restarts ?? 0,
    durationMs: row.duration_ms ?? 0,
    totalEvents: row.total_events ?? 0,
    toolCalls: row.tool_calls ?? 0,
    solverEvalRunId: asStringOrNull(row.solver_eval_run_id),
    solverEvalReportPath: asStringOrNull(row.solver_eval_report_path),
    details: parseJsonObject(row.details_json, {}),
    createdAt: row.created_at,
    updatedAt: row.updated_at,
  };
}
/**
 * Persist an autonomous solver eval run and its per-mode case results.
 *
 * Purpose: make solver-loop benchmark evidence queryable by SF commands,
 * harness flows, UOK, and future memory retention instead of treating ignored
 * `.sf/evals` JSON/JSONL evidence files as project state.
 *
 * @param {object} report - Eval report with runId, suiteSource, summary,
 *   reportPath, resultsPath, createdAt, and a `results` array of per-case
 *   objects (caseId, title, mode, passed, falseComplete, command,
 *   solverSignals).
 * @throws {SFError} SF_STALE_STATE when no database is open.
 *
 * Consumer: `/solver-eval` after each run completes.
 */
export function recordSolverEvalRun(report) {
  if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open");
  const now = new Date().toISOString();
  // Run header + every case result commit atomically.
  transaction(() => {
    // Upsert keyed on run_id; db_recorded is forced to 1 on (re-)record.
    currentDb
      .prepare(`INSERT INTO solver_eval_runs (
        run_id, suite_source, cases_count, summary_json, report_path,
        results_path, db_recorded, created_at, updated_at
      ) VALUES (
        :run_id, :suite_source, :cases_count, :summary_json, :report_path,
        :results_path, 1, :created_at, :updated_at
      )
      ON CONFLICT(run_id) DO UPDATE SET
        suite_source = excluded.suite_source,
        cases_count = excluded.cases_count,
        summary_json = excluded.summary_json,
        report_path = excluded.report_path,
        results_path = excluded.results_path,
        db_recorded = 1,
        updated_at = excluded.updated_at`)
      .run({
        ":run_id": report.runId,
        ":suite_source": report.suiteSource ?? "",
        // Prefer the summary's case count; fall back to the results length.
        ":cases_count": report.summary?.cases ?? report.results?.length ?? 0,
        ":summary_json": JSON.stringify(report.summary ?? {}),
        ":report_path": report.reportPath ?? "",
        ":results_path": report.resultsPath ?? "",
        ":created_at": report.createdAt ?? now,
        ":updated_at": now,
      });
    // Per-case upsert keyed on (run_id, case_id, mode) so re-running a
    // report replaces earlier case rows instead of duplicating them.
    const stmt = currentDb.prepare(`INSERT INTO solver_eval_case_results (
      run_id, case_id, title, mode, passed, false_complete, duration_ms,
      command_status, solver_outcome, pdd_complete, result_json, created_at
    ) VALUES (
      :run_id, :case_id, :title, :mode, :passed, :false_complete, :duration_ms,
      :command_status, :solver_outcome, :pdd_complete, :result_json, :created_at
    )
    ON CONFLICT(run_id, case_id, mode) DO UPDATE SET
      title = excluded.title,
      passed = excluded.passed,
      false_complete = excluded.false_complete,
      duration_ms = excluded.duration_ms,
      command_status = excluded.command_status,
      solver_outcome = excluded.solver_outcome,
      pdd_complete = excluded.pdd_complete,
      result_json = excluded.result_json,
      created_at = excluded.created_at`);
    for (const result of report.results ?? []) {
      stmt.run({
        ":run_id": report.runId,
        ":case_id": result.caseId,
        ":title": result.title ?? "",
        ":mode": result.mode,
        ":passed": intBool(result.passed),
        ":false_complete": intBool(result.falseComplete),
        ":duration_ms": result.command?.durationMs ?? null,
        ":command_status": result.command?.status ?? null,
        ":solver_outcome": result.solverSignals?.outcome ?? null,
        // Tri-state: NULL when the solver never reported a PDD signal.
        ":pdd_complete":
          result.solverSignals?.pddComplete === undefined
            ? null
            : intBool(result.solverSignals.pddComplete),
        ":result_json": JSON.stringify(result),
        ":created_at": report.createdAt ?? now,
      });
    }
  });
}
/**
 * List recent autonomous solver eval runs.
 *
 * Purpose: let operators inspect benchmark history without scraping
 * generated report files.
 *
 * @param {number} [limit=10] - Clamped to [1, 100].
 * @returns {Array<object>} Run records, newest first.
 * @throws {SFError} SF_STALE_STATE when no database is open.
 *
 * Consumer: `/solver-eval history`.
 */
export function listSolverEvalRuns(limit = 10) {
  if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open");
  const clamped = Math.max(1, Math.min(100, Number(limit) || 10));
  const rows = currentDb
    .prepare(`SELECT run_id, suite_source, cases_count, summary_json,
        report_path, results_path, db_recorded, created_at, updated_at
      FROM solver_eval_runs
      ORDER BY created_at DESC, run_id DESC
      LIMIT :limit`)
    .all({ ":limit": clamped });
  return rows.map(solverEvalRunFromRow);
}
/**
 * Read one autonomous solver eval run by id.
 *
 * Purpose: support `/solver-eval show <run-id>` and future evidence
 * promotion without parsing JSON artifacts.
 *
 * @param {string} runId - Run identifier.
 * @returns {object|null} Run record, or null when not found.
 * @throws {SFError} SF_STALE_STATE when no database is open.
 *
 * Consumer: solver eval command handlers.
 */
export function getSolverEvalRun(runId) {
  if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open");
  const match = currentDb
    .prepare(`SELECT run_id, suite_source, cases_count, summary_json,
        report_path, results_path, db_recorded, created_at, updated_at
      FROM solver_eval_runs
      WHERE run_id = :run_id`)
    .get({ ":run_id": runId });
  if (!match) return null;
  return solverEvalRunFromRow(match);
}
/**
 * Read per-case results for one autonomous solver eval run.
 *
 * Purpose: show raw-vs-SF comparisons from DB evidence.
 *
 * @param {string} runId - Run identifier.
 * @returns {Array<object>} Case records ordered by case id then mode.
 * @throws {SFError} SF_STALE_STATE when no database is open.
 *
 * Consumer: `/solver-eval show <run-id>`.
 */
export function getSolverEvalCaseResults(runId) {
  if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open");
  const rows = currentDb
    .prepare(`SELECT run_id, case_id, title, mode, passed, false_complete,
        duration_ms, command_status, solver_outcome, pdd_complete,
        result_json, created_at
      FROM solver_eval_case_results
      WHERE run_id = :run_id
      ORDER BY case_id ASC, mode ASC`)
    .all({ ":run_id": runId });
  return rows.map(solverEvalCaseFromRow);
}
/**
 * Persist one headless session outcome.
 *
 * Purpose: make headless lifecycle evidence queryable from `sf.db` so timeout,
 * restart, and operator-bounded run behavior does not live only in stderr or
 * generated JSON artifacts.
 *
 * @param {object} entry - Session outcome: runId, command, status, exitCode,
 *   timedOut, interrupted, restartCount, maxRestarts, durationMs,
 *   totalEvents, toolCalls, optional solver eval linkage, details object,
 *   and createdAt.
 * @throws {SFError} SF_STALE_STATE when no database is open.
 *
 * Consumer: headless.ts after every session exits.
 */
export function recordHeadlessRun(entry) {
  if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open");
  const now = new Date().toISOString();
  // Upsert keyed on run_id: re-recording updates every field except
  // created_at, and refreshes updated_at to "now".
  currentDb
    .prepare(`INSERT INTO headless_runs (
      run_id, command, status, exit_code, timed_out, interrupted,
      restart_count, max_restarts, duration_ms, total_events, tool_calls,
      solver_eval_run_id, solver_eval_report_path, details_json,
      created_at, updated_at
    ) VALUES (
      :run_id, :command, :status, :exit_code, :timed_out, :interrupted,
      :restart_count, :max_restarts, :duration_ms, :total_events, :tool_calls,
      :solver_eval_run_id, :solver_eval_report_path, :details_json,
      :created_at, :updated_at
    )
    ON CONFLICT(run_id) DO UPDATE SET
      command = excluded.command,
      status = excluded.status,
      exit_code = excluded.exit_code,
      timed_out = excluded.timed_out,
      interrupted = excluded.interrupted,
      restart_count = excluded.restart_count,
      max_restarts = excluded.max_restarts,
      duration_ms = excluded.duration_ms,
      total_events = excluded.total_events,
      tool_calls = excluded.tool_calls,
      solver_eval_run_id = excluded.solver_eval_run_id,
      solver_eval_report_path = excluded.solver_eval_report_path,
      details_json = excluded.details_json,
      updated_at = excluded.updated_at`)
    .run({
      ":run_id": entry.runId,
      ":command": entry.command ?? "",
      ":status": entry.status ?? "",
      // Numeric fields are coerced defensively; missing values become 0.
      ":exit_code": Number(entry.exitCode ?? 0),
      ":timed_out": intBool(entry.timedOut),
      ":interrupted": intBool(entry.interrupted),
      ":restart_count": Number(entry.restartCount ?? 0),
      ":max_restarts": Number(entry.maxRestarts ?? 0),
      ":duration_ms": Number(entry.durationMs ?? 0),
      ":total_events": Number(entry.totalEvents ?? 0),
      ":tool_calls": Number(entry.toolCalls ?? 0),
      ":solver_eval_run_id": entry.solverEvalRunId ?? null,
      ":solver_eval_report_path": entry.solverEvalReportPath ?? null,
      ":details_json": JSON.stringify(entry.details ?? {}),
      ":created_at": entry.createdAt ?? now,
      ":updated_at": now,
    });
}
/**
 * List recent headless session outcomes.
 *
 * Purpose: support status/doctor/query surfaces that need durable headless
 * lifecycle evidence without parsing stderr logs.
 *
 * @param {number} [limit=20] - Clamped to [1, 100].
 * @returns {Array<object>} Headless run records, newest first.
 * @throws {SFError} SF_STALE_STATE when no database is open.
 *
 * Consumer: tests now; headless query and doctor follow-on surfaces later.
 */
export function listHeadlessRuns(limit = 20) {
  if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open");
  const clamped = Math.max(1, Math.min(100, Number(limit) || 20));
  const rows = currentDb
    .prepare(`SELECT run_id, command, status, exit_code, timed_out,
        interrupted, restart_count, max_restarts, duration_ms,
        total_events, tool_calls, solver_eval_run_id,
        solver_eval_report_path, details_json, created_at, updated_at
      FROM headless_runs
      ORDER BY created_at DESC, run_id DESC
      LIMIT :limit`)
    .all({ ":limit": clamped });
  return rows.map(headlessRunFromRow);
}
/**
 * INSERT OR REPLACE a quality_gates row. Used by milestone-validation-gates.ts
 * to persist milestone-level (MV*) gate outcomes after validate-milestone runs.
 *
 * @param {object} g - Gate outcome: milestoneId, sliceId, gateId, scope,
 *   taskId, status, verdict, rationale, findings, evaluatedAt.
 * @throws {SFError} SF_STALE_STATE when no database is open.
 */
export function upsertQualityGate(g) {
  if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open");
  const bind = {
    ":mid": g.milestoneId,
    ":sid": g.sliceId,
    ":gid": g.gateId,
    ":scope": g.scope,
    ":tid": g.taskId,
    ":status": g.status,
    ":verdict": g.verdict,
    ":rationale": g.rationale,
    ":findings": g.findings,
    ":evaluated_at": g.evaluatedAt,
  };
  currentDb
    .prepare(`INSERT OR REPLACE INTO quality_gates
      (milestone_id, slice_id, gate_id, scope, task_id, status, verdict, rationale, findings, evaluated_at)
      VALUES (:mid, :sid, :gid, :scope, :tid, :status, :verdict, :rationale, :findings, :evaluated_at)`)
    .run(bind);
}
/**
 * Atomically replace all workflow state from a manifest. Lifted verbatim from
 * workflow-manifest.ts so the single-writer invariant holds. Only touches
 * engine tables + decisions. Does NOT modify artifacts or memories.
 *
 * Everything runs inside one transaction: a failure at any point rolls back
 * and leaves the previous state intact.
 *
 * @param {object} manifest - Snapshot with `milestones`, `slices`, `tasks`,
 *   `decisions`, and `verification_evidence` arrays (snake_case row shapes).
 * @throws {SFError} SF_STALE_STATE when no database is open.
 */
export function restoreManifest(manifest) {
  if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open");
  const db = currentDb;
  transaction(() => {
    // Clear engine tables (order matters for foreign-key-like consistency:
    // children are deleted before their parents).
    db.exec("DELETE FROM verification_evidence");
    db.exec("DELETE FROM tasks");
    db.exec("DELETE FROM slices");
    db.exec("DELETE FROM milestones");
    db.exec("DELETE FROM decisions WHERE 1=1");
    // Restore milestones. Array-valued fields are JSON-serialized; optional
    // meeting/research blobs fall back to the empty string.
    const msStmt =
      db.prepare(`INSERT INTO milestones (id, title, status, depends_on, created_at, completed_at,
      vision, success_criteria, key_risks, proof_strategy,
      verification_contract, verification_integration, verification_operational, verification_uat,
      definition_of_done, requirement_coverage, boundary_map_markdown, vision_meeting_json, product_research_json)
      VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`);
    for (const m of manifest.milestones) {
      // Positional bindings: order must match the column list above exactly.
      msStmt.run(
        m.id,
        m.title,
        m.status,
        JSON.stringify(m.depends_on),
        m.created_at,
        m.completed_at,
        m.vision,
        JSON.stringify(m.success_criteria),
        JSON.stringify(m.key_risks),
        JSON.stringify(m.proof_strategy),
        m.verification_contract,
        m.verification_integration,
        m.verification_operational,
        m.verification_uat,
        JSON.stringify(m.definition_of_done),
        m.requirement_coverage,
        m.boundary_map_markdown,
        m.vision_meeting ? JSON.stringify(m.vision_meeting) : "",
        m.product_research ? JSON.stringify(m.product_research) : "",
      );
    }
    // Restore slices. Adversarial reviewer fields default to "" when absent.
    const slStmt =
      db.prepare(`INSERT INTO slices (milestone_id, id, title, status, risk, depends, demo,
      created_at, completed_at, full_summary_md, full_uat_md,
      goal, success_criteria, proof_level, integration_closure, observability_impact,
      adversarial_partner, adversarial_combatant, adversarial_architect, planning_meeting_json,
      sequence, replan_triggered_at)
      VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`);
    for (const s of manifest.slices) {
      slStmt.run(
        s.milestone_id,
        s.id,
        s.title,
        s.status,
        s.risk,
        JSON.stringify(s.depends),
        s.demo,
        s.created_at,
        s.completed_at,
        s.full_summary_md,
        s.full_uat_md,
        s.goal,
        s.success_criteria,
        s.proof_level,
        s.integration_closure,
        s.observability_impact,
        s.adversarial_partner ?? "",
        s.adversarial_combatant ?? "",
        s.adversarial_architect ?? "",
        s.planning_meeting ? JSON.stringify(s.planning_meeting) : "",
        s.sequence,
        s.replan_triggered_at,
      );
    }
    // Restore tasks. `blocker_discovered` is a boolean stored as 0/1.
    const tkStmt =
      db.prepare(`INSERT INTO tasks (milestone_id, slice_id, id, title, status,
      one_liner, narrative, verification_result, duration, completed_at,
      blocker_discovered, deviations, known_issues, key_files, key_decisions,
      full_summary_md, description, estimate, files, verify,
      inputs, expected_output, observability_impact, full_plan_md, sequence)
      VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`);
    for (const t of manifest.tasks) {
      tkStmt.run(
        t.milestone_id,
        t.slice_id,
        t.id,
        t.title,
        t.status,
        t.one_liner,
        t.narrative,
        t.verification_result,
        t.duration,
        t.completed_at,
        t.blocker_discovered ? 1 : 0,
        t.deviations,
        t.known_issues,
        JSON.stringify(t.key_files),
        JSON.stringify(t.key_decisions),
        t.full_summary_md,
        t.description,
        t.estimate,
        JSON.stringify(t.files),
        t.verify,
        JSON.stringify(t.inputs),
        JSON.stringify(t.expected_output),
        t.observability_impact,
        t.full_plan_md,
        t.sequence,
      );
    }
    // Restore decisions, preserving their original sequence numbers.
    const dcStmt =
      db.prepare(`INSERT INTO decisions (seq, id, when_context, scope, decision, choice, rationale, revisable, made_by, superseded_by)
      VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`);
    for (const d of manifest.decisions) {
      dcStmt.run(
        d.seq,
        d.id,
        d.when_context,
        d.scope,
        d.decision,
        d.choice,
        d.rationale,
        d.revisable,
        d.made_by,
        d.superseded_by,
      );
    }
    // Restore verification evidence rows verbatim.
    const evStmt =
      db.prepare(`INSERT INTO verification_evidence (task_id, slice_id, milestone_id, command, exit_code, verdict, duration_ms, created_at)
      VALUES (?, ?, ?, ?, ?, ?, ?, ?)`);
    for (const e of manifest.verification_evidence) {
      evStmt.run(
        e.task_id,
        e.slice_id,
        e.milestone_id,
        e.command,
        e.exit_code,
        e.verdict,
        e.duration_ms,
        e.created_at,
      );
    }
  });
}
/**
 * Bulk delete + insert a legacy milestone hierarchy for markdown → DB migration.
 * Used by workflow-migration.ts to populate engine tables from parsed ROADMAP/PLAN
 * files. All operations run inside a single transaction.
 * @param {object} payload - `{ milestones, slices, tasks, clearMilestoneIds, createdAt }`.
 * @throws {SFError} SF_STALE_STATE when no database is open.
 */
export function bulkInsertLegacyHierarchy(payload) {
  const db = currentDb;
  if (!db) throw new SFError(SF_STALE_STATE, "sf-db: No database open");
  const { milestones, slices, tasks, clearMilestoneIds, createdAt } = payload;
  // No target milestones means nothing to clear — and, by design, nothing to insert.
  if (clearMilestoneIds.length === 0) return;
  const placeholders = clearMilestoneIds.map(() => "?").join(",");
  transaction(() => {
    // Delete children before parents so the hierarchy never has orphans mid-clear.
    db.prepare(`DELETE FROM tasks WHERE milestone_id IN (${placeholders})`).run(
      ...clearMilestoneIds,
    );
    db.prepare(
      `DELETE FROM slices WHERE milestone_id IN (${placeholders})`,
    ).run(...clearMilestoneIds);
    db.prepare(`DELETE FROM milestones WHERE id IN (${placeholders})`).run(
      ...clearMilestoneIds,
    );
    const milestoneStmt = db.prepare(
      "INSERT INTO milestones (id, title, status, created_at) VALUES (?, ?, ?, ?)",
    );
    for (const row of milestones) {
      milestoneStmt.run(row.id, row.title, row.status, createdAt);
    }
    const sliceStmt = db.prepare(
      "INSERT INTO slices (id, milestone_id, title, status, risk, depends, sequence, created_at) VALUES (?, ?, ?, ?, ?, ?, ?, ?)",
    );
    for (const row of slices) {
      // Legacy slices carry no dependency data: store an empty JSON array.
      sliceStmt.run(
        row.id,
        row.milestoneId,
        row.title,
        row.status,
        row.risk,
        "[]",
        row.sequence,
        createdAt,
      );
    }
    const taskStmt = db.prepare(
      "INSERT INTO tasks (id, slice_id, milestone_id, title, description, status, estimate, files, sequence) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)",
    );
    for (const row of tasks) {
      // Legacy tasks have no description/estimate; files defaults to "[]".
      taskStmt.run(
        row.id,
        row.sliceId,
        row.milestoneId,
        row.title,
        "",
        row.status,
        "",
        "[]",
        row.sequence,
      );
    }
  });
}
// ─── Memory store writers ────────────────────────────────────────────────
// All memory writes go through sf-db.ts so the single-writer invariant
// holds. These are direct pass-throughs to the SQL previously in
// memory-store.ts — same bindings, same behavior.
/**
 * Insert a single memory row. The tags array is JSON-serialized; a missing
 * tags value is stored as "[]".
 * @throws {SFError} SF_STALE_STATE when no database is open.
 */
export function insertMemoryRow(args) {
  const db = currentDb;
  if (!db) throw new SFError(SF_STALE_STATE, "sf-db: No database open");
  const bindings = {
    ":id": args.id,
    ":category": args.category,
    ":content": args.content,
    ":confidence": args.confidence,
    ":source_unit_type": args.sourceUnitType,
    ":source_unit_id": args.sourceUnitId,
    ":created_at": args.createdAt,
    ":updated_at": args.updatedAt,
    ":tags": JSON.stringify(args.tags ?? []),
  };
  db.prepare(`INSERT INTO memories (id, category, content, confidence, source_unit_type, source_unit_id, created_at, updated_at, tags)
VALUES (:id, :category, :content, :confidence, :source_unit_type, :source_unit_id, :created_at, :updated_at, :tags)`).run(bindings);
}
/**
 * Re-key a memory row from a provisional placeholder id to its final id.
 * @throws {SFError} SF_STALE_STATE when no database is open.
 */
export function rewriteMemoryId(placeholderId, realId) {
  const db = currentDb;
  if (!db) throw new SFError(SF_STALE_STATE, "sf-db: No database open");
  const stmt = db.prepare(
    "UPDATE memories SET id = :real_id WHERE id = :placeholder",
  );
  stmt.run({ ":real_id": realId, ":placeholder": placeholderId });
}
/**
 * Update a memory's content (and optionally its confidence) plus its
 * updated_at timestamp.
 * @throws {SFError} SF_STALE_STATE when no database is open.
 */
export function updateMemoryContentRow(id, content, confidence, updatedAt) {
  const db = currentDb;
  if (!db) throw new SFError(SF_STALE_STATE, "sf-db: No database open");
  // `!= null` deliberately matches both null and undefined; confidence 0 is
  // still a valid value and is written.
  const hasConfidence = confidence != null;
  const sql = hasConfidence
    ? "UPDATE memories SET content = :content, confidence = :confidence, updated_at = :updated_at WHERE id = :id"
    : "UPDATE memories SET content = :content, updated_at = :updated_at WHERE id = :id";
  const bindings = hasConfidence
    ? {
        ":content": content,
        ":confidence": confidence,
        ":updated_at": updatedAt,
        ":id": id,
      }
    : { ":content": content, ":updated_at": updatedAt, ":id": id };
  db.prepare(sql).run(bindings);
}
/**
 * Bump a memory's hit counter by one and refresh its updated_at timestamp.
 * @throws {SFError} SF_STALE_STATE when no database is open.
 */
export function incrementMemoryHitCount(id, updatedAt) {
  const db = currentDb;
  if (!db) throw new SFError(SF_STALE_STATE, "sf-db: No database open");
  const stmt = db.prepare(
    "UPDATE memories SET hit_count = hit_count + 1, updated_at = :updated_at WHERE id = :id",
  );
  stmt.run({ ":updated_at": updatedAt, ":id": id });
}
/**
 * Mark a memory as superseded by another memory id.
 * @throws {SFError} SF_STALE_STATE when no database is open.
 */
export function supersedeMemoryRow(oldId, newId, updatedAt) {
  const db = currentDb;
  if (!db) throw new SFError(SF_STALE_STATE, "sf-db: No database open");
  const stmt = db.prepare(
    "UPDATE memories SET superseded_by = :new_id, updated_at = :updated_at WHERE id = :old_id",
  );
  stmt.run({ ":new_id": newId, ":updated_at": updatedAt, ":old_id": oldId });
}
/**
 * Record that a work unit's activity file has been mined for memories.
 * INSERT OR IGNORE makes repeated marking of the same unit a no-op.
 * @throws {SFError} SF_STALE_STATE when no database is open.
 */
export function markMemoryUnitProcessed(unitKey, activityFile, processedAt) {
  const db = currentDb;
  if (!db) throw new SFError(SF_STALE_STATE, "sf-db: No database open");
  const bindings = { ":key": unitKey, ":file": activityFile, ":at": processedAt };
  db.prepare(`INSERT OR IGNORE INTO memory_processed_units (unit_key, activity_file, processed_at)
VALUES (:key, :file, :at)`).run(bindings);
}
/**
 * Decay the confidence of stale, non-superseded memories by 0.1, flooring at
 * 0.1. Only rows last updated before `cutoffTs` are touched.
 * @throws {SFError} SF_STALE_STATE when no database is open.
 */
export function decayMemoriesBefore(cutoffTs, now) {
  const db = currentDb;
  if (!db) throw new SFError(SF_STALE_STATE, "sf-db: No database open");
  const stmt = db.prepare(`UPDATE memories
      SET confidence = MAX(0.1, confidence - 0.1), updated_at = :now
      WHERE superseded_by IS NULL AND updated_at < :cutoff AND confidence > 0.1`);
  stmt.run({ ":now": now, ":cutoff": cutoffTs });
}
/**
 * Evict the lowest-ranked active memories by marking them superseded with the
 * sentinel 'CAP_EXCEEDED'. Rank is confidence boosted by hit count
 * (confidence * (1 + hit_count * 0.1)); the `limit` lowest-ranked rows go.
 * @throws {SFError} SF_STALE_STATE when no database is open.
 */
export function supersedeLowestRankedMemories(limit, now) {
  const db = currentDb;
  if (!db) throw new SFError(SF_STALE_STATE, "sf-db: No database open");
  const stmt = db.prepare(`UPDATE memories SET superseded_by = 'CAP_EXCEEDED', updated_at = :now
      WHERE id IN (
        SELECT id FROM memories
        WHERE superseded_by IS NULL
        ORDER BY (confidence * (1.0 + hit_count * 0.1)) ASC
        LIMIT :limit
      )`);
  stmt.run({ ":now": now, ":limit": limit });
}
// ─── Memory Sources ──────────────────────────────────────────────────────────
/**
 * Insert an imported memory source. INSERT OR IGNORE keeps re-imports of an
 * existing id idempotent. Scope defaults to "project"; tags default to [].
 * @throws {SFError} SF_STALE_STATE when no database is open.
 */
export function insertMemorySourceRow(args) {
  const db = currentDb;
  if (!db) throw new SFError(SF_STALE_STATE, "sf-db: No database open");
  const bindings = {
    ":id": args.id,
    ":kind": args.kind,
    ":uri": args.uri,
    ":title": args.title,
    ":content": args.content,
    ":content_hash": args.contentHash,
    ":imported_at": args.importedAt,
    ":scope": args.scope ?? "project",
    ":tags": JSON.stringify(args.tags ?? []),
  };
  db.prepare(`INSERT OR IGNORE INTO memory_sources (id, kind, uri, title, content, content_hash, imported_at, scope, tags)
VALUES (:id, :kind, :uri, :title, :content, :content_hash, :imported_at, :scope, :tags)`).run(bindings);
}
/**
 * Delete a memory source by id.
 * @returns {boolean} true if a row was actually removed.
 * @throws {SFError} SF_STALE_STATE when no database is open.
 */
export function deleteMemorySourceRow(id) {
  const db = currentDb;
  if (!db) throw new SFError(SF_STALE_STATE, "sf-db: No database open");
  const result = db
    .prepare("DELETE FROM memory_sources WHERE id = :id")
    .run({ ":id": id });
  const changes = result?.changes ?? 0;
  return changes > 0;
}
// ─── Judgments ───────────────────────────────────────────────────────────────
/**
 * Record a judgment entry. Best-effort: silently no-ops when no database is
 * open, and swallows any serialization/insert failure.
 */
export function insertJudgment(entry) {
  const db = currentDb;
  if (!db) return;
  try {
    // Bindings are built inside the try so a serialization failure is also
    // treated as best-effort.
    const bindings = {
      ":unit_id": entry.unitId ?? "",
      ":decision": entry.decision ?? "",
      ":alternatives_json": JSON.stringify(entry.alternatives ?? []),
      ":reasoning": entry.reasoning ?? "",
      ":confidence": entry.confidence ?? "medium",
      ":ts": entry.ts ?? new Date().toISOString(),
    };
    db.prepare(`INSERT INTO judgments (unit_id, decision, alternatives_json, reasoning, confidence, ts)
VALUES (:unit_id, :decision, :alternatives_json, :reasoning, :confidence, :ts)`).run(bindings);
  } catch {
    // Judgment logging is best-effort
  }
}
/**
 * Fetch judgments whose unit id starts with the given prefix, newest first.
 * Returns [] when no database is open or on any query failure (best-effort
 * read, matching insertJudgment's behavior).
 */
export function getJudgmentsForUnit(unitIdPrefix, limit = 1000) {
  const db = currentDb;
  if (!db) return [];
  try {
    const stmt = db.prepare(
      `SELECT id, unit_id AS unitId, decision, alternatives_json AS alternativesJson, reasoning, confidence, ts
       FROM judgments
       WHERE unit_id LIKE :prefix
       ORDER BY ts DESC
       LIMIT :limit`,
    );
    const results = [];
    const rows = stmt.all({ ":prefix": `${unitIdPrefix}%`, ":limit": limit });
    for (const row of rows) {
      results.push({
        id: row.id,
        unitId: row.unitId,
        decision: row.decision,
        alternatives: parseJsonObject(row.alternativesJson, []),
        reasoning: row.reasoning,
        confidence: row.confidence,
        ts: row.ts,
      });
    }
    return results;
  } catch {
    return [];
  }
}
// ─── Retrieval Evidence ─────────────────────────────────────────────────────
/**
 * Record a retrieval lookup with source provenance.
 * Purpose: let SF compare live code, semantic, docs, and web context by the same
 * freshness and scope contract before planning or implementation trusts it.
 * Consumer: Sift/codebase search tools and future Context7/web retrieval bridges.
 * @throws {SFError} SF_STALE_STATE when no database is open.
 */
export function insertRetrievalEvidence(args) {
  const db = currentDb;
  if (!db) throw new SFError(SF_STALE_STATE, "sf-db: No database open");
  const recordedAt = args.recordedAt ?? new Date().toISOString();
  // Normalize optional fields up front; git provenance stays NULL when unknown.
  const bindings = {
    ":backend": args.backend,
    ":source_kind": args.sourceKind ?? "code",
    ":query": args.query ?? "",
    ":strategy": args.strategy ?? "",
    ":scope": args.scope ?? "",
    ":project_root": args.projectRoot ?? "",
    ":git_head": args.gitHead ?? null,
    ":git_branch": args.gitBranch ?? null,
    ":worktree_dirty": intBool(args.worktreeDirty),
    ":freshness": args.freshness ?? "unknown",
    ":status": args.status ?? "ok",
    ":hit_count": args.hitCount ?? 0,
    ":elapsed_ms": args.elapsedMs ?? 0,
    ":cache_path": args.cachePath ?? null,
    ":error": args.error ?? null,
    ":result_json": JSON.stringify(args.result ?? {}),
    ":recorded_at": recordedAt,
  };
  db.prepare(`INSERT INTO retrieval_evidence (
  backend, source_kind, query, strategy, scope, project_root,
  git_head, git_branch, worktree_dirty, freshness, status,
  hit_count, elapsed_ms, cache_path, error, result_json, recorded_at
) VALUES (
  :backend, :source_kind, :query, :strategy, :scope, :project_root,
  :git_head, :git_branch, :worktree_dirty, :freshness, :status,
  :hit_count, :elapsed_ms, :cache_path, :error, :result_json, :recorded_at
)`).run(bindings);
}
/**
 * Return recent retrieval evidence rows.
 * Purpose: support audits that need to distinguish live source evidence from
 * stale indexed or prose-only context.
 * Consumer: inspect/doctor tooling and tests for retrieval provenance.
 * Rows come back newest first with `worktreeDirty` widened to a boolean and
 * `result` parsed from its stored JSON; returns [] when no database is open.
 */
export function getRetrievalEvidence(limit = 100) {
  const db = currentDb;
  if (!db) return [];
  const sql = `SELECT
    id, backend, source_kind AS sourceKind, query, strategy, scope,
    project_root AS projectRoot, git_head AS gitHead,
    git_branch AS gitBranch, worktree_dirty AS worktreeDirty,
    freshness, status, hit_count AS hitCount, elapsed_ms AS elapsedMs,
    cache_path AS cachePath, error, result_json AS resultJson, recorded_at AS recordedAt
  FROM retrieval_evidence
  ORDER BY recorded_at DESC, id DESC
  LIMIT :limit`;
  const out = [];
  for (const row of db.prepare(sql).all({ ":limit": limit })) {
    out.push({
      ...row,
      worktreeDirty: row.worktreeDirty === 1,
      result: parseJsonObject(row.resultJson, {}),
    });
  }
  return out;
}
// ─── Memory Embeddings ───────────────────────────────────────────────────────
/**
 * Insert or replace the embedding vector for a memory. On conflict the
 * model, dimensionality, vector blob, and timestamp are all overwritten.
 * @throws {SFError} SF_STALE_STATE when no database is open.
 */
export function upsertMemoryEmbedding(args) {
  const db = currentDb;
  if (!db) throw new SFError(SF_STALE_STATE, "sf-db: No database open");
  const bindings = {
    ":memory_id": args.memoryId,
    ":model": args.model,
    ":dim": args.dim,
    ":vector": args.vector,
    ":updated_at": args.updatedAt,
  };
  db.prepare(`INSERT INTO memory_embeddings (memory_id, model, dim, vector, updated_at)
VALUES (:memory_id, :model, :dim, :vector, :updated_at)
ON CONFLICT(memory_id) DO UPDATE SET
  model = excluded.model,
  dim = excluded.dim,
  vector = excluded.vector,
  updated_at = excluded.updated_at`).run(bindings);
}
/**
 * Delete a memory's embedding row.
 * @returns {boolean} true if a row was actually removed.
 * @throws {SFError} SF_STALE_STATE when no database is open.
 */
export function deleteMemoryEmbedding(memoryId) {
  const db = currentDb;
  if (!db) throw new SFError(SF_STALE_STATE, "sf-db: No database open");
  const result = db
    .prepare("DELETE FROM memory_embeddings WHERE memory_id = :id")
    .run({ ":id": memoryId });
  const changes = result?.changes ?? 0;
  return changes > 0;
}
// ─── Tier 1.3: Spec/Runtime/Evidence Schema ──────────────────────────────────
// Functions for managing evidence in the new spec schema (v32+)
/**
 * Record evidence for a milestone. Appends to milestone_evidence table.
 * Purpose: Create audit trail of decisions, verifications, and incidents.
 * Consumer: complete-milestone, reassess-milestone, and other tools.
 * The timestamp is taken at call time; falsy phase/recorder values are
 * stored as "".
 * @throws {SFError} SF_STALE_STATE when no database is open.
 */
export function insertMilestoneEvidence(
  milestoneId,
  evidenceType,
  content,
  phaseName,
  recordedBy,
) {
  const db = currentDb;
  if (!db) throw new SFError(SF_STALE_STATE, "sf-db: No database open");
  const values = [
    milestoneId,
    evidenceType,
    content,
    new Date().toISOString(),
    phaseName || "",
    recordedBy || "",
  ];
  db.prepare(`INSERT INTO milestone_evidence (milestone_id, evidence_type, content, recorded_at, phase_name, recorded_by)
VALUES (?, ?, ?, ?, ?, ?)`).run(...values);
}
/**
 * Record evidence for a slice. Appends to slice_evidence table.
 * Purpose: Create audit trail of slice decisions, verifications, and incidents.
 * Consumer: complete-slice, execute-slice, and other tools.
 * The timestamp is taken at call time; falsy phase/recorder values are
 * stored as "".
 * @throws {SFError} SF_STALE_STATE when no database is open.
 */
export function insertSliceEvidence(
  milestoneId,
  sliceId,
  evidenceType,
  content,
  phaseName,
  recordedBy,
) {
  const db = currentDb;
  if (!db) throw new SFError(SF_STALE_STATE, "sf-db: No database open");
  const values = [
    milestoneId,
    sliceId,
    evidenceType,
    content,
    new Date().toISOString(),
    phaseName || "",
    recordedBy || "",
  ];
  db.prepare(`INSERT INTO slice_evidence (milestone_id, slice_id, evidence_type, content, recorded_at, phase_name, recorded_by)
VALUES (?, ?, ?, ?, ?, ?, ?)`).run(...values);
}
/**
 * Record evidence for a task. Appends to task_evidence table.
 * Purpose: Create audit trail of task decisions, verifications, and incidents.
 * Consumer: complete-task, execute-task, and other tools.
 * The timestamp is taken at call time; falsy phase/recorder values are
 * stored as "".
 * @throws {SFError} SF_STALE_STATE when no database is open.
 */
export function insertTaskEvidence(
  milestoneId,
  sliceId,
  taskId,
  evidenceType,
  content,
  phaseName,
  recordedBy,
) {
  const db = currentDb;
  if (!db) throw new SFError(SF_STALE_STATE, "sf-db: No database open");
  const values = [
    milestoneId,
    sliceId,
    taskId,
    evidenceType,
    content,
    new Date().toISOString(),
    phaseName || "",
    recordedBy || "",
  ];
  db.prepare(`INSERT INTO task_evidence (milestone_id, slice_id, task_id, evidence_type, content, recorded_at, phase_name, recorded_by)
VALUES (?, ?, ?, ?, ?, ?, ?, ?)`).run(...values);
}
/**
 * Query milestone audit trail (spec + evidence). Returns rows with spec intent and evidence history.
 * Purpose: Support data archaeology and decision-tree reconstruction.
 * Consumer: forensics tools, doctor checks, audit/compliance queries.
 * LEFT JOINs keep the milestone visible even when it has no spec or evidence;
 * returns [] when no database is open.
 */
export function getMilestoneAuditTrail(milestoneId) {
  const db = currentDb;
  if (!db) return [];
  const sql = `
    SELECT
      r.id, r.title, r.status,
      s.vision, s.spec_version,
      e.evidence_type, e.content, e.recorded_at, e.phase_name, e.recorded_by
    FROM milestones r
    LEFT JOIN milestone_specs s ON r.id = s.id
    LEFT JOIN milestone_evidence e ON r.id = e.milestone_id
    WHERE r.id = ?
    ORDER BY e.recorded_at ASC
  `;
  return db.prepare(sql).all(milestoneId);
}
/**
 * Query slice audit trail (spec + evidence).
 * Purpose: Support data archaeology and decision-tree reconstruction.
 * Consumer: forensics tools, doctor checks, audit/compliance queries.
 * LEFT JOINs keep the slice visible even when it has no spec or evidence;
 * returns [] when no database is open.
 */
export function getSliceAuditTrail(milestoneId, sliceId) {
  const db = currentDb;
  if (!db) return [];
  const sql = `
    SELECT
      r.id, r.title, r.status,
      s.goal, s.spec_version,
      e.evidence_type, e.content, e.recorded_at, e.phase_name, e.recorded_by
    FROM slices r
    LEFT JOIN slice_specs s ON r.milestone_id = s.milestone_id AND r.id = s.slice_id
    LEFT JOIN slice_evidence e ON r.milestone_id = e.milestone_id AND r.id = e.slice_id
    WHERE r.milestone_id = ? AND r.id = ?
    ORDER BY e.recorded_at ASC
  `;
  return db.prepare(sql).all(milestoneId, sliceId);
}
/**
 * Query task audit trail (spec + evidence).
 * Purpose: Support data archaeology and decision-tree reconstruction.
 * Consumer: forensics tools, doctor checks, audit/compliance queries.
 * LEFT JOINs keep the task visible even when it has no spec or evidence;
 * returns [] when no database is open.
 */
export function getTaskAuditTrail(milestoneId, sliceId, taskId) {
  const db = currentDb;
  if (!db) return [];
  const sql = `
    SELECT
      r.id, r.title, r.status,
      s.verify, s.spec_version,
      e.evidence_type, e.content, e.recorded_at, e.phase_name, e.recorded_by
    FROM tasks r
    LEFT JOIN task_specs s ON r.milestone_id = s.milestone_id AND r.slice_id = s.slice_id AND r.id = s.task_id
    LEFT JOIN task_evidence e ON r.milestone_id = e.milestone_id AND r.slice_id = e.slice_id AND r.id = e.task_id
    WHERE r.milestone_id = ? AND r.slice_id = ? AND r.id = ?
    ORDER BY e.recorded_at ASC
  `;
  return db.prepare(sql).all(milestoneId, sliceId, taskId);
}
/**
 * Get milestone spec only (immutable intent, no runtime state).
 * Purpose: Retrieve spec intent for re-planning or spec validation.
 * Consumer: plan-milestone and spec validation tools.
 * Returns the spec row, or undefined when no spec exists, or null when no
 * database is open.
 */
export function getMilestoneSpec(milestoneId) {
  const db = currentDb;
  if (!db) return null;
  const stmt = db.prepare("SELECT * FROM milestone_specs WHERE id = ?");
  return stmt.get(milestoneId);
}
/**
 * Get slice spec only (immutable intent, no runtime state).
 * Purpose: Retrieve spec intent for re-planning or spec validation.
 * Consumer: plan-slice and spec validation tools.
 * Returns the spec row, or undefined when no spec exists, or null when no
 * database is open.
 */
export function getSliceSpec(milestoneId, sliceId) {
  const db = currentDb;
  if (!db) return null;
  const stmt = db.prepare(
    "SELECT * FROM slice_specs WHERE milestone_id = ? AND slice_id = ?",
  );
  return stmt.get(milestoneId, sliceId);
}
/**
 * Get task spec only (immutable intent, no runtime state).
 * Purpose: Retrieve spec intent for re-planning or spec validation.
 * Consumer: plan-task and spec validation tools.
 * Returns the spec row, or undefined when no spec exists, or null when no
 * database is open.
 */
export function getTaskSpec(milestoneId, sliceId, taskId) {
  const db = currentDb;
  if (!db) return null;
  const stmt = db.prepare(
    "SELECT * FROM task_specs WHERE milestone_id = ? AND slice_id = ? AND task_id = ?",
  );
  return stmt.get(milestoneId, sliceId, taskId);
}