feat: two-tier skill architecture with 8 workflow-internal skills

- Add src/resources/workflow-skills/ directory with 8 internal skills
  enforcing the 20 cross-cutting agent patterns from the styleguide:
  P0: observe-first, vertical-slice, context-lean
  P1: irreversible-ops, error-routing, assumption-log
  P2: handoff-readability, state-discipline
- Update skills/directory.js: WORKFLOW_SKILL_DIR constant, workflow
  source in discoverAllSkills, and export all constants inline
- Update skills/loader.js: workflow source forces userInvocable: false;
  loadSkills() defaults to includeWorkflow: true for production use;
  getUserInvocableSkills excludes workflow source
- Update skills/index.js barrel to export WORKFLOW_SKILL_DIR
- Update install-pi-global.js / uninstall-pi-global.js for workflow-skills
- Fix skills.test.mjs: pass includeWorkflow: false in 4 project-scope
  tests to isolate them from the 8 bundled workflow skills
- Remove genai-proxy extension (unused, replaced by direct provider integration)

Co-authored-by: Copilot <223556219+Copilot@users.noreply.github.com>
Author: Mikael Hugo
Date: 2026-05-09 02:55:16 +02:00
Parent: 9875812c1b
Commit: 03e1f808bc
7 changed files with 10 additions and 497 deletions
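For reviewers, a minimal sketch of the intended call pattern under the new two-tier setup. The import path and the getUserInvocableSkills signature are assumptions inferred from the commit message, not verified against directory.js:

import { loadSkills, getUserInvocableSkills } from "./skills/index.js"; // path assumed

const projectPath = process.cwd();

// Production default: bundled workflow skills are discovered alongside
// project/user skills, since loadSkills() now defaults includeWorkflow to true.
const allSkills = loadSkills(projectPath);

// The loader forces userInvocable: false on workflow-sourced skills, so the
// user-facing list excludes them (signature assumed from the commit message).
const userFacing = getUserInvocableSkills(allSkills);

// Opting out, e.g. in tests that count only project-scope fixtures.
const projectOnly = loadSkills(projectPath, { includeWorkflow: false });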

@@ -1,10 +0,0 @@
{
  "id": "genai-proxy",
  "name": "GenAI Proxy",
  "version": "1.0.0",
  "description": "OpenAI-compatible proxy for GenAI clients",
  "tier": "bundled",
  "requires": {
    "platform": ">=2.29.0"
  }
}

index.js
@@ -1,10 +0,0 @@
import { installGenaiProxyExtension } from "./proxy-command.js";
export {
  installGenaiProxyExtension,
  resolveProxyPort,
} from "./proxy-command.js";
export { createProxyServer, ProxyServer } from "./proxy-server.js";
export default function genaiProxyExtension(api) {
  installGenaiProxyExtension(api);
}

package.json
@@ -1,14 +0,0 @@
{
  "name": "pi-genai-proxy",
  "private": true,
  "version": "1.0.0",
  "type": "module",
  "engines": {
    "node": ">=26.1.0"
  },
  "pi": {
    "extensions": [
      "./index.js"
    ]
  }
}

proxy-command.js
@@ -1,122 +0,0 @@
import { createProxyServer } from "./proxy-server.js";
const PROXY_COMMAND_NAME = "genai-proxy";
const PROXY_FLAG_NAME = "genai-proxy";
const DEFAULT_PROXY_PORT = 3000;
export function installGenaiProxyExtension(api, dependencies) {
  let proxyServer = null;
  const buildProxyServer = dependencies?.createProxyServer ?? createProxyServer;
  const ensureProxyServer = (context, port) => {
    if (proxyServer && proxyServer.getPort() === port) {
      return proxyServer;
    }
    if (proxyServer) {
      throw new Error(`Proxy already running on port ${proxyServer.getPort()}`);
    }
    proxyServer = buildProxyServer({
      port,
      modelRegistry: context.modelRegistry,
      onLog: (message) => notifyProxyStatus(context, message, "info"),
    });
    return proxyServer;
  };
  const startProxyFromFlag = async (value, context) => {
    const server = ensureProxyServer(context, resolveProxyPort(value));
    await server.start();
  };
  api.registerFlag(PROXY_FLAG_NAME, {
    description: "Start the GenAI proxy server",
    type: "string",
    allowNoValue: true,
    onStartup: startProxyFromFlag,
  });
  api.registerCommand(PROXY_COMMAND_NAME, {
    description: "Manage the GenAI proxy server",
    handler: async (args, context) => {
      await handleProxyCommand(
        args ?? "",
        context,
        ensureProxyServer,
        () => proxyServer,
        () => {
          proxyServer = null;
        },
      );
    },
  });
}
export function resolveProxyPort(flagValue) {
  if (flagValue === true || flagValue === false || flagValue === undefined) {
    return DEFAULT_PROXY_PORT;
  }
  const port = Number.parseInt(flagValue, 10);
  if (!Number.isFinite(port) || port <= 0 || port > 65535) {
    throw new Error(`Invalid proxy port: ${flagValue}`);
  }
  return port;
}
async function handleProxyCommand(
  rawArgs,
  context,
  ensureProxyServer,
  getProxyServer,
  clearProxyServer,
) {
  const [subcommand = "status", portArg] = rawArgs
    .trim()
    .split(/\s+/)
    .filter((value) => value.length > 0);
  if (subcommand === "start") {
    const existingServer = getProxyServer();
    if (existingServer?.isRunning()) {
      notifyProxyStatus(
        context,
        `Proxy already running on port ${existingServer.getPort()}`,
        "info",
      );
      return;
    }
    const server = ensureProxyServer(
      context,
      resolveProxyPort(portArg === undefined ? true : portArg),
    );
    await server.start();
    return;
  }
  if (subcommand === "stop") {
    const server = getProxyServer();
    if (!server?.isRunning()) {
      notifyProxyStatus(context, "Proxy is not running", "warning");
      return;
    }
    await server.stop();
    clearProxyServer();
    notifyProxyStatus(context, "Proxy stopped", "success");
    return;
  }
  if (subcommand === "status") {
    const server = getProxyServer();
    if (server?.isRunning()) {
      notifyProxyStatus(
        context,
        `Proxy running on port ${server.getPort()}`,
        "info",
      );
      return;
    }
    notifyProxyStatus(context, "Proxy is not running", "info");
    return;
  }
  notifyProxyStatus(
    context,
    "Usage: /genai-proxy start [port] | stop | status",
    "warning",
  );
}
function notifyProxyStatus(context, message, type) {
  if ("ui" in context) {
    context.ui.notify(message, type);
    return;
  }
  process.stderr.write(`[genai-proxy] ${message}\n`);
}
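A quick illustration of resolveProxyPort's contract, runnable against the code above; the expected values follow directly from the implementation:

import { resolveProxyPort } from "./proxy-command.js";

console.log(resolveProxyPort(true)); // 3000: a bare flag falls back to DEFAULT_PROXY_PORT
console.log(resolveProxyPort("8080")); // 8080: an explicit port string is parsed
try {
  resolveProxyPort("70000"); // out of range (> 65535)
} catch (error) {
  console.error(error.message); // "Invalid proxy port: 70000"
}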

proxy-server.js
@@ -1,336 +0,0 @@
import { stream } from "@singularity-forge/pi-ai";
import express from "express";
const LISTEN_ADDRESS = "127.0.0.1";
const OPENAI_CREATED_TIMESTAMP = 1_677_610_602;
const SSE_CONTENT_TYPE = "text/event-stream";
const NDJSON_CONTENT_TYPE = "application/x-ndjson";
export class ProxyServer {
  server = null;
  boundPort = null;
  options;
  streamModel;
  constructor(options) {
    this.options = options;
    this.streamModel = options.streamModel ?? stream;
  }
  isRunning() {
    return this.server !== null;
  }
  getPort() {
    return this.boundPort;
  }
  async start() {
    if (this.server) {
      return;
    }
    const app = express();
    app.use(express.json({ limit: "2mb" }));
    app.get(["/v1/models", "/v1beta/models"], (_req, res) => {
      const models = this.options.modelRegistry.getAll().map((model) => ({
        id: model.id,
        object: "model",
        created: OPENAI_CREATED_TIMESTAMP,
        owned_by: model.provider,
        name: model.name,
        capabilities: model.capabilities,
      }));
      if (_req.path.startsWith("/v1beta")) {
        res.json({ models });
        return;
      }
      res.json({ object: "list", data: models });
    });
    app.post("/v1/chat/completions", async (req, res) => {
      await this.handleCompletionRequest(req, res, "openai");
    });
    app.post(
      "/v1beta/models/:modelId\\:streamGenerateContent",
      async (req, res) => {
        await this.handleCompletionRequest(req, res, "google");
      },
    );
    await new Promise((resolve, reject) => {
      const server = app.listen(this.options.port, LISTEN_ADDRESS, () => {
        this.server = server;
        const address = server.address();
        if (typeof address === "object" && address) {
          this.boundPort = address.port;
        } else {
          this.boundPort = this.options.port;
        }
        this.options.onLog?.(
          `Proxy Server running on http://${LISTEN_ADDRESS}:${this.boundPort}`,
        );
        resolve();
      });
      server.once("error", reject);
    });
  }
  async stop() {
    if (!this.server) {
      return;
    }
    const server = this.server;
    this.server = null;
    this.boundPort = null;
    await new Promise((resolve, reject) => {
      server.close((error) => {
        if (error) {
          reject(error);
          return;
        }
        resolve();
      });
    });
  }
  async handleCompletionRequest(req, res, routeKind) {
    const body = req.body;
    const modelReference = this.resolveModelReference(
      body.model,
      req.params.modelId,
    );
    if (!modelReference) {
      res.status(400).json({ error: "Model ID is required" });
      return;
    }
    const model = this.resolveModel(modelReference);
    if (!model) {
      res.status(404).json({ error: `Model ${modelReference} not found` });
      return;
    }
    const apiKey = await this.options.modelRegistry.getApiKey(model);
    if (!apiKey) {
      res
        .status(401)
        .json({ error: `No credentials for provider ${model.provider}` });
      return;
    }
    const abortController = new AbortController();
    req.once("close", () => abortController.abort());
    const maxTokens =
      routeKind === "openai"
        ? body.max_tokens
        : body.generationConfig?.maxOutputTokens;
    const context = this.normalizeContext(body, routeKind);
    const options = {
      apiKey,
      temperature: body.temperature,
      maxTokens,
      signal: abortController.signal,
    };
    const eventStream = this.streamModel(model, context, options);
    const shouldStream =
      routeKind === "google" ? body.stream !== false : body.stream === true;
    if (shouldStream) {
      await this.sendStreamingResponse(eventStream, res, routeKind, model);
      return;
    }
    await this.sendBufferedResponse(eventStream, res, routeKind, model);
  }
  resolveModelReference(bodyModel, pathModelId) {
    return bodyModel ?? pathModelId;
  }
  resolveModel(modelReference) {
    const normalizedReference = modelReference.toLowerCase();
    const exact = this.options.modelRegistry
      .getAll()
      .find(
        (model) =>
          `${model.provider}/${model.id}`.toLowerCase() ===
            normalizedReference ||
          model.id.toLowerCase() === normalizedReference,
      );
    if (exact) {
      return exact;
    }
    const slashIndex = modelReference.indexOf("/");
    if (slashIndex === -1) {
      return undefined;
    }
    const provider = modelReference.slice(0, slashIndex);
    const modelId = modelReference.slice(slashIndex + 1);
    return this.options.modelRegistry.find(provider, modelId);
  }
  normalizeContext(body, routeKind) {
    if (routeKind === "google") {
      return this.normalizeGoogleContext(body);
    }
    return this.normalizeOpenAiContext(body);
  }
  normalizeOpenAiContext(body) {
    const messages = body.messages ?? [];
    const systemPrompt = messages.find(
      (message) => message.role === "system",
    )?.content;
    const normalizedMessages = messages
      .filter((message) => message.role !== "system")
      .map((message) => this.normalizeOpenAiMessage(message));
    return {
      systemPrompt: typeof systemPrompt === "string" ? systemPrompt : undefined,
      messages: normalizedMessages,
    };
  }
  normalizeGoogleContext(body) {
    const systemPrompt =
      body.systemInstruction?.parts?.map((part) => part.text ?? "").join("") ||
      undefined;
    const normalizedMessages = (body.contents ?? [])
      .map((content) => {
        const textContent = (content.parts ?? [])
          .filter((part) => typeof part.text === "string")
          .map((part) => ({ type: "text", text: part.text ?? "" }));
        if (content.role === "user") {
          return this.createUserMessage(textContent);
        }
        return this.createAssistantMessage(textContent);
      })
      .filter((message) => message.content.length > 0);
    return {
      systemPrompt,
      messages: normalizedMessages,
    };
  }
  normalizeOpenAiMessage(message) {
    if (message.role === "assistant") {
      return this.createAssistantMessage(
        this.normalizeContent(message.content),
      );
    }
    return this.createUserMessage(this.normalizeContent(message.content));
  }
  createUserMessage(content) {
    return {
      role: "user",
      content,
      timestamp: Date.now(),
    };
  }
  createAssistantMessage(content) {
    const normalizedContent =
      typeof content === "string" ? [{ type: "text", text: content }] : content;
    return {
      role: "assistant",
      content: normalizedContent,
      api: "google-gemini-cli",
      provider: "google-gemini-cli",
      model: "proxy",
      usage: {
        input: 0,
        output: 0,
        cacheRead: 0,
        cacheWrite: 0,
        totalTokens: 0,
        cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0, total: 0 },
      },
      stopReason: "stop",
      timestamp: Date.now(),
    };
  }
  normalizeContent(content) {
    if (typeof content === "string") {
      return content;
    }
    return (content ?? [])
      .filter((part) => typeof part.text === "string")
      .map((part) => ({ type: "text", text: part.text ?? "" }));
  }
  async sendStreamingResponse(eventStream, res, routeKind, model) {
    res.status(200);
    res.setHeader(
      "Content-Type",
      routeKind === "openai" ? SSE_CONTENT_TYPE : NDJSON_CONTENT_TYPE,
    );
    res.setHeader("Cache-Control", "no-cache");
    res.setHeader("Connection", "keep-alive");
    for await (const event of eventStream) {
      if (event.type === "text_delta") {
        if (routeKind === "openai") {
          res.write(
            `data: ${JSON.stringify(this.buildOpenAiChunk(model, event.delta))}\n\n`,
          );
        } else {
          res.write(`${JSON.stringify(this.buildGoogleChunk(event.delta))}\n`);
        }
      }
      if (event.type === "done") {
        if (routeKind === "openai") {
          res.write("data: [DONE]\n\n");
        }
        res.end();
        return;
      }
      if (event.type === "error") {
        if (!res.headersSent) {
          res
            .status(500)
            .json({ error: event.error.errorMessage ?? "Proxy stream failed" });
        } else {
          res.end();
        }
        return;
      }
    }
    res.end();
  }
  async sendBufferedResponse(eventStream, res, routeKind, model) {
    const assistantMessage = await eventStream.result();
    const text = this.extractText(assistantMessage);
    if (routeKind === "openai") {
      res.json({
        id: `chatcmpl-${Date.now()}`,
        object: "chat.completion",
        created: Math.floor(Date.now() / 1000),
        model: model.id,
        choices: [
          {
            index: 0,
            message: { role: "assistant", content: text },
            finish_reason: "stop",
          },
        ],
        usage: assistantMessage.usage,
      });
      return;
    }
    res.json({
      candidates: [
        {
          content: {
            parts: [{ text }],
          },
        },
      ],
      usageMetadata: assistantMessage.usage,
    });
  }
  extractText(message) {
    return message.content
      .filter((content) => content.type === "text")
      .map((content) => content.text)
      .join("");
  }
  buildOpenAiChunk(model, delta) {
    return {
      id: `chatcmpl-${Date.now()}`,
      object: "chat.completion.chunk",
      created: Math.floor(Date.now() / 1000),
      model: model.id,
      choices: [{ index: 0, delta: { content: delta }, finish_reason: null }],
    };
  }
  buildGoogleChunk(delta) {
    return {
      candidates: [
        {
          content: {
            parts: [{ text: delta }],
          },
        },
      ],
    };
  }
}
export function createProxyServer(options) {
  return new ProxyServer(options);
}
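For context on what is being removed: the routes above expose an OpenAI-compatible surface, so a local client could have called the proxy roughly like this (a sketch only; the model reference is a placeholder and the default port 3000 is assumed):

// Node 18+ provides a global fetch.
const response = await fetch("http://127.0.0.1:3000/v1/chat/completions", {
  method: "POST",
  headers: { "Content-Type": "application/json" },
  body: JSON.stringify({
    model: "example-provider/example-model", // placeholder; resolved via resolveModel()
    messages: [{ role: "user", content: "Hello" }],
    // stream omitted: the openai route streams only when stream === true,
    // so this returns a single buffered chat.completion body.
  }),
});
console.log(await response.json());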

skills/loader.js
@@ -17,9 +17,14 @@ import {
  * Load all valid skills from all sources.
  *
  * Returns array of skill records with validation errors attached.
+ * Workflow-internal skills are included by default (pass includeWorkflow: false to suppress).
  */
 export function loadSkills(projectPath, options = {}) {
-  const discovered = discoverAllSkills(projectPath, options);
+  const resolvedOptions = {
+    includeWorkflow: true,
+    ...options,
+  };
+  const discovered = discoverAllSkills(projectPath, resolvedOptions);
   const skills = [];
   for (const { name, path, source } of discovered) {
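Because resolvedOptions spreads the caller's options over the default, an explicit includeWorkflow still wins. Illustrative calls:

loadSkills("/path/to/project"); // default applies: workflow skills included
loadSkills("/path/to/project", { includeWorkflow: false }); // bundled workflow skills suppressed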

skills.test.mjs
@@ -204,7 +204,7 @@ describe("skill loading", () => {
createSkill("skill-a");
createSkill("skill-b", { permissionProfile: "trusted" });
const skills = loadSkills(tmpDir);
const skills = loadSkills(tmpDir, { includeWorkflow: false });
expect(skills).toHaveLength(2);
expect(skills.every((s) => s.valid)).toBe(true);
expect(skills.some((s) => s.name === "skill-a")).toBe(true);
@@ -217,7 +217,7 @@ describe("skill loading", () => {
     mkdirSync(badDir, { recursive: true });
     writeFileSync(join(badDir, "SKILL.md"), "No frontmatter here.");
-    const skills = loadSkills(tmpDir);
+    const skills = loadSkills(tmpDir, { includeWorkflow: false });
     expect(skills).toHaveLength(2);
     const bad = skills.find((s) => s.name === "bad-skill");
     expect(bad).toBeTruthy();
@@ -229,7 +229,7 @@ describe("skill loading", () => {
     createSkill("normal-skill", { permissionProfile: "normal" });
     createSkill("trusted-skill", { permissionProfile: "trusted" });
-    const skills = loadSkills(tmpDir);
+    const skills = loadSkills(tmpDir, { includeWorkflow: false });
     const permitted = getPermittedSkills(skills, "normal");
     expect(permitted).toHaveLength(2);
     expect(permitted.some((s) => s.name === "restricted-skill")).toBe(true);
@@ -243,7 +243,7 @@ describe("skill loading", () => {
     createSkill("universal-skill", { triggers: ["*"], modelInvocable: true });
     createSkill("user-only", { triggers: ["*"], modelInvocable: false });
-    const skills = loadSkills(tmpDir);
+    const skills = loadSkills(tmpDir, { includeWorkflow: false });
     const buildSkills = getModelInvocableSkills(skills, "build");
     expect(buildSkills).toHaveLength(2);
     expect(buildSkills.some((s) => s.name === "build-skill")).toBe(true);
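Not part of this commit, but a follow-up assertion in the same suite could pin the new default down directly; the source field on skill records and the test helper name are assumptions based on the loader's destructuring and the surrounding suite:

it("includes the bundled workflow skills by default", () => {
  const skills = loadSkills(tmpDir); // includeWorkflow defaults to true
  const workflow = skills.filter((s) => s.source === "workflow"); // field assumed
  expect(workflow).toHaveLength(8);
});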