@ryanfw/prompt-orchestration-pipeline 0.16.2 → 0.16.3

package/docs/pop-task-guide.md ADDED
@@ -0,0 +1,268 @@
+ # POP Pipeline Task Guide
+
+ > Unified reference for creating valid pipeline tasks. Only exported stage functions with the exact names below are called by the pipeline runner.
+
+ ---
+
+ ## Critical Rules
+
+ ### Valid Stage Names (Exhaustive List)
+
+ The pipeline runner **ONLY** calls these 11 exported functions:
+
+ | Stage | Required | Purpose |
+ |-------|----------|---------|
+ | `ingestion` | Yes | Load input from `data.seed` |
+ | `preProcessing` | No | Normalize/enrich data |
+ | `promptTemplating` | Yes | Build LLM prompts |
+ | `inference` | Yes | Call LLM |
+ | `parsing` | No | Parse LLM output |
+ | `validateStructure` | No | JSON schema validation |
+ | `validateQuality` | No | Domain-specific checks |
+ | `critique` | No | Analyze failures |
+ | `refine` | No | Produce improved output |
+ | `finalValidation` | No | Final validation gate |
+ | `integration` | No | Persist results |
+
+ ### Required Contract
+
+ Every stage function must:
+ 1. Be exported: `export const stageName = ...`
+ 2. Return: `{ output: any, flags: object }`
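+
+ For example, a minimal stage that satisfies this contract (the choice of `parsing` and its pass-through body are illustrative; any name from the table above works):
+
+ ```js
+ // Receives the shared context object, returns the required { output, flags } shape
+ export const parsing = ({ data: { inference }, flags }) => ({
+   output: { ...inference }, // forward the upstream stage's output unchanged
+   flags,
+ });
+ ```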
+
+ ### Anti-Patterns (Invalid)
+
+ ```js
+ // ❌ WRONG: Helper functions are NEVER called by pipeline
+ function formatPrompt(topic) { return `...${topic}...`; }
+
+ // ❌ WRONG: Non-standard export names are NEVER called
+ export const myCustomStage = () => ({ output: {}, flags: {} });
+
+ // ❌ WRONG: Must return { output, flags } object
+ export const ingestion = () => "just a string";
+ ```
+
+ ---
+
+ ## Minimal Working Example
+
+ A simple 3-stage task (most tasks only need ingestion → promptTemplating → inference):
+
+ ```js
+ export const ingestion = ({
+   data: { seed: { data: { topic } } },
+   flags,
+ }) => ({
+   output: { topic },
+   flags,
+ });
+
+ export const promptTemplating = ({
+   data: { ingestion: { topic } },
+   flags,
+ }) => ({
+   output: {
+     system: "You are a helpful assistant. Respond in JSON.",
+     prompt: `Write about: ${topic}\n\nRespond as: { "content": "..." }`,
+   },
+   flags,
+ });
+
+ export const inference = async ({
+   io,
+   llm: { deepseek },
+   data: { promptTemplating: { system, prompt } },
+   flags,
+ }) => {
+   const response = await deepseek.chat({
+     messages: [
+       { role: "system", content: system },
+       { role: "user", content: prompt },
+     ],
+   });
+
+   const parsed = typeof response.content === "string"
+     ? JSON.parse(response.content)
+     : response.content;
+
+   await io.writeArtifact("output.json", JSON.stringify(parsed, null, 2));
+   return { output: {}, flags };
+ };
+ ```
+
+ ---
+
+ ## Stage Function Signatures
+
+ ### ingestion
+ ```js
+ export const ingestion = ({ data: { seed }, flags }) => ({
+   output: { /* extracted fields */ },
+   flags,
+ });
+ ```
+
+ ### promptTemplating
+ ```js
+ export const promptTemplating = ({ data: { ingestion }, flags }) => ({
+   output: { system: "...", prompt: "..." },
+   flags,
+ });
+ ```
+
+ ### inference
+ **Rule**: Read prompts from `data.promptTemplating`, not from other sources.
+ ```js
+ export const inference = async ({
+   io,
+   llm: { provider },
+   data: { promptTemplating: { system, prompt } },
+   flags,
+ }) => {
+   const response = await provider.chat({ messages: [...] });
+   // The chat response exposes { content, usage? }; parse JSON string content
+   const parsed = typeof response.content === "string"
+     ? JSON.parse(response.content)
+     : response.content;
+   await io.writeArtifact("output.json", JSON.stringify(parsed, null, 2));
+   return { output: {}, flags };
+ };
+ ```
+
+ ### validateStructure
+ ```js
+ export const validateStructure = async ({
+   io,
+   flags,
+   validators: { validateWithSchema },
+ }) => {
+   const content = await io.readArtifact("output.json");
+   // Validate against your JSON schema, e.g. the exported `outputSchema`
+   // shown in the "JSON Schema Export" section below.
+   const result = validateWithSchema(outputSchema, content);
+   if (!result.valid) {
+     return { output: {}, flags: { ...flags, validationFailed: true } };
+   }
+   return { output: {}, flags };
+ };
+ ```
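+
+ The remaining optional stages follow the same contract. A hedged sketch of `validateQuality` (the artifact name, the length rule, and the `qualityFailed` flag are illustrative, not prescribed by the runner):
+
+ ```js
+ // validateQuality: domain-specific checks beyond JSON structure
+ export const validateQuality = async ({ io, flags }) => {
+   const parsed = JSON.parse(await io.readArtifact("output.json"));
+   // Example domain rule: require non-trivial content length
+   if (!parsed.content || parsed.content.length < 20) {
+     return { output: {}, flags: { ...flags, qualityFailed: true } };
+   }
+   return { output: {}, flags };
+ };
+ ```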
+
+ ---
+
+ ## IO API
+
+ Available on `io` object passed to stages.
+
+ | Function | Parameters | Returns | Description |
+ |----------|------------|---------|-------------|
+ | `io.writeArtifact` | `name, content, { mode? }` | `Promise<string>` | Persist output files |
+ | `io.writeLog` | `name, content, { mode? }` | `Promise<string>` | Debug/progress logs |
+ | `io.writeTmp` | `name, content, { mode? }` | `Promise<string>` | Scratch data |
+ | `io.readArtifact` | `name` | `Promise<string>` | Load artifact |
+ | `io.readLog` | `name` | `Promise<string>` | Read log |
+ | `io.readTmp` | `name` | `Promise<string>` | Read temp file |
+ | `io.getTaskDir` | — | `string` | Current task directory |
+ | `io.getDB` | `options?` | `Database` | SQLite for job (WAL mode) |
+ | `io.runBatch` | `{ jobs, processor, ... }` | `Promise<{ completed, failed }>` | Concurrent batch processing |
+
+ **When to use artifacts vs stage output**: Use `io.writeArtifact` for large outputs, model-native text, values needed by multiple stages, or for auditability. Use stage `output` for small structured values needed immediately by the next stage.
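+
+ For example, `io.runBatch` fans concurrent work out over a list of items. A minimal sketch, assuming `jobs` is an array of inputs and `processor` is an async callback applied to each one (the remaining options are elided in the table above and not assumed here):
+
+ ```js
+ const { completed, failed } = await io.runBatch({
+   jobs: ["alpha", "beta", "gamma"], // illustrative inputs
+   processor: async (job) => {
+     // Hypothetical per-item work: persist one scratch file per job
+     await io.writeTmp(`${job}.txt`, `processed ${job}`);
+   },
+ });
+ ```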
+
+ ---
+
+ ## LLM API
+
+ Available on `llm` object. Call with messages array:
+
+ ```js
+ const response = await llm.deepseek.chat({
+   messages: [
+     { role: "system", content: "..." },
+     { role: "user", content: "..." },
+   ],
+   temperature: 0.7,       // optional: 0-2
+   maxTokens: 1000,        // optional
+   responseFormat: "json", // optional
+ });
+ // Returns: { content: any, usage?: object }
+ ```
+
+ ### Available Providers
+ - `llm.deepseek.chat()`
+ - `llm.anthropic.sonnet45()`
+ - `llm.openai.gpt5Mini()`
+ - `llm.gemini.flash25()`
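+
+ The examples in this guide use `llm.deepseek.chat`. Assuming the other provider methods accept the same options shape (an assumption; only the `chat` call shape is documented above), switching providers is a one-line change:
+
+ ```js
+ // Same messages array, different provider (assumed-compatible signature)
+ const response = await llm.anthropic.sonnet45({
+   messages: [{ role: "user", content: "Summarize POP in one sentence." }],
+ });
+ ```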
+
+ ---
+
+ ## Validation API
+
+ Available via `validators` object in stages that need schema validation.
+
+ ```js
+ validateWithSchema(schema, data) → { valid: boolean, errors?: AjvError[] }
+ ```
+
+ - Accepts string or object (strings parsed as JSON)
+ - Uses Ajv with `{ allErrors: true, strict: false }`
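+
+ A short usage sketch (the schema and payload here are illustrative):
+
+ ```js
+ const schema = {
+   type: "object",
+   required: ["content"],
+   properties: { content: { type: "string" } },
+ };
+ // String input is parsed as JSON before validation
+ const result = validateWithSchema(schema, '{"content": "hello"}');
+ if (!result.valid) {
+   console.error(result.errors); // Ajv error objects (allErrors: true)
+ }
+ ```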
+
+ ---
+
+ ## JSON Schema Export
+
+ Tasks export schemas to validate their output:
+
+ ```js
+ export const outputSchema = {
+   $schema: "http://json-schema.org/draft-07/schema#",
+   type: "object",
+   required: ["content"],
+   properties: {
+     content: { type: "string", minLength: 1 },
+   },
+ };
+ ```
+
+ ---
+
+ ## Seed File Format
+
+ Pipeline jobs start from a seed file in `pending/`:
+
+ ```json
+ {
+   "name": "unique-job-id",
+   "pipeline": "pipeline-slug",
+   "data": { /* context for tasks */ }
+ }
+ ```
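+
+ For instance, a seed that drives the minimal working example above (the `name` and `pipeline` values are hypothetical; `data.topic` matches what that example's `ingestion` destructures):
+
+ ```json
+ {
+   "name": "demo-article-001",
+   "pipeline": "article-writer",
+   "data": { "topic": "prompt orchestration" }
+ }
+ ```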
+
+ ---
+
+ ## Context Object Reference
+
+ Each stage receives:
+
+ ```js
+ {
+   io,          // File I/O (may be null)
+   llm,         // LLM client
+   validators,  // { validateWithSchema }
+   flags,       // Control flags
+   meta: { taskName, workDir, jobId },
+   data: {
+     seed,             // Initial payload
+     ingestion,        // Output from ingestion
+     preProcessing,    // Output from preProcessing
+     promptTemplating, // Output from promptTemplating
+     // ... other stage outputs
+   },
+   output,      // Previous non-validation stage output
+ }
+ ```
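+
+ Because `io` may be null (as noted above), stages that write files can guard before using it. A defensive sketch using the `integration` stage (the log name and message are illustrative):
+
+ ```js
+ export const integration = async ({ io, flags }) => {
+   if (io) {
+     // Only persist when file I/O is available in this run
+     await io.writeLog("integration.log", "run complete");
+   }
+   return { output: {}, flags };
+ };
+ ```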
+
+ ---
+
+ ## Summary
+
+ 1. Export only valid stage names: `ingestion`, `preProcessing`, `promptTemplating`, `inference`, `parsing`, `validateStructure`, `validateQuality`, `critique`, `refine`, `finalValidation`, `integration`
+ 2. Return `{ output, flags }` from every stage
+ 3. Custom helper functions are valid JavaScript but are never called by the pipeline; use them only from within a valid stage
+ 4. Most simple tasks need only: `ingestion` → `promptTemplating` → `inference`
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
    "name": "@ryanfw/prompt-orchestration-pipeline",
-   "version": "0.16.2",
+   "version": "0.16.3",
    "description": "A Prompt-orchestration pipeline (POP) is a framework for building, running, and experimenting with complex chains of LLM tasks.",
    "type": "module",
    "main": "src/ui/server.js",
@@ -9,6 +9,7 @@
    },
    "files": [
      "src",
+     "docs/pop-task-guide.md",
      "README.md",
      "LICENSE"
    ],
@@ -1,4 +1,6 @@
  import fs from "node:fs";
+ import path from "node:path";
+ import { fileURLToPath } from "node:url";
  import { streamSSE } from "../lib/sse.js";
  import { createHighLevelLLM } from "../../llm/index.js";
  import { parseMentions } from "../lib/mention-parser.js";
@@ -6,41 +8,26 @@ import {
    loadSchemaContext,
    buildSchemaPromptSection,
  } from "../lib/schema-loader.js";
+ import { createLogger } from "../../core/logger.js";

- export async function handleTaskPlan(req, res) {
-   console.log("[task-creation-endpoint] Request received");
+ const logger = createLogger("TaskCreationEndpoint");

-   const { messages, pipelineSlug } = req.body;
+ // Resolve path relative to this module for NPM distribution
+ const __dirname = path.dirname(fileURLToPath(import.meta.url));
+ const guidelinesPath = path.resolve(__dirname, "../../../docs/pop-task-guide.md");

-   console.log("[task-creation-endpoint] Request details:", {
-     hasMessages: !!messages,
-     messageCount: Array.isArray(messages) ? messages.length : 0,
-     pipelineSlug,
-     bodyKeys: Object.keys(req.body),
-   });
+ export async function handleTaskPlan(req, res) {
+   const { messages, pipelineSlug } = req.body;

    // Validate input
    if (!Array.isArray(messages)) {
-     console.error(
-       "[task-creation-endpoint] Validation failed: messages is not an array"
-     );
      res.status(400).json({ error: "messages must be an array" });
      return;
    }

-   console.log(
-     "[task-creation-endpoint] Loading guidelines from docs/pipeline-task-guidelines.md..."
-   );
-
    // Load guidelines - let it throw if missing
-   const guidelinesPath = "docs/pipeline-task-guidelines.md";
    const guidelines = fs.readFileSync(guidelinesPath, "utf-8");

-   console.log(
-     "[task-creation-endpoint] Guidelines loaded, length:",
-     guidelines.length
-   );
-
    // Parse @mentions and load schema contexts for enrichment
    const mentionedFiles = parseMentions(messages);
    const schemaContexts = [];
@@ -54,22 +41,76 @@ export async function handleTaskPlan(req, res) {
    }
    const schemaEnrichment = buildSchemaPromptSection(schemaContexts);

-   if (schemaEnrichment) {
-     console.log(
-       "[task-creation-endpoint] Schema enrichment added for:",
-       mentionedFiles
-     );
-   }
-
    // Build LLM messages array
-   const systemPrompt = `You are a pipeline task assistant. Help users create task definitions following these guidelines:
+   const systemPrompt = `You are a pipeline task assistant. You help users understand the POP (Prompt Orchestration Pipeline) system and create task definitions.
+
+ ## How to Answer Questions
+
+ When users ask questions, identify which topic area applies and reference the relevant section of knowledge below:
+
+ - **LLM/Provider questions** → See "Available LLM Providers" section
+ - **Stage/Function questions** → See "Valid Stage Names" and "Stage Function Signatures" sections
+ - **IO/Database questions** → See "IO API" section
+ - **Validation questions** → See "Validation API" and "JSON Schema Export" sections
+ - **Task creation requests** → Use all sections to build a complete task
+
+ Be concise and direct. Use code examples when helpful. Reference specific API signatures.
+
+ ---
+
+ # KNOWLEDGE BASE

  ${guidelines}
  ${schemaEnrichment ? `\n${schemaEnrichment}\n` : ""}

+ ---
+
+ ## Quick Reference: Common Questions
+
+ **Q: What LLM models/providers are available?**
+ Available providers via the \`llm\` object:
+ - \`llm.deepseek.chat()\` - DeepSeek model
+ - \`llm.anthropic.sonnet45()\` - Anthropic Claude Sonnet 4.5
+ - \`llm.openai.gpt5Mini()\` - OpenAI GPT-5 Mini
+ - \`llm.gemini.flash25()\` - Google Gemini Flash 2.5
+
+ **Q: What functions/stages do I need to define?**
+ Minimum required: \`ingestion\`, \`promptTemplating\`, \`inference\`
+ Optional: \`preProcessing\`, \`parsing\`, \`validateStructure\`, \`validateQuality\`, \`critique\`, \`refine\`, \`finalValidation\`, \`integration\`
+
+ **Q: How do I use the database?**
+ Use \`io.getDB()\` to get a SQLite database instance (WAL mode):
+ \`\`\`js
+ const db = io.getDB();
+ db.exec('CREATE TABLE IF NOT EXISTS results (id INTEGER PRIMARY KEY, data TEXT)');
+ db.prepare('INSERT INTO results (data) VALUES (?)').run(JSON.stringify(myData));
+ \`\`\`
+
+ **Q: How do I read/write files?**
+ Use the \`io\` object:
+ - \`io.writeArtifact(name, content)\` - Persist output files
+ - \`io.readArtifact(name)\` - Load artifact
+ - \`io.writeTmp(name, content)\` - Scratch data
+ - \`io.writeLog(name, content)\` - Debug/progress logs
+
+ ---
+
+ ## Task Proposal Guidelines
+
  Provide complete, working code. Use markdown code blocks.

- When you have completed a task definition that the user wants to create, wrap it in this format:
+ ONLY use the [TASK_PROPOSAL] wrapper when ALL of these conditions are met:
+ 1. The user has explicitly requested you create/build/write a task for them
+ 2. You have a complete, production-ready task definition (not an example or illustration)
+ 3. The user has confirmed their requirements or iterated to a final version
+
+ DO NOT use [TASK_PROPOSAL] for:
+ - Answering questions about capabilities or how tasks work
+ - Showing illustrative examples or code snippets
+ - Explaining concepts with sample code
+ - Incomplete or draft task definitions still being discussed
+
+ When you DO output a [TASK_PROPOSAL], use this format:
  [TASK_PROPOSAL]
  FILENAME: <filename.js>
  TASKNAME: <task-name>
@@ -81,21 +122,13 @@ CODE:

    const llmMessages = [{ role: "system", content: systemPrompt }, ...messages];

-   console.log("[task-creation-endpoint] LLM messages array created:", {
-     totalMessages: llmMessages.length,
-     systemPromptLength: systemPrompt.length,
-   });
-
    // Create SSE stream
-   console.log("[task-creation-endpoint] Creating SSE stream...");
    const sse = streamSSE(res);

    try {
-     console.log("[task-creation-endpoint] Creating LLM instance...");
      // Get LLM instance (uses default provider from config)
      const llm = createHighLevelLLM();

-     console.log("[task-creation-endpoint] Calling LLM chat with streaming...");
      // Call LLM with streaming enabled
      const response = await llm.chat({
        messages: llmMessages,
@@ -103,38 +136,20 @@ CODE:
        stream: true,
      });

-     console.log("[task-creation-endpoint] LLM response received:", {
-       isStream: typeof response[Symbol.asyncIterator] !== "undefined",
-     });
-
      // Stream is an async generator
-     let chunkCount = 0;
      for await (const chunk of response) {
        if (chunk?.content) {
          sse.send("chunk", { content: chunk.content });
-         chunkCount++;
        }
      }

-     console.log("[task-creation-endpoint] Sent", chunkCount, "chunks via SSE");
-
      // Send done event
-     console.log("[task-creation-endpoint] Sending 'done' event...");
      sse.send("done", {});
-     console.log("[task-creation-endpoint] Ending SSE stream...");
      sse.end();
-     console.log("[task-creation-endpoint] Request completed successfully");
    } catch (error) {
-     console.error("[task-creation-endpoint] Error occurred:", {
-       message: error.message,
-       stack: error.stack,
-       name: error.name,
-     });
+     logger.error("LLM streaming failed", error);
      // Send error event
      sse.send("error", { message: error.message });
-     console.log(
-       "[task-creation-endpoint] Error sent via SSE, ending stream..."
-     );
      sse.end();
    }
- }
+ }