@prompty/core 2.0.0-alpha.1 → 2.0.0-alpha.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -1,105 +1,108 @@
1
- # @prompty/core
2
-
3
- Prompty core runtime — load, render, parse, and trace `.prompty` files in TypeScript/JavaScript.
4
-
5
- ## Installation
6
-
7
- ```bash
8
- npm install @prompty/core
9
- ```
10
-
11
- You'll also need a **provider package** for your LLM:
12
-
13
- ```bash
14
- # OpenAI
15
- npm install @prompty/openai
16
-
17
- # Azure AI Foundry
18
- npm install @prompty/foundry
19
- ```
20
-
21
- ## Quick Start
22
-
23
- ```typescript
24
- import "@prompty/openai"; // registers the OpenAI provider
25
- import { load, run } from "@prompty/core";
26
-
27
- // Load and run a .prompty file
28
- const result = await run("./my-prompt.prompty", {
29
- question: "What is the capital of France?",
30
- });
31
- console.log(result);
32
- ```
33
-
34
- ## Step-by-Step Usage
35
-
36
- ```typescript
37
- import "@prompty/openai";
38
- import { load, prepare, execute, process } from "@prompty/core";
39
-
40
- // 1. Load — parse .prompty file into a typed Prompty object
41
- const agent = await load("./chat.prompty");
42
-
43
- // 2. Prepare — render template + parse into messages
44
- const messages = await prepare(agent, { name: "Alice" });
45
-
46
- // 3. Execute — call the LLM
47
- const response = await execute(agent, messages);
48
-
49
- // 4. Process — extract the result
50
- const result = await process(agent, response);
51
- ```
52
-
53
- ## What's a `.prompty` file?
54
-
55
- A Markdown file with YAML frontmatter that defines an LLM prompt:
56
-
57
- ```prompty
58
- ---
59
- name: greeting
60
- model:
61
- id: gpt-4o-mini
62
- provider: openai
63
- connection:
64
- kind: key
65
- endpoint: ${env:OPENAI_BASE_URL}
66
- apiKey: ${env:OPENAI_API_KEY}
67
- ---
68
- system:
69
- You are a helpful assistant.
70
-
71
- user:
72
- Hello, my name is {{name}}. {{question}}
73
- ```
74
-
75
- ## Tracing
76
-
77
- ```typescript
78
- import { Tracer, PromptyTracer } from "@prompty/core";
79
-
80
- // Write .tracy JSON files to .runs/
81
- const tracer = new PromptyTracer({ outputDir: ".runs" });
82
- Tracer.add("prompty", tracer.factory);
83
-
84
- const result = await run("./chat.prompty", { question: "Hi" });
85
- console.log("Trace:", tracer.lastTracePath);
86
- ```
87
-
88
- ## Key Exports
89
-
90
- | Export | Description |
91
- |--------|-------------|
92
- | `load` | Parse a `.prompty` file → `Prompty` object |
93
- | `prepare` | Render template + parse → `Message[]` |
94
- | `execute` | Call the LLM provider |
95
- | `process` | Extract result from LLM response |
96
- | `run` | All-in-one: load → prepare → execute → process |
97
- | `executeAgent` | Agent loop with tool calling |
98
- | `Tracer` / `PromptyTracer` | Observability and tracing |
99
- | `registerExecutor` / `registerProcessor` | Register custom providers |
100
- | `registerConnection` | Register named connections |
101
- | `Message`, `ContentPart`, `PromptyStream` | Core types |
102
-
103
- ## License
104
-
105
- MIT
1
+ # @prompty/core
2
+
3
+ Prompty core runtime — load, render, parse, and trace `.prompty` files in TypeScript/JavaScript.
4
+
5
+ ## Installation
6
+
7
+ ```bash
8
+ npm install @prompty/core
9
+ ```
10
+
11
+ You'll also need a **provider package** for your LLM:
12
+
13
+ ```bash
14
+ # OpenAI
15
+ npm install @prompty/openai
16
+
17
+ # Azure AI Foundry
18
+ npm install @prompty/foundry
19
+
20
+ # Anthropic
21
+ npm install @prompty/anthropic
22
+ ```
23
+
24
+ ## Quick Start
25
+
26
+ ```typescript
27
+ import "@prompty/openai"; // registers the OpenAI provider
28
+ import { load, run } from "@prompty/core";
29
+
30
+ // Load and run a .prompty file
31
+ const result = await run("./my-prompt.prompty", {
32
+ question: "What is the capital of France?",
33
+ });
34
+ console.log(result);
35
+ ```
36
+
37
+ ## Step-by-Step Usage
38
+
39
+ ```typescript
40
+ import "@prompty/openai";
41
+ import { load, prepare, execute, process } from "@prompty/core";
42
+
43
+ // 1. Load — parse .prompty file into a typed Prompty object
44
+ const agent = await load("./chat.prompty");
45
+
46
+ // 2. Prepare — render template + parse into messages
47
+ const messages = await prepare(agent, { name: "Alice" });
48
+
49
+ // 3. Execute — call the LLM
50
+ const response = await execute(agent, messages);
51
+
52
+ // 4. Process — extract the result
53
+ const result = await process(agent, response);
54
+ ```
55
+
56
+ ## What's a `.prompty` file?
57
+
58
+ A Markdown file with YAML frontmatter that defines an LLM prompt:
59
+
60
+ ```prompty
61
+ ---
62
+ name: greeting
63
+ model:
64
+ id: gpt-4o-mini
65
+ provider: openai
66
+ connection:
67
+ kind: key
68
+ endpoint: ${env:OPENAI_BASE_URL}
69
+ apiKey: ${env:OPENAI_API_KEY}
70
+ ---
71
+ system:
72
+ You are a helpful assistant.
73
+
74
+ user:
75
+ Hello, my name is {{name}}. {{question}}
76
+ ```
77
+
78
+ ## Tracing
79
+
80
+ ```typescript
81
+ import { Tracer, PromptyTracer } from "@prompty/core";
82
+
83
+ // Write .tracy JSON files to .runs/
84
+ const tracer = new PromptyTracer({ outputDir: ".runs" });
85
+ Tracer.add("prompty", tracer.factory);
86
+
87
+ const result = await run("./chat.prompty", { question: "Hi" });
88
+ console.log("Trace:", tracer.lastTracePath);
89
+ ```
90
+
91
+ ## Key Exports
92
+
93
+ | Export | Description |
94
+ |--------|-------------|
95
+ | `load` | Parse a `.prompty` file → `Prompty` object |
96
+ | `prepare` | Render template + parse → `Message[]` |
97
+ | `execute` | Call the LLM provider |
98
+ | `process` | Extract result from LLM response |
99
+ | `run` | All-in-one: load → prepare → execute → process |
100
+ | `executeAgent` | Agent loop with tool calling |
101
+ | `Tracer` / `PromptyTracer` | Observability and tracing |
102
+ | `registerExecutor` / `registerProcessor` | Register custom providers |
103
+ | `registerConnection` | Register named connections |
104
+ | `Message`, `ContentPart`, `PromptyStream` | Core types |
105
+
106
+ ## License
107
+
108
+ MIT
package/dist/index.cjs CHANGED
@@ -4537,6 +4537,114 @@ async function execute(prompt, inputs, options) {
4537
4537
  return result;
4538
4538
  });
4539
4539
  }
4540
+ function isAsyncIterable(value) {
4541
+ return value != null && typeof value === "object" && Symbol.asyncIterator in value;
4542
+ }
4543
+ function isToolCallLike(item) {
4544
+ return typeof item === "object" && item !== null && "id" in item && "name" in item && "arguments" in item;
4545
+ }
4546
+ async function consumeStream(agent, response) {
4547
+ const processed = await process2(agent, response);
4548
+ const toolCalls = [];
4549
+ const textParts = [];
4550
+ if (isAsyncIterable(processed)) {
4551
+ for await (const item of processed) {
4552
+ if (isToolCallLike(item)) {
4553
+ toolCalls.push(item);
4554
+ } else if (typeof item === "string") {
4555
+ textParts.push(item);
4556
+ }
4557
+ }
4558
+ } else if (typeof processed === "string") {
4559
+ textParts.push(processed);
4560
+ }
4561
+ return { toolCalls, content: textParts.join("") };
4562
+ }
4563
+ async function buildToolMessagesFromCalls(toolCalls, textContent, tools, agent, parentEmit) {
4564
+ const provider = resolveProvider(agent);
4565
+ const apiType = agent.model?.apiType || "chat";
4566
+ const messages = [];
4567
+ const toolInputs = [];
4568
+ if (provider === "anthropic") {
4569
+ const rawContent = [];
4570
+ if (textContent) rawContent.push({ type: "text", text: textContent });
4571
+ for (const tc of toolCalls) {
4572
+ rawContent.push({
4573
+ type: "tool_use",
4574
+ id: tc.id,
4575
+ name: tc.name,
4576
+ input: JSON.parse(tc.arguments)
4577
+ });
4578
+ }
4579
+ messages.push(
4580
+ new Message("assistant", textContent ? [text(textContent)] : [], { content: rawContent })
4581
+ );
4582
+ } else if (apiType === "responses") {
4583
+ for (const tc of toolCalls) {
4584
+ messages.push(
4585
+ new Message("assistant", [], {
4586
+ responses_function_call: {
4587
+ type: "function_call",
4588
+ call_id: tc.id,
4589
+ name: tc.name,
4590
+ arguments: tc.arguments
4591
+ }
4592
+ })
4593
+ );
4594
+ }
4595
+ } else {
4596
+ const rawToolCalls = toolCalls.map((tc) => ({
4597
+ id: tc.id,
4598
+ type: "function",
4599
+ function: { name: tc.name, arguments: tc.arguments }
4600
+ }));
4601
+ messages.push(
4602
+ new Message("assistant", textContent ? [text(textContent)] : [], {
4603
+ tool_calls: rawToolCalls
4604
+ })
4605
+ );
4606
+ }
4607
+ const toolResultBlocks = [];
4608
+ for (const tc of toolCalls) {
4609
+ let result;
4610
+ let parsedArgs;
4611
+ try {
4612
+ parsedArgs = JSON.parse(tc.arguments);
4613
+ const toolFn = tools[tc.name];
4614
+ if (!toolFn) {
4615
+ result = `Error: tool "${tc.name}" not found`;
4616
+ } else {
4617
+ const toolResult = await traceSpan(tc.name, async (toolEmit) => {
4618
+ toolEmit("signature", `prompty.tool.${tc.name}`);
4619
+ toolEmit("description", `Execute tool: ${tc.name}`);
4620
+ toolEmit("inputs", { arguments: parsedArgs, id: tc.id });
4621
+ const r = await toolFn(...Array.isArray(parsedArgs) ? parsedArgs : [parsedArgs]);
4622
+ const str = typeof r === "string" ? r : JSON.stringify(r);
4623
+ toolEmit("result", str);
4624
+ return str;
4625
+ });
4626
+ result = toolResult;
4627
+ }
4628
+ } catch (err) {
4629
+ result = `Error: ${err instanceof Error ? err.message : String(err)}`;
4630
+ }
4631
+ toolInputs.push({ name: tc.name, arguments: parsedArgs, id: tc.id, result });
4632
+ if (provider === "anthropic") {
4633
+ toolResultBlocks.push({ type: "tool_result", tool_use_id: tc.id, content: result });
4634
+ } else {
4635
+ messages.push(
4636
+ new Message("tool", [text(result)], { tool_call_id: tc.id, name: tc.name })
4637
+ );
4638
+ }
4639
+ }
4640
+ if (provider === "anthropic" && toolResultBlocks.length > 0) {
4641
+ messages.push(new Message("user", [], { tool_results: toolResultBlocks }));
4642
+ }
4643
+ if (parentEmit) {
4644
+ parentEmit("inputs", { tool_calls: toolInputs });
4645
+ }
4646
+ return messages;
4647
+ }
4540
4648
  async function executeAgent(prompt, inputs, options) {
4541
4649
  return traceSpan("executeAgent", async (emit) => {
4542
4650
  const agent = typeof prompt === "string" ? await traceSpan("load", async (loadEmit) => {
@@ -4557,7 +4665,32 @@ async function executeAgent(prompt, inputs, options) {
4557
4665
  const executor = getExecutor(provider);
4558
4666
  let response = await executor.execute(agent, messages);
4559
4667
  let iteration = 0;
4560
- while (hasToolCalls(response)) {
4668
+ while (true) {
4669
+ if (isAsyncIterable(response)) {
4670
+ const { toolCalls, content } = await consumeStream(agent, response);
4671
+ if (toolCalls.length === 0) {
4672
+ emit("iterations", iteration);
4673
+ emit("result", content);
4674
+ return content;
4675
+ }
4676
+ iteration++;
4677
+ if (iteration > maxIterations) {
4678
+ throw new Error(
4679
+ `Agent loop exceeded maxIterations (${maxIterations}). The model kept requesting tool calls. Increase maxIterations or check your tools.`
4680
+ );
4681
+ }
4682
+ const toolMessages2 = await traceSpan("toolCalls", async (toolEmit) => {
4683
+ toolEmit("signature", "prompty.executeAgent.toolCalls");
4684
+ toolEmit("description", `Tool call round ${iteration}`);
4685
+ const result2 = await buildToolMessagesFromCalls(toolCalls, content, tools, agent, toolEmit);
4686
+ toolEmit("result", result2.map((m) => ({ role: m.role, content: m.parts.map((p) => p.value ?? "").join(""), metadata: m.metadata })));
4687
+ return result2;
4688
+ });
4689
+ messages.push(...toolMessages2);
4690
+ response = await executor.execute(agent, messages);
4691
+ continue;
4692
+ }
4693
+ if (!hasToolCalls(response)) break;
4561
4694
  iteration++;
4562
4695
  if (iteration > maxIterations) {
4563
4696
  throw new Error(
@@ -4631,15 +4764,37 @@ function hasToolCalls(response) {
4631
4764
  if (typeof response !== "object" || response === null) return false;
4632
4765
  const r = response;
4633
4766
  const choices = r.choices;
4634
- if (!Array.isArray(choices) || choices.length === 0) return false;
4635
- const choice = choices[0];
4636
- const message = choice.message;
4637
- if (!message) return false;
4638
- const toolCalls = message.tool_calls;
4639
- return Array.isArray(toolCalls) && toolCalls.length > 0;
4767
+ if (Array.isArray(choices) && choices.length > 0) {
4768
+ const choice = choices[0];
4769
+ const message = choice.message;
4770
+ if (message) {
4771
+ const toolCalls = message.tool_calls;
4772
+ if (Array.isArray(toolCalls) && toolCalls.length > 0) return true;
4773
+ }
4774
+ }
4775
+ if (r.stop_reason === "tool_use" && Array.isArray(r.content)) {
4776
+ return r.content.some(
4777
+ (block) => block.type === "tool_use"
4778
+ );
4779
+ }
4780
+ if (r.object === "response" && Array.isArray(r.output)) {
4781
+ return r.output.some(
4782
+ (item) => item.type === "function_call"
4783
+ );
4784
+ }
4785
+ return false;
4640
4786
  }
4641
4787
  async function buildToolResultMessages(response, tools, parentEmit) {
4642
4788
  const r = response;
4789
+ if (Array.isArray(r.content) && r.stop_reason === "tool_use") {
4790
+ return buildAnthropicToolResultMessages(r, tools, parentEmit);
4791
+ }
4792
+ if (r.object === "response" && Array.isArray(r.output)) {
4793
+ return buildResponsesToolResultMessages(r, tools, parentEmit);
4794
+ }
4795
+ return buildOpenAIToolResultMessages(r, tools, parentEmit);
4796
+ }
4797
+ async function buildOpenAIToolResultMessages(r, tools, parentEmit) {
4643
4798
  const choices = r.choices;
4644
4799
  const choice = choices[0];
4645
4800
  const message = choice.message;
@@ -4691,6 +4846,109 @@ async function buildToolResultMessages(response, tools, parentEmit) {
4691
4846
  }
4692
4847
  return messages;
4693
4848
  }
4849
+ async function buildAnthropicToolResultMessages(r, tools, parentEmit) {
4850
+ const content = r.content;
4851
+ const toolUseBlocks = content.filter((block) => block.type === "tool_use");
4852
+ const messages = [];
4853
+ const textParts = content.filter((block) => block.type === "text").map((block) => text(block.text));
4854
+ messages.push(
4855
+ new Message("assistant", textParts, { content })
4856
+ );
4857
+ const toolInputs = [];
4858
+ const toolResultBlocks = [];
4859
+ for (const block of toolUseBlocks) {
4860
+ const toolName = block.name;
4861
+ const toolCallId = block.id;
4862
+ const toolArgs = block.input;
4863
+ let result;
4864
+ try {
4865
+ const toolFn = tools[toolName];
4866
+ if (!toolFn) {
4867
+ result = `Error: tool "${toolName}" not found`;
4868
+ } else {
4869
+ const toolResult = await traceSpan(toolName, async (toolEmit) => {
4870
+ toolEmit("signature", `prompty.tool.${toolName}`);
4871
+ toolEmit("description", `Execute tool: ${toolName}`);
4872
+ toolEmit("inputs", { arguments: toolArgs, tool_use_id: toolCallId });
4873
+ const r2 = await toolFn(...Array.isArray(toolArgs) ? toolArgs : [toolArgs]);
4874
+ const str = typeof r2 === "string" ? r2 : JSON.stringify(r2);
4875
+ toolEmit("result", str);
4876
+ return str;
4877
+ });
4878
+ result = toolResult;
4879
+ }
4880
+ } catch (err) {
4881
+ result = `Error: ${err instanceof Error ? err.message : String(err)}`;
4882
+ }
4883
+ toolInputs.push({ name: toolName, arguments: toolArgs, tool_use_id: toolCallId, result });
4884
+ toolResultBlocks.push({
4885
+ type: "tool_result",
4886
+ tool_use_id: toolCallId,
4887
+ content: result
4888
+ });
4889
+ }
4890
+ if (parentEmit) {
4891
+ parentEmit("inputs", { tool_calls: toolInputs });
4892
+ }
4893
+ messages.push(
4894
+ new Message("user", [], { tool_results: toolResultBlocks })
4895
+ );
4896
+ return messages;
4897
+ }
4898
+ async function buildResponsesToolResultMessages(r, tools, parentEmit) {
4899
+ const output = r.output;
4900
+ const funcCalls = output.filter((item) => item.type === "function_call");
4901
+ const messages = [];
4902
+ const toolInputs = [];
4903
+ for (const fc of funcCalls) {
4904
+ const toolName = fc.name;
4905
+ const callId = fc.call_id ?? fc.id ?? "";
4906
+ const argsStr = fc.arguments ?? "{}";
4907
+ messages.push(
4908
+ new Message("assistant", [], {
4909
+ responses_function_call: {
4910
+ type: "function_call",
4911
+ call_id: callId,
4912
+ name: toolName,
4913
+ arguments: argsStr
4914
+ }
4915
+ })
4916
+ );
4917
+ let result;
4918
+ let parsedArgs;
4919
+ try {
4920
+ parsedArgs = JSON.parse(argsStr);
4921
+ const toolFn = tools[toolName];
4922
+ if (!toolFn) {
4923
+ result = `Error: tool "${toolName}" not found`;
4924
+ } else {
4925
+ const toolResult = await traceSpan(toolName, async (toolEmit) => {
4926
+ toolEmit("signature", `prompty.tool.${toolName}`);
4927
+ toolEmit("description", `Execute tool: ${toolName}`);
4928
+ toolEmit("inputs", { arguments: parsedArgs, call_id: callId });
4929
+ const r2 = await toolFn(...Array.isArray(parsedArgs) ? parsedArgs : [parsedArgs]);
4930
+ const str = typeof r2 === "string" ? r2 : JSON.stringify(r2);
4931
+ toolEmit("result", str);
4932
+ return str;
4933
+ });
4934
+ result = toolResult;
4935
+ }
4936
+ } catch (err) {
4937
+ result = `Error: ${err instanceof Error ? err.message : String(err)}`;
4938
+ }
4939
+ toolInputs.push({ name: toolName, arguments: parsedArgs, call_id: callId, result });
4940
+ messages.push(
4941
+ new Message("tool", [text(result)], {
4942
+ tool_call_id: callId,
4943
+ name: toolName
4944
+ })
4945
+ );
4946
+ }
4947
+ if (parentEmit) {
4948
+ parentEmit("inputs", { tool_calls: toolInputs });
4949
+ }
4950
+ return messages;
4951
+ }
4694
4952
  var runAgent = executeAgent;
4695
4953
 
4696
4954
  // src/renderers/nunjucks.ts