agency-lang 0.0.7 → 0.0.8

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,4 +1,4 @@
1
- import { AgencyComment, AgencyProgram, Assignment, Literal, PromptLiteral, TypeAlias, TypeHint } from "../types.js";
1
+ import { AgencyComment, AgencyProgram, Assignment, Literal, PromptLiteral, TypeAlias, TypeHint, VariableType } from "../types.js";
2
2
  import { AccessExpression, DotFunctionCall, DotProperty, IndexAccess } from "../types/access.js";
3
3
  import { AgencyArray, AgencyObject } from "../types/dataStructures.js";
4
4
  import { FunctionCall, FunctionDefinition } from "../types/function.js";
@@ -20,6 +20,7 @@ export declare class AgencyGenerator extends BaseGenerator {
20
20
  protected generateImports(): string;
21
21
  protected preprocess(): string;
22
22
  protected postprocess(): string;
23
+ protected aliasedTypeToString(aliasedType: VariableType): string;
23
24
  protected processTypeAlias(node: TypeAlias): string;
24
25
  protected processTypeHint(node: TypeHint): string;
25
26
  protected processAssignment(node: Assignment): string;
@@ -6,8 +6,8 @@ export class AgencyGenerator extends BaseGenerator {
6
6
  constructor() {
7
7
  super();
8
8
  }
9
- indent() {
10
- return " ".repeat(this.indentLevel * this.indentSize);
9
+ indent(level = this.indentLevel) {
10
+ return " ".repeat(level * this.indentSize);
11
11
  }
12
12
  increaseIndent() {
13
13
  this.indentLevel++;
@@ -28,10 +28,26 @@ export class AgencyGenerator extends BaseGenerator {
28
28
  postprocess() {
29
29
  return "";
30
30
  }
31
+ aliasedTypeToString(aliasedType) {
32
+ if (aliasedType.type === "objectType") {
33
+ const props = aliasedType.properties
34
+ .map((prop) => {
35
+ let str = `${this.indent(this.indentLevel + 1)}`;
36
+ str += `${prop.key}: ${this.aliasedTypeToString(prop.value)}`;
37
+ if (prop.description) {
38
+ str += ` # ${prop.description}`;
39
+ }
40
+ return str;
41
+ })
42
+ .join(";\n");
43
+ return `{\n${props}\n}`;
44
+ }
45
+ return variableTypeToString(aliasedType, this.typeAliases);
46
+ }
31
47
  // Type system methods
32
48
  processTypeAlias(node) {
33
49
  this.typeAliases[node.aliasName] = node.aliasedType;
34
- const aliasedTypeStr = variableTypeToString(node.aliasedType, this.typeAliases);
50
+ const aliasedTypeStr = this.aliasedTypeToString(node.aliasedType);
35
51
  return this.indentStr(`type ${node.aliasName} = ${aliasedTypeStr}\n`);
36
52
  }
37
53
  processTypeHint(node) {
@@ -19,7 +19,9 @@ export class TypeScriptGenerator extends BaseGenerator {
19
19
  processTypeAlias(node) {
20
20
  this.typeAliases[node.aliasName] = node.aliasedType;
21
21
  const typeAliasStr = this.typeAliasToString(node);
22
- this.generatedTypeAliases.push(typeAliasStr);
22
+ if (!this.generatedTypeAliases.includes(typeAliasStr)) {
23
+ this.generatedTypeAliases.push(typeAliasStr);
24
+ }
23
25
  return "";
24
26
  }
25
27
  typeAliasToString(node) {
@@ -1,4 +1,4 @@
1
- export declare const template = "import OpenAI from \"openai\";\nimport { zodResponseFormat } from \"openai/helpers/zod\";\nimport { z } from \"zod\";\nimport * as readline from \"readline\";\nimport fs from \"fs\";\nimport { Graph, goToNode } from \"simplemachine\";\nimport { StatelogClient } from \"statelog-client\";\nimport { nanoid } from \"nanoid\";\n\nconst statelogHost = \"http://localhost:1065\";\nconst traceId = nanoid();\nconst statelogClient = new StatelogClient({host: statelogHost, tid: traceId});\nconst model = \"gpt-4.1-nano-2025-04-14\";\n\nconst openai = new OpenAI({\n apiKey: process.env.OPENAI_API_KEY,\n});\n\ntype State = {\n messages: string[];\n data: any;\n}\n\n// enable debug logging\nconst graphConfig = {\n debug: {\n log: true,\n logData: true,\n },\n statelogHost,\n traceId\n};\n\n// Define the names of the nodes in the graph\n// Useful for type safety\nconst nodes = {{{nodes:string}}} as const;\ntype Node = (typeof nodes)[number];\n\nconst graph = new Graph<State, Node>(nodes, graphConfig);";
1
+ export declare const template = "import OpenAI from \"openai\";\nimport { zodResponseFormat } from \"openai/helpers/zod\";\nimport { z } from \"zod\";\nimport * as readline from \"readline\";\nimport fs from \"fs\";\nimport { Graph, goToNode } from \"simplemachine\";\nimport { StatelogClient } from \"statelog-client\";\nimport { nanoid } from \"nanoid\";\nimport { assistantMessage, getClient, Message, userMessage } from \"smoltalk\";\n\nconst statelogHost = \"http://localhost:1065\";\nconst traceId = nanoid();\nconst statelogClient = new StatelogClient({host: statelogHost, tid: traceId});\nconst model = \"gpt-4o-mini\";\n\nconst client = getClient({\n apiKey: process.env.OPENAI_API_KEY || \"\",\n model,\n});\n\ntype State = {\n messages: string[];\n data: any;\n}\n\n// enable debug logging\nconst graphConfig = {\n debug: {\n log: true,\n logData: true,\n },\n statelogHost,\n traceId\n};\n\n// Define the names of the nodes in the graph\n// Useful for type safety\nconst nodes = {{{nodes:string}}} as const;\ntype Node = (typeof nodes)[number];\n\nconst graph = new Graph<State, Node>(nodes, graphConfig);";
2
2
  export type TemplateType = {
3
3
  nodes: string;
4
4
  };
@@ -10,14 +10,16 @@ import fs from "fs";
10
10
  import { Graph, goToNode } from "simplemachine";
11
11
  import { StatelogClient } from "statelog-client";
12
12
  import { nanoid } from "nanoid";
13
+ import { assistantMessage, getClient, Message, userMessage } from "smoltalk";
13
14
 
14
15
  const statelogHost = "http://localhost:1065";
15
16
  const traceId = nanoid();
16
17
  const statelogClient = new StatelogClient({host: statelogHost, tid: traceId});
17
- const model = "gpt-4.1-nano-2025-04-14";
18
+ const model = "gpt-4o-mini";
18
19
 
19
- const openai = new OpenAI({
20
- apiKey: process.env.OPENAI_API_KEY,
20
+ const client = getClient({
21
+ apiKey: process.env.OPENAI_API_KEY || "",
22
+ model,
21
23
  });
22
24
 
23
25
  type State = {
@@ -1,4 +1,4 @@
1
- export declare const template = "\nasync function _{{{variableName:string}}}({{{argsStr:string}}}): Promise<{{{typeString:string}}}> {\n const prompt = {{{promptCode:string}}};\n const startTime = performance.now();\n const messages:any[] = [{ role: \"user\", content: prompt }];\n const tools = {{{tools}}};\n\n let completion = await openai.chat.completions.create({\n model,\n messages,\n tools,\n response_format: zodResponseFormat(z.object({\n value: {{{zodSchema:string}}}\n }), \"{{{variableName:string}}}_response\"),\n });\n const endTime = performance.now();\n statelogClient.promptCompletion({\n messages,\n completion,\n model,\n timeTaken: endTime - startTime,\n });\n\n let responseMessage = completion.choices[0].message;\n // Handle function calls\n while (responseMessage.tool_calls && responseMessage.tool_calls.length > 0) {\n // Add assistant's response with tool calls to message history\n messages.push(responseMessage);\n let toolCallStartTime, toolCallEndTime;\n\n // Process each tool call\n for (const toolCall of responseMessage.tool_calls) {\n {{{functionCalls:string}}}\n }\n\n const nextStartTime = performance.now();\n // Get the next response from the model\n completion = await openai.chat.completions.create({\n model,\n messages: messages,\n tools: tools,\n });\n const nextEndTime = performance.now();\n\n statelogClient.promptCompletion({\n messages,\n completion,\n model,\n timeTaken: nextEndTime - nextStartTime,\n });\n\n responseMessage = completion.choices[0].message;\n }\n\n // Add final assistant response to history\n messages.push(responseMessage);\n\n try {\n const result = JSON.parse(completion.choices[0].message.content || \"\");\n return result.value;\n } catch (e) {\n return completion.choices[0].message.content;\n // console.error(\"Error parsing response for variable '{{{variableName:string}}}':\", e);\n // console.error(\"Full completion response:\", JSON.stringify(completion, null, 2));\n // throw e;\n }\n}\n";
1
+ export declare const template = "\nasync function _{{{variableName:string}}}({{{argsStr:string}}}): Promise<{{{typeString:string}}}> {\n const prompt = {{{promptCode:string}}};\n const startTime = performance.now();\n const messages: Message[] = [userMessage(prompt)];\n const tools = {{{tools}}};\n\n const responseFormat = {{{zodSchema:string}}};\n\n let completion = await client.text({\n messages,\n tools,\n responseFormat,\n });\n\n const endTime = performance.now();\n statelogClient.promptCompletion({\n messages,\n completion,\n model,\n timeTaken: endTime - startTime,\n });\n\n if (!completion.success) {\n throw new Error(\n `Error getting response from ${model}: ${completion.error}`\n );\n }\n\n let responseMessage = completion.value;\n\n // Handle function calls\n while (responseMessage.toolCalls.length > 0) {\n // Add assistant's response with tool calls to message history\n messages.push(assistantMessage(responseMessage.output));\n let toolCallStartTime, toolCallEndTime;\n\n // Process each tool call\n for (const toolCall of responseMessage.toolCalls) {\n {{{functionCalls:string}}}\n }\n\n const nextStartTime = performance.now();\n let completion = await client.text({\n messages,\n tools,\n responseFormat,\n });\n\n const nextEndTime = performance.now();\n\n statelogClient.promptCompletion({\n messages,\n completion,\n model,\n timeTaken: nextEndTime - nextStartTime,\n });\n\n if (!completion.success) {\n throw new Error(\n `Error getting response from ${model}: ${completion.error}`\n );\n }\n responseMessage = completion.value;\n }\n\n // Add final assistant response to history\n messages.push(assistantMessage(responseMessage.output));\n\n try {\n const result = JSON.parse(responseMessage.output || \"\");\n return result.value;\n } catch (e) {\n return responseMessage.output;\n // console.error(\"Error parsing response for variable '{{{variableName:string}}}':\", e);\n // console.error(\"Full completion response:\", JSON.stringify(completion, null, 2));\n // throw e;\n }\n}\n";
2
2
  export type TemplateType = {
3
3
  variableName: string;
4
4
  argsStr: string;
@@ -6,17 +6,17 @@ export const template = `
6
6
  async function _{{{variableName:string}}}({{{argsStr:string}}}): Promise<{{{typeString:string}}}> {
7
7
  const prompt = {{{promptCode:string}}};
8
8
  const startTime = performance.now();
9
- const messages:any[] = [{ role: "user", content: prompt }];
9
+ const messages: Message[] = [userMessage(prompt)];
10
10
  const tools = {{{tools}}};
11
11
 
12
- let completion = await openai.chat.completions.create({
13
- model,
12
+ const responseFormat = {{{zodSchema:string}}};
13
+
14
+ let completion = await client.text({
14
15
  messages,
15
16
  tools,
16
- response_format: zodResponseFormat(z.object({
17
- value: {{{zodSchema:string}}}
18
- }), "{{{variableName:string}}}_response"),
17
+ responseFormat,
19
18
  });
19
+
20
20
  const endTime = performance.now();
21
21
  statelogClient.promptCompletion({
22
22
  messages,
@@ -25,25 +25,32 @@ async function _{{{variableName:string}}}({{{argsStr:string}}}): Promise<{{{type
25
25
  timeTaken: endTime - startTime,
26
26
  });
27
27
 
28
- let responseMessage = completion.choices[0].message;
28
+ if (!completion.success) {
29
+ throw new Error(
30
+ \`Error getting response from $\{model\}: $\{completion.error\}\`
31
+ );
32
+ }
33
+
34
+ let responseMessage = completion.value;
35
+
29
36
  // Handle function calls
30
- while (responseMessage.tool_calls && responseMessage.tool_calls.length > 0) {
37
+ while (responseMessage.toolCalls.length > 0) {
31
38
  // Add assistant's response with tool calls to message history
32
- messages.push(responseMessage);
39
+ messages.push(assistantMessage(responseMessage.output));
33
40
  let toolCallStartTime, toolCallEndTime;
34
41
 
35
42
  // Process each tool call
36
- for (const toolCall of responseMessage.tool_calls) {
43
+ for (const toolCall of responseMessage.toolCalls) {
37
44
  {{{functionCalls:string}}}
38
45
  }
39
46
 
40
47
  const nextStartTime = performance.now();
41
- // Get the next response from the model
42
- completion = await openai.chat.completions.create({
43
- model,
44
- messages: messages,
45
- tools: tools,
48
+ let completion = await client.text({
49
+ messages,
50
+ tools,
51
+ responseFormat,
46
52
  });
53
+
47
54
  const nextEndTime = performance.now();
48
55
 
49
56
  statelogClient.promptCompletion({
@@ -53,17 +60,22 @@ async function _{{{variableName:string}}}({{{argsStr:string}}}): Promise<{{{type
53
60
  timeTaken: nextEndTime - nextStartTime,
54
61
  });
55
62
 
56
- responseMessage = completion.choices[0].message;
63
+ if (!completion.success) {
64
+ throw new Error(
65
+ \`Error getting response from $\{model\}: $\{completion.error\}\`
66
+ );
67
+ }
68
+ responseMessage = completion.value;
57
69
  }
58
70
 
59
71
  // Add final assistant response to history
60
- messages.push(responseMessage);
72
+ messages.push(assistantMessage(responseMessage.output));
61
73
 
62
74
  try {
63
- const result = JSON.parse(completion.choices[0].message.content || "");
75
+ const result = JSON.parse(responseMessage.output || "");
64
76
  return result.value;
65
77
  } catch (e) {
66
- return completion.choices[0].message.content;
78
+ return responseMessage.output;
67
79
  // console.error("Error parsing response for variable '{{{variableName:string}}}':", e);
68
80
  // console.error("Full completion response:", JSON.stringify(completion, null, 2));
69
81
  // throw e;
@@ -155,6 +155,7 @@ async function main() {
155
155
  }
156
156
  format(fmtContents, verbose);
157
157
  break;
158
+ case "ast":
158
159
  case "parse":
159
160
  let contents;
160
161
  if (filteredArgs.length < 2) {
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "agency-lang",
3
- "version": "0.0.7",
3
+ "version": "0.0.8",
4
4
  "description": "The Agency language",
5
5
  "main": "lib/index.js",
6
6
  "scripts": {
@@ -43,6 +43,7 @@
43
43
  "nanoid": "^5.1.6",
44
44
  "openai": "^6.15.0",
45
45
  "simplemachine": "github:egonSchiele/simplemachine",
46
+ "smoltalk": "^0.0.4",
46
47
  "statelog-client": "^0.0.28",
47
48
  "tarsec": "^0.1.1",
48
49
  "typestache": "^0.4.4",