@garrix82/reactgenie-dsl 1.0.0 → 1.0.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (105)
  1. package/.env +10 -0
  2. package/.env.example +17 -0
  3. package/.github/workflows/publish.yml +20 -0
  4. package/README.md +5 -1
  5. package/package.json +1 -5
  6. package/dist/__test__/dsl-descriptor.test.d.ts +0 -1
  7. package/dist/__test__/dsl-descriptor.test.js +0 -27
  8. package/dist/__test__/dsl-descriptor.test.js.map +0 -1
  9. package/dist/__test__/example_descriptor.d.ts +0 -125
  10. package/dist/__test__/example_descriptor.js +0 -607
  11. package/dist/__test__/example_descriptor.js.map +0 -1
  12. package/dist/__test__/food_descriptor.state.json +0 -1
  13. package/dist/__test__/food_descriptor.test.d.ts +0 -74
  14. package/dist/__test__/food_descriptor.test.js +0 -205
  15. package/dist/__test__/food_descriptor.test.js.map +0 -1
  16. package/dist/__test__/nl-interpreter-provider-selection.test.d.ts +0 -1
  17. package/dist/__test__/nl-interpreter-provider-selection.test.js +0 -73
  18. package/dist/__test__/nl-interpreter-provider-selection.test.js.map +0 -1
  19. package/dist/__test__/nl-interpreter.test.d.ts +0 -1
  20. package/dist/__test__/nl-interpreter.test.js +0 -86
  21. package/dist/__test__/nl-interpreter.test.js.map +0 -1
  22. package/dist/decorators/__test__/decorators.test.d.ts +0 -1
  23. package/dist/decorators/__test__/decorators.test.js +0 -182
  24. package/dist/decorators/__test__/decorators.test.js.map +0 -1
  25. package/dist/decorators/__test__/inheritance-descriptor.test.d.ts +0 -1
  26. package/dist/decorators/__test__/inheritance-descriptor.test.js +0 -107
  27. package/dist/decorators/__test__/inheritance-descriptor.test.js.map +0 -1
  28. package/dist/dsl/__test__/dsl-interpreter.test.d.ts +0 -1
  29. package/dist/dsl/__test__/dsl-interpreter.test.js +0 -334
  30. package/dist/dsl/__test__/dsl-interpreter.test.js.map +0 -1
  31. package/dist/dsl/__test__/parser.gen.test.d.ts +0 -1
  32. package/dist/dsl/__test__/parser.gen.test.js +0 -283
  33. package/dist/dsl/__test__/parser.gen.test.js.map +0 -1
  34. package/dist/nl/__test__/context-aware-prompt.test.d.ts +0 -1
  35. package/dist/nl/__test__/context-aware-prompt.test.js +0 -247
  36. package/dist/nl/__test__/context-aware-prompt.test.js.map +0 -1
  37. package/dist/nl/__test__/context-selector.test.d.ts +0 -1
  38. package/dist/nl/__test__/context-selector.test.js +0 -20
  39. package/dist/nl/__test__/context-selector.test.js.map +0 -1
  40. package/dist/nl/__test__/nl-parser-groq-transport.test.d.ts +0 -1
  41. package/dist/nl/__test__/nl-parser-groq-transport.test.js +0 -87
  42. package/dist/nl/__test__/nl-parser-groq-transport.test.js.map +0 -1
  43. package/dist/nl/__test__/nl-parser-openai-parity.test.d.ts +0 -1
  44. package/dist/nl/__test__/nl-parser-openai-parity.test.js +0 -206
  45. package/dist/nl/__test__/nl-parser-openai-parity.test.js.map +0 -1
  46. package/dist/nl/__test__/nl-parser-openai-sampling.test.d.ts +0 -1
  47. package/dist/nl/__test__/nl-parser-openai-sampling.test.js +0 -44
  48. package/dist/nl/__test__/nl-parser-openai-sampling.test.js.map +0 -1
  49. package/dist/nl/__test__/nl-parser-openai-transport.test.d.ts +0 -1
  50. package/dist/nl/__test__/nl-parser-openai-transport.test.js +0 -55
  51. package/dist/nl/__test__/nl-parser-openai-transport.test.js.map +0 -1
  52. package/dist/nl/__test__/nl-parser-utils.test.d.ts +0 -1
  53. package/dist/nl/__test__/nl-parser-utils.test.js +0 -70
  54. package/dist/nl/__test__/nl-parser-utils.test.js.map +0 -1
  55. package/dist/nl/__test__/nl-parser.test.d.ts +0 -1
  56. package/dist/nl/__test__/nl-parser.test.js +0 -64
  57. package/dist/nl/__test__/nl-parser.test.js.map +0 -1
  58. package/dist/nl/__test__/parameter-tuning.test.d.ts +0 -1
  59. package/dist/nl/__test__/parameter-tuning.test.js +0 -95
  60. package/dist/nl/__test__/parameter-tuning.test.js.map +0 -1
  61. package/dist/nl/__test__/semantic-parsing-experiment.test.d.ts +0 -1
  62. package/dist/nl/__test__/semantic-parsing-experiment.test.js +0 -178
  63. package/dist/nl/__test__/semantic-parsing-experiment.test.js.map +0 -1
  64. package/dist/nl/llm-monitoring.test.d.ts +0 -5
  65. package/dist/nl/llm-monitoring.test.js +0 -101
  66. package/dist/nl/llm-monitoring.test.js.map +0 -1
  67. package/lib/__test__/dsl-descriptor.test.ts +0 -27
  68. package/lib/__test__/example_descriptor.ts +0 -762
  69. package/lib/__test__/food_descriptor.state.json +0 -1
  70. package/lib/__test__/food_descriptor.test.ts +0 -331
  71. package/lib/__test__/nl-interpreter-provider-selection.test.ts +0 -126
  72. package/lib/__test__/nl-interpreter.test.ts +0 -129
  73. package/lib/decorators/__test__/decorators.test.ts +0 -177
  74. package/lib/decorators/__test__/inheritance-descriptor.test.ts +0 -92
  75. package/lib/decorators/decorators.ts +0 -754
  76. package/lib/decorators/index.ts +0 -2
  77. package/lib/decorators/store.ts +0 -47
  78. package/lib/dsl/__test__/dsl-interpreter.test.ts +0 -453
  79. package/lib/dsl/__test__/parser.gen.test.ts +0 -296
  80. package/lib/dsl/dsl-interpreter.ts +0 -974
  81. package/lib/dsl/index.ts +0 -1
  82. package/lib/dsl/parser.gen.js +0 -1479
  83. package/lib/dsl/parser.pegjs +0 -130
  84. package/lib/dsl-descriptor.ts +0 -241
  85. package/lib/index.ts +0 -5
  86. package/lib/nl/__test__/context-aware-prompt.test.ts +0 -372
  87. package/lib/nl/__test__/context-selector.test.ts +0 -27
  88. package/lib/nl/__test__/nl-parser-groq-transport.test.ts +0 -139
  89. package/lib/nl/__test__/nl-parser-openai-parity.test.ts +0 -381
  90. package/lib/nl/__test__/nl-parser-openai-sampling.test.ts +0 -73
  91. package/lib/nl/__test__/nl-parser-openai-transport.test.ts +0 -79
  92. package/lib/nl/__test__/nl-parser-utils.test.ts +0 -98
  93. package/lib/nl/__test__/nl-parser.test.ts +0 -119
  94. package/lib/nl/__test__/parameter-tuning.test.ts +0 -137
  95. package/lib/nl/__test__/semantic-parsing-experiment.test.ts +0 -260
  96. package/lib/nl/context-selector.ts +0 -123
  97. package/lib/nl/index.ts +0 -19
  98. package/lib/nl/llm-monitoring.test.ts +0 -136
  99. package/lib/nl/llm-monitoring.ts +0 -339
  100. package/lib/nl/nl-parser-groq.ts +0 -510
  101. package/lib/nl/nl-parser-utils.ts +0 -310
  102. package/lib/nl/nl-parser.ts +0 -616
  103. package/lib/nl/prompt-gen.ts +0 -607
  104. package/lib/nl/prompt-res.ts +0 -207
  105. package/lib/nl-interpreter.ts +0 -262
@@ -1,381 +0,0 @@
1
- import { NlParser } from "../nl-parser";
2
- import { PromptGen } from "../prompt-gen";
3
-
4
- const promptStub: PromptGen = {
5
- prompt: () => "legacy prompt",
6
- zero_shot_prompt: () => "filtered prompt",
7
- response_prompt: () => "respond prompt",
8
- };
9
-
10
- const promptWithBroadFallback = {
11
- ...promptStub,
12
- zero_shot_prompt_broad: () => "broad prompt",
13
- } as PromptGen & { zero_shot_prompt_broad: (user_utterance: string) => string };
14
-
15
- describe("NlParser OpenAI parity", () => {
16
- const originalFetch = global.fetch;
17
-
18
- afterEach(() => {
19
- global.fetch = originalFetch;
20
- jest.restoreAllMocks();
21
- });
22
-
23
- test("parse sends dsl_command json_schema and extracts command JSON", async () => {
24
- global.fetch = jest.fn().mockResolvedValue(
25
- new Response(
26
- JSON.stringify({
27
- choices: [{ message: { content: JSON.stringify({ command: "Task.run()" }) } }],
28
- usage: { prompt_tokens: 8, completion_tokens: 3, total_tokens: 11 },
29
- }),
30
- { status: 200, headers: { "content-type": "application/json" } }
31
- )
32
- ) as any;
33
-
34
- const parser = new NlParser(
35
- promptStub,
36
- "token",
37
- "https://api.openai.com/v1",
38
- "gpt-4o-mini"
39
- );
40
-
41
- await expect(parser.parse("run task")).resolves.toBe("Task.run()");
42
-
43
- const [, init] = (global.fetch as jest.Mock).mock.calls[0] as [string, any];
44
- const body = JSON.parse(String(init.body));
45
- expect(body.model).toBe("gpt-4o-mini");
46
- expect(body.response_format?.json_schema?.name).toBe("dsl_command");
47
- });
48
-
49
- test("parse retries strict=false then strict=true on filtered prompt", async () => {
50
- global.fetch = jest
51
- .fn()
52
- .mockResolvedValueOnce(
53
- new Response(
54
- JSON.stringify({
55
- choices: [{ message: { content: "{}" } }],
56
- usage: {},
57
- }),
58
- { status: 200, headers: { "content-type": "application/json" } }
59
- )
60
- )
61
- .mockResolvedValueOnce(
62
- new Response(
63
- JSON.stringify({
64
- choices: [{ message: { content: JSON.stringify({ command: "Task.run()" }) } }],
65
- usage: {},
66
- }),
67
- { status: 200, headers: { "content-type": "application/json" } }
68
- )
69
- ) as any;
70
-
71
- const parser = new NlParser(
72
- promptStub,
73
- "token",
74
- "https://api.openai.com/v1",
75
- "gpt-4o-mini"
76
- );
77
-
78
- await expect(parser.parse("run task")).resolves.toBe("Task.run()");
79
-
80
- const body1 = JSON.parse(String((global.fetch as jest.Mock).mock.calls[0][1].body));
81
- const body2 = JSON.parse(String((global.fetch as jest.Mock).mock.calls[1][1].body));
82
- expect(body1.response_format.json_schema.strict).toBe(false);
83
- expect(body2.response_format.json_schema.strict).toBe(true);
84
- });
85
-
86
- test("parse does not use broad prompt variants", async () => {
87
- global.fetch = (
88
- jest
89
- .fn()
90
- .mockResolvedValueOnce(
91
- new Response(
92
- JSON.stringify({
93
- choices: [{ message: { content: "{}" } }],
94
- usage: {},
95
- }),
96
- { status: 200, headers: { "content-type": "application/json" } }
97
- )
98
- )
99
- .mockResolvedValueOnce(
100
- new Response(
101
- JSON.stringify({
102
- choices: [{ message: { content: "{}" } }],
103
- usage: {},
104
- }),
105
- { status: 200, headers: { "content-type": "application/json" } }
106
- )
107
- )
108
- ) as any;
109
-
110
- const parser = new NlParser(
111
- promptWithBroadFallback,
112
- "token",
113
- "https://api.openai.com/v1",
114
- "gpt-4o-mini"
115
- );
116
-
117
- await expect(parser.parse("run task")).rejects.toThrow();
118
-
119
- const body1 = JSON.parse(String((global.fetch as jest.Mock).mock.calls[0][1].body));
120
- const body2 = JSON.parse(String((global.fetch as jest.Mock).mock.calls[1][1].body));
121
- expect(global.fetch).toHaveBeenCalledTimes(2);
122
- expect(body1.messages[0].content).toBe("filtered prompt");
123
- expect(body2.messages[0].content).toBe("filtered prompt");
124
- });
125
-
126
- test("parse repairs one invalid command after semantic validation fails", async () => {
127
- const validateCommand = jest
128
- .fn<Promise<void>, [string]>()
129
- .mockImplementation(async (command: string) => {
130
- if (command === 'Task.run(field: "name")') {
131
- throw new Error(
132
- "Invalid DSL: matching(field: ...) requires a dotted accessor such as .name, but received string."
133
- );
134
- }
135
- });
136
-
137
- global.fetch = jest
138
- .fn()
139
- .mockResolvedValueOnce(
140
- new Response(
141
- JSON.stringify({
142
- choices: [
143
- {
144
- message: {
145
- content: JSON.stringify({ command: 'Task.run(field: "name")' }),
146
- },
147
- },
148
- ],
149
- usage: {},
150
- }),
151
- { status: 200, headers: { "content-type": "application/json" } }
152
- )
153
- )
154
- .mockResolvedValueOnce(
155
- new Response(
156
- JSON.stringify({
157
- choices: [{ message: { content: JSON.stringify({ command: "Task.run(field: .name)" }) } }],
158
- usage: {},
159
- }),
160
- { status: 200, headers: { "content-type": "application/json" } }
161
- )
162
- ) as any;
163
-
164
- const parser = new NlParser(
165
- promptStub,
166
- "token",
167
- "https://api.openai.com/v1",
168
- "gpt-4o-mini",
169
- undefined,
170
- undefined,
171
- undefined,
172
- undefined,
173
- validateCommand,
174
- { statefulValidationRetry: false }
175
- );
176
-
177
- await expect(parser.parse("run task")).resolves.toBe("Task.run(field: .name)");
178
- expect(validateCommand).toHaveBeenCalledTimes(2);
179
- expect(validateCommand).toHaveBeenNthCalledWith(1, 'Task.run(field: "name")');
180
- expect(validateCommand).toHaveBeenNthCalledWith(2, "Task.run(field: .name)");
181
-
182
- const repairBody = JSON.parse(String((global.fetch as jest.Mock).mock.calls[1][1].body));
183
- expect(repairBody.messages[0].content).toBe("filtered prompt");
184
- expect(repairBody.messages[1].content).toBe('Task.run(field: "name")');
185
- expect(repairBody.messages[2].content).toContain("The previous DSL command is invalid.");
186
- expect(repairBody.messages[2].content).toContain("requires a dotted accessor");
187
- });
188
-
189
- test("parse uses Responses API state for validation retry when feature flag is enabled", async () => {
190
- const validateCommand = jest
191
- .fn<Promise<void>, [string]>()
192
- .mockImplementation(async (command: string) => {
193
- if (command === 'Task.run(field: "name")') {
194
- throw new Error(
195
- "Invalid DSL: matching(field: ...) requires a dotted accessor such as .name, but received string."
196
- );
197
- }
198
- });
199
-
200
- global.fetch = jest
201
- .fn()
202
- .mockResolvedValueOnce(
203
- new Response(
204
- JSON.stringify({
205
- id: "resp_initial",
206
- output_text: JSON.stringify({ command: 'Task.run(field: "name")' }),
207
- usage: { input_tokens: 12, output_tokens: 4, total_tokens: 16 },
208
- }),
209
- { status: 200, headers: { "content-type": "application/json" } }
210
- )
211
- )
212
- .mockResolvedValueOnce(
213
- new Response(
214
- JSON.stringify({
215
- id: "resp_repair",
216
- output_text: JSON.stringify({ command: "Task.run(field: .name)" }),
217
- usage: { input_tokens: 5, output_tokens: 3, total_tokens: 8 },
218
- }),
219
- { status: 200, headers: { "content-type": "application/json" } }
220
- )
221
- ) as any;
222
-
223
- const parser = new NlParser(
224
- promptStub,
225
- "token",
226
- "https://proxy.example.com/openai/v1",
227
- "gpt-4.1-mini",
228
- undefined,
229
- undefined,
230
- undefined,
231
- undefined,
232
- validateCommand,
233
- { statefulValidationRetry: true }
234
- );
235
-
236
- await expect(parser.parse("run task")).resolves.toBe("Task.run(field: .name)");
237
-
238
- expect(global.fetch).toHaveBeenCalledTimes(2);
239
- const [initialUrl, initialInit] = (global.fetch as jest.Mock).mock.calls[0] as [string, any];
240
- const [repairUrl, repairInit] = (global.fetch as jest.Mock).mock.calls[1] as [string, any];
241
- const initialBody = JSON.parse(String(initialInit.body));
242
- const repairBody = JSON.parse(String(repairInit.body));
243
-
244
- expect(initialUrl).toBe("https://proxy.example.com/openai/v1/responses");
245
- expect(repairUrl).toBe("https://proxy.example.com/openai/v1/responses");
246
- expect(initialBody.store).toBe(true);
247
- expect(initialBody.input[0].content[0].text).toBe("filtered prompt");
248
- expect(repairBody.previous_response_id).toBe("resp_initial");
249
- expect(repairBody.input[0].content[0].text).toContain("The previous DSL command is invalid.");
250
- expect(repairBody.input[0].content[0].text).toContain("requires a dotted accessor");
251
- expect(repairBody.input[0].content[0].text).not.toContain("filtered prompt");
252
- });
253
-
254
- test("parse throws after filtered prompt strict retries fail", async () => {
255
- global.fetch = jest
256
- .fn()
257
- .mockResolvedValue(
258
- new Response(
259
- JSON.stringify({
260
- choices: [{ message: { content: "{}" } }],
261
- usage: {},
262
- }),
263
- { status: 200, headers: { "content-type": "application/json" } }
264
- )
265
- ) as any;
266
-
267
- const parser = new NlParser(
268
- promptWithBroadFallback,
269
- "token",
270
- "https://api.openai.com/v1",
271
- "gpt-4o-mini"
272
- );
273
-
274
- await expect(parser.parse("run task")).rejects.toThrow(
275
- "OpenAI parse failed for filtered prompt"
276
- );
277
-
278
- expect(global.fetch).toHaveBeenCalledTimes(2);
279
-
280
- const requestBodies = (global.fetch as jest.Mock).mock.calls.map(([, init]) =>
281
- JSON.parse(String(init.body))
282
- );
283
- expect(requestBodies.map((body) => body.messages[0].content)).toEqual([
284
- "filtered prompt",
285
- "filtered prompt",
286
- ]);
287
- expect(requestBodies.map((body) => body.response_format.json_schema.strict)).toEqual([
288
- false,
289
- true,
290
- ]);
291
- });
292
-
293
- test("respond sends agent_response json_schema and normalizes payload", async () => {
294
- global.fetch = jest.fn().mockResolvedValue(
295
- new Response(
296
- JSON.stringify({
297
- choices: [{
298
- message: {
299
- content: JSON.stringify({
300
- minimalText: "Done now",
301
- fullText: "Done: Task.run()",
302
- type: "success",
303
- shouldSpeak: true,
304
- }),
305
- },
306
- }],
307
- usage: {},
308
- }),
309
- { status: 200, headers: { "content-type": "application/json" } }
310
- )
311
- ) as any;
312
-
313
- const parser = new NlParser(
314
- promptStub,
315
- "token",
316
- "https://api.openai.com/v1",
317
- "gpt-4o-mini"
318
- );
319
-
320
- await expect(parser.respond("run task", "Task.run()", "ok")).resolves.toEqual({
321
- minimalText: "Done now",
322
- fullText: "Done: Task.run()",
323
- type: "success",
324
- shouldSpeak: true,
325
- });
326
-
327
- const [, init] = (global.fetch as jest.Mock).mock.calls[0] as [string, any];
328
- const body = JSON.parse(String(init.body));
329
- expect(body.response_format?.json_schema?.name).toBe("agent_response");
330
- });
331
-
332
- test("respond falls back safely when OpenAI returns invalid JSON", async () => {
333
- global.fetch = jest.fn().mockResolvedValue(
334
- new Response(
335
- JSON.stringify({
336
- choices: [{ message: { content: "not-json" } }],
337
- usage: {},
338
- }),
339
- { status: 200, headers: { "content-type": "application/json" } }
340
- )
341
- ) as any;
342
-
343
- const parser = new NlParser(
344
- promptStub,
345
- "token",
346
- "https://api.openai.com/v1",
347
- "gpt-4o-mini"
348
- );
349
-
350
- await expect(parser.respond("run task", "Task.run()", "ok")).resolves.toEqual(
351
- expect.objectContaining({ type: "success", shouldSpeak: true })
352
- );
353
- });
354
-
355
- test("respond falls back safely when the transport fails", async () => {
356
- global.fetch = jest.fn().mockRejectedValue(new Error("socket hang up")) as any;
357
-
358
- const parser = new NlParser(
359
- promptStub,
360
- "token",
361
- "https://api.openai.com/v1",
362
- "gpt-4o-mini"
363
- );
364
-
365
- await expect(
366
- parser.respond("run task", "Task.run()", "error: network failed")
367
- ).resolves.toEqual({
368
- minimalText: "Retry task",
369
- fullText:
370
- "I couldn't complete that request. Please try rephrasing or checking the target item.",
371
- type: "error",
372
- shouldSpeak: true,
373
- });
374
- });
375
-
376
- test("constructor rejects an empty semantic model", () => {
377
- expect(
378
- () => new NlParser(promptStub, "token", "https://api.openai.com/v1", " ")
379
- ).toThrow("semanticModel / SEMANTIC_MODEL");
380
- });
381
- });
@@ -1,73 +0,0 @@
1
- import { NlParser } from "../nl-parser";
2
- import { PromptGen } from "../prompt-gen";
3
-
4
- const promptStub: PromptGen = {
5
- prompt: () => "legacy prompt",
6
- zero_shot_prompt: () => "parse prompt",
7
- response_prompt: () => "respond prompt",
8
- };
9
-
10
- describe("NlParser OpenAI sampling", () => {
11
- const originalFetch = global.fetch;
12
-
13
- afterEach(() => {
14
- global.fetch = originalFetch;
15
- jest.restoreAllMocks();
16
- });
17
-
18
- test("parse uses the standard sampling profile", async () => {
19
- global.fetch = jest.fn().mockResolvedValue(
20
- new Response(
21
- JSON.stringify({
22
- choices: [{ message: { content: JSON.stringify({ command: "Task.run()" }) } }],
23
- usage: {},
24
- }),
25
- { status: 200, headers: { "content-type": "application/json" } }
26
- )
27
- ) as any;
28
-
29
- const parser = new NlParser(
30
- promptStub,
31
- "token",
32
- "https://api.openai.com/v1",
33
- "gpt-4o-mini"
34
- );
35
-
36
- await parser.parse("run task");
37
-
38
- const [, init] = (global.fetch as jest.Mock).mock.calls[0] as [string, any];
39
- const body = JSON.parse(String(init.body));
40
- expect(body.temperature).toBe(0.2);
41
- expect(body.top_p).toBe(1);
42
- expect(body.frequency_penalty).toBe(0);
43
- expect(body.presence_penalty).toBe(0);
44
- });
45
-
46
- test("parseGpt4 uses the same sampling profile", async () => {
47
- global.fetch = jest.fn().mockResolvedValue(
48
- new Response(
49
- JSON.stringify({
50
- choices: [{ message: { content: JSON.stringify({ command: "Task.run()" }) } }],
51
- usage: {},
52
- }),
53
- { status: 200, headers: { "content-type": "application/json" } }
54
- )
55
- ) as any;
56
-
57
- const parser = new NlParser(
58
- promptStub,
59
- "token",
60
- "https://api.openai.com/v1",
61
- "gpt-4o-mini"
62
- );
63
-
64
- await parser.parseGpt4("run task");
65
-
66
- const [, init] = (global.fetch as jest.Mock).mock.calls[0] as [string, any];
67
- const body = JSON.parse(String(init.body));
68
- expect(body.temperature).toBe(0.2);
69
- expect(body.top_p).toBe(1);
70
- expect(body.frequency_penalty).toBe(0);
71
- expect(body.presence_penalty).toBe(0);
72
- });
73
- });
@@ -1,79 +0,0 @@
1
- import { NlParser } from "../nl-parser";
2
- import { PromptGen } from "../prompt-gen";
3
-
4
- function toHeaderRecord(headers: any): Record<string, string> {
5
- if (!headers) return {};
6
- if (typeof headers.entries === "function") {
7
- return Object.fromEntries(
8
- Array.from(headers.entries()).map(([key, value]) => [
9
- String(key).toLowerCase(),
10
- String(value),
11
- ])
12
- );
13
- }
14
- return Object.fromEntries(
15
- Object.entries(headers).map(([key, value]) => [
16
- String(key).toLowerCase(),
17
- String(value),
18
- ])
19
- );
20
- }
21
-
22
- const promptStub: PromptGen = {
23
- prompt: () => "legacy prompt",
24
- zero_shot_prompt: () => "parse prompt",
25
- response_prompt: () => "respond prompt",
26
- };
27
-
28
- describe("NlParser transport", () => {
29
- const originalFetch = global.fetch;
30
-
31
- afterEach(() => {
32
- global.fetch = originalFetch;
33
- jest.restoreAllMocks();
34
- });
35
-
36
- test("uses proxy chat completions endpoint without sdk stainless headers", async () => {
37
- const fetchMock = jest.fn().mockResolvedValue(
38
- new Response(
39
- JSON.stringify({
40
- choices: [{ message: { content: JSON.stringify({ command: "Task.run()" }) } }],
41
- usage: { prompt_tokens: 12, completion_tokens: 3, total_tokens: 15 },
42
- }),
43
- {
44
- status: 200,
45
- headers: { "content-type": "application/json" },
46
- }
47
- )
48
- );
49
- global.fetch = fetchMock as any;
50
-
51
- const parser = new NlParser(
52
- promptStub,
53
- "proxy-token",
54
- "https://proxy.example.com/openai/v1",
55
- "gpt-4o-mini"
56
- );
57
-
58
- const parsed = await parser.parse("run task");
59
-
60
- expect(parsed).toBe("Task.run()");
61
- expect(fetchMock).toHaveBeenCalledTimes(1);
62
-
63
- const [url, init] = fetchMock.mock.calls[0] as [string, any];
64
- expect(String(url)).toBe("https://proxy.example.com/openai/v1/chat/completions");
65
- expect(init?.method).toBe("POST");
66
-
67
- const headers = toHeaderRecord(init?.headers);
68
- expect(headers.authorization).toBe("Bearer proxy-token");
69
- expect(headers["x-genie-provider"]).toBe("openai");
70
- expect(
71
- Object.keys(headers).some((key) => key.startsWith("x-stainless-"))
72
- ).toBe(false);
73
-
74
- const body = JSON.parse(String(init?.body || "{}"));
75
- expect(body.model).toBe("gpt-4o-mini");
76
- expect(body.response_format?.type).toBe("json_schema");
77
- expect(body.response_format?.json_schema?.name).toBe("dsl_command");
78
- });
79
- });
@@ -1,98 +0,0 @@
1
- import {
2
- buildFallbackAgentResponse,
3
- createJsonChatCompletion,
4
- extractStructuredAgentResponse,
5
- extractStructuredCommand,
6
- normalizeAgentResponse,
7
- normalizeBaseUrl,
8
- } from "../nl-parser-utils";
9
-
10
- describe("nl-parser utils", () => {
11
- const originalFetch = global.fetch;
12
-
13
- afterEach(() => {
14
- global.fetch = originalFetch;
15
- jest.restoreAllMocks();
16
- });
17
-
18
- test("extractStructuredCommand returns null for invalid or missing command payloads", () => {
19
- expect(extractStructuredCommand("not-json")).toBeNull();
20
- expect(extractStructuredCommand(JSON.stringify({}))).toBeNull();
21
- expect(extractStructuredCommand(JSON.stringify({ command: 7 }))).toBeNull();
22
- });
23
-
24
- test("extractStructuredAgentResponse returns null for invalid payload shapes", () => {
25
- expect(extractStructuredAgentResponse("not-json")).toBeNull();
26
- expect(extractStructuredAgentResponse(JSON.stringify("done"))).toBeNull();
27
- expect(extractStructuredAgentResponse(JSON.stringify(null))).toBeNull();
28
- });
29
-
30
- test("normalizeAgentResponse applies defaults and trims oversized values", () => {
31
- expect(
32
- normalizeAgentResponse({
33
- minimalText: " this is too long for speech output ",
34
- fullText: "",
35
- type: "unsupported" as any,
36
- })
37
- ).toEqual({
38
- minimalText: "this is",
39
- fullText: "this is too long for speech output",
40
- type: "info",
41
- shouldSpeak: true,
42
- });
43
- });
44
-
45
- test("buildFallbackAgentResponse returns an error payload for failure contexts", () => {
46
- expect(buildFallbackAgentResponse("Task.run()", "Failed to update item")).toEqual({
47
- minimalText: "Retry task",
48
- fullText:
49
- "I couldn't complete that request. Please try rephrasing or checking the target item.",
50
- type: "error",
51
- shouldSpeak: true,
52
- });
53
- });
54
-
55
- test("normalizeBaseUrl strips trailing slashes", () => {
56
- expect(normalizeBaseUrl("https://proxy.example.com/openai/v1///")).toBe(
57
- "https://proxy.example.com/openai/v1"
58
- );
59
- });
60
-
61
- test("createJsonChatCompletion throws a detailed error for non-ok JSON responses", async () => {
62
- global.fetch = jest.fn().mockResolvedValue(
63
- new Response(JSON.stringify({ error: { message: "bad request" } }), {
64
- status: 400,
65
- headers: { "content-type": "application/json" },
66
- })
67
- ) as any;
68
-
69
- await expect(
70
- createJsonChatCompletion({
71
- endpoint: "https://proxy.example.com/openai/v1/chat/completions",
72
- apiKey: "token",
73
- body: { model: "gpt-4o-mini" },
74
- requestLabel: "OpenAI-compatible request failed",
75
- invalidPayloadLabel: "invalid payload",
76
- })
77
- ).rejects.toThrow("OpenAI-compatible request failed (400): {\"message\":\"bad request\"}");
78
- });
79
-
80
- test("createJsonChatCompletion throws for invalid successful payloads", async () => {
81
- global.fetch = jest.fn().mockResolvedValue(
82
- new Response("not-json", {
83
- status: 200,
84
- headers: { "content-type": "application/json" },
85
- })
86
- ) as any;
87
-
88
- await expect(
89
- createJsonChatCompletion({
90
- endpoint: "https://proxy.example.com/openai/v1/chat/completions",
91
- apiKey: "token",
92
- body: { model: "gpt-4o-mini" },
93
- requestLabel: "OpenAI-compatible request failed",
94
- invalidPayloadLabel: "OpenAI-compatible endpoint returned an invalid JSON payload",
95
- })
96
- ).rejects.toThrow("OpenAI-compatible endpoint returned an invalid JSON payload");
97
- });
98
- });