@garrix82/reactgenie-dsl 1.0.0 → 1.0.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (105)
  1. package/.env +10 -0
  2. package/.env.example +17 -0
  3. package/.github/workflows/publish.yml +20 -0
  4. package/README.md +5 -1
  5. package/package.json +1 -5
  6. package/dist/__test__/dsl-descriptor.test.d.ts +0 -1
  7. package/dist/__test__/dsl-descriptor.test.js +0 -27
  8. package/dist/__test__/dsl-descriptor.test.js.map +0 -1
  9. package/dist/__test__/example_descriptor.d.ts +0 -125
  10. package/dist/__test__/example_descriptor.js +0 -607
  11. package/dist/__test__/example_descriptor.js.map +0 -1
  12. package/dist/__test__/food_descriptor.state.json +0 -1
  13. package/dist/__test__/food_descriptor.test.d.ts +0 -74
  14. package/dist/__test__/food_descriptor.test.js +0 -205
  15. package/dist/__test__/food_descriptor.test.js.map +0 -1
  16. package/dist/__test__/nl-interpreter-provider-selection.test.d.ts +0 -1
  17. package/dist/__test__/nl-interpreter-provider-selection.test.js +0 -73
  18. package/dist/__test__/nl-interpreter-provider-selection.test.js.map +0 -1
  19. package/dist/__test__/nl-interpreter.test.d.ts +0 -1
  20. package/dist/__test__/nl-interpreter.test.js +0 -86
  21. package/dist/__test__/nl-interpreter.test.js.map +0 -1
  22. package/dist/decorators/__test__/decorators.test.d.ts +0 -1
  23. package/dist/decorators/__test__/decorators.test.js +0 -182
  24. package/dist/decorators/__test__/decorators.test.js.map +0 -1
  25. package/dist/decorators/__test__/inheritance-descriptor.test.d.ts +0 -1
  26. package/dist/decorators/__test__/inheritance-descriptor.test.js +0 -107
  27. package/dist/decorators/__test__/inheritance-descriptor.test.js.map +0 -1
  28. package/dist/dsl/__test__/dsl-interpreter.test.d.ts +0 -1
  29. package/dist/dsl/__test__/dsl-interpreter.test.js +0 -334
  30. package/dist/dsl/__test__/dsl-interpreter.test.js.map +0 -1
  31. package/dist/dsl/__test__/parser.gen.test.d.ts +0 -1
  32. package/dist/dsl/__test__/parser.gen.test.js +0 -283
  33. package/dist/dsl/__test__/parser.gen.test.js.map +0 -1
  34. package/dist/nl/__test__/context-aware-prompt.test.d.ts +0 -1
  35. package/dist/nl/__test__/context-aware-prompt.test.js +0 -247
  36. package/dist/nl/__test__/context-aware-prompt.test.js.map +0 -1
  37. package/dist/nl/__test__/context-selector.test.d.ts +0 -1
  38. package/dist/nl/__test__/context-selector.test.js +0 -20
  39. package/dist/nl/__test__/context-selector.test.js.map +0 -1
  40. package/dist/nl/__test__/nl-parser-groq-transport.test.d.ts +0 -1
  41. package/dist/nl/__test__/nl-parser-groq-transport.test.js +0 -87
  42. package/dist/nl/__test__/nl-parser-groq-transport.test.js.map +0 -1
  43. package/dist/nl/__test__/nl-parser-openai-parity.test.d.ts +0 -1
  44. package/dist/nl/__test__/nl-parser-openai-parity.test.js +0 -206
  45. package/dist/nl/__test__/nl-parser-openai-parity.test.js.map +0 -1
  46. package/dist/nl/__test__/nl-parser-openai-sampling.test.d.ts +0 -1
  47. package/dist/nl/__test__/nl-parser-openai-sampling.test.js +0 -44
  48. package/dist/nl/__test__/nl-parser-openai-sampling.test.js.map +0 -1
  49. package/dist/nl/__test__/nl-parser-openai-transport.test.d.ts +0 -1
  50. package/dist/nl/__test__/nl-parser-openai-transport.test.js +0 -55
  51. package/dist/nl/__test__/nl-parser-openai-transport.test.js.map +0 -1
  52. package/dist/nl/__test__/nl-parser-utils.test.d.ts +0 -1
  53. package/dist/nl/__test__/nl-parser-utils.test.js +0 -70
  54. package/dist/nl/__test__/nl-parser-utils.test.js.map +0 -1
  55. package/dist/nl/__test__/nl-parser.test.d.ts +0 -1
  56. package/dist/nl/__test__/nl-parser.test.js +0 -64
  57. package/dist/nl/__test__/nl-parser.test.js.map +0 -1
  58. package/dist/nl/__test__/parameter-tuning.test.d.ts +0 -1
  59. package/dist/nl/__test__/parameter-tuning.test.js +0 -95
  60. package/dist/nl/__test__/parameter-tuning.test.js.map +0 -1
  61. package/dist/nl/__test__/semantic-parsing-experiment.test.d.ts +0 -1
  62. package/dist/nl/__test__/semantic-parsing-experiment.test.js +0 -178
  63. package/dist/nl/__test__/semantic-parsing-experiment.test.js.map +0 -1
  64. package/dist/nl/llm-monitoring.test.d.ts +0 -5
  65. package/dist/nl/llm-monitoring.test.js +0 -101
  66. package/dist/nl/llm-monitoring.test.js.map +0 -1
  67. package/lib/__test__/dsl-descriptor.test.ts +0 -27
  68. package/lib/__test__/example_descriptor.ts +0 -762
  69. package/lib/__test__/food_descriptor.state.json +0 -1
  70. package/lib/__test__/food_descriptor.test.ts +0 -331
  71. package/lib/__test__/nl-interpreter-provider-selection.test.ts +0 -126
  72. package/lib/__test__/nl-interpreter.test.ts +0 -129
  73. package/lib/decorators/__test__/decorators.test.ts +0 -177
  74. package/lib/decorators/__test__/inheritance-descriptor.test.ts +0 -92
  75. package/lib/decorators/decorators.ts +0 -754
  76. package/lib/decorators/index.ts +0 -2
  77. package/lib/decorators/store.ts +0 -47
  78. package/lib/dsl/__test__/dsl-interpreter.test.ts +0 -453
  79. package/lib/dsl/__test__/parser.gen.test.ts +0 -296
  80. package/lib/dsl/dsl-interpreter.ts +0 -974
  81. package/lib/dsl/index.ts +0 -1
  82. package/lib/dsl/parser.gen.js +0 -1479
  83. package/lib/dsl/parser.pegjs +0 -130
  84. package/lib/dsl-descriptor.ts +0 -241
  85. package/lib/index.ts +0 -5
  86. package/lib/nl/__test__/context-aware-prompt.test.ts +0 -372
  87. package/lib/nl/__test__/context-selector.test.ts +0 -27
  88. package/lib/nl/__test__/nl-parser-groq-transport.test.ts +0 -139
  89. package/lib/nl/__test__/nl-parser-openai-parity.test.ts +0 -381
  90. package/lib/nl/__test__/nl-parser-openai-sampling.test.ts +0 -73
  91. package/lib/nl/__test__/nl-parser-openai-transport.test.ts +0 -79
  92. package/lib/nl/__test__/nl-parser-utils.test.ts +0 -98
  93. package/lib/nl/__test__/nl-parser.test.ts +0 -119
  94. package/lib/nl/__test__/parameter-tuning.test.ts +0 -137
  95. package/lib/nl/__test__/semantic-parsing-experiment.test.ts +0 -260
  96. package/lib/nl/context-selector.ts +0 -123
  97. package/lib/nl/index.ts +0 -19
  98. package/lib/nl/llm-monitoring.test.ts +0 -136
  99. package/lib/nl/llm-monitoring.ts +0 -339
  100. package/lib/nl/nl-parser-groq.ts +0 -510
  101. package/lib/nl/nl-parser-utils.ts +0 -310
  102. package/lib/nl/nl-parser.ts +0 -616
  103. package/lib/nl/prompt-gen.ts +0 -607
  104. package/lib/nl/prompt-res.ts +0 -207
  105. package/lib/nl-interpreter.ts +0 -262
@@ -1,510 +0,0 @@
1
- import { AgentResponsePayload, PromptGen } from "./prompt-gen";
2
- import { getLLMMonitor, type LLMMonitor } from "./llm-monitoring";
3
- import {
4
- buildCommandRepairMessages,
5
- buildFallbackAgentResponse,
6
- CommandValidator,
7
- createJsonChatCompletion,
8
- DEFAULT_SEMANTIC_PARSER_SAMPLING,
9
- extractStructuredAgentResponse,
10
- extractStructuredCommand,
11
- getStructuredAgentResponseFormat,
12
- getStructuredResponseFormat,
13
- normalizeAgentResponse,
14
- normalizeBaseUrl,
15
- SamplingParams,
16
- } from "./nl-parser-utils";
17
- export type { SamplingParams } from "./nl-parser-utils";
18
-
19
- function buildChatCompletionsEndpoint(baseUrl?: string): string {
20
- const resolvedBase =
21
- normalizeBaseUrl(baseUrl && baseUrl.trim().length > 0
22
- ? baseUrl
23
- : "https://api.groq.com/openai/v1");
24
-
25
- if (resolvedBase.endsWith("/chat/completions")) {
26
- return resolvedBase;
27
- }
28
- if (resolvedBase.endsWith("/openai/v1") || resolvedBase.endsWith("/v1")) {
29
- return `${resolvedBase}/chat/completions`;
30
- }
31
- return `${resolvedBase}/openai/v1/chat/completions`;
32
- }
33
-
34
- function asFiniteNumber(value: unknown): number | undefined {
35
- return typeof value === "number" && Number.isFinite(value) ? value : undefined;
36
- }
37
-
38
- function extractCostMetrics(response: any): {
39
- inputCost?: number;
40
- outputCost?: number;
41
- totalCost?: number;
42
- } {
43
- const usage = response?.usage ?? response?.usage_metadata ?? {};
44
- const inputCost =
45
- asFiniteNumber(usage?.input_cost) ??
46
- asFiniteNumber(usage?.prompt_cost) ??
47
- asFiniteNumber(usage?.prompt_cost_usd);
48
- const outputCost =
49
- asFiniteNumber(usage?.output_cost) ??
50
- asFiniteNumber(usage?.completion_cost) ??
51
- asFiniteNumber(usage?.completion_cost_usd);
52
- const totalCost =
53
- asFiniteNumber(usage?.total_cost) ??
54
- asFiniteNumber(usage?.cost) ??
55
- asFiniteNumber(usage?.total_cost_usd) ??
56
- (typeof inputCost === "number" && typeof outputCost === "number"
57
- ? inputCost + outputCost
58
- : undefined);
59
-
60
- return {
61
- ...(typeof inputCost === "number" ? { inputCost } : {}),
62
- ...(typeof outputCost === "number" ? { outputCost } : {}),
63
- ...(typeof totalCost === "number" ? { totalCost } : {}),
64
- };
65
- }
66
-
67
- export class NlParserGroq {
68
- private chatCompletionsEndpoint: string;
69
- private samplingParams: SamplingParams;
70
- private llmMonitor!: LLMMonitor;
71
-
72
- constructor(
73
- public prompt: PromptGen,
74
- private apiKey: string,
75
- private model: string = process.env.SEMANTIC_MODEL || "llama-3.3-70b-versatile",
76
- samplingParams?: SamplingParams,
77
- private baseURL?: string,
78
- langsmithApiKey?: string,
79
- langsmithProject?: string,
80
- langsmithEndpoint?: string,
81
- private validateCommand?: CommandValidator
82
- ) {
83
- this.chatCompletionsEndpoint = buildChatCompletionsEndpoint(this.baseURL);
84
-
85
- this.llmMonitor = getLLMMonitor({
86
- apiKey: langsmithApiKey,
87
- project: langsmithProject,
88
- endpoint: langsmithEndpoint,
89
- });
90
-
91
- // Default parameters optimized for semantic parsing
92
- this.samplingParams = {
93
- ...DEFAULT_SEMANTIC_PARSER_SAMPLING,
94
- ...samplingParams,
95
- };
96
- }
97
-
98
- private async createChatCompletion(body: Record<string, unknown>): Promise<any> {
99
- return createJsonChatCompletion({
100
- endpoint: this.chatCompletionsEndpoint,
101
- apiKey: this.apiKey,
102
- body,
103
- provider: "groq",
104
- requestLabel: "LLM proxy request failed",
105
- invalidPayloadLabel: "LLM proxy returned an invalid JSON payload",
106
- });
107
- }
108
-
109
- async oldParse(nl: string): Promise<string | null> {
110
- const promptText = this.prompt.prompt(nl);
111
- const startTime = Date.now();
112
-
113
- try {
114
- const response = await this.llmMonitor.traceCall(
115
- "oldParse",
116
- { model: this.model, provider: "groq" },
117
- async () => {
118
- return await this.runStructuredCommandMessages(
119
- [
120
- {
121
- role: "user",
122
- content: promptText,
123
- },
124
- ],
125
- false
126
- );
127
- }
128
- );
129
-
130
- const completion = await this.validateOrRepairCommand(
131
- "oldParse",
132
- "legacy_prompt",
133
- promptText,
134
- response.command,
135
- promptText
136
- );
137
-
138
- // Log metrics
139
- await this.llmMonitor.logCall({
140
- model: this.model,
141
- provider: "groq",
142
- promptTokens: response.response.usage?.prompt_tokens,
143
- completionTokens: response.response.usage?.completion_tokens,
144
- totalTokens: response.response.usage?.total_tokens,
145
- ...extractCostMetrics(response.response),
146
- prompt: promptText,
147
- completion,
148
- latency: Date.now() - startTime,
149
- timestamp: new Date(),
150
- });
151
-
152
- return completion;
153
- } catch (error) {
154
- await this.llmMonitor.logCall({
155
- model: this.model,
156
- provider: "groq",
157
- prompt: promptText,
158
- completion: null,
159
- latency: Date.now() - startTime,
160
- timestamp: new Date(),
161
- error: error instanceof Error ? error.message : String(error),
162
- });
163
- throw error;
164
- }
165
- }
166
-
167
- async parse(nl: string): Promise<string | null> {
168
- const startTime = Date.now();
169
-
170
- const promptVariants: Array<{ label: string; text: string }> = [];
171
- const defaultPrompt = this.prompt.zero_shot_prompt(nl);
172
- promptVariants.push({ label: "filtered", text: defaultPrompt });
173
-
174
- let usedPromptText = defaultPrompt;
175
- let completion: string | null = null;
176
- let response: any;
177
- let lastError: unknown;
178
-
179
- try {
180
- const runParse = async (strict: boolean, promptText: string) => {
181
- return this.runStructuredCommandMessages(
182
- [
183
- {
184
- role: "user",
185
- content: promptText,
186
- },
187
- ],
188
- strict
189
- );
190
- };
191
-
192
- for (const promptVariant of promptVariants) {
193
- usedPromptText = promptVariant.text;
194
- try {
195
- const result = await this.llmMonitor.traceCall(
196
- `parse_${promptVariant.label}`,
197
- { model: this.model, provider: "groq" },
198
- async () => runParse(false, promptVariant.text)
199
- );
200
- response = result.response;
201
- completion = await this.validateOrRepairCommand(
202
- "parse",
203
- promptVariant.label,
204
- nl,
205
- result.command,
206
- usedPromptText
207
- );
208
- break;
209
- } catch (error) {
210
- try {
211
- const result = await this.llmMonitor.traceCall(
212
- `parse_${promptVariant.label}`,
213
- { model: this.model, provider: "groq" },
214
- async () => runParse(true, promptVariant.text)
215
- );
216
- response = result.response;
217
- completion = await this.validateOrRepairCommand(
218
- "parse",
219
- promptVariant.label,
220
- nl,
221
- result.command,
222
- usedPromptText
223
- );
224
- break;
225
- } catch (strictError) {
226
- lastError = strictError;
227
- }
228
- }
229
- }
230
-
231
- if (!completion || !response) {
232
- throw (lastError instanceof Error
233
- ? lastError
234
- : new Error("Groq parse failed for filtered prompt"));
235
- }
236
-
237
- // Log metrics
238
- await this.llmMonitor.logCall({
239
- model: this.model,
240
- provider: "groq",
241
- promptTokens: response.usage?.prompt_tokens,
242
- completionTokens: response.usage?.completion_tokens,
243
- totalTokens: response.usage?.total_tokens,
244
- ...extractCostMetrics(response),
245
- prompt: usedPromptText,
246
- completion,
247
- latency: Date.now() - startTime,
248
- timestamp: new Date(),
249
- });
250
-
251
- return completion;
252
- } catch (error) {
253
- await this.llmMonitor.logCall({
254
- model: this.model,
255
- provider: "groq",
256
- prompt: usedPromptText,
257
- completion: null,
258
- latency: Date.now() - startTime,
259
- timestamp: new Date(),
260
- error: error instanceof Error ? error.message : String(error),
261
- });
262
- throw error;
263
- }
264
- }
265
-
266
- async parseGpt4(nl: string): Promise<string | null> {
267
- const promptText = this.prompt.prompt(nl);
268
- const startTime = Date.now();
269
-
270
- try {
271
- const runParse = async (strict: boolean) => {
272
- return this.runStructuredCommandMessages(
273
- [
274
- {
275
- role: "system",
276
- content: "only generate one line of code",
277
- },
278
- {
279
- role: "user",
280
- content: promptText,
281
- },
282
- ],
283
- strict
284
- );
285
- };
286
-
287
- let response: any;
288
- let completion: string | null = null;
289
-
290
- try {
291
- const result = await this.llmMonitor.traceCall(
292
- "parseGpt4",
293
- { model: this.model, provider: "groq" },
294
- async () => runParse(false)
295
- );
296
- response = result.response;
297
- completion = result.command;
298
- } catch (error) {
299
- const result = await this.llmMonitor.traceCall(
300
- "parseGpt4",
301
- { model: this.model, provider: "groq" },
302
- async () => runParse(true)
303
- );
304
- response = result.response;
305
- completion = result.command;
306
- }
307
-
308
- const validatedCompletion = completion
309
- ? await this.validateOrRepairCommand(
310
- "parseGpt4",
311
- "legacy_prompt",
312
- nl,
313
- completion,
314
- promptText
315
- )
316
- : completion;
317
-
318
- // Log metrics
319
- await this.llmMonitor.logCall({
320
- model: this.model,
321
- provider: "groq",
322
- promptTokens: response.usage?.prompt_tokens,
323
- completionTokens: response.usage?.completion_tokens,
324
- totalTokens: response.usage?.total_tokens,
325
- ...extractCostMetrics(response),
326
- prompt: promptText,
327
- completion: validatedCompletion,
328
- latency: Date.now() - startTime,
329
- timestamp: new Date(),
330
- });
331
-
332
- return validatedCompletion;
333
- } catch (error) {
334
- await this.llmMonitor.logCall({
335
- model: this.model,
336
- provider: "groq",
337
- prompt: promptText,
338
- completion: null,
339
- latency: Date.now() - startTime,
340
- timestamp: new Date(),
341
- error: error instanceof Error ? error.message : String(error),
342
- });
343
- throw error;
344
- }
345
- }
346
-
347
- async respond(
348
- nl: string,
349
- parsed: string,
350
- result: string
351
- ): Promise<AgentResponsePayload | null> {
352
- const promptText = this.prompt.response_prompt(nl, parsed, result);
353
- const startTime = Date.now();
354
-
355
- try {
356
- const runRespond = async (strict: boolean) => {
357
- const response = await this.createChatCompletion({
358
- model: this.model,
359
- messages: [
360
- {
361
- role: "system",
362
- content: "Return only a strict JSON object for agent_response_json.",
363
- },
364
- {
365
- role: "user",
366
- content: promptText,
367
- },
368
- ],
369
- temperature: this.samplingParams.temperature,
370
- max_tokens: 256,
371
- top_p: this.samplingParams.top_p,
372
- frequency_penalty: this.samplingParams.frequency_penalty,
373
- presence_penalty: this.samplingParams.presence_penalty,
374
- response_format: getStructuredAgentResponseFormat(strict),
375
- });
376
-
377
- const raw = response.choices[0]?.message?.content ?? null;
378
- const payload = extractStructuredAgentResponse(raw);
379
- if (!payload) {
380
- throw new Error("Groq structured output missing agent response payload");
381
- }
382
- return { response, payload };
383
- };
384
-
385
- let response: any;
386
- let completion: AgentResponsePayload | null = null;
387
-
388
- try {
389
- const result = await this.llmMonitor.traceCall(
390
- "respond",
391
- { model: this.model, provider: "groq" },
392
- async () => runRespond(false)
393
- );
394
- response = result.response;
395
- completion = result.payload;
396
- } catch {
397
- const result = await this.llmMonitor.traceCall(
398
- "respond",
399
- { model: this.model, provider: "groq" },
400
- async () => runRespond(true)
401
- );
402
- response = result.response;
403
- completion = result.payload;
404
- }
405
-
406
- const normalizedCompletion = completion
407
- ? normalizeAgentResponse(completion)
408
- : buildFallbackAgentResponse(parsed, result);
409
-
410
- // Log metrics
411
- await this.llmMonitor.logCall({
412
- model: this.model,
413
- provider: "groq",
414
- promptTokens: response.usage?.prompt_tokens,
415
- completionTokens: response.usage?.completion_tokens,
416
- totalTokens: response.usage?.total_tokens,
417
- ...extractCostMetrics(response),
418
- prompt: promptText,
419
- completion: JSON.stringify(normalizedCompletion),
420
- latency: Date.now() - startTime,
421
- timestamp: new Date(),
422
- });
423
-
424
- return normalizedCompletion;
425
- } catch (error) {
426
- const fallbackResponse = buildFallbackAgentResponse(parsed, result);
427
- await this.llmMonitor.logCall({
428
- model: this.model,
429
- provider: "groq",
430
- prompt: promptText,
431
- completion: JSON.stringify(fallbackResponse),
432
- latency: Date.now() - startTime,
433
- timestamp: new Date(),
434
- error: error instanceof Error ? error.message : String(error),
435
- });
436
- return fallbackResponse;
437
- }
438
- }
439
-
440
- private async runStructuredCommandMessages(
441
- messages: ReadonlyArray<{
442
- role: "system" | "user" | "assistant";
443
- content: string;
444
- }>,
445
- strict: boolean
446
- ): Promise<{ response: any; command: string }> {
447
- const response = await this.createChatCompletion({
448
- model: this.model,
449
- messages: [...messages],
450
- temperature: this.samplingParams.temperature,
451
- max_tokens: 256,
452
- top_p: this.samplingParams.top_p,
453
- frequency_penalty: this.samplingParams.frequency_penalty,
454
- presence_penalty: this.samplingParams.presence_penalty,
455
- response_format: getStructuredResponseFormat(strict),
456
- });
457
-
458
- const raw = response.choices[0]?.message?.content ?? null;
459
- const command = extractStructuredCommand(raw);
460
- if (!command) {
461
- throw new Error("Groq structured output missing command");
462
- }
463
- return { response, command };
464
- }
465
-
466
- private async validateOrRepairCommand(
467
- traceName: string,
468
- promptLabel: string,
469
- userUtterance: string,
470
- command: string,
471
- lastUsedPrompt?: string
472
- ): Promise<string> {
473
- if (!this.validateCommand) {
474
- return command;
475
- }
476
-
477
- try {
478
- await this.validateCommand(command);
479
- return command;
480
- } catch (validationError) {
481
- const messages = buildCommandRepairMessages(
482
- userUtterance,
483
- command,
484
- validationError instanceof Error ? validationError.message : String(validationError),
485
- lastUsedPrompt
486
- );
487
-
488
- let lastError: unknown = validationError;
489
- for (const strict of [false, true]) {
490
- try {
491
- const repaired = await this.llmMonitor.traceCall(
492
- `${traceName}_${promptLabel}_repair_${strict ? "strict" : "lenient"}`,
493
- { model: this.model, provider: "groq" },
494
- async () => this.runStructuredCommandMessages(messages, strict)
495
- );
496
- await this.validateCommand(repaired.command);
497
- return repaired.command;
498
- } catch (repairError) {
499
- lastError = repairError;
500
- }
501
- }
502
-
503
- throw (
504
- lastError instanceof Error
505
- ? lastError
506
- : new Error("Groq repair failed after semantic validation")
507
- );
508
- }
509
- }
510
- }