@garrix82/reactgenie-dsl 1.0.0 → 1.0.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (105)
  1. package/.env +10 -0
  2. package/.env.example +17 -0
  3. package/.github/workflows/publish.yml +20 -0
  4. package/README.md +5 -1
  5. package/package.json +1 -5
  6. package/dist/__test__/dsl-descriptor.test.d.ts +0 -1
  7. package/dist/__test__/dsl-descriptor.test.js +0 -27
  8. package/dist/__test__/dsl-descriptor.test.js.map +0 -1
  9. package/dist/__test__/example_descriptor.d.ts +0 -125
  10. package/dist/__test__/example_descriptor.js +0 -607
  11. package/dist/__test__/example_descriptor.js.map +0 -1
  12. package/dist/__test__/food_descriptor.state.json +0 -1
  13. package/dist/__test__/food_descriptor.test.d.ts +0 -74
  14. package/dist/__test__/food_descriptor.test.js +0 -205
  15. package/dist/__test__/food_descriptor.test.js.map +0 -1
  16. package/dist/__test__/nl-interpreter-provider-selection.test.d.ts +0 -1
  17. package/dist/__test__/nl-interpreter-provider-selection.test.js +0 -73
  18. package/dist/__test__/nl-interpreter-provider-selection.test.js.map +0 -1
  19. package/dist/__test__/nl-interpreter.test.d.ts +0 -1
  20. package/dist/__test__/nl-interpreter.test.js +0 -86
  21. package/dist/__test__/nl-interpreter.test.js.map +0 -1
  22. package/dist/decorators/__test__/decorators.test.d.ts +0 -1
  23. package/dist/decorators/__test__/decorators.test.js +0 -182
  24. package/dist/decorators/__test__/decorators.test.js.map +0 -1
  25. package/dist/decorators/__test__/inheritance-descriptor.test.d.ts +0 -1
  26. package/dist/decorators/__test__/inheritance-descriptor.test.js +0 -107
  27. package/dist/decorators/__test__/inheritance-descriptor.test.js.map +0 -1
  28. package/dist/dsl/__test__/dsl-interpreter.test.d.ts +0 -1
  29. package/dist/dsl/__test__/dsl-interpreter.test.js +0 -334
  30. package/dist/dsl/__test__/dsl-interpreter.test.js.map +0 -1
  31. package/dist/dsl/__test__/parser.gen.test.d.ts +0 -1
  32. package/dist/dsl/__test__/parser.gen.test.js +0 -283
  33. package/dist/dsl/__test__/parser.gen.test.js.map +0 -1
  34. package/dist/nl/__test__/context-aware-prompt.test.d.ts +0 -1
  35. package/dist/nl/__test__/context-aware-prompt.test.js +0 -247
  36. package/dist/nl/__test__/context-aware-prompt.test.js.map +0 -1
  37. package/dist/nl/__test__/context-selector.test.d.ts +0 -1
  38. package/dist/nl/__test__/context-selector.test.js +0 -20
  39. package/dist/nl/__test__/context-selector.test.js.map +0 -1
  40. package/dist/nl/__test__/nl-parser-groq-transport.test.d.ts +0 -1
  41. package/dist/nl/__test__/nl-parser-groq-transport.test.js +0 -87
  42. package/dist/nl/__test__/nl-parser-groq-transport.test.js.map +0 -1
  43. package/dist/nl/__test__/nl-parser-openai-parity.test.d.ts +0 -1
  44. package/dist/nl/__test__/nl-parser-openai-parity.test.js +0 -206
  45. package/dist/nl/__test__/nl-parser-openai-parity.test.js.map +0 -1
  46. package/dist/nl/__test__/nl-parser-openai-sampling.test.d.ts +0 -1
  47. package/dist/nl/__test__/nl-parser-openai-sampling.test.js +0 -44
  48. package/dist/nl/__test__/nl-parser-openai-sampling.test.js.map +0 -1
  49. package/dist/nl/__test__/nl-parser-openai-transport.test.d.ts +0 -1
  50. package/dist/nl/__test__/nl-parser-openai-transport.test.js +0 -55
  51. package/dist/nl/__test__/nl-parser-openai-transport.test.js.map +0 -1
  52. package/dist/nl/__test__/nl-parser-utils.test.d.ts +0 -1
  53. package/dist/nl/__test__/nl-parser-utils.test.js +0 -70
  54. package/dist/nl/__test__/nl-parser-utils.test.js.map +0 -1
  55. package/dist/nl/__test__/nl-parser.test.d.ts +0 -1
  56. package/dist/nl/__test__/nl-parser.test.js +0 -64
  57. package/dist/nl/__test__/nl-parser.test.js.map +0 -1
  58. package/dist/nl/__test__/parameter-tuning.test.d.ts +0 -1
  59. package/dist/nl/__test__/parameter-tuning.test.js +0 -95
  60. package/dist/nl/__test__/parameter-tuning.test.js.map +0 -1
  61. package/dist/nl/__test__/semantic-parsing-experiment.test.d.ts +0 -1
  62. package/dist/nl/__test__/semantic-parsing-experiment.test.js +0 -178
  63. package/dist/nl/__test__/semantic-parsing-experiment.test.js.map +0 -1
  64. package/dist/nl/llm-monitoring.test.d.ts +0 -5
  65. package/dist/nl/llm-monitoring.test.js +0 -101
  66. package/dist/nl/llm-monitoring.test.js.map +0 -1
  67. package/lib/__test__/dsl-descriptor.test.ts +0 -27
  68. package/lib/__test__/example_descriptor.ts +0 -762
  69. package/lib/__test__/food_descriptor.state.json +0 -1
  70. package/lib/__test__/food_descriptor.test.ts +0 -331
  71. package/lib/__test__/nl-interpreter-provider-selection.test.ts +0 -126
  72. package/lib/__test__/nl-interpreter.test.ts +0 -129
  73. package/lib/decorators/__test__/decorators.test.ts +0 -177
  74. package/lib/decorators/__test__/inheritance-descriptor.test.ts +0 -92
  75. package/lib/decorators/decorators.ts +0 -754
  76. package/lib/decorators/index.ts +0 -2
  77. package/lib/decorators/store.ts +0 -47
  78. package/lib/dsl/__test__/dsl-interpreter.test.ts +0 -453
  79. package/lib/dsl/__test__/parser.gen.test.ts +0 -296
  80. package/lib/dsl/dsl-interpreter.ts +0 -974
  81. package/lib/dsl/index.ts +0 -1
  82. package/lib/dsl/parser.gen.js +0 -1479
  83. package/lib/dsl/parser.pegjs +0 -130
  84. package/lib/dsl-descriptor.ts +0 -241
  85. package/lib/index.ts +0 -5
  86. package/lib/nl/__test__/context-aware-prompt.test.ts +0 -372
  87. package/lib/nl/__test__/context-selector.test.ts +0 -27
  88. package/lib/nl/__test__/nl-parser-groq-transport.test.ts +0 -139
  89. package/lib/nl/__test__/nl-parser-openai-parity.test.ts +0 -381
  90. package/lib/nl/__test__/nl-parser-openai-sampling.test.ts +0 -73
  91. package/lib/nl/__test__/nl-parser-openai-transport.test.ts +0 -79
  92. package/lib/nl/__test__/nl-parser-utils.test.ts +0 -98
  93. package/lib/nl/__test__/nl-parser.test.ts +0 -119
  94. package/lib/nl/__test__/parameter-tuning.test.ts +0 -137
  95. package/lib/nl/__test__/semantic-parsing-experiment.test.ts +0 -260
  96. package/lib/nl/context-selector.ts +0 -123
  97. package/lib/nl/index.ts +0 -19
  98. package/lib/nl/llm-monitoring.test.ts +0 -136
  99. package/lib/nl/llm-monitoring.ts +0 -339
  100. package/lib/nl/nl-parser-groq.ts +0 -510
  101. package/lib/nl/nl-parser-utils.ts +0 -310
  102. package/lib/nl/nl-parser.ts +0 -616
  103. package/lib/nl/prompt-gen.ts +0 -607
  104. package/lib/nl/prompt-res.ts +0 -207
  105. package/lib/nl-interpreter.ts +0 -262
@@ -1,119 +0,0 @@
1
- import { NlParserGroq } from "../nl-parser-groq";
2
- import { BasicPromptGen } from "../prompt-gen";
3
- import { classDescriptions, examples } from "../../__test__/example_descriptor";
4
- import { initGenie } from "../../decorators";
5
-
6
- initGenie();
7
- jest.setTimeout(30000);
8
-
9
- describe("NlParserGroq offline regression", () => {
10
- const originalFetch = global.fetch;
11
-
12
- afterEach(() => {
13
- global.fetch = originalFetch;
14
- jest.restoreAllMocks();
15
- });
16
-
17
- test("Parser basics", async () => {
18
- global.fetch = jest.fn().mockResolvedValue(
19
- new Response(
20
- JSON.stringify({
21
- choices: [
22
- {
23
- message: {
24
- content: JSON.stringify({
25
- command:
26
- 'Restaurant.all().matching(field: .address, value: "palo alto").sort(field: .priceGrade, ascending: true)[0]',
27
- }),
28
- },
29
- },
30
- ],
31
- usage: {},
32
- }),
33
- { status: 200, headers: { "content-type": "application/json" } }
34
- )
35
- ) as any;
36
-
37
- const parser = new NlParserGroq(
38
- new BasicPromptGen(classDescriptions, examples),
39
- "test-key",
40
- "llama-3.3-70b-versatile"
41
- );
42
-
43
- await expect(
44
- parser.parse("get me the cheapest restaurant in palo alto")
45
- ).resolves.toBe(
46
- 'Restaurant.all().matching(field: .address, value: "palo alto").sort(field: .priceGrade, ascending: true)[0]'
47
- );
48
- });
49
-
50
- test("Parser complex", async () => {
51
- global.fetch = jest.fn().mockResolvedValue(
52
- new Response(
53
- JSON.stringify({
54
- choices: [
55
- {
56
- message: {
57
- content: JSON.stringify({
58
- command:
59
- 'Order.current().addFoods(foods: [Order.all().matching(field: .restaurant, value: Restaurant.all().matching(field: .name, value: "mcDonald")[0]).sort(field: .dateTime, ascending: false)[0].foods.matching(field: .name, value: "burger")[0]])',
60
- }),
61
- },
62
- },
63
- ],
64
- usage: {},
65
- }),
66
- { status: 200, headers: { "content-type": "application/json" } }
67
- )
68
- ) as any;
69
-
70
- const parser = new NlParserGroq(
71
- new BasicPromptGen(classDescriptions, examples),
72
- "test-key",
73
- "llama-3.3-70b-versatile"
74
- );
75
-
76
- await expect(
77
- parser.parse("order the same burger that I ordered at mcDonald last time")
78
- ).resolves.toBe(
79
- 'Order.current().addFoods(foods: [Order.all().matching(field: .restaurant, value: Restaurant.all().matching(field: .name, value: "mcDonald")[0]).sort(field: .dateTime, ascending: false)[0].foods.matching(field: .name, value: "burger")[0]])'
80
- );
81
- });
82
-
83
- test("Parser complex voice", async () => {
84
- global.fetch = jest.fn().mockResolvedValue(
85
- new Response(
86
- JSON.stringify({
87
- choices: [
88
- {
89
- message: {
90
- content: JSON.stringify({
91
- command:
92
- 'Order.current().addFoods(foods: Order.all().matching(field: .restaurant, value: Restaurant.all().matching(field: .name, value: "mcDonald")[0]).sort(field: .dateTime, ascending: false)[0].foods)',
93
- }),
94
- },
95
- },
96
- ],
97
- usage: {},
98
- }),
99
- { status: 200, headers: { "content-type": "application/json" } }
100
- )
101
- ) as any;
102
-
103
- const parser = new NlParserGroq(
104
- new BasicPromptGen(
105
- classDescriptions,
106
- examples,
107
- '// We utilize voice recognition technology, which may occasionally result in errors. Please consider the possibility of words with similar sounds being misinterpreted. For instance, the word "order" might be mistakenly recognized as "elder".'
108
- ),
109
- "test-key",
110
- "llama-3.3-70b-versatile"
111
- );
112
-
113
- await expect(
114
- parser.parseGpt4("elder the same foods that I ordered at mcDonald last time")
115
- ).resolves.toBe(
116
- 'Order.current().addFoods(foods: Order.all().matching(field: .restaurant, value: Restaurant.all().matching(field: .name, value: "mcDonald")[0]).sort(field: .dateTime, ascending: false)[0].foods)'
117
- );
118
- });
119
- });
@@ -1,137 +0,0 @@
1
- import { NlParserGroq } from '../nl-parser-groq';
2
- import { BasicPromptGen } from '../prompt-gen';
3
- import { classDescriptions, examples } from '../../__test__/example_descriptor';
4
- import { initGenie } from '../../decorators';
5
-
6
- initGenie();
7
- jest.setTimeout(180000); // 3 minutes for comprehensive testing
8
-
9
- // Test cases with expected outputs
10
- const testCases = [
11
- {
12
- name: 'Basic query',
13
- input: 'get me the cheapest restaurant in palo alto',
14
- expected: 'Restaurant.all().matching(field: .address, value: "palo alto").sort(field: .priceGrade, ascending: true)[0]',
15
- method: 'parse'
16
- },
17
- {
18
- name: 'Complex query',
19
- input: 'order the same burger that I ordered at mcDonald last time',
20
- expected: 'Order.current().addFoods(foods: [Order.all().matching(field: .restaurant, value: Restaurant.all().matching(field: .name, value: "mcDonald")[0]).sort(field: .dateTime, ascending: false)[0].foods.matching(field: .name, value: "burger")[0]])',
21
- method: 'parse'
22
- },
23
- {
24
- name: 'Voice recognition query',
25
- input: 'elder the same foods that I ordered at mcDonald last time',
26
- expected: 'Order.current().addFoods(foods: Order.all().matching(field: .restaurant, value: Restaurant.all().matching(field: .name, value: "mcDonald")[0]).sort(field: .dateTime, ascending: false)[0].foods)',
27
- method: 'parseGpt4',
28
- extraPrompt: '// We utilize voice recognition technology, which may occasionally result in errors. Please consider the possibility of words with similar sounds being misinterpreted. For instance, the word "order" might be mistakenly recognized as "elder".'
29
- }
30
- ];
31
-
32
- // Parameter combinations to test
33
- const parameterSets = [
34
- // Current baseline
35
- { name: 'Baseline (temp=0.2)', temperature: 0.2, top_p: 1, frequency_penalty: 0, presence_penalty: 0 },
36
-
37
- // Temperature variations (determinism vs creativity)
38
- { name: 'Very deterministic', temperature: 0, top_p: 1, frequency_penalty: 0, presence_penalty: 0 },
39
- { name: 'Slightly random', temperature: 0.1, top_p: 1, frequency_penalty: 0, presence_penalty: 0 },
40
- { name: 'More creative', temperature: 0.3, top_p: 1, frequency_penalty: 0, presence_penalty: 0 },
41
- { name: 'Moderate creative', temperature: 0.5, top_p: 1, frequency_penalty: 0, presence_penalty: 0 },
42
-
43
- // Top-p (nucleus sampling) variations
44
- { name: 'Narrow top_p', temperature: 0.2, top_p: 0.8, frequency_penalty: 0, presence_penalty: 0 },
45
- { name: 'Very narrow top_p', temperature: 0.2, top_p: 0.5, frequency_penalty: 0, presence_penalty: 0 },
46
- { name: 'Tighter sampling', temperature: 0.1, top_p: 0.9, frequency_penalty: 0, presence_penalty: 0 },
47
-
48
- // Frequency penalty (reduce repetition)
49
- { name: 'Low freq penalty', temperature: 0.2, top_p: 1, frequency_penalty: 0.2, presence_penalty: 0 },
50
- { name: 'Medium freq penalty', temperature: 0.2, top_p: 1, frequency_penalty: 0.5, presence_penalty: 0 },
51
- { name: 'High freq penalty', temperature: 0.2, top_p: 1, frequency_penalty: 1.0, presence_penalty: 0 },
52
-
53
- // Presence penalty (encourage diversity)
54
- { name: 'Low pres penalty', temperature: 0.2, top_p: 1, frequency_penalty: 0, presence_penalty: 0.2 },
55
- { name: 'Medium pres penalty', temperature: 0.2, top_p: 1, frequency_penalty: 0, presence_penalty: 0.5 },
56
-
57
- // Combined penalties
58
- { name: 'Balanced penalties', temperature: 0.2, top_p: 1, frequency_penalty: 0.3, presence_penalty: 0.3 },
59
- { name: 'Strong penalties', temperature: 0.1, top_p: 0.9, frequency_penalty: 0.5, presence_penalty: 0.5 },
60
-
61
- // Optimal combinations for code generation
62
- { name: 'Code-optimized 1', temperature: 0, top_p: 0.95, frequency_penalty: 0, presence_penalty: 0 },
63
- { name: 'Code-optimized 2', temperature: 0.1, top_p: 0.95, frequency_penalty: 0.1, presence_penalty: 0 },
64
- { name: 'Code-optimized 3', temperature: 0.15, top_p: 0.9, frequency_penalty: 0.2, presence_penalty: 0.1 },
65
- ];
66
-
67
- describe('Parameter Tuning Experiments', () => {
68
- // Full comprehensive test - unskip to run all combinations
69
- describe.skip('Comprehensive Parameter Testing', () => {
70
- parameterSets.forEach((params) => {
71
- describe(`Config: ${params.name}`, () => {
72
- testCases.forEach((testCase) => {
73
- test(`${testCase.name}`, async () => {
74
- const parser = new NlParserGroq(
75
- new BasicPromptGen(
76
- classDescriptions,
77
- examples,
78
- testCase.extraPrompt
79
- ),
80
- process.env.GROQ_API_KEY || 'test-key',
81
- 'llama-3.3-70b-versatile',
82
- params // Pass parameters
83
- );
84
-
85
- let parsed: string | null;
86
- if (testCase.method === 'parseGpt4') {
87
- parsed = await parser.parseGpt4(testCase.input);
88
- } else {
89
- parsed = await parser.parse(testCase.input);
90
- }
91
-
92
- const matches = parsed === testCase.expected;
93
-
94
- console.log(`\n📊 ${params.name} - ${testCase.name}`);
95
- console.log(` Temperature: ${params.temperature}, Top-p: ${params.top_p}, Freq: ${params.frequency_penalty}, Pres: ${params.presence_penalty}`);
96
- console.log(` ✓ Match: ${matches ? '✅ EXACT' : '❌ DIFF'}`);
97
-
98
- if (!matches) {
99
- console.log(` Expected: ${testCase.expected.substring(0, 80)}...`);
100
- console.log(` Got: ${parsed?.substring(0, 80)}...`);
101
- }
102
-
103
- // Don't fail the test, just log results for comparison
104
- expect(parsed).toBeDefined();
105
- });
106
- });
107
- });
108
- });
109
- });
110
-
111
- // Quick focused test on most promising configurations
112
- describe('Quick Parameter Comparison', () => {
113
- const focusedParams = [
114
- { name: 'Baseline', temperature: 0.2, top_p: 1, frequency_penalty: 0, presence_penalty: 0 },
115
- { name: 'Deterministic', temperature: 0, top_p: 1, frequency_penalty: 0, presence_penalty: 0 },
116
- { name: 'Code-optimized', temperature: 0.1, top_p: 0.95, frequency_penalty: 0.1, presence_penalty: 0 },
117
- ];
118
-
119
- focusedParams.forEach((params) => {
120
- test(`Quick test: ${params.name}`, async () => {
121
- const parser = new NlParserGroq(
122
- new BasicPromptGen(classDescriptions, examples),
123
- process.env.GROQ_API_KEY || 'test-key',
124
- 'llama-3.3-70b-versatile',
125
- params
126
- );
127
-
128
- const parsed = await parser.parse('get me the cheapest restaurant in palo alto');
129
-
130
- console.log(`\n🎯 ${params.name}:`);
131
- console.log(` Result: ${parsed?.substring(0, 100)}...`);
132
-
133
- expect(parsed).toBeDefined();
134
- });
135
- });
136
- });
137
- });
@@ -1,260 +0,0 @@
1
- /**
2
- * Systematic Parameter Tuning Experiment
3
- *
4
- * This script tests different sampling parameter combinations to find
5
- * the optimal configuration for semantic parsing with Groq's LLM.
6
- *
7
- * Run with: pnpm test lib/nl/__test__/semantic-parsing-experiment.test.ts
8
- */
9
-
10
- import { NlParserGroq, SamplingParams } from '../nl-parser-groq';
11
- import { BasicPromptGen } from '../prompt-gen';
12
- import { classDescriptions, examples } from '../../__test__/example_descriptor';
13
- import { initGenie } from '../../decorators';
14
-
15
- initGenie();
16
- jest.setTimeout(300000); // 5 minutes for comprehensive testing
17
-
18
- interface TestCase {
19
- name: string;
20
- input: string;
21
- expected: string;
22
- method: 'parse' | 'parseGpt4';
23
- extraPrompt?: string;
24
- }
25
-
26
- interface ExperimentResult {
27
- configName: string;
28
- params: SamplingParams;
29
- testName: string;
30
- input: string;
31
- output: string | null;
32
- expected: string;
33
- exactMatch: boolean;
34
- similarityScore: number;
35
- }
36
-
37
- // Test cases
38
- const testCases: TestCase[] = [
39
- {
40
- name: 'Basic query',
41
- input: 'get me the cheapest restaurant in palo alto',
42
- expected: 'Restaurant.all().matching(field: .address, value: "palo alto").sort(field: .priceGrade, ascending: true)[0]',
43
- method: 'parse'
44
- },
45
- {
46
- name: 'Complex query',
47
- input: 'order the same burger that I ordered at mcDonald last time',
48
- expected: 'Order.current().addFoods(foods: [Order.all().matching(field: .restaurant, value: Restaurant.all().matching(field: .name, value: "mcDonald")[0]).sort(field: .dateTime, ascending: false)[0].foods.matching(field: .name, value: "burger")[0]])',
49
- method: 'parse'
50
- },
51
- {
52
- name: 'Voice recognition query',
53
- input: 'elder the same foods that I ordered at mcDonald last time',
54
- expected: 'Order.current().addFoods(foods: Order.all().matching(field: .restaurant, value: Restaurant.all().matching(field: .name, value: "mcDonald")[0]).sort(field: .dateTime, ascending: false)[0].foods)',
55
- method: 'parseGpt4',
56
- extraPrompt: '// We utilize voice recognition technology, which may occasionally result in errors. Please consider the possibility of words with similar sounds being misinterpreted. For instance, the word "order" might be mistakenly recognized as "elder".'
57
- }
58
- ];
59
-
60
- // Parameter configurations to test
61
- const experimentConfigs: Array<{ name: string; params: SamplingParams }> = [
62
- // Baseline
63
- { name: 'Current (temp=0.2)', params: { temperature: 0.2, top_p: 1, frequency_penalty: 0, presence_penalty: 0 } },
64
-
65
- // Temperature sweep
66
- { name: 'Deterministic (temp=0)', params: { temperature: 0, top_p: 1, frequency_penalty: 0, presence_penalty: 0 } },
67
- { name: 'Low temp (0.1)', params: { temperature: 0.1, top_p: 1, frequency_penalty: 0, presence_penalty: 0 } },
68
- { name: 'Medium temp (0.3)', params: { temperature: 0.3, top_p: 1, frequency_penalty: 0, presence_penalty: 0 } },
69
-
70
- // Top-p (nucleus sampling)
71
- { name: 'Narrow sampling (top_p=0.9)', params: { temperature: 0.2, top_p: 0.9, frequency_penalty: 0, presence_penalty: 0 } },
72
- { name: 'Very narrow (top_p=0.8)', params: { temperature: 0.2, top_p: 0.8, frequency_penalty: 0, presence_penalty: 0 } },
73
-
74
- // Code-optimized combinations
75
- { name: 'Code-opt 1', params: { temperature: 0, top_p: 0.95, frequency_penalty: 0, presence_penalty: 0 } },
76
- { name: 'Code-opt 2', params: { temperature: 0.1, top_p: 0.95, frequency_penalty: 0.1, presence_penalty: 0 } },
77
- { name: 'Code-opt 3', params: { temperature: 0.15, top_p: 0.9, frequency_penalty: 0.2, presence_penalty: 0.1 } },
78
-
79
- // Penalty variations
80
- { name: 'With freq penalty', params: { temperature: 0.2, top_p: 1, frequency_penalty: 0.3, presence_penalty: 0 } },
81
- { name: 'Balanced penalties', params: { temperature: 0.2, top_p: 1, frequency_penalty: 0.2, presence_penalty: 0.2 } },
82
- ];
83
-
84
- // Calculate similarity score (simple string similarity)
85
- function calculateSimilarity(str1: string | null, str2: string): number {
86
- if (!str1) return 0;
87
- if (str1 === str2) return 1.0;
88
-
89
- const longer = str1.length > str2.length ? str1 : str2;
90
- const shorter = str1.length > str2.length ? str2 : str1;
91
-
92
- if (longer.length === 0) return 1.0;
93
-
94
- // Calculate Levenshtein distance
95
- const editDistance = levenshteinDistance(str1, str2);
96
- return (longer.length - editDistance) / longer.length;
97
- }
98
-
99
- function levenshteinDistance(str1: string, str2: string): number {
100
- const matrix: number[][] = [];
101
-
102
- for (let i = 0; i <= str2.length; i++) {
103
- matrix[i] = [i];
104
- }
105
-
106
- for (let j = 0; j <= str1.length; j++) {
107
- matrix[0][j] = j;
108
- }
109
-
110
- for (let i = 1; i <= str2.length; i++) {
111
- for (let j = 1; j <= str1.length; j++) {
112
- if (str2.charAt(i - 1) === str1.charAt(j - 1)) {
113
- matrix[i][j] = matrix[i - 1][j - 1];
114
- } else {
115
- matrix[i][j] = Math.min(
116
- matrix[i - 1][j - 1] + 1,
117
- matrix[i][j - 1] + 1,
118
- matrix[i - 1][j] + 1
119
- );
120
- }
121
- }
122
- }
123
-
124
- return matrix[str2.length][str1.length];
125
- }
126
-
127
- describe('🔬 Semantic Parsing Parameter Experiments', () => {
128
- let allResults: ExperimentResult[] = [];
129
-
130
- describe('Run experiments', () => {
131
- experimentConfigs.forEach((config) => {
132
- describe(`Config: ${config.name}`, () => {
133
- testCases.forEach((testCase) => {
134
- test(testCase.name, async () => {
135
- const parser = new NlParserGroq(
136
- new BasicPromptGen(
137
- classDescriptions,
138
- examples,
139
- testCase.extraPrompt
140
- ),
141
- process.env.GROQ_API_KEY || 'test-key',
142
- 'llama-3.3-70b-versatile',
143
- config.params
144
- );
145
-
146
- let output: string | null;
147
- if (testCase.method === 'parseGpt4') {
148
- output = await parser.parseGpt4(testCase.input);
149
- } else {
150
- output = await parser.parse(testCase.input);
151
- }
152
-
153
- const exactMatch = output === testCase.expected;
154
- const similarity = calculateSimilarity(output, testCase.expected);
155
-
156
- const result: ExperimentResult = {
157
- configName: config.name,
158
- params: config.params,
159
- testName: testCase.name,
160
- input: testCase.input,
161
- output: output,
162
- expected: testCase.expected,
163
- exactMatch,
164
- similarityScore: similarity
165
- };
166
-
167
- allResults.push(result);
168
-
169
- // Log result
170
- console.log(`\n${exactMatch ? '✅' : '❌'} ${config.name} - ${testCase.name}`);
171
- console.log(` Params: temp=${config.params.temperature}, top_p=${config.params.top_p}, freq=${config.params.frequency_penalty}, pres=${config.params.presence_penalty}`);
172
- console.log(` Similarity: ${(similarity * 100).toFixed(1)}%`);
173
-
174
- if (!exactMatch && output) {
175
- console.log(` Expected: ${testCase.expected.substring(0, 60)}...`);
176
- console.log(` Got: ${output.substring(0, 60)}...`);
177
- }
178
-
179
- expect(output).toBeDefined();
180
- });
181
- });
182
- });
183
- });
184
- });
185
-
186
- describe('📊 Summary and Recommendations', () => {
187
- test('Analyze results', () => {
188
- console.log('\n\n' + '='.repeat(80));
189
- console.log('📊 EXPERIMENT SUMMARY');
190
- console.log('='.repeat(80));
191
-
192
- // Group by configuration
193
- const configSummaries = new Map<string, { totalTests: number; exactMatches: number; avgSimilarity: number }>();
194
-
195
- allResults.forEach(result => {
196
- if (!configSummaries.has(result.configName)) {
197
- configSummaries.set(result.configName, { totalTests: 0, exactMatches: 0, avgSimilarity: 0 });
198
- }
199
-
200
- const summary = configSummaries.get(result.configName)!;
201
- summary.totalTests++;
202
- if (result.exactMatch) summary.exactMatches++;
203
- summary.avgSimilarity += result.similarityScore;
204
- });
205
-
206
- // Calculate averages and sort by performance
207
- const sortedConfigs = Array.from(configSummaries.entries())
208
- .map(([name, data]) => ({
209
- name,
210
- ...data,
211
- avgSimilarity: data.avgSimilarity / data.totalTests,
212
- successRate: (data.exactMatches / data.totalTests) * 100
213
- }))
214
- .sort((a, b) => {
215
- // Sort by exact matches first, then by similarity
216
- if (b.exactMatches !== a.exactMatches) {
217
- return b.exactMatches - a.exactMatches;
218
- }
219
- return b.avgSimilarity - a.avgSimilarity;
220
- });
221
-
222
- console.log('\n🏆 Configuration Rankings:');
223
- console.log('-'.repeat(80));
224
-
225
- sortedConfigs.forEach((config, index) => {
226
- const params = allResults.find(r => r.configName === config.name)?.params;
227
- console.log(`\n${index + 1}. ${config.name}`);
228
- console.log(` Success Rate: ${config.successRate.toFixed(1)}% (${config.exactMatches}/${config.totalTests} exact matches)`);
229
- console.log(` Avg Similarity: ${(config.avgSimilarity * 100).toFixed(1)}%`);
230
- if (params) {
231
- console.log(` Parameters: temp=${params.temperature}, top_p=${params.top_p}, freq=${params.frequency_penalty}, pres=${params.presence_penalty}`);
232
- }
233
- });
234
-
235
- console.log('\n' + '='.repeat(80));
236
- console.log('🎯 RECOMMENDATION');
237
- console.log('='.repeat(80));
238
-
239
- const best = sortedConfigs[0];
240
- const bestParams = allResults.find(r => r.configName === best.name)?.params;
241
-
242
- console.log(`\n✨ Best Configuration: ${best.name}`);
243
- console.log(` Success Rate: ${best.successRate.toFixed(1)}%`);
244
- console.log(` Average Similarity: ${(best.avgSimilarity * 100).toFixed(1)}%`);
245
- if (bestParams) {
246
- console.log(`\n Recommended Parameters:`);
247
- console.log(` {`);
248
- console.log(` temperature: ${bestParams.temperature},`);
249
- console.log(` top_p: ${bestParams.top_p},`);
250
- console.log(` frequency_penalty: ${bestParams.frequency_penalty},`);
251
- console.log(` presence_penalty: ${bestParams.presence_penalty}`);
252
- console.log(` }`);
253
- }
254
-
255
- console.log('\n' + '='.repeat(80) + '\n');
256
-
257
- expect(allResults.length).toBeGreaterThan(0);
258
- });
259
- });
260
- });
@@ -1,123 +0,0 @@
1
- export interface SelectionScore {
2
- score: number;
3
- index: number;
4
- }
5
-
6
- export interface RankByQueryOptions {
7
- allowZeroScoreBackfill?: boolean;
8
- }
9
-
10
- function splitCompoundBoundaries(text: string): string {
11
- return text
12
- .replace(/([a-z0-9])([A-Z])/g, "$1 $2")
13
- .replace(/[_]+/g, " ");
14
- }
15
-
16
- function normalize(text: string): string {
17
- return splitCompoundBoundaries(text)
18
- .toLowerCase()
19
- .replace(/[^a-z0-9\s]/g, " ")
20
- .replace(/\s+/g, " ")
21
- .trim();
22
- }
23
-
24
- function tokenize(text: string): string[] {
25
- const normalized = normalize(text);
26
- if (!normalized) {
27
- return [];
28
- }
29
- return normalized.split(" ").filter((token) => token.length > 1);
30
- }
31
-
32
- function tokenSet(text: string): Set<string> {
33
- return new Set(tokenize(text));
34
- }
35
-
36
- export function scoreTextOverlap(query: string, candidate: string): number {
37
- const queryTokens = tokenSet(query);
38
- if (queryTokens.size === 0) {
39
- return 0;
40
- }
41
-
42
- const candidateTokens = tokenSet(candidate);
43
- if (candidateTokens.size === 0) {
44
- return 0;
45
- }
46
-
47
- let overlap = 0;
48
- queryTokens.forEach((token) => {
49
- if (candidateTokens.has(token)) {
50
- overlap += 1;
51
- }
52
- });
53
-
54
- return overlap / Math.sqrt(queryTokens.size * candidateTokens.size);
55
- }
56
-
57
- export function rankByQuery<T>(
58
- query: string,
59
- items: T[],
60
- textExtractor: (item: T) => string,
61
- fallbackCount: number,
62
- options?: RankByQueryOptions
63
- ): SelectionScore[] {
64
- const scores = items.map((item, index) => ({
65
- index,
66
- score: scoreTextOverlap(query, textExtractor(item)),
67
- }));
68
-
69
- scores.sort((a, b) => {
70
- if (b.score !== a.score) {
71
- return b.score - a.score;
72
- }
73
- return a.index - b.index;
74
- });
75
-
76
- const nonZero = scores.filter((entry) => entry.score > 0);
77
- const allowZeroScoreBackfill = options?.allowZeroScoreBackfill ?? true;
78
- if (!allowZeroScoreBackfill) {
79
- return nonZero;
80
- }
81
-
82
- if (nonZero.length >= fallbackCount) {
83
- return nonZero;
84
- }
85
-
86
- return scores;
87
- }
88
-
89
- export class BoundedCache<T> {
90
- private readonly cache = new Map<string, T>();
91
-
92
- constructor(private readonly maxSize: number) {}
93
-
94
- get(key: string): T | undefined {
95
- const existing = this.cache.get(key);
96
- if (existing === undefined) {
97
- return undefined;
98
- }
99
-
100
- this.cache.delete(key);
101
- this.cache.set(key, existing);
102
- return existing;
103
- }
104
-
105
- set(key: string, value: T): void {
106
- if (this.cache.has(key)) {
107
- this.cache.delete(key);
108
- }
109
- this.cache.set(key, value);
110
-
111
- while (this.cache.size > this.maxSize) {
112
- const firstKey = this.cache.keys().next().value;
113
- if (firstKey === undefined) {
114
- break;
115
- }
116
- this.cache.delete(firstKey);
117
- }
118
- }
119
- }
120
-
121
- export function normalizeCacheKey(text: string): string {
122
- return normalize(text);
123
- }
package/lib/nl/index.ts DELETED
@@ -1,19 +0,0 @@
1
- export { NlParser } from "./nl-parser";
2
- export { NlParserGroq } from "./nl-parser-groq";
3
- export type { SamplingParams } from "./nl-parser";
4
- export {
5
- PromptGen,
6
- BasicPromptGen,
7
- ExampleParse,
8
- DescriptorPromptGen,
9
- ContextAwareDescriptorPromptGen,
10
- PromptInteractionRecord,
11
- PromptInteractionRecorder,
12
- PromptSelectionConfig,
13
- PromptRuntimeContextSetter,
14
- RuntimeUiContext,
15
- AgentResponsePayload,
16
- ResponseSeverity,
17
- } from "./prompt-gen";
18
- export { getLLMMonitor } from "./llm-monitoring";
19
- export type { LLMMonitorTelemetryBridge } from "./llm-monitoring";