recursive-llm-ts 2.0.11 → 3.0.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +60 -43
- package/bin/rlm-go +0 -0
- package/dist/bridge-factory.d.ts +1 -1
- package/dist/bridge-factory.js +44 -14
- package/dist/bridge-interface.d.ts +1 -0
- package/dist/bunpy-bridge.d.ts +3 -4
- package/dist/bunpy-bridge.js +11 -143
- package/dist/go-bridge.d.ts +5 -0
- package/dist/go-bridge.js +136 -0
- package/dist/rlm-bridge.js +28 -5
- package/go/README.md +347 -0
- package/go/cmd/rlm/main.go +63 -0
- package/go/go.mod +12 -0
- package/go/go.sum +57 -0
- package/go/integration_test.sh +169 -0
- package/go/internal/rlm/benchmark_test.go +168 -0
- package/go/internal/rlm/errors.go +83 -0
- package/go/internal/rlm/openai.go +128 -0
- package/go/internal/rlm/parser.go +53 -0
- package/go/internal/rlm/parser_test.go +202 -0
- package/go/internal/rlm/prompt.go +68 -0
- package/go/internal/rlm/repl.go +260 -0
- package/go/internal/rlm/repl_test.go +291 -0
- package/go/internal/rlm/rlm.go +142 -0
- package/go/internal/rlm/types.go +108 -0
- package/go/test_mock.sh +90 -0
- package/go/test_rlm.sh +41 -0
- package/go/test_simple.sh +78 -0
- package/package.json +6 -9
- package/scripts/build-go-binary.js +41 -0
- package/recursive-llm/pyproject.toml +0 -70
- package/recursive-llm/src/rlm/__init__.py +0 -14
- package/recursive-llm/src/rlm/core.py +0 -322
- package/recursive-llm/src/rlm/parser.py +0 -93
- package/recursive-llm/src/rlm/prompts.py +0 -50
- package/recursive-llm/src/rlm/repl.py +0 -235
- package/recursive-llm/src/rlm/types.py +0 -37
- package/scripts/install-python-deps.js +0 -72
package/go/integration_test.sh (new file)
@@ -0,0 +1,169 @@
#!/bin/bash
# Integration test with real LLM API
# Set OPENAI_API_KEY environment variable before running

set -e

echo "🧪 RLM Go Integration Tests"
echo "================================"
echo ""

# Check if binary exists
if [ ! -f "./rlm" ]; then
    echo "❌ Binary not found. Building..."
    go build -o rlm ./cmd/rlm
    echo "✅ Built binary"
fi

# Check for API key
if [ -z "$OPENAI_API_KEY" ]; then
    echo "❌ OPENAI_API_KEY environment variable not set"
    echo ""
    echo "Usage:"
    echo "  export OPENAI_API_KEY='sk-...'"
    echo "  ./integration_test.sh"
    exit 1
fi

echo "✅ API key found"
echo ""

# Test 1: Simple query
echo "📝 Test 1: Simple context analysis"
echo "-----------------------------------"
RESULT=$(cat <<EOF | ./rlm
{
  "model": "gpt-4o-mini",
  "query": "How many times does the word 'test' appear?",
  "context": "This is a test. Another test here. Final test.",
  "config": {
    "api_key": "$OPENAI_API_KEY",
    "max_iterations": 10
  }
}
EOF
)

if [ $? -eq 0 ]; then
    echo "✅ Test 1 passed"
    echo "Result: $(echo $RESULT | jq -r '.result')"
    echo "Stats: $(echo $RESULT | jq '.stats')"
else
    echo "❌ Test 1 failed"
    exit 1
fi
echo ""

# Test 2: Count/aggregation
echo "📝 Test 2: Counting errors in logs"
echo "-----------------------------------"
LOG_CONTEXT='2024-01-01 INFO: System started
2024-01-01 ERROR: Connection failed
2024-01-01 INFO: Retrying
2024-01-01 ERROR: Timeout
2024-01-01 ERROR: Failed again
2024-01-01 INFO: Success'

RESULT=$(./rlm <<EOF
{
  "model": "gpt-4o-mini",
  "query": "Count how many ERROR entries are in the logs",
  "context": "$LOG_CONTEXT",
  "config": {
    "api_key": "$OPENAI_API_KEY",
    "max_iterations": 10
  }
}
EOF
)

if [ $? -eq 0 ]; then
    echo "✅ Test 2 passed"
    echo "Result: $(echo $RESULT | jq -r '.result')"
    ITERATIONS=$(echo $RESULT | jq '.stats.iterations')
    echo "Iterations: $ITERATIONS"
else
    echo "❌ Test 2 failed"
    exit 1
fi
echo ""

# Test 3: Long context
echo "📝 Test 3: Long context processing"
echo "-----------------------------------"
LONG_CONTEXT=$(cat <<EOF
Chapter 1: The Beginning

It was a dark and stormy night. The hero embarked on a journey.
$(for i in {1..100}; do echo "Line $i of the story continues here with more content."; done)

Chapter 2: The Middle

The hero faced many challenges.
$(for i in {1..100}; do echo "Line $i describes the adventure."; done)

Chapter 3: The End

Finally, the hero succeeded and returned home triumphant.
EOF
)

RESULT=$(cat <<EOF | ./rlm
{
  "model": "gpt-4o-mini",
  "query": "How many chapters are in this document?",
  "context": "$LONG_CONTEXT",
  "config": {
    "api_key": "$OPENAI_API_KEY",
    "max_iterations": 15
  }
}
EOF
)

if [ $? -eq 0 ]; then
    echo "✅ Test 3 passed"
    echo "Result: $(echo $RESULT | jq -r '.result')"
    LLM_CALLS=$(echo $RESULT | jq '.stats.llm_calls')
    echo "LLM calls: $LLM_CALLS"
else
    echo "❌ Test 3 failed"
    exit 1
fi
echo ""

# Test 4: Different model configurations
echo "📝 Test 4: Two-model configuration"
echo "-----------------------------------"
RESULT=$(cat <<EOF | ./rlm
{
  "model": "gpt-4o",
  "query": "What is this text about?",
  "context": "Artificial intelligence and machine learning are transforming technology.",
  "config": {
    "recursive_model": "gpt-4o-mini",
    "api_key": "$OPENAI_API_KEY",
    "max_iterations": 10,
    "temperature": 0.3
  }
}
EOF
)

if [ $? -eq 0 ]; then
    echo "✅ Test 4 passed"
    echo "Result: $(echo $RESULT | jq -r '.result')"
else
    echo "❌ Test 4 failed"
    exit 1
fi
echo ""

echo "================================"
echo "✅ All integration tests passed!"
echo ""
echo "Summary:"
echo "  - Simple queries work"
echo "  - Counting/aggregation works"
echo "  - Long context works"
echo "  - Model configuration works"
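The script above drives the rlm binary through a small JSON protocol on stdin/stdout. For orientation, the request and response shapes it implies can be sketched as Go structs; this is an inference from the fields the script sends and the fields it reads back with jq (.result, .stats.iterations, .stats.llm_calls), not a copy of the package's own definitions in package/go/internal/rlm/types.go, so any detail not visible in the script is an assumption.

// Illustrative sketch only: shapes inferred from integration_test.sh, not taken from types.go.
package main

import (
    "encoding/json"
    "fmt"
    "os"
)

type rlmConfig struct {
    APIKey         string  `json:"api_key"`
    RecursiveModel string  `json:"recursive_model,omitempty"`
    MaxIterations  int     `json:"max_iterations,omitempty"`
    Temperature    float64 `json:"temperature,omitempty"`
}

type rlmRequest struct {
    Model   string    `json:"model"`
    Query   string    `json:"query"`
    Context string    `json:"context"`
    Config  rlmConfig `json:"config"`
}

type rlmStats struct {
    Iterations int `json:"iterations"` // read by the script as .stats.iterations
    LLMCalls   int `json:"llm_calls"`  // read by the script as .stats.llm_calls
}

type rlmResponse struct {
    Result string   `json:"result"`
    Stats  rlmStats `json:"stats"`
}

func main() {
    // Build the same request as Test 1 and print it; in the script this JSON is piped into ./rlm.
    req := rlmRequest{
        Model:   "gpt-4o-mini",
        Query:   "How many times does the word 'test' appear?",
        Context: "This is a test. Another test here. Final test.",
        Config:  rlmConfig{APIKey: os.Getenv("OPENAI_API_KEY"), MaxIterations: 10},
    }
    out, _ := json.Marshal(req)
    fmt.Println(string(out))
}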
package/go/internal/rlm/benchmark_test.go (new file)
@@ -0,0 +1,168 @@
package rlm

import (
    "strings"
    "testing"
)

// Benchmark parser performance
func BenchmarkIsFinal(b *testing.B) {
    responses := []string{
        `FINAL("answer")`,
        `FINAL_VAR(result)`,
        `x = 1`,
        `console.log("test")`,
    }
    b.ResetTimer()
    for i := 0; i < b.N; i++ {
        IsFinal(responses[i%len(responses)])
    }
}

func BenchmarkExtractFinal(b *testing.B) {
    response := `FINAL("This is a test answer with some content")`
    b.ResetTimer()
    for i := 0; i < b.N; i++ {
        extractFinal(response)
    }
}

func BenchmarkParseResponse(b *testing.B) {
    response := `FINAL("Test answer")`
    env := map[string]interface{}{
        "result": "test",
    }
    b.ResetTimer()
    for i := 0; i < b.N; i++ {
        ParseResponse(response, env)
    }
}

// Benchmark REPL performance
func BenchmarkREPLSimpleExecution(b *testing.B) {
    repl := NewREPLExecutor()
    code := `console.log("Hello World")`
    env := map[string]interface{}{}

    b.ResetTimer()
    for i := 0; i < b.N; i++ {
        _, _ = repl.Execute(code, env)
    }
}

func BenchmarkREPLContextAccess(b *testing.B) {
    repl := NewREPLExecutor()
    code := `console.log(context.slice(0, 10))`
    env := map[string]interface{}{
        "context": strings.Repeat("Lorem ipsum dolor sit amet. ", 1000),
    }

    b.ResetTimer()
    for i := 0; i < b.N; i++ {
        _, _ = repl.Execute(code, env)
    }
}

func BenchmarkREPLRegex(b *testing.B) {
    repl := NewREPLExecutor()
    code := `const matches = re.findall("ERROR", context); console.log(matches.length)`
    context := strings.Repeat("INFO ERROR WARNING ", 100)
    env := map[string]interface{}{
        "context": context,
        "re":      NewRegexHelper(),
    }

    b.ResetTimer()
    for i := 0; i < b.N; i++ {
        _, _ = repl.Execute(code, env)
    }
}

func BenchmarkREPLJSBootstrap(b *testing.B) {
    repl := NewREPLExecutor()
    code := `const arr = range(100); const s = sum(arr); console.log(s)`
    env := map[string]interface{}{}

    b.ResetTimer()
    for i := 0; i < b.N; i++ {
        _, _ = repl.Execute(code, env)
    }
}

// Benchmark regex helper
func BenchmarkRegexFindall(b *testing.B) {
    re := NewRegexHelper()
    text := strings.Repeat("ERROR INFO WARNING ERROR ", 100)

    b.ResetTimer()
    for i := 0; i < b.N; i++ {
        re["findall"]("ERROR", text)
    }
}

func BenchmarkRegexSearch(b *testing.B) {
    re := NewRegexHelper()
    text := strings.Repeat("INFO WARNING ", 50) + "ERROR" + strings.Repeat(" INFO WARNING", 50)

    b.ResetTimer()
    for i := 0; i < b.N; i++ {
        re["search"]("ERROR", text)
    }
}

// Benchmark config parsing
func BenchmarkConfigFromMap(b *testing.B) {
    config := map[string]interface{}{
        "recursive_model": "gpt-4o-mini",
        "api_base":        "https://api.openai.com/v1",
        "api_key":         "sk-test",
        "max_depth":       5,
        "max_iterations":  30,
        "temperature":     0.7,
        "extra_param":     "value",
    }

    b.ResetTimer()
    for i := 0; i < b.N; i++ {
        ConfigFromMap(config)
    }
}

// Benchmark code extraction
func BenchmarkExtractCode(b *testing.B) {
    code := "```javascript\nconsole.log('test')\nconst x = 42\n```"

    b.ResetTimer()
    for i := 0; i < b.N; i++ {
        extractCode(code)
    }
}

// Memory allocation benchmarks
func BenchmarkREPLMemoryAllocation(b *testing.B) {
    repl := NewREPLExecutor()
    code := `const arr = []; for (let i = 0; i < 1000; i++) arr.push(i); console.log(arr.length)`
    env := map[string]interface{}{}

    b.ReportAllocs()
    b.ResetTimer()
    for i := 0; i < b.N; i++ {
        _, _ = repl.Execute(code, env)
    }
}

func BenchmarkLargeContextAccess(b *testing.B) {
    repl := NewREPLExecutor()
    // Simulate 100KB context
    largeContext := strings.Repeat("Lorem ipsum dolor sit amet, consectetur adipiscing elit. ", 2000)
    code := `const first = context.slice(0, 100); const last = context.slice(-100); console.log(first.length + last.length)`
    env := map[string]interface{}{
        "context": largeContext,
    }

    b.ReportAllocs()
    b.ResetTimer()
    for i := 0; i < b.N; i++ {
        _, _ = repl.Execute(code, env)
    }
}
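These benchmarks run with the standard toolchain, e.g. go test -bench=. -benchmem ./internal/rlm/ from the package/go directory. BenchmarkLargeContextAccess fixes the context at roughly 100 KB; to see how Execute scales across sizes, a sub-benchmark sweep is the usual pattern. The sketch below is illustrative only and not part of the package: it reuses NewREPLExecutor and Execute from this diff and would need "fmt" added to the test file's import block.

// Hypothetical extension (not in the diff): sweep context sizes with sub-benchmarks.
func BenchmarkContextSizes(b *testing.B) {
    for _, size := range []int{1_000, 10_000, 100_000} {
        b.Run(fmt.Sprintf("%dB", size), func(b *testing.B) {
            repl := NewREPLExecutor()
            env := map[string]interface{}{
                "context": strings.Repeat("a", size),
            }
            code := `console.log(context.length)`
            b.ReportAllocs()
            b.ResetTimer()
            for i := 0; i < b.N; i++ {
                _, _ = repl.Execute(code, env)
            }
        })
    }
}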
package/go/internal/rlm/errors.go (new file)
@@ -0,0 +1,83 @@
package rlm

import "fmt"

// RLMError is the base error type for all RLM errors
type RLMError struct {
    Message string
    Cause   error
}

func (e *RLMError) Error() string {
    if e.Cause != nil {
        return fmt.Sprintf("%s: %v", e.Message, e.Cause)
    }
    return e.Message
}

func (e *RLMError) Unwrap() error {
    return e.Cause
}

// MaxIterationsError is returned when max iterations are exceeded
type MaxIterationsError struct {
    MaxIterations int
    *RLMError
}

func NewMaxIterationsError(maxIterations int) *MaxIterationsError {
    return &MaxIterationsError{
        MaxIterations: maxIterations,
        RLMError: &RLMError{
            Message: fmt.Sprintf("max iterations (%d) exceeded without FINAL()", maxIterations),
        },
    }
}

// MaxDepthError is returned when max recursion depth is exceeded
type MaxDepthError struct {
    MaxDepth int
    *RLMError
}

func NewMaxDepthError(maxDepth int) *MaxDepthError {
    return &MaxDepthError{
        MaxDepth: maxDepth,
        RLMError: &RLMError{
            Message: fmt.Sprintf("max recursion depth (%d) exceeded", maxDepth),
        },
    }
}

// REPLError is returned when REPL execution fails
type REPLError struct {
    Code string
    *RLMError
}

func NewREPLError(message string, code string, cause error) *REPLError {
    return &REPLError{
        Code: code,
        RLMError: &RLMError{
            Message: message,
            Cause:   cause,
        },
    }
}

// APIError is returned when LLM API calls fail
type APIError struct {
    StatusCode int
    Response   string
    *RLMError
}

func NewAPIError(statusCode int, response string) *APIError {
    return &APIError{
        StatusCode: statusCode,
        Response:   response,
        RLMError: &RLMError{
            Message: fmt.Sprintf("LLM request failed (%d): %s", statusCode, response),
        },
    }
}
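Because every concrete error type embeds *RLMError, which provides Error and Unwrap, callers can branch on the specific failure with errors.As from the standard library. A minimal sketch, assuming it lives in the same package as the types above with "errors" and "fmt" imported; the describeErr helper is hypothetical and not part of the diff.

// Hypothetical helper (not in the diff): map RLM errors to short human-readable strings.
func describeErr(err error) string {
    var apiErr *APIError
    if errors.As(err, &apiErr) {
        return fmt.Sprintf("LLM API failure, HTTP %d", apiErr.StatusCode)
    }
    var iterErr *MaxIterationsError
    if errors.As(err, &iterErr) {
        return fmt.Sprintf("no FINAL() after %d iterations", iterErr.MaxIterations)
    }
    var replErr *REPLError
    if errors.As(err, &replErr) {
        return "REPL execution failed: " + replErr.Message
    }
    return err.Error()
}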
package/go/internal/rlm/openai.go (new file)
@@ -0,0 +1,128 @@
package rlm

import (
    "bytes"
    "encoding/json"
    "errors"
    "fmt"
    "io"
    "net/http"
    "strings"
    "time"
)

type Message struct {
    Role    string `json:"role"`
    Content string `json:"content"`
}

type ChatRequest struct {
    Model       string
    Messages    []Message
    APIBase     string
    APIKey      string
    Timeout     int
    ExtraParams map[string]interface{}
}

type chatResponse struct {
    Choices []struct {
        Message struct {
            Content string `json:"content"`
        } `json:"message"`
    } `json:"choices"`
    Error *struct {
        Message string `json:"message"`
    } `json:"error"`
}

var (
    // defaultHTTPClient is a shared HTTP client with connection pooling
    defaultHTTPClient = &http.Client{
        Timeout: 60 * time.Second,
        Transport: &http.Transport{
            MaxIdleConns:        100,
            MaxIdleConnsPerHost: 10,
            IdleConnTimeout:     90 * time.Second,
        },
    }
)

func CallChatCompletion(request ChatRequest) (string, error) {
    endpoint := buildEndpoint(request.APIBase)
    payload := map[string]interface{}{
        "model":    request.Model,
        "messages": request.Messages,
    }

    for key, value := range request.ExtraParams {
        payload[key] = value
    }

    body, err := json.Marshal(payload)
    if err != nil {
        return "", err
    }

    // Use shared client with connection pooling
    client := defaultHTTPClient
    if request.Timeout > 0 {
        // Create custom client for non-default timeout
        client = &http.Client{
            Timeout:   time.Duration(request.Timeout) * time.Second,
            Transport: defaultHTTPClient.Transport,
        }
    }

    req, err := http.NewRequest(http.MethodPost, endpoint, bytes.NewReader(body))
    if err != nil {
        return "", err
    }
    req.Header.Set("Content-Type", "application/json")
    if request.APIKey != "" {
        req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", request.APIKey))
    }

    resp, err := client.Do(req)
    if err != nil {
        return "", err
    }
    defer resp.Body.Close()

    responseBody, err := io.ReadAll(resp.Body)
    if err != nil {
        return "", err
    }

    if resp.StatusCode >= http.StatusBadRequest {
        return "", NewAPIError(resp.StatusCode, strings.TrimSpace(string(responseBody)))
    }

    var parsed chatResponse
    if err := json.Unmarshal(responseBody, &parsed); err != nil {
        return "", err
    }

    if parsed.Error != nil && parsed.Error.Message != "" {
        return "", errors.New(parsed.Error.Message)
    }

    if len(parsed.Choices) == 0 {
        return "", errors.New("no choices returned by LLM")
    }

    return parsed.Choices[0].Message.Content, nil
}

func buildEndpoint(apiBase string) string {
    base := strings.TrimSpace(apiBase)
    if base == "" {
        base = "https://api.openai.com/v1"
    }

    if strings.Contains(base, "/chat/completions") {
        return base
    }

    return strings.TrimRight(base, "/") + "/chat/completions"
}
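CallChatCompletion marshals the payload, posts it to the resolved /chat/completions endpoint, and returns the first choice's message content, converting HTTP-level failures into APIError. A minimal illustrative caller is sketched below, assumed to live inside the package (internal/rlm cannot be imported from outside the module); the model name, prompt, and temperature are placeholder values, not defaults taken from the package.

// Hypothetical usage sketch (not in the diff).
func askOnce(apiKey, prompt string) (string, error) {
    return CallChatCompletion(ChatRequest{
        Model:  "gpt-4o-mini",
        APIKey: apiKey,
        // APIBase left empty: buildEndpoint falls back to https://api.openai.com/v1.
        Messages: []Message{
            {Role: "system", Content: "You are a helpful assistant."},
            {Role: "user", Content: prompt},
        },
        // Extra parameters are merged verbatim into the request payload.
        ExtraParams: map[string]interface{}{"temperature": 0.2},
    })
}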
package/go/internal/rlm/parser.go (new file)
@@ -0,0 +1,53 @@
package rlm

import (
    "fmt"
    "regexp"
    "strings"
)

var (
    finalTripleDouble = regexp.MustCompile(`(?s)FINAL\s*\(\s*"""(.*)"""`)
    finalTripleSingle = regexp.MustCompile(`(?s)FINAL\s*\(\s*'''(.*)'''`)
    finalDouble       = regexp.MustCompile(`(?s)FINAL\s*\(\s*"([^"]*)"`)
    finalSingle       = regexp.MustCompile(`(?s)FINAL\s*\(\s*'([^']*)'`)
    finalVar          = regexp.MustCompile(`FINAL_VAR\s*\(\s*(\w+)\s*\)`)
    finalAny          = regexp.MustCompile(`FINAL\(|FINAL_VAR\(`)
)

func IsFinal(response string) bool {
    return finalAny.MatchString(response)
}

func ParseResponse(response string, env map[string]interface{}) (string, bool) {
    answer, ok := extractFinal(response)
    if ok {
        return answer, true
    }

    return extractFinalVar(response, env)
}

func extractFinal(response string) (string, bool) {
    matchers := []*regexp.Regexp{finalTripleDouble, finalTripleSingle, finalDouble, finalSingle}
    for _, matcher := range matchers {
        match := matcher.FindStringSubmatch(response)
        if len(match) > 1 {
            return strings.TrimSpace(match[1]), true
        }
    }
    return "", false
}

func extractFinalVar(response string, env map[string]interface{}) (string, bool) {
    match := finalVar.FindStringSubmatch(response)
    if len(match) < 2 {
        return "", false
    }

    value, ok := env[match[1]]
    if !ok {
        return "", false
    }
    return fmt.Sprint(value), true
}
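IsFinal and ParseResponse together decide when the recursive loop stops: a FINAL("...") response carries the answer literally, while FINAL_VAR(name) pulls it out of the REPL environment. A small usage sketch follows, assumed to sit in the same package; the resolve helper is hypothetical and not part of the diff.

// Hypothetical usage sketch (not in the diff).
func resolve(response string, env map[string]interface{}) (string, bool) {
    if !IsFinal(response) {
        // Not a terminating response: the caller keeps iterating and
        // executes the returned code in the REPL instead.
        return "", false
    }
    // FINAL("...") yields the quoted text; FINAL_VAR(x) yields fmt.Sprint(env["x"]).
    return ParseResponse(response, env)
}

For example, resolve("FINAL_VAR(count)", map[string]interface{}{"count": 3}) returns "3" and true, while a plain code response returns false and leaves the loop running.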