@jclaw/core 0.4.1 → 0.4.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/cli/jclaw.js
CHANGED
@@ -65,7 +65,8 @@ async function getLLMConfig() {
         return {
             apiBase: process.env.LLM_BASE_URL || 'https://api.openai.com/v1',
             apiKey: process.env.LLM_API_KEY,
-            model: process.env.LLM_MODEL_NAME || 'gpt-4'
+            model: process.env.LLM_MODEL_NAME || 'gpt-4',
+            temperature: process.env.LLM_TEMPERATURE ? parseFloat(process.env.LLM_TEMPERATURE) : 1.0
         };
     }
 
@@ -73,7 +74,8 @@ async function getLLMConfig() {
         return {
             apiBase: process.env.LLM_BASE_URL || 'https://api.openai.com/v1',
             apiKey: process.env.OPENAI_API_KEY,
-            model: process.env.LLM_MODEL_NAME || 'gpt-4'
+            model: process.env.LLM_MODEL_NAME || 'gpt-4',
+            temperature: process.env.LLM_TEMPERATURE ? parseFloat(process.env.LLM_TEMPERATURE) : 1.0
         };
     }
 
@@ -194,3 +196,5 @@ switch (command) {
         showHelp();
     }
 }
+
+// Add temperature env var support
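Both branches of getLLMConfig() gain the same temperature field, read from LLM_TEMPERATURE and defaulting to 1.0. Below is a standalone sketch of the parsing expression; the readTemperature helper is hypothetical (jclaw.js inlines the ternary), and process.env assumes a Node runtime.

// Hypothetical helper mirroring the expression added in 0.4.2;
// jclaw.js inlines this ternary inside getLLMConfig().
function readTemperature(env: Record<string, string | undefined> = process.env): number {
    return env.LLM_TEMPERATURE ? parseFloat(env.LLM_TEMPERATURE) : 1.0;
}

// parseFloat performs no validation, so malformed input flows through:
// readTemperature({ LLM_TEMPERATURE: '0.2' })   -> 0.2
// readTemperature({})                           -> 1.0
// readTemperature({ LLM_TEMPERATURE: 'warm' })  -> NaN

Note that nothing downstream checks the parsed value, so an invalid LLM_TEMPERATURE would reach the request body as NaN (which JSON.stringify serializes as null).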
package/dist/runtime/llm-client.d.ts
CHANGED

@@ -1,108 +1,22 @@
 /**
- * LLM Client
- *
- * Provides a generic interface for LLM API calls.
- * Supports OpenAI-compatible APIs and can be extended for other providers.
- *
- * @module @jclaw/core/runtime/llm-client
- */
-/**
- * Configuration for LLM client
+ * LLM Client - Support flexible configuration
  */
 export interface LLMClientConfig {
-
-    apiBase: string;
-    /** API key for authentication */
+    apiBase?: string;
     apiKey: string;
-
-    model: string;
-    /** Request timeout in milliseconds (default: 60000) */
-    timeout?: number;
-    /** Maximum tokens in response (default: 4096) */
-    maxTokens?: number;
-    /** Temperature for sampling (default: 0.7) */
+    model?: string;
     temperature?: number;
+    maxTokens?: number;
 }
-/**
- * Message structure for chat completions
- */
-export interface ChatMessage {
-    /** Role of the message sender */
-    role: 'system' | 'user' | 'assistant';
-    /** Content of the message */
-    content: string;
-}
-/**
- * Response from LLM completion
- */
-export interface LLMResponse {
-    /** Generated text content */
-    content: string;
-    /** Model used for generation */
-    model: string;
-    /** Token usage statistics */
-    usage?: {
-        promptTokens: number;
-        completionTokens: number;
-        totalTokens: number;
-    };
-    /** Duration of the request in milliseconds */
-    duration: number;
-}
-/**
- * LLM Client for making API calls to language models.
- *
- * Supports OpenAI-compatible APIs and provides a unified interface
- * for chat completions.
- *
- * @example
- * ```typescript
- * const client = new LLMClient({
- *   apiBase: 'https://api.openai.com/v1',
- *   apiKey: 'sk-...',
- *   model: 'gpt-4'
- * });
- *
- * const response = await client.chat([
- *   { role: 'user', content: 'Hello!' }
- * ]);
- * ```
- */
 export declare class LLMClient {
     private readonly config;
-    /**
-     * Create a new LLM client instance.
-     *
-     * @param config - Configuration options including API credentials
-     */
     constructor(config: LLMClientConfig);
-    /**
-     * Send a chat completion request to the LLM.
-     *
-     * @param messages - Array of chat messages
-     * @param options - Optional overrides for this specific request
-     * @returns Promise resolving to LLM response
-     * @throws Error if the API request fails
-     */
-    chat(messages: ChatMessage[], options?: Partial<Pick<LLMClientConfig, 'maxTokens' | 'temperature'>>): Promise<LLMResponse>;
-    /**
-     * Send a simple text prompt and get a response.
-     *
-     * @param prompt - The prompt text
-     * @param systemPrompt - Optional system prompt
-     * @returns Promise resolving to response content
-     */
-    complete(prompt: string, systemPrompt?: string): Promise<string>;
-    /**
-     * Get the configured model name.
-     */
-    get model(): string;
+    chat(messages: Array<{
+        role: string;
+        content: string;
+    }>): Promise<{
+        content: string;
+    }>;
 }
-/**
- * Create a new LLM client instance.
- *
- * @param config - Configuration options
- * @returns New LLMClient instance
- */
 export declare function createLLMClient(config: LLMClientConfig): LLMClient;
 //# sourceMappingURL=llm-client.d.ts.map
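With 0.4.2, apiKey is the only required field in LLMClientConfig; apiBase, model, temperature, and maxTokens become optional, while the ChatMessage and LLMResponse interfaces, complete(), and the model getter disappear from the public typings. Below is a minimal sketch of code that would compile against the new declarations, assuming a Node/ESM context; the deep-import path follows the old @module tag, since this diff does not show the package's export map.

import { createLLMClient } from '@jclaw/core/runtime/llm-client';

// 0.4.2: only apiKey is required; everything else falls back to
// defaults applied in the constructor.
const client = createLLMClient({ apiKey: process.env.LLM_API_KEY ?? '' });

// chat() now resolves to just { content: string } -- the usage and
// duration fields from the 0.4.1 LLMResponse type no longer exist.
const { content } = await client.chat([
    { role: 'user', content: 'Hello!' },
]);
console.log(content);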
package/dist/runtime/llm-client.d.ts.map
CHANGED

@@ -1 +1 @@
-{"version":3,"file":"llm-client.d.ts","sourceRoot":"","sources":["../../src/runtime/llm-client.ts"],"names":[],"mappings":"AAAA
+{"version":3,"file":"llm-client.d.ts","sourceRoot":"","sources":["../../src/runtime/llm-client.ts"],"names":[],"mappings":"AAAA;;GAEG;AAEH,MAAM,WAAW,eAAe;IAC9B,OAAO,CAAC,EAAE,MAAM,CAAC;IACjB,MAAM,EAAE,MAAM,CAAC;IACf,KAAK,CAAC,EAAE,MAAM,CAAC;IACf,WAAW,CAAC,EAAE,MAAM,CAAC;IACrB,SAAS,CAAC,EAAE,MAAM,CAAC;CACpB;AAED,qBAAa,SAAS;IACpB,OAAO,CAAC,QAAQ,CAAC,MAAM,CAErB;gBAEU,MAAM,EAAE,eAAe;IAU7B,IAAI,CAAC,QAAQ,EAAE,KAAK,CAAC;QAAE,IAAI,EAAE,MAAM,CAAC;QAAC,OAAO,EAAE,MAAM,CAAA;KAAE,CAAC,GAAG,OAAO,CAAC;QAAE,OAAO,EAAE,MAAM,CAAA;KAAE,CAAC;CA2B7F;AAED,wBAAgB,eAAe,CAAC,MAAM,EAAE,eAAe,GAAG,SAAS,CAElE"}
package/dist/runtime/llm-client.js
CHANGED

@@ -1,124 +1,44 @@
 /**
- * LLM Client
- *
- * Provides a generic interface for LLM API calls.
- * Supports OpenAI-compatible APIs and can be extended for other providers.
- *
- * @module @jclaw/core/runtime/llm-client
- */
-/**
- * LLM Client for making API calls to language models.
- *
- * Supports OpenAI-compatible APIs and provides a unified interface
- * for chat completions.
- *
- * @example
- * ```typescript
- * const client = new LLMClient({
- *   apiBase: 'https://api.openai.com/v1',
- *   apiKey: 'sk-...',
- *   model: 'gpt-4'
- * });
- *
- * const response = await client.chat([
- *   { role: 'user', content: 'Hello!' }
- * ]);
- * ```
+ * LLM Client - Support flexible configuration
  */
 export class LLMClient {
     config;
-    /**
-     * Create a new LLM client instance.
-     *
-     * @param config - Configuration options including API credentials
-     */
     constructor(config) {
         this.config = {
-
-
-
-
+            apiBase: config.apiBase || 'https://api.openai.com/v1',
+            apiKey: config.apiKey,
+            model: config.model || 'gpt-4',
+            temperature: config.temperature ?? 0.7,
+            maxTokens: config.maxTokens || 4096,
         };
     }
-    /**
-     * Send a chat completion request to the LLM.
-     *
-     * @param messages - Array of chat messages
-     * @param options - Optional overrides for this specific request
-     * @returns Promise resolving to LLM response
-     * @throws Error if the API request fails
-     */
-    async chat(messages, options) {
-        const startTime = Date.now();
+    async chat(messages) {
         try {
             const response = await fetch(`${this.config.apiBase}/chat/completions`, {
                 method: 'POST',
                 headers: {
                     'Content-Type': 'application/json',
-                    Authorization: `Bearer ${this.config.apiKey}`,
+                    'Authorization': `Bearer ${this.config.apiKey}`,
                 },
                 body: JSON.stringify({
                     model: this.config.model,
                     messages,
-
-
+                    temperature: this.config.temperature,
+                    max_tokens: this.config.maxTokens,
                 }),
-                signal: AbortSignal.timeout(this.config.timeout),
             });
             if (!response.ok) {
-                const
-                throw new Error(`LLM API error: ${response.status} - ${
+                const error = await response.text();
+                throw new Error(`LLM API error: ${response.status} - ${error}`);
             }
-            const data =
-
-            const duration = Date.now() - startTime;
-            return {
-                content,
-                model: data.model,
-                usage: data.usage
-                    ? {
-                        promptTokens: data.usage.prompt_tokens,
-                        completionTokens: data.usage.completion_tokens,
-                        totalTokens: data.usage.total_tokens,
-                    }
-                    : undefined,
-                duration,
-            };
+            const data = await response.json();
+            return { content: data.choices[0]?.message?.content || '' };
         }
         catch (error) {
-
-            throw new Error(`LLM request failed: ${message}`);
-        }
-    }
-    /**
-     * Send a simple text prompt and get a response.
-     *
-     * @param prompt - The prompt text
-     * @param systemPrompt - Optional system prompt
-     * @returns Promise resolving to response content
-     */
-    async complete(prompt, systemPrompt) {
-        const messages = [];
-        if (systemPrompt) {
-            messages.push({ role: 'system', content: systemPrompt });
+            throw new Error(`LLM request failed: ${error}`);
         }
-        messages.push({ role: 'user', content: prompt });
-        const response = await this.chat(messages);
-        return response.content;
-    }
-    /**
-     * Get the configured model name.
-     */
-    get model() {
-        return this.config.model;
     }
 }
-/**
- * Create a new LLM client instance.
- *
- * @param config - Configuration options
- * @returns New LLMClient instance
- */
 export function createLLMClient(config) {
     return new LLMClient(config);
 }
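Behaviorally, the compiled client now defaults temperature to 0.7 via config.temperature ?? 0.7, even though the CLI's getLLMConfig() defaults the same knob to 1.0, and it drops the request timeout (the AbortSignal.timeout(...) call) along with token-usage and duration reporting. Below is a sketch of the call-site impact under the new build; the deep-import path and member names are taken from the diff, and everything commented out is 0.4.1 API that no longer exists.

import { createLLMClient } from '@jclaw/core/runtime/llm-client';

const client = createLLMClient({ apiKey: 'sk-...' });

// Still works in 0.4.2, but now returns only { content }.
const res = await client.chat([{ role: 'user', content: 'ping' }]);
console.log(res.content);

// Removed in 0.4.2 -- each of these compiled and ran under 0.4.1:
// res.usage?.totalTokens                       // usage stats are gone
// res.duration                                 // request timing is gone
// await client.complete('ping', 'Be brief.');  // complete() is gone
// client.model;                                // the model getter is gone

// Also gone: the AbortSignal.timeout(...) request signal, so a hung
// endpoint now stalls chat() indefinitely unless the caller supplies
// its own timeout around the call.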