llmjs2 1.0.1 → 1.0.5

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (46)
  1. package/README.md +39 -436
  2. package/grapes.jpg +0 -0
  3. package/index.d.ts +43 -0
  4. package/index.js +465 -0
  5. package/package.json +7 -47
  6. package/spec.txt +73 -0
  7. package/test-generate-tools-suite.js +100 -0
  8. package/test-generate-tools.js +57 -0
  9. package/test-generate.js +31 -0
  10. package/test.js +33 -0
  11. package/LICENSE +0 -21
  12. package/dist/agent.d.ts +0 -80
  13. package/dist/agent.d.ts.map +0 -1
  14. package/dist/agent.js +0 -199
  15. package/dist/agent.js.map +0 -1
  16. package/dist/index.d.ts +0 -74
  17. package/dist/index.d.ts.map +0 -1
  18. package/dist/index.js +0 -191
  19. package/dist/index.js.map +0 -1
  20. package/dist/providers/base.d.ts +0 -58
  21. package/dist/providers/base.d.ts.map +0 -1
  22. package/dist/providers/base.js +0 -149
  23. package/dist/providers/base.js.map +0 -1
  24. package/dist/providers/index.d.ts +0 -8
  25. package/dist/providers/index.d.ts.map +0 -1
  26. package/dist/providers/index.js +0 -7
  27. package/dist/providers/index.js.map +0 -1
  28. package/dist/providers/ollama.d.ts +0 -42
  29. package/dist/providers/ollama.d.ts.map +0 -1
  30. package/dist/providers/ollama.js +0 -260
  31. package/dist/providers/ollama.js.map +0 -1
  32. package/dist/providers/openai.d.ts +0 -38
  33. package/dist/providers/openai.d.ts.map +0 -1
  34. package/dist/providers/openai.js +0 -322
  35. package/dist/providers/openai.js.map +0 -1
  36. package/dist/types.d.ts +0 -191
  37. package/dist/types.d.ts.map +0 -1
  38. package/dist/types.js +0 -6
  39. package/dist/types.js.map +0 -1
  40. package/src/agent.ts +0 -295
  41. package/src/index.ts +0 -268
  42. package/src/providers/base.ts +0 -216
  43. package/src/providers/index.ts +0 -8
  44. package/src/providers/ollama.ts +0 -429
  45. package/src/providers/openai.ts +0 -521
  46. package/src/types.ts +0 -243
package/src/types.ts DELETED
@@ -1,243 +0,0 @@
1
- /**
2
- * Unified type definitions for llmjs2
3
- * Enterprise-grade LLM abstraction layer supporting OpenAI and Ollama
4
- */
5
-
6
- /**
7
- * Supported LLM providers
8
- */
9
- export type ProviderType = 'openai' | 'ollama';
10
-
11
- /**
12
- * Role of a message in a conversation
13
- */
14
- export type MessageRole = 'system' | 'user' | 'assistant';
15
-
16
- /**
17
- * A single message in a conversation
18
- */
19
- export interface Message {
20
- role: MessageRole;
21
- content: string;
22
- }
23
-
24
- /**
25
- * Tool/function definition for function calling
26
- */
27
- export interface ToolBase {
28
- type?: 'function';
29
- description?: string;
30
- parameters?: Record<string, unknown>;
31
- handler?: (args: Record<string, unknown>) => string | Promise<string>;
32
- }
33
-
34
- export interface ToolV1 extends ToolBase {
35
- function: {
36
- name: string;
37
- description?: string;
38
- parameters?: Record<string, unknown>;
39
- };
40
- }
41
-
42
- export interface ToolV2 extends ToolBase {
43
- name: string;
44
- }
45
-
46
- export type Tool = ToolV1 | ToolV2;
47
-
48
- /**
49
- * Configuration for a completion request
50
- */
51
- export interface CompletionRequest {
52
- /** Model identifier (e.g., 'openai/gpt-4', 'ollama/mistral') */
53
- model: string;
54
-
55
- /** API key or authentication token */
56
- apiKey?: string;
57
-
58
- /** Base URL for the API (mainly for Ollama) */
59
- baseUrl?: string;
60
-
61
- /** Messages for the completion */
62
- messages: Message[];
63
-
64
- /** Maximum tokens to generate */
65
- maxTokens?: number;
66
-
67
- /** Sampling temperature (0-2 for OpenAI, typically 0-1) */
68
- temperature?: number;
69
-
70
- /** Top-p (nucleus sampling) */
71
- topP?: number;
72
-
73
- /** Top-k sampling parameter */
74
- topK?: number;
75
-
76
- /** Frequency penalty (-2 to 2 for OpenAI) */
77
- frequencyPenalty?: number;
78
-
79
- /** Presence penalty (-2 to 2 for OpenAI) */
80
- presencePenalty?: number;
81
-
82
- /** Stop sequences */
83
- stop?: string[];
84
-
85
- /** Available tools for function calling */
86
- tools?: Tool[];
87
-
88
- /** Force tool usage */
89
- toolChoice?: 'auto' | 'required' | string;
90
-
91
- /** Custom headers to send with requests */
92
- headers?: Record<string, string>;
93
-
94
- /** Request timeout in milliseconds */
95
- timeout?: number;
96
-
97
- /** Retry configuration */
98
- retry?: {
99
- maxRetries?: number;
100
- backoffMultiplier?: number;
101
- initialDelayMs?: number;
102
- };
103
- }
104
-
105
- /**
106
- * Completion response
107
- */
108
- export interface CompletionResponse {
109
- /** Generated text content */
110
- content: string;
111
-
112
- /** The model used */
113
- model: string;
114
-
115
- /** Stop reason */
116
- stopReason?: 'stop_sequence' | 'length' | 'tool_calls' | 'end_turn' | string;
117
-
118
- /** Total tokens used (if available) */
119
- usage?: {
120
- promptTokens?: number;
121
- completionTokens?: number;
122
- totalTokens?: number;
123
- };
124
-
125
- /** Raw provider response for advanced use cases */
126
- raw?: unknown;
127
-
128
- /** Finish reason from provider */
129
- finishReason?: string;
130
-
131
- /** Tool calls if function calling was used */
132
- toolCalls?: Array<{
133
- id?: string;
134
- name: string;
135
- arguments: Record<string, unknown>;
136
- }>;
137
- }
138
-
139
- /**
140
- * Streaming completion chunk
141
- */
142
- export interface CompletionChunk {
143
- /** Delta content */
144
- delta: string;
145
-
146
- /** Stop reason if stream ended */
147
- stopReason?: string;
148
-
149
- /** Usage at end of stream */
150
- usage?: {
151
- promptTokens?: number;
152
- completionTokens?: number;
153
- totalTokens?: number;
154
- };
155
- }
156
-
157
- /**
158
- * Provider configuration options
159
- */
160
- export interface ProviderConfig {
161
- /** Provider type */
162
- type: ProviderType;
163
-
164
- /** API key */
165
- apiKey?: string;
166
-
167
- /** Base URL for API */
168
- baseUrl?: string;
169
-
170
- /** Default model */
171
- model?: string;
172
-
173
- /** Request timeout */
174
- timeout?: number;
175
-
176
- /** Retry configuration */
177
- retry?: {
178
- maxRetries?: number;
179
- backoffMultiplier?: number;
180
- initialDelayMs?: number;
181
- };
182
-
183
- /** Custom headers */
184
- headers?: Record<string, string>;
185
- }
186
-
187
- /**
188
- * Error response from provider
189
- */
190
- export interface ProviderError extends Error {
191
- name: string;
192
- message: string;
193
- code?: string;
194
- statusCode?: number;
195
- details?: unknown;
196
- retryable?: boolean;
197
- }
198
-
199
- /**
200
- * Provider interface that all providers must implement
201
- */
202
- export interface IProvider {
203
- /** Create a completion request */
204
- complete(request: CompletionRequest): Promise<CompletionResponse>;
205
-
206
- /** Stream a completion request */
207
- completeStream(
208
- request: CompletionRequest
209
- ): AsyncIterable<CompletionChunk>;
210
-
211
- /** Validate that the configuration is correct */
212
- validate(): Promise<void>;
213
-
214
- /** Parse model string (e.g., 'openai/gpt-4' -> 'gpt-4') */
215
- parseModel(model: string): string;
216
-
217
- /** Enable or disable debug mode */
218
- setDebug(debug: boolean): void;
219
-
220
- /** Set custom logger function */
221
- setLogger(logger: (level: string, message: string, data?: unknown) => void): void;
222
- }
223
-
224
- /**
225
- * Global completion options
226
- */
227
- export interface CompletionOptions {
228
- /** Enable request logging for debugging */
229
- debug?: boolean;
230
-
231
- /** Custom logger function */
232
- logger?: (level: string, message: string, data?: unknown) => void;
233
-
234
- /** Global timeout override */
235
- globalTimeout?: number;
236
-
237
- /** Retry configuration override */
238
- globalRetry?: {
239
- maxRetries?: number;
240
- backoffMultiplier?: number;
241
- initialDelayMs?: number;
242
- };
243
- }