wirejs-resources 0.1.149-llm → 0.1.151-llm

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,33 +1,105 @@
  import { Resource } from '../resource.js';
- export type LLMMessage = {
-     role: 'assistant' | 'user';
+ export type ToolCall = {
+     /** Unique identifier for this tool call */
+     id?: string;
+     /** Function to be called */
+     function: {
+         /** Name of the function to call */
+         name: string;
+         /** Arguments to pass to the function (JSON object) */
+         arguments: Record<string, any>;
+     };
+ };
+ /** User message in conversation */
+ export type UserMessage = {
+     /** Always 'user' for user messages */
+     role: 'user';
+     /** Text content of the message */
+     content: string;
+ };
+ /** Assistant message type - what LLM methods actually return */
+ export type AssistantMessage = {
+     /** Always 'assistant' for LLM responses */
+     role: 'assistant';
+     /** Text content of the message */
      content: string;
+     /** Tool calls requested by the assistant */
+     tool_calls?: ToolCall[];
  };
+ /** Tool execution result message */
+ export type ToolMessage = {
+     /** Always 'tool' for tool execution results */
+     role: 'tool';
+     /** Result content from the tool execution */
+     content: string;
+     /** Name of the tool that was executed */
+     tool_name: string;
+     /** ID linking this result to the original tool call */
+     tool_call_id: string;
+ };
+ /** Union of all possible message types in conversation history */
+ export type LLMMessage = UserMessage | AssistantMessage | ToolMessage;
  export type LLMChunk = {
      created_at: string;
      message: LLMMessage;
      done: boolean;
  };
+ type ToolParameterProperty = {
+     /** The property's data type */
+     type: string;
+     /** Human-readable description of this property */
+     description?: string;
+     /** Allowed values for this property (for constrained inputs) */
+     enum?: string[] | number[];
+     /** Schema for array elements when this property's type is 'array' */
+     items?: {
+         type: string;
+         description?: string;
+     };
+     /** Minimum value for numeric properties */
+     minimum?: number;
+     /** Maximum value for numeric properties */
+     maximum?: number;
+ };
+ export type ToolDefinition = {
+     /** The name of the tool function */
+     name: string;
+     /** Human-readable description of what the tool does */
+     description: string;
+     /** JSON Schema definition for the tool's parameters - defines named function parameters */
+     parameters: {
+         /** Always 'object' - functions must have named parameters */
+         type: 'object';
+         /** Named function parameters and their schemas */
+         properties: Record<string, ToolParameterProperty>;
+         /** Array of parameter names that must be provided */
+         required?: string[];
+     };
+ };
  export type ContinueConversationOptions = {
      history: LLMMessage[];
      onChunk?: (chunk: LLMChunk) => void | Promise<void>;
      timeoutSeconds?: number;
      systemPrompt?: string;
      models?: string[];
+     tools?: ToolDefinition[];
      targetContextSize?: number;
  };
  export declare class LLM extends Resource {
      models: string[];
      systemPrompt: string | undefined;
      targetContextSize: number;
+     tools: ToolDefinition[];
      constructor(scope: Resource | string, id: string, options: {
          models: string[];
          systemPrompt?: string;
          targetContextSize?: number;
+         tools?: ToolDefinition[];
      });
      private stream;
      private checkOllamaAvailable;
      private checkModelExists;
      private createStreamedString;
-     continueConversation({ history, onChunk, timeoutSeconds, systemPrompt, targetContextSize, models, }: ContinueConversationOptions): Promise<LLMMessage>;
+     continueConversation({ history, onChunk, timeoutSeconds, systemPrompt, targetContextSize, models, tools, }: ContinueConversationOptions): Promise<AssistantMessage>;
  }
+ export {};
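
The declaration hunk above replaces the old two-role LLMMessage with a discriminated union (UserMessage | AssistantMessage | ToolMessage) and introduces ToolCall and ToolDefinition for tool calling. A minimal sketch of how a caller might use the new types, assuming they are re-exported from the package root (adjust the import path to wherever your build exposes them):

import type { ToolDefinition, LLMMessage } from 'wirejs-resources';

// A weather-lookup tool expressed in the new ToolDefinition shape.
const getWeather: ToolDefinition = {
    name: 'get_weather',
    description: 'Look up the current temperature for a city',
    parameters: {
        type: 'object',
        properties: {
            city: { type: 'string', description: 'City name, e.g. "Berlin"' },
            units: { type: 'string', enum: ['celsius', 'fahrenheit'] },
        },
        required: ['city'],
    },
};

// The union discriminates on `role`, so narrowing gives typed access
// to role-specific fields without casts.
function describe(message: LLMMessage): string {
    switch (message.role) {
        case 'user':
            return `user: ${message.content}`;
        case 'assistant':
            return message.tool_calls
                ? `assistant requested ${message.tool_calls.length} tool call(s)`
                : `assistant: ${message.content}`;
        case 'tool':
            return `tool ${message.tool_name} returned: ${message.content}`;
    }
}

The hunks that follow are against the compiled implementation.
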
@@ -3,11 +3,13 @@ export class LLM extends Resource {
      models;
      systemPrompt;
      targetContextSize;
+     tools;
      constructor(scope, id, options) {
          super(scope, id);
          this.models = options.models;
          this.systemPrompt = options.systemPrompt;
          this.targetContextSize = options.targetContextSize ?? 16384;
+         this.tools = options.tools ?? [];
      }
      async stream(response, onChunk) {
          if (!response.ok || !response.body) {
@@ -17,6 +19,7 @@ export class LLM extends Resource {
          const decoder = new TextDecoder('utf-8');
          let role = 'assistant';
          let content = '';
+         let tool_calls;
          while (true) {
              const { value, done } = await reader.read();
              if (done)
@@ -32,8 +35,20 @@ export class LLM extends Resource {
              }
              role = chunk.message.role;
              content += chunk.message.content;
+             // Capture tool calls if present in the chunk
+             if ('tool_calls' in chunk.message && chunk.message.tool_calls) {
+                 tool_calls = chunk.message.tool_calls;
+             }
+         }
+         // Build the assistant result message
+         const result = {
+             role: 'assistant',
+             content
+         };
+         if (tool_calls) {
+             result.tool_calls = tool_calls;
          }
-         return { role, content };
+         return result;
      }
      async checkOllamaAvailable() {
          try {
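
In the streaming path above, stream() now accumulates tool_calls alongside the concatenated content and returns a single assistant-shaped result; if several chunks carry tool_calls, the last one seen wins, since the value is assigned rather than appended. Callers can also observe tool requests as they arrive. A sketch of an onChunk handler, assuming the server includes tool_calls on streamed messages as typed by LLMChunk:

import type { LLMChunk } from 'wirejs-resources';

// Print text as it streams; note any tool requests along the way.
const onChunk = (chunk: LLMChunk) => {
    const msg = chunk.message;
    process.stdout.write(msg.content);
    if (msg.role === 'assistant' && msg.tool_calls) {
        const names = msg.tool_calls.map(c => c.function.name);
        console.log(`\n[model requested tools: ${names.join(', ')}]`);
    }
};
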
@@ -79,7 +94,7 @@ export class LLM extends Resource {
              content: message
          };
      }
-     async continueConversation({ history, onChunk, timeoutSeconds, systemPrompt, targetContextSize, models, }) {
+     async continueConversation({ history, onChunk, timeoutSeconds, systemPrompt, targetContextSize, models, tools, }) {
          const ollamaAvailable = await this.checkOllamaAvailable();
          if (!ollamaAvailable) {
              return this.createStreamedString('Ollama is not running locally. Please install and start Ollama:\n\n' +
@@ -102,6 +117,16 @@ export class LLM extends Resource {
              }, timeoutSeconds * 1000);
          }
          const finalSystemPrompt = systemPrompt ?? this.systemPrompt;
+         const finalTools = tools ?? this.tools;
+         // Transform our ToolDefinition format to Ollama's expected format
+         const ollamaTools = finalTools.map(tool => ({
+             type: "function",
+             function: {
+                 name: tool.name,
+                 description: tool.description,
+                 parameters: tool.parameters
+             }
+         }));
          try {
              const response = await fetch('http://localhost:11434/api/chat', {
                  method: 'POST',
@@ -118,6 +143,7 @@ export class LLM extends Resource {
                          ...history
                      ],
                      stream,
+                     ...(ollamaTools.length > 0 ? { tools: ollamaTools } : {}),
                      options: {
                          num_ctx: targetContextSize ?? this.targetContextSize
                      }
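
Taken together, the two hunks above map each ToolDefinition into the { type: 'function', function: { ... } } envelope that Ollama's /api/chat endpoint expects, and spread a tools key into the request body only when at least one tool is configured, leaving tool-less requests unchanged. Roughly the body that gets POSTed, with illustrative values for the fields the diff does not show (model, message contents):

// Approximate request body for http://localhost:11434/api/chat
// when one tool is configured (values illustrative):
const body = {
    model: 'llama3.1',
    messages: [
        { role: 'system', content: 'You are a helpful assistant.' },
        { role: 'user', content: 'What is the weather in Berlin?' },
    ],
    stream: true,
    // Present only because ollamaTools.length > 0:
    tools: [{
        type: 'function',
        function: {
            name: 'get_weather',
            description: 'Look up the current temperature for a city',
            parameters: {
                type: 'object',
                properties: { city: { type: 'string' } },
                required: ['city'],
            },
        },
    }],
    options: { num_ctx: 16384 },
};

Depending on the Ollama version, tool_calls may only appear on non-streamed responses, so it is worth verifying streaming behavior against your server before relying on it.
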
@@ -133,7 +159,18 @@ export class LLM extends Resource {
              }
              else {
                  const chunk = await response.json();
-                 return chunk.message;
+                 // Ensure we return an assistant message
+                 const message = chunk.message;
+                 if (message.role === 'assistant') {
+                     return message;
+                 }
+                 else {
+                     // Fallback: create assistant message
+                     return {
+                         role: 'assistant',
+                         content: message.content
+                     };
+                 }
              }
          }
      }
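
The pattern these changes enable is the usual tool loop: call continueConversation, execute whatever the returned AssistantMessage requests, append one ToolMessage per call (tool_call_id ties each result back to its request), and repeat until the model answers in plain text. Because the implementation resolves tools ?? this.tools per call, tools passed to continueConversation override the instance-level set for that call only. A hedged end-to-end sketch; the root import, the 'llama3.1' model name, and the runTool and getWeather helpers are all stand-ins you would supply:

import { LLM } from 'wirejs-resources';
import type { LLMMessage, ToolCall, ToolDefinition } from 'wirejs-resources';

// Hypothetical pieces supplied by the caller:
declare const getWeather: ToolDefinition;      // e.g. the definition sketched earlier
declare function runTool(call: ToolCall): Promise<string>;

const llm = new LLM('app', 'assistant', {
    models: ['llama3.1'],
    tools: [getWeather],
});

async function chat(prompt: string): Promise<string> {
    const history: LLMMessage[] = [{ role: 'user', content: prompt }];
    // Keep going until the model answers without requesting more tools.
    while (true) {
        const reply = await llm.continueConversation({ history });
        history.push(reply);
        if (!reply.tool_calls?.length) {
            return reply.content;
        }
        for (const call of reply.tool_calls) {
            history.push({
                role: 'tool',
                content: await runTool(call),
                tool_name: call.function.name,
                // ToolCall.id is optional, so fall back to the function name.
                tool_call_id: call.id ?? call.function.name,
            });
        }
    }
}
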
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
    "name": "wirejs-resources",
-   "version": "0.1.149-llm",
+   "version": "0.1.151-llm",
    "description": "Basic services and server-side resources for wirejs apps",
    "type": "module",
    "main": "./dist/index.js",