npcsh 0.3.31__py3-none-any.whl → 0.3.32__py3-none-any.whl
This diff compares the contents of two publicly released package versions as they appear in their respective public registries. It is provided for informational purposes only.
- npcsh/audio.py +540 -181
- npcsh/audio_gen.py +1 -0
- npcsh/cli.py +8 -10
- npcsh/conversation.py +14 -251
- npcsh/dataframes.py +13 -5
- npcsh/helpers.py +5 -0
- npcsh/image.py +2 -2
- npcsh/image_gen.py +38 -38
- npcsh/knowledge_graph.py +4 -4
- npcsh/llm_funcs.py +517 -349
- npcsh/npc_compiler.py +32 -23
- npcsh/npc_sysenv.py +5 -0
- npcsh/plonk.py +2 -2
- npcsh/response.py +131 -482
- npcsh/search.py +5 -1
- npcsh/serve.py +210 -203
- npcsh/shell.py +11 -25
- npcsh/shell_helpers.py +489 -99
- npcsh/stream.py +87 -554
- npcsh/video.py +5 -2
- npcsh/video_gen.py +69 -0
- npcsh-0.3.32.dist-info/METADATA +779 -0
- {npcsh-0.3.31.dist-info → npcsh-0.3.32.dist-info}/RECORD +49 -47
- npcsh-0.3.31.dist-info/METADATA +0 -1853
- {npcsh-0.3.31.data → npcsh-0.3.32.data}/data/npcsh/npc_team/bash_executer.tool +0 -0
- {npcsh-0.3.31.data → npcsh-0.3.32.data}/data/npcsh/npc_team/calculator.tool +0 -0
- {npcsh-0.3.31.data → npcsh-0.3.32.data}/data/npcsh/npc_team/celona.npc +0 -0
- {npcsh-0.3.31.data → npcsh-0.3.32.data}/data/npcsh/npc_team/code_executor.tool +0 -0
- {npcsh-0.3.31.data → npcsh-0.3.32.data}/data/npcsh/npc_team/corca.npc +0 -0
- {npcsh-0.3.31.data → npcsh-0.3.32.data}/data/npcsh/npc_team/eriane.npc +0 -0
- {npcsh-0.3.31.data → npcsh-0.3.32.data}/data/npcsh/npc_team/foreman.npc +0 -0
- {npcsh-0.3.31.data → npcsh-0.3.32.data}/data/npcsh/npc_team/generic_search.tool +0 -0
- {npcsh-0.3.31.data → npcsh-0.3.32.data}/data/npcsh/npc_team/image_generation.tool +0 -0
- {npcsh-0.3.31.data → npcsh-0.3.32.data}/data/npcsh/npc_team/lineru.npc +0 -0
- {npcsh-0.3.31.data → npcsh-0.3.32.data}/data/npcsh/npc_team/local_search.tool +0 -0
- {npcsh-0.3.31.data → npcsh-0.3.32.data}/data/npcsh/npc_team/maurawa.npc +0 -0
- {npcsh-0.3.31.data → npcsh-0.3.32.data}/data/npcsh/npc_team/npcsh.ctx +0 -0
- {npcsh-0.3.31.data → npcsh-0.3.32.data}/data/npcsh/npc_team/npcsh_executor.tool +0 -0
- {npcsh-0.3.31.data → npcsh-0.3.32.data}/data/npcsh/npc_team/raone.npc +0 -0
- {npcsh-0.3.31.data → npcsh-0.3.32.data}/data/npcsh/npc_team/screen_cap.tool +0 -0
- {npcsh-0.3.31.data → npcsh-0.3.32.data}/data/npcsh/npc_team/sibiji.npc +0 -0
- {npcsh-0.3.31.data → npcsh-0.3.32.data}/data/npcsh/npc_team/slean.npc +0 -0
- {npcsh-0.3.31.data → npcsh-0.3.32.data}/data/npcsh/npc_team/sql_executor.tool +0 -0
- {npcsh-0.3.31.data → npcsh-0.3.32.data}/data/npcsh/npc_team/test_pipeline.py +0 -0
- {npcsh-0.3.31.data → npcsh-0.3.32.data}/data/npcsh/npc_team/turnic.npc +0 -0
- {npcsh-0.3.31.data → npcsh-0.3.32.data}/data/npcsh/npc_team/welxor.npc +0 -0
- {npcsh-0.3.31.dist-info → npcsh-0.3.32.dist-info}/WHEEL +0 -0
- {npcsh-0.3.31.dist-info → npcsh-0.3.32.dist-info}/entry_points.txt +0 -0
- {npcsh-0.3.31.dist-info → npcsh-0.3.32.dist-info}/licenses/LICENSE +0 -0
- {npcsh-0.3.31.dist-info → npcsh-0.3.32.dist-info}/top_level.txt +0 -0
npcsh/stream.py
CHANGED
@@ -6,397 +6,41 @@
 ########
 ########
 
-from npcsh.npc_sysenv import get_system_message
 from typing import Any, Dict, Generator, List
 import os
-import anthropic
-from openai import OpenAI
-from google import genai
-
-from google.generativeai import types
-import google.generativeai as genai
 import base64
 import json
+import requests
+from PIL import Image
+from typing import Any, Dict, Generator, List, Union
 
+from pydantic import BaseModel
+from npcsh.npc_sysenv import (
+    get_system_message,
+    compress_image,
+    available_chat_models,
+    available_reasoning_models,
+)
 
-
-    messages,
-    model: str,
-    npc: Any = None,
-    tools: list = None,
-    images: List[Dict[str, str]] = None,
-    api_key: str = None,
-    tool_choice: Dict = None,
-    **kwargs,
-) -> Generator:
-    """
-    Streams responses from Anthropic, supporting images, tools, and yielding raw text chunks.
-
-    Args:
-        messages: List of conversation messages
-        model: Anthropic model to use
-        npc: Optional NPC context
-        tools: Optional list of tools to provide to Claude
-        images: Optional list of images to include
-        api_key: Anthropic API key
-        tool_choice: Optional tool choice configuration
-        **kwargs: Additional arguments for the API call
-    """
-    if api_key is None:
-        api_key = os.environ.get("ANTHROPIC_API_KEY")
-    client = anthropic.Anthropic(api_key=api_key)
-
-    if messages[0]["role"] == "system":
-        system_message = messages[0]["content"]
-        messages = messages[1:]
-    elif npc is not None:
-        system_message = get_system_message(npc)
-    else:
-        system_message = "You are a helpful assistant."
-
-    # Preprocess messages to ensure content is a list of dicts
-    for message in messages:
-        if isinstance(message["content"], str):
-            message["content"] = [{"type": "text", "text": message["content"]}]
-    # Add images if provided
-    if images:
-        for img in images:
-            with open(img["file_path"], "rb") as image_file:
-                img["data"] = base64.b64encode(image_file.read()).decode("utf-8")
-                img["media_type"] = "image/jpeg"
-                messages[-1]["content"].append(
-                    {
-                        "type": "image",
-                        "source": {
-                            "type": "base64",
-                            "media_type": img["media_type"],
-                            "data": img["data"],
-                        },
-                    }
-                )
-
-    # Prepare API call parameters
-    api_params = {
-        "model": model,
-        "messages": messages,
-        "max_tokens": kwargs.get("max_tokens", 8192),
-        "stream": True,
-        "system": system_message,
-    }
-
-    # Add tools if provided
-    if tools:
-        api_params["tools"] = tools
-
-    # Add tool choice if specified
-    if tool_choice:
-        api_params["tool_choice"] = tool_choice
-
-    # Make the API call
-    response = client.messages.create(**api_params)
-
-    for chunk in response:
-        yield chunk
-
-
-def process_anthropic_tool_stream(
-    stream, tool_map: Dict[str, callable], messages: List[Dict] = None
-) -> List[Dict]:
-    """
-    Process the Anthropic tool use stream
-    """
-    tool_results = []
-    current_tool = None
-    current_input = ""
-    context = messages[-1]["content"] if messages else ""
-
-    for chunk in stream:
-        # Look for tool use blocks
-        if (
-            chunk.type == "content_block_start"
-            and getattr(chunk, "content_block", None)
-            and chunk.content_block.type == "tool_use"
-        ):
-            current_tool = {
-                "id": chunk.content_block.id,
-                "name": chunk.content_block.name,
-            }
-            current_input = ""
-
-        # Collect input JSON deltas
-        if chunk.type == "content_block_delta" and hasattr(chunk.delta, "partial_json"):
-            current_input += chunk.delta.partial_json
-
-        # When tool input is complete
-        if chunk.type == "content_block_stop" and current_tool:
-            try:
-                # Parse the complete input
-                tool_input = json.loads(current_input) if current_input.strip() else {}
-
-                # Add context to tool input
-                tool_input["context"] = context
-
-                # Execute the tool
-                tool_func = tool_map.get(current_tool["name"])
-                if tool_func:
-                    result = tool_func(tool_input)
-                    tool_results.append(
-                        {
-                            "tool_name": current_tool["name"],
-                            "tool_input": tool_input,
-                            "tool_result": result,
-                        }
-                    )
-                else:
-                    tool_results.append(
-                        {
-                            "tool_name": current_tool["name"],
-                            "tool_input": tool_input,
-                            "error": f"Tool {current_tool['name']} not found",
-                        }
-                    )
-
-            except Exception as e:
-                tool_results.append(
-                    {
-                        "tool_name": current_tool["name"],
-                        "tool_input": current_input,
-                        "error": str(e),
-                    }
-                )
-
-            # Reset current tool
-            current_tool = None
-            current_input = ""
-
-    return tool_results
-
-
-from typing import List, Dict, Any, Literal
-
-ProviderType = Literal[
-    "openai", "anthropic", "ollama", "gemini", "deepseek", "openai-like"
-]
-
-
-def generate_tool_schema(
-    name: str,
-    description: str,
-    parameters: Dict[str, Any],
-    provider: ProviderType,
-    required: List[str] = None,
-) -> Dict[str, Any]:
-    """
-    Generate provider-specific function/tool schema from common parameters
-
-    Args:
-        name: Name of the function
-        description: Description of what the function does
-        parameters: Dict of parameter names and their properties
-        provider: Which provider to generate schema for
-        required: List of required parameter names
-    """
-    if required is None:
-        required = []
-
-    if provider == "openai":
-        return {
-            "type": "function",
-            "function": {
-                "name": name,
-                "description": description,
-                "parameters": {
-                    "type": "object",
-                    "properties": parameters,
-                    "required": required,
-                    "additionalProperties": False,
-                },
-                "strict": True,
-            },
-        }
-
-    elif provider == "anthropic":
-        return {
-            "name": name,
-            "description": description,
-            "parameters": {
-                "type": "object",
-                "properties": parameters,
-                "required": required,
-            },
-        }
-
-    elif provider == "ollama":
-        return {
-            "type": "function",
-            "function": {
-                "name": name,
-                "description": description,
-                "parameters": {
-                    "type": "object",
-                    "properties": parameters,
-                    "required": required,
-                },
-            },
-        }
-    elif provider == "gemini":
-        # Convert our generic tool schema to a Gemini function declaration
-        function = {
-            "name": name,
-            "description": description,
-            "parameters": {
-                "type": "OBJECT",
-                "properties": {
-                    k: {
-                        "type": v.get("type", "STRING").upper(),
-                        "description": v.get("description", ""),
-                        "enum": v.get("enum", None),
-                    }
-                    for k, v in parameters.items()
-                },
-                "required": required or [],
-            },
-        }
-
-        # Create a Tool object as shown in the example
-        return types.Tool(function_declarations=[function])
-
-    raise ValueError(f"Unknown provider: {provider}")
-
-
-def get_ollama_stream(
-    messages: List[Dict[str, str]],
-    model: str,
-    npc: Any = None,
-    tools: list = None,
-    images: list = None,
-    tool_choice: Dict = None,
-    **kwargs,
-) -> Generator:
-    """Streams responses from Ollama, supporting images and tools."""
-    import ollama
-
-    messages_copy = messages.copy()
-
-    # Handle images if provided
-    if images:
-        messages[-1]["images"] = [image["file_path"] for image in images]
-
-    # Add system message if not present
-    if messages_copy[0]["role"] != "system":
-        if npc is not None:
-            system_message = get_system_message(npc)
-            messages_copy.insert(0, {"role": "system", "content": system_message})
-
-    # Prepare API call parameters
-    api_params = {
-        "model": model,
-        "messages": messages_copy,
-        "stream": True,
-    }
-
-    # Add tools if provided
-    if tools:
-        api_params["tools"] = tools
-
-    # Make the API call
-    for chunk in ollama.chat(**api_params):
-        yield chunk
-
+from litellm import completion
 
-
-
-) -> List[Dict]:
-    """Process the Ollama tool use stream"""
-    tool_results = []
-    content = ""
+# import litellm
+# litellm._turn_on_debug()
 
-    # Build tool schema map
-    tool_schemas = {
-        tool["function"]["name"]: tool["function"]["parameters"] for tool in tools
-    }
-
-    def convert_params(tool_name: str, params: dict) -> dict:
-        """Convert parameters to the correct type based on schema"""
-        schema = tool_schemas.get(tool_name, {})
-        properties = schema.get("properties", {})
-
-        converted = {}
-        for key, value in params.items():
-            prop_schema = properties.get(key, {})
-            prop_type = prop_schema.get("type")
-
-            if prop_type == "integer" and isinstance(value, str):
-                try:
-                    converted[key] = int(value)
-                except (ValueError, TypeError):
-                    converted[key] = 0
-            else:
-                converted[key] = value
 
-
-
-    # Accumulate content
-    for chunk in stream:
-        if chunk.message and chunk.message.content:
-            content += chunk.message.content
-
-    # Process complete JSON objects when done
-    try:
-        # Find all JSON objects in the content
-        json_objects = []
-        current = ""
-        for char in content:
-            current += char
-            if current.count("{") == current.count("}") and current.strip().startswith(
-                "{"
-            ):
-                json_objects.append(current.strip())
-                current = ""
-
-        # Process each JSON object
-        for json_str in json_objects:
-            try:
-                tool_call = json.loads(json_str)
-                tool_name = tool_call.get("name")
-                tool_params = tool_call.get("parameters", {})
-
-                if tool_name in tool_map:
-                    # Convert parameters to correct types
-                    converted_params = convert_params(tool_name, tool_params)
-                    result = tool_map[tool_name](converted_params)
-                    tool_results.append(
-                        {
-                            "tool_name": tool_name,
-                            "tool_input": converted_params,
-                            "tool_result": result,
-                        }
-                    )
-            except Exception as e:
-                tool_results.append({"error": str(e), "partial_json": json_str})
-
-    except Exception as e:
-        tool_results.append({"error": str(e), "content": content})
-
-    return tool_results
-
-
-def get_openai_stream(
+def get_litellm_stream(
     messages: List[Dict[str, str]],
     model: str,
+    provider: str = None,
     npc: Any = None,
     tools: list = None,
     images: List[Dict[str, str]] = None,
     api_key: str = None,
+    api_url: str = None,
     tool_choice: Dict = None,
     **kwargs,
 ) -> Generator:
     """Streams responses from OpenAI, supporting images, tools and yielding raw text chunks."""
-
-    if api_key is None:
-        api_key = os.environ["OPENAI_API_KEY"]
-    client = OpenAI(api_key=api_key)
-
     system_message = get_system_message(npc) if npc else "You are a helpful assistant."
 
     if not messages:
@@ -429,7 +73,26 @@ def get_openai_stream(
     messages.append(last_user_message)
 
     # Prepare API call parameters
-
+    # print("provider", provider)
+    # print("model", model)
+    if provider is not None:
+        model_str = f"{provider}/{model}"
+    else:
+        model_str = model
+
+    api_params = {
+        "model": model_str,
+        "messages": messages,
+        "stream": True,
+    }
+    # print(api_params["model"])
+
+    if api_key is not None and provider == "openai-like":
+        print(api_key)
+        api_params["api_key"] = api_key
+
+    if api_url is not None and provider == "openai-like":
+        api_params["api_url"] = api_url
 
     # Add tools if provided
     if tools:
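The hunk above switches model addressing to litellm's provider-prefixed routing: when a provider is given, the model string becomes "provider/model" and litellm dispatches the call, so no per-provider client objects are needed. A minimal standalone sketch of that routing, with an illustrative model name (any litellm-supported provider prefix works the same way):

from litellm import completion

# "ollama/llama3" is an illustrative "<provider>/<model>" string;
# litellm picks the backend from the prefix.
for chunk in completion(
    model="ollama/llama3",
    messages=[{"role": "user", "content": "hello"}],
    stream=True,
):
    delta = chunk.choices[0].delta.content  # OpenAI-style streaming chunks
    if delta:
        print(delta, end="")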
@@ -438,16 +101,33 @@ def get_openai_stream(
     # Add tool choice if specified
     if tool_choice:
         api_params["tool_choice"] = tool_choice
-
-
+    if kwargs:
+        for key, value in kwargs.items():
+            if key in [
+                "stream",
+                "stop",
+                "temperature",
+                "top_p",
+                "max_tokens",
+                "max_completion_tokens",
+                "tools",
+                "tool_choice",
+                "extra_headers",
+                "parallel_tool_calls",
+                "response_format",
+                "user",
+            ]:
+                api_params[key] = value
+    # print(api_params)
+    stream = completion(**api_params)
 
     for chunk in stream:
         yield chunk
 
 
-def
+def process_litellm_tool_stream(stream, tool_map: Dict[str, callable]) -> List[Dict]:
     """
-    Process the
+    Process the litellm tool use stream
     """
     final_tool_calls = {}
     tool_results = []
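This hunk replaces the direct client call with a single completion(**api_params) and forwards only an allow-list of keyword arguments to it. A small sketch of the same filtering pattern in isolation; the filter_kwargs helper is hypothetical, written only to show that unrecognized kwargs are silently dropped rather than forwarded:

# Hypothetical helper mirroring the allow-list logic added above.
ALLOWED_KEYS = {
    "stream", "stop", "temperature", "top_p", "max_tokens",
    "max_completion_tokens", "tools", "tool_choice", "extra_headers",
    "parallel_tool_calls", "response_format", "user",
}

def filter_kwargs(kwargs: dict) -> dict:
    # Unknown keys (e.g. a typo like "temprature") are dropped, not passed on.
    return {k: v for k, v in kwargs.items() if k in ALLOWED_KEYS}

assert filter_kwargs({"temperature": 0.2, "bogus": 1}) == {"temperature": 0.2}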
@@ -515,186 +195,39 @@ def process_openai_tool_stream(stream, tool_map: Dict[str, callable]) -> List[Dict]:
     return tool_results
 
 
-
-    messages: List[Dict[str, str]],
-    model: str,
-    api_url: str,
-    npc: Any = None,
-    images: List[Dict[str, str]] = None,
-    tools: list = None,
-    api_key: str = None,
-    **kwargs,
-) -> List[Dict[str, str]]:
-    """
-    Function Description:
-        This function generates a conversation using the OpenAI API.
-    Args:
-        messages (List[Dict[str, str]]): The list of messages in the conversation.
-        model (str): The model to use for the conversation.
-    Keyword Args:
-        npc (Any): The NPC object.
-        api_key (str): The API key for accessing the OpenAI API.
-    Returns:
-        List[Dict[str, str]]: The list of messages in the conversation.
-    """
-    if api_key is None:
-        api_key = "dummy"
-    client = OpenAI(api_key=api_key, base_url=api_url)
-
-    system_message = get_system_message(npc) if npc else "You are a helpful assistant."
-
-    if messages is None:
-        messages = []
-
-    # Ensure the system message is at the beginning
-    if not any(msg["role"] == "system" for msg in messages):
-        messages.insert(0, {"role": "system", "content": system_message})
-
-    # messages should already include the user's latest message
-
-    # Make the API call with the messages including the latest user input
-    completion = client.chat.completions.create(
-        model=model, messages=messages, stream=True, **kwargs
-    )
-
-    for chunk in completion:
-        yield chunk
+from typing import List, Dict, Any, Literal
 
 
-def
-
-
-
-
-
-    **kwargs,
-) -> List[Dict[str, str]]:
+def generate_tool_schema(
+    name: str,
+    description: str,
+    parameters: Dict[str, Any],
+    required: List[str] = None,
+) -> Dict[str, Any]:
     """
-
-
+    Generate provider-specific function/tool schema from common parameters
+
     Args:
-
-
-
-
-
-    Returns:
-        List[Dict[str, str]]: The list of messages in the conversation.
+        name: Name of the function
+        description: Description of what the function does
+        parameters: Dict of parameter names and their properties
+        provider: Which provider to generate schema for
+        required: List of required parameter names
     """
-    if
-
-    client = OpenAI(api_key=api_key, base_url="https://api.deepseek.com")
-
-    system_message = get_system_message(npc) if npc else ""
-
-    messages_copy = messages.copy()
-    if messages_copy[0]["role"] != "system":
-        messages_copy.insert(0, {"role": "system", "content": system_message})
-
-    completion = client.chat.completions.create(
-        model=model,
-        messages=messages,
-        tools=tools,
-        stream=True,
-        **kwargs,  # Include any additional keyword arguments
-    )
-
-    for response in completion:
-        yield response
-
-
-def get_gemini_stream(
-    messages: List[Dict[str, str]],
-    model: str,
-    npc: Any = None,
-    tools: list = None,
-    api_key: str = None,
-    **kwargs,
-) -> Generator:
-    """Streams responses from Gemini, supporting tools and yielding chunks."""
-    import google.generativeai as genai
-
-    if api_key is None:
-        api_key = os.environ["GEMINI_API_KEY"]
-
-    # Configure the Gemini API
-    genai.configure(api_key=api_key)
-
-    # Create model instance
-    model = genai.GenerativeModel(model_name=model)
-
-    # Convert all messages to contents list
-    contents = []
-    for msg in messages:
-        if msg["role"] != "system":
-            contents.append(
-                {
-                    "role": "user" if msg["role"] == "user" else "model",
-                    "parts": [{"text": msg["content"]}],
-                }
-            )
-
-    try:
-        # Generate streaming response with full history
-        response = model.generate_content(
-            contents=contents, tools=tools if tools else None, stream=True, **kwargs
-        )
-
-        for chunk in response:
-            yield chunk
-
-    except Exception as e:
-        print(f"Error in Gemini stream: {str(e)}")
-        yield None
-
-
-def process_gemini_tool_stream(
-    stream, tool_map: Dict[str, callable], tools: List[Dict]
-) -> List[Dict]:
-    """Process the Gemini tool stream and execute tools."""
-    tool_results = []
-
-    try:
-        for response in stream:
-            if not response.candidates:
-                continue
-
-            for candidate in response.candidates:
-                if not candidate.content or not candidate.content.parts:
-                    continue
-
-                for part in candidate.content.parts:
-                    if hasattr(part, "function_call"):
-                        try:
-                            tool_name = part.function_call.name
-                            # Convert MapComposite to dict
-                            tool_args = dict(part.function_call.args)
-
-                            if tool_name in tool_map:
-                                result = tool_map[tool_name](tool_args)
-                                tool_results.append(
-                                    {
-                                        "tool_name": tool_name,
-                                        "tool_input": tool_args,
-                                        "tool_result": result,
-                                    }
-                                )
-                        except Exception as e:
-                            tool_results.append(
-                                {
-                                    "error": str(e),
-                                    "tool_name": (
-                                        tool_name
-                                        if "tool_name" in locals()
-                                        else "unknown"
-                                    ),
-                                    "tool_input": (
-                                        tool_args if "tool_args" in locals() else None
-                                    ),
-                                }
-                            )
-
-    except Exception as e:
-        tool_results.append({"error": f"Stream processing error: {str(e)}"})
+    if required is None:
+        required = []
 
-    return
+    return {
+        "type": "function",
+        "function": {
+            "name": name,
+            "description": description,
+            "parameters": {
+                "type": "object",
+                "properties": parameters,
+                "required": required,
+                "additionalProperties": False,
+            },
+            "strict": True,
+        },
+    }
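Taken together, these hunks collapse the per-provider streaming functions (Anthropic, OpenAI, OpenAI-like, Ollama, DeepSeek, Gemini) into a single litellm-backed entry point. A hedged usage sketch of that entry point, assuming npcsh 0.3.32 is installed and the relevant provider API key is set in the environment; the model name is illustrative:

from npcsh.stream import get_litellm_stream

messages = [{"role": "user", "content": "Say hello."}]

# get_litellm_stream yields litellm's OpenAI-style chunks, so the consumer
# reads incremental text from choices[0].delta.content.
for chunk in get_litellm_stream(messages, model="gpt-4o-mini", provider="openai"):
    delta = chunk.choices[0].delta.content
    if delta:
        print(delta, end="")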