npcsh 0.3.31__py3-none-any.whl → 1.0.0__py3-none-any.whl
This diff compares publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their public registries.
- npcsh/_state.py +942 -0
- npcsh/alicanto.py +1074 -0
- npcsh/guac.py +785 -0
- npcsh/mcp_helpers.py +357 -0
- npcsh/mcp_npcsh.py +822 -0
- npcsh/mcp_server.py +184 -0
- npcsh/npc.py +218 -0
- npcsh/npcsh.py +1161 -0
- npcsh/plonk.py +387 -269
- npcsh/pti.py +234 -0
- npcsh/routes.py +958 -0
- npcsh/spool.py +315 -0
- npcsh/wander.py +550 -0
- npcsh/yap.py +573 -0
- npcsh-1.0.0.dist-info/METADATA +596 -0
- npcsh-1.0.0.dist-info/RECORD +21 -0
- {npcsh-0.3.31.dist-info → npcsh-1.0.0.dist-info}/WHEEL +1 -1
- npcsh-1.0.0.dist-info/entry_points.txt +9 -0
- {npcsh-0.3.31.dist-info → npcsh-1.0.0.dist-info}/licenses/LICENSE +1 -1
- npcsh/audio.py +0 -210
- npcsh/cli.py +0 -545
- npcsh/command_history.py +0 -566
- npcsh/conversation.py +0 -291
- npcsh/data_models.py +0 -46
- npcsh/dataframes.py +0 -163
- npcsh/embeddings.py +0 -168
- npcsh/helpers.py +0 -641
- npcsh/image.py +0 -298
- npcsh/image_gen.py +0 -79
- npcsh/knowledge_graph.py +0 -1006
- npcsh/llm_funcs.py +0 -2027
- npcsh/load_data.py +0 -83
- npcsh/main.py +0 -5
- npcsh/model_runner.py +0 -189
- npcsh/npc_compiler.py +0 -2870
- npcsh/npc_sysenv.py +0 -383
- npcsh/npc_team/assembly_lines/test_pipeline.py +0 -181
- npcsh/npc_team/corca.npc +0 -13
- npcsh/npc_team/foreman.npc +0 -7
- npcsh/npc_team/npcsh.ctx +0 -11
- npcsh/npc_team/sibiji.npc +0 -4
- npcsh/npc_team/templates/analytics/celona.npc +0 -0
- npcsh/npc_team/templates/hr_support/raone.npc +0 -0
- npcsh/npc_team/templates/humanities/eriane.npc +0 -4
- npcsh/npc_team/templates/it_support/lineru.npc +0 -0
- npcsh/npc_team/templates/marketing/slean.npc +0 -4
- npcsh/npc_team/templates/philosophy/maurawa.npc +0 -0
- npcsh/npc_team/templates/sales/turnic.npc +0 -4
- npcsh/npc_team/templates/software/welxor.npc +0 -0
- npcsh/npc_team/tools/bash_executer.tool +0 -32
- npcsh/npc_team/tools/calculator.tool +0 -8
- npcsh/npc_team/tools/code_executor.tool +0 -16
- npcsh/npc_team/tools/generic_search.tool +0 -27
- npcsh/npc_team/tools/image_generation.tool +0 -25
- npcsh/npc_team/tools/local_search.tool +0 -149
- npcsh/npc_team/tools/npcsh_executor.tool +0 -9
- npcsh/npc_team/tools/screen_cap.tool +0 -27
- npcsh/npc_team/tools/sql_executor.tool +0 -26
- npcsh/response.py +0 -623
- npcsh/search.py +0 -248
- npcsh/serve.py +0 -1460
- npcsh/shell.py +0 -538
- npcsh/shell_helpers.py +0 -3529
- npcsh/stream.py +0 -700
- npcsh/video.py +0 -49
- npcsh-0.3.31.data/data/npcsh/npc_team/bash_executer.tool +0 -32
- npcsh-0.3.31.data/data/npcsh/npc_team/calculator.tool +0 -8
- npcsh-0.3.31.data/data/npcsh/npc_team/celona.npc +0 -0
- npcsh-0.3.31.data/data/npcsh/npc_team/code_executor.tool +0 -16
- npcsh-0.3.31.data/data/npcsh/npc_team/corca.npc +0 -13
- npcsh-0.3.31.data/data/npcsh/npc_team/eriane.npc +0 -4
- npcsh-0.3.31.data/data/npcsh/npc_team/foreman.npc +0 -7
- npcsh-0.3.31.data/data/npcsh/npc_team/generic_search.tool +0 -27
- npcsh-0.3.31.data/data/npcsh/npc_team/image_generation.tool +0 -25
- npcsh-0.3.31.data/data/npcsh/npc_team/lineru.npc +0 -0
- npcsh-0.3.31.data/data/npcsh/npc_team/local_search.tool +0 -149
- npcsh-0.3.31.data/data/npcsh/npc_team/maurawa.npc +0 -0
- npcsh-0.3.31.data/data/npcsh/npc_team/npcsh.ctx +0 -11
- npcsh-0.3.31.data/data/npcsh/npc_team/npcsh_executor.tool +0 -9
- npcsh-0.3.31.data/data/npcsh/npc_team/raone.npc +0 -0
- npcsh-0.3.31.data/data/npcsh/npc_team/screen_cap.tool +0 -27
- npcsh-0.3.31.data/data/npcsh/npc_team/sibiji.npc +0 -4
- npcsh-0.3.31.data/data/npcsh/npc_team/slean.npc +0 -4
- npcsh-0.3.31.data/data/npcsh/npc_team/sql_executor.tool +0 -26
- npcsh-0.3.31.data/data/npcsh/npc_team/test_pipeline.py +0 -181
- npcsh-0.3.31.data/data/npcsh/npc_team/turnic.npc +0 -4
- npcsh-0.3.31.data/data/npcsh/npc_team/welxor.npc +0 -0
- npcsh-0.3.31.dist-info/METADATA +0 -1853
- npcsh-0.3.31.dist-info/RECORD +0 -76
- npcsh-0.3.31.dist-info/entry_points.txt +0 -3
- {npcsh-0.3.31.dist-info → npcsh-1.0.0.dist-info}/top_level.txt +0 -0
npcsh/stream.py
DELETED
@@ -1,700 +0,0 @@
-########
-########
-########
-########
-######## STREAM
-########
-########
-
-from npcsh.npc_sysenv import get_system_message
-from typing import Any, Dict, Generator, List
-import os
-import anthropic
-from openai import OpenAI
-from google import genai
-
-from google.generativeai import types
-import google.generativeai as genai
-import base64
-import json
-
-
-def get_anthropic_stream(
-    messages,
-    model: str,
-    npc: Any = None,
-    tools: list = None,
-    images: List[Dict[str, str]] = None,
-    api_key: str = None,
-    tool_choice: Dict = None,
-    **kwargs,
-) -> Generator:
-    """
-    Streams responses from Anthropic, supporting images, tools, and yielding raw text chunks.
-
-    Args:
-        messages: List of conversation messages
-        model: Anthropic model to use
-        npc: Optional NPC context
-        tools: Optional list of tools to provide to Claude
-        images: Optional list of images to include
-        api_key: Anthropic API key
-        tool_choice: Optional tool choice configuration
-        **kwargs: Additional arguments for the API call
-    """
-    if api_key is None:
-        api_key = os.environ.get("ANTHROPIC_API_KEY")
-    client = anthropic.Anthropic(api_key=api_key)
-
-    if messages[0]["role"] == "system":
-        system_message = messages[0]["content"]
-        messages = messages[1:]
-    elif npc is not None:
-        system_message = get_system_message(npc)
-    else:
-        system_message = "You are a helpful assistant."
-
-    # Preprocess messages to ensure content is a list of dicts
-    for message in messages:
-        if isinstance(message["content"], str):
-            message["content"] = [{"type": "text", "text": message["content"]}]
-    # Add images if provided
-    if images:
-        for img in images:
-            with open(img["file_path"], "rb") as image_file:
-                img["data"] = base64.b64encode(image_file.read()).decode("utf-8")
-                img["media_type"] = "image/jpeg"
-                messages[-1]["content"].append(
-                    {
-                        "type": "image",
-                        "source": {
-                            "type": "base64",
-                            "media_type": img["media_type"],
-                            "data": img["data"],
-                        },
-                    }
-                )
-
-    # Prepare API call parameters
-    api_params = {
-        "model": model,
-        "messages": messages,
-        "max_tokens": kwargs.get("max_tokens", 8192),
-        "stream": True,
-        "system": system_message,
-    }
-
-    # Add tools if provided
-    if tools:
-        api_params["tools"] = tools
-
-    # Add tool choice if specified
-    if tool_choice:
-        api_params["tool_choice"] = tool_choice
-
-    # Make the API call
-    response = client.messages.create(**api_params)
-
-    for chunk in response:
-        yield chunk
-
-
-def process_anthropic_tool_stream(
-    stream, tool_map: Dict[str, callable], messages: List[Dict] = None
-) -> List[Dict]:
-    """
-    Process the Anthropic tool use stream
-    """
-    tool_results = []
-    current_tool = None
-    current_input = ""
-    context = messages[-1]["content"] if messages else ""
-
-    for chunk in stream:
-        # Look for tool use blocks
-        if (
-            chunk.type == "content_block_start"
-            and getattr(chunk, "content_block", None)
-            and chunk.content_block.type == "tool_use"
-        ):
-            current_tool = {
-                "id": chunk.content_block.id,
-                "name": chunk.content_block.name,
-            }
-            current_input = ""
-
-        # Collect input JSON deltas
-        if chunk.type == "content_block_delta" and hasattr(chunk.delta, "partial_json"):
-            current_input += chunk.delta.partial_json
-
-        # When tool input is complete
-        if chunk.type == "content_block_stop" and current_tool:
-            try:
-                # Parse the complete input
-                tool_input = json.loads(current_input) if current_input.strip() else {}
-
-                # Add context to tool input
-                tool_input["context"] = context
-
-                # Execute the tool
-                tool_func = tool_map.get(current_tool["name"])
-                if tool_func:
-                    result = tool_func(tool_input)
-                    tool_results.append(
-                        {
-                            "tool_name": current_tool["name"],
-                            "tool_input": tool_input,
-                            "tool_result": result,
-                        }
-                    )
-                else:
-                    tool_results.append(
-                        {
-                            "tool_name": current_tool["name"],
-                            "tool_input": tool_input,
-                            "error": f"Tool {current_tool['name']} not found",
-                        }
-                    )
-
-            except Exception as e:
-                tool_results.append(
-                    {
-                        "tool_name": current_tool["name"],
-                        "tool_input": current_input,
-                        "error": str(e),
-                    }
-                )
-
-            # Reset current tool
-            current_tool = None
-            current_input = ""
-
-    return tool_results
-
-
-from typing import List, Dict, Any, Literal
-
-ProviderType = Literal[
-    "openai", "anthropic", "ollama", "gemini", "deepseek", "openai-like"
-]
-
-
-def generate_tool_schema(
-    name: str,
-    description: str,
-    parameters: Dict[str, Any],
-    provider: ProviderType,
-    required: List[str] = None,
-) -> Dict[str, Any]:
-    """
-    Generate provider-specific function/tool schema from common parameters
-
-    Args:
-        name: Name of the function
-        description: Description of what the function does
-        parameters: Dict of parameter names and their properties
-        provider: Which provider to generate schema for
-        required: List of required parameter names
-    """
-    if required is None:
-        required = []
-
-    if provider == "openai":
-        return {
-            "type": "function",
-            "function": {
-                "name": name,
-                "description": description,
-                "parameters": {
-                    "type": "object",
-                    "properties": parameters,
-                    "required": required,
-                    "additionalProperties": False,
-                },
-                "strict": True,
-            },
-        }
-
-    elif provider == "anthropic":
-        return {
-            "name": name,
-            "description": description,
-            "parameters": {
-                "type": "object",
-                "properties": parameters,
-                "required": required,
-            },
-        }
-
-    elif provider == "ollama":
-        return {
-            "type": "function",
-            "function": {
-                "name": name,
-                "description": description,
-                "parameters": {
-                    "type": "object",
-                    "properties": parameters,
-                    "required": required,
-                },
-            },
-        }
-    elif provider == "gemini":
-        # Convert our generic tool schema to a Gemini function declaration
-        function = {
-            "name": name,
-            "description": description,
-            "parameters": {
-                "type": "OBJECT",
-                "properties": {
-                    k: {
-                        "type": v.get("type", "STRING").upper(),
-                        "description": v.get("description", ""),
-                        "enum": v.get("enum", None),
-                    }
-                    for k, v in parameters.items()
-                },
-                "required": required or [],
-            },
-        }
-
-        # Create a Tool object as shown in the example
-        return types.Tool(function_declarations=[function])
-
-    raise ValueError(f"Unknown provider: {provider}")
-
-
-def get_ollama_stream(
-    messages: List[Dict[str, str]],
-    model: str,
-    npc: Any = None,
-    tools: list = None,
-    images: list = None,
-    tool_choice: Dict = None,
-    **kwargs,
-) -> Generator:
-    """Streams responses from Ollama, supporting images and tools."""
-    import ollama
-
-    messages_copy = messages.copy()
-
-    # Handle images if provided
-    if images:
-        messages[-1]["images"] = [image["file_path"] for image in images]
-
-    # Add system message if not present
-    if messages_copy[0]["role"] != "system":
-        if npc is not None:
-            system_message = get_system_message(npc)
-            messages_copy.insert(0, {"role": "system", "content": system_message})
-
-    # Prepare API call parameters
-    api_params = {
-        "model": model,
-        "messages": messages_copy,
-        "stream": True,
-    }
-
-    # Add tools if provided
-    if tools:
-        api_params["tools"] = tools
-
-    # Make the API call
-    for chunk in ollama.chat(**api_params):
-        yield chunk
-
-
-def process_ollama_tool_stream(
-    stream, tool_map: Dict[str, callable], tools: List[Dict]
-) -> List[Dict]:
-    """Process the Ollama tool use stream"""
-    tool_results = []
-    content = ""
-
-    # Build tool schema map
-    tool_schemas = {
-        tool["function"]["name"]: tool["function"]["parameters"] for tool in tools
-    }
-
-    def convert_params(tool_name: str, params: dict) -> dict:
-        """Convert parameters to the correct type based on schema"""
-        schema = tool_schemas.get(tool_name, {})
-        properties = schema.get("properties", {})
-
-        converted = {}
-        for key, value in params.items():
-            prop_schema = properties.get(key, {})
-            prop_type = prop_schema.get("type")
-
-            if prop_type == "integer" and isinstance(value, str):
-                try:
-                    converted[key] = int(value)
-                except (ValueError, TypeError):
-                    converted[key] = 0
-            else:
-                converted[key] = value
-
-        return converted
-
-    # Accumulate content
-    for chunk in stream:
-        if chunk.message and chunk.message.content:
-            content += chunk.message.content
-
-    # Process complete JSON objects when done
-    try:
-        # Find all JSON objects in the content
-        json_objects = []
-        current = ""
-        for char in content:
-            current += char
-            if current.count("{") == current.count("}") and current.strip().startswith(
-                "{"
-            ):
-                json_objects.append(current.strip())
-                current = ""
-
-        # Process each JSON object
-        for json_str in json_objects:
-            try:
-                tool_call = json.loads(json_str)
-                tool_name = tool_call.get("name")
-                tool_params = tool_call.get("parameters", {})
-
-                if tool_name in tool_map:
-                    # Convert parameters to correct types
-                    converted_params = convert_params(tool_name, tool_params)
-                    result = tool_map[tool_name](converted_params)
-                    tool_results.append(
-                        {
-                            "tool_name": tool_name,
-                            "tool_input": converted_params,
-                            "tool_result": result,
-                        }
-                    )
-            except Exception as e:
-                tool_results.append({"error": str(e), "partial_json": json_str})
-
-    except Exception as e:
-        tool_results.append({"error": str(e), "content": content})
-
-    return tool_results
-
-
-def get_openai_stream(
-    messages: List[Dict[str, str]],
-    model: str,
-    npc: Any = None,
-    tools: list = None,
-    images: List[Dict[str, str]] = None,
-    api_key: str = None,
-    tool_choice: Dict = None,
-    **kwargs,
-) -> Generator:
-    """Streams responses from OpenAI, supporting images, tools and yielding raw text chunks."""
-
-    if api_key is None:
-        api_key = os.environ["OPENAI_API_KEY"]
-    client = OpenAI(api_key=api_key)
-
-    system_message = get_system_message(npc) if npc else "You are a helpful assistant."
-
-    if not messages:
-        messages = [{"role": "system", "content": system_message}]
-
-    # Add images if provided
-    if images:
-        last_user_message = (
-            messages[-1]
-            if messages and messages[-1]["role"] == "user"
-            else {"role": "user", "content": []}
-        )
-
-        if isinstance(last_user_message["content"], str):
-            last_user_message["content"] = [
-                {"type": "text", "text": last_user_message["content"]}
-            ]
-
-        for image in images:
-            with open(image["file_path"], "rb") as image_file:
-                image_data = base64.b64encode(image_file.read()).decode("utf-8")
-                last_user_message["content"].append(
-                    {
-                        "type": "image_url",
-                        "image_url": {"url": f"data:image/jpeg;base64,{image_data}"},
-                    }
-                )
-
-        if last_user_message not in messages:
-            messages.append(last_user_message)
-
-    # Prepare API call parameters
-    api_params = {"model": model, "messages": messages, "stream": True, **kwargs}
-
-    # Add tools if provided
-    if tools:
-        api_params["tools"] = tools
-
-    # Add tool choice if specified
-    if tool_choice:
-        api_params["tool_choice"] = tool_choice
-
-    stream = client.chat.completions.create(**api_params)
-
-    for chunk in stream:
-        yield chunk
-
-
-def process_openai_tool_stream(stream, tool_map: Dict[str, callable]) -> List[Dict]:
-    """
-    Process the OpenAI tool use stream
-    """
-    final_tool_calls = {}
-    tool_results = []
-
-    for chunk in stream:
-        delta = chunk.choices[0].delta
-
-        # Process tool calls if present
-        if delta.tool_calls:
-            for tool_call in delta.tool_calls:
-                index = tool_call.index
-
-                # Initialize tool call if new
-                if index not in final_tool_calls:
-                    final_tool_calls[index] = {
-                        "id": tool_call.id,
-                        "name": tool_call.function.name if tool_call.function else None,
-                        "arguments": (
-                            tool_call.function.arguments if tool_call.function else ""
-                        ),
-                    }
-                # Append arguments if continuing
-                elif tool_call.function and tool_call.function.arguments:
-                    final_tool_calls[index]["arguments"] += tool_call.function.arguments
-
-    # Process all complete tool calls
-    for tool_call in final_tool_calls.values():
-        try:
-            # Parse the arguments
-            tool_input = (
-                json.loads(tool_call["arguments"])
-                if tool_call["arguments"].strip()
-                else {}
-            )
-
-            # Execute the tool
-            tool_func = tool_map.get(tool_call["name"])
-            if tool_func:
-                result = tool_func(tool_input)
-                tool_results.append(
-                    {
-                        "tool_name": tool_call["name"],
-                        "tool_input": tool_input,
-                        "tool_result": result,
-                    }
-                )
-            else:
-                tool_results.append(
-                    {
-                        "tool_name": tool_call["name"],
-                        "tool_input": tool_input,
-                        "error": f"Tool {tool_call['name']} not found",
-                    }
-                )
-
-        except Exception as e:
-            tool_results.append(
-                {
-                    "tool_name": tool_call["name"],
-                    "tool_input": tool_call["arguments"],
-                    "error": str(e),
-                }
-            )
-
-    return tool_results
-
-
-def get_openai_like_stream(
-    messages: List[Dict[str, str]],
-    model: str,
-    api_url: str,
-    npc: Any = None,
-    images: List[Dict[str, str]] = None,
-    tools: list = None,
-    api_key: str = None,
-    **kwargs,
-) -> List[Dict[str, str]]:
-    """
-    Function Description:
-        This function generates a conversation using the OpenAI API.
-    Args:
-        messages (List[Dict[str, str]]): The list of messages in the conversation.
-        model (str): The model to use for the conversation.
-    Keyword Args:
-        npc (Any): The NPC object.
-        api_key (str): The API key for accessing the OpenAI API.
-    Returns:
-        List[Dict[str, str]]: The list of messages in the conversation.
-    """
-    if api_key is None:
-        api_key = "dummy"
-    client = OpenAI(api_key=api_key, base_url=api_url)
-
-    system_message = get_system_message(npc) if npc else "You are a helpful assistant."
-
-    if messages is None:
-        messages = []
-
-    # Ensure the system message is at the beginning
-    if not any(msg["role"] == "system" for msg in messages):
-        messages.insert(0, {"role": "system", "content": system_message})
-
-    # messages should already include the user's latest message
-
-    # Make the API call with the messages including the latest user input
-    completion = client.chat.completions.create(
-        model=model, messages=messages, stream=True, **kwargs
-    )
-
-    for chunk in completion:
-        yield chunk
-
-
-def get_deepseek_stream(
-    messages: List[Dict[str, str]],
-    model: str,
-    npc: Any = None,
-    tools: list = None,
-    api_key: str = None,
-    **kwargs,
-) -> List[Dict[str, str]]:
-    """
-    Function Description:
-        This function generates a conversation using the Deepseek API.
-    Args:
-        messages (List[Dict[str, str]]): The list of messages in the conversation.
-        model (str): The model to use for the conversation.
-    Keyword Args:
-        npc (Any): The NPC object.
-        api_key (str): The API key for accessing the Deepseek API.
-    Returns:
-        List[Dict[str, str]]: The list of messages in the conversation.
-    """
-    if api_key is None:
-        api_key = os.environ["DEEPSEEK_API_KEY"]
-    client = OpenAI(api_key=api_key, base_url="https://api.deepseek.com")
-
-    system_message = get_system_message(npc) if npc else ""
-
-    messages_copy = messages.copy()
-    if messages_copy[0]["role"] != "system":
-        messages_copy.insert(0, {"role": "system", "content": system_message})
-
-    completion = client.chat.completions.create(
-        model=model,
-        messages=messages,
-        tools=tools,
-        stream=True,
-        **kwargs,  # Include any additional keyword arguments
-    )
-
-    for response in completion:
-        yield response
-
-
-def get_gemini_stream(
-    messages: List[Dict[str, str]],
-    model: str,
-    npc: Any = None,
-    tools: list = None,
-    api_key: str = None,
-    **kwargs,
-) -> Generator:
-    """Streams responses from Gemini, supporting tools and yielding chunks."""
-    import google.generativeai as genai
-
-    if api_key is None:
-        api_key = os.environ["GEMINI_API_KEY"]
-
-    # Configure the Gemini API
-    genai.configure(api_key=api_key)
-
-    # Create model instance
-    model = genai.GenerativeModel(model_name=model)
-
-    # Convert all messages to contents list
-    contents = []
-    for msg in messages:
-        if msg["role"] != "system":
-            contents.append(
-                {
-                    "role": "user" if msg["role"] == "user" else "model",
-                    "parts": [{"text": msg["content"]}],
-                }
-            )
-
-    try:
-        # Generate streaming response with full history
-        response = model.generate_content(
-            contents=contents, tools=tools if tools else None, stream=True, **kwargs
-        )
-
-        for chunk in response:
-            yield chunk
-
-    except Exception as e:
-        print(f"Error in Gemini stream: {str(e)}")
-        yield None
-
-
-def process_gemini_tool_stream(
-    stream, tool_map: Dict[str, callable], tools: List[Dict]
-) -> List[Dict]:
-    """Process the Gemini tool stream and execute tools."""
-    tool_results = []
-
-    try:
-        for response in stream:
-            if not response.candidates:
-                continue
-
-            for candidate in response.candidates:
-                if not candidate.content or not candidate.content.parts:
-                    continue
-
-                for part in candidate.content.parts:
-                    if hasattr(part, "function_call"):
-                        try:
-                            tool_name = part.function_call.name
-                            # Convert MapComposite to dict
-                            tool_args = dict(part.function_call.args)
-
-                            if tool_name in tool_map:
-                                result = tool_map[tool_name](tool_args)
-                                tool_results.append(
-                                    {
-                                        "tool_name": tool_name,
-                                        "tool_input": tool_args,
-                                        "tool_result": result,
-                                    }
-                                )
-                        except Exception as e:
-                            tool_results.append(
-                                {
-                                    "error": str(e),
-                                    "tool_name": (
-                                        tool_name
-                                        if "tool_name" in locals()
-                                        else "unknown"
-                                    ),
-                                    "tool_input": (
-                                        tool_args if "tool_args" in locals() else None
-                                    ),
-                                }
-                            )
-
-    except Exception as e:
-        tool_results.append({"error": f"Stream processing error: {str(e)}"})

-    return tool_results