npcpy 1.3.4__py3-none-any.whl → 1.3.6__py3-none-any.whl
This diff compares the contents of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between versions as they appear in their respective public registries.
- npcpy/build_funcs.py +288 -0
- npcpy/data/load.py +1 -1
- npcpy/data/web.py +5 -4
- npcpy/gen/image_gen.py +2 -1
- npcpy/gen/response.py +119 -66
- npcpy/gen/world_gen.py +609 -0
- npcpy/llm_funcs.py +177 -271
- npcpy/memory/command_history.py +107 -2
- npcpy/memory/knowledge_graph.py +1 -1
- npcpy/npc_compiler.py +176 -32
- npcpy/npc_sysenv.py +5 -5
- npcpy/serve.py +311 -2
- npcpy/sql/npcsql.py +272 -59
- npcpy/work/browser.py +30 -0
- {npcpy-1.3.4.dist-info → npcpy-1.3.6.dist-info}/METADATA +1 -1
- {npcpy-1.3.4.dist-info → npcpy-1.3.6.dist-info}/RECORD +19 -16
- {npcpy-1.3.4.dist-info → npcpy-1.3.6.dist-info}/WHEEL +0 -0
- {npcpy-1.3.4.dist-info → npcpy-1.3.6.dist-info}/licenses/LICENSE +0 -0
- {npcpy-1.3.4.dist-info → npcpy-1.3.6.dist-info}/top_level.txt +0 -0
npcpy/build_funcs.py
ADDED
@@ -0,0 +1,288 @@
+"""
+Build functions for NPC team deployment artifacts.
+"""
+import os
+import yaml
+from pathlib import Path
+
+
+def get_team_name(team_path):
+    """Get team name from ctx file or folder name."""
+    team_path = Path(team_path)
+    for ctx_file in team_path.glob("*.ctx"):
+        try:
+            with open(ctx_file, 'r') as f:
+                ctx = yaml.safe_load(f)
+            if ctx and ctx.get('name'):
+                return ctx['name']
+        except:
+            pass
+
+    name = team_path.name
+    if name == 'npc_team':
+        name = team_path.parent.name
+    return name
+
+
+def build_dockerfile(config, **kwargs):
+    """Generate a Dockerfile for serving an NPC team."""
+    team_path = config.get('team_path', './npc_team')
+    port = config.get('port', 5337)
+    team_name = get_team_name(team_path)
+
+    dockerfile = f'''FROM python:3.11-slim
+
+WORKDIR /app
+
+# Install npcsh
+RUN pip install --no-cache-dir npcsh
+
+# Copy the NPC team
+COPY {os.path.basename(team_path)} /app/npc_team
+
+# Expose the port
+EXPOSE {port}
+
+# Set environment variables (override at runtime)
+ENV NPCSH_CHAT_MODEL=gpt-4o-mini
+ENV NPCSH_CHAT_PROVIDER=openai
+ENV OPENAI_API_KEY=""
+ENV ANTHROPIC_API_KEY=""
+
+# Run the serve command
+CMD ["npc", "serve", "--port", "{port}"]
+'''
+    return dockerfile
+
+
+def build_docker_compose(config, **kwargs):
+    """Generate Docker Compose setup for NPC team deployment."""
+    team_path = config.get('team_path', './npc_team')
+    output_dir = config.get('output_dir', './build')
+    port = config.get('port', 5337)
+    cors_origins = config.get('cors_origins', None)
+
+    team_name = get_team_name(team_path)
+    safe_name = team_name.lower().replace(' ', '_').replace('-', '_')
+
+    os.makedirs(output_dir, exist_ok=True)
+
+    # Generate Dockerfile
+    dockerfile_content = build_dockerfile(config)
+    dockerfile_path = os.path.join(output_dir, 'Dockerfile')
+    with open(dockerfile_path, 'w') as f:
+        f.write(dockerfile_content)
+
+    # Generate docker-compose.yml
+    cors_env = f'\n      - CORS_ORIGINS={",".join(cors_origins)}' if cors_origins else ''
+
+    compose = f'''version: '3.8'
+
+services:
+  {safe_name}:
+    build: .
+    ports:
+      - "{port}:{port}"
+    environment:
+      - NPCSH_CHAT_MODEL=${{NPCSH_CHAT_MODEL:-gpt-4o-mini}}
+      - NPCSH_CHAT_PROVIDER=${{NPCSH_CHAT_PROVIDER:-openai}}
+      - OPENAI_API_KEY=${{OPENAI_API_KEY}}
+      - ANTHROPIC_API_KEY=${{ANTHROPIC_API_KEY}}
+      - GEMINI_API_KEY=${{GEMINI_API_KEY}}{cors_env}
+    volumes:
+      - ./{os.path.basename(team_path)}:/app/npc_team
+    restart: unless-stopped
+'''
+
+    compose_path = os.path.join(output_dir, 'docker-compose.yml')
+    with open(compose_path, 'w') as f:
+        f.write(compose)
+
+    # Generate .env.example
+    env_example = '''# NPC Team Environment Variables
+NPCSH_CHAT_MODEL=gpt-4o-mini
+NPCSH_CHAT_PROVIDER=openai
+
+# API Keys (set at least one)
+OPENAI_API_KEY=
+ANTHROPIC_API_KEY=
+GEMINI_API_KEY=
+'''
+
+    env_path = os.path.join(output_dir, '.env.example')
+    with open(env_path, 'w') as f:
+        f.write(env_example)
+
+    # Copy npc_team to output
+    import shutil
+    dest_team = os.path.join(output_dir, os.path.basename(team_path))
+    if os.path.exists(dest_team):
+        shutil.rmtree(dest_team)
+    shutil.copytree(team_path, dest_team)
+
+    return {
+        'output': f'''Docker deployment created in {output_dir}/
+
+Files generated:
+- Dockerfile
+- docker-compose.yml
+- .env.example
+- {os.path.basename(team_path)}/
+
+To deploy:
+cd {output_dir}
+cp .env.example .env
+# Edit .env with your API keys
+docker-compose up -d
+
+API will be available at http://localhost:{port}
+''',
+        'messages': kwargs.get('messages', [])
+    }
+
+
+def build_flask_server(config, **kwargs):
+    """Generate a standalone Flask server script."""
+    team_path = config.get('team_path', './npc_team')
+    output_dir = config.get('output_dir', './build')
+    port = config.get('port', 5337)
+    cors_origins = config.get('cors_origins', None)
+
+    os.makedirs(output_dir, exist_ok=True)
+
+    cors_line = f"cors_origins={cors_origins}" if cors_origins else "cors_origins=None"
+
+    server_script = f'''#!/usr/bin/env python3
+"""
+Auto-generated NPC Team server.
+Run with: python server.py
+"""
+from npcpy.serve import start_flask_server
+
+if __name__ == "__main__":
+    start_flask_server(
+        port={port},
+        {cors_line},
+    )
+'''
+
+    script_path = os.path.join(output_dir, 'server.py')
+    with open(script_path, 'w') as f:
+        f.write(server_script)
+    os.chmod(script_path, 0o755)
+
+    # Copy npc_team
+    import shutil
+    dest_team = os.path.join(output_dir, os.path.basename(team_path))
+    if os.path.exists(dest_team):
+        shutil.rmtree(dest_team)
+    shutil.copytree(team_path, dest_team)
+
+    return {
+        'output': f'''Flask server created in {output_dir}/
+
+Files generated:
+- server.py
+- {os.path.basename(team_path)}/
+
+To run:
+cd {output_dir}
+pip install npcsh
+python server.py
+
+API will be available at http://localhost:{port}
+''',
+        'messages': kwargs.get('messages', [])
+    }
+
+
+def build_cli_executable(config, **kwargs):
+    """Generate CLI wrapper scripts for team NPCs."""
+    team_path = config.get('team_path', './npc_team')
+    output_dir = config.get('output_dir', './build')
+
+    team_path = Path(team_path)
+    os.makedirs(output_dir, exist_ok=True)
+
+    npc_files = list(team_path.glob("*.npc"))
+    scripts = []
+
+    for npc_file in npc_files:
+        name = npc_file.stem
+        script = f'''#!/usr/bin/env python3
+"""CLI wrapper for {name} NPC."""
+import sys
+from npcsh.npcsh import main
+sys.argv[0] = "{name}"
+main()
+'''
+        script_path = os.path.join(output_dir, name)
+        with open(script_path, 'w') as f:
+            f.write(script)
+        os.chmod(script_path, 0o755)
+        scripts.append(name)
+
+    return {
+        'output': f'''CLI scripts created in {output_dir}/
+
+Scripts: {", ".join(scripts)}
+
+To use, add {output_dir} to your PATH or run directly:
+./{scripts[0] if scripts else "npc_name"} "your prompt"
+''',
+        'messages': kwargs.get('messages', [])
+    }
+
+
+def build_static_site(config, **kwargs):
+    """Generate static site documentation for the team."""
+    team_path = config.get('team_path', './npc_team')
+    output_dir = config.get('output_dir', './build')
+
+    team_path = Path(team_path)
+    os.makedirs(output_dir, exist_ok=True)
+
+    team_name = get_team_name(team_path)
+
+    # Get NPCs
+    npcs = []
+    for npc_file in team_path.glob("*.npc"):
+        with open(npc_file, 'r') as f:
+            npc_data = yaml.safe_load(f)
+        npcs.append({
+            'name': npc_file.stem,
+            'directive': npc_data.get('primary_directive', '')[:200] + '...'
+        })
+
+    # Simple HTML page
+    npc_list = '\n'.join([
+        f'<li><strong>{n["name"]}</strong>: {n["directive"]}</li>'
+        for n in npcs
+    ])
+
+    html = f'''<!DOCTYPE html>
+<html>
+<head>
+<title>{team_name} - NPC Team</title>
+<style>
+body {{ font-family: system-ui; max-width: 800px; margin: 0 auto; padding: 20px; }}
+h1 {{ color: #333; }}
+ul {{ line-height: 1.8; }}
+</style>
+</head>
+<body>
+<h1>{team_name}</h1>
+<h2>Available NPCs</h2>
+<ul>{npc_list}</ul>
+</body>
+</html>
+'''
+
+    html_path = os.path.join(output_dir, 'index.html')
+    with open(html_path, 'w') as f:
+        f.write(html)
+
+    return {
+        'output': f'Static site created at {html_path}',
+        'messages': kwargs.get('messages', [])
+    }
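All four build functions read the same plain config dict and return an {'output': ..., 'messages': ...} dict, so they can be scripted directly as well as driven from the CLI. A minimal sketch of calling build_docker_compose from Python (the config keys mirror the config.get() calls above; the values are illustrative):

    from npcpy.build_funcs import build_docker_compose

    result = build_docker_compose({
        'team_path': './npc_team',                  # directory of *.npc / *.ctx files
        'output_dir': './build',
        'port': 5337,
        'cors_origins': ['http://localhost:3000'],  # optional; omit to skip CORS_ORIGINS
    })
    print(result['output'])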
npcpy/data/load.py
CHANGED
@@ -259,7 +259,7 @@ def load_file_contents(file_path, chunk_size=None):
     file_ext = os.path.splitext(file_path)[1].upper().lstrip('.')
     full_content = ""
     if not isinstance(chunk_size, int):
-        chunk_size=
+        chunk_size=8000
     try:
         if file_ext == 'PDF':
             full_content = load_pdf(file_path)
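In 1.3.4 this line was a bare `chunk_size=`, a syntax error that broke the module at import time; 1.3.6 completes the assignment with a default of 8000. A sketch of the now-working default path (the file name is illustrative):

    from npcpy.data.load import load_file_contents

    # chunk_size=None is not an int, so it falls back to 8000
    content = load_file_contents('report.pdf')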
npcpy/data/web.py
CHANGED
@@ -78,14 +78,15 @@ def search_perplexity(
     headers = {"Authorization": f"Bearer {api_key}",
                "Content-Type": "application/json"}
 
-
     response = requests.post(url,
                              json=payload,
                              headers=headers)
-
-    response = response.json()
 
-
+    if response.status_code != 200:
+        raise Exception(f"Perplexity API error {response.status_code}: {response.text[:200]}")
+
+    data = response.json()
+    return [data["choices"][0]["message"]["content"], data.get("citations", [])]
 
 
 def search_web(
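The old code called response.json() on any reply, so a non-200 status surfaced later as a confusing KeyError on "choices"; the new code fails fast and returns an explicit [content, citations] pair. A sketch (the positional query argument is an assumption, since the full signature sits outside this hunk):

    from npcpy.data.web import search_perplexity

    try:
        content, citations = search_perplexity("what changed in npcpy 1.3.6?")
    except Exception as e:
        print(f"search failed: {e}")   # e.g. "Perplexity API error 401: ..."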
npcpy/gen/image_gen.py
CHANGED
@@ -160,11 +160,12 @@ def openai_image_gen(
     height: int = 1024,
     width: int = 1024,
     n_images: int = 1,
+    api_key: Optional[str] = None,
 ):
     """Generate or edit an image using the OpenAI API."""
     from openai import OpenAI
 
-    client = OpenAI()
+    client = OpenAI(api_key=api_key) if api_key else OpenAI()
 
     if height is None:
         height = 1024
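The new api_key parameter lets a caller supply a key per call instead of relying on the OPENAI_API_KEY environment variable. A sketch (the prompt parameter name is an assumption; it sits outside this hunk):

    from npcpy.gen.image_gen import openai_image_gen

    images = openai_image_gen(
        prompt="a watercolor fox",  # assumed parameter name
        height=1024,
        width=1024,
        n_images=1,
        api_key="sk-...",           # omit to fall back to the environment
    )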
npcpy/gen/response.py
CHANGED
@@ -4,15 +4,20 @@ from npcpy.data.image import compress_image
 from npcpy.npc_sysenv import get_system_message, lookup_provider, render_markdown
 import base64
 import json
+import yaml
 import uuid
-import os
-try:
+import os
+import logging
+
+logger = logging.getLogger(__name__)
+
+try:
     import ollama
 except ImportError:
     pass
 except OSError:
-    pass
-
+    logger.warning("Ollama is not installed or not available.")
+
 try:
     import litellm
     from litellm import completion
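Because the diagnostics now go through the stdlib logging module instead of print, they are silent unless a consumer opts in. A minimal sketch (the logger name follows from __name__ above):

    import logging

    logging.basicConfig()
    logging.getLogger("npcpy.gen.response").setLevel(logging.DEBUG)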
@@ -277,16 +282,33 @@ def get_ollama_response(
 Do not include any additional markdown formatting or leading
 ```json tags in your response. The item keys should be based on the ones provided
 by the user. Do not invent new ones."""
-
+
     if messages and messages[-1]["role"] == "user":
         if isinstance(messages[-1]["content"], list):
             messages[-1]["content"].append({
-                "type": "text",
+                "type": "text",
                 "text": json_instruction
             })
         elif isinstance(messages[-1]["content"], str):
             messages[-1]["content"] += "\n" + json_instruction
-
+
+    if format == "yaml" and not stream:
+        yaml_instruction = """Return your response as valid YAML. Do not include ```yaml markdown tags.
+For multi-line strings like code, use the literal block scalar (|) syntax:
+code: |
+  your code here
+  more lines here
+The keys should be based on the ones requested by the user. Do not invent new ones."""
+
+        if messages and messages[-1]["role"] == "user":
+            if isinstance(messages[-1]["content"], list):
+                messages[-1]["content"].append({
+                    "type": "text",
+                    "text": yaml_instruction
+                })
+            elif isinstance(messages[-1]["content"], str):
+                messages[-1]["content"] += "\n" + yaml_instruction
+
     if image_paths:
         last_user_idx = -1
         for i, msg in enumerate(messages):
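Together with the parsing hunks below, this gives format="yaml" the same treatment format="json" already had: an instruction is appended to the last user message, and the reply is parsed before being returned. A sketch of the call (keyword names other than format are assumptions about the unchanged signature):

    from npcpy.gen.response import get_ollama_response

    result = get_ollama_response(
        prompt="Write a haiku() function. Reply with keys code and explanation.",
        model="llama3.2",
        format="yaml",
    )
    print(result["response"]["code"])  # a parsed dict, not raw text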
@@ -393,11 +415,25 @@ def get_ollama_response(
                 result["response"] = parsed_response
             except json.JSONDecodeError:
                 result["error"] = f"Invalid JSON response: {response_content}"
-
+
+        if format == "yaml":
+            try:
+                if isinstance(response_content, str):
+                    if response_content.startswith("```yaml"):
+                        response_content = (
+                            response_content.replace("```yaml", "")
+                            .replace("```", "")
+                            .strip()
+                        )
+                parsed_response = yaml.safe_load(response_content)
+                result["response"] = parsed_response
+            except yaml.YAMLError:
+                result["error"] = f"Invalid YAML response: {response_content}"
+
         return result
 
-
-
+
+    logger.debug(f"ollama api_params: {api_params}")
     res = ollama.chat(**api_params, options=options)
     result["raw_response"] = res
 
@@ -466,9 +502,10 @@ def get_ollama_response(
 
         result["response"] = ollama.chat(**stream_api_params, options=options)
     else:
-
+
         if format == "json":
             try:
+                llm_response = response_content
                 if isinstance(llm_response, str):
                     llm_response = llm_response.strip()
 
@@ -505,14 +542,32 @@ def get_ollama_response(
                 result["response"] = parsed_json
 
             except (json.JSONDecodeError, TypeError) as e:
-
-                print(f"Raw response: {llm_response[:500]}")
+                logger.debug(f"JSON parsing error: {str(e)}, raw response: {llm_response[:500]}")
                 result["response"] = {}
                 result["error"] = "Invalid JSON response"
 
+        if format == "yaml":
+            try:
+                if isinstance(llm_response, str):
+                    llm_response = llm_response.strip()
+
+                    if '```yaml' in llm_response:
+                        start = llm_response.find('```yaml') + 7
+                        end = llm_response.rfind('```')
+                        if end > start:
+                            llm_response = llm_response[start:end].strip()
+
+                parsed_yaml = yaml.safe_load(llm_response)
+                result["response"] = parsed_yaml
+
+            except (yaml.YAMLError, TypeError) as e:
+                logger.debug(f"YAML parsing error: {str(e)}, raw response: {llm_response[:500]}")
+                result["response"] = {}
+                result["error"] = "Invalid YAML response"
+
         return result
-
-import time
+
+import time
 
 
 def get_litellm_response(
@@ -631,13 +686,27 @@ def get_litellm_response(
 Do not include any additional markdown formatting or leading
 ```json tags in your response. The item keys should be based on the ones provided
 by the user. Do not invent new ones."""
-
+
     if result["messages"] and result["messages"][-1]["role"] == "user":
         if isinstance(result["messages"][-1]["content"], list):
             result["messages"][-1]["content"].append({"type": "text", "text": json_instruction})
         elif isinstance(result["messages"][-1]["content"], str):
             result["messages"][-1]["content"] += "\n" + json_instruction
 
+    if format == "yaml" and not stream:
+        yaml_instruction = """Return your response as valid YAML. Do not include ```yaml markdown tags.
+For multi-line strings like code, use the literal block scalar (|) syntax:
+code: |
+  your code here
+  more lines here
+The keys should be based on the ones requested by the user. Do not invent new ones."""
+
+        if result["messages"] and result["messages"][-1]["role"] == "user":
+            if isinstance(result["messages"][-1]["content"], list):
+                result["messages"][-1]["content"].append({"type": "text", "text": yaml_instruction})
+            elif isinstance(result["messages"][-1]["content"], str):
+                result["messages"][-1]["content"] += "\n" + yaml_instruction
+
     if images:
         last_user_idx = -1
         for i, msg in enumerate(result["messages"]):
@@ -684,7 +753,7 @@ def get_litellm_response(
         model = model.split('-npc')[0]
         provider = "openai"
 
-    if isinstance(format, BaseModel):
+    if isinstance(format, type) and issubclass(format, BaseModel):
         api_params["response_format"] = format
     if model is None:
         model = os.environ.get("NPCSH_CHAT_MODEL", "llama3.2")
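This fixes a real bug: callers pass a Pydantic model class as format, and a class is never an instance of BaseModel, so the old branch could not fire. A two-line illustration:

    from pydantic import BaseModel

    class Plan(BaseModel):
        steps: list

    isinstance(Plan, BaseModel)                             # False: Plan is a class, not an instance
    isinstance(Plan, type) and issubclass(Plan, BaseModel)  # True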
@@ -777,11 +846,40 @@ def get_litellm_response(
                 result["response"] = parsed_json
 
             except (json.JSONDecodeError, TypeError) as e:
-
-                print(f"Raw response: {llm_response[:500]}")
+                logger.debug(f"JSON parsing error: {str(e)}, raw response: {llm_response[:500]}")
                 result["response"] = {}
                 result["error"] = "Invalid JSON response"
-
+
+        if format == "yaml":
+            try:
+                if isinstance(llm_response, str):
+                    llm_response = llm_response.strip()
+
+                    # Strip ```yaml markdown if present
+                    if '```yaml' in llm_response:
+                        start = llm_response.find('```yaml') + 7
+                        end = llm_response.rfind('```')
+                        if end > start:
+                            llm_response = llm_response[start:end].strip()
+                    elif '```' in llm_response:
+                        # Generic code block
+                        start = llm_response.find('```') + 3
+                        # Skip any language identifier on the same line
+                        newline = llm_response.find('\n', start)
+                        if newline != -1:
+                            start = newline + 1
+                        end = llm_response.rfind('```')
+                        if end > start:
+                            llm_response = llm_response[start:end].strip()
+
+                parsed_yaml = yaml.safe_load(llm_response)
+                result["response"] = parsed_yaml
+
+            except (yaml.YAMLError, TypeError) as e:
+                logger.debug(f"YAML parsing error: {str(e)}, raw response: {llm_response[:500]}")
+                result["response"] = {}
+                result["error"] = "Invalid YAML response"
+
         return result
 
 
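The stripping handles both ```yaml fences and anonymous ``` fences whose opening line carries a language tag. A standalone trace of the first branch:

    import yaml

    reply = "```yaml\nname: sibiji\nsteps:\n  - plan\n  - act\n```"
    start = reply.find('```yaml') + 7
    end = reply.rfind('```')
    print(yaml.safe_load(reply[start:end].strip()))
    # {'name': 'sibiji', 'steps': ['plan', 'act']}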
@@ -792,8 +890,7 @@ def get_litellm_response(
     try:
         resp = completion(**initial_api_params)
     except Exception as e:
-
-        print(colored(f"[litellm ERROR] completion() failed: {type(e).__name__}: {e}", "red"))
+        logger.error(f"litellm completion() failed: {type(e).__name__}: {e}")
         result["error"] = str(e)
         result["response"] = f"LLM call failed: {e}"
         return result
@@ -976,54 +1073,10 @@ def process_tool_calls(response_dict, tool_map, model, provider, messages, strea
         tool_result_str = ""
         serializable_result = None
 
-        # Show tool execution indicator with truncated args
-        # Store full args for Ctrl+O expansion
-        _last_tool_call = {"name": tool_name, "arguments": arguments}
-        try:
-            import builtins
-            builtins._npcsh_last_tool_call = _last_tool_call
-        except:
-            pass
-
-        try:
-            from termcolor import colored
-            # Format arguments nicely - show key=value pairs
-            is_truncated = False
-            if arguments:
-                arg_parts = []
-                for k, v in arguments.items():
-                    v_str = str(v)
-                    if len(v_str) > 40:
-                        v_str = v_str[:40] + "…"
-                        is_truncated = True
-                    arg_parts.append(f"{v_str}")
-                args_display = " ".join(arg_parts)
-                if len(args_display) > 60:
-                    args_display = args_display[:60] + "…"
-                    is_truncated = True
-            else:
-                args_display = ""
-
-            if args_display:
-                hint = colored(" [^O]", "white", attrs=["dark"]) if is_truncated else ""
-                print(colored(f" ⚡ {tool_name}", "cyan") + colored(f" {args_display}", "white", attrs=["dark"]) + hint, end="", flush=True)
-            else:
-                print(colored(f" ⚡ {tool_name}", "cyan"), end="", flush=True)
-        except:
-            pass
-
         try:
             tool_result = tool_map[tool_name](**arguments)
-            try:
-                print(colored(" ✓", "green"), flush=True)
-            except:
-                pass
         except Exception as e:
             tool_result = f"Error executing tool '{tool_name}': {str(e)}"
-            try:
-                print(colored(f" ✗ {str(e)[:50]}", "red"), flush=True)
-            except:
-                pass
 
         try:
             tool_result_str = json.dumps(tool_result, default=str)
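With the terminal display code removed, tool execution reduces to a dict lookup plus a guarded call. A minimal sketch of the tool_map shape this loop expects (names are illustrative):

    def get_weather(city: str) -> str:
        return f"Sunny in {city}"

    tool_map = {"get_weather": get_weather}
    arguments = {"city": "Lisbon"}

    try:
        tool_result = tool_map["get_weather"](**arguments)
    except Exception as e:
        tool_result = f"Error executing tool 'get_weather': {e}"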