npcsh 0.3.31__py3-none-any.whl → 1.0.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (91)
  1. npcsh/_state.py +942 -0
  2. npcsh/alicanto.py +1074 -0
  3. npcsh/guac.py +785 -0
  4. npcsh/mcp_helpers.py +357 -0
  5. npcsh/mcp_npcsh.py +822 -0
  6. npcsh/mcp_server.py +184 -0
  7. npcsh/npc.py +218 -0
  8. npcsh/npcsh.py +1161 -0
  9. npcsh/plonk.py +387 -269
  10. npcsh/pti.py +234 -0
  11. npcsh/routes.py +958 -0
  12. npcsh/spool.py +315 -0
  13. npcsh/wander.py +550 -0
  14. npcsh/yap.py +573 -0
  15. npcsh-1.0.0.dist-info/METADATA +596 -0
  16. npcsh-1.0.0.dist-info/RECORD +21 -0
  17. {npcsh-0.3.31.dist-info → npcsh-1.0.0.dist-info}/WHEEL +1 -1
  18. npcsh-1.0.0.dist-info/entry_points.txt +9 -0
  19. {npcsh-0.3.31.dist-info → npcsh-1.0.0.dist-info}/licenses/LICENSE +1 -1
  20. npcsh/audio.py +0 -210
  21. npcsh/cli.py +0 -545
  22. npcsh/command_history.py +0 -566
  23. npcsh/conversation.py +0 -291
  24. npcsh/data_models.py +0 -46
  25. npcsh/dataframes.py +0 -163
  26. npcsh/embeddings.py +0 -168
  27. npcsh/helpers.py +0 -641
  28. npcsh/image.py +0 -298
  29. npcsh/image_gen.py +0 -79
  30. npcsh/knowledge_graph.py +0 -1006
  31. npcsh/llm_funcs.py +0 -2027
  32. npcsh/load_data.py +0 -83
  33. npcsh/main.py +0 -5
  34. npcsh/model_runner.py +0 -189
  35. npcsh/npc_compiler.py +0 -2870
  36. npcsh/npc_sysenv.py +0 -383
  37. npcsh/npc_team/assembly_lines/test_pipeline.py +0 -181
  38. npcsh/npc_team/corca.npc +0 -13
  39. npcsh/npc_team/foreman.npc +0 -7
  40. npcsh/npc_team/npcsh.ctx +0 -11
  41. npcsh/npc_team/sibiji.npc +0 -4
  42. npcsh/npc_team/templates/analytics/celona.npc +0 -0
  43. npcsh/npc_team/templates/hr_support/raone.npc +0 -0
  44. npcsh/npc_team/templates/humanities/eriane.npc +0 -4
  45. npcsh/npc_team/templates/it_support/lineru.npc +0 -0
  46. npcsh/npc_team/templates/marketing/slean.npc +0 -4
  47. npcsh/npc_team/templates/philosophy/maurawa.npc +0 -0
  48. npcsh/npc_team/templates/sales/turnic.npc +0 -4
  49. npcsh/npc_team/templates/software/welxor.npc +0 -0
  50. npcsh/npc_team/tools/bash_executer.tool +0 -32
  51. npcsh/npc_team/tools/calculator.tool +0 -8
  52. npcsh/npc_team/tools/code_executor.tool +0 -16
  53. npcsh/npc_team/tools/generic_search.tool +0 -27
  54. npcsh/npc_team/tools/image_generation.tool +0 -25
  55. npcsh/npc_team/tools/local_search.tool +0 -149
  56. npcsh/npc_team/tools/npcsh_executor.tool +0 -9
  57. npcsh/npc_team/tools/screen_cap.tool +0 -27
  58. npcsh/npc_team/tools/sql_executor.tool +0 -26
  59. npcsh/response.py +0 -623
  60. npcsh/search.py +0 -248
  61. npcsh/serve.py +0 -1460
  62. npcsh/shell.py +0 -538
  63. npcsh/shell_helpers.py +0 -3529
  64. npcsh/stream.py +0 -700
  65. npcsh/video.py +0 -49
  66. npcsh-0.3.31.data/data/npcsh/npc_team/bash_executer.tool +0 -32
  67. npcsh-0.3.31.data/data/npcsh/npc_team/calculator.tool +0 -8
  68. npcsh-0.3.31.data/data/npcsh/npc_team/celona.npc +0 -0
  69. npcsh-0.3.31.data/data/npcsh/npc_team/code_executor.tool +0 -16
  70. npcsh-0.3.31.data/data/npcsh/npc_team/corca.npc +0 -13
  71. npcsh-0.3.31.data/data/npcsh/npc_team/eriane.npc +0 -4
  72. npcsh-0.3.31.data/data/npcsh/npc_team/foreman.npc +0 -7
  73. npcsh-0.3.31.data/data/npcsh/npc_team/generic_search.tool +0 -27
  74. npcsh-0.3.31.data/data/npcsh/npc_team/image_generation.tool +0 -25
  75. npcsh-0.3.31.data/data/npcsh/npc_team/lineru.npc +0 -0
  76. npcsh-0.3.31.data/data/npcsh/npc_team/local_search.tool +0 -149
  77. npcsh-0.3.31.data/data/npcsh/npc_team/maurawa.npc +0 -0
  78. npcsh-0.3.31.data/data/npcsh/npc_team/npcsh.ctx +0 -11
  79. npcsh-0.3.31.data/data/npcsh/npc_team/npcsh_executor.tool +0 -9
  80. npcsh-0.3.31.data/data/npcsh/npc_team/raone.npc +0 -0
  81. npcsh-0.3.31.data/data/npcsh/npc_team/screen_cap.tool +0 -27
  82. npcsh-0.3.31.data/data/npcsh/npc_team/sibiji.npc +0 -4
  83. npcsh-0.3.31.data/data/npcsh/npc_team/slean.npc +0 -4
  84. npcsh-0.3.31.data/data/npcsh/npc_team/sql_executor.tool +0 -26
  85. npcsh-0.3.31.data/data/npcsh/npc_team/test_pipeline.py +0 -181
  86. npcsh-0.3.31.data/data/npcsh/npc_team/turnic.npc +0 -4
  87. npcsh-0.3.31.data/data/npcsh/npc_team/welxor.npc +0 -0
  88. npcsh-0.3.31.dist-info/METADATA +0 -1853
  89. npcsh-0.3.31.dist-info/RECORD +0 -76
  90. npcsh-0.3.31.dist-info/entry_points.txt +0 -3
  91. {npcsh-0.3.31.dist-info → npcsh-1.0.0.dist-info}/top_level.txt +0 -0
npcsh/llm_funcs.py DELETED
@@ -1,2027 +0,0 @@
1
- # Remove duplicate imports
2
- import subprocess
3
- import requests
4
- import os
5
- import json
6
- import PIL
7
-
8
- import sqlite3
9
- from datetime import datetime
10
- from typing import List, Dict, Any, Optional, Union, Generator
11
-
12
-
13
- from jinja2 import Environment, FileSystemLoader, Template, Undefined
14
-
15
- import pandas as pd
16
- import numpy as np
17
-
18
- from google.generativeai import types
19
- import google.generativeai as genai
20
- from sqlalchemy import create_engine
21
-
22
- from .npc_sysenv import (
23
- get_system_message,
24
- get_available_models,
25
- get_model_and_provider,
26
- lookup_provider,
27
- NPCSH_CHAT_PROVIDER,
28
- NPCSH_CHAT_MODEL,
29
- NPCSH_API_URL,
30
- EMBEDDINGS_DB_PATH,
31
- NPCSH_EMBEDDING_MODEL,
32
- NPCSH_EMBEDDING_PROVIDER,
33
- NPCSH_DEFAULT_MODE,
34
- NPCSH_REASONING_MODEL,
35
- NPCSH_REASONING_PROVIDER,
36
- NPCSH_IMAGE_GEN_MODEL,
37
- NPCSH_IMAGE_GEN_PROVIDER,
38
- NPCSH_API_URL,
39
- NPCSH_VISION_MODEL,
40
- NPCSH_VISION_PROVIDER,
41
- available_reasoning_models,
42
- available_chat_models,
43
- )
44
-
45
- from .stream import (
46
- get_ollama_stream,
47
- get_openai_stream,
48
- get_anthropic_stream,
49
- get_openai_like_stream,
50
- get_deepseek_stream,
51
- get_gemini_stream,
52
- )
53
- from .conversation import (
54
- get_ollama_conversation,
55
- get_openai_conversation,
56
- get_openai_like_conversation,
57
- get_anthropic_conversation,
58
- get_deepseek_conversation,
59
- get_gemini_conversation,
60
- )
61
-
62
- from .response import (
63
- get_ollama_response,
64
- get_openai_response,
65
- get_anthropic_response,
66
- get_openai_like_response,
67
- get_deepseek_response,
68
- get_gemini_response,
69
- )
70
- from .image_gen import (
71
- generate_image_openai,
72
- generate_image_hf_diffusion,
73
- )
74
-
75
- from .embeddings import (
76
- get_ollama_embeddings,
77
- get_openai_embeddings,
78
- get_anthropic_embeddings,
79
- store_embeddings_for_model,
80
- )
81
-
82
-
83
- def generate_image(
84
- prompt: str,
85
- model: str = NPCSH_IMAGE_GEN_MODEL,
86
- provider: str = NPCSH_IMAGE_GEN_PROVIDER,
87
- filename: str = None,
88
- npc: Any = None,
89
- ):
90
- """
91
- Function Description:
92
- This function generates an image using the specified provider and model.
93
- Args:
94
- prompt (str): The prompt for generating the image.
95
- Keyword Args:
96
- model (str): The model to use for generating the image.
97
- provider (str): The provider to use for generating the image.
98
- filename (str): The filename to save the image to.
99
- npc (Any): The NPC object.
100
- Returns:
101
- str: The filename of the saved image.
102
- """
103
- if model is not None and provider is not None:
104
- pass
105
- elif model is not None and provider is None:
106
- provider = lookup_provider(model)
107
- elif npc is not None:
108
- if npc.provider is not None:
109
- provider = npc.provider
110
- if npc.model is not None:
111
- model = npc.model
112
- if npc.api_url is not None:
113
- api_url = npc.api_url
114
- if filename is None:
115
- # Generate a filename based on the prompt and the date time
116
- os.makedirs(os.path.expanduser("~/.npcsh/images/"), exist_ok=True)
117
- filename = (
118
- os.path.expanduser("~/.npcsh/images/")
119
- + f"image_{datetime.now().strftime('%Y%m%d_%H%M%S')}.png"
120
- )
121
-
122
- # if provider == "ollama":
123
- # image = generate_image_ollama(prompt, model)
124
- if provider == "openai":
125
- image = generate_image_openai(
126
- prompt,
127
- model,
128
- npc=npc,
129
- )
130
- # elif provider == "anthropic":
131
- # image = generate_image_anthropic(prompt, model, anthropic_api_key)
132
- # elif provider == "openai-like":
133
- # image = generate_image_openai_like(prompt, model, npc.api_url, openai_api_key)
134
- elif provider == "diffusers":
135
- image = generate_image_hf_diffusion(prompt, model)
136
- else:
137
- image = None
138
- # save image
139
- # check if image is a PIL image
140
- if isinstance(image, PIL.Image.Image):
141
- image.save(filename)
142
- return filename
143
-
144
- else:
145
- try:
146
- # image is at a private url
147
- response = requests.get(image.data[0].url)
148
- with open(filename, "wb") as file:
149
- file.write(response.content)
150
- from PIL import Image
151
-
152
- img = Image.open(filename)
153
- img.show()
154
- # console = Console()
155
- # console.print(Image.from_path(filename))
156
- return filename
157
-
158
- except AttributeError as e:
159
- print(f"Error saving image: {e}")
160
-
161
-
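
For reference, a minimal sketch of how the removed generate_image helper was typically invoked in 0.3.31. The import path follows the file shown here; the model name is illustrative and an OpenAI API key is assumed to be configured in the environment:

from npcsh.llm_funcs import generate_image

# With no filename, the image is saved under ~/.npcsh/images/ and the path is returned.
path = generate_image(
    "a watercolor fox in a pine forest",
    model="dall-e-3",      # illustrative model name
    provider="openai",
)
print(path)
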
162
- def get_embeddings(
163
- texts: List[str],
164
- model: str = NPCSH_EMBEDDING_MODEL,
165
- provider: str = NPCSH_EMBEDDING_PROVIDER,
166
- ) -> List[List[float]]:
167
- """Generate embeddings using the specified provider and store them in Chroma."""
168
- if provider == "ollama":
169
- embeddings = get_ollama_embeddings(texts, model)
170
- elif provider == "openai":
171
- embeddings = get_openai_embeddings(texts, model)
172
- elif provider == "anthropic":
173
- embeddings = get_anthropic_embeddings(texts, model)
174
- else:
175
- raise ValueError(f"Unsupported provider: {provider}")
176
-
177
- # Store the embeddings in the relevant Chroma collection
178
- # store_embeddings_for_model(texts, embeddings, model, provider)
179
- return embeddings
180
-
181
-
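
A usage sketch for the removed get_embeddings helper, assuming a local Ollama server with an embedding model already pulled (the model name is illustrative):

from npcsh.llm_funcs import get_embeddings

vectors = get_embeddings(
    ["first document", "second document"],
    model="nomic-embed-text",   # illustrative Ollama embedding model
    provider="ollama",
)
print(len(vectors), len(vectors[0]))  # number of texts, embedding dimension
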
182
- def get_llm_response(
183
- prompt: str,
184
- provider: str = NPCSH_CHAT_PROVIDER,
185
- model: str = NPCSH_CHAT_MODEL,
186
- images: List[Dict[str, str]] = None,
187
- npc: Any = None,
188
- messages: List[Dict[str, str]] = None,
189
- api_url: str = NPCSH_API_URL,
190
- api_key: str = None,
191
- context=None,
192
- **kwargs,
193
- ):
194
- """
195
- Function Description:
196
- This function generates a response using the specified provider and model.
197
- Args:
198
- prompt (str): The prompt for generating the response.
199
- Keyword Args:
200
- provider (str): The provider to use for generating the response.
201
- model (str): The model to use for generating the response.
202
- images (List[Dict[str, str]]): The list of images.
203
- npc (Any): The NPC object.
204
- messages (List[Dict[str, str]]): The list of messages.
205
- api_url (str): The URL of the API endpoint.
206
- Returns:
207
- Any: The response generated by the specified provider and model.
208
- """
209
- if model is not None and provider is not None:
210
- pass
211
- elif provider is None and model is not None:
212
- provider = lookup_provider(model)
213
-
214
- elif npc is not None:
215
- if npc.provider is not None:
216
- provider = npc.provider
217
- if npc.model is not None:
218
- model = npc.model
219
- if npc.api_url is not None:
220
- api_url = npc.api_url
221
-
222
- else:
223
- provider = "ollama"
224
- if images is not None:
225
- model = "llava:7b"
226
- else:
227
- model = "llama3.2"
228
- # print(provider, model)
229
- # print(provider, model)
230
- if provider == "ollama":
231
- if model is None:
232
- if images is not None:
233
- model = "llama:7b"
234
- else:
235
- model = "llama3.2"
236
- elif images is not None and model not in [
237
- "x/llama3.2-vision",
238
- "llama3.2-vision",
239
- "llava-llama3",
240
- "bakllava",
241
- "moondream",
242
- "llava-phi3",
243
- "minicpm-v",
244
- "hhao/openbmb-minicpm-llama3-v-2_5",
245
- "aiden_lu/minicpm-v2.6",
246
- "xuxx/minicpm2.6",
247
- "benzie/llava-phi-3",
248
- "mskimomadto/chat-gph-vision",
249
- "xiayu/openbmb-minicpm-llama3-v-2_5",
250
- "0ssamaak0/xtuner-llava",
251
- "srizon/pixie",
252
- "jyan1/paligemma-mix-224",
253
- "qnguyen3/nanollava",
254
- "knoopx/llava-phi-2",
255
- "nsheth/llama-3-lumimaid-8b-v0.1-iq-imatrix",
256
- "bigbug/minicpm-v2.5",
257
- ]:
258
- model = "llava:7b"
259
- # print(model)
260
- return get_ollama_response(
261
- prompt, model, npc=npc, messages=messages, images=images, **kwargs
262
- )
263
- elif provider == "gemini":
264
- if model is None:
265
- model = "gemini-2.0-flash"
266
- return get_gemini_response(
267
- prompt, model, npc=npc, messages=messages, images=images, **kwargs
268
- )
269
-
270
- elif provider == "deepseek":
271
- if model is None:
272
- model = "deepseek-chat"
273
- # print(prompt, model, provider)
274
- return get_deepseek_response(
275
- prompt, model, npc=npc, messages=messages, images=images, **kwargs
276
- )
277
- elif provider == "openai":
278
- if model is None:
279
- model = "gpt-4o-mini"
280
- # print(model)
281
- return get_openai_response(
282
- prompt, model, npc=npc, messages=messages, images=images, **kwargs
283
- )
284
- elif provider == "openai-like":
285
- if api_url is None:
286
- raise ValueError("api_url is required for openai-like provider")
287
- return get_openai_like_response(
288
- prompt,
289
- model,
290
- api_url,
291
- api_key,
292
- npc=npc,
293
- messages=messages,
294
- images=images,
295
- **kwargs,
296
- )
297
-
298
- elif provider == "anthropic":
299
- if model is None:
300
- model = "claude-3-haiku-20240307"
301
- return get_anthropic_response(
302
- prompt, model, npc=npc, messages=messages, images=images, **kwargs
303
- )
304
- else:
305
- # print(provider)
306
- # print(model)
307
- return "Error: Invalid provider specified."
308
-
309
-
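
A sketch of the removed get_llm_response entry point. With no model, provider, or npc supplied it fell back to Ollama with llama3.2, so this assumes a running Ollama instance; the exact shape of the returned value depended on the provider backend:

from npcsh.llm_funcs import get_llm_response

result = get_llm_response(
    "Summarize the difference between TCP and UDP in two sentences.",
    model="llama3.2",
    provider="ollama",
)
print(result)  # backend-specific response object or dict
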
310
- def get_stream(
311
- messages: List[Dict[str, str]],
312
- provider: str = NPCSH_CHAT_PROVIDER,
313
- model: str = NPCSH_CHAT_MODEL,
314
- npc: Any = None,
315
- images: List[Dict[str, str]] = None,
316
- api_url: str = NPCSH_API_URL,
317
- api_key: str = None,
318
- context=None,
319
- **kwargs,
320
- ) -> List[Dict[str, str]]:
321
- """
322
- Function Description:
323
- This function generates a streaming response using the specified provider and model
324
- Args:
325
- messages (List[Dict[str, str]]): The list of messages in the conversation.
326
- Keyword Args:
327
- provider (str): The provider to use for the conversation.
328
- model (str): The model to use for the conversation.
329
- npc (Any): The NPC object.
330
- api_url (str): The URL of the API endpoint.
331
- api_key (str): The API key for accessing the API.
332
- Returns:
333
- List[Dict[str, str]]: The list of messages in the conversation.
334
- """
335
- if model is not None and provider is not None:
336
- pass
337
- elif model is not None and provider is None:
338
- provider = lookup_provider(model)
339
- elif npc is not None:
340
- if npc.provider is not None:
341
- provider = npc.provider
342
- if npc.model is not None:
343
- model = npc.model
344
- if npc.api_url is not None:
345
- api_url = npc.api_url
346
- else:
347
- provider = "ollama"
348
- model = "llama3.2"
349
- # print(model, provider)
350
- if provider == "ollama":
351
- return get_ollama_stream(messages, model, npc=npc, images=images, **kwargs)
352
- elif provider == "openai":
353
- return get_openai_stream(
354
- messages, model, npc=npc, api_key=api_key, images=images, **kwargs
355
- )
356
- elif provider == "anthropic":
357
- return get_anthropic_stream(
358
- messages, model, npc=npc, api_key=api_key, images=images, **kwargs
359
- )
360
- elif provider == "openai-like":
361
- return get_openai_like_stream(
362
- messages,
363
- model,
364
- api_url,
365
- npc=npc,
366
- api_key=api_key,
367
- images=images,
368
- **kwargs,
369
- )
370
- elif provider == "deepseek":
371
- return get_deepseek_stream(messages, model, npc=npc, api_key=api_key, **kwargs)
372
- elif provider == "gemini":
373
- return get_gemini_stream(messages, model, npc=npc, api_key=api_key, **kwargs)
374
- else:
375
- return "Error: Invalid provider specified."
376
-
377
-
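
get_stream returned a provider-specific stream of chunks rather than a finished reply; a sketch assuming the same Ollama setup as above:

from npcsh.llm_funcs import get_stream

messages = [{"role": "user", "content": "Stream a haiku about autumn."}]
for chunk in get_stream(messages, provider="ollama", model="llama3.2"):
    print(chunk)  # chunk format is provider-specific; printing raw chunks is the safe default
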
378
- def get_conversation(
379
- messages: List[Dict[str, str]],
380
- provider: str = NPCSH_CHAT_PROVIDER,
381
- model: str = NPCSH_CHAT_MODEL,
382
- images: List[Dict[str, str]] = None,
383
- npc: Any = None,
384
- api_url: str = NPCSH_API_URL,
385
- context=None,
386
- **kwargs,
387
- ) -> List[Dict[str, str]]:
388
- """
389
- Function Description:
390
- This function generates a conversation using the specified provider and model.
391
- Args:
392
- messages (List[Dict[str, str]]): The list of messages in the conversation.
393
- Keyword Args:
394
- provider (str): The provider to use for the conversation.
395
- model (str): The model to use for the conversation.
396
- npc (Any): The NPC object.
397
- Returns:
398
- List[Dict[str, str]]: The list of messages in the conversation.
399
- """
400
-
401
- if model is not None and provider is not None:
402
- pass # Use explicitly provided model and provider
403
- elif model is not None and provider is None:
404
- provider = lookup_provider(model)
405
- elif npc is not None and (npc.provider is not None or npc.model is not None):
406
- provider = npc.provider if npc.provider else provider
407
- model = npc.model if npc.model else model
408
- api_url = npc.api_url if npc.api_url else api_url
409
- else:
410
- provider = "ollama"
411
- model = "llava:7b" if images is not None else "llama3.2"
412
-
413
- # print(provider, model)
414
- if provider == "ollama":
415
- return get_ollama_conversation(
416
- messages, model, npc=npc, images=images, **kwargs
417
- )
418
- elif provider == "openai":
419
- return get_openai_conversation(
420
- messages, model, npc=npc, images=images, **kwargs
421
- )
422
- elif provider == "openai-like":
423
- return get_openai_like_conversation(
424
- messages, model, api_url, npc=npc, images=images, **kwargs
425
- )
426
- elif provider == "anthropic":
427
- return get_anthropic_conversation(
428
- messages, model, npc=npc, images=images, **kwargs
429
- )
430
- elif provider == "gemini":
431
- return get_gemini_conversation(messages, model, npc=npc, **kwargs)
432
- elif provider == "deepseek":
433
- return get_deepseek_conversation(messages, model, npc=npc, **kwargs)
434
-
435
- else:
436
- return "Error: Invalid provider specified."
437
-
438
-
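
get_conversation returned the updated message list with the assistant's reply appended, which is what the non-streaming callers below rely on; a sketch:

from npcsh.llm_funcs import get_conversation

messages = [{"role": "user", "content": "What is a context manager in Python?"}]
messages = get_conversation(messages, provider="ollama", model="llama3.2")
print(messages[-1]["content"])  # assistant reply
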
439
- def execute_llm_question(
440
- command: str,
441
- model: str = NPCSH_CHAT_MODEL,
442
- provider: str = NPCSH_CHAT_PROVIDER,
443
- api_url: str = NPCSH_API_URL,
444
- api_key: str = None,
445
- npc: Any = None,
446
- messages: List[Dict[str, str]] = None,
447
- retrieved_docs=None,
448
- n_docs: int = 5,
449
- stream: bool = False,
450
- images: List[Dict[str, str]] = None,
451
- context=None,
452
- ):
453
- location = os.getcwd()
454
- if messages is None or len(messages) == 0:
455
- messages = []
456
- messages.append({"role": "user", "content": command})
457
-
458
- # Build context from retrieved documents
459
- if retrieved_docs:
460
- context = ""
461
- for filename, content in retrieved_docs[:n_docs]:
462
- context += f"Document: {filename}\n{content}\n\n"
463
- context_message = f"""
464
- What follows is the context of the text files in the user's directory that are potentially relevant to their request:
465
- {context}
466
-
467
- if the user has asked for code, be sure to include markdown formatting
468
- blocks starting and stopping with ``` to ensure the code is formatted correctly.
469
- """
470
- # Add context as a system message
471
- # messages.append({"role": "system", "content": context_message})
472
-
473
- # Append the user's message to messages
474
-
475
- # Print messages before calling get_conversation for debugging
476
- # print("Messages before get_conversation:", messages)
477
-
478
- # Use the existing messages list
479
- if stream:
480
- # print("beginning stream")
481
- response = get_stream(
482
- messages,
483
- model=model,
484
- provider=provider,
485
- npc=npc,
486
- images=images,
487
- api_url=api_url,
488
- api_key=api_key,
489
- )
490
- # let streamer deal with the diff response data and messages
491
- return response
492
- # print("Response from get_stream:", response)
493
- # full_response = ""
494
- # for chunk in response:
495
- # full_response += chunk
496
- # print(chunk, end="")
497
- # print("end of stream")
498
- # output = full_response
499
- # messages.append({"role": "assistant", "content": output})
500
-
501
- else:
502
- response = get_conversation(
503
- messages,
504
- model=model,
505
- provider=provider,
506
- npc=npc,
507
- images=images,
508
- api_url=api_url,
509
- api_key=api_key,
510
- )
511
-
512
- # Print response from get_conversation for debugging
513
- # print("Response from get_conversation:", response)
514
-
515
- if isinstance(response, str) and "Error" in response:
516
- output = response
517
- elif isinstance(response, list) and len(response) > 0:
518
- messages = response # Update messages with the new conversation
519
- output = response[-1]["content"]
520
- else:
521
- output = "Error: Invalid response from conversation function"
522
-
523
- # render_markdown(output)
524
- # print(f"LLM response: {output}")
525
- # print(f"Messages: {messages}")
526
- # print("type of output", type(output))
527
- return {"messages": messages, "output": output}
528
-
529
-
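
execute_llm_question wrapped the streaming and non-streaming paths above; when stream=False it returned a dict with "messages" and "output" keys. A sketch under the same Ollama assumption:

from npcsh.llm_funcs import execute_llm_question

result = execute_llm_question(
    "Explain what a Python wheel file is.",
    model="llama3.2",
    provider="ollama",
    stream=False,
)
print(result["output"])
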
530
- def execute_llm_command(
531
- command: str,
532
- model: Optional[str] = None,
533
- provider: Optional[str] = None,
534
- api_url: str = NPCSH_API_URL,
535
- api_key: str = None,
536
- npc: Optional[Any] = None,
537
- messages: Optional[List[Dict[str, str]]] = None,
538
- retrieved_docs=None,
539
- n_docs=5,
540
- stream=False,
541
- context=None,
542
- ) -> str:
543
- """
544
- Function Description:
545
- This function executes an LLM command.
546
- Args:
547
- command (str): The command to execute.
548
-
549
- Keyword Args:
550
- model (Optional[str]): The model to use for executing the command.
551
- provider (Optional[str]): The provider to use for executing the command.
552
- npc (Optional[Any]): The NPC object.
553
- messages (Optional[List[Dict[str, str]]): The list of messages.
554
- retrieved_docs (Optional): The retrieved documents.
555
- n_docs (int): The number of documents.
556
- Returns:
557
- str: The result of the LLM command.
558
- """
559
-
560
- max_attempts = 5
561
- attempt = 0
562
- subcommands = []
563
- npc_name = npc.name if npc else "sibiji"
564
- location = os.getcwd()
565
- print(f"{npc_name} generating command")
566
- # Create context from retrieved documents
567
- context = ""
568
- if retrieved_docs:
569
- for filename, content in retrieved_docs[:n_docs]:
570
- # print(f"Document: {filename}")
571
- # print(content)
572
- context += f"Document: {filename}\n{content}\n\n"
573
- context = f"Refer to the following documents for context:\n{context}\n\n"
574
- while attempt < max_attempts:
575
- prompt = f"""
576
- A user submitted this query: {command}.
577
- You need to generate a bash command that will accomplish the user's intent.
578
- Respond ONLY with the command that should be executed.
579
- in the json key "bash_command".
580
- You must reply with valid json and nothing else. Do not include markdown formatting
581
- """
582
- if len(context) > 0:
583
- prompt += f"""
584
- What follows is the context of the text files in the user's directory that are potentially relevant to their request
585
- Use these to help inform your decision.
586
- {context}
587
- """
588
- if len(messages) > 0:
589
- prompt += f"""
590
- The following messages have been exchanged between the user and the assistant:
591
- {messages}
592
- """
593
-
594
- response = get_llm_response(
595
- prompt,
596
- model=model,
597
- provider=provider,
598
- api_url=api_url,
599
- api_key=api_key,
600
- messages=[],
601
- npc=npc,
602
- format="json",
603
- context=context,
604
- )
605
-
606
- llm_response = response.get("response", {})
607
- # messages.append({"role": "assistant", "content": llm_response})
608
- # print(f"LLM response type: {type(llm_response)}")
609
- # print(f"LLM response: {llm_response}")
610
-
611
- try:
612
- if isinstance(llm_response, str):
613
- llm_response = json.loads(llm_response)
614
-
615
- if isinstance(llm_response, dict) and "bash_command" in llm_response:
616
- bash_command = llm_response["bash_command"]
617
- else:
618
- raise ValueError("Invalid response format from LLM")
619
- except (json.JSONDecodeError, ValueError) as e:
620
- print(f"Error parsing LLM response: {e}")
621
- attempt += 1
622
- continue
623
-
624
- print(f"LLM suggests the following bash command: {bash_command}")
625
- subcommands.append(bash_command)
626
-
627
- try:
628
- print(f"Running command: {bash_command}")
629
- result = subprocess.run(
630
- bash_command, shell=True, text=True, capture_output=True, check=True
631
- )
632
- print(f"Command executed with output: {result.stdout}")
633
-
634
- prompt = f"""
635
- Here was the output of the result for the {command} inquiry
636
- which ran this bash command {bash_command}:
637
-
638
- {result.stdout}
639
-
640
- Provide a simple response to the user that explains to them
641
- what you did and how it accomplishes what they asked for.
642
- """
643
- if len(context) > 0:
644
- prompt += f"""
645
- What follows is the context of the text files in the user's directory that are potentially relevant to their request
646
- Use these to help inform how you respond.
647
- You must read the context and use it to provide the user with a more helpful answer related to their specific text data.
648
-
649
- CONTEXT:
650
-
651
- {context}
652
- """
653
- messages.append({"role": "user", "content": prompt})
654
- # print(messages, stream)
655
- if stream:
656
- response = get_stream(
657
- messages,
658
- model=model,
659
- provider=provider,
660
- api_url=api_url,
661
- api_key=api_key,
662
- npc=npc,
663
- )
664
- return response
665
-
666
- else:
667
- response = get_llm_response(
668
- prompt,
669
- model=model,
670
- provider=provider,
671
- api_url=api_url,
672
- api_key=api_key,
673
- npc=npc,
674
- messages=messages,
675
- context=context,
676
- )
677
- output = response.get("response", "")
678
-
679
- # render_markdown(output)
680
-
681
- return {"messages": messages, "output": output}
682
- except subprocess.CalledProcessError as e:
683
- print(f"Command failed with error:")
684
- print(e.stderr)
685
-
686
- error_prompt = f"""
687
- The command '{bash_command}' failed with the following error:
688
- {e.stderr}
689
- Please suggest a fix or an alternative command.
690
- Respond with a JSON object containing the key "bash_command" with the suggested command.
691
- Do not include any additional markdown formatting.
692
-
693
- """
694
-
695
- if len(context) > 0:
696
- error_prompt += f"""
697
- What follows is the context of the text files in the user's directory that are potentially relevant to their request
698
- Use these to help inform your decision.
699
- {context}
700
- """
701
-
702
- fix_suggestion = get_llm_response(
703
- error_prompt,
704
- model=model,
705
- provider=provider,
706
- npc=npc,
707
- api_url=api_url,
708
- api_key=api_key,
709
- format="json",
710
- messages=messages,
711
- context=context,
712
- )
713
-
714
- fix_suggestion_response = fix_suggestion.get("response", {})
715
-
716
- try:
717
- if isinstance(fix_suggestion_response, str):
718
- fix_suggestion_response = json.loads(fix_suggestion_response)
719
-
720
- if (
721
- isinstance(fix_suggestion_response, dict)
722
- and "bash_command" in fix_suggestion_response
723
- ):
724
- print(
725
- f"LLM suggests fix: {fix_suggestion_response['bash_command']}"
726
- )
727
- command = fix_suggestion_response["bash_command"]
728
- else:
729
- raise ValueError(
730
- "Invalid response format from LLM for fix suggestion"
731
- )
732
- except (json.JSONDecodeError, ValueError) as e:
733
- print(f"Error parsing LLM fix suggestion: {e}")
734
-
735
- attempt += 1
736
-
737
- return {
738
- "messages": messages,
739
- "output": "Max attempts reached. Unable to execute the command successfully.",
740
- }
741
-
742
-
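
execute_llm_command asked the model for a bash command and then ran it with subprocess, so it should only be exercised with prompts you trust. Note that messages must be passed as a list, since the function calls len() on it and appends to it; a sketch:

from npcsh.llm_funcs import execute_llm_command

result = execute_llm_command(
    "show the five largest files in the current directory",
    model="llama3.2",
    provider="ollama",
    messages=[],   # required: may not be left as None in this version
)
print(result["output"])
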
743
- def check_llm_command(
744
- command: str,
745
- model: str = NPCSH_CHAT_MODEL,
746
- provider: str = NPCSH_CHAT_PROVIDER,
747
- reasoning_model: str = NPCSH_REASONING_MODEL,
748
- reasoning_provider: str = NPCSH_REASONING_PROVIDER,
749
- api_url: str = NPCSH_API_URL,
750
- api_key: str = None,
751
- npc: Any = None,
752
- retrieved_docs=None,
753
- messages: List[Dict[str, str]] = None,
754
- images: list = None,
755
- n_docs=5,
756
- stream=False,
757
- context=None,
758
- ):
759
- """
760
- Function Description:
761
- This function checks an LLM command.
762
- Args:
763
- command (str): The command to check.
764
- Keyword Args:
765
- model (str): The model to use for checking the command.
766
- provider (str): The provider to use for checking the command.
767
- npc (Any): The NPC object.
768
- retrieved_docs (Any): The retrieved documents.
769
- n_docs (int): The number of documents.
770
- Returns:
771
- Any: The result of checking the LLM command.
772
- """
773
-
774
- ENTER_REASONING_FLOW = False
775
- if NPCSH_DEFAULT_MODE == "reasoning":
776
- ENTER_REASONING_FLOW = True
777
- if model in available_reasoning_models:
778
- print(
779
- """
780
- The provided model is a reasoning model; defaulting to a non-reasoning model for
781
- ReAct choices, then entering the reasoning flow
782
- """
783
- )
784
- reasoning_model = model
785
- reasoning_provider = provider
786
-
787
- model = NPCSH_CHAT_MODEL
788
- provider = NPCSH_CHAT_PROVIDER
789
- if messages is None:
790
- messages = []
791
-
792
- # print(model, provider, npc)
793
- # Create context from retrieved documents
794
- docs_context = ""
795
-
796
- if retrieved_docs:
797
- for filename, content in retrieved_docs[:n_docs]:
798
- docs_context += f"Document: {filename}\n{content}\n\n"
799
- docs_context = (
800
- f"Refer to the following documents for context:\n{docs_context}\n\n"
801
- )
802
-
803
- prompt = f"""
804
- A user submitted this query: {command}
805
-
806
- Determine the nature of the user's request:
807
-
808
- 1. Should a tool be invoked to fulfill the request?
809
-
810
- 2. Is it a general question that requires an informative answer or a highly specific question that
811
- requires information on the web?
812
-
813
- 3. Would this question be best answered by an alternative NPC?
814
-
815
- 4. Is it a complex request that actually requires more than one
816
- tool to be called, perhaps in a sequence?
817
-
818
- 5. is there a need for the user to provide additional input to fulfill the request?
819
-
820
-
821
-
822
- Available tools:
823
- """
824
-
825
- if (npc.tools_dict is None or npc.tools_dict == {}) & (
826
- npc.all_tools_dict is None or npc.all_tools_dict == {}
827
- ):
828
- prompt += "No tools available. Do not invoke tools."
829
- else:
830
- tools_set = {}
831
-
832
- if npc.tools_dict is not None:
833
- for tool_name, tool in npc.tools_dict.items():
834
- if tool_name not in tools_set:
835
- tools_set[tool_name] = tool.description
836
- if npc.all_tools_dict is not None:
837
- for tool_name, tool in npc.all_tools_dict.items():
838
- if tool_name not in tools_set:
839
- tools_set[tool_name] = tool.description
840
-
841
- for tool_name, tool_description in tools_set.items():
842
- prompt += f"""
843
-
844
- {tool_name} : {tool_description} \n
845
- """
846
-
847
- prompt += f"""
848
- Available NPCs for alternative answers:
849
-
850
- """
851
- if len(npc.resolved_npcs) == 0:
852
- prompt += "No NPCs available for alternative answers."
853
- else:
854
- print(npc.resolved_npcs)
855
- for i, npc_in_network in enumerate(npc.resolved_npcs):
856
- name = list(npc_in_network.keys())[0]
857
- npc_obj = npc_in_network[name]
858
-
859
- if hasattr(npc_obj, "name"):
860
- name_to_include = npc_obj.name
861
- elif "name " in npc_obj:
862
- name_to_include = npc_obj["name"]
863
-
864
- if hasattr(npc_obj, "primary_directive"):
865
- primary_directive_to_include = npc_obj.primary_directive
866
- elif "primary_directive" in npc_obj:
867
- primary_directive_to_include = npc_obj["primary_directive"]
868
- prompt += f"""
869
- ({i})
870
-
871
- NPC: {name_to_include}
872
- Primary Directive : {primary_directive_to_include}
873
-
874
- """
875
- if npc.shared_context:
876
- prompt += f"""
877
- Relevant shared context for the npc:
878
- {npc.shared_context}
879
- """
880
- # print("shared_context: " + str(npc.shared_context))
881
- # print(prompt)
882
-
883
- prompt += f"""
884
- In considering how to answer this, consider:
885
-
886
- - Whether more context from the user is required to adequately answer the question.
887
- e.g. if a user asks for a joke about their favorite city but they don't include the city,
888
- it would be helpful to ask for that information. Similarly, if a user asks to open a browser
889
- and to check the weather in a city, it would be helpful to ask for the city and which website
890
- or source to use.
891
- - Whether a tool should be used.
892
-
893
-
894
- Excluding time-sensitive phenomena,
895
- most general questions can be answered without any
896
- extra tools or agent passes.
897
- Only use tools or pass to other NPCs
898
- when it is obvious that the answer needs to be as up-to-date as possible. For example,
899
- a question about where mount everest is does not necessarily need to be answered by a tool call or an agent pass.
900
- Similarly, if a user asks to explain the plot of the aeneid, this can be answered without a tool call or agent pass.
901
-
902
- If a user were to ask for the current weather in tokyo or the current price of bitcoin or who the mayor of a city is,
903
- then a tool call or agent pass may be appropriate.
904
-
905
- Tools are valuable but their use should be limited and purposeful to
906
- ensure the best user experience.
907
-
908
- Respond with a JSON object containing:
909
- - "action": one of ["invoke_tool", "answer_question", "pass_to_npc", "execute_sequence", "request_input"]
910
- - "tool_name": : if action is "invoke_tool": the name of the tool to use.
911
- else if action is "execute_sequence", a list of tool names to use.
912
- - "explanation": a brief explanation of why you chose this action.
913
- - "npc_name": (if action is "pass_to_npc") the name of the NPC to pass the question , else if action is "execute_sequence", a list of
914
- npcs to pass the question to in order.
915
-
916
-
917
-
918
- Return only the JSON object. Do not include any additional text.
919
-
920
- The format of the JSON object is:
921
- {{
922
- "action": "invoke_tool" | "answer_question" | "pass_to_npc" | "execute_sequence" | "request_input",
923
- "tool_name": "<tool_name(s)_if_applicable>",
924
- "explanation": "<your_explanation>",
925
- "npc_name": "<npc_name(s)_if_applicable>"
926
- }}
927
-
928
- If you execute a sequence, ensure that you have a specified NPC for each tool use.
929
-
930
- Remember, do not include ANY ADDITIONAL MARKDOWN FORMATTING.
931
- There should be no leading ```json.
932
-
933
- """
934
-
935
- if docs_context:
936
- prompt += f"""
937
- Relevant context from user files.
938
-
939
- {docs_context}
940
-
941
- """
942
- if context:
943
- prompt += f"""
944
- Relevant context from users:
945
-
946
- {context}
947
-
948
- """
949
- action_response = get_llm_response(
950
- prompt,
951
- model=model,
952
- provider=provider,
953
- api_url=api_url,
954
- api_key=api_key,
955
- npc=npc,
956
- format="json",
957
- messages=[],
958
- context=None,
959
- )
960
- if "Error" in action_response:
961
- print(f"LLM Error: {action_response['error']}")
962
- return action_response["error"]
963
-
964
- response_content = action_response.get("response", {})
965
-
966
- if isinstance(response_content, str):
967
- try:
968
- response_content_parsed = json.loads(response_content)
969
- except json.JSONDecodeError as e:
970
- print(
971
- f"Invalid JSON received from LLM: {e}. Response was: {response_content}"
972
- )
973
- return f"Error: Invalid JSON from LLM: {response_content}"
974
- else:
975
- response_content_parsed = response_content
976
-
977
- action = response_content_parsed.get("action")
978
- explanation = response_content["explanation"]
979
- print(f"action chosen: {action}")
980
- print(f"explanation given: {explanation}")
981
-
982
- if response_content_parsed.get("tool_name"):
983
- print(f"tool name: {response_content_parsed.get('tool_name')}")
984
-
985
- if action == "execute_command":
986
- # Pass messages to execute_llm_command
987
- result = execute_llm_command(
988
- command,
989
- model=model,
990
- provider=provider,
991
- api_url=api_url,
992
- api_key=api_key,
993
- messages=[],
994
- npc=npc,
995
- retrieved_docs=retrieved_docs,
996
- stream=stream,
997
- )
998
- if stream:
999
- return result
1000
-
1001
- output = result.get("output", "")
1002
- messages = result.get("messages", messages)
1003
- return {"messages": messages, "output": output}
1004
-
1005
- elif action == "invoke_tool":
1006
- tool_name = response_content_parsed.get("tool_name")
1007
- # print(npc)
1008
- print(f"tool name: {tool_name}")
1009
- result = handle_tool_call(
1010
- command,
1011
- tool_name,
1012
- model=model,
1013
- provider=provider,
1014
- api_url=api_url,
1015
- api_key=api_key,
1016
- messages=messages,
1017
- npc=npc,
1018
- retrieved_docs=retrieved_docs,
1019
- stream=stream,
1020
- )
1021
- if stream:
1022
- return result
1023
- messages = result.get("messages", messages)
1024
- output = result.get("output", "")
1025
- return {"messages": messages, "output": output}
1026
-
1027
- elif action == "answer_question":
1028
- if ENTER_REASONING_FLOW:
1029
- print("entering reasoning flow")
1030
- result = enter_reasoning_human_in_the_loop(
1031
- messages, reasoning_model, reasoning_provider
1032
- )
1033
- else:
1034
- result = execute_llm_question(
1035
- command,
1036
- model=model,
1037
- provider=provider,
1038
- api_url=api_url,
1039
- api_key=api_key,
1040
- messages=messages,
1041
- npc=npc,
1042
- retrieved_docs=retrieved_docs,
1043
- stream=stream,
1044
- images=images,
1045
- )
1046
-
1047
- if stream:
1048
- return result
1049
- messages = result.get("messages", messages)
1050
- output = result.get("output", "")
1051
- return {"messages": messages, "output": output}
1052
- elif action == "pass_to_npc":
1053
- npc_to_pass = response_content_parsed.get("npc_name")
1054
- # print(npc)
1055
- # get the actual npc object from the npc.resolved_npcs
1056
- npc_to_pass_obj = None
1057
- for npc_obj in npc.resolved_npcs:
1058
- if npc_to_pass in npc_obj:
1059
- npc_to_pass_obj = npc_obj[npc_to_pass]
1060
- break
1061
- return npc.handle_agent_pass(
1062
- npc_to_pass_obj,
1063
- command,
1064
- messages=messages,
1065
- retrieved_docs=retrieved_docs,
1066
- n_docs=n_docs,
1067
- )
1068
- elif action == "request_input":
1069
- explanation = response_content_parsed.get("explanation")
1070
-
1071
- request_input = handle_request_input(
1072
- f"Explanation from check_llm_command: {explanation} \n for the user input command: {command}",
1073
- model=model,
1074
- provider=provider,
1075
- )
1076
- # pass it back through with the request input added to the end of the messages
1077
- # so that we can re-pass the result through the check_llm_command.
1078
-
1079
- messages.append(
1080
- {
1081
- "role": "assistant",
1082
- "content": f"""It's clear that extra input is required.
1083
- could you please provide it? Here is the reason:
1084
-
1085
- {explanation},
1086
-
1087
- and the prompt: {command}""",
1088
- }
1089
- )
1090
- messages.append(
1091
- {
1092
- "role": "user",
1093
- "content": command + " \n \n \n extra context: " + request_input,
1094
- }
1095
- )
1096
-
1097
- return check_llm_command(
1098
- command + " \n \n \n extra context: " + request_input,
1099
- model=model,
1100
- provider=provider,
1101
- api_url=api_url,
1102
- api_key=api_key,
1103
- npc=npc,
1104
- messages=messages,
1105
- retrieved_docs=retrieved_docs,
1106
- n_docs=n_docs,
1107
- stream=stream,
1108
- )
1109
-
1110
- elif action == "execute_sequence":
1111
- tool_names = response_content_parsed.get("tool_name")
1112
- npc_names = response_content_parsed.get("npc_name")
1113
- # print(npc_names)
1114
- npcs = []
1115
- # print(tool_names, npc_names)
1116
- if isinstance(npc_names, list):
1117
- for npc_name in npc_names:
1118
- for npc_obj in npc.resolved_npcs:
1119
- if npc_name in npc_obj:
1120
- npcs.append(npc_obj[npc_name])
1121
- break
1122
-
1123
- output = ""
1124
- results_tool_calls = []
1125
-
1126
- if len(tool_names) > 0:
1127
- for npc_obj, tool_name in zip(npcs, tool_names):
1128
- result = handle_tool_call(
1129
- command,
1130
- tool_name,
1131
- model=model,
1132
- provider=provider,
1133
- api_url=api_url,
1134
- api_key=api_key,
1135
- messages=messages,
1136
- npc=npc_obj,
1137
- retrieved_docs=retrieved_docs,
1138
- stream=stream,
1139
- )
1140
- results_tool_calls.append(result)
1141
- messages = result.get("messages", messages)
1142
- output += result.get("output", "")
1143
- print(output)
1144
- else:
1145
- for npc_obj in npcs:
1146
- result = npc.handle_agent_pass(
1147
- npc_obj,
1148
- command,
1149
- messages=messages,
1150
- retrieved_docs=retrieved_docs,
1151
- n_docs=n_docs,
1152
- shared_context=npc.shared_context,
1153
- )
1154
-
1155
- messages = result.get("messages", messages)
1156
- results_tool_calls.append(result.get("response"))
1157
- print(messages[-1])
1158
- # import pdb
1159
-
1160
- # pdb.set_trace()
1161
-
1162
- return {"messages": messages, "output": output}
1163
- else:
1164
- print("Error: Invalid action in LLM response")
1165
- return "Error: Invalid action in LLM response"
1166
-
1167
-
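
check_llm_command was the ReAct-style router: it consulted the NPC's tools and fellow NPCs, asked the model to pick an action, and dispatched to the handlers above. It requires a compiled NPC object (tools_dict, resolved_npcs, shared_context), so the sketch below only shows the call shape; my_npc is a hypothetical NPC instance loaded elsewhere (e.g. via npc_compiler in 0.3.31):

from npcsh.llm_funcs import check_llm_command

result = check_llm_command(
    "what is the current weather in Tokyo?",
    model="llama3.2",
    provider="ollama",
    npc=my_npc,       # hypothetical: a compiled NPC with tools and team members resolved
    messages=[],
    stream=False,
)
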
1168
- def handle_tool_call(
1169
- command: str,
1170
- tool_name: str,
1171
- model: str = NPCSH_CHAT_MODEL,
1172
- provider: str = NPCSH_CHAT_PROVIDER,
1173
- api_url: str = NPCSH_API_URL,
1174
- api_key: str = None,
1175
- messages: List[Dict[str, str]] = None,
1176
- npc: Any = None,
1177
- retrieved_docs=None,
1178
- n_docs: int = 5,
1179
- stream=False,
1180
- n_attempts=3,
1181
- attempt=0,
1182
- context=None,
1183
- ) -> Union[str, Dict[str, Any]]:
1184
- """
1185
- Function Description:
1186
- This function handles a tool call.
1187
- Args:
1188
- command (str): The command.
1189
- tool_name (str): The tool name.
1190
- Keyword Args:
1191
- model (str): The model to use for handling the tool call.
1192
- provider (str): The provider to use for handling the tool call.
1193
- messages (List[Dict[str, str]]): The list of messages.
1194
- npc (Any): The NPC object.
1195
- retrieved_docs (Any): The retrieved documents.
1196
- n_docs (int): The number of documents.
1197
- Returns:
1198
- Union[str, Dict[str, Any]]: The result of handling
1199
- the tool call.
1200
-
1201
- """
1202
- # print(npc)
1203
- print("handling tool call")
1204
- if not npc:
1205
- print(
1206
- f"No tools available for NPC '{npc.name}' or tools_dict is empty. Available tools: {available_tools}"
1207
- )
1208
- return f"No tools are available for NPC '{npc.name or 'default'}'."
1209
-
1210
- if tool_name not in npc.all_tools_dict and tool_name not in npc.tools_dict:
1211
- print("not available")
1212
- print(f"Tool '{tool_name}' not found in NPC's tools_dict.")
1213
- print("available tools", npc.all_tools_dict)
1214
- return f"Tool '{tool_name}' not found."
1215
-
1216
- if tool_name in npc.all_tools_dict:
1217
- tool = npc.all_tools_dict[tool_name]
1218
- elif tool_name in npc.tools_dict:
1219
- tool = npc.tools_dict[tool_name]
1220
- print(f"Tool found: {tool.tool_name}")
1221
- jinja_env = Environment(loader=FileSystemLoader("."), undefined=Undefined)
1222
-
1223
- prompt = f"""
1224
- The user wants to use the tool '{tool_name}' with the following request:
1225
- '{command}'
1226
- Here is the tool file:
1227
- ```
1228
- {tool.to_dict()}
1229
- ```
1230
-
1231
- Please extract the required inputs for the tool as a JSON object.
1232
- They must be exactly as they are named in the tool.
1233
- Return only the JSON object without any markdown formatting.
1234
-
1235
- """
1236
-
1237
- if npc and hasattr(npc, "shared_context"):
1238
- if npc.shared_context.get("dataframes"):
1239
- context_info = "\nAvailable dataframes:\n"
1240
- for df_name in npc.shared_context["dataframes"].keys():
1241
- context_info += f"- {df_name}\n"
1242
- prompt += f"""Here is contextual info that may affect your choice: {context_info}
1243
- """
1244
- if context is not None:
1245
- prompt += f"Here is some additional context: {context}"
1246
-
1247
- # print(prompt)
1248
-
1249
- # print(
1250
- # print(prompt)
1251
- response = get_llm_response(
1252
- prompt,
1253
- format="json",
1254
- model=model,
1255
- provider=provider,
1256
- api_url=api_url,
1257
- api_key=api_key,
1258
- npc=npc,
1259
- )
1260
- try:
1261
- # Clean the response of markdown formatting
1262
- response_text = response.get("response", "{}")
1263
- if isinstance(response_text, str):
1264
- response_text = (
1265
- response_text.replace("```json", "").replace("```", "").strip()
1266
- )
1267
-
1268
- # Parse the cleaned response
1269
- if isinstance(response_text, dict):
1270
- input_values = response_text
1271
- else:
1272
- input_values = json.loads(response_text)
1273
- # print(f"Extracted inputs: {input_values}")
1274
- except json.JSONDecodeError as e:
1275
- print(f"Error decoding input values: {e}. Raw response: {response}")
1276
- return f"Error extracting inputs for tool '{tool_name}'"
1277
- # Input validation (example):
1278
- required_inputs = tool.inputs
1279
- missing_inputs = []
1280
- for inp in required_inputs:
1281
- if not isinstance(inp, dict):
1282
- # dicts contain the keywords, so it's fine if they're missing from the inputs.
1283
- if inp not in input_values or input_values[inp] == "":
1284
- missing_inputs.append(inp)
1285
- if len(missing_inputs) > 0:
1286
- # print(f"Missing required inputs for tool '{tool_name}': {missing_inputs}")
1287
- if attempt < n_attempts:
1288
- print(f"attempt {attempt+1} to generate inputs failed, trying again")
1289
- print("missing inputs", missing_inputs)
1290
- # print("llm response", response)
1291
- print("input values", input_values)
1292
- return handle_tool_call(
1293
- command,
1294
- tool_name,
1295
- model=model,
1296
- provider=provider,
1297
- messages=messages,
1298
- npc=npc,
1299
- api_url=api_url,
1300
- api_key=api_key,
1301
- retrieved_docs=retrieved_docs,
1302
- n_docs=n_docs,
1303
- stream=stream,
1304
- attempt=attempt + 1,
1305
- n_attempts=n_attempts,
1306
- )
1307
- return {
1308
- "output": f"Missing inputs for tool '{tool_name}': {missing_inputs}",
1309
- "messages": messages,
1310
- }
1311
-
1312
- # try:
1313
- print("Executing tool with input values:", input_values)
1314
-
1315
- try:
1316
- tool_output = tool.execute(
1317
- input_values,
1318
- npc.all_tools_dict,
1319
- jinja_env,
1320
- command,
1321
- model=model,
1322
- provider=provider,
1323
- npc=npc,
1324
- stream=stream,
1325
- messages=messages,
1326
- )
1327
- if not stream:
1328
- if "Error" in tool_output:
1329
- raise Exception(tool_output)
1330
- except Exception as e:
1331
- # diagnose_problem = get_llm_response(
1332
- ## f"""a problem has occurred.
1333
- # Please provide a diagnosis of the problem and a suggested #fix.
1334
-
1335
- # The tool call failed with this error:
1336
- # {e}
1337
- # Please return a json object containing two fields
1338
- ## -problem
1339
- # -suggested solution.
1340
- # do not include any additional markdown formatting or #leading json tags
1341
-
1342
- # """,
1343
- # model=model,
1344
- # provider=provider,
1345
- # npc=npc,
1346
- ## api_url=api_url,
1347
- # api_ley=api_key,
1348
- # format="json",
1349
- # )
1350
- # print(e)
1351
- # problem = diagnose_problem.get("response", {}).get("problem")
1352
- # suggested_solution = diagnose_problem.get("response", {}).get(
1353
- # "suggested_solution"
1354
- # )
1355
-
1356
- tool_output = handle_tool_call(
1357
- command,
1358
- tool_name,
1359
- model=model,
1360
- provider=provider,
1361
- messages=messages,
1362
- npc=npc,
1363
- api_url=api_url,
1364
- api_key=api_key,
1365
- retrieved_docs=retrieved_docs,
1366
- n_docs=n_docs,
1367
- stream=stream,
1368
- attempt=attempt + 1,
1369
- n_attempts=n_attempts,
1370
- context=f""" \n \n \n "tool failed: {e} \n \n \n here was the previous attempt: {input_values}""",
1371
- )
1372
-
1373
- if stream:
1374
- return tool_output
1375
- # print(f"Tool output: {tool_output}")
1376
- # render_markdown(str(tool_output))
1377
- if messages is not None: # Check if messages is not None
1378
- messages.append({"role": "assistant", "content": tool_output})
1379
- return {"messages": messages, "output": tool_output}
1380
- # except Exception as e:
1381
- # print(f"Error executing tool {tool_name}: {e}")
1382
- # return f"Error executing tool {tool_name}: {e}"
1383
-
1384
-
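
handle_tool_call resolved the named tool from the NPC's tool dictionaries, asked the model to fill the tool's inputs as JSON, and executed it with up to n_attempts retries. A call-shape sketch only; my_npc is hypothetical and "screen_cap" stands in for one of its registered tools (a screen_cap.tool ships in the 0.3.31 npc_team folder listed above):

from npcsh.llm_funcs import handle_tool_call

result = handle_tool_call(
    "take a screenshot of my current screen",
    "screen_cap",          # placeholder tool name; must exist in npc.tools_dict / all_tools_dict
    model="llama3.2",
    provider="ollama",
    npc=my_npc,            # hypothetical compiled NPC object
    messages=[],
)
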
1385
- def execute_data_operations(
1386
- query: str,
1387
- dataframes: Dict[str, pd.DataFrame],
1388
- npc: Any = None,
1389
- db_path: str = "~/npcsh_history.db",
1390
- ):
1391
- """
1392
- Function Description:
1393
- This function executes data operations.
1394
- Args:
1395
- query (str): The query to execute.
1396
-
1397
- dataframes (Dict[str, pd.DataFrame]): The dictionary of dataframes.
1398
- Keyword Args:
1399
- npc (Any): The NPC object.
1400
- db_path (str): The database path.
1401
- Returns:
1402
- Any: The result of the data operations.
1403
- """
1404
-
1405
- location = os.getcwd()
1406
- db_path = os.path.expanduser(db_path)
1407
-
1408
- try:
1409
- try:
1410
- # Create a safe namespace for pandas execution
1411
- namespace = {
1412
- "pd": pd,
1413
- "np": np,
1414
- "plt": plt,
1415
- **dataframes, # This includes all our loaded dataframes
1416
- }
1417
- # Execute the query
1418
- result = eval(query, namespace)
1419
-
1420
- # Handle the result
1421
- if isinstance(result, (pd.DataFrame, pd.Series)):
1422
- # render_markdown(result)
1423
- return result, "pd"
1424
- elif isinstance(result, plt.Figure):
1425
- plt.show()
1426
- return result, "pd"
1427
- elif result is not None:
1428
- # render_markdown(result)
1429
-
1430
- return result, "pd"
1431
-
1432
- except Exception as exec_error:
1433
- print(f"Pandas Error: {exec_error}")
1434
-
1435
- # 2. Try SQL
1436
- # print(db_path)
1437
- try:
1438
- with sqlite3.connect(db_path) as conn:
1439
- cursor = conn.cursor()
1440
- print(query)
1441
- print(get_available_tables(db_path))
1442
-
1443
- cursor.execute(query)
1444
- # get available tables
1445
-
1446
- result = cursor.fetchall()
1447
- if result:
1448
- for row in result:
1449
- print(row)
1450
- return result, "sql"
1451
- except Exception as e:
1452
- print(f"SQL Error: {e}")
1453
-
1454
- # 3. Try R
1455
- try:
1456
- result = subprocess.run(
1457
- ["Rscript", "-e", query], capture_output=True, text=True
1458
- )
1459
- if result.returncode == 0:
1460
- print(result.stdout)
1461
- return result.stdout, "r"
1462
- else:
1463
- print(f"R Error: {result.stderr}")
1464
- except Exception as e:
1465
- pass
1466
-
1467
- # If all engines fail, ask the LLM
1468
- print("Direct execution failed. Asking LLM for SQL query...")
1469
- llm_prompt = f"""
1470
- The user entered the following query which could not be executed directly using pandas, SQL, R, Scala, or PySpark:
1471
- ```
1472
- {query}
1473
- ```
1474
-
1475
- The available tables in the SQLite database at {db_path} are:
1476
- ```sql
1477
- {get_available_tables(db_path)}
1478
- ```
1479
-
1480
- Please provide a valid SQL query that accomplishes the user's intent. If the query requires data from a file, provide instructions on how to load the data into a table first.
1481
- Return only the SQL query, or instructions for loading data followed by the SQL query.
1482
- """
1483
-
1484
- llm_response = get_llm_response(llm_prompt, npc=npc)
1485
-
1486
- print(f"LLM suggested SQL: {llm_response}")
1487
- command = llm_response.get("response", "")
1488
- if command == "":
1489
- return "LLM did not provide a valid SQL query.", None
1490
- # Execute the LLM-generated SQL
1491
- try:
1492
- with sqlite3.connect(db_path) as conn:
1493
- cursor = conn.cursor()
1494
- cursor.execute(command)
1495
- result = cursor.fetchall()
1496
- if result:
1497
- for row in result:
1498
- print(row)
1499
- return result, "llm"
1500
- except Exception as e:
1501
- print(f"Error executing LLM-generated SQL: {e}")
1502
- return f"Error executing LLM-generated SQL: {e}", None
1503
-
1504
- except Exception as e:
1505
- print(f"Error executing query: {e}")
1506
- return f"Error executing query: {e}", None
1507
-
1508
-
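
execute_data_operations tried the query as a pandas expression, then as SQL against the history database, then as R, before asking the LLM for a SQL query. Note that the pandas branch references plt without importing matplotlib in this module, so in 0.3.31 that path likely raised and fell through to the fallbacks. A call sketch with illustrative data:

import pandas as pd
from npcsh.llm_funcs import execute_data_operations

dfs = {"sales": pd.DataFrame({"region": ["na", "eu"], "revenue": [10, 12]})}
# Returns a (result, engine) tuple, where engine is "pd", "sql", "r", or "llm"
# depending on which execution path ultimately succeeded.
result, engine = execute_data_operations("sales.groupby('region').revenue.sum()", dfs)
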
1509
- def check_output_sufficient(
1510
- request: str,
1511
- data: pd.DataFrame,
1512
- query: str,
1513
- model: str = None,
1514
- provider: str = None,
1515
- npc: Any = None,
1516
- ) -> Dict[str, Any]:
1517
- """
1518
- Check if the query results are sufficient to answer the user's request.
1519
- """
1520
- prompt = f"""
1521
- Given:
1522
- - User request: {request}
1523
- - Query executed: {query}
1524
- - Results:
1525
- Summary: {data.describe()}
1526
- data schema: {data.dtypes}
1527
- Sample: {data.head()}
1528
-
1529
- Is this result sufficient to answer the user's request?
1530
- Return JSON with:
1531
- {{
1532
- "IS_SUFFICIENT": <boolean>,
1533
- "EXPLANATION": <string : If the answer is not sufficient specify what else is necessary.
1534
- IFF the answer is sufficient, provide a response that can be returned to the user as an explanation that answers their question.
1535
- The explanation should use the results to answer their question as long as they would be useful to the user.
1536
- For example, it is not useful to report on the "average/min/max/std ID" or the "min/max/std/average of a string column".
1537
-
1538
- Be smart about what you report.
1539
- It should not be a conceptual or abstract summary of the data.
1540
- It should not unnecessarily bring up a need for more data.
1541
- You should write it in a tone that answers the user request. Do not spout unnecessary self-referential fluff like "This information gives a clear overview of the x landscape".
1542
- >
1543
- }}
1544
- DO NOT include markdown formatting or ```json tags.
1545
-
1546
- """
1547
-
1548
- response = get_llm_response(
1549
- prompt, format="json", model=model, provider=provider, npc=npc
1550
- )
1551
-
1552
- # Clean response if it's a string
1553
- result = response.get("response", {})
1554
- if isinstance(result, str):
1555
- result = result.replace("```json", "").replace("```", "").strip()
1556
- try:
1557
- result = json.loads(result)
1558
- except json.JSONDecodeError:
1559
- return {"IS_SUFFICIENT": False, "EXPLANATION": "Failed to parse response"}
1560
-
1561
- return result
1562
-
1563
-
1564
- def process_data_output(
-     llm_response: Dict[str, Any],
-     db_conn,
-     request: str,
-     tables: str = None,
-     history: str = None,
-     npc: Any = None,
-     model: str = None,
-     provider: str = None,
- ) -> Dict[str, Any]:
-     """
-     Process the LLM's response to a data request and execute the appropriate query.
-     """
-     try:
-         choice = llm_response.get("choice")
-         query = llm_response.get("query")
-
-         if not query:
-             return {"response": "No query provided", "code": 400}
-
-         # Create SQLAlchemy engine based on connection type
-         if "psycopg2" in db_conn.__class__.__module__:
-             engine = create_engine("postgresql://caug:gobears@localhost/npc_test")
-         else:
-             engine = create_engine("sqlite:///test_sqlite.db")
-
-         if choice == 1:  # Direct answer query
-             try:
-                 df = pd.read_sql_query(query, engine)
-                 result = check_output_sufficient(
-                     request, df, query, model=model, provider=provider, npc=npc
-                 )
-
-                 if result.get("IS_SUFFICIENT"):
-                     return {"response": result["EXPLANATION"], "data": df, "code": 200}
-                 return {
-                     "response": f"Results insufficient: {result.get('EXPLANATION')}",
-                     "code": 400,
-                 }
-
-             except Exception as e:
-                 return {"response": f"Query execution failed: {str(e)}", "code": 400}
-
-         elif choice == 2:  # Exploratory query
-             try:
-                 df = pd.read_sql_query(query, engine)
-                 extra_context = f"""
-                 Exploratory query results:
-                 Query: {query}
-                 Results summary: {df.describe()}
-                 Sample data: {df.head()}
-                 """
-
-                 return get_data_response(
-                     request,
-                     db_conn,
-                     tables=tables,
-                     extra_context=extra_context,
-                     history=history,
-                     model=model,
-                     provider=provider,
-                     npc=npc,
-                 )
-
-             except Exception as e:
-                 return {"response": f"Exploratory query failed: {str(e)}", "code": 400}
-
-         return {"response": "Invalid choice specified", "code": 400}
-
-     except Exception as e:
-         return {"response": f"Processing error: {str(e)}", "code": 400}
-
-
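A minimal sketch of the dictionary shape `process_data_output` consumes and how it might be driven directly; the SQLite connection, request, and model/provider values are placeholders (the `test_sqlite.db` path matches the fallback engine above).

```python
import sqlite3

# Shape of the LLM output this function expects:
# "choice" 1 means the query should answer the request directly,
# "choice" 2 means it is exploratory and feeds back into get_data_response.
llm_response = {
    "query": "SELECT COUNT(*) AS n_orders FROM orders",
    "choice": 1,
    "explanation": "A direct count answers the request.",
}

conn = sqlite3.connect("test_sqlite.db")
result = process_data_output(
    llm_response,
    conn,
    "How many orders are there?",
    model="llama3.2",      # placeholder model/provider
    provider="ollama",
)
print(result["code"], result["response"])
```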
- def get_data_response(
-     request: str,
-     db_conn,
-     tables: str = None,
-     n_try_freq: int = 5,
-     extra_context: str = None,
-     history: str = None,
-     model: str = None,
-     provider: str = None,
-     npc: Any = None,
-     max_retries: int = 3,
- ) -> Dict[str, Any]:
-     """
-     Generate a response to a data request, with retries for failed attempts.
-     """
-
-     # Extract schema information based on connection type
-     schema_info = ""
-     if "psycopg2" in db_conn.__class__.__module__:
-         cursor = db_conn.cursor()
-         # Get all tables and their columns
-         cursor.execute(
-             """
-             SELECT
-                 t.table_name,
-                 array_agg(c.column_name || ' ' || c.data_type) as columns,
-                 array_agg(
-                     CASE
-                         WHEN tc.constraint_type = 'FOREIGN KEY'
-                         THEN kcu.column_name || ' REFERENCES ' || ccu.table_name || '.' || ccu.column_name
-                         ELSE NULL
-                     END
-                 ) as foreign_keys
-             FROM information_schema.tables t
-             JOIN information_schema.columns c ON t.table_name = c.table_name
-             LEFT JOIN information_schema.table_constraints tc
-                 ON t.table_name = tc.table_name
-                 AND tc.constraint_type = 'FOREIGN KEY'
-             LEFT JOIN information_schema.key_column_usage kcu
-                 ON tc.constraint_name = kcu.constraint_name
-             LEFT JOIN information_schema.constraint_column_usage ccu
-                 ON tc.constraint_name = ccu.constraint_name
-             WHERE t.table_schema = 'public'
-             GROUP BY t.table_name;
-             """
-         )
-         for table, columns, fks in cursor.fetchall():
-             schema_info += f"\nTable {table}:\n"
-             schema_info += "Columns:\n"
-             for col in columns:
-                 schema_info += f" - {col}\n"
-             if any(fk for fk in fks if fk is not None):
-                 schema_info += "Foreign Keys:\n"
-                 for fk in fks:
-                     if fk:
-                         schema_info += f" - {fk}\n"
-
-     elif "sqlite3" in db_conn.__class__.__module__:
-         cursor = db_conn.cursor()
-         cursor.execute("SELECT name FROM sqlite_master WHERE type='table';")
-         tables = cursor.fetchall()
-         for (table_name,) in tables:
-             schema_info += f"\nTable {table_name}:\n"
-             cursor.execute(f"PRAGMA table_info({table_name});")
-             columns = cursor.fetchall()
-             schema_info += "Columns:\n"
-             for col in columns:
-                 schema_info += f" - {col[1]} {col[2]}\n"
-
-             cursor.execute(f"PRAGMA foreign_key_list({table_name});")
-             foreign_keys = cursor.fetchall()
-             if foreign_keys:
-                 schema_info += "Foreign Keys:\n"
-                 for fk in foreign_keys:
-                     schema_info += f" - {fk[3]} REFERENCES {fk[2]}({fk[4]})\n"
-
-     prompt = f"""
-     User request: {request}
-
-     Database Schema:
-     {schema_info}
-
-     {extra_context or ''}
-     {f'Query history: {history}' if history else ''}
-
-     Provide either:
-     1) An SQL query to directly answer the request
-     2) An exploratory query to gather more information
-
-     Return JSON with:
-     {{
-         "query": <sql query string>,
-         "choice": <1 or 2>,
-         "explanation": <reason for choice>
-     }}
-     DO NOT include markdown formatting or ```json tags.
-     """
-
-     failures = []
-     for attempt in range(max_retries):
-         # try:
-         llm_response = get_llm_response(
-             prompt, npc=npc, format="json", model=model, provider=provider
-         )
-
-         # Clean response if it's a string
-         response_data = llm_response.get("response", {})
-         if isinstance(response_data, str):
-             response_data = (
-                 response_data.replace("```json", "").replace("```", "").strip()
-             )
-             try:
-                 response_data = json.loads(response_data)
-             except json.JSONDecodeError:
-                 failures.append("Invalid JSON response")
-                 continue
-
-         result = process_data_output(
-             response_data,
-             db_conn,
-             request,
-             tables=tables,
-             history=failures,
-             npc=npc,
-             model=model,
-             provider=provider,
-         )
-
-         if result["code"] == 200:
-             return result
-
-         failures.append(result["response"])
-
-         if attempt == max_retries - 1:
-             return {
-                 "response": f"Failed after {max_retries} attempts. Errors: {'; '.join(failures)}",
-                 "code": 400,
-             }
-
-         # except Exception as e:
-         #     failures.append(str(e))
-
-
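For context, a sketch of running the retry loop end to end against a local SQLite file; the database path, request text, and model/provider pair are placeholders.

```python
import sqlite3

conn = sqlite3.connect("test_sqlite.db")
result = get_data_response(
    "Which customers placed the most orders last month?",
    conn,
    model="gpt-4o-mini",   # hypothetical model/provider
    provider="openai",
    max_retries=3,
)
# On success: {"response": <explanation>, "data": <DataFrame>, "code": 200}
# On failure: {"response": "Failed after 3 attempts. Errors: ...", "code": 400}
print(result["code"])
```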
- def enter_reasoning_human_in_the_loop(
-     messages: List[Dict[str, str]],
-     reasoning_model: str = NPCSH_REASONING_MODEL,
-     reasoning_provider: str = NPCSH_REASONING_PROVIDER,
-     chat_model: str = NPCSH_CHAT_MODEL,
-     chat_provider: str = NPCSH_CHAT_PROVIDER,
-     npc: Any = None,
-     answer_only: bool = False,
-     context=None,
- ) -> Generator[str, None, None]:
-     """
-     Stream responses while checking for think tokens and handling human input when needed.
-
-     Args:
-         messages: List of conversation messages
-         reasoning_model: Model used for the reasoning (think-tag) pass
-         reasoning_provider: Provider of the reasoning model
-         chat_model: Model used to analyze thoughts for needed input
-         chat_provider: Provider of the chat model
-         npc: NPC instance if applicable
-
-     Yields:
-         Streamed response chunks
-     """
-     # Get the initial stream
-     if answer_only:
-         messages[-1]["content"] = (
-             messages[-1]["content"].replace(
-                 "Think first though and use <think> tags", ""
-             )
-             + " Do not think just answer. "
-         )
-     else:
-         messages[-1]["content"] = (
-             messages[-1]["content"]
-             + " Think first though and use <think> tags. "
-         )
-
-     response_stream = get_stream(
-         messages,
-         model=reasoning_model,
-         provider=reasoning_provider,
-         npc=npc,
-         context=context,
-     )
-
-     thoughts = []
-     response_chunks = []
-     in_think_block = False
-
-     for chunk in response_stream:
-         # Extract content based on provider/model type
-         if reasoning_provider == "ollama":
-             chunk_content = chunk.get("message", {}).get("content", "")
-         elif reasoning_provider == "openai" or reasoning_provider == "deepseek":
-             chunk_content = "".join(
-                 choice.delta.content
-                 for choice in chunk.choices
-                 if choice.delta.content is not None
-             )
-         elif reasoning_provider == "anthropic":
-             if chunk.type == "content_block_delta":
-                 chunk_content = chunk.delta.text
-             else:
-                 chunk_content = ""
-         else:
-             # Default extraction
-             chunk_content = str(chunk)
-
-         # Always yield the chunk whether in a think block or not
-         response_chunks.append(chunk_content)
-         # Track think block state and accumulate thoughts
-         if answer_only:
-             yield chunk
-         else:
-             if "<th" in "".join(response_chunks) and "/th" not in "".join(
-                 response_chunks
-             ):
-                 in_think_block = True
-
-             if in_think_block:
-                 thoughts.append(chunk_content)
-                 yield chunk  # Show the thoughts as they come
-
-             if "</th" in "".join(response_chunks):
-                 thought_text = "".join(thoughts)
-                 # Analyze thoughts before stopping
-                 input_needed = analyze_thoughts_for_input(
-                     thought_text, model=chat_model, provider=chat_provider
-                 )
-
-                 if input_needed:
-                     # If input is needed, get it and restart with the new context
-                     user_input = request_user_input(input_needed)
-
-                     messages.append(
-                         {
-                             "role": "assistant",
-                             "content": f"""It's clear that extra input is required.
-                             Could you please provide it? Here is the reason:
-
-                             {input_needed['reason']},
-
-                             and the prompt: {input_needed['prompt']}""",
-                         }
-                     )
-
-                     messages.append({"role": "user", "content": user_input})
-                     yield from enter_reasoning_human_in_the_loop(
-                         messages,
-                         reasoning_model=reasoning_model,
-                         reasoning_provider=reasoning_provider,
-                         chat_model=chat_model,
-                         chat_provider=chat_provider,
-                         npc=npc,
-                         answer_only=True,
-                     )
-                 else:
-                     # If no input is needed, just get the answer
-                     messages.append({"role": "assistant", "content": thought_text})
-                     messages.append(
-                         {"role": "user", "content": messages[-2]["content"]}
-                     )
-                     yield from enter_reasoning_human_in_the_loop(  # Restart with new context
-                         messages,
-                         reasoning_model=reasoning_model,
-                         reasoning_provider=reasoning_provider,
-                         chat_model=chat_model,
-                         chat_provider=chat_provider,
-                         npc=npc,
-                         answer_only=True,
-                     )
-
-                 return  # Stop the original stream in either case
-
-
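A minimal sketch of driving the human-in-the-loop generator; the message content and model names are illustrative, and the chunk handling assumes the ollama-style dict chunks handled in the extraction branch above.

```python
messages = [{"role": "user", "content": "Plan a backup strategy for my laptop."}]

# The generator yields raw provider chunks; for ollama-style chunks the text
# lives under chunk["message"]["content"], as in the extraction branch above.
for chunk in enter_reasoning_human_in_the_loop(
    messages,
    reasoning_model="deepseek-r1",   # hypothetical reasoning model
    reasoning_provider="ollama",
    chat_model="llama3.2",           # hypothetical chat model
    chat_provider="ollama",
):
    if isinstance(chunk, dict):
        print(chunk.get("message", {}).get("content", ""), end="", flush=True)
```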
- def handle_request_input(
-     context: str,
-     model: str = NPCSH_CHAT_MODEL,
-     provider: str = NPCSH_CHAT_PROVIDER,
- ):
-     """
-     Analyze text and decide what to request from the user.
-     """
-     prompt = f"""
-     Analyze the text:
-     {context}
-     and determine what additional input is needed.
-     Return a JSON object with:
-     {{
-         "input_needed": boolean,
-         "request_reason": string explaining why input is needed,
-         "request_prompt": string to show user if input needed
-     }}
-
-     Do not include any additional markdown formatting or leading ```json tags. Your response
-     must be a valid JSON object.
-     """
-
-     response = get_llm_response(
-         prompt,
-         model=model,
-         provider=provider,
-         messages=[],
-         format="json",
-     )
-
-     result = response.get("response", {})
-     if isinstance(result, str):
-         result = json.loads(result)
-
-     user_input = request_user_input(
-         {"reason": result["request_reason"], "prompt": result["request_prompt"]}
-     )
-     return user_input
-
-
- def analyze_thoughts_for_input(
-     thought_text: str,
-     model: str = NPCSH_CHAT_MODEL,
-     provider: str = NPCSH_CHAT_PROVIDER,
-     api_url: str = NPCSH_API_URL,
-     api_key: str = None,
- ) -> Optional[Dict[str, str]]:
-     """
-     Analyze accumulated thoughts to determine if user input is needed.
-
-     Args:
-         thought_text: Accumulated text from the think block
-
-     Returns:
-         Dict with input request details if needed, None otherwise
-     """
-
-     prompt = (
-         f"""
-     Analyze these thoughts:
-     {thought_text}
-     and determine if additional user input would be helpful.
-     Return a JSON object with:"""
-         + """
-     {
-         "input_needed": boolean,
-         "request_reason": string explaining why input is needed,
-         "request_prompt": string to show user if input needed
-     }
-     Consider things like:
-     - Ambiguity in the user's request
-     - Missing context that would help provide a better response
-     - Clarification needed about user preferences/requirements
-     Only request input if it would meaningfully improve the response.
-     Do not include any additional markdown formatting or leading ```json tags. Your response
-     must be a valid JSON object.
-     """
-     )
-
-     response = get_llm_response(
-         prompt,
-         model=model,
-         provider=provider,
-         api_url=api_url,
-         api_key=api_key,
-         messages=[],
-         format="json",
-     )
-
-     result = response.get("response", {})
-     if isinstance(result, str):
-         result = json.loads(result)
-
-     if result.get("input_needed"):
-         return {
-             "reason": result["request_reason"],
-             "prompt": result["request_prompt"],
-         }
-
-
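To make the contract concrete, a sketch of the kind of input `analyze_thoughts_for_input` parses and the value it returns; the thought text and model/provider are invented for illustration.

```python
thought_text = (
    "<think>The user asked to 'clean up the data' but did not say which "
    "columns matter or what counts as clean.</think>"
)

# If the model replies with, e.g.,
#   {"input_needed": true,
#    "request_reason": "The cleaning criteria are ambiguous",
#    "request_prompt": "Which columns should be cleaned, and how?"}
# the function returns {"reason": ..., "prompt": ...}; otherwise it returns None.
needed = analyze_thoughts_for_input(thought_text, model="llama3.2", provider="ollama")
if needed:
    answer = request_user_input(needed)  # prints the reason, then prompts on stdin
```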
- def request_user_input(input_request: Dict[str, str]) -> str:
-     """
-     Request and get input from the user.
-
-     Args:
-         input_request: Dict with reason and prompt for input
-
-     Returns:
-         User's input text
-     """
-     print(f"\nAdditional input needed: {input_request['reason']}")
-     return input(f"{input_request['prompt']}: ")