npcsh: 0.3.30-py3-none-any.whl → 0.3.32-py3-none-any.whl

This diff compares the contents of two publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only.
Files changed (53)
  1. npcsh/audio.py +540 -181
  2. npcsh/audio_gen.py +1 -0
  3. npcsh/cli.py +37 -19
  4. npcsh/conversation.py +14 -251
  5. npcsh/dataframes.py +13 -5
  6. npcsh/helpers.py +5 -0
  7. npcsh/image.py +2 -4
  8. npcsh/image_gen.py +38 -38
  9. npcsh/knowledge_graph.py +4 -4
  10. npcsh/llm_funcs.py +517 -349
  11. npcsh/npc_compiler.py +44 -23
  12. npcsh/npc_sysenv.py +5 -0
  13. npcsh/npc_team/npcsh.ctx +8 -2
  14. npcsh/npc_team/tools/generic_search.tool +9 -1
  15. npcsh/plonk.py +2 -2
  16. npcsh/response.py +131 -482
  17. npcsh/search.py +20 -9
  18. npcsh/serve.py +210 -203
  19. npcsh/shell.py +78 -80
  20. npcsh/shell_helpers.py +513 -102
  21. npcsh/stream.py +87 -554
  22. npcsh/video.py +5 -2
  23. npcsh/video_gen.py +69 -0
  24. {npcsh-0.3.30.data → npcsh-0.3.32.data}/data/npcsh/npc_team/generic_search.tool +9 -1
  25. {npcsh-0.3.30.data → npcsh-0.3.32.data}/data/npcsh/npc_team/npcsh.ctx +8 -2
  26. npcsh-0.3.32.dist-info/METADATA +779 -0
  27. npcsh-0.3.32.dist-info/RECORD +78 -0
  28. npcsh-0.3.30.dist-info/METADATA +0 -1862
  29. npcsh-0.3.30.dist-info/RECORD +0 -76
  30. {npcsh-0.3.30.data → npcsh-0.3.32.data}/data/npcsh/npc_team/bash_executer.tool +0 -0
  31. {npcsh-0.3.30.data → npcsh-0.3.32.data}/data/npcsh/npc_team/calculator.tool +0 -0
  32. {npcsh-0.3.30.data → npcsh-0.3.32.data}/data/npcsh/npc_team/celona.npc +0 -0
  33. {npcsh-0.3.30.data → npcsh-0.3.32.data}/data/npcsh/npc_team/code_executor.tool +0 -0
  34. {npcsh-0.3.30.data → npcsh-0.3.32.data}/data/npcsh/npc_team/corca.npc +0 -0
  35. {npcsh-0.3.30.data → npcsh-0.3.32.data}/data/npcsh/npc_team/eriane.npc +0 -0
  36. {npcsh-0.3.30.data → npcsh-0.3.32.data}/data/npcsh/npc_team/foreman.npc +0 -0
  37. {npcsh-0.3.30.data → npcsh-0.3.32.data}/data/npcsh/npc_team/image_generation.tool +0 -0
  38. {npcsh-0.3.30.data → npcsh-0.3.32.data}/data/npcsh/npc_team/lineru.npc +0 -0
  39. {npcsh-0.3.30.data → npcsh-0.3.32.data}/data/npcsh/npc_team/local_search.tool +0 -0
  40. {npcsh-0.3.30.data → npcsh-0.3.32.data}/data/npcsh/npc_team/maurawa.npc +0 -0
  41. {npcsh-0.3.30.data → npcsh-0.3.32.data}/data/npcsh/npc_team/npcsh_executor.tool +0 -0
  42. {npcsh-0.3.30.data → npcsh-0.3.32.data}/data/npcsh/npc_team/raone.npc +0 -0
  43. {npcsh-0.3.30.data → npcsh-0.3.32.data}/data/npcsh/npc_team/screen_cap.tool +0 -0
  44. {npcsh-0.3.30.data → npcsh-0.3.32.data}/data/npcsh/npc_team/sibiji.npc +0 -0
  45. {npcsh-0.3.30.data → npcsh-0.3.32.data}/data/npcsh/npc_team/slean.npc +0 -0
  46. {npcsh-0.3.30.data → npcsh-0.3.32.data}/data/npcsh/npc_team/sql_executor.tool +0 -0
  47. {npcsh-0.3.30.data → npcsh-0.3.32.data}/data/npcsh/npc_team/test_pipeline.py +0 -0
  48. {npcsh-0.3.30.data → npcsh-0.3.32.data}/data/npcsh/npc_team/turnic.npc +0 -0
  49. {npcsh-0.3.30.data → npcsh-0.3.32.data}/data/npcsh/npc_team/welxor.npc +0 -0
  50. {npcsh-0.3.30.dist-info → npcsh-0.3.32.dist-info}/WHEEL +0 -0
  51. {npcsh-0.3.30.dist-info → npcsh-0.3.32.dist-info}/entry_points.txt +0 -0
  52. {npcsh-0.3.30.dist-info → npcsh-0.3.32.dist-info}/licenses/LICENSE +0 -0
  53. {npcsh-0.3.30.dist-info → npcsh-0.3.32.dist-info}/top_level.txt +0 -0
npcsh/audio_gen.py ADDED
@@ -0,0 +1 @@
+ # audio_gen.py
npcsh/cli.py CHANGED
@@ -1,5 +1,5 @@
  import argparse
- from .npc_sysenv import (
+ from npcsh.npc_sysenv import (
  NPCSH_CHAT_MODEL,
  NPCSH_CHAT_PROVIDER,
  NPCSH_IMAGE_GEN_MODEL,
@@ -14,15 +14,15 @@ from .npc_sysenv import (
  NPCSH_STREAM_OUTPUT,
  NPCSH_SEARCH_PROVIDER,
  )
- from .serve import start_flask_server
- from .npc_compiler import (
+ from npcsh.serve import start_flask_server
+ from npcsh.npc_compiler import (
  initialize_npc_project,
  conjure_team,
  NPCCompiler,
  NPC,
  load_npc_from_file,
  )
- from .llm_funcs import (
+ from npcsh.llm_funcs import (
  check_llm_command,
  execute_llm_command,
  execute_llm_question,
@@ -33,8 +33,9 @@ from .llm_funcs import (
  get_stream,
  get_conversation,
  )
- from .search import search_web
- from .shell_helpers import *
+ from npcsh.plonk import plonk, action_space
+ from npcsh.search import search_web
+ from npcsh.shell_helpers import *
  import os

  # check if ./npc_team exists
@@ -54,10 +55,12 @@ def main():
  "assemble",
  "build",
  "compile",
+ "chat",
  "init",
  "new",
  "plonk",
  "sample",
+ "search",
  "select",
  "serve",
  "spool",
@@ -165,6 +168,10 @@ def main():
  "directory", nargs="?", default=".", help="Directory to build project in"
  )

+ # chat
+ chat_parser = subparsers.add_parser("chat", help="chat with an NPC")
+ chat_parser.add_argument("-n", "--npc_name", help="name of npc")
+
  # Compile command
  compile_parser = subparsers.add_parser("compile", help="Compile an NPC")
  compile_parser.add_argument("path", help="Path to NPC file")
@@ -300,7 +307,7 @@ def main():

  # Web search
  search_parser = subparsers.add_parser("search", help="search the web")
- search_parser.add_argument("query", help="search query")
+ search_parser.add_argument("--query", "-q", help="search query")
  search_parser.add_argument(
  "--search_provider",
  "-sp",
@@ -317,7 +324,7 @@ def main():

  # Voice chat
  whisper_parser = subparsers.add_parser("whisper", help="start voice chat")
- whisper_parser.add_argument("npc_name", help="name of the NPC to chat with")
+ whisper_parser.add_argument("-n", "--npc_name", help="name of the NPC to chat with")

  args = parser.parse_args()

@@ -367,6 +374,13 @@ def main():
  port=args.port if args.port else 5337,
  cors_origins=cors_origins,
  )
+ elif args.command == "chat":
+ npc_name = args.npc_name
+ npc_path = get_npc_path(npc_name, NPCSH_DB_PATH)
+ current_npc = load_npc_from_file(npc_path, sqlite3.connect(NPCSH_DB_PATH))
+ return enter_spool_mode(
+ model=args.model, provider=args.provider, npc=current_npc
+ )

  elif args.command == "init":
  if args.templates:
@@ -395,16 +409,18 @@ def main():
  )

  elif args.command == "compile":
- compile_npc(args.path)
+ npc_compiler = NPCCompiler(npc_directory, NPCSH_DB_PATH)
+ compiled = npc_compiler.compile(args.path)
+ print("NPC compiled to:", compiled)

  elif args.command == "plonk":
  task = args.task or args.spell
  npc_name = args.name
- run_plonk_task(
- task=task,
- npc_name=npc_name,
- model=args.model or NPCSH_REASONING_MODEL,
- provider=args.provider or NPCSH_REASONING_PROVIDER,
+ plonk(
+ task,
+ action_space,
+ model=args.model or NPCSH_CHAT_MODEL,
+ provider=args.provider or NPCSH_CHAT_PROVIDER,
  )

  elif args.command == "sample":
@@ -443,10 +459,14 @@ def main():
  model=args.model,
  provider=args.provider,
  )
- print(result)
+ print(result["output"])

  elif args.command == "whisper":
- start_whisper_chat(args.npc_name)
+ npc_name = args.npc_name
+ npc_path = get_npc_path(npc_name, NPCSH_DB_PATH)
+ current_npc = load_npc_from_file(npc_path, sqlite3.connect(NPCSH_DB_PATH))
+
+ enter_whisper_mode(npc=current_npc)

  elif args.command == "tool":
  result = invoke_tool(
@@ -483,7 +503,7 @@ def main():
  elif args.command == "new":
  # create a new npc, tool, or assembly line
  if args.type == "npc":
- from .npc_creator import create_new_npc
+ from npcsh.npc_creator import create_new_npc

  create_new_npc(
  name=args.name,
@@ -494,7 +514,6 @@ def main():
  autogen=args.autogen,
  )
  elif args.type == "tool":
- from .tool_creator import create_new_tool

  create_new_tool(
  name=args.name,
@@ -502,7 +521,6 @@ def main():
  autogen=args.autogen,
  )
  elif args.type == "assembly_line":
- from .assembly_creator import create_new_assembly_line

  create_new_assembly_line(
  name=args.name,
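
The new chat handler above is a thin wrapper around spool mode. A minimal sketch of the equivalent flow in Python, assuming get_npc_path and enter_spool_mode come in via the shell_helpers star import and NPCSH_DB_PATH lives in npc_sysenv (neither import site is shown in this hunk; "sibiji" is one of the NPCs shipped in the default npc_team):

    import sqlite3

    from npcsh.npc_sysenv import NPCSH_DB_PATH  # assumed location
    from npcsh.npc_compiler import load_npc_from_file
    from npcsh.shell_helpers import get_npc_path, enter_spool_mode  # assumed location

    # Resolve the NPC by name, load it against the history database,
    # then drop into the interactive spool-mode chat loop.
    npc_path = get_npc_path("sibiji", NPCSH_DB_PATH)
    npc = load_npc_from_file(npc_path, sqlite3.connect(NPCSH_DB_PATH))
    enter_spool_mode(npc=npc)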
npcsh/conversation.py CHANGED
@@ -6,49 +6,15 @@
  ########
  from typing import Any, Dict, Generator, List
  import os
- import anthropic

- from openai import OpenAI
- from google.generativeai import types
- import google.generativeai as genai
- from .npc_sysenv import get_system_message
+ from litellm import completion
+ from npcsh.npc_sysenv import get_system_message


- def get_ollama_conversation(
- messages: List[Dict[str, str]],
- model: str,
- npc: Any = None,
- tools: list = None,
- images=None,
- **kwargs,
- ) -> List[Dict[str, str]]:
- """
- Function Description:
- This function generates a conversation using the Ollama API.
- Args:
- messages (List[Dict[str, str]]): The list of messages in the conversation.
- model (str): The model to use for the conversation.
- Keyword Args:
- npc (Any): The NPC object.
- Returns:
- List[Dict[str, str]]: The list of messages in the conversation.
- """
- import ollama
-
- messages_copy = messages.copy()
- if messages_copy[0]["role"] != "system":
- if npc is not None:
- system_message = get_system_message(npc)
- messages_copy.insert(0, {"role": "system", "content": system_message})
-
- response = ollama.chat(model=model, messages=messages_copy)
- messages_copy.append(response["message"])
- return messages_copy
-
-
- def get_openai_conversation(
+ def get_litellm_conversation(
  messages: List[Dict[str, str]],
  model: str,
+ provider: str,
  npc: Any = None,
  tools: list = None,
  api_key: str = None,
@@ -66,226 +32,23 @@ def get_openai_conversation(
  api_key (str): The API key for accessing the OpenAI API.
  Returns:
  List[Dict[str, str]]: The list of messages in the conversation.
- """
-
- try:
- if api_key is None:
- api_key = os.environ["OPENAI_API_KEY"]
- client = OpenAI(api_key=api_key)
-
- system_message = (
- get_system_message(npc) if npc else "You are a helpful assistant."
- )
-
- if messages is None:
- messages = []
-
- # Ensure the system message is at the beginning
- if not any(msg["role"] == "system" for msg in messages):
- messages.insert(0, {"role": "system", "content": system_message})
-
- # messages should already include the user's latest message
-
- # Make the API call with the messages including the latest user input
- completion = client.chat.completions.create(
- model=model, messages=messages, **kwargs
- )
-
- response_message = completion.choices[0].message
- messages.append({"role": "assistant", "content": response_message.content})
-
- return messages
-
- except Exception as e:
- return f"Error interacting with OpenAI: {e}"
-
-
- def get_openai_like_conversation(
- messages: List[Dict[str, str]],
- model: str,
- api_url: str,
- npc: Any = None,
- images=None,
- tools: list = None,
- api_key: str = None,
- **kwargs,
- ) -> List[Dict[str, str]]:
- """
- Function Description:
- This function generates a conversation using an OpenAI-like API.
- Args:
- messages (List[Dict[str, str]]): The list of messages in the conversation.
- model (str): The model to use for the conversation.
- Keyword Args:
- npc (Any): The NPC object.
- api_url (str): The URL of the API endpoint.
- api_key (str): The API key for accessing the API.
- Returns:
- List[Dict[str, str]]: The list of messages in the conversation.
- """
-
- if api_url is None:
- raise ValueError("api_url is required for openai-like provider")
- if api_key is None:
- api_key = "dummy_api_key"
- try:
- client = OpenAI(api_key=api_key, base_url=api_url)
-
- system_message = (
- get_system_message(npc) if npc else "You are a helpful assistant."
- )
-
- if messages is None:
- messages = []
-
- # Ensure the system message is at the beginning
- if not any(msg["role"] == "system" for msg in messages):
- messages.insert(0, {"role": "system", "content": system_message})
-
- # messages should already include the user's latest message
-
- # Make the API call with the messages including the latest user input
-
- completion = client.chat.completions.create(
- model=model, messages=messages, **kwargs
- )
- response_message = completion.choices[0].message
- messages.append({"role": "assistant", "content": response_message.content})
-
- return messages
-
- except Exception as e:
- return f"Error interacting with OpenAI: {e}"
-
- return messages
-
-
- def get_anthropic_conversation(
- messages: List[Dict[str, str]],
- model: str,
- npc: Any = None,
- tools: list = None,
- images=None,
- api_key: str = None,
- **kwargs,
- ) -> List[Dict[str, str]]:
- """
- Function Description:
- This function generates a conversation using the Anthropic API.
- Args:
- messages (List[Dict[str, str]]): The list of messages in the conversation.
- model (str): The model to use for the conversation.
- Keyword Args:
- npc (Any): The NPC object.
- api_key (str): The API key for accessing the Anthropic API.
- Returns:
- List[Dict[str, str]]: The list of messages in the conversation.
- """
-
- try:
- if api_key is None:
- api_key = os.getenv("ANTHROPIC_API_KEY", None)
- system_message = get_system_message(npc) if npc else ""
- client = anthropic.Anthropic(api_key=api_key)
- last_user_message = None
- for msg in reversed(messages):
- if msg["role"] == "user":
- last_user_message = msg["content"]
- break
-
- if last_user_message is None:
- raise ValueError("No user message found in the conversation history.")
-
- # if a sys message is in messages, remove it
- if messages[0]["role"] == "system":
- messages.pop(0)
-
- message = client.messages.create(
- model=model,
- system=system_message, # Include system message in each turn for Anthropic
- messages=messages, # Send only the last user message
- max_tokens=8192,
- **kwargs,
- )
-
- messages.append({"role": "assistant", "content": message.content[0].text})
-
- return messages
-
- except Exception as e:
- return f"Error interacting with Anthropic conversations: {e}"
-
-
- def get_gemini_conversation(
- messages: List[Dict[str, str]],
- model: str,
- npc: Any = None,
- tools: list = None,
- api_key: str = None,
- ) -> List[Dict[str, str]]:
- """
- Function Description:
- This function generates a conversation using the Gemini API.
- Args:
- messages (List[Dict[str, str]]): The list of messages in the conversation.
- model (str): The model to use for the conversation.
- Keyword Args:
- npc (Any): The NPC object.
- Returns:
- List[Dict[str, str]]: The list of messages in the conversation.
- """
- # Make the API call to Gemini

- # print(messages)
- response = get_gemini_response(
- messages[-1]["content"], model, messages=messages[1:], npc=npc
- )
- # print(response)
- return response.get("messages", [])

-
- def get_deepseek_conversation(
- messages: List[Dict[str, str]],
- model: str,
- npc: Any = None,
- tools: list = None,
- api_key: str = None,
- ) -> List[Dict[str, str]]:
+ Examples:
+ >>> messages = [ {"role": "user", "content": "Hello, how are you?"}]
+ >>> model = 'openai/gpt-4o-mini'
+ >>> response = get_litellm_conversation(messages, model)
  """
- Function Description:
- This function generates a conversation using the DeepSeek API.
- Args:
- messages (List[Dict[str, str]]): The list of messages in the conversation.
- model (str): The model to use for the conversation.
- Keyword Args:
- npc (Any): The NPC object.
- Returns:
- List[Dict[str, str]]: The list of messages in the conversation.
- """
-
  system_message = get_system_message(npc) if npc else "You are a helpful assistant."
+ if messages is None:
+ messages = []

- # Prepare the messages list
- if messages is None or len(messages) == 0:
- messages = [{"role": "system", "content": system_message}]
- elif not any(msg["role"] == "system" for msg in messages):
+ if not any(msg["role"] == "system" for msg in messages):
  messages.insert(0, {"role": "system", "content": system_message})

- # Make the API call to DeepSeek
- try:
- response = get_deepseek_response(
- messages[-1]["content"], model, messages=messages, npc=npc
- )
- messages.append(
- {"role": "assistant", "content": response.get("response", "No response")}
- )
+ resp = completion(model=f"{provider}/{model}", messages=messages)

- except Exception as e:
- messages.append(
- {
- "role": "assistant",
- "content": f"Error interacting with DeepSeek: {str(e)}",
- }
- )
+ response_message = resp.choices[0].message
+ messages.append({"role": "assistant", "content": response_message.content})

  return messages
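
With the provider-specific helpers removed, every conversation now routes through litellm's completion. A minimal usage sketch of the new function (the model and provider values are illustrative; note that provider is a required positional argument, and the assistant reply is appended to the list in place):

    from npcsh.conversation import get_litellm_conversation

    messages = [{"role": "user", "content": "Hello, how are you?"}]
    # Internally calls litellm.completion(model="openai/gpt-4o-mini", ...)
    messages = get_litellm_conversation(messages, "gpt-4o-mini", "openai")
    print(messages[-1]["content"])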
npcsh/dataframes.py CHANGED
@@ -8,11 +8,19 @@ import io
  from PIL import Image
  from typing import Optional

- from .llm_funcs import get_llm_response
- from .audio import process_audio
- from .video import process_video
-
- from .load_data import load_pdf, load_csv, load_json, load_excel, load_txt, load_image
+ from npcsh.llm_funcs import get_llm_response
+
+ # from npcsh.audio import process_audio
+ # from npcsh.video import process_video
+
+ from npcsh.load_data import (
+ load_pdf,
+ load_csv,
+ load_json,
+ load_excel,
+ load_txt,
+ load_image,
+ )


  def load_data_into_table(
npcsh/helpers.py CHANGED
@@ -106,6 +106,11 @@ def ensure_npcshrc_exists() -> str:
  )

  npcshrc.write("export NPCSH_IMAGE_GEN_PROVIDER='diffusers'\n")
+ npcshrc.write(
+ "export NPCSH_VIDEO_GEN_MODEL='runwayml/stable-diffusion-v1-5'\n"
+ )
+
+ npcshrc.write("export NPCSH_VIDEO_GEN_PROVIDER='diffusers'\n")

  npcshrc.write("export NPCSH_API_URL=''\n")
  npcshrc.write("export NPCSH_DB_PATH='~/npcsh_history.db'\n")
npcsh/image.py CHANGED
@@ -6,8 +6,8 @@ import subprocess
  from typing import Dict, Any
  from PIL import ImageGrab # Import ImageGrab from Pillow

- from .npc_sysenv import NPCSH_VISION_MODEL, NPCSH_VISION_PROVIDER, NPCSH_API_URL
- from .llm_funcs import get_llm_response, get_stream
+ from npcsh.npc_sysenv import NPCSH_VISION_MODEL, NPCSH_VISION_PROVIDER, NPCSH_API_URL
+ from npcsh.llm_funcs import get_llm_response, get_stream
  import os


@@ -283,8 +283,6 @@ def analyze_image(
  api_key=api_key,
  )

- print(response)
- # Add to command history *inside* the try block
  return response

  except Exception as e:
npcsh/image_gen.py CHANGED
@@ -9,47 +9,15 @@

  import os

- from openai import OpenAI

+ from litellm import image_generation
+ from npcsh.npc_sysenv import (
+ NPCSH_IMAGE_GEN_MODEL,
+ NPCSH_IMAGE_GEN_PROVIDER,
+ )

- def generate_image_openai(
- prompt: str,
- model: str,
- api_key: str = None,
- size: str = None,
- npc=None,
- ) -> str:
- """
- Function Description:
- This function generates an image using the OpenAI API.
- Args:
- prompt (str): The prompt for generating the image.
- model (str): The model to use for generating the image.
- api_key (str): The API key for accessing the OpenAI API.
- Keyword Args:
- None
- Returns:
- str: The URL of the generated image.
- """
- if api_key is None:
- api_key = os.environ.get("OPENAI_API_KEY")
- if model is None:
- model = "dall-e-2"
- client = OpenAI(api_key=api_key)
- if size is None:
- size = "1024x1024"
- if model not in ["dall-e-3", "dall-e-2"]:
- # raise ValueError(f"Invalid model: {model}")
- print(f"Invalid model: {model}")
- print("Switching to dall-e-3")
- model = "dall-e-3"
- image = client.images.generate(model=model, prompt=prompt, n=1, size=size)
- if image is not None:
- # print(image)
- return image

-
- def generate_image_hf_diffusion(
+ def generate_image_diffusers(
  prompt: str,
  model: str = "runwayml/stable-diffusion-v1-5",
  device: str = "cpu",
@@ -77,3 +45,35 @@ def generate_image_hf_diffusion(
  image.show()

  return image
+
+
+ def generate_image_litellm(
+ prompt: str,
+ model: str = NPCSH_IMAGE_GEN_MODEL,
+ provider: str = NPCSH_IMAGE_GEN_PROVIDER,
+ api_key: str = None,
+ size: str = None,
+ npc=None,
+ ) -> str:
+ """
+ Function Description:
+ This function generates an image using the OpenAI API.
+ Args:
+ prompt (str): The prompt for generating the image.
+ model (str): The model to use for generating the image.
+ api_key (str): The API key for accessing the OpenAI API.
+ Keyword Args:
+ None
+ Returns:
+ str: The URL of the generated image.
+ """
+ if model is None:
+ model = "runwayml/stable-diffusion-v1-5"
+ if size is None:
+ size = "1024x1024"
+ if provider == "diffusers":
+ return generate_image_diffusers(prompt, model)
+ else:
+ return image_generation(
+ prompt=prompt, model=f"{provider}/{model}", n=2, size="240x240"
+ )
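
generate_image_litellm dispatches on the provider: "diffusers" takes the local Stable Diffusion path, and anything else is handed to litellm's image_generation as a "provider/model" string. An illustrative call (the model and provider values here are examples, not the package defaults):

    from npcsh.image_gen import generate_image_litellm

    # Local route: runs the Stable Diffusion pipeline on this machine.
    image = generate_image_litellm("a watercolor fox", provider="diffusers")

    # API route: forwarded to litellm as "openai/dall-e-3".
    result = generate_image_litellm(
        "a watercolor fox", model="dall-e-3", provider="openai"
    )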
npcsh/knowledge_graph.py CHANGED
@@ -11,8 +11,8 @@ except ModuleNotFoundError:
  from typing import Optional, Dict, List, Union, Tuple


- from .llm_funcs import get_llm_response, get_embeddings
- from .npc_compiler import NPC
+ from npcsh.llm_funcs import get_llm_response, get_embeddings
+ from npcsh.npc_compiler import NPC
  import sqlite3


@@ -738,7 +738,7 @@ def process_text_with_chroma(
  print(f"\nProcessing batch {i//batch_size + 1} ({len(batch)} facts)")

  # Generate embeddings for the batch using npcsh.llm_funcs.get_embeddings
- from .llm_funcs import get_embeddings
+ from npcsh.llm_funcs import get_embeddings

  batch_embeddings = get_embeddings(
  batch,
@@ -812,7 +812,7 @@ def hybrid_search_with_chroma(
  List of dictionaries with combined results
  """
  # Get embedding for query using npcsh.llm_funcs.get_embeddings
- from .llm_funcs import get_embeddings
+ from npcsh.llm_funcs import get_embeddings

  query_embedding = get_embeddings([query])[0]