npcsh 0.3.31__py3-none-any.whl → 1.0.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (91)
  1. npcsh/_state.py +942 -0
  2. npcsh/alicanto.py +1074 -0
  3. npcsh/guac.py +785 -0
  4. npcsh/mcp_helpers.py +357 -0
  5. npcsh/mcp_npcsh.py +822 -0
  6. npcsh/mcp_server.py +184 -0
  7. npcsh/npc.py +218 -0
  8. npcsh/npcsh.py +1161 -0
  9. npcsh/plonk.py +387 -269
  10. npcsh/pti.py +234 -0
  11. npcsh/routes.py +958 -0
  12. npcsh/spool.py +315 -0
  13. npcsh/wander.py +550 -0
  14. npcsh/yap.py +573 -0
  15. npcsh-1.0.0.dist-info/METADATA +596 -0
  16. npcsh-1.0.0.dist-info/RECORD +21 -0
  17. {npcsh-0.3.31.dist-info → npcsh-1.0.0.dist-info}/WHEEL +1 -1
  18. npcsh-1.0.0.dist-info/entry_points.txt +9 -0
  19. {npcsh-0.3.31.dist-info → npcsh-1.0.0.dist-info}/licenses/LICENSE +1 -1
  20. npcsh/audio.py +0 -210
  21. npcsh/cli.py +0 -545
  22. npcsh/command_history.py +0 -566
  23. npcsh/conversation.py +0 -291
  24. npcsh/data_models.py +0 -46
  25. npcsh/dataframes.py +0 -163
  26. npcsh/embeddings.py +0 -168
  27. npcsh/helpers.py +0 -641
  28. npcsh/image.py +0 -298
  29. npcsh/image_gen.py +0 -79
  30. npcsh/knowledge_graph.py +0 -1006
  31. npcsh/llm_funcs.py +0 -2027
  32. npcsh/load_data.py +0 -83
  33. npcsh/main.py +0 -5
  34. npcsh/model_runner.py +0 -189
  35. npcsh/npc_compiler.py +0 -2870
  36. npcsh/npc_sysenv.py +0 -383
  37. npcsh/npc_team/assembly_lines/test_pipeline.py +0 -181
  38. npcsh/npc_team/corca.npc +0 -13
  39. npcsh/npc_team/foreman.npc +0 -7
  40. npcsh/npc_team/npcsh.ctx +0 -11
  41. npcsh/npc_team/sibiji.npc +0 -4
  42. npcsh/npc_team/templates/analytics/celona.npc +0 -0
  43. npcsh/npc_team/templates/hr_support/raone.npc +0 -0
  44. npcsh/npc_team/templates/humanities/eriane.npc +0 -4
  45. npcsh/npc_team/templates/it_support/lineru.npc +0 -0
  46. npcsh/npc_team/templates/marketing/slean.npc +0 -4
  47. npcsh/npc_team/templates/philosophy/maurawa.npc +0 -0
  48. npcsh/npc_team/templates/sales/turnic.npc +0 -4
  49. npcsh/npc_team/templates/software/welxor.npc +0 -0
  50. npcsh/npc_team/tools/bash_executer.tool +0 -32
  51. npcsh/npc_team/tools/calculator.tool +0 -8
  52. npcsh/npc_team/tools/code_executor.tool +0 -16
  53. npcsh/npc_team/tools/generic_search.tool +0 -27
  54. npcsh/npc_team/tools/image_generation.tool +0 -25
  55. npcsh/npc_team/tools/local_search.tool +0 -149
  56. npcsh/npc_team/tools/npcsh_executor.tool +0 -9
  57. npcsh/npc_team/tools/screen_cap.tool +0 -27
  58. npcsh/npc_team/tools/sql_executor.tool +0 -26
  59. npcsh/response.py +0 -623
  60. npcsh/search.py +0 -248
  61. npcsh/serve.py +0 -1460
  62. npcsh/shell.py +0 -538
  63. npcsh/shell_helpers.py +0 -3529
  64. npcsh/stream.py +0 -700
  65. npcsh/video.py +0 -49
  66. npcsh-0.3.31.data/data/npcsh/npc_team/bash_executer.tool +0 -32
  67. npcsh-0.3.31.data/data/npcsh/npc_team/calculator.tool +0 -8
  68. npcsh-0.3.31.data/data/npcsh/npc_team/celona.npc +0 -0
  69. npcsh-0.3.31.data/data/npcsh/npc_team/code_executor.tool +0 -16
  70. npcsh-0.3.31.data/data/npcsh/npc_team/corca.npc +0 -13
  71. npcsh-0.3.31.data/data/npcsh/npc_team/eriane.npc +0 -4
  72. npcsh-0.3.31.data/data/npcsh/npc_team/foreman.npc +0 -7
  73. npcsh-0.3.31.data/data/npcsh/npc_team/generic_search.tool +0 -27
  74. npcsh-0.3.31.data/data/npcsh/npc_team/image_generation.tool +0 -25
  75. npcsh-0.3.31.data/data/npcsh/npc_team/lineru.npc +0 -0
  76. npcsh-0.3.31.data/data/npcsh/npc_team/local_search.tool +0 -149
  77. npcsh-0.3.31.data/data/npcsh/npc_team/maurawa.npc +0 -0
  78. npcsh-0.3.31.data/data/npcsh/npc_team/npcsh.ctx +0 -11
  79. npcsh-0.3.31.data/data/npcsh/npc_team/npcsh_executor.tool +0 -9
  80. npcsh-0.3.31.data/data/npcsh/npc_team/raone.npc +0 -0
  81. npcsh-0.3.31.data/data/npcsh/npc_team/screen_cap.tool +0 -27
  82. npcsh-0.3.31.data/data/npcsh/npc_team/sibiji.npc +0 -4
  83. npcsh-0.3.31.data/data/npcsh/npc_team/slean.npc +0 -4
  84. npcsh-0.3.31.data/data/npcsh/npc_team/sql_executor.tool +0 -26
  85. npcsh-0.3.31.data/data/npcsh/npc_team/test_pipeline.py +0 -181
  86. npcsh-0.3.31.data/data/npcsh/npc_team/turnic.npc +0 -4
  87. npcsh-0.3.31.data/data/npcsh/npc_team/welxor.npc +0 -0
  88. npcsh-0.3.31.dist-info/METADATA +0 -1853
  89. npcsh-0.3.31.dist-info/RECORD +0 -76
  90. npcsh-0.3.31.dist-info/entry_points.txt +0 -3
  91. {npcsh-0.3.31.dist-info → npcsh-1.0.0.dist-info}/top_level.txt +0 -0
npcsh/response.py DELETED
@@ -1,623 +0,0 @@
1
- from typing import Any, Dict, Generator, List, Union
2
- from pydantic import BaseModel
3
- import os
4
- import anthropic
5
- from openai import OpenAI
6
- from google.generativeai import types
7
- from google import genai
8
-
9
- import google.generativeai as genai
10
- from .npc_sysenv import (
11
- get_system_message,
12
- compress_image,
13
- available_chat_models,
14
- available_reasoning_models,
15
- )
16
-
17
- import json
18
- import requests
19
- import base64
20
- from PIL import Image
21
-
22
-
23
def get_deepseek_response(
    prompt: str,
    model: str,
    images: List[Dict[str, str]] = None,
    npc: Any = None,
    tools: list = None,
    format: Union[str, BaseModel] = None,
    messages: List[Dict[str, str]] = None,
    api_key: str = None,
    **kwargs,
) -> Dict[str, Any]:
    """
    Generate a response using the DeepSeek API (OpenAI-compatible endpoint).

    Args:
        prompt (str): The prompt for generating the response.
        model (str): The model to use for generating the response.
    Keyword Args:
        images (List[Dict[str, str]]): Optional image dicts with a "file_path"
            key; each is compressed and embedded as a base64 data URL in the
            latest user message.
        npc (Any): Optional NPC object used to build the system message.
        tools (list): Accepted for interface parity; not forwarded here.
        format (Union[str, BaseModel]): "json" for free-form JSON output, or a
            pydantic model class for structured parsing.
        messages (List[Dict[str, str]]): Existing conversation; a fresh one is
            built from `prompt` when empty.
        api_key (str): DeepSeek key; falls back to the DEEPSEEK_API_KEY env var.
    Returns:
        Dict[str, Any]: {"response": ..., "messages": [...]} on success, or
        {"error": ...} on failure.
    Raises:
        NotImplementedError: structured output requested for a reasoning model.
    """
    if api_key is None:
        api_key = os.getenv("DEEPSEEK_API_KEY", None)
    client = OpenAI(api_key=api_key, base_url="https://api.deepseek.com")

    system_message = get_system_message(npc) if npc else "You are a helpful assistant."
    if messages is None or len(messages) == 0:
        messages = [
            {"role": "system", "content": system_message},
            {"role": "user", "content": [{"type": "text", "text": prompt}]},
        ]
    if images:
        for image in images:
            with open(image["file_path"], "rb") as image_file:
                image_data = base64.b64encode(compress_image(image_file.read())).decode(
                    "utf-8"
                )
            messages[-1]["content"].append(
                {
                    "type": "image_url",
                    "image_url": {
                        "url": f"data:image/jpeg;base64,{image_data}",
                    },
                }
            )

    response_format = None if format == "json" else format
    if response_format is None:
        completion = client.chat.completions.create(model=model, messages=messages)
        llm_response = completion.choices[0].message.content
        items_to_return = {"response": llm_response, "messages": messages}
        if format == "json":
            try:
                # Strip markdown code fences before parsing, matching the
                # behavior of the other provider functions in this module.
                if isinstance(llm_response, str) and llm_response.startswith("```json"):
                    llm_response = (
                        llm_response.replace("```json", "").replace("```", "").strip()
                    )
                items_to_return["response"] = json.loads(llm_response)
                return items_to_return
            except json.JSONDecodeError:
                print(f"Warning: Expected JSON response, but received: {llm_response}")
                return {"error": "Invalid JSON response"}
        items_to_return["messages"].append(
            {"role": "assistant", "content": llm_response}
        )
        return items_to_return

    if model in available_reasoning_models:
        raise NotImplementedError("Reasoning models do not support JSON output.")
    try:
        completion = client.beta.chat.completions.parse(
            model=model, messages=messages, response_format=response_format
        )
        items_to_return = {"response": completion.choices[0].message.parsed.dict()}
        items_to_return["messages"] = messages
        items_to_return["messages"].append(
            {"role": "assistant", "content": completion.choices[0].message.parsed}
        )
        return items_to_return
    except Exception as e:
        # Previously this printed and fell through, implicitly returning None;
        # surface the failure so callers always get a dict back.
        print("pydantic outputs not yet implemented with deepseek?")
        return {"error": f"Structured output failed: {e}"}
119
-
120
def get_ollama_response(
    prompt: str,
    model: str,
    images: List[Dict[str, str]] = None,
    npc: Any = None,
    tools: list = None,
    format: Union[str, BaseModel] = None,
    messages: List[Dict[str, str]] = None,
    **kwargs,
) -> Dict[str, Any]:
    """
    Generate a response using the locally running Ollama API.

    Args:
        prompt (str): Prompt for generating the response.
        model (str): Model to use for generating the response.
        images (List[Dict[str, str]], optional): Image dicts with a "file_path"
            key, attached to the latest message. Defaults to None.
        npc (Any, optional): NPC object used to derive the system message.
        tools (list, optional): Accepted for interface parity; unused here.
        format (Union[str, BaseModel], optional): "json" for JSON output, or a
            pydantic model class whose JSON schema is handed to Ollama.
        messages (List[Dict[str, str]], optional): Existing conversation; a
            fresh one is built from `prompt` when empty.

    Returns:
        Dict[str, Any]: {"response": ..., "messages": [...]}, or
        {"error": ...} if JSON parsing fails.
    """
    import ollama

    sys_msg = get_system_message(npc) if npc else "You are a helpful assistant."
    if not messages:
        messages = [
            {"role": "system", "content": sys_msg},
            {"role": "user", "content": prompt},
        ]

    if images:
        messages[-1]["images"] = [img["file_path"] for img in images]

    # Dispatch on the requested output format.
    if isinstance(format, type):
        # Pydantic model class: hand Ollama its JSON schema.
        res = ollama.chat(
            model=model, messages=messages, format=format.model_json_schema()
        )
    elif isinstance(format, str) and format == "json":
        res = ollama.chat(model=model, messages=messages, format=format)
    else:
        res = ollama.chat(model=model, messages=messages)

    content = res.get("message", {}).get("content")
    result = {"response": content}

    messages.append({"role": "assistant", "content": content})
    result["messages"] = messages

    if format == "json":
        if model in available_reasoning_models:
            raise NotImplementedError("Reasoning models do not support JSON output.")
        try:
            if isinstance(content, str):
                # Strip markdown code fences the model may have wrapped around
                # the JSON payload.
                if content.startswith("```json"):
                    content = (
                        content.replace("```json", "").replace("```", "").strip()
                    )
                content = json.loads(content)
            result["response"] = content
        except json.JSONDecodeError:
            return {"error": f"Invalid JSON response: {content}"}

    return result
203
-
204
def get_openai_response(
    prompt: str,
    model: str,
    images: List[Dict[str, str]] = None,
    npc: Any = None,
    tools: list = None,
    format: Union[str, BaseModel] = None,
    api_key: str = None,
    messages: List[Dict[str, str]] = None,
    **kwargs,
):
    """
    Generate a response using the OpenAI API.

    Args:
        prompt (str): The prompt for generating the response.
        model (str): The model to use for generating the response.
    Keyword Args:
        images (List[Dict[str, str]]): Optional image dicts ("file_path");
            compressed and embedded as base64 data URLs.
        npc (Any): Optional NPC object used to derive the system message.
        tools (list): Accepted for interface parity; not forwarded here.
        format (Union[str, BaseModel]): "json" for free-form JSON output, or a
            pydantic model class for structured parsing.
        api_key (str): OpenAI key; falls back to the OPENAI_API_KEY env var.
        messages (List[Dict[str, str]]): Existing conversation; built from
            `prompt` when empty.
    Returns:
        Dict with "response" and "messages", or {"error": ...} on bad JSON.
    Raises:
        ValueError: when no API key can be found.
        NotImplementedError: JSON output requested for a reasoning model.
    """
    if api_key is None:
        api_key = os.environ.get("OPENAI_API_KEY", "")
    if len(api_key) == 0:
        raise ValueError("API key not found.")
    client = OpenAI(api_key=api_key)

    system_message = get_system_message(npc) if npc else "You are a helpful assistant."
    if not messages:
        messages = [
            {"role": "system", "content": system_message},
            {"role": "user", "content": [{"type": "text", "text": prompt}]},
        ]

    for image in images or []:
        with open(image["file_path"], "rb") as image_file:
            encoded = base64.b64encode(compress_image(image_file.read())).decode(
                "utf-8"
            )
        messages[-1]["content"].append(
            {
                "type": "image_url",
                "image_url": {"url": f"data:image/jpeg;base64,{encoded}"},
            }
        )

    structured_format = None if format == "json" else format

    if structured_format is not None:
        # Structured output: let the SDK parse into the pydantic model.
        completion = client.beta.chat.completions.parse(
            model=model, messages=messages, response_format=structured_format
        )
        parsed = completion.choices[0].message.parsed
        reply = {"response": parsed.dict(), "messages": messages}
        messages.append({"role": "assistant", "content": parsed})
        return reply

    completion = client.chat.completions.create(model=model, messages=messages)
    text = completion.choices[0].message.content
    result = {"response": text, "messages": messages}

    if format != "json":
        messages.append({"role": "assistant", "content": text})
        return result

    if model in available_reasoning_models:
        raise NotImplementedError("Reasoning models do not support JSON output.")
    try:
        if isinstance(text, str):
            # Strip markdown code fences the model may have added.
            if text.startswith("```json"):
                text = text.replace("```json", "").replace("```", "").strip()
            text = json.loads(text)
        result["response"] = text
        return result
    except json.JSONDecodeError:
        print(f"Warning: Expected JSON response, but received: {text}")
        return {"error": "Invalid JSON response"}
312
-
313
def get_anthropic_response(
    prompt: str,
    model: str,
    images: List[Dict[str, str]] = None,
    npc: Any = None,
    tools: list = None,
    format: str = None,
    api_key: str = None,
    messages: List[Dict[str, str]] = None,
    **kwargs,
):
    """
    Generate a response using the Anthropic API.

    Args:
        prompt (str): The prompt for generating the response.
        model (str): The model to use for generating the response.
    Keyword Args:
        images (List[Dict[str, str]]): Optional image dicts ("file_path");
            embedded as base64 image blocks in the latest message.
        npc (Any): Optional NPC object used to derive the system message.
        tools (list): Optional tool definitions forwarded to the API.
        format (str): "json" to parse the reply as JSON.
        api_key (str): Anthropic key; falls back to ANTHROPIC_API_KEY env var.
        messages (List[Dict[str, str]]): Existing conversation; built from
            `prompt` when empty.
    Returns:
        Dict with "response" and "messages" on success; on JSON parse failure
        a dict with an "error" key; on any other exception an error string
        (kept for backward compatibility with existing callers).
    """
    try:
        if api_key is None:
            api_key = os.environ.get("ANTHROPIC_API_KEY")
        client = anthropic.Anthropic(api_key=api_key)

        # Bug fix: previously crashed with TypeError when messages was None.
        if messages is None or len(messages) == 0:
            messages = [{"role": "user", "content": prompt}]

        # Anthropic takes the system prompt as a separate top-level string,
        # not as a chat message, so peel it off the message list if present.
        # Bug fix: system_message was previously unbound when messages[0] was
        # not a system message and npc was None, and the whole message dict
        # (not its content) was passed as the system string.
        system_message = (
            get_system_message(npc) if npc else "You are a helpful assistant."
        )
        if messages[0]["role"] == "system":
            system_message = messages[0]["content"]
            messages = messages[1:]

        # Normalize string content to the list-of-parts form the API expects.
        for message in messages:
            if isinstance(message["content"], str):
                message["content"] = [{"type": "text", "text": message["content"]}]

        if images:
            for img in images:
                with open(img["file_path"], "rb") as image_file:
                    img["data"] = base64.b64encode(image_file.read()).decode("utf-8")
                    img["media_type"] = "image/jpeg"
                    messages[-1]["content"].append(
                        {
                            "type": "image",
                            "source": {
                                "type": "base64",
                                "media_type": img["media_type"],
                                "data": img["data"],
                            },
                        }
                    )

        api_params = {
            "model": model,
            "messages": messages,
            "max_tokens": kwargs.get("max_tokens", 8192),
            "stream": False,
            "system": system_message,
        }

        if tools:
            api_params["tools"] = tools

        # Bug fix: tool_choice was previously read from an undefined local
        # name, raising NameError on every call; take it from kwargs instead.
        tool_choice = kwargs.get("tool_choice")
        if tool_choice:
            api_params["tool_choice"] = tool_choice

        response = client.messages.create(**api_params)

        # Bug fix: previously read from an undefined `message` variable.
        llm_response = response.content[0].text
        items_to_return = {"response": llm_response}
        # Bug fix: the assistant reply was appended twice in the non-JSON
        # path; append exactly once, in list-of-parts form.
        messages.append(
            {"role": "assistant", "content": [{"type": "text", "text": llm_response}]}
        )
        items_to_return["messages"] = messages

        if format == "json":
            try:
                if isinstance(llm_response, str):
                    if llm_response.startswith("```json"):
                        llm_response = (
                            llm_response.replace("```json", "")
                            .replace("```", "")
                            .strip()
                        )
                    llm_response = json.loads(llm_response)
                items_to_return["response"] = llm_response
                return items_to_return
            except json.JSONDecodeError:
                print(f"Warning: Expected JSON response, but received: {llm_response}")
                return {"response": llm_response, "error": "Invalid JSON response"}

        return items_to_return

    except Exception as e:
        return f"Error interacting with Anthropic llm response: {e}"
425
-
426
def get_openai_like_response(
    prompt: str,
    model: str,
    api_url: str,
    api_key: str = None,
    npc: Any = None,
    tools: list = None,
    images: list = None,
    messages: list = None,
    format=None,
    **kwargs,
) -> Dict[str, Any]:
    """
    Generate a response using an OpenAI-compatible API endpoint.

    Args:
        prompt (str): The prompt for generating the response.
        model (str): The model to use for generating the response.
        api_url (str): Base URL of the OpenAI-compatible server.
    Keyword Args:
        api_key (str): Key for the endpoint; a dummy value is used when the
            server does not require authentication.
        npc (Any): Optional NPC object used to derive the system message.
        tools (list): Accepted for interface parity; not forwarded here.
        images (list): Optional image dicts ("file_path"); compressed and
            embedded as base64 data URLs.
        messages (list): Existing conversation; built from `prompt` when empty.
        format: "json" for free-form JSON output, or a pydantic model class
            for structured parsing.
    Returns:
        Dict[str, Any]: {"response": ..., "messages": [...]}, or
        {"error": ...} if JSON parsing fails.
    Raises:
        NotImplementedError: structured output requested for a reasoning model.
    """
    if api_key is None:
        api_key = "dummy_api_key"
    client = OpenAI(api_key=api_key, base_url=api_url)
    system_message = get_system_message(npc) if npc else "You are a helpful assistant."
    if messages is None or len(messages) == 0:
        messages = [
            {"role": "system", "content": system_message},
            {"role": "user", "content": [{"type": "text", "text": prompt}]},
        ]
    if images:
        for image in images:
            with open(image["file_path"], "rb") as image_file:
                image_data = base64.b64encode(compress_image(image_file.read())).decode(
                    "utf-8"
                )
            messages[-1]["content"].append(
                {
                    "type": "image_url",
                    "image_url": {
                        "url": f"data:image/jpeg;base64,{image_data}",
                    },
                }
            )

    response_format = None if format == "json" else format
    if response_format is None:
        completion = client.chat.completions.create(model=model, messages=messages)
        llm_response = completion.choices[0].message.content
        items_to_return = {"response": llm_response}

        items_to_return["messages"] = messages
        if format == "json":
            if model in available_reasoning_models:
                raise NotImplementedError(
                    "Reasoning models do not support JSON output."
                )
            try:
                if isinstance(llm_response, str):
                    # Strip markdown code fences before parsing.
                    if llm_response.startswith("```json"):
                        llm_response = (
                            llm_response.replace("```json", "")
                            .replace("```", "")
                            .strip()
                        )
                items_to_return["response"] = json.loads(llm_response)
                return items_to_return
            except json.JSONDecodeError:
                print(f"Warning: Expected JSON response, but received: {llm_response}")
                return {"error": "Invalid JSON response"}
        else:
            items_to_return["messages"].append(
                {"role": "assistant", "content": llm_response}
            )
            return items_to_return

    else:
        if model in available_reasoning_models:
            raise NotImplementedError("Reasoning models do not support JSON output.")

        completion = client.beta.chat.completions.parse(
            model=model, messages=messages, response_format=response_format
        )

        items_to_return = {"response": completion.choices[0].message.parsed.dict()}
        items_to_return["messages"] = messages

        items_to_return["messages"].append(
            {"role": "assistant", "content": completion.choices[0].message.parsed}
        )
        return items_to_return
530
-
531
def get_gemini_response(
    prompt: str,
    model: str,
    images: List[Dict[str, str]] = None,
    npc: Any = None,
    tools: list = None,
    format: Union[str, BaseModel] = None,
    messages: List[Dict[str, str]] = None,
    api_key: str = None,
    **kwargs,
) -> Dict[str, Any]:
    """
    Generate a response using the Gemini API.

    Args:
        prompt (str): The prompt for generating the response.
        model (str): The Gemini model name.
    Keyword Args:
        images (List[Dict[str, str]]): Optional image dicts ("file_path");
            opened with PIL and appended to the latest message's parts.
        npc (Any): Optional NPC object used to derive the system instruction.
        tools (list): Accepted for interface parity; not forwarded here.
        format (Union[str, BaseModel]): "json" to parse the reply as JSON.
        messages (List[Dict[str, str]]): Existing conversation; converted to
            Gemini's {"role", "parts"} history format.
        api_key (str): Gemini key; falls back to the GEMINI_API_KEY env var.
    Returns:
        Dict[str, Any]: {"response": ..., "messages": [...]}, or
        {"error": ...} if JSON parsing fails.
    """
    # Bug fix: this previously called genai.configure(api_key=gemini_api_key)
    # with an undefined name (NameError when api_key was None) and never
    # configured the client when api_key WAS provided.
    if api_key is None:
        api_key = os.environ.get("GEMINI_API_KEY")
    genai.configure(api_key=api_key)

    system_message = get_system_message(npc) if npc else "You are a helpful assistant."
    # Use a distinct name so the `model` argument (a string) is not shadowed.
    gemini_model = genai.GenerativeModel(model, system_instruction=system_message)

    # Convert any {"content": ...} chat messages into Gemini's
    # {"role": ..., "parts": [...]} history format.
    history = []
    if messages:
        for msg in messages:
            if "content" in msg:
                history.append({"role": msg["role"], "parts": [msg["content"]]})
            else:
                # Already in parts format.
                history.append(msg)
    # If no history, start one from the prompt; otherwise append the new turn.
    if not history:
        history = [{"role": "user", "parts": [prompt]}]
    elif isinstance(prompt, str):
        history.append({"role": "user", "parts": [prompt]})

    # Attach images to the latest message's parts.
    if images:
        for image in images:
            with open(image["file_path"], "rb") as image_file:
                img = Image.open(image_file)
                history[-1]["parts"].append(img)

    # Send the entire conversation history to maintain context.
    response = gemini_model.generate_content(history)
    llm_response = response.text

    # Filter out empty parts.
    if isinstance(llm_response, list):
        llm_response = " ".join([part for part in llm_response if part.strip()])
    elif not llm_response.strip():
        llm_response = ""

    items_to_return = {"response": llm_response, "messages": history}

    if format == "json":
        if isinstance(llm_response, str):
            if llm_response.startswith("```json"):
                llm_response = (
                    llm_response.replace("```json", "").replace("```", "").strip()
                )
        try:
            items_to_return["response"] = json.loads(llm_response)
        except json.JSONDecodeError:
            print(f"Warning: Expected JSON response, but received: {llm_response}")
            return {"error": "Invalid JSON response"}
    else:
        # Append the model's reply so the returned history is complete.
        history.append({"role": "model", "parts": [llm_response]})
        items_to_return["messages"] = history

    return items_to_return