npcsh 0.3.32__py3-none-any.whl → 1.0.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (93)
  1. npcsh/_state.py +942 -0
  2. npcsh/alicanto.py +1074 -0
  3. npcsh/guac.py +785 -0
  4. npcsh/mcp_helpers.py +357 -0
  5. npcsh/mcp_npcsh.py +822 -0
  6. npcsh/mcp_server.py +184 -0
  7. npcsh/npc.py +218 -0
  8. npcsh/npcsh.py +1161 -0
  9. npcsh/plonk.py +387 -269
  10. npcsh/pti.py +234 -0
  11. npcsh/routes.py +958 -0
  12. npcsh/spool.py +315 -0
  13. npcsh/wander.py +550 -0
  14. npcsh/yap.py +573 -0
  15. npcsh-1.0.1.dist-info/METADATA +596 -0
  16. npcsh-1.0.1.dist-info/RECORD +21 -0
  17. {npcsh-0.3.32.dist-info → npcsh-1.0.1.dist-info}/WHEEL +1 -1
  18. npcsh-1.0.1.dist-info/entry_points.txt +9 -0
  19. {npcsh-0.3.32.dist-info → npcsh-1.0.1.dist-info}/licenses/LICENSE +1 -1
  20. npcsh/audio.py +0 -569
  21. npcsh/audio_gen.py +0 -1
  22. npcsh/cli.py +0 -543
  23. npcsh/command_history.py +0 -566
  24. npcsh/conversation.py +0 -54
  25. npcsh/data_models.py +0 -46
  26. npcsh/dataframes.py +0 -171
  27. npcsh/embeddings.py +0 -168
  28. npcsh/helpers.py +0 -646
  29. npcsh/image.py +0 -298
  30. npcsh/image_gen.py +0 -79
  31. npcsh/knowledge_graph.py +0 -1006
  32. npcsh/llm_funcs.py +0 -2195
  33. npcsh/load_data.py +0 -83
  34. npcsh/main.py +0 -5
  35. npcsh/model_runner.py +0 -189
  36. npcsh/npc_compiler.py +0 -2879
  37. npcsh/npc_sysenv.py +0 -388
  38. npcsh/npc_team/assembly_lines/test_pipeline.py +0 -181
  39. npcsh/npc_team/corca.npc +0 -13
  40. npcsh/npc_team/foreman.npc +0 -7
  41. npcsh/npc_team/npcsh.ctx +0 -11
  42. npcsh/npc_team/sibiji.npc +0 -4
  43. npcsh/npc_team/templates/analytics/celona.npc +0 -0
  44. npcsh/npc_team/templates/hr_support/raone.npc +0 -0
  45. npcsh/npc_team/templates/humanities/eriane.npc +0 -4
  46. npcsh/npc_team/templates/it_support/lineru.npc +0 -0
  47. npcsh/npc_team/templates/marketing/slean.npc +0 -4
  48. npcsh/npc_team/templates/philosophy/maurawa.npc +0 -0
  49. npcsh/npc_team/templates/sales/turnic.npc +0 -4
  50. npcsh/npc_team/templates/software/welxor.npc +0 -0
  51. npcsh/npc_team/tools/bash_executer.tool +0 -32
  52. npcsh/npc_team/tools/calculator.tool +0 -8
  53. npcsh/npc_team/tools/code_executor.tool +0 -16
  54. npcsh/npc_team/tools/generic_search.tool +0 -27
  55. npcsh/npc_team/tools/image_generation.tool +0 -25
  56. npcsh/npc_team/tools/local_search.tool +0 -149
  57. npcsh/npc_team/tools/npcsh_executor.tool +0 -9
  58. npcsh/npc_team/tools/screen_cap.tool +0 -27
  59. npcsh/npc_team/tools/sql_executor.tool +0 -26
  60. npcsh/response.py +0 -272
  61. npcsh/search.py +0 -252
  62. npcsh/serve.py +0 -1467
  63. npcsh/shell.py +0 -524
  64. npcsh/shell_helpers.py +0 -3919
  65. npcsh/stream.py +0 -233
  66. npcsh/video.py +0 -52
  67. npcsh/video_gen.py +0 -69
  68. npcsh-0.3.32.data/data/npcsh/npc_team/bash_executer.tool +0 -32
  69. npcsh-0.3.32.data/data/npcsh/npc_team/calculator.tool +0 -8
  70. npcsh-0.3.32.data/data/npcsh/npc_team/celona.npc +0 -0
  71. npcsh-0.3.32.data/data/npcsh/npc_team/code_executor.tool +0 -16
  72. npcsh-0.3.32.data/data/npcsh/npc_team/corca.npc +0 -13
  73. npcsh-0.3.32.data/data/npcsh/npc_team/eriane.npc +0 -4
  74. npcsh-0.3.32.data/data/npcsh/npc_team/foreman.npc +0 -7
  75. npcsh-0.3.32.data/data/npcsh/npc_team/generic_search.tool +0 -27
  76. npcsh-0.3.32.data/data/npcsh/npc_team/image_generation.tool +0 -25
  77. npcsh-0.3.32.data/data/npcsh/npc_team/lineru.npc +0 -0
  78. npcsh-0.3.32.data/data/npcsh/npc_team/local_search.tool +0 -149
  79. npcsh-0.3.32.data/data/npcsh/npc_team/maurawa.npc +0 -0
  80. npcsh-0.3.32.data/data/npcsh/npc_team/npcsh.ctx +0 -11
  81. npcsh-0.3.32.data/data/npcsh/npc_team/npcsh_executor.tool +0 -9
  82. npcsh-0.3.32.data/data/npcsh/npc_team/raone.npc +0 -0
  83. npcsh-0.3.32.data/data/npcsh/npc_team/screen_cap.tool +0 -27
  84. npcsh-0.3.32.data/data/npcsh/npc_team/sibiji.npc +0 -4
  85. npcsh-0.3.32.data/data/npcsh/npc_team/slean.npc +0 -4
  86. npcsh-0.3.32.data/data/npcsh/npc_team/sql_executor.tool +0 -26
  87. npcsh-0.3.32.data/data/npcsh/npc_team/test_pipeline.py +0 -181
  88. npcsh-0.3.32.data/data/npcsh/npc_team/turnic.npc +0 -4
  89. npcsh-0.3.32.data/data/npcsh/npc_team/welxor.npc +0 -0
  90. npcsh-0.3.32.dist-info/METADATA +0 -779
  91. npcsh-0.3.32.dist-info/RECORD +0 -78
  92. npcsh-0.3.32.dist-info/entry_points.txt +0 -3
  93. {npcsh-0.3.32.dist-info → npcsh-1.0.1.dist-info}/top_level.txt +0 -0
npcsh/npc_team/tools/code_executor.tool DELETED
@@ -1,16 +0,0 @@
- tool_name: code_executor
- description: Execute scripts with a specified language. Choose from python, bash, R, or javascript. Set the ultimate result as the "output" variable. It must be a string. Do not add unnecessary print statements.
- inputs:
-   - code
-   - language
- steps:
-   - engine: '{{ language }}'
-     code: |
-       {{code}}
-   - engine: natural
-     code: |
-       Here is the result of the code execution that an agent ran.
-       ```
-       {{ output }}
-       ```
-       Please provide a response accordingly.
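The `{{ language }}` and `{{code}}` placeholders above are filled in before a step runs. A minimal sketch of that substitution, assuming Jinja2-style templating as the syntax suggests (the loader below is illustrative, not npcsh's actual one):

```python
# Render a .tool step's placeholders with user-supplied inputs.
# Assumption: npcsh uses Jinja2-style templating; this loader is hypothetical.
from jinja2 import Template

step = {"engine": "{{ language }}", "code": "{{code}}"}
inputs = {"language": "python", "code": "output = str(21 * 2)"}

engine = Template(step["engine"]).render(**inputs)  # -> "python"
code = Template(step["code"]).render(**inputs)      # -> "output = str(21 * 2)"
print(engine, "|", code)
```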
npcsh/npc_team/tools/generic_search.tool DELETED
@@ -1,27 +0,0 @@
- tool_name: "internet_search"
- description: Searches the web for information based on a query in order to verify timely details (e.g. current events) or to corroborate information in uncertain situations. Should mainly be used when users specifically request a search; otherwise an LLM's basic knowledge should be sufficient.
- inputs:
-   - query
-   - provider: ''
- steps:
-   - engine: "python"
-     code: |
-       from npcsh.search import search_web
-       from npcsh.npc_sysenv import NPCSH_SEARCH_PROVIDER
-       query = "{{ query }}"
-       provider = '{{ provider }}'
-       if provider.strip() != '':
-           results = search_web(query, num_results=5, provider=provider)
-       else:
-           results = search_web(query, num_results=5, provider=NPCSH_SEARCH_PROVIDER)
-
-       print('QUERY in tool', query)
-       print('RESULTS in tool', results)
-   - engine: "natural"
-     code: |
-       Using the following information extracted from the web:
-
-       {{ results }}
-
-       Answer the user's question: {{ query }}
npcsh/npc_team/tools/image_generation.tool DELETED
@@ -1,25 +0,0 @@
- tool_name: "image_generation_tool"
- description: |
-   Generates images based on a text prompt.
- inputs:
-   - prompt
-   - model: 'runwayml/stable-diffusion-v1-5'
-   - provider: 'diffusers'
-
- steps:
-   - engine: "python"
-     code: |
-       image_prompt = '{{prompt}}'.strip()
-
-       # Generate the image
-       filename = generate_image(
-           image_prompt,
-           npc=npc,
-           model='{{model}}',  # You can adjust the model as needed
-           provider='{{provider}}'
-       )
-       if filename:
-           image_generated = True
-       else:
-           image_generated = False
-
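The defaults above point at the `diffusers` provider with `runwayml/stable-diffusion-v1-5`. A hypothetical sketch of what a diffusers-backed `generate_image` could reduce to; the real implementation lived in the (also deleted) npcsh/image_gen.py and may differ:

```python
# Hypothetical stand-in for generate_image with provider='diffusers'.
# Requires the diffusers and torch packages; the model download is large.
from diffusers import StableDiffusionPipeline

def generate_image_sketch(prompt: str,
                          model: str = "runwayml/stable-diffusion-v1-5",
                          out_path: str = "generated.png") -> str:
    pipe = StableDiffusionPipeline.from_pretrained(model)
    image = pipe(prompt).images[0]  # take the first generated image
    image.save(out_path)
    return out_path
```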
npcsh/npc_team/tools/local_search.tool DELETED
@@ -1,149 +0,0 @@
- tool_name: local_search
- description: |
-   Searches files in current and downstream directories to find items related to the user's query using fuzzy matching.
-   Returns only relevant snippets (10 lines around matches) to avoid including too much irrelevant content.
-   Intended for fuzzy searches, not for understanding file sizes.
- inputs:
-   - query
-   - summarize: false # Optional - set to true to summarize the results
-   - file_filter: 'none' # Optional - can be filename patterns or folder names
-   - depth: 2 # Optional - search depth for nested directories
-   - fuzzy_threshold: 70 # Optional - minimum fuzzy match score (0-100)
- steps:
-   - engine: python
-     code: |
-       # Search parameters are directly available
-       query = "{{ query }}"
-       file_filter = "{{ file_filter | default('None') }}"
-       if isinstance(file_filter, str) and file_filter.lower() == 'none':
-           file_filter = None
-       max_depth = {{ depth | default(2) }}
-       fuzzy_threshold = {{ fuzzy_threshold | default(70) }}
-
-       import os
-       import fnmatch
-       from pathlib import Path
-       from thefuzz import fuzz  # Fuzzy string matching library
-
-       def find_files(file_filter=None, max_depth=2):
-           default_extensions = ['.py', '.txt', '.md',
-                                 '.json', '.yml', '.yaml',
-                                 '.log', '.csv', '.html',
-                                 '.js', '.css']
-           matches = []
-           root_path = Path('.').resolve()  # Resolve to absolute path
-
-           # First, check files in the current directory
-           for path in root_path.iterdir():
-               if path.is_file():
-                   # Skip hidden files
-                   if path.name.startswith('.'):
-                       continue
-
-                   # If no filter specified, include files with default extensions
-                   if file_filter is None:
-                       if path.suffix in default_extensions:
-                           matches.append(str(path))
-                   else:
-                       # If filter specified, check if file matches the filter
-                       filters = [file_filter] if isinstance(file_filter, str) else file_filter
-                       for f in filters:
-                           if (fnmatch.fnmatch(path.name, f) or
-                               fnmatch.fnmatch(str(path), f'*{f}*')):
-                               matches.append(str(path))
-                               break
-
-           # Then, check subdirectories with depth control
-           for path in root_path.rglob('*'):
-               # Skip hidden folders and common directories to ignore
-               if '/.' in str(path) or '__pycache__' in str(path) or '.git' in str(path) or 'node_modules' in str(path) or 'venv' in str(path):
-                   continue
-
-               # Skip if we've gone too deep
-               relative_depth = len(path.relative_to(root_path).parts)
-               if relative_depth > max_depth:
-                   continue
-
-               if path.is_file():
-                   # If no filter specified, include files with default extensions
-                   if file_filter is None:
-                       if path.suffix in default_extensions:
-                           matches.append(str(path))
-                   else:
-                       # If filter specified, check if file matches the filter
-                       filters = [file_filter] if isinstance(file_filter, str) else file_filter
-                       for f in filters:
-                           if (fnmatch.fnmatch(path.name, f) or
-                               fnmatch.fnmatch(str(path), f'*{f}*')):
-                               matches.append(str(path))
-                               break
-
-           return matches
-
-       # Find and load files
-       files = find_files(file_filter, max_depth)
-
-       # Process documents
-       relevant_chunks = []
-       for file_path in files:
-           with open(file_path, 'r', encoding='utf-8') as f:
-               lines = f.readlines()  # Read file as lines
-           if lines:
-               # Join lines into a single string for fuzzy matching
-               content = ''.join(lines)
-               match_score = fuzz.partial_ratio(query.lower(), content.lower())
-               if match_score >= fuzzy_threshold:
-                   # Find the best matching line
-                   best_line_index = -1
-                   best_line_score = 0
-                   for i, line in enumerate(lines):
-                       line_score = fuzz.partial_ratio(query.lower(), line.lower())
-                       if line_score > best_line_score:
-                           best_line_score = line_score
-                           best_line_index = i
-
-                   # Extract 10 lines around the best matching line
-                   if best_line_index != -1:
-                       start = max(0, best_line_index - 5)  # 5 lines before
-                       end = min(len(lines), best_line_index + 6)  # 5 lines after
-                       snippet = ''.join(lines[start:end])
-                       relevant_chunks.append({
-                           'path': file_path,
-                           'snippet': snippet,
-                           'ext': Path(file_path).suffix.lower(),
-                           'score': match_score
-                       })
-
-       # Sort results by match score (highest first)
-       relevant_chunks.sort(key=lambda x: x['score'], reverse=True)
-
-       # Format results
-       if relevant_chunks:
-           context_text = "Here are the most relevant code sections:\n\n"
-           for chunk in relevant_chunks:
-               file_path = chunk['path'].replace('./', '')
-               context_text += f"File: {file_path} (match score: {chunk['score']})\n"
-               context_text += f"```{chunk['ext'][1:] if chunk['ext'] else ''}\n"
-               context_text += f"{chunk['snippet'].strip()}\n"
-               context_text += "```\n\n"
-       else:
-           context_text = "No relevant code sections found.\n"
-
-       output = context_text
-
-   - engine: natural
-     code: |
-       {% if summarize %}
-       You are a helpful coding assistant.
-       Please help with this query:
-
-       `{{ query }}`
-
-       The user is attempting to carry out a local search. This search returned the following results:
-
-       `{{ results }}`
-
-       Please analyze the code sections above and provide a clear, helpful response that directly addresses the query.
-       If you reference specific files or code sections in your response, indicate which file they came from.
-       Make sure to explain your reasoning and how the provided code relates to the query.
-       {% endif %}
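The 0-100 `fuzzy_threshold` above is on thefuzz's `partial_ratio` scale, which scores the best-matching substring alignment rather than the whole string:

```python
from thefuzz import fuzz

# A query contained verbatim in the text scores 100 ...
print(fuzz.partial_ratio("fuzzy search", "this file implements fuzzy search"))
# ... while unrelated text typically scores well below the default threshold of 70.
print(fuzz.partial_ratio("fuzzy search", "quarterly revenue projections"))
```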
npcsh/npc_team/tools/npcsh_executor.tool DELETED
@@ -1,9 +0,0 @@
- tool_name: npcsh_executor
- description: Execute npcsh commands. Use the macro commands.
- inputs:
-   - code
-   - language
- steps:
-   - engine: "{{language}}"
-     code: |
-       {{code}}
npcsh/npc_team/tools/screen_cap.tool DELETED
@@ -1,27 +0,0 @@
- tool_name: "screen_capture_analysis_tool"
- description: Captures the whole screen and sends the image for analysis
- inputs:
-   - "prompt"
- steps:
-   - engine: "python"
-     code: |
-       # Capture the screen
-       import pyautogui
-       import datetime
-       import os
-       from PIL import Image
-       import time
-       from npcsh.image import analyze_image_base, capture_screenshot
-
-       out = capture_screenshot(npc=npc, full=True)
-
-       llm_response = analyze_image_base(
-           '{{prompt}}' + "\n\nAttached is a screenshot of my screen currently. Please use this to evaluate the situation. If the user asked for you to explain what's on their screen or something similar, they are referring to the details contained within the attached image. You do not need to actually view their screen. You do not need to mention that you cannot view or interpret images directly. You only need to answer the user's request based on the attached screenshot!",
-           out['file_path'],
-           out['filename'],
-           npc=npc,
-           **out['model_kwargs'])
-       # Normalize the analysis result to a plain string
-       if isinstance(llm_response, dict):
-           llm_response = llm_response.get('response', 'No response from image analysis')
-       else:
-           llm_response = 'No response from image analysis'
npcsh/npc_team/tools/sql_executor.tool DELETED
@@ -1,26 +0,0 @@
- tool_name: data_pull
- description: Execute queries on the ~/npcsh_history.db to pull data. The database contains only information about conversations and other user-provided data. It does not store any information about individual files.
- inputs:
-   - sql_query
-   - interpret: false # Optional - passed through templating as a string, not a boolean
-
- steps:
-   - engine: python
-     code: |
-       import pandas as pd
-       try:
-           df = pd.read_sql_query('{{sql_query}}', npc.db_conn)
-       except pd.errors.DatabaseError as e:
-           df = pd.DataFrame({'Error': [str(e)]})
-
-
-       output = df.to_string()
-
-   - engine: natural
-     code: |
-       {% if interpret %}
-       Here is the result of the SQL query:
-       ```
-       {{ df.to_string() }}
-       ```
-       {% endif %}
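For reference, the same `read_sql_query` pattern against a throwaway SQLite connection; the table and columns here are made up, since the actual schema of ~/npcsh_history.db is not shown in this diff:

```python
import sqlite3
import pandas as pd

# Hypothetical stand-in for npc.db_conn and its schema.
conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE conversations (id INTEGER, content TEXT)")
conn.execute("INSERT INTO conversations VALUES (1, 'hello')")

try:
    df = pd.read_sql_query("SELECT * FROM conversations", conn)
except pd.errors.DatabaseError as e:  # e.g. a mistyped table name
    df = pd.DataFrame({"Error": [str(e)]})

output = df.to_string()
print(output)
```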
npcsh/response.py DELETED
@@ -1,272 +0,0 @@
- import json
- import requests
- import base64
- import os
- from PIL import Image
- from typing import Any, Dict, Generator, List, Union
-
- from pydantic import BaseModel
- from npcsh.npc_sysenv import (
-     get_system_message,
-     compress_image,
-     available_chat_models,
-     available_reasoning_models,
- )
-
- from litellm import completion
-
- # import litellm
-
- # litellm._turn_on_debug()
-
- try:
-     import ollama
- except ImportError:
-     pass
-
-
- def get_ollama_response(
-     prompt: str,
-     model: str,
-     images: List[Dict[str, str]] = None,
-     npc: Any = None,
-     tools: list = None,
-     format: Union[str, BaseModel] = None,
-     messages: List[Dict[str, str]] = None,
-     **kwargs,
- ) -> Dict[str, Any]:
-     """
-     Generates a response using the Ollama API.
-
-     Args:
-         prompt (str): Prompt for generating the response.
-         model (str): Model to use for generating the response.
-         images (List[Dict[str, str]], optional): List of image data. Defaults to None.
-         npc (Any, optional): Optional NPC object. Defaults to None.
-         format (Union[str, BaseModel], optional): Response format or schema. Defaults to None.
-         messages (List[Dict[str, str]], optional): Existing messages to append responses. Defaults to None.
-
-     Returns:
-         Dict[str, Any]: The response, optionally including updated messages.
-     """
-     import ollama
-
-     # Prepare the message payload
-     system_message = get_system_message(npc) if npc else "You are a helpful assistant."
-     if messages is None or len(messages) == 0:
-         messages = [
-             {"role": "system", "content": system_message},
-             {"role": "user", "content": prompt},
-         ]
-
-     if images:
-         messages[-1]["images"] = [image["file_path"] for image in images]
-
-     # Prepare format
-     if isinstance(format, type):
-         schema = format.model_json_schema()
-         res = ollama.chat(model=model, messages=messages, format=schema)
-
-     elif isinstance(format, str):
-         if format == "json":
-             res = ollama.chat(model=model, messages=messages, format=format)
-         else:
-             res = ollama.chat(model=model, messages=messages)
-     else:
-         res = ollama.chat(model=model, messages=messages)
-     response_content = res.get("message", {}).get("content")
-
-     # Prepare the return dictionary
-     result = {"response": response_content}
-
-     # Append response to messages if provided
-     if messages is not None:
-         messages.append({"role": "assistant", "content": response_content})
-         result["messages"] = messages
-
-     # Handle JSON format if specified
-     if format == "json":
-         if model in available_reasoning_models:
-             raise NotImplementedError("Reasoning models do not support JSON output.")
-         try:
-             if isinstance(response_content, str):
-                 if response_content.startswith("```json"):
-                     response_content = (
-                         response_content.replace("```json", "")
-                         .replace("```", "")
-                         .strip()
-                     )
-                 response_content = json.loads(response_content)
-                 # print(response_content, type(response_content))
-                 result["response"] = response_content
-         except json.JSONDecodeError:
-             return {"error": f"Invalid JSON response: {response_content}"}
-
-     return result
-
-
- def get_litellm_response(
-     prompt: str,
-     model: str,
-     provider: str = None,
-     images: List[Dict[str, str]] = None,
-     npc: Any = None,
-     tools: list = None,
-     format: Union[str, BaseModel] = None,
-     messages: List[Dict[str, str]] = None,
-     api_key: str = None,
-     api_url: str = None,
-     tool_choice: Dict = None,
-     **kwargs,
- ) -> Dict[str, Any]:
-     """
-     Improved version with consistent JSON parsing.
-     """
-     if provider == "ollama":
-         return get_ollama_response(
-             prompt, model, images, npc, tools, format, messages, **kwargs
-         )
-
-     system_message = get_system_message(npc) if npc else "You are a helpful assistant."
-     if format == "json":
-         prompt += """If you are returning a json object, begin directly with the opening {.
-         If you are returning a json array, begin directly with the opening [.
-         Do not include any additional markdown formatting or leading
-         ```json tags in your response. The item keys should be based on the ones provided
-         by the user. Do not invent new ones.
-
-         """
-     if messages is None or len(messages) == 0:
-         messages = [
-             {"role": "system", "content": system_message},
-             {"role": "user", "content": [{"type": "text", "text": prompt}]},
-         ]
-
-     if images:
-         for image in images:
-             with open(image["file_path"], "rb") as image_file:
-                 image_data = base64.b64encode(compress_image(image_file.read())).decode(
-                     "utf-8"
-                 )
-             messages[-1]["content"].append(
-                 {
-                     "type": "image_url",
-                     "image_url": {"url": f"data:image/jpeg;base64,{image_data}"},
-                 }
-             )
-     api_params = {
-         "messages": messages,
-     }
-     if provider is None:
-         split = model.split("/")
-         if len(split) == 2:
-             provider = split[0]
-     # if provider == "ollama":
-     # uncomment the two lines below once litellm works better with ollama;
-     # litellm works better with ollama_chat
-     # api_params["api_base"] = "http://localhost:11434"
-     # provider = "ollama_chat"
-     api_params["format"] = format
-
-     # else:
-     if api_url is not None:
-         # The default api_url is npcsh's NPCSH_API_URL for an openai-like
-         # provider, so the provider should only ever be openai-like here.
-         if provider == "openai-like":
-             api_params["api_base"] = api_url
-
-     if format == "json":
-         api_params["response_format"] = {"type": "json_object"}
-     elif format is not None:
-         # pydantic model
-         api_params["response_format"] = format
-
-     if "/" not in model:  # litellm expects provider/model, so accept models given that way
-         model_str = f"{provider}/{model}"
-     else:
-         model_str = model
-     api_params["model"] = model_str
-     if api_key is not None:
-         api_params["api_key"] = api_key
-     # Add tools if provided
-     if tools:
-         api_params["tools"] = tools
-     # Add tool choice if specified
-     if tool_choice:
-         api_params["tool_choice"] = tool_choice
-     if kwargs:
-         for key, value in kwargs.items():
-             # minimum parameter set for anthropic to work
-             if key in [
-                 "stream",
-                 "stop",
-                 "temperature",
-                 "top_p",
-                 "max_tokens",
-                 "max_completion_tokens",
-                 "tools",
-                 "tool_choice",
-                 "extra_headers",
-                 "parallel_tool_calls",
-                 "response_format",
-                 "user",
-             ]:
-                 api_params[key] = value
-
-     try:
-         # litellm's completion appears to have some ollama issues, so we
-         # default to our custom implementation until ollama support improves
-         resp = completion(
-             **api_params,
-         )
-
-         # Get the raw response content
-         llm_response = resp.choices[0].message.content
-
-         # Prepare return dict
-         items_to_return = {
-             "response": llm_response,
-             "messages": messages,
-             "raw_response": resp,  # Include the full response for debugging
-         }
-
-         # Handle JSON format requests
-         if format == "json":
-             try:
-                 if isinstance(llm_response, str):
-                     loaded = json.loads(llm_response)
-                 else:
-                     loaded = llm_response  # Assume it's already parsed
-                 if "json" in loaded:
-                     items_to_return["response"] = loaded["json"]
-                 else:
-                     items_to_return["response"] = loaded
-
-             except (json.JSONDecodeError, TypeError) as e:
-                 print(f"JSON parsing error: {str(e)}")
-                 print(f"Raw response: {llm_response}")
-                 items_to_return["error"] = "Invalid JSON response"
-                 return items_to_return
-
-         # Add assistant response to message history
-         items_to_return["messages"].append(
-             {
-                 "role": "assistant",
-                 "content": (
-                     llm_response if isinstance(llm_response, str) else str(llm_response)
-                 ),
-             }
-         )
-
-         return items_to_return
-
-     except Exception as e:
-         print(f"Error in get_litellm_response: {str(e)}")
-         return {"error": str(e), "messages": messages, "response": None}