npcpy 1.2.31__tar.gz → 1.2.33__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (72)
  1. {npcpy-1.2.31/npcpy.egg-info → npcpy-1.2.33}/PKG-INFO +97 -34
  2. {npcpy-1.2.31 → npcpy-1.2.33}/README.md +96 -33
  3. {npcpy-1.2.31 → npcpy-1.2.33}/npcpy/gen/response.py +73 -20
  4. {npcpy-1.2.31 → npcpy-1.2.33}/npcpy/llm_funcs.py +58 -38
  5. {npcpy-1.2.31 → npcpy-1.2.33}/npcpy/npc_compiler.py +549 -350
  6. {npcpy-1.2.31 → npcpy-1.2.33}/npcpy/npc_sysenv.py +44 -20
  7. {npcpy-1.2.31 → npcpy-1.2.33}/npcpy/serve.py +1 -0
  8. {npcpy-1.2.31 → npcpy-1.2.33/npcpy.egg-info}/PKG-INFO +97 -34
  9. {npcpy-1.2.31 → npcpy-1.2.33}/setup.py +1 -1
  10. {npcpy-1.2.31 → npcpy-1.2.33}/LICENSE +0 -0
  11. {npcpy-1.2.31 → npcpy-1.2.33}/MANIFEST.in +0 -0
  12. {npcpy-1.2.31 → npcpy-1.2.33}/npcpy/__init__.py +0 -0
  13. {npcpy-1.2.31 → npcpy-1.2.33}/npcpy/data/__init__.py +0 -0
  14. {npcpy-1.2.31 → npcpy-1.2.33}/npcpy/data/audio.py +0 -0
  15. {npcpy-1.2.31 → npcpy-1.2.33}/npcpy/data/data_models.py +0 -0
  16. {npcpy-1.2.31 → npcpy-1.2.33}/npcpy/data/image.py +0 -0
  17. {npcpy-1.2.31 → npcpy-1.2.33}/npcpy/data/load.py +0 -0
  18. {npcpy-1.2.31 → npcpy-1.2.33}/npcpy/data/text.py +0 -0
  19. {npcpy-1.2.31 → npcpy-1.2.33}/npcpy/data/video.py +0 -0
  20. {npcpy-1.2.31 → npcpy-1.2.33}/npcpy/data/web.py +0 -0
  21. {npcpy-1.2.31 → npcpy-1.2.33}/npcpy/ft/__init__.py +0 -0
  22. {npcpy-1.2.31 → npcpy-1.2.33}/npcpy/ft/diff.py +0 -0
  23. {npcpy-1.2.31 → npcpy-1.2.33}/npcpy/ft/ge.py +0 -0
  24. {npcpy-1.2.31 → npcpy-1.2.33}/npcpy/ft/memory_trainer.py +0 -0
  25. {npcpy-1.2.31 → npcpy-1.2.33}/npcpy/ft/model_ensembler.py +0 -0
  26. {npcpy-1.2.31 → npcpy-1.2.33}/npcpy/ft/rl.py +0 -0
  27. {npcpy-1.2.31 → npcpy-1.2.33}/npcpy/ft/sft.py +0 -0
  28. {npcpy-1.2.31 → npcpy-1.2.33}/npcpy/ft/usft.py +0 -0
  29. {npcpy-1.2.31 → npcpy-1.2.33}/npcpy/gen/__init__.py +0 -0
  30. {npcpy-1.2.31 → npcpy-1.2.33}/npcpy/gen/audio_gen.py +0 -0
  31. {npcpy-1.2.31 → npcpy-1.2.33}/npcpy/gen/embeddings.py +0 -0
  32. {npcpy-1.2.31 → npcpy-1.2.33}/npcpy/gen/image_gen.py +0 -0
  33. {npcpy-1.2.31 → npcpy-1.2.33}/npcpy/gen/video_gen.py +0 -0
  34. {npcpy-1.2.31 → npcpy-1.2.33}/npcpy/main.py +0 -0
  35. {npcpy-1.2.31 → npcpy-1.2.33}/npcpy/memory/__init__.py +0 -0
  36. {npcpy-1.2.31 → npcpy-1.2.33}/npcpy/memory/command_history.py +0 -0
  37. {npcpy-1.2.31 → npcpy-1.2.33}/npcpy/memory/kg_vis.py +0 -0
  38. {npcpy-1.2.31 → npcpy-1.2.33}/npcpy/memory/knowledge_graph.py +0 -0
  39. {npcpy-1.2.31 → npcpy-1.2.33}/npcpy/memory/memory_processor.py +0 -0
  40. {npcpy-1.2.31 → npcpy-1.2.33}/npcpy/memory/search.py +0 -0
  41. {npcpy-1.2.31 → npcpy-1.2.33}/npcpy/mix/__init__.py +0 -0
  42. {npcpy-1.2.31 → npcpy-1.2.33}/npcpy/mix/debate.py +0 -0
  43. {npcpy-1.2.31 → npcpy-1.2.33}/npcpy/npcs.py +0 -0
  44. {npcpy-1.2.31 → npcpy-1.2.33}/npcpy/sql/__init__.py +0 -0
  45. {npcpy-1.2.31 → npcpy-1.2.33}/npcpy/sql/ai_function_tools.py +0 -0
  46. {npcpy-1.2.31 → npcpy-1.2.33}/npcpy/sql/database_ai_adapters.py +0 -0
  47. {npcpy-1.2.31 → npcpy-1.2.33}/npcpy/sql/database_ai_functions.py +0 -0
  48. {npcpy-1.2.31 → npcpy-1.2.33}/npcpy/sql/model_runner.py +0 -0
  49. {npcpy-1.2.31 → npcpy-1.2.33}/npcpy/sql/npcsql.py +0 -0
  50. {npcpy-1.2.31 → npcpy-1.2.33}/npcpy/sql/sql_model_compiler.py +0 -0
  51. {npcpy-1.2.31 → npcpy-1.2.33}/npcpy/tools.py +0 -0
  52. {npcpy-1.2.31 → npcpy-1.2.33}/npcpy/work/__init__.py +0 -0
  53. {npcpy-1.2.31 → npcpy-1.2.33}/npcpy/work/desktop.py +0 -0
  54. {npcpy-1.2.31 → npcpy-1.2.33}/npcpy/work/plan.py +0 -0
  55. {npcpy-1.2.31 → npcpy-1.2.33}/npcpy/work/trigger.py +0 -0
  56. {npcpy-1.2.31 → npcpy-1.2.33}/npcpy.egg-info/SOURCES.txt +0 -0
  57. {npcpy-1.2.31 → npcpy-1.2.33}/npcpy.egg-info/dependency_links.txt +0 -0
  58. {npcpy-1.2.31 → npcpy-1.2.33}/npcpy.egg-info/requires.txt +0 -0
  59. {npcpy-1.2.31 → npcpy-1.2.33}/npcpy.egg-info/top_level.txt +0 -0
  60. {npcpy-1.2.31 → npcpy-1.2.33}/setup.cfg +0 -0
  61. {npcpy-1.2.31 → npcpy-1.2.33}/tests/test_audio.py +0 -0
  62. {npcpy-1.2.31 → npcpy-1.2.33}/tests/test_command_history.py +0 -0
  63. {npcpy-1.2.31 → npcpy-1.2.33}/tests/test_image.py +0 -0
  64. {npcpy-1.2.31 → npcpy-1.2.33}/tests/test_llm_funcs.py +0 -0
  65. {npcpy-1.2.31 → npcpy-1.2.33}/tests/test_load.py +0 -0
  66. {npcpy-1.2.31 → npcpy-1.2.33}/tests/test_npc_compiler.py +0 -0
  67. {npcpy-1.2.31 → npcpy-1.2.33}/tests/test_npcsql.py +0 -0
  68. {npcpy-1.2.31 → npcpy-1.2.33}/tests/test_response.py +0 -0
  69. {npcpy-1.2.31 → npcpy-1.2.33}/tests/test_serve.py +0 -0
  70. {npcpy-1.2.31 → npcpy-1.2.33}/tests/test_text.py +0 -0
  71. {npcpy-1.2.31 → npcpy-1.2.33}/tests/test_tools.py +0 -0
  72. {npcpy-1.2.31 → npcpy-1.2.33}/tests/test_web.py +0 -0
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: npcpy
-Version: 1.2.31
+Version: 1.2.33
 Summary: npcpy is the premier open-source library for integrating LLMs and Agents into python systems.
 Home-page: https://github.com/NPC-Worldwide/npcpy
 Author: Christopher Agostino
@@ -185,62 +185,117 @@ for tool_call in response['tool_results']:
 Here is an example for setting up an agent team to use Jinja Execution (Jinxs) templates that are processed entirely with prompts, allowing you to use them with models that do or do not possess tool calling support.
 
 ```python
+
 from npcpy.npc_compiler import NPC, Team, Jinx
 from npcpy.tools import auto_tools
 import os
+from jinja2 import Environment, Undefined, DictLoader # Import necessary Jinja2 components for Jinx code
 
-
-
+# --- REVISED file_reader_jinx ---
 file_reader_jinx = Jinx(jinx_data={
     "jinx_name": "file_reader",
-    "description": "Read a file and summarize its contents",
+    "description": "Read a file and optionally summarize its contents using an LLM.",
     "inputs": ["filename"],
     "steps": [
         {
-            "name": "read_file",
+            "name": "read_file_content",
             "engine": "python",
-            "code": """
+            "code": '''
 import os
-with open(os.path.abspath('{{ filename }}'), 'r') as f:
-    content = f.read()
-output= content
-"""
+from jinja2 import Environment, Undefined, DictLoader # Local import for Jinx step
+
+# The 'filename' input to the file_reader jinx might be a Jinja template string like "{{ source_filename }}"
+# or a direct filename. We need to render it using the current execution context.
+
+# Get the Jinja environment from the NPC if available, otherwise create a default one.
+# The 'npc' variable is available in the Jinx execution context.
+# We need to ensure 'npc' exists before trying to access its 'jinja_env'.
+execution_jinja_env = npc.jinja_env if npc else Environment(loader=DictLoader({}), undefined=Undefined)
+
+# Render the filename. The current 'context' should contain the variables needed for rendering.
+# For declarative calls, the parent Jinx's inputs (like 'source_filename') will be in this context.
+# We also need to ensure the value from context['filename'] is treated as a template string.
+filename_template = execution_jinja_env.from_string(context['filename'])
+rendered_filename = filename_template.render(**context)
+
+file_path_abs = os.path.abspath(rendered_filename)
+try:
+    with open(file_path_abs, 'r') as f:
+        content = f.read()
+    context['file_raw_content'] = content # Store raw content in context for later use
+    output = content # Output of this step is the raw content
+except FileNotFoundError:
+    output = f"Error: File not found at {file_path_abs}"
+    context['file_raw_content'] = output # Store error message for consistency
+except Exception as e:
+    output = f"Error reading file {file_path_abs}: {e}"
+    context['file_raw_content'] = output # Store error message for consistency
+'''
         },
         {
-            "name": "summarize_content",
-            "engine": "natural",
-            "code": """
-Summarize the content of the file: {{ read_file }}.
-"""
+            "name": "summarize_file_content",
+            "engine": "python",
+            "code": '''
+# Check if the previous step encountered an error
+if "Error" not in context['file_raw_content']:
+    prompt = f"Summarize the following content concisely, highlighting key themes and points: {context['file_raw_content']}"
+    llm_result = npc.get_llm_response(prompt, tool_choice=False) # FIX: Passed prompt positionally
+    output = llm_result.get('response', 'Failed to generate summary due to LLM error.')
+else:
+    output = "Skipping summary due to previous file reading error."
+'''
         }
     ]
 })
 
-
-# Define a jinx for literary research
+# --- REVISED literary_research_jinx ---
 literary_research_jinx = Jinx(jinx_data={
     "jinx_name": "literary_research",
-    "description": "Research a literary topic, analyze files, and summarize findings",
-    "inputs": ["topic"],
+    "description": "Research a literary topic, read a specific file, analyze, and synthesize findings.",
+    "inputs": ["topic", "source_filename"],
     "steps": [
         {
-            "name": "gather_info",
-            "engine": "natural",
-            "code": """
-Research the topic: {{ topic }}.
-Summarize the main themes and historical context.
-"""
+            "name": "initial_llm_research",
+            "engine": "python",
+            "code": '''
+prompt = f"Research the topic: {context['topic']}. Summarize the main themes, key authors, and historical context. Be thorough."
+llm_result = npc.get_llm_response(prompt, tool_choice=False) # FIX: Passed prompt positionally
+context['research_summary'] = llm_result.get('response', 'No initial LLM research found.')
+output = context['research_summary']
+'''
+        },
+        {
+            "name": "read_and_process_source_file",
+            "engine": "file_reader",
+            "filename": "{{ source_filename }}" # This is passed as a string template to file_reader
         },
         {
-            "name": "final_summary",
-            "engine": "natural",
-            "code": """
-Based on the research in. {{gather_info}}, write a concise, creative summary.
-"""
+            "name": "final_synthesis_and_creative_writing",
+            "engine": "python",
+            "code": '''
+# Access outputs from previous steps.
+research_summary = context['initial_llm_research']
+# The output of a declarative jinx call (like 'file_reader') is stored under its step name.
+# The actual content we want is the 'output' of the *last step* within that sub-jinx.
+file_summary = context['read_and_process_source_file'].get('output', 'No file summary available.')
+
+prompt = f"""Based on the following information:
+1. Comprehensive Research Summary:
+{research_summary}
+
+2. Key Insights from Source File:
+{file_summary}
+
+Integrate these findings and write a concise, creative, and poetically styled summary of the literary topic '{context['topic']}'. Emphasize unique perspectives or connections between the research and the file content, as if written by a master of magical realism.
+"""
+llm_result = npc.get_llm_response(prompt, tool_choice=False) # FIX: Passed prompt positionally
+output = llm_result.get('response', 'Failed to generate final creative summary.')
+'''
         }
     ]
 })
 
+# --- NPC Definitions (unchanged) ---
 ggm = NPC(
     name='Gabriel Garcia Marquez',
     primary_directive='You are Gabriel Garcia Marquez, master of magical realism. Research, analyze, and write with poetic flair.',
@@ -263,16 +318,24 @@ borges = NPC(
     provider='ollama',
 )
 
-# Set up a team with a forenpc that orchestrates the other npcs
-lit_team = Team(npcs=[ggm, isabel], forenpc=borges, jinxs={'literary_research': literary_research_jinx, 'file_reader': file_reader_jinx},
+# --- Team Setup ---
+lit_team = Team(
+    npcs=[ggm, isabel],
+    forenpc=borges,
+    jinxs=[literary_research_jinx, file_reader_jinx],
 )
 
-# Example: Orchestrate a jinx workflow
+# --- Orchestration Example ---
 result = lit_team.orchestrate(
-    "Research the topic of magical realism, read ./test_data/magical_realism.txt and summarize the findings"
+    "Research the topic of magical realism, using the file './test_data/magical_realism.txt' as a primary source, and provide a comprehensive, creative summary."
 )
+
+print("\n--- Orchestration Result Summary ---")
 print(result['debrief']['summary'])
 
+print("\n--- Full Orchestration Output ---")
+print(result['output'])
+
 ```
 ```
 • Action chosen: pass_to_npc
@@ -89,62 +89,117 @@ for tool_call in response['tool_results']:
 Here is an example for setting up an agent team to use Jinja Execution (Jinxs) templates that are processed entirely with prompts, allowing you to use them with models that do or do not possess tool calling support.
 
 ```python
+
 from npcpy.npc_compiler import NPC, Team, Jinx
 from npcpy.tools import auto_tools
 import os
+from jinja2 import Environment, Undefined, DictLoader # Import necessary Jinja2 components for Jinx code
 
-
-
+# --- REVISED file_reader_jinx ---
 file_reader_jinx = Jinx(jinx_data={
     "jinx_name": "file_reader",
-    "description": "Read a file and summarize its contents",
+    "description": "Read a file and optionally summarize its contents using an LLM.",
     "inputs": ["filename"],
     "steps": [
         {
-            "name": "read_file",
+            "name": "read_file_content",
             "engine": "python",
-            "code": """
+            "code": '''
 import os
-with open(os.path.abspath('{{ filename }}'), 'r') as f:
-    content = f.read()
-output= content
-"""
+from jinja2 import Environment, Undefined, DictLoader # Local import for Jinx step
+
+# The 'filename' input to the file_reader jinx might be a Jinja template string like "{{ source_filename }}"
+# or a direct filename. We need to render it using the current execution context.
+
+# Get the Jinja environment from the NPC if available, otherwise create a default one.
+# The 'npc' variable is available in the Jinx execution context.
+# We need to ensure 'npc' exists before trying to access its 'jinja_env'.
+execution_jinja_env = npc.jinja_env if npc else Environment(loader=DictLoader({}), undefined=Undefined)
+
+# Render the filename. The current 'context' should contain the variables needed for rendering.
+# For declarative calls, the parent Jinx's inputs (like 'source_filename') will be in this context.
+# We also need to ensure the value from context['filename'] is treated as a template string.
+filename_template = execution_jinja_env.from_string(context['filename'])
+rendered_filename = filename_template.render(**context)
+
+file_path_abs = os.path.abspath(rendered_filename)
+try:
+    with open(file_path_abs, 'r') as f:
+        content = f.read()
+    context['file_raw_content'] = content # Store raw content in context for later use
+    output = content # Output of this step is the raw content
+except FileNotFoundError:
+    output = f"Error: File not found at {file_path_abs}"
+    context['file_raw_content'] = output # Store error message for consistency
+except Exception as e:
+    output = f"Error reading file {file_path_abs}: {e}"
+    context['file_raw_content'] = output # Store error message for consistency
+'''
         },
         {
-            "name": "summarize_content",
-            "engine": "natural",
-            "code": """
-Summarize the content of the file: {{ read_file }}.
-"""
+            "name": "summarize_file_content",
+            "engine": "python",
+            "code": '''
+# Check if the previous step encountered an error
+if "Error" not in context['file_raw_content']:
+    prompt = f"Summarize the following content concisely, highlighting key themes and points: {context['file_raw_content']}"
+    llm_result = npc.get_llm_response(prompt, tool_choice=False) # FIX: Passed prompt positionally
+    output = llm_result.get('response', 'Failed to generate summary due to LLM error.')
+else:
+    output = "Skipping summary due to previous file reading error."
+'''
         }
     ]
 })
 
-
-# Define a jinx for literary research
+# --- REVISED literary_research_jinx ---
 literary_research_jinx = Jinx(jinx_data={
     "jinx_name": "literary_research",
-    "description": "Research a literary topic, analyze files, and summarize findings",
-    "inputs": ["topic"],
+    "description": "Research a literary topic, read a specific file, analyze, and synthesize findings.",
+    "inputs": ["topic", "source_filename"],
     "steps": [
         {
-            "name": "gather_info",
-            "engine": "natural",
-            "code": """
-Research the topic: {{ topic }}.
-Summarize the main themes and historical context.
-"""
+            "name": "initial_llm_research",
+            "engine": "python",
+            "code": '''
+prompt = f"Research the topic: {context['topic']}. Summarize the main themes, key authors, and historical context. Be thorough."
+llm_result = npc.get_llm_response(prompt, tool_choice=False) # FIX: Passed prompt positionally
+context['research_summary'] = llm_result.get('response', 'No initial LLM research found.')
+output = context['research_summary']
+'''
+        },
+        {
+            "name": "read_and_process_source_file",
+            "engine": "file_reader",
+            "filename": "{{ source_filename }}" # This is passed as a string template to file_reader
         },
         {
-            "name": "final_summary",
-            "engine": "natural",
-            "code": """
-Based on the research in. {{gather_info}}, write a concise, creative summary.
-"""
+            "name": "final_synthesis_and_creative_writing",
+            "engine": "python",
+            "code": '''
+# Access outputs from previous steps.
+research_summary = context['initial_llm_research']
+# The output of a declarative jinx call (like 'file_reader') is stored under its step name.
+# The actual content we want is the 'output' of the *last step* within that sub-jinx.
+file_summary = context['read_and_process_source_file'].get('output', 'No file summary available.')
+
+prompt = f"""Based on the following information:
+1. Comprehensive Research Summary:
+{research_summary}
+
+2. Key Insights from Source File:
+{file_summary}
+
+Integrate these findings and write a concise, creative, and poetically styled summary of the literary topic '{context['topic']}'. Emphasize unique perspectives or connections between the research and the file content, as if written by a master of magical realism.
+"""
+llm_result = npc.get_llm_response(prompt, tool_choice=False) # FIX: Passed prompt positionally
+output = llm_result.get('response', 'Failed to generate final creative summary.')
+'''
         }
     ]
 })
 
+# --- NPC Definitions (unchanged) ---
 ggm = NPC(
     name='Gabriel Garcia Marquez',
     primary_directive='You are Gabriel Garcia Marquez, master of magical realism. Research, analyze, and write with poetic flair.',
@@ -167,16 +222,24 @@ borges = NPC(
     provider='ollama',
 )
 
-# Set up a team with a forenpc that orchestrates the other npcs
-lit_team = Team(npcs=[ggm, isabel], forenpc=borges, jinxs={'literary_research': literary_research_jinx, 'file_reader': file_reader_jinx},
+# --- Team Setup ---
+lit_team = Team(
+    npcs=[ggm, isabel],
+    forenpc=borges,
+    jinxs=[literary_research_jinx, file_reader_jinx],
 )
 
-# Example: Orchestrate a jinx workflow
+# --- Orchestration Example ---
 result = lit_team.orchestrate(
-    "Research the topic of magical realism, read ./test_data/magical_realism.txt and summarize the findings"
+    "Research the topic of magical realism, using the file './test_data/magical_realism.txt' as a primary source, and provide a comprehensive, creative summary."
 )
+
+print("\n--- Orchestration Result Summary ---")
 print(result['debrief']['summary'])
 
+print("\n--- Full Orchestration Output ---")
+print(result['output'])
+
 ```
 ```
 • Action chosen: pass_to_npc
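
The revised example relies on two Jinx conventions that are easy to miss in the diff: each python-engine step's `output` is stored in a shared `context` dict under the step's name, and string inputs such as `"{{ source_filename }}"` are rendered through Jinja2 against that same context before use. Below is a minimal sketch of that flow; the `run_steps` runner is hypothetical and stands in for npcpy's actual Jinx executor:

```python
from jinja2 import DictLoader, Environment, Undefined

# Hypothetical mini-runner (a sketch, not npcpy's executor) showing the two
# conventions the revised Jinxs above rely on:
#   1. each python-engine step's `output` lands in the shared `context`
#      under the step's name, so later steps can read earlier results;
#   2. string inputs like "{{ source_filename }}" are rendered with Jinja2
#      against that same context before they are used.
jinja_env = Environment(loader=DictLoader({}), undefined=Undefined)

def run_steps(steps, context):
    for step in steps:
        scope = {"context": context, "output": None}
        exec(step["code"], {}, scope)            # run a "python" engine step
        context[step["name"]] = scope["output"]  # expose result to later steps
    return context

context = {"source_filename": "./test_data/magical_realism.txt"}

# An input declared as a template string is rendered first, mirroring what
# read_file_content does with context['filename']:
rendered = jinja_env.from_string("{{ source_filename }}").render(**context)
assert rendered == "./test_data/magical_realism.txt"

run_steps([
    {"name": "gather", "code": "output = 'notes on ' + context['source_filename']"},
    {"name": "final", "code": "output = context['gather'].upper()"},
], context)
print(context["final"])  # NOTES ON ./TEST_DATA/MAGICAL_REALISM.TXT
```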
@@ -378,21 +378,50 @@ def get_ollama_response(
 
         result["response"] = ollama.chat(**stream_api_params, options=options)
     else:
-
+
         if format == "json":
             try:
-                if isinstance(response_content, str):
-                    if response_content.startswith("```json"):
-                        response_content = (
-                            response_content.replace("```json", "")
-                            .replace("```", "")
-                            .strip()
-                        )
-                parsed_response = json.loads(response_content)
-                result["response"] = parsed_response
-            except json.JSONDecodeError:
-                result["error"] = f"Invalid JSON response: {response_content}"
-
+                if isinstance(llm_response, str):
+                    llm_response = llm_response.strip()
+
+                    if '```json' in llm_response:
+                        start = llm_response.find('```json') + 7
+                        end = llm_response.rfind('```')
+                        if end > start:
+                            llm_response = llm_response[start:end].strip()
+
+                    first_brace = llm_response.find('{')
+                    first_bracket = llm_response.find('[')
+
+                    if first_brace == -1 and first_bracket == -1:
+                        result["response"] = {}
+                        result["error"] = "No JSON found in response"
+                        return result
+
+                    if first_brace != -1 and (first_bracket == -1 or first_brace < first_bracket):
+                        llm_response = llm_response[first_brace:]
+                        last_brace = llm_response.rfind('}')
+                        if last_brace != -1:
+                            llm_response = llm_response[:last_brace+1]
+                    else:
+                        llm_response = llm_response[first_bracket:]
+                        last_bracket = llm_response.rfind(']')
+                        if last_bracket != -1:
+                            llm_response = llm_response[:last_bracket+1]
+
+                parsed_json = json.loads(llm_response, strict=False)
+
+                if "json" in parsed_json:
+                    result["response"] = parsed_json["json"]
+                else:
+                    result["response"] = parsed_json
+
+            except (json.JSONDecodeError, TypeError) as e:
+                print(f"JSON parsing error: {str(e)}")
+                print(f"Raw response: {llm_response[:500]}")
+                result["response"] = {}
+                result["error"] = "Invalid JSON response"
+
     return result
 
 import time
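
The JSON-extraction logic added to `get_ollama_response` (and mirrored in `get_litellm_response` below) is self-contained enough to exercise on its own. A hedged standalone version follows; the `extract_json` helper name is ours for illustration, since npcpy inlines this logic rather than exposing a function:

```python
import json

def extract_json(llm_response: str):
    """Standalone copy of the extraction added in this diff (the function
    name is hypothetical; npcpy inlines this in get_ollama_response)."""
    llm_response = llm_response.strip()

    # Prefer the contents of a ```json fence when one is present.
    if '```json' in llm_response:
        start = llm_response.find('```json') + 7
        end = llm_response.rfind('```')
        if end > start:
            llm_response = llm_response[start:end].strip()

    # Slice from the first '{' or '[' to the last matching closer, so
    # surrounding chatter ("Sure, here is your JSON: ...") is discarded.
    first_brace = llm_response.find('{')
    first_bracket = llm_response.find('[')
    if first_brace == -1 and first_bracket == -1:
        raise ValueError("No JSON found in response")
    if first_brace != -1 and (first_bracket == -1 or first_brace < first_bracket):
        llm_response = llm_response[first_brace:]
        last = llm_response.rfind('}')
    else:
        llm_response = llm_response[first_bracket:]
        last = llm_response.rfind(']')
    if last != -1:
        llm_response = llm_response[:last + 1]
    return json.loads(llm_response, strict=False)

print(extract_json('Sure! ```json\n{"themes": ["memory", "time"]}\n``` hope that helps'))
```

Using `rfind` for the closing delimiter keeps nested objects intact (a `find` would cut at the first `}`); the trade-off is that chatter between balanced braces is still passed to the parser, which is why the error path remains.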
@@ -553,7 +582,7 @@ def get_litellm_response(
         litellm.include_cost_in_streaming_usage = True
         api_params['stream_options'] = {"include_usage": True}
 
-    if api_url is not None and (provider == "openai-like" or provider == "openai"):
+    if api_url is not None and ('openai-like' in provider or provider == "openai-like" or provider == "openai"):
         api_params["api_base"] = api_url
         provider = "openai"
 
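The widened check means any provider string *containing* `openai-like`, not just the exact value, is rebased onto the custom `api_url`; the substring test subsumes the old equality test, which the new condition still lists redundantly. A small sketch of the resulting routing, with a hypothetical `routes_to_openai` predicate standing in for the inlined condition:

```python
# Hypothetical predicate mirroring the inlined condition in get_litellm_response:
# a custom api_url plus an "openai-like" provider string (or plain "openai")
# means the request is rebased onto that URL and re-labeled "openai" for litellm.
def routes_to_openai(provider, api_url):
    return api_url is not None and ('openai-like' in provider or provider == "openai")

assert routes_to_openai("openai-like", "http://localhost:8080/v1")
assert routes_to_openai("my-openai-like-proxy", "http://localhost:8080/v1")  # newly matched in 1.2.33
assert not routes_to_openai("anthropic", "http://localhost:8080/v1")
assert not routes_to_openai("openai", None)  # no custom endpoint, nothing to rebase
```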
@@ -609,14 +638,37 @@
 
     if hasattr(resp.choices[0].message, 'tool_calls') and resp.choices[0].message.tool_calls:
         result["tool_calls"] = resp.choices[0].message.tool_calls
-
-
     if format == "json":
         try:
             if isinstance(llm_response, str):
-                if llm_response.startswith("```json"):
-                    llm_response = llm_response.replace("```json", "").replace("```", "").strip()
-                parsed_json = json.loads(llm_response)
+                llm_response = llm_response.strip()
+
+                if '```json' in llm_response:
+                    start = llm_response.find('```json') + 7
+                    end = llm_response.rfind('```')
+                    if end > start:
+                        llm_response = llm_response[start:end].strip()
+
+                first_brace = llm_response.find('{')
+                first_bracket = llm_response.find('[')
+
+                if first_brace == -1 and first_bracket == -1:
+                    result["response"] = {}
+                    result["error"] = "No JSON found in response"
+                    return result
+
+                if first_brace != -1 and (first_bracket == -1 or first_brace < first_bracket):
+                    llm_response = llm_response[first_brace:]
+                    last_brace = llm_response.rfind('}')
+                    if last_brace != -1:
+                        llm_response = llm_response[:last_brace+1]
+                else:
+                    llm_response = llm_response[first_bracket:]
+                    last_bracket = llm_response.rfind(']')
+                    if last_bracket != -1:
+                        llm_response = llm_response[:last_bracket+1]
+
+            parsed_json = json.loads(llm_response, strict=False)
 
             if "json" in parsed_json:
                 result["response"] = parsed_json["json"]
@@ -625,7 +677,8 @@ def get_litellm_response(
 
         except (json.JSONDecodeError, TypeError) as e:
             print(f"JSON parsing error: {str(e)}")
-            print(f"Raw response: {llm_response}")
+            print(f"Raw response: {llm_response[:500]}")
+            result["response"] = {}
             result["error"] = "Invalid JSON response"
 
     return result
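
After this change both response paths share the same post-parse contract: a top-level `"json"` wrapper key is unwrapped, and a failed parse yields an empty dict plus an `error` string instead of the raw model output. A minimal sketch of that contract, with shapes inferred from the hunks above rather than from npcpy's documented API:

```python
def normalize(parsed_json):
    # Some models wrap the requested payload in a top-level "json" key;
    # both hunks above unwrap it so callers see the payload directly.
    return parsed_json["json"] if "json" in parsed_json else parsed_json

assert normalize({"json": {"themes": ["memory"]}}) == {"themes": ["memory"]}
assert normalize({"themes": ["memory"]}) == {"themes": ["memory"]}

# On JSONDecodeError/TypeError the result dict now degrades predictably,
# so callers can branch on "error" without type-checking "response":
failure = {"response": {}, "error": "Invalid JSON response"}
payload = {} if failure.get("error") else failure["response"]
assert payload == {}
```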