npcpy 1.0.26__py3-none-any.whl → 1.2.32__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (148)
  1. npcpy/__init__.py +0 -7
  2. npcpy/data/audio.py +16 -99
  3. npcpy/data/image.py +43 -42
  4. npcpy/data/load.py +83 -124
  5. npcpy/data/text.py +28 -28
  6. npcpy/data/video.py +8 -32
  7. npcpy/data/web.py +51 -23
  8. npcpy/ft/diff.py +110 -0
  9. npcpy/ft/ge.py +115 -0
  10. npcpy/ft/memory_trainer.py +171 -0
  11. npcpy/ft/model_ensembler.py +357 -0
  12. npcpy/ft/rl.py +360 -0
  13. npcpy/ft/sft.py +248 -0
  14. npcpy/ft/usft.py +128 -0
  15. npcpy/gen/audio_gen.py +24 -0
  16. npcpy/gen/embeddings.py +13 -13
  17. npcpy/gen/image_gen.py +262 -117
  18. npcpy/gen/response.py +615 -415
  19. npcpy/gen/video_gen.py +53 -7
  20. npcpy/llm_funcs.py +1869 -437
  21. npcpy/main.py +1 -1
  22. npcpy/memory/command_history.py +844 -510
  23. npcpy/memory/kg_vis.py +833 -0
  24. npcpy/memory/knowledge_graph.py +892 -1845
  25. npcpy/memory/memory_processor.py +81 -0
  26. npcpy/memory/search.py +188 -90
  27. npcpy/mix/debate.py +192 -3
  28. npcpy/npc_compiler.py +1672 -801
  29. npcpy/npc_sysenv.py +593 -1266
  30. npcpy/serve.py +3120 -0
  31. npcpy/sql/ai_function_tools.py +257 -0
  32. npcpy/sql/database_ai_adapters.py +186 -0
  33. npcpy/sql/database_ai_functions.py +163 -0
  34. npcpy/sql/model_runner.py +19 -19
  35. npcpy/sql/npcsql.py +706 -507
  36. npcpy/sql/sql_model_compiler.py +156 -0
  37. npcpy/tools.py +183 -0
  38. npcpy/work/plan.py +13 -279
  39. npcpy/work/trigger.py +3 -3
  40. npcpy-1.2.32.dist-info/METADATA +803 -0
  41. npcpy-1.2.32.dist-info/RECORD +54 -0
  42. npcpy/data/dataframes.py +0 -171
  43. npcpy/memory/deep_research.py +0 -125
  44. npcpy/memory/sleep.py +0 -557
  45. npcpy/modes/_state.py +0 -78
  46. npcpy/modes/alicanto.py +0 -1075
  47. npcpy/modes/guac.py +0 -785
  48. npcpy/modes/mcp_npcsh.py +0 -822
  49. npcpy/modes/npc.py +0 -213
  50. npcpy/modes/npcsh.py +0 -1158
  51. npcpy/modes/plonk.py +0 -409
  52. npcpy/modes/pti.py +0 -234
  53. npcpy/modes/serve.py +0 -1637
  54. npcpy/modes/spool.py +0 -312
  55. npcpy/modes/wander.py +0 -549
  56. npcpy/modes/yap.py +0 -572
  57. npcpy/npc_team/alicanto.npc +0 -2
  58. npcpy/npc_team/alicanto.png +0 -0
  59. npcpy/npc_team/assembly_lines/test_pipeline.py +0 -181
  60. npcpy/npc_team/corca.npc +0 -13
  61. npcpy/npc_team/foreman.npc +0 -7
  62. npcpy/npc_team/frederic.npc +0 -6
  63. npcpy/npc_team/frederic4.png +0 -0
  64. npcpy/npc_team/guac.png +0 -0
  65. npcpy/npc_team/jinxs/automator.jinx +0 -18
  66. npcpy/npc_team/jinxs/bash_executer.jinx +0 -31
  67. npcpy/npc_team/jinxs/calculator.jinx +0 -11
  68. npcpy/npc_team/jinxs/edit_file.jinx +0 -96
  69. npcpy/npc_team/jinxs/file_chat.jinx +0 -14
  70. npcpy/npc_team/jinxs/gui_controller.jinx +0 -28
  71. npcpy/npc_team/jinxs/image_generation.jinx +0 -29
  72. npcpy/npc_team/jinxs/internet_search.jinx +0 -30
  73. npcpy/npc_team/jinxs/local_search.jinx +0 -152
  74. npcpy/npc_team/jinxs/npcsh_executor.jinx +0 -31
  75. npcpy/npc_team/jinxs/python_executor.jinx +0 -8
  76. npcpy/npc_team/jinxs/screen_cap.jinx +0 -25
  77. npcpy/npc_team/jinxs/sql_executor.jinx +0 -33
  78. npcpy/npc_team/kadiefa.npc +0 -3
  79. npcpy/npc_team/kadiefa.png +0 -0
  80. npcpy/npc_team/npcsh.ctx +0 -9
  81. npcpy/npc_team/npcsh_sibiji.png +0 -0
  82. npcpy/npc_team/plonk.npc +0 -2
  83. npcpy/npc_team/plonk.png +0 -0
  84. npcpy/npc_team/plonkjr.npc +0 -2
  85. npcpy/npc_team/plonkjr.png +0 -0
  86. npcpy/npc_team/sibiji.npc +0 -5
  87. npcpy/npc_team/sibiji.png +0 -0
  88. npcpy/npc_team/spool.png +0 -0
  89. npcpy/npc_team/templates/analytics/celona.npc +0 -0
  90. npcpy/npc_team/templates/hr_support/raone.npc +0 -0
  91. npcpy/npc_team/templates/humanities/eriane.npc +0 -4
  92. npcpy/npc_team/templates/it_support/lineru.npc +0 -0
  93. npcpy/npc_team/templates/marketing/slean.npc +0 -4
  94. npcpy/npc_team/templates/philosophy/maurawa.npc +0 -0
  95. npcpy/npc_team/templates/sales/turnic.npc +0 -4
  96. npcpy/npc_team/templates/software/welxor.npc +0 -0
  97. npcpy/npc_team/yap.png +0 -0
  98. npcpy/routes.py +0 -958
  99. npcpy/work/mcp_helpers.py +0 -357
  100. npcpy/work/mcp_server.py +0 -194
  101. npcpy-1.0.26.data/data/npcpy/npc_team/alicanto.npc +0 -2
  102. npcpy-1.0.26.data/data/npcpy/npc_team/alicanto.png +0 -0
  103. npcpy-1.0.26.data/data/npcpy/npc_team/automator.jinx +0 -18
  104. npcpy-1.0.26.data/data/npcpy/npc_team/bash_executer.jinx +0 -31
  105. npcpy-1.0.26.data/data/npcpy/npc_team/calculator.jinx +0 -11
  106. npcpy-1.0.26.data/data/npcpy/npc_team/celona.npc +0 -0
  107. npcpy-1.0.26.data/data/npcpy/npc_team/corca.npc +0 -13
  108. npcpy-1.0.26.data/data/npcpy/npc_team/edit_file.jinx +0 -96
  109. npcpy-1.0.26.data/data/npcpy/npc_team/eriane.npc +0 -4
  110. npcpy-1.0.26.data/data/npcpy/npc_team/file_chat.jinx +0 -14
  111. npcpy-1.0.26.data/data/npcpy/npc_team/foreman.npc +0 -7
  112. npcpy-1.0.26.data/data/npcpy/npc_team/frederic.npc +0 -6
  113. npcpy-1.0.26.data/data/npcpy/npc_team/frederic4.png +0 -0
  114. npcpy-1.0.26.data/data/npcpy/npc_team/guac.png +0 -0
  115. npcpy-1.0.26.data/data/npcpy/npc_team/gui_controller.jinx +0 -28
  116. npcpy-1.0.26.data/data/npcpy/npc_team/image_generation.jinx +0 -29
  117. npcpy-1.0.26.data/data/npcpy/npc_team/internet_search.jinx +0 -30
  118. npcpy-1.0.26.data/data/npcpy/npc_team/kadiefa.npc +0 -3
  119. npcpy-1.0.26.data/data/npcpy/npc_team/kadiefa.png +0 -0
  120. npcpy-1.0.26.data/data/npcpy/npc_team/lineru.npc +0 -0
  121. npcpy-1.0.26.data/data/npcpy/npc_team/local_search.jinx +0 -152
  122. npcpy-1.0.26.data/data/npcpy/npc_team/maurawa.npc +0 -0
  123. npcpy-1.0.26.data/data/npcpy/npc_team/npcsh.ctx +0 -9
  124. npcpy-1.0.26.data/data/npcpy/npc_team/npcsh_executor.jinx +0 -31
  125. npcpy-1.0.26.data/data/npcpy/npc_team/npcsh_sibiji.png +0 -0
  126. npcpy-1.0.26.data/data/npcpy/npc_team/plonk.npc +0 -2
  127. npcpy-1.0.26.data/data/npcpy/npc_team/plonk.png +0 -0
  128. npcpy-1.0.26.data/data/npcpy/npc_team/plonkjr.npc +0 -2
  129. npcpy-1.0.26.data/data/npcpy/npc_team/plonkjr.png +0 -0
  130. npcpy-1.0.26.data/data/npcpy/npc_team/python_executor.jinx +0 -8
  131. npcpy-1.0.26.data/data/npcpy/npc_team/raone.npc +0 -0
  132. npcpy-1.0.26.data/data/npcpy/npc_team/screen_cap.jinx +0 -25
  133. npcpy-1.0.26.data/data/npcpy/npc_team/sibiji.npc +0 -5
  134. npcpy-1.0.26.data/data/npcpy/npc_team/sibiji.png +0 -0
  135. npcpy-1.0.26.data/data/npcpy/npc_team/slean.npc +0 -4
  136. npcpy-1.0.26.data/data/npcpy/npc_team/spool.png +0 -0
  137. npcpy-1.0.26.data/data/npcpy/npc_team/sql_executor.jinx +0 -33
  138. npcpy-1.0.26.data/data/npcpy/npc_team/test_pipeline.py +0 -181
  139. npcpy-1.0.26.data/data/npcpy/npc_team/turnic.npc +0 -4
  140. npcpy-1.0.26.data/data/npcpy/npc_team/welxor.npc +0 -0
  141. npcpy-1.0.26.data/data/npcpy/npc_team/yap.png +0 -0
  142. npcpy-1.0.26.dist-info/METADATA +0 -827
  143. npcpy-1.0.26.dist-info/RECORD +0 -139
  144. npcpy-1.0.26.dist-info/entry_points.txt +0 -11
  145. npcpy/{modes → ft}/__init__.py +0 -0
  146. {npcpy-1.0.26.dist-info → npcpy-1.2.32.dist-info}/WHEEL +0 -0
  147. {npcpy-1.0.26.dist-info → npcpy-1.2.32.dist-info}/licenses/LICENSE +0 -0
  148. {npcpy-1.0.26.dist-info → npcpy-1.2.32.dist-info}/top_level.txt +0 -0
npcpy/gen/response.py CHANGED
@@ -9,53 +9,127 @@ import os
 try:
     import ollama
 except ImportError:
-
     pass
 except OSError:
-    # Handle case where ollama is not installed or not available
+
     print("Ollama is not installed or not available. Please install it to use this feature.")
 try:
+    import litellm
     from litellm import completion
 except ImportError:
     pass
 except OSError:
-    # Handle case where litellm is not installed or not available
+
     pass
+
 def handle_streaming_json(api_params):
     """
-    Handles streaming responses when JSON format is requested.
-
-    Args:
-        api_params (dict): API parameters for the completion call.
-
-    Yields:
-        Processed chunks of the JSON response.
+    Handles streaming responses when JSON format is requested from LiteLLM.
     """
     json_buffer = ""
     stream = completion(**api_params)
-
     for chunk in stream:
         content = chunk.choices[0].delta.content
         if content:
             json_buffer += content
-            # Try to parse as valid JSON but only yield once we have complete JSON
             try:
-                # Check if we have a complete JSON object
                 json.loads(json_buffer)
-                # If successful, yield the chunk
                 yield chunk
             except json.JSONDecodeError:
-                # Not complete JSON yet, continue buffering
                 pass
-
-    # After the stream ends, try to ensure we have valid JSON
-    try:
-        final_json = json.loads(json_buffer)
-        # Could yield a special "completion" chunk here if needed
-    except json.JSONDecodeError:
-        # Handle case where stream ended but JSON is invalid
-        print(f"Warning: Complete stream did not produce valid JSON: {json_buffer}")
-
+
+def get_transformers_response(
+    prompt: str = None,
+    model=None,
+    tokenizer=None,
+    tools: list = None,
+    tool_map: Dict = None,
+    format: str = None,
+    messages: List[Dict[str, str]] = None,
+    auto_process_tool_calls: bool = False,
+    **kwargs,
+) -> Dict[str, Any]:
+    import torch
+    import json
+    import uuid
+    from transformers import AutoTokenizer, AutoModelForCausalLM
+
+    result = {
+        "response": None,
+        "messages": messages.copy() if messages else [],
+        "raw_response": None,
+        "tool_calls": [],
+        "tool_results": []
+    }
+
+    if model is None or tokenizer is None:
+        model_name = model if isinstance(model, str) else "Qwen/Qwen3-1.7b"
+        tokenizer = AutoTokenizer.from_pretrained(model_name)
+        model = AutoModelForCausalLM.from_pretrained(model_name)
+
+    if tokenizer.pad_token is None:
+        tokenizer.pad_token = tokenizer.eos_token
+
+    if prompt:
+        if result['messages'] and result['messages'][-1]["role"] == "user":
+            result['messages'][-1]["content"] = prompt
+        else:
+            result['messages'].append({"role": "user", "content": prompt})
+
+    if format == "json":
+        json_instruction = """If you are returning a json object, begin directly with the opening {.
+        Do not include any additional markdown formatting or leading ```json tags in your response."""
+        if result["messages"] and result["messages"][-1]["role"] == "user":
+            result["messages"][-1]["content"] += "\n" + json_instruction
+
+    chat_text = tokenizer.apply_chat_template(result["messages"], tokenize=False, add_generation_prompt=True)
+    device = next(model.parameters()).device
+    inputs = tokenizer(chat_text, return_tensors="pt", padding=True, truncation=True)
+    inputs = {k: v.to(device) for k, v in inputs.items()}
+
+
+    with torch.no_grad():
+        outputs = model.generate(
+            **inputs,
+            max_new_tokens=256,
+            temperature=0.7,
+            do_sample=True,
+            pad_token_id=tokenizer.eos_token_id,
+        )
+
+    response_content = tokenizer.decode(outputs[0][inputs['input_ids'].shape[1]:], skip_special_tokens=True).strip()
+    result["response"] = response_content
+    result["raw_response"] = response_content
+    result["messages"].append({"role": "assistant", "content": response_content})
+
+    if auto_process_tool_calls and tools and tool_map:
+        detected_tools = []
+        for tool in tools:
+            tool_name = tool.get("function", {}).get("name", "")
+            if tool_name in response_content:
+                detected_tools.append({
+                    "id": str(uuid.uuid4()),
+                    "function": {
+                        "name": tool_name,
+                        "arguments": "{}"
+                    }
+                })
+
+        if detected_tools:
+            result["tool_calls"] = detected_tools
+            result = process_tool_calls(result, tool_map, "local", "transformers", result["messages"])
+
+    if format == "json":
+        try:
+            if response_content.startswith("```json"):
+                response_content = response_content.replace("```json", "").replace("```", "").strip()
+            parsed_response = json.loads(response_content)
+            result["response"] = parsed_response
+        except json.JSONDecodeError:
+            result["error"] = f"Invalid JSON response: {response_content}"
+
+    return result
+
 
 def get_ollama_response(
     prompt: str,
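
The new get_transformers_response function added in this hunk gives npcpy a fully local generation path built on Hugging Face transformers. A minimal sketch of exercising it, assuming torch and transformers are installed; "Qwen/Qwen3-1.7b" is simply the fallback default hard-coded in the function, so any local causal LM name should behave the same:

from npcpy.gen.response import get_transformers_response

# Passing the model as a string triggers the loading branch above, which
# pulls both tokenizer and weights from the hub before generating.
out = get_transformers_response(
    prompt="Summarize what a knowledge graph is in one sentence.",
    model="Qwen/Qwen3-1.7b",
)
print(out["response"])        # decoded completion text
print(out["messages"][-1])    # assistant reply appended to the history
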
@@ -63,71 +137,58 @@ def get_ollama_response(
     images: List[str] = None,
     tools: list = None,
     tool_choice: Dict = None,
+    tool_map: Dict = None,
+    think=None,
     format: Union[str, BaseModel] = None,
     messages: List[Dict[str, str]] = None,
     stream: bool = False,
     attachments: List[str] = None,
+    auto_process_tool_calls: bool = False,
     **kwargs,
 ) -> Dict[str, Any]:
     """
     Generates a response using the Ollama API, supporting both streaming and non-streaming.
     """
-    import ollama
+
+    options = {}
+
     image_paths = []
     if images:
         image_paths.extend(images)
 
-    # Handle attachments - simply add them to images if they exist
     if attachments:
         for attachment in attachments:
-            # Check if file exists
             if os.path.exists(attachment):
-                # Extract extension to determine file type
                 _, ext = os.path.splitext(attachment)
                 ext = ext.lower()
 
-                # Handle image attachments
                 if ext in ['.jpg', '.jpeg', '.png', '.gif', '.bmp']:
                     image_paths.append(attachment)
-                # Handle PDF attachments
                 elif ext == '.pdf':
                     try:
                         from npcpy.data.load import load_pdf
                         pdf_data = load_pdf(attachment)
                         if pdf_data is not None:
-                            # Extract text and add to prompt
-                            texts = json.loads(pdf_data['texts'].iloc[0])
-                            pdf_text = "\n\n".join([item.get('content', '') for item in texts])
-
                             if prompt:
-                                prompt += f"\n\nContent from PDF: {os.path.basename(attachment)}\n{pdf_text[:2000]}..."
+                                prompt += f"\n\nContent from PDF: {os.path.basename(attachment)}\n{pdf_data[:5000]}..."
                             else:
-                                prompt = f"Content from PDF: {os.path.basename(attachment)}\n{pdf_text[:2000]}..."
-
-                            # Add images from PDF if needed
-                            try:
-                                images_data = json.loads(pdf_data['images'].iloc[0])
-                                # We would need to save these images temporarily and add paths to image_paths
-                                # This would require more complex implementation
-                            except Exception as e:
-                                print(f"Error processing PDF images: {e}")
-                    except Exception as e:
-                        print(f"Error processing PDF attachment: {e}")
-                # Handle CSV attachments
+                                prompt = f"Content from PDF: {os.path.basename(attachment)}\n{pdf_data[:5000]}..."
+                    except Exception:
+                        pass
                 elif ext == '.csv':
                     try:
                         from npcpy.data.load import load_csv
                         csv_data = load_csv(attachment)
                         if csv_data is not None:
-                            csv_sample = csv_data.head(10).to_string()
+                            csv_sample = csv_data.head(100).to_string()
                             if prompt:
-                                prompt += f"\n\nContent from CSV: {os.path.basename(attachment)} (first 10 rows):\n{csv_sample}"
+                                prompt += f"\n\nContent from CSV: {os.path.basename(attachment)} (first 100 rows):\n{csv_sample} \n csv description: {csv_data.describe()}"
                             else:
-                                prompt = f"Content from CSV: {os.path.basename(attachment)} (first 10 rows):\n{csv_sample}"
-                    except Exception as e:
-                        print(f"Error processing CSV attachment: {e}")
-
-    # Update the user message with processed prompt content
+                                prompt = f"Content from CSV: {os.path.basename(attachment)} (first 100 rows):\n{csv_sample} \n csv description: {csv_data.describe()}"
+                    except Exception:
+                        pass
+
+
     if prompt:
         if messages and messages[-1]["role"] == "user":
             if isinstance(messages[-1]["content"], str):
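
The attachment handling above now feeds richer context into the prompt: PDFs contribute their first 5000 characters of extracted text (load_pdf now returns text directly rather than a dataframe), and CSVs contribute their first 100 rows plus a pandas describe() summary. A hedged sketch of a call exercising it, with hypothetical file names and assuming a local Ollama server with the model pulled:

from npcpy.gen.response import get_ollama_response

result = get_ollama_response(
    "What do these files tell us?",
    "llama3.2",                              # any pulled Ollama model tag
    attachments=["report.pdf", "data.csv"],  # hypothetical local files
)
print(result["response"])
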
@@ -140,107 +201,231 @@ def get_ollama_response(
             else:
                 messages[-1]["content"].append({"type": "text", "text": prompt})
         else:
+            if not messages:
+                messages = []
             messages.append({"role": "user", "content": prompt})
-
-    # Add images to the last user message for Ollama
+    if format == "json" and not stream:
+        json_instruction = """If you are a returning a json object, begin directly with the opening {.
+        If you are returning a json array, begin directly with the opening [.
+        Do not include any additional markdown formatting or leading
+        ```json tags in your response. The item keys should be based on the ones provided
+        by the user. Do not invent new ones."""
+
+        if messages and messages[-1]["role"] == "user":
+            if isinstance(messages[-1]["content"], list):
+                messages[-1]["content"].append({
+                    "type": "text",
+                    "text": json_instruction
+                })
+            elif isinstance(messages[-1]["content"], str):
+                messages[-1]["content"] += "\n" + json_instruction
+
     if image_paths:
-        # Find the last user message or create one
-        last_user_idx = None
+        last_user_idx = -1
        for i, msg in enumerate(messages):
            if msg["role"] == "user":
                last_user_idx = i
-
-        if last_user_idx is None:
+        if last_user_idx == -1:
            messages.append({"role": "user", "content": ""})
            last_user_idx = len(messages) - 1
-
-        # For Ollama, we directly attach the images to the message
        messages[last_user_idx]["images"] = image_paths
-
-    # Prepare API parameters
+
    api_params = {
        "model": model,
        "messages": messages,
-        "stream": stream,
+        "stream": stream if not (tools and tool_map and auto_process_tool_calls) else False,
    }
-
-    # Add tools if provided
+
    if tools:
        api_params["tools"] = tools
-
-    # Add tool choice if specified
-    if tool_choice:
-        api_params["tool_choice"] = tool_choice
-    options = {}
-    # Add any additional parameters
+    if tool_choice:
+        options["tool_choice"] = tool_choice
+
+
+    if think is not None:
+        api_params['think'] = think
+
+    if isinstance(format, type) and not stream:
+        api_params["format"] = format.model_json_schema()
+    elif isinstance(format, str) and format == "json" and not stream:
+        api_params["format"] = "json"
+
    for key, value in kwargs.items():
        if key in [
-            "stop",
-            "temperature",
-            "top_p",
+            "stop",
+            "temperature",
+            "top_p",
            "max_tokens",
            "max_completion_tokens",
-            "tools",
-            "tool_choice",
-            "extra_headers",
+            "extra_headers",
            "parallel_tool_calls",
            "response_format",
            "user",
        ]:
            options[key] = value
-
 
-    # Handle formatting
-    if isinstance(format, type) and not stream:
-        schema = format.model_json_schema()
-        api_params["format"] = schema
-    elif isinstance(format, str) and format == "json" and not stream:
-        api_params["format"] = "json"
-
-    # Create standardized response structure
    result = {
        "response": None,
        "messages": messages.copy(),
        "raw_response": None,
-        "tool_calls": []
+        "tool_calls": [],
+        "tool_results": []
    }
+
+
+
+
+    if not auto_process_tool_calls or not (tools and tool_map):
+        res = ollama.chat(**api_params, options=options)
+        result["raw_response"] = res
+
+        if stream:
+            result["response"] = res
+            return result
+        else:
+
+            message = res.get("message", {})
+            response_content = message.get("content", "")
+            result["response"] = response_content
+            result["messages"].append({"role": "assistant", "content": response_content})
+
+            if message.get('tool_calls'):
+                result["tool_calls"] = message['tool_calls']
+
+
+            if format == "json":
+                try:
+                    if isinstance(response_content, str):
+                        if response_content.startswith("```json"):
+                            response_content = (
+                                response_content.replace("```json", "")
+                                .replace("```", "")
+                                .strip()
+                            )
+                    parsed_response = json.loads(response_content)
+                    result["response"] = parsed_response
+                except json.JSONDecodeError:
+                    result["error"] = f"Invalid JSON response: {response_content}"
+
+            return result
+
 
-    # Handle streaming
-    if stream:
-        result["response"] = ollama.chat(**api_params, options=options)
-        return result
 
-    # Non-streaming case
-    res = ollama.chat(**api_params, options = options)
+    res = ollama.chat(**api_params, options=options)
    result["raw_response"] = res
 
-    # Extract the response content
-    response_content = res.get("message", {}).get("content")
-    result["response"] = response_content
 
-    # Handle tool calls if tools were provided
-    if tools and "tool_calls" in res.get("message", {}):
-        result["tool_calls"] = res["message"]["tool_calls"]
 
-    # Append response to messages
-    result["messages"].append({"role": "assistant", "content": response_content})
+    message = res.get("message", {})
+    response_content = message.get("content", "")
 
-    # Handle JSON format if specified
-    if format == "json":
-        try:
-            if isinstance(response_content, str):
-                if response_content.startswith("```json"):
-                    response_content = (
-                        response_content.replace("```json", "")
-                        .replace("```", "")
-                        .strip()
-                    )
-            parsed_response = json.loads(response_content)
-            result["response"] = parsed_response
-        except json.JSONDecodeError:
-            result["error"] = f"Invalid JSON response: {response_content}"
 
-    return result
+    if message.get('tool_calls'):
+
+
+        result["tool_calls"] = message['tool_calls']
+
+        response_for_processing = {
+            "response": response_content,
+            "raw_response": res,
+            "messages": messages,
+            "tool_calls": message['tool_calls']
+        }
+
+
+        processed_result = process_tool_calls(response_for_processing,
+                                              tool_map, model,
+                                              'ollama',
+                                              messages,
+                                              stream=False)
+
+
+        if stream:
+
+
+
+            final_messages = processed_result["messages"]
+
+
+            final_api_params = {
+                "model": model,
+                "messages": final_messages,
+                "stream": True,
+            }
+
+            if tools:
+                final_api_params["tools"] = tools
+
+            final_stream = ollama.chat(**final_api_params, options=options)
+            processed_result["response"] = final_stream
+
+        return processed_result
+
+
+    else:
+        result["response"] = response_content
+        result["messages"].append({"role": "assistant", "content": response_content})
+
+        if stream:
+
+            stream_api_params = {
+                "model": model,
+                "messages": messages,
+                "stream": True,
+            }
+            if tools:
+                stream_api_params["tools"] = tools
+
+            result["response"] = ollama.chat(**stream_api_params, options=options)
+        else:
+
+            if format == "json":
+                try:
+                    if isinstance(llm_response, str):
+                        llm_response = llm_response.strip()
+
+                        if '```json' in llm_response:
+                            start = llm_response.find('```json') + 7
+                            end = llm_response.rfind('```')
+                            if end > start:
+                                llm_response = llm_response[start:end].strip()
+
+                        first_brace = llm_response.find('{')
+                        first_bracket = llm_response.find('[')
+
+                        if first_brace == -1 and first_bracket == -1:
+                            result["response"] = {}
+                            result["error"] = "No JSON found in response"
+                            return result
+
+                        if first_brace != -1 and (first_bracket == -1 or first_brace < first_bracket):
+                            llm_response = llm_response[first_brace:]
+                            last_brace = llm_response.rfind('}')
+                            if last_brace != -1:
+                                llm_response = llm_response[:last_brace+1]
+                        else:
+                            llm_response = llm_response[first_bracket:]
+                            last_bracket = llm_response.rfind(']')
+                            if last_bracket != -1:
+                                llm_response = llm_response[:last_bracket+1]
+
+                        parsed_json = json.loads(llm_response, strict=False)
+
+                        if "json" in parsed_json:
+                            result["response"] = parsed_json["json"]
+                        else:
+                            result["response"] = parsed_json
+
+                except (json.JSONDecodeError, TypeError) as e:
+                    print(f"JSON parsing error: {str(e)}")
+                    print(f"Raw response: {llm_response[:500]}")
+                    result["response"] = {}
+                    result["error"] = "Invalid JSON response"
+
+    return result
+
+import time
+
 
 def get_litellm_response(
     prompt: str = None,
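
The rewritten body above splits into two paths: without auto_process_tool_calls the function behaves much as before, while with it (plus tools and tool_map) the first call is forced non-streaming so any tool calls can be executed via process_tool_calls before an optional follow-up stream. A sketch of the new flow, with a hypothetical weather tool and a placeholder Ollama model tag:

from npcpy.gen.response import get_ollama_response

def get_weather(city):
    # Hypothetical tool implementation executed by process_tool_calls.
    return f"It is sunny in {city}."

tools = [{
    "type": "function",
    "function": {
        "name": "get_weather",
        "description": "Look up the weather for a city.",
        "parameters": {
            "type": "object",
            "properties": {"city": {"type": "string"}},
            "required": ["city"],
        },
    },
}]

result = get_ollama_response(
    "What's the weather in Oslo?",
    "llama3.2",
    tools=tools,
    tool_map={"get_weather": get_weather},
    auto_process_tool_calls=True,   # new flag added in this release
)
print(result["tool_results"])       # executed call(s) and their results
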
@@ -250,41 +435,108 @@ def get_litellm_response(
     tools: list = None,
     tool_choice: Dict = None,
     tool_map: Dict = None,
+    think=None,
     format: Union[str, BaseModel] = None,
     messages: List[Dict[str, str]] = None,
     api_key: str = None,
     api_url: str = None,
     stream: bool = False,
     attachments: List[str] = None,
+    auto_process_tool_calls: bool = False,
+    include_usage: bool = False,
     **kwargs,
 ) -> Dict[str, Any]:
-    """
-    Unified function for generating responses using litellm, supporting both streaming and non-streaming.
-    """
-    # Create standardized response structure
     result = {
         "response": None,
         "messages": messages.copy() if messages else [],
         "raw_response": None,
-        "tool_calls": []
+        "tool_calls": [],
+        "tool_results": [],
     }
-
-    # Handle Ollama separately
-    if provider == "ollama":
+    if provider == "ollama" and 'gpt-oss' not in model:
         return get_ollama_response(
             prompt,
             model,
-            images=images,
+            images=images,
             tools=tools,
-            tool_choice=tool_choice,
+            tool_choice=tool_choice,
+            tool_map=tool_map,
+            think=think,
             format=format,
             messages=messages,
             stream=stream,
-            attachments=attachments,
+            attachments=attachments,
+            auto_process_tool_calls=auto_process_tool_calls,
             **kwargs
         )
+    elif provider == 'transformers':
+        return get_transformers_response(
+            prompt,
+            model,
+            images=images,
+            tools=tools,
+            tool_choice=tool_choice,
+            tool_map=tool_map,
+            think=think,
+            format=format,
+            messages=messages,
+            stream=stream,
+            attachments=attachments,
+            auto_process_tool_calls=auto_process_tool_calls,
+            **kwargs
+
+        )
 
-    # Handle JSON format instructions
+
+    if attachments:
+        for attachment in attachments:
+            if os.path.exists(attachment):
+                _, ext = os.path.splitext(attachment)
+                ext = ext.lower()
+
+                if ext in ['.jpg', '.jpeg', '.png', '.gif', '.bmp']:
+                    if not images:
+                        images = []
+                    images.append(attachment)
+                elif ext == '.pdf':
+                    try:
+                        from npcpy.data.load import load_pdf
+                        pdf_data = load_pdf(attachment)
+                        if pdf_data is not None:
+                            if prompt:
+                                prompt += f"\n\nContent from PDF: {os.path.basename(attachment)}\n{pdf_data}..."
+                            else:
+                                prompt = f"Content from PDF: {os.path.basename(attachment)}\n{pdf_data}..."
+
+                    except Exception:
+                        pass
+                elif ext == '.csv':
+                    try:
+                        from npcpy.data.load import load_csv
+                        csv_data = load_csv(attachment)
+                        if csv_data is not None:
+                            csv_sample = csv_data.head(10).to_string()
+                            if prompt:
+                                prompt += f"\n\nContent from CSV: {os.path.basename(attachment)} (first 10 rows):\n{csv_sample}"
+                            else:
+                                prompt = f"Content from CSV: {os.path.basename(attachment)} (first 10 rows):\n{csv_sample}"
+                    except Exception:
+                        pass
+
+    if prompt:
+        if result['messages'] and result['messages'][-1]["role"] == "user":
+            if isinstance(messages[-1]["content"], str):
+                result['messages'][-1]["content"] = prompt
+            elif isinstance(result['messages'][-1]["content"], list):
+                for i, item in enumerate(result['messages'][-1]["content"]):
+                    if item.get("type") == "text":
+                        result['messages'][-1]["content"][i]["text"] = prompt
+                        break
+                else:
+                    result['messages'][-1]["content"].append({"type": "text", "text": prompt})
+        else:
+            result['messages'].append({"role": "user", "content": prompt})
+
     if format == "json" and not stream:
         json_instruction = """If you are a returning a json object, begin directly with the opening {.
         If you are returning a json array, begin directly with the opening [.
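
Two behavioural additions stand out in this hunk: provider='transformers' now dispatches to the new local path, and include_usage=True asks LiteLLM to attach token usage to streaming responses via stream_options. A hedged streaming sketch (model and provider are placeholders; the guard on chunk.choices matters because the trailing usage chunk carries no choices):

from npcpy.gen.response import get_litellm_response

result = get_litellm_response(
    prompt="Give me one fun fact about otters.",
    model="gpt-4o-mini",
    provider="openai",
    stream=True,
    include_usage=True,   # usage arrives as a final, choice-less chunk
)
for chunk in result["response"]:
    if chunk.choices and chunk.choices[0].delta.content:
        print(chunk.choices[0].delta.content, end="")
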
@@ -294,350 +546,298 @@ def get_litellm_response(
 
     if result["messages"] and result["messages"][-1]["role"] == "user":
         if isinstance(result["messages"][-1]["content"], list):
-            result["messages"][-1]["content"].append({
-                "type": "text",
-                "text": json_instruction
-            })
+            result["messages"][-1]["content"].append({"type": "text", "text": json_instruction})
         elif isinstance(result["messages"][-1]["content"], str):
             result["messages"][-1]["content"] += "\n" + json_instruction
-
-    # Handle images
+
     if images:
-        last_user_idx = None
+        last_user_idx = -1
         for i, msg in enumerate(result["messages"]):
             if msg["role"] == "user":
                 last_user_idx = i
-
-        if last_user_idx is None:
+        if last_user_idx == -1:
             result["messages"].append({"role": "user", "content": []})
             last_user_idx = len(result["messages"]) - 1
-
         if isinstance(result["messages"][last_user_idx]["content"], str):
-            result["messages"][last_user_idx]["content"] = [
-                {"type": "text", "text": result["messages"][last_user_idx]["content"]}
-            ]
+
+            result["messages"][last_user_idx]["content"] = [{"type": "text",
+                                                             "text": result["messages"][last_user_idx]["content"]
+                                                             }]
+
         elif not isinstance(result["messages"][last_user_idx]["content"], list):
             result["messages"][last_user_idx]["content"] = []
-
         for image_path in images:
             with open(image_path, "rb") as image_file:
                 image_data = base64.b64encode(compress_image(image_file.read())).decode("utf-8")
                 result["messages"][last_user_idx]["content"].append(
-                    {
-                        "type": "image_url",
-                        "image_url": {"url": f"data:image/jpeg;base64,{image_data}"},
-                    }
+                    {"type": "image_url", "image_url": {"url": f"data:image/jpeg;base64,{image_data}"}}
                 )
+
+
 
-    # Prepare API parameters
-    api_params = {
-        "messages": result["messages"],
-    }
-
-    # Handle provider, model, and API settings
-    if api_url is not None and provider == "openai-like":
+
+    api_params = {"messages": result["messages"]}
+
+    if include_usage:
+        litellm.include_cost_in_streaming_usage = True
+        api_params['stream_options'] = {"include_usage": True}
+
+    if api_url is not None and ('openai-like' in provider or provider == "openai-like" or provider == "openai"):
         api_params["api_base"] = api_url
         provider = "openai"
 
-    if format == "json" and not stream:
-        api_params["response_format"] = {"type": "json_object"}
-    elif isinstance(format, BaseModel):
+
+    if provider == 'enpisi' and api_url is None:
+        api_params['api_base'] = 'https://api.enpisi.com'
+        if api_key is None:
+            api_key = os.environ.get('NPC_STUDIO_LICENSE_KEY')
+        api_params['api_key'] = api_key
+        if '-npc' in model:
+            model = model.split('-npc')[0]
+        provider = "openai"
+
+    if isinstance(format, BaseModel):
         api_params["response_format"] = format
     if model is None:
-        print('model not provided, using defaults')
         model = os.environ.get("NPCSH_CHAT_MODEL", "llama3.2")
     if provider is None:
-        provider = os.environ.get("NPCSH_CHAT_PROVIDER", "openai")
+        provider = os.environ.get("NPCSH_CHAT_PROVIDER")
 
-    if "/" not in model:
-        model_str = f"{provider}/{model}"
-    else:
-        model_str = model
-
-    api_params["model"] = model_str
-
-    if api_key is not None:
+    api_params["model"] = f"{provider}/{model}" if "/" not in model else model
+    if api_key is not None:
         api_params["api_key"] = api_key
-
-    # Add tools if provided
-    if tools:
+    if tools:
         api_params["tools"] = tools
-
-    # Add tool choice if specified
-    if tool_choice:
+    if tool_choice:
         api_params["tool_choice"] = tool_choice
 
-    # Add additional parameters
     if kwargs:
         for key, value in kwargs.items():
             if key in [
-                "stop",
-                "temperature",
-                "top_p",
-                "max_tokens",
-                "max_completion_tokens",
-                "tools",
-                "tool_choice",
-                "extra_headers",
-                "parallel_tool_calls",
-                "response_format",
-                "user",
+                "stop", "temperature", "top_p", "max_tokens", "max_completion_tokens",
+                "extra_headers", "parallel_tool_calls",
+                "response_format", "user",
             ]:
                 api_params[key] = value
-
 
-    # Handle streaming
-    if stream:
-        #print('streaming response')
-        if format == "json":
-            print('streaming json output')
-            result["response"] = handle_streaming_json(api_params)
-        elif tools:
-            # do a call to get tool choice
-            result["response"] = completion(**api_params, stream=False)
-            result = process_tool_calls(result, tool_map, model, provider, messages, stream=True)
-
+    if not auto_process_tool_calls or not (tools and tool_map):
+        api_params["stream"] = stream
+        resp = completion(**api_params)
+        result["raw_response"] = resp
+
+        if stream:
+            result["response"] = resp
+            return result
         else:
-
-            result["response"] = completion(**api_params, stream=True)
 
-        return result
+            llm_response = resp.choices[0].message.content
+            result["response"] = llm_response
+            result["messages"].append({"role": "assistant",
+                                       "content": llm_response})
+
+
+            if hasattr(resp.choices[0].message, 'tool_calls') and resp.choices[0].message.tool_calls:
+                result["tool_calls"] = resp.choices[0].message.tool_calls
+            if format == "json":
+                try:
+                    if isinstance(llm_response, str):
+                        llm_response = llm_response.strip()
+
+                        if '```json' in llm_response:
+                            start = llm_response.find('```json') + 7
+                            end = llm_response.rfind('```')
+                            if end > start:
+                                llm_response = llm_response[start:end].strip()
+
+                        first_brace = llm_response.find('{')
+                        first_bracket = llm_response.find('[')
+
+                        if first_brace == -1 and first_bracket == -1:
+                            result["response"] = {}
+                            result["error"] = "No JSON found in response"
+                            return result
+
+                        if first_brace != -1 and (first_bracket == -1 or first_brace < first_bracket):
+                            llm_response = llm_response[first_brace:]
+                            last_brace = llm_response.rfind('}')
+                            if last_brace != -1:
+                                llm_response = llm_response[:last_brace+1]
+                        else:
+                            llm_response = llm_response[first_bracket:]
+                            last_bracket = llm_response.rfind(']')
+                            if last_bracket != -1:
+                                llm_response = llm_response[:last_bracket+1]
+
+                        parsed_json = json.loads(llm_response, strict=False)
+
+                        if "json" in parsed_json:
+                            result["response"] = parsed_json["json"]
+                        else:
+                            result["response"] = parsed_json
+
+                except (json.JSONDecodeError, TypeError) as e:
+                    print(f"JSON parsing error: {str(e)}")
+                    print(f"Raw response: {llm_response[:500]}")
+                    result["response"] = {}
+                    result["error"] = "Invalid JSON response"
+
+            return result
+
 
-    # Non-streaming case
 
-    resp = completion(**api_params)
-    result["raw_response"] = resp
+    initial_api_params = api_params.copy()
+    initial_api_params["stream"] = False
 
-
-    llm_response = resp.choices[0].message.content
-    result["response"] = llm_response
 
-    # Extract tool calls if any
-    if hasattr(resp.choices[0].message, 'tool_calls') and resp.choices[0].message.tool_calls:
-        result["tool_calls"] = resp.choices[0].message.tool_calls
+    resp = completion(**initial_api_params)
+    result["raw_response"] = resp
 
-    # Handle JSON format requests
-    if format == "json":
-        try:
-            if isinstance(llm_response, str):
-                # Clean up JSON response if needed
-                if llm_response.startswith("```json"):
-                    llm_response = llm_response.replace("```json", "").replace("```", "").strip()
-                parsed_json = json.loads(llm_response)
-
-            if "json" in parsed_json:
-                result["response"] = parsed_json["json"]
-            else:
-                result["response"] = parsed_json
-
-        except (json.JSONDecodeError, TypeError) as e:
-            print(f"JSON parsing error: {str(e)}")
-            print(f"Raw response: {llm_response}")
-            result["error"] = "Invalid JSON response"
-
-    # Add assistant response to message history
-    result["messages"].append(
-        {
-            "role": "assistant",
-            "content": (
-                llm_response if isinstance(llm_response, str) else str(llm_response)
-            ),
-        }
-    )
 
-    return result
+    has_tool_calls = hasattr(resp.choices[0].message, 'tool_calls') and resp.choices[0].message.tool_calls
+
+    if has_tool_calls:
 
+
+        result["tool_calls"] = resp.choices[0].message.tool_calls
+
+
+        processed_result = process_tool_calls(result,
+                                              tool_map,
+                                              model,
+                                              provider,
+                                              result["messages"],
+                                              stream=False)
+
+
+        if stream:
 
-    def handle_streaming_json(api_params):
-        """
-        Handles streaming responses when JSON format is requested.
-        """
-        json_buffer = ""
-        stream = completion(**api_params)
-
-        for chunk in stream:
-            content = chunk.choices[0].delta.content
-            if content:
-                json_buffer += content
-                # Try to parse as valid JSON but only yield once we have complete JSON
-                try:
-                    # Check if we have a complete JSON object
-                    json.loads(json_buffer)
-                    # If successful, yield the chunk
-                    yield chunk
-                except json.JSONDecodeError:
-                    # Not complete JSON yet, continue buffering
-                    pass
-
-        # After the stream ends, try to ensure we have valid JSON
-        try:
-            final_json = json.loads(json_buffer)
-            # Could yield a special "completion" chunk here if needed
-        except json.JSONDecodeError:
-            # Handle case where stream ended but JSON is invalid
-            print(f"Warning: Complete stream did not produce valid JSON: {json_buffer}")
+
+
+            clean_messages = []
+            for msg in processed_result["messages"]:
+                if msg.get('role') == 'assistant' and 'tool_calls' in msg:
+                    continue
+
+                else:
+                    clean_messages.append(msg)
+
+            final_api_params = api_params.copy()
+            final_api_params["messages"] = clean_messages
+            final_api_params["stream"] = True
 
 
+            final_api_params = api_params.copy()
+            final_api_params["messages"] = clean_messages
+            final_api_params["stream"] = True
+            if "tools" in final_api_params:
+                del final_api_params["tools"]
+            if "tool_choice" in final_api_params:
+                del final_api_params["tool_choice"]
 
-    def process_tool_calls(response_dict, tool_map, model, provider, messages, stream=False):
-        """
-        Process tool calls from a response and execute corresponding tools.
-
-        Args:
-            response_dict (dict): The response dictionary from get_litellm_response or get_ollama_response
-            tool_map (dict): Mapping of tool names to their implementation functions
-            model (str): The model to use for follow-up responses
-            provider (str): The provider to use for follow-up responses
-            messages (list): The current message history
-            stream (bool): Whether to stream the response
+            final_stream = completion(**final_api_params)
+
+
+            final_stream = completion(**final_api_params)
+            processed_result["response"] = final_stream
+
+        return processed_result
 
-        Returns:
-            dict: Updated response dictionary with tool results and final response
-        """
-
+
+    else:
+        llm_response = resp.choices[0].message.content
+        result["messages"].append({"role": "assistant", "content": llm_response})
+
+        if stream:
+            def string_chunk_generator():
+                chunk_size = 1
+                for i, char in enumerate(llm_response):
+                    yield type('MockChunk', (), {
+                        'id': f'mock-chunk-{i}',
+                        'object': 'chat.completion.chunk',
+                        'created': int(time.time()),
+                        'model': model or 'unknown',
+                        'choices': [type('Choice', (), {
+                            'index': 0,
+                            'delta': type('Delta', (), {
+                                'content': char,
+                                'role': 'assistant' if i == 0 else None
+                            })(),
+                            'finish_reason': 'stop' if i == len(llm_response) - 1 else None
+                        })()]
+                    })()
+
+            result["response"] = string_chunk_generator()
+        else:
+            result["response"] = llm_response
+        return result
+def process_tool_calls(response_dict, tool_map, model, provider, messages, stream=False):
     result = response_dict.copy()
     result["tool_results"] = []
-    #print(tool_map)
 
-    # Make sure messages is initialized
     if "messages" not in result:
         result["messages"] = messages if messages else []
 
-    # Extract tool calls from the response
-    if "response" in result:
-        if hasattr(result["response"], "choices") and hasattr(result["response"].choices[0], "message"):
-            tool_calls = result["response"].choices[0].message.tool_calls
-        elif isinstance(result["response"], dict) and "tool_calls" in result["response"]:
-            tool_calls = result["response"]["tool_calls"]
-        else:
-            tool_calls = None
-    else:
-        tool_calls = None
+    tool_calls = result.get("tool_calls", [])
 
-    if tool_calls is not None:
-        for tool_call in tool_calls:
-
-            # Extract tool details - handle both Ollama and LiteLLM formats
-            if isinstance(tool_call, dict): # Ollama format
-                tool_id = tool_call.get("id", str(uuid.uuid4()))
-                tool_name = tool_call.get("function", {}).get("name")
-                arguments_str = tool_call.get("function", {}).get("arguments", "{}")
-            else: # LiteLLM format - expect object with attributes
-                tool_id = getattr(tool_call, "id", str(uuid.uuid4()))
-                # Handle function as either attribute or dict
-                if hasattr(tool_call, "function"):
-                    if isinstance(tool_call.function, dict):
-                        tool_name = tool_call.function.get("name")
-                        arguments_str = tool_call.function.get("arguments", "{}")
-                    else:
-                        tool_name = getattr(tool_call.function, "name", None)
-                        arguments_str = getattr(tool_call.function, "arguments", "{}")
-                else:
-                    raise ValueError("Jinx call missing function attribute or property")
-
-            # Parse arguments
-            if not arguments_str:
-                arguments = {}
-            else:
-                try:
-                    arguments = json.loads(arguments_str) if isinstance(arguments_str, str) else arguments_str
-                except json.JSONDecodeError:
-                    arguments = {"raw_arguments": arguments_str}
+    if not tool_calls:
+        return result
+
+    for tool_call in tool_calls:
+        tool_id = str(uuid.uuid4())
+        tool_name = None
+        arguments = {}
+
 
-            render_markdown('# tool_call \n - '+ tool_name + '\n - ' + str(arguments))
+        if isinstance(tool_call, dict):
+            tool_id = tool_call.get("id", str(uuid.uuid4()))
+            tool_name = tool_call.get("function", {}).get("name")
+            arguments_str = tool_call.get("function", {}).get("arguments", "{}")
+        else:
+            tool_id = getattr(tool_call, "id", str(uuid.uuid4()))
+            if hasattr(tool_call, "function"):
+                func_obj = tool_call.function
+                tool_name = getattr(func_obj, "name", None)
+                arguments_str = getattr(func_obj, "arguments", "{}")
+            else:
+                continue
 
-
-            # Execute the tool if it exists in the tool map
-            if tool_name in tool_map:
-                try:
-                    # Try calling with keyword arguments first
-                    tool_result = tool_map[tool_name](**arguments)
-                except TypeError:
-                    # If that fails, try calling with the arguments as a single parameter
-                    tool_result = tool_map[tool_name](arguments)
-                # Convert tool result to a serializable format
-                serializable_result = None
-                tool_result_str = ""
-
-                try:
-                    # Check if it's TextContent or similar with .text attribute
-                    if hasattr(tool_result, 'content') and isinstance(tool_result.content, list):
-                        content_list = tool_result.content
-                        text_parts = []
-                        for item in content_list:
-                            if hasattr(item, 'text'):
-                                text_parts.append(item.text)
-                        tool_result_str = " ".join(text_parts)
-                        serializable_result = {"text": tool_result_str}
-                    # Handle other common types
-                    elif hasattr(tool_result, 'model_dump'):
-                        serializable_result = tool_result.model_dump()
-                        tool_result_str = str(serializable_result)
-                    elif hasattr(tool_result, 'to_dict'):
-                        serializable_result = tool_result.to_dict()
-                        tool_result_str = str(serializable_result)
-                    elif hasattr(tool_result, '__dict__'):
-                        serializable_result = tool_result.__dict__
-                        tool_result_str = str(serializable_result)
-                    elif isinstance(tool_result, (dict, list)):
-                        serializable_result = tool_result
-                        tool_result_str = json.dumps(tool_result)
-                    else:
-                        # Fall back to string representation
-                        tool_result_str = str(tool_result)
-                        serializable_result = {"text": tool_result_str}
-                except Exception as e:
-                    tool_result_str = f"Error serializing result: {str(e)}"
-                    serializable_result = {"error": tool_result_str}
-
-                # Store the serializable result
-                result["tool_results"].append({
-                    "tool_call_id": tool_id,
-                    "tool_name": tool_name,
-                    "arguments": arguments,
-                    "result": serializable_result
-                })
-
-                # Add the tool call message
-                result["messages"].append({
-                    "role": "assistant",
-                    "content": None,
-                    "tool_calls": [
-                        {
-                            "id": tool_id,
-                            "type": "function",
-                            "function": {
-                                "name": tool_name,
-                                "arguments": json.dumps(arguments)
-                            }
-                        }
-                    ]
-                })
-
-                # Add the tool response message with string content
-                result["messages"].append({
-                    "role": "tool",
-                    "tool_call_id": tool_id,
-                    "content": tool_result_str
-                })
-
-                # Follow up with a request to the LLM
-                follow_up_prompt = "Based on the tool results, please provide a helpful response."
+        try:
+            arguments = json.loads(arguments_str) if isinstance(arguments_str, str) else arguments_str
+        except json.JSONDecodeError:
+            arguments = {"raw_arguments": arguments_str}
 
-                # Get follow-up response
-                follow_up_response = get_litellm_response(
-                    prompt=follow_up_prompt,
-                    model=model,
-                    provider=provider,
-                    messages=result["messages"],
-                    stream=stream,
-                )
 
-                # Update the result
-                if isinstance(follow_up_response, dict):
-                    if "response" in follow_up_response:
-                        result["response"] = follow_up_response["response"]
-                    if "messages" in follow_up_response:
-                        result["messages"] = follow_up_response["messages"]
+        if tool_name in tool_map:
+            tool_result = None
+            tool_result_str = ""
+            serializable_result = None
+
+            try:
+                tool_result = tool_map[tool_name](**arguments)
+            except Exception as e:
+                tool_result = f"Error executing tool '{tool_name}': {str(e)}. Tool map is : {tool_map}"
+
+            try:
+                tool_result_str = json.dumps(tool_result, default=str)
+                try:
+                    serializable_result = json.loads(tool_result_str)
+                except json.JSONDecodeError:
+                    serializable_result = {"result": tool_result_str}
+            except Exception as e_serialize:
+                tool_result_str = f"Error serializing result for {tool_name}: {str(e_serialize)}"
+                serializable_result = {"error": tool_result_str}
+
+            result["tool_results"].append({
+                "tool_call_id": tool_id,
+                "tool_name": tool_name,
+                "arguments": arguments,
+                "result": serializable_result
+            })
+
+
+            result["messages"].append({
+                "role": "assistant",
+                "content": f'The results of the tool call for {tool_name} with {arguments} are as follows:' + tool_result_str
+            })
 
     return result
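
The rewritten process_tool_calls has a much simpler contract: it reads tool calls straight from response_dict["tool_calls"] (instead of digging through the raw provider response), executes each callable from tool_map, serializes results with json.dumps(..., default=str), and records them both in "tool_results" and as a plain assistant message, with no follow-up LLM call of its own. A minimal illustration with a hypothetical add tool:

from npcpy.gen.response import process_tool_calls

response_dict = {
    "response": None,
    "messages": [{"role": "user", "content": "add 2 and 3"}],
    "tool_calls": [{
        "id": "call-1",
        "function": {"name": "add", "arguments": '{"a": 2, "b": 3}'},
    }],
}

out = process_tool_calls(
    response_dict,
    tool_map={"add": lambda a, b: a + b},
    model="llama3.2",      # unused by this version; kept for signature parity
    provider="ollama",
    messages=response_dict["messages"],
)
print(out["tool_results"][0]["result"])   # -> 5
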