npcpy 1.1.28__py3-none-any.whl → 1.2.32__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (44)
  1. npcpy/data/audio.py +16 -38
  2. npcpy/data/image.py +29 -29
  3. npcpy/data/load.py +4 -3
  4. npcpy/data/text.py +28 -28
  5. npcpy/data/video.py +6 -6
  6. npcpy/data/web.py +49 -21
  7. npcpy/ft/__init__.py +0 -0
  8. npcpy/ft/diff.py +110 -0
  9. npcpy/ft/ge.py +115 -0
  10. npcpy/ft/memory_trainer.py +171 -0
  11. npcpy/ft/model_ensembler.py +357 -0
  12. npcpy/ft/rl.py +360 -0
  13. npcpy/ft/sft.py +248 -0
  14. npcpy/ft/usft.py +128 -0
  15. npcpy/gen/audio_gen.py +24 -0
  16. npcpy/gen/embeddings.py +13 -13
  17. npcpy/gen/image_gen.py +37 -15
  18. npcpy/gen/response.py +287 -111
  19. npcpy/gen/video_gen.py +10 -9
  20. npcpy/llm_funcs.py +447 -79
  21. npcpy/memory/command_history.py +201 -48
  22. npcpy/memory/kg_vis.py +74 -74
  23. npcpy/memory/knowledge_graph.py +482 -115
  24. npcpy/memory/memory_processor.py +81 -0
  25. npcpy/memory/search.py +70 -70
  26. npcpy/mix/debate.py +192 -3
  27. npcpy/npc_compiler.py +1541 -879
  28. npcpy/npc_sysenv.py +250 -78
  29. npcpy/serve.py +1036 -321
  30. npcpy/sql/ai_function_tools.py +257 -0
  31. npcpy/sql/database_ai_adapters.py +186 -0
  32. npcpy/sql/database_ai_functions.py +163 -0
  33. npcpy/sql/model_runner.py +19 -19
  34. npcpy/sql/npcsql.py +706 -507
  35. npcpy/sql/sql_model_compiler.py +156 -0
  36. npcpy/tools.py +20 -20
  37. npcpy/work/plan.py +8 -8
  38. npcpy/work/trigger.py +3 -3
  39. {npcpy-1.1.28.dist-info → npcpy-1.2.32.dist-info}/METADATA +169 -9
  40. npcpy-1.2.32.dist-info/RECORD +54 -0
  41. npcpy-1.1.28.dist-info/RECORD +0 -40
  42. {npcpy-1.1.28.dist-info → npcpy-1.2.32.dist-info}/WHEEL +0 -0
  43. {npcpy-1.1.28.dist-info → npcpy-1.2.32.dist-info}/licenses/LICENSE +0 -0
  44. {npcpy-1.1.28.dist-info → npcpy-1.2.32.dist-info}/top_level.txt +0 -0
npcpy/gen/response.py CHANGED
@@ -11,14 +11,15 @@ try:
 except ImportError:
     pass
 except OSError:
-    # Handle case where ollama is not installed or not available
+
     print("Ollama is not installed or not available. Please install it to use this feature.")
 try:
+    import litellm
     from litellm import completion
 except ImportError:
     pass
 except OSError:
-    # Handle case where litellm is not installed or not available
+
     pass
 
 def handle_streaming_json(api_params):
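Both backends are now imported defensively, so either can be absent at runtime. A minimal sketch of the same guard pattern, assuming a hypothetical `OLLAMA_AVAILABLE` flag and `chat_local` helper (illustrative names, not npcpy API):

    # Sketch of the optional-dependency guard used above.
    try:
        import ollama
        OLLAMA_AVAILABLE = True
    except (ImportError, OSError):
        OLLAMA_AVAILABLE = False

    def chat_local(messages):
        if not OLLAMA_AVAILABLE:
            raise RuntimeError("Install ollama to use the local backend.")
        return ollama.chat(model="llama3.2", messages=messages)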
@@ -37,6 +38,97 @@ def handle_streaming_json(api_params):
 except json.JSONDecodeError:
     pass
 
+def get_transformers_response(
+    prompt: str = None,
+    model=None,
+    tokenizer=None,
+    tools: list = None,
+    tool_map: Dict = None,
+    format: str = None,
+    messages: List[Dict[str, str]] = None,
+    auto_process_tool_calls: bool = False,
+    **kwargs,
+) -> Dict[str, Any]:
+    import torch
+    import json
+    import uuid
+    from transformers import AutoTokenizer, AutoModelForCausalLM
+
+    result = {
+        "response": None,
+        "messages": messages.copy() if messages else [],
+        "raw_response": None,
+        "tool_calls": [],
+        "tool_results": []
+    }
+
+    if model is None or tokenizer is None:
+        model_name = model if isinstance(model, str) else "Qwen/Qwen3-1.7b"
+        tokenizer = AutoTokenizer.from_pretrained(model_name)
+        model = AutoModelForCausalLM.from_pretrained(model_name)
+
+    if tokenizer.pad_token is None:
+        tokenizer.pad_token = tokenizer.eos_token
+
+    if prompt:
+        if result['messages'] and result['messages'][-1]["role"] == "user":
+            result['messages'][-1]["content"] = prompt
+        else:
+            result['messages'].append({"role": "user", "content": prompt})
+
+    if format == "json":
+        json_instruction = """If you are returning a json object, begin directly with the opening {.
+Do not include any additional markdown formatting or leading ```json tags in your response."""
+        if result["messages"] and result["messages"][-1]["role"] == "user":
+            result["messages"][-1]["content"] += "\n" + json_instruction
+
+    chat_text = tokenizer.apply_chat_template(result["messages"], tokenize=False, add_generation_prompt=True)
+    device = next(model.parameters()).device
+    inputs = tokenizer(chat_text, return_tensors="pt", padding=True, truncation=True)
+    inputs = {k: v.to(device) for k, v in inputs.items()}
+
+    with torch.no_grad():
+        outputs = model.generate(
+            **inputs,
+            max_new_tokens=256,
+            temperature=0.7,
+            do_sample=True,
+            pad_token_id=tokenizer.eos_token_id,
+        )
+
+    response_content = tokenizer.decode(outputs[0][inputs['input_ids'].shape[1]:], skip_special_tokens=True).strip()
+    result["response"] = response_content
+    result["raw_response"] = response_content
+    result["messages"].append({"role": "assistant", "content": response_content})
+
+    if auto_process_tool_calls and tools and tool_map:
+        detected_tools = []
+        for tool in tools:
+            tool_name = tool.get("function", {}).get("name", "")
+            if tool_name in response_content:
+                detected_tools.append({
+                    "id": str(uuid.uuid4()),
+                    "function": {
+                        "name": tool_name,
+                        "arguments": "{}"
+                    }
+                })
+
+        if detected_tools:
+            result["tool_calls"] = detected_tools
+            result = process_tool_calls(result, tool_map, "local", "transformers", result["messages"])
+
+    if format == "json":
+        try:
+            if response_content.startswith("```json"):
+                response_content = response_content.replace("```json", "").replace("```", "").strip()
+            parsed_response = json.loads(response_content)
+            result["response"] = parsed_response
+        except json.JSONDecodeError:
+            result["error"] = f"Invalid JSON response: {response_content}"
+
+    return result
 
 
 def get_ollama_response(
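The new get_transformers_response adds a fully local Hugging Face path: it builds a chat prompt with apply_chat_template, generates up to 256 new tokens, appends the assistant turn, detects tools by simple substring match, and optionally parses JSON. A usage sketch, assuming the transformers and torch extras are installed (the default checkpoint Qwen/Qwen3-1.7b is downloaded on first use):

    # Sketch: calling the new local-transformers path directly.
    out = get_transformers_response(
        prompt="Reply with a JSON object containing a 'greeting' key.",
        format="json",          # exercises the JSON post-processing branch
    )
    print(out["response"])      # parsed dict on success; raw text plus out["error"] otherwise
    print(out["messages"][-1])  # assistant turn appended to the history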
@@ -58,6 +150,7 @@ def get_ollama_response(
     Generates a response using the Ollama API, supporting both streaming and non-streaming.
     """
 
+    options = {}
 
     image_paths = []
     if images:
@@ -145,6 +238,10 @@ def get_ollama_response(
 
     if tools:
         api_params["tools"] = tools
+    if tool_choice:
+        options["tool_choice"] = tool_choice
+
+
     if think is not None:
         api_params['think'] = think
 
@@ -153,7 +250,6 @@ def get_ollama_response(
     elif isinstance(format, str) and format == "json" and not stream:
         api_params["format"] = "json"
 
-    options = {}
     for key, value in kwargs.items():
         if key in [
             "stop",
@@ -167,9 +263,6 @@ def get_ollama_response(
             "user",
         ]:
             options[key] = value
-    if tool_choice:
-        options["tool_choice"] = tool_choice
-
 
     result = {
         "response": None,
@@ -179,18 +272,18 @@ def get_ollama_response(
         "tool_results": []
     }
 
-    #print("API params:", api_params)
+
 
-    # If we want raw tool calls OR no tools, just stream directly
+
     if not auto_process_tool_calls or not (tools and tool_map):
         res = ollama.chat(**api_params, options=options)
         result["raw_response"] = res
 
         if stream:
-            result["response"] = res  # This is the stream generator
+            result["response"] = res
             return result
         else:
-            # Non-streaming regular response
+
             message = res.get("message", {})
             response_content = message.get("content", "")
             result["response"] = response_content
@@ -199,7 +292,7 @@ def get_ollama_response(
             if message.get('tool_calls'):
                 result["tool_calls"] = message['tool_calls']
 
-            # Handle JSON format if specified
+
             if format == "json":
                 try:
                     if isinstance(response_content, str):
@@ -216,19 +309,19 @@ def get_ollama_response(
 
             return result
 
-    # Only if auto_process_tool_calls=True AND we have tools
-    # Make initial non-streaming call to check for tool calls
+
+
     res = ollama.chat(**api_params, options=options)
     result["raw_response"] = res
 
-    #print("Raw Ollama response:", res)
+
 
     message = res.get("message", {})
     response_content = message.get("content", "")
 
-    # Check for tool calls and process them
+
     if message.get('tool_calls'):
-        print("Found tool calls, processing automatically:", message['tool_calls'])
+
 
         result["tool_calls"] = message['tool_calls']
 
@@ -239,21 +332,21 @@ def get_ollama_response(
             "tool_calls": message['tool_calls']
         }
 
-        # Process tool calls and get the updated result
+
         processed_result = process_tool_calls(response_for_processing,
                                               tool_map, model,
                                               'ollama',
                                               messages,
                                               stream=False)
 
-        # Now if streaming was requested, make a final call with the complete conversation
+
         if stream:
-            print("Making final streaming call with processed tools")
+
+
 
-            # Use the updated messages from tool processing
             final_messages = processed_result["messages"]
 
-            # Make the final streaming call
+
             final_api_params = {
                 "model": model,
                 "messages": final_messages,
@@ -268,13 +361,13 @@ def get_ollama_response(
 
         return processed_result
 
-    # No tool calls found, handle normally
+
     else:
         result["response"] = response_content
         result["messages"].append({"role": "assistant", "content": response_content})
 
         if stream:
-            # Make streaming call for regular response
+
             stream_api_params = {
                 "model": model,
                 "messages": messages,
@@ -285,22 +378,53 @@ def get_ollama_response(
 
             result["response"] = ollama.chat(**stream_api_params, options=options)
         else:
-            # Handle JSON format if specified
+
             if format == "json":
                 try:
-                    if isinstance(response_content, str):
-                        if response_content.startswith("```json"):
-                            response_content = (
-                                response_content.replace("```json", "")
-                                .replace("```", "")
-                                .strip()
-                            )
-                    parsed_response = json.loads(response_content)
-                    result["response"] = parsed_response
-                except json.JSONDecodeError:
-                    result["error"] = f"Invalid JSON response: {response_content}"
-
+                    if isinstance(response_content, str):
+                        response_content = response_content.strip()
+
+                        if '```json' in response_content:
+                            start = response_content.find('```json') + 7
+                            end = response_content.rfind('```')
+                            if end > start:
+                                response_content = response_content[start:end].strip()
+
+                        first_brace = response_content.find('{')
+                        first_bracket = response_content.find('[')
+
+                        if first_brace == -1 and first_bracket == -1:
+                            result["response"] = {}
+                            result["error"] = "No JSON found in response"
+                            return result
+
+                        if first_brace != -1 and (first_bracket == -1 or first_brace < first_bracket):
+                            response_content = response_content[first_brace:]
+                            last_brace = response_content.rfind('}')
+                            if last_brace != -1:
+                                response_content = response_content[:last_brace+1]
+                        else:
+                            response_content = response_content[first_bracket:]
+                            last_bracket = response_content.rfind(']')
+                            if last_bracket != -1:
+                                response_content = response_content[:last_bracket+1]
+
+                    parsed_json = json.loads(response_content, strict=False)
+
+                    if "json" in parsed_json:
+                        result["response"] = parsed_json["json"]
+                    else:
+                        result["response"] = parsed_json
+
+                except (json.JSONDecodeError, TypeError) as e:
+                    print(f"JSON parsing error: {str(e)}")
+                    print(f"Raw response: {response_content[:500]}")
+                    result["response"] = {}
+                    result["error"] = "Invalid JSON response"
+
     return result
+
+import time
 
 
 
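The rewritten branch replaces the old startswith("```json") strip with a scan for a fenced block and then the outermost brace or bracket, parsed with strict=False. The same technique as a standalone helper (the function name is illustrative, not npcpy API):

    import json

    def extract_json(text: str):
        # Illustrative re-implementation of the extraction logic above.
        text = text.strip()
        if '```json' in text:
            start = text.find('```json') + 7
            end = text.rfind('```')
            if end > start:
                text = text[start:end].strip()
        brace, bracket = text.find('{'), text.find('[')
        if brace == -1 and bracket == -1:
            raise ValueError("No JSON found in response")
        if brace != -1 and (bracket == -1 or brace < bracket):
            text = text[brace:text.rfind('}') + 1]
        else:
            text = text[bracket:text.rfind(']') + 1]
        return json.loads(text, strict=False)

    print(extract_json('Sure!\n```json\n{"ok": true}\n```'))  # {'ok': True}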
  def get_litellm_response(
@@ -319,6 +443,7 @@ def get_litellm_response(
319
443
  stream: bool = False,
320
444
  attachments: List[str] = None,
321
445
  auto_process_tool_calls: bool = False,
446
+ include_usage: bool = False,
322
447
  **kwargs,
323
448
  ) -> Dict[str, Any]:
324
449
  result = {
@@ -344,6 +469,23 @@ def get_litellm_response(
             auto_process_tool_calls=auto_process_tool_calls,
             **kwargs
         )
+    elif provider == 'transformers':
+        return get_transformers_response(
+            prompt,
+            model,
+            images=images,
+            tools=tools,
+            tool_choice=tool_choice,
+            tool_map=tool_map,
+            think=think,
+            format=format,
+            messages=messages,
+            stream=stream,
+            attachments=attachments,
+            auto_process_tool_calls=auto_process_tool_calls,
+            **kwargs
+        )
 
 
     if attachments:
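This branch routes provider='transformers' through the same entry point as the ollama path. A sketch of the call, assuming the usual prompt/model/provider keyword arguments:

    # Sketch: dispatching a local HF model through the generic entry point.
    out = get_litellm_response(
        prompt="Summarize npcpy in one sentence.",
        model="Qwen/Qwen3-1.7b",   # any HF causal-LM checkpoint
        provider="transformers",
    )
    print(out["response"])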
@@ -362,9 +504,9 @@ def get_litellm_response(
                 pdf_data = load_pdf(attachment)
                 if pdf_data is not None:
                     if prompt:
-                        prompt += f"\n\nContent from PDF: {os.path.basename(attachment)}\n{pdf_data[:5000]}..."
+                        prompt += f"\n\nContent from PDF: {os.path.basename(attachment)}\n{pdf_data}..."
                     else:
-                        prompt = f"Content from PDF: {os.path.basename(attachment)}\n{pdf_data[:5000]}..."
+                        prompt = f"Content from PDF: {os.path.basename(attachment)}\n{pdf_data}..."
 
         except Exception:
             pass
@@ -435,12 +577,25 @@ def get_litellm_response(
 
 
     api_params = {"messages": result["messages"]}
-
-    if api_url is not None and provider == "openai-like":
+
+    if include_usage:
+        litellm.include_cost_in_streaming_usage = True
+        api_params['stream_options'] = {"include_usage": True}
+
+    if api_url is not None and ('openai-like' in provider or provider == "openai-like" or provider == "openai"):
         api_params["api_base"] = api_url
         provider = "openai"
 
 
+    if provider == 'enpisi' and api_url is None:
+        api_params['api_base'] = 'https://api.enpisi.com'
+        if api_key is None:
+            api_key = os.environ.get('NPC_STUDIO_LICENSE_KEY')
+        api_params['api_key'] = api_key
+        if '-npc' in model:
+            model = model.split('-npc')[0]
+        provider = "openai"
+
     if isinstance(format, BaseModel):
         api_params["response_format"] = format
     if model is None:
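The new 'enpisi' branch supplies hosted defaults: api_base https://api.enpisi.com, an API key pulled from NPC_STUDIO_LICENSE_KEY when none is given, a '-npc' model-suffix strip, and re-routing through the openai provider. A configuration sketch (the key value is a placeholder):

    import os

    # Sketch: what the enpisi branch resolves to at call time.
    os.environ.setdefault("NPC_STUDIO_LICENSE_KEY", "sk-...")  # placeholder

    out = get_litellm_response(
        prompt="hello",
        model="gemma-npc",   # '-npc' suffix is stripped -> 'gemma'
        provider="enpisi",   # becomes provider='openai' with api_base set
    )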
@@ -465,33 +620,55 @@ def get_litellm_response(
         ]:
             api_params[key] = value
 
-    # If we want raw tool calls OR no tools, just call directly with requested streaming
     if not auto_process_tool_calls or not (tools and tool_map):
         api_params["stream"] = stream
         resp = completion(**api_params)
         result["raw_response"] = resp
 
         if stream:
-            result["response"] = resp  # This is the stream generator
+            result["response"] = resp
             return result
         else:
-            # Non-streaming response
+
             llm_response = resp.choices[0].message.content
             result["response"] = llm_response
             result["messages"].append({"role": "assistant",
                                        "content": llm_response})
 
-            # Check for tool calls
+
             if hasattr(resp.choices[0].message, 'tool_calls') and resp.choices[0].message.tool_calls:
                 result["tool_calls"] = resp.choices[0].message.tool_calls
-
-            # Handle JSON format requests
             if format == "json":
                 try:
                     if isinstance(llm_response, str):
-                        if llm_response.startswith("```json"):
-                            llm_response = llm_response.replace("```json", "").replace("```", "").strip()
-                    parsed_json = json.loads(llm_response)
+                        llm_response = llm_response.strip()
+
+                        if '```json' in llm_response:
+                            start = llm_response.find('```json') + 7
+                            end = llm_response.rfind('```')
+                            if end > start:
+                                llm_response = llm_response[start:end].strip()
+
+                        first_brace = llm_response.find('{')
+                        first_bracket = llm_response.find('[')
+
+                        if first_brace == -1 and first_bracket == -1:
+                            result["response"] = {}
+                            result["error"] = "No JSON found in response"
+                            return result
+
+                        if first_brace != -1 and (first_bracket == -1 or first_brace < first_bracket):
+                            llm_response = llm_response[first_brace:]
+                            last_brace = llm_response.rfind('}')
+                            if last_brace != -1:
+                                llm_response = llm_response[:last_brace+1]
+                        else:
+                            llm_response = llm_response[first_bracket:]
+                            last_bracket = llm_response.rfind(']')
+                            if last_bracket != -1:
+                                llm_response = llm_response[:last_bracket+1]
+
+                    parsed_json = json.loads(llm_response, strict=False)
 
                     if "json" in parsed_json:
                         result["response"] = parsed_json["json"]
@@ -500,13 +677,14 @@ def get_litellm_response(
 
                 except (json.JSONDecodeError, TypeError) as e:
                     print(f"JSON parsing error: {str(e)}")
-                    print(f"Raw response: {llm_response}")
+                    print(f"Raw response: {llm_response[:500]}")
+                    result["response"] = {}
                     result["error"] = "Invalid JSON response"
 
             return result
 
-    # Only if process_tool_calls=True AND we have tools
-    # Make initial non-streaming call to check for tool calls
+
+
     initial_api_params = api_params.copy()
     initial_api_params["stream"] = False
 
@@ -514,15 +692,15 @@ def get_litellm_response(
     resp = completion(**initial_api_params)
     result["raw_response"] = resp
 
-    # Check for tool calls
+
     has_tool_calls = hasattr(resp.choices[0].message, 'tool_calls') and resp.choices[0].message.tool_calls
 
     if has_tool_calls:
-        print("Found tool calls in LiteLLM, processing automatically:", resp.choices[0].message.tool_calls)
+
 
         result["tool_calls"] = resp.choices[0].message.tool_calls
 
-        # Process tool calls
+
         processed_result = process_tool_calls(result,
                                               tool_map,
                                               model,
@@ -530,52 +708,68 @@ def get_litellm_response(
                                               result["messages"],
                                               stream=False)
 
-        # If streaming was requested, make final streaming call with processed conversation
+
         if stream:
-            print("Making final streaming call with processed tools")
+            clean_messages = []
+            for msg in processed_result["messages"]:
+                if msg.get('role') == 'assistant' and 'tool_calls' in msg:
+                    continue
+                else:
+                    clean_messages.append(msg)
 
             final_api_params = api_params.copy()
-            final_api_params["messages"] = processed_result["messages"]
+            final_api_params["messages"] = clean_messages
             final_api_params["stream"] = True
+            if "tools" in final_api_params:
+                del final_api_params["tools"]
+            if "tool_choice" in final_api_params:
+                del final_api_params["tool_choice"]
 
             final_stream = completion(**final_api_params)
             processed_result["response"] = final_stream
 
         return processed_result
-
-    # No tool calls found, handle normally
+
     else:
         llm_response = resp.choices[0].message.content
-        result["response"] = llm_response
         result["messages"].append({"role": "assistant", "content": llm_response})
 
         if stream:
-            # Make streaming call for regular response
-            stream_api_params = api_params.copy()
-            stream_api_params["stream"] = True
-            final_stream = completion(**stream_api_params)
-            result["response"] = final_stream
+            def string_chunk_generator():
+                for i, char in enumerate(llm_response):
+                    yield type('MockChunk', (), {
+                        'id': f'mock-chunk-{i}',
+                        'object': 'chat.completion.chunk',
+                        'created': int(time.time()),
+                        'model': model or 'unknown',
+                        'choices': [type('Choice', (), {
+                            'index': 0,
+                            'delta': type('Delta', (), {
+                                'content': char,
+                                'role': 'assistant' if i == 0 else None
+                            })(),
+                            'finish_reason': 'stop' if i == len(llm_response) - 1 else None
+                        })()]
+                    })()
+
+            result["response"] = string_chunk_generator()
         else:
-            # Handle JSON format requests for non-streaming
-            if format == "json":
-                try:
-                    if isinstance(llm_response, str):
-                        if llm_response.startswith("```json"):
-                            llm_response = llm_response.replace("```json", "").replace("```", "").strip()
-                    parsed_json = json.loads(llm_response)
-
-                    if "json" in parsed_json:
-                        result["response"] = parsed_json["json"]
-                    else:
-                        result["response"] = parsed_json
-
-                except (json.JSONDecodeError, TypeError) as e:
-                    print(f"JSON parsing error: {str(e)}")
-                    print(f"Raw response: {llm_response}")
-                    result["error"] = "Invalid JSON response"
-
-        return result
+            result["response"] = llm_response
+
+        return result
+
 def process_tool_calls(response_dict, tool_map, model, provider, messages, stream=False):
     result = response_dict.copy()
     result["tool_results"] = []
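string_chunk_generator fabricates OpenAI-style chat.completion.chunk objects character by character, so callers that already iterate litellm streams keep working when the final text was produced without a second API call. A consumption sketch, where `out` is the dict returned above with stream=True:

    # Sketch: the mocked stream is consumed like a real litellm stream.
    for chunk in out["response"]:
        delta = chunk.choices[0].delta
        if delta.content:
            print(delta.content, end="", flush=True)
        if chunk.choices[0].finish_reason == "stop":
            break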
@@ -587,12 +781,12 @@ def process_tool_calls(response_dict, tool_map, model, provider, messages, stream=False):
 
     if not tool_calls:
         return result
-    #print('tm', tool_map)
+
     for tool_call in tool_calls:
         tool_id = str(uuid.uuid4())
         tool_name = None
         arguments = {}
-        #print('tc', tool_call)
+
 
         if isinstance(tool_call, dict):
             tool_id = tool_call.get("id", str(uuid.uuid4()))
@@ -611,20 +805,17 @@ def process_tool_calls(response_dict, tool_map, model, provider, messages, stream=False):
                 arguments = json.loads(arguments_str) if isinstance(arguments_str, str) else arguments_str
             except json.JSONDecodeError:
                 arguments = {"raw_arguments": arguments_str}
-        #print('arg', arguments)
-        #print('tool name in tool map ', tool_name in tool_map)
+
+
         if tool_name in tool_map:
             tool_result = None
             tool_result_str = ""
             serializable_result = None
 
             try:
-                print(tool_map[tool_name])
-                print('Executing tool:', tool_name, 'with arguments:', arguments)
                 tool_result = tool_map[tool_name](**arguments)
-                print('Executed Tool Result:', tool_result)
             except Exception as e:
-                tool_result = f"Error executing tool '{tool_name}': {str(e)}"
+                tool_result = f"Error executing tool '{tool_name}': {str(e)}. Tool map is : {tool_map}"
 
             try:
                 tool_result_str = json.dumps(tool_result, default=str)
@@ -643,25 +834,10 @@ def process_tool_calls(response_dict, tool_map, model, provider, messages, stream=False):
                     "result": serializable_result
                 })
 
-                result["messages"].append({
-                    "role": "assistant",
-                    "content": None,
-                    "tool_calls": [
-                        {
-                            "id": tool_id,
-                            "type": "function",
-                            "function": {
-                                "name": tool_name,
-                                "arguments": arguments
-                            }
-                        }
-                    ]
-                })
 
                 result["messages"].append({
-                    "role": "tool",
-                    "tool_call_id": tool_id,
-                    "content": tool_result_str
+                    "role": "assistant",
+                    "content": f'The results of the tool call for {tool_name} with {arguments} are as follows: ' + tool_result_str
                 })
 
     return result
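process_tool_calls now flattens each tool round-trip into a single assistant message instead of an assistant tool_calls entry plus a role-'tool' reply, which keeps the history usable with backends that lack native tool messages. A sketch of the resulting shape, assuming the function reads tool_calls and messages from the passed dict as the call sites above suggest (the weather tool is an illustrative stub):

    def get_weather(city: str = "Berlin"):
        return {"city": city, "forecast": "sunny"}   # illustrative stub

    response_dict = {
        "messages": [{"role": "user", "content": "Weather in Berlin?"}],
        "tool_calls": [{"function": {"name": "get_weather",
                                     "arguments": '{"city": "Berlin"}'}}],
    }

    out = process_tool_calls(response_dict, {"get_weather": get_weather},
                             "llama3.2", "ollama", response_dict["messages"])
    print(out["messages"][-1]["role"])     # 'assistant'
    print(out["messages"][-1]["content"])  # 'The results of the tool call for get_weather ...'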