swarms 7.8.8__py3-none-any.whl → 7.9.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- swarms/cli/onboarding_process.py +1 -3
- swarms/prompts/collaborative_prompts.py +177 -0
- swarms/structs/agent.py +434 -128
- swarms/structs/concurrent_workflow.py +70 -196
- swarms/structs/conversation.py +6 -0
- swarms/structs/csv_to_agent.py +1 -3
- swarms/structs/interactive_groupchat.py +319 -12
- swarms/structs/ma_utils.py +25 -6
- swarms/structs/mixture_of_agents.py +88 -113
- swarms/structs/swarm_router.py +148 -187
- swarms/telemetry/__init__.py +4 -22
- swarms/telemetry/log_executions.py +43 -0
- swarms/telemetry/main.py +63 -325
- swarms/tools/__init__.py +10 -0
- swarms/tools/base_tool.py +15 -6
- swarms/tools/mcp_client_call.py +508 -0
- swarms/tools/py_func_to_openai_func_str.py +0 -1
- swarms/utils/auto_download_check_packages.py +4 -3
- swarms/utils/formatter.py +130 -13
- swarms/utils/history_output_formatter.py +2 -0
- swarms/utils/litellm_wrapper.py +5 -1
- swarms/utils/output_types.py +1 -1
- swarms-7.9.0.dist-info/METADATA +626 -0
- {swarms-7.8.8.dist-info → swarms-7.9.0.dist-info}/RECORD +27 -25
- swarms-7.8.8.dist-info/METADATA +0 -2119
- {swarms-7.8.8.dist-info → swarms-7.9.0.dist-info}/LICENSE +0 -0
- {swarms-7.8.8.dist-info → swarms-7.9.0.dist-info}/WHEEL +0 -0
- {swarms-7.8.8.dist-info → swarms-7.9.0.dist-info}/entry_points.txt +0 -0
swarms/tools/mcp_client_call.py
CHANGED
@@ -505,3 +505,511 @@ async def execute_tool_call_simple(
|
|
505
505
|
*args,
|
506
506
|
**kwargs,
|
507
507
|
)
|
508
|
+
|
509
|
+
|
510
|
+
def _create_server_tool_mapping(
    urls: List[str],
    connections: List[MCPConnection] = None,
    format: str = "openai",
) -> Dict[str, Dict[str, Any]]:
    """Build a lookup table from tool/function name to the MCP server hosting it.

    Args:
        urls: List of server URLs to query.
        connections: Optional list of MCPConnection objects, positionally
            aligned with ``urls``; servers beyond the list length get ``None``.
        format: Format to fetch tools in.

    Returns:
        Dict mapping each function name to its server info
        (``url``, ``connection``, ``tool``, ``server_index``).
        Servers that fail to respond are logged and skipped.
    """
    mapping: Dict[str, Dict[str, Any]] = {}

    for index, url in enumerate(urls):
        connection = (
            connections[index]
            if connections and index < len(connections)
            else None
        )

        try:
            # Fetch this server's tool list; any failure (including a
            # malformed tool entry below) skips the whole server.
            for tool in get_mcp_tools_sync(
                server_path=url,
                connection=connection,
                format=format,
            ):
                entry = {
                    "url": url,
                    "connection": connection,
                    "tool": tool,
                    "server_index": index,
                }
                if isinstance(tool, dict) and "function" in tool:
                    # OpenAI-style schema: the name lives under "function".
                    mapping[tool["function"]["name"]] = entry
                elif hasattr(tool, "name"):
                    # MCPTool-like objects expose the name as an attribute.
                    mapping[tool.name] = entry
        except Exception as e:
            logger.warning(
                f"Failed to fetch tools from server {url}: {str(e)}"
            )
            continue

    return mapping
|
569
|
+
|
570
|
+
|
571
|
+
async def _create_server_tool_mapping_async(
    urls: List[str],
    connections: List[MCPConnection] = None,
    format: str = "openai",
) -> Dict[str, Dict[str, Any]]:
    """Async variant: build a tool-name -> server-info lookup for MCP servers.

    Args:
        urls: List of server URLs to query.
        connections: Optional list of MCPConnection objects, positionally
            aligned with ``urls``; servers beyond the list length get ``None``.
        format: Format to fetch tools in.

    Returns:
        Dict mapping each function name to its server info
        (``url``, ``connection``, ``tool``, ``server_index``).
        Servers that fail to respond are logged and skipped.
    """
    mapping: Dict[str, Dict[str, Any]] = {}

    for index, url in enumerate(urls):
        connection = (
            connections[index]
            if connections and index < len(connections)
            else None
        )

        try:
            # Fetch this server's tool list asynchronously; any failure
            # (including a malformed tool entry below) skips the server.
            for tool in await aget_mcp_tools(
                server_path=url,
                connection=connection,
                format=format,
            ):
                entry = {
                    "url": url,
                    "connection": connection,
                    "tool": tool,
                    "server_index": index,
                }
                if isinstance(tool, dict) and "function" in tool:
                    # OpenAI-style schema: the name lives under "function".
                    mapping[tool["function"]["name"]] = entry
                elif hasattr(tool, "name"):
                    # MCPTool-like objects expose the name as an attribute.
                    mapping[tool.name] = entry
        except Exception as e:
            logger.warning(
                f"Failed to fetch tools from server {url}: {str(e)}"
            )
            continue

    return mapping
|
630
|
+
|
631
|
+
|
632
|
+
async def _execute_tool_on_server(
    tool_call: Dict[str, Any],
    server_info: Dict[str, Any],
    output_type: Literal["json", "dict", "str", "formatted"] = "str",
) -> Dict[str, Any]:
    """Execute one tool call against a specific MCP server.

    Args:
        tool_call: The tool call to execute.
        server_info: Server information from the server-tool mapping
            (must contain ``url``, ``connection`` and ``server_index``).
        output_type: Output format type.

    Returns:
        A dict carrying server metadata plus either the execution result
        (``status == "success"``) or the error string (``status == "error"``).
    """
    # Metadata shared by both the success and the error payload.
    metadata = {
        "server_url": server_info["url"],
        "server_index": server_info["server_index"],
        "function_name": tool_call.get("function", {}).get(
            "name", "unknown"
        ),
    }

    try:
        outcome = await _execute_tool_call_simple(
            response=tool_call,
            server_path=server_info["url"],
            connection=server_info["connection"],
            output_type=output_type,
        )
    except Exception as e:
        logger.error(
            f"Failed to execute tool on server {server_info['url']}: {str(e)}"
        )
        return {
            **metadata,
            "result": None,
            "error": str(e),
            "status": "error",
        }

    return {**metadata, "result": outcome, "status": "success"}
|
680
|
+
|
681
|
+
|
682
|
+
async def execute_multiple_tools_on_multiple_mcp_servers(
    responses: List[Dict[str, Any]],
    urls: List[str],
    connections: List[MCPConnection] = None,
    output_type: Literal["json", "dict", "str", "formatted"] = "str",
    max_concurrent: Optional[int] = None,
    *args,
    **kwargs,
) -> List[Dict[str, Any]]:
    """
    Execute multiple tool calls across multiple MCP servers.

    This function creates a mapping of function names to servers, then for each response
    that contains tool calls, it finds the appropriate server for each function and
    executes the calls concurrently.

    Args:
        responses: List of responses containing tool calls (OpenAI format)
        urls: List of MCP server URLs
        connections: Optional list of MCPConnection objects corresponding to each URL
        output_type: Output format type for results
        max_concurrent: Maximum number of concurrent executions (default: len(responses))

    Returns:
        List of execution results with server metadata

    Example:
        # Example responses format:
        responses = [
            {
                "function": {
                    "name": "search_web",
                    "arguments": {"query": "python programming"}
                }
            },
            {
                "function": {
                    "name": "search_database",
                    "arguments": {"table": "users", "id": 123}
                }
            }
        ]

        urls = ["http://server1:8000", "http://server2:8000"]

        results = await execute_multiple_tools_on_multiple_mcp_servers(
            responses=responses,
            urls=urls
        )
    """
    # NOTE(review): *args/**kwargs are accepted but never referenced in this
    # body — presumably kept for interface compatibility; confirm with callers.
    if not responses:
        logger.warning("No responses provided for execution")
        return []

    if not urls:
        raise MCPValidationError("No server URLs provided")

    # Create mapping of function names to servers using async version
    logger.info(f"Creating tool mapping for {len(urls)} servers")
    server_tool_mapping = await _create_server_tool_mapping_async(
        urls=urls, connections=connections, format="openai"
    )

    if not server_tool_mapping:
        raise MCPExecutionError(
            "No tools found on any of the provided servers"
        )

    logger.info(
        f"Found {len(server_tool_mapping)} unique functions across all servers"
    )

    # Extract all tool calls from responses
    all_tool_calls = []
    logger.info(
        f"Processing {len(responses)} responses for tool call extraction"
    )

    # Check if responses are individual characters that need to be reconstructed
    # Heuristic: more than 10 elements, each a 1-char string, is treated as a
    # JSON payload that arrived split character-by-character; join and re-wrap.
    if len(responses) > 10 and all(
        isinstance(r, str) and len(r) == 1 for r in responses
    ):
        logger.info(
            "Detected character-by-character response, reconstructing JSON string"
        )
        try:
            reconstructed_response = "".join(responses)
            logger.info(
                f"Reconstructed response length: {len(reconstructed_response)}"
            )
            logger.debug(
                f"Reconstructed response: {reconstructed_response}"
            )

            # Try to parse the reconstructed response to validate it
            # (validation only — the string is kept either way, and the
            # per-response loop below attempts the real parse).
            try:
                json.loads(reconstructed_response)
                logger.info(
                    "Successfully validated reconstructed JSON response"
                )
            except json.JSONDecodeError as e:
                logger.warning(
                    f"Reconstructed response is not valid JSON: {str(e)}"
                )
                logger.debug(
                    f"First 100 chars: {reconstructed_response[:100]}"
                )
                logger.debug(
                    f"Last 100 chars: {reconstructed_response[-100:]}"
                )

            responses = [reconstructed_response]
        except Exception as e:
            logger.warning(
                f"Failed to reconstruct response from characters: {str(e)}"
            )

    for i, response in enumerate(responses):
        logger.debug(
            f"Processing response {i}: {type(response)} - {response}"
        )

        # Handle JSON string responses
        if isinstance(response, str):
            try:
                response = json.loads(response)
                logger.debug(
                    f"Parsed JSON string response {i}: {response}"
                )
            except json.JSONDecodeError:
                logger.warning(
                    f"Failed to parse JSON response at index {i}: {response}"
                )
                continue

        if isinstance(response, dict):
            # Single tool call
            if "function" in response:
                logger.debug(
                    f"Found single tool call in response {i}: {response['function']}"
                )
                # Parse arguments if they're a JSON string
                # NOTE: mutates the caller's response dict in place.
                if isinstance(
                    response["function"].get("arguments"), str
                ):
                    try:
                        response["function"]["arguments"] = (
                            json.loads(
                                response["function"]["arguments"]
                            )
                        )
                        logger.debug(
                            f"Parsed function arguments: {response['function']['arguments']}"
                        )
                    except json.JSONDecodeError:
                        logger.warning(
                            f"Failed to parse function arguments: {response['function']['arguments']}"
                        )

                all_tool_calls.append((i, response))
            # Multiple tool calls
            elif "tool_calls" in response:
                logger.debug(
                    f"Found multiple tool calls in response {i}: {len(response['tool_calls'])} calls"
                )
                for tool_call in response["tool_calls"]:
                    # Parse arguments if they're a JSON string
                    if isinstance(
                        tool_call.get("function", {}).get(
                            "arguments"
                        ),
                        str,
                    ):
                        try:
                            tool_call["function"]["arguments"] = (
                                json.loads(
                                    tool_call["function"]["arguments"]
                                )
                            )
                            logger.debug(
                                f"Parsed tool call arguments: {tool_call['function']['arguments']}"
                            )
                        except json.JSONDecodeError:
                            logger.warning(
                                f"Failed to parse tool call arguments: {tool_call['function']['arguments']}"
                            )

                    all_tool_calls.append((i, tool_call))
            # Direct tool call
            # (a bare {"name": ..., "arguments": ...} dict gets wrapped in
            # {"function": ...} so downstream code sees one shape)
            elif "name" in response and "arguments" in response:
                logger.debug(
                    f"Found direct tool call in response {i}: {response}"
                )
                # Parse arguments if they're a JSON string
                if isinstance(response.get("arguments"), str):
                    try:
                        response["arguments"] = json.loads(
                            response["arguments"]
                        )
                        logger.debug(
                            f"Parsed direct tool call arguments: {response['arguments']}"
                        )
                    except json.JSONDecodeError:
                        logger.warning(
                            f"Failed to parse direct tool call arguments: {response['arguments']}"
                        )

                all_tool_calls.append((i, {"function": response}))
            else:
                logger.debug(
                    f"Response {i} is a dict but doesn't match expected tool call formats: {list(response.keys())}"
                )
        else:
            logger.warning(
                f"Unsupported response type at index {i}: {type(response)}"
            )
            continue

    if not all_tool_calls:
        logger.warning("No tool calls found in responses")
        return []

    logger.info(f"Found {len(all_tool_calls)} tool calls to execute")

    # Execute tool calls concurrently
    # Semaphore caps concurrency; default is effectively unlimited
    # (one permit per tool call).
    max_concurrent = max_concurrent or len(all_tool_calls)
    semaphore = asyncio.Semaphore(max_concurrent)

    async def execute_with_semaphore(tool_call_info):
        # Run one tool call under the concurrency cap, routing it to the
        # server that advertises its function name.
        async with semaphore:
            response_index, tool_call = tool_call_info
            function_name = tool_call.get("function", {}).get(
                "name", "unknown"
            )

            if function_name not in server_tool_mapping:
                logger.warning(
                    f"Function '{function_name}' not found on any server"
                )
                return {
                    "response_index": response_index,
                    "function_name": function_name,
                    "result": None,
                    "error": f"Function '{function_name}' not available on any server",
                    "status": "not_found",
                }

            server_info = server_tool_mapping[function_name]
            result = await _execute_tool_on_server(
                tool_call=tool_call,
                server_info=server_info,
                output_type=output_type,
            )
            result["response_index"] = response_index
            return result

    # Execute all tool calls concurrently
    tasks = [
        execute_with_semaphore(tool_call_info)
        for tool_call_info in all_tool_calls
    ]
    results = await asyncio.gather(*tasks, return_exceptions=True)

    # Process results and handle exceptions
    # (return_exceptions=True means failures arrive as Exception objects
    # in the results list rather than propagating)
    processed_results = []
    for i, result in enumerate(results):
        if isinstance(result, Exception):
            logger.error(
                f"Task {i} failed with exception: {str(result)}"
            )
            processed_results.append(
                {
                    "response_index": (
                        all_tool_calls[i][0]
                        if i < len(all_tool_calls)
                        else -1
                    ),
                    "function_name": "unknown",
                    "result": None,
                    "error": str(result),
                    "status": "exception",
                }
            )
        else:
            processed_results.append(result)

    logger.info(
        f"Completed execution of {len(processed_results)} tool calls"
    )
    return processed_results
|
972
|
+
|
973
|
+
|
974
|
+
def execute_multiple_tools_on_multiple_mcp_servers_sync(
    responses: List[Dict[str, Any]],
    urls: List[str],
    connections: List[MCPConnection] = None,
    output_type: Literal["json", "dict", "str", "formatted"] = "str",
    max_concurrent: Optional[int] = None,
    *args,
    **kwargs,
) -> List[Dict[str, Any]]:
    """
    Synchronous version of execute_multiple_tools_on_multiple_mcp_servers.

    Runs the async implementation to completion on an event loop obtained
    from get_or_create_event_loop().

    Args:
        responses: List of responses containing tool calls (OpenAI format)
        urls: List of MCP server URLs
        connections: Optional list of MCPConnection objects corresponding to each URL
        output_type: Output format type for results
        max_concurrent: Maximum number of concurrent executions

    Returns:
        List of execution results with server metadata

    Raises:
        MCPExecutionError: If execution fails for any reason; the original
            exception is attached as the cause (``__cause__``).
    """
    with get_or_create_event_loop() as loop:
        try:
            # NOTE(review): forwarding *args positionally after keyword
            # arguments will collide with `responses`/`urls` if any
            # positional extras are actually passed — kept for interface
            # compatibility; confirm no caller relies on it.
            return loop.run_until_complete(
                execute_multiple_tools_on_multiple_mcp_servers(
                    responses=responses,
                    urls=urls,
                    connections=connections,
                    output_type=output_type,
                    max_concurrent=max_concurrent,
                    *args,
                    **kwargs,
                )
            )
        except Exception as e:
            logger.error(
                f"Error in execute_multiple_tools_on_multiple_mcp_servers_sync: {str(e)}"
            )
            # Chain the original exception so callers keep the real traceback.
            raise MCPExecutionError(
                f"Failed to execute multiple tools sync: {str(e)}"
            ) from e
|
@@ -492,7 +492,6 @@ def convert_multiple_functions_to_openai_function_schema(
|
|
492
492
|
# ]
|
493
493
|
# Use 80% of cpu cores
|
494
494
|
max_workers = int(os.cpu_count() * 0.8)
|
495
|
-
print(f"max_workers: {max_workers}")
|
496
495
|
|
497
496
|
with concurrent.futures.ThreadPoolExecutor(
|
498
497
|
max_workers=max_workers
|
@@ -8,9 +8,10 @@ import subprocess
|
|
8
8
|
import sys
|
9
9
|
from typing import Literal, Optional, Union
|
10
10
|
from swarms.utils.loguru_logger import initialize_logger
|
11
|
-
import pkg_resources
|
12
11
|
|
13
12
|
|
13
|
+
from importlib.metadata import distribution, PackageNotFoundError
|
14
|
+
|
14
15
|
logger = initialize_logger("autocheckpackages")
|
15
16
|
|
16
17
|
|
@@ -39,13 +40,13 @@ def check_and_install_package(
|
|
39
40
|
# Check if package exists
|
40
41
|
if package_manager == "pip":
|
41
42
|
try:
|
42
|
-
|
43
|
+
distribution(package_name)
|
43
44
|
if not upgrade:
|
44
45
|
logger.info(
|
45
46
|
f"Package {package_name} is already installed"
|
46
47
|
)
|
47
48
|
return True
|
48
|
-
except
|
49
|
+
except PackageNotFoundError:
|
49
50
|
pass
|
50
51
|
|
51
52
|
# Construct installation command
|