crewplus 0.2.27__tar.gz → 0.2.29__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of crewplus has been flagged as potentially problematic; click here for more details.
- {crewplus-0.2.27 → crewplus-0.2.29}/PKG-INFO +9 -5
- {crewplus-0.2.27 → crewplus-0.2.29}/README.md +7 -4
- crewplus-0.2.29/crewplus/services/init_services.py +37 -0
- {crewplus-0.2.27 → crewplus-0.2.29}/crewplus/services/model_load_balancer.py +10 -3
- {crewplus-0.2.27 → crewplus-0.2.29}/crewplus/vectorstores/milvus/vdb_service.py +29 -3
- {crewplus-0.2.27 → crewplus-0.2.29}/pyproject.toml +2 -1
- crewplus-0.2.27/crewplus/services/init_services.py +0 -20
- {crewplus-0.2.27 → crewplus-0.2.29}/LICENSE +0 -0
- {crewplus-0.2.27 → crewplus-0.2.29}/crewplus/__init__.py +0 -0
- {crewplus-0.2.27 → crewplus-0.2.29}/crewplus/services/__init__.py +0 -0
- {crewplus-0.2.27 → crewplus-0.2.29}/crewplus/services/azure_chat_model.py +0 -0
- {crewplus-0.2.27 → crewplus-0.2.29}/crewplus/services/gemini_chat_model.py +0 -0
- {crewplus-0.2.27 → crewplus-0.2.29}/crewplus/services/tracing_manager.py +0 -0
- {crewplus-0.2.27 → crewplus-0.2.29}/crewplus/utils/__init__.py +0 -0
- {crewplus-0.2.27 → crewplus-0.2.29}/crewplus/utils/schema_action.py +0 -0
- {crewplus-0.2.27 → crewplus-0.2.29}/crewplus/utils/schema_document_updater.py +0 -0
- {crewplus-0.2.27 → crewplus-0.2.29}/crewplus/vectorstores/milvus/__init__.py +0 -0
- {crewplus-0.2.27 → crewplus-0.2.29}/crewplus/vectorstores/milvus/milvus_schema_manager.py +0 -0
- {crewplus-0.2.27 → crewplus-0.2.29}/crewplus/vectorstores/milvus/schema_milvus.py +0 -0
- {crewplus-0.2.27 → crewplus-0.2.29}/docs/GeminiChatModel.md +0 -0
- {crewplus-0.2.27 → crewplus-0.2.29}/docs/ModelLoadBalancer.md +0 -0
- {crewplus-0.2.27 → crewplus-0.2.29}/docs/VDBService.md +0 -0
- {crewplus-0.2.27 → crewplus-0.2.29}/docs/index.md +0 -0
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
Metadata-Version: 2.1
|
|
2
2
|
Name: crewplus
|
|
3
|
-
Version: 0.2.
|
|
3
|
+
Version: 0.2.29
|
|
4
4
|
Summary: Base services for CrewPlus AI applications
|
|
5
5
|
Author-Email: Tim Liu <tim@opsmateai.com>
|
|
6
6
|
License: MIT
|
|
@@ -14,6 +14,7 @@ Requires-Dist: langchain-openai==0.3.24
|
|
|
14
14
|
Requires-Dist: google-genai==1.21.1
|
|
15
15
|
Requires-Dist: langchain-milvus<0.3.0,>=0.2.1
|
|
16
16
|
Requires-Dist: langfuse<4.0.0,>=3.1.3
|
|
17
|
+
Requires-Dist: langchain-mcp-adapters>=0.1.4
|
|
17
18
|
Description-Content-Type: text/markdown
|
|
18
19
|
|
|
19
20
|
# CrewPlus
|
|
@@ -34,16 +35,17 @@ This repository, `crewplus-base`, contains the core `crewplus` Python package. I
|
|
|
34
35
|
CrewPlus is designed as a modular and extensible ecosystem of packages. This allows you to adopt only the components you need for your specific use case.
|
|
35
36
|
|
|
36
37
|
- **`crewplus` (This package):** The core package containing foundational services for chat, model load balancing, and vector stores.
|
|
37
|
-
- **`crewplus-
|
|
38
|
+
- **`crewplus-agent`:** The CrewPlus agent core: an agentic task planner and executor with context-aware memory.
|
|
38
39
|
- **`crewplus-ingestion`:** Provides robust pipelines for knowledge ingestion and data processing.
|
|
39
40
|
- **`crewplus-memory`:** Provides agent memory services for Crewplus AI Agents.
|
|
40
41
|
- **`crewplus-integrations`:** A collection of third-party integrations to connect CrewPlus with other services and platforms.
|
|
41
42
|
|
|
42
43
|
## Features
|
|
43
44
|
|
|
44
|
-
- **Chat Services:** A unified interface for interacting with various chat models (e.g., `GeminiChatModel`).
|
|
45
|
+
- **Chat Services:** A unified interface for interacting with various chat models (e.g., `GeminiChatModel`, `TracedAzureChatOpenAI`).
|
|
45
46
|
- **Model Load Balancer:** Intelligently distribute requests across multiple LLM endpoints.
|
|
46
47
|
- **Vector DB Services:** Work with popular vector stores (e.g., Milvus, Zilliz Cloud) for retrieval-augmented generation (RAG) and agent memory.
|
|
48
|
+
- **Observability & Tracing:** Automatic integration with tracing tools like Langfuse, with an extensible design for adding others (e.g., Helicone, ...).
|
|
47
49
|
|
|
48
50
|
|
|
49
51
|
## Documentation
|
|
@@ -92,15 +94,17 @@ crewplus-base/ # GitHub repo name
|
|
|
92
94
|
│ └── services/
|
|
93
95
|
│ └── __init__.py
|
|
94
96
|
│ └── gemini_chat_model.py
|
|
97
|
+
│ └── azure_chat_model.py
|
|
95
98
|
│ └── model_load_balancer.py
|
|
99
|
+
│ └── tracing_manager.py
|
|
96
100
|
│ └── ...
|
|
97
101
|
│ └── vectorstores/milvus
|
|
98
102
|
│ └── __init__.py
|
|
99
103
|
│ └── schema_milvus.py
|
|
100
104
|
│ └── vdb_service.py
|
|
101
|
-
│ └──
|
|
105
|
+
│ └── utils/
|
|
102
106
|
│ └── __init__.py
|
|
103
|
-
│ └──
|
|
107
|
+
│ └── schema_action.py
|
|
104
108
|
│ └── ...
|
|
105
109
|
├── tests/
|
|
106
110
|
│ └── ...
|
|
@@ -16,16 +16,17 @@ This repository, `crewplus-base`, contains the core `crewplus` Python package. I
|
|
|
16
16
|
CrewPlus is designed as a modular and extensible ecosystem of packages. This allows you to adopt only the components you need for your specific use case.
|
|
17
17
|
|
|
18
18
|
- **`crewplus` (This package):** The core package containing foundational services for chat, model load balancing, and vector stores.
|
|
19
|
-
- **`crewplus-
|
|
19
|
+
- **`crewplus-agent`:** The CrewPlus agent core: an agentic task planner and executor with context-aware memory.
|
|
20
20
|
- **`crewplus-ingestion`:** Provides robust pipelines for knowledge ingestion and data processing.
|
|
21
21
|
- **`crewplus-memory`:** Provides agent memory services for Crewplus AI Agents.
|
|
22
22
|
- **`crewplus-integrations`:** A collection of third-party integrations to connect CrewPlus with other services and platforms.
|
|
23
23
|
|
|
24
24
|
## Features
|
|
25
25
|
|
|
26
|
-
- **Chat Services:** A unified interface for interacting with various chat models (e.g., `GeminiChatModel`).
|
|
26
|
+
- **Chat Services:** A unified interface for interacting with various chat models (e.g., `GeminiChatModel`, `TracedAzureChatOpenAI`).
|
|
27
27
|
- **Model Load Balancer:** Intelligently distribute requests across multiple LLM endpoints.
|
|
28
28
|
- **Vector DB Services:** Work with popular vector stores (e.g., Milvus, Zilliz Cloud) for retrieval-augmented generation (RAG) and agent memory.
|
|
29
|
+
- **Observability & Tracing:** Automatic integration with tracing tools like Langfuse, with an extensible design for adding others (e.g., Helicone, ...).
|
|
29
30
|
|
|
30
31
|
|
|
31
32
|
## Documentation
|
|
@@ -74,15 +75,17 @@ crewplus-base/ # GitHub repo name
|
|
|
74
75
|
│ └── services/
|
|
75
76
|
│ └── __init__.py
|
|
76
77
|
│ └── gemini_chat_model.py
|
|
78
|
+
│ └── azure_chat_model.py
|
|
77
79
|
│ └── model_load_balancer.py
|
|
80
|
+
│ └── tracing_manager.py
|
|
78
81
|
│ └── ...
|
|
79
82
|
│ └── vectorstores/milvus
|
|
80
83
|
│ └── __init__.py
|
|
81
84
|
│ └── schema_milvus.py
|
|
82
85
|
│ └── vdb_service.py
|
|
83
|
-
│ └──
|
|
86
|
+
│ └── utils/
|
|
84
87
|
│ └── __init__.py
|
|
85
|
-
│ └──
|
|
88
|
+
│ └── schema_action.py
|
|
86
89
|
│ └── ...
|
|
87
90
|
├── tests/
|
|
88
91
|
│ └── ...
|
|
@@ -0,0 +1,37 @@
|
|
|
1
|
+
import os

from .model_load_balancer import ModelLoadBalancer

# Module-level singleton; stays None until init_load_balancer() succeeds.
model_balancer = None


def init_load_balancer(config_path: str = None):
    """Initialize the global ModelLoadBalancer singleton.

    Idempotent: once the balancer exists, further calls are no-ops. The
    global is assigned only after the configuration loads successfully,
    so a failed attempt leaves the module ready for a later retry.
    """
    global model_balancer
    if model_balancer is not None:
        return  # already initialized

    # Resolution order: explicit argument > MODEL_CONFIG_PATH env var > default path.
    resolved_path = config_path or os.getenv(
        "MODEL_CONFIG_PATH",
        "config/models_config.json"
    )
    try:
        # Build and configure a local instance first; only publish it on success.
        candidate = ModelLoadBalancer(resolved_path)
        candidate.load_config()
    except Exception as exc:
        # model_balancer is still None here, so initialization can be retried.
        raise RuntimeError(f"Failed to initialize and configure ModelLoadBalancer from {resolved_path}: {exc}") from exc
    model_balancer = candidate


def get_model_balancer() -> ModelLoadBalancer:
    """Return the initialized global balancer; raise if init_load_balancer() has not run."""
    if model_balancer is None:
        raise RuntimeError("ModelLoadBalancer not initialized. Please call init_load_balancer() first.")
    return model_balancer
|
|
@@ -76,7 +76,7 @@ class ModelLoadBalancer:
|
|
|
76
76
|
self.logger.error(f"Failed to load model configuration: {e}", exc_info=True)
|
|
77
77
|
raise RuntimeError(f"Failed to load model configuration: {e}")
|
|
78
78
|
|
|
79
|
-
def get_model(self, provider: str = None, model_type: str = None, deployment_name: str = None):
|
|
79
|
+
def get_model(self, provider: str = None, model_type: str = None, deployment_name: str = None, with_metadata: bool = False):
|
|
80
80
|
"""
|
|
81
81
|
Get a model instance.
|
|
82
82
|
|
|
@@ -104,7 +104,11 @@ class ModelLoadBalancer:
|
|
|
104
104
|
for model_config in self.models_config:
|
|
105
105
|
if model_config.get('deployment_name') == deployment_name:
|
|
106
106
|
model_id = model_config['id']
|
|
107
|
-
|
|
107
|
+
model = self.models[model_id]
|
|
108
|
+
if with_metadata:
|
|
109
|
+
return model, deployment_name
|
|
110
|
+
return model
|
|
111
|
+
|
|
108
112
|
self.logger.error(f"No model found for deployment name: {deployment_name}")
|
|
109
113
|
raise ValueError(f"No model found for deployment name: {deployment_name}")
|
|
110
114
|
|
|
@@ -116,7 +120,10 @@ class ModelLoadBalancer:
|
|
|
116
120
|
|
|
117
121
|
selected_model_config = self._round_robin_selection(candidates)
|
|
118
122
|
model_id = selected_model_config['id']
|
|
119
|
-
|
|
123
|
+
model = self.models[model_id]
|
|
124
|
+
if with_metadata:
|
|
125
|
+
return model, selected_model_config.get('deployment_name')
|
|
126
|
+
return model
|
|
120
127
|
|
|
121
128
|
raise ValueError("Either 'deployment_name' or both 'provider' and 'model_type' must be provided.")
|
|
122
129
|
|
|
@@ -10,6 +10,7 @@ from langchain_milvus import Milvus
|
|
|
10
10
|
from langchain_core.embeddings import Embeddings
|
|
11
11
|
from langchain_openai import AzureOpenAIEmbeddings
|
|
12
12
|
from pymilvus import MilvusClient
|
|
13
|
+
import time
|
|
13
14
|
|
|
14
15
|
from ...services.init_services import get_model_balancer
|
|
15
16
|
from .schema_milvus import SchemaMilvus, DEFAULT_SCHEMA
|
|
@@ -361,10 +362,9 @@ class VDBService(object):
|
|
|
361
362
|
"params": {}
|
|
362
363
|
}
|
|
363
364
|
|
|
364
|
-
vdb =
|
|
365
|
-
embedding_function=embeddings,
|
|
365
|
+
vdb = self._create_milvus_instance_with_retry(
|
|
366
366
|
collection_name=collection_name,
|
|
367
|
-
|
|
367
|
+
embeddings=embeddings,
|
|
368
368
|
index_params=index_params
|
|
369
369
|
)
|
|
370
370
|
|
|
@@ -373,6 +373,32 @@ class VDBService(object):
|
|
|
373
373
|
|
|
374
374
|
return vdb
|
|
375
375
|
|
|
376
|
+
def _create_milvus_instance_with_retry(self, collection_name: str, embeddings: Embeddings, index_params: dict) -> Milvus:
|
|
377
|
+
"""
|
|
378
|
+
Creates a Milvus instance with a retry mechanism for connection failures.
|
|
379
|
+
"""
|
|
380
|
+
retries = 2
|
|
381
|
+
for attempt in range(retries + 1):
|
|
382
|
+
try:
|
|
383
|
+
vdb = Milvus(
|
|
384
|
+
embedding_function=embeddings,
|
|
385
|
+
collection_name=collection_name,
|
|
386
|
+
connection_args=self.connection_args,
|
|
387
|
+
index_params=index_params
|
|
388
|
+
)
|
|
389
|
+
self.logger.info(f"Successfully connected to Milvus for collection '{collection_name}' on attempt {attempt + 1}.")
|
|
390
|
+
return vdb # Return on success
|
|
391
|
+
except Exception as e:
|
|
392
|
+
self.logger.warning(
|
|
393
|
+
f"Attempt {attempt + 1}/{retries + 1} to connect to Milvus for collection '{collection_name}' failed: {e}"
|
|
394
|
+
)
|
|
395
|
+
if attempt < retries:
|
|
396
|
+
self.logger.info("Retrying in 3 seconds...")
|
|
397
|
+
time.sleep(3)
|
|
398
|
+
else:
|
|
399
|
+
self.logger.error(f"Failed to connect to Milvus for collection '{collection_name}' after {retries + 1} attempts.")
|
|
400
|
+
raise RuntimeError(f"Could not connect to Milvus after {retries + 1} attempts.") from e
|
|
401
|
+
|
|
376
402
|
def drop_collection(self, collection_name: str) -> None:
|
|
377
403
|
"""
|
|
378
404
|
Deletes a collection from the vector database and removes it from the cache.
|
|
@@ -6,7 +6,7 @@ build-backend = "pdm.backend"
|
|
|
6
6
|
|
|
7
7
|
[project]
|
|
8
8
|
name = "crewplus"
|
|
9
|
-
version = "0.2.
|
|
9
|
+
version = "0.2.29"
|
|
10
10
|
description = "Base services for CrewPlus AI applications"
|
|
11
11
|
authors = [
|
|
12
12
|
{ name = "Tim Liu", email = "tim@opsmateai.com" },
|
|
@@ -19,6 +19,7 @@ dependencies = [
|
|
|
19
19
|
"google-genai==1.21.1",
|
|
20
20
|
"langchain-milvus (>=0.2.1,<0.3.0)",
|
|
21
21
|
"langfuse (>=3.1.3,<4.0.0)",
|
|
22
|
+
"langchain-mcp-adapters>=0.1.4",
|
|
22
23
|
]
|
|
23
24
|
|
|
24
25
|
[project.license]
|
|
@@ -1,20 +0,0 @@
|
|
|
1
|
-
import os
from .model_load_balancer import ModelLoadBalancer

# Module-level singleton; None until init_load_balancer() runs.
model_balancer = None

def init_load_balancer(config_path: str = None):
    """Initialize the global ModelLoadBalancer instance (no-op if already set).

    NOTE(review): the global is assigned before load_config() completes, so a
    failed load leaves a partially-initialized balancer published — confirm
    whether callers rely on retrying initialization after a failure.
    """
    global model_balancer
    if model_balancer is None:
        # Use parameter if provided, otherwise check env var, then default
        final_config_path = config_path or os.getenv(
            "MODEL_CONFIG_PATH",
            "config/models_config.json" # Fixed default path
        )
        model_balancer = ModelLoadBalancer(final_config_path)
        model_balancer.load_config() # Load initial configuration synchronously

def get_model_balancer() -> ModelLoadBalancer:
    """Return the initialized global balancer; raise if init_load_balancer() has not run."""
    if model_balancer is None:
        raise RuntimeError("ModelLoadBalancer not initialized")
    return model_balancer
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|