crewplus 0.2.4__tar.gz → 0.2.5__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of crewplus might be problematic.

@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: crewplus
-Version: 0.2.4
+Version: 0.2.5
 Summary: Base services for CrewPlus AI applications
 Author-Email: Tim Liu <tim@opsmateai.com>
 License: MIT
@@ -3,11 +3,15 @@ from crewplus.services.model_load_balancer import ModelLoadBalancer
 
 model_balancer = None
 
-def init_load_balancer():
+def init_load_balancer(config_path: str = None):
     global model_balancer
     if model_balancer is None:
-        config_path = os.getenv("MODEL_CONFIG_PATH", "config/models_config.json")
-        model_balancer = ModelLoadBalancer(config_path)
+        # Use parameter if provided, otherwise check env var, then default
+        final_config_path = config_path or os.getenv(
+            "MODEL_CONFIG_PATH",
+            "config/models_config.json"  # Fixed default path
+        )
+        model_balancer = ModelLoadBalancer(final_config_path)
     model_balancer.load_config()  # Load initial configuration synchronously
 
 def get_model_balancer() -> ModelLoadBalancer:
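
For reference, the new `config_path` parameter changes how the configuration file is located: an explicit argument now takes precedence over the `MODEL_CONFIG_PATH` environment variable, which in turn falls back to the packaged default. A minimal sketch of that precedence, using a hypothetical `resolve_config_path` helper that is not part of crewplus:

```python
import os

def resolve_config_path(config_path: str = None) -> str:
    """Illustrative only: mirrors the precedence used by init_load_balancer()."""
    # 1) explicit argument, 2) MODEL_CONFIG_PATH env var, 3) built-in default
    return config_path or os.getenv("MODEL_CONFIG_PATH", "config/models_config.json")

# With MODEL_CONFIG_PATH unset:
print(resolve_config_path("custom/models.json"))  # -> custom/models.json
print(resolve_config_path())                      # -> config/models_config.json
```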
@@ -197,7 +197,7 @@ class VDBService(object):
         """
         return self._client
 
-    def get_embeddings(self, from_model_balancer: bool = False, model_type: Optional[str] = "embedding-large") -> Embeddings:
+    def get_embeddings(self, from_model_balancer: bool = False, provider: Optional[str] = "azure-openai", model_type: Optional[str] = "embedding-large") -> Embeddings:
         """
         Gets an embedding function, either from the model balancer or directly from settings.
 
@@ -211,7 +211,7 @@ class VDBService(object):
         """
         if from_model_balancer:
             model_balancer = get_model_balancer()
-            return model_balancer.get_model(model_type=model_type)
+            return model_balancer.get_model(provider=provider, model_type=model_type)
 
         embedder_config = self.settings.get("embedder")
         if not embedder_config:
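
Taken together, the two hunks above mean that balancer-backed embedding lookups are now keyed by provider as well as model type. A short sketch of how a call site might look after upgrading; it assumes a `vdb_service` instance and an initialized `ModelLoadBalancer`, as in the README excerpts further down in this diff:

```python
# Assumes vdb_service is a VDBService and the ModelLoadBalancer has been
# initialized via init_load_balancer(); see the README changes below.
embedding_model = vdb_service.get_embeddings(
    from_model_balancer=True,
    provider="azure-openai",       # new in 0.2.5: select the configured provider
    model_type="embedding-large",  # model type configured in the balancer
)
```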
@@ -88,3 +88,47 @@ The model `o3mini` is identified by the deployment name `gpt-o3mini-eastus2-RPM2
 ```python
 o3mini_model = balancer.get_model(deployment_name="gpt-o3mini-eastus2-RPM25")
 ```
+
+## 5. Global Access with `init_load_balancer`
+
+The `init_load_balancer` function provides a convenient singleton pattern for accessing the `ModelLoadBalancer` throughout your application without passing the instance around.
+
+First, initialize the balancer once at the start of your application.
+
+### Initialization
+
+You can initialize it in several ways:
+
+**1. Default Initialization**
+
+This will look for the `MODEL_CONFIG_PATH` environment variable, or use the default path `config/models_config.json`.
+
+```python
+from crewplus.services.init_services import init_load_balancer
+
+init_load_balancer()
+```
+
+**2. Initialization with a Custom Path**
+
+You can also provide a direct path to your configuration file.
+
+```python
+from crewplus.services.init_services import init_load_balancer
+
+init_load_balancer(config_path="path/to/your/models_config.json")
+```
+
+### Getting the Balancer and Models
+
+Once initialized, you can retrieve the `ModelLoadBalancer` instance from anywhere in your code using `get_model_balancer`.
+
+```python
+from crewplus.services.init_services import get_model_balancer
+
+# Get the balancer instance
+balancer = get_model_balancer()
+
+# Get a model by deployment name
+gemini_flash_model = balancer.get_model(deployment_name="gemini-2.5-flash")
+```
@@ -60,6 +60,20 @@ vdb_service = VDBService(settings=settings)
 print("VDBService initialized successfully!")
 ```
 
+**Alternative Initialization for Zilliz**
+
+For a simpler Zilliz Cloud connection, you can initialize the service directly with your endpoint and token.
+
+```python
+# Initialize directly with Zilliz credentials
+vdb_service_zilliz = VDBService(
+    endpoint="YOUR_ZILLIZ_ENDPOINT",
+    token="YOUR_ZILLIZ_TOKEN"
+)
+
+print("VDBService for Zilliz initialized successfully!")
+```
+
 ## 3. Usage Examples
 
 ### Basic Usage: Get Vector Store with Default Embeddings
@@ -86,7 +100,8 @@ This requires the `ModelLoadBalancer` to have been initialized, as shown in the
 # 1. Get a specific embedding model from the ModelLoadBalancer
 # The service will call get_model_balancer() internally to get the initialized instance.
 embedding_model = vdb_service.get_embeddings(
-    from_model_balancer=True,
+    from_model_balancer=True,
+    provider="azure-openai",
     model_type="embedding-large"  # Specify the model type configured in the balancer
 )
 
@@ -6,7 +6,7 @@ build-backend = "pdm.backend"
 
 [project]
 name = "crewplus"
-version = "0.2.4"
+version = "0.2.5"
 description = "Base services for CrewPlus AI applications"
 authors = [
     { name = "Tim Liu", email = "tim@opsmateai.com" },