crewplus 0.2.34__py3-none-any.whl → 0.2.37__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of crewplus might be problematic; consult the registry's advisory page for more details.

@@ -294,6 +294,7 @@ class GeminiChatModel(BaseChatModel):
294
294
  if creds is None:
295
295
  # Get service account file from env if not provided
296
296
  sa_file = self.service_account_file or os.getenv("GCP_SERVICE_ACCOUNT_FILE")
297
+ self.logger.debug(f"Service account file: {sa_file}")
297
298
  if sa_file:
298
299
  try:
299
300
  creds = service_account.Credentials.from_service_account_file(
@@ -307,15 +308,21 @@ class GeminiChatModel(BaseChatModel):
307
308
 
308
309
  # If creds is still None, the client will use Application Default Credentials (ADC).
309
310
 
310
- self._client = genai.Client(
311
- project=self.project_id,
312
- location=self.location,
313
- credentials=creds,
314
- )
315
- self.logger.info(
316
- f"Initialized GeminiChatModel with model: {self.model_name} for Vertex AI "
317
- f"(Project: {self.project_id}, Location: {self.location})"
318
- )
311
+ try:
312
+ self._client = genai.Client(
313
+ vertexai=True,
314
+ project=self.project_id,
315
+ location=self.location,
316
+ credentials=creds,
317
+ )
318
+ self.logger.info(
319
+ f"Initialized GeminiChatModel with model: {self.model_name} for Vertex AI "
320
+ f"(Project: {self.project_id}, Location: {self.location})"
321
+ )
322
+ except Exception as e:
323
+ error_msg = f"Failed to initialize GenAI Client for Vertex AI: {e}"
324
+ self.logger.error(error_msg, exc_info=True)
325
+ raise ValueError(error_msg)
319
326
 
320
327
  def get_model_identifier(self) -> str:
321
328
  """Return a string identifying this model for tracing and logging."""
@@ -1,15 +1,27 @@
1
1
  import os
2
+ import logging
3
+ from typing import Optional
2
4
  from .model_load_balancer import ModelLoadBalancer
3
5
 
4
6
  model_balancer = None
5
7
 
6
- def init_load_balancer(config_path: str = None):
8
+ def init_load_balancer(
9
+ config_path: Optional[str] = None,
10
+ logger: Optional[logging.Logger] = None
11
+ ):
7
12
  """
8
13
  Initializes the global ModelLoadBalancer instance.
9
14
 
10
15
  This function is idempotent. If the balancer is already initialized,
11
16
  it does nothing. It follows a safe initialization pattern where the
12
17
  global instance is only assigned after successful configuration loading.
18
+
19
+ Args:
20
+ config_path (Optional[str]): The path to the model configuration file.
21
+ If not provided, it's determined by the `MODEL_CONFIG_PATH`
22
+ environment variable, or defaults to "config/models_config.json".
23
+ logger (Optional[logging.Logger]): An optional logger instance to be
24
+ used by the model balancer.
13
25
  """
14
26
  global model_balancer
15
27
  if model_balancer is None:
@@ -20,7 +32,10 @@ def init_load_balancer(config_path: str = None):
20
32
  )
21
33
  try:
22
34
  # 1. Create a local instance first.
23
- balancer = ModelLoadBalancer(final_config_path)
35
+ balancer = ModelLoadBalancer(
36
+ config_path=final_config_path,
37
+ logger=logger
38
+ )
24
39
  # 2. Attempt to load its configuration.
25
40
  balancer.load_config()
26
41
  # 3. Only assign to the global variable on full success.
@@ -1,6 +1,7 @@
1
1
  import json
2
2
  import random
3
3
  import logging
4
+ import threading
4
5
  from typing import Dict, List, Optional, Union
5
6
  from collections import defaultdict
6
7
  from langchain_openai import ChatOpenAI, AzureOpenAIEmbeddings
@@ -31,7 +32,7 @@ class ModelLoadBalancer:
31
32
  self.config_data = config_data
32
33
  self.logger = logger or logging.getLogger(__name__)
33
34
  self.models_config: List[Dict] = []
34
- self.models: Dict[int, Union[TracedAzureChatOpenAI, ChatOpenAI, AzureOpenAIEmbeddings, GeminiChatModel]] = {}
35
+ self.thread_local = threading.local()
35
36
  self._initialize_state()
36
37
  self._config_loaded = False # Flag to check if config is loaded
37
38
 
@@ -60,15 +61,6 @@ class ModelLoadBalancer:
60
61
 
61
62
  self.models_config = config['models']
62
63
 
63
- # Instantiate models
64
- for model_config in self.models_config:
65
- model_id = model_config['id']
66
- model_instance = self._instantiate_model(model_config)
67
- if model_instance is not None:
68
- self.models[model_id] = model_instance
69
- else:
70
- self.logger.warning(f"Model with id {model_id} was not loaded due to instantiation error.")
71
-
72
64
  self._config_loaded = True
73
65
  self.logger.debug("Model balancer: configuration loaded successfully.")
74
66
  except (FileNotFoundError, json.JSONDecodeError, ValueError) as e:
@@ -105,8 +97,7 @@ class ModelLoadBalancer:
105
97
  if deployment_name:
106
98
  for model_config in self.models_config:
107
99
  if model_config.get('deployment_name') == deployment_name:
108
- model_id = model_config['id']
109
- model = self.models[model_id]
100
+ model = self._get_or_create_model(model_config)
110
101
  if with_metadata:
111
102
  return model, deployment_name
112
103
  return model
@@ -130,14 +121,33 @@ class ModelLoadBalancer:
130
121
  self.logger.warning(f"Unsupported selection strategy: '{selection_strategy}'. Defaulting to 'random'.")
131
122
  selected_model_config = self._random_selection(candidates)
132
123
 
133
- model_id = selected_model_config['id']
134
- model = self.models[model_id]
124
+ model = self._get_or_create_model(selected_model_config)
135
125
  if with_metadata:
136
126
  return model, selected_model_config.get('deployment_name')
137
127
  return model
138
128
 
139
129
  raise ValueError("Either 'deployment_name' or both 'provider' and 'model_type' must be provided.")
140
130
 
131
+ def _get_thread_local_models_cache(self) -> Dict:
132
+ """Gets the model cache for the current thread, creating it if it doesn't exist."""
133
+ if not hasattr(self.thread_local, 'models_cache'):
134
+ self.thread_local.models_cache = {}
135
+ return self.thread_local.models_cache
136
+
137
+ def _get_or_create_model(self, model_config: Dict):
138
+ """
139
+ Gets a model instance from the thread-local cache. If it doesn't exist,
140
+ it instantiates, caches, and returns it.
141
+ """
142
+ model_id = model_config['id']
143
+ models_cache = self._get_thread_local_models_cache()
144
+
145
+ if model_id not in models_cache:
146
+ self.logger.debug(f"Creating new model instance for id {model_id} in thread {threading.get_ident()}")
147
+ models_cache[model_id] = self._instantiate_model(model_config)
148
+
149
+ return models_cache[model_id]
150
+
141
151
  def _instantiate_model(self, model_config: Dict):
142
152
  """Instantiate and return an LLM object based on the model configuration"""
143
153
  provider = model_config['provider']
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: crewplus
3
- Version: 0.2.34
3
+ Version: 0.2.37
4
4
  Summary: Base services for CrewPlus AI applications
5
5
  Author-Email: Tim Liu <tim@opsmateai.com>
6
6
  License: MIT
@@ -1,13 +1,13 @@
1
- crewplus-0.2.34.dist-info/METADATA,sha256=UNWEOkl43TfQMN5AtWXFlMKy2Fdy7zBv6oKfN8_y_qM,5327
2
- crewplus-0.2.34.dist-info/WHEEL,sha256=9P2ygRxDrTJz3gsagc0Z96ukrxjr-LFBGOgv3AuKlCA,90
3
- crewplus-0.2.34.dist-info/entry_points.txt,sha256=6OYgBcLyFCUgeqLgnvMyOJxPCWzgy7se4rLPKtNonMs,34
4
- crewplus-0.2.34.dist-info/licenses/LICENSE,sha256=2_NHSHRTKB_cTcT_GXgcenOCtIZku8j343mOgAguTfc,1087
1
+ crewplus-0.2.37.dist-info/METADATA,sha256=BzOLmuTMYx92TsT05qZXnQVBRhJO6t6XMYbLDh3JqJc,5327
2
+ crewplus-0.2.37.dist-info/WHEEL,sha256=9P2ygRxDrTJz3gsagc0Z96ukrxjr-LFBGOgv3AuKlCA,90
3
+ crewplus-0.2.37.dist-info/entry_points.txt,sha256=6OYgBcLyFCUgeqLgnvMyOJxPCWzgy7se4rLPKtNonMs,34
4
+ crewplus-0.2.37.dist-info/licenses/LICENSE,sha256=2_NHSHRTKB_cTcT_GXgcenOCtIZku8j343mOgAguTfc,1087
5
5
  crewplus/__init__.py,sha256=m46HkZL1Y4toD619NL47Sn2Qe084WFFSFD7e6VoYKZc,284
6
6
  crewplus/services/__init__.py,sha256=V1CG8b2NOmRzNgQH7BPl4KVxWSYJH5vfEsW1wVErKNE,375
7
7
  crewplus/services/azure_chat_model.py,sha256=WMSf4BDO8UcP7ZASNGRJxdTEnuWBmCRSY_4yx_VMbok,5499
8
- crewplus/services/gemini_chat_model.py,sha256=XlJeaic1lq31lMx1EPKCe4AlYaHDR57tXOdu8V8j6dU,39623
9
- crewplus/services/init_services.py,sha256=EBpDkIwzuujmdlqjyWvdLQCfhQmfS_OKFz-9Ji8nmAU,1628
10
- crewplus/services/model_load_balancer.py,sha256=PU3wn8lh6pGeVFn62SURW_1lIGXbUnAklM1EWcJMhLU,11752
8
+ crewplus/services/gemini_chat_model.py,sha256=2M0kXbjnPfQm3O9BZaGzJFlHF0pBdAFR3kIRS1bALoI,39983
9
+ crewplus/services/init_services.py,sha256=7oZ1GmesK32EDB_DYnTzW17MEpXjXK41_U_1pmqu_m4,2183
10
+ crewplus/services/model_load_balancer.py,sha256=Q9Gx3GrbKworU-Ytxeqp0ggHSgZ1Q6brtTk-nCl4sak,12095
11
11
  crewplus/services/tracing_manager.py,sha256=aCU9N4Jvh8pDD3h8kWX4O-Ax8xwdLHnQ4wJ3sf-vLwA,6289
12
12
  crewplus/utils/__init__.py,sha256=2Gk1n5srFJQnFfBuYTxktdtKOVZyNrFcNaZKhXk35Pw,142
13
13
  crewplus/utils/schema_action.py,sha256=GDaBoVFQD1rXqrLVSMTfXYW1xcUu7eDcHsn57XBSnIg,422
@@ -20,4 +20,4 @@ docs/GeminiChatModel.md,sha256=zZYyl6RmjZTUsKxxMiC9O4yV70MC4TD-IGUmWhIDBKA,8677
20
20
  docs/ModelLoadBalancer.md,sha256=aGHES1dcXPz4c7Y8kB5-vsCNJjriH2SWmjBkSGoYKiI,4398
21
21
  docs/VDBService.md,sha256=Dw286Rrf_fsi13jyD3Bo4Sy7nZ_G7tYm7d8MZ2j9hxk,9375
22
22
  docs/index.md,sha256=3tlc15uR8lzFNM5WjdoZLw0Y9o1P1gwgbEnOdIBspqc,1643
23
- crewplus-0.2.34.dist-info/RECORD,,
23
+ crewplus-0.2.37.dist-info/RECORD,,