trustgraph-vertexai 1.2.3__tar.gz → 1.2.5__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of trustgraph-vertexai might be problematic. Click here for more details.

@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: trustgraph-vertexai
3
- Version: 1.2.3
3
+ Version: 1.2.5
4
4
  Summary: TrustGraph provides a flexible means to run a pipeline of AI processing components, composing them into a complete processing pipeline.
5
5
  Author-email: "trustgraph.ai" <security@trustgraph.ai>
6
6
  Project-URL: Homepage, https://github.com/trustgraph-ai/trustgraph
@@ -19,6 +19,7 @@ Google Cloud. Input is prompt, output is response.
19
19
  from google.oauth2 import service_account
20
20
  import google
21
21
  import vertexai
22
+ import logging
22
23
 
23
24
  # Why is preview here?
24
25
  from vertexai.generative_models import (
@@ -29,6 +30,9 @@ from vertexai.generative_models import (
29
30
  from .... exceptions import TooManyRequests
30
31
  from .... base import LlmService, LlmResult
31
32
 
33
+ # Module logger
34
+ logger = logging.getLogger(__name__)
35
+
32
36
  default_ident = "text-completion"
33
37
 
34
38
  default_model = 'gemini-2.0-flash-001'
@@ -91,7 +95,7 @@ class Processor(LlmService):
91
95
  ),
92
96
  ]
93
97
 
94
- print("Initialise VertexAI...", flush=True)
98
+ logger.info("Initializing VertexAI...")
95
99
 
96
100
  if private_key:
97
101
  credentials = (
@@ -113,11 +117,11 @@ class Processor(LlmService):
113
117
  location=region
114
118
  )
115
119
 
116
- print(f"Initialise model {model}", flush=True)
120
+ logger.info(f"Initializing model {model}")
117
121
  self.llm = GenerativeModel(model)
118
122
  self.model = model
119
123
 
120
- print("Initialisation complete", flush=True)
124
+ logger.info("VertexAI initialization complete")
121
125
 
122
126
  async def generate_content(self, system, prompt):
123
127
 
@@ -137,16 +141,16 @@ class Processor(LlmService):
137
141
  model = self.model
138
142
  )
139
143
 
140
- print(f"Input Tokens: {resp.in_token}", flush=True)
141
- print(f"Output Tokens: {resp.out_token}", flush=True)
144
+ logger.info(f"Input Tokens: {resp.in_token}")
145
+ logger.info(f"Output Tokens: {resp.out_token}")
142
146
 
143
- print("Send response...", flush=True)
147
+ logger.debug("Send response...")
144
148
 
145
149
  return resp
146
150
 
147
151
  except google.api_core.exceptions.ResourceExhausted as e:
148
152
 
149
- print("Hit rate limit:", e, flush=True)
153
+ logger.warning(f"Hit rate limit: {e}")
150
154
 
151
155
  # Leave rate limit retries to the base handler
152
156
  raise TooManyRequests()
@@ -154,7 +158,7 @@ class Processor(LlmService):
154
158
  except Exception as e:
155
159
 
156
160
  # Apart from rate limits, treat all exceptions as unrecoverable
157
- print(f"Exception: {e}")
161
+ logger.error(f"VertexAI LLM exception: {e}", exc_info=True)
158
162
  raise e
159
163
 
160
164
  @staticmethod
@@ -0,0 +1 @@
1
+ __version__ = "1.2.5"
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: trustgraph-vertexai
3
- Version: 1.2.3
3
+ Version: 1.2.5
4
4
  Summary: TrustGraph provides a flexible means to run a pipeline of AI processing components, composing them into a complete processing pipeline.
5
5
  Author-email: "trustgraph.ai" <security@trustgraph.ai>
6
6
  Project-URL: Homepage, https://github.com/trustgraph-ai/trustgraph
@@ -1 +0,0 @@
1
- __version__ = "1.2.3"