trustgraph-vertexai 1.3.19.tar.gz → 1.5.6.tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {trustgraph_vertexai-1.3.19 → trustgraph_vertexai-1.5.6}/PKG-INFO +2 -2
- {trustgraph_vertexai-1.3.19 → trustgraph_vertexai-1.5.6}/pyproject.toml +1 -1
- {trustgraph_vertexai-1.3.19 → trustgraph_vertexai-1.5.6}/trustgraph/model/text_completion/vertexai/llm.py +104 -66
- trustgraph_vertexai-1.5.6/trustgraph/vertexai_version.py +1 -0
- {trustgraph_vertexai-1.3.19 → trustgraph_vertexai-1.5.6}/trustgraph_vertexai.egg-info/PKG-INFO +2 -2
- {trustgraph_vertexai-1.3.19 → trustgraph_vertexai-1.5.6}/trustgraph_vertexai.egg-info/requires.txt +1 -1
- trustgraph_vertexai-1.3.19/trustgraph/vertexai_version.py +0 -1
- {trustgraph_vertexai-1.3.19 → trustgraph_vertexai-1.5.6}/README.md +0 -0
- {trustgraph_vertexai-1.3.19 → trustgraph_vertexai-1.5.6}/setup.cfg +0 -0
- {trustgraph_vertexai-1.3.19 → trustgraph_vertexai-1.5.6}/trustgraph/model/text_completion/vertexai/__init__.py +0 -0
- {trustgraph_vertexai-1.3.19 → trustgraph_vertexai-1.5.6}/trustgraph/model/text_completion/vertexai/__main__.py +0 -0
- {trustgraph_vertexai-1.3.19 → trustgraph_vertexai-1.5.6}/trustgraph_vertexai.egg-info/SOURCES.txt +0 -0
- {trustgraph_vertexai-1.3.19 → trustgraph_vertexai-1.5.6}/trustgraph_vertexai.egg-info/dependency_links.txt +0 -0
- {trustgraph_vertexai-1.3.19 → trustgraph_vertexai-1.5.6}/trustgraph_vertexai.egg-info/entry_points.txt +0 -0
- {trustgraph_vertexai-1.3.19 → trustgraph_vertexai-1.5.6}/trustgraph_vertexai.egg-info/top_level.txt +0 -0
{trustgraph_vertexai-1.3.19 → trustgraph_vertexai-1.5.6}/PKG-INFO

```diff
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: trustgraph-vertexai
-Version: 1.3.19
+Version: 1.5.6
 Summary: TrustGraph provides a means to run a pipeline of flexible AI processing components in a flexible means to achieve a processing pipeline.
 Author-email: "trustgraph.ai" <security@trustgraph.ai>
 Project-URL: Homepage, https://github.com/trustgraph-ai/trustgraph
@@ -8,7 +8,7 @@ Classifier: Programming Language :: Python :: 3
 Classifier: Operating System :: OS Independent
 Requires-Python: >=3.8
 Description-Content-Type: text/markdown
-Requires-Dist: trustgraph-base<1.
+Requires-Dist: trustgraph-base<1.6,>=1.5
 Requires-Dist: pulsar-client
 Requires-Dist: google-cloud-aiplatform
 Requires-Dist: prometheus-client
```
{trustgraph_vertexai-1.3.19 → trustgraph_vertexai-1.5.6}/pyproject.toml

```diff
@@ -10,7 +10,7 @@ description = "TrustGraph provides a means to run a pipeline of flexible AI proc
 readme = "README.md"
 requires-python = ">=3.8"
 dependencies = [
-    "trustgraph-base>=1.
+    "trustgraph-base>=1.5,<1.6",
     "pulsar-client",
     "google-cloud-aiplatform",
     "prometheus-client",
```
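Both dependency declarations now pin trustgraph-base to the 1.5 minor series (the old constraint is truncated in this diff view). As a quick sanity check of what the new specifier accepts, here is a minimal illustration using the third-party packaging library; it is not code from this package:

```python
from packaging.specifiers import SpecifierSet

# The new constraint from pyproject.toml: any 1.5.x release qualifies.
spec = SpecifierSet(">=1.5,<1.6")

print("1.5.6" in spec)   # True  - matches this released version
print("1.6.0" in spec)   # False - the next minor series is excluded
print("1.3.19" in spec)  # False - the old series no longer satisfies it
```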
{trustgraph_vertexai-1.3.19 → trustgraph_vertexai-1.5.6}/trustgraph/model/text_completion/vertexai/llm.py

```diff
@@ -18,6 +18,7 @@ Supports both Google's Gemini models and Anthropic's Claude models.
 
 from google.oauth2 import service_account
 import google.auth
+import google.api_core.exceptions
 import vertexai
 import logging
 
```
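The only change in this hunk is the new `google.api_core.exceptions` import, which suggests the service now distinguishes Google API errors such as quota exhaustion from other failures. The code that uses the import falls outside the hunks shown here, so the following is only a sketch of a typical pattern; `call_model` is a hypothetical stand-in:

```python
import google.api_core.exceptions

def call_with_error_mapping(call_model, prompt):
    """Hypothetical wrapper: map Vertex AI quota errors to a distinct failure."""
    try:
        return call_model(prompt)
    except google.api_core.exceptions.ResourceExhausted:
        # 429-style quota/rate-limit errors are worth surfacing separately,
        # e.g. so a caller can back off and retry.
        raise RuntimeError("Vertex AI quota exceeded; retry with backoff")
    except google.api_core.exceptions.GoogleAPICallError as e:
        # Any other Vertex AI API error.
        raise RuntimeError(f"Vertex AI call failed: {e}")
```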
```diff
@@ -59,8 +60,17 @@ class Processor(LlmService):
 
         super(Processor, self).__init__(**params)
 
-
-        self.
+        # Store default model and configuration parameters
+        self.default_model = model
+        self.region = region
+        self.temperature = temperature
+        self.max_output = max_output
+        self.private_key = private_key
+
+        # Model client caches
+        self.model_clients = {}  # Cache for model instances
+        self.generation_configs = {}  # Cache for generation configs (Gemini only)
+        self.anthropic_client = None  # Single Anthropic client (handles multiple models)
 
         # Shared parameters for both model types
         self.api_params = {
```
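The constructor no longer builds a single `self.llm` handle; it stores the defaults and sets up per-model caches so one service instance can serve several models. A minimal standalone sketch of the same lazy-cache pattern (`FakeModel` is illustrative, not part of the package):

```python
class FakeModel:
    """Stand-in for a vertexai GenerativeModel; illustration only."""
    def __init__(self, name):
        self.name = name

class ModelCache:
    def __init__(self):
        self.model_clients = {}  # model name -> client instance

    def get(self, model_name):
        # Create the client once per model name, then reuse it.
        if model_name not in self.model_clients:
            self.model_clients[model_name] = FakeModel(model_name)
        return self.model_clients[model_name]

cache = ModelCache()
a = cache.get("gemini-1.5-pro")
b = cache.get("gemini-1.5-pro")
assert a is b  # the second lookup reuses the cached instance
```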
```diff
@@ -89,75 +99,101 @@ class Processor(LlmService):
                 "Ensure it's set in your environment or service account."
             )
 
-        #
-
-
-
-
-
-
+        # Store credentials and project info for later use
+        self.credentials = credentials
+        self.project_id = project_id
+
+        # Initialize Vertex AI SDK for Gemini models
+        init_kwargs = {'location': region, 'project': project_id}
+        if credentials and private_key: # Pass credentials only if from a file
+            init_kwargs['credentials'] = credentials
+
+        vertexai.init(**init_kwargs)
+
+        # Pre-initialize Anthropic client if needed (single client handles all Claude models)
+        if 'claude' in self.default_model.lower():
+            self._get_anthropic_client()
+
+        # Safety settings for Gemini models
+        block_level = HarmBlockThreshold.BLOCK_ONLY_HIGH
+        self.safety_settings = [
+            SafetySetting(
+                category = HarmCategory.HARM_CATEGORY_HARASSMENT,
+                threshold = block_level,
+            ),
+            SafetySetting(
+                category = HarmCategory.HARM_CATEGORY_HATE_SPEECH,
+                threshold = block_level,
+            ),
+            SafetySetting(
+                category = HarmCategory.HARM_CATEGORY_SEXUALLY_EXPLICIT,
+                threshold = block_level,
+            ),
+            SafetySetting(
+                category = HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT,
+                threshold = block_level,
+            ),
+        ]
+
+        logger.info("VertexAI initialization complete")
+
+    def _get_anthropic_client(self):
+        """Get or create the Anthropic client (single client for all Claude models)"""
+        if self.anthropic_client is None:
+            logger.info(f"Initializing AnthropicVertex client")
+            anthropic_kwargs = {'region': self.region, 'project_id': self.project_id}
+            if self.credentials and self.private_key: # Pass credentials only if from a file
+                anthropic_kwargs['credentials'] = self.credentials
+                logger.debug(f"Using service account credentials for Anthropic models")
             else:
-                logger.debug(f"Using Application Default Credentials for Anthropic
-
-            self.llm = AnthropicVertex(**anthropic_kwargs)
-        else:
-            # For Gemini models, initialize the Vertex AI SDK
-            logger.info(f"Initializing Google model '{model}' via Vertex AI SDK")
-            init_kwargs = {'location': region, 'project': project_id}
-            if credentials and private_key: # Pass credentials only if from a file
-                init_kwargs['credentials'] = credentials
-
-            vertexai.init(**init_kwargs)
-
-            self.llm = GenerativeModel(model)
-
-            self.generation_config = GenerationConfig(
-                temperature=temperature,
-                top_p=1.0,
-                top_k=10,
-                candidate_count=1,
-                max_output_tokens=max_output,
-            )
+                logger.debug(f"Using Application Default Credentials for Anthropic models")
 
-
-        block_level = HarmBlockThreshold.BLOCK_ONLY_HIGH
-        # block_level = HarmBlockThreshold.BLOCK_NONE
-
-        self.safety_settings = [
-            SafetySetting(
-                category = HarmCategory.HARM_CATEGORY_HARASSMENT,
-                threshold = block_level,
-            ),
-            SafetySetting(
-                category = HarmCategory.HARM_CATEGORY_HATE_SPEECH,
-                threshold = block_level,
-            ),
-            SafetySetting(
-                category = HarmCategory.HARM_CATEGORY_SEXUALLY_EXPLICIT,
-                threshold = block_level,
-            ),
-            SafetySetting(
-                category = HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT,
-                threshold = block_level,
-            ),
-        ]
+            self.anthropic_client = AnthropicVertex(**anthropic_kwargs)
 
+        return self.anthropic_client
 
-
+    def _get_gemini_model(self, model_name, temperature=None):
+        """Get or create a Gemini model instance"""
+        if model_name not in self.model_clients:
+            logger.info(f"Creating GenerativeModel instance for '{model_name}'")
+            self.model_clients[model_name] = GenerativeModel(model_name)
 
-
+        # Use provided temperature or fall back to default
+        effective_temperature = temperature if temperature is not None else self.temperature
+
+        # Create generation config with the effective temperature
+        generation_config = GenerationConfig(
+            temperature=effective_temperature,
+            top_p=1.0,
+            top_k=10,
+            candidate_count=1,
+            max_output_tokens=self.max_output,
+        )
+
+        return self.model_clients[model_name], generation_config
+
+    async def generate_content(self, system, prompt, model=None, temperature=None):
+
+        # Use provided model or fall back to default
+        model_name = model or self.default_model
+        # Use provided temperature or fall back to default
+        effective_temperature = temperature if temperature is not None else self.temperature
+
+        logger.debug(f"Using model: {model_name}")
+        logger.debug(f"Using temperature: {effective_temperature}")
 
         try:
-            if 
+            if 'claude' in model_name.lower():
                 # Anthropic API uses a dedicated system prompt
-                logger.debug("Sending request to Anthropic model...")
-
-
+                logger.debug(f"Sending request to Anthropic model '{model_name}'...")
+                client = self._get_anthropic_client()
+
+                response = client.messages.create(
+                    model=model_name,
                     system=system,
                     messages=[{"role": "user", "content": prompt}],
                     max_tokens=self.api_params['max_output_tokens'],
-                    temperature=
+                    temperature=effective_temperature,
                     top_p=self.api_params['top_p'],
                     top_k=self.api_params['top_k'],
                 )
```
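`generate_content` now accepts per-request `model` and `temperature` overrides that fall back to the constructor defaults. The two fallback idioms differ deliberately: `model or self.default_model` treats `None` and the empty string as unset, while the temperature check compares against `None` so that an explicit `0.0` survives. A small illustration of why, using standalone values rather than the class (the defaults here are invented for the example):

```python
default_model = "gemini-1.5-pro"   # illustrative defaults, not from the diff
default_temperature = 0.7

def resolve(model=None, temperature=None):
    model_name = model or default_model
    # 'temperature or default' would be wrong: 0.0 is falsy and would be
    # silently replaced by the default. Compare against None instead.
    effective_temperature = temperature if temperature is not None else default_temperature
    return model_name, effective_temperature

print(resolve())                           # ('gemini-1.5-pro', 0.7)
print(resolve(temperature=0.0))            # ('gemini-1.5-pro', 0.0) - zero preserved
print(resolve(model="claude-3-5-sonnet"))  # ('claude-3-5-sonnet', 0.7)
```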
```diff
@@ -166,15 +202,17 @@ class Processor(LlmService):
                     text=response.content[0].text,
                     in_token=response.usage.input_tokens,
                     out_token=response.usage.output_tokens,
-                    model=
+                    model=model_name
                 )
             else:
                 # Gemini API combines system and user prompts
-                logger.debug("Sending request to Gemini model...")
+                logger.debug(f"Sending request to Gemini model '{model_name}'...")
                 full_prompt = system + "\n\n" + prompt
 
-
-
+                llm, generation_config = self._get_gemini_model(model_name, effective_temperature)
+
+                response = llm.generate_content(
+                    full_prompt, generation_config = generation_config,
                     safety_settings = self.safety_settings,
                 )
 
```
```diff
@@ -182,7 +220,7 @@ class Processor(LlmService):
                     text = response.text,
                     in_token = response.usage_metadata.prompt_token_count,
                     out_token = response.usage_metadata.candidates_token_count,
-                    model = 
+                    model = model_name
                 )
 
             logger.info(f"Input Tokens: {resp.in_token}")
```
```diff
@@ -238,4 +276,4 @@ class Processor(LlmService):
     )
 
 def run():
-    Processor.launch(default_ident, __doc__)
+    Processor.launch(default_ident, __doc__)
```
trustgraph_vertexai-1.5.6/trustgraph/vertexai_version.py (added)

```diff
@@ -0,0 +1 @@
+__version__ = "1.5.6"
```
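The release version is now stamped into a dedicated module. A consumer could read it either from that module or from the installed distribution metadata; a short illustration (the import path follows the file location above):

```python
# Read the stamped version from the module added in this release...
from trustgraph.vertexai_version import __version__
print(__version__)  # "1.5.6"

# ...or query the installed distribution metadata (stdlib, Python 3.8+).
from importlib.metadata import version
print(version("trustgraph-vertexai"))
```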
{trustgraph_vertexai-1.3.19 → trustgraph_vertexai-1.5.6}/trustgraph_vertexai.egg-info/PKG-INFO (renamed)

```diff
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: trustgraph-vertexai
-Version: 1.3.19
+Version: 1.5.6
 Summary: TrustGraph provides a means to run a pipeline of flexible AI processing components in a flexible means to achieve a processing pipeline.
 Author-email: "trustgraph.ai" <security@trustgraph.ai>
 Project-URL: Homepage, https://github.com/trustgraph-ai/trustgraph
@@ -8,7 +8,7 @@ Classifier: Programming Language :: Python :: 3
 Classifier: Operating System :: OS Independent
 Requires-Python: >=3.8
 Description-Content-Type: text/markdown
-Requires-Dist: trustgraph-base<1.
+Requires-Dist: trustgraph-base<1.6,>=1.5
 Requires-Dist: pulsar-client
 Requires-Dist: google-cloud-aiplatform
 Requires-Dist: prometheus-client
```
trustgraph_vertexai-1.3.19/trustgraph/vertexai_version.py (removed)

```diff
@@ -1 +0,0 @@
-__version__ = "1.3.19"
```
Renamed without changes:

- {trustgraph_vertexai-1.3.19 → trustgraph_vertexai-1.5.6}/README.md
- {trustgraph_vertexai-1.3.19 → trustgraph_vertexai-1.5.6}/setup.cfg
- {trustgraph_vertexai-1.3.19 → trustgraph_vertexai-1.5.6}/trustgraph/model/text_completion/vertexai/__init__.py
- {trustgraph_vertexai-1.3.19 → trustgraph_vertexai-1.5.6}/trustgraph/model/text_completion/vertexai/__main__.py
- {trustgraph_vertexai-1.3.19 → trustgraph_vertexai-1.5.6}/trustgraph_vertexai.egg-info/SOURCES.txt
- {trustgraph_vertexai-1.3.19 → trustgraph_vertexai-1.5.6}/trustgraph_vertexai.egg-info/dependency_links.txt
- {trustgraph_vertexai-1.3.19 → trustgraph_vertexai-1.5.6}/trustgraph_vertexai.egg-info/entry_points.txt
- {trustgraph_vertexai-1.3.19 → trustgraph_vertexai-1.5.6}/trustgraph_vertexai.egg-info/top_level.txt