trustgraph-vertexai 0.22.8__tar.gz → 0.22.10__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of trustgraph-vertexai might be problematic.
- {trustgraph-vertexai-0.22.8 → trustgraph-vertexai-0.22.10}/PKG-INFO +2 -2
- {trustgraph-vertexai-0.22.8 → trustgraph-vertexai-0.22.10}/trustgraph/model/text_completion/vertexai/llm.py +109 -29
- trustgraph-vertexai-0.22.10/trustgraph/vertexai_version.py +1 -0
- {trustgraph-vertexai-0.22.8 → trustgraph-vertexai-0.22.10}/trustgraph_vertexai.egg-info/PKG-INFO +2 -2
- trustgraph-vertexai-0.22.8/trustgraph/vertexai_version.py +0 -1
- {trustgraph-vertexai-0.22.8 → trustgraph-vertexai-0.22.10}/README.md +0 -0
- {trustgraph-vertexai-0.22.8 → trustgraph-vertexai-0.22.10}/scripts/text-completion-vertexai +0 -0
- {trustgraph-vertexai-0.22.8 → trustgraph-vertexai-0.22.10}/setup.cfg +0 -0
- {trustgraph-vertexai-0.22.8 → trustgraph-vertexai-0.22.10}/setup.py +0 -0
- {trustgraph-vertexai-0.22.8 → trustgraph-vertexai-0.22.10}/trustgraph/model/text_completion/vertexai/__init__.py +0 -0
- {trustgraph-vertexai-0.22.8 → trustgraph-vertexai-0.22.10}/trustgraph/model/text_completion/vertexai/__main__.py +0 -0
- {trustgraph-vertexai-0.22.8 → trustgraph-vertexai-0.22.10}/trustgraph_vertexai.egg-info/SOURCES.txt +0 -0
- {trustgraph-vertexai-0.22.8 → trustgraph-vertexai-0.22.10}/trustgraph_vertexai.egg-info/dependency_links.txt +0 -0
- {trustgraph-vertexai-0.22.8 → trustgraph-vertexai-0.22.10}/trustgraph_vertexai.egg-info/requires.txt +0 -0
- {trustgraph-vertexai-0.22.8 → trustgraph-vertexai-0.22.10}/trustgraph_vertexai.egg-info/top_level.txt +0 -0
{trustgraph-vertexai-0.22.8 → trustgraph-vertexai-0.22.10}/PKG-INFO
RENAMED
@@ -1,9 +1,9 @@
 Metadata-Version: 2.1
 Name: trustgraph-vertexai
-Version: 0.22.8
+Version: 0.22.10
 Summary: TrustGraph provides a means to run a pipeline of flexible AI processing components in a flexible means to achieve a processing pipeline.
 Home-page: https://github.com/trustgraph-ai/trustgraph
-Download-URL: https://github.com/trustgraph-ai/trustgraph/archive/refs/tags/v0.22.8.tar.gz
+Download-URL: https://github.com/trustgraph-ai/trustgraph/archive/refs/tags/v0.22.10.tar.gz
 Author: trustgraph.ai
 Author-email: security@trustgraph.ai
 Classifier: Programming Language :: Python :: 3
{trustgraph-vertexai-0.22.8 → trustgraph-vertexai-0.22.10}/trustgraph/model/text_completion/vertexai/llm.py
RENAMED
@@ -4,30 +4,50 @@ Simple LLM service, performs text prompt completion using VertexAI on
 Google Cloud. Input is prompt, output is response.
 """
 
+import vertexai
+import time
+from prometheus_client import Histogram
+import os
+
 from google.oauth2 import service_account
 import google
-import vertexai
 
 from vertexai.preview.generative_models import (
-    Content,
-
+    Content,
+    FunctionDeclaration,
+    GenerativeModel,
+    GenerationConfig,
+    HarmCategory,
+    HarmBlockThreshold,
+    Part,
+    Tool,
 )
 
+from .... schema import TextCompletionRequest, TextCompletionResponse, Error
+from .... schema import text_completion_request_queue
+from .... schema import text_completion_response_queue
+from .... log_level import LogLevel
+from .... base import ConsumerProducer
 from .... exceptions import TooManyRequests
-from .... base import LlmService, LlmResult
 
-
+module = ".".join(__name__.split(".")[1:-1])
 
+default_input_queue = text_completion_request_queue
+default_output_queue = text_completion_response_queue
+default_subscriber = module
 default_model = 'gemini-1.0-pro-001'
 default_region = 'us-central1'
 default_temperature = 0.0
 default_max_output = 8192
 default_private_key = "private.json"
 
-class Processor(LlmService):
+class Processor(ConsumerProducer):
 
     def __init__(self, **params):
 
+        input_queue = params.get("input_queue", default_input_queue)
+        output_queue = params.get("output_queue", default_output_queue)
+        subscriber = params.get("subscriber", default_subscriber)
         region = params.get("region", default_region)
         model = params.get("model", default_model)
         private_key = params.get("private_key", default_private_key)
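A note on the new queue defaults above: the default subscriber name is derived from the module path at import time. A minimal sketch of how that expression resolves, assuming the `__name__` this file would have given the package path shown in this diff:

```python
# Sketch: how the `module` expression resolves. Inside the package, this
# file's __name__ would be "trustgraph.model.text_completion.vertexai.llm"
# (assumed from the file path in this diff).
name = "trustgraph.model.text_completion.vertexai.llm"

# Drop the first component ("trustgraph") and the last ("llm"),
# keeping the middle as the module identifier.
module = ".".join(name.split(".")[1:-1])

print(module)  # -> model.text_completion.vertexai
```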
@@ -37,7 +57,28 @@ class Processor(LlmService):
         if private_key is None:
             raise RuntimeError("Private key file not specified")
 
-        super(Processor, self).__init__(
+        super(Processor, self).__init__(
+            **params | {
+                "input_queue": input_queue,
+                "output_queue": output_queue,
+                "subscriber": subscriber,
+                "input_schema": TextCompletionRequest,
+                "output_schema": TextCompletionResponse,
+            }
+        )
+
+        if not hasattr(__class__, "text_completion_metric"):
+            __class__.text_completion_metric = Histogram(
+                'text_completion_duration',
+                'Text completion duration (seconds)',
+                buckets=[
+                    0.25, 0.5, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0,
+                    8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0,
+                    17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0,
+                    30.0, 35.0, 40.0, 45.0, 50.0, 60.0, 80.0, 100.0,
+                    120.0
+                ]
+            )
 
         self.parameters = {
             "temperature": temperature,
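The constructor change above registers a Prometheus histogram on the class, guarded by hasattr so that constructing a second Processor in the same process does not re-register the metric (prometheus_client raises ValueError on a duplicate registration). A standalone sketch of the pattern, with example metric names and buckets:

```python
from prometheus_client import Histogram

class Worker:

    def __init__(self):
        # Register the metric once per class, not once per instance;
        # registering the same metric name twice raises ValueError.
        if not hasattr(__class__, "duration_metric"):
            __class__.duration_metric = Histogram(
                'worker_duration',              # example metric name
                'Work duration (seconds)',      # help text
                buckets=[0.5, 1.0, 5.0, 30.0]   # example buckets
            )

    def work(self):
        # time() returns a context manager that observes elapsed seconds
        with __class__.duration_metric.time():
            pass  # the timed work goes here

w1 = Worker()
w2 = Worker()  # safe: the hasattr guard skips re-registration
```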
@@ -69,11 +110,7 @@ class Processor(LlmService):
         print("Initialise VertexAI...", flush=True)
 
         if private_key:
-            credentials = (
-                service_account.Credentials.from_service_account_file(
-                    private_key
-                )
-            )
+            credentials = service_account.Credentials.from_service_account_file(private_key)
         else:
             credentials = None
 
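For context on the credentials one-liner above: service-account credentials loaded this way are normally handed to vertexai.init, which this hunk does not show. A sketch under assumed project and region values:

```python
import vertexai
from google.oauth2 import service_account

private_key = "private.json"   # default key file name in this module
project_id = "my-gcp-project"  # hypothetical project ID
region = "us-central1"         # default region in this module

# Same call as in the diff: build credentials from a key file on disk.
credentials = service_account.Credentials.from_service_account_file(private_key)

# vertexai.init accepts explicit credentials; passing credentials=None
# falls back to Application Default Credentials.
vertexai.init(project=project_id, location=region, credentials=credentials)
```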
@@ -94,29 +131,50 @@ class Processor(LlmService):
 
         print("Initialisation complete", flush=True)
 
-    async def
+    async def handle(self, msg):
 
         try:
 
-
+            v = msg.value()
 
-
-
-
-            )
+            # Sender-produced ID
+
+            id = msg.properties()["id"]
 
-
-            resp.text = response.text
-            resp.in_token = response.usage_metadata.prompt_token_count
-            resp.out_token = response.usage_metadata.candidates_token_count
-            resp.model = self.model
+            print(f"Handling prompt {id}...", flush=True)
 
-
-
+            prompt = v.system + "\n\n" + v.prompt
+
+            with __class__.text_completion_metric.time():
+
+                response = self.llm.generate_content(
+                    prompt, generation_config=self.generation_config,
+                    safety_settings=self.safety_settings
+                )
+
+            resp = response.text
+            inputtokens = int(response.usage_metadata.prompt_token_count)
+            outputtokens = int(response.usage_metadata.candidates_token_count)
+            print(resp, flush=True)
+            print(f"Input Tokens: {inputtokens}", flush=True)
+            print(f"Output Tokens: {outputtokens}", flush=True)
 
             print("Send response...", flush=True)
 
-
+            r = TextCompletionResponse(
+                error=None,
+                response=resp,
+                in_token=inputtokens,
+                out_token=outputtokens,
+                model=self.model
+            )
+
+            await self.send(r, properties={"id": id})
+
+            print("Done.", flush=True)
+
+            # Acknowledge successful processing of the message
+            self.consumer.acknowledge(msg)
 
         except google.api_core.exceptions.ResourceExhausted as e:
 
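The handle() body above centres on GenerativeModel.generate_content and the token counts in usage_metadata. A minimal standalone sketch of that call pattern, assuming vertexai.init has already run with valid credentials (the model name matches this module's default; the prompt is an example):

```python
from vertexai.preview.generative_models import GenerationConfig, GenerativeModel

# Assumes vertexai.init(...) has already been called with valid credentials.
llm = GenerativeModel("gemini-1.0-pro-001")  # default model in this module

config = GenerationConfig(temperature=0.0, max_output_tokens=8192)

# As in handle(): system text and user prompt joined into one string.
prompt = "You are a helpful assistant." + "\n\n" + "Say hello."

response = llm.generate_content(prompt, generation_config=config)

print(response.text)
print(response.usage_metadata.prompt_token_count)      # input tokens
print(response.usage_metadata.candidates_token_count)  # output tokens
```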
@@ -128,19 +186,40 @@ class Processor(LlmService):
         except Exception as e:
 
             # Apart from rate limits, treat all exceptions as unrecoverable
+
             print(f"Exception: {e}")
-
+
+            print("Send error response...", flush=True)
+
+            r = TextCompletionResponse(
+                error=Error(
+                    type = "llm-error",
+                    message = str(e),
+                ),
+                response=None,
+                in_token=None,
+                out_token=None,
+                model=None,
+            )
+
+            await self.send(r, properties={"id": id})
+
+            self.consumer.acknowledge(msg)
 
     @staticmethod
     def add_args(parser):
 
-
+        ConsumerProducer.add_args(
+            parser, default_input_queue, default_subscriber,
+            default_output_queue,
+        )
 
         parser.add_argument(
             '-m', '--model',
             default=default_model,
             help=f'LLM model (default: {default_model})'
         )
+        # Also: text-bison-32k
 
         parser.add_argument(
             '-k', '--private-key',
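Note how the error path above mirrors the success path: both reply on the output queue carrying the sender-produced "id" property, so the caller can correlate each response, or failure, with its request. A schematic sketch of that correlation pattern; send, msg, and make_error_response below are stand-ins for the real ConsumerProducer machinery, which this diff does not show:

```python
# Schematic only: `send` and `msg` stand in for the Pulsar-backed
# ConsumerProducer plumbing; the point is the id round-trip.

def make_error_response(e):
    # Hypothetical stand-in for TextCompletionResponse(error=Error(...))
    return {"error": {"type": "llm-error", "message": str(e)}}

async def reply(send, msg, build_response):
    # The sender attaches a unique "id" property to every request.
    id = msg.properties()["id"]
    try:
        r = build_response(msg.value())
    except Exception as e:
        # The error envelope carries the same id, so the caller can
        # match the failure to its original request.
        r = make_error_response(e)
    await send(r, properties={"id": id})
```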
@@ -168,5 +247,6 @@ class Processor(LlmService):
         )
 
 def run():
-
+
+    Processor.launch(module, __doc__)
 
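run() now delegates to Processor.launch, inherited from the shared base class and not shown in this diff. As a rough sketch of what a launch-style entrypoint typically wires together (the real implementation lives in the trustgraph base package and may differ; names below are assumptions):

```python
import argparse

# Rough sketch only; not the actual ConsumerProducer.launch.
def launch(cls, module, doc):
    parser = argparse.ArgumentParser(prog=module, description=doc)
    cls.add_args(parser)            # adds -m/--model, -k/--private-key, ...
    args = parser.parse_args()
    processor = cls(**vars(args))   # assumed construction from CLI args
    # ...then start consuming requests from the input queue...
```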
trustgraph-vertexai-0.22.10/trustgraph/vertexai_version.py
ADDED
@@ -0,0 +1 @@
+__version__ = "0.22.10"
{trustgraph-vertexai-0.22.8 → trustgraph-vertexai-0.22.10}/trustgraph_vertexai.egg-info/PKG-INFO
RENAMED
@@ -1,9 +1,9 @@
 Metadata-Version: 2.1
 Name: trustgraph-vertexai
-Version: 0.22.8
+Version: 0.22.10
 Summary: TrustGraph provides a means to run a pipeline of flexible AI processing components in a flexible means to achieve a processing pipeline.
 Home-page: https://github.com/trustgraph-ai/trustgraph
-Download-URL: https://github.com/trustgraph-ai/trustgraph/archive/refs/tags/v0.22.8.tar.gz
+Download-URL: https://github.com/trustgraph-ai/trustgraph/archive/refs/tags/v0.22.10.tar.gz
 Author: trustgraph.ai
 Author-email: security@trustgraph.ai
 Classifier: Programming Language :: Python :: 3
trustgraph-vertexai-0.22.8/trustgraph/vertexai_version.py
DELETED
@@ -1 +0,0 @@
-__version__ = "0.22.8"
{trustgraph-vertexai-0.22.8 → trustgraph-vertexai-0.22.10}/README.md
RENAMED
File without changes
{trustgraph-vertexai-0.22.8 → trustgraph-vertexai-0.22.10}/scripts/text-completion-vertexai
RENAMED
File without changes
{trustgraph-vertexai-0.22.8 → trustgraph-vertexai-0.22.10}/setup.cfg
RENAMED
File without changes
{trustgraph-vertexai-0.22.8 → trustgraph-vertexai-0.22.10}/setup.py
RENAMED
File without changes
{trustgraph-vertexai-0.22.8 → trustgraph-vertexai-0.22.10}/trustgraph/model/text_completion/vertexai/__init__.py
RENAMED
File without changes
{trustgraph-vertexai-0.22.8 → trustgraph-vertexai-0.22.10}/trustgraph/model/text_completion/vertexai/__main__.py
RENAMED
File without changes
{trustgraph-vertexai-0.22.8 → trustgraph-vertexai-0.22.10}/trustgraph_vertexai.egg-info/SOURCES.txt
RENAMED
File without changes
{trustgraph-vertexai-0.22.8 → trustgraph-vertexai-0.22.10}/trustgraph_vertexai.egg-info/dependency_links.txt
RENAMED
File without changes
{trustgraph-vertexai-0.22.8 → trustgraph-vertexai-0.22.10}/trustgraph_vertexai.egg-info/requires.txt
RENAMED
File without changes
{trustgraph-vertexai-0.22.8 → trustgraph-vertexai-0.22.10}/trustgraph_vertexai.egg-info/top_level.txt
RENAMED
File without changes