trustgraph-bedrock 0.23.1.tar.gz → 0.23.2.tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {trustgraph-bedrock-0.23.1 → trustgraph-bedrock-0.23.2}/PKG-INFO +2 -2
- trustgraph-bedrock-0.23.2/trustgraph/bedrock_version.py +1 -0
- {trustgraph-bedrock-0.23.1 → trustgraph-bedrock-0.23.2}/trustgraph/model/text_completion/bedrock/llm.py +20 -83
- {trustgraph-bedrock-0.23.1 → trustgraph-bedrock-0.23.2}/trustgraph_bedrock.egg-info/PKG-INFO +2 -2
- trustgraph-bedrock-0.23.1/trustgraph/bedrock_version.py +0 -1
- {trustgraph-bedrock-0.23.1 → trustgraph-bedrock-0.23.2}/README.md +0 -0
- {trustgraph-bedrock-0.23.1 → trustgraph-bedrock-0.23.2}/scripts/text-completion-bedrock +0 -0
- {trustgraph-bedrock-0.23.1 → trustgraph-bedrock-0.23.2}/setup.cfg +0 -0
- {trustgraph-bedrock-0.23.1 → trustgraph-bedrock-0.23.2}/setup.py +0 -0
- {trustgraph-bedrock-0.23.1 → trustgraph-bedrock-0.23.2}/trustgraph/model/text_completion/bedrock/__init__.py +0 -0
- {trustgraph-bedrock-0.23.1 → trustgraph-bedrock-0.23.2}/trustgraph/model/text_completion/bedrock/__main__.py +0 -0
- {trustgraph-bedrock-0.23.1 → trustgraph-bedrock-0.23.2}/trustgraph_bedrock.egg-info/SOURCES.txt +0 -0
- {trustgraph-bedrock-0.23.1 → trustgraph-bedrock-0.23.2}/trustgraph_bedrock.egg-info/dependency_links.txt +0 -0
- {trustgraph-bedrock-0.23.1 → trustgraph-bedrock-0.23.2}/trustgraph_bedrock.egg-info/requires.txt +0 -0
- {trustgraph-bedrock-0.23.1 → trustgraph-bedrock-0.23.2}/trustgraph_bedrock.egg-info/top_level.txt +0 -0
{trustgraph-bedrock-0.23.1 → trustgraph-bedrock-0.23.2}/PKG-INFO
RENAMED
@@ -1,9 +1,9 @@
 Metadata-Version: 2.1
 Name: trustgraph-bedrock
-Version: 0.23.1
+Version: 0.23.2
 Summary: TrustGraph provides a means to run a pipeline of flexible AI processing components in a flexible means to achieve a processing pipeline.
 Home-page: https://github.com/trustgraph-ai/trustgraph
-Download-URL: https://github.com/trustgraph-ai/trustgraph/archive/refs/tags/v0.23.1.tar.gz
+Download-URL: https://github.com/trustgraph-ai/trustgraph/archive/refs/tags/v0.23.2.tar.gz
 Author: trustgraph.ai
 Author-email: security@trustgraph.ai
 Classifier: Programming Language :: Python :: 3
trustgraph-bedrock-0.23.2/trustgraph/bedrock_version.py
ADDED
@@ -0,0 +1 @@
+__version__ = "0.23.2"
{trustgraph-bedrock-0.23.1 → trustgraph-bedrock-0.23.2}/trustgraph/model/text_completion/bedrock/llm.py
RENAMED
@@ -6,22 +6,14 @@ Input is prompt, output is response. Mistral is default.
 
 import boto3
 import json
-from prometheus_client import Histogram
 import os
 import enum
 
-from .... schema import TextCompletionRequest, TextCompletionResponse, Error
-from .... schema import text_completion_request_queue
-from .... schema import text_completion_response_queue
-from .... log_level import LogLevel
-from .... base import ConsumerProducer
 from .... exceptions import TooManyRequests
+from .... base import LlmService, LlmResult
 
-
+default_ident = "text-completion"
 
-default_input_queue = text_completion_request_queue
-default_output_queue = text_completion_response_queue
-default_subscriber = module
 default_model = 'mistral.mistral-large-2407-v1:0'
 default_temperature = 0.0
 default_max_output = 2048
@@ -149,16 +141,12 @@ class Cohere(ModelHandler):
 
 Default=Mistral
 
-class Processor(ConsumerProducer):
+class Processor(LlmService):
 
     def __init__(self, **params):
 
         print(params)
 
-        input_queue = params.get("input_queue", default_input_queue)
-        output_queue = params.get("output_queue", default_output_queue)
-        subscriber = params.get("subscriber", default_subscriber)
-
         model = params.get("model", default_model)
         temperature = params.get("temperature", default_temperature)
         max_output = params.get("max_output", default_max_output)
@@ -185,30 +173,12 @@ class Processor(ConsumerProducer):
 
         super(Processor, self).__init__(
             **params | {
-                "input_queue": input_queue,
-                "output_queue": output_queue,
-                "subscriber": subscriber,
-                "input_schema": TextCompletionRequest,
-                "output_schema": TextCompletionResponse,
                 "model": model,
                 "temperature": temperature,
                 "max_output": max_output,
             }
         )
 
-        if not hasattr(__class__, "text_completion_metric"):
-            __class__.text_completion_metric = Histogram(
-                'text_completion_duration',
-                'Text completion duration (seconds)',
-                buckets=[
-                    0.25, 0.5, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0,
-                    8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0,
-                    17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0,
-                    30.0, 35.0, 40.0, 45.0, 50.0, 60.0, 80.0, 100.0,
-                    120.0
-                ]
-            )
-
         self.model = model
         self.temperature = temperature
         self.max_output = max_output
@@ -257,30 +227,21 @@ class Processor(ConsumerProducer):
 
         return Default
 
-    async def handle(self, msg):
-
-        v = msg.value()
-
-        # Sender-produced ID
-
-        id = msg.properties()["id"]
-
-        print(f"Handling prompt {id}...", flush=True)
+    async def generate_content(self, system, prompt):
 
         try:
 
-            promptbody = self.variant.encode_request(
+            promptbody = self.variant.encode_request(system, prompt)
 
             accept = 'application/json'
             contentType = 'application/json'
 
-
-
-
-
-
-
-            )
+            response = self.bedrock.invoke_model(
+                body=promptbody,
+                modelId=self.model,
+                accept=accept,
+                contentType=contentType
+            )
 
             # Response structure decode
             outputtext = self.variant.decode_response(response)
@@ -293,18 +254,14 @@ class Processor(ConsumerProducer):
             print(f"Input Tokens: {inputtokens}", flush=True)
             print(f"Output Tokens: {outputtokens}", flush=True)
 
-
-
-
-
-
-                out_token=outputtokens,
-                model=str(self.model),
+            resp = LlmResult(
+                text = outputtext,
+                in_token = inputtokens,
+                out_token = outputtokens,
+                model = self.model
             )
 
-
-
-            print("Done.", flush=True)
+            return resp
 
         except self.bedrock.exceptions.ThrottlingException as e:
 
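These hunks replace the message-driven handler with a plain coroutine: `generate_content` receives `system` and `prompt` as arguments and returns an `LlmResult` carrying the text and token counts, rather than reading a Pulsar message and publishing a `TextCompletionResponse` itself. The Bedrock round trip in the middle is ordinary boto3; as a self-contained sketch, assuming the Mistral request and response shapes that the default model ID implies (the package's real encoding lives in the `ModelHandler` variants, which this diff does not touch):

```python
import json
import boto3

client = boto3.client("bedrock-runtime")

# Mistral-style request body, mirroring the defaults visible in this diff.
body = json.dumps({
    "prompt": "<s>[INST] Say hello. [/INST]",
    "max_tokens": 2048,
    "temperature": 0.0,
})

response = client.invoke_model(
    body=body,
    modelId="mistral.mistral-large-2407-v1:0",
    accept="application/json",
    contentType="application/json",
)

# The response body arrives as a streaming blob; decode it as JSON.
result = json.loads(response["body"].read())
print(result["outputs"][0]["text"])  # Mistral-on-Bedrock response shape
```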
@@ -319,31 +276,12 @@ class Processor(ConsumerProducer):
 
             print(type(e))
             print(f"Exception: {e}")
-
-            print("Send error response...", flush=True)
-
-            r = TextCompletionResponse(
-                error=Error(
-                    type = "llm-error",
-                    message = str(e),
-                ),
-                response=None,
-                in_token=None,
-                out_token=None,
-                model=None,
-            )
-
-            await self.send(r, properties={"id": id})
-
-            self.consumer.acknowledge(msg)
+            raise e
 
     @staticmethod
     def add_args(parser):
 
-        ConsumerProducer.add_args(
-            parser, default_input_queue, default_subscriber,
-            default_output_queue,
-        )
+        LlmService.add_args(parser)
 
         parser.add_argument(
             '-m', '--model',
@@ -391,5 +329,4 @@ class Processor(ConsumerProducer):
 
 def run():
 
-    Processor.launch(
-
+    Processor.launch(default_ident, __doc__)
{trustgraph-bedrock-0.23.1 → trustgraph-bedrock-0.23.2}/trustgraph_bedrock.egg-info/PKG-INFO
RENAMED
@@ -1,9 +1,9 @@
 Metadata-Version: 2.1
 Name: trustgraph-bedrock
-Version: 0.23.1
+Version: 0.23.2
 Summary: TrustGraph provides a means to run a pipeline of flexible AI processing components in a flexible means to achieve a processing pipeline.
 Home-page: https://github.com/trustgraph-ai/trustgraph
-Download-URL: https://github.com/trustgraph-ai/trustgraph/archive/refs/tags/v0.23.1.tar.gz
+Download-URL: https://github.com/trustgraph-ai/trustgraph/archive/refs/tags/v0.23.2.tar.gz
 Author: trustgraph.ai
 Author-email: security@trustgraph.ai
 Classifier: Programming Language :: Python :: 3
trustgraph-bedrock-0.23.1/trustgraph/bedrock_version.py
DELETED
@@ -1 +0,0 @@
-__version__ = "0.23.1"
{trustgraph-bedrock-0.23.1 → trustgraph-bedrock-0.23.2}/README.md
RENAMED
File without changes
{trustgraph-bedrock-0.23.1 → trustgraph-bedrock-0.23.2}/scripts/text-completion-bedrock
RENAMED
File without changes
{trustgraph-bedrock-0.23.1 → trustgraph-bedrock-0.23.2}/setup.cfg
RENAMED
File without changes
{trustgraph-bedrock-0.23.1 → trustgraph-bedrock-0.23.2}/setup.py
RENAMED
File without changes
{trustgraph-bedrock-0.23.1 → trustgraph-bedrock-0.23.2}/trustgraph/model/text_completion/bedrock/__init__.py
RENAMED
File without changes
{trustgraph-bedrock-0.23.1 → trustgraph-bedrock-0.23.2}/trustgraph/model/text_completion/bedrock/__main__.py
RENAMED
File without changes
{trustgraph-bedrock-0.23.1 → trustgraph-bedrock-0.23.2}/trustgraph_bedrock.egg-info/SOURCES.txt
RENAMED
File without changes
{trustgraph-bedrock-0.23.1 → trustgraph-bedrock-0.23.2}/trustgraph_bedrock.egg-info/dependency_links.txt
RENAMED
File without changes
{trustgraph-bedrock-0.23.1 → trustgraph-bedrock-0.23.2}/trustgraph_bedrock.egg-info/requires.txt
RENAMED
File without changes
{trustgraph-bedrock-0.23.1 → trustgraph-bedrock-0.23.2}/trustgraph_bedrock.egg-info/top_level.txt
RENAMED
File without changes