trustgraph-base 0.11.11 (tar.gz)

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (40)
  1. trustgraph-base-0.11.11/PKG-INFO +17 -0
  2. trustgraph-base-0.11.11/README.md +1 -0
  3. trustgraph-base-0.11.11/setup.cfg +4 -0
  4. trustgraph-base-0.11.11/setup.py +42 -0
  5. trustgraph-base-0.11.11/trustgraph/base/__init__.py +6 -0
  6. trustgraph-base-0.11.11/trustgraph/base/base_processor.py +119 -0
  7. trustgraph-base-0.11.11/trustgraph/base/consumer.py +107 -0
  8. trustgraph-base-0.11.11/trustgraph/base/consumer_producer.py +139 -0
  9. trustgraph-base-0.11.11/trustgraph/base/producer.py +55 -0
  10. trustgraph-base-0.11.11/trustgraph/base_version.py +1 -0
  11. trustgraph-base-0.11.11/trustgraph/clients/__init__.py +0 -0
  12. trustgraph-base-0.11.11/trustgraph/clients/base.py +125 -0
  13. trustgraph-base-0.11.11/trustgraph/clients/document_embeddings_client.py +45 -0
  14. trustgraph-base-0.11.11/trustgraph/clients/document_rag_client.py +46 -0
  15. trustgraph-base-0.11.11/trustgraph/clients/embeddings_client.py +44 -0
  16. trustgraph-base-0.11.11/trustgraph/clients/graph_embeddings_client.py +45 -0
  17. trustgraph-base-0.11.11/trustgraph/clients/graph_rag_client.py +46 -0
  18. trustgraph-base-0.11.11/trustgraph/clients/llm_client.py +40 -0
  19. trustgraph-base-0.11.11/trustgraph/clients/prompt_client.py +100 -0
  20. trustgraph-base-0.11.11/trustgraph/clients/triples_query_client.py +59 -0
  21. trustgraph-base-0.11.11/trustgraph/exceptions.py +14 -0
  22. trustgraph-base-0.11.11/trustgraph/log_level.py +20 -0
  23. trustgraph-base-0.11.11/trustgraph/objects/__init__.py +0 -0
  24. trustgraph-base-0.11.11/trustgraph/objects/field.py +72 -0
  25. trustgraph-base-0.11.11/trustgraph/objects/object.py +8 -0
  26. trustgraph-base-0.11.11/trustgraph/rdf.py +6 -0
  27. trustgraph-base-0.11.11/trustgraph/schema/__init__.py +12 -0
  28. trustgraph-base-0.11.11/trustgraph/schema/documents.py +68 -0
  29. trustgraph-base-0.11.11/trustgraph/schema/graph.py +69 -0
  30. trustgraph-base-0.11.11/trustgraph/schema/models.py +44 -0
  31. trustgraph-base-0.11.11/trustgraph/schema/object.py +33 -0
  32. trustgraph-base-0.11.11/trustgraph/schema/prompt.py +65 -0
  33. trustgraph-base-0.11.11/trustgraph/schema/retrieval.py +40 -0
  34. trustgraph-base-0.11.11/trustgraph/schema/topic.py +4 -0
  35. trustgraph-base-0.11.11/trustgraph/schema/types.py +25 -0
  36. trustgraph-base-0.11.11/trustgraph_base.egg-info/PKG-INFO +17 -0
  37. trustgraph-base-0.11.11/trustgraph_base.egg-info/SOURCES.txt +38 -0
  38. trustgraph-base-0.11.11/trustgraph_base.egg-info/dependency_links.txt +1 -0
  39. trustgraph-base-0.11.11/trustgraph_base.egg-info/requires.txt +2 -0
  40. trustgraph-base-0.11.11/trustgraph_base.egg-info/top_level.txt +2 -0
trustgraph-base-0.11.11/PKG-INFO
@@ -0,0 +1,17 @@
+ Metadata-Version: 2.1
+ Name: trustgraph-base
+ Version: 0.11.11
+ Summary: TrustGraph provides a means to run a pipeline of flexible AI processing components in a flexible means to achieve a processing pipeline.
+ Home-page: https://github.com/trustgraph-ai/trustgraph
+ Download-URL: https://github.com/trustgraph-ai/trustgraph/archive/refs/tags/v0.11.11.tar.gz
+ Author: trustgraph.ai
+ Author-email: security@trustgraph.ai
+ Classifier: Programming Language :: Python :: 3
+ Classifier: License :: OSI Approved :: GNU General Public License v3 or later (GPLv3+)
+ Classifier: Operating System :: OS Independent
+ Requires-Python: >=3.8
+ Description-Content-Type: text/markdown
+ Requires-Dist: pulsar-client
+ Requires-Dist: prometheus-client
+
+ See https://trustgraph.ai/
trustgraph-base-0.11.11/README.md
@@ -0,0 +1 @@
+ See https://trustgraph.ai/
trustgraph-base-0.11.11/setup.cfg
@@ -0,0 +1,4 @@
+ [egg_info]
+ tag_build =
+ tag_date = 0
+
trustgraph-base-0.11.11/setup.py
@@ -0,0 +1,42 @@
+ import setuptools
+ import os
+ import importlib
+
+ with open("README.md", "r") as fh:
+     long_description = fh.read()
+
+ # Load a version number module
+ spec = importlib.util.spec_from_file_location(
+     'version', 'trustgraph/base_version.py'
+ )
+ version_module = importlib.util.module_from_spec(spec)
+ spec.loader.exec_module(version_module)
+
+ version = version_module.__version__
+
+ setuptools.setup(
+     name="trustgraph-base",
+     version=version,
+     author="trustgraph.ai",
+     author_email="security@trustgraph.ai",
+     description="TrustGraph provides a means to run a pipeline of flexible AI processing components in a flexible means to achieve a processing pipeline.",
+     long_description=long_description,
+     long_description_content_type="text/markdown",
+     url="https://github.com/trustgraph-ai/trustgraph",
+     packages=setuptools.find_namespace_packages(
+         where='./',
+     ),
+     classifiers=[
+         "Programming Language :: Python :: 3",
+         "License :: OSI Approved :: GNU General Public License v3 or later (GPLv3+)",
+         "Operating System :: OS Independent",
+     ],
+     python_requires='>=3.8',
+     download_url = "https://github.com/trustgraph-ai/trustgraph/archive/refs/tags/v" + version + ".tar.gz",
+     install_requires=[
+         "pulsar-client",
+         "prometheus-client",
+     ],
+     scripts=[
+     ]
+ )
trustgraph-base-0.11.11/trustgraph/base/__init__.py
@@ -0,0 +1,6 @@
+
+ from . base_processor import BaseProcessor
+ from . consumer import Consumer
+ from . producer import Producer
+ from . consumer_producer import ConsumerProducer
+
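
The package's public processor API is re-exported from trustgraph.base, as listed above. A minimal sanity-check sketch, assuming the package has been installed (e.g. pip install trustgraph-base) alongside its pulsar-client and prometheus-client dependencies:

from trustgraph.base import BaseProcessor, Consumer, Producer, ConsumerProducer
from trustgraph.base_version import __version__

print(__version__)   # expected to print "0.11.11" for this release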
trustgraph-base-0.11.11/trustgraph/base/base_processor.py
@@ -0,0 +1,119 @@
+
+ import os
+ import argparse
+ import pulsar
+ import _pulsar
+ import time
+ from prometheus_client import start_http_server, Info
+
+ from .. log_level import LogLevel
+
+ class BaseProcessor:
+
+     default_pulsar_host = os.getenv("PULSAR_HOST", 'pulsar://pulsar:6650')
+
+     def __init__(self, **params):
+
+         self.client = None
+
+         if not hasattr(__class__, "params_metric"):
+             __class__.params_metric = Info(
+                 'params', 'Parameters configuration'
+             )
+
+         # FIXME: Maybe outputs information it should not
+         __class__.params_metric.info({
+             k: str(params[k])
+             for k in params
+         })
+
+         pulsar_host = params.get("pulsar_host", self.default_pulsar_host)
+         log_level = params.get("log_level", LogLevel.INFO)
+
+         self.pulsar_host = pulsar_host
+
+         self.client = pulsar.Client(
+             pulsar_host,
+             logger=pulsar.ConsoleLogger(log_level.to_pulsar())
+         )
+
+     def __del__(self):
+
+         if self.client:
+             self.client.close()
+
+     @staticmethod
+     def add_args(parser):
+
+         parser.add_argument(
+             '-p', '--pulsar-host',
+             default=__class__.default_pulsar_host,
+             help=f'Pulsar host (default: {__class__.default_pulsar_host})',
+         )
+
+         parser.add_argument(
+             '-l', '--log-level',
+             type=LogLevel,
+             default=LogLevel.INFO,
+             choices=list(LogLevel),
+             help=f'Output queue (default: info)'
+         )
+
+         parser.add_argument(
+             '--metrics',
+             action=argparse.BooleanOptionalAction,
+             default=True,
+             help=f'Metrics enabled (default: true)',
+         )
+
+         parser.add_argument(
+             '-P', '--metrics-port',
+             type=int,
+             default=8000,
+             help=f'Pulsar host (default: 8000)',
+         )
+
+     def run(self):
+         raise RuntimeError("Something should have implemented the run method")
+
+     @classmethod
+     def start(cls, prog, doc):
+
+         parser = argparse.ArgumentParser(
+             prog=prog,
+             description=doc
+         )
+
+         cls.add_args(parser)
+
+         args = parser.parse_args()
+         args = vars(args)
+
+         print(args)
+
+         if args["metrics"]:
+             start_http_server(args["metrics_port"])
+
+         while True:
+
+             try:
+
+                 p = cls(**args)
+                 p.run()
+
+             except KeyboardInterrupt:
+                 print("Keyboard interrupt.")
+                 return
+
+             except _pulsar.Interrupted:
+                 print("Pulsar Interrupted.")
+                 return
+
+             except Exception as e:
+
+                 print(type(e))
+
+                 print("Exception:", e, flush=True)
+                 print("Will retry...", flush=True)
+
+                 time.sleep(4)
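
BaseProcessor handles the Pulsar client connection, Prometheus metrics exposure and the retry loop in start(); subclasses supply run(). A hypothetical sketch (HelloProcessor is not part of the package) of the intended usage pattern:

import time
from trustgraph.base import BaseProcessor

class HelloProcessor(BaseProcessor):

    def __init__(self, **params):
        super(HelloProcessor, self).__init__(**params)

    def run(self):
        # start() re-invokes run() after exceptions, so a long-lived loop
        # belongs here
        while True:
            print("connected to", self.pulsar_host, flush=True)
            time.sleep(10)

if __name__ == '__main__':
    # Parses --pulsar-host / --log-level / --metrics / --metrics-port,
    # starts the metrics HTTP server, then constructs and runs the processor
    HelloProcessor.start("hello-processor", "Example processor")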
trustgraph-base-0.11.11/trustgraph/base/consumer.py
@@ -0,0 +1,107 @@
+
+ from pulsar.schema import JsonSchema
+ from prometheus_client import Histogram, Info, Counter, Enum
+ import time
+
+ from . base_processor import BaseProcessor
+ from .. exceptions import TooManyRequests
+
+ class Consumer(BaseProcessor):
+
+     def __init__(self, **params):
+
+         if not hasattr(__class__, "state_metric"):
+             __class__.state_metric = Enum(
+                 'processor_state', 'Processor state',
+                 states=['starting', 'running', 'stopped']
+             )
+             __class__.state_metric.state('starting')
+
+         __class__.state_metric.state('starting')
+
+         super(Consumer, self).__init__(**params)
+
+         input_queue = params.get("input_queue")
+         subscriber = params.get("subscriber")
+         input_schema = params.get("input_schema")
+
+         if input_schema == None:
+             raise RuntimeError("input_schema must be specified")
+
+         if not hasattr(__class__, "request_metric"):
+             __class__.request_metric = Histogram(
+                 'request_latency', 'Request latency (seconds)'
+             )
+
+         if not hasattr(__class__, "pubsub_metric"):
+             __class__.pubsub_metric = Info(
+                 'pubsub', 'Pub/sub configuration'
+             )
+
+         if not hasattr(__class__, "processing_metric"):
+             __class__.processing_metric = Counter(
+                 'processing_count', 'Processing count', ["status"]
+             )
+
+         __class__.pubsub_metric.info({
+             "input_queue": input_queue,
+             "subscriber": subscriber,
+             "input_schema": input_schema.__name__,
+         })
+
+         self.consumer = self.client.subscribe(
+             input_queue, subscriber,
+             schema=JsonSchema(input_schema),
+         )
+
+     def run(self):
+
+         __class__.state_metric.state('running')
+
+         while True:
+
+             msg = self.consumer.receive()
+
+             try:
+
+                 with __class__.request_metric.time():
+                     self.handle(msg)
+
+                 # Acknowledge successful processing of the message
+                 self.consumer.acknowledge(msg)
+
+                 __class__.processing_metric.labels(status="success").inc()
+
+             except TooManyRequests:
+                 self.consumer.negative_acknowledge(msg)
+                 print("TooManyRequests: will retry")
+                 __class__.processing_metric.labels(status="rate-limit").inc()
+                 time.sleep(5)
+                 continue
+
+             except Exception as e:
+
+                 print("Exception:", e, flush=True)
+
+                 # Message failed to be processed
+                 self.consumer.negative_acknowledge(msg)
+
+                 __class__.processing_metric.labels(status="error").inc()
+
+     @staticmethod
+     def add_args(parser, default_input_queue, default_subscriber):
+
+         BaseProcessor.add_args(parser)
+
+         parser.add_argument(
+             '-i', '--input-queue',
+             default=default_input_queue,
+             help=f'Input queue (default: {default_input_queue})'
+         )
+
+         parser.add_argument(
+             '-s', '--subscriber',
+             default=default_subscriber,
+             help=f'Queue subscriber name (default: {default_subscriber})'
+         )
+
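
Consumer adds a subscription, per-message metrics and acknowledgement handling on top of BaseProcessor; subclasses implement handle(msg). A hypothetical sketch, with TextMessage standing in for one of the package's real schema records:

from pulsar.schema import Record, String
from trustgraph.base import Consumer

class TextMessage(Record):    # illustrative schema, not part of the package
    text = String()

class PrintConsumer(Consumer):

    def __init__(self, **params):
        params.setdefault("input_schema", TextMessage)
        super(PrintConsumer, self).__init__(**params)

    def handle(self, msg):
        # msg is the raw Pulsar message; msg.value() is the decoded record.
        # Raising trustgraph.exceptions.TooManyRequests here triggers a
        # negative ack and retry instead of an error count.
        print(msg.value().text, flush=True)

    @staticmethod
    def add_args(parser):
        Consumer.add_args(parser, "example-input", "example-subscriber")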
trustgraph-base-0.11.11/trustgraph/base/consumer_producer.py
@@ -0,0 +1,139 @@
+
+ from pulsar.schema import JsonSchema
+ from prometheus_client import Histogram, Info, Counter, Enum
+ import time
+
+ from . base_processor import BaseProcessor
+ from .. exceptions import TooManyRequests
+
+ # FIXME: Derive from consumer? And producer?
+
+ class ConsumerProducer(BaseProcessor):
+
+     def __init__(self, **params):
+
+         if not hasattr(__class__, "state_metric"):
+             __class__.state_metric = Enum(
+                 'processor_state', 'Processor state',
+                 states=['starting', 'running', 'stopped']
+             )
+             __class__.state_metric.state('starting')
+
+         __class__.state_metric.state('starting')
+
+         input_queue = params.get("input_queue")
+         output_queue = params.get("output_queue")
+         subscriber = params.get("subscriber")
+         input_schema = params.get("input_schema")
+         output_schema = params.get("output_schema")
+
+         if not hasattr(__class__, "request_metric"):
+             __class__.request_metric = Histogram(
+                 'request_latency', 'Request latency (seconds)'
+             )
+
+         if not hasattr(__class__, "output_metric"):
+             __class__.output_metric = Counter(
+                 'output_count', 'Output items created'
+             )
+
+         if not hasattr(__class__, "pubsub_metric"):
+             __class__.pubsub_metric = Info(
+                 'pubsub', 'Pub/sub configuration'
+             )
+
+         if not hasattr(__class__, "processing_metric"):
+             __class__.processing_metric = Counter(
+                 'processing_count', 'Processing count', ["status"]
+             )
+
+         __class__.pubsub_metric.info({
+             "input_queue": input_queue,
+             "output_queue": output_queue,
+             "subscriber": subscriber,
+             "input_schema": input_schema.__name__,
+             "output_schema": output_schema.__name__,
+         })
+
+         super(ConsumerProducer, self).__init__(**params)
+
+         if input_schema == None:
+             raise RuntimeError("input_schema must be specified")
+
+         if output_schema == None:
+             raise RuntimeError("output_schema must be specified")
+
+         self.producer = self.client.create_producer(
+             topic=output_queue,
+             schema=JsonSchema(output_schema),
+         )
+
+         self.consumer = self.client.subscribe(
+             input_queue, subscriber,
+             schema=JsonSchema(input_schema),
+         )
+
+     def run(self):
+
+         __class__.state_metric.state('running')
+
+         while True:
+
+             msg = self.consumer.receive()
+
+             try:
+
+                 with __class__.request_metric.time():
+                     resp = self.handle(msg)
+
+                 # Acknowledge successful processing of the message
+                 self.consumer.acknowledge(msg)
+
+                 __class__.processing_metric.labels(status="success").inc()
+
+             except TooManyRequests:
+                 self.consumer.negative_acknowledge(msg)
+                 print("TooManyRequests: will retry")
+                 __class__.processing_metric.labels(status="rate-limit").inc()
+                 time.sleep(5)
+                 continue
+
+             except Exception as e:
+
+                 print("Exception:", e, flush=True)
+
+                 # Message failed to be processed
+                 self.consumer.negative_acknowledge(msg)
+
+                 __class__.processing_metric.labels(status="error").inc()
+
+     def send(self, msg, properties={}):
+         self.producer.send(msg, properties)
+         __class__.output_metric.inc()
+
+     @staticmethod
+     def add_args(
+         parser, default_input_queue, default_subscriber,
+         default_output_queue,
+     ):
+
+         BaseProcessor.add_args(parser)
+
+         parser.add_argument(
+             '-i', '--input-queue',
+             default=default_input_queue,
+             help=f'Input queue (default: {default_input_queue})'
+         )
+
+         parser.add_argument(
+             '-s', '--subscriber',
+             default=default_subscriber,
+             help=f'Queue subscriber name (default: {default_subscriber})'
+         )
+
+         parser.add_argument(
+             '-o', '--output-queue',
+             default=default_output_queue,
+             help=f'Output queue (default: {default_output_queue})'
+         )
+
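
ConsumerProducer combines the two roles: handle(msg) consumes a record and send() publishes the result. A hypothetical request/response sketch; note the request id property is copied to the outgoing message so that BaseClient.call() (shown further below) can correlate the reply:

from pulsar.schema import Record, String
from trustgraph.base import ConsumerProducer

class Request(Record):      # illustrative schemas, not part of the package
    text = String()

class Response(Record):
    text = String()

class UpperCaser(ConsumerProducer):

    def __init__(self, **params):
        params.setdefault("input_schema", Request)
        params.setdefault("output_schema", Response)
        super(UpperCaser, self).__init__(**params)

    def handle(self, msg):
        resp = Response(text=msg.value().text.upper())
        # Propagate the request id so a waiting client can match the reply
        self.send(resp, properties={"id": msg.properties()["id"]})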
trustgraph-base-0.11.11/trustgraph/base/producer.py
@@ -0,0 +1,55 @@
+
+ from pulsar.schema import JsonSchema
+ from prometheus_client import Info, Counter
+
+ from . base_processor import BaseProcessor
+
+ class Producer(BaseProcessor):
+
+     def __init__(self, **params):
+
+         output_queue = params.get("output_queue")
+         output_schema = params.get("output_schema")
+
+         if not hasattr(__class__, "output_metric"):
+             __class__.output_metric = Counter(
+                 'output_count', 'Output items created'
+             )
+
+         if not hasattr(__class__, "pubsub_metric"):
+             __class__.pubsub_metric = Info(
+                 'pubsub', 'Pub/sub configuration'
+             )
+
+         __class__.pubsub_metric.info({
+             "output_queue": output_queue,
+             "output_schema": output_schema.__name__,
+         })
+
+         super(Producer, self).__init__(**params)
+
+         if output_schema == None:
+             raise RuntimeError("output_schema must be specified")
+
+         self.producer = self.client.create_producer(
+             topic=output_queue,
+             schema=JsonSchema(output_schema),
+         )
+
+     def send(self, msg, properties={}):
+         self.producer.send(msg, properties)
+         __class__.output_metric.inc()
+
+     @staticmethod
+     def add_args(
+         parser, default_input_queue, default_subscriber,
+         default_output_queue,
+     ):
+
+         BaseProcessor.add_args(parser)
+
+         parser.add_argument(
+             '-o', '--output-queue',
+             default=default_output_queue,
+             help=f'Output queue (default: {default_output_queue})'
+         )
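
Producer is the publish-only variant; a hypothetical sketch of a subclass that emits a record on a timer:

import time
from pulsar.schema import Record, String
from trustgraph.base import Producer

class Event(Record):        # illustrative schema, not part of the package
    text = String()

class Heartbeat(Producer):

    def __init__(self, **params):
        params.setdefault("output_schema", Event)
        super(Heartbeat, self).__init__(**params)

    def run(self):
        while True:
            self.send(Event(text="ping"))   # increments the output_count metric
            time.sleep(10)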
trustgraph-base-0.11.11/trustgraph/base_version.py
@@ -0,0 +1 @@
+ __version__ = "0.11.11"
trustgraph-base-0.11.11/trustgraph/clients/__init__.py: File without changes
trustgraph-base-0.11.11/trustgraph/clients/base.py
@@ -0,0 +1,125 @@
+
+ import pulsar
+ import _pulsar
+ import hashlib
+ import uuid
+ import time
+ from pulsar.schema import JsonSchema
+
+ from .. exceptions import *
+
+ # Default timeout for a request/response. In seconds.
+ DEFAULT_TIMEOUT=300
+
+ # Ugly
+ ERROR=_pulsar.LoggerLevel.Error
+ WARN=_pulsar.LoggerLevel.Warn
+ INFO=_pulsar.LoggerLevel.Info
+ DEBUG=_pulsar.LoggerLevel.Debug
+
+ class BaseClient:
+
+     def __init__(
+         self, log_level=ERROR,
+         subscriber=None,
+         input_queue=None,
+         output_queue=None,
+         input_schema=None,
+         output_schema=None,
+         pulsar_host="pulsar://pulsar:6650",
+     ):
+
+         if input_queue == None: raise RuntimeError("Need input_queue")
+         if output_queue == None: raise RuntimeError("Need output_queue")
+         if input_schema == None: raise RuntimeError("Need input_schema")
+         if output_schema == None: raise RuntimeError("Need output_schema")
+
+         if subscriber == None:
+             subscriber = str(uuid.uuid4())
+
+         self.client = pulsar.Client(
+             pulsar_host,
+             logger=pulsar.ConsoleLogger(log_level),
+         )
+
+         self.producer = self.client.create_producer(
+             topic=input_queue,
+             schema=JsonSchema(input_schema),
+             chunking_enabled=True,
+         )
+
+         self.consumer = self.client.subscribe(
+             output_queue, subscriber,
+             schema=JsonSchema(output_schema),
+         )
+
+         self.input_schema = input_schema
+         self.output_schema = output_schema
+
+     def call(self, **args):
+
+         timeout = args.get("timeout", DEFAULT_TIMEOUT)
+
+         if "timeout" in args:
+             del args["timeout"]
+
+         id = str(uuid.uuid4())
+
+         r = self.input_schema(**args)
+
+         end_time = time.time() + timeout
+
+         self.producer.send(r, properties={ "id": id })
+
+         while time.time() < end_time:
+
+             try:
+                 msg = self.consumer.receive(timeout_millis=2500)
+             except pulsar.exceptions.Timeout:
+                 continue
+
+             mid = msg.properties()["id"]
+
+             if mid == id:
+
+                 value = msg.value()
+
+                 if value.error:
+
+                     self.consumer.acknowledge(msg)
+
+                     if value.error.type == "llm-error":
+                         raise LlmError(value.error.message)
+
+                     elif value.error.type == "too-many-requests":
+                         raise TooManyRequests(value.error.message)
+
+                     elif value.error.type == "ParseError":
+                         raise ParseError(value.error.message)
+
+                     else:
+
+                         raise RuntimeError(
+                             f"{value.error.type}: {value.error.message}"
+                         )
+
+                 resp = msg.value()
+                 self.consumer.acknowledge(msg)
+                 return resp
+
+             # Ignore messages with wrong ID
+             self.consumer.acknowledge(msg)
+
+         raise TimeoutError("Timed out waiting for response")
+
+     def __del__(self):
+
+         if hasattr(self, "consumer"):
+             self.consumer.close()
+
+         if hasattr(self, "producer"):
+             self.producer.flush()
+             self.producer.close()
+
+         self.client.close()
+
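
BaseClient implements a simple request/response pattern over two queues: call() publishes one request tagged with a UUID "id" property, then polls the response queue (2.5 s receive timeout) until a message with the same id arrives or the overall timeout expires, raising the mapped exception if the response carries an error. A hypothetical direct-usage sketch with illustrative schemas, assuming a broker at the default pulsar://pulsar:6650 and a service answering on these queues:

from pulsar.schema import Record, String
from trustgraph.clients.base import BaseClient

class Error(Record):        # mirrors the error fields call() inspects
    type = String()
    message = String()

class Question(Record):     # illustrative request schema
    text = String()

class Answer(Record):       # illustrative response schema
    error = Error()
    text = String()

client = BaseClient(
    input_queue="example-request",
    output_queue="example-response",
    input_schema=Question,
    output_schema=Answer,
)

# Keyword arguments other than timeout are used to build the request record
resp = client.call(text="What is TrustGraph?", timeout=60)
print(resp.text)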
trustgraph-base-0.11.11/trustgraph/clients/document_embeddings_client.py
@@ -0,0 +1,45 @@
+
+ import _pulsar
+
+ from .. schema import DocumentEmbeddingsRequest, DocumentEmbeddingsResponse
+ from .. schema import document_embeddings_request_queue
+ from .. schema import document_embeddings_response_queue
+ from . base import BaseClient
+
+ # Ugly
+ ERROR=_pulsar.LoggerLevel.Error
+ WARN=_pulsar.LoggerLevel.Warn
+ INFO=_pulsar.LoggerLevel.Info
+ DEBUG=_pulsar.LoggerLevel.Debug
+
+ class DocumentEmbeddingsClient(BaseClient):
+
+     def __init__(
+         self, log_level=ERROR,
+         subscriber=None,
+         input_queue=None,
+         output_queue=None,
+         pulsar_host="pulsar://pulsar:6650",
+     ):
+
+         if input_queue == None:
+             input_queue = document_embeddings_request_queue
+
+         if output_queue == None:
+             output_queue = document_embeddings_response_queue
+
+         super(DocumentEmbeddingsClient, self).__init__(
+             log_level=log_level,
+             subscriber=subscriber,
+             input_queue=input_queue,
+             output_queue=output_queue,
+             pulsar_host=pulsar_host,
+             input_schema=DocumentEmbeddingsRequest,
+             output_schema=DocumentEmbeddingsResponse,
+         )
+
+     def request(self, vectors, limit=10, timeout=300):
+         return self.call(
+             vectors=vectors, limit=limit, timeout=timeout
+         ).documents
+
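
DocumentEmbeddingsClient wires BaseClient to the package's document-embeddings queues and schemas. A hypothetical usage sketch, assuming a broker at the default pulsar://pulsar:6650 and a document-embeddings service consuming the request queue:

from trustgraph.clients.document_embeddings_client import DocumentEmbeddingsClient

client = DocumentEmbeddingsClient(subscriber="example-subscriber")

# request() forwards to call() and returns the .documents field of the response
docs = client.request(
    vectors=[[0.1, 0.2, 0.3]],   # illustrative embedding vector
    limit=5,
    timeout=60,
)

for doc in docs:
    print(doc)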