trustgraph-base 1.4.19__tar.gz → 1.6.4__tar.gz

This diff shows the changes between publicly released versions of the package as they appear in their respective public registries, and is provided for informational purposes only.
Files changed (140)
  1. {trustgraph_base-1.4.19 → trustgraph_base-1.6.4}/PKG-INFO +1 -1
  2. {trustgraph_base-1.4.19 → trustgraph_base-1.6.4}/trustgraph/base/__init__.py +1 -1
  3. {trustgraph_base-1.4.19 → trustgraph_base-1.6.4}/trustgraph/base/embeddings_service.py +10 -2
  4. {trustgraph_base-1.4.19 → trustgraph_base-1.6.4}/trustgraph/base/llm_service.py +80 -21
  5. trustgraph_base-1.6.4/trustgraph/base/prompt_client.py +171 -0
  6. {trustgraph_base-1.4.19 → trustgraph_base-1.6.4}/trustgraph/base/subscriber.py +30 -11
  7. trustgraph_base-1.6.4/trustgraph/base/text_completion_client.py +57 -0
  8. trustgraph_base-1.6.4/trustgraph/base_version.py +1 -0
  9. trustgraph_base-1.6.4/trustgraph/clients/llm_client.py +105 -0
  10. trustgraph_base-1.6.4/trustgraph/messaging/translators/agent.py +70 -0
  11. {trustgraph_base-1.4.19 → trustgraph_base-1.6.4}/trustgraph/messaging/translators/prompt.py +6 -3
  12. {trustgraph_base-1.4.19 → trustgraph_base-1.6.4}/trustgraph/messaging/translators/retrieval.py +66 -22
  13. {trustgraph_base-1.4.19 → trustgraph_base-1.6.4}/trustgraph/messaging/translators/text_completion.py +6 -3
  14. {trustgraph_base-1.4.19 → trustgraph_base-1.6.4}/trustgraph/rdf.py +1 -0
  15. {trustgraph_base-1.4.19 → trustgraph_base-1.6.4}/trustgraph/schema/services/agent.py +9 -1
  16. {trustgraph_base-1.4.19 → trustgraph_base-1.6.4}/trustgraph/schema/services/llm.py +3 -1
  17. {trustgraph_base-1.4.19 → trustgraph_base-1.6.4}/trustgraph/schema/services/prompt.py +7 -1
  18. {trustgraph_base-1.4.19 → trustgraph_base-1.6.4}/trustgraph/schema/services/retrieval.py +6 -0
  19. {trustgraph_base-1.4.19 → trustgraph_base-1.6.4}/trustgraph_base.egg-info/PKG-INFO +1 -1
  20. trustgraph_base-1.4.19/trustgraph/base/prompt_client.py +0 -100
  21. trustgraph_base-1.4.19/trustgraph/base/text_completion_client.py +0 -30
  22. trustgraph_base-1.4.19/trustgraph/base_version.py +0 -1
  23. trustgraph_base-1.4.19/trustgraph/clients/llm_client.py +0 -44
  24. trustgraph_base-1.4.19/trustgraph/messaging/translators/agent.py +0 -46
  25. {trustgraph_base-1.4.19 → trustgraph_base-1.6.4}/README.md +0 -0
  26. {trustgraph_base-1.4.19 → trustgraph_base-1.6.4}/pyproject.toml +0 -0
  27. {trustgraph_base-1.4.19 → trustgraph_base-1.6.4}/setup.cfg +0 -0
  28. {trustgraph_base-1.4.19 → trustgraph_base-1.6.4}/trustgraph/api/__init__.py +0 -0
  29. {trustgraph_base-1.4.19 → trustgraph_base-1.6.4}/trustgraph/api/api.py +0 -0
  30. {trustgraph_base-1.4.19 → trustgraph_base-1.6.4}/trustgraph/api/collection.py +0 -0
  31. {trustgraph_base-1.4.19 → trustgraph_base-1.6.4}/trustgraph/api/config.py +0 -0
  32. {trustgraph_base-1.4.19 → trustgraph_base-1.6.4}/trustgraph/api/exceptions.py +0 -0
  33. {trustgraph_base-1.4.19 → trustgraph_base-1.6.4}/trustgraph/api/flow.py +0 -0
  34. {trustgraph_base-1.4.19 → trustgraph_base-1.6.4}/trustgraph/api/knowledge.py +0 -0
  35. {trustgraph_base-1.4.19 → trustgraph_base-1.6.4}/trustgraph/api/library.py +0 -0
  36. {trustgraph_base-1.4.19 → trustgraph_base-1.6.4}/trustgraph/api/types.py +0 -0
  37. {trustgraph_base-1.4.19 → trustgraph_base-1.6.4}/trustgraph/base/agent_client.py +0 -0
  38. {trustgraph_base-1.4.19 → trustgraph_base-1.6.4}/trustgraph/base/agent_service.py +0 -0
  39. {trustgraph_base-1.4.19 → trustgraph_base-1.6.4}/trustgraph/base/async_processor.py +0 -0
  40. {trustgraph_base-1.4.19 → trustgraph_base-1.6.4}/trustgraph/base/cassandra_config.py +0 -0
  41. {trustgraph_base-1.4.19 → trustgraph_base-1.6.4}/trustgraph/base/chunking_service.py +0 -0
  42. {trustgraph_base-1.4.19 → trustgraph_base-1.6.4}/trustgraph/base/consumer.py +0 -0
  43. {trustgraph_base-1.4.19 → trustgraph_base-1.6.4}/trustgraph/base/consumer_spec.py +0 -0
  44. {trustgraph_base-1.4.19 → trustgraph_base-1.6.4}/trustgraph/base/document_embeddings_client.py +0 -0
  45. {trustgraph_base-1.4.19 → trustgraph_base-1.6.4}/trustgraph/base/document_embeddings_query_service.py +0 -0
  46. {trustgraph_base-1.4.19 → trustgraph_base-1.6.4}/trustgraph/base/document_embeddings_store_service.py +0 -0
  47. {trustgraph_base-1.4.19 → trustgraph_base-1.6.4}/trustgraph/base/embeddings_client.py +0 -0
  48. {trustgraph_base-1.4.19 → trustgraph_base-1.6.4}/trustgraph/base/flow.py +0 -0
  49. {trustgraph_base-1.4.19 → trustgraph_base-1.6.4}/trustgraph/base/flow_processor.py +0 -0
  50. {trustgraph_base-1.4.19 → trustgraph_base-1.6.4}/trustgraph/base/graph_embeddings_client.py +0 -0
  51. {trustgraph_base-1.4.19 → trustgraph_base-1.6.4}/trustgraph/base/graph_embeddings_query_service.py +0 -0
  52. {trustgraph_base-1.4.19 → trustgraph_base-1.6.4}/trustgraph/base/graph_embeddings_store_service.py +0 -0
  53. {trustgraph_base-1.4.19 → trustgraph_base-1.6.4}/trustgraph/base/graph_rag_client.py +0 -0
  54. {trustgraph_base-1.4.19 → trustgraph_base-1.6.4}/trustgraph/base/metrics.py +0 -0
  55. {trustgraph_base-1.4.19 → trustgraph_base-1.6.4}/trustgraph/base/parameter_spec.py +0 -0
  56. {trustgraph_base-1.4.19 → trustgraph_base-1.6.4}/trustgraph/base/producer.py +0 -0
  57. {trustgraph_base-1.4.19 → trustgraph_base-1.6.4}/trustgraph/base/producer_spec.py +0 -0
  58. {trustgraph_base-1.4.19 → trustgraph_base-1.6.4}/trustgraph/base/publisher.py +0 -0
  59. {trustgraph_base-1.4.19 → trustgraph_base-1.6.4}/trustgraph/base/pubsub.py +0 -0
  60. {trustgraph_base-1.4.19 → trustgraph_base-1.6.4}/trustgraph/base/request_response_spec.py +0 -0
  61. {trustgraph_base-1.4.19 → trustgraph_base-1.6.4}/trustgraph/base/spec.py +0 -0
  62. {trustgraph_base-1.4.19 → trustgraph_base-1.6.4}/trustgraph/base/structured_query_client.py +0 -0
  63. {trustgraph_base-1.4.19 → trustgraph_base-1.6.4}/trustgraph/base/subscriber_spec.py +0 -0
  64. {trustgraph_base-1.4.19 → trustgraph_base-1.6.4}/trustgraph/base/tool_client.py +0 -0
  65. {trustgraph_base-1.4.19 → trustgraph_base-1.6.4}/trustgraph/base/tool_service.py +0 -0
  66. {trustgraph_base-1.4.19 → trustgraph_base-1.6.4}/trustgraph/base/triples_client.py +0 -0
  67. {trustgraph_base-1.4.19 → trustgraph_base-1.6.4}/trustgraph/base/triples_query_service.py +0 -0
  68. {trustgraph_base-1.4.19 → trustgraph_base-1.6.4}/trustgraph/base/triples_store_service.py +0 -0
  69. {trustgraph_base-1.4.19 → trustgraph_base-1.6.4}/trustgraph/clients/__init__.py +0 -0
  70. {trustgraph_base-1.4.19 → trustgraph_base-1.6.4}/trustgraph/clients/agent_client.py +0 -0
  71. {trustgraph_base-1.4.19 → trustgraph_base-1.6.4}/trustgraph/clients/base.py +0 -0
  72. {trustgraph_base-1.4.19 → trustgraph_base-1.6.4}/trustgraph/clients/config_client.py +0 -0
  73. {trustgraph_base-1.4.19 → trustgraph_base-1.6.4}/trustgraph/clients/document_embeddings_client.py +0 -0
  74. {trustgraph_base-1.4.19 → trustgraph_base-1.6.4}/trustgraph/clients/document_rag_client.py +0 -0
  75. {trustgraph_base-1.4.19 → trustgraph_base-1.6.4}/trustgraph/clients/embeddings_client.py +0 -0
  76. {trustgraph_base-1.4.19 → trustgraph_base-1.6.4}/trustgraph/clients/graph_embeddings_client.py +0 -0
  77. {trustgraph_base-1.4.19 → trustgraph_base-1.6.4}/trustgraph/clients/graph_rag_client.py +0 -0
  78. {trustgraph_base-1.4.19 → trustgraph_base-1.6.4}/trustgraph/clients/prompt_client.py +0 -0
  79. {trustgraph_base-1.4.19 → trustgraph_base-1.6.4}/trustgraph/clients/triples_query_client.py +0 -0
  80. {trustgraph_base-1.4.19 → trustgraph_base-1.6.4}/trustgraph/exceptions.py +0 -0
  81. {trustgraph_base-1.4.19 → trustgraph_base-1.6.4}/trustgraph/knowledge/__init__.py +0 -0
  82. {trustgraph_base-1.4.19 → trustgraph_base-1.6.4}/trustgraph/knowledge/defs.py +0 -0
  83. {trustgraph_base-1.4.19 → trustgraph_base-1.6.4}/trustgraph/knowledge/document.py +0 -0
  84. {trustgraph_base-1.4.19 → trustgraph_base-1.6.4}/trustgraph/knowledge/identifier.py +0 -0
  85. {trustgraph_base-1.4.19 → trustgraph_base-1.6.4}/trustgraph/knowledge/organization.py +0 -0
  86. {trustgraph_base-1.4.19 → trustgraph_base-1.6.4}/trustgraph/knowledge/publication.py +0 -0
  87. {trustgraph_base-1.4.19 → trustgraph_base-1.6.4}/trustgraph/log_level.py +0 -0
  88. {trustgraph_base-1.4.19 → trustgraph_base-1.6.4}/trustgraph/messaging/__init__.py +0 -0
  89. {trustgraph_base-1.4.19 → trustgraph_base-1.6.4}/trustgraph/messaging/registry.py +0 -0
  90. {trustgraph_base-1.4.19 → trustgraph_base-1.6.4}/trustgraph/messaging/translators/__init__.py +0 -0
  91. {trustgraph_base-1.4.19 → trustgraph_base-1.6.4}/trustgraph/messaging/translators/base.py +0 -0
  92. {trustgraph_base-1.4.19 → trustgraph_base-1.6.4}/trustgraph/messaging/translators/collection.py +0 -0
  93. {trustgraph_base-1.4.19 → trustgraph_base-1.6.4}/trustgraph/messaging/translators/config.py +0 -0
  94. {trustgraph_base-1.4.19 → trustgraph_base-1.6.4}/trustgraph/messaging/translators/diagnosis.py +0 -0
  95. {trustgraph_base-1.4.19 → trustgraph_base-1.6.4}/trustgraph/messaging/translators/document_loading.py +0 -0
  96. {trustgraph_base-1.4.19 → trustgraph_base-1.6.4}/trustgraph/messaging/translators/embeddings.py +0 -0
  97. {trustgraph_base-1.4.19 → trustgraph_base-1.6.4}/trustgraph/messaging/translators/embeddings_query.py +0 -0
  98. {trustgraph_base-1.4.19 → trustgraph_base-1.6.4}/trustgraph/messaging/translators/flow.py +0 -0
  99. {trustgraph_base-1.4.19 → trustgraph_base-1.6.4}/trustgraph/messaging/translators/knowledge.py +0 -0
  100. {trustgraph_base-1.4.19 → trustgraph_base-1.6.4}/trustgraph/messaging/translators/library.py +0 -0
  101. {trustgraph_base-1.4.19 → trustgraph_base-1.6.4}/trustgraph/messaging/translators/metadata.py +0 -0
  102. {trustgraph_base-1.4.19 → trustgraph_base-1.6.4}/trustgraph/messaging/translators/nlp_query.py +0 -0
  103. {trustgraph_base-1.4.19 → trustgraph_base-1.6.4}/trustgraph/messaging/translators/objects_query.py +0 -0
  104. {trustgraph_base-1.4.19 → trustgraph_base-1.6.4}/trustgraph/messaging/translators/primitives.py +0 -0
  105. {trustgraph_base-1.4.19 → trustgraph_base-1.6.4}/trustgraph/messaging/translators/structured_query.py +0 -0
  106. {trustgraph_base-1.4.19 → trustgraph_base-1.6.4}/trustgraph/messaging/translators/tool.py +0 -0
  107. {trustgraph_base-1.4.19 → trustgraph_base-1.6.4}/trustgraph/messaging/translators/triples.py +0 -0
  108. {trustgraph_base-1.4.19 → trustgraph_base-1.6.4}/trustgraph/objects/__init__.py +0 -0
  109. {trustgraph_base-1.4.19 → trustgraph_base-1.6.4}/trustgraph/objects/field.py +0 -0
  110. {trustgraph_base-1.4.19 → trustgraph_base-1.6.4}/trustgraph/objects/object.py +0 -0
  111. {trustgraph_base-1.4.19 → trustgraph_base-1.6.4}/trustgraph/schema/__init__.py +0 -0
  112. {trustgraph_base-1.4.19 → trustgraph_base-1.6.4}/trustgraph/schema/core/__init__.py +0 -0
  113. {trustgraph_base-1.4.19 → trustgraph_base-1.6.4}/trustgraph/schema/core/metadata.py +0 -0
  114. {trustgraph_base-1.4.19 → trustgraph_base-1.6.4}/trustgraph/schema/core/primitives.py +0 -0
  115. {trustgraph_base-1.4.19 → trustgraph_base-1.6.4}/trustgraph/schema/core/topic.py +0 -0
  116. {trustgraph_base-1.4.19 → trustgraph_base-1.6.4}/trustgraph/schema/knowledge/__init__.py +0 -0
  117. {trustgraph_base-1.4.19 → trustgraph_base-1.6.4}/trustgraph/schema/knowledge/document.py +0 -0
  118. {trustgraph_base-1.4.19 → trustgraph_base-1.6.4}/trustgraph/schema/knowledge/embeddings.py +0 -0
  119. {trustgraph_base-1.4.19 → trustgraph_base-1.6.4}/trustgraph/schema/knowledge/graph.py +0 -0
  120. {trustgraph_base-1.4.19 → trustgraph_base-1.6.4}/trustgraph/schema/knowledge/knowledge.py +0 -0
  121. {trustgraph_base-1.4.19 → trustgraph_base-1.6.4}/trustgraph/schema/knowledge/nlp.py +0 -0
  122. {trustgraph_base-1.4.19 → trustgraph_base-1.6.4}/trustgraph/schema/knowledge/object.py +0 -0
  123. {trustgraph_base-1.4.19 → trustgraph_base-1.6.4}/trustgraph/schema/knowledge/rows.py +0 -0
  124. {trustgraph_base-1.4.19 → trustgraph_base-1.6.4}/trustgraph/schema/knowledge/structured.py +0 -0
  125. {trustgraph_base-1.4.19 → trustgraph_base-1.6.4}/trustgraph/schema/services/__init__.py +0 -0
  126. {trustgraph_base-1.4.19 → trustgraph_base-1.6.4}/trustgraph/schema/services/collection.py +0 -0
  127. {trustgraph_base-1.4.19 → trustgraph_base-1.6.4}/trustgraph/schema/services/config.py +0 -0
  128. {trustgraph_base-1.4.19 → trustgraph_base-1.6.4}/trustgraph/schema/services/diagnosis.py +0 -0
  129. {trustgraph_base-1.4.19 → trustgraph_base-1.6.4}/trustgraph/schema/services/flow.py +0 -0
  130. {trustgraph_base-1.4.19 → trustgraph_base-1.6.4}/trustgraph/schema/services/library.py +0 -0
  131. {trustgraph_base-1.4.19 → trustgraph_base-1.6.4}/trustgraph/schema/services/lookup.py +0 -0
  132. {trustgraph_base-1.4.19 → trustgraph_base-1.6.4}/trustgraph/schema/services/nlp_query.py +0 -0
  133. {trustgraph_base-1.4.19 → trustgraph_base-1.6.4}/trustgraph/schema/services/objects_query.py +0 -0
  134. {trustgraph_base-1.4.19 → trustgraph_base-1.6.4}/trustgraph/schema/services/query.py +0 -0
  135. {trustgraph_base-1.4.19 → trustgraph_base-1.6.4}/trustgraph/schema/services/storage.py +0 -0
  136. {trustgraph_base-1.4.19 → trustgraph_base-1.6.4}/trustgraph/schema/services/structured_query.py +0 -0
  137. {trustgraph_base-1.4.19 → trustgraph_base-1.6.4}/trustgraph_base.egg-info/SOURCES.txt +0 -0
  138. {trustgraph_base-1.4.19 → trustgraph_base-1.6.4}/trustgraph_base.egg-info/dependency_links.txt +0 -0
  139. {trustgraph_base-1.4.19 → trustgraph_base-1.6.4}/trustgraph_base.egg-info/requires.txt +0 -0
  140. {trustgraph_base-1.4.19 → trustgraph_base-1.6.4}/trustgraph_base.egg-info/top_level.txt +0 -0
{trustgraph_base-1.4.19 → trustgraph_base-1.6.4}/PKG-INFO:
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: trustgraph-base
-Version: 1.4.19
+Version: 1.6.4
 Summary: TrustGraph provides a means to run a pipeline of flexible AI processing components in a flexible means to achieve a processing pipeline.
 Author-email: "trustgraph.ai" <security@trustgraph.ai>
 Project-URL: Homepage, https://github.com/trustgraph-ai/trustgraph
trustgraph/base/__init__.py:
@@ -12,7 +12,7 @@ from . parameter_spec import ParameterSpec
 from . producer_spec import ProducerSpec
 from . subscriber_spec import SubscriberSpec
 from . request_response_spec import RequestResponseSpec
-from . llm_service import LlmService, LlmResult
+from . llm_service import LlmService, LlmResult, LlmChunk
 from . chunking_service import ChunkingService
 from . embeddings_service import EmbeddingsService
 from . embeddings_client import EmbeddingsClientSpec
trustgraph/base/embeddings_service.py:
@@ -9,7 +9,7 @@ from prometheus_client import Histogram
 
 from .. schema import EmbeddingsRequest, EmbeddingsResponse, Error
 from .. exceptions import TooManyRequests
-from .. base import FlowProcessor, ConsumerSpec, ProducerSpec
+from .. base import FlowProcessor, ConsumerSpec, ProducerSpec, ParameterSpec
 
 # Module logger
 logger = logging.getLogger(__name__)
@@ -45,6 +45,12 @@ class EmbeddingsService(FlowProcessor):
             )
         )
 
+        self.register_specification(
+            ParameterSpec(
+                name = "model",
+            )
+        )
+
     async def on_request(self, msg, consumer, flow):
 
         try:
@@ -57,7 +63,9 @@ class EmbeddingsService(FlowProcessor):
 
             logger.debug(f"Handling embeddings request {id}...")
 
-            vectors = await self.on_embeddings(request.text)
+            # Pass model from request if specified (non-empty), otherwise use default
+            model = flow("model")
+            vectors = await self.on_embeddings(request.text, model=model)
 
             await flow("response").send(
                 EmbeddingsResponse(
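The two embeddings_service.py hunks above register a per-flow "model" parameter and pass it through to on_embeddings. A minimal sketch of what a concrete backend could look like under the new signature; the sentence-transformers dependency, the class name and the default model name are illustrative assumptions, not part of this package:

    # Hypothetical backend: caches one encoder per requested model name.
    from sentence_transformers import SentenceTransformer
    from trustgraph.base import EmbeddingsService

    class LocalEmbeddings(EmbeddingsService):

        def __init__(self, **params):
            super().__init__(**params)
            # Default model name here is an assumption for the sketch
            self.default_model = params.get("model", "all-MiniLM-L6-v2")
            self.encoders = {}

        async def on_embeddings(self, text, model=None):
            name = model or self.default_model
            if name not in self.encoders:
                self.encoders[name] = SentenceTransformer(name)
            # encode() returns a numpy array; convert to plain lists before returning
            return self.encoders[name].encode([text]).tolist()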
trustgraph/base/llm_service.py:
@@ -28,6 +28,19 @@ class LlmResult:
         self.model = model
     __slots__ = ["text", "in_token", "out_token", "model"]
 
+class LlmChunk:
+    """Represents a streaming chunk from an LLM"""
+    def __init__(
+        self, text = None, in_token = None, out_token = None,
+        model = None, is_final = False,
+    ):
+        self.text = text
+        self.in_token = in_token
+        self.out_token = out_token
+        self.model = model
+        self.is_final = is_final
+    __slots__ = ["text", "in_token", "out_token", "model", "is_final"]
+
 class LlmService(FlowProcessor):
 
     def __init__(self, **params):
@@ -99,16 +112,57 @@ class LlmService(FlowProcessor):
 
             id = msg.properties()["id"]
 
-            with __class__.text_completion_metric.labels(
-                id=self.id,
-                flow=f"{flow.name}-{consumer.name}",
-            ).time():
-
-                model = flow("model")
-                temperature = flow("temperature")
-
-                response = await self.generate_content(
-                    request.system, request.prompt, model, temperature
+            model = flow("model")
+            temperature = flow("temperature")
+
+            # Check if streaming is requested and supported
+            streaming = getattr(request, 'streaming', False)
+
+            if streaming and self.supports_streaming():
+
+                # Streaming mode
+                with __class__.text_completion_metric.labels(
+                    id=self.id,
+                    flow=f"{flow.name}-{consumer.name}",
+                ).time():
+
+                    async for chunk in self.generate_content_stream(
+                        request.system, request.prompt, model, temperature
+                    ):
+                        await flow("response").send(
+                            TextCompletionResponse(
+                                error=None,
+                                response=chunk.text,
+                                in_token=chunk.in_token,
+                                out_token=chunk.out_token,
+                                model=chunk.model,
+                                end_of_stream=chunk.is_final
+                            ),
+                            properties={"id": id}
+                        )
+
+            else:
+
+                # Non-streaming mode (original behavior)
+                with __class__.text_completion_metric.labels(
+                    id=self.id,
+                    flow=f"{flow.name}-{consumer.name}",
+                ).time():
+
+                    response = await self.generate_content(
+                        request.system, request.prompt, model, temperature
+                    )
+
+                    await flow("response").send(
+                        TextCompletionResponse(
+                            error=None,
+                            response=response.text,
+                            in_token=response.in_token,
+                            out_token=response.out_token,
+                            model=response.model,
+                            end_of_stream=True
+                        ),
+                        properties={"id": id}
                 )
 
             __class__.text_completion_model_metric.labels(
@@ -119,17 +173,6 @@ class LlmService(FlowProcessor):
                 "temperature": str(temperature) if temperature is not None else "",
             })
 
-            await flow("response").send(
-                TextCompletionResponse(
-                    error=None,
-                    response=response.text,
-                    in_token=response.in_token,
-                    out_token=response.out_token,
-                    model=response.model
-                ),
-                properties={"id": id}
-            )
-
         except TooManyRequests as e:
             raise e
@@ -151,10 +194,26 @@ class LlmService(FlowProcessor):
                     in_token=None,
                     out_token=None,
                     model=None,
+                    end_of_stream=True
                 ),
                 properties={"id": id}
             )
 
+    def supports_streaming(self):
+        """
+        Override in subclass to indicate streaming support.
+        Returns False by default.
+        """
+        return False
+
+    async def generate_content_stream(self, system, prompt, model=None, temperature=None):
+        """
+        Override in subclass to implement streaming.
+        Should yield LlmChunk objects.
+        The final chunk should have is_final=True.
+        """
+        raise NotImplementedError("Streaming not implemented for this provider")
+
     @staticmethod
     def add_args(parser):
 
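The supports_streaming / generate_content_stream pair above is the contract a provider has to satisfy before the streaming branch in on_request fires. A toy sketch of a subclass honouring that contract; the class name, the echo behaviour and the token counts are invented for illustration:

    from trustgraph.base import LlmService, LlmResult, LlmChunk

    class EchoLlm(LlmService):
        """Toy provider that streams the prompt back word by word."""

        def supports_streaming(self):
            return True

        async def generate_content(self, system, prompt, model=None, temperature=None):
            # Non-streaming fallback still has to work
            return LlmResult(text=prompt, in_token=0, out_token=0, model="echo")

        async def generate_content_stream(self, system, prompt, model=None, temperature=None):
            words = prompt.split()
            for n, word in enumerate(words):
                final = (n == len(words) - 1)
                # Only the last chunk carries is_final=True; token counts
                # can be reported on that chunk as well.
                yield LlmChunk(
                    text=word + " ",
                    in_token=0 if final else None,
                    out_token=len(words) if final else None,
                    model="echo",
                    is_final=final,
                )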
trustgraph/base/prompt_client.py (new file):
@@ -0,0 +1,171 @@
+
+import json
+import asyncio
+import logging
+
+from . request_response_spec import RequestResponse, RequestResponseSpec
+from .. schema import PromptRequest, PromptResponse
+
+logger = logging.getLogger(__name__)
+
+class PromptClient(RequestResponse):
+
+    async def prompt(self, id, variables, timeout=600, streaming=False, chunk_callback=None):
+        logger.info(f"DEBUG prompt_client: prompt called, id={id}, streaming={streaming}, chunk_callback={chunk_callback is not None}")
+
+        if not streaming:
+            logger.info("DEBUG prompt_client: Non-streaming path")
+            # Non-streaming path
+            resp = await self.request(
+                PromptRequest(
+                    id = id,
+                    terms = {
+                        k: json.dumps(v)
+                        for k, v in variables.items()
+                    },
+                    streaming = False
+                ),
+                timeout=timeout
+            )
+
+            if resp.error:
+                raise RuntimeError(resp.error.message)
+
+            if resp.text: return resp.text
+
+            return json.loads(resp.object)
+
+        else:
+            logger.info("DEBUG prompt_client: Streaming path")
+            # Streaming path - collect all chunks
+            full_text = ""
+            full_object = None
+
+            async def collect_chunks(resp):
+                nonlocal full_text, full_object
+                logger.info(f"DEBUG prompt_client: collect_chunks called, resp.text={resp.text[:50] if resp.text else None}, end_of_stream={getattr(resp, 'end_of_stream', False)}")
+
+                if resp.error:
+                    logger.error(f"DEBUG prompt_client: Error in response: {resp.error.message}")
+                    raise RuntimeError(resp.error.message)
+
+                if resp.text:
+                    full_text += resp.text
+                    logger.info(f"DEBUG prompt_client: Accumulated {len(full_text)} chars")
+                    # Call chunk callback if provided
+                    if chunk_callback:
+                        logger.info(f"DEBUG prompt_client: Calling chunk_callback")
+                        if asyncio.iscoroutinefunction(chunk_callback):
+                            await chunk_callback(resp.text)
+                        else:
+                            chunk_callback(resp.text)
+                elif resp.object:
+                    logger.info(f"DEBUG prompt_client: Got object response")
+                    full_object = resp.object
+
+                end_stream = getattr(resp, 'end_of_stream', False)
+                logger.info(f"DEBUG prompt_client: Returning end_of_stream={end_stream}")
+                return end_stream
+
+            logger.info("DEBUG prompt_client: Creating PromptRequest")
+            req = PromptRequest(
+                id = id,
+                terms = {
+                    k: json.dumps(v)
+                    for k, v in variables.items()
+                },
+                streaming = True
+            )
+            logger.info(f"DEBUG prompt_client: About to call self.request with recipient, timeout={timeout}")
+            await self.request(
+                req,
+                recipient=collect_chunks,
+                timeout=timeout
+            )
+            logger.info(f"DEBUG prompt_client: self.request returned, full_text has {len(full_text)} chars")
+
+            if full_text:
+                logger.info("DEBUG prompt_client: Returning full_text")
+                return full_text
+
+            logger.info("DEBUG prompt_client: Returning parsed full_object")
+            return json.loads(full_object)
+
+    async def extract_definitions(self, text, timeout=600):
+        return await self.prompt(
+            id = "extract-definitions",
+            variables = { "text": text },
+            timeout = timeout,
+        )
+
+    async def extract_relationships(self, text, timeout=600):
+        return await self.prompt(
+            id = "extract-relationships",
+            variables = { "text": text },
+            timeout = timeout,
+        )
+
+    async def extract_objects(self, text, schema, timeout=600):
+        return await self.prompt(
+            id = "extract-rows",
+            variables = { "text": text, "schema": schema, },
+            timeout = timeout,
+        )
+
+    async def kg_prompt(self, query, kg, timeout=600, streaming=False, chunk_callback=None):
+        return await self.prompt(
+            id = "kg-prompt",
+            variables = {
+                "query": query,
+                "knowledge": [
+                    { "s": v[0], "p": v[1], "o": v[2] }
+                    for v in kg
+                ]
+            },
+            timeout = timeout,
+            streaming = streaming,
+            chunk_callback = chunk_callback,
+        )
+
+    async def document_prompt(self, query, documents, timeout=600, streaming=False, chunk_callback=None):
+        return await self.prompt(
+            id = "document-prompt",
+            variables = {
+                "query": query,
+                "documents": documents,
+            },
+            timeout = timeout,
+            streaming = streaming,
+            chunk_callback = chunk_callback,
+        )
+
+    async def agent_react(self, variables, timeout=600, streaming=False, chunk_callback=None):
+        return await self.prompt(
+            id = "agent-react",
+            variables = variables,
+            timeout = timeout,
+            streaming = streaming,
+            chunk_callback = chunk_callback,
+        )
+
+    async def question(self, question, timeout=600):
+        return await self.prompt(
+            id = "question",
+            variables = {
+                "question": question,
+            },
+            timeout = timeout,
+        )
+
+class PromptClientSpec(RequestResponseSpec):
+    def __init__(
+        self, request_name, response_name,
+    ):
+        super(PromptClientSpec, self).__init__(
+            request_name = request_name,
+            request_schema = PromptRequest,
+            response_name = response_name,
+            response_schema = PromptResponse,
+            impl = PromptClient,
+        )
+
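Callers drive the streaming path through the new streaming / chunk_callback arguments; the client still returns the fully accumulated text once end_of_stream arrives. A usage sketch, assuming prompt_client is a PromptClient instance obtained from the flow framework (the function name and the print-based callback are illustrative):

    async def answer_with_progress(prompt_client, query, triples):

        async def on_chunk(fragment):
            # Invoked once per streamed fragment as it arrives
            print(fragment, end="", flush=True)

        # triples is an iterable of (s, p, o) tuples, as kg_prompt expects
        return await prompt_client.kg_prompt(
            query,
            triples,
            streaming=True,
            chunk_callback=on_chunk,
        )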
trustgraph/base/subscriber.py:
@@ -43,12 +43,18 @@ class Subscriber:
 
     async def start(self):
 
-        self.consumer = self.client.subscribe(
-            topic = self.topic,
-            subscription_name = self.subscription,
-            consumer_name = self.consumer_name,
-            schema = JsonSchema(self.schema),
-        )
+        # Build subscribe arguments
+        subscribe_args = {
+            'topic': self.topic,
+            'subscription_name': self.subscription,
+            'consumer_name': self.consumer_name,
+        }
+
+        # Only add schema if provided (omit if None)
+        if self.schema is not None:
+            subscribe_args['schema'] = JsonSchema(self.schema)
+
+        self.consumer = self.client.subscribe(**subscribe_args)
 
         self.task = asyncio.create_task(self.run())
 
@@ -87,10 +93,14 @@ class Subscriber:
             if self.draining and drain_end_time is None:
                 drain_end_time = time.time() + self.drain_timeout
                 logger.info(f"Subscriber entering drain mode, timeout={self.drain_timeout}s")
-
+
             # Stop accepting new messages from Pulsar during drain
             if self.consumer:
-                self.consumer.pause_message_listener()
+                try:
+                    self.consumer.pause_message_listener()
+                except _pulsar.InvalidConfiguration:
+                    # Not all consumers have message listeners (e.g., blocking receive mode)
+                    pass
 
             # Check drain timeout
             if self.draining and drain_end_time and time.time() > drain_end_time:
@@ -145,12 +155,21 @@ class Subscriber:
         finally:
             # Negative acknowledge any pending messages
             for msg in self.pending_acks.values():
-                self.consumer.negative_acknowledge(msg)
+                try:
+                    self.consumer.negative_acknowledge(msg)
+                except _pulsar.AlreadyClosed:
+                    pass # Consumer already closed
             self.pending_acks.clear()
 
             if self.consumer:
-                self.consumer.unsubscribe()
-                self.consumer.close()
+                try:
+                    self.consumer.unsubscribe()
+                except _pulsar.AlreadyClosed:
+                    pass # Already closed
+                try:
+                    self.consumer.close()
+                except _pulsar.AlreadyClosed:
+                    pass # Already closed
             self.consumer = None
 
 
trustgraph/base/text_completion_client.py (new file):
@@ -0,0 +1,57 @@
+
+from . request_response_spec import RequestResponse, RequestResponseSpec
+from .. schema import TextCompletionRequest, TextCompletionResponse
+
+class TextCompletionClient(RequestResponse):
+    async def text_completion(self, system, prompt, streaming=False, timeout=600):
+        # If not streaming, use original behavior
+        if not streaming:
+            resp = await self.request(
+                TextCompletionRequest(
+                    system = system, prompt = prompt, streaming = False
+                ),
+                timeout=timeout
+            )
+
+            if resp.error:
+                raise RuntimeError(resp.error.message)
+
+            return resp.response
+
+        # For streaming: collect all chunks and return complete response
+        full_response = ""
+
+        async def collect_chunks(resp):
+            nonlocal full_response
+
+            if resp.error:
+                raise RuntimeError(resp.error.message)
+
+            if resp.response:
+                full_response += resp.response
+
+            # Return True when end_of_stream is reached
+            return getattr(resp, 'end_of_stream', False)
+
+        await self.request(
+            TextCompletionRequest(
+                system = system, prompt = prompt, streaming = True
+            ),
+            recipient=collect_chunks,
+            timeout=timeout
+        )
+
+        return full_response
+
+class TextCompletionClientSpec(RequestResponseSpec):
+    def __init__(
+        self, request_name, response_name,
+    ):
+        super(TextCompletionClientSpec, self).__init__(
+            request_name = request_name,
+            request_schema = TextCompletionRequest,
+            response_name = response_name,
+            response_schema = TextCompletionResponse,
+            impl = TextCompletionClient,
+        )
+
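Even with streaming=True this client accumulates the chunks and hands back one string; the difference is only in how the response travels. A minimal usage sketch, assuming llm is a TextCompletionClient wired up through TextCompletionClientSpec (the prompts are placeholders):

    async def summarise(llm, text):
        # streaming=True exercises the chunked path; the full text is still
        # returned once the end_of_stream response is seen.
        return await llm.text_completion(
            system="You are a concise summariser.",
            prompt=f"Summarise the following text:\n{text}",
            streaming=True,
        )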
trustgraph/base_version.py (new file):
@@ -0,0 +1 @@
+__version__ = "1.6.4"
trustgraph/clients/llm_client.py (new file):
@@ -0,0 +1,105 @@
+
+import _pulsar
+
+from .. schema import TextCompletionRequest, TextCompletionResponse
+from .. schema import text_completion_request_queue
+from .. schema import text_completion_response_queue
+from . base import BaseClient
+from .. exceptions import LlmError
+
+# Ugly
+ERROR=_pulsar.LoggerLevel.Error
+WARN=_pulsar.LoggerLevel.Warn
+INFO=_pulsar.LoggerLevel.Info
+DEBUG=_pulsar.LoggerLevel.Debug
+
+class LlmClient(BaseClient):
+
+    def __init__(
+        self, log_level=ERROR,
+        subscriber=None,
+        input_queue=None,
+        output_queue=None,
+        pulsar_host="pulsar://pulsar:6650",
+        pulsar_api_key=None,
+    ):
+
+        if input_queue is None: input_queue = text_completion_request_queue
+        if output_queue is None: output_queue = text_completion_response_queue
+
+        super(LlmClient, self).__init__(
+            log_level=log_level,
+            subscriber=subscriber,
+            input_queue=input_queue,
+            output_queue=output_queue,
+            pulsar_host=pulsar_host,
+            pulsar_api_key=pulsar_api_key,
+            input_schema=TextCompletionRequest,
+            output_schema=TextCompletionResponse,
+        )
+
+    def request(self, system, prompt, timeout=300, streaming=False):
+        """
+        Non-streaming request (backward compatible).
+        Returns complete response string.
+        """
+        if streaming:
+            raise ValueError("Use request_stream() for streaming requests")
+        return self.call(
+            system=system, prompt=prompt, streaming=False, timeout=timeout
+        ).response
+
+    def request_stream(self, system, prompt, timeout=300):
+        """
+        Streaming request generator.
+        Yields response chunks as they arrive.
+        Usage:
+            for chunk in client.request_stream(system, prompt):
+                print(chunk.response, end='', flush=True)
+        """
+        import time
+        import uuid
+
+        id = str(uuid.uuid4())
+        request = TextCompletionRequest(
+            system=system, prompt=prompt, streaming=True
+        )
+
+        end_time = time.time() + timeout
+        self.producer.send(request, properties={"id": id})
+
+        # Collect responses until end_of_stream
+        while time.time() < end_time:
+            try:
+                msg = self.consumer.receive(timeout_millis=2500)
+            except Exception:
+                continue
+
+            mid = msg.properties()["id"]
+
+            if mid == id:
+                value = msg.value()
+
+                # Handle errors
+                if value.error:
+                    self.consumer.acknowledge(msg)
+                    if value.error.type == "llm-error":
+                        raise LlmError(value.error.message)
+                    else:
+                        raise RuntimeError(
+                            f"{value.error.type}: {value.error.message}"
+                        )
+
+                self.consumer.acknowledge(msg)
+                yield value
+
+                # Check if this is the final chunk
+                if getattr(value, 'end_of_stream', True):
+                    break
+            else:
+                # Ignore messages with wrong ID
+                self.consumer.acknowledge(msg)
+
+        if time.time() >= end_time:
+            raise TimeoutError("Timed out waiting for response")
+
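The request_stream docstring above already shows the generator-style call; putting both modes side by side for contrast, with a placeholder Pulsar host (the host, prompts and surrounding script are illustrative, not prescribed by the client):

    from trustgraph.clients.llm_client import LlmClient

    client = LlmClient(pulsar_host="pulsar://localhost:6650")

    # One-shot request: returns the complete response string
    print(client.request("You are terse.", "Name three graph databases."))

    # Streaming request: yields TextCompletionResponse values until end_of_stream
    for chunk in client.request_stream("You are terse.", "Explain RDF triples."):
        print(chunk.response or "", end="", flush=True)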
trustgraph/messaging/translators/agent.py (new file):
@@ -0,0 +1,70 @@
+from typing import Dict, Any, Tuple
+from ...schema import AgentRequest, AgentResponse
+from .base import MessageTranslator
+
+
+class AgentRequestTranslator(MessageTranslator):
+    """Translator for AgentRequest schema objects"""
+
+    def to_pulsar(self, data: Dict[str, Any]) -> AgentRequest:
+        return AgentRequest(
+            question=data["question"],
+            state=data.get("state", None),
+            group=data.get("group", None),
+            history=data.get("history", []),
+            user=data.get("user", "trustgraph"),
+            streaming=data.get("streaming", False)
+        )
+
+    def from_pulsar(self, obj: AgentRequest) -> Dict[str, Any]:
+        return {
+            "question": obj.question,
+            "state": obj.state,
+            "group": obj.group,
+            "history": obj.history,
+            "user": obj.user,
+            "streaming": getattr(obj, "streaming", False)
+        }
+
+
+class AgentResponseTranslator(MessageTranslator):
+    """Translator for AgentResponse schema objects"""
+
+    def to_pulsar(self, data: Dict[str, Any]) -> AgentResponse:
+        raise NotImplementedError("Response translation to Pulsar not typically needed")
+
+    def from_pulsar(self, obj: AgentResponse) -> Dict[str, Any]:
+        result = {}
+
+        # Check if this is a streaming response (has chunk_type)
+        if hasattr(obj, 'chunk_type') and obj.chunk_type:
+            result["chunk_type"] = obj.chunk_type
+            if obj.content:
+                result["content"] = obj.content
+            result["end_of_message"] = getattr(obj, "end_of_message", False)
+            result["end_of_dialog"] = getattr(obj, "end_of_dialog", False)
+        else:
+            # Legacy format
+            if obj.answer:
+                result["answer"] = obj.answer
+            if obj.thought:
+                result["thought"] = obj.thought
+            if obj.observation:
+                result["observation"] = obj.observation
+
+        # Always include error if present
+        if hasattr(obj, 'error') and obj.error and obj.error.message:
+            result["error"] = {"message": obj.error.message, "code": obj.error.code}
+
+        return result
+
+    def from_response_with_completion(self, obj: AgentResponse) -> Tuple[Dict[str, Any], bool]:
+        """Returns (response_dict, is_final)"""
+        # For streaming responses, check end_of_dialog
+        if hasattr(obj, 'chunk_type') and obj.chunk_type:
+            is_final = getattr(obj, 'end_of_dialog', False)
+        else:
+            # For legacy responses, check if answer is present
+            is_final = (obj.answer is not None)
+
+        return self.from_pulsar(obj), is_final
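from_response_with_completion gives callers one loop for both streaming and legacy agent responses. A sketch of the consuming side, where websocket.send_json stands in for whatever transport the caller actually uses (the transport call and the responses iterator are assumptions):

    from trustgraph.messaging.translators.agent import AgentResponseTranslator

    translator = AgentResponseTranslator()

    async def relay(responses, websocket):
        # responses: any async iterator of AgentResponse messages for one question
        async for resp in responses:
            payload, is_final = translator.from_response_with_completion(resp)
            await websocket.send_json(payload)  # assumed transport call
            if is_final:
                break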
trustgraph/messaging/translators/prompt.py:
@@ -16,10 +16,11 @@ class PromptRequestTranslator(MessageTranslator):
             k: json.dumps(v)
             for k, v in data["variables"].items()
         }
-
+
         return PromptRequest(
             id=data.get("id"),
-            terms=terms
+            terms=terms,
+            streaming=data.get("streaming", False)
         )
 
     def from_pulsar(self, obj: PromptRequest) -> Dict[str, Any]:
@@ -51,4 +52,6 @@ class PromptResponseTranslator(MessageTranslator):
 
     def from_response_with_completion(self, obj: PromptResponse) -> Tuple[Dict[str, Any], bool]:
         """Returns (response_dict, is_final)"""
-        return self.from_pulsar(obj), True
+        # Check end_of_stream field to determine if this is the final message
+        is_final = getattr(obj, 'end_of_stream', True)
+        return self.from_pulsar(obj), is_final