trustgraph-cli 2.2.26__tar.gz → 2.3.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (87)
  1. {trustgraph_cli-2.2.26 → trustgraph_cli-2.3.0}/PKG-INFO +2 -2
  2. {trustgraph_cli-2.2.26 → trustgraph_cli-2.3.0}/pyproject.toml +1 -1
  3. {trustgraph_cli-2.2.26 → trustgraph_cli-2.3.0}/trustgraph/cli/invoke_agent.py +19 -1
  4. {trustgraph_cli-2.2.26 → trustgraph_cli-2.3.0}/trustgraph/cli/invoke_document_rag.py +30 -4
  5. {trustgraph_cli-2.2.26 → trustgraph_cli-2.3.0}/trustgraph/cli/invoke_graph_rag.py +29 -4
  6. {trustgraph_cli-2.2.26 → trustgraph_cli-2.3.0}/trustgraph/cli/invoke_llm.py +29 -6
  7. {trustgraph_cli-2.2.26 → trustgraph_cli-2.3.0}/trustgraph/cli/invoke_prompt.py +29 -7
  8. {trustgraph_cli-2.2.26 → trustgraph_cli-2.3.0}/trustgraph/cli/invoke_sparql_query.py +5 -0
  9. trustgraph_cli-2.3.0/trustgraph/cli_version.py +1 -0
  10. {trustgraph_cli-2.2.26 → trustgraph_cli-2.3.0}/trustgraph_cli.egg-info/PKG-INFO +2 -2
  11. {trustgraph_cli-2.2.26 → trustgraph_cli-2.3.0}/trustgraph_cli.egg-info/requires.txt +1 -1
  12. trustgraph_cli-2.2.26/trustgraph/cli_version.py +0 -1
  13. {trustgraph_cli-2.2.26 → trustgraph_cli-2.3.0}/README.md +0 -0
  14. {trustgraph_cli-2.2.26 → trustgraph_cli-2.3.0}/setup.cfg +0 -0
  15. {trustgraph_cli-2.2.26 → trustgraph_cli-2.3.0}/trustgraph/cli/__init__.py +0 -0
  16. {trustgraph_cli-2.2.26 → trustgraph_cli-2.3.0}/trustgraph/cli/add_library_document.py +0 -0
  17. {trustgraph_cli-2.2.26 → trustgraph_cli-2.3.0}/trustgraph/cli/delete_collection.py +0 -0
  18. {trustgraph_cli-2.2.26 → trustgraph_cli-2.3.0}/trustgraph/cli/delete_config_item.py +0 -0
  19. {trustgraph_cli-2.2.26 → trustgraph_cli-2.3.0}/trustgraph/cli/delete_flow_blueprint.py +0 -0
  20. {trustgraph_cli-2.2.26 → trustgraph_cli-2.3.0}/trustgraph/cli/delete_kg_core.py +0 -0
  21. {trustgraph_cli-2.2.26 → trustgraph_cli-2.3.0}/trustgraph/cli/delete_mcp_tool.py +0 -0
  22. {trustgraph_cli-2.2.26 → trustgraph_cli-2.3.0}/trustgraph/cli/delete_tool.py +0 -0
  23. {trustgraph_cli-2.2.26 → trustgraph_cli-2.3.0}/trustgraph/cli/dump_msgpack.py +0 -0
  24. {trustgraph_cli-2.2.26 → trustgraph_cli-2.3.0}/trustgraph/cli/dump_queues.py +0 -0
  25. {trustgraph_cli-2.2.26 → trustgraph_cli-2.3.0}/trustgraph/cli/get_config_item.py +0 -0
  26. {trustgraph_cli-2.2.26 → trustgraph_cli-2.3.0}/trustgraph/cli/get_document_content.py +0 -0
  27. {trustgraph_cli-2.2.26 → trustgraph_cli-2.3.0}/trustgraph/cli/get_flow_blueprint.py +0 -0
  28. {trustgraph_cli-2.2.26 → trustgraph_cli-2.3.0}/trustgraph/cli/get_kg_core.py +0 -0
  29. {trustgraph_cli-2.2.26 → trustgraph_cli-2.3.0}/trustgraph/cli/graph_to_turtle.py +0 -0
  30. {trustgraph_cli-2.2.26 → trustgraph_cli-2.3.0}/trustgraph/cli/init_pulsar_manager.py +0 -0
  31. {trustgraph_cli-2.2.26 → trustgraph_cli-2.3.0}/trustgraph/cli/init_trustgraph.py +0 -0
  32. {trustgraph_cli-2.2.26 → trustgraph_cli-2.3.0}/trustgraph/cli/invoke_document_embeddings.py +0 -0
  33. {trustgraph_cli-2.2.26 → trustgraph_cli-2.3.0}/trustgraph/cli/invoke_embeddings.py +0 -0
  34. {trustgraph_cli-2.2.26 → trustgraph_cli-2.3.0}/trustgraph/cli/invoke_graph_embeddings.py +0 -0
  35. {trustgraph_cli-2.2.26 → trustgraph_cli-2.3.0}/trustgraph/cli/invoke_mcp_tool.py +0 -0
  36. {trustgraph_cli-2.2.26 → trustgraph_cli-2.3.0}/trustgraph/cli/invoke_nlp_query.py +0 -0
  37. {trustgraph_cli-2.2.26 → trustgraph_cli-2.3.0}/trustgraph/cli/invoke_row_embeddings.py +0 -0
  38. {trustgraph_cli-2.2.26 → trustgraph_cli-2.3.0}/trustgraph/cli/invoke_rows_query.py +0 -0
  39. {trustgraph_cli-2.2.26 → trustgraph_cli-2.3.0}/trustgraph/cli/invoke_structured_query.py +0 -0
  40. {trustgraph_cli-2.2.26 → trustgraph_cli-2.3.0}/trustgraph/cli/list_collections.py +0 -0
  41. {trustgraph_cli-2.2.26 → trustgraph_cli-2.3.0}/trustgraph/cli/list_config_items.py +0 -0
  42. {trustgraph_cli-2.2.26 → trustgraph_cli-2.3.0}/trustgraph/cli/list_explain_traces.py +0 -0
  43. {trustgraph_cli-2.2.26 → trustgraph_cli-2.3.0}/trustgraph/cli/load_doc_embeds.py +0 -0
  44. {trustgraph_cli-2.2.26 → trustgraph_cli-2.3.0}/trustgraph/cli/load_kg_core.py +0 -0
  45. {trustgraph_cli-2.2.26 → trustgraph_cli-2.3.0}/trustgraph/cli/load_knowledge.py +0 -0
  46. {trustgraph_cli-2.2.26 → trustgraph_cli-2.3.0}/trustgraph/cli/load_sample_documents.py +0 -0
  47. {trustgraph_cli-2.2.26 → trustgraph_cli-2.3.0}/trustgraph/cli/load_structured_data.py +0 -0
  48. {trustgraph_cli-2.2.26 → trustgraph_cli-2.3.0}/trustgraph/cli/load_turtle.py +0 -0
  49. {trustgraph_cli-2.2.26 → trustgraph_cli-2.3.0}/trustgraph/cli/monitor_prompts.py +0 -0
  50. {trustgraph_cli-2.2.26 → trustgraph_cli-2.3.0}/trustgraph/cli/put_config_item.py +0 -0
  51. {trustgraph_cli-2.2.26 → trustgraph_cli-2.3.0}/trustgraph/cli/put_flow_blueprint.py +0 -0
  52. {trustgraph_cli-2.2.26 → trustgraph_cli-2.3.0}/trustgraph/cli/put_kg_core.py +0 -0
  53. {trustgraph_cli-2.2.26 → trustgraph_cli-2.3.0}/trustgraph/cli/query_graph.py +0 -0
  54. {trustgraph_cli-2.2.26 → trustgraph_cli-2.3.0}/trustgraph/cli/remove_library_document.py +0 -0
  55. {trustgraph_cli-2.2.26 → trustgraph_cli-2.3.0}/trustgraph/cli/save_doc_embeds.py +0 -0
  56. {trustgraph_cli-2.2.26 → trustgraph_cli-2.3.0}/trustgraph/cli/set_collection.py +0 -0
  57. {trustgraph_cli-2.2.26 → trustgraph_cli-2.3.0}/trustgraph/cli/set_mcp_tool.py +0 -0
  58. {trustgraph_cli-2.2.26 → trustgraph_cli-2.3.0}/trustgraph/cli/set_prompt.py +0 -0
  59. {trustgraph_cli-2.2.26 → trustgraph_cli-2.3.0}/trustgraph/cli/set_token_costs.py +0 -0
  60. {trustgraph_cli-2.2.26 → trustgraph_cli-2.3.0}/trustgraph/cli/set_tool.py +0 -0
  61. {trustgraph_cli-2.2.26 → trustgraph_cli-2.3.0}/trustgraph/cli/show_config.py +0 -0
  62. {trustgraph_cli-2.2.26 → trustgraph_cli-2.3.0}/trustgraph/cli/show_explain_trace.py +0 -0
  63. {trustgraph_cli-2.2.26 → trustgraph_cli-2.3.0}/trustgraph/cli/show_extraction_provenance.py +0 -0
  64. {trustgraph_cli-2.2.26 → trustgraph_cli-2.3.0}/trustgraph/cli/show_flow_blueprints.py +0 -0
  65. {trustgraph_cli-2.2.26 → trustgraph_cli-2.3.0}/trustgraph/cli/show_flow_state.py +0 -0
  66. {trustgraph_cli-2.2.26 → trustgraph_cli-2.3.0}/trustgraph/cli/show_flows.py +0 -0
  67. {trustgraph_cli-2.2.26 → trustgraph_cli-2.3.0}/trustgraph/cli/show_graph.py +0 -0
  68. {trustgraph_cli-2.2.26 → trustgraph_cli-2.3.0}/trustgraph/cli/show_kg_cores.py +0 -0
  69. {trustgraph_cli-2.2.26 → trustgraph_cli-2.3.0}/trustgraph/cli/show_library_documents.py +0 -0
  70. {trustgraph_cli-2.2.26 → trustgraph_cli-2.3.0}/trustgraph/cli/show_library_processing.py +0 -0
  71. {trustgraph_cli-2.2.26 → trustgraph_cli-2.3.0}/trustgraph/cli/show_mcp_tools.py +0 -0
  72. {trustgraph_cli-2.2.26 → trustgraph_cli-2.3.0}/trustgraph/cli/show_parameter_types.py +0 -0
  73. {trustgraph_cli-2.2.26 → trustgraph_cli-2.3.0}/trustgraph/cli/show_processor_state.py +0 -0
  74. {trustgraph_cli-2.2.26 → trustgraph_cli-2.3.0}/trustgraph/cli/show_prompts.py +0 -0
  75. {trustgraph_cli-2.2.26 → trustgraph_cli-2.3.0}/trustgraph/cli/show_token_costs.py +0 -0
  76. {trustgraph_cli-2.2.26 → trustgraph_cli-2.3.0}/trustgraph/cli/show_token_rate.py +0 -0
  77. {trustgraph_cli-2.2.26 → trustgraph_cli-2.3.0}/trustgraph/cli/show_tools.py +0 -0
  78. {trustgraph_cli-2.2.26 → trustgraph_cli-2.3.0}/trustgraph/cli/start_flow.py +0 -0
  79. {trustgraph_cli-2.2.26 → trustgraph_cli-2.3.0}/trustgraph/cli/start_library_processing.py +0 -0
  80. {trustgraph_cli-2.2.26 → trustgraph_cli-2.3.0}/trustgraph/cli/stop_flow.py +0 -0
  81. {trustgraph_cli-2.2.26 → trustgraph_cli-2.3.0}/trustgraph/cli/stop_library_processing.py +0 -0
  82. {trustgraph_cli-2.2.26 → trustgraph_cli-2.3.0}/trustgraph/cli/unload_kg_core.py +0 -0
  83. {trustgraph_cli-2.2.26 → trustgraph_cli-2.3.0}/trustgraph/cli/verify_system_status.py +0 -0
  84. {trustgraph_cli-2.2.26 → trustgraph_cli-2.3.0}/trustgraph_cli.egg-info/SOURCES.txt +0 -0
  85. {trustgraph_cli-2.2.26 → trustgraph_cli-2.3.0}/trustgraph_cli.egg-info/dependency_links.txt +0 -0
  86. {trustgraph_cli-2.2.26 → trustgraph_cli-2.3.0}/trustgraph_cli.egg-info/entry_points.txt +0 -0
  87. {trustgraph_cli-2.2.26 → trustgraph_cli-2.3.0}/trustgraph_cli.egg-info/top_level.txt +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: trustgraph-cli
3
- Version: 2.2.26
3
+ Version: 2.3.0
4
4
  Summary: TrustGraph provides a means to run a pipeline of flexible AI processing components in a flexible means to achieve a processing pipeline.
5
5
  Author-email: "trustgraph.ai" <security@trustgraph.ai>
6
6
  Project-URL: Homepage, https://github.com/trustgraph-ai/trustgraph
@@ -8,7 +8,7 @@ Classifier: Programming Language :: Python :: 3
8
8
  Classifier: Operating System :: OS Independent
9
9
  Requires-Python: >=3.8
10
10
  Description-Content-Type: text/markdown
11
- Requires-Dist: trustgraph-base<2.3,>=2.2
11
+ Requires-Dist: trustgraph-base<2.4,>=2.3
12
12
  Requires-Dist: requests
13
13
  Requires-Dist: pulsar-client
14
14
  Requires-Dist: aiohttp
@@ -10,7 +10,7 @@ description = "TrustGraph provides a means to run a pipeline of flexible AI proc
10
10
  readme = "README.md"
11
11
  requires-python = ">=3.8"
12
12
  dependencies = [
13
- "trustgraph-base>=2.2,<2.3",
13
+ "trustgraph-base>=2.3,<2.4",
14
14
  "requests",
15
15
  "pulsar-client",
16
16
  "aiohttp",
@@ -272,7 +272,8 @@ def question(
272
272
  url, question, flow_id, user, collection,
273
273
  plan=None, state=None, group=None, pattern=None,
274
274
  verbose=False, streaming=True,
275
- token=None, explainable=False, debug=False
275
+ token=None, explainable=False, debug=False,
276
+ show_usage=False
276
277
  ):
277
278
  # Explainable mode uses the API to capture and process provenance events
278
279
  if explainable:
@@ -323,6 +324,7 @@ def question(
323
324
  # Track last chunk type and current outputter for streaming
324
325
  last_chunk_type = None
325
326
  current_outputter = None
327
+ last_answer_chunk = None
326
328
 
327
329
  for chunk in response:
328
330
  chunk_type = chunk.chunk_type
@@ -357,6 +359,7 @@ def question(
357
359
  current_outputter.word_buffer = ""
358
360
  elif chunk_type == "final-answer":
359
361
  print(content, end="", flush=True)
362
+ last_answer_chunk = chunk
360
363
 
361
364
  # Close any remaining outputter
362
365
  if current_outputter:
@@ -366,6 +369,14 @@ def question(
366
369
  elif last_chunk_type == "final-answer":
367
370
  print()
368
371
 
372
+ if show_usage and last_answer_chunk:
373
+ print(
374
+ f"Input tokens: {last_answer_chunk.in_token} "
375
+ f"Output tokens: {last_answer_chunk.out_token} "
376
+ f"Model: {last_answer_chunk.model}",
377
+ file=sys.stderr,
378
+ )
379
+
369
380
  else:
370
381
  # Non-streaming response - but agents use multipart messaging
371
382
  # so we iterate through the chunks (which are complete messages, not text chunks)
@@ -477,6 +488,12 @@ def main():
477
488
  help='Show debug output for troubleshooting'
478
489
  )
479
490
 
491
+ parser.add_argument(
492
+ '--show-usage',
493
+ action='store_true',
494
+ help='Show token usage and model on stderr'
495
+ )
496
+
480
497
  args = parser.parse_args()
481
498
 
482
499
  try:
@@ -496,6 +513,7 @@ def main():
496
513
  token = args.token,
497
514
  explainable = args.explainable,
498
515
  debug = args.debug,
516
+ show_usage = args.show_usage,
499
517
  )
500
518
 
501
519
  except Exception as e:
@@ -99,7 +99,8 @@ def question_explainable(
99
99
 
100
100
  def question(
101
101
  url, flow_id, question_text, user, collection, doc_limit,
102
- streaming=True, token=None, explainable=False, debug=False
102
+ streaming=True, token=None, explainable=False, debug=False,
103
+ show_usage=False
103
104
  ):
104
105
  # Explainable mode uses the API to capture and process provenance events
105
106
  if explainable:
@@ -133,22 +134,40 @@ def question(
133
134
  )
134
135
 
135
136
  # Stream output
137
+ last_chunk = None
136
138
  for chunk in response:
137
- print(chunk, end="", flush=True)
139
+ print(chunk.content, end="", flush=True)
140
+ last_chunk = chunk
138
141
  print() # Final newline
139
142
 
143
+ if show_usage and last_chunk:
144
+ print(
145
+ f"Input tokens: {last_chunk.in_token} "
146
+ f"Output tokens: {last_chunk.out_token} "
147
+ f"Model: {last_chunk.model}",
148
+ file=sys.stderr,
149
+ )
150
+
140
151
  finally:
141
152
  socket.close()
142
153
  else:
143
154
  # Use REST API for non-streaming
144
155
  flow = api.flow().id(flow_id)
145
- resp = flow.document_rag(
156
+ result = flow.document_rag(
146
157
  query=question_text,
147
158
  user=user,
148
159
  collection=collection,
149
160
  doc_limit=doc_limit,
150
161
  )
151
- print(resp)
162
+ print(result.text)
163
+
164
+ if show_usage:
165
+ print(
166
+ f"Input tokens: {result.in_token} "
167
+ f"Output tokens: {result.out_token} "
168
+ f"Model: {result.model}",
169
+ file=sys.stderr,
170
+ )
152
171
 
153
172
 
154
173
  def main():
@@ -219,6 +238,12 @@ def main():
219
238
  help='Show debug output for troubleshooting'
220
239
  )
221
240
 
241
+ parser.add_argument(
242
+ '--show-usage',
243
+ action='store_true',
244
+ help='Show token usage and model on stderr'
245
+ )
246
+
222
247
  args = parser.parse_args()
223
248
 
224
249
  try:
@@ -234,6 +259,7 @@ def main():
234
259
  token=args.token,
235
260
  explainable=args.explainable,
236
261
  debug=args.debug,
262
+ show_usage=args.show_usage,
237
263
  )
238
264
 
239
265
  except Exception as e:
@@ -753,7 +753,7 @@ def question(
753
753
  url, flow_id, question, user, collection, entity_limit, triple_limit,
754
754
  max_subgraph_size, max_path_length, edge_score_limit=50,
755
755
  edge_limit=25, streaming=True, token=None,
756
- explainable=False, debug=False
756
+ explainable=False, debug=False, show_usage=False
757
757
  ):
758
758
 
759
759
  # Explainable mode uses the API to capture and process provenance events
@@ -798,16 +798,26 @@ def question(
798
798
  )
799
799
 
800
800
  # Stream output
801
+ last_chunk = None
801
802
  for chunk in response:
802
- print(chunk, end="", flush=True)
803
+ print(chunk.content, end="", flush=True)
804
+ last_chunk = chunk
803
805
  print() # Final newline
804
806
 
807
+ if show_usage and last_chunk:
808
+ print(
809
+ f"Input tokens: {last_chunk.in_token} "
810
+ f"Output tokens: {last_chunk.out_token} "
811
+ f"Model: {last_chunk.model}",
812
+ file=sys.stderr,
813
+ )
814
+
805
815
  finally:
806
816
  socket.close()
807
817
  else:
808
818
  # Use REST API for non-streaming
809
819
  flow = api.flow().id(flow_id)
810
- resp = flow.graph_rag(
820
+ result = flow.graph_rag(
811
821
  query=question,
812
822
  user=user,
813
823
  collection=collection,
@@ -818,7 +828,15 @@ def question(
818
828
  edge_score_limit=edge_score_limit,
819
829
  edge_limit=edge_limit,
820
830
  )
821
- print(resp)
831
+ print(result.text)
832
+
833
+ if show_usage:
834
+ print(
835
+ f"Input tokens: {result.in_token} "
836
+ f"Output tokens: {result.out_token} "
837
+ f"Model: {result.model}",
838
+ file=sys.stderr,
839
+ )
822
840
 
823
841
  def main():
824
842
 
@@ -923,6 +941,12 @@ def main():
923
941
  help='Show debug output for troubleshooting'
924
942
  )
925
943
 
944
+ parser.add_argument(
945
+ '--show-usage',
946
+ action='store_true',
947
+ help='Show token usage and model on stderr'
948
+ )
949
+
926
950
  args = parser.parse_args()
927
951
 
928
952
  try:
@@ -943,6 +967,7 @@ def main():
943
967
  token=args.token,
944
968
  explainable=args.explainable,
945
969
  debug=args.debug,
970
+ show_usage=args.show_usage,
946
971
  )
947
972
 
948
973
  except Exception as e:
@@ -10,7 +10,8 @@ from trustgraph.api import Api
10
10
  default_url = os.getenv("TRUSTGRAPH_URL", 'http://localhost:8088/')
11
11
  default_token = os.getenv("TRUSTGRAPH_TOKEN", None)
12
12
 
13
- def query(url, flow_id, system, prompt, streaming=True, token=None):
13
+ def query(url, flow_id, system, prompt, streaming=True, token=None,
14
+ show_usage=False):
14
15
 
15
16
  # Create API client
16
17
  api = Api(url=url, token=token)
@@ -26,14 +27,29 @@ def query(url, flow_id, system, prompt, streaming=True, token=None):
26
27
  )
27
28
 
28
29
  if streaming:
29
- # Stream output to stdout without newline
30
+ last_chunk = None
30
31
  for chunk in response:
31
- print(chunk, end="", flush=True)
32
- # Add final newline after streaming
32
+ print(chunk.content, end="", flush=True)
33
+ last_chunk = chunk
33
34
  print()
35
+
36
+ if show_usage and last_chunk:
37
+ print(
38
+ f"Input tokens: {last_chunk.in_token} "
39
+ f"Output tokens: {last_chunk.out_token} "
40
+ f"Model: {last_chunk.model}",
41
+ file=__import__('sys').stderr,
42
+ )
34
43
  else:
35
- # Non-streaming: print complete response
36
- print(response)
44
+ print(response.text)
45
+
46
+ if show_usage:
47
+ print(
48
+ f"Input tokens: {response.in_token} "
49
+ f"Output tokens: {response.out_token} "
50
+ f"Model: {response.model}",
51
+ file=__import__('sys').stderr,
52
+ )
37
53
 
38
54
  finally:
39
55
  # Clean up socket connection
@@ -82,6 +98,12 @@ def main():
82
98
  help='Disable streaming (default: streaming enabled)'
83
99
  )
84
100
 
101
+ parser.add_argument(
102
+ '--show-usage',
103
+ action='store_true',
104
+ help='Show token usage and model on stderr'
105
+ )
106
+
85
107
  args = parser.parse_args()
86
108
 
87
109
  try:
@@ -93,6 +115,7 @@ def main():
93
115
  prompt=args.prompt[0],
94
116
  streaming=not args.no_streaming,
95
117
  token=args.token,
118
+ show_usage=args.show_usage,
96
119
  )
97
120
 
98
121
  except Exception as e:
@@ -15,7 +15,8 @@ from trustgraph.api import Api
15
15
  default_url = os.getenv("TRUSTGRAPH_URL", 'http://localhost:8088/')
16
16
  default_token = os.getenv("TRUSTGRAPH_TOKEN", None)
17
17
 
18
- def query(url, flow_id, template_id, variables, streaming=True, token=None):
18
+ def query(url, flow_id, template_id, variables, streaming=True, token=None,
19
+ show_usage=False):
19
20
 
20
21
  # Create API client
21
22
  api = Api(url=url, token=token)
@@ -31,16 +32,30 @@ def query(url, flow_id, template_id, variables, streaming=True, token=None):
31
32
  )
32
33
 
33
34
  if streaming:
34
- # Stream output (prompt yields strings directly)
35
+ last_chunk = None
35
36
  for chunk in response:
36
- if chunk:
37
- print(chunk, end="", flush=True)
38
- # Add final newline after streaming
37
+ if chunk.content:
38
+ print(chunk.content, end="", flush=True)
39
+ last_chunk = chunk
39
40
  print()
40
41
 
42
+ if show_usage and last_chunk:
43
+ print(
44
+ f"Input tokens: {last_chunk.in_token} "
45
+ f"Output tokens: {last_chunk.out_token} "
46
+ f"Model: {last_chunk.model}",
47
+ file=__import__('sys').stderr,
48
+ )
41
49
  else:
42
- # Non-streaming: print complete response
43
- print(response)
50
+ print(response.text)
51
+
52
+ if show_usage:
53
+ print(
54
+ f"Input tokens: {response.in_token} "
55
+ f"Output tokens: {response.out_token} "
56
+ f"Model: {response.model}",
57
+ file=__import__('sys').stderr,
58
+ )
44
59
 
45
60
  finally:
46
61
  # Clean up socket connection
@@ -92,6 +107,12 @@ specified multiple times''',
92
107
  help='Disable streaming (default: streaming enabled for text responses)'
93
108
  )
94
109
 
110
+ parser.add_argument(
111
+ '--show-usage',
112
+ action='store_true',
113
+ help='Show token usage and model on stderr'
114
+ )
115
+
95
116
  args = parser.parse_args()
96
117
 
97
118
  variables = {}
@@ -113,6 +134,7 @@ specified multiple times''',
113
134
  variables=variables,
114
135
  streaming=not args.no_streaming,
115
136
  token=args.token,
137
+ show_usage=args.show_usage,
116
138
  )
117
139
 
118
140
  except Exception as e:
@@ -62,6 +62,11 @@ def sparql_query(url, token, flow_id, query, user, collection, limit,
62
62
  limit=limit,
63
63
  batch_size=batch_size,
64
64
  ):
65
+ if "error" in response:
66
+ err = response["error"]
67
+ msg = err.get("message", err) if isinstance(err, dict) else err
68
+ raise RuntimeError(msg)
69
+
65
70
  query_type = response.get("query-type", "select")
66
71
 
67
72
  # ASK queries - just print and return
@@ -0,0 +1 @@
1
+ __version__ = "2.3.0"
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: trustgraph-cli
3
- Version: 2.2.26
3
+ Version: 2.3.0
4
4
  Summary: TrustGraph provides a means to run a pipeline of flexible AI processing components in a flexible means to achieve a processing pipeline.
5
5
  Author-email: "trustgraph.ai" <security@trustgraph.ai>
6
6
  Project-URL: Homepage, https://github.com/trustgraph-ai/trustgraph
@@ -8,7 +8,7 @@ Classifier: Programming Language :: Python :: 3
8
8
  Classifier: Operating System :: OS Independent
9
9
  Requires-Python: >=3.8
10
10
  Description-Content-Type: text/markdown
11
- Requires-Dist: trustgraph-base<2.3,>=2.2
11
+ Requires-Dist: trustgraph-base<2.4,>=2.3
12
12
  Requires-Dist: requests
13
13
  Requires-Dist: pulsar-client
14
14
  Requires-Dist: aiohttp
@@ -1,4 +1,4 @@
1
- trustgraph-base<2.3,>=2.2
1
+ trustgraph-base<2.4,>=2.3
2
2
  requests
3
3
  pulsar-client
4
4
  aiohttp
@@ -1 +0,0 @@
1
- __version__ = "2.2.26"