trustgraph-flow 0.23.1__tar.gz → 0.23.3__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (310)
  1. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/PKG-INFO +2 -2
  2. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/trustgraph/config/service/flow.py +8 -0
  3. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/trustgraph/decoding/pdf/pdf_decoder.py +0 -1
  4. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/trustgraph/extract/kg/relationships/extract.py +1 -1
  5. trustgraph-flow-0.23.3/trustgraph/flow_version.py +1 -0
  6. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/trustgraph/metering/counter.py +47 -31
  7. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/trustgraph/model/text_completion/azure/llm.py +19 -68
  8. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/trustgraph/model/text_completion/azure_openai/llm.py +32 -91
  9. trustgraph-flow-0.23.3/trustgraph/model/text_completion/claude/llm.py +130 -0
  10. trustgraph-flow-0.23.3/trustgraph/model/text_completion/cohere/llm.py +113 -0
  11. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/trustgraph/model/text_completion/googleaistudio/llm.py +19 -81
  12. trustgraph-flow-0.23.3/trustgraph/model/text_completion/llamafile/llm.py +125 -0
  13. trustgraph-flow-0.23.3/trustgraph/model/text_completion/lmstudio/llm.py +131 -0
  14. trustgraph-flow-0.23.3/trustgraph/model/text_completion/mistral/llm.py +142 -0
  15. trustgraph-flow-0.23.3/trustgraph/model/text_completion/ollama/llm.py +85 -0
  16. trustgraph-flow-0.23.3/trustgraph/model/text_completion/openai/llm.py +148 -0
  17. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/trustgraph_flow.egg-info/PKG-INFO +2 -2
  18. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/trustgraph_flow.egg-info/SOURCES.txt +0 -1
  19. trustgraph-flow-0.23.1/trustgraph/flow_version.py +0 -1
  20. trustgraph-flow-0.23.1/trustgraph/metering/pricelist.py +0 -104
  21. trustgraph-flow-0.23.1/trustgraph/model/text_completion/claude/llm.py +0 -195
  22. trustgraph-flow-0.23.1/trustgraph/model/text_completion/cohere/llm.py +0 -174
  23. trustgraph-flow-0.23.1/trustgraph/model/text_completion/llamafile/llm.py +0 -190
  24. trustgraph-flow-0.23.1/trustgraph/model/text_completion/lmstudio/llm.py +0 -193
  25. trustgraph-flow-0.23.1/trustgraph/model/text_completion/mistral/llm.py +0 -207
  26. trustgraph-flow-0.23.1/trustgraph/model/text_completion/ollama/llm.py +0 -150
  27. trustgraph-flow-0.23.1/trustgraph/model/text_completion/openai/llm.py +0 -210
  28. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/README.md +0 -0
  29. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/scripts/agent-manager-react +0 -0
  30. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/scripts/api-gateway +0 -0
  31. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/scripts/chunker-recursive +0 -0
  32. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/scripts/chunker-token +0 -0
  33. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/scripts/config-svc +0 -0
  34. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/scripts/de-query-milvus +0 -0
  35. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/scripts/de-query-pinecone +0 -0
  36. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/scripts/de-query-qdrant +0 -0
  37. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/scripts/de-write-milvus +0 -0
  38. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/scripts/de-write-pinecone +0 -0
  39. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/scripts/de-write-qdrant +0 -0
  40. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/scripts/document-embeddings +0 -0
  41. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/scripts/document-rag +0 -0
  42. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/scripts/embeddings-fastembed +0 -0
  43. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/scripts/embeddings-ollama +0 -0
  44. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/scripts/ge-query-milvus +0 -0
  45. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/scripts/ge-query-pinecone +0 -0
  46. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/scripts/ge-query-qdrant +0 -0
  47. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/scripts/ge-write-milvus +0 -0
  48. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/scripts/ge-write-pinecone +0 -0
  49. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/scripts/ge-write-qdrant +0 -0
  50. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/scripts/graph-embeddings +0 -0
  51. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/scripts/graph-rag +0 -0
  52. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/scripts/kg-extract-definitions +0 -0
  53. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/scripts/kg-extract-relationships +0 -0
  54. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/scripts/kg-extract-topics +0 -0
  55. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/scripts/librarian +0 -0
  56. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/scripts/metering +0 -0
  57. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/scripts/object-extract-row +0 -0
  58. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/scripts/oe-write-milvus +0 -0
  59. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/scripts/pdf-decoder +0 -0
  60. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/scripts/pdf-ocr-mistral +0 -0
  61. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/scripts/prompt-generic +0 -0
  62. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/scripts/prompt-template +0 -0
  63. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/scripts/rows-write-cassandra +0 -0
  64. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/scripts/run-processing +0 -0
  65. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/scripts/text-completion-azure +0 -0
  66. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/scripts/text-completion-azure-openai +0 -0
  67. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/scripts/text-completion-claude +0 -0
  68. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/scripts/text-completion-cohere +0 -0
  69. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/scripts/text-completion-googleaistudio +0 -0
  70. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/scripts/text-completion-llamafile +0 -0
  71. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/scripts/text-completion-lmstudio +0 -0
  72. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/scripts/text-completion-mistral +0 -0
  73. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/scripts/text-completion-ollama +0 -0
  74. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/scripts/text-completion-openai +0 -0
  75. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/scripts/triples-query-cassandra +0 -0
  76. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/scripts/triples-query-falkordb +0 -0
  77. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/scripts/triples-query-memgraph +0 -0
  78. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/scripts/triples-query-neo4j +0 -0
  79. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/scripts/triples-write-cassandra +0 -0
  80. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/scripts/triples-write-falkordb +0 -0
  81. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/scripts/triples-write-memgraph +0 -0
  82. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/scripts/triples-write-neo4j +0 -0
  83. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/scripts/wikipedia-lookup +0 -0
  84. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/setup.cfg +0 -0
  85. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/setup.py +0 -0
  86. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/trustgraph/__init__.py +0 -0
  87. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/trustgraph/agent/__init__.py +0 -0
  88. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/trustgraph/agent/react/__init__.py +0 -0
  89. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/trustgraph/agent/react/__main__.py +0 -0
  90. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/trustgraph/agent/react/agent_manager.py +0 -0
  91. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/trustgraph/agent/react/service.py +0 -0
  92. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/trustgraph/agent/react/tools.py +0 -0
  93. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/trustgraph/agent/react/types.py +0 -0
  94. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/trustgraph/chunking/__init__.py +0 -0
  95. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/trustgraph/chunking/recursive/__init__.py +0 -0
  96. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/trustgraph/chunking/recursive/__main__.py +0 -0
  97. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/trustgraph/chunking/recursive/chunker.py +0 -0
  98. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/trustgraph/chunking/token/__init__.py +0 -0
  99. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/trustgraph/chunking/token/__main__.py +0 -0
  100. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/trustgraph/chunking/token/chunker.py +0 -0
  101. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/trustgraph/config/service/__init__.py +0 -0
  102. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/trustgraph/config/service/__main__.py +0 -0
  103. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/trustgraph/config/service/config.py +0 -0
  104. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/trustgraph/config/service/service.py +0 -0
  105. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/trustgraph/decoding/__init__.py +0 -0
  106. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/trustgraph/decoding/mistral_ocr/__init__.py +0 -0
  107. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/trustgraph/decoding/mistral_ocr/__main__.py +0 -0
  108. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/trustgraph/decoding/mistral_ocr/processor.py +0 -0
  109. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/trustgraph/decoding/pdf/__init__.py +0 -0
  110. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/trustgraph/decoding/pdf/__main__.py +0 -0
  111. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/trustgraph/direct/__init__.py +0 -0
  112. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/trustgraph/direct/cassandra.py +0 -0
  113. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/trustgraph/direct/milvus_doc_embeddings.py +0 -0
  114. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/trustgraph/direct/milvus_graph_embeddings.py +0 -0
  115. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/trustgraph/direct/milvus_object_embeddings.py +0 -0
  116. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/trustgraph/embeddings/__init__.py +0 -0
  117. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/trustgraph/embeddings/document_embeddings/__init__.py +0 -0
  118. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/trustgraph/embeddings/document_embeddings/__main__.py +0 -0
  119. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/trustgraph/embeddings/document_embeddings/embeddings.py +0 -0
  120. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/trustgraph/embeddings/fastembed/__init__.py +0 -0
  121. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/trustgraph/embeddings/fastembed/__main__.py +0 -0
  122. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/trustgraph/embeddings/fastembed/processor.py +0 -0
  123. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/trustgraph/embeddings/graph_embeddings/__init__.py +0 -0
  124. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/trustgraph/embeddings/graph_embeddings/__main__.py +0 -0
  125. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/trustgraph/embeddings/graph_embeddings/embeddings.py +0 -0
  126. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/trustgraph/embeddings/ollama/__init__.py +0 -0
  127. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/trustgraph/embeddings/ollama/__main__.py +0 -0
  128. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/trustgraph/embeddings/ollama/processor.py +0 -0
  129. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/trustgraph/external/__init__.py +0 -0
  130. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/trustgraph/external/wikipedia/__init__.py +0 -0
  131. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/trustgraph/external/wikipedia/__main__.py +0 -0
  132. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/trustgraph/external/wikipedia/service.py +0 -0
  133. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/trustgraph/extract/__init__.py +0 -0
  134. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/trustgraph/extract/kg/__init__.py +0 -0
  135. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/trustgraph/extract/kg/definitions/__init__.py +0 -0
  136. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/trustgraph/extract/kg/definitions/__main__.py +0 -0
  137. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/trustgraph/extract/kg/definitions/extract.py +0 -0
  138. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/trustgraph/extract/kg/relationships/__init__.py +0 -0
  139. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/trustgraph/extract/kg/relationships/__main__.py +0 -0
  140. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/trustgraph/extract/kg/topics/__init__.py +0 -0
  141. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/trustgraph/extract/kg/topics/__main__.py +0 -0
  142. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/trustgraph/extract/kg/topics/extract.py +0 -0
  143. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/trustgraph/extract/object/__init__.py +0 -0
  144. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/trustgraph/extract/object/row/__init__.py +0 -0
  145. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/trustgraph/extract/object/row/__main__.py +0 -0
  146. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/trustgraph/extract/object/row/extract.py +0 -0
  147. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/trustgraph/gateway/__init__.py +0 -0
  148. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/trustgraph/gateway/__main__.py +0 -0
  149. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/trustgraph/gateway/agent.py +0 -0
  150. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/trustgraph/gateway/auth.py +0 -0
  151. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/trustgraph/gateway/config.py +0 -0
  152. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/trustgraph/gateway/dbpedia.py +0 -0
  153. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/trustgraph/gateway/document_embeddings_load.py +0 -0
  154. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/trustgraph/gateway/document_embeddings_stream.py +0 -0
  155. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/trustgraph/gateway/document_load.py +0 -0
  156. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/trustgraph/gateway/document_rag.py +0 -0
  157. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/trustgraph/gateway/embeddings.py +0 -0
  158. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/trustgraph/gateway/encyclopedia.py +0 -0
  159. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/trustgraph/gateway/endpoint.py +0 -0
  160. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/trustgraph/gateway/flow.py +0 -0
  161. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/trustgraph/gateway/graph_embeddings_load.py +0 -0
  162. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/trustgraph/gateway/graph_embeddings_query.py +0 -0
  163. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/trustgraph/gateway/graph_embeddings_stream.py +0 -0
  164. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/trustgraph/gateway/graph_rag.py +0 -0
  165. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/trustgraph/gateway/internet_search.py +0 -0
  166. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/trustgraph/gateway/librarian.py +0 -0
  167. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/trustgraph/gateway/metrics.py +0 -0
  168. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/trustgraph/gateway/mux.py +0 -0
  169. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/trustgraph/gateway/prompt.py +0 -0
  170. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/trustgraph/gateway/requestor.py +0 -0
  171. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/trustgraph/gateway/running.py +0 -0
  172. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/trustgraph/gateway/sender.py +0 -0
  173. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/trustgraph/gateway/serialize.py +0 -0
  174. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/trustgraph/gateway/service.py +0 -0
  175. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/trustgraph/gateway/socket.py +0 -0
  176. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/trustgraph/gateway/text_completion.py +0 -0
  177. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/trustgraph/gateway/text_load.py +0 -0
  178. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/trustgraph/gateway/triples_load.py +0 -0
  179. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/trustgraph/gateway/triples_query.py +0 -0
  180. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/trustgraph/gateway/triples_stream.py +0 -0
  181. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/trustgraph/librarian/__init__.py +0 -0
  182. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/trustgraph/librarian/__main__.py +0 -0
  183. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/trustgraph/librarian/blob_store.py +0 -0
  184. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/trustgraph/librarian/librarian.py +0 -0
  185. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/trustgraph/librarian/service.py +0 -0
  186. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/trustgraph/librarian/table_store.py +0 -0
  187. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/trustgraph/metering/__init__.py +0 -0
  188. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/trustgraph/metering/__main__.py +0 -0
  189. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/trustgraph/model/__init__.py +0 -0
  190. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/trustgraph/model/prompt/__init__.py +0 -0
  191. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/trustgraph/model/prompt/generic/__init__.py +0 -0
  192. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/trustgraph/model/prompt/generic/__main__.py +0 -0
  193. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/trustgraph/model/prompt/generic/prompts.py +0 -0
  194. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/trustgraph/model/prompt/generic/service.py +0 -0
  195. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/trustgraph/model/prompt/template/__init__.py +0 -0
  196. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/trustgraph/model/prompt/template/__main__.py +0 -0
  197. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/trustgraph/model/prompt/template/prompt_manager.py +0 -0
  198. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/trustgraph/model/prompt/template/service.py +0 -0
  199. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/trustgraph/model/text_completion/__init__.py +0 -0
  200. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/trustgraph/model/text_completion/azure/__init__.py +0 -0
  201. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/trustgraph/model/text_completion/azure/__main__.py +0 -0
  202. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/trustgraph/model/text_completion/azure_openai/__init__.py +0 -0
  203. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/trustgraph/model/text_completion/azure_openai/__main__.py +0 -0
  204. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/trustgraph/model/text_completion/claude/__init__.py +0 -0
  205. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/trustgraph/model/text_completion/claude/__main__.py +0 -0
  206. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/trustgraph/model/text_completion/cohere/__init__.py +0 -0
  207. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/trustgraph/model/text_completion/cohere/__main__.py +0 -0
  208. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/trustgraph/model/text_completion/googleaistudio/__init__.py +0 -0
  209. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/trustgraph/model/text_completion/googleaistudio/__main__.py +0 -0
  210. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/trustgraph/model/text_completion/llamafile/__init__.py +0 -0
  211. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/trustgraph/model/text_completion/llamafile/__main__.py +0 -0
  212. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/trustgraph/model/text_completion/lmstudio/__init__.py +0 -0
  213. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/trustgraph/model/text_completion/lmstudio/__main__.py +0 -0
  214. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/trustgraph/model/text_completion/mistral/__init__.py +0 -0
  215. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/trustgraph/model/text_completion/mistral/__main__.py +0 -0
  216. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/trustgraph/model/text_completion/ollama/__init__.py +0 -0
  217. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/trustgraph/model/text_completion/ollama/__main__.py +0 -0
  218. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/trustgraph/model/text_completion/openai/__init__.py +0 -0
  219. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/trustgraph/model/text_completion/openai/__main__.py +0 -0
  220. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/trustgraph/processing/__init__.py +0 -0
  221. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/trustgraph/processing/__main__.py +0 -0
  222. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/trustgraph/processing/processing.py +0 -0
  223. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/trustgraph/query/__init__.py +0 -0
  224. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/trustgraph/query/doc_embeddings/__init__.py +0 -0
  225. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/trustgraph/query/doc_embeddings/milvus/__init__.py +0 -0
  226. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/trustgraph/query/doc_embeddings/milvus/__main__.py +0 -0
  227. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/trustgraph/query/doc_embeddings/milvus/service.py +0 -0
  228. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/trustgraph/query/doc_embeddings/pinecone/__init__.py +0 -0
  229. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/trustgraph/query/doc_embeddings/pinecone/__main__.py +0 -0
  230. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/trustgraph/query/doc_embeddings/pinecone/service.py +0 -0
  231. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/trustgraph/query/doc_embeddings/qdrant/__init__.py +0 -0
  232. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/trustgraph/query/doc_embeddings/qdrant/__main__.py +0 -0
  233. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/trustgraph/query/doc_embeddings/qdrant/service.py +0 -0
  234. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/trustgraph/query/graph_embeddings/__init__.py +0 -0
  235. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/trustgraph/query/graph_embeddings/milvus/__init__.py +0 -0
  236. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/trustgraph/query/graph_embeddings/milvus/__main__.py +0 -0
  237. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/trustgraph/query/graph_embeddings/milvus/service.py +0 -0
  238. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/trustgraph/query/graph_embeddings/pinecone/__init__.py +0 -0
  239. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/trustgraph/query/graph_embeddings/pinecone/__main__.py +0 -0
  240. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/trustgraph/query/graph_embeddings/pinecone/service.py +0 -0
  241. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/trustgraph/query/graph_embeddings/qdrant/__init__.py +0 -0
  242. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/trustgraph/query/graph_embeddings/qdrant/__main__.py +0 -0
  243. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/trustgraph/query/graph_embeddings/qdrant/service.py +0 -0
  244. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/trustgraph/query/triples/__init__.py +0 -0
  245. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/trustgraph/query/triples/cassandra/__init__.py +0 -0
  246. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/trustgraph/query/triples/cassandra/__main__.py +0 -0
  247. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/trustgraph/query/triples/cassandra/service.py +0 -0
  248. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/trustgraph/query/triples/falkordb/__init__.py +0 -0
  249. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/trustgraph/query/triples/falkordb/__main__.py +0 -0
  250. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/trustgraph/query/triples/falkordb/service.py +0 -0
  251. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/trustgraph/query/triples/memgraph/__init__.py +0 -0
  252. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/trustgraph/query/triples/memgraph/__main__.py +0 -0
  253. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/trustgraph/query/triples/memgraph/service.py +0 -0
  254. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/trustgraph/query/triples/neo4j/__init__.py +0 -0
  255. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/trustgraph/query/triples/neo4j/__main__.py +0 -0
  256. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/trustgraph/query/triples/neo4j/service.py +0 -0
  257. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/trustgraph/retrieval/__init__.py +0 -0
  258. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/trustgraph/retrieval/document_rag/__init__.py +0 -0
  259. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/trustgraph/retrieval/document_rag/__main__.py +0 -0
  260. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/trustgraph/retrieval/document_rag/document_rag.py +0 -0
  261. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/trustgraph/retrieval/document_rag/rag.py +0 -0
  262. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/trustgraph/retrieval/graph_rag/__init__.py +0 -0
  263. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/trustgraph/retrieval/graph_rag/__main__.py +0 -0
  264. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/trustgraph/retrieval/graph_rag/graph_rag.py +0 -0
  265. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/trustgraph/retrieval/graph_rag/rag.py +0 -0
  266. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/trustgraph/storage/__init__.py +0 -0
  267. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/trustgraph/storage/doc_embeddings/__init__.py +0 -0
  268. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/trustgraph/storage/doc_embeddings/milvus/__init__.py +0 -0
  269. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/trustgraph/storage/doc_embeddings/milvus/__main__.py +0 -0
  270. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/trustgraph/storage/doc_embeddings/milvus/write.py +0 -0
  271. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/trustgraph/storage/doc_embeddings/pinecone/__init__.py +0 -0
  272. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/trustgraph/storage/doc_embeddings/pinecone/__main__.py +0 -0
  273. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/trustgraph/storage/doc_embeddings/pinecone/write.py +0 -0
  274. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/trustgraph/storage/doc_embeddings/qdrant/__init__.py +0 -0
  275. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/trustgraph/storage/doc_embeddings/qdrant/__main__.py +0 -0
  276. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/trustgraph/storage/doc_embeddings/qdrant/write.py +0 -0
  277. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/trustgraph/storage/graph_embeddings/__init__.py +0 -0
  278. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/trustgraph/storage/graph_embeddings/milvus/__init__.py +0 -0
  279. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/trustgraph/storage/graph_embeddings/milvus/__main__.py +0 -0
  280. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/trustgraph/storage/graph_embeddings/milvus/write.py +0 -0
  281. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/trustgraph/storage/graph_embeddings/pinecone/__init__.py +0 -0
  282. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/trustgraph/storage/graph_embeddings/pinecone/__main__.py +0 -0
  283. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/trustgraph/storage/graph_embeddings/pinecone/write.py +0 -0
  284. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/trustgraph/storage/graph_embeddings/qdrant/__init__.py +0 -0
  285. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/trustgraph/storage/graph_embeddings/qdrant/__main__.py +0 -0
  286. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/trustgraph/storage/graph_embeddings/qdrant/write.py +0 -0
  287. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/trustgraph/storage/object_embeddings/__init__.py +0 -0
  288. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/trustgraph/storage/object_embeddings/milvus/__init__.py +0 -0
  289. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/trustgraph/storage/object_embeddings/milvus/__main__.py +0 -0
  290. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/trustgraph/storage/object_embeddings/milvus/write.py +0 -0
  291. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/trustgraph/storage/rows/__init__.py +0 -0
  292. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/trustgraph/storage/rows/cassandra/__init__.py +0 -0
  293. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/trustgraph/storage/rows/cassandra/__main__.py +0 -0
  294. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/trustgraph/storage/rows/cassandra/write.py +0 -0
  295. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/trustgraph/storage/triples/__init__.py +0 -0
  296. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/trustgraph/storage/triples/cassandra/__init__.py +0 -0
  297. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/trustgraph/storage/triples/cassandra/__main__.py +0 -0
  298. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/trustgraph/storage/triples/cassandra/write.py +0 -0
  299. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/trustgraph/storage/triples/falkordb/__init__.py +0 -0
  300. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/trustgraph/storage/triples/falkordb/__main__.py +0 -0
  301. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/trustgraph/storage/triples/falkordb/write.py +0 -0
  302. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/trustgraph/storage/triples/memgraph/__init__.py +0 -0
  303. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/trustgraph/storage/triples/memgraph/__main__.py +0 -0
  304. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/trustgraph/storage/triples/memgraph/write.py +0 -0
  305. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/trustgraph/storage/triples/neo4j/__init__.py +0 -0
  306. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/trustgraph/storage/triples/neo4j/__main__.py +0 -0
  307. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/trustgraph/storage/triples/neo4j/write.py +0 -0
  308. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/trustgraph_flow.egg-info/dependency_links.txt +0 -0
  309. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/trustgraph_flow.egg-info/requires.txt +0 -0
  310. {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/trustgraph_flow.egg-info/top_level.txt +0 -0
{trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/PKG-INFO
@@ -1,9 +1,9 @@
  Metadata-Version: 2.1
  Name: trustgraph-flow
- Version: 0.23.1
+ Version: 0.23.3
  Summary: TrustGraph provides a means to run a pipeline of flexible AI processing components in a flexible means to achieve a processing pipeline.
  Home-page: https://github.com/trustgraph-ai/trustgraph
- Download-URL: https://github.com/trustgraph-ai/trustgraph/archive/refs/tags/v0.23.1.tar.gz
+ Download-URL: https://github.com/trustgraph-ai/trustgraph/archive/refs/tags/v0.23.3.tar.gz
  Author: trustgraph.ai
  Author-email: security@trustgraph.ai
  Classifier: Programming Language :: Python :: 3
{trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/trustgraph/config/service/flow.py
@@ -27,6 +27,8 @@ class FlowConfig:

  self.config["flow-classes"][msg.class_name] = msg.class_definition

+ self.config.version += 1
+
  await self.config.push()

  return FlowResponse(
@@ -39,6 +41,8 @@ class FlowConfig:

  del self.config["flow-classes"][msg.class_name]

+ self.config.version += 1
+
  await self.config.push()

  return FlowResponse(
@@ -135,6 +139,8 @@ class FlowConfig:
  "interfaces": interfaces,
  })

+ self.config.version += 1
+
  await self.config.push()

  return FlowResponse(
@@ -186,6 +192,8 @@ class FlowConfig:
  if msg.flow_id in self.config["flows"]:
  del self.config["flows"][msg.flow_id]

+ self.config.version += 1
+
  await self.config.push()

  return FlowResponse(
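Each of the four mutations above — adding or removing entries under "flow-classes" and "flows" — now bumps the configuration version before pushing, so handlers registered against the config service receive a monotonically increasing revision number (as the metering processor's on_cost_config handler later in this diff does). A minimal sketch of the repeated pattern, assuming the config object used by this service:

    # bump the revision so config handlers can tell a new configuration arrived
    self.config.version += 1
    # publish the updated configuration to subscribers
    await self.config.push()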
{trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/trustgraph/decoding/pdf/pdf_decoder.py
@@ -9,7 +9,6 @@ import base64
  from langchain_community.document_loaders import PyPDFLoader

  from ... schema import Document, TextDocument, Metadata
- from ... log_level import LogLevel
  from ... base import FlowProcessor, ConsumerSpec, ProducerSpec

  default_ident = "pdf-decoder"
{trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/trustgraph/extract/kg/relationships/extract.py
@@ -173,7 +173,7 @@ class Processor(FlowProcessor):
  o=Value(value=v.metadata.id, is_uri=True)
  ))

- await self.emit_edges(
+ await self.emit_triples(
  flow("triples"),
  Metadata(
  id=v.metadata.id,
trustgraph-flow-0.23.3/trustgraph/flow_version.py
@@ -0,0 +1 @@
+ __version__ = "0.23.3"
{trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/trustgraph/metering/counter.py
@@ -3,22 +3,19 @@ Simple token counter for each LLM response.
  """

  from prometheus_client import Counter
- from . pricelist import price_list
+ import json

  from .. schema import TextCompletionResponse, Error
- from .. schema import text_completion_response_queue
- from .. log_level import LogLevel
- from .. base import Consumer
+ from .. base import FlowProcessor, ConsumerSpec

- module = "metering"
+ default_ident = "metering"

- default_input_queue = text_completion_response_queue
- default_subscriber = module
-
- class Processor(Consumer):
+ class Processor(FlowProcessor):

  def __init__(self, **params):

+ id = params.get("id", default_ident)
+
  if not hasattr(__class__, "input_token_metric"):
  __class__.input_token_metric = Counter(
  'input_tokens', 'Input token count'
@@ -39,40 +36,61 @@ class Processor(Consumer):
  'output_cost', 'Output cost'
  )

- input_queue = params.get("input_queue", default_input_queue)
- subscriber = params.get("subscriber", default_subscriber)
-
  super(Processor, self).__init__(
  **params | {
- "input_queue": input_queue,
- "subscriber": subscriber,
- "input_schema": TextCompletionResponse,
+ "id": id,
  }
  )

- def get_prices(self, prices, modelname):
- for model in prices["price_list"]:
- if model["model_name"] == modelname:
- return model["input_price"], model["output_price"]
- return None, None # Return None if model is not found
+ self.register_config_handler(self.on_cost_config)
+
+ self.register_specification(
+ ConsumerSpec(
+ name = "input",
+ schema = TextCompletionResponse,
+ handler = self.on_message,
+ )
+ )

- async def handle(self, msg):
+ self.prices = {}

- v = msg.value()
- modelname = v.model
+ self.config_key = "token-costs"
+
+ # Load token costs from the config service
+ async def on_cost_config(self, config, version):

- # Sender-produced ID
- id = msg.properties()["id"]
+ print("Loading configuration version", version)

- print(f"Handling response {id}...", flush=True)
+ if self.config_key not in config:
+ print(f"No key {self.config_key} in config", flush=True)
+ return

+ config = config[self.config_key]
+
+ self.prices = {
+ k: json.loads(v)
+ for k, v in config.items()
+ }
+
+ def get_prices(self, modelname):
+
+ if modelname in self.prices:
+ model = self.prices[modelname]
+ return model["input_price"], model["output_price"]
+ return None, None # Return None if model is not found
+
+ async def on_message(self, msg, consumer, flow):
+
+ v = msg.value()
+
+ modelname = v.model
  num_in = v.in_token
  num_out = v.out_token

  __class__.input_token_metric.inc(num_in)
  __class__.output_token_metric.inc(num_out)

- model_input_price, model_output_price = self.get_prices(price_list, modelname)
+ model_input_price, model_output_price = self.get_prices(modelname)

  if model_input_price == None:
  cost_per_call = f"Model Not Found in Price list"
@@ -91,10 +109,8 @@ class Processor(Consumer):
  @staticmethod
  def add_args(parser):

- Consumer.add_args(
- parser, default_input_queue, default_subscriber,
- )
+ FlowProcessor.add_args(parser)

  def run():

- Processor.launch(module, __doc__)
+ Processor.launch(default_ident, __doc__)
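The price list previously hard-coded in pricelist.py (deleted in this release) is now read from the config service under the "token-costs" key, one JSON-encoded entry per model name. A minimal sketch of the shape the new on_cost_config handler expects; the model name and prices below are hypothetical:

    # Hypothetical "token-costs" configuration fragment: keys are model names,
    # values are JSON strings carrying the per-token prices read by get_prices().
    config = {
        "token-costs": {
            "example-model": '{"input_price": 2.5e-06, "output_price": 1.0e-05}',
        }
    }
    prices = {k: json.loads(v) for k, v in config["token-costs"].items()}
    # prices["example-model"]["input_price"] -> 2.5e-06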
{trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/trustgraph/model/text_completion/azure/llm.py
@@ -9,31 +9,21 @@ import json
  from prometheus_client import Histogram
  import os

- from .... schema import TextCompletionRequest, TextCompletionResponse, Error
- from .... schema import text_completion_request_queue
- from .... schema import text_completion_response_queue
- from .... log_level import LogLevel
- from .... base import ConsumerProducer
  from .... exceptions import TooManyRequests
+ from .... base import LlmService, LlmResult

- module = "text-completion"
+ default_ident = "text-completion"

- default_input_queue = text_completion_request_queue
- default_output_queue = text_completion_response_queue
- default_subscriber = module
  default_temperature = 0.0
  default_max_output = 4192
  default_model = "AzureAI"
  default_endpoint = os.getenv("AZURE_ENDPOINT")
  default_token = os.getenv("AZURE_TOKEN")

- class Processor(ConsumerProducer):
+ class Processor(LlmService):

  def __init__(self, **params):

- input_queue = params.get("input_queue", default_input_queue)
- output_queue = params.get("output_queue", default_output_queue)
- subscriber = params.get("subscriber", default_subscriber)
  endpoint = params.get("endpoint", default_endpoint)
  token = params.get("token", default_token)
  temperature = params.get("temperature", default_temperature)
@@ -48,30 +38,13 @@ class Processor(ConsumerProducer):

  super(Processor, self).__init__(
  **params | {
- "input_queue": input_queue,
- "output_queue": output_queue,
- "subscriber": subscriber,
- "input_schema": TextCompletionRequest,
- "output_schema": TextCompletionResponse,
+ "endpoint": endpoint,
  "temperature": temperature,
  "max_output": max_output,
  "model": model,
  }
  )

- if not hasattr(__class__, "text_completion_metric"):
- __class__.text_completion_metric = Histogram(
- 'text_completion_duration',
- 'Text completion duration (seconds)',
- buckets=[
- 0.25, 0.5, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0,
- 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0,
- 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0,
- 30.0, 35.0, 40.0, 45.0, 50.0, 60.0, 80.0, 100.0,
- 120.0
- ]
- )
-
  self.endpoint = endpoint
  self.token = token
  self.temperature = temperature
@@ -123,25 +96,16 @@ class Processor(ConsumerProducer):

  return result

- async def handle(self, msg):
-
- v = msg.value()
-
- # Sender-produced ID
-
- id = msg.properties()["id"]
-
- print(f"Handling prompt {id}...", flush=True)
+ async def generate_content(self, system, prompt):

  try:

  prompt = self.build_prompt(
- v.system,
- v.prompt
+ system,
+ prompt
  )

- with __class__.text_completion_metric.time():
- response = self.call_llm(prompt)
+ response = self.call_llm(prompt)

  resp = response['choices'][0]['message']['content']
  inputtokens = response['usage']['prompt_tokens']
@@ -153,8 +117,14 @@ class Processor(ConsumerProducer):

  print("Send response...", flush=True)

- r = TextCompletionResponse(response=resp, error=None, in_token=inputtokens, out_token=outputtokens, model=self.model)
- await self.send(r, properties={"id": id})
+ resp = LlmResult(
+ text = resp,
+ in_token = inputtokens,
+ out_token = outputtokens,
+ model = self.model
+ )
+
+ return resp

  except TooManyRequests:

@@ -168,33 +138,14 @@
  # Apart from rate limits, treat all exceptions as unrecoverable

  print(f"Exception: {e}")
-
- print("Send error response...", flush=True)
-
- r = TextCompletionResponse(
- error=Error(
- type = "llm-error",
- message = str(e),
- ),
- response=None,
- in_token=None,
- out_token=None,
- model=None,
- )
-
- await self.send(r, properties={"id": id})
-
- self.consumer.acknowledge(msg)
+ raise e

  print("Done.", flush=True)

  @staticmethod
  def add_args(parser):

- ConsumerProducer.add_args(
- parser, default_input_queue, default_subscriber,
- default_output_queue,
- )
+ LlmService.add_args(parser)

  parser.add_argument(
  '-e', '--endpoint',
@@ -224,4 +175,4 @@ class Processor(ConsumerProducer):

  def run():

- Processor.launch(module, __doc__)
+ Processor.launch(default_ident, __doc__)
{trustgraph-flow-0.23.1 → trustgraph-flow-0.23.3}/trustgraph/model/text_completion/azure_openai/llm.py
@@ -9,18 +9,11 @@ from prometheus_client import Histogram
  from openai import AzureOpenAI, RateLimitError
  import os

- from .... schema import TextCompletionRequest, TextCompletionResponse, Error
- from .... schema import text_completion_request_queue
- from .... schema import text_completion_response_queue
- from .... log_level import LogLevel
- from .... base import ConsumerProducer
  from .... exceptions import TooManyRequests
+ from .... base import LlmService, LlmResult

- module = "text-completion"
+ default_ident = "text-completion"

- default_input_queue = text_completion_request_queue
- default_output_queue = text_completion_response_queue
- default_subscriber = module
  default_temperature = 0.0
  default_max_output = 4192
  default_api = "2024-12-01-preview"
@@ -28,13 +21,10 @@ default_endpoint = os.getenv("AZURE_ENDPOINT", None)
  default_token = os.getenv("AZURE_TOKEN", None)
  default_model = os.getenv("AZURE_MODEL", None)

- class Processor(ConsumerProducer):
+ class Processor(LlmService):

  def __init__(self, **params):

- input_queue = params.get("input_queue", default_input_queue)
- output_queue = params.get("output_queue", default_output_queue)
- subscriber = params.get("subscriber", default_subscriber)
  temperature = params.get("temperature", default_temperature)
  max_output = params.get("max_output", default_max_output)

@@ -51,11 +41,6 @@ class Processor(ConsumerProducer):

  super(Processor, self).__init__(
  **params | {
- "input_queue": input_queue,
- "output_queue": output_queue,
- "subscriber": subscriber,
- "input_schema": TextCompletionRequest,
- "output_schema": TextCompletionResponse,
  "temperature": temperature,
  "max_output": max_output,
  "model": model,
@@ -63,19 +48,6 @@ class Processor(ConsumerProducer):
  }
  )

- if not hasattr(__class__, "text_completion_metric"):
- __class__.text_completion_metric = Histogram(
- 'text_completion_duration',
- 'Text completion duration (seconds)',
- buckets=[
- 0.25, 0.5, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0,
- 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0,
- 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0,
- 30.0, 35.0, 40.0, 45.0, 50.0, 60.0, 80.0, 100.0,
- 120.0
- ]
- )
-
  self.temperature = temperature
  self.max_output = max_output
  self.model = model
@@ -84,41 +56,31 @@ class Processor(ConsumerProducer):
  api_key=token,
  api_version=api,
  azure_endpoint = endpoint,
- )
-
- async def handle(self, msg):
-
- v = msg.value()
-
- # Sender-produced ID
-
- id = msg.properties()["id"]
-
- print(f"Handling prompt {id}...", flush=True)
+ )

- prompt = v.system + "\n\n" + v.prompt
+ async def generate_content(self, system, prompt):

+ prompt = system + "\n\n" + prompt

  try:

- with __class__.text_completion_metric.time():
- resp = self.openai.chat.completions.create(
- model=self.model,
- messages=[
- {
- "role": "user",
- "content": [
- {
- "type": "text",
- "text": prompt
- }
- ]
- }
- ],
- temperature=self.temperature,
- max_tokens=self.max_output,
- top_p=1,
- )
+ resp = self.openai.chat.completions.create(
+ model=self.model,
+ messages=[
+ {
+ "role": "user",
+ "content": [
+ {
+ "type": "text",
+ "text": prompt
+ }
+ ]
+ }
+ ],
+ temperature=self.temperature,
+ max_tokens=self.max_output,
+ top_p=1,
+ )

  inputtokens = resp.usage.prompt_tokens
  outputtokens = resp.usage.completion_tokens
@@ -127,15 +89,14 @@ class Processor(ConsumerProducer):
  print(f"Output Tokens: {outputtokens}", flush=True)
  print("Send response...", flush=True)

- r = TextCompletionResponse(
- response=resp.choices[0].message.content,
- error=None,
- in_token=inputtokens,
- out_token=outputtokens,
- model=self.model
+ r = LlmResult(
+ text = resp.choices[0].message.content,
+ in_token = inputtokens,
+ out_token = outputtokens,
+ model = self.model
  )

- await self.send(r, properties={"id": id})
+ return r

  except RateLimitError:

@@ -147,35 +108,15 @@ class Processor(ConsumerProducer):
  except Exception as e:

  # Apart from rate limits, treat all exceptions as unrecoverable
-
  print(f"Exception: {e}")
-
- print("Send error response...", flush=True)
-
- r = TextCompletionResponse(
- error=Error(
- type = "llm-error",
- message = str(e),
- ),
- response=None,
- in_token=None,
- out_token=None,
- model=None,
- )
-
- await self.send(r, properties={"id": id})
-
- self.consumer.acknowledge(msg)
+ raise e

  print("Done.", flush=True)

  @staticmethod
  def add_args(parser):

- ConsumerProducer.add_args(
- parser, default_input_queue, default_subscriber,
- default_output_queue,
- )
+ LlmService.add_args(parser)

  parser.add_argument(
  '-e', '--endpoint',
@@ -217,4 +158,4 @@ class Processor(ConsumerProducer):

  def run():

- Processor.launch(module, __doc__)
+ Processor.launch(default_ident, __doc__)
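The two Azure processors above, and the provider modules re-added in this release (the Claude module below, plus cohere, llamafile, lmstudio, mistral, ollama and openai), all take the same shape after the refactor: queue wiring, per-provider duration metrics and error-response construction are dropped from the provider modules, and each provider implements only generate_content, returning an LlmResult or raising TooManyRequests so the base handler can deal with rate-limit retries. A minimal sketch of that contract under those assumptions; call_provider and ProviderRateLimit are placeholders, and the full claude/llm.py that follows is a concrete instance:

    class Processor(LlmService):

        async def generate_content(self, system, prompt):
            try:
                # hypothetical stand-in for the vendor SDK call
                text, n_in, n_out = call_provider(system, prompt)
                return LlmResult(
                    text=text, in_token=n_in, out_token=n_out, model=self.model,
                )
            except ProviderRateLimit:       # hypothetical vendor exception
                raise TooManyRequests()     # rate-limit retries left to the base handler
            except Exception as e:
                print(f"Exception: {e}")    # anything else is unrecoverable
                raise e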
trustgraph-flow-0.23.3/trustgraph/model/text_completion/claude/llm.py
@@ -0,0 +1,130 @@
+
+ """
+ Simple LLM service, performs text prompt completion using Claude.
+ Input is prompt, output is response.
+ """
+
+ import anthropic
+ import os
+
+ from .... exceptions import TooManyRequests
+ from .... base import LlmService, LlmResult
+
+ default_ident = "text-completion"
+
+ default_model = 'claude-3-5-sonnet-20240620'
+ default_temperature = 0.0
+ default_max_output = 8192
+ default_api_key = os.getenv("CLAUDE_KEY")
+
+ class Processor(LlmService):
+
+ def __init__(self, **params):
+
+ model = params.get("model", default_model)
+ api_key = params.get("api_key", default_api_key)
+ temperature = params.get("temperature", default_temperature)
+ max_output = params.get("max_output", default_max_output)
+
+ if api_key is None:
+ raise RuntimeError("Claude API key not specified")
+
+ super(Processor, self).__init__(
+ **params | {
+ "model": model,
+ "temperature": temperature,
+ "max_output": max_output,
+ }
+ )
+
+ self.model = model
+ self.claude = anthropic.Anthropic(api_key=api_key)
+ self.temperature = temperature
+ self.max_output = max_output
+
+ print("Initialised", flush=True)
+
+ async def generate_content(self, system, prompt):
+
+ try:
+
+ response = message = self.claude.messages.create(
+ model=self.model,
+ max_tokens=self.max_output,
+ temperature=self.temperature,
+ system = system,
+ messages=[
+ {
+ "role": "user",
+ "content": [
+ {
+ "type": "text",
+ "text": prompt
+ }
+ ]
+ }
+ ]
+ )
+
+ resp = response.content[0].text
+ inputtokens = response.usage.input_tokens
+ outputtokens = response.usage.output_tokens
+ print(resp, flush=True)
+ print(f"Input Tokens: {inputtokens}", flush=True)
+ print(f"Output Tokens: {outputtokens}", flush=True)
+
+ resp = LlmResult(
+ text = resp,
+ in_token = inputtokens,
+ out_token = outputtokens,
+ model = self.model
+ )
+
+ return resp
+
+ except anthropic.RateLimitError:
+
+ # Leave rate limit retries to the base handler
+ raise TooManyRequests()
+
+ except Exception as e:
+
+ # Apart from rate limits, treat all exceptions as unrecoverable
+
+ print(f"Exception: {e}")
+ raise e
+
+ @staticmethod
+ def add_args(parser):
+
+ LlmService.add_args(parser)
+
+ parser.add_argument(
+ '-m', '--model',
+ default="claude-3-5-sonnet-20240620",
+ help=f'LLM model (default: claude-3-5-sonnet-20240620)'
+ )
+
+ parser.add_argument(
+ '-k', '--api-key',
+ default=default_api_key,
+ help=f'Claude API key'
+ )
+
+ parser.add_argument(
+ '-t', '--temperature',
+ type=float,
+ default=default_temperature,
+ help=f'LLM temperature parameter (default: {default_temperature})'
+ )
+
+ parser.add_argument(
+ '-x', '--max-output',
+ type=int,
+ default=default_max_output,
+ help=f'LLM max output tokens (default: {default_max_output})'
+ )
+
+ def run():
+
+ Processor.launch(default_ident, __doc__)