trustgraph-flow 0.23.1__tar.gz → 0.23.2__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/PKG-INFO +2 -2
- trustgraph-flow-0.23.2/trustgraph/flow_version.py +1 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/trustgraph/model/text_completion/azure/llm.py +19 -68
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/trustgraph/model/text_completion/azure_openai/llm.py +32 -91
- trustgraph-flow-0.23.2/trustgraph/model/text_completion/claude/llm.py +130 -0
- trustgraph-flow-0.23.2/trustgraph/model/text_completion/cohere/llm.py +113 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/trustgraph/model/text_completion/googleaistudio/llm.py +19 -81
- trustgraph-flow-0.23.2/trustgraph/model/text_completion/llamafile/llm.py +125 -0
- trustgraph-flow-0.23.2/trustgraph/model/text_completion/lmstudio/llm.py +131 -0
- trustgraph-flow-0.23.2/trustgraph/model/text_completion/mistral/llm.py +142 -0
- trustgraph-flow-0.23.2/trustgraph/model/text_completion/ollama/llm.py +85 -0
- trustgraph-flow-0.23.2/trustgraph/model/text_completion/openai/llm.py +148 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/trustgraph_flow.egg-info/PKG-INFO +2 -2
- trustgraph-flow-0.23.1/trustgraph/flow_version.py +0 -1
- trustgraph-flow-0.23.1/trustgraph/model/text_completion/claude/llm.py +0 -195
- trustgraph-flow-0.23.1/trustgraph/model/text_completion/cohere/llm.py +0 -174
- trustgraph-flow-0.23.1/trustgraph/model/text_completion/llamafile/llm.py +0 -190
- trustgraph-flow-0.23.1/trustgraph/model/text_completion/lmstudio/llm.py +0 -193
- trustgraph-flow-0.23.1/trustgraph/model/text_completion/mistral/llm.py +0 -207
- trustgraph-flow-0.23.1/trustgraph/model/text_completion/ollama/llm.py +0 -150
- trustgraph-flow-0.23.1/trustgraph/model/text_completion/openai/llm.py +0 -210
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/README.md +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/scripts/agent-manager-react +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/scripts/api-gateway +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/scripts/chunker-recursive +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/scripts/chunker-token +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/scripts/config-svc +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/scripts/de-query-milvus +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/scripts/de-query-pinecone +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/scripts/de-query-qdrant +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/scripts/de-write-milvus +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/scripts/de-write-pinecone +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/scripts/de-write-qdrant +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/scripts/document-embeddings +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/scripts/document-rag +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/scripts/embeddings-fastembed +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/scripts/embeddings-ollama +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/scripts/ge-query-milvus +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/scripts/ge-query-pinecone +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/scripts/ge-query-qdrant +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/scripts/ge-write-milvus +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/scripts/ge-write-pinecone +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/scripts/ge-write-qdrant +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/scripts/graph-embeddings +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/scripts/graph-rag +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/scripts/kg-extract-definitions +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/scripts/kg-extract-relationships +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/scripts/kg-extract-topics +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/scripts/librarian +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/scripts/metering +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/scripts/object-extract-row +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/scripts/oe-write-milvus +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/scripts/pdf-decoder +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/scripts/pdf-ocr-mistral +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/scripts/prompt-generic +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/scripts/prompt-template +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/scripts/rows-write-cassandra +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/scripts/run-processing +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/scripts/text-completion-azure +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/scripts/text-completion-azure-openai +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/scripts/text-completion-claude +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/scripts/text-completion-cohere +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/scripts/text-completion-googleaistudio +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/scripts/text-completion-llamafile +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/scripts/text-completion-lmstudio +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/scripts/text-completion-mistral +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/scripts/text-completion-ollama +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/scripts/text-completion-openai +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/scripts/triples-query-cassandra +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/scripts/triples-query-falkordb +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/scripts/triples-query-memgraph +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/scripts/triples-query-neo4j +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/scripts/triples-write-cassandra +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/scripts/triples-write-falkordb +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/scripts/triples-write-memgraph +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/scripts/triples-write-neo4j +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/scripts/wikipedia-lookup +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/setup.cfg +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/setup.py +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/trustgraph/__init__.py +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/trustgraph/agent/__init__.py +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/trustgraph/agent/react/__init__.py +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/trustgraph/agent/react/__main__.py +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/trustgraph/agent/react/agent_manager.py +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/trustgraph/agent/react/service.py +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/trustgraph/agent/react/tools.py +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/trustgraph/agent/react/types.py +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/trustgraph/chunking/__init__.py +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/trustgraph/chunking/recursive/__init__.py +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/trustgraph/chunking/recursive/__main__.py +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/trustgraph/chunking/recursive/chunker.py +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/trustgraph/chunking/token/__init__.py +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/trustgraph/chunking/token/__main__.py +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/trustgraph/chunking/token/chunker.py +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/trustgraph/config/service/__init__.py +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/trustgraph/config/service/__main__.py +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/trustgraph/config/service/config.py +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/trustgraph/config/service/flow.py +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/trustgraph/config/service/service.py +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/trustgraph/decoding/__init__.py +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/trustgraph/decoding/mistral_ocr/__init__.py +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/trustgraph/decoding/mistral_ocr/__main__.py +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/trustgraph/decoding/mistral_ocr/processor.py +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/trustgraph/decoding/pdf/__init__.py +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/trustgraph/decoding/pdf/__main__.py +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/trustgraph/decoding/pdf/pdf_decoder.py +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/trustgraph/direct/__init__.py +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/trustgraph/direct/cassandra.py +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/trustgraph/direct/milvus_doc_embeddings.py +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/trustgraph/direct/milvus_graph_embeddings.py +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/trustgraph/direct/milvus_object_embeddings.py +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/trustgraph/embeddings/__init__.py +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/trustgraph/embeddings/document_embeddings/__init__.py +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/trustgraph/embeddings/document_embeddings/__main__.py +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/trustgraph/embeddings/document_embeddings/embeddings.py +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/trustgraph/embeddings/fastembed/__init__.py +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/trustgraph/embeddings/fastembed/__main__.py +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/trustgraph/embeddings/fastembed/processor.py +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/trustgraph/embeddings/graph_embeddings/__init__.py +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/trustgraph/embeddings/graph_embeddings/__main__.py +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/trustgraph/embeddings/graph_embeddings/embeddings.py +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/trustgraph/embeddings/ollama/__init__.py +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/trustgraph/embeddings/ollama/__main__.py +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/trustgraph/embeddings/ollama/processor.py +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/trustgraph/external/__init__.py +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/trustgraph/external/wikipedia/__init__.py +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/trustgraph/external/wikipedia/__main__.py +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/trustgraph/external/wikipedia/service.py +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/trustgraph/extract/__init__.py +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/trustgraph/extract/kg/__init__.py +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/trustgraph/extract/kg/definitions/__init__.py +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/trustgraph/extract/kg/definitions/__main__.py +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/trustgraph/extract/kg/definitions/extract.py +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/trustgraph/extract/kg/relationships/__init__.py +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/trustgraph/extract/kg/relationships/__main__.py +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/trustgraph/extract/kg/relationships/extract.py +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/trustgraph/extract/kg/topics/__init__.py +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/trustgraph/extract/kg/topics/__main__.py +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/trustgraph/extract/kg/topics/extract.py +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/trustgraph/extract/object/__init__.py +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/trustgraph/extract/object/row/__init__.py +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/trustgraph/extract/object/row/__main__.py +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/trustgraph/extract/object/row/extract.py +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/trustgraph/gateway/__init__.py +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/trustgraph/gateway/__main__.py +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/trustgraph/gateway/agent.py +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/trustgraph/gateway/auth.py +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/trustgraph/gateway/config.py +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/trustgraph/gateway/dbpedia.py +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/trustgraph/gateway/document_embeddings_load.py +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/trustgraph/gateway/document_embeddings_stream.py +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/trustgraph/gateway/document_load.py +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/trustgraph/gateway/document_rag.py +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/trustgraph/gateway/embeddings.py +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/trustgraph/gateway/encyclopedia.py +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/trustgraph/gateway/endpoint.py +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/trustgraph/gateway/flow.py +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/trustgraph/gateway/graph_embeddings_load.py +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/trustgraph/gateway/graph_embeddings_query.py +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/trustgraph/gateway/graph_embeddings_stream.py +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/trustgraph/gateway/graph_rag.py +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/trustgraph/gateway/internet_search.py +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/trustgraph/gateway/librarian.py +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/trustgraph/gateway/metrics.py +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/trustgraph/gateway/mux.py +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/trustgraph/gateway/prompt.py +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/trustgraph/gateway/requestor.py +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/trustgraph/gateway/running.py +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/trustgraph/gateway/sender.py +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/trustgraph/gateway/serialize.py +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/trustgraph/gateway/service.py +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/trustgraph/gateway/socket.py +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/trustgraph/gateway/text_completion.py +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/trustgraph/gateway/text_load.py +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/trustgraph/gateway/triples_load.py +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/trustgraph/gateway/triples_query.py +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/trustgraph/gateway/triples_stream.py +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/trustgraph/librarian/__init__.py +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/trustgraph/librarian/__main__.py +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/trustgraph/librarian/blob_store.py +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/trustgraph/librarian/librarian.py +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/trustgraph/librarian/service.py +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/trustgraph/librarian/table_store.py +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/trustgraph/metering/__init__.py +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/trustgraph/metering/__main__.py +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/trustgraph/metering/counter.py +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/trustgraph/metering/pricelist.py +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/trustgraph/model/__init__.py +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/trustgraph/model/prompt/__init__.py +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/trustgraph/model/prompt/generic/__init__.py +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/trustgraph/model/prompt/generic/__main__.py +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/trustgraph/model/prompt/generic/prompts.py +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/trustgraph/model/prompt/generic/service.py +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/trustgraph/model/prompt/template/__init__.py +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/trustgraph/model/prompt/template/__main__.py +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/trustgraph/model/prompt/template/prompt_manager.py +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/trustgraph/model/prompt/template/service.py +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/trustgraph/model/text_completion/__init__.py +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/trustgraph/model/text_completion/azure/__init__.py +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/trustgraph/model/text_completion/azure/__main__.py +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/trustgraph/model/text_completion/azure_openai/__init__.py +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/trustgraph/model/text_completion/azure_openai/__main__.py +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/trustgraph/model/text_completion/claude/__init__.py +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/trustgraph/model/text_completion/claude/__main__.py +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/trustgraph/model/text_completion/cohere/__init__.py +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/trustgraph/model/text_completion/cohere/__main__.py +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/trustgraph/model/text_completion/googleaistudio/__init__.py +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/trustgraph/model/text_completion/googleaistudio/__main__.py +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/trustgraph/model/text_completion/llamafile/__init__.py +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/trustgraph/model/text_completion/llamafile/__main__.py +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/trustgraph/model/text_completion/lmstudio/__init__.py +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/trustgraph/model/text_completion/lmstudio/__main__.py +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/trustgraph/model/text_completion/mistral/__init__.py +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/trustgraph/model/text_completion/mistral/__main__.py +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/trustgraph/model/text_completion/ollama/__init__.py +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/trustgraph/model/text_completion/ollama/__main__.py +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/trustgraph/model/text_completion/openai/__init__.py +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/trustgraph/model/text_completion/openai/__main__.py +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/trustgraph/processing/__init__.py +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/trustgraph/processing/__main__.py +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/trustgraph/processing/processing.py +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/trustgraph/query/__init__.py +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/trustgraph/query/doc_embeddings/__init__.py +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/trustgraph/query/doc_embeddings/milvus/__init__.py +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/trustgraph/query/doc_embeddings/milvus/__main__.py +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/trustgraph/query/doc_embeddings/milvus/service.py +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/trustgraph/query/doc_embeddings/pinecone/__init__.py +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/trustgraph/query/doc_embeddings/pinecone/__main__.py +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/trustgraph/query/doc_embeddings/pinecone/service.py +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/trustgraph/query/doc_embeddings/qdrant/__init__.py +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/trustgraph/query/doc_embeddings/qdrant/__main__.py +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/trustgraph/query/doc_embeddings/qdrant/service.py +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/trustgraph/query/graph_embeddings/__init__.py +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/trustgraph/query/graph_embeddings/milvus/__init__.py +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/trustgraph/query/graph_embeddings/milvus/__main__.py +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/trustgraph/query/graph_embeddings/milvus/service.py +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/trustgraph/query/graph_embeddings/pinecone/__init__.py +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/trustgraph/query/graph_embeddings/pinecone/__main__.py +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/trustgraph/query/graph_embeddings/pinecone/service.py +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/trustgraph/query/graph_embeddings/qdrant/__init__.py +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/trustgraph/query/graph_embeddings/qdrant/__main__.py +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/trustgraph/query/graph_embeddings/qdrant/service.py +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/trustgraph/query/triples/__init__.py +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/trustgraph/query/triples/cassandra/__init__.py +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/trustgraph/query/triples/cassandra/__main__.py +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/trustgraph/query/triples/cassandra/service.py +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/trustgraph/query/triples/falkordb/__init__.py +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/trustgraph/query/triples/falkordb/__main__.py +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/trustgraph/query/triples/falkordb/service.py +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/trustgraph/query/triples/memgraph/__init__.py +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/trustgraph/query/triples/memgraph/__main__.py +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/trustgraph/query/triples/memgraph/service.py +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/trustgraph/query/triples/neo4j/__init__.py +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/trustgraph/query/triples/neo4j/__main__.py +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/trustgraph/query/triples/neo4j/service.py +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/trustgraph/retrieval/__init__.py +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/trustgraph/retrieval/document_rag/__init__.py +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/trustgraph/retrieval/document_rag/__main__.py +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/trustgraph/retrieval/document_rag/document_rag.py +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/trustgraph/retrieval/document_rag/rag.py +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/trustgraph/retrieval/graph_rag/__init__.py +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/trustgraph/retrieval/graph_rag/__main__.py +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/trustgraph/retrieval/graph_rag/graph_rag.py +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/trustgraph/retrieval/graph_rag/rag.py +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/trustgraph/storage/__init__.py +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/trustgraph/storage/doc_embeddings/__init__.py +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/trustgraph/storage/doc_embeddings/milvus/__init__.py +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/trustgraph/storage/doc_embeddings/milvus/__main__.py +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/trustgraph/storage/doc_embeddings/milvus/write.py +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/trustgraph/storage/doc_embeddings/pinecone/__init__.py +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/trustgraph/storage/doc_embeddings/pinecone/__main__.py +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/trustgraph/storage/doc_embeddings/pinecone/write.py +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/trustgraph/storage/doc_embeddings/qdrant/__init__.py +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/trustgraph/storage/doc_embeddings/qdrant/__main__.py +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/trustgraph/storage/doc_embeddings/qdrant/write.py +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/trustgraph/storage/graph_embeddings/__init__.py +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/trustgraph/storage/graph_embeddings/milvus/__init__.py +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/trustgraph/storage/graph_embeddings/milvus/__main__.py +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/trustgraph/storage/graph_embeddings/milvus/write.py +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/trustgraph/storage/graph_embeddings/pinecone/__init__.py +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/trustgraph/storage/graph_embeddings/pinecone/__main__.py +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/trustgraph/storage/graph_embeddings/pinecone/write.py +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/trustgraph/storage/graph_embeddings/qdrant/__init__.py +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/trustgraph/storage/graph_embeddings/qdrant/__main__.py +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/trustgraph/storage/graph_embeddings/qdrant/write.py +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/trustgraph/storage/object_embeddings/__init__.py +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/trustgraph/storage/object_embeddings/milvus/__init__.py +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/trustgraph/storage/object_embeddings/milvus/__main__.py +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/trustgraph/storage/object_embeddings/milvus/write.py +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/trustgraph/storage/rows/__init__.py +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/trustgraph/storage/rows/cassandra/__init__.py +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/trustgraph/storage/rows/cassandra/__main__.py +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/trustgraph/storage/rows/cassandra/write.py +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/trustgraph/storage/triples/__init__.py +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/trustgraph/storage/triples/cassandra/__init__.py +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/trustgraph/storage/triples/cassandra/__main__.py +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/trustgraph/storage/triples/cassandra/write.py +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/trustgraph/storage/triples/falkordb/__init__.py +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/trustgraph/storage/triples/falkordb/__main__.py +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/trustgraph/storage/triples/falkordb/write.py +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/trustgraph/storage/triples/memgraph/__init__.py +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/trustgraph/storage/triples/memgraph/__main__.py +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/trustgraph/storage/triples/memgraph/write.py +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/trustgraph/storage/triples/neo4j/__init__.py +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/trustgraph/storage/triples/neo4j/__main__.py +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/trustgraph/storage/triples/neo4j/write.py +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/trustgraph_flow.egg-info/SOURCES.txt +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/trustgraph_flow.egg-info/dependency_links.txt +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/trustgraph_flow.egg-info/requires.txt +0 -0
- {trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/trustgraph_flow.egg-info/top_level.txt +0 -0
{trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/PKG-INFO
RENAMED
@@ -1,9 +1,9 @@
 Metadata-Version: 2.1
 Name: trustgraph-flow
-Version: 0.23.1
+Version: 0.23.2
 Summary: TrustGraph provides a means to run a pipeline of flexible AI processing components in a flexible means to achieve a processing pipeline.
 Home-page: https://github.com/trustgraph-ai/trustgraph
-Download-URL: https://github.com/trustgraph-ai/trustgraph/archive/refs/tags/v0.23.1.tar.gz
+Download-URL: https://github.com/trustgraph-ai/trustgraph/archive/refs/tags/v0.23.2.tar.gz
 Author: trustgraph.ai
 Author-email: security@trustgraph.ai
 Classifier: Programming Language :: Python :: 3

trustgraph-flow-0.23.2/trustgraph/flow_version.py
ADDED
@@ -0,0 +1 @@
+__version__ = "0.23.2"
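The new trustgraph/flow_version.py module carries the package version as a plain module attribute. A quick post-upgrade sanity check, assuming the import path shown in the file list above:

    from trustgraph.flow_version import __version__

    assert __version__ == "0.23.2", __version__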
{trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/trustgraph/model/text_completion/azure/llm.py
RENAMED
@@ -9,31 +9,21 @@ import json
 from prometheus_client import Histogram
 import os
 
-from .... schema import TextCompletionRequest, TextCompletionResponse, Error
-from .... schema import text_completion_request_queue
-from .... schema import text_completion_response_queue
-from .... log_level import LogLevel
-from .... base import ConsumerProducer
 from .... exceptions import TooManyRequests
+from .... base import LlmService, LlmResult
 
-
+default_ident = "text-completion"
 
-default_input_queue = text_completion_request_queue
-default_output_queue = text_completion_response_queue
-default_subscriber = module
 default_temperature = 0.0
 default_max_output = 4192
 default_model = "AzureAI"
 default_endpoint = os.getenv("AZURE_ENDPOINT")
 default_token = os.getenv("AZURE_TOKEN")
 
-class Processor(ConsumerProducer):
+class Processor(LlmService):
 
     def __init__(self, **params):
 
-        input_queue = params.get("input_queue", default_input_queue)
-        output_queue = params.get("output_queue", default_output_queue)
-        subscriber = params.get("subscriber", default_subscriber)
         endpoint = params.get("endpoint", default_endpoint)
         token = params.get("token", default_token)
         temperature = params.get("temperature", default_temperature)
@@ -48,30 +38,13 @@ class Processor(ConsumerProducer):
 
         super(Processor, self).__init__(
             **params | {
-                "input_queue": input_queue,
-                "output_queue": output_queue,
-                "subscriber": subscriber,
-                "input_schema": TextCompletionRequest,
-                "output_schema": TextCompletionResponse,
+                "endpoint": endpoint,
                 "temperature": temperature,
                 "max_output": max_output,
                 "model": model,
             }
         )
 
-        if not hasattr(__class__, "text_completion_metric"):
-            __class__.text_completion_metric = Histogram(
-                'text_completion_duration',
-                'Text completion duration (seconds)',
-                buckets=[
-                    0.25, 0.5, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0,
-                    8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0,
-                    17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0,
-                    30.0, 35.0, 40.0, 45.0, 50.0, 60.0, 80.0, 100.0,
-                    120.0
-                ]
-            )
-
         self.endpoint = endpoint
         self.token = token
         self.temperature = temperature
@@ -123,25 +96,16 @@ class Processor(ConsumerProducer):
 
         return result
 
-    async def handle(self, msg):
-
-        v = msg.value()
-
-        # Sender-produced ID
-
-        id = msg.properties()["id"]
-
-        print(f"Handling prompt {id}...", flush=True)
+    async def generate_content(self, system, prompt):
 
         try:
 
             prompt = self.build_prompt(
-
-
+                system,
+                prompt
             )
 
-
-            response = self.call_llm(prompt)
+            response = self.call_llm(prompt)
 
             resp = response['choices'][0]['message']['content']
             inputtokens = response['usage']['prompt_tokens']
@@ -153,8 +117,14 @@ class Processor(ConsumerProducer):
 
             print("Send response...", flush=True)
 
-
-
+            resp = LlmResult(
+                text = resp,
+                in_token = inputtokens,
+                out_token = outputtokens,
+                model = self.model
+            )
+
+            return resp
 
         except TooManyRequests:
 
@@ -168,33 +138,14 @@ class Processor(ConsumerProducer):
             # Apart from rate limits, treat all exceptions as unrecoverable
 
             print(f"Exception: {e}")
-
-            print("Send error response...", flush=True)
-
-            r = TextCompletionResponse(
-                error=Error(
-                    type = "llm-error",
-                    message = str(e),
-                ),
-                response=None,
-                in_token=None,
-                out_token=None,
-                model=None,
-            )
-
-            await self.send(r, properties={"id": id})
-
-            self.consumer.acknowledge(msg)
+            raise e
 
         print("Done.", flush=True)
 
     @staticmethod
     def add_args(parser):
 
-        ConsumerProducer.add_args(
-            parser, default_input_queue, default_subscriber,
-            default_output_queue,
-        )
+        LlmService.add_args(parser)
 
         parser.add_argument(
             '-e', '--endpoint',
@@ -224,4 +175,4 @@ class Processor(ConsumerProducer):
 
 def run():
 
-    Processor.launch(
+    Processor.launch(default_ident, __doc__)
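The pattern in this refactor repeats across every text-completion service in the release: the ConsumerProducer plumbing (queue names, request/response schemas, the per-class Histogram metric, message acknowledgement) moves into a shared LlmService base, and each provider shrinks to an async generate_content(system, prompt) that returns an LlmResult. A minimal sketch of that contract as it appears from these diffs — the LlmService and LlmResult stand-ins below are hypothetical reconstructions, since the real definitions live in trustgraph's base package and are not part of this diff:

    import asyncio
    from dataclasses import dataclass

    @dataclass
    class LlmResult:
        # Field names match the diffs above; the real class lives in .... base
        text: str
        in_token: int
        out_token: int
        model: str

    class LlmService:
        # Stand-in only: the real base class wires the message queues,
        # metrics and retry handling around generate_content()
        def __init__(self, **params):
            self.params = params

    class EchoProcessor(LlmService):
        # Toy provider showing the shape a 0.23.2 service implements

        def __init__(self, **params):
            super().__init__(**params)
            self.model = params.get("model", "echo")

        async def generate_content(self, system, prompt):
            text = system + "\n\n" + prompt
            return LlmResult(
                text = text,
                in_token = len(prompt.split()),    # crude stand-in token count
                out_token = len(text.split()),
                model = self.model,
            )

    result = asyncio.run(EchoProcessor().generate_content("Be brief.", "Hello"))
    print(result.text, result.model)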
{trustgraph-flow-0.23.1 → trustgraph-flow-0.23.2}/trustgraph/model/text_completion/azure_openai/llm.py
RENAMED
@@ -9,18 +9,11 @@ from prometheus_client import Histogram
 from openai import AzureOpenAI, RateLimitError
 import os
 
-from .... schema import TextCompletionRequest, TextCompletionResponse, Error
-from .... schema import text_completion_request_queue
-from .... schema import text_completion_response_queue
-from .... log_level import LogLevel
-from .... base import ConsumerProducer
 from .... exceptions import TooManyRequests
+from .... base import LlmService, LlmResult
 
-
+default_ident = "text-completion"
 
-default_input_queue = text_completion_request_queue
-default_output_queue = text_completion_response_queue
-default_subscriber = module
 default_temperature = 0.0
 default_max_output = 4192
 default_api = "2024-12-01-preview"
@@ -28,13 +21,10 @@ default_endpoint = os.getenv("AZURE_ENDPOINT", None)
 default_token = os.getenv("AZURE_TOKEN", None)
 default_model = os.getenv("AZURE_MODEL", None)
 
-class Processor(ConsumerProducer):
+class Processor(LlmService):
 
     def __init__(self, **params):
 
-        input_queue = params.get("input_queue", default_input_queue)
-        output_queue = params.get("output_queue", default_output_queue)
-        subscriber = params.get("subscriber", default_subscriber)
         temperature = params.get("temperature", default_temperature)
         max_output = params.get("max_output", default_max_output)
 
@@ -51,11 +41,6 @@ class Processor(ConsumerProducer):
 
         super(Processor, self).__init__(
             **params | {
-                "input_queue": input_queue,
-                "output_queue": output_queue,
-                "subscriber": subscriber,
-                "input_schema": TextCompletionRequest,
-                "output_schema": TextCompletionResponse,
                 "temperature": temperature,
                 "max_output": max_output,
                 "model": model,
@@ -63,19 +48,6 @@ class Processor(ConsumerProducer):
             }
         )
 
-        if not hasattr(__class__, "text_completion_metric"):
-            __class__.text_completion_metric = Histogram(
-                'text_completion_duration',
-                'Text completion duration (seconds)',
-                buckets=[
-                    0.25, 0.5, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0,
-                    8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0,
-                    17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0,
-                    30.0, 35.0, 40.0, 45.0, 50.0, 60.0, 80.0, 100.0,
-                    120.0
-                ]
-            )
-
         self.temperature = temperature
         self.max_output = max_output
         self.model = model
@@ -84,41 +56,31 @@ class Processor(ConsumerProducer):
             api_key=token,
             api_version=api,
             azure_endpoint = endpoint,
-
-
-    async def handle(self, msg):
-
-        v = msg.value()
-
-        # Sender-produced ID
-
-        id = msg.properties()["id"]
-
-        print(f"Handling prompt {id}...", flush=True)
+        )
 
-
+    async def generate_content(self, system, prompt):
 
+        prompt = system + "\n\n" + prompt
 
         try:
 
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-            )
+            resp = self.openai.chat.completions.create(
+                model=self.model,
+                messages=[
+                    {
+                        "role": "user",
+                        "content": [
+                            {
+                                "type": "text",
+                                "text": prompt
+                            }
+                        ]
+                    }
+                ],
+                temperature=self.temperature,
+                max_tokens=self.max_output,
+                top_p=1,
+            )
 
             inputtokens = resp.usage.prompt_tokens
             outputtokens = resp.usage.completion_tokens
@@ -127,15 +89,14 @@ class Processor(ConsumerProducer):
             print(f"Output Tokens: {outputtokens}", flush=True)
             print("Send response...", flush=True)
 
-            r = TextCompletionResponse(
-
-
-
-
-                model=self.model
+            r = LlmResult(
+                text = resp.choices[0].message.content,
+                in_token = inputtokens,
+                out_token = outputtokens,
+                model = self.model
             )
 
-
+            return r
 
         except RateLimitError:
 
@@ -147,35 +108,15 @@ class Processor(ConsumerProducer):
         except Exception as e:
 
             # Apart from rate limits, treat all exceptions as unrecoverable
-
             print(f"Exception: {e}")
-
-            print("Send error response...", flush=True)
-
-            r = TextCompletionResponse(
-                error=Error(
-                    type = "llm-error",
-                    message = str(e),
-                ),
-                response=None,
-                in_token=None,
-                out_token=None,
-                model=None,
-            )
-
-            await self.send(r, properties={"id": id})
-
-            self.consumer.acknowledge(msg)
+            raise e
 
         print("Done.", flush=True)
 
     @staticmethod
     def add_args(parser):
 
-        ConsumerProducer.add_args(
-            parser, default_input_queue, default_subscriber,
-            default_output_queue,
-        )
+        LlmService.add_args(parser)
 
         parser.add_argument(
             '-e', '--endpoint',
@@ -217,4 +158,4 @@ class Processor(ConsumerProducer):
 
 def run():
 
-    Processor.launch(
+    Processor.launch(default_ident, __doc__)
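Both Azure services now share the same error contract: the SDK's rate-limit error is translated into trustgraph's TooManyRequests so the base class can back off and retry, and every other exception is re-raised instead of being hand-built into a TextCompletionResponse carrying an Error, as 0.23.1 did. The shape of that contract, sketched with stand-in exception classes since the real ones come from the provider SDK and trustgraph's exceptions module:

    class TooManyRequests(Exception):
        # Stand-in for .... exceptions.TooManyRequests
        pass

    class RateLimitError(Exception):
        # Stand-in for the provider SDK's rate-limit error
        pass

    def generate(call_provider):
        try:
            return call_provider()
        except RateLimitError:
            # Leave rate limit retries to the base handler
            raise TooManyRequests()
        except Exception as e:
            # Apart from rate limits, treat all exceptions as unrecoverable
            print(f"Exception: {e}")
            raise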
trustgraph-flow-0.23.2/trustgraph/model/text_completion/claude/llm.py
ADDED
@@ -0,0 +1,130 @@
+
+"""
+Simple LLM service, performs text prompt completion using Claude.
+Input is prompt, output is response.
+"""
+
+import anthropic
+import os
+
+from .... exceptions import TooManyRequests
+from .... base import LlmService, LlmResult
+
+default_ident = "text-completion"
+
+default_model = 'claude-3-5-sonnet-20240620'
+default_temperature = 0.0
+default_max_output = 8192
+default_api_key = os.getenv("CLAUDE_KEY")
+
+class Processor(LlmService):
+
+    def __init__(self, **params):
+
+        model = params.get("model", default_model)
+        api_key = params.get("api_key", default_api_key)
+        temperature = params.get("temperature", default_temperature)
+        max_output = params.get("max_output", default_max_output)
+
+        if api_key is None:
+            raise RuntimeError("Claude API key not specified")
+
+        super(Processor, self).__init__(
+            **params | {
+                "model": model,
+                "temperature": temperature,
+                "max_output": max_output,
+            }
+        )
+
+        self.model = model
+        self.claude = anthropic.Anthropic(api_key=api_key)
+        self.temperature = temperature
+        self.max_output = max_output
+
+        print("Initialised", flush=True)
+
+    async def generate_content(self, system, prompt):
+
+        try:
+
+            response = message = self.claude.messages.create(
+                model=self.model,
+                max_tokens=self.max_output,
+                temperature=self.temperature,
+                system = system,
+                messages=[
+                    {
+                        "role": "user",
+                        "content": [
+                            {
+                                "type": "text",
+                                "text": prompt
+                            }
+                        ]
+                    }
+                ]
+            )
+
+            resp = response.content[0].text
+            inputtokens = response.usage.input_tokens
+            outputtokens = response.usage.output_tokens
+            print(resp, flush=True)
+            print(f"Input Tokens: {inputtokens}", flush=True)
+            print(f"Output Tokens: {outputtokens}", flush=True)
+
+            resp = LlmResult(
+                text = resp,
+                in_token = inputtokens,
+                out_token = outputtokens,
+                model = self.model
+            )
+
+            return resp
+
+        except anthropic.RateLimitError:
+
+            # Leave rate limit retries to the base handler
+            raise TooManyRequests()
+
+        except Exception as e:
+
+            # Apart from rate limits, treat all exceptions as unrecoverable
+
+            print(f"Exception: {e}")
+            raise e
+
+    @staticmethod
+    def add_args(parser):
+
+        LlmService.add_args(parser)
+
+        parser.add_argument(
+            '-m', '--model',
+            default="claude-3-5-sonnet-20240620",
+            help=f'LLM model (default: claude-3-5-sonnet-20240620)'
+        )
+
+        parser.add_argument(
+            '-k', '--api-key',
+            default=default_api_key,
+            help=f'Claude API key'
+        )
+
+        parser.add_argument(
+            '-t', '--temperature',
+            type=float,
+            default=default_temperature,
+            help=f'LLM temperature parameter (default: {default_temperature})'
+        )
+
+        parser.add_argument(
+            '-x', '--max-output',
+            type=int,
+            default=default_max_output,
+            help=f'LLM max output tokens (default: {default_max_output})'
+        )
+
+def run():
+
+    Processor.launch(default_ident, __doc__)
trustgraph-flow-0.23.2/trustgraph/model/text_completion/cohere/llm.py
ADDED
@@ -0,0 +1,113 @@
+
+"""
+Simple LLM service, performs text prompt completion using Cohere.
+Input is prompt, output is response.
+"""
+
+import cohere
+from prometheus_client import Histogram
+import os
+
+from .... exceptions import TooManyRequests
+from .... base import LlmService, LlmResult
+
+default_ident = "text-completion"
+
+default_model = 'c4ai-aya-23-8b'
+default_temperature = 0.0
+default_api_key = os.getenv("COHERE_KEY")
+
+class Processor(LlmService):
+
+    def __init__(self, **params):
+
+        model = params.get("model", default_model)
+        api_key = params.get("api_key", default_api_key)
+        temperature = params.get("temperature", default_temperature)
+
+        if api_key is None:
+            raise RuntimeError("Cohere API key not specified")
+
+        super(Processor, self).__init__(
+            **params | {
+                "model": model,
+                "temperature": temperature,
+            }
+        )
+
+        self.model = model
+        self.temperature = temperature
+        self.cohere = cohere.Client(api_key=api_key)
+
+        print("Initialised", flush=True)
+
+    async def generate_content(self, system, prompt):
+
+        try:
+
+            output = self.cohere.chat(
+                model=self.model,
+                message=prompt,
+                preamble = system,
+                temperature=self.temperature,
+                chat_history=[],
+                prompt_truncation='auto',
+                connectors=[]
+            )
+
+            resp = output.text
+            inputtokens = int(output.meta.billed_units.input_tokens)
+            outputtokens = int(output.meta.billed_units.output_tokens)
+
+            print(resp, flush=True)
+            print(f"Input Tokens: {inputtokens}", flush=True)
+            print(f"Output Tokens: {outputtokens}", flush=True)
+
+            resp = LlmResult(
+                text = resp,
+                in_token = inputtokens,
+                out_token = outputtokens,
+                model = self.model
+            )
+
+        # FIXME: Wrong exception, don't know what this LLM throws
+        # for a rate limit
+        except cohere.TooManyRequestsError:
+
+            # Leave rate limit retries to the base handler
+            raise TooManyRequests()
+
+        except Exception as e:
+
+            # Apart from rate limits, treat all exceptions as unrecoverable
+
+            print(f"Exception: {e}")
+            raise e
+
+    @staticmethod
+    def add_args(parser):
+
+        LlmService.add_args(parser)
+
+        parser.add_argument(
+            '-m', '--model',
+            default="c4ai-aya-23-8b",
+            help=f'Cohere model (default: c4ai-aya-23-8b)'
+        )
+
+        parser.add_argument(
+            '-k', '--api-key',
+            default=default_api_key,
+            help=f'Cohere API key'
+        )
+
+        parser.add_argument(
+            '-t', '--temperature',
+            type=float,
+            default=default_temperature,
+            help=f'LLM temperature parameter (default: {default_temperature})'
+        )
+
+def run():
+
+    Processor.launch(default_ident, __doc__)
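The Cohere service differs from the Claude one in two details: the system prompt is passed as preamble rather than a system message, and token counts come from output.meta.billed_units, which the code wraps in int() (presumably because the SDK reports them as floats). The same mapping against a faked response object, runnable without the cohere dependency:

    from types import SimpleNamespace

    # Fake of the chat output shape the code above reads from
    output = SimpleNamespace(
        text = "Hello!",
        meta = SimpleNamespace(
            billed_units = SimpleNamespace(input_tokens=12.0, output_tokens=3.0),
        ),
    )

    in_token = int(output.meta.billed_units.input_tokens)
    out_token = int(output.meta.billed_units.output_tokens)
    print(output.text, in_token, out_token)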
|