qtype-0.1.11-py3-none-any.whl → qtype-0.1.12-py3-none-any.whl
This diff compares the contents of two publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only.
- docs/Concepts/mental-model-and-philosophy.md +363 -0
- docs/Contributing/index.md +276 -0
- docs/Contributing/roadmap.md +81 -0
- docs/Decisions/ADR-001-Chat-vs-Completion-Endpoint-Features.md +56 -0
- docs/Gallery/dataflow_pipelines.md +80 -0
- docs/Gallery/dataflow_pipelines.mermaid +45 -0
- docs/Gallery/research_assistant.md +98 -0
- docs/Gallery/research_assistant.mermaid +42 -0
- docs/Gallery/simple_chatbot.md +36 -0
- docs/Gallery/simple_chatbot.mermaid +35 -0
- docs/How To/Authentication/configure_aws_authentication.md +60 -0
- docs/How To/Authentication/use_api_key_authentication.md +40 -0
- docs/How To/Command Line Usage/load_multiple_inputs_from_files.md +62 -0
- docs/How To/Command Line Usage/pass_inputs_on_the_cli.md +52 -0
- docs/How To/Command Line Usage/serve_with_auto_reload.md +26 -0
- docs/How To/Data Processing/adjust_concurrency.md +41 -0
- docs/How To/Data Processing/cache_step_results.md +71 -0
- docs/How To/Data Processing/decode_json_xml.md +24 -0
- docs/How To/Data Processing/explode_collections.md +40 -0
- docs/How To/Data Processing/gather_results.md +68 -0
- docs/How To/Data Processing/read_data_from_files.md +35 -0
- docs/How To/Data Processing/read_sql_databases.md +47 -0
- docs/How To/Data Processing/write_data_to_file.md +40 -0
- docs/How To/Invoke Models/call_large_language_models.md +51 -0
- docs/How To/Invoke Models/create_embeddings.md +49 -0
- docs/How To/Invoke Models/reuse_prompts_with_templates.md +39 -0
- docs/How To/Language Features/include_qtype_yaml.md +45 -0
- docs/How To/Language Features/include_raw_text_from_other_files.md +47 -0
- docs/How To/Language Features/reference_entities_by_id.md +51 -0
- docs/How To/Language Features/use_environment_variables.md +47 -0
- docs/How To/Language Features/use_qtype_mcp.md +59 -0
- docs/How To/Observability & Debugging/trace_calls_with_open_telemetry.md +49 -0
- docs/How To/Observability & Debugging/validate_qtype_yaml.md +35 -0
- docs/How To/Observability & Debugging/visualize_application_architecture.md +61 -0
- docs/How To/Observability & Debugging/visualize_example.mermaid +35 -0
- docs/How To/Qtype Server/flow_as_ui.png +0 -0
- docs/How To/Qtype Server/serve_flows_as_apis.md +40 -0
- docs/How To/Qtype Server/serve_flows_as_ui.md +42 -0
- docs/How To/Qtype Server/use_conversational_interfaces.md +59 -0
- docs/How To/Qtype Server/use_variables_with_ui_hints.md +47 -0
- docs/How To/Tools & Integration/bind_tool_inputs_and_outputs.md +48 -0
- docs/How To/Tools & Integration/create_tools_from_openapi_specifications.md +89 -0
- docs/How To/Tools & Integration/create_tools_from_python_modules.md +90 -0
- docs/Reference/cli.md +338 -0
- docs/Reference/plugins.md +95 -0
- docs/Reference/semantic-validation-rules.md +179 -0
- docs/Tutorials/01-first-qtype-application.md +248 -0
- docs/Tutorials/02-conversational-chatbot.md +327 -0
- docs/Tutorials/03-structured-data.md +481 -0
- docs/Tutorials/04-tools-and-function-calling.md +483 -0
- docs/Tutorials/example_chat.png +0 -0
- docs/Tutorials/index.md +92 -0
- docs/components/APIKeyAuthProvider.md +7 -0
- docs/components/APITool.md +10 -0
- docs/components/AWSAuthProvider.md +13 -0
- docs/components/AWSSecretManager.md +5 -0
- docs/components/Agent.md +6 -0
- docs/components/Aggregate.md +8 -0
- docs/components/AggregateStats.md +7 -0
- docs/components/Application.md +22 -0
- docs/components/AuthorizationProvider.md +6 -0
- docs/components/AuthorizationProviderList.md +5 -0
- docs/components/BearerTokenAuthProvider.md +6 -0
- docs/components/BedrockReranker.md +8 -0
- docs/components/ChatContent.md +7 -0
- docs/components/ChatMessage.md +6 -0
- docs/components/ConstantPath.md +5 -0
- docs/components/CustomType.md +7 -0
- docs/components/Decoder.md +8 -0
- docs/components/DecoderFormat.md +8 -0
- docs/components/DocToTextConverter.md +7 -0
- docs/components/Document.md +7 -0
- docs/components/DocumentEmbedder.md +7 -0
- docs/components/DocumentIndex.md +7 -0
- docs/components/DocumentSearch.md +7 -0
- docs/components/DocumentSource.md +12 -0
- docs/components/DocumentSplitter.md +10 -0
- docs/components/Echo.md +8 -0
- docs/components/Embedding.md +7 -0
- docs/components/EmbeddingModel.md +6 -0
- docs/components/FieldExtractor.md +20 -0
- docs/components/FileSource.md +6 -0
- docs/components/FileWriter.md +7 -0
- docs/components/Flow.md +14 -0
- docs/components/FlowInterface.md +7 -0
- docs/components/Index.md +8 -0
- docs/components/IndexUpsert.md +6 -0
- docs/components/InvokeEmbedding.md +7 -0
- docs/components/InvokeFlow.md +8 -0
- docs/components/InvokeTool.md +8 -0
- docs/components/LLMInference.md +9 -0
- docs/components/ListType.md +5 -0
- docs/components/Memory.md +8 -0
- docs/components/MessageRole.md +14 -0
- docs/components/Model.md +10 -0
- docs/components/ModelList.md +5 -0
- docs/components/OAuth2AuthProvider.md +9 -0
- docs/components/PrimitiveTypeEnum.md +21 -0
- docs/components/PromptTemplate.md +7 -0
- docs/components/PythonFunctionTool.md +7 -0
- docs/components/RAGChunk.md +7 -0
- docs/components/RAGDocument.md +10 -0
- docs/components/RAGSearchResult.md +8 -0
- docs/components/Reranker.md +5 -0
- docs/components/SQLSource.md +8 -0
- docs/components/Search.md +7 -0
- docs/components/SearchResult.md +7 -0
- docs/components/SecretManager.md +7 -0
- docs/components/SecretReference.md +7 -0
- docs/components/Source.md +6 -0
- docs/components/Step.md +9 -0
- docs/components/TelemetrySink.md +9 -0
- docs/components/Tool.md +9 -0
- docs/components/ToolList.md +5 -0
- docs/components/ToolParameter.md +6 -0
- docs/components/TypeList.md +5 -0
- docs/components/Variable.md +6 -0
- docs/components/VariableList.md +5 -0
- docs/components/VectorIndex.md +7 -0
- docs/components/VectorSearch.md +6 -0
- docs/components/VertexAuthProvider.md +9 -0
- docs/components/Writer.md +5 -0
- docs/example_ui.png +0 -0
- docs/index.md +81 -0
- docs/legacy_how_tos/Configuration/modular-yaml.md +366 -0
- docs/legacy_how_tos/Configuration/phoenix_projects.png +0 -0
- docs/legacy_how_tos/Configuration/phoenix_traces.png +0 -0
- docs/legacy_how_tos/Configuration/reference-by-id.md +251 -0
- docs/legacy_how_tos/Configuration/telemetry-setup.md +259 -0
- docs/legacy_how_tos/Data Types/custom-types.md +52 -0
- docs/legacy_how_tos/Data Types/domain-types.md +113 -0
- docs/legacy_how_tos/Debugging/visualize-apps.md +147 -0
- docs/legacy_how_tos/Tools/api-tools.md +29 -0
- docs/legacy_how_tos/Tools/python-tools.md +299 -0
- examples/authentication/aws_authentication.qtype.yaml +63 -0
- examples/conversational_ai/hello_world_chat.qtype.yaml +43 -0
- examples/conversational_ai/simple_chatbot.qtype.yaml +40 -0
- examples/data_processing/batch_processing.qtype.yaml +54 -0
- examples/data_processing/cache_step_results.qtype.yaml +78 -0
- examples/data_processing/collect_results.qtype.yaml +55 -0
- examples/data_processing/dataflow_pipelines.qtype.yaml +108 -0
- examples/data_processing/decode_json.qtype.yaml +23 -0
- examples/data_processing/explode_items.qtype.yaml +25 -0
- examples/data_processing/read_file.qtype.yaml +60 -0
- examples/invoke_models/create_embeddings.qtype.yaml +28 -0
- examples/invoke_models/simple_llm_call.qtype.yaml +32 -0
- examples/language_features/include_raw.qtype.yaml +27 -0
- examples/language_features/ui_hints.qtype.yaml +52 -0
- examples/legacy/bedrock/data_analysis_with_telemetry.qtype.yaml +169 -0
- examples/legacy/bedrock/hello_world.qtype.yaml +39 -0
- examples/legacy/bedrock/hello_world_chat.qtype.yaml +37 -0
- examples/legacy/bedrock/hello_world_chat_with_telemetry.qtype.yaml +40 -0
- examples/legacy/bedrock/hello_world_chat_with_thinking.qtype.yaml +40 -0
- examples/legacy/bedrock/hello_world_completion.qtype.yaml +41 -0
- examples/legacy/bedrock/hello_world_completion_with_auth.qtype.yaml +44 -0
- examples/legacy/bedrock/simple_agent_chat.qtype.yaml +46 -0
- examples/legacy/chat_with_langfuse.qtype.yaml +50 -0
- examples/legacy/data_processor.qtype.yaml +48 -0
- examples/legacy/echo/debug_example.qtype.yaml +59 -0
- examples/legacy/echo/prompt.qtype.yaml +22 -0
- examples/legacy/echo/test.qtype.yaml +26 -0
- examples/legacy/echo/video.qtype.yaml +20 -0
- examples/legacy/field_extractor_example.qtype.yaml +137 -0
- examples/legacy/multi_flow_example.qtype.yaml +125 -0
- examples/legacy/openai/hello_world_chat.qtype.yaml +43 -0
- examples/legacy/openai/hello_world_chat_with_telemetry.qtype.yaml +46 -0
- examples/legacy/rag.qtype.yaml +207 -0
- examples/legacy/time_utilities.qtype.yaml +64 -0
- examples/legacy/vertex/hello_world_chat.qtype.yaml +36 -0
- examples/legacy/vertex/hello_world_completion.qtype.yaml +40 -0
- examples/legacy/vertex/hello_world_completion_with_auth.qtype.yaml +45 -0
- examples/observability_debugging/trace_with_opentelemetry.qtype.yaml +40 -0
- examples/research_assistant/research_assistant.qtype.yaml +94 -0
- examples/research_assistant/tavily.oas.yaml +722 -0
- examples/research_assistant/tavily.qtype.yaml +289 -0
- examples/tutorials/01_hello_world.qtype.yaml +48 -0
- examples/tutorials/02_conversational_chat.qtype.yaml +37 -0
- examples/tutorials/03_structured_data.qtype.yaml +130 -0
- examples/tutorials/04_tools_and_function_calling.qtype.yaml +89 -0
- qtype/application/converters/tools_from_api.py +39 -35
- qtype/base/types.py +6 -1
- qtype/commands/convert.py +3 -6
- qtype/commands/generate.py +7 -3
- qtype/commands/mcp.py +68 -0
- qtype/commands/validate.py +4 -4
- qtype/dsl/custom_types.py +2 -1
- qtype/dsl/linker.py +15 -7
- qtype/dsl/loader.py +3 -3
- qtype/dsl/model.py +24 -3
- qtype/interpreter/api.py +4 -1
- qtype/interpreter/base/base_step_executor.py +3 -1
- qtype/interpreter/conversions.py +7 -3
- qtype/interpreter/executors/construct_executor.py +1 -1
- qtype/interpreter/executors/file_source_executor.py +3 -3
- qtype/interpreter/executors/file_writer_executor.py +4 -4
- qtype/interpreter/executors/index_upsert_executor.py +1 -1
- qtype/interpreter/executors/sql_source_executor.py +1 -1
- qtype/interpreter/resource_cache.py +3 -1
- qtype/interpreter/rich_progress.py +6 -3
- qtype/interpreter/stream/chat/converter.py +25 -17
- qtype/interpreter/stream/chat/ui_request_to_domain_type.py +2 -2
- qtype/interpreter/typing.py +5 -7
- qtype/mcp/__init__.py +0 -0
- qtype/mcp/server.py +467 -0
- qtype/semantic/checker.py +1 -1
- qtype/semantic/generate.py +3 -3
- qtype/semantic/visualize.py +38 -51
- {qtype-0.1.11.dist-info → qtype-0.1.12.dist-info}/METADATA +21 -1
- qtype-0.1.12.dist-info/RECORD +325 -0
- {qtype-0.1.11.dist-info → qtype-0.1.12.dist-info}/WHEEL +1 -1
- schema/qtype.schema.json +4018 -0
- qtype-0.1.11.dist-info/RECORD +0 -142
- {qtype-0.1.11.dist-info → qtype-0.1.12.dist-info}/entry_points.txt +0 -0
- {qtype-0.1.11.dist-info → qtype-0.1.12.dist-info}/licenses/LICENSE +0 -0
- {qtype-0.1.11.dist-info → qtype-0.1.12.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,41 @@
+id: hello_world
+description: A simple hello world application using AWS Bedrock with AWS authentication
+models:
+  - type: Model
+    id: nova_lite
+    provider: aws-bedrock
+    model_id: amazon.nova-lite-v1:0
+    inference_params:
+      temperature: 0.7
+      max_tokens: 512
+flows:
+  - type: Flow
+    id: simple_example
+    interface:
+      type: Complete
+    variables:
+      - id: prompt
+        type: text
+      - id: formatted_prompt
+        type: text
+      - id: answer
+        type: text
+    inputs:
+      - prompt
+    outputs:
+      - answer
+    steps:
+      - id: question_prompt
+        type: PromptTemplate
+        template: "You are a helpful assistant. Answer the following question:\n{prompt}\n"
+        inputs:
+          - prompt
+        outputs:
+          - formatted_prompt
+      - id: llm_inference_step
+        type: LLMInference
+        model: nova_lite
+        inputs:
+          - formatted_prompt
+        outputs:
+          - answer
@@ -0,0 +1,44 @@
+id: hello_world
+description: A simple hello world application using AWS Bedrock with AWS authentication
+models:
+  - type: Model
+    id: nova_lite
+    provider: aws-bedrock
+    model_id: amazon.nova-lite-v1:0
+    inference_params:
+      temperature: 0.7
+      max_tokens: 512
+    auth: aws_auth
+auths:
+  - type: aws
+    id: aws_auth
+    profile_name: default # Replace with relevant profile configured in .aws/config
+flows:
+  - type: Flow
+    id: simple_example
+    variables:
+      - id: question
+        type: text
+      - id: prompt
+        type: text
+      - id: answer
+        type: text
+    inputs:
+      - question
+    outputs:
+      - answer
+    steps:
+      - id: question_prompt
+        type: PromptTemplate
+        template: "You are a helpful assistant. Answer the following question:\n{question}\n"
+        inputs:
+          - question
+        outputs:
+          - prompt
+      - id: llm_inference_step
+        type: LLMInference
+        model: nova_lite
+        inputs:
+          - prompt
+        outputs:
+          - answer
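
The profile_name: default auth block above resolves against the caller's local AWS configuration. As a rough illustration (not qtype's runtime code), the same profile and model settings map onto boto3's Bedrock Converse API roughly as follows; the prompt text is made up.

# Illustrative sketch only: profile-based auth plus the example's model settings
# expressed as a direct boto3 Bedrock call.
import boto3

# "default" mirrors profile_name in the auths block; any profile from
# ~/.aws/config can be used the same way.
session = boto3.Session(profile_name="default")
bedrock = session.client("bedrock-runtime")

# Model id and inference params mirror the example; the prompt is invented.
response = bedrock.converse(
    modelId="amazon.nova-lite-v1:0",
    messages=[{"role": "user", "content": [{"text": "What is QType?"}]}],
    inferenceConfig={"temperature": 0.7, "maxTokens": 512},
)
print(response["output"]["message"]["content"][0]["text"])
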
@@ -0,0 +1,46 @@
+id: simple_agent
+description: A simple agent chatbot with time and date tools
+models:
+  - type: Model
+    id: nova_lite
+    provider: aws-bedrock
+    model_id: amazon.nova-lite-v1:0
+    inference_params:
+      temperature: 0.7
+      max_tokens: 512
+references:
+  - !include ../../common/tools.qtype.yaml
+memories:
+  - id: chat_memory
+    token_limit: 10000
+flows:
+  - type: Flow
+    id: agent_chat
+    interface:
+      type: Conversational
+    variables:
+      - id: user_message
+        type: ChatMessage
+      - id: response
+        type: ChatMessage
+    inputs:
+      - user_message
+    outputs:
+      - response
+    steps:
+      - id: agent_step
+        type: Agent
+        model: nova_lite
+        system_message: "You are a helpful assistant with access to time and date tools. Use them when users ask about time-related information."
+        memory: chat_memory
+        tools:
+          - qtype.application.commons.tools.get_current_timestamp
+          - qtype.application.commons.tools.format_datetime
+          - qtype.application.commons.tools.timedelta
+        inputs:
+          - user_message
+        outputs:
+          - response
+telemetry:
+  id: simple_agent_telemetry
+  endpoint: http://localhost:6006/v1/traces
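
The tools entries above are dotted Python paths into the installed package. A minimal sketch of how such a path can be resolved to a callable (generic Python, not necessarily qtype's own tool loader):

# Generic sketch: turning a dotted tool path like the ones listed above into a
# callable. qtype's actual loader may differ.
from importlib import import_module

def resolve_tool(dotted_path: str):
    """Import the module portion of a dotted path and return the named attribute."""
    module_path, _, attr_name = dotted_path.rpartition(".")
    return getattr(import_module(module_path), attr_name)

# With the qtype package installed, this would return the timestamp helper:
# tool = resolve_tool("qtype.application.commons.tools.get_current_timestamp")
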
@@ -0,0 +1,50 @@
+id: hello_world_langfuse
+description: A simple chat flow with Langfuse telemetry
+models:
+  - type: Model
+    id: gpt4
+    provider: openai
+    model_id: gpt-4
+    inference_params:
+      temperature: 0.7
+      max_tokens: 512
+    auth: openai_auth
+auths:
+  - type: api_key
+    id: openai_auth
+    api_key: ${OPENAI_KEY}
+memories:
+  - id: chat_memory
+    token_limit: 10000
+flows:
+  - type: Flow
+    id: chat_example
+    description: A simple chat flow with telemetry sent to Langfuse
+    interface:
+      type: Conversational
+    variables:
+      - id: user_message
+        type: ChatMessage
+      - id: response
+        type: ChatMessage
+    inputs:
+      - user_message
+    outputs:
+      - response
+    steps:
+      - type: LLMInference
+        id: llm_inference_step
+        model: gpt4
+        memory: chat_memory
+        system_message: "You are a helpful assistant."
+        inputs:
+          - user_message
+        outputs:
+          - response
+telemetry:
+  id: langfuse_telemetry
+  provider: Langfuse
+  endpoint: https://cloud.langfuse.com
+  args:
+    public_key: ${LANGFUSE_PUBLIC_KEY}
+    secret_key: ${LANGFUSE_SECRET_KEY}
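
Values such as ${OPENAI_KEY} and ${LANGFUSE_SECRET_KEY} follow shell-style environment-variable substitution. A small sketch of that convention in Python, assuming qtype performs an equivalent expansion when the YAML is loaded:

# Sketch of the ${VAR} convention used above; where exactly qtype expands these
# values is not shown here.
import os

os.environ.setdefault("OPENAI_KEY", "sk-demo")  # normally exported by the shell

print(os.path.expandvars("api_key: ${OPENAI_KEY}"))  # -> api_key: sk-demo
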
@@ -0,0 +1,48 @@
+id: data_processor
+description: Process CSV data to extract and summarize information
+
+flows:
+  - type: Flow
+    id: process_customers
+    description: Load customer data and count records
+
+    variables:
+      - id: file_path
+        type: text
+      - id: name
+        type: text
+      - id: region
+        type: text
+      - id: purchases
+        type: int
+      - id: stats
+        type: AggregateStats
+
+    inputs:
+      - file_path
+
+    outputs:
+      - stats
+      - name
+      - region
+      - purchases
+
+    steps:
+      # Step 1: Read CSV file (emits many records, one per row)
+      - id: load_file
+        type: FileSource
+        path: file_path
+        inputs:
+          - file_path
+        outputs:
+          - name
+          - region
+          - purchases
+
+      # Step 2: Count all records
+      - id: count_records
+        type: Aggregate
+        inputs:
+          - region
+        outputs:
+          - stats
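
Conceptually, the flow above streams one record per CSV row out of FileSource and then collapses the stream with Aggregate. A plain-Python sketch of the same pattern; the customers.csv filename is assumed, and the column names mirror the example:

# Plain-Python analogue of the flow above: fan out one record per CSV row
# (FileSource), then fold the stream back into per-region counts (Aggregate).
import csv
from collections import Counter

stats = Counter()
with open("customers.csv", newline="") as fh:
    for row in csv.DictReader(fh):      # one record per row, like FileSource
        stats[row["region"]] += 1       # grouped count, like Aggregate on region

print(dict(stats))
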
@@ -0,0 +1,59 @@
+id: echo-debug-example
+description: Example showing how to use Echo step for debugging flows
+
+flows:
+  - id: debug-flow
+    description: A flow that uses Echo to inspect variables at different points
+    variables:
+      - id: original_input
+        type: text
+      - id: processed_text
+        type: text
+      - id: final_output
+        type: text
+    inputs:
+      - original_input
+    outputs:
+      - final_output
+    steps:
+      # Step 1: Echo the original input for debugging
+      - id: debug_input
+        type: Echo
+        inputs:
+          - original_input
+        outputs:
+          - original_input
+
+      # Step 2: Process the input (example: add prefix)
+      - id: process_input
+        type: PromptTemplate
+        template: "Processed: {original_input}"
+        inputs:
+          - original_input
+        outputs:
+          - processed_text
+
+      # Step 3: Echo the processed value for debugging
+      - id: debug_processed
+        type: Echo
+        inputs:
+          - processed_text
+        outputs:
+          - processed_text
+
+      # Step 4: Final transformation
+      - id: final_transform
+        type: PromptTemplate
+        template: "{processed_text} - DONE"
+        inputs:
+          - processed_text
+        outputs:
+          - final_output
+
+      # Step 5: Echo the final output for debugging
+      - id: debug_output
+        type: Echo
+        inputs:
+          - final_output
+        outputs:
+          - final_output
@@ -0,0 +1,22 @@
+id: echo
+description: Repeats the user inputs
+flows:
+  - type: Flow
+    id: echo_flow
+    variables:
+      - id: message
+        type: text
+      - id: echo_output
+        type: text
+    inputs:
+      - message
+    outputs:
+      - echo_output
+    steps:
+      - id: echo_step
+        type: PromptTemplate
+        template: "You said {message}"
+        inputs:
+          - message
+        outputs:
+          - echo_output
@@ -0,0 +1,26 @@
+id: test-echo
+description: Test the Echo step for debugging
+
+flows:
+  - id: echo-test
+    description: Simple flow to test the Echo step
+    variables:
+      - id: user_input
+        type: text
+      - id: user_count
+        type: number
+    inputs:
+      - user_input
+      - user_count
+    outputs:
+      - user_input
+      - user_count
+    steps:
+      - id: echo_debug
+        type: Echo
+        inputs:
+          - user_input
+          - user_count
+        outputs:
+          - user_input
+          - user_count
@@ -0,0 +1,20 @@
+id: echo-video
+description: Test the Echo step for video
+
+flows:
+  - id: echo-video
+    description: Simple flow to test the Echo step
+    variables:
+      - id: user_input
+        type: video
+    inputs:
+      - user_input
+    outputs:
+      - user_input
+    steps:
+      - id: echo_debug
+        type: Echo
+        inputs:
+          - user_input
+        outputs:
+          - user_input
@@ -0,0 +1,137 @@
+# Example demonstrating the FieldExtractor step
+# This shows how to extract fields from structured data using JSONPath
+
+id: field_extractor_demo
+description: |
+  Demonstrates the FieldExtractor step for extracting specific fields from input data.
+  Shows both single value extraction and multiple value extraction (1-to-many).
+
+types:
+  - id: Person
+    description: A person with name and age
+    properties:
+      name: text
+      age: int
+
+  - id: DataRecord
+    description: A complex data record with nested structure
+    properties:
+      timestamp: text
+      metadata: text
+      items: list[text]
+
+flows:
+  # Example 1: Extract a single field from input
+  - id: extract_single_field
+    description: Extract the name field from a Person
+    variables:
+      - id: person_data
+        type: Person
+      - id: person_name
+        type: text
+
+    steps:
+      - type: FieldExtractor
+        id: extract_name
+        json_path: $.name
+        inputs:
+          - person_data
+        outputs:
+          - person_name
+
+    inputs:
+      - person_data
+    outputs:
+      - person_name
+
+  # Example 2: Extract multiple items (1-to-many)
+  - id: extract_multiple_items
+    description: Extract all items from a list, yielding one output per item
+    variables:
+      - id: data_record
+        type: DataRecord
+      - id: item
+        type: text
+
+    steps:
+      - type: FieldExtractor
+        id: extract_items
+        json_path: $.items[*]
+        inputs:
+          - data_record
+        outputs:
+          - item
+
+    inputs:
+      - data_record
+    outputs:
+      - item
+
+  # Example 3: Extract nested fields
+  - id: extract_nested_field
+    description: Extract a field from nested data structure
+    variables:
+      - id: complex_data
+        type: text # JSON string
+      - id: parsed_data
+        type: text
+      - id: nested_value
+        type: text
+
+    steps:
+      # First decode the JSON string
+      - type: Decoder
+        id: decode_json
+        format: json
+        inputs:
+          - complex_data
+        outputs:
+          - parsed_data
+
+      # Then extract a specific nested field
+      - type: FieldExtractor
+        id: extract_nested
+        json_path: $.data.value
+        inputs:
+          - parsed_data
+        outputs:
+          - nested_value
+
+    inputs:
+      - complex_data
+    outputs:
+      - nested_value
+
+  # Example 4: Extract with filtering
+  - id: extract_filtered_items
+    description: Extract items that match a condition using JSONPath filter
+    variables:
+      - id: product_list
+        type: text # JSON array
+      - id: products
+        type: text
+      - id: expensive_item
+        type: text
+
+    steps:
+      - type: Decoder
+        id: decode_products
+        format: json
+        inputs:
+          - product_list
+        outputs:
+          - products
+
+      # Extract only products with price > 100
+      - type: FieldExtractor
+        id: filter_expensive
+        json_path: $[?(@.price > 100)]
+        inputs:
+          - products
+        outputs:
+          - expensive_item
+
+    inputs:
+      - product_list
+    outputs:
+      - expensive_item
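
The json_path expressions above distinguish single-value extraction ($.name) from 1-to-many extraction ($.items[*]). A small sketch with the third-party jsonpath-ng library shows that difference; qtype's own JSONPath engine may differ:

# Sketch with jsonpath-ng: single-value vs. 1-to-many extraction over the
# DataRecord shape defined in the example.
from jsonpath_ng import parse

record = {"timestamp": "2024-01-01", "metadata": "demo", "items": ["a", "b", "c"]}

single = [m.value for m in parse("$.timestamp").find(record)]   # one value
many = [m.value for m in parse("$.items[*]").find(record)]      # one per item

print(single)  # ['2024-01-01']
print(many)    # ['a', 'b', 'c']
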
@@ -0,0 +1,125 @@
+id: multi_flow_example
+description: Multi-flow application demonstrating multiple independent flows and variable scoping
+
+models:
+  - type: Model
+    id: gpt4o-mini
+    provider: openai
+    model_id: gpt-4o-mini
+    inference_params:
+      temperature: 0.7
+
+flows:
+  # Flow 1: Clean customer names
+  - type: Flow
+    id: clean_names
+    description: Clean and standardize customer names
+
+    variables:
+      - id: raw_name
+        type: text
+      - id: clean_prompt
+        type: text
+      - id: clean_name
+        type: text
+
+    inputs:
+      - raw_name
+
+    outputs:
+      - clean_name
+
+    steps:
+      # Create prompt to clean the name
+      - id: create_clean_prompt
+        type: PromptTemplate
+        template: "Clean this name by trimming whitespace and converting to title case: {{raw_name}}. Return ONLY the cleaned name, nothing else."
+        inputs:
+          - raw_name
+        outputs:
+          - clean_prompt
+
+      # Call LLM to clean the name
+      - id: clean_step
+        type: LLMInference
+        model: gpt4o-mini
+        inputs:
+          - clean_prompt
+        outputs:
+          - clean_name
+
+  # Flow 2: Validate names
+  - type: Flow
+    id: validate_names
+    description: Validate that a name is a valid person name
+
+    variables:
+      - id: name_to_validate
+        type: text
+      - id: validate_prompt
+        type: text
+      - id: validation_result
+        type: text
+
+    inputs:
+      - name_to_validate
+
+    outputs:
+      - validation_result
+
+    steps:
+      # Create validation prompt
+      - id: create_validate_prompt
+        type: PromptTemplate
+        template: "Is '{{name_to_validate}}' a valid person name? Reply with just 'Valid' or 'Invalid: <reason>'"
+        inputs:
+          - name_to_validate
+        outputs:
+          - validate_prompt
+
+      # Call LLM to validate
+      - id: validate_step
+        type: LLMInference
+        model: gpt4o-mini
+        inputs:
+          - validate_prompt
+        outputs:
+          - validation_result
+
+  # Flow 3: Generate customer profile
+  - type: Flow
+    id: generate_profile
+    description: Generate a customer profile with account details
+
+    variables:
+      - id: customer_name
+        type: text
+      - id: profile_prompt
+        type: text
+      - id: customer_profile
+        type: text
+
+    inputs:
+      - customer_name
+
+    outputs:
+      - customer_profile
+
+    steps:
+      # Create profile generation prompt
+      - id: create_profile_prompt
+        type: PromptTemplate
+        template: "Generate realistic customer information for: {{customer_name}}. Include: account number, member since date, and status (Gold/Silver/Bronze). Format as a brief profile."
+        inputs:
+          - customer_name
+        outputs:
+          - profile_prompt
+
+      # Call LLM to generate profile
+      - id: generate_step
+        type: LLMInference
+        model: gpt4o-mini
+        inputs:
+          - profile_prompt
+        outputs:
+          - customer_profile
@@ -0,0 +1,43 @@
+id: hello_world
+description: A simple chat flow with OpenAI
+models:
+  - type: Model
+    id: gpt4
+    provider: openai
+    model_id: gpt-4
+    inference_params:
+      temperature: 0.7
+      max_tokens: 512
+    auth: openai_auth
+auths:
+  - type: api_key
+    id: openai_auth
+    api_key: ${OPENAI_KEY}
+    host: https://api.openai.com
+memories:
+  - id: chat_memory
+    token_limit: 10000
+flows:
+  - type: Flow
+    id: chat_example
+    interface:
+      type: Conversational
+    variables:
+      - id: user_message
+        type: ChatMessage
+      - id: response
+        type: ChatMessage
+    inputs:
+      - user_message
+    outputs:
+      - response
+    steps:
+      - id: llm_inference_step
+        type: LLMInference
+        model: gpt4
+        system_message: "You are a helpful assistant."
+        memory: chat_memory
+        inputs:
+          - user_message
+        outputs:
+          - response
@@ -0,0 +1,46 @@
+id: hello_world
+description: A simple chat flow with OpenAI and telemetry
+models:
+  - type: Model
+    id: gpt4
+    provider: openai
+    model_id: gpt-4
+    inference_params:
+      temperature: 0.7
+      max_tokens: 512
+    auth: openai_auth
+auths:
+  - type: api_key
+    id: openai_auth
+    api_key: ${OPENAI_KEY}
+    host: https://api.openai.com
+memories:
+  - id: chat_memory
+    token_limit: 10000
+flows:
+  - type: Flow
+    id: chat_example
+    interface:
+      type: Conversational
+    variables:
+      - id: user_message
+        type: ChatMessage
+      - id: response
+        type: ChatMessage
+    inputs:
+      - user_message
+    outputs:
+      - response
+    steps:
+      - id: llm_inference_step
+        type: LLMInference
+        model: gpt4
+        system_message: "You are a helpful assistant."
+        memory: chat_memory
+        inputs:
+          - user_message
+        outputs:
+          - response
+telemetry:
+  id: hello_world_telemetry
+  endpoint: http://localhost:6006/v1/traces