qtype 0.1.11-py3-none-any.whl → 0.1.12-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (215)
  1. docs/Concepts/mental-model-and-philosophy.md +363 -0
  2. docs/Contributing/index.md +276 -0
  3. docs/Contributing/roadmap.md +81 -0
  4. docs/Decisions/ADR-001-Chat-vs-Completion-Endpoint-Features.md +56 -0
  5. docs/Gallery/dataflow_pipelines.md +80 -0
  6. docs/Gallery/dataflow_pipelines.mermaid +45 -0
  7. docs/Gallery/research_assistant.md +98 -0
  8. docs/Gallery/research_assistant.mermaid +42 -0
  9. docs/Gallery/simple_chatbot.md +36 -0
  10. docs/Gallery/simple_chatbot.mermaid +35 -0
  11. docs/How To/Authentication/configure_aws_authentication.md +60 -0
  12. docs/How To/Authentication/use_api_key_authentication.md +40 -0
  13. docs/How To/Command Line Usage/load_multiple_inputs_from_files.md +62 -0
  14. docs/How To/Command Line Usage/pass_inputs_on_the_cli.md +52 -0
  15. docs/How To/Command Line Usage/serve_with_auto_reload.md +26 -0
  16. docs/How To/Data Processing/adjust_concurrency.md +41 -0
  17. docs/How To/Data Processing/cache_step_results.md +71 -0
  18. docs/How To/Data Processing/decode_json_xml.md +24 -0
  19. docs/How To/Data Processing/explode_collections.md +40 -0
  20. docs/How To/Data Processing/gather_results.md +68 -0
  21. docs/How To/Data Processing/read_data_from_files.md +35 -0
  22. docs/How To/Data Processing/read_sql_databases.md +47 -0
  23. docs/How To/Data Processing/write_data_to_file.md +40 -0
  24. docs/How To/Invoke Models/call_large_language_models.md +51 -0
  25. docs/How To/Invoke Models/create_embeddings.md +49 -0
  26. docs/How To/Invoke Models/reuse_prompts_with_templates.md +39 -0
  27. docs/How To/Language Features/include_qtype_yaml.md +45 -0
  28. docs/How To/Language Features/include_raw_text_from_other_files.md +47 -0
  29. docs/How To/Language Features/reference_entities_by_id.md +51 -0
  30. docs/How To/Language Features/use_environment_variables.md +47 -0
  31. docs/How To/Language Features/use_qtype_mcp.md +59 -0
  32. docs/How To/Observability & Debugging/trace_calls_with_open_telemetry.md +49 -0
  33. docs/How To/Observability & Debugging/validate_qtype_yaml.md +35 -0
  34. docs/How To/Observability & Debugging/visualize_application_architecture.md +61 -0
  35. docs/How To/Observability & Debugging/visualize_example.mermaid +35 -0
  36. docs/How To/Qtype Server/flow_as_ui.png +0 -0
  37. docs/How To/Qtype Server/serve_flows_as_apis.md +40 -0
  38. docs/How To/Qtype Server/serve_flows_as_ui.md +42 -0
  39. docs/How To/Qtype Server/use_conversational_interfaces.md +59 -0
  40. docs/How To/Qtype Server/use_variables_with_ui_hints.md +47 -0
  41. docs/How To/Tools & Integration/bind_tool_inputs_and_outputs.md +48 -0
  42. docs/How To/Tools & Integration/create_tools_from_openapi_specifications.md +89 -0
  43. docs/How To/Tools & Integration/create_tools_from_python_modules.md +90 -0
  44. docs/Reference/cli.md +338 -0
  45. docs/Reference/plugins.md +95 -0
  46. docs/Reference/semantic-validation-rules.md +179 -0
  47. docs/Tutorials/01-first-qtype-application.md +248 -0
  48. docs/Tutorials/02-conversational-chatbot.md +327 -0
  49. docs/Tutorials/03-structured-data.md +481 -0
  50. docs/Tutorials/04-tools-and-function-calling.md +483 -0
  51. docs/Tutorials/example_chat.png +0 -0
  52. docs/Tutorials/index.md +92 -0
  53. docs/components/APIKeyAuthProvider.md +7 -0
  54. docs/components/APITool.md +10 -0
  55. docs/components/AWSAuthProvider.md +13 -0
  56. docs/components/AWSSecretManager.md +5 -0
  57. docs/components/Agent.md +6 -0
  58. docs/components/Aggregate.md +8 -0
  59. docs/components/AggregateStats.md +7 -0
  60. docs/components/Application.md +22 -0
  61. docs/components/AuthorizationProvider.md +6 -0
  62. docs/components/AuthorizationProviderList.md +5 -0
  63. docs/components/BearerTokenAuthProvider.md +6 -0
  64. docs/components/BedrockReranker.md +8 -0
  65. docs/components/ChatContent.md +7 -0
  66. docs/components/ChatMessage.md +6 -0
  67. docs/components/ConstantPath.md +5 -0
  68. docs/components/CustomType.md +7 -0
  69. docs/components/Decoder.md +8 -0
  70. docs/components/DecoderFormat.md +8 -0
  71. docs/components/DocToTextConverter.md +7 -0
  72. docs/components/Document.md +7 -0
  73. docs/components/DocumentEmbedder.md +7 -0
  74. docs/components/DocumentIndex.md +7 -0
  75. docs/components/DocumentSearch.md +7 -0
  76. docs/components/DocumentSource.md +12 -0
  77. docs/components/DocumentSplitter.md +10 -0
  78. docs/components/Echo.md +8 -0
  79. docs/components/Embedding.md +7 -0
  80. docs/components/EmbeddingModel.md +6 -0
  81. docs/components/FieldExtractor.md +20 -0
  82. docs/components/FileSource.md +6 -0
  83. docs/components/FileWriter.md +7 -0
  84. docs/components/Flow.md +14 -0
  85. docs/components/FlowInterface.md +7 -0
  86. docs/components/Index.md +8 -0
  87. docs/components/IndexUpsert.md +6 -0
  88. docs/components/InvokeEmbedding.md +7 -0
  89. docs/components/InvokeFlow.md +8 -0
  90. docs/components/InvokeTool.md +8 -0
  91. docs/components/LLMInference.md +9 -0
  92. docs/components/ListType.md +5 -0
  93. docs/components/Memory.md +8 -0
  94. docs/components/MessageRole.md +14 -0
  95. docs/components/Model.md +10 -0
  96. docs/components/ModelList.md +5 -0
  97. docs/components/OAuth2AuthProvider.md +9 -0
  98. docs/components/PrimitiveTypeEnum.md +21 -0
  99. docs/components/PromptTemplate.md +7 -0
  100. docs/components/PythonFunctionTool.md +7 -0
  101. docs/components/RAGChunk.md +7 -0
  102. docs/components/RAGDocument.md +10 -0
  103. docs/components/RAGSearchResult.md +8 -0
  104. docs/components/Reranker.md +5 -0
  105. docs/components/SQLSource.md +8 -0
  106. docs/components/Search.md +7 -0
  107. docs/components/SearchResult.md +7 -0
  108. docs/components/SecretManager.md +7 -0
  109. docs/components/SecretReference.md +7 -0
  110. docs/components/Source.md +6 -0
  111. docs/components/Step.md +9 -0
  112. docs/components/TelemetrySink.md +9 -0
  113. docs/components/Tool.md +9 -0
  114. docs/components/ToolList.md +5 -0
  115. docs/components/ToolParameter.md +6 -0
  116. docs/components/TypeList.md +5 -0
  117. docs/components/Variable.md +6 -0
  118. docs/components/VariableList.md +5 -0
  119. docs/components/VectorIndex.md +7 -0
  120. docs/components/VectorSearch.md +6 -0
  121. docs/components/VertexAuthProvider.md +9 -0
  122. docs/components/Writer.md +5 -0
  123. docs/example_ui.png +0 -0
  124. docs/index.md +81 -0
  125. docs/legacy_how_tos/Configuration/modular-yaml.md +366 -0
  126. docs/legacy_how_tos/Configuration/phoenix_projects.png +0 -0
  127. docs/legacy_how_tos/Configuration/phoenix_traces.png +0 -0
  128. docs/legacy_how_tos/Configuration/reference-by-id.md +251 -0
  129. docs/legacy_how_tos/Configuration/telemetry-setup.md +259 -0
  130. docs/legacy_how_tos/Data Types/custom-types.md +52 -0
  131. docs/legacy_how_tos/Data Types/domain-types.md +113 -0
  132. docs/legacy_how_tos/Debugging/visualize-apps.md +147 -0
  133. docs/legacy_how_tos/Tools/api-tools.md +29 -0
  134. docs/legacy_how_tos/Tools/python-tools.md +299 -0
  135. examples/authentication/aws_authentication.qtype.yaml +63 -0
  136. examples/conversational_ai/hello_world_chat.qtype.yaml +43 -0
  137. examples/conversational_ai/simple_chatbot.qtype.yaml +40 -0
  138. examples/data_processing/batch_processing.qtype.yaml +54 -0
  139. examples/data_processing/cache_step_results.qtype.yaml +78 -0
  140. examples/data_processing/collect_results.qtype.yaml +55 -0
  141. examples/data_processing/dataflow_pipelines.qtype.yaml +108 -0
  142. examples/data_processing/decode_json.qtype.yaml +23 -0
  143. examples/data_processing/explode_items.qtype.yaml +25 -0
  144. examples/data_processing/read_file.qtype.yaml +60 -0
  145. examples/invoke_models/create_embeddings.qtype.yaml +28 -0
  146. examples/invoke_models/simple_llm_call.qtype.yaml +32 -0
  147. examples/language_features/include_raw.qtype.yaml +27 -0
  148. examples/language_features/ui_hints.qtype.yaml +52 -0
  149. examples/legacy/bedrock/data_analysis_with_telemetry.qtype.yaml +169 -0
  150. examples/legacy/bedrock/hello_world.qtype.yaml +39 -0
  151. examples/legacy/bedrock/hello_world_chat.qtype.yaml +37 -0
  152. examples/legacy/bedrock/hello_world_chat_with_telemetry.qtype.yaml +40 -0
  153. examples/legacy/bedrock/hello_world_chat_with_thinking.qtype.yaml +40 -0
  154. examples/legacy/bedrock/hello_world_completion.qtype.yaml +41 -0
  155. examples/legacy/bedrock/hello_world_completion_with_auth.qtype.yaml +44 -0
  156. examples/legacy/bedrock/simple_agent_chat.qtype.yaml +46 -0
  157. examples/legacy/chat_with_langfuse.qtype.yaml +50 -0
  158. examples/legacy/data_processor.qtype.yaml +48 -0
  159. examples/legacy/echo/debug_example.qtype.yaml +59 -0
  160. examples/legacy/echo/prompt.qtype.yaml +22 -0
  161. examples/legacy/echo/test.qtype.yaml +26 -0
  162. examples/legacy/echo/video.qtype.yaml +20 -0
  163. examples/legacy/field_extractor_example.qtype.yaml +137 -0
  164. examples/legacy/multi_flow_example.qtype.yaml +125 -0
  165. examples/legacy/openai/hello_world_chat.qtype.yaml +43 -0
  166. examples/legacy/openai/hello_world_chat_with_telemetry.qtype.yaml +46 -0
  167. examples/legacy/rag.qtype.yaml +207 -0
  168. examples/legacy/time_utilities.qtype.yaml +64 -0
  169. examples/legacy/vertex/hello_world_chat.qtype.yaml +36 -0
  170. examples/legacy/vertex/hello_world_completion.qtype.yaml +40 -0
  171. examples/legacy/vertex/hello_world_completion_with_auth.qtype.yaml +45 -0
  172. examples/observability_debugging/trace_with_opentelemetry.qtype.yaml +40 -0
  173. examples/research_assistant/research_assistant.qtype.yaml +94 -0
  174. examples/research_assistant/tavily.oas.yaml +722 -0
  175. examples/research_assistant/tavily.qtype.yaml +289 -0
  176. examples/tutorials/01_hello_world.qtype.yaml +48 -0
  177. examples/tutorials/02_conversational_chat.qtype.yaml +37 -0
  178. examples/tutorials/03_structured_data.qtype.yaml +130 -0
  179. examples/tutorials/04_tools_and_function_calling.qtype.yaml +89 -0
  180. qtype/application/converters/tools_from_api.py +39 -35
  181. qtype/base/types.py +6 -1
  182. qtype/commands/convert.py +3 -6
  183. qtype/commands/generate.py +7 -3
  184. qtype/commands/mcp.py +68 -0
  185. qtype/commands/validate.py +4 -4
  186. qtype/dsl/custom_types.py +2 -1
  187. qtype/dsl/linker.py +15 -7
  188. qtype/dsl/loader.py +3 -3
  189. qtype/dsl/model.py +24 -3
  190. qtype/interpreter/api.py +4 -1
  191. qtype/interpreter/base/base_step_executor.py +3 -1
  192. qtype/interpreter/conversions.py +7 -3
  193. qtype/interpreter/executors/construct_executor.py +1 -1
  194. qtype/interpreter/executors/file_source_executor.py +3 -3
  195. qtype/interpreter/executors/file_writer_executor.py +4 -4
  196. qtype/interpreter/executors/index_upsert_executor.py +1 -1
  197. qtype/interpreter/executors/sql_source_executor.py +1 -1
  198. qtype/interpreter/resource_cache.py +3 -1
  199. qtype/interpreter/rich_progress.py +6 -3
  200. qtype/interpreter/stream/chat/converter.py +25 -17
  201. qtype/interpreter/stream/chat/ui_request_to_domain_type.py +2 -2
  202. qtype/interpreter/typing.py +5 -7
  203. qtype/mcp/__init__.py +0 -0
  204. qtype/mcp/server.py +467 -0
  205. qtype/semantic/checker.py +1 -1
  206. qtype/semantic/generate.py +3 -3
  207. qtype/semantic/visualize.py +38 -51
  208. {qtype-0.1.11.dist-info → qtype-0.1.12.dist-info}/METADATA +21 -1
  209. qtype-0.1.12.dist-info/RECORD +325 -0
  210. {qtype-0.1.11.dist-info → qtype-0.1.12.dist-info}/WHEEL +1 -1
  211. schema/qtype.schema.json +4018 -0
  212. qtype-0.1.11.dist-info/RECORD +0 -142
  213. {qtype-0.1.11.dist-info → qtype-0.1.12.dist-info}/entry_points.txt +0 -0
  214. {qtype-0.1.11.dist-info → qtype-0.1.12.dist-info}/licenses/LICENSE +0 -0
  215. {qtype-0.1.11.dist-info → qtype-0.1.12.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,108 @@
+ id: review_analysis_pipeline
+ description: |
+   Automated product review analysis pipeline demonstrating dataflow processing.
+   Reads reviews from SQLite database, analyzes sentiment with LLM, and writes
+   enriched results to a Parquet file.
+
+ models:
+   - type: Model
+     id: nova_lite
+     provider: aws-bedrock
+     model_id: amazon.nova-lite-v1:0
+     inference_params:
+       temperature: 0.7
+       max_tokens: 256
+
+ flows:
+   - type: Flow
+     id: analyze_reviews
+     description: Batch process product reviews with LLM sentiment analysis
+
+     variables:
+       - id: review_id
+         type: int
+       - id: product_name
+         type: text
+       - id: rating
+         type: int
+       - id: review_text
+         type: text
+       - id: analysis_prompt
+         type: text
+       - id: llm_analysis
+         type: text
+       - id: output_path
+         type: text
+       - id: result_file
+         type: text
+
+     inputs:
+       - output_path
+
+     outputs:
+       - result_file
+
+     steps:
+       # Step 1: Read reviews from SQLite database
+       # SQLSource emits one message per database row
+       - id: load_reviews
+         type: SQLSource
+         connection: "sqlite:///examples/data_processing/reviews.db"
+         query: |
+           SELECT
+             review_id,
+             product_name,
+             rating,
+             review_text
+           FROM product_reviews
+           ORDER BY review_id
+         inputs: []
+         outputs:
+           - review_id
+           - product_name
+           - rating
+           - review_text
+
+       # Step 2: Format analysis prompt for each review
+       # PromptTemplate creates structured prompts from review data
+       - id: create_prompt
+         type: PromptTemplate
+         template: |
+           Analyze this product review in 1-2 sentences. Include:
+           - Overall sentiment (positive/negative/mixed)
+           - 2-3 key themes or points
+
+           Product: {product_name}
+           Rating: {rating}/5
+           Review: {review_text}
+         inputs:
+           - product_name
+           - rating
+           - review_text
+         outputs:
+           - analysis_prompt
+
+       # Step 3: Analyze each review with LLM
+       # LLMInference processes each message through the language model
+       - id: analyze_sentiment
+         type: LLMInference
+         model: nova_lite
+         inputs:
+           - analysis_prompt
+         outputs:
+           - llm_analysis
+
+       # Step 4: Write enriched results to Parquet file
+       # FileWriter batches all messages and writes once
+       - id: write_results
+         type: FileWriter
+         path: output_path
+         inputs:
+           - review_id
+           - product_name
+           - rating
+           - review_text
+           - llm_analysis
+           - output_path
+         outputs:
+           - result_file
@@ -0,0 +1,23 @@
+ id: decode_json_example
+ description: Decode JSON string into structured data
+
+ flows:
+   - id: decode_product
+     description: Parse JSON string into variables
+     inputs: [json_string]
+     outputs: [name, price]
+
+     variables:
+       - id: json_string
+         type: text
+       - id: name
+         type: text
+       - id: price
+         type: float
+
+     steps:
+       - type: Decoder
+         id: parse_json
+         format: json
+         inputs: [json_string]
+         outputs: [name, price]
@@ -0,0 +1,25 @@
+ id: explode_example
+ description: Explode a list into individual items for fan-out processing
+
+ flows:
+   - type: Flow
+     id: main
+     description: Takes a list and processes each item individually
+
+     variables:
+       - id: items
+         type: list[text]
+       - id: item
+         type: text
+
+     inputs:
+       - items
+
+     outputs:
+       - item
+
+     steps:
+       - type: Explode
+         id: fan_out
+         inputs: [items]
+         outputs: [item]
@@ -0,0 +1,60 @@
+ id: read_file_example
+ description: Read data from a CSV file
+
+ models:
+   - type: Model
+     id: nova
+     provider: aws-bedrock
+     model_id: amazon.nova-lite-v1:0
+
+ flows:
+   - type: Flow
+     id: process_file_data
+     description: Read and process data from a CSV file
+
+     variables:
+       - id: query
+         type: text
+       - id: topic
+         type: text
+       - id: prompt
+         type: text
+       - id: answer
+         type: text
+
+     inputs: []
+
+     outputs:
+       - query
+       - topic
+       - answer
+
+     steps:
+       - id: read_data
+         type: FileSource
+         path:
+           uri: examples/data_processing/batch_inputs.csv
+         outputs:
+           - query
+           - topic
+
+       - id: create_prompt
+         type: PromptTemplate
+         template: |
+           Topic: {topic}
+           Question: {query}
+
+           Provide a concise answer:
+         inputs:
+           - query
+           - topic
+         outputs:
+           - prompt
+
+       - id: generate_answer
+         type: LLMInference
+         model: nova
+         inputs:
+           - prompt
+         outputs:
+           - answer
@@ -0,0 +1,28 @@
+ id: create_embeddings
+ description: Generate embeddings from text using AWS Bedrock Titan
+
+ models:
+   - type: EmbeddingModel
+     id: titan_embed
+     provider: aws-bedrock
+     model_id: amazon.titan-embed-text-v2:0
+     dimensions: 1024
+
+ flows:
+   - type: Flow
+     id: main
+     variables:
+       - id: text
+         type: text
+       - id: embedding
+         type: Embedding
+     inputs:
+       - text
+     outputs:
+       - embedding
+     steps:
+       - type: InvokeEmbedding
+         id: embed_text
+         model: titan_embed
+         inputs: [text]
+         outputs: [embedding]
@@ -0,0 +1,32 @@
+ id: simple_llm_call
+ description: Simple example of calling a large language model
+
+ models:
+   - type: Model
+     id: nova_lite
+     provider: aws-bedrock
+     model_id: amazon.nova-lite-v1:0
+     inference_params:
+       temperature: 0.7
+       max_tokens: 500
+
+ flows:
+   - type: Flow
+     id: main
+     variables:
+       - id: text
+         type: text
+       - id: response
+         type: text
+     inputs:
+       - text
+     outputs:
+       - response
+
+     steps:
+       - type: LLMInference
+         id: assistant
+         model: nova_lite
+         system_message: "You are a helpful assistant"
+         inputs: [text]
+         outputs: [response]
@@ -0,0 +1,27 @@
+ id: include_raw_example
+ description: Demonstrates using include_raw to load prompt templates from external files
+
+ flows:
+   - type: Flow
+     id: main
+     variables:
+       - id: theme
+         type: text
+       - id: tone
+         type: text
+       - id: story_prompt
+         type: text
+     inputs:
+       - theme
+       - tone
+     outputs:
+       - story_prompt
+     steps:
+       - id: generate_story
+         type: PromptTemplate
+         template: !include_raw story_prompt.txt
+         inputs:
+           - theme
+           - tone
+         outputs:
+           - story_prompt
@@ -0,0 +1,52 @@
+ id: ui_hints_example
+ description: Demonstrates how to use UI hints to customize input widgets
+
+ models:
+   - type: Model
+     id: nova
+     provider: aws-bedrock
+     model_id: amazon.nova-lite-v1:0
+     inference_params:
+       temperature: 0.7
+       max_tokens: 500
+
+ flows:
+   - type: Flow
+     id: generate_story
+     description: Generate a creative story based on a detailed prompt
+
+     variables:
+       # The 'ui' field provides hints to the web UI about how to render inputs
+       # Using 'widget: textarea' renders a multi-line text area instead of single-line input
+       - id: story_prompt
+         type: text
+         ui:
+           widget: textarea # Options: text (default), textarea
+
+       # Variables without 'ui' hints use default widgets based on their type
+       - id: max_length
+         type: int
+
+       - id: story
+         type: text
+
+     inputs:
+       - story_prompt
+       - max_length
+
+     outputs:
+       - story
+
+     steps:
+       - id: generate
+         type: LLMInference
+         model: nova
+         inputs: [story_prompt, max_length]
+         system_message: |
+           Write a creative story based on this prompt:
+
+           {{story_prompt}}
+
+           Keep the story under {{max_length}} words.
+         outputs:
+           - story
@@ -0,0 +1,169 @@
+ id: data_analysis_pipeline
+ description: A multi-step data analysis pipeline with AWS Bedrock and telemetry tracking
+
+ models:
+   - type: Model
+     id: nova_lite
+     provider: aws-bedrock
+     model_id: amazon.nova-lite-v1:0
+     inference_params:
+       temperature: 0.7
+       max_tokens: 1024
+
+ flows:
+   - type: Flow
+     id: analyze_data_flow
+     description: Load sales data, analyze it with LLM, generate insights
+     variables:
+       - id: raw_data
+         type: text
+       - id: analysis_prompt
+         type: text
+       - id: analysis_result
+         type: text
+       - id: insights_prompt
+         type: text
+       - id: key_insights
+         type: text
+       - id: summary_prompt
+         type: text
+       - id: executive_summary
+         type: text
+       - id: final_report
+         type: text
+       - id: output_file
+         type: text
+     inputs:
+       - output_file
+     outputs:
+       - executive_summary
+       - final_report
+     steps:
+       # Step 1: Load sales data from file
+       - id: load_sales_data
+         type: PromptTemplate
+         template: !include_raw sample_data.txt
+         outputs:
+           - raw_data
+
+       # Step 2: Create analysis prompt
+       - id: create_analysis_prompt
+         type: PromptTemplate
+         template: |
+           You are a data analyst. Analyze the following sales data and provide:
+           1. A summary of what the data contains
+           2. Key patterns or trends you observe
+           3. Any notable observations or anomalies
+
+           Sales Data:
+           {raw_data}
+
+           Provide your analysis in a clear, structured format.
+         inputs:
+           - raw_data
+         outputs:
+           - analysis_prompt
+
+       # Step 3: Generate initial analysis
+       - id: analyze_with_llm
+         type: LLMInference
+         model: nova_lite
+         system_message: You are an expert data analyst with strong analytical and critical thinking skills.
+         inputs:
+           - analysis_prompt
+         outputs:
+           - analysis_result
+
+       # Step 4: Create insights extraction prompt
+       - id: create_insights_prompt
+         type: PromptTemplate
+         template: |
+           Based on this analysis, extract the 3-5 most important insights or takeaways:
+
+           {analysis_result}
+
+           Format as a bullet list with clear, actionable insights.
+         inputs:
+           - analysis_result
+         outputs:
+           - insights_prompt
+
+       # Step 5: Extract key insights
+       - id: extract_insights
+         type: LLMInference
+         model: nova_lite
+         system_message: You are an expert at distilling complex analysis into clear, actionable insights.
+         inputs:
+           - insights_prompt
+         outputs:
+           - key_insights
+
+       # Step 6: Create executive summary prompt
+       - id: create_summary_prompt
+         type: PromptTemplate
+         template: |
+           Create a concise executive summary (2-3 paragraphs) that combines:
+
+           Original Analysis:
+           {analysis_result}
+
+           Key Insights:
+           {key_insights}
+
+           Make it suitable for C-level executives - clear, impactful, and action-oriented.
+         inputs:
+           - analysis_result
+           - key_insights
+         outputs:
+           - summary_prompt
+
+       # Step 7: Generate executive summary
+       - id: generate_summary
+         type: LLMInference
+         model: nova_lite
+         system_message: You are an expert at writing executive summaries for business leaders.
+         inputs:
+           - summary_prompt
+         outputs:
+           - executive_summary
+
+       # Step 8: Create final report
+       - id: create_final_report
+         type: PromptTemplate
+         template: |
+           ================================================================================
+           DATA ANALYSIS REPORT
+           ================================================================================
+
+           EXECUTIVE SUMMARY
+           -----------------
+           {executive_summary}
+
+           KEY INSIGHTS
+           ------------
+           {key_insights}
+
+           DETAILED ANALYSIS
+           -----------------
+           {analysis_result}
+
+           ================================================================================
+           Report generated by QType Data Analysis Pipeline
+           ================================================================================
+         inputs:
+           - executive_summary
+           - key_insights
+           - analysis_result
+         outputs:
+           - final_report
+
+       # Step 9: Write results to file
+       - id: save_report
+         type: FileWriter
+         path: output_file
+         inputs:
+           - final_report
+
+ telemetry:
+   id: data_analysis_telemetry
+   endpoint: http://localhost:6006/v1/traces
@@ -0,0 +1,39 @@
+ id: hello_world
+ description: A simple hello world application using AWS Bedrock
+ models:
+   - type: Model
+     id: nova_lite
+     provider: aws-bedrock
+     model_id: amazon.nova-lite-v1:0
+     inference_params:
+       temperature: 0.7
+       max_tokens: 512
+ flows:
+   - type: Flow
+     id: simple_example
+     variables:
+       - id: question
+         type: text
+       - id: formatted_prompt
+         type: text
+       - id: answer
+         type: text
+     inputs:
+       - question
+     outputs:
+       - answer
+     steps:
+       - id: question_prompt
+         type: PromptTemplate
+         template: "You are a helpful assistant. Answer the following question:\n{question}\n"
+         inputs:
+           - question
+         outputs:
+           - formatted_prompt
+       - id: llm_inference_step
+         type: LLMInference
+         model: nova_lite
+         inputs:
+           - formatted_prompt
+         outputs:
+           - answer
@@ -0,0 +1,37 @@
+ id: hello_world
+ description: A simple stateful chat flow with AWS Bedrock
+ models:
+   - type: Model
+     id: nova_lite
+     provider: aws-bedrock
+     model_id: amazon.nova-lite-v1:0
+     inference_params:
+       temperature: 0.7
+       max_tokens: 512
+ memories:
+   - id: chat_memory
+     token_limit: 10000
+ flows:
+   - type: Flow
+     id: simple_chat_example
+     interface:
+       type: Conversational
+     variables:
+       - id: user_message
+         type: ChatMessage
+       - id: response_message
+         type: ChatMessage
+     inputs:
+       - user_message
+     outputs:
+       - response_message
+     steps:
+       - id: llm_inference_step
+         type: LLMInference
+         model: nova_lite
+         system_message: "You are a helpful assistant."
+         memory: chat_memory
+         inputs:
+           - user_message
+         outputs:
+           - response_message
@@ -0,0 +1,40 @@
+ id: hello_world
+ description: A simple chat flow with AWS Bedrock and telemetry
+ models:
+   - type: Model
+     id: nova_lite
+     provider: aws-bedrock
+     model_id: amazon.nova-lite-v1:0
+     inference_params:
+       temperature: 0.7
+       max_tokens: 512
+ memories:
+   - id: chat_memory
+     token_limit: 10000
+ flows:
+   - type: Flow
+     id: simple_chat_example
+     interface:
+       type: Conversational
+     variables:
+       - id: user_message
+         type: ChatMessage
+       - id: response_message
+         type: ChatMessage
+     inputs:
+       - user_message
+     outputs:
+       - response_message
+     steps:
+       - id: llm_inference_step
+         type: LLMInference
+         model: nova_lite
+         system_message: 'You are a helpful assistant, but you provide opposite or incorrect answers to any question. For example, if the user asks "What is 2 + 2?", you should respond with "3".'
+         memory: chat_memory
+         inputs:
+           - user_message
+         outputs:
+           - response_message
+ telemetry:
+   id: hello_world_telemetry
+   endpoint: http://localhost:6006/v1/traces
@@ -0,0 +1,40 @@
+ id: hello_world
+ description: A simple stateful chat flow with AWS Bedrock
+ models:
+   - type: Model
+     id: claude-haiku
+     provider: aws-bedrock
+     model_id: us.anthropic.claude-haiku-4-5-20251001-v1:0
+     inference_params:
+       temperature: 1
+       max_tokens: 2048
+       thinking:
+         type: enabled
+         budget_tokens: 1024
+ memories:
+   - id: chat_memory
+     token_limit: 10000
+ flows:
+   - type: Flow
+     id: simple_chat_example
+     interface:
+       type: Conversational
+     variables:
+       - id: user_message
+         type: ChatMessage
+       - id: response_message
+         type: ChatMessage
+     inputs:
+       - user_message
+     outputs:
+       - response_message
+     steps:
+       - id: llm_inference_step
+         type: LLMInference
+         model: claude-haiku
+         system_message: "You are a helpful assistant."
+         memory: chat_memory
+         inputs:
+           - user_message
+         outputs:
+           - response_message