flowra 0.0.1.dev1__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (157)
  1. flowra-0.0.1.dev1/.claude/commands/update-pricing.md +48 -0
  2. flowra-0.0.1.dev1/.env.example +8 -0
  3. flowra-0.0.1.dev1/.github/workflows/master.yml +40 -0
  4. flowra-0.0.1.dev1/.github/workflows/publish.yml +132 -0
  5. flowra-0.0.1.dev1/.github/workflows/pull_request.yml +32 -0
  6. flowra-0.0.1.dev1/.github/workflows/pull_request_e2e.yml +45 -0
  7. flowra-0.0.1.dev1/.gitignore +15 -0
  8. flowra-0.0.1.dev1/.python-version +1 -0
  9. flowra-0.0.1.dev1/CHANGELOG.md +21 -0
  10. flowra-0.0.1.dev1/CLAUDE.md +58 -0
  11. flowra-0.0.1.dev1/Makefile +61 -0
  12. flowra-0.0.1.dev1/PKG-INFO +18 -0
  13. flowra-0.0.1.dev1/README.md +115 -0
  14. flowra-0.0.1.dev1/context7.json +81 -0
  15. flowra-0.0.1.dev1/docs/agent.md +785 -0
  16. flowra-0.0.1.dev1/docs/architecture.md +121 -0
  17. flowra-0.0.1.dev1/docs/lib.md +564 -0
  18. flowra-0.0.1.dev1/docs/llm.md +746 -0
  19. flowra-0.0.1.dev1/docs/review_plan.md +196 -0
  20. flowra-0.0.1.dev1/docs/review_prompts/step1_structure.md +80 -0
  21. flowra-0.0.1.dev1/docs/review_prompts/step2_code_style.md +261 -0
  22. flowra-0.0.1.dev1/docs/review_prompts/step3_documentation.md +106 -0
  23. flowra-0.0.1.dev1/docs/review_prompts/step4_doc_readability.md +94 -0
  24. flowra-0.0.1.dev1/docs/review_prompts/step5_doc_audit.md +112 -0
  25. flowra-0.0.1.dev1/docs/review_prompts/step6_tests.md +130 -0
  26. flowra-0.0.1.dev1/docs/runtime.md +529 -0
  27. flowra-0.0.1.dev1/docs/todo.md +22 -0
  28. flowra-0.0.1.dev1/docs/tools.md +320 -0
  29. flowra-0.0.1.dev1/examples/__init__.py +0 -0
  30. flowra-0.0.1.dev1/examples/app_agent.py +93 -0
  31. flowra-0.0.1.dev1/examples/console_chat.py +211 -0
  32. flowra-0.0.1.dev1/examples/llm_logging.py +82 -0
  33. flowra-0.0.1.dev1/examples/llm_routing.py +35 -0
  34. flowra-0.0.1.dev1/examples/menu_agent.py +254 -0
  35. flowra-0.0.1.dev1/examples/menu_agent_class.py +185 -0
  36. flowra-0.0.1.dev1/examples/model_registry.py +81 -0
  37. flowra-0.0.1.dev1/examples/system_prompt.txt +1 -0
  38. flowra-0.0.1.dev1/examples/tools/__init__.py +3 -0
  39. flowra-0.0.1.dev1/examples/tools/calculator.py +93 -0
  40. flowra-0.0.1.dev1/examples/tools/random_numbers.py +45 -0
  41. flowra-0.0.1.dev1/examples/tools/switch_model.py +35 -0
  42. flowra-0.0.1.dev1/examples/tui_chat.py +698 -0
  43. flowra-0.0.1.dev1/flowra/__init__.py +3 -0
  44. flowra-0.0.1.dev1/flowra/agent/__init__.py +70 -0
  45. flowra-0.0.1.dev1/flowra/agent/agent.py +21 -0
  46. flowra-0.0.1.dev1/flowra/agent/agent_def.py +213 -0
  47. flowra-0.0.1.dev1/flowra/agent/agent_registry.py +139 -0
  48. flowra-0.0.1.dev1/flowra/agent/agent_store.py +10 -0
  49. flowra-0.0.1.dev1/flowra/agent/compile.py +417 -0
  50. flowra-0.0.1.dev1/flowra/agent/interrupt_token.py +15 -0
  51. flowra-0.0.1.dev1/flowra/agent/service_locator.py +15 -0
  52. flowra-0.0.1.dev1/flowra/agent/step_decorator.py +28 -0
  53. flowra-0.0.1.dev1/flowra/agent/stored_values.py +114 -0
  54. flowra-0.0.1.dev1/flowra/lib/__init__.py +11 -0
  55. flowra-0.0.1.dev1/flowra/lib/chat/__init__.py +14 -0
  56. flowra-0.0.1.dev1/flowra/lib/chat/agent.py +75 -0
  57. flowra-0.0.1.dev1/flowra/lib/chat/config.py +29 -0
  58. flowra-0.0.1.dev1/flowra/lib/chat/hook_executor.py +21 -0
  59. flowra-0.0.1.dev1/flowra/lib/chat/hooks.py +43 -0
  60. flowra-0.0.1.dev1/flowra/lib/chat/spec.py +19 -0
  61. flowra-0.0.1.dev1/flowra/lib/config_value.py +19 -0
  62. flowra-0.0.1.dev1/flowra/lib/llm_config.py +13 -0
  63. flowra-0.0.1.dev1/flowra/lib/tool_loop/__init__.py +99 -0
  64. flowra-0.0.1.dev1/flowra/lib/tool_loop/_tool_call_agent.py +76 -0
  65. flowra-0.0.1.dev1/flowra/lib/tool_loop/agent.py +282 -0
  66. flowra-0.0.1.dev1/flowra/lib/tool_loop/cache.py +157 -0
  67. flowra-0.0.1.dev1/flowra/lib/tool_loop/config.py +26 -0
  68. flowra-0.0.1.dev1/flowra/lib/tool_loop/context.py +21 -0
  69. flowra-0.0.1.dev1/flowra/lib/tool_loop/hook_executor.py +189 -0
  70. flowra-0.0.1.dev1/flowra/lib/tool_loop/hooks.py +433 -0
  71. flowra-0.0.1.dev1/flowra/lib/tool_loop/spec.py +30 -0
  72. flowra-0.0.1.dev1/flowra/llm/__init__.py +28 -0
  73. flowra-0.0.1.dev1/flowra/llm/_base.py +10 -0
  74. flowra-0.0.1.dev1/flowra/llm/blocks.py +58 -0
  75. flowra-0.0.1.dev1/flowra/llm/messages.py +33 -0
  76. flowra-0.0.1.dev1/flowra/llm/pricing/__init__.py +3 -0
  77. flowra-0.0.1.dev1/flowra/llm/pricing/anthropic.py +64 -0
  78. flowra-0.0.1.dev1/flowra/llm/pricing/google.py +50 -0
  79. flowra-0.0.1.dev1/flowra/llm/pricing/openai.py +62 -0
  80. flowra-0.0.1.dev1/flowra/llm/provider.py +13 -0
  81. flowra-0.0.1.dev1/flowra/llm/providers/__init__.py +11 -0
  82. flowra-0.0.1.dev1/flowra/llm/providers/anthropic_vertex.py +343 -0
  83. flowra-0.0.1.dev1/flowra/llm/providers/google_vertex.py +379 -0
  84. flowra-0.0.1.dev1/flowra/llm/providers/openai.py +367 -0
  85. flowra-0.0.1.dev1/flowra/llm/request.py +23 -0
  86. flowra-0.0.1.dev1/flowra/llm/response.py +66 -0
  87. flowra-0.0.1.dev1/flowra/llm/schema_formatting.py +73 -0
  88. flowra-0.0.1.dev1/flowra/llm/schema_validation.py +33 -0
  89. flowra-0.0.1.dev1/flowra/llm/tools.py +14 -0
  90. flowra-0.0.1.dev1/flowra/py.typed +1 -0
  91. flowra-0.0.1.dev1/flowra/runtime/__init__.py +12 -0
  92. flowra-0.0.1.dev1/flowra/runtime/_sealed_scope.py +52 -0
  93. flowra-0.0.1.dev1/flowra/runtime/engine.py +328 -0
  94. flowra-0.0.1.dev1/flowra/runtime/execution.py +40 -0
  95. flowra-0.0.1.dev1/flowra/runtime/interrupt.py +46 -0
  96. flowra-0.0.1.dev1/flowra/runtime/runtime.py +280 -0
  97. flowra-0.0.1.dev1/flowra/runtime/runtime_scope.py +125 -0
  98. flowra-0.0.1.dev1/flowra/runtime/serialization.py +65 -0
  99. flowra-0.0.1.dev1/flowra/runtime/storage/__init__.py +5 -0
  100. flowra-0.0.1.dev1/flowra/runtime/storage/file.py +90 -0
  101. flowra-0.0.1.dev1/flowra/runtime/storage/in_memory.py +47 -0
  102. flowra-0.0.1.dev1/flowra/runtime/storage/session_storage.py +48 -0
  103. flowra-0.0.1.dev1/flowra/tools/__init__.py +24 -0
  104. flowra-0.0.1.dev1/flowra/tools/local_tool.py +313 -0
  105. flowra-0.0.1.dev1/flowra/tools/mcp_connection.py +389 -0
  106. flowra-0.0.1.dev1/flowra/tools/tool_group.py +38 -0
  107. flowra-0.0.1.dev1/flowra/tools/tool_registry.py +216 -0
  108. flowra-0.0.1.dev1/flowra/tools/types.py +18 -0
  109. flowra-0.0.1.dev1/flowra/version.py +2 -0
  110. flowra-0.0.1.dev1/pyproject.toml +87 -0
  111. flowra-0.0.1.dev1/tests/__init__.py +0 -0
  112. flowra-0.0.1.dev1/tests/agent/__init__.py +0 -0
  113. flowra-0.0.1.dev1/tests/agent/test_agent.py +70 -0
  114. flowra-0.0.1.dev1/tests/agent/test_agent_def.py +200 -0
  115. flowra-0.0.1.dev1/tests/agent/test_agent_registry.py +316 -0
  116. flowra-0.0.1.dev1/tests/agent/test_compile.py +876 -0
  117. flowra-0.0.1.dev1/tests/agent/test_step_ref.py +179 -0
  118. flowra-0.0.1.dev1/tests/agent/test_values.py +226 -0
  119. flowra-0.0.1.dev1/tests/lib/__init__.py +1 -0
  120. flowra-0.0.1.dev1/tests/lib/test_chat_agent.py +541 -0
  121. flowra-0.0.1.dev1/tests/lib/test_config_value.py +15 -0
  122. flowra-0.0.1.dev1/tests/lib/test_tool_call_agent.py +121 -0
  123. flowra-0.0.1.dev1/tests/lib/test_tool_loop_agent.py +1029 -0
  124. flowra-0.0.1.dev1/tests/lib/tool_loop/__init__.py +1 -0
  125. flowra-0.0.1.dev1/tests/lib/tool_loop/test_cache.py +348 -0
  126. flowra-0.0.1.dev1/tests/llm/__init__.py +0 -0
  127. flowra-0.0.1.dev1/tests/llm/pricing/__init__.py +0 -0
  128. flowra-0.0.1.dev1/tests/llm/pricing/test_anthropic.py +62 -0
  129. flowra-0.0.1.dev1/tests/llm/pricing/test_google.py +36 -0
  130. flowra-0.0.1.dev1/tests/llm/pricing/test_openai.py +39 -0
  131. flowra-0.0.1.dev1/tests/llm/providers/__init__.py +0 -0
  132. flowra-0.0.1.dev1/tests/llm/providers/test_anthropic_e2e.py +259 -0
  133. flowra-0.0.1.dev1/tests/llm/providers/test_anthropic_vertex.py +186 -0
  134. flowra-0.0.1.dev1/tests/llm/providers/test_google_vertex.py +350 -0
  135. flowra-0.0.1.dev1/tests/llm/providers/test_google_vertex_e2e.py +200 -0
  136. flowra-0.0.1.dev1/tests/llm/providers/test_openai_e2e.py +212 -0
  137. flowra-0.0.1.dev1/tests/llm/providers/test_openai_provider.py +177 -0
  138. flowra-0.0.1.dev1/tests/llm/test_metadata.py +8 -0
  139. flowra-0.0.1.dev1/tests/llm/test_response.py +78 -0
  140. flowra-0.0.1.dev1/tests/llm/test_schema_formatting.py +180 -0
  141. flowra-0.0.1.dev1/tests/llm/test_schema_validation.py +87 -0
  142. flowra-0.0.1.dev1/tests/runtime/__init__.py +0 -0
  143. flowra-0.0.1.dev1/tests/runtime/storage/__init__.py +26 -0
  144. flowra-0.0.1.dev1/tests/runtime/storage/test_file.py +153 -0
  145. flowra-0.0.1.dev1/tests/runtime/storage/test_in_memory.py +70 -0
  146. flowra-0.0.1.dev1/tests/runtime/test_engine.py +863 -0
  147. flowra-0.0.1.dev1/tests/runtime/test_interrupt.py +49 -0
  148. flowra-0.0.1.dev1/tests/runtime/test_persistence.py +681 -0
  149. flowra-0.0.1.dev1/tests/runtime/test_runtime.py +502 -0
  150. flowra-0.0.1.dev1/tests/runtime/test_scope.py +244 -0
  151. flowra-0.0.1.dev1/tests/runtime/test_serialization.py +419 -0
  152. flowra-0.0.1.dev1/tests/tools/__init__.py +0 -0
  153. flowra-0.0.1.dev1/tests/tools/test_local_tool.py +566 -0
  154. flowra-0.0.1.dev1/tests/tools/test_mcp_connection.py +456 -0
  155. flowra-0.0.1.dev1/tests/tools/test_tool_group.py +63 -0
  156. flowra-0.0.1.dev1/tests/tools/test_tool_registry.py +433 -0
  157. flowra-0.0.1.dev1/uv.lock +1138 -0
@@ -0,0 +1,48 @@
1
+ # Update Pricing
2
+
3
+ Update the LLM pricing tables in `flowra/llm/pricing/` with current pricing from the web.
4
+
5
+ ## Instructions
6
+
7
+ You are updating the hardcoded pricing tables for LLM models used by this project.
8
+
9
+ ### Project structure
10
+
11
+ Pricing is split into three modules by API protocol (not by vendor):
12
+
13
+ - `flowra/llm/pricing/anthropic.py` — Anthropic Claude models. Fields: input, output, cache_read, cache_creation_5m, cache_creation_1h.
14
+ - `flowra/llm/pricing/openai.py` — OpenAI + OpenAI-compatible (Mercury/Inception, etc.). Fields: input, output, cache_read. Cache creation is free.
15
+ - `flowra/llm/pricing/google.py` — Google Gemini models. Fields: input, output, cache_read. Cache creation is free.
16
+
17
+ Each module has a `PRICING: list[tuple[str, ModelPricing]]` table ordered longest-key-first for correct substring matching.
18
+
19
+ ### Steps to follow
20
+
21
+ 1. **Read current pricing tables**: Read all three files in `flowra/llm/pricing/` to see existing models and prices.
22
+
23
+ 2. **Determine which models to update**:
24
+ - If the user provided arguments (e.g., `/update-pricing all Gemini models`), search for pricing for those specific models PLUS all existing models in the tables.
25
+ - If no arguments were provided, refresh pricing for all existing models in all three tables.
26
+
27
+ 3. **Web search for current pricing**: Use WebSearch to find the most up-to-date pricing for:
28
+ - All models currently in the pricing tables
29
+ - Any additional models the user requested
30
+ - Search for official pricing pages: Anthropic (anthropic.com/pricing), OpenAI (openai.com/api/pricing), Google Cloud (cloud.google.com/vertex-ai/generative-ai/pricing)
31
+
32
+ 4. **Update the pricing tables**:
33
+ - Update existing entries with the latest pricing
34
+ - Add new entries for any models the user requested
35
+ - Remove entries for deprecated/discontinued models if appropriate
36
+ - **IMPORTANT**: Keep keys ordered longest-first to ensure correct substring matching. For example, `"gpt-4o-mini"` must come before `"gpt-4o"`.
37
+ - Place new models in the correct position by key length
38
+
39
+ 5. **Verify the changes**: After updating, briefly summarize what was changed (which models were updated, added, or removed) per module.
40
+
41
+ ## Important notes
42
+
43
+ - All prices are in dollars per 1 million tokens
44
+ - `cache_read` should be 0 for models that don't support prompt caching
45
+ - Anthropic has separate 5-minute and 1-hour cache creation prices. 5-minute = 1.25x base input, 1-hour = 2x base input. Cache read is the same for both TTLs (0.1x base input).
46
+ - OpenAI and Google cache creation is free (automatic) — no cache_creation field in their ModelPricing
47
+ - Model matching uses substring matching, so keys should be specific enough to avoid false matches but general enough to match version suffixes (e.g., `"claude-sonnet-4-5"` matches `"claude-sonnet-4-5@20250929"`)
48
+ - Always prioritize official pricing pages from model providers
@@ -0,0 +1,8 @@
1
+ VERTEX_CREDENTIALS=
2
+ VERTEX_PROJECT_NAME=
3
+ VERTEX_LOCATION=
4
+ OPENAI_API_KEY=
5
+ OPENAI_BASE_URL=https://eu.api.openai.com/v1
6
+ OPENAI_ORGANIZATION=
7
+ INCEPTION_BASE_URL=https://api.inceptionlabs.ai/v1
8
+ INCEPTION_API_KEY=
@@ -0,0 +1,40 @@
1
+ name: master
2
+
3
+ on:
4
+ push:
5
+ branches:
6
+ - master
7
+
8
+ concurrency:
9
+ group: ci-${{ github.ref }}
10
+ cancel-in-progress: false
11
+
12
+ jobs:
13
+ test_lint:
14
+ runs-on: ${{ matrix.os }}
15
+ timeout-minutes: 15
16
+ strategy:
17
+ fail-fast: false
18
+ matrix:
19
+ os: [ubuntu-latest, macos-latest]
20
+ python: ["3.12", "3.13", "3.14"]
21
+ steps:
22
+ - name: Checkout code
23
+ uses: actions/checkout@v6
24
+ - name: Set up uv
25
+ uses: astral-sh/setup-uv@v7
26
+ - name: Set up Python ${{ matrix.python }}
27
+ run: echo ${{ matrix.python }} > .python-version
28
+ - name: Install deps
29
+ run: MODE=ci make deps
30
+ - name: Run lint
31
+ run: MODE=ci make lint
32
+ - name: Run all tests
33
+ run: MODE=ci make test
34
+ env:
35
+ OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
36
+ OPENAI_ORGANIZATION: ${{ secrets.OPENAI_ORGANIZATION }}
37
+ OPENAI_BASE_URL: ${{ vars.OPENAI_BASE_URL }}
38
+ VERTEX_PROJECT_NAME: ${{ vars.VERTEX_PROJECT_NAME }}
39
+ VERTEX_LOCATION: ${{ vars.VERTEX_LOCATION }}
40
+ VERTEX_CREDENTIALS: ${{ secrets.VERTEX_CREDENTIALS }}
@@ -0,0 +1,132 @@
1
+ name: publish
2
+
3
+ on:
4
+ workflow_dispatch:
5
+ inputs:
6
+ type:
7
+ description: "Release type"
8
+ required: true
9
+ type: choice
10
+ options:
11
+ - pre-release
12
+ - release
13
+
14
+ jobs:
15
+ publish_prerelease:
16
+ if: inputs.type == 'pre-release'
17
+ runs-on: ubuntu-latest
18
+ steps:
19
+ - name: Checkout code
20
+ uses: actions/checkout@v6
21
+ - name: Set up uv
22
+ uses: astral-sh/setup-uv@v7
23
+ - name: Install deps
24
+ run: MODE=ci make deps
25
+ - name: Run lint
26
+ run: MODE=ci make lint
27
+ - name: Run all tests
28
+ run: MODE=ci make test
29
+ env:
30
+ OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
31
+ OPENAI_ORGANIZATION: ${{ secrets.OPENAI_ORGANIZATION }}
32
+ OPENAI_BASE_URL: ${{ vars.OPENAI_BASE_URL }}
33
+ VERTEX_PROJECT_NAME: ${{ vars.VERTEX_PROJECT_NAME }}
34
+ VERTEX_LOCATION: ${{ vars.VERTEX_LOCATION }}
35
+ VERTEX_CREDENTIALS: ${{ secrets.VERTEX_CREDENTIALS }}
36
+ - name: Set dev version
37
+ id: version
38
+ run: |
39
+ BASE=$(grep -oP '(?<=__version__ = ")[^"]+' flowra/version.py)
40
+ VERSION="${BASE}.dev${{ github.run_number }}"
41
+ sed -i "s/^__version__ = .*/__version__ = \"$VERSION\"/" flowra/version.py
42
+ echo "version=$VERSION" >> $GITHUB_OUTPUT
43
+ - name: Build package
44
+ run: uv build
45
+ - name: Publish to PyPI
46
+ run: uv publish dist/*
47
+ env:
48
+ UV_PUBLISH_TOKEN: ${{ secrets.PYPI_TOKEN }}
49
+ - name: Summary
50
+ run: echo "Published **${{ steps.version.outputs.version }}** to PyPI" >> $GITHUB_STEP_SUMMARY
51
+
52
+ publish_release:
53
+ if: inputs.type == 'release'
54
+ runs-on: ubuntu-latest
55
+ steps:
56
+ - name: Checkout code
57
+ uses: actions/checkout@v6
58
+ with:
59
+ token: ${{ secrets.GITHUB_TOKEN }}
60
+ - name: Set up uv
61
+ uses: astral-sh/setup-uv@v7
62
+ - name: Install deps
63
+ run: MODE=ci make deps
64
+ - name: Run lint
65
+ run: MODE=ci make lint
66
+ - name: Run all tests
67
+ run: MODE=ci make test
68
+ env:
69
+ OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
70
+ OPENAI_ORGANIZATION: ${{ secrets.OPENAI_ORGANIZATION }}
71
+ OPENAI_BASE_URL: ${{ vars.OPENAI_BASE_URL }}
72
+ VERTEX_PROJECT_NAME: ${{ vars.VERTEX_PROJECT_NAME }}
73
+ VERTEX_LOCATION: ${{ vars.VERTEX_LOCATION }}
74
+ VERTEX_CREDENTIALS: ${{ secrets.VERTEX_CREDENTIALS }}
75
+ - name: Read version
76
+ id: version
77
+ run: |
78
+ VERSION=$(grep -oP '(?<=__version__ = ")[^"]+' flowra/version.py)
79
+ echo "version=$VERSION" >> $GITHUB_OUTPUT
80
+ - name: Validate changelog
81
+ run: |
82
+ VERSION=${{ steps.version.outputs.version }}
83
+ if ! grep -q "^## \[$VERSION\]" CHANGELOG.md; then
84
+ echo "::error::CHANGELOG.md has no section for version $VERSION"
85
+ exit 1
86
+ fi
87
+ - name: Validate branch
88
+ run: |
89
+ if [ "${{ github.ref_name }}" != "master" ]; then
90
+ echo "::error::Release must be published from master, got ${{ github.ref_name }}"
91
+ exit 1
92
+ fi
93
+ - name: Build package
94
+ run: uv build
95
+ - name: Publish to PyPI
96
+ run: uv publish dist/*
97
+ env:
98
+ UV_PUBLISH_TOKEN: ${{ secrets.PYPI_TOKEN }}
99
+ - name: Create git tag
100
+ run: |
101
+ VERSION=${{ steps.version.outputs.version }}
102
+ git tag "v$VERSION"
103
+ git push origin "v$VERSION"
104
+ - name: Extract changelog
105
+ id: changelog
106
+ run: |
107
+ VERSION=${{ steps.version.outputs.version }}
108
+ EOF=$(dd if=/dev/urandom bs=15 count=1 status=none | base64)
109
+ echo "body<<$EOF" >> $GITHUB_OUTPUT
110
+ sed -n "/^## \[$VERSION\]/,/^## \[/{/^## \[/!p}" CHANGELOG.md >> $GITHUB_OUTPUT
111
+ echo "$EOF" >> $GITHUB_OUTPUT
112
+ - name: Create GitHub Release
113
+ uses: softprops/action-gh-release@v2
114
+ with:
115
+ tag_name: v${{ steps.version.outputs.version }}
116
+ body: ${{ steps.changelog.outputs.body }}
117
+ files: dist/*
118
+ - name: Bump version for next development cycle
119
+ run: |
120
+ VERSION=${{ steps.version.outputs.version }}
121
+ # Bump patch: 0.1.0 -> 0.1.1
122
+ NEXT=$(echo "$VERSION" | awk -F. '{print $1"."$2"."$3+1}')
123
+ sed -i "s/^__version__ = .*/__version__ = \"$NEXT\"/" flowra/version.py
124
+ # Add new Unreleased section to changelog
125
+ sed -i "s/^## \[$VERSION\]/## [Unreleased]\n\n## [$VERSION]/" CHANGELOG.md
126
+ git config user.name "github-actions[bot]"
127
+ git config user.email "github-actions[bot]@users.noreply.github.com"
128
+ git add flowra/version.py CHANGELOG.md
129
+ git commit -m "Bump version to $NEXT after $VERSION release"
130
+ git push origin master
131
+ - name: Summary
132
+ run: echo "Published **${{ steps.version.outputs.version }}** to PyPI" >> $GITHUB_STEP_SUMMARY
@@ -0,0 +1,32 @@
1
+ name: pull_request
2
+
3
+ on:
4
+ pull_request:
5
+ branches: [ master ]
6
+
7
+ concurrency:
8
+ group: pr-${{ github.event.pull_request.number }}
9
+ cancel-in-progress: true
10
+
11
+ jobs:
12
+ test_and_lint:
13
+ runs-on: ${{ matrix.os }}
14
+ timeout-minutes: 15
15
+ strategy:
16
+ fail-fast: false
17
+ matrix:
18
+ os: [ubuntu-latest, macos-latest]
19
+ python: ["3.12", "3.13", "3.14"]
20
+ steps:
21
+ - name: Checkout code
22
+ uses: actions/checkout@v6
23
+ - name: Set up uv
24
+ uses: astral-sh/setup-uv@v7
25
+ - name: Set up Python ${{ matrix.python }}
26
+ run: echo ${{ matrix.python }} > .python-version
27
+ - name: Install deps
28
+ run: MODE=ci make deps
29
+ - name: Run lint
30
+ run: MODE=ci make lint
31
+ - name: Run unit tests
32
+ run: MODE=ci make test-unit
@@ -0,0 +1,45 @@
1
+ name: pull_request_e2e
2
+
3
+ on:
4
+ pull_request_target:
5
+ branches: [ master ]
6
+ types: [opened, synchronize, reopened, labeled]
7
+
8
+ concurrency:
9
+ group: pr-e2e-${{ github.event.pull_request.number }}
10
+ cancel-in-progress: true
11
+
12
+ jobs:
13
+ test_e2e:
14
+ if: >-
15
+ contains(github.event.pull_request.labels.*.name, 'e2e')
16
+ || github.event.pull_request.author_association == 'OWNER'
17
+ || github.event.pull_request.author_association == 'MEMBER'
18
+ || github.event.pull_request.author_association == 'COLLABORATOR'
19
+ runs-on: ${{ matrix.os }}
20
+ timeout-minutes: 15
21
+ strategy:
22
+ fail-fast: false
23
+ matrix:
24
+ os: [ubuntu-latest, macos-latest]
25
+ python: ["3.12", "3.13", "3.14"]
26
+ steps:
27
+ - name: Checkout PR code
28
+ uses: actions/checkout@v6
29
+ with:
30
+ ref: ${{ github.event.pull_request.head.sha }}
31
+ - name: Set up uv
32
+ uses: astral-sh/setup-uv@v7
33
+ - name: Set up Python ${{ matrix.python }}
34
+ run: echo ${{ matrix.python }} > .python-version
35
+ - name: Install deps
36
+ run: MODE=ci make deps
37
+ - name: Run e2e tests
38
+ run: MODE=ci make test-e2e
39
+ env:
40
+ OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
41
+ OPENAI_ORGANIZATION: ${{ secrets.OPENAI_ORGANIZATION }}
42
+ OPENAI_BASE_URL: ${{ vars.OPENAI_BASE_URL }}
43
+ VERTEX_PROJECT_NAME: ${{ vars.VERTEX_PROJECT_NAME }}
44
+ VERTEX_LOCATION: ${{ vars.VERTEX_LOCATION }}
45
+ VERTEX_CREDENTIALS: ${{ secrets.VERTEX_CREDENTIALS }}
@@ -0,0 +1,15 @@
1
+ __pycache__/
2
+ *.py[cod]
3
+ *$py.class
4
+ *.egg-info/
5
+ dist/
6
+ build/
7
+ .venv/
8
+ .env
9
+ .ruff_cache/
10
+ .pytest_cache/
11
+ .pyright/
12
+ *.egg
13
+ /.idea/
14
+ .chat_sessions/
15
+ logs/
@@ -0,0 +1 @@
1
+ 3.12
@@ -0,0 +1,21 @@
1
+ # Changelog
2
+
3
+ All notable changes to this project will be documented in this file.
4
+
5
+ The format is based on [Keep a Changelog](https://keepachangelog.com),
6
+ and this project adheres to [Semantic Versioning](https://semver.org).
7
+
8
+ ## [0.0.1] - 2026-03-07
9
+
10
+ Initial release.
11
+
12
+ ### Added
13
+ - State machine agents with `@step` methods, `Goto`, `Spawn`, and stored values (`Scalar`, `AppendOnlyList`)
14
+ - Provider-agnostic LLM abstraction (`LLMProvider`, `LLMRequest`, `LLMResponse`)
15
+ - LLM providers: `AnthropicVertexProvider`, `GoogleVertexProvider`, `OpenAIProvider`
16
+ - Tool integration: `@tool` decorator, MCP server support, DI into tool handlers
17
+ - Execution engine with persistence, crash recovery, and cooperative interrupts
18
+ - Pre-built agents: `ChatAgent` (multi-turn chat) and `ToolLoopAgent` (tool loop with hooks)
19
+ - `ChatHooks` with `on_save_turn_messages` for transient message filtering
20
+ - Optional provider dependencies via extras: `flowra[anthropic]`, `flowra[openai]`, `flowra[google]`, `flowra[all]`
21
+ - Python 3.12, 3.13, 3.14 support
@@ -0,0 +1,58 @@
1
+ # CLAUDE.md
2
+
3
+ This file provides guidance to Claude Code (claude.ai/code) when working with code in this repository.
4
+
5
+ ## Commands
6
+
7
+ - `make deps` — install dependencies (`uv sync`; in CI with `MODE=ci` uses `--locked --no-cache`)
8
+ - `make lock` — upgrade all dependencies (`uv lock --upgrade`)
9
+ - `make lint` — ruff check + ruff format + pyright (auto-fixes locally; in CI with `MODE=ci` checks only)
10
+ - `make test` — run all tests in parallel (`-n auto`)
11
+ - `make test name="some_name"` — filter tests by name (`-k`)
12
+ - `make test parallel=0` — run tests sequentially
13
+ - `make test args="-vv"` — pass extra pytest arguments
14
+ - `make check` — lint + test
15
+ - `make example1` — run menu agent (functional style)
16
+ - `make example2` — run menu agent (class-based style)
17
+ - `make chat` — run interactive console chat example
18
+ - `make chat resume=last` — resume the last chat session
19
+ - `make chat resume=<session_id>` — resume a specific session
20
+ - `make chat input="..."` — send a single message (batch mode)
21
+ - `make chat crash=0.5 input="..."` — crash recovery demo
22
+ - `make chat-tui` — run TUI chat with interrupt support
23
+ - `make chat-tui resume=last` — resume the last TUI session
24
+
25
+ ## Architecture
26
+
27
+ Python 3.12+ library. Package manager: **uv**. All config in `pyproject.toml`.
28
+
29
+ ### LLM abstraction (`flowra/llm/`)
30
+
31
+ Provider-agnostic interface for calling LLMs:
32
+
33
+ - `LLMProvider` (abc) — single method `async call(LLMRequest) -> LLMResponse`
34
+ - `LLMRequest` — model, messages, tools, json_schema, temperature, max_tokens, stop_sequences
35
+ - `LLMResponse` — message (AssistantMessage), stop_reason, usage
36
+ - `Usage` — input_tokens, output_tokens, cache_read_input_tokens, cache_creation_input_tokens, cost_usd. Token contract: `input_tokens` excludes cached tokens
37
+ - Messages: `SystemMessage`, `UserMessage`, `AssistantMessage` — system messages must be at the beginning of the messages list
38
+ - Blocks: `TextBlock` (with `cache: bool` for prompt caching), `ImageBlock`, `ToolUseBlock`, `ToolResultBlock`
39
+ - `Tool` — name, description, input_schema, output_schema, cache
40
+
41
+ Providers live in `flowra/llm/providers/`. Currently: `AnthropicVertexProvider`, `OpenAIProvider`, `GoogleVertexProvider`.
42
+
43
+ Pricing utilities live in `flowra/llm/pricing/` — per-protocol cost estimation (anthropic, openai, google). Providers use these to populate `Usage.cost_usd`.
44
+
45
+ ## Code Review
46
+
47
+ Review prompts live in `docs/review_prompts/`:
48
+
49
+ - `step1_structure.md` — package structure and responsibility
50
+ - `step2_code_style.md` — code style rules (**canonical source of all style rules**)
51
+ - `step3_documentation.md` — documentation completeness and quality
52
+ - `step4_doc_readability.md` — documentation readability (as a first-time reader)
53
+ - `step5_doc_audit.md` — documentation audit against source code
54
+ - `step6_tests.md` — test quality and coverage
55
+
56
+ ### Tests
57
+
58
+ Test directory structure mirrors `flowra/`. E2E tests use `_e2e` suffix (e.g., `test_anthropic_e2e.py`). Environment variables loaded from `.env` via Makefile.
@@ -0,0 +1,61 @@
1
+ -include .env
2
+ export
3
+
4
+ deps:
5
+ ifeq ($(MODE), ci)
6
+ uv sync --all-extras --locked --no-cache
7
+ else
8
+ uv sync --all-extras
9
+ endif
10
+
11
+ lock:
12
+ uv lock --upgrade
13
+
14
+ lint:
15
+ ifeq ($(MODE), ci)
16
+ uv run ruff check flowra/ tests/ examples/
17
+ uv run ruff format --check flowra/ tests/ examples/
18
+ else
19
+ uv run ruff check --fix flowra/ tests/ examples/
20
+ uv run ruff format flowra/ tests/ examples/
21
+ endif
22
+ uv run pyright
23
+
24
+ # ── test parameters ──────────────────────────────────────────────────
25
+ # Examples:
26
+ # make test # all tests, parallel
27
+ # make test name="some_name" # filter by name (-k)
28
+ # make test parallel=0 # sequential
29
+ # make test parallel=4 # 4 workers
30
+ # make test name="e2e" parallel=0 # filter + sequential
31
+ _TEST_ARGS = $(if $(parallel),$(if $(filter 0,$(parallel)),,-n $(parallel)),-n auto) \
32
+ $(if $(name),-k $(name))
33
+
34
+ test:
35
+ uv run pytest tests/ $(_TEST_ARGS) $(args)
36
+
37
+ test-unit:
38
+ uv run pytest tests/ --ignore=tests/llm/providers $(_TEST_ARGS) $(args)
39
+
40
+ test-e2e:
41
+ uv run pytest tests/llm/providers/ -k e2e $(_TEST_ARGS) $(args)
42
+
43
+ check: lint test
44
+
45
+ example1:
46
+ uv run python examples/menu_agent.py $(args)
47
+
48
+ example2:
49
+ uv run python examples/menu_agent_class.py $(args)
50
+
51
+ chat:
52
+ CRASH_CHANCE=$(or $(crash),0) TOOL_DELAY=$(or $(delay),0) uv run python examples/console_chat.py $(if $(model),--model $(model)) $(if $(resume),--resume $(resume)) $(if $(input),--input "$(input)") $(args)
53
+ # make chat — new session
54
+ # make chat resume=last — resume last session
55
+ # make chat resume=<session_id> — resume specific session
56
+ # make chat input="What is 2+2?" — batch mode (single message)
57
+ # make chat crash=0.5 input="..." — crash recovery demo
58
+ # make chat delay=3 — slow tools (seconds)
59
+
60
+ chat-tui:
61
+ CRASH_CHANCE=$(or $(crash),0) TOOL_DELAY=$(or $(delay),0) uv run python examples/tui_chat.py $(if $(model),--model $(model)) $(if $(resume),--resume $(resume)) $(args)
@@ -0,0 +1,18 @@
1
+ Metadata-Version: 2.4
2
+ Name: flowra
3
+ Version: 0.0.1.dev1
4
+ Summary: Flowra — flow infrastructure for building stateful LLM agents
5
+ Requires-Python: >=3.12
6
+ Requires-Dist: httpx>=0.28
7
+ Requires-Dist: jsonschema>=4.26
8
+ Requires-Dist: marshmallow-recipe>=0.0.85
9
+ Provides-Extra: all
10
+ Requires-Dist: anthropic[vertex]; extra == 'all'
11
+ Requires-Dist: google-genai; extra == 'all'
12
+ Requires-Dist: openai; extra == 'all'
13
+ Provides-Extra: anthropic
14
+ Requires-Dist: anthropic[vertex]; extra == 'anthropic'
15
+ Provides-Extra: google
16
+ Requires-Dist: google-genai; extra == 'google'
17
+ Provides-Extra: openai
18
+ Requires-Dist: openai; extra == 'openai'
@@ -0,0 +1,115 @@
1
+ # Flowra
2
+
3
+ **Flow infra** for building stateful, persistent LLM agents with tool use,
4
+ parallel execution, and crash recovery. Requires Python 3.12+.
5
+
6
+ ## Features
7
+
8
+ - **State machine agents** — define agents as classes with `@step` methods that
9
+ transition via `Goto`, `Spawn` (parallel children), or return a result
10
+ - **Persistent state** — `Scalar[T]` and `AppendOnlyList[T]` with incremental
11
+ dirty-tracking and pluggable storage (in-memory, file-based, or custom)
12
+ - **Tool integration** — `@tool` decorator for local functions, MCP server support,
13
+ DI into tool handlers
14
+ - **LLM abstraction** — provider-agnostic `LLMProvider` interface with immutable
15
+ message types (ships `AnthropicVertexProvider`, `GoogleVertexProvider`, `OpenAIProvider`)
16
+ - **Cooperative interrupts** — `InterruptToken` for graceful cancellation across
17
+ the entire execution tree
18
+ - **Pre-built agents** — `ChatAgent` (multi-turn chat with session history) and
19
+ `ToolLoopAgent` (single-turn LLM tool loop with hooks and caching)
20
+
21
+ ## Installation
22
+
23
+ ```bash
24
+ # Base package (no LLM providers)
25
+ pip install flowra
26
+
27
+ # With specific providers
28
+ pip install flowra[anthropic]
29
+ pip install flowra[openai]
30
+ pip install flowra[google]
31
+
32
+ # All providers
33
+ pip install flowra[all]
34
+ ```
35
+
36
+ ## Quick start
37
+
38
+ ```python
39
+ import asyncio
40
+
41
+ from flowra.lib.chat import ChatAgent, ChatConfig, ChatResult, ChatSpec
42
+ from flowra.lib import LLMConfig
43
+ from flowra.llm import LLMProvider, SystemMessage, TextBlock
44
+ from flowra.llm.providers.anthropic_vertex import AnthropicVertexProvider
45
+ from flowra.runtime import AgentRuntime
46
+ from flowra.tools import ToolRegistry
47
+
48
+
49
+ async def main() -> None:
50
+ provider = AnthropicVertexProvider()
51
+
52
+ async with await ToolRegistry.create([]) as registry:
53
+ config = ChatConfig(
54
+ llm_config=LLMConfig(model="claude-sonnet-4-5@20250929"),
55
+ system_messages=[
56
+ SystemMessage(blocks=[TextBlock(text="You are a helpful assistant.")])
57
+ ],
58
+ )
59
+
60
+ runtime = AgentRuntime(
61
+ agents={"chat": ChatAgent},
62
+ services={
63
+ LLMProvider: provider,
64
+ ToolRegistry: registry,
65
+ ChatConfig: config,
66
+ },
67
+ )
68
+
69
+ while True:
70
+ user_input = input("You: ")
71
+ if not user_input:
72
+ break
73
+
74
+ result = await runtime.run(
75
+ agent=ChatAgent, step=ChatAgent.process_message,
76
+ spec=ChatSpec(user_message=user_input),
77
+ )
78
+
79
+ if isinstance(result, ChatResult) and result.response:
80
+ print(f"Assistant: {result.response}")
81
+
82
+
83
+ asyncio.run(main())
84
+ ```
85
+
86
+ ## Package structure
87
+
88
+ ```
89
+ flowra/
90
+ ├── llm/ # LLM abstraction (messages, blocks, provider interface)
91
+ ├── tools/ # Tool definition, registration, execution
92
+ ├── agent/ # State machine framework (@step, Goto, Spawn, stored values)
93
+ ├── runtime/ # Execution engine, persistence, interrupt support
94
+ └── lib/ # Pre-built agents (ChatAgent, ToolLoopAgent, hooks, caching)
95
+ ```
96
+
97
+ See [docs/architecture.md](docs/architecture.md) for the full dependency graph and
98
+ data flow. Each package has its own documentation in `docs/`.
99
+
100
+ ## Development
101
+
102
+ ```bash
103
+ make deps # install dependencies (uv sync)
104
+ make check # lint + test
105
+ make chat # run interactive console chat example
106
+ ```
107
+
108
+ ## Documentation
109
+
110
+ - [Architecture](docs/architecture.md) — package structure and data flow
111
+ - [LLM](docs/llm.md) — message types, provider interface
112
+ - [Tools](docs/tools.md) — tool definition and execution
113
+ - [Agent](docs/agent.md) — state machine framework
114
+ - [Runtime](docs/runtime.md) — execution engine and persistence
115
+ - [Lib](docs/lib.md) — pre-built agents, hooks, caching