c1groupy 0.3.0__tar.gz → 0.4.2__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- c1groupy-0.3.0/README.md → c1groupy-0.4.2/PKG-INFO +142 -1
- c1groupy-0.3.0/PKG-INFO → c1groupy-0.4.2/README.md +111 -26
- {c1groupy-0.3.0 → c1groupy-0.4.2}/pyproject.toml +14 -2
- c1groupy-0.4.2/src/c1gpy/llm_handler/__init__.py +78 -0
- c1groupy-0.4.2/src/c1gpy/llm_handler/config.py +44 -0
- c1groupy-0.4.2/src/c1gpy/llm_handler/exceptions.py +142 -0
- c1groupy-0.4.2/src/c1gpy/llm_handler/handler.py +574 -0
- c1groupy-0.4.2/src/c1gpy/llm_handler/langfuse_client.py +24 -0
- c1groupy-0.4.2/src/c1gpy/llm_handler/provider_registry.py +158 -0
- c1groupy-0.3.0/src/c1gpy/llm.py +0 -8
- {c1groupy-0.3.0 → c1groupy-0.4.2}/LICENCE +0 -0
- {c1groupy-0.3.0 → c1groupy-0.4.2}/src/c1gpy/__init__.py +0 -0
- {c1groupy-0.3.0 → c1groupy-0.4.2}/src/c1gpy/fastapi_project_files/.gitignore +0 -0
- {c1groupy-0.3.0 → c1groupy-0.4.2}/src/c1gpy/fastapi_project_files/.pre-commit-config.yaml +0 -0
- {c1groupy-0.3.0 → c1groupy-0.4.2}/src/c1gpy/fastapi_project_files/Dockerfile +0 -0
- {c1groupy-0.3.0 → c1groupy-0.4.2}/src/c1gpy/fastapi_project_files/README.md.template +0 -0
- {c1groupy-0.3.0 → c1groupy-0.4.2}/src/c1gpy/fastapi_project_files/docker-compose.yml +0 -0
- {c1groupy-0.3.0 → c1groupy-0.4.2}/src/c1gpy/fastapi_project_files/pyproject.toml.template +0 -0
- {c1groupy-0.3.0 → c1groupy-0.4.2}/src/c1gpy/fastapi_project_files/src/__init__.py +0 -0
- {c1groupy-0.3.0 → c1groupy-0.4.2}/src/c1gpy/fastapi_project_files/src/api_interface.py +0 -0
- {c1groupy-0.3.0 → c1groupy-0.4.2}/src/c1gpy/fastapi_project_files/src/endpoints/__init__.py +0 -0
- {c1groupy-0.3.0 → c1groupy-0.4.2}/src/c1gpy/fastapi_project_files/src/endpoints/router1/__init__.py +0 -0
- {c1groupy-0.3.0 → c1groupy-0.4.2}/src/c1gpy/fastapi_project_files/src/endpoints/router1/example_endpoint.py +0 -0
- {c1groupy-0.3.0 → c1groupy-0.4.2}/src/c1gpy/fastapi_project_files/src/endpoints/router2/__init__.py +0 -0
- {c1groupy-0.3.0 → c1groupy-0.4.2}/src/c1gpy/fastapi_project_files/src/endpoints/router2/example_endpoint.py +0 -0
- {c1groupy-0.3.0 → c1groupy-0.4.2}/src/c1gpy/fastapi_project_files/src/entrypoint.sh +0 -0
- {c1groupy-0.3.0 → c1groupy-0.4.2}/src/c1gpy/fastapi_project_files/src/routers/__init__.py +0 -0
- {c1groupy-0.3.0 → c1groupy-0.4.2}/src/c1gpy/fastapi_project_files/src/routers/router1.py +0 -0
- {c1groupy-0.3.0 → c1groupy-0.4.2}/src/c1gpy/fastapi_project_files/src/routers/router2.py +0 -0
- {c1groupy-0.3.0 → c1groupy-0.4.2}/src/c1gpy/fastapi_project_files/tests/__init__.py +0 -0
- {c1groupy-0.3.0 → c1groupy-0.4.2}/src/c1gpy/fastapi_project_files/tests/test_example.py +0 -0
- /c1groupy-0.3.0/src/c1gpy/google-utils.py → /c1groupy-0.4.2/src/c1gpy/google_utils.py +0 -0
- {c1groupy-0.3.0 → c1groupy-0.4.2}/src/c1gpy/init_fastapi_project.py +0 -0
- {c1groupy-0.3.0 → c1groupy-0.4.2}/src/c1gpy/init_streamlit_project.py +0 -0
- {c1groupy-0.3.0 → c1groupy-0.4.2}/src/c1gpy/logging.py +0 -0
- {c1groupy-0.3.0 → c1groupy-0.4.2}/src/c1gpy/streamlit_project_files/.gitignore +0 -0
- {c1groupy-0.3.0 → c1groupy-0.4.2}/src/c1gpy/streamlit_project_files/.pre-commit-config.yaml +0 -0
- {c1groupy-0.3.0 → c1groupy-0.4.2}/src/c1gpy/streamlit_project_files/Dockerfile +0 -0
- {c1groupy-0.3.0 → c1groupy-0.4.2}/src/c1gpy/streamlit_project_files/README.md.template +0 -0
- {c1groupy-0.3.0 → c1groupy-0.4.2}/src/c1gpy/streamlit_project_files/docker-compose.yml +0 -0
- {c1groupy-0.3.0 → c1groupy-0.4.2}/src/c1gpy/streamlit_project_files/pyproject.toml.template +0 -0
- {c1groupy-0.3.0 → c1groupy-0.4.2}/src/c1gpy/streamlit_project_files/src/app/App.py.template +0 -0
- {c1groupy-0.3.0 → c1groupy-0.4.2}/src/c1gpy/streamlit_project_files/src/app/__init__.py +0 -0
- {c1groupy-0.3.0 → c1groupy-0.4.2}/src/c1gpy/streamlit_project_files/src/app/pages/1_Page_1.py +0 -0
- {c1groupy-0.3.0 → c1groupy-0.4.2}/src/c1gpy/streamlit_project_files/src/app/pages/2_Page_2.py +0 -0
- {c1groupy-0.3.0 → c1groupy-0.4.2}/src/c1gpy/streamlit_project_files/src/app/pages/__init__.py +0 -0
- {c1groupy-0.3.0 → c1groupy-0.4.2}/src/c1gpy/streamlit_project_files/src/app/pages/home.py +0 -0
- {c1groupy-0.3.0 → c1groupy-0.4.2}/src/c1gpy/streamlit_project_files/src/entrypoint.sh +0 -0
- {c1groupy-0.3.0 → c1groupy-0.4.2}/src/c1gpy/streamlit_project_files/tests/__init__.py +0 -0
- {c1groupy-0.3.0 → c1groupy-0.4.2}/src/c1gpy/streamlit_project_files/tests/test_example.py +0 -0
- {c1groupy-0.3.0 → c1groupy-0.4.2}/src/c1gpy/utils.py +0 -0

**c1groupy-0.3.0/README.md → c1groupy-0.4.2/PKG-INFO** (+142 -1)

````diff
@@ -1,4 +1,35 @@
+Metadata-Version: 2.4
+Name: c1groupy
+Version: 0.4.2
+Summary: C1G company Python module for utilities and project setup
+License-Expression: MIT
+License-File: LICENCE
+Author: Tim M Schendzielorz
+Author-email: tim.schendzielorz@googlemail.com
+Requires-Python: >=3.12, <4.0
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.12
+Classifier: Programming Language :: Python :: 3.13
+Classifier: Programming Language :: Python :: 3.14
+Requires-Dist: argon2-cffi (>=23.0.0,<24.0.0)
+Requires-Dist: google-api-python-client (>=2.0.0,<3.0.0)
+Requires-Dist: google-auth (>=2.0.0,<3.0.0)
+Requires-Dist: google-cloud-logging (>=3.0.0,<4.0.0)
+Requires-Dist: google-cloud-secret-manager (>=2.0.0,<3.0.0)
+Requires-Dist: google-cloud-storage (>=2.0.0,<3.0.0)
+Requires-Dist: httpx[http2] (>=0.27.0,<1.0.0)
+Requires-Dist: langchain (>=0.3.0,<1.0.0)
+Requires-Dist: langchain-anthropic (>=0.2.0,<1.0.0)
+Requires-Dist: langchain-core (>=0.3.0,<1.0.0)
+Requires-Dist: langchain-google-genai (>=2.0.0,<3.0.0)
+Requires-Dist: langchain-openai (>=0.2.0,<1.0.0)
+Requires-Dist: langfuse (>=3.6.1,<4.0.0)
+Requires-Dist: pydantic (>=2.0.0,<3.0.0)
+Requires-Dist: rich (>=13.0.0,<14.0.0)
+Description-Content-Type: text/markdown
+
 # c1gpy
+[](https://github.com/Career1Group/c1gpy/actions/workflows/publish_package.yaml)
 
 C1G company Python module for utilities and project setup.
 
@@ -13,7 +44,7 @@ pip install c1groupy
 Or with uv:
 
 ```bash
-uv
+uv add c1groupy
 ```
 
 ## Features
@@ -574,6 +605,116 @@ client.delete_content("document_id", start_index=1, end_index=10)
 
 ---
 
+## 6. LLM Handler
+
+Unified interface for calling LLMs (OpenAI, Anthropic, Google Gemini) with:
+
+- **Langfuse** prompt management (prompt fetch + compilation)
+- **Tracing** via Langfuse callback handler
+- **Sync + async** calls
+- **Batch** calls
+- **Streaming** responses
+
+### Basic usage
+
+```python
+from c1gpy.llm_handler import LangfuseConfig, LLMHandler
+
+langfuse_config = LangfuseConfig(
+    secret_key="sk-lf-...",
+    public_key="pk-lf-...",
+    host="https://cloud.langfuse.com",
+)
+
+handler = LLMHandler(
+    model_name="gpt-4o",
+    api_key="sk-...",
+    langfuse_prompt_name="my-prompt",
+    langfuse_config=langfuse_config,
+)
+
+result = handler.call_model(keyword="test")
+```
+
+### Reusing a single Langfuse client (recommended)
+
+If you create multiple `LLMHandler` instances in the same service, reuse a single
+Langfuse client:
+
+```python
+from c1gpy.llm_handler import LangfuseConfig, LLMHandler, get_langfuse_client
+
+langfuse_config = LangfuseConfig(
+    secret_key="sk-lf-...",
+    public_key="pk-lf-...",
+    host="https://cloud.langfuse.com",
+)
+
+langfuse = get_langfuse_client(langfuse_config)
+
+handler_1 = LLMHandler(
+    model_name="gpt-4o",
+    api_key="sk-...",
+    langfuse_prompt_name="my-prompt",
+    langfuse_config=langfuse_config,
+    langfuse=langfuse,
+)
+
+handler_2 = LLMHandler(
+    model_name="gpt-4o-mini",
+    api_key="sk-...",
+    langfuse_prompt_name="my-other-prompt",
+    langfuse_config=langfuse_config,
+    langfuse=langfuse,
+)
+```
+
+### Async
+
+```python
+result = await handler.acall_model(keyword="test")
+```
+
+### Batch
+
+```python
+results = handler.batch_invoke(
+    [{"keyword": "a"}, {"keyword": "b"}, {"keyword": "c"}]
+)
+
+results_async = await handler.abatch_invoke(
+    [{"keyword": "a"}, {"keyword": "b"}, {"keyword": "c"}]
+)
+```
+
+### Streaming
+
+```python
+for chunk in handler.call_model_stream(keyword="test"):
+    print(chunk, end="")
+
+async for chunk in handler.acall_model_stream(keyword="test"):
+    print(chunk, end="")
+```
+
+### Provider selection
+
+Providers are auto-detected from `model_name` prefixes (e.g. `gpt-*`, `claude-*`, `gemini-*`).
+You can also explicitly override the provider:
+
+```python
+from c1gpy.llm_handler import ModelProvider, LLMHandler
+
+handler = LLMHandler(
+    model_name="gpt-4o",
+    api_key="sk-...",
+    langfuse_prompt_name="my-prompt",
+    langfuse_config=langfuse_config,
+    provider=ModelProvider.OPENAI,
+)
+```
+
 ## License
 
 MIT
+
````
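
The streaming methods documented in the new README section yield incremental text chunks. As a minimal sketch (the `collect_stream` helper is hypothetical, not part of c1groupy, and assumes chunks are plain string fragments, as the README examples suggest), a full response can be assembled like this:

```python
# Hypothetical helper (not part of c1groupy): assemble a streamed
# response into one string. Assumes call_model_stream yields plain
# text fragments, as the README examples suggest.
def collect_stream(handler, **prompt_vars) -> str:
    parts: list[str] = []
    for chunk in handler.call_model_stream(**prompt_vars):
        parts.append(chunk)
    return "".join(parts)

# Usage, given an LLMHandler configured as in the README snippet:
# full_text = collect_stream(handler, keyword="test")
```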

**c1groupy-0.3.0/PKG-INFO → c1groupy-0.4.2/README.md** (+111 -26)

````diff
@@ -1,28 +1,5 @@
-Metadata-Version: 2.4
-Name: c1groupy
-Version: 0.3.0
-Summary: C1G company Python module for utilities and project setup
-License-Expression: MIT
-License-File: LICENCE
-Author: Tim M Schendzielorz
-Author-email: tim.schendzielorz@googlemail.com
-Requires-Python: >=3.12, <4.0
-Classifier: Programming Language :: Python :: 3
-Classifier: Programming Language :: Python :: 3.12
-Classifier: Programming Language :: Python :: 3.13
-Classifier: Programming Language :: Python :: 3.14
-Requires-Dist: argon2-cffi (>=23.0.0,<24.0.0)
-Requires-Dist: google-api-python-client (>=2.0.0,<3.0.0)
-Requires-Dist: google-auth (>=2.0.0,<3.0.0)
-Requires-Dist: google-cloud-logging (>=3.0.0,<4.0.0)
-Requires-Dist: google-cloud-secret-manager (>=2.0.0,<3.0.0)
-Requires-Dist: google-cloud-storage (>=2.0.0,<3.0.0)
-Requires-Dist: httpx[http2] (>=0.27.0,<1.0.0)
-Requires-Dist: langfuse (>=3.6.1,<4.0.0)
-Requires-Dist: rich (>=13.0.0,<14.0.0)
-Description-Content-Type: text/markdown
-
 # c1gpy
+[](https://github.com/Career1Group/c1gpy/actions/workflows/publish_package.yaml)
 
 C1G company Python module for utilities and project setup.
 
@@ -37,7 +14,7 @@ pip install c1groupy
 Or with uv:
 
 ```bash
-uv
+uv add c1groupy
 ```
 
 ## Features
@@ -598,7 +575,115 @@ client.delete_content("document_id", start_index=1, end_index=10)
 
 ---
 
+## 6. LLM Handler
+
+Unified interface for calling LLMs (OpenAI, Anthropic, Google Gemini) with:
+
+- **Langfuse** prompt management (prompt fetch + compilation)
+- **Tracing** via Langfuse callback handler
+- **Sync + async** calls
+- **Batch** calls
+- **Streaming** responses
+
+### Basic usage
+
+```python
+from c1gpy.llm_handler import LangfuseConfig, LLMHandler
+
+langfuse_config = LangfuseConfig(
+    secret_key="sk-lf-...",
+    public_key="pk-lf-...",
+    host="https://cloud.langfuse.com",
+)
+
+handler = LLMHandler(
+    model_name="gpt-4o",
+    api_key="sk-...",
+    langfuse_prompt_name="my-prompt",
+    langfuse_config=langfuse_config,
+)
+
+result = handler.call_model(keyword="test")
+```
+
+### Reusing a single Langfuse client (recommended)
+
+If you create multiple `LLMHandler` instances in the same service, reuse a single
+Langfuse client:
+
+```python
+from c1gpy.llm_handler import LangfuseConfig, LLMHandler, get_langfuse_client
+
+langfuse_config = LangfuseConfig(
+    secret_key="sk-lf-...",
+    public_key="pk-lf-...",
+    host="https://cloud.langfuse.com",
+)
+
+langfuse = get_langfuse_client(langfuse_config)
+
+handler_1 = LLMHandler(
+    model_name="gpt-4o",
+    api_key="sk-...",
+    langfuse_prompt_name="my-prompt",
+    langfuse_config=langfuse_config,
+    langfuse=langfuse,
+)
+
+handler_2 = LLMHandler(
+    model_name="gpt-4o-mini",
+    api_key="sk-...",
+    langfuse_prompt_name="my-other-prompt",
+    langfuse_config=langfuse_config,
+    langfuse=langfuse,
+)
+```
+
+### Async
+
+```python
+result = await handler.acall_model(keyword="test")
+```
+
+### Batch
+
+```python
+results = handler.batch_invoke(
+    [{"keyword": "a"}, {"keyword": "b"}, {"keyword": "c"}]
+)
+
+results_async = await handler.abatch_invoke(
+    [{"keyword": "a"}, {"keyword": "b"}, {"keyword": "c"}]
+)
+```
+
+### Streaming
+
+```python
+for chunk in handler.call_model_stream(keyword="test"):
+    print(chunk, end="")
+
+async for chunk in handler.acall_model_stream(keyword="test"):
+    print(chunk, end="")
+```
+
+### Provider selection
+
+Providers are auto-detected from `model_name` prefixes (e.g. `gpt-*`, `claude-*`, `gemini-*`).
+You can also explicitly override the provider:
+
+```python
+from c1gpy.llm_handler import ModelProvider, LLMHandler
+
+handler = LLMHandler(
+    model_name="gpt-4o",
+    api_key="sk-...",
+    langfuse_prompt_name="my-prompt",
+    langfuse_config=langfuse_config,
+    provider=ModelProvider.OPENAI,
+)
+```
+
 ## License
 
 MIT
-
````
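
The README's batch and async examples compose naturally; a minimal sketch of driving the async batch API from a script (assuming `abatch_invoke` returns one result per input dict, in order, as the README's example implies):

```python
# Sketch: drive the async batch API from a standalone script.
# Handler kwargs are taken from the README examples above; keys in the
# input dicts are the Langfuse prompt's template variables.
import asyncio

from c1gpy.llm_handler import LangfuseConfig, LLMHandler


async def main() -> None:
    handler = LLMHandler(
        model_name="gpt-4o-mini",
        api_key="sk-...",
        langfuse_prompt_name="my-prompt",
        langfuse_config=LangfuseConfig(
            secret_key="sk-lf-...",
            public_key="pk-lf-...",
        ),
    )
    results = await handler.abatch_invoke(
        [{"keyword": "a"}, {"keyword": "b"}, {"keyword": "c"}]
    )
    for keyword, result in zip(["a", "b", "c"], results):
        print(keyword, "->", result)


if __name__ == "__main__":
    asyncio.run(main())
```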

**{c1groupy-0.3.0 → c1groupy-0.4.2}/pyproject.toml** (+14 -2)

````diff
@@ -1,6 +1,6 @@
 [project]
 name = "c1groupy"
-version = "0.3.0"
+version = "0.4.2"
 description = "C1G company Python module for utilities and project setup"
 authors = [
     {name = "Tim M Schendzielorz",email = "tim.schendzielorz@googlemail.com"},
@@ -20,7 +20,13 @@ dependencies = [
     "google-cloud-secret-manager (>=2.0.0,<3.0.0)",
     "google-cloud-storage (>=2.0.0,<3.0.0)",
     "google-api-python-client (>=2.0.0,<3.0.0)",
-    "google-auth (>=2.0.0,<3.0.0)"
+    "google-auth (>=2.0.0,<3.0.0)",
+    "langchain (>=0.3.0,<1.0.0)",
+    "langchain-core (>=0.3.0,<1.0.0)",
+    "langchain-openai (>=0.2.0,<1.0.0)",
+    "langchain-anthropic (>=0.2.0,<1.0.0)",
+    "langchain-google-genai (>=2.0.0,<3.0.0)",
+    "pydantic (>=2.0.0,<3.0.0)"
 ]
 
 [project.scripts]
@@ -34,3 +40,9 @@ packages = [{include = "c1gpy", from = "src"}]
 [build-system]
 requires = ["poetry-core>=2.0.0,<3.0.0"]
 build-backend = "poetry.core.masonry.api"
+
+[dependency-groups]
+dev = [
+    "pytest>=9.0.2",
+    "pytest-asyncio>=1.3.0",
+]
````
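
Since 0.4.2 widens the runtime dependency set considerably (langchain plus three provider adapters and pydantic), a quick post-upgrade sanity check can confirm the new packages resolved in your environment; a small sketch using only the standard library:

```python
# Verify the 0.4.2 dependency set is installed; package names are taken
# from the pyproject.toml diff above.
from importlib.metadata import PackageNotFoundError, version

for pkg in (
    "c1groupy",
    "langchain",
    "langchain-openai",
    "langchain-anthropic",
    "langchain-google-genai",
    "pydantic",
):
    try:
        print(f"{pkg}: {version(pkg)}")
    except PackageNotFoundError:
        print(f"{pkg}: not installed")
```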

**c1groupy-0.4.2/src/c1gpy/llm_handler/__init__.py** (new file, +78)

````python
"""
LLM Handler sub-package for unified LLM interactions.

This package provides a Pythonic interface for calling various LLM providers
(OpenAI, Anthropic, Google) with integrated Langfuse prompt management and tracing.

Key Features:
- Unified API across multiple providers (OpenAI, Anthropic, Google)
- Langfuse integration for prompt management and tracing
- Structured output support via Pydantic models
- JSON mode for raw dictionary responses
- Synchronous and asynchronous operations
- Streaming support

Example:
    >>> from c1gpy.llm_handler import LLMHandler, LangfuseConfig, ModelProvider
    >>> from pydantic import BaseModel
    >>>
    >>> class ProductTitles(BaseModel):
    ...     titles: list[str]
    ...
    >>> handler = LLMHandler(
    ...     model_name="gpt-4o",
    ...     api_key="sk-...",
    ...     langfuse_prompt_name="product-title-generator",
    ...     langfuse_config=LangfuseConfig(
    ...         secret_key="sk-lf-...",
    ...         public_key="pk-lf-...",
    ...     ),
    ...     response_model=ProductTitles,
    ... )
    >>>
    >>> # Sync call
    >>> result = handler.call_model(keywords=["laptop", "gaming"], n_titles=5)
    >>> print(result.titles)
    >>>
    >>> # Async call
    >>> result = await handler.acall_model(keywords=["laptop"], n_titles=3)
    >>>
    >>> # Streaming
    >>> for chunk in handler.call_model_stream(keywords=["laptop"]):
    ...     print(chunk, end="")
"""

from c1gpy.llm_handler.config import LangfuseConfig
from c1gpy.llm_handler.exceptions import (
    LLMHandlerError,
    PromptCompilationError,
    PromptNotFoundError,
    ProviderError,
    ResponseParsingError,
)
from c1gpy.llm_handler.langfuse_client import get_langfuse_client
from c1gpy.llm_handler.handler import LLMHandler
from c1gpy.llm_handler.provider_registry import (
    ModelProvider,
    create_llm,
    detect_provider,
)

__all__ = [
    # Main handler
    "LLMHandler",
    # Configuration
    "LangfuseConfig",
    # Langfuse
    "get_langfuse_client",
    # Providers
    "ModelProvider",
    "create_llm",
    "detect_provider",
    # Exceptions
    "LLMHandlerError",
    "ProviderError",
    "PromptCompilationError",
    "PromptNotFoundError",
    "ResponseParsingError",
]
````
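
The package docstring above shows the structured-output path; condensed into a runnable script (the prompt name and its template variables `keywords`/`n_titles` are illustrative, exactly as in the docstring):

```python
# Structured output via a Pydantic response_model, condensed from the
# package docstring above. Keyword arguments to call_model are the
# Langfuse prompt's template variables.
from pydantic import BaseModel

from c1gpy.llm_handler import LangfuseConfig, LLMHandler


class ProductTitles(BaseModel):
    titles: list[str]


handler = LLMHandler(
    model_name="gpt-4o",
    api_key="sk-...",
    langfuse_prompt_name="product-title-generator",
    langfuse_config=LangfuseConfig(secret_key="sk-lf-...", public_key="pk-lf-..."),
    response_model=ProductTitles,
)

result = handler.call_model(keywords=["laptop", "gaming"], n_titles=5)
print(result.titles)  # a validated list[str], per the response model
```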

**c1groupy-0.4.2/src/c1gpy/llm_handler/config.py** (new file, +44)

````python
"""
Configuration classes for the LLM Handler module.

This module provides dataclasses for configuring the LLM handler,
including Langfuse integration settings.
"""

from dataclasses import dataclass, field


@dataclass(frozen=True)
class LangfuseConfig:
    """Configuration for Langfuse integration.

    This immutable dataclass holds the credentials and settings needed
    to connect to Langfuse for prompt management and tracing.

    Args:
        secret_key: Langfuse secret key (starts with 'sk-lf-').
        public_key: Langfuse public key (starts with 'pk-lf-').
        host: Langfuse API host URL. Defaults to cloud.langfuse.com.

    Example:
        >>> config = LangfuseConfig(
        ...     secret_key="sk-lf-...",
        ...     public_key="pk-lf-...",
        ... )
        >>> config.host
        'https://cloud.langfuse.com'
    """

    secret_key: str
    public_key: str
    host: str = field(default="https://cloud.langfuse.com")

    def __post_init__(self) -> None:
        """Validate configuration after initialization."""
        if not self.secret_key:
            raise ValueError("secret_key cannot be empty")
        if not self.public_key:
            raise ValueError("public_key cannot be empty")
        if not self.host:
            raise ValueError("host cannot be empty")
````
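
Because `__post_init__` rejects empty credentials, wiring the config from the environment fails fast at startup rather than at the first LLM call. A sketch of that pattern (the environment variable names are a common Langfuse convention, not something this module mandates):

```python
# Build the frozen config from environment variables. If a variable is
# unset, the empty-string default trips the ValueError in __post_init__
# above, so misconfiguration surfaces immediately at startup.
import os

from c1gpy.llm_handler import LangfuseConfig

config = LangfuseConfig(
    secret_key=os.environ.get("LANGFUSE_SECRET_KEY", ""),
    public_key=os.environ.get("LANGFUSE_PUBLIC_KEY", ""),
    host=os.environ.get("LANGFUSE_HOST", "https://cloud.langfuse.com"),
)
```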

**c1groupy-0.4.2/src/c1gpy/llm_handler/exceptions.py** (new file, +142)

````python
"""
Custom exceptions for the LLM Handler module.

This module defines exceptions for error handling across the LLM handler,
including provider-related errors, prompt compilation errors, and general
handler errors.
"""


class LLMHandlerError(Exception):
    """Base exception for all LLM Handler errors.

    Args:
        message: Human-readable error description.

    Example:
        >>> raise LLMHandlerError("Failed to process LLM response")
    """

    def __init__(self, message: str) -> None:
        self.message = message
        super().__init__(self.message)


class ProviderError(LLMHandlerError):
    """Raised when there is an error related to the model provider.

    This includes errors such as:
    - Unknown or unsupported model provider
    - Failed provider initialization
    - Invalid API key for provider

    Args:
        message: Human-readable error description.
        provider: Optional provider name that caused the error.

    Example:
        >>> raise ProviderError("Unknown model: xyz-model", provider="unknown")
    """

    def __init__(self, message: str, provider: str | None = None) -> None:
        self.provider = provider
        super().__init__(message)


class PromptCompilationError(LLMHandlerError):
    """Raised when prompt compilation fails.

    This occurs when:
    - Required template variables are missing
    - Template syntax is invalid
    - Langfuse prompt retrieval fails

    Args:
        message: Human-readable error description.
        prompt_name: Optional name of the prompt that failed.
        missing_vars: Optional list of missing template variables.

    Example:
        >>> raise PromptCompilationError(
        ...     "Missing required variable",
        ...     prompt_name="product-generator",
        ...     missing_vars=["keywords"]
        ... )
    """

    def __init__(
        self,
        message: str,
        prompt_name: str | None = None,
        missing_vars: list[str] | None = None,
    ) -> None:
        self.prompt_name = prompt_name
        self.missing_vars = missing_vars or []
        super().__init__(message)


class ResponseParsingError(LLMHandlerError):
    """Raised when the LLM response cannot be parsed.

    This occurs when:
    - Response is not valid JSON when json_mode is enabled
    - Response does not match the expected Pydantic model schema
    - Response content is empty or malformed

    Args:
        message: Human-readable error description.
        raw_response: Optional raw response content that failed to parse.

    Example:
        >>> raise ResponseParsingError(
        ...     "Invalid JSON in response",
        ...     raw_response="not valid json"
        ... )
    """

    def __init__(self, message: str, raw_response: str | None = None) -> None:
        self.raw_response = raw_response
        super().__init__(message)


class PromptNotFoundError(LLMHandlerError):
    """Raised when a Langfuse prompt cannot be found.

    This error typically occurs due to one of the following reasons:
    - The prompt name does not exist in the Langfuse project
    - The specified label (e.g., 'production') does not exist for the prompt
    - The API keys (secret_key, public_key) belong to a different Langfuse project

    Args:
        prompt_name: Name of the prompt that was not found.
        label: Label that was searched for.
        original_error: Optional original exception from Langfuse.

    Example:
        >>> raise PromptNotFoundError(
        ...     prompt_name="my-prompt",
        ...     label="production",
        ... )
    """

    def __init__(
        self,
        prompt_name: str,
        label: str,
        original_error: Exception | None = None,
    ) -> None:
        self.prompt_name = prompt_name
        self.label = label
        self.original_error = original_error

        message = (
            f"Langfuse prompt not found: '{prompt_name}' with label '{label}'.\n\n"
            f"Please verify:\n"
            f" 1. The prompt name '{prompt_name}' exists in your Langfuse project\n"
            f" 2. The prompt has a version with label '{label}'\n"
            f" 3. Your API keys (secret_key, public_key) are from the SAME Langfuse "
            f"project where the prompt was created\n\n"
            f"Hint: Prompts are project-scoped in Langfuse. If you created the prompt "
            f"in a different project, you need to use that project's API keys."
        )
        super().__init__(message)
````