amd-gaia 0.15.0__py3-none-any.whl → 0.15.2__py3-none-any.whl
This diff shows the changes between two publicly released versions of the package, exactly as they appear in their public registry. It is provided for informational purposes only.
- {amd_gaia-0.15.0.dist-info → amd_gaia-0.15.2.dist-info}/METADATA +222 -223
- amd_gaia-0.15.2.dist-info/RECORD +182 -0
- {amd_gaia-0.15.0.dist-info → amd_gaia-0.15.2.dist-info}/WHEEL +1 -1
- {amd_gaia-0.15.0.dist-info → amd_gaia-0.15.2.dist-info}/entry_points.txt +1 -0
- {amd_gaia-0.15.0.dist-info → amd_gaia-0.15.2.dist-info}/licenses/LICENSE.md +20 -20
- gaia/__init__.py +29 -29
- gaia/agents/__init__.py +19 -19
- gaia/agents/base/__init__.py +9 -9
- gaia/agents/base/agent.py +2132 -2177
- gaia/agents/base/api_agent.py +119 -120
- gaia/agents/base/console.py +1967 -1841
- gaia/agents/base/errors.py +237 -237
- gaia/agents/base/mcp_agent.py +86 -86
- gaia/agents/base/tools.py +88 -83
- gaia/agents/blender/__init__.py +7 -0
- gaia/agents/blender/agent.py +553 -556
- gaia/agents/blender/agent_simple.py +133 -135
- gaia/agents/blender/app.py +211 -211
- gaia/agents/blender/app_simple.py +41 -41
- gaia/agents/blender/core/__init__.py +16 -16
- gaia/agents/blender/core/materials.py +506 -506
- gaia/agents/blender/core/objects.py +316 -316
- gaia/agents/blender/core/rendering.py +225 -225
- gaia/agents/blender/core/scene.py +220 -220
- gaia/agents/blender/core/view.py +146 -146
- gaia/agents/chat/__init__.py +9 -9
- gaia/agents/chat/agent.py +809 -835
- gaia/agents/chat/app.py +1065 -1058
- gaia/agents/chat/session.py +508 -508
- gaia/agents/chat/tools/__init__.py +15 -15
- gaia/agents/chat/tools/file_tools.py +96 -96
- gaia/agents/chat/tools/rag_tools.py +1744 -1729
- gaia/agents/chat/tools/shell_tools.py +437 -436
- gaia/agents/code/__init__.py +7 -7
- gaia/agents/code/agent.py +549 -549
- gaia/agents/code/cli.py +377 -0
- gaia/agents/code/models.py +135 -135
- gaia/agents/code/orchestration/__init__.py +24 -24
- gaia/agents/code/orchestration/checklist_executor.py +1763 -1763
- gaia/agents/code/orchestration/checklist_generator.py +713 -713
- gaia/agents/code/orchestration/factories/__init__.py +9 -9
- gaia/agents/code/orchestration/factories/base.py +63 -63
- gaia/agents/code/orchestration/factories/nextjs_factory.py +118 -118
- gaia/agents/code/orchestration/factories/python_factory.py +106 -106
- gaia/agents/code/orchestration/orchestrator.py +841 -841
- gaia/agents/code/orchestration/project_analyzer.py +391 -391
- gaia/agents/code/orchestration/steps/__init__.py +67 -67
- gaia/agents/code/orchestration/steps/base.py +188 -188
- gaia/agents/code/orchestration/steps/error_handler.py +314 -314
- gaia/agents/code/orchestration/steps/nextjs.py +828 -828
- gaia/agents/code/orchestration/steps/python.py +307 -307
- gaia/agents/code/orchestration/template_catalog.py +469 -469
- gaia/agents/code/orchestration/workflows/__init__.py +14 -14
- gaia/agents/code/orchestration/workflows/base.py +80 -80
- gaia/agents/code/orchestration/workflows/nextjs.py +186 -186
- gaia/agents/code/orchestration/workflows/python.py +94 -94
- gaia/agents/code/prompts/__init__.py +11 -11
- gaia/agents/code/prompts/base_prompt.py +77 -77
- gaia/agents/code/prompts/code_patterns.py +2034 -2036
- gaia/agents/code/prompts/nextjs_prompt.py +40 -40
- gaia/agents/code/prompts/python_prompt.py +109 -109
- gaia/agents/code/schema_inference.py +365 -365
- gaia/agents/code/system_prompt.py +41 -41
- gaia/agents/code/tools/__init__.py +42 -42
- gaia/agents/code/tools/cli_tools.py +1138 -1138
- gaia/agents/code/tools/code_formatting.py +319 -319
- gaia/agents/code/tools/code_tools.py +769 -769
- gaia/agents/code/tools/error_fixing.py +1347 -1347
- gaia/agents/code/tools/external_tools.py +180 -180
- gaia/agents/code/tools/file_io.py +845 -845
- gaia/agents/code/tools/prisma_tools.py +190 -190
- gaia/agents/code/tools/project_management.py +1016 -1016
- gaia/agents/code/tools/testing.py +321 -321
- gaia/agents/code/tools/typescript_tools.py +122 -122
- gaia/agents/code/tools/validation_parsing.py +461 -461
- gaia/agents/code/tools/validation_tools.py +806 -806
- gaia/agents/code/tools/web_dev_tools.py +1758 -1758
- gaia/agents/code/validators/__init__.py +16 -16
- gaia/agents/code/validators/antipattern_checker.py +241 -241
- gaia/agents/code/validators/ast_analyzer.py +197 -197
- gaia/agents/code/validators/requirements_validator.py +145 -145
- gaia/agents/code/validators/syntax_validator.py +171 -171
- gaia/agents/docker/__init__.py +7 -7
- gaia/agents/docker/agent.py +643 -642
- gaia/agents/emr/__init__.py +8 -8
- gaia/agents/emr/agent.py +1504 -1506
- gaia/agents/emr/cli.py +1322 -1322
- gaia/agents/emr/constants.py +475 -475
- gaia/agents/emr/dashboard/__init__.py +4 -4
- gaia/agents/emr/dashboard/server.py +1972 -1974
- gaia/agents/jira/__init__.py +11 -11
- gaia/agents/jira/agent.py +894 -894
- gaia/agents/jira/jql_templates.py +299 -299
- gaia/agents/routing/__init__.py +7 -7
- gaia/agents/routing/agent.py +567 -570
- gaia/agents/routing/system_prompt.py +75 -75
- gaia/agents/summarize/__init__.py +11 -0
- gaia/agents/summarize/agent.py +885 -0
- gaia/agents/summarize/prompts.py +129 -0
- gaia/api/__init__.py +23 -23
- gaia/api/agent_registry.py +238 -238
- gaia/api/app.py +305 -305
- gaia/api/openai_server.py +575 -575
- gaia/api/schemas.py +186 -186
- gaia/api/sse_handler.py +373 -373
- gaia/apps/__init__.py +4 -4
- gaia/apps/llm/__init__.py +6 -6
- gaia/apps/llm/app.py +184 -169
- gaia/apps/summarize/app.py +116 -633
- gaia/apps/summarize/html_viewer.py +133 -133
- gaia/apps/summarize/pdf_formatter.py +284 -284
- gaia/audio/__init__.py +2 -2
- gaia/audio/audio_client.py +439 -439
- gaia/audio/audio_recorder.py +269 -269
- gaia/audio/kokoro_tts.py +599 -599
- gaia/audio/whisper_asr.py +432 -432
- gaia/chat/__init__.py +16 -16
- gaia/chat/app.py +428 -430
- gaia/chat/prompts.py +522 -522
- gaia/chat/sdk.py +1228 -1225
- gaia/cli.py +5659 -5632
- gaia/database/__init__.py +10 -10
- gaia/database/agent.py +176 -176
- gaia/database/mixin.py +290 -290
- gaia/database/testing.py +64 -64
- gaia/eval/batch_experiment.py +2332 -2332
- gaia/eval/claude.py +542 -542
- gaia/eval/config.py +37 -37
- gaia/eval/email_generator.py +512 -512
- gaia/eval/eval.py +3179 -3179
- gaia/eval/groundtruth.py +1130 -1130
- gaia/eval/transcript_generator.py +582 -582
- gaia/eval/webapp/README.md +167 -167
- gaia/eval/webapp/package-lock.json +875 -875
- gaia/eval/webapp/package.json +20 -20
- gaia/eval/webapp/public/app.js +3402 -3402
- gaia/eval/webapp/public/index.html +87 -87
- gaia/eval/webapp/public/styles.css +3661 -3661
- gaia/eval/webapp/server.js +415 -415
- gaia/eval/webapp/test-setup.js +72 -72
- gaia/installer/__init__.py +23 -0
- gaia/installer/init_command.py +1275 -0
- gaia/installer/lemonade_installer.py +619 -0
- gaia/llm/__init__.py +10 -2
- gaia/llm/base_client.py +60 -0
- gaia/llm/exceptions.py +12 -0
- gaia/llm/factory.py +70 -0
- gaia/llm/lemonade_client.py +3421 -3221
- gaia/llm/lemonade_manager.py +294 -294
- gaia/llm/providers/__init__.py +9 -0
- gaia/llm/providers/claude.py +108 -0
- gaia/llm/providers/lemonade.py +118 -0
- gaia/llm/providers/openai_provider.py +79 -0
- gaia/llm/vlm_client.py +382 -382
- gaia/logger.py +189 -189
- gaia/mcp/agent_mcp_server.py +245 -245
- gaia/mcp/blender_mcp_client.py +138 -138
- gaia/mcp/blender_mcp_server.py +648 -648
- gaia/mcp/context7_cache.py +332 -332
- gaia/mcp/external_services.py +518 -518
- gaia/mcp/mcp_bridge.py +811 -550
- gaia/mcp/servers/__init__.py +6 -6
- gaia/mcp/servers/docker_mcp.py +83 -83
- gaia/perf_analysis.py +361 -0
- gaia/rag/__init__.py +10 -10
- gaia/rag/app.py +293 -293
- gaia/rag/demo.py +304 -304
- gaia/rag/pdf_utils.py +235 -235
- gaia/rag/sdk.py +2194 -2194
- gaia/security.py +183 -163
- gaia/talk/app.py +287 -289
- gaia/talk/sdk.py +538 -538
- gaia/testing/__init__.py +87 -87
- gaia/testing/assertions.py +330 -330
- gaia/testing/fixtures.py +333 -333
- gaia/testing/mocks.py +493 -493
- gaia/util.py +46 -46
- gaia/utils/__init__.py +33 -33
- gaia/utils/file_watcher.py +675 -675
- gaia/utils/parsing.py +223 -223
- gaia/version.py +100 -100
- amd_gaia-0.15.0.dist-info/RECORD +0 -168
- gaia/agents/code/app.py +0 -266
- gaia/llm/llm_client.py +0 -723
- {amd_gaia-0.15.0.dist-info → amd_gaia-0.15.2.dist-info}/top_level.txt +0 -0
gaia/llm/providers/claude.py (new file)
@@ -0,0 +1,108 @@
+# Copyright(C) 2025-2026 Advanced Micro Devices, Inc. All rights reserved.
+# SPDX-License-Identifier: MIT
+"""Claude provider - no embeddings support."""
+
+from typing import Iterator, Optional, Union
+
+try:
+    import anthropic
+except ImportError:
+    anthropic = None  # type: ignore
+
+from ..base_client import LLMClient
+
+
+class ClaudeProvider(LLMClient):
+    """Claude (Anthropic) provider."""
+
+    def __init__(
+        self,
+        api_key: Optional[str] = None,
+        model: str = "claude-3-5-sonnet-20241022",
+        system_prompt: Optional[str] = None,
+        **_kwargs,
+    ):
+        if anthropic is None:
+            raise ImportError(
+                "anthropic package is required for ClaudeProvider. "
+                "Install it with: pip install anthropic"
+            )
+
+        self._client = anthropic.Anthropic(api_key=api_key)
+        self._model = model
+        self._system_prompt = system_prompt
+
+    @property
+    def provider_name(self) -> str:
+        return "Claude"
+
+    def generate(
+        self,
+        prompt: str,
+        model: str | None = None,
+        stream: bool = False,
+        **kwargs,
+    ) -> Union[str, Iterator[str]]:
+        return self.chat(
+            [{"role": "user", "content": prompt}],
+            model=model,
+            stream=stream,
+            **kwargs,
+        )
+
+    def chat(
+        self,
+        messages: list[dict],
+        model: str | None = None,
+        stream: bool = False,
+        **kwargs,
+    ) -> Union[str, Iterator[str]]:
+        # Build parameters for Anthropic messages.create
+        params = {
+            "model": model or self._model,
+            "messages": messages,
+            "stream": stream,
+            **kwargs,
+        }
+        # Claude API requires system prompt as separate parameter, not in messages
+        if self._system_prompt:
+            params["system"] = self._system_prompt
+
+        response = self._client.messages.create(**params)
+        if stream:
+            return self._handle_stream(response)
+        return response.content[0].text
+
+    # embed() inherited from ABC - raises NotSupportedError
+
+    def vision(self, images: list[bytes], prompt: str, **kwargs) -> str:
+        import base64
+
+        # Claude supports vision via messages
+        image_b64 = base64.b64encode(images[0]).decode()
+        messages = [
+            {
+                "role": "user",
+                "content": [
+                    {
+                        "type": "image",
+                        "source": {
+                            "type": "base64",
+                            "media_type": "image/jpeg",
+                            "data": image_b64,
+                        },
+                    },
+                    {"type": "text", "text": prompt},
+                ],
+            }
+        ]
+        return self.chat(messages, **kwargs)
+
+    # get_performance_stats() inherited from ABC - raises NotSupportedError
+    # load_model() inherited from ABC - raises NotSupportedError
+    # unload_model() inherited from ABC - raises NotSupportedError
+
+    def _handle_stream(self, response) -> Iterator[str]:
+        for chunk in response:
+            if hasattr(chunk, "delta") and hasattr(chunk.delta, "text"):
+                yield chunk.delta.text
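For orientation, here is a minimal usage sketch of the new ClaudeProvider (hypothetical, not part of the diff; the prompts are placeholders). It assumes ANTHROPIC_API_KEY is set in the environment, which the anthropic SDK reads when api_key is None, and note that Anthropic's messages API requires max_tokens, which the provider forwards from **kwargs:

# Hypothetical usage sketch; behavior inferred from the new file above.
from gaia.llm.providers.claude import ClaudeProvider

provider = ClaudeProvider(system_prompt="You are a concise assistant.")

# Non-streaming: chat() returns the full reply as a string.
# max_tokens is required by Anthropic's messages API and passes through **kwargs.
reply = provider.chat(
    [{"role": "user", "content": "Summarize GAIA in one sentence."}],
    max_tokens=256,
)
print(reply)

# Streaming: generate() wraps the prompt in a user message and yields text deltas.
for piece in provider.generate("Count to three.", stream=True, max_tokens=64):
    print(piece, end="", flush=True)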
gaia/llm/providers/lemonade.py (new file)
@@ -0,0 +1,118 @@
+# Copyright(C) 2025-2026 Advanced Micro Devices, Inc. All rights reserved.
+# SPDX-License-Identifier: MIT
+"""Lemonade provider - supports ALL methods."""
+
+from typing import Iterator, Optional, Union
+
+from ..base_client import LLMClient
+from ..lemonade_client import DEFAULT_MODEL_NAME, LemonadeClient
+
+
+class LemonadeProvider(LLMClient):
+    """Lemonade provider - local AMD-optimized inference."""
+
+    def __init__(
+        self,
+        model: Optional[str] = None,
+        base_url: Optional[str] = None,
+        host: Optional[str] = None,
+        port: Optional[int] = None,
+        system_prompt: Optional[str] = None,
+        **kwargs,
+    ):
+        # Build kwargs for LemonadeClient, only including non-None values
+        backend_kwargs = {}
+        if model is not None:
+            backend_kwargs["model"] = model
+        if base_url is not None:
+            backend_kwargs["base_url"] = base_url
+        if host is not None:
+            backend_kwargs["host"] = host
+        if port is not None:
+            backend_kwargs["port"] = port
+        backend_kwargs.update(kwargs)
+
+        self._backend = LemonadeClient(**backend_kwargs)
+        self._model = model
+        self._system_prompt = system_prompt
+
+    @property
+    def provider_name(self) -> str:
+        return "Lemonade"
+
+    def generate(
+        self,
+        prompt: str,
+        model: str | None = None,
+        stream: bool = False,
+        **kwargs,
+    ) -> Union[str, Iterator[str]]:
+        # Use chat endpoint (completions endpoint not available in Lemonade v9.1+)
+        return self.chat(
+            [{"role": "user", "content": prompt}],
+            model=model,
+            stream=stream,
+            **kwargs,
+        )
+
+    def chat(
+        self,
+        messages: list[dict],
+        model: str | None = None,
+        stream: bool = False,
+        **kwargs,
+    ) -> Union[str, Iterator[str]]:
+        # Use provided model, instance model, or default CPU model
+        effective_model = model or self._model or DEFAULT_MODEL_NAME
+
+        # Prepend system prompt if set
+        if self._system_prompt:
+            messages = [{"role": "system", "content": self._system_prompt}] + list(
+                messages
+            )
+
+        # Default to low temperature for deterministic responses (matches old LLMClient behavior)
+        kwargs.setdefault("temperature", 0.1)
+
+        response = self._backend.chat_completions(
+            model=effective_model, messages=messages, stream=stream, **kwargs
+        )
+        if stream:
+            return self._handle_stream(response)
+        return response["choices"][0]["message"]["content"]
+
+    def embed(self, texts: list[str], **kwargs) -> list[list[float]]:
+        response = self._backend.embeddings(texts, **kwargs)
+        return [item["embedding"] for item in response["data"]]
+
+    def vision(self, images: list[bytes], prompt: str, **kwargs) -> str:
+        # Delegate to VLMClient
+        from ..vlm_client import VLMClient
+
+        vlm = VLMClient(base_url=self._backend.base_url)
+        return vlm.extract_from_image(images[0], prompt=prompt)
+
+    def get_performance_stats(self) -> dict:
+        return self._backend.get_stats() or {}
+
+    def load_model(self, model_name: str, **kwargs) -> None:
+        self._backend.load_model(model_name, **kwargs)
+        self._model = model_name
+
+    def unload_model(self) -> None:
+        self._backend.unload_model()
+
+    def _extract_text(self, response: dict) -> str:
+        return response["choices"][0]["text"]
+
+    def _handle_stream(self, response) -> Iterator[str]:
+        for chunk in response:
+            if "choices" in chunk and chunk["choices"]:
+                delta = chunk["choices"][0].get("delta", {})
+                content = delta.get("content")
+                if content:
+                    yield content
+                elif "text" in chunk["choices"][0]:
+                    text = chunk["choices"][0]["text"]
+                    if text:
+                        yield text
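A similar sketch for the new LemonadeProvider (hypothetical; the host and port values are placeholders for a locally running Lemonade server). Unlike the other two providers, it implements every base-class method:

# Hypothetical usage sketch; assumes a Lemonade server is already running.
from gaia.llm.providers.lemonade import LemonadeProvider

provider = LemonadeProvider(host="localhost", port=8000)  # placeholder address

# chat() falls back to DEFAULT_MODEL_NAME and defaults temperature to 0.1.
print(provider.chat([{"role": "user", "content": "Hello!"}]))

# embed() is implemented here and on OpenAIProvider, but not on ClaudeProvider.
vectors = provider.embed(["first document", "second document"])
print(len(vectors), len(vectors[0]))

# Performance stats and model management are Lemonade-only features.
print(provider.get_performance_stats())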
gaia/llm/providers/openai_provider.py (new file)
@@ -0,0 +1,79 @@
+# Copyright(C) 2025-2026 Advanced Micro Devices, Inc. All rights reserved.
+# SPDX-License-Identifier: MIT
+"""OpenAI provider - no vision support."""
+
+from typing import Iterator, Optional, Union
+
+from ..base_client import LLMClient
+
+
+class OpenAIProvider(LLMClient):
+    """OpenAI (OpenAI API) provider."""
+
+    def __init__(
+        self,
+        api_key: Optional[str] = None,
+        model: str = "gpt-4o",
+        system_prompt: Optional[str] = None,
+        **_kwargs,
+    ):
+        import openai
+
+        self._client = openai.OpenAI(api_key=api_key)
+        self._model = model
+        self._system_prompt = system_prompt
+
+    @property
+    def provider_name(self) -> str:
+        return "OpenAI"
+
+    def generate(
+        self,
+        prompt: str,
+        model: str | None = None,
+        stream: bool = False,
+        **kwargs,
+    ) -> Union[str, Iterator[str]]:
+        # OpenAI doesn't have a separate completions endpoint for chat models
+        return self.chat(
+            [{"role": "user", "content": prompt}],
+            model=model,
+            stream=stream,
+            **kwargs,
+        )
+
+    def chat(
+        self,
+        messages: list[dict],
+        model: str | None = None,
+        stream: bool = False,
+        **kwargs,
+    ) -> Union[str, Iterator[str]]:
+        # Prepend system prompt if set
+        if self._system_prompt:
+            messages = [{"role": "system", "content": self._system_prompt}] + list(
+                messages
+            )
+
+        response = self._client.chat.completions.create(
+            model=model or self._model, messages=messages, stream=stream, **kwargs
+        )
+        if stream:
+            return self._handle_stream(response)
+        return response.choices[0].message.content
+
+    def embed(
+        self, texts: list[str], model: str = "text-embedding-3-small", **kwargs
+    ) -> list[list[float]]:
+        response = self._client.embeddings.create(model=model, input=texts, **kwargs)
+        return [item.embedding for item in response.data]
+
+    # vision() inherited from ABC - raises NotSupportedError
+    # get_performance_stats() inherited from ABC - raises NotSupportedError
+    # load_model() inherited from ABC - raises NotSupportedError
+    # unload_model() inherited from ABC - raises NotSupportedError
+
+    def _handle_stream(self, response) -> Iterator[str]:
+        for chunk in response:
+            if chunk.choices and chunk.choices[0].delta.content:
+                yield chunk.choices[0].delta.content