vision-agents-plugins-xai 0.0.17 (tar.gz)

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Note: this release of vision-agents-plugins-xai has been flagged as potentially problematic; see the registry listing for details.

@@ -0,0 +1,32 @@
+ */__pycache__
+ */chat/__pycache__
+ */video/__pycache__
+ */chat/sync/__pycache__
+ */chat/async_/__pycache__
+ */sync/__pycache__
+ */async_/__pycache__
+ */video/sync/__pycache__
+ */model/__pycache__/
+ */cli/__pycache__
+ */cli/__pycache__
+ .env
+ .venv
+ .vscode/settings.json
+ *.pyc
+ dist/*
+ dist/*
+ *.log
+ .python-version
+ pyvenv.cfg
+ .idea*
+ bin/*
+ lib/*
+ shell.nix
+ pyrightconfig.json
+ .DS_Store
+
+ *.egg-info/
+ *.egg
+ *.pt
+ *.kef
+ .env.bak
@@ -0,0 +1,166 @@
+ Metadata-Version: 2.4
+ Name: vision-agents-plugins-xai
+ Version: 0.0.17
+ Summary: xAI for Stream Agents
+ Project-URL: Documentation, https://visionagents.ai/
+ Project-URL: Website, https://visionagents.ai/
+ Project-URL: Source, https://github.com/GetStream/Vision-Agents
+ License-Expression: Apache-2.0
+ Requires-Python: >=3.10.0
+ Requires-Dist: xai-sdk
+ Description-Content-Type: text/markdown
+
+ # xAI Plugin for Stream Agents
+
+ This package provides xAI (Grok) integration for the Stream Agents ecosystem, enabling you to use xAI's language models in your conversational AI applications.
+
+ ## Features
+
+ - **Native xAI SDK Integration**: Full access to xAI's chat completion and streaming APIs
+ - **Conversation Memory**: Automatic conversation history management
+ - **Streaming Support**: Real-time response streaming with standardized events
+ - **Multimodal Support**: Handle text and image inputs
+ - **Event System**: Subscribe to response events for custom handling (see "Subscribing to Events" below)
+ - **Easy Integration**: Drop-in replacement for other LLM providers
+
+ ## Installation
+
+ ```bash
+ pip install vision-agents-plugins-xai
+ ```
+
+ ## Quick Start
+
+ ```python
+ import asyncio
+ from vision_agents.plugins import xai
+
+ async def main():
+     # Initialize with your xAI API key
+     llm = xai.LLM(
+         model="grok-4",
+         api_key="your_xai_api_key",  # or set the XAI_API_KEY environment variable
+     )
+
+     # Simple response
+     response = await llm.simple_response("Explain quantum computing in simple terms")
+
+     print(f"\n\nComplete response: {response.text}")
+
+ if __name__ == "__main__":
+     asyncio.run(main())
+ ```
+
+ ## Advanced Usage
+
+ ### Conversation with Memory
+
+ ```python
+ from vision_agents.plugins import xai
+
+ llm = xai.LLM(model="grok-4", api_key="your_api_key")
+
+ # First message
+ await llm.simple_response("My name is Alice and I have 2 cats")
+
+ # Second message - the LLM remembers the context
+ response = await llm.simple_response("How many pets do I have?")
+ print(response.text)  # Will mention the 2 cats
+ ```
+
+ ### Using Instructions
+
+ ```python
+ llm = xai.LLM(
+     model="grok-4",
+     api_key="your_api_key"
+ )
+
+ # Create a response with system instructions
+ response = await llm.create_response(
+     input="Tell me about the weather",
+     instructions="You are a helpful weather assistant. Always be cheerful and optimistic.",
+     stream=True
+ )
+ ```
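+
+ ### Subscribing to Events
+
+ Streaming responses emit standardized chunk events as they arrive. A minimal sketch of a subscriber, assuming the `LLMResponseChunkEvent` import path used by the plugin's own test suite:
+
+ ```python
+ from vision_agents.core.llm.events import LLMResponseChunkEvent
+ from vision_agents.plugins import xai
+
+ llm = xai.LLM(model="grok-4", api_key="your_api_key")
+
+ @llm.events.subscribe
+ async def on_chunk(event: LLMResponseChunkEvent):
+     # Each chunk carries its incremental text in `event.delta`
+     print(event.delta, end="")
+
+ response = await llm.simple_response("Explain quantum computing in 1 paragraph")
+ ```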
+
+ ### Multimodal Input
+
+ ```python
+ # Handle complex multimodal messages
+ advanced_message = [
+     {
+         "role": "user",
+         "content": [
+             {"type": "input_text", "text": "What do you see in this image?"},
+             {"type": "input_image", "image_url": "https://example.com/image.jpg"},
+         ],
+     }
+ ]
+
+ # _normalize_message is an internal helper that standardizes messages
+ messages = xai.LLM._normalize_message(advanced_message)
+ # Use with your conversation system
+ ```
+
+
+ ## API Reference
+
+ ### XAILLM Class
+
+ The class is exported from the plugin package as `xai.LLM`.
+
+ #### Constructor
+
+ ```python
+ LLM(
+     model: str = "grok-4",
+     api_key: Optional[str] = None,
+     client: Optional[AsyncClient] = None
+ )
+ ```
+
+ **Parameters:**
+ - `model`: xAI model to use (default: `"grok-4"`)
+ - `api_key`: Your xAI API key (default: reads from the `XAI_API_KEY` environment variable)
+ - `client`: Optional pre-configured xAI `AsyncClient`
+
+ #### Methods
+
+ ##### `async simple_response(text: str, processors=None, participant=None)`
+
+ Generate a simple response to text input.
+
+ **Parameters:**
+ - `text`: Input text to respond to
+ - `processors`: Optional list of processors for video/voice AI context
+ - `participant`: Optional participant object
+
+ **Returns:** `LLMResponseEvent[Response]` with the generated text
137
+
138
+ ##### `async create_response(input: str, instructions: str = "", model: str = None, stream: bool = True)`
139
+
140
+ Create a response with full control over parameters.
141
+
142
+ **Parameters:**
143
+ - `input`: Input text
144
+ - `instructions`: System instructions for the model
145
+ - `model`: Override the default model
146
+ - `stream`: Whether to stream the response (default: True)
147
+
148
+ **Returns:** `LLMResponseEvent[Response]` with the generated text
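+
+ For example, a short sketch mirroring the plugin's test suite, assuming an `llm` constructed as in Quick Start (`stream=False` samples a single complete response instead of streaming):
+
+ ```python
+ response = await llm.create_response(
+     input="say hi",
+     instructions="You are a helpful assistant.",
+     stream=False,
+ )
+ print(response.text)
+ ```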
+
+
+ ## Configuration
+
+ ### Environment Variables
+
+ - `XAI_API_KEY`: Your xAI API key (required if not provided in the constructor)
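+
+ For example:
+
+ ```bash
+ export XAI_API_KEY="your_xai_api_key"
+ ```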
+
+
+ ## Requirements
+
+ - Python 3.10+
+ - `xai-sdk`
+ - `vision-agents-core`
+
+ ## License
+
+ Apache-2.0
@@ -0,0 +1,154 @@
+ # xAI Plugin for Stream Agents
+
+ This package provides xAI (Grok) integration for the Stream Agents ecosystem, enabling you to use xAI's language models in your conversational AI applications.
+
+ ## Features
+
+ - **Native xAI SDK Integration**: Full access to xAI's chat completion and streaming APIs
+ - **Conversation Memory**: Automatic conversation history management
+ - **Streaming Support**: Real-time response streaming with standardized events
+ - **Multimodal Support**: Handle text and image inputs
+ - **Event System**: Subscribe to response events for custom handling (see "Subscribing to Events" below)
+ - **Easy Integration**: Drop-in replacement for other LLM providers
+
+ ## Installation
+
+ ```bash
+ pip install vision-agents-plugins-xai
+ ```
+
+ ## Quick Start
+
+ ```python
+ import asyncio
+ from vision_agents.plugins import xai
+
+ async def main():
+     # Initialize with your xAI API key
+     llm = xai.LLM(
+         model="grok-4",
+         api_key="your_xai_api_key",  # or set the XAI_API_KEY environment variable
+     )
+
+     # Simple response
+     response = await llm.simple_response("Explain quantum computing in simple terms")
+
+     print(f"\n\nComplete response: {response.text}")
+
+ if __name__ == "__main__":
+     asyncio.run(main())
+ ```
+
+ ## Advanced Usage
+
+ ### Conversation with Memory
+
+ ```python
+ from vision_agents.plugins import xai
+
+ llm = xai.LLM(model="grok-4", api_key="your_api_key")
+
+ # First message
+ await llm.simple_response("My name is Alice and I have 2 cats")
+
+ # Second message - the LLM remembers the context
+ response = await llm.simple_response("How many pets do I have?")
+ print(response.text)  # Will mention the 2 cats
+ ```
+
+ ### Using Instructions
+
+ ```python
+ llm = xai.LLM(
+     model="grok-4",
+     api_key="your_api_key"
+ )
+
+ # Create a response with system instructions
+ response = await llm.create_response(
+     input="Tell me about the weather",
+     instructions="You are a helpful weather assistant. Always be cheerful and optimistic.",
+     stream=True
+ )
+ ```
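+
+ ### Subscribing to Events
+
+ Streaming responses emit standardized chunk events as they arrive. A minimal sketch of a subscriber, assuming the `LLMResponseChunkEvent` import path used by the plugin's own test suite:
+
+ ```python
+ from vision_agents.core.llm.events import LLMResponseChunkEvent
+ from vision_agents.plugins import xai
+
+ llm = xai.LLM(model="grok-4", api_key="your_api_key")
+
+ @llm.events.subscribe
+ async def on_chunk(event: LLMResponseChunkEvent):
+     # Each chunk carries its incremental text in `event.delta`
+     print(event.delta, end="")
+
+ response = await llm.simple_response("Explain quantum computing in 1 paragraph")
+ ```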
+
+ ### Multimodal Input
+
+ ```python
+ # Handle complex multimodal messages
+ advanced_message = [
+     {
+         "role": "user",
+         "content": [
+             {"type": "input_text", "text": "What do you see in this image?"},
+             {"type": "input_image", "image_url": "https://example.com/image.jpg"},
+         ],
+     }
+ ]
+
+ # _normalize_message is an internal helper that standardizes messages
+ messages = xai.LLM._normalize_message(advanced_message)
+ # Use with your conversation system
+ ```
+
+
+ ## API Reference
+
+ ### XAILLM Class
+
+ The class is exported from the plugin package as `xai.LLM`.
+
+ #### Constructor
+
+ ```python
+ LLM(
+     model: str = "grok-4",
+     api_key: Optional[str] = None,
+     client: Optional[AsyncClient] = None
+ )
+ ```
+
+ **Parameters:**
+ - `model`: xAI model to use (default: `"grok-4"`)
+ - `api_key`: Your xAI API key (default: reads from the `XAI_API_KEY` environment variable)
+ - `client`: Optional pre-configured xAI `AsyncClient`
+
+ #### Methods
+
+ ##### `async simple_response(text: str, processors=None, participant=None)`
+
+ Generate a simple response to text input.
+
+ **Parameters:**
+ - `text`: Input text to respond to
+ - `processors`: Optional list of processors for video/voice AI context
+ - `participant`: Optional participant object
+
+ **Returns:** `LLMResponseEvent[Response]` with the generated text
+
+ ##### `async create_response(input: str, instructions: str = "", model: Optional[str] = None, stream: bool = True)`
+
+ Create a response with full control over parameters.
+
+ **Parameters:**
+ - `input`: Input text
+ - `instructions`: System instructions for the model
+ - `model`: Override the default model
+ - `stream`: Whether to stream the response (default: `True`)
+
+ **Returns:** `LLMResponseEvent[Response]` with the generated text
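+
+ For example, a short sketch mirroring the plugin's test suite, assuming an `llm` constructed as in Quick Start (`stream=False` samples a single complete response instead of streaming):
+
+ ```python
+ response = await llm.create_response(
+     input="say hi",
+     instructions="You are a helpful assistant.",
+     stream=False,
+ )
+ print(response.text)
+ ```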
+
+
+ ## Configuration
+
+ ### Environment Variables
+
+ - `XAI_API_KEY`: Your xAI API key (required if not provided in the constructor)
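+
+ For example:
+
+ ```bash
+ export XAI_API_KEY="your_xai_api_key"
+ ```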
+
+
+ ## Requirements
+
+ - Python 3.10+
+ - `xai-sdk`
+ - `vision-agents-core`
+
+ ## License
+
+ Apache-2.0
@@ -0,0 +1,35 @@
+ [build-system]
+ requires = ["hatchling", "hatch-vcs"]
+ build-backend = "hatchling.build"
+
+ [project]
+ name = "vision-agents-plugins-xai"
+ dynamic = ["version"]
+ description = "xAI for Stream Agents"
+ readme = "README.md"
+ requires-python = ">=3.10.0"
+ license = "Apache-2.0"
+ dependencies = [
+     "xai-sdk",
+ ]
+
+ [project.urls]
+ Documentation = "https://visionagents.ai/"
+ Website = "https://visionagents.ai/"
+ Source = "https://github.com/GetStream/Vision-Agents"
+
+ [tool.hatch.version]
+ source = "vcs"
+ raw-options = { root = "..", search_parent_directories = true, fallback_version = "0.0.0" }
+
+ [tool.hatch.build.targets.wheel]
+ packages = ["."]
+
+ [tool.uv.sources]
+ vision-agents = { workspace = true }
+
+ [dependency-groups]
+ dev = [
+     "pytest>=8.4.1",
+     "pytest-asyncio>=1.0.0",
+ ]
@@ -0,0 +1,98 @@
+ import pytest
+ from dotenv import load_dotenv
+ import os
+
+ from vision_agents.core.agents.conversation import Message
+ from vision_agents.plugins.xai.llm import XAILLM
+ from vision_agents.core.llm.events import LLMResponseChunkEvent
+
+ load_dotenv()
+
+
+ class TestXAILLM:
+     """Test suite for XAILLM class with live API calls."""
+
+     def test_message(self):
+         messages = XAILLM._normalize_message("say hi")
+         assert isinstance(messages[0], Message)
+         message = messages[0]
+         assert message.original is not None
+         assert message.content == "say hi"
+
+     async def test_advanced_message(self):
+         img_url = "https://upload.wikimedia.org/wikipedia/commons/thumb/d/d5/2023_06_08_Raccoon1.jpg/1599px-2023_06_08_Raccoon1.jpg"
+
+         advanced = [
+             {
+                 "role": "user",
+                 "content": [
+                     {"type": "input_text", "text": "what do you see in this image?"},
+                     {"type": "input_image", "image_url": img_url},
+                 ],
+             }
+         ]
+         messages2 = XAILLM._normalize_message(advanced)
+         assert messages2[0].original is not None
+
+     @pytest.mark.integration
+     @pytest.mark.skipif(not os.getenv("XAI_API_KEY"), reason="XAI_API_KEY not set")
+     async def test_simple(self):
+         llm = XAILLM(model="grok-4", api_key=os.getenv("XAI_API_KEY"))
+         response = await llm.simple_response(
+             "Explain quantum computing in 1 paragraph",
+         )
+         assert response.text
+
+     @pytest.mark.integration
+     @pytest.mark.skipif(not os.getenv("XAI_API_KEY"), reason="XAI_API_KEY not set")
+     async def test_native_api(self):
+         llm = XAILLM(model="grok-4", api_key=os.getenv("XAI_API_KEY"))
+         response = await llm.create_response(
+             input="say hi", instructions="You are a helpful assistant."
+         )
+         assert response.text
+
+     @pytest.mark.integration
+     @pytest.mark.skipif(not os.getenv("XAI_API_KEY"), reason="XAI_API_KEY not set")
+     async def test_streaming(self):
+         llm = XAILLM(model="grok-4", api_key=os.getenv("XAI_API_KEY"))
+         streaming_works = False
+
+         @llm.events.subscribe
+         async def passed(event: LLMResponseChunkEvent):
+             nonlocal streaming_works
+             streaming_works = True
+
+         response = await llm.simple_response(
+             "Explain quantum computing in 1 paragraph",
+         )
+         await llm.events.wait()
+
+         assert response.text
+         assert streaming_works
+
+     @pytest.mark.integration
+     @pytest.mark.skipif(not os.getenv("XAI_API_KEY"), reason="XAI_API_KEY not set")
+     async def test_memory(self):
+         llm = XAILLM(model="grok-4", api_key=os.getenv("XAI_API_KEY"))
+         await llm.simple_response(
+             text="There are 2 dogs in the room",
+         )
+         await llm.events.wait()
+         response = await llm.simple_response(
+             text="How many paws are there in the room?",
+         )
+         assert "8" in response.text or "eight" in response.text
+
+     @pytest.mark.integration
+     @pytest.mark.skipif(not os.getenv("XAI_API_KEY"), reason="XAI_API_KEY not set")
+     async def test_native_memory(self):
+         llm = XAILLM(model="grok-4", api_key=os.getenv("XAI_API_KEY"))
+         await llm.create_response(
+             input="There are 2 dogs in the room",
+         )
+         await llm.events.wait()
+         response = await llm.create_response(
+             input="How many paws are there in the room?",
+         )
+         assert "8" in response.text or "eight" in response.text
@@ -0,0 +1,4 @@
+ from .llm import XAILLM as LLM
+ from .version import __version__
+
+ __all__ = ["LLM", "__version__"]
@@ -0,0 +1,10 @@
+ from dataclasses import dataclass, field
+ from vision_agents.core.events import PluginBaseEvent
+ from typing import Optional, Any
+
+
+ @dataclass
+ class XAIChunkEvent(PluginBaseEvent):
+     """Event emitted when xAI provides a chunk."""
+
+     type: str = field(default='plugin.xai.chunk', init=False)
+     chunk: Optional[Any] = None
@@ -0,0 +1,200 @@
+ from typing import Optional, List, Any, TYPE_CHECKING
+ from xai_sdk import AsyncClient
+ from xai_sdk.chat import system, user, Response, Chunk
+ from xai_sdk.proto import chat_pb2
+
+ from vision_agents.core.llm.llm import LLM, LLMResponseEvent
+ from vision_agents.core.processors import Processor
+ from vision_agents.core.llm.events import LLMResponseChunkEvent, LLMResponseCompletedEvent
+ from . import events
+
+ if TYPE_CHECKING:
+     from vision_agents.core.agents.conversation import Message
+     from getstream.video.rtc.pb.stream.video.sfu.models.models_pb2 import Participant
+     from xai_sdk.aio.chat import Chat
+ else:
+     from getstream.video.rtc.pb.stream.video.sfu.models.models_pb2 import Participant
+
+
+ class XAILLM(LLM):
+     """
+     The XAILLM class provides full/native access to the xAI SDK methods.
+     It only standardizes the minimal feature set needed for the agent integration.
+
+     The agent requires that we standardize:
+     - sharing instructions
+     - keeping conversation history
+     - response normalization
+
+     Notes on the xAI integration:
+     - the native method is called create_response (it maps to xAI chat.sample())
+     - history is maintained using the chat object's append method
+
+     Examples:
+
+         from vision_agents.plugins import xai
+         llm = xai.LLM(model="grok-4")
+     """
+
+     def __init__(
+         self,
+         model: str = "grok-4",
+         api_key: Optional[str] = None,
+         client: Optional[AsyncClient] = None,
+     ):
+         """
+         Initialize the XAILLM class.
+
+         Args:
+             model (str): The xAI model to use. Defaults to "grok-4".
+             api_key: Optional API key. By default, loads from the XAI_API_KEY environment variable.
+             client: Optional xAI client. By default, creates a new client object.
+         """
+         super().__init__()
+         self.events.register_events_from_module(events)
+         self.model = model
+         self.xai_chat: Optional["Chat"] = None
+         self.conversation = None
+
+         if client is not None:
+             self.client = client
+         elif api_key is not None and api_key != "":
+             self.client = AsyncClient(api_key=api_key)
+         else:
+             self.client = AsyncClient()
+
+     async def simple_response(
+         self,
+         text: str,
+         processors: Optional[List[Processor]] = None,
+         participant: Optional[Participant] = None,
+     ):
+         """
+         simple_response is a standardized way (across openai, claude, gemini etc.) to create a response.
+
+         Args:
+             text: The text to respond to
+             processors: list of processors (which contain state) about the video/voice AI
+             participant: optionally the participant object
+
+         Examples:
+
+             await llm.simple_response("say hi to the user, be mean")
+         """
+         instructions = None
+         if self.conversation is not None:
+             instructions = self.conversation.instructions
+
+         return await self.create_response(
+             input=text,
+             instructions=instructions,
+         )
+
+     async def create_response(self, *args: Any, **kwargs: Any) -> LLMResponseEvent[Response]:
+         """
+         create_response gives you full access to the native xAI chat.sample() and chat.stream() methods.
+         This method wraps the xAI call and ensures we broadcast an event which the agent class hooks into.
+         """
+         input_text = kwargs.get("input", "")
+         instructions = kwargs.get("instructions", "")
+         model = kwargs.get("model", self.model)
+         stream = kwargs.get("stream", True)
+
+         # Create or reuse the chat session
+         if not self.xai_chat:
+             messages = []
+             if instructions:
+                 messages.append(system(instructions))
+             self.xai_chat = self.client.chat.create(model=model, messages=messages)
+
+         # Add the user message
+         assert self.xai_chat is not None
+         self.xai_chat.append(user(input_text))
+
+         # Get the response based on the streaming preference
+         if stream:
+             # Handle a streaming response
+             llm_response: Optional[LLMResponseEvent[Response]] = None
+             assert self.xai_chat is not None
+             async for response, chunk in self.xai_chat.stream():
+                 llm_response_optional = self._standardize_and_emit_chunk(
+                     chunk, response
+                 )
+                 if llm_response_optional is not None:
+                     llm_response = llm_response_optional
+
+             # Add the response to the chat history
+             if llm_response and llm_response.original:
+                 assert self.xai_chat is not None
+                 self.xai_chat.append(llm_response.original)
+         else:
+             # Handle a non-streaming response
+             assert self.xai_chat is not None
+             response = await self.xai_chat.sample()
+             llm_response = LLMResponseEvent[Response](response, response.content)
+
+             # Add the response to the chat history
+             assert self.xai_chat is not None
+             self.xai_chat.append(response)
+
+         if llm_response is not None:
+             self.events.send(LLMResponseCompletedEvent(
+                 original=llm_response.original,
+                 text=llm_response.text
+             ))
+
+         return llm_response or LLMResponseEvent[Response](
+             Response(chat_pb2.GetChatCompletionResponse(), 0), ""
+         )
+
+     @staticmethod
+     def _normalize_message(input_text: str) -> List["Message"]:
+         """
+         Takes the input text and standardizes it so we can store it in chat
+         """
+         from vision_agents.core.agents.conversation import Message
+
+         # Create a standardized message from the input text
+         message = Message(
+             original={"content": input_text, "role": "user", "type": "message"},
+             content=input_text,
+         )
+
+         return [message]
+
+     def _standardize_and_emit_chunk(
+         self, chunk: Chunk, response: Response
+     ) -> Optional[LLMResponseEvent[Response]]:
+         """
+         Forwards the chunk events and also sends out a standardized version (the agent class hooks into that)
+         """
+         # Emit the raw chunk event
+         self.events.send(events.XAIChunkEvent(
+             plugin_name="xai",
+             chunk=chunk
+         ))
+
+         # Emit standardized delta events for content
+         if chunk.content:
+             self.events.send(LLMResponseChunkEvent(
+                 content_index=0,  # xAI doesn't have content_index
+                 item_id=chunk.proto.id if hasattr(chunk.proto, "id") else "",
+                 output_index=0,  # xAI doesn't have output_index
+                 sequence_number=0,  # xAI doesn't have sequence_number
+                 delta=chunk.content,
+                 plugin_name="xai",
+             ))
+
+         # Check if this is the final chunk (finish_reason indicates completion)
+         if chunk.choices and chunk.choices[0].finish_reason:
+             # This is the final chunk, so return the complete response
+             llm_response = LLMResponseEvent[Response](response, response.content)
+             self.events.send(LLMResponseCompletedEvent(
+                 plugin_name="xai",
+                 text=llm_response.text,
+                 original=llm_response.original
+             ))
+             return llm_response
+
+         return None
@@ -0,0 +1 @@
+ __version__ = "0.1.0"