econagents 0.0.1__tar.gz → 0.0.2__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (27)
  1. econagents-0.0.2/LICENSE +21 -0
  2. {econagents-0.0.1 → econagents-0.0.2}/PKG-INFO +38 -15
  3. {econagents-0.0.1 → econagents-0.0.2}/README.md +8 -4
  4. {econagents-0.0.1 → econagents-0.0.2}/econagents/__init__.py +4 -2
  5. {econagents-0.0.1 → econagents-0.0.2}/econagents/core/agent_role.py +6 -6
  6. {econagents-0.0.1 → econagents-0.0.2}/econagents/core/game_runner.py +17 -3
  7. {econagents-0.0.1 → econagents-0.0.2}/econagents/core/manager/phase.py +34 -57
  8. econagents-0.0.2/econagents/llm/__init__.py +20 -0
  9. econagents-0.0.2/econagents/llm/base.py +62 -0
  10. econagents-0.0.2/econagents/llm/observability.py +282 -0
  11. econagents-0.0.2/econagents/llm/ollama.py +77 -0
  12. econagents-0.0.2/econagents/llm/openai.py +77 -0
  13. {econagents-0.0.1 → econagents-0.0.2}/pyproject.toml +46 -16
  14. econagents-0.0.1/econagents/llm/__init__.py +0 -3
  15. econagents-0.0.1/econagents/llm/openai.py +0 -61
  16. {econagents-0.0.1 → econagents-0.0.2}/econagents/_c_extension.pyi +0 -0
  17. {econagents-0.0.1 → econagents-0.0.2}/econagents/core/__init__.py +0 -0
  18. {econagents-0.0.1 → econagents-0.0.2}/econagents/core/events.py +0 -0
  19. {econagents-0.0.1 → econagents-0.0.2}/econagents/core/logging_mixin.py +0 -0
  20. {econagents-0.0.1 → econagents-0.0.2}/econagents/core/manager/__init__.py +0 -0
  21. {econagents-0.0.1 → econagents-0.0.2}/econagents/core/manager/base.py +0 -0
  22. {econagents-0.0.1 → econagents-0.0.2}/econagents/core/state/__init__.py +0 -0
  23. {econagents-0.0.1 → econagents-0.0.2}/econagents/core/state/fields.py +0 -0
  24. {econagents-0.0.1 → econagents-0.0.2}/econagents/core/state/game.py +0 -0
  25. {econagents-0.0.1 → econagents-0.0.2}/econagents/core/state/market.py +0 -0
  26. {econagents-0.0.1 → econagents-0.0.2}/econagents/core/transport.py +0 -0
  27. {econagents-0.0.1 → econagents-0.0.2}/econagents/py.typed +0 -0
econagents-0.0.2/LICENSE
@@ -0,0 +1,21 @@
+ MIT License
+
+ Copyright (c) Delft University of Technology
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and associated documentation files (the "Software"), to deal
+ in the Software without restriction, including without limitation the rights
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in all
+ copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ SOFTWARE.
{econagents-0.0.1 → econagents-0.0.2}/PKG-INFO
@@ -1,24 +1,43 @@
  Metadata-Version: 2.3
  Name: econagents
- Version: 0.0.1
- Summary:
- License: Apache-2.0
- Author: Dylan
+ Version: 0.0.2
+ Summary: econagents is a Python library that lets you use LLM agents in economic experiments. The framework connects LLM agents to game servers through WebSockets and provides a flexible architecture for designing, customizing, and running economic simulations.
+ License: MIT License
+
+ Copyright (c) Delft University of Technology
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and associated documentation files (the "Software"), to deal
+ in the Software without restriction, including without limitation the rights
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in all
+ copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ SOFTWARE.
  Requires-Python: >=3.10,<3.13
- Classifier: License :: OSI Approved :: Apache Software License
+ Classifier: License :: Other/Proprietary License
  Classifier: Programming Language :: Python :: 3
  Classifier: Programming Language :: Python :: 3.10
  Classifier: Programming Language :: Python :: 3.11
  Classifier: Programming Language :: Python :: 3.12
- Requires-Dist: langsmith (>=0.3.13,<0.4.0)
- Requires-Dist: numpy (>=2.2.3,<3.0.0)
- Requires-Dist: openai (>=1.65.5,<2.0.0)
+ Provides-Extra: all
+ Provides-Extra: default
+ Provides-Extra: langfuse
+ Provides-Extra: langsmith
+ Provides-Extra: ollama
+ Provides-Extra: openai
  Requires-Dist: pydantic (>=2.10.6,<3.0.0)
  Requires-Dist: requests (>=2.32.3,<3.0.0)
- Requires-Dist: typing-extensions (>=4.12.2,<5.0.0)
  Requires-Dist: websockets (>=15.0,<16.0)
- Project-URL: Homepage, https://github.com/iwanalabs/econagents
- Project-URL: Repository, https://github.com/iwanalabs/econagents
  Description-Content-Type: text/markdown

  <div align="center">
@@ -61,13 +80,15 @@ econagents consists of four key components:

  ## Example Experiments

- The repository includes two example experiments:
+ The repository includes three example games:

  1. **`prisoner`**: An iterated Prisoner's Dilemma game with 5 rounds and 2 LLM agents.
- 2. **`tudeflt/harberger`**: A Harberger Tax simulation with LLM agents.
- 3. **`tudeflt/futarchy`**: A Futarchy simulation with LLM agents.
+ 2. **`ibex_tudelft/harberger`**: A Harberger Tax simulation with LLM agents.
+ 3. **`ibex_tudelft/futarchy`**: A Futarchy simulation with LLM agents.

- ### Running the Prisoner's Dilemma Experiment
+ ### Running the Prisoner's Dilemma game
+
+ The simplest game to run is a version of the repeated prisoner's dilemma game that runs on your local machine.

  ```shell
  # Run the server
@@ -77,6 +98,8 @@ python examples/server/prisoner/server.py
  python examples/prisoner/run_game.py
  ```

+ Note: you still have to set up the connection to the agents.
+
  ## Key Features

  - **Flexible Agent Customization**: Customize agent behavior with Jinja templates or custom Python methods
{econagents-0.0.1 → econagents-0.0.2}/README.md
@@ -38,13 +38,15 @@ econagents consists of four key components:

  ## Example Experiments

- The repository includes two example experiments:
+ The repository includes three example games:

  1. **`prisoner`**: An iterated Prisoner's Dilemma game with 5 rounds and 2 LLM agents.
- 2. **`tudeflt/harberger`**: A Harberger Tax simulation with LLM agents.
- 3. **`tudeflt/futarchy`**: A Futarchy simulation with LLM agents.
+ 2. **`ibex_tudelft/harberger`**: A Harberger Tax simulation with LLM agents.
+ 3. **`ibex_tudelft/futarchy`**: A Futarchy simulation with LLM agents.

- ### Running the Prisoner's Dilemma Experiment
+ ### Running the Prisoner's Dilemma game
+
+ The simplest game to run is a version of the repeated prisoner's dilemma game that runs on your local machine.

  ```shell
  # Run the server
@@ -54,6 +56,8 @@ python examples/server/prisoner/server.py
  python examples/prisoner/run_game.py
  ```

+ Note: you still have to set up the connection to the agents.
+
  ## Key Features

  - **Flexible Agent Customization**: Customize agent behavior with Jinja templates or custom Python methods
{econagents-0.0.1 → econagents-0.0.2}/econagents/__init__.py
@@ -1,17 +1,18 @@
  """
- econagents: A Python library for setting up and running economic experiments with LLMs or human subjects.
+ econagents: A Python library that lets you use LLM agents in economic experiments.
  """

  from econagents.core.agent_role import AgentRole
  from econagents.core.game_runner import GameRunner, HybridGameRunnerConfig, TurnBasedGameRunnerConfig
  from econagents.core.manager import AgentManager
+ from econagents.core.transport import WebSocketTransport
  from econagents.core.manager.phase import PhaseManager, HybridPhaseManager, TurnBasedPhaseManager
  from econagents.core.state.fields import EventField
  from econagents.core.state.game import GameState, MetaInformation, PrivateInformation, PublicInformation
  from econagents.llm.openai import ChatOpenAI

  # Don't manually change, let poetry-dynamic-versioning handle it.
- __version__ = "0.0.1"
+ __version__ = "0.0.2"

  __all__: list[str] = [
      "AgentRole",
@@ -28,4 +29,5 @@ __all__: list[str] = [
      "TurnBasedGameRunnerConfig",
      "HybridGameRunnerConfig",
      "EventField",
+     "WebSocketTransport",
  ]
{econagents-0.0.1 → econagents-0.0.2}/econagents/core/agent_role.py
@@ -9,7 +9,7 @@ from jinja2.sandbox import SandboxedEnvironment

  from econagents.core.logging_mixin import LoggerMixin
  from econagents.core.state.game import GameStateProtocol
- from econagents.llm.openai import ChatOpenAI
+ from econagents.llm.base import BaseLLM

  StateT_contra = TypeVar("StateT_contra", bound=GameStateProtocol, contravariant=True)

@@ -17,7 +17,7 @@ StateT_contra = TypeVar("StateT_contra", bound=GameStateProtocol, contravariant=
  class AgentProtocol(Protocol):
      role: ClassVar[int]
      name: ClassVar[str]
-     llm: ChatOpenAI
+     llm: BaseLLM
      task_phases: ClassVar[list[int]]


@@ -41,12 +41,12 @@ class AgentRole(ABC, Generic[StateT_contra], LoggerMixin):
      """Unique identifier for this role"""
      name: ClassVar[str]
      """Human-readable name for this role"""
-     llm: ChatOpenAI
+     llm: BaseLLM
      """Language model instance for generating responses"""
      task_phases: ClassVar[list[int]] = []  # Empty list means no specific phases are required
      """List of phases this agent should participate in (empty means all phases)"""
      task_phases_excluded: ClassVar[list[int]] = []  # Empty list means no phases are excluded
-
+     """ Alternative way to specify phases this agent should participate in, listed phases are excluded (empty means nothing excluded)"""
      # Regex patterns for method name extraction
      _SYSTEM_PROMPT_PATTERN: ClassVar[Pattern] = re.compile(r"get_phase_(\d+)_system_prompt")
      _USER_PROMPT_PATTERN: ClassVar[Pattern] = re.compile(r"get_phase_(\d+)_user_prompt")
@@ -109,9 +109,9 @@ class AgentRole(ABC, Generic[StateT_contra], LoggerMixin):

      Template resolution order:

-     1. Agent-specific phase prompt (e.g., "agent_name_system_phase_1.jinja2")
+     1. Role-specific phase prompt (e.g., "role_name_system_phase_1.jinja2")

-     2. Agent-specific general prompt (e.g., "agent_name_system.jinja2")
+     2. Role-specific general prompt (e.g., "role_name_system.jinja2")

      3. All-role phase prompt (e.g., "all_system_phase_1.jinja2")
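The resolution order above keys template names off the role's `name`, and the `llm` attribute is now typed against `BaseLLM` rather than a concrete OpenAI client. A minimal sketch of a role definition under these assumptions (the `Seller` class, its role id, and the phase numbers are hypothetical; only attributes visible in this diff are used):

```python
from econagents import AgentRole
from econagents.llm import BaseLLM


class Seller(AgentRole):
    role = 2              # hypothetical role id
    name = "seller"       # prompts resolve as "seller_system_phase_1.jinja2", then "seller_system.jinja2", ...
    task_phases = [1, 3]  # participate only in phases 1 and 3; [] would mean all phases

    # any BaseLLM implementation (ChatOpenAI, ChatOllama, or a custom subclass) can back the role
    llm: BaseLLM
```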
{econagents-0.0.1 → econagents-0.0.2}/econagents/core/game_runner.py
@@ -4,14 +4,14 @@ import queue
  from contextvars import ContextVar
  from logging.handlers import QueueHandler, QueueListener
  from pathlib import Path
- from typing import Optional, Type
+ from typing import Literal, Optional, Type

  from pydantic import BaseModel, Field

- from econagents.core.manager.base import AgentManager
  from econagents.core.manager.phase import PhaseManager
  from econagents.core.state.game import GameState
  from econagents.core.transport import AuthenticationMechanism, SimpleLoginPayloadAuth
+ from econagents.llm.observability import get_observability_provider

  ctx_agent_id: ContextVar[str] = ContextVar("agent_id", default="N/A")

@@ -59,6 +59,10 @@ class GameRunnerConfig(BaseModel):
      state_class: Optional[Type[GameState]] = None
      """Class to use for the state"""

+     # Observability configuration
+     observability_provider: Optional[Literal["langsmith", "langfuse"]] = None
+     """Name of the observability provider to use. Options: 'langsmith' or 'langfuse'"""
+

  class TurnBasedGameRunnerConfig(GameRunnerConfig):
      """Configuration class for TurnBasedGameRunner."""
@@ -299,12 +303,22 @@ class GameRunner:
          agent_manager.auth_mechanism = self.config.auth_mechanism
          agent_manager.logger.debug(f"Injected default auth mechanism: {agent_manager.auth_mechanism}")

+         if agent_manager.llm_provider and self.config.observability_provider:
+             try:
+                 provider = get_observability_provider(self.config.observability_provider)
+                 agent_manager.llm_provider.observability = provider
+                 agent_manager.logger.debug(
+                     f"Injected {self.config.observability_provider} observability provider into LLM provider"
+                 )
+             except Exception as e:
+                 agent_manager.logger.error(f"Failed to initialize observability provider: {e}")
+
          if isinstance(self.config, HybridGameRunnerConfig):
              agent_manager.continuous_phases = set(self.config.continuous_phases)
              agent_manager.min_action_delay = self.config.min_action_delay
              agent_manager.max_action_delay = self.config.max_action_delay
              agent_manager.logger.debug(
-                 f"Injected default continuous phases: {agent_manager.continuous_phases}, min action delay: {agent_manager.min_action_delay}, max action delay: {agent_manager.max_action_delay}"
+                 f"Injected default continuous-time phases: {agent_manager.continuous_phases}, min action delay: {agent_manager.min_action_delay}, max action delay: {agent_manager.max_action_delay}"
              )

      def _inject_agent_logger(self, agent_manager: PhaseManager, agent_id: int) -> None:
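The new `observability_provider` field means the runner, not the agent code, decides whether LLM calls are tracked. A sketch of the equivalent manual wiring (assumes the `openai` extra is installed; the key value is a placeholder):

```python
from econagents.llm import get_observability_provider
from econagents.llm.openai import ChatOpenAI

llm = ChatOpenAI(model_name="gpt-4o", api_key="sk-placeholder")  # placeholder key
# GameRunner performs this assignment itself when config.observability_provider is set:
llm.observability = get_observability_provider("langsmith")  # no-op fallback if langsmith is missing
```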
{econagents-0.0.1 → econagents-0.0.2}/econagents/core/manager/phase.py
@@ -17,16 +17,14 @@ class PhaseManager(AgentManager, ABC):
      """
      Abstract manager that handles the concept of 'phases' in a game.

-     This manager standardizes the interface for phase-based games with hooks for
-     phase transitions and optional continuous phase handling.
+     This manager standardizes the interface for phase-based games with optional
+     continuous-time phase handling.

      Features:
      1. Standardized interface for starting a phase

      2. Optional continuous "tick loop" for phases

-     3. Hooks for "on phase start," "on phase end," and "on phase transition event"
-
      All configuration parameters can be:

      1. Provided at initialization time
@@ -38,8 +36,8 @@ class PhaseManager(AgentManager, ABC):
          phase_transition_event (Optional[str]): Event name for phase transitions
          phase_identifier_key (Optional[str]): Key in the event data that identifies the phase
          continuous_phases (Optional[set[int]]): set of phase numbers that should be treated as continuous
-         min_action_delay (Optional[int]): Minimum delay in seconds between actions in continuous phases
-         max_action_delay (Optional[int]): Maximum delay in seconds between actions in continuous phases
+         min_action_delay (Optional[int]): Minimum delay in seconds between actions in continuous-time phases
+         max_action_delay (Optional[int]): Maximum delay in seconds between actions in continuous-time phases
          state (Optional[GameState]): Game state object to track game state
          agent_role (Optional[AgentRole]): Agent role instance to handle game phases
          auth_mechanism (Optional[AuthenticationMechanism]): Authentication mechanism to use
@@ -145,12 +143,12 @@ class PhaseManager(AgentManager, ABC):

      @property
      def continuous_phases(self) -> set[int]:
-         """Get the set of continuous phases."""
+         """Get the set of continuous-time phases."""
          return self._continuous_phases  # type: ignore

      @continuous_phases.setter
      def continuous_phases(self, value: set[int]):
-         """Set the continuous phases."""
+         """Set the continuous-time phases."""
          self._continuous_phases = value

      @property
@@ -183,6 +181,13 @@ class PhaseManager(AgentManager, ABC):
          """Set the prompts directory."""
          self._prompts_dir = value

+     @property
+     def llm_provider(self):
+         """Get the LLM provider from the agent role."""
+         if self._agent_role and hasattr(self._agent_role, "llm"):
+             return self._agent_role.llm
+         return None
+
      async def start(self):
          """Start the manager."""
          # TODO: is there a better place to do this?
@@ -220,38 +225,27 @@ class PhaseManager(AgentManager, ABC):
          Handle a phase transition.

          This method is the main orchestrator for phase transitions:
-         1. If leaving a continuous phase, stops the continuous task
-         2. Calls the on_phase_end hook for the old phase
-         3. Updates the current phase
-         4. Calls the on_phase_start hook for the new phase
-         5. Starts a continuous task if entering a continuous phase
-         6. Executes a single action if entering a non-continuous phase
+         1. If leaving a continuous-time phase, stops the continuous task
+         2. Updates the current phase
+         3. Starts a continuous task if entering a continuous-time phase
+         4. Executes a single action if entering a non-continuous-time phase

          Args:
              new_phase (Optional[int]): The new phase number
          """
          self.logger.info(f"Transitioning to phase {new_phase}")

-         # If we were in a continuous phase, stop it
+         # If we were in a continuous-time phase, stop it
          if self.in_continuous_phase and new_phase != self.current_phase:
-             self.logger.info(f"Stopping continuous phase {self.current_phase}")
+             self.logger.info(f"Stopping continuous-time phase {self.current_phase}")
              self.in_continuous_phase = False
              if self._continuous_task:
                  self._continuous_task.cancel()
                  self._continuous_task = None

-         # Call the on_phase_end hook for the old phase
-         old_phase = self.current_phase
-         if old_phase is not None:
-             await self.on_phase_end(old_phase)
-
-         # Update current phase
          self.current_phase = new_phase

          if new_phase is not None:
-             # Call the on_phase_start hook for the new phase
-             await self.on_phase_start(new_phase)
-
              # If the new phase is continuous, start a continuous task
              if self.continuous_phases and new_phase in self.continuous_phases:
                  self.in_continuous_phase = True
@@ -260,12 +254,12 @@ class PhaseManager(AgentManager, ABC):
                  # Execute an initial action
                  await self.execute_phase_action(new_phase)
              else:
-                 # Execute a single action for non-continuous phases
+                 # Execute a single action for non-continuous-time phases
                  await self.execute_phase_action(new_phase)

      async def _continuous_phase_loop(self, phase: int):
          """
-         Run a loop that periodically executes actions for a continuous phase.
+         Run a loop that periodically executes actions for a continuous-time phase.

          Args:
              phase (int): The phase number
@@ -277,16 +271,16 @@ class PhaseManager(AgentManager, ABC):
                  self.logger.debug(f"Waiting {delay} seconds before next action in phase {phase}")
                  await asyncio.sleep(delay)

-                 # Check if we're still in the same continuous phase
+                 # Check if we're still in the same continuous-time phase
                  if not self.in_continuous_phase or self.current_phase != phase:
                      break

                  # Execute the action
                  await self.execute_phase_action(phase)
          except asyncio.CancelledError:
-             self.logger.info(f"Continuous phase {phase} loop cancelled")
+             self.logger.info(f"Continuous-time phase {phase} loop cancelled")
          except Exception as e:
-             self.logger.exception(f"Error in continuous phase {phase} loop: {e}")
+             self.logger.exception(f"Error in continuous-time phase {phase} loop: {e}")

      @abstractmethod
      async def execute_phase_action(self, phase: int):
@@ -301,30 +295,8 @@ class PhaseManager(AgentManager, ABC):
          """
          pass

-     async def on_phase_start(self, phase: int):
-         """
-         Hook that is called when a phase starts.
-
-         Subclasses can override this to implement custom behavior.
-
-         Args:
-             phase (int): The phase number
-         """
-         pass
-
-     async def on_phase_end(self, phase: int):
-         """
-         Hook that is called when a phase ends.
-
-         Subclasses can override this to implement custom behavior.
-
-         Args:
-             phase (int): The phase number
-         """
-         pass
-
      async def stop(self):
-         """Stop the manager and cancel any continuous phase tasks."""
+         """Stop the manager and cancel any continuous-time phase tasks."""
          self.in_continuous_phase = False
          if self._continuous_task:
              self._continuous_task.cancel()
@@ -337,7 +309,8 @@ class TurnBasedPhaseManager(PhaseManager):
      A manager for turn-based games that handles phase transitions.

      This manager inherits from PhaseManager and provides a concrete implementation
-     for executing actions in each phase.
+     for executing actions in each phase. All phases are treated as turn-based,
+     meaning actions are only taken when explicitly triggered (no continuous actions).

      Args:
          url (Optional[str]): WebSocket server URL
@@ -416,7 +389,11 @@ class HybridPhaseManager(PhaseManager):
      A manager for games that combine turn-based and continuous action phases.

      This manager extends PhaseManager and configures it with specific phases
-     that should be treated as continuous.
+     that should be treated as continuous. By default, all phases are treated as
+     turn-based unless explicitly included in the continuous_phases parameter.
+
+     For continuous-time phases, the manager will automatically execute actions periodically
+     with random delays between min_action_delay and max_action_delay seconds.

      Args:
          continuous_phases (Optional[set[int]]): Set of phase numbers that should be treated as continuous
@@ -425,8 +402,8 @@ class HybridPhaseManager(PhaseManager):
          auth_mechanism_kwargs (Optional[dict[str, Any]]): Keyword arguments for the authentication mechanism
          phase_transition_event (Optional[str]): Event name for phase transitions
          phase_identifier_key (Optional[str]): Key in the event data that identifies the phase
-         min_action_delay (Optional[int]): Minimum delay in seconds between actions in continuous phases
-         max_action_delay (Optional[int]): Maximum delay in seconds between actions in continuous phases
+         min_action_delay (Optional[int]): Minimum delay in seconds between actions in continuous-time phases
+         max_action_delay (Optional[int]): Maximum delay in seconds between actions in continuous-time phases
          state (Optional[GameState]): Game state object to track game state
          agent_role (Optional[AgentRole]): Agent role instance to handle game phases
          logger (Optional[logging.Logger]): Logger instance for tracking events
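With the start/end hooks removed, `execute_phase_action` is the only abstract method left on `PhaseManager`, so a hybrid game needs exactly one override. A sketch under these assumptions (`MarketManager`, the phase numbers, and the keyword-only construction are hypothetical; the argument names come from the docstring above):

```python
from econagents import HybridPhaseManager


class MarketManager(HybridPhaseManager):
    """Hypothetical manager where phase 3 is a continuous trading phase."""

    async def execute_phase_action(self, phase: int):
        # query the role's LLM and send the chosen action to the server (game-specific)
        ...


manager = MarketManager(
    continuous_phases={3},  # phase 3 gets the tick loop
    min_action_delay=5,     # each tick sleeps a random 5-10 seconds before acting
    max_action_delay=10,
)
```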
econagents-0.0.2/econagents/llm/__init__.py
@@ -0,0 +1,20 @@
+ from econagents.llm.base import BaseLLM, LLMProvider
+ from econagents.llm.observability import ObservabilityProvider, get_observability_provider
+
+ # Import specific implementations if available
+ try:
+     from econagents.llm.openai import ChatOpenAI
+ except ImportError:
+     pass
+
+ try:
+     from econagents.llm.ollama import ChatOllama
+ except ImportError:
+     pass
+
+ __all__: list[str] = [
+     "BaseLLM",
+     "LLMProvider",
+     "ObservabilityProvider",
+     "get_observability_provider",
+ ]
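Because the concrete clients are imported inside `try/except ImportError`, `econagents.llm` itself imports cleanly with no extras installed. A sketch of the same defensive pattern at the call site (the fallback variable is illustrative):

```python
from econagents.llm import BaseLLM, get_observability_provider  # always available

try:
    from econagents.llm import ChatOllama  # only present when the `ollama` extra is installed
except ImportError:
    ChatOllama = None  # pick another provider, or fail with a clearer message
```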
econagents-0.0.2/econagents/llm/base.py
@@ -0,0 +1,62 @@
+ from abc import ABC, abstractmethod
+ from typing import Any, Protocol, runtime_checkable
+
+ from econagents.llm.observability import ObservabilityProvider, get_observability_provider
+
+
+ @runtime_checkable
+ class LLMProvider(Protocol):
+     """Protocol for LLM providers."""
+
+     async def get_response(
+         self,
+         messages: list[dict[str, Any]],
+         tracing_extra: dict[str, Any],
+         **kwargs: Any,
+     ) -> str:
+         """Get a response from the LLM."""
+         ...
+
+     def build_messages(self, system_prompt: str, user_prompt: str) -> list[dict[str, Any]]:
+         """Build messages for the LLM."""
+         ...
+
+
+ class BaseLLM(ABC):
+     """Base class for LLM implementations."""
+
+     observability: ObservabilityProvider = get_observability_provider("noop")
+
+     def build_messages(self, system_prompt: str, user_prompt: str) -> list[dict[str, Any]]:
+         """Build messages for the LLM.
+
+         Args:
+             system_prompt: The system prompt for the LLM.
+             user_prompt: The user prompt for the LLM.
+
+         Returns:
+             The messages for the LLM.
+         """
+         return [
+             {"role": "system", "content": system_prompt},
+             {"role": "user", "content": user_prompt},
+         ]
+
+     @abstractmethod
+     async def get_response(
+         self,
+         messages: list[dict[str, Any]],
+         tracing_extra: dict[str, Any],
+         **kwargs: Any,
+     ) -> str:
+         """Get a response from the LLM.
+
+         Args:
+             messages: The messages for the LLM.
+             tracing_extra: The extra tracing information.
+             **kwargs: Additional arguments to pass to the LLM.
+
+         Returns:
+             The response from the LLM.
+         """
+         ...
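Subclasses only have to implement `get_response`; `build_messages` and the class-level no-op `observability` come for free. A toy provider as a sketch (the `EchoLLM` class is hypothetical, useful mainly for tests):

```python
from typing import Any

from econagents.llm.base import BaseLLM


class EchoLLM(BaseLLM):
    """Hypothetical provider that parrots the user prompt back; handy in tests."""

    async def get_response(
        self,
        messages: list[dict[str, Any]],
        tracing_extra: dict[str, Any],
        **kwargs: Any,
    ) -> str:
        response = messages[-1]["content"]
        # reuse the inherited observability hook, exactly like the real providers do
        self.observability.track_llm_call(
            name="echo_completion",
            model="echo",
            messages=messages,
            response=response,
            metadata=tracing_extra,
        )
        return response
```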
econagents-0.0.2/econagents/llm/observability.py
@@ -0,0 +1,282 @@
+ """Observability interfaces for LLM providers."""
+
+ import importlib.util
+ import logging
+ from abc import ABC, abstractmethod
+ from typing import Any, Dict, List, Optional
+
+ logger = logging.getLogger(__name__)
+
+
+ class ObservabilityProvider(ABC):
+     """Base class for observability providers."""
+
+     @abstractmethod
+     def track_llm_call(
+         self,
+         name: str,
+         model: str,
+         messages: List[Dict[str, Any]],
+         response: Any,
+         metadata: Optional[Dict[str, Any]] = None,
+     ) -> None:
+         """Track an LLM call directly without creating a run tree.
+
+         Args:
+             name: Name of the operation.
+             model: Model used for the call.
+             messages: Messages sent to the model.
+             response: Response from the model.
+             metadata: Additional metadata for the call.
+         """
+         ...
+
+
+ class NoOpObservability(ObservabilityProvider):
+     """No-op observability provider that does nothing."""
+
+     def track_llm_call(
+         self,
+         name: str,
+         model: str,
+         messages: List[Dict[str, Any]],
+         response: Any,
+         metadata: Optional[Dict[str, Any]] = None,
+     ) -> None:
+         """No-op implementation of track_llm_call."""
+         pass
+
+
+ class LangSmithObservability(ObservabilityProvider):
+     """LangSmith observability provider."""
+
+     def __init__(self) -> None:
+         """Initialize the LangSmith observability provider."""
+         self._check_langsmith_available()
+
+     def _check_langsmith_available(self) -> None:
+         """Check if LangSmith is available."""
+         if not importlib.util.find_spec("langsmith"):
+             raise ImportError("LangSmith is not installed. Install it with: pip install econagents[langsmith]")
+
+     def _create_run_tree(
+         self,
+         name: str,
+         run_type: str,
+         inputs: Dict[str, Any],
+     ) -> Any:
+         """Create a LangSmith run tree.
+
+         Args:
+             name: Name of the run.
+             run_type: Type of the run (e.g., "chain", "llm").
+             inputs: Inputs for the run.
+
+         Returns:
+             A LangSmith RunTree object.
+         """
+         try:
+             from langsmith.run_trees import RunTree
+
+             run_tree = RunTree(name=name, run_type=run_type, inputs=inputs)
+             run_tree.post()
+             return run_tree
+         except ImportError:
+             logger.warning("LangSmith is not available. Using no-op run tree.")
+             return {"name": name, "run_type": run_type, "inputs": inputs}
+
+     def _create_child_run(
+         self,
+         parent_run: Any,
+         name: str,
+         run_type: str,
+         inputs: Dict[str, Any],
+     ) -> Any:
+         """Create a child run in LangSmith.
+
+         Args:
+             parent_run: Parent RunTree object.
+             name: Name of the child run.
+             run_type: Type of the child run.
+             inputs: Inputs for the child run.
+
+         Returns:
+             A child RunTree object.
+         """
+         try:
+             child_run = parent_run.create_child(
+                 name=name,
+                 run_type=run_type,
+                 inputs=inputs,
+             )
+             child_run.post()
+             return child_run
+         except (ImportError, AttributeError):
+             logger.warning("LangSmith create_child failed. Using no-op child run.")
+             return {"name": name, "run_type": run_type, "inputs": inputs, "parent": parent_run}
+
+     def _end_run(
+         self,
+         run: Any,
+         outputs: Dict[str, Any],
+     ) -> None:
+         """End a LangSmith run with outputs.
+
+         Args:
+             run: RunTree object to end.
+             outputs: Outputs of the run.
+         """
+         try:
+             run.end(outputs=outputs)
+             run.patch()
+         except (ImportError, AttributeError) as e:
+             logger.warning(f"LangSmith end_run failed: {e}")
+
+     def track_llm_call(
+         self,
+         name: str,
+         model: str,
+         messages: List[Dict[str, Any]],
+         response: Any,
+         metadata: Optional[Dict[str, Any]] = None,
+     ) -> None:
+         """Track an LLM call using LangSmith RunTree.
+
+         Args:
+             name: Name of the operation.
+             model: Model used for the call.
+             messages: Messages sent to the model.
+             response: Response from the model.
+             metadata: Additional metadata for the call.
+         """
+         try:
+             # Create a top-level run
+             run_tree = self._create_run_tree(
+                 name=name, run_type="chain", inputs={"messages": messages, "metadata": metadata or {}}
+             )
+
+             # Create LLM child run
+             child_run = self._create_child_run(
+                 parent_run=run_tree, name=f"{model} Call", run_type="llm", inputs={"messages": messages}
+             )
+
+             # End the runs
+             self._end_run(child_run, outputs=response)
+
+             # Get the content from the response if it's in the expected format
+             output_content = None
+             if hasattr(response, "choices") and response.choices:
+                 if hasattr(response.choices[0], "message") and hasattr(response.choices[0].message, "content"):
+                     output_content = response.choices[0].message.content
+
+             self._end_run(run_tree, outputs={"response": output_content or response})
+         except Exception as e:
+             logger.warning(f"Failed to track LLM call with LangSmith: {e}")
+
+
+ class LangFuseObservability(ObservabilityProvider):
+     """LangFuse observability provider."""
+
+     def __init__(self) -> None:
+         """Initialize the LangFuse observability provider."""
+         self._check_langfuse_available()
+         self._langfuse_client = None
+
+     def _check_langfuse_available(self) -> None:
+         """Check if LangFuse is available."""
+         if not importlib.util.find_spec("langfuse"):
+             raise ImportError("LangFuse is not installed. Install it with: pip install econagents[langfuse]")
+
+     def _get_langfuse_client(self) -> Any:
+         """Get or create a LangFuse client."""
+         if self._langfuse_client is None:
+             try:
+                 from langfuse import Langfuse
+
+                 self._langfuse_client = Langfuse()
+             except ImportError:
+                 logger.warning("LangFuse is not available.")
+                 return None
+         return self._langfuse_client
+
+     def track_llm_call(
+         self,
+         name: str,
+         model: str,
+         messages: List[Dict[str, Any]],
+         response: Any,
+         metadata: Optional[Dict[str, Any]] = None,
+     ) -> None:
+         """Track an LLM call using LangFuse generation.
+
+         Args:
+             name: Name of the operation.
+             model: Model used for the call.
+             messages: Messages sent to the model.
+             response: Response from the model.
+             metadata: Additional metadata for the call.
+         """
+         client = self._get_langfuse_client()
+         if client is None:
+             return
+
+         try:
+             # Create a generation in Langfuse
+             trace = client.trace(name=name, metadata={"model": model, **metadata} if metadata else {}, input=messages)
+             generation = trace.generation(
+                 name=name + "_generation",
+                 model=model,
+                 model_parameters=metadata.get("model_parameters", {}) if metadata else {},
+                 input=messages,
+                 metadata=metadata or {},
+             )
+
+             # Get response content in appropriate format
+             output_content = response
+             if hasattr(response, "choices") and response.choices:
+                 if hasattr(response.choices[0], "message") and hasattr(response.choices[0].message, "content"):
+                     output_content = response.choices[0].message.content
+             elif isinstance(response, dict) and "message" in response and "content" in response["message"]:
+                 output_content = response["message"]["content"]
+
+             # Update generation and set end time
+             generation.end(output=output_content)
+             trace.update(output=output_content)
+
+             # Flush to ensure all requests are sent
+             client.flush()
+         except Exception as e:
+             logger.warning(f"Failed to track LLM call with LangFuse: {e}")
+
+
+ def get_observability_provider(provider_name: str = "noop") -> ObservabilityProvider:
+     """Get an observability provider by name.
+
+     Args:
+         provider_name: The name of the provider to get.
+             Options: "noop", "langsmith", "langfuse"
+
+     Returns:
+         An observability provider.
+
+     Raises:
+         ValueError: If the provider_name is invalid.
+     """
+     if provider_name == "noop":
+         return NoOpObservability()
+     elif provider_name == "langsmith":
+         try:
+             return LangSmithObservability()
+         except ImportError as e:
+             logger.warning(f"Failed to initialize LangSmith: {e}")
+             logger.warning("Falling back to NoOpObservability")
+             return NoOpObservability()
+     elif provider_name == "langfuse":
+         try:
+             return LangFuseObservability()
+         except ImportError as e:
+             logger.warning(f"Failed to initialize LangFuse: {e}")
+             logger.warning("Falling back to NoOpObservability")
+             return NoOpObservability()
+     else:
+         raise ValueError(f"Invalid observability provider: {provider_name}")
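`get_observability_provider` degrades gracefully: a missing langsmith/langfuse install logs a warning and returns the no-op provider, and only an unknown name raises. A sketch of calling it directly (the message and response payloads are made up):

```python
from econagents.llm import get_observability_provider

provider = get_observability_provider("langsmith")  # NoOpObservability if langsmith is absent
provider.track_llm_call(
    name="smoke_test",
    model="gpt-4o",
    messages=[{"role": "user", "content": "hello"}],
    response={"message": {"content": "hi"}},  # fabricated example payload
    metadata={"phase": 1},
)
```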
econagents-0.0.2/econagents/llm/ollama.py
@@ -0,0 +1,77 @@
+ import importlib.util
+ import json
+ import logging
+ from typing import Any, Dict, List, Optional
+
+ from econagents.llm.base import BaseLLM
+
+ logger = logging.getLogger(__name__)
+
+
+ class ChatOllama(BaseLLM):
+     """A wrapper for LLM queries using Ollama."""
+
+     def __init__(
+         self,
+         model_name: str,
+         host: Optional[str] = None,
+     ) -> None:
+         """Initialize the Ollama LLM interface.
+
+         Args:
+             model_name: The model name to use.
+             host: The host for the Ollama API (e.g., "http://localhost:11434").
+         """
+         self._check_ollama_available()
+         self.model_name = model_name
+         self.host = host
+
+     def _check_ollama_available(self) -> None:
+         """Check if Ollama is available."""
+         if not importlib.util.find_spec("ollama"):
+             raise ImportError("Ollama is not installed. Install it with: pip install econagents[ollama]")
+
+     async def get_response(
+         self,
+         messages: List[Dict[str, Any]],
+         tracing_extra: Dict[str, Any],
+         **kwargs: Any,
+     ) -> str:
+         """Get a response from the LLM.
+
+         Args:
+             messages: The messages for the LLM.
+             tracing_extra: The extra tracing information.
+             **kwargs: Additional arguments to pass to the LLM.
+
+         Returns:
+             The response from the LLM.
+
+         Raises:
+             ImportError: If Ollama is not installed.
+         """
+         try:
+             from ollama import AsyncClient
+
+             client = AsyncClient(host=self.host)
+
+             response = await client.chat(
+                 model=self.model_name,
+                 messages=messages,
+                 **kwargs,
+             )
+
+             # End the LLM run
+             self.observability.track_llm_call(
+                 name="ollama_chat_completion",
+                 model=self.model_name,
+                 messages=messages,
+                 response=response,
+                 metadata=tracing_extra,
+             )
+
+             return response["message"]["content"]
+
+         except ImportError as e:
+             logger.error(f"Failed to import Ollama: {e}")
+             raise ImportError("Ollama is not installed. Install it with: pip install econagents[ollama]") from e
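A usage sketch for the new Ollama client (assumes the `ollama` extra is installed and a local Ollama server is serving the hypothetical `llama3` model):

```python
import asyncio

from econagents.llm.ollama import ChatOllama


async def main() -> None:
    llm = ChatOllama(model_name="llama3", host="http://localhost:11434")
    messages = llm.build_messages(
        system_prompt="You are a bidder in an auction.",
        user_prompt="State your bid as a number.",
    )
    print(await llm.get_response(messages, tracing_extra={"agent_id": "1"}))


asyncio.run(main())
```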
econagents-0.0.2/econagents/llm/openai.py
@@ -0,0 +1,77 @@
+ import importlib.util
+ import logging
+ from typing import Any, Optional
+
+ from econagents.llm.base import BaseLLM
+
+ logger = logging.getLogger(__name__)
+
+
+ class ChatOpenAI(BaseLLM):
+     """A wrapper for LLM queries using OpenAI."""
+
+     def __init__(
+         self,
+         model_name: str = "gpt-4o",
+         api_key: Optional[str] = None,
+     ) -> None:
+         """Initialize the OpenAI LLM interface.
+
+         Args:
+             model_name: The model name to use.
+             api_key: The API key to use for authentication.
+         """
+         self.model_name = model_name
+         self.api_key = api_key
+         self._check_openai_available()
+
+     def _check_openai_available(self) -> None:
+         """Check if OpenAI is available."""
+         if not importlib.util.find_spec("openai"):
+             raise ImportError("OpenAI is not installed. Install it with: pip install econagents[openai]")
+
+     async def get_response(
+         self,
+         messages: list[dict[str, Any]],
+         tracing_extra: dict[str, Any],
+         **kwargs: Any,
+     ) -> str:
+         """Get a response from the LLM.
+
+         Args:
+             messages: The messages for the LLM.
+             tracing_extra: The extra tracing information.
+             **kwargs: Additional arguments to pass to the LLM.
+
+         Returns:
+             The response from the LLM.
+
+         Raises:
+             ImportError: If OpenAI is not installed.
+         """
+         try:
+             from openai import AsyncOpenAI
+
+             client = AsyncOpenAI(api_key=self.api_key)
+
+             # Create OpenAI completion
+             response = await client.chat.completions.create(
+                 model=self.model_name,
+                 messages=messages,  # type: ignore
+                 response_format={"type": "json_object"},
+                 **kwargs,
+             )
+
+             # Track the LLM call using the observability provider
+             self.observability.track_llm_call(
+                 name="openai_chat_completion",
+                 model=self.model_name,
+                 messages=messages,
+                 response=response,
+                 metadata=tracing_extra,
+             )
+
+             return response.choices[0].message.content
+         except ImportError as e:
+             logger.error(f"Failed to import OpenAI: {e}")
+             raise ImportError("OpenAI is not installed. Install it with: pip install econagents[openai]") from e
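The OpenAI client hard-codes `response_format={"type": "json_object"}`, and the OpenAI API requires prompts in JSON mode to mention JSON explicitly. A usage sketch, assuming the `openai` extra is installed and `OPENAI_API_KEY` is set in the environment:

```python
import asyncio

from econagents.llm.openai import ChatOpenAI


async def main() -> None:
    llm = ChatOpenAI(model_name="gpt-4o")  # api_key=None falls back to OPENAI_API_KEY
    messages = llm.build_messages(
        system_prompt="You are a trader. Always answer in JSON.",  # JSON mode needs "JSON" in the prompt
        user_prompt='Return {"action": "buy" or "sell", "quantity": an integer}.',
    )
    print(await llm.get_response(messages, tracing_extra={"agent_id": "1"}))


asyncio.run(main())
```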
{econagents-0.0.1 → econagents-0.0.2}/pyproject.toml
@@ -1,5 +1,5 @@
  [build-system]
- requires = ["poetry-core>=1.0.0", "poetry-dynamic-versioning>=1.0.1"]
+ requires = ["poetry-core>=1.0.0", "poetry-dynamic-versioning>=1.0.0,<2.0.0"]
  build-backend = "poetry_dynamic_versioning.backend"

  [tool.poetry-dynamic-versioning]
@@ -7,37 +7,59 @@ enable = false
  vcs = "git"
  style = "semver"

- [tool.poetry]
+ [project]
  name = "econagents"
- version = "0.0.1" # Do not change, let poetry-dynamic-versioning handle it.
+ license = { file = "LICENSE" }
+ description = "econagents is a Python library that lets you use LLM agents in economic experiments. The framework connects LLM agents to game servers through WebSockets and provides a flexible architecture for designing, customizing, and running economic simulations."
+ readme = "README.md"
  homepage = "https://github.com/iwanalabs/econagents"
  repository = "https://github.com/iwanalabs/econagents"
- license = "Apache-2.0"
- description = ""
- authors = ["Dylan"]
- readme = "README.md"
+ dynamic = ["version"]
+
+ [tool.poetry]
+ name = "econagents"
+ version = "0.0.2" # Do not change, let poetry-dynamic-versioning handle it.
  packages = [{include = "econagents"}]
  include = ["econagents/*.so", "econagents/*.pyd"] # Compiled extensions

  [tool.poetry.build]
  generate-setup-file = false

- [tool.poetry.scripts]
-
  [tool.poetry.dependencies]
  # Be as loose as possible if writing a library.
  python = ">=3.10,<3.13"
- typing-extensions = "^4.12.2"
- openai = "^1.65.5"
- langsmith = "^0.3.13"
- numpy = "^2.2.3"
- websockets = "^15.0"
  pydantic = "^2.10.6"
  requests = "^2.32.3"
+ websockets = "^15.0"
+
+ # Optional dependencies
+ [tool.poetry.group.openai]
+ optional = true
+
+ [tool.poetry.group.openai.dependencies]
+ openai = "^1.68.2"
+
+ [tool.poetry.group.ollama]
+ optional = true
+
+ [tool.poetry.group.ollama.dependencies]
+ ollama = "^0.1.9"
+
+ [tool.poetry.group.langsmith]
+ optional = true
+
+ [tool.poetry.group.langsmith.dependencies]
+ langsmith = "^0.3.19"
+
+ [tool.poetry.group.langfuse]
+ optional = true
+
+ [tool.poetry.group.langfuse.dependencies]
+ langfuse = "^2.60.2"

  [tool.poetry.group.docs.dependencies]
  myst-parser = {extras = ["linkify"], version = "^4.0.1"}
- sphinx = "~7.4.7"
+ sphinx = "~8.1.3"
  sphinx_rtd_theme = "~3.0.2"
  gitpython = ">=3.1.31"
  sphinx-copybutton = "^0.5.2"
@@ -52,7 +74,7 @@ pytest-mock = ">=3.7.0"
  python-dotenv = "^1.0.1"
  jupyter = "^1.1.1"
  nest-asyncio = "^1.6.0"
- ruff = "^0.9.7"
+ ruff = "^0.11.2"
  types-requests = "^2.32.0.20250306"
  pytest-asyncio = "^0.25.3"
@@ -151,3 +173,11 @@ ignore = [

  [tool.codespell]
  skip = 'poetry.lock,'
+
+ [tool.poetry.extras]
+ openai = ["openai"]
+ ollama = ["ollama"]
+ langsmith = ["langsmith"]
+ langfuse = ["langfuse"]
+ default = ["openai", "langsmith"]
+ all = ["openai", "ollama", "langsmith", "langfuse"]
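With the extras above, installs can stay minimal: `pip install econagents` pulls only pydantic, requests, and websockets; `pip install "econagents[openai]"` adds the OpenAI client; `pip install "econagents[default]"` restores the 0.0.1 combination of OpenAI plus LangSmith; and `pip install "econagents[all]"` enables every provider.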
econagents-0.0.1/econagents/llm/__init__.py
@@ -1,3 +0,0 @@
- from econagents.llm.openai import ChatOpenAI
-
- __all__: list[str] = ["ChatOpenAI"]
econagents-0.0.1/econagents/llm/openai.py
@@ -1,61 +0,0 @@
- from typing import Any, Optional
-
- from langsmith import traceable
- from langsmith.wrappers import wrap_openai
- from openai import AsyncOpenAI
-
-
- class ChatOpenAI:
-     """
-     A simple wrapper for LLM queries, e.g. using OpenAI and LangSmith.
-     """
-
-     def __init__(
-         self,
-         model_name: str = "gpt-4o",
-         api_key: Optional[str] = None,
-     ) -> None:
-         """Initialize the LLM interface."""
-         self.model_name = model_name
-         self.api_key = api_key
-
-     def build_messages(self, system_prompt: str, user_prompt: str):
-         """Build messages for the LLM.
-
-         Args:
-             system_prompt (str): The system prompt for the LLM.
-             user_prompt (str): The user prompt for the LLM.
-
-         Returns:
-             list[dict[str, Any]]: The messages for the LLM.
-         """
-         return [
-             {"role": "system", "content": system_prompt},
-             {"role": "user", "content": user_prompt},
-         ]
-
-     @traceable
-     async def get_response(
-         self,
-         messages: list[dict[str, Any]],
-         tracing_extra: dict[str, Any],
-         **kwargs: Any,
-     ):
-         """Get a response from the LLM.
-
-         Args:
-             messages (list[dict[str, Any]]): The messages for the LLM.
-             tracing_extra (dict[str, Any]): The extra tracing information.
-
-         Returns:
-             str: The response from the LLM.
-         """
-         client = wrap_openai(AsyncOpenAI(api_key=self.api_key))
-         response = await client.chat.completions.create(
-             messages=messages,  # type: ignore
-             model=self.model_name,
-             response_format={"type": "json_object"},
-             langsmith_extra=tracing_extra,
-             **kwargs,
-         )
-         return response.choices[0].message.content