econagents 0.0.1__py3-none-any.whl → 0.0.2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- econagents/__init__.py +4 -2
- econagents/core/agent_role.py +6 -6
- econagents/core/game_runner.py +17 -3
- econagents/core/manager/phase.py +34 -57
- econagents/llm/__init__.py +19 -2
- econagents/llm/base.py +62 -0
- econagents/llm/observability.py +282 -0
- econagents/llm/ollama.py +77 -0
- econagents/llm/openai.py +52 -36
- econagents-0.0.2.dist-info/LICENSE +21 -0
- {econagents-0.0.1.dist-info → econagents-0.0.2.dist-info}/METADATA +38 -15
- econagents-0.0.2.dist-info/RECORD +25 -0
- {econagents-0.0.1.dist-info → econagents-0.0.2.dist-info}/WHEEL +1 -1
- econagents-0.0.1.dist-info/RECORD +0 -21
econagents/__init__.py
CHANGED
@@ -1,17 +1,18 @@
 """
-econagents: A Python library
+econagents: A Python library that lets you use LLM agents in economic experiments.
 """
 
 from econagents.core.agent_role import AgentRole
 from econagents.core.game_runner import GameRunner, HybridGameRunnerConfig, TurnBasedGameRunnerConfig
 from econagents.core.manager import AgentManager
+from econagents.core.transport import WebSocketTransport
 from econagents.core.manager.phase import PhaseManager, HybridPhaseManager, TurnBasedPhaseManager
 from econagents.core.state.fields import EventField
 from econagents.core.state.game import GameState, MetaInformation, PrivateInformation, PublicInformation
 from econagents.llm.openai import ChatOpenAI
 
 # Don't manually change, let poetry-dynamic-versioning handle it.
-__version__ = "0.0.1"
+__version__ = "0.0.2"
 
 __all__: list[str] = [
     "AgentRole",
@@ -28,4 +29,5 @@ __all__: list[str] = [
     "TurnBasedGameRunnerConfig",
     "HybridGameRunnerConfig",
     "EventField",
+    "WebSocketTransport",
 ]
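The package root now re-exports `WebSocketTransport` alongside the existing names. A minimal sketch of importing the 0.0.2 public API (names taken from the `__all__` list above; this does nothing game-specific):

```python
# Smoke-test the top-level exports shown in the __init__.py diff above.
from econagents import (
    AgentRole,
    GameRunner,
    TurnBasedGameRunnerConfig,
    TurnBasedPhaseManager,
    WebSocketTransport,  # new top-level export in 0.0.2
)

print(AgentRole, GameRunner, TurnBasedGameRunnerConfig, TurnBasedPhaseManager, WebSocketTransport)
```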
econagents/core/agent_role.py
CHANGED
@@ -9,7 +9,7 @@ from jinja2.sandbox import SandboxedEnvironment
 
 from econagents.core.logging_mixin import LoggerMixin
 from econagents.core.state.game import GameStateProtocol
-from econagents.llm.
+from econagents.llm.base import BaseLLM
 
 StateT_contra = TypeVar("StateT_contra", bound=GameStateProtocol, contravariant=True)
 
@@ -17,7 +17,7 @@ StateT_contra = TypeVar("StateT_contra", bound=GameStateProtocol, contravariant=
 class AgentProtocol(Protocol):
     role: ClassVar[int]
     name: ClassVar[str]
-    llm:
+    llm: BaseLLM
     task_phases: ClassVar[list[int]]
 
 
@@ -41,12 +41,12 @@ class AgentRole(ABC, Generic[StateT_contra], LoggerMixin):
     """Unique identifier for this role"""
     name: ClassVar[str]
     """Human-readable name for this role"""
-    llm:
+    llm: BaseLLM
     """Language model instance for generating responses"""
     task_phases: ClassVar[list[int]] = []  # Empty list means no specific phases are required
     """List of phases this agent should participate in (empty means all phases)"""
     task_phases_excluded: ClassVar[list[int]] = []  # Empty list means no phases are excluded
-
+    """Alternative way to specify phases this agent should participate in, listed phases are excluded (empty means nothing excluded)"""
     # Regex patterns for method name extraction
     _SYSTEM_PROMPT_PATTERN: ClassVar[Pattern] = re.compile(r"get_phase_(\d+)_system_prompt")
     _USER_PROMPT_PATTERN: ClassVar[Pattern] = re.compile(r"get_phase_(\d+)_user_prompt")
@@ -109,9 +109,9 @@ class AgentRole(ABC, Generic[StateT_contra], LoggerMixin):
 
         Template resolution order:
 
-        1.
+        1. Role-specific phase prompt (e.g., "role_name_system_phase_1.jinja2")
 
-        2.
+        2. Role-specific general prompt (e.g., "role_name_system.jinja2")
 
         3. All-role phase prompt (e.g., "all_system_phase_1.jinja2")
 
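The attributes in this diff (`role`, `name`, `llm`, `task_phases`) are what a concrete role declares. A hypothetical sketch, assuming the `openai` extra is installed; the role id, phase numbers, and the "negotiator" name are illustrative, not part of the package:

```python
# Hypothetical role definition; only the attribute names come from the AgentRole diff above.
from econagents import AgentRole
from econagents.llm.openai import ChatOpenAI


class Negotiator(AgentRole):
    role = 1                     # unique integer id for this role (illustrative)
    name = "negotiator"          # resolves prompts such as "negotiator_system_phase_1.jinja2"
    llm = ChatOpenAI(model_name="gpt-4o")
    task_phases = [1, 3]         # act only in phases 1 and 3; an empty list means all phases
```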
econagents/core/game_runner.py
CHANGED
@@ -4,14 +4,14 @@ import queue
 from contextvars import ContextVar
 from logging.handlers import QueueHandler, QueueListener
 from pathlib import Path
-from typing import Optional, Type
+from typing import Literal, Optional, Type
 
 from pydantic import BaseModel, Field
 
-from econagents.core.manager.base import AgentManager
 from econagents.core.manager.phase import PhaseManager
 from econagents.core.state.game import GameState
 from econagents.core.transport import AuthenticationMechanism, SimpleLoginPayloadAuth
+from econagents.llm.observability import get_observability_provider
 
 ctx_agent_id: ContextVar[str] = ContextVar("agent_id", default="N/A")
 
@@ -59,6 +59,10 @@ class GameRunnerConfig(BaseModel):
     state_class: Optional[Type[GameState]] = None
     """Class to use for the state"""
 
+    # Observability configuration
+    observability_provider: Optional[Literal["langsmith", "langfuse"]] = None
+    """Name of the observability provider to use. Options: 'langsmith' or 'langfuse'"""
+
 
 class TurnBasedGameRunnerConfig(GameRunnerConfig):
     """Configuration class for TurnBasedGameRunner."""
@@ -299,12 +303,22 @@ class GameRunner:
         agent_manager.auth_mechanism = self.config.auth_mechanism
         agent_manager.logger.debug(f"Injected default auth mechanism: {agent_manager.auth_mechanism}")
 
+        if agent_manager.llm_provider and self.config.observability_provider:
+            try:
+                provider = get_observability_provider(self.config.observability_provider)
+                agent_manager.llm_provider.observability = provider
+                agent_manager.logger.debug(
+                    f"Injected {self.config.observability_provider} observability provider into LLM provider"
+                )
+            except Exception as e:
+                agent_manager.logger.error(f"Failed to initialize observability provider: {e}")
+
         if isinstance(self.config, HybridGameRunnerConfig):
             agent_manager.continuous_phases = set(self.config.continuous_phases)
             agent_manager.min_action_delay = self.config.min_action_delay
             agent_manager.max_action_delay = self.config.max_action_delay
             agent_manager.logger.debug(
-                f"Injected default continuous phases: {agent_manager.continuous_phases}, min action delay: {agent_manager.min_action_delay}, max action delay: {agent_manager.max_action_delay}"
+                f"Injected default continuous-time phases: {agent_manager.continuous_phases}, min action delay: {agent_manager.min_action_delay}, max action delay: {agent_manager.max_action_delay}"
             )
 
     def _inject_agent_logger(self, agent_manager: PhaseManager, agent_id: int) -> None:
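The new injection step above only needs a config whose `observability_provider` is set and an agent manager whose role exposes an LLM. A minimal sketch of the provider lookup the runner performs; which backend actually gets used depends on the installed extras:

```python
# Mirrors the injected block above: look up a provider by name.
from econagents.llm.observability import get_observability_provider

provider = get_observability_provider("langfuse")
# Prints LangFuseObservability, or NoOpObservability if the langfuse extra is missing.
print(type(provider).__name__)
```

Attaching it is a plain attribute assignment on the LLM instance (`llm.observability = provider`), which is exactly what the runner does through the manager's new `llm_provider` property.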
econagents/core/manager/phase.py
CHANGED
@@ -17,16 +17,14 @@ class PhaseManager(AgentManager, ABC):
     """
     Abstract manager that handles the concept of 'phases' in a game.
 
-    This manager standardizes the interface for phase-based games with
-
+    This manager standardizes the interface for phase-based games with optional
+    continuous-time phase handling.
 
     Features:
     1. Standardized interface for starting a phase
 
     2. Optional continuous "tick loop" for phases
 
-    3. Hooks for "on phase start," "on phase end," and "on phase transition event"
-
     All configuration parameters can be:
 
     1. Provided at initialization time
@@ -38,8 +36,8 @@ class PhaseManager(AgentManager, ABC):
         phase_transition_event (Optional[str]): Event name for phase transitions
         phase_identifier_key (Optional[str]): Key in the event data that identifies the phase
         continuous_phases (Optional[set[int]]): set of phase numbers that should be treated as continuous
-        min_action_delay (Optional[int]): Minimum delay in seconds between actions in continuous phases
-        max_action_delay (Optional[int]): Maximum delay in seconds between actions in continuous phases
+        min_action_delay (Optional[int]): Minimum delay in seconds between actions in continuous-time phases
+        max_action_delay (Optional[int]): Maximum delay in seconds between actions in continuous-time phases
         state (Optional[GameState]): Game state object to track game state
         agent_role (Optional[AgentRole]): Agent role instance to handle game phases
         auth_mechanism (Optional[AuthenticationMechanism]): Authentication mechanism to use
@@ -145,12 +143,12 @@ class PhaseManager(AgentManager, ABC):
 
     @property
     def continuous_phases(self) -> set[int]:
-        """Get the set of continuous phases."""
+        """Get the set of continuous-time phases."""
         return self._continuous_phases  # type: ignore
 
     @continuous_phases.setter
     def continuous_phases(self, value: set[int]):
-        """Set the continuous phases."""
+        """Set the continuous-time phases."""
         self._continuous_phases = value
 
     @property
@@ -183,6 +181,13 @@ class PhaseManager(AgentManager, ABC):
         """Set the prompts directory."""
         self._prompts_dir = value
 
+    @property
+    def llm_provider(self):
+        """Get the LLM provider from the agent role."""
+        if self._agent_role and hasattr(self._agent_role, "llm"):
+            return self._agent_role.llm
+        return None
+
     async def start(self):
         """Start the manager."""
         # TODO: is there a better place to do this?
@@ -220,38 +225,27 @@
         Handle a phase transition.
 
         This method is the main orchestrator for phase transitions:
-        1. If leaving a continuous phase, stops the continuous task
-        2.
-        3.
-        4.
-        5. Starts a continuous task if entering a continuous phase
-        6. Executes a single action if entering a non-continuous phase
+        1. If leaving a continuous-time phase, stops the continuous task
+        2. Updates the current phase
+        3. Starts a continuous task if entering a continuous-time phase
+        4. Executes a single action if entering a non-continuous-time phase
 
         Args:
             new_phase (Optional[int]): The new phase number
         """
         self.logger.info(f"Transitioning to phase {new_phase}")
 
-        # If we were in a continuous phase, stop it
+        # If we were in a continuous-time phase, stop it
         if self.in_continuous_phase and new_phase != self.current_phase:
-            self.logger.info(f"Stopping continuous phase {self.current_phase}")
+            self.logger.info(f"Stopping continuous-time phase {self.current_phase}")
             self.in_continuous_phase = False
             if self._continuous_task:
                 self._continuous_task.cancel()
                 self._continuous_task = None
 
-        # Call the on_phase_end hook for the old phase
-        old_phase = self.current_phase
-        if old_phase is not None:
-            await self.on_phase_end(old_phase)
-
-        # Update current phase
         self.current_phase = new_phase
 
         if new_phase is not None:
-            # Call the on_phase_start hook for the new phase
-            await self.on_phase_start(new_phase)
-
             # If the new phase is continuous, start a continuous task
             if self.continuous_phases and new_phase in self.continuous_phases:
                 self.in_continuous_phase = True
@@ -260,12 +254,12 @@
                 # Execute an initial action
                 await self.execute_phase_action(new_phase)
             else:
-                # Execute a single action for non-continuous phases
+                # Execute a single action for non-continuous-time phases
                 await self.execute_phase_action(new_phase)
 
     async def _continuous_phase_loop(self, phase: int):
         """
-        Run a loop that periodically executes actions for a continuous phase.
+        Run a loop that periodically executes actions for a continuous-time phase.
 
         Args:
             phase (int): The phase number
@@ -277,16 +271,16 @@
                 self.logger.debug(f"Waiting {delay} seconds before next action in phase {phase}")
                 await asyncio.sleep(delay)
 
-                # Check if we're still in the same continuous phase
+                # Check if we're still in the same continuous-time phase
                 if not self.in_continuous_phase or self.current_phase != phase:
                     break
 
                 # Execute the action
                 await self.execute_phase_action(phase)
         except asyncio.CancelledError:
-            self.logger.info(f"Continuous phase {phase} loop cancelled")
+            self.logger.info(f"Continuous-time phase {phase} loop cancelled")
         except Exception as e:
-            self.logger.exception(f"Error in continuous phase {phase} loop: {e}")
+            self.logger.exception(f"Error in continuous-time phase {phase} loop: {e}")
 
     @abstractmethod
     async def execute_phase_action(self, phase: int):
@@ -301,30 +295,8 @@
         """
         pass
 
-    async def on_phase_start(self, phase: int):
-        """
-        Hook that is called when a phase starts.
-
-        Subclasses can override this to implement custom behavior.
-
-        Args:
-            phase (int): The phase number
-        """
-        pass
-
-    async def on_phase_end(self, phase: int):
-        """
-        Hook that is called when a phase ends.
-
-        Subclasses can override this to implement custom behavior.
-
-        Args:
-            phase (int): The phase number
-        """
-        pass
-
     async def stop(self):
-        """Stop the manager and cancel any continuous phase tasks."""
+        """Stop the manager and cancel any continuous-time phase tasks."""
         self.in_continuous_phase = False
         if self._continuous_task:
             self._continuous_task.cancel()
@@ -337,7 +309,8 @@ class TurnBasedPhaseManager(PhaseManager):
     A manager for turn-based games that handles phase transitions.
 
     This manager inherits from PhaseManager and provides a concrete implementation
-    for executing actions in each phase.
+    for executing actions in each phase. All phases are treated as turn-based,
+    meaning actions are only taken when explicitly triggered (no continuous actions).
 
     Args:
         url (Optional[str]): WebSocket server URL
@@ -416,7 +389,11 @@ class HybridPhaseManager(PhaseManager):
    A manager for games that combine turn-based and continuous action phases.
 
     This manager extends PhaseManager and configures it with specific phases
-    that should be treated as continuous.
+    that should be treated as continuous. By default, all phases are treated as
+    turn-based unless explicitly included in the continuous_phases parameter.
+
+    For continuous-time phases, the manager will automatically execute actions periodically
+    with random delays between min_action_delay and max_action_delay seconds.
 
     Args:
         continuous_phases (Optional[set[int]]): Set of phase numbers that should be treated as continuous
@@ -425,8 +402,8 @@
         auth_mechanism_kwargs (Optional[dict[str, Any]]): Keyword arguments for the authentication mechanism
         phase_transition_event (Optional[str]): Event name for phase transitions
         phase_identifier_key (Optional[str]): Key in the event data that identifies the phase
-        min_action_delay (Optional[int]): Minimum delay in seconds between actions in continuous phases
-        max_action_delay (Optional[int]): Maximum delay in seconds between actions in continuous phases
+        min_action_delay (Optional[int]): Minimum delay in seconds between actions in continuous-time phases
+        max_action_delay (Optional[int]): Maximum delay in seconds between actions in continuous-time phases
         state (Optional[GameState]): Game state object to track game state
         agent_role (Optional[AgentRole]): Agent role instance to handle game phases
         logger (Optional[logging.Logger]): Logger instance for tracking events
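Because the `on_phase_start`/`on_phase_end` hooks were removed in 0.0.2, any per-phase setup now has to live in `execute_phase_action`. A hedged sketch of that migration, assuming a subclass of `TurnBasedPhaseManager`; constructor arguments are omitted because they depend on your game server:

```python
# Sketch only: shows where per-phase logic goes now that the start/end hooks are gone.
from econagents.core.manager.phase import TurnBasedPhaseManager


class MyGameManager(TurnBasedPhaseManager):
    async def execute_phase_action(self, phase: int) -> None:
        if phase == 1:
            self.logger.info("Phase 1 started")  # logic that previously lived in on_phase_start
        await super().execute_phase_action(phase)
```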
econagents/llm/__init__.py
CHANGED
@@ -1,3 +1,20 @@
-from econagents.llm.
+from econagents.llm.base import BaseLLM, LLMProvider
+from econagents.llm.observability import ObservabilityProvider, get_observability_provider
 
-
+# Import specific implementations if available
+try:
+    from econagents.llm.openai import ChatOpenAI
+except ImportError:
+    pass
+
+try:
+    from econagents.llm.ollama import ChatOllama
+except ImportError:
+    pass
+
+__all__: list[str] = [
+    "BaseLLM",
+    "LLMProvider",
+    "ObservabilityProvider",
+    "get_observability_provider",
+]
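The backend classes are imported defensively here, so downstream code should not assume both are usable. A small sketch of feature-detecting a backend before constructing it, using the same `importlib.util.find_spec` check the backend modules themselves rely on; the `ChatBackend` alias is illustrative:

```python
import importlib.util

# Pick a backend based on which optional dependency is actually installed.
if importlib.util.find_spec("openai"):
    from econagents.llm.openai import ChatOpenAI as ChatBackend
elif importlib.util.find_spec("ollama"):
    from econagents.llm.ollama import ChatOllama as ChatBackend
else:
    raise RuntimeError("Install one of the LLM extras, e.g. pip install econagents[openai]")
```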
econagents/llm/base.py
ADDED
@@ -0,0 +1,62 @@
+from abc import ABC, abstractmethod
+from typing import Any, Protocol, runtime_checkable
+
+from econagents.llm.observability import ObservabilityProvider, get_observability_provider
+
+
+@runtime_checkable
+class LLMProvider(Protocol):
+    """Protocol for LLM providers."""
+
+    async def get_response(
+        self,
+        messages: list[dict[str, Any]],
+        tracing_extra: dict[str, Any],
+        **kwargs: Any,
+    ) -> str:
+        """Get a response from the LLM."""
+        ...
+
+    def build_messages(self, system_prompt: str, user_prompt: str) -> list[dict[str, Any]]:
+        """Build messages for the LLM."""
+        ...
+
+
+class BaseLLM(ABC):
+    """Base class for LLM implementations."""
+
+    observability: ObservabilityProvider = get_observability_provider("noop")
+
+    def build_messages(self, system_prompt: str, user_prompt: str) -> list[dict[str, Any]]:
+        """Build messages for the LLM.
+
+        Args:
+            system_prompt: The system prompt for the LLM.
+            user_prompt: The user prompt for the LLM.
+
+        Returns:
+            The messages for the LLM.
+        """
+        return [
+            {"role": "system", "content": system_prompt},
+            {"role": "user", "content": user_prompt},
+        ]
+
+    @abstractmethod
+    async def get_response(
+        self,
+        messages: list[dict[str, Any]],
+        tracing_extra: dict[str, Any],
+        **kwargs: Any,
+    ) -> str:
+        """Get a response from the LLM.
+
+        Args:
+            messages: The messages for the LLM.
+            tracing_extra: The extra tracing information.
+            **kwargs: Additional arguments to pass to the LLM.
+
+        Returns:
+            The response from the LLM.
+        """
+        ...
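`BaseLLM` only requires an async `get_response`; `build_messages` and a default no-op `observability` provider come for free. A toy subclass that is runnable without any LLM backend installed:

```python
import asyncio
from typing import Any

from econagents.llm.base import BaseLLM


class EchoLLM(BaseLLM):
    """Toy backend: returns the user prompt instead of calling a real model."""

    async def get_response(
        self,
        messages: list[dict[str, Any]],
        tracing_extra: dict[str, Any],
        **kwargs: Any,
    ) -> str:
        # The default observability provider is the no-op one, so this call does nothing.
        self.observability.track_llm_call(
            name="echo_completion", model="echo", messages=messages, response=messages[-1]
        )
        return messages[-1]["content"]


async def main() -> None:
    llm = EchoLLM()
    messages = llm.build_messages("You are a test agent.", "Say hello.")
    print(await llm.get_response(messages, tracing_extra={"phase": 1}))


asyncio.run(main())
```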
econagents/llm/observability.py
ADDED
@@ -0,0 +1,282 @@
+"""Observability interfaces for LLM providers."""
+
+import importlib.util
+import logging
+from abc import ABC, abstractmethod
+from typing import Any, Dict, List, Optional
+
+logger = logging.getLogger(__name__)
+
+
+class ObservabilityProvider(ABC):
+    """Base class for observability providers."""
+
+    @abstractmethod
+    def track_llm_call(
+        self,
+        name: str,
+        model: str,
+        messages: List[Dict[str, Any]],
+        response: Any,
+        metadata: Optional[Dict[str, Any]] = None,
+    ) -> None:
+        """Track an LLM call directly without creating a run tree.
+
+        Args:
+            name: Name of the operation.
+            model: Model used for the call.
+            messages: Messages sent to the model.
+            response: Response from the model.
+            metadata: Additional metadata for the call.
+        """
+        ...
+
+
+class NoOpObservability(ObservabilityProvider):
+    """No-op observability provider that does nothing."""
+
+    def track_llm_call(
+        self,
+        name: str,
+        model: str,
+        messages: List[Dict[str, Any]],
+        response: Any,
+        metadata: Optional[Dict[str, Any]] = None,
+    ) -> None:
+        """No-op implementation of track_llm_call."""
+        pass
+
+
+class LangSmithObservability(ObservabilityProvider):
+    """LangSmith observability provider."""
+
+    def __init__(self) -> None:
+        """Initialize the LangSmith observability provider."""
+        self._check_langsmith_available()
+
+    def _check_langsmith_available(self) -> None:
+        """Check if LangSmith is available."""
+        if not importlib.util.find_spec("langsmith"):
+            raise ImportError("LangSmith is not installed. Install it with: pip install econagents[langsmith]")
+
+    def _create_run_tree(
+        self,
+        name: str,
+        run_type: str,
+        inputs: Dict[str, Any],
+    ) -> Any:
+        """Create a LangSmith run tree.
+
+        Args:
+            name: Name of the run.
+            run_type: Type of the run (e.g., "chain", "llm").
+            inputs: Inputs for the run.
+
+        Returns:
+            A LangSmith RunTree object.
+        """
+        try:
+            from langsmith.run_trees import RunTree
+
+            run_tree = RunTree(name=name, run_type=run_type, inputs=inputs)
+            run_tree.post()
+            return run_tree
+        except ImportError:
+            logger.warning("LangSmith is not available. Using no-op run tree.")
+            return {"name": name, "run_type": run_type, "inputs": inputs}
+
+    def _create_child_run(
+        self,
+        parent_run: Any,
+        name: str,
+        run_type: str,
+        inputs: Dict[str, Any],
+    ) -> Any:
+        """Create a child run in LangSmith.
+
+        Args:
+            parent_run: Parent RunTree object.
+            name: Name of the child run.
+            run_type: Type of the child run.
+            inputs: Inputs for the child run.
+
+        Returns:
+            A child RunTree object.
+        """
+        try:
+            child_run = parent_run.create_child(
+                name=name,
+                run_type=run_type,
+                inputs=inputs,
+            )
+            child_run.post()
+            return child_run
+        except (ImportError, AttributeError):
+            logger.warning("LangSmith create_child failed. Using no-op child run.")
+            return {"name": name, "run_type": run_type, "inputs": inputs, "parent": parent_run}
+
+    def _end_run(
+        self,
+        run: Any,
+        outputs: Dict[str, Any],
+    ) -> None:
+        """End a LangSmith run with outputs.
+
+        Args:
+            run: RunTree object to end.
+            outputs: Outputs of the run.
+        """
+        try:
+            run.end(outputs=outputs)
+            run.patch()
+        except (ImportError, AttributeError) as e:
+            logger.warning(f"LangSmith end_run failed: {e}")
+
+    def track_llm_call(
+        self,
+        name: str,
+        model: str,
+        messages: List[Dict[str, Any]],
+        response: Any,
+        metadata: Optional[Dict[str, Any]] = None,
+    ) -> None:
+        """Track an LLM call using LangSmith RunTree.
+
+        Args:
+            name: Name of the operation.
+            model: Model used for the call.
+            messages: Messages sent to the model.
+            response: Response from the model.
+            metadata: Additional metadata for the call.
+        """
+        try:
+            # Create a top-level run
+            run_tree = self._create_run_tree(
+                name=name, run_type="chain", inputs={"messages": messages, "metadata": metadata or {}}
+            )
+
+            # Create LLM child run
+            child_run = self._create_child_run(
+                parent_run=run_tree, name=f"{model} Call", run_type="llm", inputs={"messages": messages}
+            )
+
+            # End the runs
+            self._end_run(child_run, outputs=response)
+
+            # Get the content from the response if it's in the expected format
+            output_content = None
+            if hasattr(response, "choices") and response.choices:
+                if hasattr(response.choices[0], "message") and hasattr(response.choices[0].message, "content"):
+                    output_content = response.choices[0].message.content
+
+            self._end_run(run_tree, outputs={"response": output_content or response})
+        except Exception as e:
+            logger.warning(f"Failed to track LLM call with LangSmith: {e}")
+
+
+class LangFuseObservability(ObservabilityProvider):
+    """LangFuse observability provider."""
+
+    def __init__(self) -> None:
+        """Initialize the LangFuse observability provider."""
+        self._check_langfuse_available()
+        self._langfuse_client = None
+
+    def _check_langfuse_available(self) -> None:
+        """Check if LangFuse is available."""
+        if not importlib.util.find_spec("langfuse"):
+            raise ImportError("LangFuse is not installed. Install it with: pip install econagents[langfuse]")
+
+    def _get_langfuse_client(self) -> Any:
+        """Get or create a LangFuse client."""
+        if self._langfuse_client is None:
+            try:
+                from langfuse import Langfuse
+
+                self._langfuse_client = Langfuse()
+            except ImportError:
+                logger.warning("LangFuse is not available.")
+                return None
+        return self._langfuse_client
+
+    def track_llm_call(
+        self,
+        name: str,
+        model: str,
+        messages: List[Dict[str, Any]],
+        response: Any,
+        metadata: Optional[Dict[str, Any]] = None,
+    ) -> None:
+        """Track an LLM call using LangFuse generation.
+
+        Args:
+            name: Name of the operation.
+            model: Model used for the call.
+            messages: Messages sent to the model.
+            response: Response from the model.
+            metadata: Additional metadata for the call.
+        """
+        client = self._get_langfuse_client()
+        if client is None:
+            return
+
+        try:
+            # Create a generation in Langfuse
+            trace = client.trace(name=name, metadata={"model": model, **metadata} if metadata else {}, input=messages)
+            generation = trace.generation(
+                name=name + "_generation",
+                model=model,
+                model_parameters=metadata.get("model_parameters", {}) if metadata else {},
+                input=messages,
+                metadata=metadata or {},
+            )
+
+            # Get response content in appropriate format
+            output_content = response
+            if hasattr(response, "choices") and response.choices:
+                if hasattr(response.choices[0], "message") and hasattr(response.choices[0].message, "content"):
+                    output_content = response.choices[0].message.content
+            elif isinstance(response, dict) and "message" in response and "content" in response["message"]:
+                output_content = response["message"]["content"]
+
+            # Update generation and set end time
+            generation.end(output=output_content)
+            trace.update(output=output_content)
+
+            # Flush to ensure all requests are sent
+            client.flush()
+        except Exception as e:
+            logger.warning(f"Failed to track LLM call with LangFuse: {e}")
+
+
+def get_observability_provider(provider_name: str = "noop") -> ObservabilityProvider:
+    """Get an observability provider by name.
+
+    Args:
+        provider_name: The name of the provider to get.
+            Options: "noop", "langsmith", "langfuse"
+
+    Returns:
+        An observability provider.
+
+    Raises:
+        ValueError: If the provider_name is invalid.
+    """
+    if provider_name == "noop":
+        return NoOpObservability()
+    elif provider_name == "langsmith":
+        try:
+            return LangSmithObservability()
+        except ImportError as e:
+            logger.warning(f"Failed to initialize LangSmith: {e}")
+            logger.warning("Falling back to NoOpObservability")
+            return NoOpObservability()
+    elif provider_name == "langfuse":
+        try:
+            return LangFuseObservability()
+        except ImportError as e:
+            logger.warning(f"Failed to initialize LangFuse: {e}")
+            logger.warning("Falling back to NoOpObservability")
+            return NoOpObservability()
+    else:
+        raise ValueError(f"Invalid observability provider: {provider_name}")
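Custom sinks are possible too: `get_observability_provider` only knows the three built-in names, but any `ObservabilityProvider` subclass can be assigned to an LLM's `observability` attribute directly. A toy provider that just logs locally, following the abstract signature above:

```python
import logging
from typing import Any, Dict, List, Optional

from econagents.llm.observability import ObservabilityProvider


class LoggingObservability(ObservabilityProvider):
    """Toy provider: records each LLM call with the standard logging module."""

    def track_llm_call(
        self,
        name: str,
        model: str,
        messages: List[Dict[str, Any]],
        response: Any,
        metadata: Optional[Dict[str, Any]] = None,
    ) -> None:
        logging.getLogger("econagents.demo").info(
            "%s on %s: %d message(s), metadata=%s", name, model, len(messages), metadata
        )
```

Attach it with `llm.observability = LoggingObservability()`; the built-in providers are wired up the same way by `GameRunner` when `observability_provider` is set.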
econagents/llm/ollama.py
ADDED
@@ -0,0 +1,77 @@
+import importlib.util
+import json
+import logging
+from typing import Any, Dict, List, Optional
+
+from econagents.llm.base import BaseLLM
+
+logger = logging.getLogger(__name__)
+
+
+class ChatOllama(BaseLLM):
+    """A wrapper for LLM queries using Ollama."""
+
+    def __init__(
+        self,
+        model_name: str,
+        host: Optional[str] = None,
+    ) -> None:
+        """Initialize the Ollama LLM interface.
+
+        Args:
+            model_name: The model name to use.
+            host: The host for the Ollama API (e.g., "http://localhost:11434").
+        """
+        self._check_ollama_available()
+        self.model_name = model_name
+        self.host = host
+
+    def _check_ollama_available(self) -> None:
+        """Check if Ollama is available."""
+        if not importlib.util.find_spec("ollama"):
+            raise ImportError("Ollama is not installed. Install it with: pip install econagents[ollama]")
+
+    async def get_response(
+        self,
+        messages: List[Dict[str, Any]],
+        tracing_extra: Dict[str, Any],
+        **kwargs: Any,
+    ) -> str:
+        """Get a response from the LLM.
+
+        Args:
+            messages: The messages for the LLM.
+            tracing_extra: The extra tracing information.
+            **kwargs: Additional arguments to pass to the LLM.
+
+        Returns:
+            The response from the LLM.
+
+        Raises:
+            ImportError: If Ollama is not installed.
+        """
+        try:
+            from ollama import AsyncClient
+
+            client = AsyncClient(host=self.host)
+
+            response = await client.chat(
+                model=self.model_name,
+                messages=messages,
+                **kwargs,
+            )
+
+            # End the LLM run
+            self.observability.track_llm_call(
+                name="ollama_chat_completion",
+                model=self.model_name,
+                messages=messages,
+                response=response,
+                metadata=tracing_extra,
+            )
+
+            return response["message"]["content"]
+
+        except ImportError as e:
+            logger.error(f"Failed to import Ollama: {e}")
+            raise ImportError("Ollama is not installed. Install it with: pip install econagents[ollama]") from e
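A usage sketch for the new Ollama backend. It assumes the `ollama` extra is installed, an Ollama server is reachable at the given host, and a model named `llama3` has been pulled; all three are assumptions, not requirements of econagents itself:

```python
import asyncio

from econagents.llm.ollama import ChatOllama


async def main() -> None:
    # Host and model name are illustrative; ChatOllama passes them straight to ollama.AsyncClient.
    llm = ChatOllama(model_name="llama3", host="http://localhost:11434")
    messages = llm.build_messages("You are a bargaining agent.", "Reply with one short greeting.")
    print(await llm.get_response(messages, tracing_extra={"agent_id": "demo"}))


asyncio.run(main())
```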
econagents/llm/openai.py
CHANGED
@@ -1,61 +1,77 @@
+import importlib.util
+import logging
 from typing import Any, Optional
 
-from
-from langsmith.wrappers import wrap_openai
-from openai import AsyncOpenAI
+from econagents.llm.base import BaseLLM
 
+logger = logging.getLogger(__name__)
 
-
-
-A
-"""
+
+class ChatOpenAI(BaseLLM):
+    """A wrapper for LLM queries using OpenAI."""
 
     def __init__(
         self,
         model_name: str = "gpt-4o",
         api_key: Optional[str] = None,
     ) -> None:
-        """Initialize the LLM interface.
-        self.model_name = model_name
-        self.api_key = api_key
-
-    def build_messages(self, system_prompt: str, user_prompt: str):
-        """Build messages for the LLM.
+        """Initialize the OpenAI LLM interface.
 
         Args:
-
-
-
-        Returns:
-            list[dict[str, Any]]: The messages for the LLM.
+            model_name: The model name to use.
+            api_key: The API key to use for authentication.
         """
-
-
-
-
+        self.model_name = model_name
+        self.api_key = api_key
+        self._check_openai_available()
+
+    def _check_openai_available(self) -> None:
+        """Check if OpenAI is available."""
+        if not importlib.util.find_spec("openai"):
+            raise ImportError("OpenAI is not installed. Install it with: pip install econagents[openai]")
 
-    @traceable
     async def get_response(
         self,
         messages: list[dict[str, Any]],
         tracing_extra: dict[str, Any],
         **kwargs: Any,
-    ):
+    ) -> str:
         """Get a response from the LLM.
 
         Args:
-            messages
-            tracing_extra
+            messages: The messages for the LLM.
+            tracing_extra: The extra tracing information.
+            **kwargs: Additional arguments to pass to the LLM.
 
         Returns:
-
+            The response from the LLM.
+
+        Raises:
+            ImportError: If OpenAI is not installed.
         """
-
-
-
-
-
-
-
-
-
+        try:
+            from openai import AsyncOpenAI
+
+            client = AsyncOpenAI(api_key=self.api_key)
+
+            # Create OpenAI completion
+            response = await client.chat.completions.create(
+                model=self.model_name,
+                messages=messages,  # type: ignore
+                response_format={"type": "json_object"},
+                **kwargs,
+            )
+
+            # Track the LLM call using the observability provider
+            self.observability.track_llm_call(
+                name="openai_chat_completion",
+                model=self.model_name,
+                messages=messages,
+                response=response,
+                metadata=tracing_extra,
+            )
+
+            return response.choices[0].message.content
+        except ImportError as e:
+            logger.error(f"Failed to import OpenAI: {e}")
+            raise ImportError("OpenAI is not installed. Install it with: pip install econagents[openai]") from e
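A usage sketch for the reworked OpenAI backend, assuming the `openai` extra is installed and `OPENAI_API_KEY` is set (or `api_key=` is passed). Because the completion is created with `response_format={"type": "json_object"}` above, the prompts should explicitly ask for JSON output:

```python
import asyncio

from econagents.llm.openai import ChatOpenAI


async def main() -> None:
    llm = ChatOpenAI(model_name="gpt-4o")  # the OpenAI client reads OPENAI_API_KEY when api_key is not given
    messages = llm.build_messages(
        "You are a trading agent. Always answer in JSON.",
        'Return {"action": "buy"} or {"action": "sell"}.',
    )
    print(await llm.get_response(messages, tracing_extra={"phase": 2}))


asyncio.run(main())
```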
econagents-0.0.2.dist-info/LICENSE
ADDED
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) Delft University of Technology
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
{econagents-0.0.1.dist-info → econagents-0.0.2.dist-info}/METADATA
CHANGED
@@ -1,24 +1,43 @@
 Metadata-Version: 2.3
 Name: econagents
-Version: 0.0.1
-Summary:
-License:
-
+Version: 0.0.2
+Summary: econagents is a Python library that lets you use LLM agents in economic experiments. The framework connects LLM agents to game servers through WebSockets and provides a flexible architecture for designing, customizing, and running economic simulations.
+License: MIT License
+
+Copyright (c) Delft University of Technology
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
 Requires-Python: >=3.10,<3.13
-Classifier: License ::
+Classifier: License :: Other/Proprietary License
 Classifier: Programming Language :: Python :: 3
 Classifier: Programming Language :: Python :: 3.10
 Classifier: Programming Language :: Python :: 3.11
 Classifier: Programming Language :: Python :: 3.12
-
-
-
+Provides-Extra: all
+Provides-Extra: default
+Provides-Extra: langfuse
+Provides-Extra: langsmith
+Provides-Extra: ollama
+Provides-Extra: openai
 Requires-Dist: pydantic (>=2.10.6,<3.0.0)
 Requires-Dist: requests (>=2.32.3,<3.0.0)
-Requires-Dist: typing-extensions (>=4.12.2,<5.0.0)
 Requires-Dist: websockets (>=15.0,<16.0)
-Project-URL: Homepage, https://github.com/iwanalabs/econagents
-Project-URL: Repository, https://github.com/iwanalabs/econagents
 Description-Content-Type: text/markdown
 
 <div align="center">
@@ -61,13 +80,15 @@ econagents consists of four key components:
 
 ## Example Experiments
 
-The repository includes
+The repository includes three example games:
 
 1. **`prisoner`**: An iterated Prisoner's Dilemma game with 5 rounds and 2 LLM agents.
-2. **`
-3. **`
+2. **`ibex_tudelft/harberger`**: A Harberger Tax simulation with LLM agents.
+3. **`ibex_tudelft/futarchy`**: A Futarchy simulation with LLM agents.
 
-### Running the Prisoner's Dilemma
+### Running the Prisoner's Dilemma game
+
+The simplest game to run is a version of the repeated prisoner's dilemma game that runs on your local machine.
 
 ```shell
 # Run the server
@@ -77,6 +98,8 @@ python examples/server/prisoner/server.py
 python examples/prisoner/run_game.py
 ```
 
+Note: you still have to set up the connection to the agents.
+
 ## Key Features
 
 - **Flexible Agent Customization**: Customize agent behavior with Jinja templates or custom Python methods
econagents-0.0.2.dist-info/RECORD
ADDED
@@ -0,0 +1,25 @@
+econagents/__init__.py,sha256=dpcjNajSFFw_fMaRymVtzGFR8ihi9AT3dQ-Zr6Q81RU,1111
+econagents/_c_extension.pyi,sha256=evVvDNUCGqyMPrNViPF7QXfGUNNIMbUdY5HemRNQ1_o,113
+econagents/core/__init__.py,sha256=QZoOp6n5CX1j-Ob6PZgyCNY78vi2kWmd_LVLrJUj1TU,393
+econagents/core/agent_role.py,sha256=XQmq4oVJnyDRyyr3MU-D29nNsaT8b5iIo18IdyNHG1Y,15337
+econagents/core/events.py,sha256=hx-Ru_NoSISuN--7ZFC3CIql5hry3AATSnHZJJv3Kds,294
+econagents/core/game_runner.py,sha256=ILBxtb4ysiT1npqB50GHOICe3XoA9Y-cChUsTWTRkKA,13655
+econagents/core/logging_mixin.py,sha256=tYsRc5ngW-hzfElrb838KO-9-BGOPyUv2v5LLuJToBE,1421
+econagents/core/manager/__init__.py,sha256=bDpCQlFcw_E-js575X3Xl6iwZ1uILC18An1vt6oE7S4,284
+econagents/core/manager/base.py,sha256=IMGkyCrghHlkJnkQLGUVfUSSr7sqZsKHYsmgf4UtGlI,16186
+econagents/core/manager/phase.py,sha256=OPfY0YbvXxUeYuV6h7Ifnj-GRpkNZ3t9FUc31t-mX5M,19292
+econagents/core/state/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+econagents/core/state/fields.py,sha256=YxVOqdriaHRHoyeXsIB8ZDHygneMJD1OikOyeILK_oA,1854
+econagents/core/state/game.py,sha256=Ux0s7WhOxu0aFvwgX_LM0Aiho-aa1N3yh1ManwWBRP4,8681
+econagents/core/state/market.py,sha256=Jg-X9mYH6B3cYOwxzjFDV5PDbCIYxipx2UN4ecfyyDE,3909
+econagents/core/transport.py,sha256=7eq31nb2KY67RuL5i2kxJrcGtwfcVm5qy0eVj4_xWQw,5063
+econagents/llm/__init__.py,sha256=J1PqpG3wL41oBAfzp5QaaP2ekwxCM_tiRq4H0OeSv4w,482
+econagents/llm/base.py,sha256=TFZhot8ffW-2JAOxDxeCG8V5Nn56ulSjCzJwSBopKg0,1726
+econagents/llm/observability.py,sha256=WSkJ7lZZl2FVpDWpFivudoeOENR_lyLZscUisveAi6k,9467
+econagents/llm/ollama.py,sha256=0ElnQhyCCAOu6u-TIC2JeNzh8OV6_VZjchFpi1F3HxU,2242
+econagents/llm/openai.py,sha256=x1L9GS2DXNvPTECXP6DVMwr9fuhcHMZh_2k8ds30i-Y,2395
+econagents/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+econagents-0.0.2.dist-info/LICENSE,sha256=Bd4MAEaMayyDO5BncOl3q0T2b6sWcjABigXLRFxgKIU,1082
+econagents-0.0.2.dist-info/METADATA,sha256=auotxhLfk21vxzEns0AMcGzxRLPdHS-i122vGuJKIgg,4921
+econagents-0.0.2.dist-info/WHEEL,sha256=fGIA9gx4Qxk2KDKeNJCbOEwSrmLtjWCwzBz351GyrPQ,88
+econagents-0.0.2.dist-info/RECORD,,
econagents-0.0.1.dist-info/RECORD
REMOVED
@@ -1,21 +0,0 @@
-econagents/__init__.py,sha256=7oAI7W8akjmKDPArgmCjA7SDpWqvI7eLPbGeBRTiEzc,1050
-econagents/_c_extension.pyi,sha256=evVvDNUCGqyMPrNViPF7QXfGUNNIMbUdY5HemRNQ1_o,113
-econagents/core/__init__.py,sha256=QZoOp6n5CX1j-Ob6PZgyCNY78vi2kWmd_LVLrJUj1TU,393
-econagents/core/agent_role.py,sha256=viT7V6U9AmwDplgr4xGIVQvm3vLMtIFpGaMcGKM4oWE,15216
-econagents/core/events.py,sha256=hx-Ru_NoSISuN--7ZFC3CIql5hry3AATSnHZJJv3Kds,294
-econagents/core/game_runner.py,sha256=CToHQWIWQ1dwFLWRBxgFGlNecOjtlQUx3YbuS4cfOFQ,12869
-econagents/core/logging_mixin.py,sha256=tYsRc5ngW-hzfElrb838KO-9-BGOPyUv2v5LLuJToBE,1421
-econagents/core/manager/__init__.py,sha256=bDpCQlFcw_E-js575X3Xl6iwZ1uILC18An1vt6oE7S4,284
-econagents/core/manager/base.py,sha256=IMGkyCrghHlkJnkQLGUVfUSSr7sqZsKHYsmgf4UtGlI,16186
-econagents/core/manager/phase.py,sha256=M7s7jyA99BESXC1V9VNBuAOcvVncGoN5lF1nK8dh0Eo,19632
-econagents/core/state/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-econagents/core/state/fields.py,sha256=YxVOqdriaHRHoyeXsIB8ZDHygneMJD1OikOyeILK_oA,1854
-econagents/core/state/game.py,sha256=Ux0s7WhOxu0aFvwgX_LM0Aiho-aa1N3yh1ManwWBRP4,8681
-econagents/core/state/market.py,sha256=Jg-X9mYH6B3cYOwxzjFDV5PDbCIYxipx2UN4ecfyyDE,3909
-econagents/core/transport.py,sha256=7eq31nb2KY67RuL5i2kxJrcGtwfcVm5qy0eVj4_xWQw,5063
-econagents/llm/__init__.py,sha256=-tgv6qf77EdceWENIX6pDWXxu2AumhuUCjLiv4FmGKk,82
-econagents/llm/openai.py,sha256=1w8nHr8Ge2aEzO8lEsxKO3tUgLPEaGhYLwYu12653GY,1782
-econagents/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-econagents-0.0.1.dist-info/METADATA,sha256=N9v2yhoCGJ_2JdYq-lZQzoE7QKtMHqDamEDVAQVjAno,3431
-econagents-0.0.1.dist-info/WHEEL,sha256=XbeZDeTWKc1w7CSIyre5aMDU_-PohRwTQceYnisIYYY,88
-econagents-0.0.1.dist-info/RECORD,,