prediction-market-agent-tooling 0.55.2__tar.gz → 0.56.0.dev113__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {prediction_market_agent_tooling-0.55.2 → prediction_market_agent_tooling-0.56.0.dev113}/PKG-INFO +2 -1
- prediction_market_agent_tooling-0.56.0.dev113/prediction_market_agent_tooling/tools/caches/db_cache.py +334 -0
- {prediction_market_agent_tooling-0.55.2 → prediction_market_agent_tooling-0.56.0.dev113}/prediction_market_agent_tooling/tools/google.py +3 -2
- {prediction_market_agent_tooling-0.55.2 → prediction_market_agent_tooling-0.56.0.dev113}/prediction_market_agent_tooling/tools/is_invalid.py +2 -2
- {prediction_market_agent_tooling-0.55.2 → prediction_market_agent_tooling-0.56.0.dev113}/prediction_market_agent_tooling/tools/is_predictable.py +3 -3
- {prediction_market_agent_tooling-0.55.2 → prediction_market_agent_tooling-0.56.0.dev113}/prediction_market_agent_tooling/tools/relevant_news_analysis/relevant_news_analysis.py +6 -10
- prediction_market_agent_tooling-0.56.0.dev113/prediction_market_agent_tooling/tools/tavily/tavily_models.py +18 -0
- {prediction_market_agent_tooling-0.55.2 → prediction_market_agent_tooling-0.56.0.dev113}/prediction_market_agent_tooling/tools/tavily/tavily_search.py +12 -44
- {prediction_market_agent_tooling-0.55.2 → prediction_market_agent_tooling-0.56.0.dev113}/pyproject.toml +2 -1
- prediction_market_agent_tooling-0.55.2/prediction_market_agent_tooling/tools/tavily/tavily_models.py +0 -84
- prediction_market_agent_tooling-0.55.2/prediction_market_agent_tooling/tools/tavily/tavily_storage.py +0 -105
- {prediction_market_agent_tooling-0.55.2 → prediction_market_agent_tooling-0.56.0.dev113}/LICENSE +0 -0
- {prediction_market_agent_tooling-0.55.2 → prediction_market_agent_tooling-0.56.0.dev113}/README.md +0 -0
- {prediction_market_agent_tooling-0.55.2 → prediction_market_agent_tooling-0.56.0.dev113}/prediction_market_agent_tooling/abis/debuggingcontract.abi.json +0 -0
- {prediction_market_agent_tooling-0.55.2 → prediction_market_agent_tooling-0.56.0.dev113}/prediction_market_agent_tooling/abis/depositablewrapper_erc20.abi.json +0 -0
- {prediction_market_agent_tooling-0.55.2 → prediction_market_agent_tooling-0.56.0.dev113}/prediction_market_agent_tooling/abis/erc20.abi.json +0 -0
- {prediction_market_agent_tooling-0.55.2 → prediction_market_agent_tooling-0.56.0.dev113}/prediction_market_agent_tooling/abis/erc4626.abi.json +0 -0
- {prediction_market_agent_tooling-0.55.2 → prediction_market_agent_tooling-0.56.0.dev113}/prediction_market_agent_tooling/abis/omen_agentresultmapping.abi.json +0 -0
- {prediction_market_agent_tooling-0.55.2 → prediction_market_agent_tooling-0.56.0.dev113}/prediction_market_agent_tooling/abis/omen_dxdao.abi.json +0 -0
- {prediction_market_agent_tooling-0.55.2 → prediction_market_agent_tooling-0.56.0.dev113}/prediction_market_agent_tooling/abis/omen_fpmm.abi.json +0 -0
- {prediction_market_agent_tooling-0.55.2 → prediction_market_agent_tooling-0.56.0.dev113}/prediction_market_agent_tooling/abis/omen_fpmm_conditionaltokens.abi.json +0 -0
- {prediction_market_agent_tooling-0.55.2 → prediction_market_agent_tooling-0.56.0.dev113}/prediction_market_agent_tooling/abis/omen_fpmm_factory.abi.json +0 -0
- {prediction_market_agent_tooling-0.55.2 → prediction_market_agent_tooling-0.56.0.dev113}/prediction_market_agent_tooling/abis/omen_kleros.abi.json +0 -0
- {prediction_market_agent_tooling-0.55.2 → prediction_market_agent_tooling-0.56.0.dev113}/prediction_market_agent_tooling/abis/omen_oracle.abi.json +0 -0
- {prediction_market_agent_tooling-0.55.2 → prediction_market_agent_tooling-0.56.0.dev113}/prediction_market_agent_tooling/abis/omen_realitio.abi.json +0 -0
- {prediction_market_agent_tooling-0.55.2 → prediction_market_agent_tooling-0.56.0.dev113}/prediction_market_agent_tooling/abis/omen_thumbnailmapping.abi.json +0 -0
- {prediction_market_agent_tooling-0.55.2 → prediction_market_agent_tooling-0.56.0.dev113}/prediction_market_agent_tooling/abis/proxy.abi.json +0 -0
- {prediction_market_agent_tooling-0.55.2 → prediction_market_agent_tooling-0.56.0.dev113}/prediction_market_agent_tooling/benchmark/__init__.py +0 -0
- {prediction_market_agent_tooling-0.55.2 → prediction_market_agent_tooling-0.56.0.dev113}/prediction_market_agent_tooling/benchmark/agents.py +0 -0
- {prediction_market_agent_tooling-0.55.2 → prediction_market_agent_tooling-0.56.0.dev113}/prediction_market_agent_tooling/benchmark/benchmark.py +0 -0
- {prediction_market_agent_tooling-0.55.2 → prediction_market_agent_tooling-0.56.0.dev113}/prediction_market_agent_tooling/benchmark/utils.py +0 -0
- {prediction_market_agent_tooling-0.55.2 → prediction_market_agent_tooling-0.56.0.dev113}/prediction_market_agent_tooling/config.py +0 -0
- {prediction_market_agent_tooling-0.55.2 → prediction_market_agent_tooling-0.56.0.dev113}/prediction_market_agent_tooling/deploy/agent.py +0 -0
- {prediction_market_agent_tooling-0.55.2 → prediction_market_agent_tooling-0.56.0.dev113}/prediction_market_agent_tooling/deploy/agent_example.py +0 -0
- {prediction_market_agent_tooling-0.55.2 → prediction_market_agent_tooling-0.56.0.dev113}/prediction_market_agent_tooling/deploy/betting_strategy.py +0 -0
- {prediction_market_agent_tooling-0.55.2 → prediction_market_agent_tooling-0.56.0.dev113}/prediction_market_agent_tooling/deploy/constants.py +0 -0
- {prediction_market_agent_tooling-0.55.2 → prediction_market_agent_tooling-0.56.0.dev113}/prediction_market_agent_tooling/deploy/gcp/deploy.py +0 -0
- {prediction_market_agent_tooling-0.55.2 → prediction_market_agent_tooling-0.56.0.dev113}/prediction_market_agent_tooling/deploy/gcp/kubernetes_models.py +0 -0
- {prediction_market_agent_tooling-0.55.2 → prediction_market_agent_tooling-0.56.0.dev113}/prediction_market_agent_tooling/deploy/gcp/utils.py +0 -0
- {prediction_market_agent_tooling-0.55.2 → prediction_market_agent_tooling-0.56.0.dev113}/prediction_market_agent_tooling/deploy/trade_interval.py +0 -0
- {prediction_market_agent_tooling-0.55.2 → prediction_market_agent_tooling-0.56.0.dev113}/prediction_market_agent_tooling/gtypes.py +0 -0
- {prediction_market_agent_tooling-0.55.2 → prediction_market_agent_tooling-0.56.0.dev113}/prediction_market_agent_tooling/jobs/__init__.py +0 -0
- {prediction_market_agent_tooling-0.55.2 → prediction_market_agent_tooling-0.56.0.dev113}/prediction_market_agent_tooling/jobs/jobs.py +0 -0
- {prediction_market_agent_tooling-0.55.2 → prediction_market_agent_tooling-0.56.0.dev113}/prediction_market_agent_tooling/jobs/jobs_models.py +0 -0
- {prediction_market_agent_tooling-0.55.2 → prediction_market_agent_tooling-0.56.0.dev113}/prediction_market_agent_tooling/jobs/omen/omen_jobs.py +0 -0
- {prediction_market_agent_tooling-0.55.2 → prediction_market_agent_tooling-0.56.0.dev113}/prediction_market_agent_tooling/loggers.py +0 -0
- {prediction_market_agent_tooling-0.55.2 → prediction_market_agent_tooling-0.56.0.dev113}/prediction_market_agent_tooling/markets/agent_market.py +0 -0
- {prediction_market_agent_tooling-0.55.2 → prediction_market_agent_tooling-0.56.0.dev113}/prediction_market_agent_tooling/markets/categorize.py +0 -0
- {prediction_market_agent_tooling-0.55.2 → prediction_market_agent_tooling-0.56.0.dev113}/prediction_market_agent_tooling/markets/data_models.py +0 -0
- {prediction_market_agent_tooling-0.55.2 → prediction_market_agent_tooling-0.56.0.dev113}/prediction_market_agent_tooling/markets/manifold/__init__.py +0 -0
- {prediction_market_agent_tooling-0.55.2 → prediction_market_agent_tooling-0.56.0.dev113}/prediction_market_agent_tooling/markets/manifold/api.py +0 -0
- {prediction_market_agent_tooling-0.55.2 → prediction_market_agent_tooling-0.56.0.dev113}/prediction_market_agent_tooling/markets/manifold/data_models.py +0 -0
- {prediction_market_agent_tooling-0.55.2 → prediction_market_agent_tooling-0.56.0.dev113}/prediction_market_agent_tooling/markets/manifold/manifold.py +0 -0
- {prediction_market_agent_tooling-0.55.2 → prediction_market_agent_tooling-0.56.0.dev113}/prediction_market_agent_tooling/markets/manifold/utils.py +0 -0
- {prediction_market_agent_tooling-0.55.2 → prediction_market_agent_tooling-0.56.0.dev113}/prediction_market_agent_tooling/markets/market_fees.py +0 -0
- {prediction_market_agent_tooling-0.55.2 → prediction_market_agent_tooling-0.56.0.dev113}/prediction_market_agent_tooling/markets/markets.py +0 -0
- {prediction_market_agent_tooling-0.55.2 → prediction_market_agent_tooling-0.56.0.dev113}/prediction_market_agent_tooling/markets/metaculus/api.py +0 -0
- {prediction_market_agent_tooling-0.55.2 → prediction_market_agent_tooling-0.56.0.dev113}/prediction_market_agent_tooling/markets/metaculus/data_models.py +0 -0
- {prediction_market_agent_tooling-0.55.2 → prediction_market_agent_tooling-0.56.0.dev113}/prediction_market_agent_tooling/markets/metaculus/metaculus.py +0 -0
- {prediction_market_agent_tooling-0.55.2 → prediction_market_agent_tooling-0.56.0.dev113}/prediction_market_agent_tooling/markets/omen/__init__.py +0 -0
- {prediction_market_agent_tooling-0.55.2 → prediction_market_agent_tooling-0.56.0.dev113}/prediction_market_agent_tooling/markets/omen/data_models.py +0 -0
- {prediction_market_agent_tooling-0.55.2 → prediction_market_agent_tooling-0.56.0.dev113}/prediction_market_agent_tooling/markets/omen/omen.py +0 -0
- {prediction_market_agent_tooling-0.55.2 → prediction_market_agent_tooling-0.56.0.dev113}/prediction_market_agent_tooling/markets/omen/omen_contracts.py +0 -0
- {prediction_market_agent_tooling-0.55.2 → prediction_market_agent_tooling-0.56.0.dev113}/prediction_market_agent_tooling/markets/omen/omen_resolving.py +0 -0
- {prediction_market_agent_tooling-0.55.2 → prediction_market_agent_tooling-0.56.0.dev113}/prediction_market_agent_tooling/markets/omen/omen_subgraph_handler.py +0 -0
- {prediction_market_agent_tooling-0.55.2 → prediction_market_agent_tooling-0.56.0.dev113}/prediction_market_agent_tooling/markets/polymarket/api.py +0 -0
- {prediction_market_agent_tooling-0.55.2 → prediction_market_agent_tooling-0.56.0.dev113}/prediction_market_agent_tooling/markets/polymarket/data_models.py +0 -0
- {prediction_market_agent_tooling-0.55.2 → prediction_market_agent_tooling-0.56.0.dev113}/prediction_market_agent_tooling/markets/polymarket/data_models_web.py +0 -0
- {prediction_market_agent_tooling-0.55.2 → prediction_market_agent_tooling-0.56.0.dev113}/prediction_market_agent_tooling/markets/polymarket/polymarket.py +0 -0
- {prediction_market_agent_tooling-0.55.2 → prediction_market_agent_tooling-0.56.0.dev113}/prediction_market_agent_tooling/markets/polymarket/utils.py +0 -0
- {prediction_market_agent_tooling-0.55.2 → prediction_market_agent_tooling-0.56.0.dev113}/prediction_market_agent_tooling/monitor/markets/manifold.py +0 -0
- {prediction_market_agent_tooling-0.55.2 → prediction_market_agent_tooling-0.56.0.dev113}/prediction_market_agent_tooling/monitor/markets/metaculus.py +0 -0
- {prediction_market_agent_tooling-0.55.2 → prediction_market_agent_tooling-0.56.0.dev113}/prediction_market_agent_tooling/monitor/markets/omen.py +0 -0
- {prediction_market_agent_tooling-0.55.2 → prediction_market_agent_tooling-0.56.0.dev113}/prediction_market_agent_tooling/monitor/markets/polymarket.py +0 -0
- {prediction_market_agent_tooling-0.55.2 → prediction_market_agent_tooling-0.56.0.dev113}/prediction_market_agent_tooling/monitor/monitor.py +0 -0
- {prediction_market_agent_tooling-0.55.2 → prediction_market_agent_tooling-0.56.0.dev113}/prediction_market_agent_tooling/monitor/monitor_app.py +0 -0
- {prediction_market_agent_tooling-0.55.2 → prediction_market_agent_tooling-0.56.0.dev113}/prediction_market_agent_tooling/monitor/monitor_settings.py +0 -0
- {prediction_market_agent_tooling-0.55.2 → prediction_market_agent_tooling-0.56.0.dev113}/prediction_market_agent_tooling/py.typed +0 -0
- {prediction_market_agent_tooling-0.55.2 → prediction_market_agent_tooling-0.56.0.dev113}/prediction_market_agent_tooling/tools/balances.py +0 -0
- {prediction_market_agent_tooling-0.55.2 → prediction_market_agent_tooling-0.56.0.dev113}/prediction_market_agent_tooling/tools/betting_strategies/kelly_criterion.py +0 -0
- {prediction_market_agent_tooling-0.55.2 → prediction_market_agent_tooling-0.56.0.dev113}/prediction_market_agent_tooling/tools/betting_strategies/market_moving.py +0 -0
- {prediction_market_agent_tooling-0.55.2 → prediction_market_agent_tooling-0.56.0.dev113}/prediction_market_agent_tooling/tools/betting_strategies/minimum_bet_to_win.py +0 -0
- {prediction_market_agent_tooling-0.55.2 → prediction_market_agent_tooling-0.56.0.dev113}/prediction_market_agent_tooling/tools/betting_strategies/stretch_bet_between.py +0 -0
- {prediction_market_agent_tooling-0.55.2 → prediction_market_agent_tooling-0.56.0.dev113}/prediction_market_agent_tooling/tools/betting_strategies/utils.py +0 -0
- /prediction_market_agent_tooling-0.55.2/prediction_market_agent_tooling/tools/cache.py → /prediction_market_agent_tooling-0.56.0.dev113/prediction_market_agent_tooling/tools/caches/inmemory_cache.py +0 -0
- {prediction_market_agent_tooling-0.55.2 → prediction_market_agent_tooling-0.56.0.dev113}/prediction_market_agent_tooling/tools/contract.py +0 -0
- {prediction_market_agent_tooling-0.55.2 → prediction_market_agent_tooling-0.56.0.dev113}/prediction_market_agent_tooling/tools/costs.py +0 -0
- {prediction_market_agent_tooling-0.55.2 → prediction_market_agent_tooling-0.56.0.dev113}/prediction_market_agent_tooling/tools/datetime_utc.py +0 -0
- {prediction_market_agent_tooling-0.55.2 → prediction_market_agent_tooling-0.56.0.dev113}/prediction_market_agent_tooling/tools/gnosis_rpc.py +0 -0
- {prediction_market_agent_tooling-0.55.2 → prediction_market_agent_tooling-0.56.0.dev113}/prediction_market_agent_tooling/tools/hexbytes_custom.py +0 -0
- {prediction_market_agent_tooling-0.55.2 → prediction_market_agent_tooling-0.56.0.dev113}/prediction_market_agent_tooling/tools/httpx_cached_client.py +0 -0
- {prediction_market_agent_tooling-0.55.2 → prediction_market_agent_tooling-0.56.0.dev113}/prediction_market_agent_tooling/tools/image_gen/image_gen.py +0 -0
- {prediction_market_agent_tooling-0.55.2 → prediction_market_agent_tooling-0.56.0.dev113}/prediction_market_agent_tooling/tools/image_gen/market_thumbnail_gen.py +0 -0
- {prediction_market_agent_tooling-0.55.2 → prediction_market_agent_tooling-0.56.0.dev113}/prediction_market_agent_tooling/tools/ipfs/ipfs_handler.py +0 -0
- {prediction_market_agent_tooling-0.55.2 → prediction_market_agent_tooling-0.56.0.dev113}/prediction_market_agent_tooling/tools/langfuse_.py +0 -0
- {prediction_market_agent_tooling-0.55.2 → prediction_market_agent_tooling-0.56.0.dev113}/prediction_market_agent_tooling/tools/langfuse_client_utils.py +0 -0
- {prediction_market_agent_tooling-0.55.2 → prediction_market_agent_tooling-0.56.0.dev113}/prediction_market_agent_tooling/tools/omen/reality_accuracy.py +0 -0
- {prediction_market_agent_tooling-0.55.2 → prediction_market_agent_tooling-0.56.0.dev113}/prediction_market_agent_tooling/tools/parallelism.py +0 -0
- {prediction_market_agent_tooling-0.55.2 → prediction_market_agent_tooling-0.56.0.dev113}/prediction_market_agent_tooling/tools/relevant_news_analysis/data_models.py +0 -0
- {prediction_market_agent_tooling-0.55.2 → prediction_market_agent_tooling-0.56.0.dev113}/prediction_market_agent_tooling/tools/relevant_news_analysis/relevant_news_cache.py +0 -0
- {prediction_market_agent_tooling-0.55.2 → prediction_market_agent_tooling-0.56.0.dev113}/prediction_market_agent_tooling/tools/safe.py +0 -0
- {prediction_market_agent_tooling-0.55.2 → prediction_market_agent_tooling-0.56.0.dev113}/prediction_market_agent_tooling/tools/singleton.py +0 -0
- {prediction_market_agent_tooling-0.55.2 → prediction_market_agent_tooling-0.56.0.dev113}/prediction_market_agent_tooling/tools/streamlit_user_login.py +0 -0
- {prediction_market_agent_tooling-0.55.2 → prediction_market_agent_tooling-0.56.0.dev113}/prediction_market_agent_tooling/tools/utils.py +0 -0
- {prediction_market_agent_tooling-0.55.2 → prediction_market_agent_tooling-0.56.0.dev113}/prediction_market_agent_tooling/tools/web3_utils.py +0 -0
{prediction_market_agent_tooling-0.55.2 → prediction_market_agent_tooling-0.56.0.dev113}/PKG-INFO
RENAMED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: prediction-market-agent-tooling
-Version: 0.55.2
+Version: 0.56.0.dev113
 Summary: Tools to benchmark, deploy and monitor prediction market agents.
 Author: Gnosis
 Requires-Python: >=3.10,<3.12
@@ -36,6 +36,7 @@ Requires-Dist: psycopg2-binary (>=2.9.9,<3.0.0)
 Requires-Dist: pydantic (>=2.6.1,<3.0.0)
 Requires-Dist: pydantic-settings (>=2.4.0,<3.0.0)
 Requires-Dist: pymongo (>=4.8.0,<5.0.0)
+Requires-Dist: pytest-postgresql (>=6.1.1,<7.0.0)
 Requires-Dist: python-dateutil (>=2.9.0.post0,<3.0.0)
 Requires-Dist: safe-cli (>=1.0.0,<2.0.0)
 Requires-Dist: safe-eth-py (>=6.0.0b41,<7.0.0)

prediction_market_agent_tooling-0.56.0.dev113/prediction_market_agent_tooling/tools/caches/db_cache.py
ADDED
@@ -0,0 +1,334 @@
+import hashlib
+import inspect
+import json
+from datetime import date, timedelta
+from functools import wraps
+from typing import (
+    Any,
+    Callable,
+    Sequence,
+    TypeVar,
+    cast,
+    get_args,
+    get_origin,
+    overload,
+)
+
+from pydantic import BaseModel
+from sqlalchemy import Column
+from sqlalchemy.dialects.postgresql import JSONB
+from sqlmodel import Field, Session, SQLModel, create_engine, desc, select
+
+from prediction_market_agent_tooling.config import APIKeys
+from prediction_market_agent_tooling.loggers import logger
+from prediction_market_agent_tooling.tools.datetime_utc import DatetimeUTC
+from prediction_market_agent_tooling.tools.utils import utcnow
+
+FunctionT = TypeVar("FunctionT", bound=Callable[..., Any])
+
+
+class FunctionCache(SQLModel, table=True):
+    __tablename__ = "function_cache"
+    id: int | None = Field(default=None, primary_key=True)
+    function_name: str = Field(index=True)
+    # Args are stored to see what was the function called with.
+    args: Any = Field(sa_column=Column(JSONB, nullable=False))
+    # Args hash is stored as a fast look-up option when looking for cache hits.
+    args_hash: str = Field(index=True)
+    result: Any = Field(sa_column=Column(JSONB, nullable=False))
+    created_at: DatetimeUTC = Field(default_factory=utcnow, index=True)
+
+
+@overload
+def db_cache(
+    func: None = None,
+    *,
+    max_age: timedelta | None = None,
+    cache_none: bool = True,
+    api_keys: APIKeys | None = None,
+    ignore_args: Sequence[str] | None = None,
+    ignore_arg_types: Sequence[type] | None = None,
+) -> Callable[[FunctionT], FunctionT]:
+    ...
+
+
+@overload
+def db_cache(
+    func: FunctionT,
+    *,
+    max_age: timedelta | None = None,
+    cache_none: bool = True,
+    api_keys: APIKeys | None = None,
+    ignore_args: Sequence[str] | None = None,
+    ignore_arg_types: Sequence[type] | None = None,
+) -> FunctionT:
+    ...
+
+
+def db_cache(
+    func: FunctionT | None = None,
+    *,
+    max_age: timedelta | None = None,
+    cache_none: bool = True,
+    api_keys: APIKeys | None = None,
+    ignore_args: Sequence[str] | None = None,
+    ignore_arg_types: Sequence[type] | None = None,
+) -> FunctionT | Callable[[FunctionT], FunctionT]:
+    if func is None:
+        # Ugly Pythonic way to support this decorator as `@postgres_cache` but also `@postgres_cache(max_age=timedelta(days=3))`
+        def decorator(func: FunctionT) -> FunctionT:
+            return db_cache(
+                func,
+                max_age=max_age,
+                cache_none=cache_none,
+                api_keys=api_keys,
+                ignore_args=ignore_args,
+                ignore_arg_types=ignore_arg_types,
+            )
+
+        return decorator
+
+    api_keys = api_keys if api_keys is not None else APIKeys()
+
+    sqlalchemy_db_url = api_keys.SQLALCHEMY_DB_URL
+    if sqlalchemy_db_url is None:
+        logger.warning(
+            f"SQLALCHEMY_DB_URL not provided in the environment, skipping function caching."
+        )
+
+    engine = (
+        create_engine(
+            sqlalchemy_db_url.get_secret_value(),
+            # Use custom json serializer and deserializer, because otherwise, for example `datetime` serialization would fail.
+            json_serializer=json_serializer,
+            json_deserializer=json_deserializer,
+        )
+        if sqlalchemy_db_url is not None
+        else None
+    )
+
+    # Create table if it doesn't exist
+    if engine is not None:
+        SQLModel.metadata.create_all(engine)
+
+    @wraps(func)
+    def wrapper(*args: Any, **kwargs: Any) -> Any:
+        if not api_keys.ENABLE_CACHE:
+            return func(*args, **kwargs)
+
+        # Convert *args and **kwargs to a single dictionary, where we have names for arguments passed as args as well.
+        signature = inspect.signature(func)
+        bound_arguments = signature.bind(*args, **kwargs)
+        bound_arguments.apply_defaults()
+
+        # Convert any argument that is Pydantic model into classic dictionary, otherwise it won't be json-serializable.
+        args_dict = convert_pydantic_to_dict(bound_arguments.arguments)
+
+        # Remove `self` or `cls` if present (in case of class' methods)
+        if "self" in args_dict:
+            del args_dict["self"]
+        if "cls" in args_dict:
+            del args_dict["cls"]
+
+        # Remove ignored arguments
+        if ignore_args:
+            for arg in ignore_args:
+                if arg in args_dict:
+                    del args_dict[arg]
+
+        # Remove arguments of ignored types
+        if ignore_arg_types:
+            args_dict = {
+                k: v
+                for k, v in args_dict.items()
+                if not isinstance(v, tuple(ignore_arg_types))
+            }
+
+        # Compute a hash of the function arguments used for lookup of cached results
+        arg_string = json.dumps(args_dict, sort_keys=True, default=str)
+        args_hash = hashlib.md5(arg_string.encode()).hexdigest()
+
+        # Get the function name as concat of module and qualname, to not accidentally clash
+        function_name = func.__module__ + "." + func.__qualname__
+
+        # Determine if the function returns or contains Pydantic BaseModel(s)
+        return_type = func.__annotations__.get("return", None)
+        is_pydantic_model = False
+
+        if return_type is not None and contains_pydantic_model(return_type):
+            is_pydantic_model = True
+
+        # If postgres access was specified, try to find a hit
+        if engine is not None:
+            with Session(engine) as session:
+                # Try to get cached result
+                statement = (
+                    select(FunctionCache)
+                    .where(
+                        FunctionCache.function_name == function_name,
+                        FunctionCache.args_hash == args_hash,
+                    )
+                    .order_by(desc(FunctionCache.created_at))
+                )
+                if max_age is not None:
+                    cutoff_time = utcnow() - max_age
+                    statement = statement.where(FunctionCache.created_at >= cutoff_time)
+                cached_result = session.exec(statement).first()
+        else:
+            cached_result = None
+
+        if cached_result:
+            logger.info(
+                f"Cache hit for {function_name} with args {args_dict} and output {cached_result.result}"
+            )
+            if is_pydantic_model:
+                try:
+                    return convert_to_pydantic(return_type, cached_result.result)
+                except ValueError as e:
+                    # In case of backward-incompatible pydantic model, just treat it as cache miss, to not error out.
+                    logger.warning(
+                        f"Can not validate {cached_result=} into {return_type=} because {e=}, treating as cache miss."
+                    )
+                    cached_result = None
+            else:
+                return cached_result.result
+
+        # On cache miss, compute the result
+        computed_result = func(*args, **kwargs)
+        logger.info(
+            f"Cache miss for {function_name} with args {args_dict}, computed the output {computed_result}"
+        )
+
+        # If postgres access was specified, save it to dB.
+        if engine is not None and (cache_none or computed_result is not None):
+            # Call the original function
+            result_data = (
+                convert_pydantic_to_dict(computed_result)
+                if is_pydantic_model
+                else computed_result
+            )
+            cache_entry = FunctionCache(
+                function_name=function_name,
+                args_hash=args_hash,
+                args=args_dict,
+                result=result_data,
+                created_at=utcnow(),
+            )
+            with Session(engine) as session:
+                logger.info(f"Saving {cache_entry} into database.")
+                session.add(cache_entry)
+                session.commit()
+
+        return computed_result
+
+    return cast(FunctionT, wrapper)
+
+
+def contains_pydantic_model(tp: Any) -> bool:
+    if tp is None:
+        return False
+    origin = get_origin(tp)
+    if origin is not None:
+        return any(contains_pydantic_model(arg) for arg in get_args(tp))
+    if inspect.isclass(tp):
+        return issubclass(tp, BaseModel)
+    return False
+
+
+def json_serializer_default_fn(y: Any) -> Any:
+    if isinstance(y, DatetimeUTC):
+        return f"DatetimeUTC::{y.isoformat()}"
+    elif isinstance(y, timedelta):
+        return f"timedelta::{y.total_seconds()}"
+    elif isinstance(y, date):
+        return f"date::{y.isoformat()}"
+    raise TypeError(
+        f"Unsuported type for the default json serialize function, value is {y}."
+    )
+
+
+def json_serializer(x: Any) -> str:
+    return json.dumps(x, default=json_serializer_default_fn)
+
+
+def replace_custom_stringified_objects(obj: Any) -> Any:
+    if isinstance(obj, str):
+        if obj.startswith("DatetimeUTC::"):
+            iso_str = obj[len("DatetimeUTC::") :]
+            return DatetimeUTC.to_datetime_utc(iso_str)
+        elif obj.startswith("timedelta::"):
+            total_seconds_str = obj[len("timedelta::") :]
+            return timedelta(seconds=float(total_seconds_str))
+        elif obj.startswith("date::"):
+            iso_str = obj[len("date::") :]
+            return date.fromisoformat(iso_str)
+        else:
+            return obj
+    elif isinstance(obj, dict):
+        return {k: replace_custom_stringified_objects(v) for k, v in obj.items()}
+    elif isinstance(obj, list):
+        return [replace_custom_stringified_objects(item) for item in obj]
+    else:
+        return obj
+
+
+def json_deserializer(s: str) -> Any:
+    data = json.loads(s)
+    return replace_custom_stringified_objects(data)
+
+
+def convert_pydantic_to_dict(value: Any) -> Any:
+    if isinstance(value, BaseModel):
+        return value.model_dump()
+    elif isinstance(value, dict):
+        return {k: convert_pydantic_to_dict(v) for k, v in value.items()}
+    elif isinstance(value, (list, tuple)):
+        return type(value)(convert_pydantic_to_dict(v) for v in value)
+    elif isinstance(value, set):
+        return {convert_pydantic_to_dict(v) for v in value}
+    else:
+        return value
+
+
+def convert_to_pydantic(model: Any, data: Any) -> Any:
+    # Get the origin and arguments of the model type
+    origin = get_origin(model)
+    args = get_args(model)
+
+    # Check if the data is a dictionary
+    if isinstance(data, dict):
+        # If the model has no origin, check if it is a subclass of BaseModel
+        if origin is None:
+            if inspect.isclass(model) and issubclass(model, BaseModel):
+                # Convert the dictionary to a Pydantic model
+                return model(
+                    **{
+                        k: convert_to_pydantic(getattr(model, k, None), v)
+                        for k, v in data.items()
+                    }
+                )
+            else:
+                # If not a Pydantic model, return the data as is
+                return data
+        # If the origin is a dictionary, convert keys and values
+        elif origin is dict:
+            key_type, value_type = args
+            return {
+                convert_to_pydantic(key_type, k): convert_to_pydantic(value_type, v)
+                for k, v in data.items()
+            }
+        else:
+            # If the origin is not a dictionary, return the data as is
+            return data
+    # Check if the data is a list
+    elif isinstance(data, list):
+        # If the origin is a list, convert each item
+        if origin is list:
+            item_type = args[0]
+            return [convert_to_pydantic(item_type, item) for item in data]
+        else:
+            # If the origin is not a list, return the data as is
+            return data
+    else:
+        # If the data is neither a dictionary nor a list, return it as is
+        return data
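
The new `db_cache` decorator stores results in the `function_cache` Postgres table and is what the modules further down in this diff switch to. A minimal usage sketch, not taken from the package itself: the `Forecast` model, `get_weather_forecast`, `slow_square` and their arguments are illustrative assumptions; caching only activates when `SQLALCHEMY_DB_URL` is configured and caching is enabled in `APIKeys`, otherwise the wrapped function is simply called.

from datetime import timedelta

from pydantic import BaseModel

from prediction_market_agent_tooling.tools.caches.db_cache import db_cache


class Forecast(BaseModel):
    summary: str
    confidence: float


# Hypothetical expensive function; any function whose arguments and return value
# are JSON-serializable (Pydantic models included) can be wrapped the same way.
# `api_key` is excluded from the cache key via ignore_args.
@db_cache(max_age=timedelta(days=1), ignore_args=["api_key"])
def get_weather_forecast(city: str, api_key: str | None = None) -> Forecast:
    return Forecast(summary=f"Sunny in {city}", confidence=0.9)


# The bare form is also supported; cached entries then never expire.
@db_cache
def slow_square(x: int) -> int:
    return x * x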

{prediction_market_agent_tooling-0.55.2 → prediction_market_agent_tooling-0.56.0.dev113}/prediction_market_agent_tooling/tools/google.py
RENAMED
@@ -1,11 +1,12 @@
 import typing as t
+from datetime import timedelta
 
 import tenacity
 from googleapiclient.discovery import build
 
 from prediction_market_agent_tooling.config import APIKeys
 from prediction_market_agent_tooling.loggers import logger
-from prediction_market_agent_tooling.tools.cache import persistent_inmemory_cache
+from prediction_market_agent_tooling.tools.caches.db_cache import db_cache
 
 
 @tenacity.retry(
@@ -13,7 +14,7 @@ from prediction_market_agent_tooling.tools.cache import persistent_inmemory_cache
     stop=tenacity.stop_after_attempt(3),
     after=lambda x: logger.debug(f"search_google failed, {x.attempt_number=}."),
 )
-@persistent_inmemory_cache
+@db_cache(max_age=timedelta(days=1))
 def search_google(
     query: str | None = None,
     num: int = 3,

{prediction_market_agent_tooling-0.55.2 → prediction_market_agent_tooling-0.56.0.dev113}/prediction_market_agent_tooling/tools/is_invalid.py
RENAMED
@@ -2,7 +2,7 @@ import tenacity
 
 from prediction_market_agent_tooling.config import APIKeys
 from prediction_market_agent_tooling.loggers import logger
-from prediction_market_agent_tooling.tools.cache import persistent_inmemory_cache
+from prediction_market_agent_tooling.tools.caches.db_cache import db_cache
 from prediction_market_agent_tooling.tools.is_predictable import (
     parse_decision_yes_no_completion,
 )
@@ -54,9 +54,9 @@ Finally, write your final decision, write `decision: ` followed by either "yes i
 """
 
 
-@persistent_inmemory_cache
 @tenacity.retry(stop=tenacity.stop_after_attempt(3), wait=tenacity.wait_fixed(1))
 @observe()
+@db_cache
 def is_invalid(
     question: str,
     engine: str = "gpt-4o",

{prediction_market_agent_tooling-0.55.2 → prediction_market_agent_tooling-0.56.0.dev113}/prediction_market_agent_tooling/tools/is_predictable.py
RENAMED
@@ -2,7 +2,7 @@ import tenacity
 
 from prediction_market_agent_tooling.config import APIKeys
 from prediction_market_agent_tooling.loggers import logger
-from prediction_market_agent_tooling.tools.cache import persistent_inmemory_cache
+from prediction_market_agent_tooling.tools.caches.db_cache import db_cache
 from prediction_market_agent_tooling.tools.langfuse_ import (
     get_langfuse_langchain_config,
     observe,
@@ -76,9 +76,9 @@ Finally, write your final decision, write `decision: ` followed by either "yes i
 """
 
 
-@persistent_inmemory_cache
 @tenacity.retry(stop=tenacity.stop_after_attempt(3), wait=tenacity.wait_fixed(1))
 @observe()
+@db_cache
 def is_predictable_binary(
     question: str,
     engine: str = "gpt-4-1106-preview",
@@ -112,9 +112,9 @@ def is_predictable_binary(
     return parse_decision_yes_no_completion(question, completion)
 
 
-@persistent_inmemory_cache
 @tenacity.retry(stop=tenacity.stop_after_attempt(3), wait=tenacity.wait_fixed(1))
 @observe()
+@db_cache
 def is_predictable_without_description(
     question: str,
     description: str,

{prediction_market_agent_tooling-0.55.2 → prediction_market_agent_tooling-0.56.0.dev113}/prediction_market_agent_tooling/tools/relevant_news_analysis/relevant_news_analysis.py
RENAMED
@@ -1,4 +1,4 @@
-from datetime import
+from datetime import date, timedelta
 
 from langchain_core.output_parsers import PydanticOutputParser
 from langchain_core.prompts import PromptTemplate
@@ -20,8 +20,7 @@ from prediction_market_agent_tooling.tools.relevant_news_analysis.relevant_news_
 from prediction_market_agent_tooling.tools.tavily.tavily_search import (
     get_relevant_news_since,
 )
-from prediction_market_agent_tooling.tools.tavily.tavily_storage import TavilyStorage
-from prediction_market_agent_tooling.tools.utils import check_not_none, utcnow
+from prediction_market_agent_tooling.tools.utils import check_not_none
 
 SUMMARISE_RELEVANT_NEWS_PROMPT_TEMPLATE = """
 You are an expert news analyst, tracking stories that may affect your prediction to the outcome of a particular QUESTION.
@@ -55,7 +54,7 @@ For your analysis, you should:
 def analyse_news_relevance(
     raw_content: str,
     question: str,
-    date_of_interest:
+    date_of_interest: date,
     model: str,
     temperature: float,
 ) -> RelevantNewsAnalysis:
@@ -91,19 +90,18 @@ def analyse_news_relevance(
 def get_certified_relevant_news_since(
     question: str,
     days_ago: int,
-    tavily_storage: TavilyStorage | None = None,
 ) -> RelevantNews | None:
     """
     Get relevant news since a given date for a given question. Retrieves
     possibly relevant news from tavily, then checks that it is relevant via
     an LLM call.
     """
+    news_since = date.today() - timedelta(days=days_ago)
     results = get_relevant_news_since(
         question=question,
-
+        news_since=news_since,
         score_threshold=0.0,  # Be conservative to avoid missing relevant information
         max_results=3,  # A tradeoff between cost and quality. 3 seems to be a good balance.
-        tavily_storage=tavily_storage,
     )
 
     # Sort results by descending 'relevance score' to maximise the chance of
@@ -118,7 +116,7 @@ def get_certified_relevant_news_since(
     relevant_news_analysis = analyse_news_relevance(
         raw_content=check_not_none(result.raw_content),
         question=question,
-        date_of_interest=
+        date_of_interest=news_since,
         model="gpt-4o",  # 4o-mini isn't good enough, 1o and 1o-mini are too expensive
         temperature=0.0,
     )
@@ -140,7 +138,6 @@ def get_certified_relevant_news_since_cached(
     question: str,
     days_ago: int,
     cache: RelevantNewsResponseCache,
-    tavily_storage: TavilyStorage | None = None,
 ) -> RelevantNews | None:
     cached = cache.find(question=question, days_ago=days_ago)
 
@@ -150,7 +147,6 @@ def get_certified_relevant_news_since_cached(
     relevant_news = get_certified_relevant_news_since(
         question=question,
         days_ago=days_ago,
-        tavily_storage=tavily_storage,
     )
     cache.save(
         question=question,
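
With `TavilyStorage` removed, `get_certified_relevant_news_since` now takes only the question and the look-back window. A sketch of the updated call, assuming nothing beyond the signature shown above; the question text and window are invented for illustration:

from prediction_market_agent_tooling.tools.relevant_news_analysis.relevant_news_analysis import (
    get_certified_relevant_news_since,
)

# Illustrative question; the former `tavily_storage` argument is no longer accepted.
news = get_certified_relevant_news_since(
    question="Will a new EU AI regulation enter into force before 2026?",
    days_ago=7,
)
if news is not None:
    print(news)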

prediction_market_agent_tooling-0.56.0.dev113/prediction_market_agent_tooling/tools/tavily/tavily_models.py
ADDED
@@ -0,0 +1,18 @@
+from pydantic import BaseModel
+
+
+class TavilyResult(BaseModel):
+    title: str
+    url: str
+    content: str
+    score: float
+    raw_content: str | None
+
+
+class TavilyResponse(BaseModel):
+    query: str
+    follow_up_questions: str | None = None
+    answer: str
+    images: list[str]
+    results: list[TavilyResult]
+    response_time: float

{prediction_market_agent_tooling-0.55.2 → prediction_market_agent_tooling-0.56.0.dev113}/prediction_market_agent_tooling/tools/tavily/tavily_search.py
RENAMED
@@ -1,23 +1,25 @@
 import typing as t
+from datetime import date, timedelta
 
 import tenacity
 from tavily import TavilyClient
 
 from prediction_market_agent_tooling.config import APIKeys
+from prediction_market_agent_tooling.tools.caches.db_cache import db_cache
 from prediction_market_agent_tooling.tools.tavily.tavily_models import (
     TavilyResponse,
     TavilyResult,
 )
-from prediction_market_agent_tooling.tools.tavily.tavily_storage import TavilyStorage
 
 DEFAULT_SCORE_THRESHOLD = 0.75  # Based on some empirical testing, anything lower wasn't very relevant to the question being asked
 
 
+@db_cache(max_age=timedelta(days=1), ignore_args=["api_keys"])
 def tavily_search(
     query: str,
     search_depth: t.Literal["basic", "advanced"] = "advanced",
     topic: t.Literal["general", "news"] = "general",
-
+    news_since: date | None = None,
     max_results: int = 5,
     include_domains: t.Sequence[str] | None = None,
     exclude_domains: t.Sequence[str] | None = None,
@@ -26,34 +28,16 @@ def tavily_search(
     include_images: bool = True,
     use_cache: bool = False,
     api_keys: APIKeys | None = None,
-    tavily_storage: TavilyStorage | None = None,
 ) -> TavilyResponse:
     """
-    Wrapper around Tavily's search method that will save the response to `TavilyResponseCache`, if provided.
-
     Argument default values are different from the original method, to return everything by default, because it can be handy in the future and it doesn't increase the costs.
     """
-    if topic == "news" and
-        raise ValueError("When topic is 'news',
-    if topic == "general" and
-        raise ValueError("When topic is 'general',
+    if topic == "news" and news_since is None:
+        raise ValueError("When topic is 'news', news_since must be provided")
+    if topic == "general" and news_since is not None:
+        raise ValueError("When topic is 'general', news_since must be None")
 
-    if
-        response_parsed := tavily_storage.find(
-            query=query,
-            search_depth=search_depth,
-            topic=topic,
-            max_results=max_results,
-            days=days,
-            include_domains=include_domains,
-            exclude_domains=exclude_domains,
-            include_answer=include_answer,
-            include_raw_content=include_raw_content,
-            include_images=include_images,
-            use_cache=use_cache,
-        )
-    ):
-        return response_parsed
+    days = None if news_since is None else (date.today() - news_since).days
     response = _tavily_search(
         query=query,
         search_depth=search_depth,
@@ -69,21 +53,7 @@ def tavily_search(
         api_keys=api_keys,
     )
     response_parsed = TavilyResponse.model_validate(response)
-
-    tavily_storage.save(
-        query=query,
-        search_depth=search_depth,
-        topic=topic,
-        days=days,
-        max_results=max_results,
-        include_domains=include_domains,
-        exclude_domains=exclude_domains,
-        include_answer=include_answer,
-        include_raw_content=include_raw_content,
-        include_images=include_images,
-        use_cache=use_cache,
-        response=response_parsed,
-    )
+
     return response_parsed
 
 
@@ -131,16 +101,14 @@
 
 def get_relevant_news_since(
     question: str,
-
+    news_since: date,
     score_threshold: float = DEFAULT_SCORE_THRESHOLD,
     max_results: int = 3,
-    tavily_storage: TavilyStorage | None = None,
 ) -> list[TavilyResult]:
     news = tavily_search(
         query=question,
-
+        news_since=news_since,
         max_results=max_results,
         topic="news",
-        tavily_storage=tavily_storage,
     )
     return [r for r in news.results if r.score > score_threshold]
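
`tavily_search` and `get_relevant_news_since` now take a plain `news_since` date instead of the old `days`/`tavily_storage` pair, with response caching handled by `@db_cache`. A sketch of the new call shape, assuming valid Tavily and database credentials in the environment; the query, question and seven-day window are illustrative:

from datetime import date, timedelta

from prediction_market_agent_tooling.tools.tavily.tavily_search import (
    get_relevant_news_since,
    tavily_search,
)

week_ago = date.today() - timedelta(days=7)

# topic="news" requires news_since; topic="general" must leave it as None.
response = tavily_search(
    query="gnosis chain prediction markets",
    topic="news",
    news_since=week_ago,
)

# Helper that keeps only results scoring above the relevance threshold.
relevant = get_relevant_news_since(
    question="Will Gnosis Chain daily transactions double this year?",
    news_since=week_ago,
)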

{prediction_market_agent_tooling-0.55.2 → prediction_market_agent_tooling-0.56.0.dev113}/pyproject.toml
RENAMED
@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "prediction-market-agent-tooling"
-version = "0.55.2"
+version = "0.56.0.dev113"
 description = "Tools to benchmark, deploy and monitor prediction market agents."
 authors = ["Gnosis"]
 readme = "README.md"
@@ -52,6 +52,7 @@ python-dateutil = "^2.9.0.post0"
 types-python-dateutil = "^2.9.0.20240906"
 pinatapy-vourhey = "^0.2.0"
 hishel = "^0.0.31"
+pytest-postgresql = "^6.1.1"
 
 [tool.poetry.extras]
 openai = ["openai"]
prediction_market_agent_tooling-0.55.2/prediction_market_agent_tooling/tools/tavily/tavily_models.py
DELETED
@@ -1,84 +0,0 @@
-import typing as t
-
-from pydantic import BaseModel
-from sqlalchemy import Column
-from sqlalchemy.dialects.postgresql import JSONB
-from sqlmodel import ARRAY, Field, SQLModel, String
-
-from prediction_market_agent_tooling.tools.utils import DatetimeUTC, utcnow
-
-
-class TavilyResult(BaseModel):
-    title: str
-    url: str
-    content: str
-    score: float
-    raw_content: str | None
-
-
-class TavilyResponse(BaseModel):
-    query: str
-    follow_up_questions: str | None = None
-    answer: str
-    images: list[str]
-    results: list[TavilyResult]
-    response_time: float
-
-
-class TavilyResponseModel(SQLModel, table=True):
-    __tablename__ = "tavily_response"
-    __table_args__ = {"extend_existing": True}
-    id: int | None = Field(None, primary_key=True)
-    agent_id: str = Field(index=True, nullable=False)
-    # Parameters used to execute the search
-    query: str = Field(index=True, nullable=False)
-    search_depth: str
-    topic: str
-    days: int | None = Field(default=None, nullable=True)
-    max_results: int
-    include_domains: list[str] | None = Field(
-        None, sa_column=Column(ARRAY(String), nullable=True)
-    )
-    exclude_domains: list[str] | None = Field(
-        None, sa_column=Column(ARRAY(String), nullable=True)
-    )
-    include_answer: bool
-    include_raw_content: bool
-    include_images: bool
-    use_cache: bool
-    # Datetime at the time of search response and response from the search
-    datetime_: DatetimeUTC = Field(index=True, nullable=False)
-    response: dict[str, t.Any] = Field(sa_column=Column(JSONB, nullable=False))
-
-    @staticmethod
-    def from_model(
-        agent_id: str,
-        query: str,
-        search_depth: t.Literal["basic", "advanced"],
-        topic: t.Literal["general", "news"],
-        days: int | None,
-        max_results: int,
-        include_domains: t.Sequence[str] | None,
-        exclude_domains: t.Sequence[str] | None,
-        include_answer: bool,
-        include_raw_content: bool,
-        include_images: bool,
-        use_cache: bool,
-        response: TavilyResponse,
-    ) -> "TavilyResponseModel":
-        return TavilyResponseModel(
-            agent_id=agent_id,
-            query=query,
-            search_depth=search_depth,
-            topic=topic,
-            days=days,
-            max_results=max_results,
-            include_domains=sorted(include_domains) if include_domains else None,
-            exclude_domains=sorted(exclude_domains) if exclude_domains else None,
-            include_answer=include_answer,
-            include_raw_content=include_raw_content,
-            include_images=include_images,
-            use_cache=use_cache,
-            datetime_=utcnow(),
-            response=response.model_dump(),
-        )

prediction_market_agent_tooling-0.55.2/prediction_market_agent_tooling/tools/tavily/tavily_storage.py
DELETED
@@ -1,105 +0,0 @@
-import typing as t
-from datetime import timedelta
-
-import tenacity
-from sqlmodel import Session, SQLModel, create_engine, desc, select
-
-from prediction_market_agent_tooling.config import APIKeys
-from prediction_market_agent_tooling.loggers import logger
-from prediction_market_agent_tooling.tools.tavily.tavily_models import (
-    TavilyResponse,
-    TavilyResponseModel,
-)
-from prediction_market_agent_tooling.tools.utils import utcnow
-
-
-class TavilyStorage:
-    def __init__(self, agent_id: str, sqlalchemy_db_url: str | None = None):
-        self.agent_id = agent_id
-        self.engine = create_engine(
-            sqlalchemy_db_url
-            if sqlalchemy_db_url
-            else APIKeys().sqlalchemy_db_url.get_secret_value()
-        )
-        self._initialize_db()
-
-    def _initialize_db(self) -> None:
-        """
-        Creates the tables if they don't exist
-        """
-
-        # trick for making models import mandatory - models must be imported for metadata.create_all to work
-        logger.debug(f"tables being added {TavilyResponseModel}")
-        SQLModel.metadata.create_all(self.engine)
-
-    @tenacity.retry(stop=tenacity.stop_after_attempt(3), wait=tenacity.wait_fixed(1))
-    def save(
-        self,
-        query: str,
-        search_depth: t.Literal["basic", "advanced"],
-        topic: t.Literal["general", "news"],
-        days: int | None,
-        max_results: int,
-        include_domains: t.Sequence[str] | None,
-        exclude_domains: t.Sequence[str] | None,
-        include_answer: bool,
-        include_raw_content: bool,
-        include_images: bool,
-        use_cache: bool,
-        response: TavilyResponse,
-    ) -> None:
-        db_item = TavilyResponseModel.from_model(
-            agent_id=self.agent_id,
-            query=query,
-            search_depth=search_depth,
-            topic=topic,
-            max_results=max_results,
-            days=days,
-            include_domains=include_domains,
-            exclude_domains=exclude_domains,
-            include_answer=include_answer,
-            include_raw_content=include_raw_content,
-            include_images=include_images,
-            use_cache=use_cache,
-            response=response,
-        )
-        with Session(self.engine) as session:
-            session.add(db_item)
-            session.commit()
-
-    @tenacity.retry(stop=tenacity.stop_after_attempt(3), wait=tenacity.wait_fixed(1))
-    def find(
-        self,
-        query: str,
-        search_depth: t.Literal["basic", "advanced"],
-        topic: t.Literal["general", "news"],
-        days: int | None,
-        max_results: int,
-        include_domains: t.Sequence[str] | None,
-        exclude_domains: t.Sequence[str] | None,
-        include_answer: bool,
-        include_raw_content: bool,
-        include_images: bool,
-        use_cache: bool,
-        max_age: timedelta = timedelta(days=1),
-    ) -> TavilyResponse | None:
-        with Session(self.engine) as session:
-            sql_query = (
-                select(TavilyResponseModel)
-                .where(TavilyResponseModel.query == query)
-                .where(TavilyResponseModel.search_depth == search_depth)
-                .where(TavilyResponseModel.topic == topic)
-                .where(TavilyResponseModel.days == days)
-                .where(TavilyResponseModel.max_results == max_results)
-                .where(TavilyResponseModel.include_domains == include_domains)
-                .where(TavilyResponseModel.exclude_domains == exclude_domains)
-                .where(TavilyResponseModel.include_answer == include_answer)
-                .where(TavilyResponseModel.include_raw_content == include_raw_content)
-                .where(TavilyResponseModel.include_images == include_images)
-                .where(TavilyResponseModel.use_cache == use_cache)
-                .where(TavilyResponseModel.datetime_ >= utcnow() - max_age)
-            )
-            item = session.exec(
-                sql_query.order_by(desc(TavilyResponseModel.datetime_))
-            ).first()
-            return TavilyResponse.model_validate(item.response) if item else None

{prediction_market_agent_tooling-0.55.2 → prediction_market_agent_tooling-0.56.0.dev113}/LICENSE
RENAMED
File without changes

{prediction_market_agent_tooling-0.55.2 → prediction_market_agent_tooling-0.56.0.dev113}/README.md
RENAMED
File without changes

All remaining files (listed above with +0 -0) were renamed without changes.