dao-scripts 1.2.2__py3-none-any.whl → 1.3.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (27) hide show
  1. dao_analyzer/cache_scripts/_version.py +2 -2
  2. dao_analyzer/cache_scripts/aragon/runner.py +23 -26
  3. dao_analyzer/cache_scripts/argparser.py +14 -19
  4. dao_analyzer/cache_scripts/common/__init__.py +3 -1
  5. dao_analyzer/cache_scripts/common/api_requester.py +14 -13
  6. dao_analyzer/cache_scripts/common/blockscout.py +11 -13
  7. dao_analyzer/cache_scripts/common/common.py +55 -28
  8. dao_analyzer/cache_scripts/common/cryptocompare.py +4 -4
  9. dao_analyzer/cache_scripts/common/thegraph.py +203 -0
  10. dao_analyzer/cache_scripts/config.py +57 -15
  11. dao_analyzer/cache_scripts/daohaus/runner.py +20 -20
  12. dao_analyzer/cache_scripts/daostack/runner.py +25 -28
  13. dao_analyzer/cache_scripts/endpoints.json +14 -18
  14. dao_analyzer/cache_scripts/logging.py +98 -0
  15. dao_analyzer/cache_scripts/main.py +83 -77
  16. dao_analyzer/cache_scripts/metadata.py +6 -6
  17. dao_scripts-1.3.0-py3.11-nspkg.pth +1 -0
  18. dao_scripts-1.3.0.dist-info/LICENSE +674 -0
  19. {dao_scripts-1.2.2.dist-info → dao_scripts-1.3.0.dist-info}/METADATA +41 -7
  20. dao_scripts-1.3.0.dist-info/RECORD +32 -0
  21. {dao_scripts-1.2.2.dist-info → dao_scripts-1.3.0.dist-info}/WHEEL +1 -1
  22. dao_analyzer/cache_scripts/common/graphql.py +0 -143
  23. dao_scripts-1.2.2-py3.11-nspkg.pth +0 -1
  24. dao_scripts-1.2.2.dist-info/RECORD +0 -30
  25. {dao_scripts-1.2.2.dist-info → dao_scripts-1.3.0.dist-info}/entry_points.txt +0 -0
  26. {dao_scripts-1.2.2.dist-info → dao_scripts-1.3.0.dist-info}/namespace_packages.txt +0 -0
  27. {dao_scripts-1.2.2.dist-info → dao_scripts-1.3.0.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,203 @@
1
+ from typing import Optional, Callable, Any
2
+ from abc import ABC, abstractmethod
3
+
4
+ from gql.dsl import DSLField
5
+
6
+ import pandas as pd
7
+
8
+ from .common import ENDPOINTS, Runner, NetworkCollector, UpdatableCollector, GQLRequester, get_graph_url
9
+ from ..metadata import Block
10
+ from .. import config
11
+
12
+ Postprocessor = Callable[[pd.DataFrame], pd.DataFrame]
13
+
14
+ EMPTY_KEY_MSG = """
15
+ Empty The Graph API key. You can obtain one from https://thegraph.com/docs/en/querying/managing-api-keys/
16
+ """
17
+
18
+ def add_where(d: dict[str, Any], **kwargs):
19
+ """
20
+ Adds the values specified in kwargs to the where inside d
21
+ Example: `**add_where(kwargs, deleted=False)`
22
+ """
23
+ if "where" in d:
24
+ d["where"] |= kwargs
25
+ else:
26
+ d["where"] = kwargs
27
+
28
+ return d
29
+
30
+ def partial_query(q: Callable[..., DSLField], w: dict[str, Any]) -> Callable[..., DSLField]:
31
+ def wrapper(**kwargs):
32
+ return q(**add_where(kwargs, **w))
33
+ return wrapper
34
+
35
+
36
+ class TheGraphCollector(NetworkCollector, UpdatableCollector, ABC):
37
+ def __init__(
38
+ self,
39
+ name: str,
40
+ network: str,
41
+ subgraph_id: str,
42
+ runner: Runner,
43
+ index: Optional[str]=None,
44
+ result_key: Optional[str]=None,
45
+ pbar_enabled: bool=True
46
+ ):
47
+ super().__init__(name, runner, network)
48
+
49
+ self._index_col: str = index or 'id'
50
+ self._result_key: str = result_key or name
51
+ self._postprocessors: list[Postprocessor] = []
52
+ self._indexer_block: Optional[Block] = None
53
+ self._requester = GQLRequester(
54
+ endpoint=get_graph_url(subgraph_id),
55
+ pbar_enabled=pbar_enabled,
56
+ )
57
+
58
+ def postprocessor(self, f: Postprocessor):
59
+ self._postprocessors.append(f)
60
+ return f
61
+
62
+ @property
63
+ def endpoint(self):
64
+ return self._endpoint
65
+
66
+ @property
67
+ def schema(self):
68
+ return self._requester.get_schema()
69
+
70
+ @abstractmethod
71
+ def query(self, **kwargs) -> DSLField:
72
+ raise NotImplementedError
73
+
74
+ @property
75
+ def df(self) -> pd.DataFrame:
76
+ if not self.data_path.is_file():
77
+ return pd.DataFrame()
78
+
79
+ df = pd.read_feather(self.data_path)
80
+ if self.network:
81
+ df = df[df['network'] == self.network]
82
+
83
+ return df
84
+
85
+ def transform_to_df(self, data: list[dict[str, Any]], skip_post: bool=False) -> pd.DataFrame:
86
+ df = pd.DataFrame.from_dict(pd.json_normalize(data))
87
+
88
+ # For compatibility reasons we convert dotted keys to camelCase
89
+ def dotsToSnakeCase(str: str) -> str:
90
+ splitted = str.split('.')
91
+ return splitted[0] + ''.join(x[0].upper()+x[1:] for x in splitted[1:])
92
+
93
+ df = df.rename(columns=dotsToSnakeCase)
94
+ df['network'] = self.network
95
+
96
+ if not skip_post:
97
+ for post in self._postprocessors:
98
+ self.logger.debug(f"Running postprocessor {post.__name__}")
99
+ df = post(df)
100
+ if df is None:
101
+ raise ValueError(f"The postprocessor {post.__name__} returned None")
102
+
103
+ return df
104
+
105
+ def check_deployment_health(self, deployment_id: str) -> bool:
106
+ _requester = GQLRequester(ENDPOINTS['_theGraph']['index-node'])
107
+ ds = _requester.get_schema()
108
+ q = ds.Query.indexingStatuses(subgraphs=[deployment_id]).select(
109
+ ds.SubgraphIndexingStatus.node,
110
+ ds.SubgraphIndexingStatus.entityCount,
111
+ ds.SubgraphIndexingStatus.health,
112
+ ds.SubgraphIndexingStatus.subgraph,
113
+ ds.SubgraphIndexingStatus.synced,
114
+ ds.SubgraphIndexingStatus.fatalError.select(
115
+ ds.SubgraphError.message,
116
+ ),
117
+ ds.SubgraphIndexingStatus.nonFatalErrors.select(
118
+ ds.SubgraphError.message,
119
+ ),
120
+ ds.SubgraphIndexingStatus.chains.select(
121
+ ds.ChainIndexingStatus.network,
122
+ ),
123
+ )
124
+
125
+ r: dict[str, Any] = _requester.request_single(q)[0]
126
+
127
+ no_errors = True
128
+ assert r['subgraph'] == deployment_id, "Got response for other subgraph"
129
+ if r['fatalError']:
130
+ self.logger.error(f'Subgraph {deployment_id} has fatal error: {r["fatalError"]}')
131
+ no_errors = False
132
+
133
+ if r['health'] != 'healthy':
134
+ self.logger.error(f'Subgraph {deployment_id} is not healthy.')
135
+ no_errors = False
136
+
137
+ _network = r['chains'][0]['network']
138
+ if _network != self.network:
139
+ self.logger.error(f'Subgraph {deployment_id} is deployed on incorrect network. Expected {self.network} but got {_network}')
140
+ no_errors = False
141
+
142
+ if r['nonFatalErrors']:
143
+ self.logger.warning(f'Subgraph {deployment_id} has non fatal errors, check subgraph studio')
144
+
145
+ if not r['synced']:
146
+ self.logger.warning(f'Subgraph {deployment_id} is not synced. Check subgraph studio.')
147
+
148
+ return no_errors
149
+
150
+ def check_subgraph_health(self) -> bool:
151
+ ds = self.schema
152
+ q = ds.Query._meta().select(
153
+ ds._Meta_.deployment,
154
+ ds._Meta_.hasIndexingErrors,
155
+ ds._Meta_.block.select(
156
+ ds._Block_.hash,
157
+ ds._Block_.number,
158
+ ds._Block_.timestamp,
159
+ ),
160
+ )
161
+
162
+ r = self._requester.request_single(q)
163
+
164
+ if r['hasIndexingErrors']:
165
+ self.logger.error('Subgraph has indexing errors')
166
+ return False
167
+
168
+ # TODO: Save the block info to use it later in run
169
+ self._indexer_block = Block(r['block'])
170
+
171
+ return self.check_deployment_health(r['deployment'])
172
+
173
+ def verify(self) -> bool:
174
+ if not config.THE_GRAPH_API_KEY:
175
+ self.logger.error('Empty The Graph api key')
176
+ return False
177
+
178
+ # Checking that the queryBuilder doesn't raise any errors
179
+ self.query()
180
+
181
+ return self.check_subgraph_health()
182
+
183
+ def query_cb(self, prev_block: Optional[Block] = None):
184
+ if prev_block:
185
+ return partial_query(self.query, {'_change_block': {'number_gte': prev_block.number}})
186
+ else:
187
+ return self.query
188
+
189
+ def run(self, force=False, block: Optional[Block] = None, prev_block: Optional[Block] = None):
190
+ self.logger.info(f"Running The Graph collector with block: {block}, prev_block: {prev_block}")
191
+ if block and self._indexer_block:
192
+ assert self._indexer_block >= block, "Block number is not indexed yet"
193
+
194
+ if block is None:
195
+ block = Block()
196
+ if prev_block is None or force:
197
+ prev_block = Block()
198
+
199
+ data = self._requester.n_requests(query=self.query_cb(prev_block), block_hash=block.id)
200
+
201
+ # transform to df
202
+ df: pd.DataFrame = self.transform_to_df(data)
203
+ self._update_data(df, force)
@@ -1,28 +1,70 @@
1
+ from typing import Any
2
+
3
+ import re
4
+
1
5
  from pathlib import Path
2
- import os
6
+ from argparse import Namespace
7
+
8
+ from dynaconf import Dynaconf, Validator
9
+
10
+ _units = {
11
+ "B": 1,
12
+ "KB": 10**3, "MB": 10**6, "GB": 10**9, "TB": 10**12,
13
+ "KiB": 2**10, "MiB": 2**20, "GiB": 2**30, "TiB": 2**40,
14
+ }
15
+
16
+ def parse_size(size):
17
+ if isinstance(size, int):
18
+ return size
19
+
20
+ size = size.upper()
21
+ if not re.match(r' ', size):
22
+ size = re.sub(r'([KMGT]?B)', r' \1', size)
23
+ number, unit = [string.strip() for string in size.split()]
24
+ return int(float(number)*_units[unit])
25
+
26
+ # TODO: Add some way of making every Runner capable of defining its config
27
+ # there somehow
28
+ _RUNNER_VALIDATORS: list[Validator] = [
29
+ Validator('daohaus.skip_names', cast=bool, default=False),
30
+
31
+ Validator('daostack.registered_only', cast=bool, default=True),
32
+ ]
3
33
 
4
- from . import __version__
34
+ settings = Dynaconf(
35
+ envvar_prefix="DAOA",
36
+ validate_on_update=True,
37
+ validators=[
38
+ Validator('SKIP_INVALID_BLOCKS', cast=int, default=250),
39
+ Validator('DEFAULT_DATAWAREHOUSE', cast=Path, default=Path("datawarehouse")),
40
+ Validator('LOGGING_BACKUP_COUNT', cast=int, default=3),
41
+ Validator('LOGGING_MAX_SIZE', cast=parse_size, default="100MB"),
42
+ Validator('CC_API_KEY', default=""),
43
+ Validator('THE_GRAPH_API_KEY', default=""),
5
44
 
6
- CACHE_SCRIPTS_VERSION = __version__
45
+ # Can be overridden by argparser
46
+ Validator('run_only_updatable', cast=bool, default=False),
47
+ Validator('DEBUG', cast=bool, default=False),
48
+ Validator('raise_runner_errors', cast=bool, default=False),
49
+ Validator('skip_token_balances', cast=bool, default=False),
7
50
 
8
- # https://letsexchange.io/blog/what-is-block-confirmation-on-ethereum-and-how-many-confirmations-are-required/
9
- # Number of blocks to skip to only consult confirmed blocks
10
- SKIP_INVALID_BLOCKS = 250
11
- DEFAULT_DATAWAREHOUSE = Path(os.getenv('DAOA_DW_PATH', 'datawarehouse'))
51
+ *_RUNNER_VALIDATORS,
52
+ ]
53
+ )
12
54
 
13
- # LOGGING CONFIG
14
- LOGGING_BACKUP_COUNT = os.getenv('DAOA_LOGGING_BACKUP_COUNT', 3)
15
- LOGGING_MAX_MB = os.getenv('DAOA_LOGGING_MAX_MB', 100)
55
+ def _sanitize_argname(name: str) -> str:
56
+ return name.replace(".", "__")
16
57
 
17
- __args = None
58
+ def args2config(args: Namespace):
59
+ argsdict: dict[str, Any] = vars(args)
18
60
 
19
- def populate_args(args):
20
- global __args
21
- __args = args
61
+ all_names = [ (vn,_sanitize_argname(vn)) for v in settings.validators for vn in v.names ]
62
+ settings_update = { vn:(argsdict[an] or settings[vn]) for vn, an in all_names if an in argsdict }
22
63
 
64
+ settings.update(settings_update)
23
65
 
24
66
  def __getattr__(name):
25
67
  """
26
68
  Called when no function has been defined. Defaults to search argsparser.
27
69
  """
28
- return __args.__getattribute__(name)
70
+ return settings[name]
@@ -19,14 +19,14 @@ from .. import config
19
19
  from ..common.common import solve_decimals
20
20
  from ..common.cryptocompare import cc_postprocessor
21
21
 
22
- from ..common import ENDPOINTS, Collector
23
- from ..common.graphql import GraphQLCollector, GraphQLRunner, add_where
22
+ from ..common import ENDPOINTS, Collector, NetworkRunner
23
+ from ..common.thegraph import TheGraphCollector, add_where
24
24
 
25
25
  DATA_ENDPOINT: str = "https://data.daohaus.club/dao/{id}"
26
26
 
27
- class MembersCollector(GraphQLCollector):
27
+ class MembersCollector(TheGraphCollector):
28
28
  def __init__(self, runner, network: str):
29
- super().__init__('members', runner, network=network, endpoint=ENDPOINTS[network]['daohaus'])
29
+ super().__init__('members', network, ENDPOINTS[network]['daohaus'], runner)
30
30
 
31
31
  def query(self, **kwargs) -> DSLField:
32
32
  ds = self.schema
@@ -42,9 +42,9 @@ class MembersCollector(GraphQLCollector):
42
42
  ds.Member.didRagequit
43
43
  )
44
44
 
45
- class MolochesCollector(GraphQLCollector):
45
+ class MolochesCollector(TheGraphCollector):
46
46
  def __init__(self, runner, network: str):
47
- super().__init__('moloches', runner, network=network, endpoint=ENDPOINTS[network]['daohaus'])
47
+ super().__init__('moloches', network, ENDPOINTS[network]['daohaus'], runner)
48
48
 
49
49
  @self.postprocessor
50
50
  def moloch_id(df: pd.DataFrame) -> pd.DataFrame:
@@ -55,7 +55,7 @@ class MolochesCollector(GraphQLCollector):
55
55
  def moloch_names(df: pd.DataFrame) -> pd.DataFrame:
56
56
  df = df.rename(columns={"title":"name"})
57
57
 
58
- if config.skip_daohaus_names:
58
+ if config.daohaus.skip_names:
59
59
  return df
60
60
 
61
61
  cached = requests_cache.CachedSession(self.runner.cache / 'daohaus_names_cache',
@@ -92,9 +92,9 @@ class MolochesCollector(GraphQLCollector):
92
92
  ds.Moloch.totalLoot,
93
93
  )
94
94
 
95
- class ProposalsCollector(GraphQLCollector):
95
+ class ProposalsCollector(TheGraphCollector):
96
96
  def __init__(self, runner, network: str):
97
- super().__init__('proposals', runner, network=network, endpoint=ENDPOINTS[network]["daohaus"])
97
+ super().__init__('proposals', network, ENDPOINTS[network]['daohaus'], runner)
98
98
 
99
99
  def query(self, **kwargs) -> DSLField:
100
100
  ds = self.schema
@@ -123,9 +123,9 @@ class ProposalsCollector(GraphQLCollector):
123
123
  ds.Proposal.details,
124
124
  )
125
125
 
126
- class RageQuitCollector(GraphQLCollector):
126
+ class RageQuitCollector(TheGraphCollector):
127
127
  def __init__(self, runner, network: str):
128
- super().__init__('rageQuits', runner, network=network, endpoint=ENDPOINTS[network]["daohaus"])
128
+ super().__init__('rageQuits', network, ENDPOINTS[network]['daohaus'], runner)
129
129
 
130
130
  def query(self, **kwargs) -> DSLField:
131
131
  ds = self.schema
@@ -138,9 +138,9 @@ class RageQuitCollector(GraphQLCollector):
138
138
  ds.RageQuit.loot
139
139
  )
140
140
 
141
- class TokenBalancesCollector(GraphQLCollector):
141
+ class TokenBalancesCollector(TheGraphCollector):
142
142
  def __init__(self, runner, network: str):
143
- super().__init__('tokenBalances', runner, network=network, endpoint=ENDPOINTS[network]["daohaus"])
143
+ super().__init__('tokenBalances', network, ENDPOINTS[network]['daohaus'], runner)
144
144
 
145
145
  @self.postprocessor
146
146
  def change_col_names(df: pd.DataFrame) -> pd.DataFrame:
@@ -163,8 +163,8 @@ class TokenBalancesCollector(GraphQLCollector):
163
163
 
164
164
  return df
165
165
 
166
- self.postprocessors.append(solve_decimals)
167
- self.postprocessors.append(cc_postprocessor)
166
+ self.postprocessor(solve_decimals)
167
+ self.postprocessor(cc_postprocessor)
168
168
 
169
169
  def query(self, **kwargs) -> DSLField:
170
170
  ds = self.schema
@@ -184,9 +184,9 @@ class TokenBalancesCollector(GraphQLCollector):
184
184
  ds.TokenBalance.tokenBalance
185
185
  )
186
186
 
187
- class VoteCollector(GraphQLCollector):
187
+ class VoteCollector(TheGraphCollector):
188
188
  def __init__(self, runner, network: str):
189
- super().__init__('votes', runner, network=network, endpoint=ENDPOINTS[network]["daohaus"])
189
+ super().__init__('votes', network, ENDPOINTS[network]['daohaus'], runner)
190
190
 
191
191
  @self.postprocessor
192
192
  def changeColumnNames(df: pd.DataFrame) -> pd.DataFrame:
@@ -204,11 +204,11 @@ class VoteCollector(GraphQLCollector):
204
204
  ds.Vote.uintVote
205
205
  )
206
206
 
207
- class DaohausRunner(GraphQLRunner):
207
+ class DaohausRunner(NetworkRunner):
208
208
  name: str = 'daohaus'
209
209
 
210
- def __init__(self):
211
- super().__init__()
210
+ def __init__(self, dw):
211
+ super().__init__(dw)
212
212
  self._collectors: List[Collector] = []
213
213
  for n in self.networks:
214
214
  self._collectors.extend([
@@ -15,8 +15,8 @@ from .. import config
15
15
  from ..common.blockscout import BlockscoutBallancesCollector
16
16
  from ..common.cryptocompare import CCPricesCollector
17
17
 
18
- from ..common import ENDPOINTS, Collector
19
- from ..common.graphql import GraphQLCollector, GraphQLRunner, add_where
18
+ from ..common import ENDPOINTS, Collector, NetworkRunner
19
+ from ..common.thegraph import TheGraphCollector, add_where
20
20
 
21
21
  def _changeProposalColumnNames(df: pd.DataFrame) -> pd.DataFrame:
22
22
  df = df.rename(columns={
@@ -38,9 +38,9 @@ class BalancesCollector(BlockscoutBallancesCollector):
38
38
  def __init__(self, runner, base, network: str):
39
39
  super().__init__(runner, base=base, network=network, addr_key='dao')
40
40
 
41
- class DaosCollector(GraphQLCollector):
41
+ class DaosCollector(TheGraphCollector):
42
42
  def __init__(self, runner, network: str):
43
- super().__init__('daos', runner, network=network, endpoint=ENDPOINTS[network]['daostack'])
43
+ super().__init__('daos', network, ENDPOINTS[network]['daostack'], runner)
44
44
 
45
45
  @self.postprocessor
46
46
  def changeColumnNames(df: pd.DataFrame) -> pd.DataFrame:
@@ -60,9 +60,9 @@ class DaosCollector(GraphQLCollector):
60
60
  def query(self, **kwargs) -> DSLField:
61
61
  ds = self.schema
62
62
 
63
- where = { 'register': 'registered' }
64
- if config.daostack_all:
65
- where.pop('register')
63
+ where = dict()
64
+ if config.daostack.registered_only:
65
+ where['register'] = 'registered'
66
66
 
67
67
  return ds.Query.daos(**add_where(kwargs, **where)).select(
68
68
  ds.DAO.id,
@@ -72,9 +72,9 @@ class DaosCollector(GraphQLCollector):
72
72
  ds.DAO.nativeReputation.select(ds.Rep.id)
73
73
  )
74
74
 
75
- class ProposalsCollector(GraphQLCollector):
75
+ class ProposalsCollector(TheGraphCollector):
76
76
  def __init__(self, runner, network: str, daoC: DaosCollector):
77
- super().__init__('proposals', runner, network=network, endpoint=ENDPOINTS[network]['daostack'])
77
+ super().__init__('proposals', network, ENDPOINTS[network]['daostack'], runner)
78
78
 
79
79
  @self.postprocessor
80
80
  def changeColumnNames(df: pd.DataFrame) -> pd.DataFrame:
@@ -86,7 +86,7 @@ class ProposalsCollector(GraphQLCollector):
86
86
  def deleteColums(df: pd.DataFrame) -> pd.DataFrame:
87
87
  return df.drop(columns=['competition'], errors='ignore')
88
88
 
89
- self.postprocessors.append(_remove_phantom_daos_wr(daoC))
89
+ self.postprocessor(_remove_phantom_daos_wr(daoC))
90
90
 
91
91
  @staticmethod
92
92
  def _stripGenesis(s: str):
@@ -139,12 +139,11 @@ class ProposalsCollector(GraphQLCollector):
139
139
  ds.Proposal.competition.select(ds.CompetitionProposal.id)
140
140
  )
141
141
 
142
- class ReputationHoldersCollector(GraphQLCollector):
142
+ class ReputationHoldersCollector(TheGraphCollector):
143
143
  def __init__(self, runner, network: str, daoC: DaosCollector):
144
- super().__init__('reputationHolders', runner, network=network, endpoint=ENDPOINTS[network]['daostack'])
144
+ super().__init__('reputationHolders', network, ENDPOINTS[network]['daostack'], runner)
145
145
  self.postprocessor(_changeProposalColumnNames)
146
-
147
- self.postprocessors.append(_remove_phantom_daos_wr(daoC))
146
+ self.postprocessor(_remove_phantom_daos_wr(daoC))
148
147
 
149
148
  def query(self, **kwargs) -> DSLField:
150
149
  ds = self.schema
@@ -157,12 +156,11 @@ class ReputationHoldersCollector(GraphQLCollector):
157
156
  ds.ReputationHolder.dao.select(ds.DAO.id)
158
157
  )
159
158
 
160
- class StakesCollector(GraphQLCollector):
159
+ class StakesCollector(TheGraphCollector):
161
160
  def __init__(self, runner, network: str, daoC: DaosCollector):
162
- super().__init__('stakes', runner, network=network, endpoint=ENDPOINTS[network]['daostack'])
161
+ super().__init__('stakes',network, ENDPOINTS[network]['daostack'], runner)
163
162
  self.postprocessor(_changeProposalColumnNames)
164
-
165
- self.postprocessors.append(_remove_phantom_daos_wr(daoC))
163
+ self.postprocessor(_remove_phantom_daos_wr(daoC))
166
164
 
167
165
  def query(self, **kwargs) -> DSLField:
168
166
  ds = self.schema
@@ -179,12 +177,11 @@ class StakesCollector(GraphQLCollector):
179
177
  class TokenPricesCollector(CCPricesCollector):
180
178
  pass
181
179
 
182
- class VotesCollector(GraphQLCollector):
180
+ class VotesCollector(TheGraphCollector):
183
181
  def __init__(self, runner, network: str, daoC: DaosCollector):
184
- super().__init__('votes', runner, network=network, endpoint=ENDPOINTS[network]['daostack'])
182
+ super().__init__('votes', network, ENDPOINTS[network]['daostack'], runner)
185
183
  self.postprocessor(_changeProposalColumnNames)
186
-
187
- self.postprocessors.append(_remove_phantom_daos_wr(daoC))
184
+ self.postprocessor(_remove_phantom_daos_wr(daoC))
188
185
 
189
186
  def query(self, **kwargs) -> DSLField:
190
187
  ds = self.schema
@@ -198,9 +195,9 @@ class VotesCollector(GraphQLCollector):
198
195
  ds.ProposalVote.proposal.select(ds.Proposal.id)
199
196
  )
200
197
 
201
- class CommonRepEventCollector(GraphQLCollector):
198
+ class CommonRepEventCollector(TheGraphCollector):
202
199
  def __init__(self, name, runner, base, network: str):
203
- super().__init__(name, runner, network=network, endpoint=ENDPOINTS[network]['daostack'])
200
+ super().__init__(name, network, ENDPOINTS[network]['daostack'], runner)
204
201
  self.base = base
205
202
 
206
203
  @self.postprocessor
@@ -230,7 +227,7 @@ class CommonRepEventCollector(GraphQLCollector):
230
227
 
231
228
  return df
232
229
 
233
- self.postprocessors.append(_remove_phantom_daos_wr(self.base))
230
+ self.postprocessor(_remove_phantom_daos_wr(self.base))
234
231
 
235
232
  class ReputationMintsCollector(CommonRepEventCollector):
236
233
  def __init__(self, *args, **kwargs):
@@ -262,11 +259,11 @@ class ReputationBurnsCollector(CommonRepEventCollector):
262
259
  ds.ReputationBurn.createdAt
263
260
  )
264
261
 
265
- class DaostackRunner(GraphQLRunner):
262
+ class DaostackRunner(NetworkRunner):
266
263
  name: str = 'daostack'
267
264
 
268
- def __init__(self):
269
- super().__init__()
265
+ def __init__(self, dw):
266
+ super().__init__(dw)
270
267
  self._collectors: List[Collector] = []
271
268
  for n in self.networks:
272
269
  dc = DaosCollector(self, n)
@@ -1,31 +1,27 @@
1
1
  {
2
2
  "mainnet": {
3
- "_blocks": "https://api.thegraph.com/subgraphs/name/blocklytics/ethereum-blocks",
3
+ "_blocks": "9A6bkprqEG2XsZUYJ5B2XXp6ymz9fNcn4tVPxMWDztYC",
4
4
  "blockscout": "https://blockscout.com/eth/mainnet/api",
5
- "daostack": "https://api.thegraph.com/subgraphs/name/grasia/daostack",
6
- "daohaus": "https://api.thegraph.com/subgraphs/name/odyssy-automaton/daohaus",
7
- "aragon": "https://api.thegraph.com/subgraphs/name/aragon/aragon-mainnet",
8
- "aragon_tokens": "https://api.thegraph.com/subgraphs/name/grasia/aragon-tokens-mainnet",
9
- "aragon_voting": "https://api.thegraph.com/subgraphs/name/grasia/aragon-voting-mainnet",
10
- "aragon_finance": "https://api.thegraph.com/subgraphs/name/grasia/aragon-finance-mainnet"
5
+ "daostack": "43HtV3dXRLPX7GKyV7f8GMLhevPXBmnSvpGBRKyfr53k",
6
+ "daohaus": "2d3CDkKyxhpLDZRLWHMCvWp9cCYdWp4Y7g5ecaBmeqad",
7
+ "aragon": "QmduJYwyy3STKaZCEEHtxCQ9LMRYDPdfqYbgobRob6bDp7",
8
+ "aragon_tokens": "EioF9792T18u3oqtt2Hfv5gYgw2Rp2qeqnrURP6deVHg",
9
+ "aragon_voting": "GBsV7wbrW9gTBjJhvBpML5Gcadtaj1fEnapB6NujNaai",
10
+ "aragon_finance": "7pHfNgz8XYHH2Uu9RdqqP8G29BCqi1VRgo3cdLzz59HZ"
11
11
  },
12
12
  "xdai": {
13
- "_blocks": "https://api.thegraph.com/subgraphs/name/1hive/xdai-blocks",
13
+ "_blocks": "D58aXwnRLfosFtRaVJAbAjjvKZ11bEsbdiDLkJJRdSC9",
14
14
  "blockscout": "https://blockscout.com/xdai/mainnet/api",
15
- "daostack": "https://api.thegraph.com/subgraphs/name/grasia/daostack-xdai",
16
- "daohaus": "https://api.thegraph.com/subgraphs/name/odyssy-automaton/daohaus-xdai",
17
- "aragon": "https://api.thegraph.com/subgraphs/name/1hive/aragon-xdai",
18
- "aragon_tokens": "https://api.thegraph.com/subgraphs/name/grasia/aragon-tokens-xdai",
19
- "aragon_voting": "https://api.thegraph.com/subgraphs/name/grasia/aragon-voting-xdai",
20
- "aragon_finance": "https://api.thegraph.com/subgraphs/name/grasia/aragon-finance-xdai"
15
+ "daostack": "Bnqq6ARphxg2rnAvjBrQGxCPNtDGiygv235GbRzZqwfb",
16
+ "daohaus": "2GJY9uxsLQUCvgqSfy6QCLAJgM9P9kdxBUpwNcGs7nPR"
21
17
  },
22
18
  "polygon": {
23
- "_blocks": "https://api.thegraph.com/subgraphs/name/grasia/matic-blocks",
24
- "daohaus": "https://api.thegraph.com/subgraphs/name/odyssy-automaton/daohaus-matic"
19
+ "_blocks": "4mJTujCUWKLd4SPBYE1YXbamfNSNRMyphk8tHzczYWf9",
20
+ "daohaus": "AkzZk4BsvfNRkRDtzz7Bc8TgktTJN4uv9tgfCehhE6fB"
25
21
  },
26
22
  "arbitrum": {
27
- "_blocks": "https://api.thegraph.com/subgraphs/name/grasia/arbitrum-blocks",
28
- "daohaus": "https://api.thegraph.com/subgraphs/name/odyssy-automaton/daohaus-arbitrum"
23
+ "_blocks": "AyY1GDtmAyavGWsdoHQ4vNoBCVWSqfDwHa6jPu5KdHgP",
24
+ "daohaus": "2c41cggebRCMzFiDqoqDwShZtz4xYucsFKbQnEiXUTzY"
29
25
  },
30
26
  "_theGraph": {
31
27
  "index-node": "https://api.thegraph.com/index-node/graphql"