structifyai-1.179.0-py3-none-any.whl → structifyai-1.182.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (49)
  1. structify/_base_client.py +5 -2
  2. structify/_compat.py +3 -3
  3. structify/_utils/_json.py +35 -0
  4. structify/_version.py +1 -1
  5. structify/resources/__init__.py +0 -2
  6. structify/resources/connector_catalog/admin.py +76 -0
  7. structify/resources/connectors/connectors.py +357 -1
  8. structify/resources/polars.py +2 -7
  9. structify/resources/sessions.py +83 -0
  10. structify/resources/slack.py +8 -8
  11. structify/resources/teams.py +12 -76
  12. structify/resources/wiki.py +22 -18
  13. structify/resources/workflow.py +7 -1
  14. structify/types/__init__.py +7 -3
  15. structify/types/admin/admin_sandbox.py +0 -2
  16. structify/types/{team_create_link_code_params.py → cell_edit_param.py} +7 -3
  17. structify/types/chat_create_session_params.py +2 -0
  18. structify/types/code_generate_code_params.py +2 -0
  19. structify/types/connector_add_schema_object_params.py +59 -0
  20. structify/types/connector_add_schema_object_response.py +35 -0
  21. structify/types/dashboard_component.py +7 -45
  22. structify/types/dashboard_component_param.py +8 -52
  23. structify/types/dashboard_page.py +3 -3
  24. structify/types/dashboard_page_param.py +3 -3
  25. structify/types/job_event_body.py +4 -0
  26. structify/types/llm_information_store.py +4 -0
  27. structify/types/parquet_edit_param.py +29 -0
  28. structify/types/session_edit_node_output_params.py +14 -0
  29. structify/types/session_edit_node_output_response.py +11 -0
  30. structify/types/slack_event_payload_param.py +2 -2
  31. structify/types/slack_events_params.py +2 -2
  32. structify/types/team_update_params.py +6 -0
  33. structify/types/usage_group_key.py +1 -0
  34. structify/types/user_info.py +4 -0
  35. structify/types/wiki_create_params.py +1 -2
  36. structify/types/wiki_list_response.py +2 -2
  37. structify/types/wiki_page.py +23 -0
  38. structify/types/wiki_page_with_references.py +2 -2
  39. structify/types/wiki_update_params.py +4 -2
  40. structify/types/workflow_run_params.py +3 -0
  41. structify/types/workflow_session_node.py +2 -0
  42. {structifyai-1.179.0.dist-info → structifyai-1.182.0.dist-info}/METADATA +1 -1
  43. {structifyai-1.179.0.dist-info → structifyai-1.182.0.dist-info}/RECORD +45 -42
  44. structify/resources/external.py +0 -99
  45. structify/resources/external_dataframe_proxy.py +0 -290
  46. structify/types/team_wiki_page.py +0 -28
  47. structify/types/teams_link_code_response.py +0 -13
  48. {structifyai-1.179.0.dist-info → structifyai-1.182.0.dist-info}/WHEEL +0 -0
  49. {structifyai-1.179.0.dist-info → structifyai-1.182.0.dist-info}/licenses/LICENSE +0 -0
structify/resources/external_dataframe_proxy.py +0 -290
@@ -1,290 +0,0 @@
- # Dynamic proxy for adding DataFrame batch processing to all external endpoints
-
- from __future__ import annotations
-
- import re
- from typing import Any, Dict, List, Callable, Optional
- from dataclasses import field, dataclass
- from concurrent.futures import Future, ThreadPoolExecutor
-
- import polars as pl
- from pydantic import BaseModel
-
- from .external import ExternalResource
-
- __all__ = ["ServicesProxy"]
-
-
- @dataclass
- class EndpointConfig:
-     """Configuration for how to process API responses into DataFrames"""
-     expand_path: Optional[str] = None  # Path to list to expand (e.g., 'organic_results')
-     properties: List[str] = field(default_factory=lambda: [])  # Properties to extract (relative to expanded items)
-
-
- # Configuration for each endpoint - how to transform responses into clean DataFrames
- ENDPOINT_CONFIGS = {
-     # News endpoints
-     'news.top_headlines': EndpointConfig(
-         expand_path='articles',
-         properties=['title', 'url', 'content', 'publishedAt', 'source.name', 'author']
-     ),
-     'news.everything': EndpointConfig(
-         expand_path='articles',
-         properties=['title', 'url', 'content', 'publishedAt', 'source.name', 'author']
-     ),
-     'news.sources': EndpointConfig(
-         expand_path='sources',
-         properties=['id', 'name', 'description', 'url', 'category', 'country', 'language']
-     ),
-
-     # Search API endpoints
-     'search_api.google_search': EndpointConfig(
-         expand_path='organic_results',
-         properties=['link', 'title', 'snippet', 'display_link']
-     ),
-     'search_api.google_maps_search': EndpointConfig(
-         # Direct list response - no expand_path needed
-         properties=['name', 'address', 'rating', 'place_id', 'types']
-     ),
-     'search_api.google_maps_place_details': EndpointConfig(
-         properties=['name', 'formatted_address', 'rating', 'user_ratings_total', 'price_level', 'website']
-     ),
-     'search_api.google_maps_place_reviews': EndpointConfig(
-         expand_path='reviews',
-         properties=['author_name', 'rating', 'text', 'time', 'relative_time_description']
-     ),
-     'search_api.google_maps_place_photos': EndpointConfig(
-         expand_path='photos',
-         properties=['photo_reference', 'height', 'width', 'html_attributions']
-     ),
-     'search_api.google_flights_search': EndpointConfig(
-         expand_path='flights',
-         properties=['price', 'departure_time', 'arrival_time', 'airline', 'flight_number', 'duration']
-     ),
-     'search_api.google_flights_calendar': EndpointConfig(
-         expand_path='calendar_results',
-         properties=['date', 'price', 'departure_time', 'arrival_time']
-     ),
-     'search_api.google_flights_location_search': EndpointConfig(
-         expand_path='locations',
-         properties=['name', 'code', 'city', 'country']
-     ),
-     'search_api.google_scholar_search': EndpointConfig(
-         expand_path='organic_results',
-         properties=['title', 'link', 'snippet', 'publication_info', 'citation_count']
-     ),
-     'search_api.google_scholar_author_search': EndpointConfig(
-         expand_path='authors',
-         properties=['name', 'affiliation', 'citations', 'h_index', 'i10_index']
-     ),
-     'search_api.google_scholar_citations': EndpointConfig(
-         expand_path='citations',
-         properties=['title', 'authors', 'publication', 'year', 'citation_count']
-     ),
-     'search_api.location_search': EndpointConfig(
-         expand_path='locations',
-         properties=['name', 'display_name', 'lat', 'lon', 'country', 'state']
-     ),
-
-     # People endpoints
-     'people.people_search': EndpointConfig(
-         expand_path='people',
-         properties=['name', 'title', 'company', 'location', 'linkedin_url']
-     ),
-     'people.people_match': EndpointConfig(
-         expand_path='matches',
-         properties=['name', 'confidence_score', 'linkedin_url', 'company', 'title']
-     ),
-     'people.companies_search': EndpointConfig(
-         expand_path='companies',
-         properties=['name', 'domain', 'industry', 'size', 'linkedin_url', 'description']
-     ),
-     'people.organizations_enrich': EndpointConfig(
-         properties=['name', 'domain', 'industry', 'employee_count', 'description', 'linkedin_url']
-     ),
-     'people.organization_detail': EndpointConfig(
-         properties=['name', 'domain', 'industry', 'employee_count', 'description', 'founded']
-     ),
-     'people.organization_job_postings': EndpointConfig(
-         expand_path='job_postings',
-         properties=['title', 'company', 'location', 'description', 'posted_date', 'url']
-     ),
- }
-
-
- class EndpointProxy:
-     """
-     Proxy for individual service endpoints (e.g., news, people, search_api).
-
-     Intercepts method calls and automatically detects DataFrame inputs,
-     processing each row as a parallel API call.
-     """
-
-     def __init__(self, service: Any):
-         self._service = service
-         self._client = getattr(service, '_client', None)
-
-     def __getattr__(self, name: str) -> Any:
-         # Get the original method/attribute
-         original_attr = getattr(self._service, name)
-
-         # If it's not callable, return as-is
-         if not callable(original_attr):
-             return original_attr
-
-         # If it's a special method, return as-is
-         if name.startswith('_') or name in ['with_raw_response', 'with_streaming_response']:
-             return original_attr
-
-         # Return a wrapped version that supports DataFrame batch processing
-         def wrapped_method(*args: Any, **kwargs: Any) -> Any:
-             # Check if first argument is a DataFrame
-             if args and isinstance(args[0], pl.DataFrame):
-                 df = args[0]
-                 return self._batch_process_dataframe(name, df)
-             else:
-                 # Regular method call - delegate to original
-                 return original_attr(*args, **kwargs)
-
-         return wrapped_method
-
-     def _batch_process_dataframe(self, method_name: str, df: pl.DataFrame) -> pl.DataFrame:
-         """Process DataFrame rows as parallel API calls."""
-         THREAD_POOL_SIZE = 20
-
-         if df.is_empty():
-             return df.clear()
-
-         rows = df.to_dicts()
-
-         # Get the original method once
-         original_method = getattr(self._service, method_name)
-
-         # Execute parallel requests
-         with ThreadPoolExecutor(max_workers=THREAD_POOL_SIZE) as executor:
-             futures: List[Future[Any]] = []
-             for row in rows:
-                 future = executor.submit(self._execute_single_request, original_method, row)
-                 futures.append(future)
-
-             # Process results using endpoint configuration
-             all_results: List[Dict[str, Any]] = []
-             for i, future in enumerate(futures):
-                 try:
-                     result = future.result()
-                     processed_rows = self._process_response(method_name, result, rows[i])
-                     all_results.extend(processed_rows)
-                 except Exception as e:
-                     # Add error row with query context
-                     error_row: Dict[str, Any] = {"error": str(e)}
-                     error_row.update({f"query_{k}": v for k, v in rows[i].items()})
-                     all_results.append(error_row)
-
-         return pl.DataFrame(all_results) if all_results else pl.DataFrame()
-
-     def _execute_single_request(self, original_method: Callable[..., Any], payload: Dict[str, Any]) -> Any:
-         """Execute a single API request."""
-         # Call the original method with the payload as keyword arguments
-         return original_method(**payload)
-
-     def _process_response(self, method_name: str, result: BaseModel, original_query_row: Dict[str, Any]) -> List[Dict[str, Any]]:
-         """Process API response using endpoint configuration to create clean DataFrame rows."""
-
-         # Build endpoint key (e.g., 'search_api.google_search')
-         # Convert class name to resource name by removing 'Resource' suffix and converting to snake_case
-         class_name = self._service.__class__.__name__
-         resource_name = class_name.replace('Resource', '')
-         # Convert from PascalCase to snake_case
-         resource_name = re.sub(r'([A-Z]+)([A-Z][a-z])', r'\1_\2', resource_name)
-         resource_name = re.sub(r'([a-z\d])([A-Z])', r'\1_\2', resource_name)
-         resource_name = resource_name.lower()
-         endpoint_key = f"{resource_name}.{method_name}"
-
-         # Get configuration for this endpoint
-         config = ENDPOINT_CONFIGS.get(endpoint_key, EndpointConfig())
-
-         # Convert Pydantic model to dict
-         data: Any = result.model_dump()
-
-         # Determine what to iterate over
-         items: List[Dict[str, Any]]
-         if config.expand_path:
-             # Extract list from the specified path
-             extracted = self._get_by_path(data, config.expand_path)
-             if not isinstance(extracted, list):
-                 raise ValueError(f"Expected list at path '{config.expand_path}', got {type(extracted).__name__}")
-             items = extracted  # type: ignore[assignment]
-         elif isinstance(data, list):
-             # Direct list response
-             items = data  # type: ignore[assignment]
-         else:
-             # Single dict response
-             items = [data]
-
-         # Extract properties from each item
-         processed_rows: List[Dict[str, Any]] = []
-         for item in items:
-             if config.properties:
-                 # Extract only specified properties
-                 row: Dict[str, Any] = {}
-                 for prop_path in config.properties:
-                     value = self._get_by_path(item, prop_path)
-                     # Use last part of path as column name (e.g., 'source.name' -> 'name')
-                     col_name = prop_path.split('.')[-1]
-                     row[col_name] = value
-             else:
-                 # No properties specified - use whole item
-                 row = item  # type: ignore[assignment]
-
-             # Add original query data with 'query_' prefix
-             row.update({f"query_{k}": v for k, v in original_query_row.items()})
-             processed_rows.append(row)
-
-         return processed_rows
-
-     def _get_by_path(self, data: Any, path: str) -> Any:
-         """Get value from nested dict using dot notation (e.g., 'source.name')."""
-         if not path:
-             return data
-
-         current = data
-         for key in path.split('.'):
-             if isinstance(current, dict):
-                 current = current.get(key)  # type: ignore[union-attr]
-             else:
-                 return None
-             if current is None:
-                 return None
-         return current  # type: ignore[return-value]
-
-
-
- class ServicesProxy:
-     """
-     Proxy for the main ExternalResource that wraps all service endpoints
-     with DataFrame batch processing capability.
-     """
-
-     def __init__(self, client: Any) -> None:
-         self._client = client
-         self._external_resource = ExternalResource(client)
-
-     @property
-     def news(self) -> EndpointProxy:
-         """News API with DataFrame batch processing."""
-         return EndpointProxy(self._external_resource.news)
-
-     @property
-     def people(self) -> EndpointProxy:
-         """People/Apollo API with DataFrame batch processing."""
-         return EndpointProxy(self._external_resource.people)
-
-     @property
-     def search_api(self) -> EndpointProxy:
-         """Search API with DataFrame batch processing."""
-         return EndpointProxy(self._external_resource.search_api)
-
-     def __getattr__(self, name: str) -> Any:
-         """Delegate any other attributes to the original external resource."""
-         return getattr(self._external_resource, name)
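Context for the removal above: `ServicesProxy` was the DataFrame batch layer over the external endpoints. Any method on `news`, `people`, or `search_api` accepted a `polars.DataFrame` as its first positional argument; each row was fanned out as a keyword-argument API call on a 20-worker thread pool, and responses were flattened per `ENDPOINT_CONFIGS`. A minimal usage sketch of the removed pattern against the 1.179.0 layout — the `Structify` client name and the `q` search parameter are assumptions for illustration, not confirmed by this diff:

```python
import polars as pl

from structify import Structify  # assumed client entry point
# This import path exists only in <= 1.179.0; the module is deleted in 1.182.0.
from structify.resources.external_dataframe_proxy import ServicesProxy

client = Structify()
services = ServicesProxy(client)

# One row per request; column names must match the endpoint's keyword
# arguments, since each row is splatted as original_method(**row).
queries = pl.DataFrame({"q": ["structifyai", "polars dataframes"]})  # 'q' is hypothetical

# Rows run in parallel. Per the 'search_api.google_search' config, each
# organic result becomes a row with 'link', 'title', 'snippet', and
# 'display_link' columns, plus 'query_q' carrying the originating query.
# Failed rows surface as rows with an 'error' column instead of raising.
results = services.search_api.google_search(queries)
print(results.columns)
```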
structify/types/team_wiki_page.py +0 -28
@@ -1,28 +0,0 @@
- # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
- from typing import Dict, Optional
- from datetime import datetime
-
- from .._models import BaseModel
-
- __all__ = ["TeamWikiPage"]
-
-
- class TeamWikiPage(BaseModel):
-     id: str
-
-     content: Dict[str, object]
-
-     created_at: datetime
-
-     created_by: str
-
-     slug: str
-
-     team_id: str
-
-     title: str
-
-     updated_at: datetime
-
-     deleted_at: Optional[datetime] = None
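`TeamWikiPage` goes away as the team-scoped wiki types are reworked; per the file list, its role appears taken over by the new `structify/types/wiki_page.py` (+23 lines). For reference, a standalone sketch of the payload shape the removed model validated, using plain `pydantic` in place of the SDK's internal `.._models.BaseModel`; the field values are hypothetical:

```python
from datetime import datetime
from typing import Dict, Optional

from pydantic import BaseModel


class TeamWikiPage(BaseModel):
    id: str
    content: Dict[str, object]
    created_at: datetime
    created_by: str
    slug: str
    team_id: str
    title: str
    updated_at: datetime
    deleted_at: Optional[datetime] = None  # soft-delete marker, None while live


# Hypothetical payload; ISO-8601 strings are coerced to datetime by pydantic.
page = TeamWikiPage.model_validate(
    {
        "id": "wp_123",
        "content": {"blocks": []},
        "created_at": "2024-01-01T00:00:00Z",
        "created_by": "user_1",
        "slug": "getting-started",
        "team_id": "team_9",
        "title": "Getting Started",
        "updated_at": "2024-01-02T00:00:00Z",
    }
)
assert page.deleted_at is None
```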
structify/types/teams_link_code_response.py +0 -13
@@ -1,13 +0,0 @@
- # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
- from datetime import datetime
-
- from .._models import BaseModel
-
- __all__ = ["TeamsLinkCodeResponse"]
-
-
- class TeamsLinkCodeResponse(BaseModel):
-     code: str
-
-     expires_at: datetime
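`TeamsLinkCodeResponse` disappears with the link-code flow (the file list also shows `team_create_link_code_params.py` renamed to `cell_edit_param.py`). Beyond reading `code`, the only client-side logic the shape supports is an expiry check; a sketch, assuming `expires_at` parses as a timezone-aware datetime (typical for ISO-8601 payloads with an offset):

```python
from datetime import datetime, timezone

from pydantic import BaseModel


class TeamsLinkCodeResponse(BaseModel):
    code: str
    expires_at: datetime


def is_code_usable(resp: TeamsLinkCodeResponse) -> bool:
    # Only present a code before its server-supplied expiry; comparing a
    # naive and an aware datetime would raise, hence the aware assumption.
    return datetime.now(timezone.utc) < resp.expires_at


# Hypothetical payload for illustration.
resp = TeamsLinkCodeResponse.model_validate(
    {"code": "ABC123", "expires_at": "2099-01-01T00:00:00Z"}
)
assert is_code_usable(resp)
```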