airbyte-agent-slack 0.1.3__py3-none-any.whl → 0.1.8__py3-none-any.whl

This diff shows the changes between two publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only.
@@ -17,15 +17,30 @@ from .models import (
  ChannelPurpose,
  ChannelsListResponse,
  ChannelResponse,
- Attachment,
  Reaction,
  File,
+ Attachment,
  Message,
  Thread,
  EditedInfo,
  BotProfile,
  MessagesListResponse,
  ThreadRepliesResponse,
+ MessageCreateParams,
+ CreatedMessage,
+ MessageCreateResponse,
+ MessageUpdateParams,
+ MessageUpdateResponse,
+ ChannelCreateParams,
+ ChannelCreateResponse,
+ ChannelRenameParams,
+ ChannelRenameResponse,
+ ChannelTopicParams,
+ ChannelTopicResponse,
+ ChannelPurposeParams,
+ ChannelPurposeResponse,
+ ReactionAddParams,
+ ReactionAddResponse,
  UsersListResultMeta,
  ChannelsListResultMeta,
  ChannelMessagesListResultMeta,
@@ -43,7 +58,14 @@ from .types import (
  ChannelsListParams,
  ChannelsGetParams,
  ChannelMessagesListParams,
- ThreadsListParams
+ ThreadsListParams,
+ MessagesCreateParams,
+ MessagesUpdateParams,
+ ChannelsCreateParams,
+ ChannelsUpdateParams,
+ ChannelTopicsCreateParams,
+ ChannelPurposesCreateParams,
+ ReactionsCreateParams
  )

  __all__ = [
@@ -59,15 +81,30 @@ __all__ = [
  "ChannelPurpose",
  "ChannelsListResponse",
  "ChannelResponse",
- "Attachment",
  "Reaction",
  "File",
+ "Attachment",
  "Message",
  "Thread",
  "EditedInfo",
  "BotProfile",
  "MessagesListResponse",
  "ThreadRepliesResponse",
+ "MessageCreateParams",
+ "CreatedMessage",
+ "MessageCreateResponse",
+ "MessageUpdateParams",
+ "MessageUpdateResponse",
+ "ChannelCreateParams",
+ "ChannelCreateResponse",
+ "ChannelRenameParams",
+ "ChannelRenameResponse",
+ "ChannelTopicParams",
+ "ChannelTopicResponse",
+ "ChannelPurposeParams",
+ "ChannelPurposeResponse",
+ "ReactionAddParams",
+ "ReactionAddResponse",
  "UsersListResultMeta",
  "ChannelsListResultMeta",
  "ChannelMessagesListResultMeta",
@@ -84,4 +121,11 @@ __all__ = [
  "ChannelsGetParams",
  "ChannelMessagesListParams",
  "ThreadsListParams",
+ "MessagesCreateParams",
+ "MessagesUpdateParams",
+ "ChannelsCreateParams",
+ "ChannelsUpdateParams",
+ "ChannelTopicsCreateParams",
+ "ChannelPurposesCreateParams",
+ "ReactionsCreateParams",
  ]
@@ -987,7 +987,9 @@ class LocalExecutor:

  # Substitute variables from params
  if "variables" in graphql_config and graphql_config["variables"]:
- body["variables"] = self._interpolate_variables(graphql_config["variables"], params, param_defaults)
+ variables = self._interpolate_variables(graphql_config["variables"], params, param_defaults)
+ # Filter out None values (optional fields not provided) - matches REST _extract_body() behavior
+ body["variables"] = {k: v for k, v in variables.items() if v is not None}

  # Add operation name if specified
  if "operationName" in graphql_config:
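The hunk above stops sending null GraphQL variables for optional parameters the caller never provided. A minimal standalone sketch of that filtering effect (the variable names and values are hypothetical, not taken from the package):

    # Hypothetical interpolation result for an optional "thread_ts" parameter
    variables = {"channel": "C012AB3CD", "text": "hello", "thread_ts": None}
    # Drop keys whose value is None so only provided fields reach the GraphQL server
    body = {"variables": {k: v for k, v in variables.items() if v is not None}}
    assert body["variables"] == {"channel": "C012AB3CD", "text": "hello"}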
@@ -1222,15 +1224,22 @@ class LocalExecutor:
  def _extract_metadata(
  self,
  response_data: dict[str, Any],
+ response_headers: dict[str, str],
  endpoint: EndpointDefinition,
  ) -> dict[str, Any] | None:
  """Extract metadata from response using meta extractor.

- Each field in meta_extractor dict is independently extracted using JSONPath.
+ Each field in meta_extractor dict is independently extracted using JSONPath
+ for body extraction, or special prefixes for header extraction:
+ - @link.{rel}: Extract URL from RFC 5988 Link header by rel type
+ - @header.{name}: Extract raw header value by header name
+ - Otherwise: JSONPath expression for body extraction
+
  Missing or invalid paths result in None for that field (no crash).

  Args:
  response_data: Full API response (before record extraction)
+ response_headers: HTTP response headers
  endpoint: Endpoint with optional meta extractor configuration

  Returns:
@@ -1241,11 +1250,15 @@ class LocalExecutor:
  Example:
  meta_extractor = {
  "pagination": "$.records",
- "request_id": "$.requestId"
+ "request_id": "$.requestId",
+ "next_page_url": "@link.next",
+ "rate_limit": "@header.X-RateLimit-Remaining"
  }
  Returns: {
  "pagination": {"cursor": "abc", "total": 100},
- "request_id": "xyz123"
+ "request_id": "xyz123",
+ "next_page_url": "https://api.example.com/data?cursor=abc",
+ "rate_limit": "99"
  }
  """
  # Check if endpoint has meta extractor
@@ -1255,26 +1268,96 @@ class LocalExecutor:
  extracted_meta: dict[str, Any] = {}

  # Extract each field independently
- for field_name, jsonpath_expr_str in endpoint.meta_extractor.items():
+ for field_name, extractor_expr in endpoint.meta_extractor.items():
  try:
- # Parse and apply JSONPath expression
- jsonpath_expr = parse_jsonpath(jsonpath_expr_str)
- matches = [match.value for match in jsonpath_expr.find(response_data)]
-
- if matches:
- # Return first match (most common case)
- extracted_meta[field_name] = matches[0]
+ if extractor_expr.startswith("@link."):
+ # RFC 5988 Link header extraction
+ rel = extractor_expr[6:]
+ extracted_meta[field_name] = self._extract_link_url(response_headers, rel)
+ elif extractor_expr.startswith("@header."):
+ # Raw header value extraction (case-insensitive lookup)
+ header_name = extractor_expr[8:]
+ extracted_meta[field_name] = self._get_header_value(response_headers, header_name)
  else:
- # Path not found - set to None
- extracted_meta[field_name] = None
+ # JSONPath body extraction
+ jsonpath_expr = parse_jsonpath(extractor_expr)
+ matches = [match.value for match in jsonpath_expr.find(response_data)]
+
+ if matches:
+ # Return first match (most common case)
+ extracted_meta[field_name] = matches[0]
+ else:
+ # Path not found - set to None
+ extracted_meta[field_name] = None

  except Exception as e:
  # Log error but continue with other fields
- logging.warning(f"Failed to apply meta extractor for field '{field_name}' with path '{jsonpath_expr_str}': {e}. Setting to None.")
+ logging.warning(f"Failed to apply meta extractor for field '{field_name}' with expression '{extractor_expr}': {e}. Setting to None.")
  extracted_meta[field_name] = None

  return extracted_meta

+ @staticmethod
+ def _extract_link_url(headers: dict[str, str], rel: str) -> str | None:
+ """Extract URL from RFC 5988 Link header by rel type.
+
+ Parses Link header format: <url>; param1="value1"; rel="next"; param2="value2"
+
+ Supports:
+ - Multiple parameters per link in any order
+ - Both quoted and unquoted rel values
+ - Multiple links separated by commas
+
+ Args:
+ headers: Response headers dict
+ rel: The rel type to extract (e.g., "next", "prev", "first", "last")
+
+ Returns:
+ The URL for the specified rel type, or None if not found
+ """
+ link_header = headers.get("Link") or headers.get("link", "")
+ if not link_header:
+ return None
+
+ for link_segment in re.split(r",(?=\s*<)", link_header):
+ link_segment = link_segment.strip()
+
+ url_match = re.match(r"<([^>]+)>", link_segment)
+ if not url_match:
+ continue
+
+ url = url_match.group(1)
+ params_str = link_segment[url_match.end() :]
+
+ rel_match = re.search(r';\s*rel="?([^";,]+)"?', params_str, re.IGNORECASE)
+ if rel_match and rel_match.group(1).strip() == rel:
+ return url
+
+ return None
+
+ @staticmethod
+ def _get_header_value(headers: dict[str, str], header_name: str) -> str | None:
+ """Get header value with case-insensitive lookup.
+
+ Args:
+ headers: Response headers dict
+ header_name: Header name to look up
+
+ Returns:
+ Header value or None if not found
+ """
+ # Try exact match first
+ if header_name in headers:
+ return headers[header_name]
+
+ # Case-insensitive lookup
+ header_name_lower = header_name.lower()
+ for key, value in headers.items():
+ if key.lower() == header_name_lower:
+ return value
+
+ return None
+
  def _validate_required_body_fields(self, endpoint: Any, params: dict[str, Any], action: Action, entity: str) -> None:
  """Validate that required body fields are present for CREATE/UPDATE operations.

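For illustration, here is a self-contained sketch of what the new @link.{rel} extraction does, using the same regex approach as _extract_link_url above (the sample Link header and URLs are made up):

    import re

    # Hypothetical paginated-API response headers
    headers = {
        "Link": '<https://api.example.com/items?cursor=abc>; rel="next", '
                '<https://api.example.com/items?cursor=zzz>; rel="last"',
    }

    def extract_link_url(headers, rel):
        # Split on commas that precede a "<", pull the <url> part,
        # then match the rel parameter (quoted or unquoted).
        link_header = headers.get("Link") or headers.get("link", "")
        for segment in re.split(r",(?=\s*<)", link_header):
            segment = segment.strip()
            url_match = re.match(r"<([^>]+)>", segment)
            if not url_match:
                continue
            params_str = segment[url_match.end():]
            rel_match = re.search(r';\s*rel="?([^";,]+)"?', params_str, re.IGNORECASE)
            if rel_match and rel_match.group(1).strip() == rel:
                return url_match.group(1)
        return None

    assert extract_link_url(headers, "next") == "https://api.example.com/items?cursor=abc"
    assert extract_link_url(headers, "missing") is None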
@@ -1402,7 +1485,7 @@ class _StandardOperationHandler:
  request_kwargs = self.ctx.determine_request_format(endpoint, body)

  # Execute async HTTP request
- response = await self.ctx.http_client.request(
+ response_data, response_headers = await self.ctx.http_client.request(
  method=endpoint.method,
  path=path,
  params=query_params if query_params else None,
@@ -1411,10 +1494,10 @@ class _StandardOperationHandler:
  )

  # Extract metadata from original response (before record extraction)
- metadata = self.ctx.executor._extract_metadata(response, endpoint)
+ metadata = self.ctx.executor._extract_metadata(response_data, response_headers, endpoint)

  # Extract records if extractor configured
- response = self.ctx.extract_records(response, endpoint)
+ response = self.ctx.extract_records(response_data, endpoint)

  # Assume success with 200 status code if no exception raised
  status_code = 200
@@ -1540,7 +1623,7 @@ class _DownloadOperationHandler:
  request_format = self.ctx.determine_request_format(operation, request_body)
  self.ctx.validate_required_body_fields(operation, params, action, entity)

- metadata_response = await self.ctx.http_client.request(
+ metadata_response, _ = await self.ctx.http_client.request(
  method=operation.method,
  path=path,
  params=query_params,
@@ -1555,7 +1638,7 @@ class _DownloadOperationHandler:
  )

  # Step 3: Stream file from extracted URL
- file_response = await self.ctx.http_client.request(
+ file_response, _ = await self.ctx.http_client.request(
  method="GET",
  path=file_url,
  headers=headers,
@@ -1563,7 +1646,7 @@ class _DownloadOperationHandler:
  )
  else:
  # One-step direct download: stream file directly from endpoint
- file_response = await self.ctx.http_client.request(
+ file_response, _ = await self.ctx.http_client.request(
  method=operation.method,
  path=path,
  params=query_params,
@@ -421,10 +421,14 @@ class HTTPClient:
  headers: dict[str, str] | None = None,
  *,
  stream: bool = False,
- ):
+ ) -> tuple[dict[str, Any], dict[str, str]]:
  """Execute a single HTTP request attempt (no retries).

  This is the core request logic, separated from retry handling.
+
+ Returns:
+ Tuple of (response_data, response_headers) for non-streaming requests.
+ For streaming requests, returns (response_object, response_headers).
  """
  # Ensure auth credentials are initialized (proactive refresh if needed)
  await self._ensure_auth_initialized()
@@ -474,8 +478,9 @@ class HTTPClient:
  request_id=request_id,
  status_code=status_code,
  response_body=f"<binary content, {response.headers.get('content-length', 'unknown')} bytes>",
+ response_headers=dict(response.headers),
  )
- return response
+ return response, dict(response.headers)

  # Parse response - handle non-JSON responses gracefully
  content_type = response.headers.get("content-type", "")
@@ -500,8 +505,9 @@ class HTTPClient:
  request_id=request_id,
  status_code=status_code,
  response_body=response_data,
+ response_headers=dict(response.headers),
  )
- return response_data
+ return response_data, dict(response.headers)

  except AuthenticationError as e:
  # Auth error (401, 403) - handle token refresh
@@ -631,7 +637,7 @@ class HTTPClient:
  *,
  stream: bool = False,
  _auth_retry_attempted: bool = False,
- ):
+ ) -> tuple[dict[str, Any], dict[str, str]]:
  """Make an async HTTP request with optional streaming and automatic retries.

  Args:
@@ -644,8 +650,9 @@ class HTTPClient:
  stream: If True, do not eagerly read the body (useful for downloads)

  Returns:
- - If stream=False: Parsed JSON (dict) or empty dict
- - If stream=True: Response object suitable for streaming
+ Tuple of (response_data, response_headers):
+ - If stream=False: (parsed JSON dict or empty dict, response headers dict)
+ - If stream=True: (response object suitable for streaming, response headers dict)

  Raises:
  HTTPStatusError: If request fails with 4xx/5xx status after all retries
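With this signature change every caller unpacks a (data, headers) tuple, as the handler hunks above show. A minimal sketch of the calling pattern (the client instance, path, and header name here are placeholders, not the package's real configuration):

    # Hypothetical call site; `client` stands for an HTTPClient configured elsewhere
    async def fetch_channels(client):
        response_data, response_headers = await client.request(
            method="GET",
            path="/conversations.list",
            params={"limit": 100},
        )
        # Headers now travel alongside the parsed body, e.g. for rate-limit metadata
        return response_data, response_headers.get("X-RateLimit-Remaining")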
@@ -134,6 +134,7 @@ class RequestLogger:
  request_id: str,
  status_code: int,
  response_body: Any | None = None,
+ response_headers: Dict[str, str] | None = None,
  ) -> None:
  """
  Log a successful HTTP response.
@@ -142,6 +143,7 @@ class RequestLogger:
  request_id: ID returned from log_request
  status_code: HTTP status code
  response_body: Response body
+ response_headers: Response headers
  """
  if request_id not in self._active_requests:
  return
@@ -166,6 +168,7 @@ class RequestLogger:
  body=request_data["body"],
  response_status=status_code,
  response_body=serializable_body,
+ response_headers=response_headers or {},
  timing_ms=timing_ms,
  )

@@ -243,7 +246,13 @@ class NullLogger:
  """No-op log_request."""
  return ""

- def log_response(self, *args, **kwargs) -> None:
+ def log_response(
+ self,
+ request_id: str,
+ status_code: int,
+ response_body: Any | None = None,
+ response_headers: Dict[str, str] | None = None,
+ ) -> None:
  """No-op log_response."""
  pass

@@ -31,6 +31,7 @@ class RequestLog(BaseModel):
  body: Any | None = None
  response_status: int | None = None
  response_body: Any | None = None
+ response_headers: Dict[str, str] = Field(default_factory=dict)
  timing_ms: float | None = None
  error: str | None = None

@@ -486,30 +486,36 @@ def validate_meta_extractor_fields(
  response_body = spec.captured_response.body

  # Validate each meta extractor field
- for field_name, jsonpath_expr in endpoint.meta_extractor.items():
+ for field_name, extractor_expr in endpoint.meta_extractor.items():
+ # Skip header-based extractors - they extract from headers, not response body
+ # @link.next extracts from RFC 5988 Link header
+ # @header.X-Name extracts raw header value
+ if extractor_expr.startswith("@link.") or extractor_expr.startswith("@header."):
+ continue
+
  # Check 1: Does the JSONPath find data in the actual response?
  try:
- parsed_expr = parse_jsonpath(jsonpath_expr)
+ parsed_expr = parse_jsonpath(extractor_expr)
  matches = [match.value for match in parsed_expr.find(response_body)]

  if not matches:
  warnings.append(
  f"{entity_name}.{action}: x-airbyte-meta-extractor field '{field_name}' "
- f"with JSONPath '{jsonpath_expr}' found no matches in cassette response"
+ f"with JSONPath '{extractor_expr}' found no matches in cassette response"
  )
  except Exception as e:
  warnings.append(
- f"{entity_name}.{action}: x-airbyte-meta-extractor field '{field_name}' has invalid JSONPath '{jsonpath_expr}': {str(e)}"
+ f"{entity_name}.{action}: x-airbyte-meta-extractor field '{field_name}' has invalid JSONPath '{extractor_expr}': {str(e)}"
  )

  # Check 2: Is this field path declared in the response schema?
  if endpoint.response_schema:
- field_in_schema = _check_field_in_schema(jsonpath_expr, endpoint.response_schema)
+ field_in_schema = _check_field_in_schema(extractor_expr, endpoint.response_schema)

  if not field_in_schema:
  warnings.append(
  f"{entity_name}.{action}: x-airbyte-meta-extractor field '{field_name}' "
- f"extracts from '{jsonpath_expr}' but this path is not declared in response schema"
+ f"extracts from '{extractor_expr}' but this path is not declared in response schema"
  )

  except Exception as e:
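Because @link.* and @header.* extractors read response headers rather than the body, the validator above now skips them instead of flagging them as unmatched JSONPath. A small standalone sketch of that dispatch (the extractor mapping is illustrative, not taken from the Slack spec):

    # Hypothetical x-airbyte-meta-extractor mapping
    meta_extractor = {
        "request_id": "$.response_metadata.request_id",  # body: validated via JSONPath
        "next_page_url": "@link.next",                   # header: skipped by the validator
        "rate_limit": "@header.X-RateLimit-Remaining",   # header: skipped by the validator
    }
    body_fields = {
        name: expr
        for name, expr in meta_extractor.items()
        if not expr.startswith(("@link.", "@header."))
    }
    assert list(body_fields) == ["request_id"]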