sfq 0.0.42__tar.gz → 0.0.43__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (81)
  1. {sfq-0.0.42 → sfq-0.0.43}/PKG-INFO +1 -1
  2. {sfq-0.0.42 → sfq-0.0.43}/pyproject.toml +1 -1
  3. {sfq-0.0.42 → sfq-0.0.43}/src/sfq/__init__.py +4 -4
  4. {sfq-0.0.42 → sfq-0.0.43}/src/sfq/http_client.py +1 -1
  5. {sfq-0.0.42 → sfq-0.0.43}/src/sfq/utils.py +68 -0
  6. sfq-0.0.43/tests/test_SFTokenAuth_e2e.py +116 -0
  7. sfq-0.0.43/tests/test_fuzzing.py +659 -0
  8. {sfq-0.0.42 → sfq-0.0.43}/uv.lock +1 -1
  9. sfq-0.0.42/tests/test_SFTokenAuth_e2e.py +0 -58
  10. {sfq-0.0.42 → sfq-0.0.43}/.github/workflows/publish.yml +0 -0
  11. {sfq-0.0.42 → sfq-0.0.43}/.gitignore +0 -0
  12. {sfq-0.0.42 → sfq-0.0.43}/.python-version +0 -0
  13. {sfq-0.0.42 → sfq-0.0.43}/README.md +0 -0
  14. {sfq-0.0.42 → sfq-0.0.43}/src/sfq/_cometd.py +0 -0
  15. {sfq-0.0.42 → sfq-0.0.43}/src/sfq/auth.py +0 -0
  16. {sfq-0.0.42 → sfq-0.0.43}/src/sfq/crud.py +0 -0
  17. {sfq-0.0.42 → sfq-0.0.43}/src/sfq/debug_cleanup.py +0 -0
  18. {sfq-0.0.42 → sfq-0.0.43}/src/sfq/exceptions.py +0 -0
  19. {sfq-0.0.42 → sfq-0.0.43}/src/sfq/platform_events.py +0 -0
  20. {sfq-0.0.42 → sfq-0.0.43}/src/sfq/py.typed +0 -0
  21. {sfq-0.0.42 → sfq-0.0.43}/src/sfq/query.py +0 -0
  22. {sfq-0.0.42 → sfq-0.0.43}/src/sfq/soap.py +0 -0
  23. {sfq-0.0.42 → sfq-0.0.43}/src/sfq/timeout_detector.py +0 -0
  24. {sfq-0.0.42 → sfq-0.0.43}/tests/html/test_complex_nested.html +0 -0
  25. {sfq-0.0.42 → sfq-0.0.43}/tests/html/test_complex_nested_styled.html +0 -0
  26. {sfq-0.0.42 → sfq-0.0.43}/tests/html/test_empty_list.html +0 -0
  27. {sfq-0.0.42 → sfq-0.0.43}/tests/html/test_empty_list_styled.html +0 -0
  28. {sfq-0.0.42 → sfq-0.0.43}/tests/html/test_int_float_bool.html +0 -0
  29. {sfq-0.0.42 → sfq-0.0.43}/tests/html/test_int_float_bool_styled.html +0 -0
  30. {sfq-0.0.42 → sfq-0.0.43}/tests/html/test_list_value.html +0 -0
  31. {sfq-0.0.42 → sfq-0.0.43}/tests/html/test_list_value_styled.html +0 -0
  32. {sfq-0.0.42 → sfq-0.0.43}/tests/html/test_multiple_dicts.html +0 -0
  33. {sfq-0.0.42 → sfq-0.0.43}/tests/html/test_multiple_dicts_styled.html +0 -0
  34. {sfq-0.0.42 → sfq-0.0.43}/tests/html/test_nested_dict.html +0 -0
  35. {sfq-0.0.42 → sfq-0.0.43}/tests/html/test_nested_dict_styled.html +0 -0
  36. {sfq-0.0.42 → sfq-0.0.43}/tests/html/test_none_value.html +0 -0
  37. {sfq-0.0.42 → sfq-0.0.43}/tests/html/test_none_value_styled.html +0 -0
  38. {sfq-0.0.42 → sfq-0.0.43}/tests/html/test_other_types.html +0 -0
  39. {sfq-0.0.42 → sfq-0.0.43}/tests/html/test_other_types_styled.html +0 -0
  40. {sfq-0.0.42 → sfq-0.0.43}/tests/html/test_sample_report.html +0 -0
  41. {sfq-0.0.42 → sfq-0.0.43}/tests/html/test_sample_report_styled.html +0 -0
  42. {sfq-0.0.42 → sfq-0.0.43}/tests/html/test_single_flat_dict.html +0 -0
  43. {sfq-0.0.42 → sfq-0.0.43}/tests/html/test_single_flat_dict_styled.html +0 -0
  44. {sfq-0.0.42 → sfq-0.0.43}/tests/html/test_typecastable_keys_bool.html +0 -0
  45. {sfq-0.0.42 → sfq-0.0.43}/tests/html/test_typecastable_keys_bool_styled.html +0 -0
  46. {sfq-0.0.42 → sfq-0.0.43}/tests/html/test_typecastable_keys_float.html +0 -0
  47. {sfq-0.0.42 → sfq-0.0.43}/tests/html/test_typecastable_keys_float_styled.html +0 -0
  48. {sfq-0.0.42 → sfq-0.0.43}/tests/html/test_typecastable_keys_int.html +0 -0
  49. {sfq-0.0.42 → sfq-0.0.43}/tests/html/test_typecastable_keys_int_styled.html +0 -0
  50. {sfq-0.0.42 → sfq-0.0.43}/tests/test_auth.py +0 -0
  51. {sfq-0.0.42 → sfq-0.0.43}/tests/test_cdelete.py +0 -0
  52. {sfq-0.0.42 → sfq-0.0.43}/tests/test_compatibility.py +0 -0
  53. {sfq-0.0.42 → sfq-0.0.43}/tests/test_cquery.py +0 -0
  54. {sfq-0.0.42 → sfq-0.0.43}/tests/test_create.py +0 -0
  55. {sfq-0.0.42 → sfq-0.0.43}/tests/test_crud.py +0 -0
  56. {sfq-0.0.42 → sfq-0.0.43}/tests/test_crud_e2e.py +0 -0
  57. {sfq-0.0.42 → sfq-0.0.43}/tests/test_cupdate.py +0 -0
  58. {sfq-0.0.42 → sfq-0.0.43}/tests/test_debug_cleanup_e2e.py +0 -0
  59. {sfq-0.0.42 → sfq-0.0.43}/tests/test_debug_cleanup_unit.py +0 -0
  60. {sfq-0.0.42 → sfq-0.0.43}/tests/test_http_client.py +0 -0
  61. {sfq-0.0.42 → sfq-0.0.43}/tests/test_http_client_retry.py +0 -0
  62. {sfq-0.0.42 → sfq-0.0.43}/tests/test_limits_api.py +0 -0
  63. {sfq-0.0.42 → sfq-0.0.43}/tests/test_log_trace_redact.py +0 -0
  64. {sfq-0.0.42 → sfq-0.0.43}/tests/test_open_frontdoor.py +0 -0
  65. {sfq-0.0.42 → sfq-0.0.43}/tests/test_platform_events_e2e.py +0 -0
  66. {sfq-0.0.42 → sfq-0.0.43}/tests/test_publish_pe_baseline.py +0 -0
  67. {sfq-0.0.42 → sfq-0.0.43}/tests/test_query.py +0 -0
  68. {sfq-0.0.42 → sfq-0.0.43}/tests/test_query_client.py +0 -0
  69. {sfq-0.0.42 → sfq-0.0.43}/tests/test_query_client_timeout_integration.py +0 -0
  70. {sfq-0.0.42 → sfq-0.0.43}/tests/test_query_e2e.py +0 -0
  71. {sfq-0.0.42 → sfq-0.0.43}/tests/test_query_integration.py +0 -0
  72. {sfq-0.0.42 → sfq-0.0.43}/tests/test_records_to_html.py +0 -0
  73. {sfq-0.0.42 → sfq-0.0.43}/tests/test_soap.py +0 -0
  74. {sfq-0.0.42 → sfq-0.0.43}/tests/test_soap_batch_operation.py +0 -0
  75. {sfq-0.0.42 → sfq-0.0.43}/tests/test_static_resources.py +0 -0
  76. {sfq-0.0.42 → sfq-0.0.43}/tests/test_timeout_detector.py +0 -0
  77. {sfq-0.0.42 → sfq-0.0.43}/tests/test_timeout_edge_cases.py +0 -0
  78. {sfq-0.0.42 → sfq-0.0.43}/tests/test_timeout_scenarios_comprehensive.py +0 -0
  79. {sfq-0.0.42 → sfq-0.0.43}/tests/test_timeout_scenarios_summary.py +0 -0
  80. {sfq-0.0.42 → sfq-0.0.43}/tests/test_utils.py +0 -0
  81. {sfq-0.0.42 → sfq-0.0.43}/tests/test_utils_html_table.py +0 -0
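Taken together, 0.0.43 is a small release on top of 0.0.42: the version string is bumped in PKG-INFO, pyproject.toml, src/sfq/__init__.py, src/sfq/http_client.py, and uv.lock; src/sfq/utils.py gains new fuzz/defuzz obfuscation helpers; tests/test_SFTokenAuth_e2e.py is rewritten around a refresh-token flow; and a new tests/test_fuzzing.py exercises the obfuscation helpers. The hunks below follow the order of the file list.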
{sfq-0.0.42 → sfq-0.0.43}/PKG-INFO
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: sfq
- Version: 0.0.42
+ Version: 0.0.43
  Summary: Python wrapper for the Salesforce's Query API.
  Author-email: David Moruzzi <sfq.pypi@dmoruzi.com>
  Keywords: salesforce,salesforce query
{sfq-0.0.42 → sfq-0.0.43}/pyproject.toml
@@ -1,6 +1,6 @@
  [project]
  name = "sfq"
- version = "0.0.42"
+ version = "0.0.43"
  description = "Python wrapper for the Salesforce's Query API."
  readme = "README.md"
  authors = [{ name = "David Moruzzi", email = "sfq.pypi@dmoruzi.com" }]
{sfq-0.0.42 → sfq-0.0.43}/src/sfq/__init__.py
@@ -52,7 +52,7 @@ __all__ = [
  "PlatformEventsClient",
  ]

- __version__ = "0.0.42"
+ __version__ = "0.0.43"
  """
  ### `__version__`

@@ -70,7 +70,7 @@ class _SFTokenAuth:
  access_token: str,
  api_version: str = "v64.0",
  token_endpoint: str = "/services/oauth2/token",
- user_agent: str = "sfq/0.0.42",
+ user_agent: str = "sfq/0.0.43",
  sforce_client: str = "_auto",
  proxy: str = "_auto",
  ) -> None:
@@ -114,7 +114,7 @@ class SFAuth:
  access_token: Optional[str] = None,
  token_expiration_time: Optional[float] = None,
  token_lifetime: int = 15 * 60,
- user_agent: str = "sfq/0.0.42",
+ user_agent: str = "sfq/0.0.43",
  sforce_client: str = "_auto",
  proxy: str = "_auto",
  ) -> None:
@@ -185,7 +185,7 @@ class SFAuth:
  )

  # Store version information
- self.__version__ = "0.0.42"
+ self.__version__ = "0.0.43"
  """
  ### `__version__`

{sfq-0.0.42 → sfq-0.0.43}/src/sfq/http_client.py
@@ -29,7 +29,7 @@ class HTTPClient:
  def __init__(
  self,
  auth_manager: AuthManager,
- user_agent: str = "sfq/0.0.42",
+ user_agent: str = "sfq/0.0.43",
  sforce_client: str = "_auto",
  high_api_usage_threshold: int = 80,
  ) -> None:
{sfq-0.0.42 → sfq-0.0.43}/src/sfq/utils.py
@@ -6,6 +6,8 @@ used throughout the SFQ library, including the custom TRACE logging level and
  sensitive data redaction functionality.
  """

+ import base64
+ import hashlib
  import json
  import logging
  import re
@@ -374,3 +376,69 @@ def records_to_html_table(
  normalized_data.append(normalized_row)

  return dicts_to_html_table(normalized_data, styled=styled)
+
+
+ def fuzz(text: str, key: str, prefix_len: int = 4, suffix_len: int = 4) -> str:
+ """Lightweight XOR-based obfuscation with variable hash prefix/suffix (no separators).
+
+ Args:
+ text: The text to obfuscate
+ key: The key for XOR operation
+ prefix_len: Length of the MD5 hash prefix (default: 4)
+ suffix_len: Length of the SHA1 hash suffix (default: 4)
+
+ Returns:
+ Base64 encoded obfuscated string
+ """
+
+ prefix = hashlib.md5(text.encode()).hexdigest()[:prefix_len]
+ suffix = hashlib.sha1(text.encode()).hexdigest()[-suffix_len:] if suffix_len > 0 else ""
+
+ if not key:
+ combined = prefix + text + suffix
+ else:
+ fuzzed_chars = [
+ chr(ord(char) ^ ord(key[i % len(key)])) for i, char in enumerate(text)
+ ]
+ combined = prefix + ''.join(fuzzed_chars) + suffix
+
+ encoded = base64.b64encode(combined.encode("utf-8")).decode("utf-8")
+ return encoded
+
+
+ def defuzz(encoded_text: str, key: str, prefix_len: int = 4, suffix_len: int = 4) -> str:
+ """Reverse the fuzz transformation (no separators).
+
+ Args:
+ encoded_text: The base64 encoded obfuscated text
+ key: The key used for original XOR operation
+ prefix_len: Length of the MD5 hash prefix (must match encoding)
+ suffix_len: Length of the SHA1 hash suffix (must match encoding)
+
+ Returns:
+ The original decoded text
+
+ Raises:
+ ValueError: If encoded text format is invalid or corrupted
+ """
+
+ decoded = base64.b64decode(encoded_text.encode("utf-8")).decode("utf-8")
+
+ if len(decoded) < prefix_len + suffix_len:
+ raise ValueError("Invalid encoded text format or corrupted data.")
+
+ prefix = decoded[:prefix_len]
+ suffix = decoded[-suffix_len:] if suffix_len > 0 else ""
+ body = decoded[prefix_len:-suffix_len] if suffix_len > 0 else decoded[prefix_len:]
+
+ if len(prefix) != prefix_len or len(suffix) != suffix_len:
+ raise ValueError("Prefix/suffix length mismatch or corrupted data.")
+
+ if not key:
+ return body
+
+ defuzzed_chars = [
+ chr(ord(char) ^ ord(key[i % len(key)])) for i, char in enumerate(body)
+ ]
+
+ return ''.join(defuzzed_chars)
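For orientation, here is a minimal usage sketch of the new helpers (not taken from the sfq documentation; the values are made up and the behaviour is inferred from the diff above). fuzz() XORs the text with a repeating key, brackets the result with short MD5/SHA1 hash fragments, and base64-encodes it; defuzz() reverses the process. Note that this is lightweight obfuscation, not encryption.

    from sfq.utils import fuzz, defuzz

    secret = "example-session-token"  # hypothetical sensitive value
    key = "rotation-key"              # hypothetical obfuscation key

    encoded = fuzz(secret, key)       # base64(md5[:4] + XOR(secret, key) + sha1[-4:])
    restored = defuzz(encoded, key)   # strip the hash prefix/suffix, reverse the XOR
    assert restored == secret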
sfq-0.0.43/tests/test_SFTokenAuth_e2e.py (new file)
@@ -0,0 +1,116 @@
+ """
+ End-to-end tests for the SFTokenAuth module.
+
+ These tests run against a real Salesforce instance using environment variables
+ to ensure the SFTokenAuth functionality works correctly in practice.
+ """
+
+ import os
+ from typing import Dict, Any
+
+ import pytest
+
+ from sfq import SFAuth, _SFTokenAuth
+
+
+ # Environment variable names for Salesforce authentication
+ SF_ENV_VARS = {
+ "INSTANCE_URL": "SF_INSTANCE_URL",
+ "CLIENT_ID": "SF_CLIENT_ID",
+ "CLIENT_SECRET": "SF_CLIENT_SECRET",
+ "REFRESH_TOKEN": "SF_REFRESH_TOKEN"
+ }
+
+
+ def _validate_required_env_vars() -> None:
+ """Validate that all required environment variables are present."""
+ missing_vars = [
+ env_var for env_var in SF_ENV_VARS.values()
+ if not os.getenv(env_var)
+ ]
+
+ if missing_vars:
+ pytest.fail(f"Missing required environment variables: {', '.join(missing_vars)}")
+
+
+ def _get_auth_credentials() -> Dict[str, str]:
+ """Get authentication credentials from environment variables."""
+ _validate_required_env_vars()
+
+ return {
+ "instance_url": os.getenv(SF_ENV_VARS["INSTANCE_URL"], ""),
+ "client_id": os.getenv(SF_ENV_VARS["CLIENT_ID"], ""),
+ "client_secret": os.getenv(SF_ENV_VARS["CLIENT_SECRET"], "").strip(),
+ "refresh_token": os.getenv(SF_ENV_VARS["REFRESH_TOKEN"], ""),
+ }
+
+
+ @pytest.fixture(scope="module")
+ def sf_auth_instance() -> SFAuth:
+ """Create an SFAuth instance for E2E testing."""
+ credentials = _get_auth_credentials()
+ return SFAuth(**credentials)
+
+
+ @pytest.fixture(scope="module")
+ def token_auth_instance(sf_auth_instance: SFAuth) -> _SFTokenAuth:
+ """Create an _SFTokenAuth instance using access token from SFAuth."""
+ # Ensure we have a valid access token
+ sf_auth_instance._refresh_token_if_needed()
+
+ return _SFTokenAuth(
+ instance_url=sf_auth_instance.instance_url,
+ access_token=sf_auth_instance.access_token,
+ )
+
+
+ def _validate_query_response(response: Dict[str, Any], expected_record_count: int = 1) -> None:
+ """Validate that a query response contains the expected structure and data."""
+ # Basic response validation
+ assert response is not None, "Query response should not be None"
+ assert isinstance(response, dict), f"Query should return a dict, got: {type(response)}"
+
+ # Validate response structure
+ required_fields = ["records", "done", "totalSize"]
+ for field in required_fields:
+ assert field in response, f"Response missing required field '{field}': {response}"
+
+ # Validate records
+ records = response["records"]
+ assert len(records) == expected_record_count, (
+ f"Expected {expected_record_count} record(s), got {len(records)}: {response}"
+ )
+
+ # Validate record structure (for expected record count > 0)
+ if expected_record_count > 0:
+ record = records[0]
+ assert "Id" in record, f"Record missing 'Id' field: {record}"
+ assert record["Id"], f"Record 'Id' should not be empty: {record}"
+
+ # Validate query completion status
+ assert response["done"] is True, f"Query should be marked as done: {response}"
+
+ # Validate total size
+ assert response["totalSize"] == expected_record_count, (
+ f"Expected totalSize {expected_record_count}, got {response['totalSize']}: {response}"
+ )
+
+
+ def test_basic_query_execution(token_auth_instance: _SFTokenAuth) -> None:
+ """Test that a basic SOQL query executes successfully and returns expected results."""
+ # Define a simple query that should return at least one record
+ query = "SELECT Id FROM FeedComment LIMIT 1"
+
+ # Execute the query
+ response = token_auth_instance.query(query)
+
+ # Validate the response
+ _validate_query_response(response, expected_record_count=1)
+
+
+ def test_query_with_multiple_records(token_auth_instance: _SFTokenAuth) -> None:
+ """Test query execution that returns multiple records."""
+ query = "SELECT Id FROM FeedComment LIMIT 5"
+ response = token_auth_instance.query(query)
+
+ _validate_query_response(response, expected_record_count=5)
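Running these tests requires SF_INSTANCE_URL, SF_CLIENT_ID, SF_CLIENT_SECRET, and SF_REFRESH_TOKEN to point at a real org. As a rough sketch of what the module-scoped fixtures above do (mirroring the test code, not an official sfq example):

    import os

    from sfq import SFAuth, _SFTokenAuth

    sf = SFAuth(
        instance_url=os.environ["SF_INSTANCE_URL"],
        client_id=os.environ["SF_CLIENT_ID"],
        client_secret=os.environ["SF_CLIENT_SECRET"],
        refresh_token=os.environ["SF_REFRESH_TOKEN"],
    )
    sf._refresh_token_if_needed()  # refresh-token flow; yields a fresh access token

    token_client = _SFTokenAuth(
        instance_url=sf.instance_url,
        access_token=sf.access_token,
    )
    print(token_client.query("SELECT Id FROM FeedComment LIMIT 1"))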
sfq-0.0.43/tests/test_fuzzing.py (new file)
@@ -0,0 +1,659 @@
+ import base64
+ import pytest
+ from sfq.utils import fuzz, defuzz
+
+ # ============================================================================
+ # BASIC FUNCTIONALITY TESTS
+ # ============================================================================
+
+
+ @pytest.mark.parametrize("prefix_len,suffix_len", [(2, 2), (4, 6), (8, 8)])
+ def test_fuzz_defuzz_roundtrip(prefix_len: int, suffix_len: int) -> None:
+ """Test that fuzz/defuzz roundtrip works with various prefix/suffix lengths."""
+ text = "SensitiveData123!"
+ key = "mySecretKey"
+
+ encoded = fuzz(text, key, prefix_len=prefix_len, suffix_len=suffix_len)
+ decoded = defuzz(encoded, key, prefix_len=prefix_len, suffix_len=suffix_len)
+
+ assert decoded == text, (
+ f"Roundtrip failed for prefix={prefix_len}, suffix={suffix_len}"
+ )
+
+
+ def test_different_keys_produce_different_output() -> None:
+ """Test that different keys produce different encoded results."""
+ text = "hello world"
+ key1 = "keyA"
+ key2 = "keyB"
+
+ enc1 = fuzz(text, key1)
+ enc2 = fuzz(text, key2)
+
+ assert enc1 != enc2, "Different keys should produce different results"
+
+
+ def test_same_input_same_key_produces_same_output() -> None:
+ """Test that same input and key always produce identical output."""
+ text = "repeatable"
+ key = "staticKey"
+
+ enc1 = fuzz(text, key)
+ enc2 = fuzz(text, key)
+
+ assert enc1 == enc2, "Same input and key should always produce same output"
+
+
+ # ============================================================================
+ # ERROR HANDLING TESTS
+ # ============================================================================
+
+
+ def test_invalid_encoded_text_format() -> None:
+ """Test that invalid encoded text format raises appropriate exception."""
+ key = "abc"
+ bad_data = "not_base64|format"
+ with pytest.raises(Exception):
+ defuzz(bad_data, key)
+
+
+ def test_invalid_base64_format() -> None:
+ """Test that invalid base64 format raises appropriate exception."""
+ key = "abc"
+ bad_data = "invalid_base64_string!"
+ with pytest.raises(Exception):
+ defuzz(bad_data, key)
+
+
+ def test_prefix_suffix_length_mismatch() -> None:
+ """Test that mismatched prefix/suffix lengths raise ValueError."""
+ text = "TestData"
+ key = "abc123"
+ prefix_length = 4
+ suffix_length = 4
+
+ encoded = fuzz(text, key, prefix_len=prefix_length, suffix_len=suffix_length)
+ decoded = defuzz(encoded, key, prefix_len=prefix_length, suffix_len=suffix_length)
+ decoded_mismatch = defuzz(
+ encoded_text=encoded,
+ key=key,
+ prefix_len=prefix_length * 2,
+ suffix_len=suffix_length * 2,
+ )
+
+ assert decoded == text, "The decoded result should equal the original text"
+ assert decoded_mismatch != text, (
+ "The mismatched keys should not be equal to the original text"
+ )
+
+
+ def test_missing_separators() -> None:
+ """Test that encoded text missing separators is handled correctly."""
+ key = "abc"
+ # Create valid format without separators (this should work fine)
+ text = "noseparators"
+ encoded = fuzz(text, key)
+ # This should not raise an exception
+ decoded = defuzz(encoded, key)
+ assert decoded == text, "Text without separators should work fine"
+
+
+ def test_too_many_separators() -> None:
+ """Test that encoded text with too many separators raises ValueError."""
+ key = "abc"
+ # Create invalid format with too many separators
+ invalid_data = base64.b64encode("a|b|c|d".encode()).decode()
+ with pytest.raises(ValueError, match="Invalid encoded text format"):
+ defuzz(invalid_data, key)
+
+
+ # ============================================================================
+ # EDGE CASE TESTS
+ # ============================================================================
+
+
+ def test_empty_string() -> None:
+ """Test fuzzing/defuzzing of empty string."""
+ text = ""
+ key = "someKey"
+
+ encoded = fuzz(text, key)
+ decoded = defuzz(encoded, key)
+
+ assert decoded == text, "Empty string should survive roundtrip"
+
+
+ def test_single_character() -> None:
+ """Test fuzzing/defuzzing of single character."""
+ text = "a"
+ key = "someKey"
+
+ encoded = fuzz(text, key)
+ decoded = defuzz(encoded, key)
+
+ assert decoded == text, "Single character should survive roundtrip"
+
+
+ def test_unicode_support() -> None:
+ """Test that Unicode text survives roundtrip."""
+ text = "秘密情報🔒"
+ key = "unicodeKey"
+
+ encoded = fuzz(text, key, prefix_len=5, suffix_len=5)
+ decoded = defuzz(encoded, key, prefix_len=5, suffix_len=5)
+
+ assert decoded == text, "Unicode text should survive roundtrip"
+
+
+ def test_special_characters() -> None:
+ """Test fuzzing/defuzzing of special characters."""
+ text = "!@#$%^&*()_+-=[]{}|;':\",./<>?"
+ key = "specialKey"
+
+ encoded = fuzz(text, key)
+ decoded = defuzz(encoded, key)
+
+ assert decoded == text, "Special characters should survive roundtrip"
+
+
+ def test_newlines_and_whitespace() -> None:
+ """Test fuzzing/defuzzing of strings with newlines and whitespace."""
+ text = "Line 1\nLine 2\tTabbed\r\nCarriage Return"
+ key = "whitespaceKey"
+
+ encoded = fuzz(text, key)
+ decoded = defuzz(encoded, key)
+
+ assert decoded == text, "Newlines and whitespace should survive roundtrip"
+
+
+ def test_long_strings() -> None:
+ """Test fuzzing/defuzzing of very long strings."""
+ text = "A" * 10000 # 10KB string
+ key = "longKey"
+
+ encoded = fuzz(text, key)
+ decoded = defuzz(encoded, key)
+
+ assert decoded == text, "Long strings should survive roundtrip"
+
+
+ def test_binary_data() -> None:
+ """Test fuzzing/defuzzing of binary-like data."""
+ text = "\x00\x01\x02\x03\xff\xfe\xfd"
+ key = "binaryKey"
+
+ encoded = fuzz(text, key)
+ decoded = defuzz(encoded, key)
+
+ assert decoded == text, "Binary data should survive roundtrip"
+
+
+ # ============================================================================
+ # PARAMETER VARIATION TESTS
+ # ============================================================================
+
+
+ @pytest.mark.parametrize("prefix_len", [0, 1, 2, 4, 8, 16])
+ @pytest.mark.parametrize("suffix_len", [0, 1, 2, 4, 8, 16])
+ def test_various_prefix_suffix_lengths(prefix_len: int, suffix_len: int) -> None:
+ """Test fuzzing/defuzzing with various prefix/suffix length combinations."""
+ text = "Test data with varying lengths"
+ key = "testKey"
+
+ encoded = fuzz(text, key, prefix_len=prefix_len, suffix_len=suffix_len)
+ decoded = defuzz(encoded, key, prefix_len=prefix_len, suffix_len=suffix_len)
+
+ assert decoded == text, (
+ f"Roundtrip failed for prefix={prefix_len}, suffix={suffix_len}"
+ )
+
+
+ def test_zero_prefix_suffix_length() -> None:
+ """Test fuzzing/defuzzing with zero prefix/suffix lengths."""
+ text = "Test data"
+ key = "testKey"
+
+ encoded = fuzz(text, key, prefix_len=0, suffix_len=0)
+ decoded = defuzz(encoded, key, prefix_len=0, suffix_len=0)
+
+ assert decoded == text, "Zero prefix/suffix lengths should work"
+
+
+ def test_maximum_prefix_suffix_length() -> None:
+ """Test fuzzing/defuzzing with maximum reasonable prefix/suffix lengths."""
+ text = "Test data"
+ key = "testKey"
+
+ encoded = fuzz(text, key, prefix_len=32, suffix_len=32)
+ decoded = defuzz(encoded, key, prefix_len=32, suffix_len=32)
+
+ assert decoded == text, "Maximum prefix/suffix lengths should work"
+
+
+ # ============================================================================
+ # KEY VARIATION TESTS
+ # ============================================================================
+
+
+ @pytest.mark.parametrize("key", ["", "a", "ab", "abc", "very_long_key_string"])
+ def test_various_key_lengths(key: str) -> None:
+ """Test fuzzing/defuzzing with various key lengths."""
+ text = "Test data"
+
+ encoded = fuzz(text, key)
+ decoded = defuzz(encoded, key)
+
+ assert decoded == text, f"Roundtrip failed for key length {len(key)}"
+
+
+ def test_empty_key() -> None:
+ """Test fuzzing/defuzzing with empty key."""
+ text = "Test data"
+ key = ""
+
+ encoded = fuzz(text, key)
+ decoded = defuzz(encoded, key)
+
+ assert decoded == text, "Empty key should work"
+
+
+ def test_special_characters_in_key() -> None:
+ """Test fuzzing/defuzzing with special characters in key."""
+ text = "Test data"
+ key = "!@#$%^&*()"
+
+ encoded = fuzz(text, key)
+ decoded = defuzz(encoded, key)
+
+ assert decoded == text, "Special characters in key should work"
+
+
+ # ============================================================================
+ # SECURITY AND COLLISION TESTS
+ # ============================================================================
+
+
+ def test_different_inputs_same_key() -> None:
+ """Test that different inputs produce different outputs with same key."""
+ key = "sameKey"
+ text1 = "hello world"
+ text2 = "goodbye world"
+
+ enc1 = fuzz(text1, key)
+ enc2 = fuzz(text2, key)
+
+ assert enc1 != enc2, "Different inputs should produce different outputs"
+
+
+ def test_similar_inputs_different_output() -> None:
+ """Test that similar inputs produce different outputs."""
+ key = "testKey"
+ text1 = "hello world"
+ text2 = "hello worlD" # Only one character difference
+
+ enc1 = fuzz(text1, key)
+ enc2 = fuzz(text2, key)
+
+ assert enc1 != enc2, "Similar inputs should produce different outputs"
+
+
+ def test_hash_collision_resistance() -> None:
+ """Test that hash collisions are handled properly."""
+ # Test with inputs that might produce similar hash prefixes
+ key = "testKey"
+ text1 = "test"
+ text2 = "test " # Slightly different
+
+ enc1 = fuzz(text1, key, prefix_len=8, suffix_len=8)
+ enc2 = fuzz(text2, key, prefix_len=8, suffix_len=8)
+
+ # Even if hashes are similar, the XOR should make them different
+ assert enc1 != enc2, "Hash collisions should not produce identical outputs"
+
+
+ # ============================================================================
+ # PERFORMANCE TESTS
+ # ============================================================================
+
+
+ def test_performance_multiple_calls() -> None:
+ """Test that multiple calls to fuzz/defuzz are consistent."""
+ text = "Performance test data"
+ key = "perfKey"
+
+ # Test multiple calls to ensure consistency
+ results = []
+ for _ in range(10):
+ encoded = fuzz(text, key)
+ decoded = defuzz(encoded, key)
+ results.append(decoded)
+
+ # All results should be identical to original
+ for result in results:
+ assert result == text, "All roundtrips should produce identical results"
+
+
+ def test_large_data_performance() -> None:
+ """Test performance with large data sets."""
+ text = "Large data " * 1000 # ~15KB string
+ key = "largeKey"
+
+ encoded = fuzz(text, key)
+ decoded = defuzz(encoded, key)
+
+ assert decoded == text, "Large data should survive roundtrip"
+
+
+ # ============================================================================
+ # BACKWARD COMPATIBILITY TESTS
+ # ============================================================================
+
+
+ def test_default_parameters() -> None:
+ """Test that default parameters work correctly."""
+ text = "Test data"
+ key = "testKey"
+
+ # Test with default parameters (should be prefix_len=4, suffix_len=4)
+ encoded = fuzz(text, key)
+ decoded = defuzz(encoded, key)
+
+ assert decoded == text, "Default parameters should work"
+
+
+ def test_backward_compatibility() -> None:
+ """Test that existing code patterns still work."""
+ # Simulate old usage patterns
+ text = "Legacy data"
+ key = "legacyKey"
+
+ # Old style without explicit parameters
+ encoded = fuzz(text, key)
+ decoded = defuzz(encoded, key)
+
+ assert decoded == text, "Backward compatibility should be maintained"
+
+
+ # ============================================================================
+ # ENCODING SCENARIOS TESTS
+ # ============================================================================
+
+
+ def test_base64_encoding_robustness() -> None:
+ """Test that base64 encoding/decoding is robust."""
+ text = "Test data with various characters: áéíóú 中文 🚀"
+ key = "encodingKey"
+
+ encoded = fuzz(text, key)
+
+ # Verify the encoded result is valid base64
+ try:
+ decoded_base64 = base64.b64decode(encoded.encode()).decode()
+ # The implementation doesn't use separators, so we don't expect "|"
+ assert len(decoded_base64) > 0, "Decoded base64 should not be empty"
+ except Exception:
+ pytest.fail("Encoded result should be valid base64")
+
+ # Test roundtrip
+ decoded = defuzz(encoded, key)
+ assert decoded == text, "Base64 encoding should be robust"
+
+
+ def test_hash_algorithm_consistency() -> None:
+ """Test that hash algorithms produce consistent results."""
+ text = "Consistency test"
+ key = "hashKey"
+
+ # Multiple calls should produce identical hash prefixes/suffixes
+ encoded1 = fuzz(text, key, prefix_len=8, suffix_len=8)
+ encoded2 = fuzz(text, key, prefix_len=8, suffix_len=8)
+
+ # Extract the hash parts to verify consistency
+ decoded1 = base64.b64decode(encoded1.encode()).decode()
+ decoded2 = base64.b64decode(encoded2.encode()).decode()
+
+ # Extract prefix and suffix directly (no separators in implementation)
+ prefix1 = decoded1[:8]
+ suffix1 = decoded1[-8:]
+ prefix2 = decoded2[:8]
+ suffix2 = decoded2[-8:]
+
+ assert prefix1 == prefix2, "MD5 hash prefix should be consistent"
+ assert suffix1 == suffix2, "SHA1 hash suffix should be consistent"
+
+
+ def test_xor_operation_properties() -> None:
+ """Test XOR operation mathematical properties."""
+ text = "XOR test"
+ key = "xorKey"
+
+ # Test that XOR is reversible: (a ^ b) ^ b = a
+ encoded = fuzz(text, key)
+ decoded = defuzz(encoded, key)
+
+ assert decoded == text, "XOR operation should be mathematically reversible"
+
+ # Test that same key produces same transformation
+ encoded2 = fuzz(text, key)
+ assert encoded == encoded2, "Same key should produce identical XOR transformation"
+
+
+ # ============================================================================
+ # ADDITIONAL EDGE CASES
+ # ============================================================================
+
+
+ def test_repeated_characters() -> None:
+ """Test fuzzing/defuzzing of strings with repeated characters."""
+ text = "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" # 32 'a's
+ key = "repeatKey"
+
+ encoded = fuzz(text, key)
+ decoded = defuzz(encoded, key)
+
+ assert decoded == text, "Repeated characters should survive roundtrip"
+
+
+ def test_mixed_case_sensitivity() -> None:
+ """Test case sensitivity of the fuzzing algorithm."""
+ text1 = "Hello World"
+ text2 = "hello world"
+ key = "caseKey"
+
+ enc1 = fuzz(text1, key)
+ enc2 = fuzz(text2, key)
+
+ assert enc1 != enc2, "Case sensitivity should be preserved"
+
+
+ def test_numeric_data() -> None:
+ """Test fuzzing/defuzzing of numeric strings."""
+ text = "1234567890"
+ key = "numericKey"
+
+ encoded = fuzz(text, key)
+ decoded = defuzz(encoded, key)
+
+ assert decoded == text, "Numeric data should survive roundtrip"
+
+
+ def test_alphanumeric_data() -> None:
+ """Test fuzzing/defuzzing of alphanumeric strings."""
+ text = "Alphanumeric123!@#"
+ key = "alphanumericKey"
+
+ encoded = fuzz(text, key)
+ decoded = defuzz(encoded, key)
+
+ assert decoded == text, "Alphanumeric data should survive roundtrip"
+
+
+ def test_unicode_combining_characters() -> None:
+ """Test fuzzing/defuzzing of Unicode combining characters."""
+ text = "c\u0327" # c + combining tilde = ç
+ key = "unicodeCombineKey"
+
+ encoded = fuzz(text, key)
+ decoded = defuzz(encoded, key)
+
+ assert decoded == text, "Unicode combining characters should survive roundtrip"
+
+
+ # ============================================================================
+ # STRESS TESTS
+ # ============================================================================
+
+
+ def test_stress_multiple_variations() -> None:
+ """Stress test with multiple parameter variations."""
+ test_cases = [
+ ("short", "k"),
+ ("medium length text", "mediumKey"),
+ ("very long text " * 50, "veryLongKey"),
+ ("", ""),
+ ("special!@#$%^&*()", "special!@#$%^&*()"),
+ ("áéíóú 中文 🚀", "unicodeKey"),
+ ]
+
+ for text, key in test_cases:
+ encoded = fuzz(text, key)
+ decoded = defuzz(encoded, key)
+ assert decoded == text, (
+ f"Stress test failed for: text='{text[:50]}...', key='{key}'"
+ )
+
+
+ def test_stress_hash_collisions() -> None:
+ """Stress test to check for hash collisions with many inputs."""
+ key = "stressKey"
+ results = set()
+
+ # Test many different inputs
+ for i in range(1000):
+ text = f"test_input_{i}"
+ encoded = fuzz(text, key, prefix_len=4, suffix_len=4)
+ results.add(encoded)
+
+ # Should have mostly unique results (allowing for some collisions due to short prefix/suffix)
+ assert len(results) >= 900, (
+ f"Too many hash collisions: {1000 - len(results)} collisions out of 1000"
+ )
+
+
+ # ============================================================================
+ # PROPERTY-BASED TESTS (using hypothesis-like testing)
+ # ============================================================================
+
+
+ def test_property_reversibility() -> None:
+ """Test that fuzz/defuzz is always reversible (property-based test)."""
+ # Test with a variety of inputs
+ test_inputs = [
+ "",
+ "a",
+ "ab",
+ "abc",
+ "hello",
+ "hello world",
+ "hello world!",
+ "123",
+ "123abc",
+ "abc123",
+ "123abc!@#",
+ "line1\nline2\tline3",
+ "áéíóú",
+ "中文",
+ "🚀",
+ "a" * 100,
+ "a" * 1000,
+ ]
+
+ key = "propertyKey"
+
+ for text in test_inputs:
+ encoded = fuzz(text, key)
+ decoded = defuzz(encoded, key)
+ assert decoded == text, f"Reversibility property failed for: {repr(text)}"
+
+
+ def test_property_deterministic() -> None:
+ """Test that fuzz/defuzz is deterministic (property-based test)."""
+ test_inputs = ["hello", "", "special!@#", "unicode🚀"]
+ key = "deterministicKey"
+
+ for text in test_inputs:
+ # Multiple calls should produce identical results
+ encoded1 = fuzz(text, key)
+ encoded2 = fuzz(text, key)
+ encoded3 = fuzz(text, key)
+
+ assert encoded1 == encoded2 == encoded3, (
+ f"Deterministic property failed for: {repr(text)}"
+ )
+
+ decoded1 = defuzz(encoded1, key)
+ decoded2 = defuzz(encoded2, key)
+ decoded3 = defuzz(encoded3, key)
+
+ assert decoded1 == decoded2 == decoded3, (
+ f"Deterministic defuzz failed for: {repr(text)}"
+ )
+
+
+ # ============================================================================
+ # DOCUMENTATION AND EXAMPLE TESTS
+ # ============================================================================
+
+
+ def test_examples_from_documentation():
+ """Test examples that would be suitable for documentation."""
+ # Example 1: Basic usage
+ text = "sensitive_password_123"
+ key = "my_secret_key"
+
+ encoded = fuzz(text, key)
+ decoded = defuzz(encoded, key)
+
+ assert decoded == text, "Basic usage example should work"
+
+ # Example 2: Custom prefix/suffix lengths
+ encoded_custom = fuzz(text, key, prefix_len=8, suffix_len=8)
+ decoded_custom = defuzz(encoded_custom, key, prefix_len=8, suffix_len=8)
+
+ assert decoded_custom == text, "Custom prefix/suffix example should work"
+
+ # Example 3: Verify encoded format
+ decoded_format = base64.b64decode(encoded.encode()).decode()
+ # The implementation doesn't use separators, so we check the structure differently
+ assert len(decoded_format) > 0, "Encoded format should not be empty"
+ # Verify it has prefix + body + suffix structure
+ assert len(decoded_format) >= 8, (
+ "Encoded format should have at least prefix and suffix"
+ )
+
+
+ def test_real_world_scenarios():
+ """Test real-world usage scenarios."""
+ scenarios = [
+ # API tokens
+ ("sk-1234567890abcdef", "api_key"),
+ # Passwords
+ ("P@ssw0rd!2023", "auth_key"),
+ # Session IDs
+ ("sess_abc123def456", "session_key"),
+ # JWT tokens
+ (
+ "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxMjM0NTY3ODkwIiwibmFtZSI6IkpvaG4gRG9lIiwiaWF0IjoxNTE2MjM5MDIyfQ.SflKxwRJSMeKKF2QT4fwpMeJf36POk6yJV_adQssw5c",
+ "jwt_key",
+ ),
+ # SQL queries (sanitized)
+ ("SELECT * FROM users WHERE id = 123", "query_key"),
+ # JSON data
+ ('{"user": "john", "pass": "secret"}', "json_key"),
+ ]
+
+ for text, key in scenarios:
+ encoded = fuzz(text, key)
+ decoded = defuzz(encoded, key)
+ assert decoded == text, f"Real-world scenario failed for: {text[:50]}..."
{sfq-0.0.42 → sfq-0.0.43}/uv.lock
@@ -3,5 +3,5 @@ requires-python = ">=3.9"

  [[package]]
  name = "sfq"
- version = "0.0.42"
+ version = "0.0.43"
  source = { editable = "." }
sfq-0.0.42/tests/test_SFTokenAuth_e2e.py (deleted; replaced by the new version above)
@@ -1,58 +0,0 @@
- """
- End-to-end tests for the SFTokenAuth module.
-
- These tests run against a real Salesforce instance using environment variables
- to ensure the SFTokenAuth functionality works correctly in practice.
- """
-
- import os
-
- import pytest
-
- from sfq import _SFTokenAuth
-
-
- @pytest.fixture(scope="module")
- def sf_instance():
- """Create an AuthManager instance for E2E testing."""
- required_env_vars = [
- "SF_INSTANCE_URL",
- "SF_ACCESS_TOKEN",
- ]
-
- missing_vars = [var for var in required_env_vars if not os.getenv(var)]
- if missing_vars:
- pytest.fail(f"Missing required env vars: {', '.join(missing_vars)}")
-
- sf = _SFTokenAuth(
- instance_url=os.getenv("SF_INSTANCE_URL"),
- access_token=os.getenv("SF_ACCESS_TOKEN"),
- )
- return sf
-
-
- def test_query(sf_instance):
- """Ensure that a simple query returns the expected results."""
- query = "SELECT Id FROM FeedComment LIMIT 1"
- response = sf_instance.query(query)
-
- assert response and isinstance(response, dict), (
- f"Query did not return a dict: {response}"
- )
-
- assert "records" in response, f"No records in response: {response}"
- assert len(response["records"]) == 1, (
- f"Expected 1 record, got {len(response['records'])}: {response}"
- )
- assert "Id" in response["records"][0], (
- f"No Id in record: {response['records'][0]}"
- )
- assert response["records"][0]["Id"], (
- f"Id is empty in record: {response['records'][0]}"
- )
- assert response["done"] is True, f"Query not marked as done: {response}"
- assert "totalSize" in response, f"No totalSize in response: {response}"
- assert response["totalSize"] == 1, (
- f"Expected totalSize 1, got {response['totalSize']}: {response}"
- )
-