amplify-excel-migrator 1.1.5__py3-none-any.whl → 1.2.15__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (45) hide show
  1. amplify_excel_migrator/__init__.py +17 -0
  2. amplify_excel_migrator/auth/__init__.py +6 -0
  3. amplify_excel_migrator/auth/cognito_auth.py +306 -0
  4. amplify_excel_migrator/auth/provider.py +42 -0
  5. amplify_excel_migrator/cli/__init__.py +5 -0
  6. amplify_excel_migrator/cli/commands.py +165 -0
  7. amplify_excel_migrator/client.py +47 -0
  8. amplify_excel_migrator/core/__init__.py +5 -0
  9. amplify_excel_migrator/core/config.py +98 -0
  10. amplify_excel_migrator/data/__init__.py +7 -0
  11. amplify_excel_migrator/data/excel_reader.py +23 -0
  12. amplify_excel_migrator/data/transformer.py +119 -0
  13. amplify_excel_migrator/data/validator.py +48 -0
  14. amplify_excel_migrator/graphql/__init__.py +8 -0
  15. amplify_excel_migrator/graphql/client.py +137 -0
  16. amplify_excel_migrator/graphql/executor.py +405 -0
  17. amplify_excel_migrator/graphql/mutation_builder.py +80 -0
  18. amplify_excel_migrator/graphql/query_builder.py +194 -0
  19. amplify_excel_migrator/migration/__init__.py +8 -0
  20. amplify_excel_migrator/migration/batch_uploader.py +23 -0
  21. amplify_excel_migrator/migration/failure_tracker.py +92 -0
  22. amplify_excel_migrator/migration/orchestrator.py +143 -0
  23. amplify_excel_migrator/migration/progress_reporter.py +57 -0
  24. amplify_excel_migrator/schema/__init__.py +6 -0
  25. model_field_parser.py → amplify_excel_migrator/schema/field_parser.py +100 -22
  26. amplify_excel_migrator/schema/introspector.py +95 -0
  27. {amplify_excel_migrator-1.1.5.dist-info → amplify_excel_migrator-1.2.15.dist-info}/METADATA +121 -26
  28. amplify_excel_migrator-1.2.15.dist-info/RECORD +40 -0
  29. amplify_excel_migrator-1.2.15.dist-info/entry_points.txt +2 -0
  30. amplify_excel_migrator-1.2.15.dist-info/top_level.txt +2 -0
  31. tests/__init__.py +1 -0
  32. tests/test_cli_commands.py +292 -0
  33. tests/test_client.py +187 -0
  34. tests/test_cognito_auth.py +363 -0
  35. tests/test_config_manager.py +347 -0
  36. tests/test_field_parser.py +615 -0
  37. tests/test_mutation_builder.py +391 -0
  38. tests/test_query_builder.py +384 -0
  39. amplify_client.py +0 -941
  40. amplify_excel_migrator-1.1.5.dist-info/RECORD +0 -9
  41. amplify_excel_migrator-1.1.5.dist-info/entry_points.txt +0 -2
  42. amplify_excel_migrator-1.1.5.dist-info/top_level.txt +0 -3
  43. migrator.py +0 -437
  44. {amplify_excel_migrator-1.1.5.dist-info → amplify_excel_migrator-1.2.15.dist-info}/WHEEL +0 -0
  45. {amplify_excel_migrator-1.1.5.dist-info → amplify_excel_migrator-1.2.15.dist-info}/licenses/LICENSE +0 -0
@@ -0,0 +1,615 @@
1
+ """Tests for FieldParser class"""
2
+
3
+ import pytest
4
+ from amplify_excel_migrator.schema import FieldParser
5
+
6
+
7
class TestExtractRelationshipInfo:
    """Tests for the _extract_relationship_info helper."""

    @staticmethod
    def _object_field(name, target, non_null=False):
        """Build a minimal introspection field dict pointing at an OBJECT type."""
        inner = {"kind": "OBJECT", "name": target, "ofType": None}
        if non_null:
            return {"name": name, "type": {"kind": "NON_NULL", "name": None, "ofType": inner}, "args": []}
        return {"name": name, "type": inner, "args": []}

    def test_extracts_belongsto_relationship(self):
        """A plain OBJECT field yields a target model and a derived foreign key."""
        info = FieldParser()._extract_relationship_info(self._object_field("photographer", "Reporter"))

        assert info is not None
        assert info["target_model"] == "Reporter"
        assert info["foreign_key"] == "photographerId"

    def test_extracts_with_non_null_wrapper(self):
        """The NON_NULL wrapper is unwrapped before reading the target type."""
        info = FieldParser()._extract_relationship_info(self._object_field("author", "User", non_null=True))

        assert info is not None
        assert info["target_model"] == "User"
        assert info["foreign_key"] == "authorId"

    def test_extracts_from_all_fields(self):
        """Model*Connection types are not treated as relationships."""
        connection = self._object_field("posts", "ModelPostConnection")

        # Connection types are filtered, so nothing is extracted.
        assert FieldParser()._extract_relationship_info(connection) is None
49
+
50
+
51
class TestParseModelStructure:
    """Tests for parse_model_structure handling of belongsTo relationships."""

    def test_raises_value_error_on_empty_introspection(self):
        """Both None and {} introspection inputs raise ValueError."""
        fp = FieldParser()

        for empty in (None, {}):
            with pytest.raises(ValueError, match="Introspection result cannot be empty"):
                fp.parse_model_structure(empty)

    def test_includes_all_fields_except_metadata(self):
        """Everything survives except metadata and relationship OBJECT fields."""
        schema = {
            "name": "Story",
            "kind": "OBJECT",
            "description": None,
            "fields": [
                {
                    "name": "id",
                    "type": {"kind": "NON_NULL", "name": None, "ofType": {"kind": "SCALAR", "name": "ID"}},
                    "description": None,
                },
                {
                    "name": "photographerId",
                    "type": {"kind": "NON_NULL", "name": None, "ofType": {"kind": "SCALAR", "name": "ID"}},
                    "description": None,
                },
                {
                    "name": "photographer",
                    "type": {"kind": "OBJECT", "name": "Reporter", "ofType": None},
                    "description": None,
                    "args": [],
                },
                {"name": "title", "type": {"kind": "SCALAR", "name": "String", "ofType": None}, "description": None},
            ],
        }

        names = [entry["name"] for entry in FieldParser().parse_model_structure(schema)["fields"]]

        assert "photographerId" in names
        assert "photographer" not in names  # Relationship OBJECT fields are filtered
        assert "title" in names
        assert "id" not in names  # metadata filtered

    def test_adds_related_model_to_foreign_key(self):
        """The foreign-key field is annotated with its target model."""
        schema = {
            "name": "Story",
            "kind": "OBJECT",
            "description": None,
            "fields": [
                {
                    "name": "photographerId",
                    "type": {"kind": "NON_NULL", "name": None, "ofType": {"kind": "SCALAR", "name": "ID"}},
                    "description": None,
                },
                {
                    "name": "photographer",
                    "type": {"kind": "OBJECT", "name": "Reporter", "ofType": None},
                    "description": None,
                    "args": [],
                },
            ],
        }

        parsed = FieldParser().parse_model_structure(schema)
        fk = next(entry for entry in parsed["fields"] if entry["name"] == "photographerId")

        assert "related_model" in fk
        assert fk["related_model"] == "Reporter"

    def test_handles_multiple_relationships(self):
        """Two belongsTo relationships are each annotated independently."""

        def id_field(name):
            return {"name": name, "type": {"kind": "SCALAR", "name": "ID", "ofType": None}, "description": None}

        def object_field(name, target):
            return {
                "name": name,
                "type": {"kind": "OBJECT", "name": target, "ofType": None},
                "description": None,
                "args": [],
            }

        schema = {
            "name": "Story",
            "kind": "OBJECT",
            "description": None,
            "fields": [
                id_field("authorId"),
                object_field("author", "User"),
                id_field("editorId"),
                object_field("editor", "User"),
            ],
        }

        parsed = FieldParser().parse_model_structure(schema)
        names = [entry["name"] for entry in parsed["fields"]]

        # ID fields stay; the relationship objects themselves are filtered out.
        assert "authorId" in names
        assert "author" not in names  # Relationship objects are filtered
        assert "editorId" in names
        assert "editor" not in names  # Relationship objects are filtered

        # Each surviving FK carries its related_model annotation.
        by_name = {entry["name"]: entry for entry in parsed["fields"]}
        assert by_name["authorId"]["related_model"] == "User"
        assert by_name["editorId"]["related_model"] == "User"

    def test_preserves_custom_types(self):
        """A LIST of a custom OBJECT type is kept and flagged accordingly."""
        schema = {
            "name": "User",
            "kind": "OBJECT",
            "description": None,
            "fields": [
                {"name": "name", "type": {"kind": "SCALAR", "name": "String", "ofType": None}, "description": None},
                {
                    "name": "addresses",
                    "type": {
                        "kind": "LIST",
                        "name": None,
                        "ofType": {"kind": "OBJECT", "name": "Address", "ofType": None},
                    },
                    "description": None,
                },
            ],
        }

        parsed = FieldParser().parse_model_structure(schema)
        names = [entry["name"] for entry in parsed["fields"]]

        assert "addresses" in names
        addresses = next(entry for entry in parsed["fields"] if entry["name"] == "addresses")
        assert addresses["is_custom_type"] is True
        assert addresses["is_list"] is True
207
+
208
+
209
class TestParseFieldWithRelationships:
    """Tests for _parse_field behavior on relationship-adjacent fields."""

    def test_marks_custom_type_correctly(self):
        """A bare OBJECT field of a custom type is flagged as such."""
        spec = {"name": "address", "type": {"kind": "OBJECT", "name": "Address", "ofType": None}, "description": None}

        parsed = FieldParser()._parse_field(spec)

        assert parsed["is_custom_type"] is True
        assert parsed["type"] == "Address"

    def test_skips_connections(self):
        """Model*Connection types produce an empty parse result."""
        spec = {
            "name": "posts",
            "type": {"kind": "OBJECT", "name": "ModelPostConnection", "ofType": None},
            "description": None,
        }

        assert FieldParser()._parse_field(spec) == {}

    def test_skips_metadata_fields(self):
        """id/createdAt/updatedAt/owner are all dropped."""
        fp = FieldParser()

        for field_name in ("id", "createdAt", "updatedAt", "owner"):
            spec = {
                "name": field_name,
                "type": {"kind": "SCALAR", "name": "String", "ofType": None},
                "description": None,
            }
            assert fp._parse_field(spec) == {}, f"Should skip metadata field: {field_name}"
251
+
252
+
253
class TestCleanInput:
    """Tests for the clean_input static method (Unicode sanitising)."""

    def test_strips_whitespace(self):
        """Leading/trailing whitespace is removed."""
        fp = FieldParser()

        assert fp.clean_input(" test ") == "test"
        assert fp.clean_input("\t test \n") == "test"
        assert fp.clean_input(" ") == ""

    def test_removes_unicode_control_characters(self):
        """Unicode control characters (category Cc) are removed."""
        fp = FieldParser()

        # NULL (U+0000), BELL (U+0007), BACKSPACE (U+0008)
        for ctrl in ("\x00", "\x07", "\x08"):
            assert fp.clean_input("test" + ctrl + "value") == "testvalue"

    def test_removes_unicode_format_characters(self):
        """Unicode format characters (category Cf) are removed."""
        fp = FieldParser()

        # ZWSP (U+200B), ZWNJ (U+200C), soft hyphen (U+00AD), LRM (U+200E)
        for fmt_char in ("\u200b", "\u200c", "\u00ad", "\u200e"):
            assert fp.clean_input("test" + fmt_char + "value") == "testvalue"

    def test_preserves_newline_tab_carriage_return(self):
        """Newline, tab, and carriage return survive cleaning."""
        fp = FieldParser()

        for keep in ("\n", "\t", "\r"):
            sample = "test" + keep + "value"
            assert fp.clean_input(sample) == sample

    def test_handles_non_string_input(self):
        """Non-string values pass through untouched."""
        fp = FieldParser()

        assert fp.clean_input(123) == 123
        assert fp.clean_input(123.45) == 123.45
        assert fp.clean_input(None) is None
        assert fp.clean_input(True) is True
        assert fp.clean_input([1, 2, 3]) == [1, 2, 3]

    def test_handles_empty_string(self):
        """An empty string stays empty."""
        assert FieldParser().clean_input("") == ""

    def test_preserves_valid_unicode_characters(self):
        """Emoji and non-Latin scripts are left intact."""
        fp = FieldParser()

        # Emoji, Hebrew, Chinese, Arabic
        for sample in ("test 😀 value", "שלום", "你好", "مرحبا"):
            assert fp.clean_input(sample) == sample

    def test_complex_string_with_multiple_control_chars(self):
        """Removable and preservable characters mixed in one string."""
        raw = " test\x00\u200b\nvalue\t\u00adend "
        assert FieldParser().clean_input(raw) == "test\nvalue\tend"
336
+
337
+
338
class TestIntegrationBelongsToFlow:
    """End-to-end checks of the full belongsTo relationship flow."""

    @staticmethod
    def _required_scalar(name, scalar):
        """A NON_NULL scalar field entry, as GraphQL introspection returns it."""
        return {
            "name": name,
            "type": {
                "kind": "NON_NULL",
                "name": None,
                "ofType": {"kind": "SCALAR", "name": scalar, "ofType": None},
            },
            "description": None,
        }

    def test_full_story_model_with_photographer(self):
        """A realistic Story payload: FK kept and annotated, object + metadata dropped."""
        # Simulate a real GraphQL introspection response shape.
        payload = {
            "data": {
                "__type": {
                    "name": "Story",
                    "kind": "OBJECT",
                    "description": None,
                    "fields": [
                        self._required_scalar("id", "ID"),
                        self._required_scalar("title", "String"),
                        self._required_scalar("photographerId", "ID"),
                        {
                            "name": "photographer",
                            "type": {"kind": "OBJECT", "name": "Reporter", "ofType": None},
                            "description": None,
                            "args": [],
                        },
                        self._required_scalar("createdAt", "AWSDateTime"),
                        self._required_scalar("updatedAt", "AWSDateTime"),
                    ],
                }
            }
        }

        parsed = FieldParser().parse_model_structure(payload)

        # Model-level info survives.
        assert parsed["name"] == "Story"
        assert parsed["kind"] == "OBJECT"

        names = [entry["name"] for entry in parsed["fields"]]

        # Regular fields and the foreign key stay; the relationship object goes.
        assert "title" in names
        assert "photographerId" in names
        assert "photographer" not in names  # Relationship OBJECT fields are filtered

        # Metadata fields are dropped.
        assert "id" not in names
        assert "createdAt" not in names
        assert "updatedAt" not in names

        # The foreign key carries the relationship annotation.
        fk = next(entry for entry in parsed["fields"] if entry["name"] == "photographerId")
        assert fk["is_id"] is True
        assert fk["is_required"] is True
        assert fk["related_model"] == "Reporter"
        assert fk["type"] == "ID"

    def test_model_without_relationships(self):
        """A model with no belongsTo fields gains no related_model annotations."""
        payload = {
            "name": "User",
            "kind": "OBJECT",
            "description": None,
            "fields": [
                {
                    "name": "id",
                    "type": {"kind": "NON_NULL", "name": None, "ofType": {"kind": "SCALAR", "name": "ID"}},
                    "description": None,
                },
                {
                    "name": "name",
                    "type": {"kind": "NON_NULL", "name": None, "ofType": {"kind": "SCALAR", "name": "String"}},
                    "description": None,
                },
                {"name": "email", "type": {"kind": "SCALAR", "name": "String", "ofType": None}, "description": None},
            ],
        }

        parsed = FieldParser().parse_model_structure(payload)
        names = [entry["name"] for entry in parsed["fields"]]

        # Regular fields remain; id is metadata and is filtered.
        assert "name" in names
        assert "email" in names
        assert "id" not in names

        # No field should carry a relationship annotation.
        for entry in parsed["fields"]:
            assert "related_model" not in entry
470
+
471
+
472
class TestParseScalarArray:
    """Tests for parse_scalar_array across input formats and element types."""

    @staticmethod
    def _list_field(name, type_name, **extra):
        """Build a scalar-list field spec; extra flags (e.g. is_enum) merge in."""
        spec = {"name": name, "type": type_name, "is_list": True, "is_scalar": True}
        spec.update(extra)
        return spec

    def test_parses_json_array_format(self):
        """A JSON-encoded array string is decoded directly."""
        field = self._list_field("urls", "AWSURL")
        out = FieldParser().parse_scalar_array(
            field, "urls", '["https://url1.com", "https://url2.com", "https://url3.com"]'
        )
        assert out == ["https://url1.com", "https://url2.com", "https://url3.com"]

    def test_parses_semicolon_separated(self):
        """Semicolon-separated values split into a list."""
        field = self._list_field("urls", "AWSURL")
        out = FieldParser().parse_scalar_array(field, "urls", "https://url1.com; https://url2.com; https://url3.com")
        assert out == ["https://url1.com", "https://url2.com", "https://url3.com"]

    def test_parses_comma_separated(self):
        """Comma-separated values split into a list."""
        out = FieldParser().parse_scalar_array(self._list_field("tags", "String"), "tags", "tag1, tag2, tag3")
        assert out == ["tag1", "tag2", "tag3"]

    def test_parses_space_separated(self):
        """Space-separated values split into a list."""
        out = FieldParser().parse_scalar_array(self._list_field("tags", "String"), "tags", "tag1 tag2 tag3")
        assert out == ["tag1", "tag2", "tag3"]

    def test_parses_int_array(self):
        """Int-typed elements are converted to int."""
        out = FieldParser().parse_scalar_array(self._list_field("numbers", "Int"), "numbers", "1, 2, 3, 4, 5")
        assert out == [1, 2, 3, 4, 5]

    def test_parses_float_array(self):
        """Float-typed elements are converted to float."""
        out = FieldParser().parse_scalar_array(self._list_field("scores", "Float"), "scores", "1.5, 2.7, 3.9")
        assert out == [1.5, 2.7, 3.9]

    def test_parses_boolean_array(self):
        """Truthy/falsy word forms map to booleans."""
        out = FieldParser().parse_scalar_array(self._list_field("flags", "Boolean"), "flags", "true, false, yes, no")
        assert out == [True, False, True, False]

    def test_returns_none_for_empty_value(self):
        """Empty-ish inputs ("" / None / pd.NA) all map to None."""
        import pandas as pd

        fp = FieldParser()
        field = self._list_field("tags", "String")

        assert fp.parse_scalar_array(field, "tags", "") is None
        assert fp.parse_scalar_array(field, "tags", None) is None
        assert fp.parse_scalar_array(field, "tags", pd.NA) is None

    def test_handles_single_value_as_array(self):
        """A lone value becomes a single-item list."""
        out = FieldParser().parse_scalar_array(self._list_field("tags", "String"), "tags", "single-value")
        assert out == ["single-value"]

    def test_skips_empty_elements(self):
        """Blank entries between separators are dropped."""
        out = FieldParser().parse_scalar_array(self._list_field("tags", "String"), "tags", "tag1, , tag3, , tag5")
        assert out == ["tag1", "tag3", "tag5"]

    def test_cleans_whitespace_from_elements(self):
        """Each element is whitespace-trimmed."""
        out = FieldParser().parse_scalar_array(self._list_field("tags", "String"), "tags", " tag1 , tag2 , tag3 ")
        assert out == ["tag1", "tag2", "tag3"]

    def test_handles_mixed_separators_prioritizes_semicolon(self):
        """When a semicolon is present it wins over commas and spaces."""
        out = FieldParser().parse_scalar_array(self._list_field("values", "String"), "values", "val1, val2; val3, val4")
        assert out == ["val1, val2", "val3, val4"]

    def test_handles_json_array_with_numbers(self):
        """A JSON array of numbers decodes to ints."""
        out = FieldParser().parse_scalar_array(self._list_field("numbers", "Int"), "numbers", "[1, 2, 3, 4, 5]")
        assert out == [1, 2, 3, 4, 5]

    def test_handles_invalid_type_conversion_gracefully(self):
        """Unconvertible elements are skipped rather than raising."""
        out = FieldParser().parse_scalar_array(self._list_field("numbers", "Int"), "numbers", "1, abc, 3, xyz, 5")
        assert out == [1, 3, 5]

    def test_handles_enum_array(self):
        """Enum elements are normalized to upper case."""
        field = self._list_field("statuses", "Status", is_enum=True)
        out = FieldParser().parse_scalar_array(field, "statuses", "active, pending, completed")
        assert out == ["ACTIVE", "PENDING", "COMPLETED"]