ogc-na 0.1.3__tar.gz → 0.1.4__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of ogc-na might be problematic. Click here for more details.

Files changed (47)
  1. {ogc_na-0.1.3 → ogc_na-0.1.4}/PKG-INFO +1 -1
  2. {ogc_na-0.1.3 → ogc_na-0.1.4}/ogc/na/ingest_json.py +16 -7
  3. ogc_na-0.1.4/ogc/na/input_filters/__init__.py +25 -0
  4. ogc_na-0.1.4/ogc/na/input_filters/csv.py +71 -0
  5. {ogc_na-0.1.3 → ogc_na-0.1.4}/ogc/na/util.py +13 -3
  6. {ogc_na-0.1.3 → ogc_na-0.1.4}/ogc_na.egg-info/PKG-INFO +1 -1
  7. {ogc_na-0.1.3 → ogc_na-0.1.4}/ogc_na.egg-info/SOURCES.txt +5 -0
  8. ogc_na-0.1.4/test/data/headers.csv +89 -0
  9. ogc_na-0.1.4/test/data/no-headers.csv +6 -0
  10. ogc_na-0.1.4/test/test_input_filters_csv.py +86 -0
  11. {ogc_na-0.1.3 → ogc_na-0.1.4}/.github/workflows/python-publish.yml +0 -0
  12. {ogc_na-0.1.3 → ogc_na-0.1.4}/.gitignore +0 -0
  13. {ogc_na-0.1.3 → ogc_na-0.1.4}/MANIFEST.in +0 -0
  14. {ogc_na-0.1.3 → ogc_na-0.1.4}/README.md +0 -0
  15. {ogc_na-0.1.3 → ogc_na-0.1.4}/docs/examples.md +0 -0
  16. {ogc_na-0.1.3 → ogc_na-0.1.4}/docs/gen_ref_pages.py +0 -0
  17. {ogc_na-0.1.3 → ogc_na-0.1.4}/docs/index.md +0 -0
  18. {ogc_na-0.1.3 → ogc_na-0.1.4}/docs/tutorials.md +0 -0
  19. {ogc_na-0.1.3 → ogc_na-0.1.4}/mkdocs.yml +0 -0
  20. {ogc_na-0.1.3 → ogc_na-0.1.4}/ogc/na/__init__.py +0 -0
  21. {ogc_na-0.1.3 → ogc_na-0.1.4}/ogc/na/annotate_schema.py +0 -0
  22. {ogc_na-0.1.3 → ogc_na-0.1.4}/ogc/na/domain_config.py +0 -0
  23. {ogc_na-0.1.3 → ogc_na-0.1.4}/ogc/na/download.py +0 -0
  24. {ogc_na-0.1.3 → ogc_na-0.1.4}/ogc/na/profile.py +0 -0
  25. {ogc_na-0.1.3 → ogc_na-0.1.4}/ogc/na/provenance.py +0 -0
  26. {ogc_na-0.1.3 → ogc_na-0.1.4}/ogc/na/update_vocabs.py +0 -0
  27. {ogc_na-0.1.3 → ogc_na-0.1.4}/ogc/na/validation.py +0 -0
  28. {ogc_na-0.1.3 → ogc_na-0.1.4}/ogc_na.egg-info/dependency_links.txt +0 -0
  29. {ogc_na-0.1.3 → ogc_na-0.1.4}/ogc_na.egg-info/requires.txt +0 -0
  30. {ogc_na-0.1.3 → ogc_na-0.1.4}/ogc_na.egg-info/top_level.txt +0 -0
  31. {ogc_na-0.1.3 → ogc_na-0.1.4}/pyproject.toml +0 -0
  32. {ogc_na-0.1.3 → ogc_na-0.1.4}/rdf/catalog-v001.xml +0 -0
  33. {ogc_na-0.1.3 → ogc_na-0.1.4}/rdf/domaincfg.vocab.ttl +0 -0
  34. {ogc_na-0.1.3 → ogc_na-0.1.4}/requirements.txt +0 -0
  35. {ogc_na-0.1.3 → ogc_na-0.1.4}/setup.cfg +0 -0
  36. {ogc_na-0.1.3 → ogc_na-0.1.4}/setup.py +0 -0
  37. {ogc_na-0.1.3 → ogc_na-0.1.4}/test/__init__.py +0 -0
  38. {ogc_na-0.1.3 → ogc_na-0.1.4}/test/data/empty.ttl +0 -0
  39. {ogc_na-0.1.3 → ogc_na-0.1.4}/test/data/profile_tree.ttl +0 -0
  40. {ogc_na-0.1.3 → ogc_na-0.1.4}/test/data/profile_tree_cyclic.ttl +0 -0
  41. {ogc_na-0.1.3 → ogc_na-0.1.4}/test/data/sample-context.jsonld +0 -0
  42. {ogc_na-0.1.3 → ogc_na-0.1.4}/test/data/sample-schema-prop-c.yml +0 -0
  43. {ogc_na-0.1.3 → ogc_na-0.1.4}/test/data/sample-schema.yml +0 -0
  44. {ogc_na-0.1.3 → ogc_na-0.1.4}/test/data/uplift_context_valid.yml +0 -0
  45. {ogc_na-0.1.3 → ogc_na-0.1.4}/test/test_annotate_schema.py +0 -0
  46. {ogc_na-0.1.3 → ogc_na-0.1.4}/test/test_ingest_json.py +0 -0
  47. {ogc_na-0.1.3 → ogc_na-0.1.4}/test/test_profile.py +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: ogc_na
3
- Version: 0.1.3
3
+ Version: 0.1.4
4
4
  Summary: OGC Naming Authority tools
5
5
  Author-email: Rob Atkinson <ratkinson@ogc.org>, Piotr Zaborowski <pzaborowski@ogc.org>, Alejandro Villar <avillar@ogc.org>
6
6
  Project-URL: Homepage, https://github.com/opengeospatial/ogc-na-tools/
@@ -50,6 +50,7 @@ from rdflib.namespace import Namespace, DefinedNamespace
50
50
  from ogc.na import util, profile
51
51
  from ogc.na.domain_config import UpliftConfigurationEntry, DomainConfiguration
52
52
  from ogc.na.provenance import ProvenanceMetadata, FileProvenanceMetadata, generate_provenance
53
+ from ogc.na.input_filters import apply_input_filter
53
54
 
54
55
  logger = logging.getLogger(__name__)
55
56
 
@@ -393,7 +394,8 @@ def process_file(input_fn: str | Path,
393
394
  If False, no Turtle output will be generated.
394
395
  :param context_fn: YAML context filename. If None, will be autodetected:
395
396
  1. From a file with the same name but yml/yaml extension (test.json -> test.yml)
396
- 2. From a _json-context.yml/_json-context.yaml file in the same directory
397
+ 2. From the domain_cfg
398
+ 3. From a _json-context.yml/_json-context.yaml file in the same directory
397
399
  :param domain_cfg: domain configuration with uplift definition locations
398
400
  :param base: base URI for JSON-LD
399
401
  :param provenance_base_uri: base URI for provenance resources
@@ -436,8 +438,15 @@ def process_file(input_fn: str | Path,
436
438
  if not contexts:
437
439
  raise MissingContextException('No context file provided and one could not be discovered automatically')
438
440
 
439
- with open(input_fn, 'r') as j:
440
- input_data = json.load(j)
441
+ # Apply input filter of first context only (if any)
442
+ input_filters = contexts[0].get('input-filter')
443
+ if input_filters:
444
+ if not isinstance(input_filters, dict):
445
+ raise ValueError('input-filter must be an object')
446
+ input_data = apply_input_filter(input_fn, input_filters)
447
+ else:
448
+ with open(input_fn, 'r') as j:
449
+ input_data = json.load(j)
441
450
 
442
451
  provenance_metadata: ProvenanceMetadata | None = None
443
452
  if provenance_base_uri is not False:
@@ -635,7 +644,10 @@ def process(input_files: str | Path | Sequence[str | Path],
635
644
  logger.info("Input files: %s", input_files)
636
645
  remaining_fn: deque = deque()
637
646
  for input_file in input_files:
638
- remaining_fn.extend(input_file.split(','))
647
+ if isinstance(input_file, str):
648
+ remaining_fn.extend(input_file.split(','))
649
+ else:
650
+ remaining_fn.append(input_file)
639
651
  while remaining_fn:
640
652
  fn = str(remaining_fn.popleft())
641
653
 
@@ -645,9 +657,6 @@ def process(input_files: str | Path | Sequence[str | Path],
645
657
  remaining_fn.extend(filenames_from_context(fn, domain_config=domain_cfg) or [])
646
658
  continue
647
659
 
648
- if not re.match(r'.*\.json-?(ld)?$', fn):
649
- logger.debug('File %s does not match, skipping', fn)
650
- continue
651
660
  logger.info('File %s matches, processing', fn)
652
661
  try:
653
662
  result.append(process_file(
@@ -0,0 +1,25 @@
1
+ from __future__ import annotations
2
+
3
+ from importlib import import_module
4
+ from io import BytesIO
5
+ from pathlib import Path
6
+ from typing import Any, IO, TextIO
7
+
8
+
9
+ def apply_input_filter(stream: IO | bytes | str | Path, filters: dict[str, dict]) -> dict[str, Any] | list:
10
+ filter_name, filter_conf = filters.popitem()
11
+ try:
12
+ filter_mod = import_module(f"ogc.na.input_filters.{filter_name}")
13
+ except ImportError:
14
+ raise ValueError(f'Cannot find input filter with name "{filter_name}"')
15
+
16
+ content: bytes | None = None
17
+ if isinstance(stream, Path) or isinstance(stream, str):
18
+ with open(stream, 'rb') as f:
19
+ content = f.read()
20
+ elif isinstance(stream, TextIO):
21
+ content = stream.read().encode('utf-8')
22
+ else:
23
+ content = stream.read()
24
+
25
+ return filter_mod.apply_filter(content, filter_conf)
@@ -0,0 +1,71 @@
1
+ """
2
+ CSV Input filter for ingest_json.
3
+
4
+ Returns CSV rows as a list. Values will always be strings (no type inference or coercion is performed).
5
+
6
+ Configuration values:
7
+
8
+ * `rows` (default: `dict`): type of elements in the result list:
9
+ * `dict`: elements will be dictionaries, with the keys taken from the `header-row`.
10
+ * `list`: each resulting row will be an array values.
11
+ * `header-row` (default: `0`): if `rows` is `dict`, the (0-based) index of the header row. All rows before the
12
+ header row will be skipped.
13
+ * `skip-rows` (default: `0`): number of rows to skip at the beginning of the file (apart from the header and pre-header
14
+ ones if `rows` is `dict`).
15
+ * `delimiter` (default: `,`): field separator character
16
+ * `quotechar` (default: `"`): char used to quote (enclose) field values
17
+ * `skip-empty-rows` (default: `True`): whether to omit empty rows (i.e., those with no values) from the result
18
+ * `trim-values` (default: `False`): whether to apply `.strip()` to the resulting values
19
+ """
20
+ from __future__ import annotations
21
+
22
+ import csv
23
+ from io import BytesIO, TextIOWrapper, StringIO
24
+ from typing import IO, Any
25
+
26
+ from ogc.na import util
27
+
28
+ DEFAULT_CONF = {
29
+ 'rows': 'dict',
30
+ 'header-row': 0,
31
+ 'skip-rows': 0,
32
+ 'delimiter': ',',
33
+ 'quotechar': '"',
34
+ 'skip-empty-rows': True,
35
+ 'trim-values': False,
36
+ }
37
+
38
+
39
+ def apply_filter(content: bytes, conf: dict[str, Any] | None) -> dict[str, Any] | list:
40
+ conf = util.deep_update(DEFAULT_CONF, conf) if conf else DEFAULT_CONF
41
+
42
+ textio = StringIO(content.decode('utf-8'))
43
+ reader = csv.reader(textio, delimiter=conf['delimiter'], quotechar=conf['quotechar'])
44
+
45
+ headers = None
46
+ if conf['rows'] == 'dict':
47
+ header_row = max(conf['header-row'], 0)
48
+ # Skip to header row
49
+ for i in range(header_row):
50
+ next(reader, None)
51
+ headers = next(reader, [])
52
+ if not headers:
53
+ return []
54
+
55
+ # Skip requested rows
56
+ for i in range(conf['skip-rows']):
57
+ next(reader, None)
58
+
59
+ result = []
60
+ for row in reader:
61
+ if not row and conf['skip-empty-rows']:
62
+ # skip empty rows
63
+ continue
64
+ if conf['trim-values']:
65
+ row = [v.strip() for v in row]
66
+ if conf['rows'] == 'list':
67
+ result.append(row)
68
+ else:
69
+ result.append(dict(zip(headers, row)))
70
+
71
+ return result
@@ -8,7 +8,7 @@ import shlex
8
8
  from glob import glob
9
9
  from pathlib import Path
10
10
  from time import time
11
- from typing import Optional, Union, Any, Hashable
11
+ from typing import Optional, Union, Any, Mapping, Hashable
12
12
 
13
13
  import requests
14
14
  import rfc3987
@@ -112,7 +112,7 @@ def is_url(url: str, http_only: bool = False) -> bool:
112
112
  Checks whether a string is a valid URL.
113
113
 
114
114
  :param url: the input string
115
- :param http_only: whether to only accept HTTP and HTTPS URL's as valid
115
+ :param http_only: whether to only accept HTTP and HTTPS URLs as valid
116
116
  :return: `True` if this is a valid URL, otherwise `False`
117
117
  """
118
118
  if not url:
@@ -161,7 +161,7 @@ def dump_yaml(content: Any, filename: str | Path | None = None,
161
161
 
162
162
  :param content: content to convert to YAML.
163
163
  :param filename: optional filename to dump the content into. If None, string content will be returned.
164
- :param kwargs: other args to pass to yaml.dump
164
+ :param kwargs: other args to pass to `yaml.dump()`
165
165
  """
166
166
  kwargs.setdefault('sort_keys', False)
167
167
  if filename:
@@ -229,3 +229,13 @@ class LRUCache:
229
229
  del self._last_access[key_to_remove]
230
230
  self._cache[key] = value
231
231
  self._last_access[key] = time()
232
+
233
+
234
def deep_update(orig_dict: dict, with_dict: dict, replace: bool = False) -> dict:
    """Recursively merge ``with_dict`` into ``orig_dict``.

    Mapping values are merged key by key; any other value in ``with_dict``
    overwrites the corresponding entry.

    :param orig_dict: base dictionary
    :param with_dict: dictionary whose entries take precedence
    :param replace: if ``True``, mutate ``orig_dict`` in place; otherwise
        work on (and return) a copy
    :return: the merged dictionary
    """
    dest = orig_dict if replace else {**orig_dict}
    for k, v in with_dict.items():
        if isinstance(v, Mapping):
            # BUGFIX: recurse on the value `v`, not on the whole `with_dict`
            # (the latter recursed forever on any nested mapping). Also start
            # from an empty dict when the existing value is not a mapping.
            current = dest.get(k)
            dest[k] = deep_update(current if isinstance(current, Mapping) else {}, v, replace)
        else:
            dest[k] = v
    return dest
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: ogc-na
3
- Version: 0.1.3
3
+ Version: 0.1.4
4
4
  Summary: OGC Naming Authority tools
5
5
  Author-email: Rob Atkinson <ratkinson@ogc.org>, Piotr Zaborowski <pzaborowski@ogc.org>, Alejandro Villar <avillar@ogc.org>
6
6
  Project-URL: Homepage, https://github.com/opengeospatial/ogc-na-tools/
@@ -20,6 +20,8 @@ ogc/na/provenance.py
20
20
  ogc/na/update_vocabs.py
21
21
  ogc/na/util.py
22
22
  ogc/na/validation.py
23
+ ogc/na/input_filters/__init__.py
24
+ ogc/na/input_filters/csv.py
23
25
  ogc_na.egg-info/PKG-INFO
24
26
  ogc_na.egg-info/SOURCES.txt
25
27
  ogc_na.egg-info/dependency_links.txt
@@ -30,8 +32,11 @@ rdf/domaincfg.vocab.ttl
30
32
  test/__init__.py
31
33
  test/test_annotate_schema.py
32
34
  test/test_ingest_json.py
35
+ test/test_input_filters_csv.py
33
36
  test/test_profile.py
34
37
  test/data/empty.ttl
38
+ test/data/headers.csv
39
+ test/data/no-headers.csv
35
40
  test/data/profile_tree.ttl
36
41
  test/data/profile_tree_cyclic.ttl
37
42
  test/data/sample-context.jsonld
@@ -0,0 +1,89 @@
1
+ "Year","Score","Title"
2
+ 1968,86,"Greetings"
3
+ 1970,17,"Bloody Mama"
4
+ 1970,73,"Hi,Mom!"
5
+ 1971,40,"Born to Win"
6
+ 1973,98,"Mean Streets"
7
+ 1973,88,"Bang the Drum Slowly"
8
+ 1974,97,"The Godfather,Part II"
9
+ 1976,41,"The Last Tycoon"
10
+ 1976,99,"Taxi Driver"
11
+ 1977,47,"1900"
12
+ 1977,67,"New York,New York"
13
+ 1978,93,"The Deer Hunter"
14
+ 1980,97,"Raging Bull"
15
+ 1981,75,"True Confessions"
16
+ 1983,90,"The King of Comedy"
17
+ 1984,89,"Once Upon a Time in America"
18
+ 1984,60,"Falling in Love"
19
+ 1985,98,"Brazil"
20
+ 1986,65,"The Mission"
21
+ 1987,100,"Dear America: Letters Home From Vietnam"
22
+ 1987,80,"The Untouchables"
23
+ 1987,78,"Angel Heart"
24
+ 1988,96,"Midnight Run"
25
+ 1989,64,"Jacknife"
26
+ 1989,47,"We're No Angels"
27
+ 1990,88,"Awakenings"
28
+ 1990,29,"Stanley & Iris"
29
+ 1990,96,"Goodfellas"
30
+ 1991,76,"Cape Fear"
31
+ 1991,69,"Mistress"
32
+ 1991,65,"Guilty by Suspicion"
33
+ 1991,71,"Backdraft"
34
+ 1992,87,"Thunderheart"
35
+ 1992,67,"Night and the City"
36
+ 1993,75,"This Boy's Life"
37
+ 1993,78,"Mad Dog and Glory"
38
+ 1993,96,"A Bronx Tale"
39
+ 1994,39,"Mary Shelley's Frankenstein"
40
+ 1995,80,"Casino"
41
+ 1995,86,"Heat"
42
+ 1996,74,"Sleepers"
43
+ 1996,38,"The Fan"
44
+ 1996,80,"Marvin's Room"
45
+ 1997,85,"Wag the Dog"
46
+ 1997,87,"Jackie Brown"
47
+ 1997,72,"Cop Land"
48
+ 1998,68,"Ronin"
49
+ 1998,38,"Great Expectations"
50
+ 1999,69,"Analyze This"
51
+ 1999,43,"Flawless"
52
+ 2000,43,"The Adventures of Rocky & Bullwinkle"
53
+ 2000,84,"Meet the Parents"
54
+ 2000,41,"Men of Honor"
55
+ 2001,73,"The Score"
56
+ 2001,33,"15 Minutes"
57
+ 2002,48,"City by the Sea"
58
+ 2002,27,"Analyze That"
59
+ 2003,4,"Godsend"
60
+ 2004,35,"Shark Tale"
61
+ 2004,38,"Meet the Fockers"
62
+ 2005,4,"The Bridge of San Luis Rey"
63
+ 2005,46,"Rent"
64
+ 2005,13,"Hide and Seek"
65
+ 2006,54,"The Good Shepherd"
66
+ 2007,21,"Arthur and the Invisibles"
67
+ 2007,76,"Captain Shakespeare"
68
+ 2008,19,"Righteous Kill"
69
+ 2008,51,"What Just Happened?"
70
+ 2009,46,"Everybody's Fine"
71
+ 2010,72,"Machete"
72
+ 2010,10,"Little Fockers"
73
+ 2010,50,"Stone"
74
+ 2011,25,"Killer Elite"
75
+ 2011,7,"New Year's Eve"
76
+ 2011,70,"Limitless"
77
+ 2012,92,"Silver Linings Playbook"
78
+ 2012,51,"Being Flynn"
79
+ 2012,29,"Red Lights"
80
+ 2013,46,"Last Vegas"
81
+ 2013,7,"The Big Wedding"
82
+ 2013,29,"Grudge Match"
83
+ 2013,11,"Killing Season"
84
+ 2014,9,"The Bag Man"
85
+ 2015,60,"Joy"
86
+ 2015,26,"Heist"
87
+ 2015,61,"The Intern"
88
+ 2016,11,"Dirty Grandpa"
89
+
@@ -0,0 +1,6 @@
1
+ John,Doe,120 jefferson st.,Riverside, NJ, 08075
2
+ Jack,McGinnis,220 hobo Av.,Phila, PA,09119
3
+ "John ""Da Man""",Repici,120 Jefferson St.,Riverside, NJ,08075
4
+ Stephen,Tyler,"7452 Terrace ""At the Plaza"" road",SomeTown,SD, 91234
5
+ ,Blankman,,SomeTown, SD, 00298
6
+ "Joan ""the bone"", Anne",Jet,"9th, at Terrace plc",Desert City,CO,00123
@@ -0,0 +1,86 @@
1
+ #!/usr/bin/env python3
2
+
3
+ import unittest
4
+ from io import BytesIO
5
+ from pathlib import Path
6
+
7
+ from ogc.na.input_filters import csv
8
+
9
+ THIS_DIR = Path(__file__).parent
10
+ DATA_DIR = THIS_DIR / 'data'
11
+
12
+ with open(DATA_DIR / 'headers.csv', 'rb') as f:
13
+ WITH_HEADERS = f.read()
14
+ with open(DATA_DIR / 'no-headers.csv', 'rb') as f:
15
+ NO_HEADERS = f.read()
16
+
17
+
18
class InputFiltersCSVTest(unittest.TestCase):
    """Tests for the CSV input filter (ogc.na.input_filters.csv).

    Fixtures: WITH_HEADERS is a movie list with a quoted header row and 88
    data rows (trailing blank line); NO_HEADERS has 6 rows with quoted and
    comma-embedded values and no header.
    """

    def test_rows_objects(self):
        """rows='dict': rows keyed by the header row; header-row/skip-rows interplay."""
        cfg = {
            'rows': 'dict',
            'skip-rows': 0,
            'header-row': 0,
        }
        rows = csv.apply_filter(WITH_HEADERS, cfg)
        # 88 data rows minus the trailing empty row (skip-empty-rows default)
        self.assertEqual(len(rows), 87)
        self.assertEqual(rows[0], {
            'Year': '1968',
            'Score': '86',
            'Title': 'Greetings',
        })
        # Embedded comma survives because the field is quoted in the source
        self.assertEqual(rows[10], {
            'Year': '1977',
            'Score': '67',
            'Title': 'New York,New York',
        })

        # skip-rows drops rows AFTER the header: the first 3 data rows go away
        cfg['skip-rows'] = 3
        rows = csv.apply_filter(WITH_HEADERS, cfg)
        self.assertEqual(len(rows), 84)
        self.assertEqual(rows[0], {
            'Year': '1971',
            'Score': '40',
            'Title': 'Born to Win',
        })
        self.assertEqual(rows[7], {
            'Year': '1977',
            'Score': '67',
            'Title': 'New York,New York',
        })

        # header-row=2: rows 0-1 are skipped and row 2 (a data row) becomes
        # the header, so its values turn into the dict keys
        cfg['skip-rows'] = 0
        cfg['header-row'] = 2
        rows = csv.apply_filter(WITH_HEADERS, cfg)
        self.assertEqual(len(rows), 85)
        self.assertEqual(rows[0], {
            '1970': '1970',
            '17': '73',
            'Bloody Mama': 'Hi,Mom!',
        })

    def test_rows_lists(self):
        """rows='list': raw value lists; header-row is ignored in this mode."""
        cfg = {
            'rows': 'list',
            'skip-rows': 0,
        }
        rows = csv.apply_filter(WITH_HEADERS, cfg)
        # In list mode the header line is just another row (88 = 87 + header)
        self.assertEqual(len(rows), 88)
        self.assertEqual(rows[0], ['Year', 'Score', 'Title'])
        self.assertEqual(rows[10], ['1977', '47', '1900'])
        cfg['skip-rows'] = 1
        rows = csv.apply_filter(WITH_HEADERS, cfg)
        self.assertEqual(len(rows), 87)
        self.assertEqual(rows[0], ['1968', '86', "Greetings"])
        self.assertEqual(rows[3], ['1971', '40', "Born to Win"])
        cfg['header-row'] = 2  # should have no effect
        self.assertEqual(rows, csv.apply_filter(WITH_HEADERS, cfg))

        # Values keep their surrounding whitespace unless trim-values is set
        cfg['skip-rows'] = 0
        rows = csv.apply_filter(NO_HEADERS, cfg)
        self.assertEqual(len(rows), 6)
        self.assertEqual(rows[0], ['John', 'Doe', '120 jefferson st.', 'Riverside', ' NJ', ' 08075'])
        cfg['trim-values'] = True
        rows = csv.apply_filter(NO_HEADERS, cfg)
        self.assertEqual(rows[0], ['John', 'Doe', '120 jefferson st.', 'Riverside', 'NJ', '08075'])
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes