adss 1.23-py3-none-any.whl → 1.25-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
adss/endpoints/images.py CHANGED
@@ -200,17 +200,12 @@ class ImagesEndpoint:
          raise ResourceNotFoundError(f"Failed to perform cone search: {e}")
  
      def download_file(self, file_id: int, output_path: Optional[str] = None, **kwargs) -> Union[bytes, str]:
-         url = f"{self.base_url}/adss/v1/images/files/{file_id}/download"
-         try:
-             headers = self.auth_manager._get_auth_headers()
-         except:
-             headers = {"Accept": "application/octet-stream"}
+         url = f"{self.base_url}/adss/v1/images/files/{file_id}/download?token={self.auth_manager.token}"
  
          try:
              resp = self.auth_manager.request(
                  method="GET",
                  url=url,
-                 headers=headers,
                  stream=True,
                  auth_required=False,
                  **kwargs
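
Net effect of this hunk: 1.25 drops the header-based fallback (including the bare `except:`) and authenticates downloads with a `token` query parameter, which is why the request no longer passes `headers=` and keeps `auth_required=False`. A minimal caller sketch; the `client.images` handle and the bytes-versus-path return behavior are assumptions inferred from the signature, not confirmed by this diff:

```python
import adss

# Placeholder URL and credentials; see dev/fetch_idr6.py later in this diff
# for a real client setup against this API.
client = adss.ADSSClient(base_url="https://andromeda.cbpf.br/",
                         username="<user>", password="<password>")

# Hypothetical endpoint handle; the diff only shows ImagesEndpoint.download_file itself.
raw = client.images.download_file(file_id=42)                                # presumably returns bytes
saved = client.images.download_file(file_id=42, output_path="cutout.fits")   # presumably writes the file and returns its path
```

One consequence of the query-parameter approach: the token now travels in the URL, where proxies and access logs can see it, whereas the removed code carried it in request headers.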
adss/endpoints/queries.py CHANGED
@@ -107,7 +107,7 @@ class QueriesEndpoint:
          query_obj = Query(
              id="sync_query",  # Synchronous queries don't have an ID
              query_text=query,
-             status="COMPLETED",
+             status="completed",
              created_at=pd.Timestamp.now(),
              mode=mode,
              completed_at=pd.Timestamp.now(),
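
The lowercase value written here must match what the `Query` state properties test for in `adss/models/query.py` (next hunk); both sides of that contract changed together in 1.25. A two-line illustration of why a stale writer or reader fails silently rather than loudly:

```python
# Status checks are plain string comparisons, so a case mismatch is not an
# error: it just makes every state property return False.
status_from_1_23 = "COMPLETED"
print(status_from_1_23 in ["completed", "failed"])  # False: is_complete would miss it
print("completed" in ["completed", "failed"])       # True: the 1.25 pairing
```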
adss/models/query.py CHANGED
@@ -69,27 +69,27 @@ class Query:
      @property
      def is_complete(self) -> bool:
          """Check if the query has completed (successfully or with error)."""
-         return self.status in ['COMPLETED', 'ERROR']
+         return self.status in ['completed', 'failed']
  
      @property
      def is_running(self) -> bool:
          """Check if the query is currently running."""
-         return self.status == 'RUNNING'
+         return self.status == 'running'
  
      @property
      def is_queued(self) -> bool:
          """Check if the query is queued."""
-         return self.status == 'QUEUED'
+         return self.status == 'queued'
  
      @property
      def is_successful(self) -> bool:
          """Check if the query completed successfully."""
-         return self.status == 'COMPLETED'
+         return self.status == 'completed'
  
      @property
      def is_failed(self) -> bool:
          """Check if the query failed."""
-         return self.status == 'ERROR'
+         return self.status == 'failed'
  
  
  @dataclass
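
Note that the failure value changed along with the casing: 'ERROR' became 'failed'. Downstream code that compared `status` against the old strings stops matching in 1.25 without raising anything, so the properties above are the stable way to branch on state. A hedged polling sketch written purely against those properties; `fetch_query` stands in for whatever accessor returns a fresh `Query` object, which this diff does not show:

```python
import time

def wait_until_done(fetch_query, poll_seconds=2.0):
    # Poll until the query leaves the queued/running states.
    while True:
        q = fetch_query()
        if q.is_complete:        # covers both 'completed' and 'failed'
            return q
        # otherwise still 'queued' or 'running' (lowercase in 1.25)
        time.sleep(poll_seconds)
```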
adss-1.25.dist-info/METADATA ADDED
@@ -0,0 +1,58 @@
+ Metadata-Version: 2.4
+ Name: adss
+ Version: 1.25
+ Summary: Astronomical Data Smart System
+ Author-email: Gustavo Schwarz <gustavo.b.schwarz@gmail.com>
+ Project-URL: Homepage, https://github.com/schwarzam/adss
+ Classifier: Programming Language :: Python :: 3
+ Requires-Python: >=3.8
+ Description-Content-Type: text/markdown
+ License-File: LICENSE
+ Requires-Dist: requests
+ Requires-Dist: astropy
+ Dynamic: license-file
+
+ # ADSS
+ Astronomical Data Smart System
+
+ ADSS is a database/server project hosted at CBPF (the Brazilian Center for Research in Physics) that provides access to astronomical data from different surveys.
+
+ This repository provides a set of tools for querying ADSS services using ADQL. You can perform cone searches, cross-match queries between tables, and cross-matches against user-supplied data. The library supports both synchronous and asynchronous query execution.
+
+ ## Installation
+
+ ```bash
+ pip install adss
+ ```
+
+ or
+
+ ```bash
+ git clone https://github.com/schwarzam/adss.git
+ cd adss
+ pip install .
+ ```
+
+ ## Tutorials
+
+ We provide a set of tutorials to help you get started with the library:
+
+ Perform a simple query: retrieve the available tables from the service, print the columns of a table, set columns and constraints, run the query, and retrieve the data.
+ - [Basic API](docs/basic_api.md)
+
+ Learn the difference between sync and async queries and when to use each one.
+ - [Methods of query](docs/sync_async.md)
+
+ Perform a raw query against the service.
+ - [Raw Query](docs/perform_raw_queries.md)
+
+ Perform a match between two database tables, and between a database table and a user input table.
+ - [Match API](docs/match_api.md)
+
+ Perform a match between a database table and a user input table.
+ - [User Table Input Match](docs/usertable_input_match.md)
+
+
+ ## Contributing
+
+ We welcome contributions to this project.
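
The tutorials themselves are not part of this diff, but a compact usage sketch can be assembled entirely from calls that appear in `dev/fetch_idr6.py` further down; the endpoint URL and credentials are placeholders:

```python
import adss

client = adss.ADSSClient(
    base_url="https://andromeda.cbpf.br/",  # instance used by the dev script; substitute your own
    username="<user>",
    password="<password>",
)

# Browse what the service exposes
metadata = client.get_database_metadata()
print(metadata.schema_names())
table = metadata.get_schema("splus").get_table("splus_idr6")
print(len(table.column_names()), "columns")

# Run a query and wait for the result; result.data is used as a pandas DataFrame
result = client.query_and_wait("SELECT DISTINCT field FROM splus.splus_idr6")
result.to_csv("fields.csv", index=False)
```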
adss-1.23.dist-info/RECORD → adss-1.25.dist-info/RECORD CHANGED
@@ -5,16 +5,17 @@ adss/exceptions.py,sha256=YeN-xRHvlSmwyS8ni2jOEhhgZK9J1jsG11pOedy3Gfg,1482
  adss/utils.py,sha256=KeQUtTCcye3W07oHpBnwS7g3gG-RqwWMlaE7UgDWwsU,3557
  adss/endpoints/__init__.py,sha256=Pr29901fT8ClCS2GasTjTiBNyn7DfVfxILpYDFsMvPA,488
  adss/endpoints/admin.py,sha256=S6ZrkeA_Lh_LCpF1NHyfMKqjbIiylYXUSV65H_WKg1U,16391
- adss/endpoints/images.py,sha256=ItAiBss_jQvWQWRUvy0c9Cjn1r9lDR8eOPauqOcPcZ8,35777
+ adss/endpoints/images.py,sha256=K6ARdsyle9Y77qCs2_IfRWW17zZegYPcwY70RovZoPM,35626
  adss/endpoints/metadata.py,sha256=RPrRP6Uz6-uPMIcntMgfss9vAd5iN7JXjZbF8SW0EYg,8238
- adss/endpoints/queries.py,sha256=5BONw_IcGORMPNe-5J6BpoFY6z7lKcktEVhqZ9j17_8,17286
+ adss/endpoints/queries.py,sha256=HBhECuS20Vf2kQnyzeZP2JaS_ppkywcFHMYs1KGprSY,17286
  adss/endpoints/users.py,sha256=6Abkl3c3_YKdMYR_JWI-uL9HTHxcjlIOnE29GyN5_QE,10811
  adss/models/__init__.py,sha256=ADWVaGy4dkpEMH3iS_6EnRSBlEgoM5Vy9zORQr-UG6w,404
  adss/models/metadata.py,sha256=6fdH_0BenVRmeXkkKbsG2B68O-N2FXTTRgxsEhAHRoU,4058
- adss/models/query.py,sha256=Af-iojZb-nO6qj-yMT_PlNM7Hip6EwBfNeaQPMJPNM0,4293
+ adss/models/query.py,sha256=vVM6h2rbbNIhXh7InuD2d9JSIArkFiReGfIkL5XwMvs,4295
  adss/models/user.py,sha256=5qVT5qOktokmVLkGszPGCTZWv0wC-7aBMvJ8EeBOqdw,3493
- adss-1.23.dist-info/LICENSE,sha256=1aYqcyqjrdNXY9hqgZkCWprcoA112oKvdrfPyvMYPTc,1468
- adss-1.23.dist-info/METADATA,sha256=jkFEx5_HoE0kyop0UCpMGZTJDYnBjsLgAzSz9e8MBG8,379
- adss-1.23.dist-info/WHEEL,sha256=Wyh-_nZ0DJYolHNn1_hMa4lM7uDedD_RGVwbmTjyItk,91
- adss-1.23.dist-info/top_level.txt,sha256=ebD44L3R0PEvEFoRCJ-RjTIsQ9Yjpo2aAYC1BMtueLg,5
- adss-1.23.dist-info/RECORD,,
+ adss-1.25.dist-info/licenses/LICENSE,sha256=yPw116pnd1J4TuMPnvm6I_irZUyC30EoBZ4BtWFAL7I,1557
+ dev/fetch_idr6.py,sha256=b6FrHPr-ZLaDup_wLOaQWP2fK254Sr3YNHbTxuUt088,12788
+ adss-1.25.dist-info/METADATA,sha256=i3blf4fGy-4Bi_Ty22H20doLLvvgZnaCII1TL4A-c7s,1843
+ adss-1.25.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+ adss-1.25.dist-info/top_level.txt,sha256=NT2zObOOiTWXc0yowpEjT6BiiI1e7WXlXd0ZoK7T5hk,9
+ adss-1.25.dist-info/RECORD,,
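
Each RECORD row is `path,sha256=<digest>,size`, where the digest is the urlsafe-base64 encoding of the file's SHA-256 with `=` padding stripped (per the wheel spec). A quick way to check one of the new entries against an unpacked wheel:

```python
import base64
import hashlib

def record_digest(path: str) -> str:
    # urlsafe base64 of the raw sha256 digest, '=' padding removed
    with open(path, "rb") as fh:
        digest = hashlib.sha256(fh.read()).digest()
    return base64.urlsafe_b64encode(digest).rstrip(b"=").decode("ascii")

# Expected: b6FrHPr-ZLaDup_wLOaQWP2fK254Sr3YNHbTxuUt088 (from the RECORD line above)
print(record_digest("dev/fetch_idr6.py"))
```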
adss-1.23.dist-info/WHEEL → adss-1.25.dist-info/WHEEL CHANGED
@@ -1,5 +1,5 @@
  Wheel-Version: 1.0
- Generator: setuptools (71.1.0)
+ Generator: setuptools (80.9.0)
  Root-Is-Purelib: true
  Tag: py3-none-any
  
adss-1.25.dist-info/licenses/LICENSE ADDED
@@ -0,0 +1,29 @@
+ BSD 3-Clause License
+
+ Copyright (c) 2025, the respective contributors, as shown by the AUTHORS file.
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice, this
+   list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright notice,
+   this list of conditions and the following disclaimer in the documentation
+   and/or other materials provided with the distribution.
+
+ * Neither the name of the copyright holder nor the names of its
+   contributors may be used to endorse or promote products derived from
+   this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+ FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
dev/fetch_idr6.py ADDED
@@ -0,0 +1,298 @@
+ import adss
+ import os
+ import pandas as pd
+ import argparse
+ import time
+ import threading
+ from queue import Queue
+
+ from tqdm import tqdm
+ from concurrent.futures import ThreadPoolExecutor, as_completed
+
+ # Pretty prints
+ from datetime import datetime
+
+ # Thread-safe printing
+ print_lock = threading.Lock()
+
+ def print_log(message, level="INFO"):
+     if level == "ERROR":
+         color = "\033[91m"  # Red color for error messages
+     elif level == "WARNING":
+         color = "\033[93m"  # Yellow color for warning messages
+     elif level == "SUCCESS":
+         color = "\033[92m"  # Green color for success messages
+     else:
+         color = "\033[94m"  # Blue color for info messages
+
+     white_color = "\033[97m"  # White color for the timestamp
+     reset_color = "\033[0m"  # Reset color
+
+     with print_lock:
+         print(f"{color}[{datetime.now().strftime('%Y-%m-%d %H:%M:%S')} - {level}] {white_color}{message}{reset_color}")
+
+ def print_field_status(field, status, details="", thread_id=None):
+     if status == "STARTING":
+         color = "\033[96m"  # Cyan
+     elif status == "QUERYING":
+         color = "\033[93m"  # Yellow
+     elif status == "SAVING":
+         color = "\033[95m"  # Magenta
+     elif status == "COMPLETED":
+         color = "\033[92m"  # Green
+     elif status == "SKIPPED":
+         color = "\033[90m"  # Gray
+     elif status == "ERROR":
+         color = "\033[91m"  # Red
+     else:
+         color = "\033[97m"  # White
+
+     reset_color = "\033[0m"
+     thread_info = f" [T{thread_id}]" if thread_id else ""
+
+     with print_lock:
+         print(f"{color}[{datetime.now().strftime('%H:%M:%S')}]{thread_info} {field}: {status}{reset_color} {details}")
+
+ print_log("Test")
+
+ # Fetch code
+ def main(multithreaded=False, fields_out_format='csv', max_workers=7):
+     class Args:
+         def __init__(self):
+             self.username = 'splusdatateam'
+             self.password = 'asdflkjh'
+             self.outfolder = 'VAC_Catalogues/'
+
+     args = Args()
+
+     # Initialize client
+     print_log("Initializing ADSS client...")
+     client = adss.ADSSClient(
+         base_url="https://andromeda.cbpf.br/",
+         username=args.username,
+         password=args.password,
+         verify_ssl=False
+     )
+
+     # Get database metadata
+     print_log("Fetching database metadata...")
+     metadata = client.get_database_metadata()
+     print_log(f"Available schemas: {metadata.schema_names()}")
+
+     schema = metadata.get_schema("splus")
+     print_log(f"Available tables: {schema.table_names()}")
+
+     table = schema.get_table("splus_idr6")
+     print_log(f"Table columns: {len(table.column_names())} columns")
+
+     # Get or load fields
+     print_log("Loading or querying fields...")
+     fields_file = os.path.join(args.outfolder, f"fields.csv")
+     if not os.path.exists(fields_file):
+         print_log("Querying distinct fields...")
+         result = client.query_and_wait("SELECT DISTINCT field FROM splus.splus_idr6")
+         print_log(f"Query result: {result}")
+
+         os.makedirs(args.outfolder, exist_ok=True)
+         result.to_csv(fields_file, index=False)
+         fields = result.data["field"].tolist()
+     else:
+         fields = pd.read_csv(fields_file)["field"].tolist()
+         print_log(f"Fields loaded from file: {len(fields)} fields")
+
+     # Process each field
+     print_log(f"Processing {len(fields)} fields...")
+     splus_filters = ['u', 'j0378', 'j0395', 'j0410', 'j0430', 'g', 'j0515', 'r', 'j0660', 'i', 'j0861', 'z']
+     apertures = ['aper_3', 'aper_6', 'auto', 'auto_restricted', 'isophotal', 'petro', 'psf', 'pstotal']
+
+     magnitudes = {}
+     magnitude_errors = {}
+     for aperture in apertures:
+         magnitudes[aperture] = [f'mag_{aperture}_{f}' for f in splus_filters]
+         if aperture != 'auto_restricted':
+             magnitude_errors[aperture] = [f'err_mag_{aperture}_{f}' for f in splus_filters]
+
+     columns_to_get = ['field', 'id', 'ra', 'dec', 'class_star_det', 'class_star_r',
+                       'flux_radius_as_20_det', 'flux_radius_as_50_det', 'flux_radius_as_70_det', 'flux_radius_as_90_det',
+                       'a_pixel_det', 'err_a_pixel_det', 'a_restricted_pixel_r', 'b_pixel_det', 'err_b_pixel_det', 'b_restricted_pixel_r',
+                       'ellipticity_det', 'elongation_det',
+                       'flags_det', 'flags_r',
+                       'fwhm_n_det', 'fwhm_pixels_det',
+                       'isophotal_area_pixel_det', 'kron_radius_det', 'kron_radius_restricted_r', 'petro_radius_det',
+                       'mu_background_r', 'mu_max_g', 'mu_max_r', 'mu_threshold_g', 'mu_threshold_r',
+                       's2n_aper_3_det', 's2n_aper_6_det', 's2n_auto_det', 's2n_iso_det', 's2n_petro_det', 's2n_psf_r', 's2n_pstotal_det']
+     columns_to_get = (columns_to_get + magnitudes['aper_3'] + magnitudes['aper_6'] + magnitudes['auto'] + magnitudes['auto_restricted'] +
+                       magnitudes['isophotal'] + magnitudes['petro'] + magnitudes['psf'] + magnitudes['pstotal'])
+     columns_to_get = (columns_to_get + magnitude_errors['aper_3'] + magnitude_errors['aper_6'] + magnitude_errors['auto'] +
+                       magnitude_errors['isophotal'] + magnitude_errors['petro'] + magnitude_errors['psf'] + magnitude_errors['pstotal'])
+
+     print_log(f"Columns to get: {len(columns_to_get)} columns")
+     print_log(f"Files will be saved in: {args.outfolder}")
+     print_log(f"Using {max_workers} parallel workers")
+
+     if not multithreaded:
+         os.makedirs(args.outfolder, exist_ok=True)
+         pbar = tqdm(enumerate(fields, 1), total=len(fields))
+         for i, field in pbar:
+             field_file = os.path.join(args.outfolder, f"{field}.{fields_out_format}")
+             if os.path.exists(field_file):
+                 print(f"[{i}/{len(fields)}] Skipping {field} (already exists)")
+                 continue
+
+             pbar.set_description_str(f"Processing field: {field}")
+             try:
+                 result = client.query_and_wait(
+                     f"""SELECT {', '.join(columns_to_get)}
+                     FROM splus.splus_idr6
+                     WHERE field = '{field}'"""
+                 )
+
+                 if result.data.empty:
+                     print(f"  No data found for field: {field}")
+                 else:
+                     if fields_out_format == 'parquet':
+                         result.to_parquet(field_file, index=False)
+                     if fields_out_format == 'csv':
+                         result.to_csv(field_file, index=False)
+             except Exception as e:
+                 print(f"  Error processing field {field}: {e}")
+
+     else:
+         print_log("Starting multithreaded processing with detailed progress tracking...")
+         print()
+
+         os.makedirs(args.outfolder, exist_ok=True)
+
+         # Statistics tracking
+         completed_count = 0
+         skipped_count = 0
+         error_count = 0
+         stats_lock = threading.Lock()
+
+         def update_stats(status):
+             nonlocal completed_count, skipped_count, error_count
+             with stats_lock:
+                 if status == "COMPLETED":
+                     completed_count += 1
+                 elif status == "SKIPPED":
+                     skipped_count += 1
+                 elif status == "ERROR":
+                     error_count += 1
+
+         def process_field(field, field_index, total_fields):
+             thread_id = threading.current_thread().ident % 1000  # Short thread ID
+
+             field_file = os.path.join(args.outfolder, f"{field}.{fields_out_format}")
+
+             # Check if file already exists
+             if os.path.exists(field_file):
+                 print_field_status(field, "SKIPPED", "file already exists", thread_id)
+                 update_stats("SKIPPED")
+                 return {"field": field, "status": "skipped", "thread_id": thread_id}
+
+             try:
+                 print_field_status(field, "STARTING", f"[{field_index}/{total_fields}]", thread_id)
+
+                 # Query phase
+                 print_field_status(field, "QUERYING", "executing database query...", thread_id)
+                 start_time = time.time()
+
+                 result = client.query_and_wait(
+                     f"""SELECT {', '.join(columns_to_get)}
+                     FROM splus.splus_idr6
+                     WHERE field = '{field}'"""
+                 )
+                 print_field_status(field, "FINISHED", "executing database query...", thread_id)
+
+                 query_time = time.time() - start_time
+
+                 if result.data.empty:
+                     print_field_status(field, "COMPLETED", f"no data found (query: {query_time:.1f}s)", thread_id)
+                     update_stats("COMPLETED")
+                     return {"field": field, "status": "no_data", "query_time": query_time, "thread_id": thread_id}
+
+                 # Save phase
+                 row_count = len(result.data)
+                 print_field_status(field, "SAVING", f"{row_count:,} rows (query: {query_time:.1f}s)", thread_id)
+
+                 save_start = time.time()
+                 if fields_out_format == 'parquet':
+                     result.to_parquet(field_file, index=False)
+                 elif fields_out_format == 'csv':
+                     result.to_csv(field_file, index=False)
+
+                 save_time = time.time() - save_start
+                 total_time = time.time() - start_time
+
+                 print_field_status(field, "COMPLETED",
+                                    f"{row_count:,} rows saved (query: {query_time:.1f}s, save: {save_time:.1f}s, total: {total_time:.1f}s)",
+                                    thread_id)
+
+                 update_stats("COMPLETED")
+                 return {
+                     "field": field,
+                     "status": "success",
+                     "rows": row_count,
+                     "query_time": query_time,
+                     "save_time": save_time,
+                     "total_time": total_time,
+                     "thread_id": thread_id
+                 }
+
+             except Exception as e:
+                 print_field_status(field, "ERROR", f"{str(e)}", thread_id)
+                 update_stats("ERROR")
+                 return {"field": field, "status": "error", "error": str(e), "thread_id": thread_id}
+
+         # Submit all jobs
+         results = []
+         with ThreadPoolExecutor(max_workers=max_workers) as executor:
+             # Submit all futures
+             future_to_field = {}
+             for i, field in enumerate(fields, 1):
+                 future = executor.submit(process_field, field, i, len(fields))
+                 future_to_field[future] = field
+
+             # Process completed futures with progress bar
+             with tqdm(total=len(fields), desc="Overall Progress",
+                       bar_format="{l_bar}{bar}| {n_fmt}/{total_fmt} [{elapsed}<{remaining}]") as pbar:
+
+                 for future in as_completed(future_to_field):
+                     try:
+                         result = future.result()
+                         results.append(result)
+
+                         # Update progress bar with current stats
+                         pbar.set_postfix({
+                             'completed': completed_count,
+                             'skipped': skipped_count,
+                             'errors': error_count
+                         })
+                         pbar.update(1)
+
+                     except Exception as e:
+                         field = future_to_field[future]
+                         print_field_status(field, "ERROR", f"Future exception: {str(e)}")
+                         results.append({"field": field, "status": "future_error", "error": str(e)})
+                         pbar.update(1)
+
+         # Summary
+         print()
+         print_log("=" * 60, "INFO")
+         print_log("PROCESSING SUMMARY", "INFO")
+         print_log("=" * 60, "INFO")
+         print_log(f"Total fields: {len(fields)}")
+         print_log(f"Completed: {completed_count}", "SUCCESS")
+         print_log(f"Skipped: {skipped_count}", "WARNING")
+         print_log(f"Errors: {error_count}", "ERROR")
+
+         # Show errors if any
+         if error_count > 0:
+             print_log("Fields with errors:", "ERROR")
+             for result in results:
+                 if result.get("status") in ["error", "future_error"]:
+                     print_log(f"  {result['field']}: {result.get('error', 'Unknown error')}", "ERROR")
+
+ # Run code
+ if __name__ == "__main__":
+     main(multithreaded=True, fields_out_format='parquet', max_workers=7)
adss-1.23.dist-info/LICENSE DELETED
@@ -1,11 +0,0 @@
- Copyright <YEAR> <COPYRIGHT HOLDER>
-
- Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
-
- 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
-
- 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
-
- 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS “AS IS” AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
adss-1.23.dist-info/METADATA DELETED
@@ -1,13 +0,0 @@
- Metadata-Version: 2.1
- Name: adss
- Version: 1.23
- Summary: Astronomical Data Smart System
- Home-page: https://github.com/schwarzam/adss
- Author: Gustavo Schwarz
- Author-email: gustavo.b.schwarz@gmail.com
- Classifier: Programming Language :: Python :: 3
- Classifier: License :: OSI Approved :: Apache Software License
- License-File: LICENSE
- Requires-Dist: requests
- Requires-Dist: astropy
-