windborne-1.0.4-py3-none-any.whl → windborne-1.0.6-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
windborne/__init__.py CHANGED
@@ -8,11 +8,11 @@ from .utils import (
 
 # Import Data API functions
 from .data_api import (
-    get_observations,
-    get_super_observations,
+    get_observations_page,
+    observations,
 
-    poll_super_observations,
-    poll_observations,
+    get_super_observations_page,
+    super_observations,
 
     get_flying_missions,
     get_mission_launch_site,
@@ -44,10 +44,11 @@ __all__ = [
     "convert_to_netcdf",
     "sync_to_s3",
 
-    "get_observations",
-    "get_super_observations",
-    "poll_super_observations",
-    "poll_observations",
+    "get_observations_page",
+    "observations",
+
+    "get_super_observations_page",
+    "super_observations",
 
     "get_flying_missions",
     "get_mission_launch_site",
windborne/cli.py CHANGED
@@ -1,10 +1,10 @@
 import argparse
 
 from . import (
-    poll_super_observations,
-    poll_observations,
-    get_observations,
-    get_super_observations,
+    super_observations,
+    observations,
+    get_observations_page,
+    get_super_observations_page,
     get_flying_missions,
     get_mission_launch_site,
     get_predicted_path,
@@ -35,55 +35,57 @@ def main():
     ####################################################################################################################
     # DATA API FUNCTIONS
     ####################################################################################################################
-    # Poll Super Observations Command
-    poll_super_parser = subparsers.add_parser('poll-super-observations', help='Poll super observations within a time range')
-    poll_super_parser.add_argument('start_time', help='Starting time (YYYY-MM-DD_HH:MM, "YYYY-MM-DD HH:MM:SS" or YYYY-MM-DDTHH:MM:SS.fffZ)')
-    poll_super_parser.add_argument('end_time', help='End time (YYYY-MM-DD_HH:MM, "YYYY-MM-DD HH:MM:SS" or YYYY-MM-DDTHH:MM:SS.fffZ)', nargs='?', default=None)
-    poll_super_parser.add_argument('-i', '--interval', type=int, default=60, help='Polling interval in seconds')
-    poll_super_parser.add_argument('-b', '--bucket-hours', type=float, default=6.0, help='Hours per bucket')
-    poll_super_parser.add_argument('output', help='Save output to a single file (filename.csv, filename.json or filename.little_r) or to multiple files (csv or little_r)')
-
-    # Poll Observations Command
-    poll_parser = subparsers.add_parser('poll-observations', help='Poll observations within a time range')
-    poll_parser.add_argument('start_time', help='Starting time (YYYY-MM-DD_HH:MM, "YYYY-MM-DD HH:MM:SS" or YYYY-MM-DDTHH:MM:SS.fffZ)')
-    poll_parser.add_argument('end_time', help='End time (YYYY-MM-DD_HH:MM, "YYYY-MM-DD HH:MM:SS" or YYYY-MM-DDTHH:MM:SS.fffZ)', nargs='?', default=None)
-    poll_parser.add_argument('-m', '--mission-id', help='Filter observations by mission ID')
-    poll_parser.add_argument('-ml', '--min-latitude', type=float, help='Minimum latitude filter')
-    poll_parser.add_argument('-xl', '--max-latitude', type=float, help='Maximum latitude filter')
-    poll_parser.add_argument('-mg', '--min-longitude', type=float, help='Minimum longitude filter')
-    poll_parser.add_argument('-xg', '--max-longitude', type=float, help='Maximum longitude filter')
-    poll_parser.add_argument('-id', '--include-ids', action='store_true', help='Include observation IDs')
-    poll_parser.add_argument('-u', '--include-updated-at', action='store_true', help='Include update timestamps')
-    poll_parser.add_argument('-i', '--interval', type=int, default=60, help='Polling interval in seconds')
-    poll_parser.add_argument('-b', '--bucket-hours', type=float, default=6.0, help='Hours per bucket')
-    poll_parser.add_argument('output', help='Save output to a single file (filename.csv, filename.json or filename.little_r) or to multiple files (csv or little_r)')
-
-
-    # Get Observations Command
-    obs_parser = subparsers.add_parser('observations', help='Get observations with filters')
-    obs_parser.add_argument('since', help='Get observations since this time (YYYY-MM-DD_HH:MM, "YYYY-MM-DD HH:MM:SS" or YYYY-MM-DDTHH:MM:SS.fffZ)')
-    obs_parser.add_argument('-mt', '--min-time', help='Minimum time filter (YYYY-MM-DD_HH:MM, "YYYY-MM-DD HH:MM:SS" or YYYY-MM-DDTHH:MM:SS.fffZ)')
-    obs_parser.add_argument('-xt', '--max-time', help='Maximum time filter (YYYY-MM-DD_HH:MM, "YYYY-MM-DD HH:MM:SS" or YYYY-MM-DDTHH:MM:SS.fffZ)')
-    obs_parser.add_argument('-m', '--mission-id', help='Filter by mission ID')
+    # Super Observations Command
+    super_obs_parser = subparsers.add_parser('super-observations', help='Poll super observations within a time range')
+    super_obs_parser.add_argument('start_time', help='Starting time (YYYY-MM-DD_HH:MM, "YYYY-MM-DD HH:MM:SS" or YYYY-MM-DDTHH:MM:SS.fffZ)')
+    super_obs_parser.add_argument('end_time', help='End time (YYYY-MM-DD_HH:MM, "YYYY-MM-DD HH:MM:SS" or YYYY-MM-DDTHH:MM:SS.fffZ)', nargs='?', default=None)
+    super_obs_parser.add_argument('-i', '--interval', type=int, default=60, help='Polling interval in seconds')
+    super_obs_parser.add_argument('-b', '--bucket-hours', type=float, default=6.0, help='Hours per bucket')
+    super_obs_parser.add_argument('-d', '--output-dir', help='Directory path where the separate files should be saved. If not provided, files will be saved in current directory.')
+    super_obs_parser.add_argument('output', help='Save output to a single file (filename.csv, filename.json or filename.little_r) or to multiple files (csv or little_r)')
+
+    # Observations Command
+    obs_parser = subparsers.add_parser('observations', help='Poll observations within a time range')
+    obs_parser.add_argument('start_time', help='Starting time (YYYY-MM-DD_HH:MM, "YYYY-MM-DD HH:MM:SS" or YYYY-MM-DDTHH:MM:SS.fffZ)')
+    obs_parser.add_argument('end_time', help='End time (YYYY-MM-DD_HH:MM, "YYYY-MM-DD HH:MM:SS" or YYYY-MM-DDTHH:MM:SS.fffZ)', nargs='?', default=None)
+    obs_parser.add_argument('-m', '--mission-id', help='Filter observations by mission ID')
     obs_parser.add_argument('-ml', '--min-latitude', type=float, help='Minimum latitude filter')
     obs_parser.add_argument('-xl', '--max-latitude', type=float, help='Maximum latitude filter')
     obs_parser.add_argument('-mg', '--min-longitude', type=float, help='Minimum longitude filter')
     obs_parser.add_argument('-xg', '--max-longitude', type=float, help='Maximum longitude filter')
     obs_parser.add_argument('-id', '--include-ids', action='store_true', help='Include observation IDs')
-    obs_parser.add_argument('-mn', '--include-mission-name', action='store_true', help='Include mission names')
     obs_parser.add_argument('-u', '--include-updated-at', action='store_true', help='Include update timestamps')
-    obs_parser.add_argument('output', nargs='?', help='Output file')
+    obs_parser.add_argument('-i', '--interval', type=int, default=60, help='Polling interval in seconds')
+    obs_parser.add_argument('-b', '--bucket-hours', type=float, default=6.0, help='Hours per bucket')
+    obs_parser.add_argument('-d', '--output-dir', help='Directory path where the separate files should be saved. If not provided, files will be saved in current directory.')
+    obs_parser.add_argument('output', help='Save output to a single file (filename.csv, filename.json or filename.little_r) or to multiple files (csv or little_r)')
+
+
+    # Get Observations Page Command
+    obs_page_parser = subparsers.add_parser('observations-page', help='Get observations page with filters')
+    obs_page_parser.add_argument('since', help='Get observations since this time (YYYY-MM-DD_HH:MM, "YYYY-MM-DD HH:MM:SS" or YYYY-MM-DDTHH:MM:SS.fffZ)')
+    obs_page_parser.add_argument('-mt', '--min-time', help='Minimum time filter (YYYY-MM-DD_HH:MM, "YYYY-MM-DD HH:MM:SS" or YYYY-MM-DDTHH:MM:SS.fffZ)')
+    obs_page_parser.add_argument('-xt', '--max-time', help='Maximum time filter (YYYY-MM-DD_HH:MM, "YYYY-MM-DD HH:MM:SS" or YYYY-MM-DDTHH:MM:SS.fffZ)')
+    obs_page_parser.add_argument('-m', '--mission-id', help='Filter by mission ID')
+    obs_page_parser.add_argument('-ml', '--min-latitude', type=float, help='Minimum latitude filter')
+    obs_page_parser.add_argument('-xl', '--max-latitude', type=float, help='Maximum latitude filter')
+    obs_page_parser.add_argument('-mg', '--min-longitude', type=float, help='Minimum longitude filter')
+    obs_page_parser.add_argument('-xg', '--max-longitude', type=float, help='Maximum longitude filter')
+    obs_page_parser.add_argument('-id', '--include-ids', action='store_true', help='Include observation IDs')
+    obs_page_parser.add_argument('-mn', '--include-mission-name', action='store_true', help='Include mission names')
+    obs_page_parser.add_argument('-u', '--include-updated-at', action='store_true', help='Include update timestamps')
+    obs_page_parser.add_argument('output', nargs='?', help='Output file')
 
     # Get Super Observations Command
-    super_obs_parser = subparsers.add_parser('super-observations', help='Get super observations with filters')
-    super_obs_parser.add_argument('since', help='Get super observations since this time (YYYY-MM-DD_HH:MM, "YYYY-MM-DD HH:MM:SS" or YYYY-MM-DDTHH:MM:SS.fffZ)')
-    super_obs_parser.add_argument('-mt', '--min-time', help='Minimum time filter (YYYY-MM-DD_HH:MM, "YYYY-MM-DD HH:MM:SS" or YYYY-MM-DDTHH:MM:SS.fffZ)')
-    super_obs_parser.add_argument('-xt', '--max-time', help='Maximum time filter (YYYY-MM-DD_HH:MM, "YYYY-MM-DD HH:MM:SS" or YYYY-MM-DDTHH:MM:SS.fffZ)')
-    super_obs_parser.add_argument('-m', '--mission-id', help='Filter by mission ID')
-    super_obs_parser.add_argument('-id', '--include-ids', action='store_true', help='Include observation IDs')
-    super_obs_parser.add_argument('-mn', '--include-mission-name', action='store_true', help='Include mission names')
-    super_obs_parser.add_argument('-u', '--include-updated-at', action='store_true', help='Include update timestamps')
-    super_obs_parser.add_argument('output', nargs='?', help='Output file')
+    super_obs_page_parser = subparsers.add_parser('super-observations-page', help='Get super observations page with filters')
+    super_obs_page_parser.add_argument('since', help='Get super observations page since this time (YYYY-MM-DD_HH:MM, "YYYY-MM-DD HH:MM:SS" or YYYY-MM-DDTHH:MM:SS.fffZ)')
+    super_obs_page_parser.add_argument('-mt', '--min-time', help='Minimum time filter (YYYY-MM-DD_HH:MM, "YYYY-MM-DD HH:MM:SS" or YYYY-MM-DDTHH:MM:SS.fffZ)')
+    super_obs_page_parser.add_argument('-xt', '--max-time', help='Maximum time filter (YYYY-MM-DD_HH:MM, "YYYY-MM-DD HH:MM:SS" or YYYY-MM-DDTHH:MM:SS.fffZ)')
+    super_obs_page_parser.add_argument('-m', '--mission-id', help='Filter by mission ID')
+    super_obs_page_parser.add_argument('-id', '--include-ids', action='store_true', help='Include observation IDs')
+    super_obs_page_parser.add_argument('-mn', '--include-mission-name', action='store_true', help='Include mission names')
+    super_obs_page_parser.add_argument('-u', '--include-updated-at', action='store_true', help='Include update timestamps')
+    super_obs_page_parser.add_argument('output', nargs='?', help='Output file')
 
     # Get Flying Missions Command
     flying_parser = subparsers.add_parser('flying-missions', help='Get currently flying missions')
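The renamed subcommands mirror the library renames: `super-observations`/`observations` now poll a time range (and gain `-d/--output-dir`), while the old filter-style commands live on as `observations-page`/`super-observations-page`. A hypothetical invocation through `main()`; the console-script name and all argument values are assumptions, and the three positionals are kept contiguous so argparse resolves the optional `end_time` correctly:

```python
import sys
from windborne.cli import main

# Roughly: windborne super-observations 2024-01-01_00:00 2024-01-01_06:00 csv -b 6 -d output/
sys.argv = ["windborne", "super-observations",
            "2024-01-01_00:00", "2024-01-01_06:00", "csv",  # start_time, end_time, output
            "-b", "6", "-d", "output/"]
main()
```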
@@ -198,8 +200,8 @@ def main():
     ####################################################################################################################
     # DATA API FUNCTIONS CALLED
     ####################################################################################################################
-    if args.command == 'poll-super-observations':
-        # Error handling is performed within poll_super_observations
+    if args.command == 'super-observations':
+        # Error handling is performed within super_observations
         # and we display the appropriate error messages
         # No need to implement them here
 
@@ -207,22 +209,25 @@ def main():
         if '.' in args.output:
             save_to_file = args.output
             output_format = None
+            output_dir = None
         # In case user wants separate file for each data from missions (buckets)
         else:
             save_to_file = None
             output_format = args.output
+            output_dir = args.output_dir
 
-        poll_super_observations(
+        super_observations(
             start_time=args.start_time,
             end_time=args.end_time,
             interval=args.interval,
             save_to_file=save_to_file,
             bucket_hours=args.bucket_hours,
+            output_dir=output_dir,
             output_format=output_format
         )
 
-    elif args.command == 'poll-observations':
-        # Error handling is performed within poll_observations
+    elif args.command == 'observations':
+        # Error handling is performed within observations
         # and we display the appropriate error messages
         # No need to implement them here
 
@@ -230,12 +235,14 @@ def main():
         if '.' in args.output:
             save_to_file = args.output
             output_format = None
+            output_dir = None
         # In case user wants separate file for each data from missions (buckets)
         else:
             save_to_file = None
             output_format = args.output
+            output_dir = args.output_dir
 
-        poll_observations(
+        observations(
             start_time=args.start_time,
             end_time=args.end_time,
             include_ids=args.include_ids,
@@ -248,36 +255,63 @@ def main():
             interval=args.interval,
             save_to_file=save_to_file,
             bucket_hours=args.bucket_hours,
+            output_dir=output_dir,
             output_format=output_format
         )
 
-    elif args.command == 'observations':
-        get_observations(
-            since=args.since,
-            min_time=args.min_time,
-            max_time=args.max_time,
-            include_ids=args.include_ids,
-            include_mission_name=args.include_mission_name,
-            include_updated_at=args.include_updated_at,
-            mission_id=args.mission_id,
-            min_latitude=args.min_latitude,
-            max_latitude=args.max_latitude,
-            min_longitude=args.min_longitude,
-            max_longitude=args.max_longitude,
-            save_to_file=args.output
-        )
-
-    elif args.command == 'super-observations':
-        get_super_observations(
-            since=args.since,
-            min_time=args.min_time,
-            max_time=args.max_time,
-            include_ids=args.include_ids,
-            include_mission_name=args.include_mission_name,
-            include_updated_at=args.include_updated_at,
-            mission_id=args.mission_id,
-            save_to_file=args.output
-        )
+    elif args.command == 'observations-page':
+        if not args.output:
+            pprint(get_observations_page(
+                since=args.since,
+                min_time=args.min_time,
+                max_time=args.max_time,
+                include_ids=args.include_ids,
+                include_mission_name=args.include_mission_name,
+                include_updated_at=args.include_updated_at,
+                mission_id=args.mission_id,
+                min_latitude=args.min_latitude,
+                max_latitude=args.max_latitude,
+                min_longitude=args.min_longitude,
+                max_longitude=args.max_longitude
+            ))
+        else:
+            get_observations_page(
+                since=args.since,
+                min_time=args.min_time,
+                max_time=args.max_time,
+                include_ids=args.include_ids,
+                include_mission_name=args.include_mission_name,
+                include_updated_at=args.include_updated_at,
+                mission_id=args.mission_id,
+                min_latitude=args.min_latitude,
+                max_latitude=args.max_latitude,
+                min_longitude=args.min_longitude,
+                max_longitude=args.max_longitude,
+                save_to_file=args.output
+            )
+
+    elif args.command == 'super-observations-page':
+        if not args.output:
+            pprint(get_super_observations_page(
+                since=args.since,
+                min_time=args.min_time,
+                max_time=args.max_time,
+                include_ids=args.include_ids,
+                include_mission_name=args.include_mission_name,
+                include_updated_at=args.include_updated_at,
+                mission_id=args.mission_id
+            ))
+        else:
+            get_super_observations_page(
+                since=args.since,
+                min_time=args.min_time,
+                max_time=args.max_time,
+                include_ids=args.include_ids,
+                include_mission_name=args.include_mission_name,
+                include_updated_at=args.include_updated_at,
+                mission_id=args.mission_id,
+                save_to_file=args.output
+            )
 
     elif args.command == 'flying-missions':
         get_flying_missions(cli=True, save_to_file=args.output)
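Note the behavior encoded in the new dispatch: a `*-page` command run without an output file pretty-prints the raw response instead of silently discarding it. The library-side equivalent would be something like this (placeholder timestamp, credentials assumed to be configured):

```python
from pprint import pprint
from windborne import get_super_observations_page

# No save_to_file given: inspect the page on stdout, as the CLI now does.
pprint(get_super_observations_page(since="2024-01-01_00:00"))
```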
windborne/data_api.py CHANGED
@@ -8,9 +8,15 @@ from datetime import datetime, timezone, timedelta
 import csv
 import json
 
-def get_observations(since=None, min_time=None, max_time=None, include_ids=None, include_mission_name=True, include_updated_at=None, mission_id=None, min_latitude=None, max_latitude=None, min_longitude=None, max_longitude=None, save_to_file=None):
+# ------------
+# CORE RESOURCES
+# ------------
+
+# Observations
+# ------------
+def get_observations_page(since=None, min_time=None, max_time=None, include_ids=None, include_mission_name=True, include_updated_at=None, mission_id=None, min_latitude=None, max_latitude=None, min_longitude=None, max_longitude=None, save_to_file=None):
     """
-    Retrieves observations based on specified filters including geographical bounds.
+    Retrieves observations page based on specified filters including geographical bounds.
 
     Args:
         since (str): Filter observations after this timestamp.
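For reference, a single-page fetch through the renamed function might look like the following sketch; the bounding box is arbitrary and only the `observations` response key is taken from the diff:

```python
from windborne import get_observations_page

# One page of observations over a rough North Atlantic box.
page = get_observations_page(
    since="2024-01-01_00:00",
    min_latitude=30.0, max_latitude=60.0,
    min_longitude=-60.0, max_longitude=0.0,
    include_mission_name=True,
)
print(f"{len(page.get('observations', []))} observation(s) on this page")
```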
@@ -70,52 +76,7 @@ def get_observations(since=None, min_time=None, max_time=None, include_ids=None,
 
     return response
 
-def get_super_observations(since=None, min_time=None, max_time=None, include_ids=None, include_mission_name=None, include_updated_at=None, mission_id=None, save_to_file=None):
-    """
-    Retrieves super observations based on specified filters.
-
-    Args:
-        since (str): Filter observations after this timestamp.
-        min_time (str): Minimum timestamp for observations.
-        max_time (str): Maximum timestamp for observations.
-        include_ids (bool): Include observation IDs in response.
-        include_mission_name (bool): Include mission names in response.
-        include_updated_at (bool): Include update timestamps in response.
-        mission_id (str): Filter observations by mission ID.
-        save_to_file (str): Optional path to save the response data.
-            If provided, saves the data in CSV format.
-
-    Returns:
-        dict: The API response containing filtered super observations.
-    """
-
-    url = f"{DATA_API_BASE_URL}/super_observations.json"
-
-    params = {}
-    if since:
-        params["since"] = to_unix_timestamp(since)
-    if min_time:
-        params["min_time"] = to_unix_timestamp(min_time)
-    if max_time:
-        params["max_time"] = to_unix_timestamp(max_time)
-    if mission_id:
-        params["mission_id"] = mission_id
-    if include_ids:
-        params["include_ids"] = True
-    if include_mission_name:
-        params["include_mission_name"] = True
-    if include_updated_at:
-        params["include_updated_at"] = True
-
-    params = {k: v for k, v in params.items() if v is not None}
-
-    response = make_api_request(url, params=params)
-    if save_to_file:
-        save_csv_json(save_to_file, response, csv_data_key='observations')
-
-    return response
-
-def poll_observations(start_time, end_time=None, include_ids=None, include_updated_at=None, mission_id=None, min_latitude=None, max_latitude=None, min_longitude=None, max_longitude=None, interval=60, save_to_file=None, bucket_hours=6.0, output_format=None, callback=None):
+def observations(start_time, end_time=None, include_ids=None, include_updated_at=None, mission_id=None, min_latitude=None, max_latitude=None, min_longitude=None, max_longitude=None, interval=60, save_to_file=None, bucket_hours=6.0, output_format=None, output_dir=None, callback=None):
     """
     Fetches observations between a start time and an optional end time and saves to files in specified format.
     Files are broken up into time buckets, with filenames containing the time at the mid-point of the bucket.
@@ -140,6 +101,7 @@ def poll_observations(start_time, end_time=None, include_ids=None, include_updat
             Supported formats are '.csv', '.json', '.little_r' and '.nc'
         bucket_hours (int): Optional. Size of time buckets in hours. Defaults to 6 hours.
         output_format (str): Optional. Format to save data in separate files. Supported formats are 'json, 'csv', 'little_r' and 'netcdf'.
+        output_dir (str): Optional. Directory path where the separate files should be saved. If not provided, files will be saved in current directory.
         callback (callable): Optional callback function that receives (super observations, metadata) before saving.
             This allows custom processing or saving in custom formats.
     """
@@ -165,7 +127,7 @@ def poll_observations(start_time, end_time=None, include_ids=None, include_updat
         return
 
     # Supported formats for saving into a single file:
-    # NOTE: for poll_observations we handle .csv saving within poll_observations and not using save_csv_json
+    # NOTE: for observations we handle .csv saving within observations and not using save_csv_json
     # - .csv
     # - .json
     # - .little_r
@@ -201,12 +163,13 @@ def poll_observations(start_time, end_time=None, include_ids=None, include_updat
     # Initialize the polling loop
     current_timestamp = start_time
     has_next_page = True
+    fetced_so_far = 0
 
 
     while has_next_page:
         try:
             # Fetch observations
-            observations_page = get_observations(
+            observations_page = get_observations_page(
                 since=current_timestamp,
                 min_latitude=min_latitude,
                 max_latitude=max_latitude,
@@ -226,11 +189,15 @@ def poll_observations(start_time, end_time=None, include_ids=None, include_updat
                 continue
 
             observations = observations_page.get('observations', [])
-            print(f"Fetched {len(observations)} observation(s)")
+            fetced_so_far = fetced_so_far + len(observations)
+            print_current_timestamp = current_timestamp if current_timestamp < 1e11 else current_timestamp / 1e9
+            print(f"Fetched {fetced_so_far} observation(s)")
+            print(f"Current time: {datetime.fromtimestamp(print_current_timestamp).strftime('%Y-%m-%d %H:%M:%S')}")
+            print("-----------------------------------------------------")
 
             # Invoke the callback with fetched observations
             if callback:
-                print("/nCallback/n")
+                print("\nCallback\n")
                 callback(observations)
 
             for obs in observations:
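The `1e11` guard is a units heuristic: 1e11 Unix seconds is roughly the year 5100, so any larger value must be a nanosecond timestamp and is divided by 1e9 before formatting. A standalone illustration:

```python
from datetime import datetime

for ts in (1704067200, 1704067200000000000):  # the same instant in seconds and nanoseconds
    seconds = ts if ts < 1e11 else ts / 1e9
    print(datetime.fromtimestamp(seconds).strftime('%Y-%m-%d %H:%M:%S'))
# Both iterations print the same local wall-clock time.
```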
@@ -281,7 +248,7 @@ def poll_observations(start_time, end_time=None, include_ids=None, include_updat
 
             if not has_next_page or not next_timestamp or next_timestamp <= current_timestamp:
                 print("-----------------------------------------------------\n")
-                print("No more pages available or reached end of time range.")
+                print("Fetching complete.")
                 print("\n-----------------------------------------------------")
                 break
 
@@ -293,12 +260,20 @@ def poll_observations(start_time, end_time=None, include_ids=None, include_updat
 
     # Save data to a single file
     if save_to_file:
+        # Create directory path if it doesn't exist
+        directory = os.path.dirname(save_to_file)
+        if directory and not os.path.isdir(directory):
+            os.makedirs(directory, exist_ok=True)
         filtered_observations = {obs_id: obs for obs_id, obs in all_observations.items()
                                  if float(obs['timestamp']) >= start_time}
         # Sort by timestamp
         sorted_observations = dict(sorted(filtered_observations.items(),
                                           key=lambda x: float(x[1]['timestamp'])))
 
+        print(f"Saving {len(sorted_observations)} {'observation' if len(sorted_observations) == 1 else 'observations'} to {save_to_file}")
+        print("This may take a while...")
+        print("-----------------------------------------------------\n")
+
         if save_to_file.endswith('.nc'):
             first_obs_timestamp = float(next(iter(sorted_observations.values()))['timestamp'])
             convert_to_netcdf(sorted_observations, first_obs_timestamp, output_filename=save_to_file)
@@ -321,6 +296,15 @@ def poll_observations(start_time, end_time=None, include_ids=None, include_updat
 
     # Save data to multiple file
     elif output_format:
+        # Create output directory if specified
+        if output_dir:
+            os.makedirs(output_dir, exist_ok=True)
+            print(f"Files will be saved to {output_dir}")
+        else:
+            print(f"Files will be saved to {os.getcwd()}")
+        print(f"Processing {fetced_so_far} {'observation' if fetced_so_far == 1 else 'observations'} and save them over multiple files.")
+        print("This may take a while...")
+        print("-----------------------------------------------------\n")
         # Track statistics per mission
         mission_stats = {} # {mission_name: {'files': 0, 'observations': 0}}
         total_observations_written = 0
@@ -329,48 +313,38 @@ def poll_observations(start_time, end_time=None, include_ids=None, include_updat
         for (bucket_center, mission_name), observations in buckets.items():
             if observations:
                 # Format hour to be the actual bucket center
-                bucket_hour = int((bucket_center.hour + bucket_hours/2) % 24)
+                bucket_hour = int((bucket_center.hour + bucket_hours / 2) % 24)
 
-                if output_format == 'netcdf':
-                    convert_to_netcdf(observations, bucket_center.timestamp())
+                # Generate file name based on output format
+                file_name_format = {
+                    'csv': f"WindBorne_{mission_name}_%04d-%02d-%02d_%02d_%dh.csv",
+                    'json': f"WindBorne_{mission_name}_%04d-%02d-%02d_%02d_%dh.json",
+                    'little_r': f"WindBorne_{mission_name}_%04d-%02d-%02d_%02d-00_%dh.little_r"
+                }
+                file_name = file_name_format[output_format] % (
+                    bucket_center.year, bucket_center.month, bucket_center.day,
+                    bucket_hour, bucket_hours)
 
-                if output_format == 'csv':
-                    output_file = (f"WindBorne_{mission_name}_%04d-%02d-%02d_%02d_%dh.csv" %
-                                   (bucket_center.year, bucket_center.month, bucket_center.day,
-                                    bucket_hour, bucket_hours))
+                output_file = os.path.join(output_dir or '.', file_name)
 
-                    os.makedirs(os.path.dirname(output_file) or '.', exist_ok=True)
+                # Sort observations by timestamp within each bucket
+                sorted_obs = sorted(observations.values(), key=lambda x: int(x['timestamp']))
 
-                    # Sort observations by timestamp within each bucket
-                    sorted_obs = sorted(observations.values(), key=lambda x: int(x['timestamp']))
+                if output_format == 'netcdf':
+                    convert_to_netcdf(sorted_obs, bucket_center.timestamp())
 
+                elif output_format == 'csv':
                     with open(output_file, mode='w', newline='') as file:
                         writer = csv.DictWriter(file, fieldnames=headers)
                         writer.writeheader()
                         writer.writerows(sorted_obs)
 
                 elif output_format == 'json':
-                    output_file = (f"WindBorne_{mission_name}_%04d-%02d-%02d_%02d_%dh.json" %
-                                   (bucket_center.year, bucket_center.month, bucket_center.day,
-                                    bucket_hour, bucket_hours))
-
-                    os.makedirs(os.path.dirname(output_file) or '.', exist_ok=True)
-
-                    # Sort observations by timestamp within each bucket
-                    sorted_obs = dict(sorted(observations.items(), key=lambda x: int(x[1]['timestamp'])))
-
+                    sorted_obs_dict = {k: v for k, v in sorted(observations.items(), key=lambda x: int(x[1]['timestamp']))}
                     with open(output_file, 'w', encoding='utf-8') as file:
-                        json.dump(sorted_obs, file, indent=4)
+                        json.dump(sorted_obs_dict, file, indent=4)
 
                 elif output_format == 'little_r':
-                    output_file = (f"WindBorne_{mission_name}_%04d-%02d-%02d_%02d-00_%dh.little_r" %
-                                   (bucket_center.year, bucket_center.month, bucket_center.day,
-                                    bucket_hour, bucket_hours))
-
-                    os.makedirs(os.path.dirname(output_file) or '.', exist_ok=True)
-
-                    sorted_obs = sorted(observations.items(), key=lambda x: int(x[1]['timestamp']))
-
                     little_r_records = format_little_r(sorted_obs)
                     with open(output_file, 'w') as file:
                         file.write('\n'.join(little_r_records))
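A worked example of the consolidated filename template (hypothetical mission name; note that `%d` truncates the float `bucket_hours` to an integer):

```python
from datetime import datetime

mission_name = "W-1234"                  # hypothetical mission
bucket_center = datetime(2024, 1, 1, 3)
bucket_hours = 6.0
bucket_hour = int((bucket_center.hour + bucket_hours / 2) % 24)

file_name = f"WindBorne_{mission_name}_%04d-%02d-%02d_%02d_%dh.csv" % (
    bucket_center.year, bucket_center.month, bucket_center.day,
    bucket_hour, bucket_hours)
print(file_name)  # WindBorne_W-1234_2024-01-01_06_6h.csv
```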
@@ -382,7 +356,7 @@ def poll_observations(start_time, end_time=None, include_ids=None, include_updat
                 mission_stats[mission_name]['files'] += 1
                 mission_stats[mission_name]['observations'] += len(observations)
         # Print total observations written
-        print(f"Total {'observation' if total_observations_written == 1 else 'observations'} written: {total_observations_written}")
+        print(f"Saved {total_observations_written} {'observation.' if total_observations_written == 1 else 'observations.'}")
         print("-----------------------------------------------------")
 
         # Print summary for each mission
@@ -392,7 +366,55 @@ def poll_observations(start_time, end_time=None, include_ids=None, include_updat
     print("-----------------------------------------------------")
     print("All observations have been processed and saved.")
 
-def poll_super_observations(start_time, end_time=None, interval=60, save_to_file=None, bucket_hours=6.0, output_format=None, callback=None):
+
+# Super Observations
+# ------------
+def get_super_observations_page(since=None, min_time=None, max_time=None, include_ids=None, include_mission_name=None, include_updated_at=None, mission_id=None, save_to_file=None):
+    """
+    Retrieves super observations page based on specified filters.
+
+    Args:
+        since (str): Filter observations after this timestamp.
+        min_time (str): Minimum timestamp for observations.
+        max_time (str): Maximum timestamp for observations.
+        include_ids (bool): Include observation IDs in response.
+        include_mission_name (bool): Include mission names in response.
+        include_updated_at (bool): Include update timestamps in response.
+        mission_id (str): Filter observations by mission ID.
+        save_to_file (str): Optional path to save the response data.
+            If provided, saves the data in CSV format.
+
+    Returns:
+        dict: The API response containing filtered super observations.
+    """
+
+    url = f"{DATA_API_BASE_URL}/super_observations.json"
+
+    params = {}
+    if since:
+        params["since"] = to_unix_timestamp(since)
+    if min_time:
+        params["min_time"] = to_unix_timestamp(min_time)
+    if max_time:
+        params["max_time"] = to_unix_timestamp(max_time)
+    if mission_id:
+        params["mission_id"] = mission_id
+    if include_ids:
+        params["include_ids"] = True
+    if include_mission_name:
+        params["include_mission_name"] = True
+    if include_updated_at:
+        params["include_updated_at"] = True
+
+    params = {k: v for k, v in params.items() if v is not None}
+
+    response = make_api_request(url, params=params)
+    if save_to_file:
+        save_csv_json(save_to_file, response, csv_data_key='observations')
+
+    return response
+
+def super_observations(start_time, end_time=None, interval=60, save_to_file=None, bucket_hours=6.0, output_format=None, output_dir=None, callback=None):
     """
     Fetches super observations between a start time and an optional end time and saves to files in specified format.
     Files are broken up into time buckets, with filenames containing the time at the mid-point of the bucket.
@@ -408,6 +430,7 @@ def poll_super_observations(start_time, end_time=None, interval=60, save_to_file
             Supported formats are '.csv', '.json', '.little_r' and '.nc'
         bucket_hours (int): Optional. Size of time buckets in hours. Defaults to 6 hours.
         output_format (str): Optional. Format to save data in separate files. Supported formats are 'json, 'csv', 'little_r' and 'netcdf'.
+        output_dir (str): Optional. Directory path where the separate files should be saved. If not provided, files will be saved in current directory.
         callback (callable): Optional callback function that receives (super observations, metadata) before saving.
             This allows custom processing or saving in custom formats.
     """
@@ -469,12 +492,13 @@ def poll_super_observations(start_time, end_time=None, interval=60, save_to_file
     # Initialize the polling loop
     current_timestamp = start_time
     has_next_page = True
+    fetced_so_far = 0
 
 
     while has_next_page:
         try:
             # Fetch observations
-            observations_page = get_super_observations(
+            observations_page = get_super_observations_page(
                 since=current_timestamp,
                 min_time=start_time,
                 max_time=end_time,
@@ -490,7 +514,11 @@ def poll_super_observations(start_time, end_time=None, interval=60, save_to_file
                 continue
 
             observations = observations_page.get('observations', [])
-            print(f"Fetched {len(observations)} super observation(s)")
+            fetced_so_far = fetced_so_far + len(observations)
+            print_current_timestamp = current_timestamp if current_timestamp < 1e11 else current_timestamp / 1e9
+            print(f"Fetched {fetced_so_far} super observation(s)")
+            print(f"Current time: {datetime.fromtimestamp(print_current_timestamp).strftime('%Y-%m-%d %H:%M:%S')}")
+            print("-----------------------------------------------------")
 
             # Invoke the callback with fetched observations
             if callback:
@@ -547,7 +575,7 @@ def poll_super_observations(start_time, end_time=None, interval=60, save_to_file
 
             if not has_next_page or not next_timestamp or next_timestamp <= current_timestamp:
                 print("-----------------------------------------------------\n")
-                print("No more pages available or reached end of time range.")
+                print("Fetching complete.")
                 print("\n-----------------------------------------------------")
                 break
 
@@ -559,12 +587,20 @@ def poll_super_observations(start_time, end_time=None, interval=60, save_to_file
 
     # Save data to a single file
     if save_to_file:
+        # Create directory path if it doesn't exist
+        directory = os.path.dirname(save_to_file)
+        if directory and not os.path.isdir(directory):
+            os.makedirs(directory, exist_ok=True)
         filtered_observations = {obs_id: obs for obs_id, obs in all_observations.items()
                                  if float(obs['timestamp']) >= start_time}
         # Sort by timestamp
         sorted_observations = dict(sorted(filtered_observations.items(),
                                           key=lambda x: float(x[1]['timestamp'])))
 
+        print(f"Saving {len(sorted_observations)} super {'observation' if len(sorted_observations) == 1 else 'observations'} to {save_to_file}")
+        print("This may take a while...")
+        print("-----------------------------------------------------\n")
+
         if save_to_file.endswith('.nc'):
             first_obs_timestamp = float(next(iter(sorted_observations.values()))['timestamp'])
             convert_to_netcdf(sorted_observations, first_obs_timestamp, output_filename=save_to_file)
@@ -588,6 +624,16 @@ def poll_super_observations(start_time, end_time=None, interval=60, save_to_file
 
     # Save data to multiple file
     elif output_format:
+        # Create output directory if specified
+        if output_dir:
+            os.makedirs(output_dir, exist_ok=True)
+            print(f"Files will be saved to {output_dir}")
+        else:
+            print(f"Files will be saved to {os.getcwd()}")
+
+        print(f"Processing {fetced_so_far} super {'observation' if fetced_so_far == 1 else 'observations'} and save them over multiple files.")
+        print("This may take a while...")
+        print("-----------------------------------------------------\n")
         # Track statistics per mission
         mission_stats = {} # {mission_name: {'files': 0, 'observations': 0}}
         total_observations_written = 0
@@ -598,46 +644,36 @@ def poll_super_observations(start_time, end_time=None, interval=60, save_to_file
                 # Format hour to be the actual bucket center
                 bucket_hour = int((bucket_center.hour + bucket_hours/2) % 24)
 
-                if output_format == 'netcdf':
-                    convert_to_netcdf(observations, bucket_center.timestamp())
+                # Generate file name based on output format
+                file_name_format = {
+                    'csv': f"WindBorne_{mission_name}_%04d-%02d-%02d_%02d_%dh.csv",
+                    'json': f"WindBorne_{mission_name}_%04d-%02d-%02d_%02d_%dh.json",
+                    'little_r': f"WindBorne_{mission_name}_%04d-%02d-%02d_%02d-00_%dh.little_r"
+                }
+                file_name = file_name_format[output_format] % (
+                    bucket_center.year, bucket_center.month, bucket_center.day,
+                    bucket_hour, bucket_hours)
 
-                if output_format == 'csv':
-                    output_file = (f"WindBorne_{mission_name}_%04d-%02d-%02d_%02d_%dh.csv" %
-                                   (bucket_center.year, bucket_center.month, bucket_center.day,
-                                    bucket_hour, bucket_hours))
+                output_file = os.path.join(output_dir or '.', file_name)
 
-                    os.makedirs(os.path.dirname(output_file) or '.', exist_ok=True)
+                # Sort observations by timestamp within each bucket
+                sorted_obs = sorted(observations.values(), key=lambda x: int(x['timestamp']))
 
-                    # Sort observations by timestamp within each bucket
-                    sorted_obs = sorted(observations.values(), key=lambda x: int(x['timestamp']))
+                if output_format == 'netcdf':
+                    convert_to_netcdf(sorted_obs, bucket_center.timestamp())
 
+                elif output_format == 'csv':
                     with open(output_file, mode='w', newline='') as file:
                         writer = csv.DictWriter(file, fieldnames=headers)
                         writer.writeheader()
                         writer.writerows(sorted_obs)
 
                 elif output_format == 'json':
-                    output_file = (f"WindBorne_{mission_name}_%04d-%02d-%02d_%02d_%dh.json" %
-                                   (bucket_center.year, bucket_center.month, bucket_center.day,
-                                    bucket_hour, bucket_hours))
-
-                    os.makedirs(os.path.dirname(output_file) or '.', exist_ok=True)
-
-                    # Sort observations by timestamp within each bucket
-                    sorted_obs = dict(sorted(observations.items(), key=lambda x: int(x[1]['timestamp'])))
-
+                    sorted_obs_dict = {k: v for k, v in sorted(observations.items(), key=lambda x: int(x[1]['timestamp']))}
                     with open(output_file, 'w', encoding='utf-8') as file:
-                        json.dump(sorted_obs, file, indent=4)
+                        json.dump(sorted_obs_dict, file, indent=4)
 
                 elif output_format == 'little_r':
-                    output_file = (f"WindBorne_{mission_name}_%04d-%02d-%02d_%02d-00_%dh.little_r" %
-                                   (bucket_center.year, bucket_center.month, bucket_center.day,
-                                    bucket_hour, bucket_hours))
-
-                    os.makedirs(os.path.dirname(output_file) or '.', exist_ok=True)
-
-                    sorted_obs = sorted(observations.items(), key=lambda x: int(x[1]['timestamp']))
-
                     little_r_records = format_little_r(sorted_obs)
                     with open(output_file, 'w') as file:
                         file.write('\n'.join(little_r_records))
@@ -659,6 +695,10 @@ def poll_super_observations(start_time, end_time=None, interval=60, save_to_file
     print("-----------------------------------------------------")
     print("All super observations have been processed and saved.")
 
+
+# ------------
+# METADATA
+# ------------
 def get_flying_missions(cli=None, save_to_file=None):
     """
     Retrieves a list of currently flying missions.
windborne/utils.py CHANGED
@@ -1,5 +1,6 @@
 from .config import CLIENT_ID, API_KEY
 
+import os
 import requests
 import jwt
 import time
@@ -58,6 +59,20 @@ def make_api_request(url, params=None, return_type=None):
         print("--------------------------------------")
         print("To get an API key, email data@windbornesystems.com.")
         exit(91)
+    # Check if credentials are swapped
+    elif len(CLIENT_ID) in [32, 35]:
+        print("Your Client ID and API Key are swapped.")
+        print("--------------------------------------")
+        print("Swap them or modify them accordingly to get access to WindBorne API.")
+        print("--------------------------------------")
+        print("You may refer to https://windbornesystems.com/docs/api/cli#introduction\n"
+              "for instructions on how to set your credentials as environment variables for CLI and Code usage\n\n"
+              "and to https://windbornesystems.com/docs/api/pip_data#introduction\n"
+              "for instruction on how to set your credentials for code usage.")
+        print("--------------------------------------")
+        print(f"Current Client ID: {CLIENT_ID}")
+        print(f"Current API Key: {API_KEY}")
+        exit(95)
 
     # Validate WB_CLIENT_ID format
     if not (is_valid_uuid_v4(CLIENT_ID) or is_valid_client_id_format(CLIENT_ID)):
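The new branch is a pure length heuristic with its own exit code (95): a CLIENT_ID whose length matches the API-key lengths checked here (32 or 35, as implied by the check itself) is assumed to be an API key pasted into the wrong variable. In isolation:

```python
# Standalone sketch of the swapped-credentials heuristic; the lengths are taken from the diff.
def credentials_look_swapped(client_id: str) -> bool:
    return len(client_id) in (32, 35)

print(credentials_look_swapped("x" * 35))  # True: likely an API key, not a client ID
```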
@@ -263,6 +278,11 @@ def save_csv_json(save_to_file, response, csv_data_key=None):
         response (dict or list): The response data to save.
         csv_data_key (str, optional): Key to extract data for CSV. Defaults to None.
     """
+    # Create directory path if it doesn't exist
+    directory = os.path.dirname(save_to_file)
+    if directory and not os.path.isdir(directory):
+        os.makedirs(directory, exist_ok=True)
+
     if '.' not in save_to_file:
         print("You have to provide a file type for your filename.")
         print("Supported formats:")
@@ -357,8 +377,6 @@ def convert_to_netcdf(data, curtime, output_filename=None):
         if col in df.columns:
             df[col] = pd.to_numeric(df[col], errors='coerce')
 
-    df['id'] = pd.to_numeric(df['id'], downcast='integer')
-
     ds = xr.Dataset.from_dataframe(df)
 
     # Build the filename and save some variables for use later
@@ -392,6 +410,9 @@ def convert_to_netcdf(data, curtime, output_filename=None):
 
     # Now that calculations are done, remove variables not needed in the netcdf output
     variables_to_drop = ['humidity', 'speed_x', 'speed_y', 'timestamp']
+    if 'id' in ds and pd.isna(ds['id']).all():
+        variables_to_drop.append('id')
+
     existing_vars = [var for var in variables_to_drop if var in ds]
     ds = ds.drop_vars(existing_vars)
 
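Paired with the removed unconditional downcast earlier in this file, the guard means `id` only reaches the NetCDF output when the column actually carries values. A toy reproduction of the conditional drop on synthetic data:

```python
import numpy as np
import pandas as pd
import xarray as xr

df = pd.DataFrame({"id": [np.nan, np.nan], "temperature": [271.3, 270.9]})
ds = xr.Dataset.from_dataframe(df)

variables_to_drop = []
if 'id' in ds and pd.isna(ds['id']).all():
    variables_to_drop.append('id')
ds = ds.drop_vars([var for var in variables_to_drop if var in ds])
print(list(ds.data_vars))  # ['temperature']: the all-NaN id column is dropped
```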
{windborne-1.0.4.dist-info → windborne-1.0.6.dist-info}/METADATA RENAMED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.2
 Name: windborne
-Version: 1.0.4
+Version: 1.0.6
 Summary: A Python library for interacting with WindBorne Data and Forecasts API
 Author-email: WindBorne Systems <data@windbornesystems.com>
 Classifier: Programming Language :: Python :: 3
windborne-1.0.6.dist-info/RECORD ADDED
@@ -0,0 +1,11 @@
+windborne/__init__.py,sha256=tNnX9BrdgFNjy9NY6ucobCVAgV3KYwUydEAuwxdaiqQ,1784
+windborne/cli.py,sha256=YlIz9KgsJumaCxz64uwAxfUUTJzc2rPEwzwGajCT9Jw,32838
+windborne/config.py,sha256=FYIBRiIuii5igAFQlOsHUa6u2i1kKnO1yZE7QfQJvUg,1688
+windborne/data_api.py,sha256=uggo2Y5U36ptvpSYgCUHNFwbNlczsCx7OUrf_OwlvtE,37629
+windborne/forecasts_api.py,sha256=AYuhFRls_XvzuNB55NF0w3y-_ocYwPxmI6C1lIyFkgM,16865
+windborne/utils.py,sha256=Zp9oTWfbISmJ9nO893RrW6MkqOwCDaFgsszmYaRgJSg,39670
+windborne-1.0.6.dist-info/METADATA,sha256=Fx7tFudf2QWvgLmkzfmTr4BNy2id_rKoOj-J5W4ZqVQ,1264
+windborne-1.0.6.dist-info/WHEEL,sha256=In9FTNxeP60KnTkGw7wk6mJPYd_dQSjEZmXdBdMCI-8,91
+windborne-1.0.6.dist-info/entry_points.txt,sha256=j_YrqdCDrCd7p5MIwQ2BYwNXEi95VNANzLRJmcXEg1U,49
+windborne-1.0.6.dist-info/top_level.txt,sha256=PE9Lauriu5S5REf7JKhXprufZ_V5RiZ_TnfnrLGJrmE,10
+windborne-1.0.6.dist-info/RECORD,,
windborne-1.0.4.dist-info/RECORD DELETED
@@ -1,11 +0,0 @@
-windborne/__init__.py,sha256=Dv2RK6qcZa3YDrTo_W_N9YROKOQV1_HFa0OWnZSBKAc,1783
-windborne/cli.py,sha256=ZNqeIlWqlKE4iCo8kptUH6Wbish0T8O7-Hljt-gEi9Q,31031
-windborne/config.py,sha256=FYIBRiIuii5igAFQlOsHUa6u2i1kKnO1yZE7QfQJvUg,1688
-windborne/data_api.py,sha256=dnF54sLPYRU5dect3WxmonbHKUjihfE9dVdpaT-fylk,35560
-windborne/forecasts_api.py,sha256=AYuhFRls_XvzuNB55NF0w3y-_ocYwPxmI6C1lIyFkgM,16865
-windborne/utils.py,sha256=srFToLi3vtgrmqorwpJSIoG5KMyJgK_GggEinYfF02k,38572
-windborne-1.0.4.dist-info/METADATA,sha256=tKud7EN6Dwm4BADJoYHu3cwYB_wvSlAtJSl4gta6yzI,1264
-windborne-1.0.4.dist-info/WHEEL,sha256=In9FTNxeP60KnTkGw7wk6mJPYd_dQSjEZmXdBdMCI-8,91
-windborne-1.0.4.dist-info/entry_points.txt,sha256=j_YrqdCDrCd7p5MIwQ2BYwNXEi95VNANzLRJmcXEg1U,49
-windborne-1.0.4.dist-info/top_level.txt,sha256=PE9Lauriu5S5REf7JKhXprufZ_V5RiZ_TnfnrLGJrmE,10
-windborne-1.0.4.dist-info/RECORD,,