micromegas 0.2.3__py3-none-any.whl → 0.3.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,99 @@
1
+ import pyarrow
2
+
3
+ # based on https://github.com/apache/arrow-rs/blob/main/arrow-ipc/src/reader.rs
4
+
5
class ArrayReader:
    """Sequentially consumes field nodes and raw buffers of one IPC RecordBatch.

    The Arrow IPC layout lists nodes and buffers in depth-first field order;
    callers pull them in exactly that order via next_node()/next_buffer().
    """

    def __init__(self, schema, nodes, buffers):
        self.schema = schema
        self.nodes = nodes
        self.current_node = 0
        self.buffers = buffers
        self.current_buffer = 0

    def next_node(self):
        """Return the next field node and advance the node cursor."""
        assert self.current_node < len(self.nodes)
        index = self.current_node
        self.current_node = index + 1
        return self.nodes[index]

    def next_buffer(self):
        """Return the next raw buffer and advance the buffer cursor."""
        assert self.current_buffer < len(self.buffers)
        index = self.current_buffer
        self.current_buffer = index + 1
        return self.buffers[index]
24
+
25
+
26
def create_primitive_array(node, data_type, null_buffer, data_buffer):
    """Build a fixed-width (primitive) arrow array from raw IPC buffers.

    :param node: flatbuffers FieldNode providing Length() and NullCount()
    :param data_type: pyarrow DataType of the column
    :param null_buffer: validity bitmap buffer (may be None when no nulls)
    :param data_buffer: contiguous values buffer
    :return: a pyarrow array of *data_type*
    """
    # Array.from_buffers is the generic zero-copy constructor; the original
    # reached it through pyarrow.NumericArray, which only worked because the
    # static method is inherited from Array.
    return pyarrow.Array.from_buffers(
        data_type, node.Length(), [null_buffer, data_buffer], node.NullCount()
    )
30
+
31
+
32
def create_string_array(node, data_type, null_buffer, offset_buffer, data_buffer):
    """Build a variable-length (string/binary) arrow array from raw IPC buffers.

    :param node: flatbuffers FieldNode providing Length() and NullCount()
    :param data_type: pyarrow DataType (string/binary/large_string/large_binary)
    :param null_buffer: validity bitmap buffer (may be None when no nulls)
    :param offset_buffer: value-offsets buffer
    :param data_buffer: concatenated value bytes
    :return: a pyarrow array of *data_type*
    """
    # Bug fix in intent: NumericArray was the wrong class for string data —
    # use the generic pyarrow.Array.from_buffers constructor instead.
    return pyarrow.Array.from_buffers(
        data_type,
        node.Length(),
        [null_buffer, offset_buffer, data_buffer],
        node.NullCount(),
    )
39
+
40
+
41
def read_column(reader, arrow_field):
    """Recursively decode one column from the reader's node/buffer streams.

    The consumption order (this column's node, then its buffers, then any
    child columns) must mirror the writer's depth-first layout exactly; a
    single misread desynchronizes every later column.

    :param reader: ArrayReader positioned at this column's node/buffers
    :param arrow_field: pyarrow.Field describing the column to decode
    :return: a pyarrow array for the field
    :raises RuntimeError: for unsupported arrow field types
    """
    if arrow_field.type in [
        pyarrow.string(),
        pyarrow.binary(),
        pyarrow.large_binary(),
        pyarrow.large_string(),
    ]:
        # variable-length layout: validity bitmap + offsets + data bytes
        return create_string_array(
            reader.next_node(),
            arrow_field.type,
            reader.next_buffer(),
            reader.next_buffer(),
            reader.next_buffer(),
        )
    elif pyarrow.types.is_primitive(arrow_field.type):
        # fixed-width layout: validity bitmap + values
        return create_primitive_array(
            reader.next_node(),
            arrow_field.type,
            reader.next_buffer(),
            reader.next_buffer(),
        )
    elif pyarrow.types.is_list(arrow_field.type):
        # list layout: validity bitmap + offsets, then one child values column
        list_node = reader.next_node()
        list_buffers = [reader.next_buffer(), reader.next_buffer()]
        values = read_column(reader, arrow_field.type.value_field)
        return pyarrow.ListArray.from_buffers(
            arrow_field.type,
            list_node.Length(),
            list_buffers,
            list_node.NullCount(),
            0,  # offset: batch columns always start at element 0
            [values],
        )
    elif pyarrow.types.is_struct(arrow_field.type):
        # struct layout: validity bitmap only, then one child column per field
        struct_node = reader.next_node()
        null_buffer = reader.next_buffer()
        children = []
        # NOTE(review): relies on StructType.fields — confirm it exists in the
        # pinned pyarrow version (iterating the type itself is the older API).
        for child_field in arrow_field.type.fields:
            child_column = read_column(reader, child_field)
            children.append(child_column)
        return pyarrow.StructArray.from_buffers(
            arrow_field.type,
            struct_node.Length(),
            [null_buffer],
            struct_node.NullCount(),
            0,  # offset: batch columns always start at element 0
            children,
        )
    else:
        raise RuntimeError("unsupported arrow field type {}".format(arrow_field.type))
91
+
92
+
93
def read_record_batch(arrow_schema, nodes, buffers):
    """Assemble a pyarrow.RecordBatch by decoding every schema field in order.

    :param arrow_schema: pyarrow.Schema of the batch
    :param nodes: flatbuffers field nodes, in depth-first field order
    :param buffers: raw buffers, in the order the writer emitted them
    """
    reader = ArrayReader(arrow_schema, nodes, buffers)
    columns = [read_column(reader, arrow_field) for arrow_field in arrow_schema]
    return pyarrow.RecordBatch.from_arrays(columns, schema=arrow_schema)
@@ -0,0 +1,308 @@
1
+ import pyarrow
2
+ import grpc
3
+ from google.protobuf import any_pb2
4
+ from . import Flight_pb2_grpc
5
+ from . import FlightSql_pb2_grpc
6
+ from . import FlightSql_pb2
7
+ from . import Flight_pb2
8
+ from . import arrow_flatbuffers
9
+ from . import arrow_ipc_reader
10
+ from . import time
11
+
12
+
13
def fb_time_unit_to_string(fb_time_unit):
    """Map a flatbuffers TimeUnit enum value to pyarrow's unit string.

    :raises RuntimeError: for values outside the four known units
    """
    unit_enum = arrow_flatbuffers.TimeUnit
    unit_strings = {
        unit_enum.SECOND: "s",
        unit_enum.MILLISECOND: "ms",
        unit_enum.MICROSECOND: "us",
        unit_enum.NANOSECOND: "ns",
    }
    unit = unit_strings.get(fb_time_unit)
    if unit is None:
        raise RuntimeError("unsupported time unit {}".format(fb_time_unit))
    return unit
24
+
25
+
26
def fb_field_type_to_arrow(fb_field):
    """Translate the flatbuffers type stored in *fb_field* into a pyarrow DataType.

    :param fb_field: flatbuffers Field from an Arrow IPC Schema message
    :return: the corresponding pyarrow DataType
    :raises RuntimeError: for bit widths or type tags this client does not support
    """
    fb_type = fb_field.TypeType()
    fb_type_enum = arrow_flatbuffers.Type
    assert fb_type != fb_type_enum.NONE
    if fb_type == fb_type_enum.Null:
        return pyarrow.null()
    elif fb_type == fb_type_enum.Int:
        type_int = arrow_flatbuffers.Int()
        field_type_table = fb_field.Type()
        type_int.Init(field_type_table.Bytes, field_type_table.Pos)
        bit_width = type_int.BitWidth()
        if type_int.IsSigned():
            signed_factories = {
                8: pyarrow.int8,
                16: pyarrow.int16,
                32: pyarrow.int32,
                64: pyarrow.int64,
            }
            if bit_width not in signed_factories:
                raise RuntimeError("unsupported int size {}".format(bit_width))
            return signed_factories[bit_width]()
        else:
            unsigned_factories = {
                8: pyarrow.uint8,
                16: pyarrow.uint16,
                32: pyarrow.uint32,
                64: pyarrow.uint64,
            }
            if bit_width not in unsigned_factories:
                raise RuntimeError("unsupported uint size {}".format(bit_width))
            return unsigned_factories[bit_width]()
    elif fb_type == fb_type_enum.FloatingPoint:
        # Bug fix: the declared precision was previously ignored and every
        # float column came back as float64, misreading float16/float32 buffers.
        fp_type = arrow_flatbuffers.FloatingPoint()
        field_type_table = fb_field.Type()
        fp_type.Init(field_type_table.Bytes, field_type_table.Pos)
        precision_enum = arrow_flatbuffers.Precision
        if fp_type.Precision() == precision_enum.HALF:
            return pyarrow.float16()
        if fp_type.Precision() == precision_enum.SINGLE:
            return pyarrow.float32()
        return pyarrow.float64()
    elif fb_type == fb_type_enum.Binary:
        return pyarrow.binary()
    elif fb_type == fb_type_enum.Utf8:
        return pyarrow.utf8()
    elif fb_type == fb_type_enum.Bool:
        # Bug fix: pyarrow exposes bool_() — pyarrow.bool() raised AttributeError.
        return pyarrow.bool_()
    elif fb_type == fb_type_enum.Timestamp:
        ts_type = arrow_flatbuffers.Timestamp()
        field_type_table = fb_field.Type()
        ts_type.Init(field_type_table.Bytes, field_type_table.Pos)
        # NOTE(review): flatbuffers Timezone() may return bytes — pyarrow appears
        # to accept it, but confirm against the pinned pyarrow version.
        return pyarrow.timestamp(
            fb_time_unit_to_string(ts_type.Unit()), ts_type.Timezone()
        )
    elif fb_type == fb_type_enum.List:
        assert 1 == fb_field.ChildrenLength()
        child_field = fb_field_to_arrow(fb_field.Children(0))
        return pyarrow.list_(child_field)
    elif fb_type == fb_type_enum.Struct_:
        struct_fields = [
            fb_field_to_arrow(fb_field.Children(child_index))
            for child_index in range(fb_field.ChildrenLength())
        ]
        return pyarrow.struct(struct_fields)
    raise RuntimeError("unknown flatbuffer type {}".format(fb_type))
88
+
89
+
90
def fb_field_to_arrow(fb_field):
    """Convert a flatbuffers Field into a pyarrow.field (name plus decoded type)."""
    # NOTE(review): flatbuffers Name() may return bytes; pyarrow.field appears
    # to accept it — confirm against the pinned pyarrow version.
    return pyarrow.field(fb_field.Name(), fb_field_type_to_arrow(fb_field))
93
+
94
+
95
def make_query_flight_descriptor(sql):
    """Wrap *sql* in a FlightSQL CommandStatementQuery inside a CMD FlightDescriptor."""
    statement = FlightSql_pb2.CommandStatementQuery(query=sql)
    # FlightSQL commands travel as a protobuf Any packed into the descriptor bytes.
    wrapper = any_pb2.Any()
    wrapper.Pack(statement)
    descriptor = Flight_pb2.FlightDescriptor()
    descriptor.type = Flight_pb2.FlightDescriptor.DescriptorType.CMD
    descriptor.cmd = wrapper.SerializeToString()
    return descriptor
103
+
104
+
105
def read_schema_from_flight_data(flight_data):
    """Decode a FlightData Schema message into a pyarrow.Schema.

    The first FlightData of a DoGet stream always carries the schema.
    """
    msg = arrow_flatbuffers.Message.GetRootAs(flight_data.data_header, 0)
    assert msg.Version() == arrow_flatbuffers.MetadataVersion.V5
    assert msg.HeaderType() == arrow_flatbuffers.MessageHeader.Schema
    header = msg.Header()
    schema = arrow_flatbuffers.Schema()
    schema.Init(header.Bytes, header.Pos)
    arrow_fields = [
        fb_field_to_arrow(schema.Fields(field_index))
        for field_index in range(schema.FieldsLength())
    ]
    return pyarrow.schema(arrow_fields)
121
+
122
+
123
def read_record_batch_from_flight_data(arrow_schema, flight_data):
    """Decode one FlightData RecordBatch message into a pyarrow.RecordBatch.

    :param arrow_schema: schema decoded from the stream's first message
    :param flight_data: FlightData whose header is a RecordBatch message
    """
    msg = arrow_flatbuffers.Message.GetRootAs(flight_data.data_header, 0)
    assert msg.HeaderType() == arrow_flatbuffers.MessageHeader.RecordBatch
    header = msg.Header()
    fb_record_batch = arrow_flatbuffers.RecordBatch()
    fb_record_batch.Init(header.Bytes, header.Pos)
    nodes = [
        fb_record_batch.Nodes(node_index)
        for node_index in range(fb_record_batch.NodesLength())
    ]
    buffers = [
        fb_record_batch.Buffers(buffer_index)
        for buffer_index in range(fb_record_batch.BuffersLength())
    ]
    # slice every buffer out of the message body without copying
    body = pyarrow.py_buffer(flight_data.data_body)
    arrow_buffers = [body.slice(b.Offset(), b.Length()) for b in buffers]
    return arrow_ipc_reader.read_record_batch(arrow_schema, nodes, arrow_buffers)
146
+
147
+
148
def channel_creds_from_token(token):
    """Build TLS channel credentials that attach *token* as a bearer access token."""
    return grpc.composite_channel_credentials(
        grpc.ssl_channel_credentials(),
        grpc.access_token_call_credentials(token),
    )
154
+
155
+
156
class FlightSQLAuthMetadataPlugin(grpc.AuthMetadataPlugin):
    """Attaches a fixed set of headers to every gRPC call as metadata."""

    def __init__(self, headers):
        # grpc rejects uppercase metadata keys (e.g. 'Authorization'),
        # so every key is normalized to lowercase up front.
        self.__headers = [(key.lower(), value) for key, value in headers.items()]

    def __call__(self, context, callback):
        # invoked by grpc for each call; no error is ever reported here
        callback(self.__headers, None)
163
+
164
+
165
def channel_creds_from_headers(headers):
    """Build TLS channel credentials that send *headers* with every call."""
    call_credentials = grpc.metadata_call_credentials(
        FlightSQLAuthMetadataPlugin(headers)
    )
    return grpc.composite_channel_credentials(
        grpc.ssl_channel_credentials(), call_credentials
    )
172
+
173
+
174
class FlightSQLClient:
    """Minimal FlightSQL client: submits SQL statements and decodes Arrow results.

    :param host_port: "host:port" of the flight service
    :param channel_creds: grpc ChannelCredentials, or None for an insecure channel
    """

    def __init__(self, host_port, channel_creds):
        self.__host_port = host_port
        self.__channel_creds = channel_creds

    def make_channel(self):
        """Open a secure or insecure gRPC channel depending on the configured creds."""
        if self.__channel_creds is None:
            return grpc.insecure_channel(self.__host_port)
        else:
            return grpc.secure_channel(self.__host_port, self.__channel_creds)

    def _make_query_metadata(self, begin, end):
        # Optional time-range hints forwarded to the server as gRPC metadata;
        # shared by query() and query_stream().
        metadata = []
        if begin is not None:
            metadata.append(("query_range_begin", time.format_datetime(begin)))
        if end is not None:
            metadata.append(("query_range_end", time.format_datetime(end)))
        return metadata

    def _do_get(self, sql, metadata):
        # Resolve the flight info for *sql* and open the DoGet stream.
        channel = self.make_channel()
        stub = Flight_pb2_grpc.FlightServiceStub(channel)
        desc = make_query_flight_descriptor(sql)
        info = stub.GetFlightInfo(desc)
        return stub.DoGet(info.endpoint[0].ticket, metadata=metadata)

    def query(self, sql, begin=None, end=None):
        """Run *sql* and return the whole result set as a pandas DataFrame.

        :param sql: SQL statement to execute
        :param begin: optional start of the query's time range
        :param end: optional end of the query's time range
        :raises RuntimeError: when the server sends no schema message
        """
        metadata = self._make_query_metadata(begin, end)
        grpc_rdv = self._do_get(sql, metadata)
        flight_data_list = list(grpc_rdv)
        if len(flight_data_list) < 1:
            # bug fix: the count was previously passed as a second RuntimeError
            # argument instead of being interpolated into the message
            raise RuntimeError(
                "too few flightdata messages {}".format(len(flight_data_list))
            )
        schema = read_schema_from_flight_data(flight_data_list[0])
        record_batches = [
            read_record_batch_from_flight_data(schema, msg)
            for msg in flight_data_list[1:]
        ]
        table = pyarrow.Table.from_batches(record_batches, schema)
        return table.to_pandas()

    def query_stream(self, sql, begin=None, end=None):
        """Run *sql* and yield pyarrow.RecordBatch objects as they arrive.

        Use this instead of query() for result sets too large to hold in memory.
        """
        metadata = self._make_query_metadata(begin, end)
        grpc_rdv = self._do_get(sql, metadata)
        # the first message of the stream is always the schema;
        # use the builtin iterator protocol rather than the legacy .next()
        schema = read_schema_from_flight_data(next(grpc_rdv))
        for msg in grpc_rdv:
            yield read_record_batch_from_flight_data(schema, msg)

    def _print_stream(self, sql):
        # Shared by the partition-maintenance commands: stream the server's
        # (time, msg) progress rows to stdout as they arrive.
        for rb in self.query_stream(sql):
            for _index, row in rb.to_pandas().iterrows():
                print(row["time"], row["msg"])

    def retire_partitions(self, view_set_name, view_instance_id, begin, end):
        """Retire materialized partitions in [begin, end], printing server progress."""
        # NOTE(review): arguments are interpolated into SQL without escaping;
        # only pass trusted values.
        sql = """
        SELECT time, msg
        FROM retire_partitions('{view_set_name}', '{view_instance_id}', '{begin}', '{end}')
        """.format(
            view_set_name=view_set_name,
            view_instance_id=view_instance_id,
            begin=begin.isoformat(),
            end=end.isoformat(),
        )
        self._print_stream(sql)

    def materialize_partitions(
        self, view_set_name, view_instance_id, begin, end, partition_delta_seconds
    ):
        """Materialize partitions in [begin, end], printing server progress.

        :param partition_delta_seconds: size of each partition, in seconds
        """
        # NOTE(review): arguments are interpolated into SQL without escaping;
        # only pass trusted values.
        sql = """
        SELECT time, msg
        FROM materialize_partitions('{view_set_name}', '{view_instance_id}', '{begin}', '{end}', {partition_delta_seconds})
        """.format(
            view_set_name=view_set_name,
            view_instance_id=view_instance_id,
            begin=begin.isoformat(),
            end=end.isoformat(),
            partition_delta_seconds=partition_delta_seconds,
        )
        self._print_stream(sql)

    def find_process(self, process_id):
        """Fetch the process row matching *process_id* as a DataFrame."""
        # NOTE(review): process_id is interpolated into SQL without escaping.
        sql = """
        SELECT *
        FROM processes
        WHERE process_id='{process_id}';
        """.format(
            process_id=process_id
        )
        return self.query(sql)

    def query_streams(self, begin, end, limit, process_id=None, tag_filter=None):
        """List streams in [begin, end], optionally filtered by process and tag.

        :param limit: maximum number of rows returned
        """
        conditions = []
        if process_id is not None:
            conditions.append("process_id='{process_id}'".format(process_id=process_id))
        if tag_filter is not None:
            conditions.append(
                "(array_position(tags, '{tag}') is not NULL)".format(tag=tag_filter)
            )
        where = ""
        if len(conditions) > 0:
            where = "WHERE " + " AND ".join(conditions)
        sql = """
        SELECT *
        FROM streams
        {where}
        LIMIT {limit};
        """.format(
            where=where, limit=limit
        )
        return self.query(sql, begin, end)

    def query_blocks(self, begin, end, limit, stream_id):
        """List blocks of *stream_id* in [begin, end], up to *limit* rows."""
        sql = """
        SELECT *
        FROM blocks
        WHERE stream_id='{stream_id}'
        LIMIT {limit};
        """.format(
            limit=limit, stream_id=stream_id
        )
        return self.query(sql, begin, end)

    def query_spans(self, begin, end, limit, stream_id):
        """List thread spans of *stream_id* in [begin, end], up to *limit* rows."""
        sql = """
        SELECT *
        FROM view_instance('thread_spans', '{stream_id}')
        LIMIT {limit};
        """.format(
            limit=limit, stream_id=stream_id
        )
        return self.query(sql, begin, end)
308
+
@@ -0,0 +1,17 @@
1
+ import datetime
2
+ import pandas
3
+
4
def format_datetime(value):
    """Normalize *value* into an ISO-8601 string for query-range metadata.

    Accepts a timezone-aware datetime.datetime, a pandas.Timestamp, an
    ISO-8601 string, or None (passed through unchanged).

    :return: the ISO-8601 string, or None when *value* is None
    :raises RuntimeError: for naive datetimes or unsupported types
    :raises ValueError: when a string is not valid ISO-8601
    """
    if value is None:
        return None
    # pandas.Timestamp subclasses datetime.datetime, so it must be tested
    # first to keep its original behavior (naive Timestamps are accepted).
    if isinstance(value, pandas.Timestamp):
        return value.isoformat()
    if isinstance(value, datetime.datetime):
        if value.tzinfo is None:
            raise RuntimeError("datetime needs a valid time zone")
        return value.isoformat()
    if isinstance(value, str):
        # round-trip through datetime to enforce the time-zone requirement
        return format_datetime(datetime.datetime.fromisoformat(value))
    raise RuntimeError("value of unknown type in format_datetime")
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: micromegas
3
- Version: 0.2.3
3
+ Version: 0.3.1
4
4
  Summary: Python analytics client for https://github.com/madesroches/micromegas/
5
5
  Author: Marc-Antoine Desroches
6
6
  Author-email: madesroches@gmail.com
@@ -11,9 +11,11 @@ Classifier: Programming Language :: Python :: 3.11
11
11
  Classifier: Programming Language :: Python :: 3.12
12
12
  Requires-Dist: cbor2 (>=5.6.3,<6.0.0)
13
13
  Requires-Dist: crc (>=7.0.0,<8.0.0)
14
+ Requires-Dist: flatbuffers (>=24.12.23,<25.0.0)
15
+ Requires-Dist: grpcio (>=1.69.0,<2.0.0)
14
16
  Requires-Dist: pandas (>=2.2.2,<3.0.0)
15
- Requires-Dist: protobuf (>=5.27.1,<6.0.0)
16
- Requires-Dist: pyarrow (>=16.0.0,<17.0.0)
17
+ Requires-Dist: protobuf (>=5.29.1,<6.0.0)
18
+ Requires-Dist: pyarrow (>=18.1.0,<19.0.0)
17
19
  Requires-Dist: requests (>=2.31.0,<3.0.0)
18
20
  Requires-Dist: tabulate (>=0.9.0,<0.10.0)
19
21
  Requires-Dist: tqdm (>=4.66.5,<5.0.0)
@@ -25,20 +27,22 @@ Python analytics client for https://github.com/madesroches/micromegas/
25
27
 
26
28
  ## Example usage
27
29
 
28
- Query the 2 most recent log entries from the analytics service
30
+ Query the 2 most recent log entries from the flightsql service
29
31
 
30
32
  ```python
31
33
  import datetime
32
34
  import pandas as pd
33
35
  import micromegas
36
+ import grpc
34
37
 
35
- BASE_URL = "http://localhost:8082/"
36
- client = micromegas.client.Client(BASE_URL)
38
+ host_port = "localhost:50051"
39
+ channel_cred = grpc.local_channel_credentials()
40
+ client = micromegas.flightsql.client.FlightSQLClient(host_port, channel_cred)
37
41
  sql = """
38
42
  SELECT time, process_id, level, target, msg
39
43
  FROM log_entries
40
44
  WHERE level <= 4
41
- AND exe LIKE '%analytics%'
45
+ AND exe LIKE '%flight%'
42
46
  ORDER BY time DESC
43
47
  LIMIT 2
44
48
  """
@@ -1,5 +1,14 @@
1
- micromegas/__init__.py,sha256=E_j3LFxMk9rSMJunwDCi_90NsRHm1fKwjj_6KGMYCjQ,246
1
+ micromegas/__init__.py,sha256=0O7EfbXxIVlykEWkGYGc9y2EhrPcVoi_k3VILKTZKd8,337
2
2
  micromegas/client.py,sha256=7d0aah179a5cfjmBRxcaq7jTPvmGg625D_ixzdvtgCw,3670
3
+ micromegas/flightsql/FlightSql_pb2.py,sha256=s3CVAQ4W77ALQ9QYFlPZnPqVsRHTD5Ma0fzabQv_8Mc,28207
4
+ micromegas/flightsql/FlightSql_pb2_grpc.py,sha256=dCQT-tKxsz3dQgD4Cr0-8F4H0zCwNkXLHzlrbWfMiBA,890
5
+ micromegas/flightsql/Flight_pb2.py,sha256=nPe1utv8n59IdNsiv5sJYY10-K04pDVOhKDgV5t-h1A,12513
6
+ micromegas/flightsql/Flight_pb2_grpc.py,sha256=mj9Nlla3wpf5xFAGhMZpLDX8vLBClWfZ4w03AbBvrgs,23483
7
+ micromegas/flightsql/__init__.py,sha256=SRq2X59uKG-iuwFnSyT7wVfiTMSSryAAEWnQVjGWOM8,249
8
+ micromegas/flightsql/arrow_flatbuffers.py,sha256=egpmS59sNFwWmtG2wMeE92MMIyGyZbsm9o24mUnY1MQ,100142
9
+ micromegas/flightsql/arrow_ipc_reader.py,sha256=3rxyEgqo5100e0TT9ZKZxNe7lX6Lk0mS6yRoiIJtH6Q,3163
10
+ micromegas/flightsql/client.py,sha256=IFSUlf67m_2Pk3aleiL34ZeU7iptnA_-9WZ9yqOxQ9M,11328
11
+ micromegas/flightsql/time.py,sha256=EH3SUEpFvY0lNMj9mOcvfUJuSgrQ3YX4aJnwteK2qhk,582
3
12
  micromegas/perfetto.py,sha256=yuIe5iKvca61aWMBQNziSGM-DHcOEsiobtKx2SsNQ3E,7829
4
13
  micromegas/request.py,sha256=NV0urom5P3_P2q94gX51hxW_Fnrp_DDRorsP3mUb5NM,941
5
14
  micromegas/thirdparty/perfetto/protos/perfetto/common/android_energy_consumer_descriptor_pb2.py,sha256=l8QNXqnB-mJIkuFr2s1YoLQXHm3G-ZcOGp_OW_hQ0TE,1887
@@ -208,6 +217,6 @@ micromegas/thirdparty/perfetto/protos/perfetto/trace/translation/translation_tab
208
217
  micromegas/thirdparty/perfetto/protos/perfetto/trace/trigger_pb2.py,sha256=We7Yi8o3cEcrSNxY1zLUUO6tEWnD36C2f3O_s8_qv0I,1435
209
218
  micromegas/thirdparty/perfetto/protos/perfetto/trace/ui_state_pb2.py,sha256=Af-SXwhroNhRXMrtw6e2eU1liCImMRxSdmkt_AuSHf8,1752
210
219
  micromegas/time.py,sha256=eD9fWF2UHxaf-92yd1X2SEgUcpKypqPsvjBosLdpnQA,1026
211
- micromegas-0.2.3.dist-info/METADATA,sha256=aHUqasX8WUEKzqH0O9R0DJmPenBvdMM_SB_hYurAsFE,30467
212
- micromegas-0.2.3.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
213
- micromegas-0.2.3.dist-info/RECORD,,
220
+ micromegas-0.3.1.dist-info/METADATA,sha256=O_YfkAcg7sEMv3FeclRmF1t91QQja7C0zz52Q0ilNAs,30640
221
+ micromegas-0.3.1.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
222
+ micromegas-0.3.1.dist-info/RECORD,,