micromegas 0.10.0__py3-none-any.whl → 0.12.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- micromegas/flightsql/FlightSql_pb2.py +148 -140
- micromegas/flightsql/__init__.py +1 -0
- micromegas/flightsql/client.py +512 -7
- micromegas/flightsql/time.py +1 -0
- micromegas/perfetto.py +100 -187
- micromegas/time.py +97 -1
- micromegas-0.12.0.dist-info/METADATA +120 -0
- micromegas-0.12.0.dist-info/RECORD +10 -0
- micromegas/thirdparty/perfetto/protos/perfetto/common/android_energy_consumer_descriptor_pb2.py +0 -38
- micromegas/thirdparty/perfetto/protos/perfetto/common/android_log_constants_pb2.py +0 -38
- micromegas/thirdparty/perfetto/protos/perfetto/common/builtin_clock_pb2.py +0 -36
- micromegas/thirdparty/perfetto/protos/perfetto/common/descriptor_pb2.py +0 -62
- micromegas/thirdparty/perfetto/protos/perfetto/common/gpu_counter_descriptor_pb2.py +0 -44
- micromegas/thirdparty/perfetto/protos/perfetto/common/perf_events_pb2.py +0 -46
- micromegas/thirdparty/perfetto/protos/perfetto/common/protolog_common_pb2.py +0 -36
- micromegas/thirdparty/perfetto/protos/perfetto/common/sys_stats_counters_pb2.py +0 -38
- micromegas/thirdparty/perfetto/protos/perfetto/common/trace_stats_pb2.py +0 -48
- micromegas/thirdparty/perfetto/protos/perfetto/config/android/android_game_intervention_list_config_pb2.py +0 -36
- micromegas/thirdparty/perfetto/protos/perfetto/config/android/android_input_event_config_pb2.py +0 -42
- micromegas/thirdparty/perfetto/protos/perfetto/config/android/android_log_config_pb2.py +0 -37
- micromegas/thirdparty/perfetto/protos/perfetto/config/android/android_polled_state_config_pb2.py +0 -36
- micromegas/thirdparty/perfetto/protos/perfetto/config/android/android_sdk_sysprop_guard_config_pb2.py +0 -36
- micromegas/thirdparty/perfetto/protos/perfetto/config/android/android_system_property_config_pb2.py +0 -36
- micromegas/thirdparty/perfetto/protos/perfetto/config/android/network_trace_config_pb2.py +0 -36
- micromegas/thirdparty/perfetto/protos/perfetto/config/android/packages_list_config_pb2.py +0 -36
- micromegas/thirdparty/perfetto/protos/perfetto/config/android/pixel_modem_config_pb2.py +0 -38
- micromegas/thirdparty/perfetto/protos/perfetto/config/android/protolog_config_pb2.py +0 -41
- micromegas/thirdparty/perfetto/protos/perfetto/config/android/surfaceflinger_layers_config_pb2.py +0 -40
- micromegas/thirdparty/perfetto/protos/perfetto/config/android/surfaceflinger_transactions_config_pb2.py +0 -38
- micromegas/thirdparty/perfetto/protos/perfetto/config/chrome/chrome_config_pb2.py +0 -38
- micromegas/thirdparty/perfetto/protos/perfetto/config/chrome/v8_config_pb2.py +0 -36
- micromegas/thirdparty/perfetto/protos/perfetto/config/data_source_config_pb2.py +0 -120
- micromegas/thirdparty/perfetto/protos/perfetto/config/etw/etw_config_pb2.py +0 -38
- micromegas/thirdparty/perfetto/protos/perfetto/config/ftrace/ftrace_config_pb2.py +0 -48
- micromegas/thirdparty/perfetto/protos/perfetto/config/gpu/gpu_counter_config_pb2.py +0 -36
- micromegas/thirdparty/perfetto/protos/perfetto/config/gpu/vulkan_memory_config_pb2.py +0 -36
- micromegas/thirdparty/perfetto/protos/perfetto/config/inode_file/inode_file_config_pb2.py +0 -38
- micromegas/thirdparty/perfetto/protos/perfetto/config/interceptor_config_pb2.py +0 -37
- micromegas/thirdparty/perfetto/protos/perfetto/config/interceptors/console_config_pb2.py +0 -38
- micromegas/thirdparty/perfetto/protos/perfetto/config/power/android_power_config_pb2.py +0 -38
- micromegas/thirdparty/perfetto/protos/perfetto/config/process_stats/process_stats_config_pb2.py +0 -40
- micromegas/thirdparty/perfetto/protos/perfetto/config/profiling/heapprofd_config_pb2.py +0 -38
- micromegas/thirdparty/perfetto/protos/perfetto/config/profiling/java_hprof_config_pb2.py +0 -38
- micromegas/thirdparty/perfetto/protos/perfetto/config/profiling/perf_event_config_pb2.py +0 -43
- micromegas/thirdparty/perfetto/protos/perfetto/config/statsd/atom_ids_pb2.py +0 -36
- micromegas/thirdparty/perfetto/protos/perfetto/config/statsd/statsd_tracing_config_pb2.py +0 -39
- micromegas/thirdparty/perfetto/protos/perfetto/config/sys_stats/sys_stats_config_pb2.py +0 -39
- micromegas/thirdparty/perfetto/protos/perfetto/config/system_info/system_info_pb2.py +0 -36
- micromegas/thirdparty/perfetto/protos/perfetto/config/test_config_pb2.py +0 -38
- micromegas/thirdparty/perfetto/protos/perfetto/config/trace_config_pb2.py +0 -90
- micromegas/thirdparty/perfetto/protos/perfetto/config/track_event/track_event_config_pb2.py +0 -36
- micromegas/thirdparty/perfetto/protos/perfetto/trace/android/android_game_intervention_list_pb2.py +0 -40
- micromegas/thirdparty/perfetto/protos/perfetto/trace/android/android_log_pb2.py +0 -43
- micromegas/thirdparty/perfetto/protos/perfetto/trace/android/android_system_property_pb2.py +0 -38
- micromegas/thirdparty/perfetto/protos/perfetto/trace/android/camera_event_pb2.py +0 -48
- micromegas/thirdparty/perfetto/protos/perfetto/trace/android/frame_timeline_event_pb2.py +0 -54
- micromegas/thirdparty/perfetto/protos/perfetto/trace/android/gpu_mem_event_pb2.py +0 -36
- micromegas/thirdparty/perfetto/protos/perfetto/trace/android/graphics/rect_pb2.py +0 -36
- micromegas/thirdparty/perfetto/protos/perfetto/trace/android/graphics_frame_event_pb2.py +0 -40
- micromegas/thirdparty/perfetto/protos/perfetto/trace/android/initial_display_state_pb2.py +0 -36
- micromegas/thirdparty/perfetto/protos/perfetto/trace/android/network_trace_pb2.py +0 -46
- micromegas/thirdparty/perfetto/protos/perfetto/trace/android/packages_list_pb2.py +0 -38
- micromegas/thirdparty/perfetto/protos/perfetto/trace/android/pixel_modem_events_pb2.py +0 -38
- micromegas/thirdparty/perfetto/protos/perfetto/trace/android/protolog_pb2.py +0 -43
- micromegas/thirdparty/perfetto/protos/perfetto/trace/android/shell_transition_pb2.py +0 -42
- micromegas/thirdparty/perfetto/protos/perfetto/trace/android/surfaceflinger_common_pb2.py +0 -59
- micromegas/thirdparty/perfetto/protos/perfetto/trace/android/surfaceflinger_layers_pb2.py +0 -72
- micromegas/thirdparty/perfetto/protos/perfetto/trace/android/surfaceflinger_transactions_pb2.py +0 -76
- micromegas/thirdparty/perfetto/protos/perfetto/trace/android/winscope_extensions_pb2.py +0 -36
- micromegas/thirdparty/perfetto/protos/perfetto/trace/chrome/chrome_benchmark_metadata_pb2.py +0 -36
- micromegas/thirdparty/perfetto/protos/perfetto/trace/chrome/chrome_metadata_pb2.py +0 -50
- micromegas/thirdparty/perfetto/protos/perfetto/trace/chrome/chrome_trace_event_pb2.py +0 -56
- micromegas/thirdparty/perfetto/protos/perfetto/trace/chrome/chrome_trigger_pb2.py +0 -36
- micromegas/thirdparty/perfetto/protos/perfetto/trace/chrome/v8_pb2.py +0 -70
- micromegas/thirdparty/perfetto/protos/perfetto/trace/clock_snapshot_pb2.py +0 -41
- micromegas/thirdparty/perfetto/protos/perfetto/trace/etw/etw_event_bundle_pb2.py +0 -37
- micromegas/thirdparty/perfetto/protos/perfetto/trace/etw/etw_event_pb2.py +0 -37
- micromegas/thirdparty/perfetto/protos/perfetto/trace/etw/etw_pb2.py +0 -48
- micromegas/thirdparty/perfetto/protos/perfetto/trace/extension_descriptor_pb2.py +0 -37
- micromegas/thirdparty/perfetto/protos/perfetto/trace/filesystem/inode_file_map_pb2.py +0 -40
- micromegas/thirdparty/perfetto/protos/perfetto/trace/ftrace/android_fs_pb2.py +0 -46
- micromegas/thirdparty/perfetto/protos/perfetto/trace/ftrace/binder_pb2.py +0 -52
- micromegas/thirdparty/perfetto/protos/perfetto/trace/ftrace/block_pb2.py +0 -72
- micromegas/thirdparty/perfetto/protos/perfetto/trace/ftrace/cgroup_pb2.py +0 -52
- micromegas/thirdparty/perfetto/protos/perfetto/trace/ftrace/clk_pb2.py +0 -40
- micromegas/thirdparty/perfetto/protos/perfetto/trace/ftrace/cma_pb2.py +0 -38
- micromegas/thirdparty/perfetto/protos/perfetto/trace/ftrace/compaction_pb2.py +0 -62
- micromegas/thirdparty/perfetto/protos/perfetto/trace/ftrace/cpuhp_pb2.py +0 -44
- micromegas/thirdparty/perfetto/protos/perfetto/trace/ftrace/cros_ec_pb2.py +0 -36
- micromegas/thirdparty/perfetto/protos/perfetto/trace/ftrace/dcvsh_pb2.py +0 -36
- micromegas/thirdparty/perfetto/protos/perfetto/trace/ftrace/dma_fence_pb2.py +0 -44
- micromegas/thirdparty/perfetto/protos/perfetto/trace/ftrace/dmabuf_heap_pb2.py +0 -36
- micromegas/thirdparty/perfetto/protos/perfetto/trace/ftrace/dpu_pb2.py +0 -42
- micromegas/thirdparty/perfetto/protos/perfetto/trace/ftrace/drm_pb2.py +0 -38
- micromegas/thirdparty/perfetto/protos/perfetto/trace/ftrace/ext4_pb2.py +0 -224
- micromegas/thirdparty/perfetto/protos/perfetto/trace/ftrace/f2fs_pb2.py +0 -106
- micromegas/thirdparty/perfetto/protos/perfetto/trace/ftrace/fastrpc_pb2.py +0 -44
- micromegas/thirdparty/perfetto/protos/perfetto/trace/ftrace/fence_pb2.py +0 -42
- micromegas/thirdparty/perfetto/protos/perfetto/trace/ftrace/filemap_pb2.py +0 -38
- micromegas/thirdparty/perfetto/protos/perfetto/trace/ftrace/ftrace_event_bundle_pb2.py +0 -66
- micromegas/thirdparty/perfetto/protos/perfetto/trace/ftrace/ftrace_event_pb2.py +0 -105
- micromegas/thirdparty/perfetto/protos/perfetto/trace/ftrace/ftrace_pb2.py +0 -40
- micromegas/thirdparty/perfetto/protos/perfetto/trace/ftrace/ftrace_stats_pb2.py +0 -42
- micromegas/thirdparty/perfetto/protos/perfetto/trace/ftrace/g2d_pb2.py +0 -36
- micromegas/thirdparty/perfetto/protos/perfetto/trace/ftrace/generic_pb2.py +0 -38
- micromegas/thirdparty/perfetto/protos/perfetto/trace/ftrace/google_icc_trace_pb2.py +0 -36
- micromegas/thirdparty/perfetto/protos/perfetto/trace/ftrace/google_irm_trace_pb2.py +0 -36
- micromegas/thirdparty/perfetto/protos/perfetto/trace/ftrace/gpu_mem_pb2.py +0 -36
- micromegas/thirdparty/perfetto/protos/perfetto/trace/ftrace/gpu_scheduler_pb2.py +0 -40
- micromegas/thirdparty/perfetto/protos/perfetto/trace/ftrace/hyp_pb2.py +0 -44
- micromegas/thirdparty/perfetto/protos/perfetto/trace/ftrace/i2c_pb2.py +0 -50
- micromegas/thirdparty/perfetto/protos/perfetto/trace/ftrace/ion_pb2.py +0 -36
- micromegas/thirdparty/perfetto/protos/perfetto/trace/ftrace/ipi_pb2.py +0 -40
- micromegas/thirdparty/perfetto/protos/perfetto/trace/ftrace/irq_pb2.py +0 -44
- micromegas/thirdparty/perfetto/protos/perfetto/trace/ftrace/kgsl_pb2.py +0 -36
- micromegas/thirdparty/perfetto/protos/perfetto/trace/ftrace/kmem_pb2.py +0 -122
- micromegas/thirdparty/perfetto/protos/perfetto/trace/ftrace/kvm_pb2.py +0 -106
- micromegas/thirdparty/perfetto/protos/perfetto/trace/ftrace/lowmemorykiller_pb2.py +0 -36
- micromegas/thirdparty/perfetto/protos/perfetto/trace/ftrace/lwis_pb2.py +0 -36
- micromegas/thirdparty/perfetto/protos/perfetto/trace/ftrace/mali_pb2.py +0 -98
- micromegas/thirdparty/perfetto/protos/perfetto/trace/ftrace/mdss_pb2.py +0 -76
- micromegas/thirdparty/perfetto/protos/perfetto/trace/ftrace/mm_event_pb2.py +0 -36
- micromegas/thirdparty/perfetto/protos/perfetto/trace/ftrace/net_pb2.py +0 -42
- micromegas/thirdparty/perfetto/protos/perfetto/trace/ftrace/oom_pb2.py +0 -38
- micromegas/thirdparty/perfetto/protos/perfetto/trace/ftrace/panel_pb2.py +0 -42
- micromegas/thirdparty/perfetto/protos/perfetto/trace/ftrace/perf_trace_counters_pb2.py +0 -36
- micromegas/thirdparty/perfetto/protos/perfetto/trace/ftrace/power_pb2.py +0 -60
- micromegas/thirdparty/perfetto/protos/perfetto/trace/ftrace/printk_pb2.py +0 -36
- micromegas/thirdparty/perfetto/protos/perfetto/trace/ftrace/raw_syscalls_pb2.py +0 -38
- micromegas/thirdparty/perfetto/protos/perfetto/trace/ftrace/regulator_pb2.py +0 -48
- micromegas/thirdparty/perfetto/protos/perfetto/trace/ftrace/rpm_pb2.py +0 -36
- micromegas/thirdparty/perfetto/protos/perfetto/trace/ftrace/samsung_pb2.py +0 -36
- micromegas/thirdparty/perfetto/protos/perfetto/trace/ftrace/sched_pb2.py +0 -64
- micromegas/thirdparty/perfetto/protos/perfetto/trace/ftrace/scm_pb2.py +0 -38
- micromegas/thirdparty/perfetto/protos/perfetto/trace/ftrace/sde_pb2.py +0 -46
- micromegas/thirdparty/perfetto/protos/perfetto/trace/ftrace/signal_pb2.py +0 -38
- micromegas/thirdparty/perfetto/protos/perfetto/trace/ftrace/skb_pb2.py +0 -36
- micromegas/thirdparty/perfetto/protos/perfetto/trace/ftrace/sock_pb2.py +0 -36
- micromegas/thirdparty/perfetto/protos/perfetto/trace/ftrace/sync_pb2.py +0 -40
- micromegas/thirdparty/perfetto/protos/perfetto/trace/ftrace/synthetic_pb2.py +0 -38
- micromegas/thirdparty/perfetto/protos/perfetto/trace/ftrace/systrace_pb2.py +0 -36
- micromegas/thirdparty/perfetto/protos/perfetto/trace/ftrace/task_pb2.py +0 -38
- micromegas/thirdparty/perfetto/protos/perfetto/trace/ftrace/tcp_pb2.py +0 -36
- micromegas/thirdparty/perfetto/protos/perfetto/trace/ftrace/thermal_exynos_pb2.py +0 -38
- micromegas/thirdparty/perfetto/protos/perfetto/trace/ftrace/thermal_pb2.py +0 -38
- micromegas/thirdparty/perfetto/protos/perfetto/trace/ftrace/trusty_pb2.py +0 -70
- micromegas/thirdparty/perfetto/protos/perfetto/trace/ftrace/ufs_pb2.py +0 -38
- micromegas/thirdparty/perfetto/protos/perfetto/trace/ftrace/v4l2_pb2.py +0 -46
- micromegas/thirdparty/perfetto/protos/perfetto/trace/ftrace/virtio_gpu_pb2.py +0 -38
- micromegas/thirdparty/perfetto/protos/perfetto/trace/ftrace/virtio_video_pb2.py +0 -42
- micromegas/thirdparty/perfetto/protos/perfetto/trace/ftrace/vmscan_pb2.py +0 -46
- micromegas/thirdparty/perfetto/protos/perfetto/trace/ftrace/workqueue_pb2.py +0 -42
- micromegas/thirdparty/perfetto/protos/perfetto/trace/gpu/gpu_counter_event_pb2.py +0 -39
- micromegas/thirdparty/perfetto/protos/perfetto/trace/gpu/gpu_log_pb2.py +0 -38
- micromegas/thirdparty/perfetto/protos/perfetto/trace/gpu/gpu_render_stage_event_pb2.py +0 -58
- micromegas/thirdparty/perfetto/protos/perfetto/trace/gpu/vulkan_api_event_pb2.py +0 -40
- micromegas/thirdparty/perfetto/protos/perfetto/trace/gpu/vulkan_memory_event_pb2.py +0 -44
- micromegas/thirdparty/perfetto/protos/perfetto/trace/interned_data/interned_data_pb2.py +0 -45
- micromegas/thirdparty/perfetto/protos/perfetto/trace/memory_graph_pb2.py +0 -48
- micromegas/thirdparty/perfetto/protos/perfetto/trace/perfetto/perfetto_metatrace_pb2.py +0 -40
- micromegas/thirdparty/perfetto/protos/perfetto/trace/perfetto/tracing_service_event_pb2.py +0 -36
- micromegas/thirdparty/perfetto/protos/perfetto/trace/power/android_energy_estimation_breakdown_pb2.py +0 -39
- micromegas/thirdparty/perfetto/protos/perfetto/trace/power/android_entity_state_residency_pb2.py +0 -40
- micromegas/thirdparty/perfetto/protos/perfetto/trace/power/battery_counters_pb2.py +0 -36
- micromegas/thirdparty/perfetto/protos/perfetto/trace/power/power_rails_pb2.py +0 -40
- micromegas/thirdparty/perfetto/protos/perfetto/trace/profiling/deobfuscation_pb2.py +0 -40
- micromegas/thirdparty/perfetto/protos/perfetto/trace/profiling/heap_graph_pb2.py +0 -57
- micromegas/thirdparty/perfetto/protos/perfetto/trace/profiling/profile_common_pb2.py +0 -50
- micromegas/thirdparty/perfetto/protos/perfetto/trace/profiling/profile_packet_pb2.py +0 -72
- micromegas/thirdparty/perfetto/protos/perfetto/trace/profiling/smaps_pb2.py +0 -38
- micromegas/thirdparty/perfetto/protos/perfetto/trace/ps/process_stats_pb2.py +0 -42
- micromegas/thirdparty/perfetto/protos/perfetto/trace/ps/process_tree_pb2.py +0 -40
- micromegas/thirdparty/perfetto/protos/perfetto/trace/remote_clock_sync_pb2.py +0 -39
- micromegas/thirdparty/perfetto/protos/perfetto/trace/statsd/statsd_atom_pb2.py +0 -38
- micromegas/thirdparty/perfetto/protos/perfetto/trace/sys_stats/sys_stats_pb2.py +0 -55
- micromegas/thirdparty/perfetto/protos/perfetto/trace/system_info/cpu_info_pb2.py +0 -38
- micromegas/thirdparty/perfetto/protos/perfetto/trace/system_info_pb2.py +0 -38
- micromegas/thirdparty/perfetto/protos/perfetto/trace/test_event_pb2.py +0 -39
- micromegas/thirdparty/perfetto/protos/perfetto/trace/trace_packet_defaults_pb2.py +0 -39
- micromegas/thirdparty/perfetto/protos/perfetto/trace/trace_packet_pb2.py +0 -107
- micromegas/thirdparty/perfetto/protos/perfetto/trace/trace_pb2.py +0 -37
- micromegas/thirdparty/perfetto/protos/perfetto/trace/trace_uuid_pb2.py +0 -36
- micromegas/thirdparty/perfetto/protos/perfetto/trace/track_event/chrome_active_processes_pb2.py +0 -36
- micromegas/thirdparty/perfetto/protos/perfetto/trace/track_event/chrome_application_state_info_pb2.py +0 -38
- micromegas/thirdparty/perfetto/protos/perfetto/trace/track_event/chrome_compositor_scheduler_state_pb2.py +0 -75
- micromegas/thirdparty/perfetto/protos/perfetto/trace/track_event/chrome_content_settings_event_info_pb2.py +0 -36
- micromegas/thirdparty/perfetto/protos/perfetto/trace/track_event/chrome_frame_reporter_pb2.py +0 -44
- micromegas/thirdparty/perfetto/protos/perfetto/trace/track_event/chrome_histogram_sample_pb2.py +0 -38
- micromegas/thirdparty/perfetto/protos/perfetto/trace/track_event/chrome_keyed_service_pb2.py +0 -36
- micromegas/thirdparty/perfetto/protos/perfetto/trace/track_event/chrome_latency_info_pb2.py +0 -42
- micromegas/thirdparty/perfetto/protos/perfetto/trace/track_event/chrome_legacy_ipc_pb2.py +0 -38
- micromegas/thirdparty/perfetto/protos/perfetto/trace/track_event/chrome_message_pump_pb2.py +0 -36
- micromegas/thirdparty/perfetto/protos/perfetto/trace/track_event/chrome_mojo_event_info_pb2.py +0 -36
- micromegas/thirdparty/perfetto/protos/perfetto/trace/track_event/chrome_process_descriptor_pb2.py +0 -38
- micromegas/thirdparty/perfetto/protos/perfetto/trace/track_event/chrome_renderer_scheduler_state_pb2.py +0 -38
- micromegas/thirdparty/perfetto/protos/perfetto/trace/track_event/chrome_thread_descriptor_pb2.py +0 -38
- micromegas/thirdparty/perfetto/protos/perfetto/trace/track_event/chrome_user_event_pb2.py +0 -36
- micromegas/thirdparty/perfetto/protos/perfetto/trace/track_event/chrome_window_handle_event_info_pb2.py +0 -36
- micromegas/thirdparty/perfetto/protos/perfetto/trace/track_event/counter_descriptor_pb2.py +0 -40
- micromegas/thirdparty/perfetto/protos/perfetto/trace/track_event/debug_annotation_pb2.py +0 -44
- micromegas/thirdparty/perfetto/protos/perfetto/trace/track_event/log_message_pb2.py +0 -40
- micromegas/thirdparty/perfetto/protos/perfetto/trace/track_event/pixel_modem_pb2.py +0 -36
- micromegas/thirdparty/perfetto/protos/perfetto/trace/track_event/process_descriptor_pb2.py +0 -38
- micromegas/thirdparty/perfetto/protos/perfetto/trace/track_event/range_of_interest_pb2.py +0 -36
- micromegas/thirdparty/perfetto/protos/perfetto/trace/track_event/screenshot_pb2.py +0 -36
- micromegas/thirdparty/perfetto/protos/perfetto/trace/track_event/source_location_pb2.py +0 -38
- micromegas/thirdparty/perfetto/protos/perfetto/trace/track_event/task_execution_pb2.py +0 -36
- micromegas/thirdparty/perfetto/protos/perfetto/trace/track_event/thread_descriptor_pb2.py +0 -38
- micromegas/thirdparty/perfetto/protos/perfetto/trace/track_event/track_descriptor_pb2.py +0 -41
- micromegas/thirdparty/perfetto/protos/perfetto/trace/track_event/track_event_pb2.py +0 -74
- micromegas/thirdparty/perfetto/protos/perfetto/trace/translation/translation_table_pb2.py +0 -70
- micromegas/thirdparty/perfetto/protos/perfetto/trace/trigger_pb2.py +0 -36
- micromegas/thirdparty/perfetto/protos/perfetto/trace/ui_state_pb2.py +0 -38
- micromegas-0.10.0.dist-info/METADATA +0 -251
- micromegas-0.10.0.dist-info/RECORD +0 -215
- {micromegas-0.10.0.dist-info → micromegas-0.12.0.dist-info}/WHEEL +0 -0
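
The client.py diff that follows adds docstrings and a prepared-statement API to FlightSQLClient. As a quick orientation, here is a minimal usage sketch assembled from those docstrings; it is not part of the published package and assumes a FlightSQL server reachable at grpc://localhost:50051 and that the class is imported from its module path micromegas/flightsql/client.py.

import datetime
from micromegas.flightsql.client import FlightSQLClient

# Connect; URI scheme and optional auth headers follow the __init__ docstring below.
client = FlightSQLClient("grpc://localhost:50051")

# Schema discovery without executing the query (new in 0.12.0).
stmt = client.prepare_statement("SELECT time, level, msg FROM log_entries")
print(stmt.dataset_schema)

# Time-bounded query; begin/end enable partition pruning on the server.
end = datetime.datetime.now(datetime.timezone.utc)
begin = end - datetime.timedelta(hours=1)
df = client.query("SELECT time, level, msg FROM log_entries WHERE level <= 3", begin, end)

# Stream large result sets batch by batch instead of loading them all in memory.
for batch in client.query_stream("SELECT * FROM log_entries", begin, end):
    print(len(batch.to_pandas()))
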
micromegas/flightsql/client.py
CHANGED
@@ -1,11 +1,11 @@
-import certifi
-import pyarrow
+from . import FlightSql_pb2
+from . import time
+from google.protobuf import any_pb2
 from pyarrow import flight
 from typing import Any
+import certifi
+import pyarrow
 import sys
-from google.protobuf import any_pb2
-from . import FlightSql_pb2
-from . import time


 class MicromegasMiddleware(flight.ClientMiddleware):
@@ -30,7 +30,8 @@ class MicromegasMiddlewareFactory(flight.ClientMiddlewareFactory):
     def start_call(self, info):
         return MicromegasMiddleware(self.headers)

-def make_call_headers( begin, end ):
+
+def make_call_headers(begin, end):
     call_headers = []
     if begin is not None:
         call_headers.append(
@@ -48,6 +49,15 @@ def make_call_headers( begin, end ):
         )
     return call_headers

+
+def make_prepared_statement_action(sql):
+    request = FlightSql_pb2.ActionCreatePreparedStatementRequest(query=sql)
+    any = any_pb2.Any()
+    any.Pack(request)
+    action_type = "CreatePreparedStatement"
+    return flight.Action(action_type, any.SerializeToString())
+
+
 def make_query_ticket(sql):
     ticket_statement_query = FlightSql_pb2.TicketStatementQuery(
         statement_handle=sql.encode("utf8")
@@ -57,18 +67,88 @@ def make_query_ticket(sql):
     ticket = flight.Ticket(any.SerializeToString())
     return ticket

+
 def make_arrow_flight_descriptor(command: Any) -> flight.FlightDescriptor:
     any = any_pb2.Any()
     any.Pack(command)
     return flight.FlightDescriptor.for_command(any.SerializeToString())

+
 def make_ingest_flight_desc(table_name):
-    ingest_statement = FlightSql_pb2.CommandStatementIngest(
+    ingest_statement = FlightSql_pb2.CommandStatementIngest(
+        table=table_name, temporary=False
+    )
     desc = make_arrow_flight_descriptor(ingest_statement)
     return desc

+
+class PreparedStatement:
+    """Represents a prepared SQL statement with its result schema.
+
+    Prepared statements in Micromegas are primarily used for schema discovery -
+    determining the structure of query results without executing the query.
+    This is useful for query validation and building dynamic interfaces.
+
+    Attributes:
+        query (str): The SQL query string for this prepared statement.
+        dataset_schema (pyarrow.Schema): The schema (column names and types) of the result set.
+
+    Example:
+        >>> stmt = client.prepare_statement("SELECT time, level, msg FROM log_entries")
+        >>> print(stmt.query)
+        >>> # Output: "SELECT time, level, msg FROM log_entries"
+        >>>
+        >>> # Inspect the schema without running the query
+        >>> for field in stmt.dataset_schema:
+        ...     print(f"{field.name}: {field.type}")
+        >>> # Output: time: timestamp[ns]
+        >>> # level: int32
+        >>> # msg: string
+    """
+
+    def __init__(self, prepared_statement_result):
+        """Initialize a PreparedStatement from server response.
+
+        Args:
+            prepared_statement_result: The server's response containing the prepared
+                statement handle and dataset schema.
+        """
+        self.query = prepared_statement_result.prepared_statement_handle.decode("utf8")
+        reader = pyarrow.ipc.open_stream(prepared_statement_result.dataset_schema)
+        self.dataset_schema = reader.schema
+        reader.close()
+
+
 class FlightSQLClient:
+    """Client for querying Micromegas observability data using Apache Arrow FlightSQL.
+
+    This client provides high-performance access to telemetry data stored in Micromegas,
+    supporting both simple queries and advanced features like prepared statements,
+    bulk ingestion, and partition management.
+
+    The client uses Apache Arrow's columnar format for efficient data transfer and
+    supports streaming for large result sets.
+    """
+
     def __init__(self, uri, headers=None):
+        """Initialize a FlightSQL client connection.
+
+        Args:
+            uri (str): The FlightSQL server URI (e.g., "grpc://localhost:50051").
+                Use "grpc://" for unencrypted connections or "grpc+tls://" for TLS.
+            headers (dict, optional): Custom headers for authentication or metadata.
+                Example: {"authorization": "Bearer token123"}
+
+        Example:
+            >>> # Connect to local server
+            >>> client = FlightSQLClient("grpc://localhost:50051")
+            >>>
+            >>> # Connect with authentication
+            >>> client = FlightSQLClient(
+            ...     "grpc+tls://remote-server:50051",
+            ...     headers={"authorization": "Bearer mytoken"}
+            ... )
+        """
         fh = open(certifi.where(), "r")
         cert = fh.read()
         fh.close()
@@ -78,6 +158,44 @@ class FlightSQLClient:
         )

     def query(self, sql, begin=None, end=None):
+        """Execute a SQL query and return results as a pandas DataFrame.
+
+        This method executes the provided SQL query and returns all results in memory
+        as a pandas DataFrame. For large result sets, consider using query_stream() instead.
+
+        Args:
+            sql (str): The SQL query to execute. Can use any supported SQL syntax including
+                JOINs, aggregations, window functions, etc.
+            begin (datetime or str, optional): Start time for partition pruning. Significantly
+                improves performance by eliminating irrelevant partitions before query execution.
+                Can be a timezone-aware datetime or RFC3339 string (e.g., "2024-01-01T00:00:00Z").
+            end (datetime or str, optional): End time for partition pruning. Should be used
+                together with begin for optimal performance.
+
+        Returns:
+            pandas.DataFrame: Query results with appropriate column types.
+
+        Raises:
+            Exception: If the query fails due to syntax errors, missing tables, or server issues.
+
+        Example:
+            >>> import datetime
+            >>>
+            >>> # Query with time range for optimal performance
+            >>> end = datetime.datetime.now(datetime.timezone.utc)
+            >>> begin = end - datetime.timedelta(hours=1)
+            >>> df = client.query(
+            ...     "SELECT time, level, msg FROM log_entries WHERE level <= 3",
+            ...     begin, end
+            ... )
+            >>>
+            >>> # Query without time range (less efficient for time-series data)
+            >>> processes = client.query("SELECT * FROM processes LIMIT 10")
+
+        Performance Note:
+            Always provide begin/end parameters when querying time-series data to enable
+            partition pruning, which can improve query performance by 10-100x.
+        """
         call_headers = make_call_headers(begin, end)
         options = flight.FlightCallOptions(headers=call_headers)
         ticket = make_query_ticket(sql)
@@ -89,6 +207,39 @@ class FlightSQLClient:
         return table.to_pandas()

     def query_stream(self, sql, begin=None, end=None):
+        """Execute a SQL query and stream results as Arrow RecordBatch objects.
+
+        This method is ideal for large result sets as it processes data in chunks,
+        avoiding memory issues and allowing processing to start before the query completes.
+
+        Args:
+            sql (str): The SQL query to execute.
+            begin (datetime or str, optional): Start time for partition pruning.
+                Can be a timezone-aware datetime or RFC3339 string.
+            end (datetime or str, optional): End time for partition pruning.
+
+        Yields:
+            pyarrow.RecordBatch: Chunks of query results. Each batch contains a subset
+                of rows with all columns from the query.
+
+        Example:
+            >>> # Stream and process large dataset
+            >>> total_errors = 0
+            >>> for batch in client.query_stream(
+            ...     "SELECT * FROM log_entries WHERE level <= 2",
+            ...     begin, end
+            ... ):
+            ...     df_chunk = batch.to_pandas()
+            ...     total_errors += len(df_chunk)
+            ...     # Process chunk and release memory
+            ...     print(f"Total errors: {total_errors}")
+
+        Performance Note:
+            Streaming is recommended when:
+            - Result set is larger than 100MB
+            - You want to start processing before the query completes
+            - Memory usage needs to be controlled
+        """
         ticket = make_query_ticket(sql)
         call_headers = make_call_headers(begin, end)
         options = flight.FlightCallOptions(headers=call_headers)
@@ -97,7 +248,138 @@ class FlightSQLClient:
         for chunk in reader:
             yield chunk.data

+    def prepare_statement(self, sql):
+        """Create a prepared statement to retrieve query schema without executing it.
+
+        Prepared statements in Micromegas are primarily used to determine the schema
+        (column names and types) of a query result without actually executing the query
+        and retrieving data. This is useful for validating queries or building dynamic
+        interfaces that need to know the result structure in advance.
+
+        Args:
+            sql (str): The SQL query to prepare and analyze.
+
+        Returns:
+            PreparedStatement: An object containing the query and its result schema.
+
+        Example:
+            >>> # Get schema information without executing the query
+            >>> stmt = client.prepare_statement(
+            ...     "SELECT time, level, msg FROM log_entries WHERE level <= 3"
+            ... )
+            >>>
+            >>> # Access the schema
+            >>> print(stmt.dataset_schema)
+            >>> # Output: time: timestamp[ns]
+            >>> # level: int32
+            >>> # msg: string
+            >>>
+            >>> # The query text is also available
+            >>> print(stmt.query)
+            >>> # Output: "SELECT time, level, msg FROM log_entries WHERE level <= 3"
+
+        Note:
+            The primary purpose is schema discovery. The prepared statement can be
+            executed via prepared_statement_stream(), but this offers no performance
+            benefit over regular query_stream() in the current implementation.
+        """
+        action = make_prepared_statement_action(sql)
+        results = self.__flight_client.do_action(action)
+        for result in list(results):
+            any = any_pb2.Any()
+            any.ParseFromString(result.body.to_pybytes())
+            res = FlightSql_pb2.ActionCreatePreparedStatementResult()
+            any.Unpack(res)
+            return PreparedStatement(res)
+
+    def prepared_statement_stream(self, statement):
+        """Execute a prepared statement and stream results.
+
+        Executes a previously prepared statement and returns results as a stream of
+        Arrow RecordBatch objects. This is functionally equivalent to calling
+        query_stream() with the statement's SQL query.
+
+        Args:
+            statement (PreparedStatement): The prepared statement to execute,
+                obtained from prepare_statement().
+
+        Yields:
+            pyarrow.RecordBatch: Chunks of query results.
+
+        Example:
+            >>> # Prepare statement (mainly for schema discovery)
+            >>> stmt = client.prepare_statement("SELECT time, level, msg FROM log_entries")
+            >>>
+            >>> # Check schema before execution
+            >>> print(f"Query will return {len(stmt.dataset_schema)} columns")
+            >>>
+            >>> # Execute the prepared statement
+            >>> for batch in client.prepared_statement_stream(stmt):
+            ...     df = batch.to_pandas()
+            ...     print(f"Received batch with {len(df)} rows")
+
+        Note:
+            This offers no performance advantage over query_stream(statement.query).
+            The main benefit of prepared statements is schema discovery via prepare_statement().
+        """
+        # because we are not serializing the logical plan in the prepared statement, we can just execute the query normally
+        return self.query_stream(statement.query)
+
     def bulk_ingest(self, table_name, df):
+        """Bulk ingest a pandas DataFrame into a Micromegas metadata table.
+
+        This method efficiently loads metadata or replication data into Micromegas
+        tables using Arrow's columnar format. Primarily used for ingesting:
+        - processes: Process metadata and information
+        - streams: Event stream metadata
+        - blocks: Data block metadata
+        - payloads: Raw binary telemetry payloads (for replication)
+
+        Args:
+            table_name (str): The name of the target table. Supported tables:
+                'processes', 'streams', 'blocks', 'payloads'.
+            df (pandas.DataFrame): The DataFrame to ingest. Column names and types
+                must exactly match the target table schema.
+
+        Returns:
+            DoPutUpdateResult or None: Server response containing ingestion statistics
+                such as number of records ingested, or None if no response.
+
+        Raises:
+            Exception: If ingestion fails due to schema mismatch, unsupported table,
+                or invalid data.
+
+        Example:
+            >>> import pandas as pd
+            >>> from datetime import datetime, timezone
+            >>>
+            >>> # Example: Replicate process metadata
+            >>> processes_df = pd.DataFrame({
+            ...     'process_id': ['550e8400-e29b-41d4-a716-446655440000'],
+            ...     'exe': ['/usr/bin/myapp'],
+            ...     'username': ['user'],
+            ...     'realname': ['User Name'],
+            ...     'computer': ['hostname'],
+            ...     'distro': ['Ubuntu 22.04'],
+            ...     'cpu_brand': ['Intel Core i7'],
+            ...     'tsc_frequency': [2400000000],
+            ...     'start_time': [datetime.now(timezone.utc)],
+            ...     'start_ticks': [1234567890],
+            ...     'insert_time': [datetime.now(timezone.utc)],
+            ...     'parent_process_id': [''],
+            ...     'properties': [[]]
+            ... })
+            >>>
+            >>> # Bulk ingest process metadata
+            >>> result = client.bulk_ingest('processes', processes_df)
+            >>> if result:
+            ...     print(f"Ingested {result.record_count} process records")
+
+        Note:
+            This method is primarily intended for metadata replication and
+            administrative tasks. For normal telemetry data ingestion, use
+            the telemetry ingestion service HTTP API instead.
+        """
         desc = make_ingest_flight_desc(table_name)
         table = pyarrow.Table.from_pandas(df)
         writer, reader = self.__flight_client.do_put(desc, table.schema)
@@ -113,6 +395,41 @@ class FlightSQLClient:
         return None

     def retire_partitions(self, view_set_name, view_instance_id, begin, end):
+        """Remove materialized view partitions for a specific time range.
+
+        This method removes previously materialized partitions, which is useful for:
+        - Freeing up storage space
+        - Removing outdated materialized data
+        - Preparing for re-materialization with different settings
+
+        Args:
+            view_set_name (str): The name of the view set containing the partitions.
+            view_instance_id (str): The specific view instance identifier (usually a process_id).
+            begin (datetime): Start time of partitions to retire (inclusive).
+            end (datetime): End time of partitions to retire (exclusive).
+
+        Returns:
+            None: Prints status messages as partitions are retired.
+
+        Example:
+            >>> from datetime import datetime, timedelta, timezone
+            >>>
+            >>> # Retire partitions for the last 7 days
+            >>> end = datetime.now(timezone.utc)
+            >>> begin = end - timedelta(days=7)
+            >>>
+            >>> client.retire_partitions(
+            ...     'log_entries',
+            ...     'process-123-456',
+            ...     begin,
+            ...     end
+            ... )
+            # Output: Timestamps and status messages for each retired partition
+
+        Note:
+            This operation cannot be undone. Retired partitions must be re-materialized
+            if the data is needed again.
+        """
         sql = """
         SELECT time, msg
         FROM retire_partitions('{view_set_name}', '{view_instance_id}', '{begin}', '{end}')
@@ -129,6 +446,42 @@ class FlightSQLClient:
     def materialize_partitions(
         self, view_set_name, begin, end, partition_delta_seconds
     ):
+        """Create materialized view partitions for faster query performance.
+
+        Materialized partitions pre-compute and store query results in optimized format,
+        significantly improving query performance for frequently accessed data.
+
+        Args:
+            view_set_name (str): The name of the view set to materialize.
+            begin (datetime): Start time for materialization (inclusive).
+            end (datetime): End time for materialization (exclusive).
+            partition_delta_seconds (int): Size of each partition in seconds.
+                Common values: 3600 (hourly), 86400 (daily).
+
+        Returns:
+            None: Prints status messages as partitions are created.
+
+        Example:
+            >>> from datetime import datetime, timedelta, timezone
+            >>>
+            >>> # Materialize hourly partitions for the last 24 hours
+            >>> end = datetime.now(timezone.utc)
+            >>> begin = end - timedelta(days=1)
+            >>>
+            >>> client.materialize_partitions(
+            ...     'log_entries',
+            ...     begin,
+            ...     end,
+            ...     3600  # 1-hour partitions
+            ... )
+            # Output: Progress messages for each materialized partition
+
+        Performance Note:
+            Materialized partitions can improve query performance by 10-100x but
+            require additional storage. Choose partition size based on query patterns:
+            - Hourly (3600): For high-frequency queries on recent data
+            - Daily (86400): For historical analysis and reporting
+        """
         sql = """
         SELECT time, msg
         FROM materialize_partitions('{view_set_name}', '{begin}', '{end}', {partition_delta_seconds})
@@ -143,6 +496,38 @@ class FlightSQLClient:
             print(row["time"], row["msg"])

     def find_process(self, process_id):
+        """Find and retrieve metadata for a specific process.
+
+        Queries the processes table to get detailed information about a process
+        including its executable path, command line arguments, start time, and metadata.
+
+        Args:
+            process_id (str): The unique identifier of the process to find.
+                This is typically a UUID string.
+
+        Returns:
+            pandas.DataFrame: A DataFrame containing process information with columns:
+                - process_id: Unique process identifier
+                - exe: Executable path
+                - username: User who started the process
+                - realname: Real name of the user
+                - computer: Hostname where process is running
+                - start_time: When the process started
+                - parent_process_id: Parent process identifier
+                - metadata: Additional process metadata as JSON
+
+        Example:
+            >>> # Find a specific process
+            >>> process_info = client.find_process('550e8400-e29b-41d4-a716-446655440000')
+            >>> if not process_info.empty:
+            ...     print(f"Process: {process_info['exe'].iloc[0]}")
+            ...     print(f"Started: {process_info['start_time'].iloc[0]}")
+            ... else:
+            ...     print("Process not found")
+
+        Note:
+            Returns an empty DataFrame if the process is not found.
+        """
         sql = """
         SELECT *
         FROM processes
@@ -153,6 +538,50 @@ class FlightSQLClient:
         return self.query(sql)

     def query_streams(self, begin, end, limit, process_id=None, tag_filter=None):
+        """Query event streams with optional filtering.
+
+        Retrieves information about event streams (collections of telemetry data)
+        within a time range, with optional filtering by process or tags.
+
+        Args:
+            begin (datetime): Start time for the query (inclusive).
+            end (datetime): End time for the query (exclusive).
+            limit (int): Maximum number of streams to return.
+            process_id (str, optional): Filter streams to a specific process.
+            tag_filter (str, optional): Filter streams that contain a specific tag.
+                Valid stream tags: 'log', 'metrics', 'cpu'.
+
+        Returns:
+            pandas.DataFrame: DataFrame containing stream information with columns:
+                - stream_id: Unique stream identifier
+                - process_id: Process that created the stream
+                - stream_type: Type of stream (e.g., 'LOG', 'METRIC', 'SPAN')
+                - tags: Array of tags associated with the stream
+                - properties: Additional stream properties
+                - time: Stream creation time
+
+        Example:
+            >>> from datetime import datetime, timedelta, timezone
+            >>>
+            >>> # Query all streams from the last hour
+            >>> end = datetime.now(timezone.utc)
+            >>> begin = end - timedelta(hours=1)
+            >>> streams = client.query_streams(begin, end, limit=100)
+            >>>
+            >>> # Query streams for a specific process
+            >>> streams = client.query_streams(
+            ...     begin, end,
+            ...     limit=50,
+            ...     process_id='550e8400-e29b-41d4-a716-446655440000'
+            ... )
+            >>>
+            >>> # Query streams with a specific tag
+            >>> log_streams = client.query_streams(
+            ...     begin, end,
+            ...     limit=20,
+            ...     tag_filter='log'
+            ... )
+        """
         conditions = []
         if process_id is not None:
             conditions.append("process_id='{process_id}'".format(process_id=process_id))
@@ -174,6 +603,42 @@ class FlightSQLClient:
         return self.query(sql, begin, end)

     def query_blocks(self, begin, end, limit, stream_id):
+        """Query data blocks within a specific stream.
+
+        Retrieves detailed information about data blocks (chunks of events) within
+        a stream. Blocks are the fundamental storage unit for telemetry data.
+
+        Args:
+            begin (datetime): Start time for the query (inclusive).
+            end (datetime): End time for the query (exclusive).
+            limit (int): Maximum number of blocks to return.
+            stream_id (str): The unique identifier of the stream to query.
+
+        Returns:
+            pandas.DataFrame: DataFrame containing block information with columns:
+                - block_id: Unique block identifier
+                - stream_id: Parent stream identifier
+                - begin_time: Earliest event time in the block
+                - end_time: Latest event time in the block
+                - nb_events: Number of events in the block
+                - payload_size: Size of the block in bytes
+                - metadata: Additional block metadata
+
+        Example:
+            >>> # First, find a stream
+            >>> streams = client.query_streams(begin, end, limit=1)
+            >>> if not streams.empty:
+            ...     stream_id = streams['stream_id'].iloc[0]
+            ...
+            ...     # Query blocks in that stream
+            ...     blocks = client.query_blocks(begin, end, 100, stream_id)
+            ...     print(f"Found {len(blocks)} blocks")
+            ...     print(f"Total events: {blocks['nb_events'].sum()}")
+
+        Note:
+            Blocks are typically used for low-level data inspection and debugging.
+            For normal queries, use higher-level methods like query() or query_stream().
+        """
         sql = """
         SELECT *
         FROM blocks
@@ -185,6 +650,46 @@ class FlightSQLClient:
         return self.query(sql, begin, end)

     def query_spans(self, begin, end, limit, stream_id):
+        """Query thread spans (execution traces) for a specific stream.
+
+        Retrieves detailed span information showing the execution flow and timing
+        of operations within a stream. Spans are hierarchical and represent
+        function calls, operations, or logical units of work.
+
+        Args:
+            begin (datetime): Start time for the query (inclusive).
+            end (datetime): End time for the query (exclusive).
+            limit (int): Maximum number of spans to return.
+            stream_id (str): The stream identifier to query spans from.
+
+        Returns:
+            pandas.DataFrame: DataFrame containing span information with columns:
+                - span_id: Unique span identifier
+                - parent_span_id: Parent span for hierarchical traces
+                - name: Name of the operation or function
+                - begin_time: When the span started
+                - end_time: When the span completed
+                - duration: Duration in nanoseconds
+                - thread_id: Thread that executed the span
+                - properties: Additional span attributes
+
+        Example:
+            >>> # Query spans to analyze performance
+            >>> spans = client.query_spans(begin, end, 1000, stream_id)
+            >>>
+            >>> # Find slowest operations
+            >>> slow_spans = spans.nlargest(10, 'duration')
+            >>> for _, span in slow_spans.iterrows():
+            ...     print(f"{span['name']}: {span['duration']/1000000:.2f}ms")
+            >>>
+            >>> # Analyze span hierarchy
+            >>> root_spans = spans[spans['parent_span_id'].isna()]
+            >>> print(f"Found {len(root_spans)} root spans")
+
+        Note:
+            Spans are essential for performance analysis and distributed tracing.
+            Use with Perfetto trace generation for visualization.
+        """
         sql = """
         SELECT *
         FROM view_instance('thread_spans', '{stream_id}')