oscura 0.0.1__py3-none-any.whl → 0.1.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (465)
  1. oscura/__init__.py +813 -8
  2. oscura/__main__.py +392 -0
  3. oscura/analyzers/__init__.py +37 -0
  4. oscura/analyzers/digital/__init__.py +177 -0
  5. oscura/analyzers/digital/bus.py +691 -0
  6. oscura/analyzers/digital/clock.py +805 -0
  7. oscura/analyzers/digital/correlation.py +720 -0
  8. oscura/analyzers/digital/edges.py +632 -0
  9. oscura/analyzers/digital/extraction.py +413 -0
  10. oscura/analyzers/digital/quality.py +878 -0
  11. oscura/analyzers/digital/signal_quality.py +877 -0
  12. oscura/analyzers/digital/thresholds.py +708 -0
  13. oscura/analyzers/digital/timing.py +1104 -0
  14. oscura/analyzers/eye/__init__.py +46 -0
  15. oscura/analyzers/eye/diagram.py +434 -0
  16. oscura/analyzers/eye/metrics.py +555 -0
  17. oscura/analyzers/jitter/__init__.py +83 -0
  18. oscura/analyzers/jitter/ber.py +333 -0
  19. oscura/analyzers/jitter/decomposition.py +759 -0
  20. oscura/analyzers/jitter/measurements.py +413 -0
  21. oscura/analyzers/jitter/spectrum.py +220 -0
  22. oscura/analyzers/measurements.py +40 -0
  23. oscura/analyzers/packet/__init__.py +171 -0
  24. oscura/analyzers/packet/daq.py +1077 -0
  25. oscura/analyzers/packet/metrics.py +437 -0
  26. oscura/analyzers/packet/parser.py +327 -0
  27. oscura/analyzers/packet/payload.py +2156 -0
  28. oscura/analyzers/packet/payload_analysis.py +1312 -0
  29. oscura/analyzers/packet/payload_extraction.py +236 -0
  30. oscura/analyzers/packet/payload_patterns.py +670 -0
  31. oscura/analyzers/packet/stream.py +359 -0
  32. oscura/analyzers/patterns/__init__.py +266 -0
  33. oscura/analyzers/patterns/clustering.py +1036 -0
  34. oscura/analyzers/patterns/discovery.py +539 -0
  35. oscura/analyzers/patterns/learning.py +797 -0
  36. oscura/analyzers/patterns/matching.py +1091 -0
  37. oscura/analyzers/patterns/periodic.py +650 -0
  38. oscura/analyzers/patterns/sequences.py +767 -0
  39. oscura/analyzers/power/__init__.py +116 -0
  40. oscura/analyzers/power/ac_power.py +391 -0
  41. oscura/analyzers/power/basic.py +383 -0
  42. oscura/analyzers/power/conduction.py +314 -0
  43. oscura/analyzers/power/efficiency.py +297 -0
  44. oscura/analyzers/power/ripple.py +356 -0
  45. oscura/analyzers/power/soa.py +372 -0
  46. oscura/analyzers/power/switching.py +479 -0
  47. oscura/analyzers/protocol/__init__.py +150 -0
  48. oscura/analyzers/protocols/__init__.py +150 -0
  49. oscura/analyzers/protocols/base.py +500 -0
  50. oscura/analyzers/protocols/can.py +620 -0
  51. oscura/analyzers/protocols/can_fd.py +448 -0
  52. oscura/analyzers/protocols/flexray.py +405 -0
  53. oscura/analyzers/protocols/hdlc.py +399 -0
  54. oscura/analyzers/protocols/i2c.py +368 -0
  55. oscura/analyzers/protocols/i2s.py +296 -0
  56. oscura/analyzers/protocols/jtag.py +393 -0
  57. oscura/analyzers/protocols/lin.py +445 -0
  58. oscura/analyzers/protocols/manchester.py +333 -0
  59. oscura/analyzers/protocols/onewire.py +501 -0
  60. oscura/analyzers/protocols/spi.py +334 -0
  61. oscura/analyzers/protocols/swd.py +325 -0
  62. oscura/analyzers/protocols/uart.py +393 -0
  63. oscura/analyzers/protocols/usb.py +495 -0
  64. oscura/analyzers/signal_integrity/__init__.py +63 -0
  65. oscura/analyzers/signal_integrity/embedding.py +294 -0
  66. oscura/analyzers/signal_integrity/equalization.py +370 -0
  67. oscura/analyzers/signal_integrity/sparams.py +484 -0
  68. oscura/analyzers/spectral/__init__.py +53 -0
  69. oscura/analyzers/spectral/chunked.py +273 -0
  70. oscura/analyzers/spectral/chunked_fft.py +571 -0
  71. oscura/analyzers/spectral/chunked_wavelet.py +391 -0
  72. oscura/analyzers/spectral/fft.py +92 -0
  73. oscura/analyzers/statistical/__init__.py +250 -0
  74. oscura/analyzers/statistical/checksum.py +923 -0
  75. oscura/analyzers/statistical/chunked_corr.py +228 -0
  76. oscura/analyzers/statistical/classification.py +778 -0
  77. oscura/analyzers/statistical/entropy.py +1113 -0
  78. oscura/analyzers/statistical/ngrams.py +614 -0
  79. oscura/analyzers/statistics/__init__.py +119 -0
  80. oscura/analyzers/statistics/advanced.py +885 -0
  81. oscura/analyzers/statistics/basic.py +263 -0
  82. oscura/analyzers/statistics/correlation.py +630 -0
  83. oscura/analyzers/statistics/distribution.py +298 -0
  84. oscura/analyzers/statistics/outliers.py +463 -0
  85. oscura/analyzers/statistics/streaming.py +93 -0
  86. oscura/analyzers/statistics/trend.py +520 -0
  87. oscura/analyzers/validation.py +598 -0
  88. oscura/analyzers/waveform/__init__.py +36 -0
  89. oscura/analyzers/waveform/measurements.py +943 -0
  90. oscura/analyzers/waveform/measurements_with_uncertainty.py +371 -0
  91. oscura/analyzers/waveform/spectral.py +1689 -0
  92. oscura/analyzers/waveform/wavelets.py +298 -0
  93. oscura/api/__init__.py +62 -0
  94. oscura/api/dsl.py +538 -0
  95. oscura/api/fluent.py +571 -0
  96. oscura/api/operators.py +498 -0
  97. oscura/api/optimization.py +392 -0
  98. oscura/api/profiling.py +396 -0
  99. oscura/automotive/__init__.py +73 -0
  100. oscura/automotive/can/__init__.py +52 -0
  101. oscura/automotive/can/analysis.py +356 -0
  102. oscura/automotive/can/checksum.py +250 -0
  103. oscura/automotive/can/correlation.py +212 -0
  104. oscura/automotive/can/discovery.py +355 -0
  105. oscura/automotive/can/message_wrapper.py +375 -0
  106. oscura/automotive/can/models.py +385 -0
  107. oscura/automotive/can/patterns.py +381 -0
  108. oscura/automotive/can/session.py +452 -0
  109. oscura/automotive/can/state_machine.py +300 -0
  110. oscura/automotive/can/stimulus_response.py +461 -0
  111. oscura/automotive/dbc/__init__.py +15 -0
  112. oscura/automotive/dbc/generator.py +156 -0
  113. oscura/automotive/dbc/parser.py +146 -0
  114. oscura/automotive/dtc/__init__.py +30 -0
  115. oscura/automotive/dtc/database.py +3036 -0
  116. oscura/automotive/j1939/__init__.py +14 -0
  117. oscura/automotive/j1939/decoder.py +745 -0
  118. oscura/automotive/loaders/__init__.py +35 -0
  119. oscura/automotive/loaders/asc.py +98 -0
  120. oscura/automotive/loaders/blf.py +77 -0
  121. oscura/automotive/loaders/csv_can.py +136 -0
  122. oscura/automotive/loaders/dispatcher.py +136 -0
  123. oscura/automotive/loaders/mdf.py +331 -0
  124. oscura/automotive/loaders/pcap.py +132 -0
  125. oscura/automotive/obd/__init__.py +14 -0
  126. oscura/automotive/obd/decoder.py +707 -0
  127. oscura/automotive/uds/__init__.py +48 -0
  128. oscura/automotive/uds/decoder.py +265 -0
  129. oscura/automotive/uds/models.py +64 -0
  130. oscura/automotive/visualization.py +369 -0
  131. oscura/batch/__init__.py +55 -0
  132. oscura/batch/advanced.py +627 -0
  133. oscura/batch/aggregate.py +300 -0
  134. oscura/batch/analyze.py +139 -0
  135. oscura/batch/logging.py +487 -0
  136. oscura/batch/metrics.py +556 -0
  137. oscura/builders/__init__.py +41 -0
  138. oscura/builders/signal_builder.py +1131 -0
  139. oscura/cli/__init__.py +14 -0
  140. oscura/cli/batch.py +339 -0
  141. oscura/cli/characterize.py +273 -0
  142. oscura/cli/compare.py +775 -0
  143. oscura/cli/decode.py +551 -0
  144. oscura/cli/main.py +247 -0
  145. oscura/cli/shell.py +350 -0
  146. oscura/comparison/__init__.py +66 -0
  147. oscura/comparison/compare.py +397 -0
  148. oscura/comparison/golden.py +487 -0
  149. oscura/comparison/limits.py +391 -0
  150. oscura/comparison/mask.py +434 -0
  151. oscura/comparison/trace_diff.py +30 -0
  152. oscura/comparison/visualization.py +481 -0
  153. oscura/compliance/__init__.py +70 -0
  154. oscura/compliance/advanced.py +756 -0
  155. oscura/compliance/masks.py +363 -0
  156. oscura/compliance/reporting.py +483 -0
  157. oscura/compliance/testing.py +298 -0
  158. oscura/component/__init__.py +38 -0
  159. oscura/component/impedance.py +365 -0
  160. oscura/component/reactive.py +598 -0
  161. oscura/component/transmission_line.py +312 -0
  162. oscura/config/__init__.py +191 -0
  163. oscura/config/defaults.py +254 -0
  164. oscura/config/loader.py +348 -0
  165. oscura/config/memory.py +271 -0
  166. oscura/config/migration.py +458 -0
  167. oscura/config/pipeline.py +1077 -0
  168. oscura/config/preferences.py +530 -0
  169. oscura/config/protocol.py +875 -0
  170. oscura/config/schema.py +713 -0
  171. oscura/config/settings.py +420 -0
  172. oscura/config/thresholds.py +599 -0
  173. oscura/convenience.py +457 -0
  174. oscura/core/__init__.py +299 -0
  175. oscura/core/audit.py +457 -0
  176. oscura/core/backend_selector.py +405 -0
  177. oscura/core/cache.py +590 -0
  178. oscura/core/cancellation.py +439 -0
  179. oscura/core/confidence.py +225 -0
  180. oscura/core/config.py +506 -0
  181. oscura/core/correlation.py +216 -0
  182. oscura/core/cross_domain.py +422 -0
  183. oscura/core/debug.py +301 -0
  184. oscura/core/edge_cases.py +541 -0
  185. oscura/core/exceptions.py +535 -0
  186. oscura/core/gpu_backend.py +523 -0
  187. oscura/core/lazy.py +832 -0
  188. oscura/core/log_query.py +540 -0
  189. oscura/core/logging.py +931 -0
  190. oscura/core/logging_advanced.py +952 -0
  191. oscura/core/memoize.py +171 -0
  192. oscura/core/memory_check.py +274 -0
  193. oscura/core/memory_guard.py +290 -0
  194. oscura/core/memory_limits.py +336 -0
  195. oscura/core/memory_monitor.py +453 -0
  196. oscura/core/memory_progress.py +465 -0
  197. oscura/core/memory_warnings.py +315 -0
  198. oscura/core/numba_backend.py +362 -0
  199. oscura/core/performance.py +352 -0
  200. oscura/core/progress.py +524 -0
  201. oscura/core/provenance.py +358 -0
  202. oscura/core/results.py +331 -0
  203. oscura/core/types.py +504 -0
  204. oscura/core/uncertainty.py +383 -0
  205. oscura/discovery/__init__.py +52 -0
  206. oscura/discovery/anomaly_detector.py +672 -0
  207. oscura/discovery/auto_decoder.py +415 -0
  208. oscura/discovery/comparison.py +497 -0
  209. oscura/discovery/quality_validator.py +528 -0
  210. oscura/discovery/signal_detector.py +769 -0
  211. oscura/dsl/__init__.py +73 -0
  212. oscura/dsl/commands.py +246 -0
  213. oscura/dsl/interpreter.py +455 -0
  214. oscura/dsl/parser.py +689 -0
  215. oscura/dsl/repl.py +172 -0
  216. oscura/exceptions.py +59 -0
  217. oscura/exploratory/__init__.py +111 -0
  218. oscura/exploratory/error_recovery.py +642 -0
  219. oscura/exploratory/fuzzy.py +513 -0
  220. oscura/exploratory/fuzzy_advanced.py +786 -0
  221. oscura/exploratory/legacy.py +831 -0
  222. oscura/exploratory/parse.py +358 -0
  223. oscura/exploratory/recovery.py +275 -0
  224. oscura/exploratory/sync.py +382 -0
  225. oscura/exploratory/unknown.py +707 -0
  226. oscura/export/__init__.py +25 -0
  227. oscura/export/wireshark/README.md +265 -0
  228. oscura/export/wireshark/__init__.py +47 -0
  229. oscura/export/wireshark/generator.py +312 -0
  230. oscura/export/wireshark/lua_builder.py +159 -0
  231. oscura/export/wireshark/templates/dissector.lua.j2 +92 -0
  232. oscura/export/wireshark/type_mapping.py +165 -0
  233. oscura/export/wireshark/validator.py +105 -0
  234. oscura/exporters/__init__.py +94 -0
  235. oscura/exporters/csv.py +303 -0
  236. oscura/exporters/exporters.py +44 -0
  237. oscura/exporters/hdf5.py +219 -0
  238. oscura/exporters/html_export.py +701 -0
  239. oscura/exporters/json_export.py +291 -0
  240. oscura/exporters/markdown_export.py +367 -0
  241. oscura/exporters/matlab_export.py +354 -0
  242. oscura/exporters/npz_export.py +219 -0
  243. oscura/exporters/spice_export.py +210 -0
  244. oscura/extensibility/__init__.py +131 -0
  245. oscura/extensibility/docs.py +752 -0
  246. oscura/extensibility/extensions.py +1125 -0
  247. oscura/extensibility/logging.py +259 -0
  248. oscura/extensibility/measurements.py +485 -0
  249. oscura/extensibility/plugins.py +414 -0
  250. oscura/extensibility/registry.py +346 -0
  251. oscura/extensibility/templates.py +913 -0
  252. oscura/extensibility/validation.py +651 -0
  253. oscura/filtering/__init__.py +89 -0
  254. oscura/filtering/base.py +563 -0
  255. oscura/filtering/convenience.py +564 -0
  256. oscura/filtering/design.py +725 -0
  257. oscura/filtering/filters.py +32 -0
  258. oscura/filtering/introspection.py +605 -0
  259. oscura/guidance/__init__.py +24 -0
  260. oscura/guidance/recommender.py +429 -0
  261. oscura/guidance/wizard.py +518 -0
  262. oscura/inference/__init__.py +251 -0
  263. oscura/inference/active_learning/README.md +153 -0
  264. oscura/inference/active_learning/__init__.py +38 -0
  265. oscura/inference/active_learning/lstar.py +257 -0
  266. oscura/inference/active_learning/observation_table.py +230 -0
  267. oscura/inference/active_learning/oracle.py +78 -0
  268. oscura/inference/active_learning/teachers/__init__.py +15 -0
  269. oscura/inference/active_learning/teachers/simulator.py +192 -0
  270. oscura/inference/adaptive_tuning.py +453 -0
  271. oscura/inference/alignment.py +653 -0
  272. oscura/inference/bayesian.py +943 -0
  273. oscura/inference/binary.py +1016 -0
  274. oscura/inference/crc_reverse.py +711 -0
  275. oscura/inference/logic.py +288 -0
  276. oscura/inference/message_format.py +1305 -0
  277. oscura/inference/protocol.py +417 -0
  278. oscura/inference/protocol_dsl.py +1084 -0
  279. oscura/inference/protocol_library.py +1230 -0
  280. oscura/inference/sequences.py +809 -0
  281. oscura/inference/signal_intelligence.py +1509 -0
  282. oscura/inference/spectral.py +215 -0
  283. oscura/inference/state_machine.py +634 -0
  284. oscura/inference/stream.py +918 -0
  285. oscura/integrations/__init__.py +59 -0
  286. oscura/integrations/llm.py +1827 -0
  287. oscura/jupyter/__init__.py +32 -0
  288. oscura/jupyter/display.py +268 -0
  289. oscura/jupyter/magic.py +334 -0
  290. oscura/loaders/__init__.py +526 -0
  291. oscura/loaders/binary.py +69 -0
  292. oscura/loaders/configurable.py +1255 -0
  293. oscura/loaders/csv.py +26 -0
  294. oscura/loaders/csv_loader.py +473 -0
  295. oscura/loaders/hdf5.py +9 -0
  296. oscura/loaders/hdf5_loader.py +510 -0
  297. oscura/loaders/lazy.py +370 -0
  298. oscura/loaders/mmap_loader.py +583 -0
  299. oscura/loaders/numpy_loader.py +436 -0
  300. oscura/loaders/pcap.py +432 -0
  301. oscura/loaders/preprocessing.py +368 -0
  302. oscura/loaders/rigol.py +287 -0
  303. oscura/loaders/sigrok.py +321 -0
  304. oscura/loaders/tdms.py +367 -0
  305. oscura/loaders/tektronix.py +711 -0
  306. oscura/loaders/validation.py +584 -0
  307. oscura/loaders/vcd.py +464 -0
  308. oscura/loaders/wav.py +233 -0
  309. oscura/math/__init__.py +45 -0
  310. oscura/math/arithmetic.py +824 -0
  311. oscura/math/interpolation.py +413 -0
  312. oscura/onboarding/__init__.py +39 -0
  313. oscura/onboarding/help.py +498 -0
  314. oscura/onboarding/tutorials.py +405 -0
  315. oscura/onboarding/wizard.py +466 -0
  316. oscura/optimization/__init__.py +19 -0
  317. oscura/optimization/parallel.py +440 -0
  318. oscura/optimization/search.py +532 -0
  319. oscura/pipeline/__init__.py +43 -0
  320. oscura/pipeline/base.py +338 -0
  321. oscura/pipeline/composition.py +242 -0
  322. oscura/pipeline/parallel.py +448 -0
  323. oscura/pipeline/pipeline.py +375 -0
  324. oscura/pipeline/reverse_engineering.py +1119 -0
  325. oscura/plugins/__init__.py +122 -0
  326. oscura/plugins/base.py +272 -0
  327. oscura/plugins/cli.py +497 -0
  328. oscura/plugins/discovery.py +411 -0
  329. oscura/plugins/isolation.py +418 -0
  330. oscura/plugins/lifecycle.py +959 -0
  331. oscura/plugins/manager.py +493 -0
  332. oscura/plugins/registry.py +421 -0
  333. oscura/plugins/versioning.py +372 -0
  334. oscura/py.typed +0 -0
  335. oscura/quality/__init__.py +65 -0
  336. oscura/quality/ensemble.py +740 -0
  337. oscura/quality/explainer.py +338 -0
  338. oscura/quality/scoring.py +616 -0
  339. oscura/quality/warnings.py +456 -0
  340. oscura/reporting/__init__.py +248 -0
  341. oscura/reporting/advanced.py +1234 -0
  342. oscura/reporting/analyze.py +448 -0
  343. oscura/reporting/argument_preparer.py +596 -0
  344. oscura/reporting/auto_report.py +507 -0
  345. oscura/reporting/batch.py +615 -0
  346. oscura/reporting/chart_selection.py +223 -0
  347. oscura/reporting/comparison.py +330 -0
  348. oscura/reporting/config.py +615 -0
  349. oscura/reporting/content/__init__.py +39 -0
  350. oscura/reporting/content/executive.py +127 -0
  351. oscura/reporting/content/filtering.py +191 -0
  352. oscura/reporting/content/minimal.py +257 -0
  353. oscura/reporting/content/verbosity.py +162 -0
  354. oscura/reporting/core.py +508 -0
  355. oscura/reporting/core_formats/__init__.py +17 -0
  356. oscura/reporting/core_formats/multi_format.py +210 -0
  357. oscura/reporting/engine.py +836 -0
  358. oscura/reporting/export.py +366 -0
  359. oscura/reporting/formatting/__init__.py +129 -0
  360. oscura/reporting/formatting/emphasis.py +81 -0
  361. oscura/reporting/formatting/numbers.py +403 -0
  362. oscura/reporting/formatting/standards.py +55 -0
  363. oscura/reporting/formatting.py +466 -0
  364. oscura/reporting/html.py +578 -0
  365. oscura/reporting/index.py +590 -0
  366. oscura/reporting/multichannel.py +296 -0
  367. oscura/reporting/output.py +379 -0
  368. oscura/reporting/pdf.py +373 -0
  369. oscura/reporting/plots.py +731 -0
  370. oscura/reporting/pptx_export.py +360 -0
  371. oscura/reporting/renderers/__init__.py +11 -0
  372. oscura/reporting/renderers/pdf.py +94 -0
  373. oscura/reporting/sections.py +471 -0
  374. oscura/reporting/standards.py +680 -0
  375. oscura/reporting/summary_generator.py +368 -0
  376. oscura/reporting/tables.py +397 -0
  377. oscura/reporting/template_system.py +724 -0
  378. oscura/reporting/templates/__init__.py +15 -0
  379. oscura/reporting/templates/definition.py +205 -0
  380. oscura/reporting/templates/index.html +649 -0
  381. oscura/reporting/templates/index.md +173 -0
  382. oscura/schemas/__init__.py +158 -0
  383. oscura/schemas/bus_configuration.json +322 -0
  384. oscura/schemas/device_mapping.json +182 -0
  385. oscura/schemas/packet_format.json +418 -0
  386. oscura/schemas/protocol_definition.json +363 -0
  387. oscura/search/__init__.py +16 -0
  388. oscura/search/anomaly.py +292 -0
  389. oscura/search/context.py +149 -0
  390. oscura/search/pattern.py +160 -0
  391. oscura/session/__init__.py +34 -0
  392. oscura/session/annotations.py +289 -0
  393. oscura/session/history.py +313 -0
  394. oscura/session/session.py +445 -0
  395. oscura/streaming/__init__.py +43 -0
  396. oscura/streaming/chunked.py +611 -0
  397. oscura/streaming/progressive.py +393 -0
  398. oscura/streaming/realtime.py +622 -0
  399. oscura/testing/__init__.py +54 -0
  400. oscura/testing/synthetic.py +808 -0
  401. oscura/triggering/__init__.py +68 -0
  402. oscura/triggering/base.py +229 -0
  403. oscura/triggering/edge.py +353 -0
  404. oscura/triggering/pattern.py +344 -0
  405. oscura/triggering/pulse.py +581 -0
  406. oscura/triggering/window.py +453 -0
  407. oscura/ui/__init__.py +48 -0
  408. oscura/ui/formatters.py +526 -0
  409. oscura/ui/progressive_display.py +340 -0
  410. oscura/utils/__init__.py +99 -0
  411. oscura/utils/autodetect.py +338 -0
  412. oscura/utils/buffer.py +389 -0
  413. oscura/utils/lazy.py +407 -0
  414. oscura/utils/lazy_imports.py +147 -0
  415. oscura/utils/memory.py +836 -0
  416. oscura/utils/memory_advanced.py +1326 -0
  417. oscura/utils/memory_extensions.py +465 -0
  418. oscura/utils/progressive.py +352 -0
  419. oscura/utils/windowing.py +362 -0
  420. oscura/visualization/__init__.py +321 -0
  421. oscura/visualization/accessibility.py +526 -0
  422. oscura/visualization/annotations.py +374 -0
  423. oscura/visualization/axis_scaling.py +305 -0
  424. oscura/visualization/colors.py +453 -0
  425. oscura/visualization/digital.py +337 -0
  426. oscura/visualization/eye.py +420 -0
  427. oscura/visualization/histogram.py +281 -0
  428. oscura/visualization/interactive.py +858 -0
  429. oscura/visualization/jitter.py +702 -0
  430. oscura/visualization/keyboard.py +394 -0
  431. oscura/visualization/layout.py +365 -0
  432. oscura/visualization/optimization.py +1028 -0
  433. oscura/visualization/palettes.py +446 -0
  434. oscura/visualization/plot.py +92 -0
  435. oscura/visualization/power.py +290 -0
  436. oscura/visualization/power_extended.py +626 -0
  437. oscura/visualization/presets.py +467 -0
  438. oscura/visualization/protocols.py +932 -0
  439. oscura/visualization/render.py +207 -0
  440. oscura/visualization/rendering.py +444 -0
  441. oscura/visualization/reverse_engineering.py +791 -0
  442. oscura/visualization/signal_integrity.py +808 -0
  443. oscura/visualization/specialized.py +553 -0
  444. oscura/visualization/spectral.py +811 -0
  445. oscura/visualization/styles.py +381 -0
  446. oscura/visualization/thumbnails.py +311 -0
  447. oscura/visualization/time_axis.py +351 -0
  448. oscura/visualization/waveform.py +367 -0
  449. oscura/workflow/__init__.py +13 -0
  450. oscura/workflow/dag.py +377 -0
  451. oscura/workflows/__init__.py +58 -0
  452. oscura/workflows/compliance.py +280 -0
  453. oscura/workflows/digital.py +272 -0
  454. oscura/workflows/multi_trace.py +502 -0
  455. oscura/workflows/power.py +178 -0
  456. oscura/workflows/protocol.py +492 -0
  457. oscura/workflows/reverse_engineering.py +639 -0
  458. oscura/workflows/signal_integrity.py +227 -0
  459. oscura-0.1.1.dist-info/METADATA +300 -0
  460. oscura-0.1.1.dist-info/RECORD +463 -0
  461. oscura-0.1.1.dist-info/entry_points.txt +2 -0
  462. {oscura-0.0.1.dist-info → oscura-0.1.1.dist-info}/licenses/LICENSE +1 -1
  463. oscura-0.0.1.dist-info/METADATA +0 -63
  464. oscura-0.0.1.dist-info/RECORD +0 -5
  465. {oscura-0.0.1.dist-info → oscura-0.1.1.dist-info}/WHEEL +0 -0
@@ -0,0 +1,1827 @@
+ """LLM Integration for Oscura.
+
+ Provides hooks for Large Language Model integration to enable natural language
+ analysis and assistance.
+
+ Examples:
+     Basic usage with auto-selection:
+
+     >>> from oscura.integrations import llm
+     >>> client = llm.get_client()  # Auto-selects available provider
+     >>> response = client.chat_completion("What is signal rise time?")
+
+     Provider-specific usage:
+
+     >>> client = llm.get_client("openai", model="gpt-4")
+     >>> analysis = client.analyze_trace({"sample_rate": 1e9, "mean": 0.5})
+
+     With failover:
+
+     >>> client = llm.get_client_with_failover(
+     ...     providers=["openai", "anthropic", "local"]
+     ... )
+ """
+
+ import hashlib
+ import json
+ import os
+ import time
+ from collections.abc import Callable
+ from dataclasses import dataclass, field
+ from enum import Enum
+ from threading import Lock
+ from typing import Any, Protocol
+
+ from oscura.core.exceptions import OscuraError
+
+ # ==============================================================================
+ # Cost Constants (API-020: Cost Tracking)
+ # ==============================================================================
+
+ # Pricing per 1K tokens (approximate, as of 2024)
+ TOKEN_COSTS: dict[str, dict[str, float]] = {
+     "gpt-4": {"input": 0.03, "output": 0.06},
+     "gpt-4-turbo": {"input": 0.01, "output": 0.03},
+     "gpt-4o": {"input": 0.005, "output": 0.015},
+     "gpt-4o-mini": {"input": 0.00015, "output": 0.0006},
+     "gpt-3.5-turbo": {"input": 0.0005, "output": 0.0015},
+     "claude-3-opus": {"input": 0.015, "output": 0.075},
+     "claude-3-opus-20240229": {"input": 0.015, "output": 0.075},
+     "claude-3-sonnet": {"input": 0.003, "output": 0.015},
+     "claude-3-sonnet-20240229": {"input": 0.003, "output": 0.015},
+     "claude-3-haiku": {"input": 0.00025, "output": 0.00125},
+     "claude-3-haiku-20240307": {"input": 0.00025, "output": 0.00125},
+     "claude-3-5-sonnet": {"input": 0.003, "output": 0.015},
+     "claude-3-5-sonnet-20241022": {"input": 0.003, "output": 0.015},
+     "default": {"input": 0.001, "output": 0.002},
+ }
+
+
+ @dataclass
+ class CostTracker:
+     """Tracks API usage costs.
+
+     Attributes:
+         total_input_tokens: Total input tokens used across all requests
+         total_output_tokens: Total output tokens used across all requests
+         total_cost: Total estimated cost in USD
+         request_count: Number of API requests made
+     """
+
+     total_input_tokens: int = 0
+     total_output_tokens: int = 0
+     total_cost: float = 0.0
+     request_count: int = 0
+     _lock: Lock = field(default_factory=Lock)
+
+     def record(self, model: str, input_tokens: int, output_tokens: int) -> float:
+         """Record token usage and return estimated cost.
+
+         Args:
+             model: Model name for cost lookup
+             input_tokens: Number of input/prompt tokens
+             output_tokens: Number of output/completion tokens
+
+         Returns:
+             Estimated cost in USD for this request
+         """
+         # Get cost rates for model, fall back to default
+         rates = TOKEN_COSTS.get(model, TOKEN_COSTS["default"])
+
+         cost = input_tokens / 1000 * rates["input"] + output_tokens / 1000 * rates["output"]
+
+         with self._lock:
+             self.total_input_tokens += input_tokens
+             self.total_output_tokens += output_tokens
+             self.total_cost += cost
+             self.request_count += 1
+
+         return cost
+
+     def reset(self) -> None:
+         """Reset all tracking counters."""
+         with self._lock:
+             self.total_input_tokens = 0
+             self.total_output_tokens = 0
+             self.total_cost = 0.0
+             self.request_count = 0
+
+     def get_summary(self) -> dict[str, Any]:
+         """Get summary of usage statistics.
+
+         Returns:
+             Dictionary with usage statistics
+         """
+         with self._lock:
+             return {
+                 "total_input_tokens": self.total_input_tokens,
+                 "total_output_tokens": self.total_output_tokens,
+                 "total_tokens": self.total_input_tokens + self.total_output_tokens,
+                 "total_cost_usd": round(self.total_cost, 6),
+                 "request_count": self.request_count,
+                 "avg_cost_per_request": (
+                     round(self.total_cost / self.request_count, 6)
+                     if self.request_count > 0
+                     else 0.0
+                 ),
+             }
+
+
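A worked example of the pricing arithmetic in CostTracker.record, assuming the TOKEN_COSTS table above (a minimal sketch, not additional diff content):

    tracker = CostTracker()
    # gpt-4o: $0.005/1K input tokens, $0.015/1K output tokens
    cost = tracker.record("gpt-4o", input_tokens=2000, output_tokens=500)
    # 2000/1000 * 0.005 + 500/1000 * 0.015 = 0.01 + 0.0075 = 0.0175 USD
    assert abs(cost - 0.0175) < 1e-9
    print(tracker.get_summary()["total_cost_usd"])  # 0.0175

Unknown models fall back to the "default" rates, so record() never raises on an unlisted model name.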
+ class ResponseCache:
+     """Simple bounded, TTL-based cache for LLM responses.
+
+     Caches responses based on prompt hash to avoid repeated API calls
+     for identical queries. Evicts the oldest entry when full.
+     Thread-safe implementation.
+     """
+
+     def __init__(self, max_size: int = 100, ttl_seconds: float = 3600.0):
+         """Initialize response cache.
+
+         Args:
+             max_size: Maximum number of cached responses
+             ttl_seconds: Time-to-live for cache entries in seconds
+         """
+         self.max_size = max_size
+         self.ttl_seconds = ttl_seconds
+         self._cache: dict[str, tuple[Any, float]] = {}
+         self._lock = Lock()
+
+     def _make_key(self, prompt: str, model: str, **kwargs: Any) -> str:
+         """Create cache key from request parameters.
+
+         Args:
+             prompt: The prompt text
+             model: Model name
+             **kwargs: Additional parameters affecting response
+
+         Returns:
+             Hash key for cache lookup
+         """
+         key_data = json.dumps(
+             {"prompt": prompt, "model": model, "kwargs": sorted(kwargs.items())}, sort_keys=True
+         )
+         return hashlib.sha256(key_data.encode()).hexdigest()
+
+     def get(self, prompt: str, model: str, **kwargs: Any) -> Any | None:
+         """Get cached response if available and not expired.
+
+         Args:
+             prompt: The prompt text
+             model: Model name
+             **kwargs: Additional parameters
+
+         Returns:
+             Cached response or None if not found/expired
+         """
+         key = self._make_key(prompt, model, **kwargs)
+
+         with self._lock:
+             if key in self._cache:
+                 response, timestamp = self._cache[key]
+                 if time.time() - timestamp < self.ttl_seconds:
+                     return response
+                 # Expired entry
+                 del self._cache[key]
+         return None
+
+     def set(self, prompt: str, model: str, response: Any, **kwargs: Any) -> None:
+         """Cache a response.
+
+         Args:
+             prompt: The prompt text
+             model: Model name
+             response: Response to cache
+             **kwargs: Additional parameters
+         """
+         key = self._make_key(prompt, model, **kwargs)
+
+         with self._lock:
+             # Evict oldest entries if at capacity
+             while len(self._cache) >= self.max_size:
+                 oldest_key = min(self._cache.keys(), key=lambda k: self._cache[k][1])
+                 del self._cache[oldest_key]
+
+             self._cache[key] = (response, time.time())
+
+     def clear(self) -> None:
+         """Clear all cached entries."""
+         with self._lock:
+             self._cache.clear()
+
+     @property
+     def size(self) -> int:
+         """Current number of cached entries."""
+         with self._lock:
+             return len(self._cache)
+
+
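A minimal round-trip sketch of the cache semantics above (any object can serve as the cached value; the strings here are placeholders):

    cache = ResponseCache(max_size=2, ttl_seconds=60.0)
    cache.set("What is rise time?", "gpt-4o", response="10-90% transition time")
    hit = cache.get("What is rise time?", "gpt-4o")   # -> "10-90% transition time"
    miss = cache.get("What is rise time?", "gpt-4")   # different model -> None
    # A third distinct set() would evict the entry with the oldest timestamp,
    # and any entry older than ttl_seconds is dropped on the next get().

Because _make_key hashes prompt, model, and sorted kwargs together, any change to a generation parameter produces a different key.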
+ # Global instances for tracking
+ _global_cost_tracker = CostTracker()
+ _global_response_cache = ResponseCache()
+
+
+ def get_cost_tracker() -> CostTracker:
+     """Get global cost tracker instance.
+
+     Returns:
+         Global CostTracker for monitoring API costs
+     """
+     return _global_cost_tracker
+
+
+ def get_response_cache() -> ResponseCache:
+     """Get global response cache instance.
+
+     Returns:
+         Global ResponseCache for caching LLM responses
+     """
+     return _global_response_cache
+
+
+ class LLMProvider(Enum):
+     """Supported LLM providers."""
+
+     OPENAI = "openai"
+     ANTHROPIC = "anthropic"
+     LOCAL = "local"
+     CUSTOM = "custom"
+
+
+ class AnalysisHook(Enum):
+     """Hook points for LLM integration."""
+
+     BEFORE_ANALYSIS = "before_analysis"
+     AFTER_ANALYSIS = "after_analysis"
+     ON_ERROR = "on_error"
+
+
+ class RateLimiter:
+     """Rate limiter for API requests.
+
+     Paces requests by enforcing a minimum interval between them,
+     derived from a configurable requests/minute budget.
+     """
+
+     def __init__(self, requests_per_minute: int = 60):
+         """Initialize rate limiter.
+
+         Args:
+             requests_per_minute: Maximum requests allowed per minute
+         """
+         self.requests_per_minute = requests_per_minute
+         self.min_interval = 60.0 / requests_per_minute if requests_per_minute > 0 else 0
+         self.last_request_time = 0.0
+         self.lock = Lock()
+
+     def acquire(self) -> None:
+         """Wait if necessary to respect rate limit."""
+         if self.requests_per_minute <= 0:
+             return  # No rate limiting
+
+         with self.lock:
+             now = time.time()
+             time_since_last = now - self.last_request_time
+             if time_since_last < self.min_interval:
+                 sleep_time = self.min_interval - time_since_last
+                 time.sleep(sleep_time)
+             self.last_request_time = time.time()
+
+
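The pacing arithmetic: at 120 requests/minute, min_interval = 60.0 / 120 = 0.5 s, so every acquire() after the first blocks until half a second has passed since the previous one. A minimal sketch (assumes the module-level time import above):

    limiter = RateLimiter(requests_per_minute=120)  # min_interval = 0.5 s
    start = time.time()
    for _ in range(3):
        limiter.acquire()  # second and third calls sleep ~0.5 s each
    print(f"elapsed ~{time.time() - start:.1f}s")  # roughly 1.0 s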
+ @dataclass
+ class LLMConfig:
+     """Configuration for LLM integration.
+
+     Attributes:
+         provider: LLM provider to use
+         model: Model identifier (e.g., 'gpt-4', 'claude-3-opus')
+         api_key: API key for cloud providers (optional)
+         base_url: Custom API endpoint (for local/custom providers)
+         privacy_mode: If True, no data sent to cloud (local only)
+         timeout: Request timeout in seconds
+         max_retries: Maximum retry attempts for failed requests
+         requests_per_minute: Rate limit for API requests (API-020)
+         enable_cache: If True, cache responses for repeated queries (API-020)
+         track_costs: If True, track token usage and costs (API-020)
+     """
+
+     provider: LLMProvider = LLMProvider.LOCAL
+     model: str = "default"
+     api_key: str | None = None
+     base_url: str | None = None
+     privacy_mode: bool = True
+     timeout: float = 30.0
+     max_retries: int = 3
+     requests_per_minute: int = 60
+     enable_cache: bool = False
+     track_costs: bool = True
+
+
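Two representative configurations (a sketch based only on the fields above): the defaults keep everything local with privacy_mode on, while a cloud setup must disable privacy_mode explicitly, since LLMIntegration's client factories below raise when privacy_mode is set for a cloud provider.

    local_cfg = LLMConfig()  # provider=LOCAL, privacy_mode=True by default
    cloud_cfg = LLMConfig(
        provider=LLMProvider.OPENAI,
        model="gpt-4o",
        privacy_mode=False,      # cloud providers are rejected in privacy mode
        requests_per_minute=30,  # halve the default API budget
    )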
+ def estimate_tokens(text: str) -> int:
+     """Estimate token count for text (API-019: token counting).
+
+     Uses approximate character-to-token ratio. Actual count varies by model.
+
+     Args:
+         text: Input text to estimate tokens for
+
+     Returns:
+         Estimated token count (roughly 4 characters per token)
+     """
+     # Average ~4 chars per token for English text
+     return max(1, len(text) // 4)
+
+
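The max(1, ...) floor matters for short strings, where integer division would otherwise report zero tokens:

    estimate_tokens("hi")                  # len 2 // 4 = 0, floored -> 1
    estimate_tokens("What is rise time?")  # len 18 // 4 -> 4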
+ @dataclass
+ class LLMResponse:
+     """Response from LLM query.
+
+     Attributes:
+         answer: Main text response
+         confidence: Confidence score (0-1) if available
+         suggested_commands: List of suggested Oscura commands
+         metadata: Additional metadata from LLM
+         raw_response: Raw response data for debugging
+         estimated_cost: Estimated cost in USD for this request (API-020)
+         cached: Whether this response was served from cache (API-020)
+     """
+
+     answer: str
+     confidence: float | None = None
+     suggested_commands: list[str] = field(default_factory=list)
+     metadata: dict[str, Any] = field(default_factory=dict)
+     raw_response: dict[str, Any] | None = None
+     estimated_cost: float = 0.0
+     cached: bool = False
+
+
+ class LLMClient(Protocol):
+     """Protocol for LLM client implementations."""
+
+     def query(self, prompt: str, context: dict[str, Any]) -> LLMResponse:
+         """Send query to LLM.
+
+         Args:
+             prompt: User prompt
+             context: Analysis context (trace metadata, etc.)
+         """
+         ...
+
+     def analyze(self, trace: Any, question: str) -> LLMResponse:
+         """Analyze trace with natural language question.
+
+         Args:
+             trace: Trace object
+             question: Natural language question
+         """
+         ...
+
+     def explain(self, measurement: Any) -> str:
+         """Explain a measurement result.
+
+         Args:
+             measurement: Measurement result
+         """
+         ...
+
+
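Because LLMClient is a typing.Protocol, conformance is structural: any class with matching query/analyze/explain signatures type-checks as an LLMClient without inheriting from it. A minimal offline stub (EchoClient is a hypothetical illustration, not part of the package):

    class EchoClient:
        """Hypothetical stand-in that satisfies LLMClient structurally."""

        def query(self, prompt: str, context: dict[str, Any]) -> LLMResponse:
            return LLMResponse(answer=f"echo: {prompt}", metadata={"context_keys": list(context)})

        def analyze(self, trace: Any, question: str) -> LLMResponse:
            return LLMResponse(answer=f"no-op analysis of {type(trace).__name__}: {question}")

        def explain(self, measurement: Any) -> str:
            return f"measurement = {measurement!r}"

    client: LLMClient = EchoClient()  # accepted by static type checkers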
+ class LLMError(OscuraError):
+     """LLM integration error."""
+
+
+ class LLMIntegration:
+     """LLM integration manager.
+
+     Provides hooks for LLM-assisted analysis and natural language interfaces.
+     """
+
+     def __init__(self, config: LLMConfig | None = None):
+         """Initialize LLM integration.
+
+         Args:
+             config: LLM configuration (defaults to privacy mode)
+         """
+         self.config = config or LLMConfig()
+         self._client: LLMClient | None = None
+         self._hooks: dict[AnalysisHook, list[Callable]] = {  # type: ignore[type-arg]
+             AnalysisHook.BEFORE_ANALYSIS: [],
+             AnalysisHook.AFTER_ANALYSIS: [],
+             AnalysisHook.ON_ERROR: [],
+         }
+
+     def configure(
+         self, provider: str, model: str, api_key: str | None = None, **kwargs: Any
+     ) -> None:
+         """Configure LLM provider.
+
+         Args:
+             provider: Provider name ('openai', 'anthropic', 'local', 'custom')
+             model: Model identifier
+             api_key: API key for cloud providers
+             **kwargs: Additional configuration options
+
+         Raises:
+             LLMError: If provider is unknown
+         """
+         try:
+             provider_enum = LLMProvider(provider.lower())
+         except ValueError:
+             raise LLMError(f"Unknown provider: {provider}")  # noqa: B904
+
+         self.config = LLMConfig(
+             provider=provider_enum,
+             model=model,
+             api_key=api_key,
+             base_url=kwargs.get("base_url"),
+             privacy_mode=kwargs.get("privacy_mode", provider_enum == LLMProvider.LOCAL),
+             timeout=kwargs.get("timeout", 30.0),
+             max_retries=kwargs.get("max_retries", 3),
+             requests_per_minute=kwargs.get("requests_per_minute", 60),
+         )
+
+         # Reset client to force reinitialization
+         self._client = None
+
+     def _get_client(self) -> LLMClient:
+         """Get or create LLM client.
+
+         Returns:
+             LLM client instance
+         """
+         if self._client is None:
+             self._client = self._create_client()
+         return self._client
+
+     def _create_client(self) -> LLMClient:
+         """Create LLM client based on configuration.
+
+         Returns:
+             LLM client instance
+
+         Raises:
+             LLMError: If client cannot be created
+         """
+         if self.config.provider == LLMProvider.OPENAI:
+             return self._create_openai_client()
+         elif self.config.provider == LLMProvider.ANTHROPIC:
+             return self._create_anthropic_client()
+         elif self.config.provider == LLMProvider.LOCAL:
+             return self._create_local_client()
+         else:
+             raise LLMError(f"Provider not implemented: {self.config.provider.value}")
+
+     def _create_openai_client(self) -> LLMClient:
+         """Create OpenAI client.
+
+         Returns:
+             OpenAI client
+
+         Raises:
+             LLMError: If OpenAI package not available or configuration invalid
+         """
+         try:
+             import openai  # type: ignore[import-not-found]
+         except ImportError:
+             raise LLMError(  # noqa: B904
+                 "OpenAI package not installed. Install with: pip install openai"
+             )
+
+         if not self.config.api_key:
+             raise LLMError("OpenAI API key required")
+
+         if self.config.privacy_mode:
+             raise LLMError("Privacy mode not compatible with OpenAI (cloud provider)")
+
+         return OpenAIClient(self.config)
+
+     def _create_anthropic_client(self) -> LLMClient:
+         """Create Anthropic client.
+
+         Returns:
+             Anthropic client
+
+         Raises:
+             LLMError: If Anthropic package not available or configuration invalid
+         """
+         try:
+             import anthropic  # type: ignore[import-not-found]
+         except ImportError:
+             raise LLMError(  # noqa: B904
+                 "Anthropic package not installed. Install with: pip install anthropic"
+             )
+
+         if not self.config.api_key:
+             raise LLMError("Anthropic API key required")
+
+         if self.config.privacy_mode:
+             raise LLMError("Privacy mode not compatible with Anthropic (cloud provider)")
+
+         return AnthropicClient(self.config)
+
+     def _create_local_client(self) -> LLMClient:
+         """Create local LLM client.
+
+         Returns:
+             Local client (mock/stub for now)
+         """
+         return LocalLLMClient(self.config)
+
+     def register_hook(self, hook: AnalysisHook, callback: Callable) -> None:  # type: ignore[type-arg]
+         """Register callback for analysis hook.
+
+         Args:
+             hook: Hook point
+             callback: Callback function
+         """
+         self._hooks[hook].append(callback)
+
+     def trigger_hook(self, hook: AnalysisHook, *args: Any, **kwargs: Any) -> None:
+         """Trigger all callbacks for a hook.
+
+         Args:
+             hook: Hook point
+             *args: Positional arguments for callbacks
+             **kwargs: Keyword arguments for callbacks
+         """
+         for callback in self._hooks[hook]:
+             try:
+                 callback(*args, **kwargs)
+             except Exception as e:
+                 # Don't let hook errors break analysis
+                 print(f"Warning: Hook {hook.value} failed: {e}")
+
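The hook machinery above is a plain callback registry; exceptions raised inside a callback are caught and printed so a misbehaving hook can never abort an analysis. A registration sketch (illustrative only):

    integration = LLMIntegration()

    def log_question(trace: Any, question: str) -> None:
        print(f"analyzing {type(trace).__name__}: {question}")

    integration.register_hook(AnalysisHook.BEFORE_ANALYSIS, log_question)
    # analyze() below fires BEFORE_ANALYSIS, then AFTER_ANALYSIS on success
    # or ON_ERROR on failure.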
+     def prepare_context(self, trace: Any) -> dict[str, Any]:
+         """Prepare trace metadata for LLM context.
+
+         Args:
+             trace: Trace object
+
+         Returns:
+             Context dictionary with trace metadata
+         """
+         context = {
+             "type": type(trace).__name__,
+         }
+
+         # Extract common metadata
+         if hasattr(trace, "metadata"):
+             meta = trace.metadata
+             context.update(
+                 {
+                     "sample_rate": getattr(meta, "sample_rate", None),  # type: ignore[dict-item]
+                     "num_samples": getattr(meta, "num_samples", None),  # type: ignore[dict-item]
+                     "duration": getattr(meta, "duration", None),  # type: ignore[dict-item]
+                 }
+             )
+
+         # Data statistics (without sending actual data in privacy mode)
+         if hasattr(trace, "data") and not self.config.privacy_mode:
+             import numpy as np
+
+             data = trace.data
+             context["statistics"] = {  # type: ignore[assignment]
+                 "mean": float(np.mean(data)),
+                 "std": float(np.std(data)),
+                 "min": float(np.min(data)),
+                 "max": float(np.max(data)),
+             }
+         elif self.config.privacy_mode:
+             # Compute hash of data for change detection without sending data
+             if hasattr(trace, "data"):
+                 import numpy as np
+
+                 data_bytes = trace.data.tobytes()
+                 context["data_hash"] = hashlib.sha256(data_bytes).hexdigest()[:16]
+
+         return context
+
+     def analyze(self, trace: Any, question: str) -> LLMResponse:
+         """Analyze trace with natural language question.
+
+         Args:
+             trace: Trace object
+             question: Natural language question
+
+         Returns:
+             LLM response with answer and suggestions
+
+         Raises:
+             LLMError: If analysis fails
+         """
+         self.trigger_hook(AnalysisHook.BEFORE_ANALYSIS, trace, question)
+
+         try:
+             client = self._get_client()
+             response = client.analyze(trace, question)
+             self.trigger_hook(AnalysisHook.AFTER_ANALYSIS, trace, response)
+             return response
+
+         except Exception as e:
+             self.trigger_hook(AnalysisHook.ON_ERROR, trace, question, e)
+             raise LLMError(f"LLM analysis failed: {e}")  # noqa: B904
+
+     def explain(self, measurement: Any) -> str:
+         """Explain a measurement result.
+
+         Args:
+             measurement: Measurement result to explain
+
+         Returns:
+             Explanation text
+         """
+         client = self._get_client()
+         return client.explain(measurement)
+
+
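What prepare_context emits in each mode, as a sketch (FakeTrace is a hypothetical stand-in whose .data is a NumPy array): in privacy mode the payload carries only a truncated SHA-256 of the raw bytes, never the samples themselves; otherwise it carries summary statistics.

    import numpy as np

    class FakeTrace:  # hypothetical: just a .data attribute, no metadata
        data = np.array([0.0, 0.5, 1.0])

    private = LLMIntegration()  # defaults: LOCAL provider, privacy_mode=True
    ctx = private.prepare_context(FakeTrace())
    # ctx -> {"type": "FakeTrace", "data_hash": "<16 hex chars>"}
    # With privacy_mode=False, "statistics" (mean/std/min/max) replaces "data_hash".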
+ # Provider client implementations
+
+
+ class OpenAIClient:
+     """OpenAI client implementation.
+
+     Provides:
+     - chat_completion() with retry logic
+     - analyze_trace() for trace analysis
+     - suggest_measurements() for measurement recommendations
+     - Error handling for API failures, rate limits, timeouts
+     - API key from OPENAI_API_KEY environment variable
+     """
+
+     def __init__(self, config: LLMConfig):
+         """Initialize OpenAI client.
+
+         Args:
+             config: LLM configuration
+
+         Raises:
+             LLMError: If openai package not available
+         """
+         self.config = config
+         self.rate_limiter = RateLimiter(config.requests_per_minute)
+
+         # Import and initialize OpenAI client
+         try:
+             import openai  # type: ignore[ignore-without-code]
+
+             self._openai = openai
+         except ImportError:
+             raise LLMError(  # noqa: B904
+                 "OpenAI package not installed. Install with: pip install openai"
+             )
+
+         # Get API key from config or environment
+         api_key = config.api_key or os.environ.get("OPENAI_API_KEY")
+         if not api_key:
+             raise LLMError(
+                 "OpenAI API key required. Set OPENAI_API_KEY environment variable "
+                 "or pass api_key to configure()"
+             )
+
+         # Initialize OpenAI client
+         self.client = self._openai.OpenAI(api_key=api_key, timeout=config.timeout)
+
+     def chat_completion(self, messages: list[dict[str, str]], **kwargs: Any) -> LLMResponse:
+         """Send chat completion request with retry logic.
+
+         Retries rate-limit, timeout, and API errors up to config.max_retries
+         attempts, with exponential backoff on rate limits.
+
+         Args:
+             messages: List of message dicts with 'role' and 'content'
+             **kwargs: Additional parameters for OpenAI API
+
+         Returns:
+             LLM response with answer and metadata
+
+         Raises:
+             LLMError: If API request fails after retries
+         """
+         self.rate_limiter.acquire()
+
+         last_exception = None
+         for attempt in range(self.config.max_retries):
+             try:
+                 response = self.client.chat.completions.create(
+                     model=self.config.model, messages=messages, **kwargs
+                 )
+
+                 # Extract response content
+                 answer = response.choices[0].message.content or ""
+
+                 # Track costs (API-020)
+                 input_tokens = response.usage.prompt_tokens if response.usage else 0
+                 output_tokens = response.usage.completion_tokens if response.usage else 0
+                 estimated_cost = 0.0
+
+                 if self.config.track_costs:
+                     estimated_cost = _global_cost_tracker.record(
+                         response.model, input_tokens, output_tokens
+                     )
+
+                 return LLMResponse(
+                     answer=answer,
+                     confidence=None,  # OpenAI doesn't provide confidence scores
+                     suggested_commands=[],
+                     metadata={
+                         "model": response.model,
+                         "usage": {
+                             "prompt_tokens": input_tokens,
+                             "completion_tokens": output_tokens,
+                             "total_tokens": response.usage.total_tokens if response.usage else 0,
+                         },
+                         "finish_reason": response.choices[0].finish_reason,
+                     },
+                     raw_response={
+                         "id": response.id,
+                         "created": response.created,
+                     },
+                     estimated_cost=estimated_cost,
+                 )
+
+             except self._openai.RateLimitError as e:
+                 last_exception = e
+                 if attempt < self.config.max_retries - 1:
+                     # Exponential backoff for rate limits
+                     wait_time = 2**attempt
+                     time.sleep(wait_time)
+                     continue
+                 raise LLMError(f"OpenAI rate limit exceeded: {e}")  # noqa: B904
+
+             except self._openai.APITimeoutError as e:
+                 last_exception = e
+                 if attempt < self.config.max_retries - 1:
+                     time.sleep(1)
+                     continue
+                 raise LLMError(f"OpenAI request timeout: {e}")  # noqa: B904
+
+             except self._openai.APIError as e:
+                 last_exception = e
+                 if attempt < self.config.max_retries - 1:
+                     time.sleep(1)
+                     continue
+                 raise LLMError(f"OpenAI API error: {e}")  # noqa: B904
+
+             except Exception as e:
+                 last_exception = e
+                 raise LLMError(f"OpenAI request failed: {e}")  # noqa: B904
+
+         raise LLMError(
+             f"OpenAI request failed after {self.config.max_retries} retries: {last_exception}"
+         )
+
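A call sketch for the method above (assumes the openai package is installed and OPENAI_API_KEY is set; extra kwargs such as temperature are forwarded verbatim to the OpenAI API):

    cfg = LLMConfig(provider=LLMProvider.OPENAI, model="gpt-4o-mini", privacy_mode=False)
    client = OpenAIClient(cfg)
    resp = client.chat_completion(
        [{"role": "user", "content": "One sentence: what does an eye diagram show?"}],
        temperature=0.2,
    )
    print(resp.answer, resp.estimated_cost)

On rate limits the retries back off at 1, 2, 4, ... seconds (2**attempt); timeouts and other API errors retry after a flat one-second pause.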
+     def analyze_trace(self, trace: Any, question: str) -> LLMResponse:
+         """Analyze trace with question.
+
+         Send trace summary, get insights.
+
+         Args:
+             trace: Trace object
+             question: Natural language question about the trace
+
+         Returns:
+             LLM response with analysis
+         """
+         # Prepare trace summary
+         trace_summary = self._summarize_trace(trace)
+
+         messages = [
+             {
+                 "role": "system",
+                 "content": (
+                     "You are an expert in signal analysis and oscilloscope data. "
+                     "Analyze the provided trace data and answer questions accurately. "
+                     "Provide specific, actionable insights."
+                 ),
+             },
+             {
+                 "role": "user",
+                 "content": f"Trace Summary:\n{trace_summary}\n\nQuestion: {question}",
+             },
+         ]
+
+         return self.chat_completion(messages)
+
+     def suggest_measurements(self, trace: Any) -> LLMResponse:
+         """Suggest measurements based on trace characteristics.
+
+         Recommend measurements based on trace.
+
+         Args:
+             trace: Trace object
+
+         Returns:
+             LLM response with measurement suggestions
+         """
+         trace_summary = self._summarize_trace(trace)
+
+         messages = [
+             {
+                 "role": "system",
+                 "content": (
+                     "You are an expert in signal analysis. Based on trace characteristics, "
+                     "suggest relevant measurements. Provide 3-5 specific measurement recommendations "
+                     "with brief explanations."
+                 ),
+             },
+             {
+                 "role": "user",
+                 "content": f"Trace Summary:\n{trace_summary}\n\nWhat measurements would be most informative for this trace?",
+             },
+         ]
+
+         response = self.chat_completion(messages)
+
+         # Try to extract suggested commands from the response
+         suggested_commands = self._extract_commands(response.answer)
+         response.suggested_commands = suggested_commands
+
+         return response
+
+     def _summarize_trace(self, trace: Any) -> str:
+         """Create a text summary of trace for LLM context.
+
+         Args:
+             trace: Trace object
+
+         Returns:
+             Text summary of trace characteristics
+         """
+         summary_parts = [f"Trace Type: {type(trace).__name__}"]
+
+         # Extract metadata
+         if hasattr(trace, "metadata"):
+             meta = trace.metadata
+             if hasattr(meta, "sample_rate"):
+                 summary_parts.append(f"Sample Rate: {meta.sample_rate:.2e} Hz")
+             if hasattr(meta, "num_samples"):
+                 summary_parts.append(f"Number of Samples: {meta.num_samples:,}")
+             if hasattr(meta, "duration"):
+                 summary_parts.append(f"Duration: {meta.duration:.6f} s")
+
+         # Data statistics
+         if hasattr(trace, "data"):
+             import numpy as np
+
+             data = trace.data
+             summary_parts.extend(
+                 [
+                     f"Mean: {np.mean(data):.6e}",
+                     f"Std Dev: {np.std(data):.6e}",
+                     f"Min: {np.min(data):.6e}",
+                     f"Max: {np.max(data):.6e}",
+                     f"Peak-to-Peak: {np.ptp(data):.6e}",
+                 ]
+             )
+
+         return "\n".join(summary_parts)
+
+     def _extract_commands(self, text: str) -> list[str]:
+         """Extract suggested Oscura commands from LLM response.
+
+         Args:
+             text: LLM response text
+
+         Returns:
+             List of extracted command strings
+         """
+         commands = []
+         # Look for common measurement names
+         measurement_keywords = [
+             "rise_time",
+             "fall_time",
+             "frequency",
+             "period",
+             "amplitude",
+             "rms",
+             "thd",
+             "snr",
+             "fft",
+             "psd",
+             "peak",
+             "duty_cycle",
+         ]
+
+         text_lower = text.lower()
+         for keyword in measurement_keywords:
+             if keyword in text_lower:
+                 commands.append(f"measure {keyword}")
+
+         return commands
+
+     def query(self, prompt: str, context: dict[str, Any]) -> LLMResponse:
+         """Send query to LLM with context.
+
+         Args:
+             prompt: User prompt
+             context: Analysis context
+
+         Returns:
+             LLM response
+         """
+         context_str = json.dumps(context, indent=2)
+         messages = [
+             {
+                 "role": "system",
+                 "content": "You are a helpful assistant for signal analysis.",
+             },
+             {
+                 "role": "user",
+                 "content": f"Context:\n{context_str}\n\nQuery: {prompt}",
+             },
+         ]
+         return self.chat_completion(messages)
+
+     def analyze(self, trace: Any, question: str) -> LLMResponse:
+         """Analyze trace with natural language question.
+
+         Args:
+             trace: Trace object
+             question: Natural language question
+
+         Returns:
+             Analysis response
+         """
+         return self.analyze_trace(trace, question)
+
+     def explain(self, measurement: Any) -> str:
+         """Explain a measurement result.
+
+         Args:
+             measurement: Measurement result
+
+         Returns:
+             Explanation text
+         """
+         messages = [
+             {
+                 "role": "system",
+                 "content": "You are an expert in signal measurement interpretation. Explain measurement results clearly and concisely.",
+             },
+             {
+                 "role": "user",
+                 "content": f"Explain this measurement result: {measurement}",
+             },
+         ]
+         response = self.chat_completion(messages)
+         return response.answer
+
+
+ class AnthropicClient:
+     """Anthropic client implementation.
+
+     Provides:
+     - chat_completion() with retry logic
+     - analyze_trace() for trace analysis
+     - suggest_measurements() for measurement recommendations
+     - API key from ANTHROPIC_API_KEY environment variable
+     """
+
+     def __init__(self, config: LLMConfig):
+         """Initialize Anthropic client.
+
+         Args:
+             config: LLM configuration
+
+         Raises:
+             LLMError: If anthropic package not available
+         """
+         self.config = config
+         self.rate_limiter = RateLimiter(config.requests_per_minute)
+
+         # Import and initialize Anthropic client
+         try:
+             import anthropic  # type: ignore[ignore-without-code]
+
+             self._anthropic = anthropic
+         except ImportError:
+             raise LLMError(  # noqa: B904
+                 "Anthropic package not installed. Install with: pip install anthropic"
+             )
+
+         # Get API key from config or environment
+         api_key = config.api_key or os.environ.get("ANTHROPIC_API_KEY")
+         if not api_key:
+             raise LLMError(
+                 "Anthropic API key required. Set ANTHROPIC_API_KEY environment variable "
+                 "or pass api_key to configure()"
+             )
+
+         # Initialize Anthropic client
+         self.client = self._anthropic.Anthropic(api_key=api_key, timeout=config.timeout)
+
+     def chat_completion(
+         self, messages: list[dict[str, str]], system: str | None = None, **kwargs: Any
+     ) -> LLMResponse:
+         """Send chat completion request with retry logic.
+
+         Retries rate-limit, timeout, and API errors up to config.max_retries
+         attempts, with exponential backoff on rate limits.
+
+         Args:
+             messages: List of message dicts with 'role' and 'content'
+             system: System prompt (optional)
+             **kwargs: Additional parameters for Anthropic API
+
+         Returns:
+             LLM response with answer and metadata
+
+         Raises:
+             LLMError: If API request fails after retries
+         """
+         self.rate_limiter.acquire()
+
+         # Convert messages format (filter out system messages for Anthropic)
+         user_messages = []
+         system_message = system
+         for msg in messages:
+             if msg["role"] == "system" and not system_message:
+                 system_message = msg["content"]
+             elif msg["role"] in ["user", "assistant"]:
+                 user_messages.append(msg)
+
+         last_exception = None
+         for attempt in range(self.config.max_retries):
+             try:
+                 # Build request parameters
+                 request_params = {
+                     "model": self.config.model,
+                     "messages": user_messages,
+                     "max_tokens": kwargs.get("max_tokens", 1024),
+                 }
+                 if system_message:
+                     request_params["system"] = system_message
+
+                 # Add any additional kwargs
+                 for key in ["temperature", "top_p", "top_k"]:
+                     if key in kwargs:
+                         request_params[key] = kwargs[key]
+
+                 response = self.client.messages.create(**request_params)
+
+                 # Extract response content
+                 answer = ""
+                 for block in response.content:
+                     if hasattr(block, "text"):
+                         answer += block.text
+
+                 # Track costs (API-020)
+                 input_tokens = response.usage.input_tokens
+                 output_tokens = response.usage.output_tokens
+                 estimated_cost = 0.0
+
+                 if self.config.track_costs:
+                     estimated_cost = _global_cost_tracker.record(
+                         response.model, input_tokens, output_tokens
+                     )
+
+                 return LLMResponse(
+                     answer=answer,
+                     confidence=None,  # Anthropic doesn't provide confidence scores
+                     suggested_commands=[],
+                     metadata={
+                         "model": response.model,
+                         "usage": {
+                             "input_tokens": input_tokens,
+                             "output_tokens": output_tokens,
+                         },
+                         "stop_reason": response.stop_reason,
+                     },
+                     raw_response={
+                         "id": response.id,
+                         "type": response.type,
+                     },
+                     estimated_cost=estimated_cost,
+                 )
+
+             except self._anthropic.RateLimitError as e:
+                 last_exception = e
+                 if attempt < self.config.max_retries - 1:
+                     # Exponential backoff for rate limits
+                     wait_time = 2**attempt
+                     time.sleep(wait_time)
+                     continue
+                 raise LLMError(f"Anthropic rate limit exceeded: {e}")  # noqa: B904
+
+             except self._anthropic.APITimeoutError as e:
+                 last_exception = e
+                 if attempt < self.config.max_retries - 1:
+                     time.sleep(1)
+                     continue
+                 raise LLMError(f"Anthropic request timeout: {e}")  # noqa: B904
+
+             except self._anthropic.APIError as e:
+                 last_exception = e
+                 if attempt < self.config.max_retries - 1:
+                     time.sleep(1)
+                     continue
+                 raise LLMError(f"Anthropic API error: {e}")  # noqa: B904
+
+             except Exception as e:
+                 last_exception = e
+                 raise LLMError(f"Anthropic request failed: {e}")  # noqa: B904
+
+         raise LLMError(
+             f"Anthropic request failed after {self.config.max_retries} retries: {last_exception}"
+         )
+
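The conversion loop above lifts a leading OpenAI-style system message into Anthropic's dedicated system parameter, and an explicit system argument takes precedence. Tracing the logic (a sketch, comments only):

    msgs = [
        {"role": "system", "content": "You are a signal-analysis expert."},
        {"role": "user", "content": "Summarize this trace."},
    ]
    # chat_completion(msgs) sends:
    #   system="You are a signal-analysis expert."
    #   messages=[{"role": "user", "content": "Summarize this trace."}]
    # chat_completion(msgs, system="Override.") keeps the explicit system
    # prompt and silently drops the embedded system message, since the loop
    # only lifts it when no system prompt is set yet.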
+     def analyze_trace(self, trace: Any, question: str) -> LLMResponse:
+         """Analyze trace with question.
+
+         Trace analysis with Anthropic.
+
+         Args:
+             trace: Trace object
+             question: Natural language question about the trace
+
+         Returns:
+             LLM response with analysis
+         """
+         # Prepare trace summary
+         trace_summary = self._summarize_trace(trace)
+
+         system_prompt = (
+             "You are an expert in signal analysis and oscilloscope data. "
+             "Analyze the provided trace data and answer questions accurately. "
+             "Provide specific, actionable insights."
+         )
+
+         messages = [
+             {
+                 "role": "user",
+                 "content": f"Trace Summary:\n{trace_summary}\n\nQuestion: {question}",
+             },
+         ]
+
+         return self.chat_completion(messages, system=system_prompt)
+
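
A hypothetical call, assuming `client` is a configured AnthropicClient and `trace` is any object exposing the metadata/data attributes consumed by _summarize_trace below:

    # Hypothetical usage; the names `client` and `trace` are illustrative.
    response = client.analyze_trace(trace, "Is there visible ringing on the rising edges?")
    print(response.answer)          # free-text analysis from the model
    print(response.estimated_cost)  # 0.0 unless config.track_costs is enabled
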
+     def suggest_measurements(self, trace: Any) -> LLMResponse:
+         """Suggest measurements based on trace characteristics.
+
+         Measurement recommendations.
+
+         Args:
+             trace: Trace object
+
+         Returns:
+             LLM response with measurement suggestions
+         """
+         trace_summary = self._summarize_trace(trace)
+
+         system_prompt = (
+             "You are an expert in signal analysis. Based on trace characteristics, "
+             "suggest relevant measurements. Provide 3-5 specific measurement recommendations "
+             "with brief explanations."
+         )
+
+         messages = [
+             {
+                 "role": "user",
+                 "content": f"Trace Summary:\n{trace_summary}\n\nWhat measurements would be most informative for this trace?",
+             },
+         ]
+
+         response = self.chat_completion(messages, system=system_prompt)
+
+         # Try to extract suggested commands from the response
+         suggested_commands = self._extract_commands(response.answer)
+         response.suggested_commands = suggested_commands
+
+         return response
+
+     def _summarize_trace(self, trace: Any) -> str:
+         """Create a text summary of trace for LLM context.
+
+         Args:
+             trace: Trace object
+
+         Returns:
+             Text summary of trace characteristics
+         """
+         summary_parts = [f"Trace Type: {type(trace).__name__}"]
+
+         # Extract metadata
+         if hasattr(trace, "metadata"):
+             meta = trace.metadata
+             if hasattr(meta, "sample_rate"):
+                 summary_parts.append(f"Sample Rate: {meta.sample_rate:.2e} Hz")
+             if hasattr(meta, "num_samples"):
+                 summary_parts.append(f"Number of Samples: {meta.num_samples:,}")
+             if hasattr(meta, "duration"):
+                 summary_parts.append(f"Duration: {meta.duration:.6f} s")
+
+         # Data statistics
+         if hasattr(trace, "data"):
+             import numpy as np
+
+             data = trace.data
+             summary_parts.extend(
+                 [
+                     f"Mean: {np.mean(data):.6e}",
+                     f"Std Dev: {np.std(data):.6e}",
+                     f"Min: {np.min(data):.6e}",
+                     f"Max: {np.max(data):.6e}",
+                     f"Peak-to-Peak: {np.ptp(data):.6e}",
+                 ]
+             )
+
+         return "\n".join(summary_parts)
+
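
The summary is plain `key: value` text, one line per attribute the trace actually exposes. For a hypothetical 1 ms capture at 1 GS/s it would read roughly:

    # Illustrative _summarize_trace output (the trace type name is hypothetical):
    # Trace Type: WaveformTrace
    # Sample Rate: 1.00e+09 Hz
    # Number of Samples: 1,000,000
    # Duration: 0.001000 s
    # Mean: 1.650000e+00
    # Peak-to-Peak: 3.300000e+00
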
+     def _extract_commands(self, text: str) -> list[str]:
+         """Extract suggested Oscura commands from LLM response.
+
+         Args:
+             text: LLM response text
+
+         Returns:
+             List of extracted command strings
+         """
+         commands = []
+         # Look for common measurement names
+         measurement_keywords = [
+             "rise_time",
+             "fall_time",
+             "frequency",
+             "period",
+             "amplitude",
+             "rms",
+             "thd",
+             "snr",
+             "fft",
+             "psd",
+             "peak",
+             "duty_cycle",
+         ]
+
+         text_lower = text.lower()
+         for keyword in measurement_keywords:
+             if keyword in text_lower:
+                 commands.append(f"measure {keyword}")
+
+         return commands
+
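
Because the extraction is a plain substring scan over the lowercased answer, any mention of a keyword yields a command, regardless of context:

    # Standalone sketch of the keyword scan in _extract_commands.
    text = "Measure the rise_time first, then check RMS and THD."
    keywords = ["rise_time", "rms", "thd", "frequency"]
    found = [k for k in keywords if k in text.lower()]
    # found == ['rise_time', 'rms', 'thd'] -> "measure rise_time", "measure rms", "measure thd"
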
+     def query(self, prompt: str, context: dict[str, Any]) -> LLMResponse:
+         """Send query to LLM with context.
+
+         Args:
+             prompt: User prompt
+             context: Analysis context
+
+         Returns:
+             LLM response
+         """
+         context_str = json.dumps(context, indent=2)
+         system_prompt = "You are a helpful assistant for signal analysis."
+         messages = [
+             {
+                 "role": "user",
+                 "content": f"Context:\n{context_str}\n\nQuery: {prompt}",
+             },
+         ]
+         return self.chat_completion(messages, system=system_prompt)
+
+     def analyze(self, trace: Any, question: str) -> LLMResponse:
+         """Analyze trace with natural language question.
+
+         Args:
+             trace: Trace object
+             question: Natural language question
+
+         Returns:
+             Analysis response
+         """
+         return self.analyze_trace(trace, question)
+
+     def explain(self, measurement: Any) -> str:
+         """Explain a measurement result.
+
+         Args:
+             measurement: Measurement result
+
+         Returns:
+             Explanation text
+         """
+         system_prompt = "You are an expert in signal measurement interpretation. Explain measurement results clearly and concisely."
+         messages = [
+             {
+                 "role": "user",
+                 "content": f"Explain this measurement result: {measurement}",
+             },
+         ]
+         response = self.chat_completion(messages, system=system_prompt)
+         return response.answer
+
+
+ class LocalLLMClient:
+     """Local LLM client (mock implementation)."""
+
+     def __init__(self, config: LLMConfig):
+         self.config = config
+
+     def query(self, prompt: str, context: dict[str, Any]) -> LLMResponse:
+         """Mock query implementation."""
+         return LLMResponse(
+             answer="Local LLM not configured. This is a mock response.",
+             confidence=0.0,
+             suggested_commands=[],
+             metadata={"mock": True},
+         )
+
+     def analyze(self, trace: Any, question: str) -> LLMResponse:
+         """Mock analysis implementation."""
+         # Simple heuristic-based responses
+         question_lower = question.lower()
+
+         if "protocol" in question_lower:
+             return LLMResponse(
+                 answer="Unable to determine protocol without LLM. Try manual inspection.",
+                 confidence=0.0,
+                 suggested_commands=[
+                     "measure frequency",
+                     "plot $trace",
+                 ],
+             )
+
+         return LLMResponse(
+             answer=f"Local LLM analysis not available. Question was: {question}",
+             confidence=0.0,
+             suggested_commands=["measure all"],
+         )
+
+     def explain(self, measurement: Any) -> str:
+         """Mock explanation implementation."""
+         return f"Measurement result: {measurement}. Local LLM explanation not available."
+
+
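
Because the local client is a self-contained mock, it can be exercised without an API key or network access. A quick check of its canned behavior (assuming LLMConfig's remaining fields have defaults):

    # The local client returns canned responses; no key required.
    local = LocalLLMClient(LLMConfig(provider=LLMProvider.LOCAL, model="default"))
    resp = local.query("What protocol is this?", {})
    assert resp.metadata == {"mock": True} and resp.confidence == 0.0
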
+ def get_provider(name: str, **config_kwargs: Any) -> LLMClient:
+     """Get LLM provider by name with unified interface.
+
+     get_provider(name: str) factory function.
+
+     Args:
+         name: Provider name ('openai', 'anthropic', 'local')
+         **config_kwargs: Configuration parameters for the provider
+
+     Returns:
+         LLM client instance
+
+     Raises:
+         LLMError: If provider unknown or configuration invalid
+
+     Examples:
+         >>> # Get OpenAI provider
+         >>> client = get_provider('openai', model='gpt-4', api_key='...')
+         >>> response = client.analyze(trace, "What is the frequency?")
+         >>>
+         >>> # Get Anthropic provider with rate limiting
+         >>> client = get_provider('anthropic', model='claude-3-opus-20240229',
+         ...                       requests_per_minute=30)
+         >>> response = client.suggest_measurements(trace)
+         >>>
+         >>> # Get local provider (no API key needed)
+         >>> client = get_provider('local')
+         >>> response = client.analyze(trace, "Analyze this signal")
+     """
+     try:
+         provider_enum = LLMProvider(name.lower())
+     except ValueError:
+         raise LLMError(  # noqa: B904
+             f"Unknown provider: {name}. Available: {[p.value for p in LLMProvider]}"
+         )
+
+     # Build config with sensible defaults
+     config = LLMConfig(
+         provider=provider_enum,
+         model=config_kwargs.get("model", "default"),
+         api_key=config_kwargs.get("api_key"),
+         base_url=config_kwargs.get("base_url"),
+         privacy_mode=config_kwargs.get("privacy_mode", provider_enum == LLMProvider.LOCAL),
+         timeout=config_kwargs.get("timeout", 30.0),
+         max_retries=config_kwargs.get("max_retries", 3),
+         requests_per_minute=config_kwargs.get("requests_per_minute", 60),
+     )
+
+     # Create appropriate client with graceful degradation
+     try:
+         if provider_enum == LLMProvider.OPENAI:
+             return OpenAIClient(config)
+         elif provider_enum == LLMProvider.ANTHROPIC:
+             return AnthropicClient(config)
+         elif provider_enum == LLMProvider.LOCAL:
+             return LocalLLMClient(config)
+         else:
+             # Graceful degradation
+             raise LLMError(
+                 f"Provider {name} not yet implemented. "
+                 "Falling back to local provider is recommended."
+             )
+     except ImportError as e:
+         # Graceful degradation when API unavailable
+         raise LLMError(  # noqa: B904
+             f"Provider {name} unavailable: {e}. "
+             "Install the required package or use 'local' provider."
+         )
+
+
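
Note that privacy_mode defaults to True only when the provider resolves to LOCAL; remote providers stay at False unless it is passed explicitly:

    # privacy_mode defaults per provider, following get_provider above.
    local_client = get_provider("local")  # config.privacy_mode == True
    # get_provider("openai", api_key="...")                     -> privacy_mode False
    # get_provider("openai", api_key="...", privacy_mode=True)  -> opt in explicitly
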
1423
+ # Global LLM integration instance
1424
+ _global_llm: LLMIntegration | None = None
1425
+
1426
+
1427
+ def get_llm() -> LLMIntegration:
1428
+ """Get global LLM integration instance.
1429
+
1430
+ Returns:
1431
+ Global LLM integration instance
1432
+ """
1433
+ global _global_llm
1434
+ if _global_llm is None:
1435
+ _global_llm = LLMIntegration()
1436
+ return _global_llm
1437
+
1438
+
1439
+ def configure(provider: str, model: str, **kwargs: Any) -> None:
1440
+ """Configure global LLM integration.
1441
+
1442
+ Args:
1443
+ provider: Provider name
1444
+ model: Model identifier
1445
+ **kwargs: Additional configuration
1446
+ """
1447
+ llm = get_llm()
1448
+ llm.configure(provider, model, **kwargs)
1449
+
1450
+
1451
+ def analyze(trace: Any, question: str) -> LLMResponse:
1452
+ """Analyze trace with LLM.
1453
+
1454
+ Args:
1455
+ trace: Trace object
1456
+ question: Natural language question
1457
+
1458
+ Returns:
1459
+ LLM response
1460
+ """
1461
+ llm = get_llm()
1462
+ return llm.analyze(trace, question)
1463
+
1464
+
1465
+ def explain(measurement: Any) -> str:
1466
+ """Explain measurement with LLM.
1467
+
1468
+ Args:
1469
+ measurement: Measurement result
1470
+
1471
+ Returns:
1472
+ Explanation text
1473
+ """
1474
+ llm = get_llm()
1475
+ return llm.explain(measurement)
1476
+
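
The module-level helpers all route through the lazily created _global_llm singleton, so a configure() call typically precedes analyze() and explain(). A plausible sequence (provider, model, trace, and measurement values are all illustrative):

    # Illustrative flow through the global LLMIntegration singleton.
    configure("anthropic", "claude-3-opus-20240229")
    resp = analyze(trace, "What is the fundamental frequency?")
    text = explain({"frequency_hz": 1.2e6, "thd_percent": 0.8})
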
+
+ # ==============================================================================
+ # Provider selection, failover, and availability helpers
+ # ==============================================================================
+
+
+ def get_client(provider: str | None = None, **config_kwargs: Any) -> LLMClient:
+     """Get LLM client with optional auto-selection.
+
+     get_client(provider: str) -> LLMClient.
+     Alias for get_provider() with auto-selection support.
+
+     Args:
+         provider: Provider name ('openai', 'anthropic', 'local'), or None for auto-select
+         **config_kwargs: Configuration parameters for the provider
+
+     Returns:
+         LLM client instance
+
+     Examples:
+         >>> # Auto-select based on available API keys
+         >>> client = get_client()
+         >>>
+         >>> # Explicit provider selection
+         >>> client = get_client("openai", model="gpt-4")
+     """
+     if provider is not None:
+         return get_provider(provider, **config_kwargs)
+
+     # Auto-selection: try providers in preference order
+     return get_client_auto(**config_kwargs)
+
+
+ def get_client_auto(**config_kwargs: Any) -> LLMClient:
+     """Automatically select an available LLM provider.
+
+     Automatic provider selection based on availability.
+
+     Checks for API keys in environment and returns first available provider:
+     1. OpenAI (if OPENAI_API_KEY set)
+     2. Anthropic (if ANTHROPIC_API_KEY set)
+     3. Local (fallback, always available)
+
+     Args:
+         **config_kwargs: Configuration parameters for the provider
+
+     Returns:
+         LLM client instance for the first available provider
+
+     Examples:
+         >>> client = get_client_auto(model="gpt-4")  # Uses OpenAI if key available
+     """
+     # Check for OpenAI
+     if os.environ.get("OPENAI_API_KEY"):
+         try:
+             return get_provider("openai", **config_kwargs)
+         except LLMError:
+             pass  # Fall through to next provider
+
+     # Check for Anthropic
+     if os.environ.get("ANTHROPIC_API_KEY"):
+         try:
+             return get_provider("anthropic", **config_kwargs)
+         except LLMError:
+             pass  # Fall through to next provider
+
+     # Default to local
+     return get_provider("local", **config_kwargs)
+
+
+ def get_client_with_failover(
+     providers: list[str] | None = None, **config_kwargs: Any
+ ) -> "FailoverLLMClient":
+     """Get LLM client with automatic failover between providers.
+
+     Failover logic (try OpenAI, fallback to Anthropic).
+
+     Args:
+         providers: List of provider names in preference order.
+             Default: ["openai", "anthropic", "local"]
+         **config_kwargs: Configuration parameters for providers
+
+     Returns:
+         FailoverLLMClient that tries providers in order
+
+     Examples:
+         >>> client = get_client_with_failover(
+         ...     providers=["openai", "anthropic"],
+         ...     model="gpt-4"
+         ... )
+         >>> response = client.chat_completion("Hello")  # Tries OpenAI, then Anthropic
+     """
+     if providers is None:
+         providers = ["openai", "anthropic", "local"]
+
+     return FailoverLLMClient(providers, **config_kwargs)
+
+
+ class FailoverLLMClient:
+     """LLM client wrapper with automatic failover between providers.
+
+     Failover logic for provider availability.
+
+     Attempts each provider in order until one succeeds. Useful for
+     handling API outages or rate limiting gracefully.
+     """
+
+     def __init__(self, providers: list[str], **config_kwargs: Any):
+         """Initialize failover client.
+
+         Args:
+             providers: List of provider names in preference order
+             **config_kwargs: Configuration parameters for providers
+         """
+         self.providers = providers
+         self.config_kwargs = config_kwargs
+         self._clients: dict[str, LLMClient] = {}
+         self._last_successful_provider: str | None = None
+
+     def _get_or_create_client(self, provider: str) -> LLMClient | None:
+         """Get or create client for provider.
+
+         Args:
+             provider: Provider name
+
+         Returns:
+             LLM client or None if unavailable
+         """
+         if provider not in self._clients:
+             try:
+                 self._clients[provider] = get_provider(provider, **self.config_kwargs)
+             except LLMError:
+                 return None
+         return self._clients.get(provider)
+
+     def _try_providers(self, operation: Callable[[LLMClient], Any]) -> Any:
+         """Try operation on each provider until one succeeds.
+
+         Args:
+             operation: Callable that takes a client and returns result
+
+         Returns:
+             Result from first successful provider
+
+         Raises:
+             LLMError: If all providers fail
+         """
+         errors = []
+
+         # Try last successful provider first for efficiency
+         if self._last_successful_provider:
+             reordered = [self._last_successful_provider] + [
+                 p for p in self.providers if p != self._last_successful_provider
+             ]
+         else:
+             reordered = self.providers
+
+         for provider in reordered:
+             client = self._get_or_create_client(provider)
+             if client is None:
+                 errors.append(f"{provider}: not available")
+                 continue
+
+             try:
+                 result = operation(client)
+                 self._last_successful_provider = provider
+                 return result
+             except Exception as e:
+                 errors.append(f"{provider}: {e}")
+                 continue
+
+         raise LLMError(f"All providers failed: {'; '.join(errors)}")
+
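
The reordering means a healthy provider keeps serving subsequent calls without re-probing the rest of the list; the logic in isolation:

    # Isolated sketch of the reordering step in _try_providers above.
    providers = ["openai", "anthropic", "local"]
    last_successful = "anthropic"
    reordered = [last_successful] + [p for p in providers if p != last_successful]
    # reordered == ['anthropic', 'openai', 'local']
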
+     def chat_completion(
+         self,
+         prompt: str,
+         model: str | None = None,
+         **kwargs: Any,
+     ) -> str:
+         """Send chat completion with failover.
+
+         Args:
+             prompt: User prompt
+             model: Model name (optional, uses config default)
+             **kwargs: Additional parameters
+
+         Returns:
+             Response text from first successful provider
+         """
+
+         def operation(client: LLMClient) -> str:
+             if hasattr(client, "chat_completion"):
+                 messages = [{"role": "user", "content": prompt}]
+                 response = client.chat_completion(messages, **kwargs)  # type: ignore[ignore-without-code]
+                 return response.answer  # type: ignore[no-any-return]
+             else:
+                 response = client.query(prompt, {})
+                 return response.answer
+
+         return self._try_providers(operation)  # type: ignore[no-any-return]
+
+     def analyze_trace(self, trace_data: dict[str, Any]) -> dict[str, Any]:
+         """Analyze trace data with failover.
+
+         Args:
+             trace_data: Dictionary containing trace information
+
+         Returns:
+             Analysis results dictionary
+         """
+
+         def operation(client: LLMClient) -> dict[str, Any]:
+             # Create mock trace object from dict
+             class DictTrace:
+                 def __init__(self, data: dict[str, Any]):
+                     self._data = data
+                     for k, v in data.items():
+                         setattr(self, k, v)
+
+             trace = DictTrace(trace_data)
+
+             if hasattr(client, "analyze_trace"):
+                 response = client.analyze_trace(trace, "Analyze this signal")  # type: ignore[ignore-without-code]
+             else:
+                 response = client.analyze(trace, "Analyze this signal")
+
+             return {
+                 "answer": response.answer,
+                 "suggested_commands": response.suggested_commands,
+                 "metadata": response.metadata,
+             }
+
+         return self._try_providers(operation)  # type: ignore[no-any-return]
+
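
The inline DictTrace adapter simply promotes dictionary keys to attributes so the dict-based failover API can reuse the object-based client methods; the same trick standalone:

    # Standalone equivalent of the DictTrace adapter above.
    class DictTrace:
        def __init__(self, data: dict) -> None:
            for k, v in data.items():
                setattr(self, k, v)

    t = DictTrace({"sample_rate": 1e9, "num_samples": 1_000_000})
    assert t.sample_rate == 1e9
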
+     def suggest_measurements(self, signal_characteristics: dict[str, Any]) -> list[str]:
+         """Suggest measurements based on signal characteristics.
+
+         Args:
+             signal_characteristics: Dictionary describing the signal
+
+         Returns:
+             List of suggested measurement names
+         """
+
+         def operation(client: LLMClient) -> list[str]:
+             # Create mock trace from characteristics
+             class CharTrace:
+                 def __init__(self, chars: dict[str, Any]):
+                     self.metadata = type("Meta", (), chars)()
+                     self.data = None
+
+             trace = CharTrace(signal_characteristics)
+
+             if hasattr(client, "suggest_measurements"):
+                 response = client.suggest_measurements(trace)  # type: ignore[ignore-without-code]
+             else:
+                 response = client.analyze(trace, "What measurements should I perform?")
+
+             return response.suggested_commands  # type: ignore[no-any-return]
+
+         return self._try_providers(operation)  # type: ignore[no-any-return]
+
+     def query(self, prompt: str, context: dict[str, Any]) -> LLMResponse:
+         """Send query with failover.
+
+         Args:
+             prompt: User prompt
+             context: Analysis context
+
+         Returns:
+             LLM response
+         """
+         return self._try_providers(lambda c: c.query(prompt, context))  # type: ignore[no-any-return]
+
+     def analyze(self, trace: Any, question: str) -> LLMResponse:
+         """Analyze trace with failover.
+
+         Args:
+             trace: Trace object
+             question: Natural language question
+
+         Returns:
+             Analysis response
+         """
+         return self._try_providers(lambda c: c.analyze(trace, question))  # type: ignore[no-any-return]
+
+     def explain(self, measurement: Any) -> str:
+         """Explain measurement with failover.
+
+         Args:
+             measurement: Measurement result
+
+         Returns:
+             Explanation text
+         """
+         return self._try_providers(lambda c: c.explain(measurement))  # type: ignore[no-any-return]
+
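
A hypothetical end-to-end use of the failover wrapper; the provider list and model name are illustrative, and LLMError surfaces only after every provider has failed:

    # Illustrative failover usage; the model name is an assumption, not a default.
    client = FailoverLLMClient(["anthropic", "local"], model="claude-3-opus-20240229")
    try:
        print(client.chat_completion("Summarize UART framing in one sentence."))
    except LLMError as e:
        print(f"All providers failed: {e}")
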
+
+ def is_provider_available(provider: str) -> bool:
+     """Check if a provider is available (API key set, package installed).
+
+     Check provider availability.
+
+     Args:
+         provider: Provider name to check
+
+     Returns:
+         True if provider can be initialized
+
+     Examples:
+         >>> if is_provider_available("openai"):
+         ...     client = get_client("openai")
+     """
+     if provider == "local":
+         return True
+
+     if provider == "openai":
+         if not os.environ.get("OPENAI_API_KEY"):
+             return False
+         try:
+             import openai  # type: ignore[ignore-without-code]
+
+             return True
+         except ImportError:
+             return False
+
+     if provider == "anthropic":
+         if not os.environ.get("ANTHROPIC_API_KEY"):
+             return False
+         try:
+             import anthropic  # type: ignore[ignore-without-code]
+
+             return True
+         except ImportError:
+             return False
+
+     return False
+
+
+ def list_available_providers() -> list[str]:
+     """List all currently available LLM providers.
+
+     Discover available providers.
+
+     Returns:
+         List of provider names that can be used
+
+     Examples:
+         >>> providers = list_available_providers()
+         >>> print(providers)  # ['openai', 'local'] if OpenAI key is set
+     """
+     return [provider.value for provider in LLMProvider if is_provider_available(provider.value)]
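
Putting the availability helpers together, a setup routine might restrict failover to providers that can actually initialize (the context values are illustrative):

    # Fail over only across providers that are usable right now.
    usable = list_available_providers()  # e.g. ['anthropic', 'local']
    client = get_client_with_failover(providers=usable)
    response = client.query("Any anomalies in this signal?", {"rms": 0.52, "thd_percent": 1.3})
    print(response.answer)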