cr-proc 0.1.7-py3-none-any.whl → 0.1.8-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
code_recorder_processor/api/verify.py

@@ -2,6 +2,13 @@ from typing import Any
  from datetime import datetime
  import difflib

+ # ============================================================================
+ # Constants for detection thresholds
+ # ============================================================================
+ MIN_WHITELIST_SIZE = 10 # Minimum fragment size to add to whitelist
+ MIN_MULTILINE_SIZE = 20 # Minimum size for multiline external paste detection
+ MIN_AUTOCOMPLETE_SIZE = 10 # Minimum size for autocomplete detection
+ MIN_RAPID_PASTE_CHARS = 5 # Minimum chars for a "paste" in rapid detection

  def _normalize_newlines(text: str) -> str:
  """Normalize CRLF to LF to avoid offset and diff noise."""
@@ -152,13 +159,13 @@ def _build_document_states(jsonData: tuple[dict[str, Any], ...]) -> tuple[list[s

  # Build whitelist of all content fragments seen
  # Add both old and new fragments to whitelist for comprehensive coverage
- if len(old_frag) > 10: # Ignore tiny fragments
+ if len(old_frag) > MIN_WHITELIST_SIZE:
  content_whitelist.add(old_frag)
- if len(new_frag) > 10:
+ if len(new_frag) > MIN_WHITELIST_SIZE:
  content_whitelist.add(new_frag)

  # Also add the full document state to whitelist
- if len(current_state) > 10:
+ if len(current_state) > MIN_WHITELIST_SIZE:
  content_whitelist.add(current_state)

  return document_states, content_whitelist
@@ -191,65 +198,74 @@ def _detect_multiline_external_pastes(
  """
  suspicious_events = []

+ # Build whitelist incrementally to only include content from BEFORE each event
+ past_whitelist = set()
+
  for idx, event in enumerate(jsonData):
  old_frag = _normalize_newlines(event.get("oldFragment", ""))
  new_frag = _normalize_newlines(event.get("newFragment", ""))

  # Skip if no actual change
  if new_frag == old_frag or new_frag.strip() == "":
- continue
-
+ pass # Still add to whitelist below
  # Only check multi-line content (more than 2 lines means at least 2 actual lines)
- new_lines = new_frag.split("\n")
- if len(new_lines) <= 2: # Single line or line + empty
- continue
-
- # Check if the new content already existed in the document at any prior point
- is_internal_copy = False
-
- # Check against document state BEFORE this event
- if idx > 0:
- prior_state = document_states[idx - 1]
- if new_frag in prior_state:
- is_internal_copy = True
-
- # Also check against whitelist of all content seen
- if not is_internal_copy:
- for hist_content in content_whitelist:
- # Ignore tiny fragments
- if len(hist_content) < 20:
- continue
-
- # Require substantial overlap in size to count as an internal copy
- similar_length = (
- len(hist_content) >= 0.8 * len(new_frag)
- and len(hist_content) <= 1.25 * len(new_frag)
- )
+ elif len(new_frag.split("\n")) > 2:
+ new_lines = new_frag.split("\n")

- if new_frag == hist_content:
- is_internal_copy = True
- break
+ # Check if the new content already existed in the document at any prior point
+ is_internal_copy = False

- if new_frag in hist_content and similar_length:
+ # Check against document state BEFORE this event
+ if idx > 0:
+ prior_state = document_states[idx - 1]
+ if new_frag in prior_state:
  is_internal_copy = True
- break

- if hist_content in new_frag and similar_length:
- is_internal_copy = True
- break
+ # Also check against whitelist of content from BEFORE this event
+ if not is_internal_copy:
+ for hist_content in past_whitelist:
+ # Ignore tiny fragments - multiline external pastes should be significant
+ if len(hist_content) < MIN_MULTILINE_SIZE:
+ continue
+
+ # Require substantial overlap in size to count as an internal copy
+ similar_length = (
+ len(hist_content) >= 0.8 * len(new_frag)
+ and len(hist_content) <= 1.25 * len(new_frag)
+ )
+
+ if new_frag == hist_content:
+ is_internal_copy = True
+ break
+
+ if new_frag in hist_content and similar_length:
+ is_internal_copy = True
+ break
+
+ if hist_content in new_frag and similar_length:
+ is_internal_copy = True
+ break
+
+ # Also check if it's in the old fragment (internal move/copy)
+ if not is_internal_copy and old_frag and (new_frag in old_frag or old_frag in new_frag):
+ is_internal_copy = True

- # Also check if it's in the old fragment (internal move/copy)
- if not is_internal_copy and old_frag and (new_frag in old_frag or old_frag in new_frag):
- is_internal_copy = True
+ if not is_internal_copy:
+ suspicious_events.append({
+ "event_index": idx,
+ "line_count": len(new_lines),
+ "char_count": len(new_frag),
+ "reason": "multi-line external paste",
+ "newFragment": new_frag
+ })

- if not is_internal_copy:
- suspicious_events.append({
- "event_index": idx,
- "line_count": len(new_lines),
- "char_count": len(new_frag),
- "reason": "multi-line external paste",
- "newFragment": new_frag
- })
+ # Add current event's content to whitelist for future events
+ if len(old_frag) > MIN_MULTILINE_SIZE:
+ past_whitelist.add(old_frag)
+ if len(new_frag) > MIN_MULTILINE_SIZE:
+ past_whitelist.add(new_frag)
+ if idx > 0 and len(document_states[idx - 1]) > MIN_MULTILINE_SIZE:
+ past_whitelist.add(document_states[idx - 1])

  return suspicious_events

@@ -281,7 +297,7 @@ def _detect_rapid_paste_sequences(jsonData: tuple[dict[str, Any], ...]) -> list[
  new_lines = new_frag.split("\n")
  if len(new_lines) == 2:
  # Heuristic: if it's more than a few characters, it might be pasted
- if len(new_frag.strip()) > 5:
+ if len(new_frag.strip()) > MIN_RAPID_PASTE_CHARS:
  one_line_pastes.append({
  "event_index": idx,
  "timestamp": timestamp,
@@ -385,9 +401,13 @@ def _detect_fullline_autocomplete(
  """
  suspicious_events = []

+ # Build whitelist incrementally to only include content from BEFORE each event
+ past_whitelist = set()
+
  for idx, event in enumerate(jsonData):
  # Skip if already flagged by another detector
  if idx in excluded_indices:
+ past_whitelist_update(idx, event, document_states, past_whitelist)
  continue

  old_frag = _normalize_newlines(event.get("oldFragment", ""))
@@ -395,6 +415,7 @@ def _detect_fullline_autocomplete(

  # Skip first event (template) and no-change events
  if idx == 0 or new_frag == old_frag:
+ past_whitelist_update(idx, event, document_states, past_whitelist)
  continue

  old_len = len(old_frag)
@@ -403,6 +424,7 @@ def _detect_fullline_autocomplete(
  # At keystroke level, oldFragment is typically empty for insertions
  # Allow up to 3 chars for prefix-based triggers (e.g., "de" -> "def")
  if old_len > 3:
+ past_whitelist_update(idx, event, document_states, past_whitelist)
  continue

  # Check line count - we care about complete statements
@@ -417,10 +439,12 @@ def _detect_fullline_autocomplete(

  if not (is_single_line or is_multi_line):
  # Shouldn't happen, but skip if malformed
+ past_whitelist_update(idx, event, document_states, past_whitelist)
  continue

  # The new fragment should not be just whitespace
  if not new_frag.strip():
+ past_whitelist_update(idx, event, document_states, past_whitelist)
  continue

  # Check if the new fragment contains code structure indicators
@@ -443,21 +467,25 @@ def _detect_fullline_autocomplete(

  if not has_complete_statement:
  # No complete statement - skip basic identifier completion
+ past_whitelist_update(idx, event, document_states, past_whitelist)
  continue

  # Minimum size for meaningful completion
- if new_len < 10:
+ if new_len < MIN_AUTOCOMPLETE_SIZE:
+ past_whitelist_update(idx, event, document_states, past_whitelist)
  continue

  # For multi-line: maximum size to distinguish from external pastes
  # External pastes are typically much larger (100+ chars)
  # Multi-line completions are usually 20-300 chars for a small function/block
  if is_multi_line and new_len > 300:
+ past_whitelist_update(idx, event, document_states, past_whitelist)
  continue

  # For single-line: could be larger due to chained methods or long statements
  # but cap at 200 chars to avoid flagging user-typed long lines
  if is_single_line and new_len > 200:
+ past_whitelist_update(idx, event, document_states, past_whitelist)
  continue

  # Check if this content already existed in the document state BEFORE this event
@@ -468,6 +496,28 @@ def _detect_fullline_autocomplete(
  if new_frag in prior_state:
  is_internal_copy = True

+ # Also check against whitelist of content from BEFORE this event
+ if not is_internal_copy:
+ for hist_content in past_whitelist:
+ # Ignore tiny fragments
+ if len(hist_content) < MIN_AUTOCOMPLETE_SIZE:
+ continue
+
+ # Check for exact match or significant overlap
+ if new_frag == hist_content:
+ is_internal_copy = True
+ break
+
+ # Check for substring matches with similar length
+ similar_length = (
+ len(hist_content) >= 0.8 * len(new_frag)
+ and len(hist_content) <= 1.25 * len(new_frag)
+ )
+
+ if (new_frag in hist_content or hist_content in new_frag) and similar_length:
+ is_internal_copy = True
+ break
+
  if not is_internal_copy:
  line_desc = "line" if is_single_line else "lines"
  suspicious_events.append({
@@ -478,9 +528,30 @@ def _detect_fullline_autocomplete(
  "newFragment": new_frag,
  })

+ # Add current event's content to whitelist for future events
+ past_whitelist_update(idx, event, document_states, past_whitelist)
+
  return suspicious_events


+ def past_whitelist_update(
+ idx: int,
+ event: dict[str, Any],
+ document_states: list[str],
+ past_whitelist: set[str]
+ ) -> None:
+ """Helper to update the past_whitelist with content from current event."""
+ old_frag = _normalize_newlines(event.get("oldFragment", ""))
+ new_frag = _normalize_newlines(event.get("newFragment", ""))
+
+ if len(old_frag) > MIN_AUTOCOMPLETE_SIZE:
+ past_whitelist.add(old_frag)
+ if len(new_frag) > MIN_AUTOCOMPLETE_SIZE:
+ past_whitelist.add(new_frag)
+ if idx < len(document_states) and len(document_states[idx]) > MIN_AUTOCOMPLETE_SIZE:
+ past_whitelist.add(document_states[idx])
+
+
  def detect_external_copypaste(jsonData: tuple[dict[str, Any], ...]) -> list[dict[str, Any]]:
  """
  Detect copy-paste events from external sources and AI-assisted coding patterns.
code_recorder_processor/cli.py

@@ -1,6 +1,8 @@
  import argparse
  import json
+ import os
  import sys
+ import time
  from datetime import datetime
  from pathlib import Path
  from typing import Any
@@ -268,6 +270,8 @@ def write_json_output(
  document: str,
  time_info: dict[str, Any] | None,
  suspicious_events: list[dict[str, Any]],
+ reconstructed_code: str,
+ verified: bool,
  ) -> None:
  """
  Write verification results to JSON file.
@@ -282,6 +286,10 @@ def write_json_output(
  Time information from verification
  suspicious_events : list[dict[str, Any]]
  List of suspicious events detected
+ reconstructed_code : str
+ The reconstructed file content
+ verified : bool
+ Whether the file passed verification

  Raises
  ------
@@ -290,8 +298,10 @@
  """
  results = {
  "document": document,
+ "verified": verified,
  "time_info": time_info,
  "suspicious_events": suspicious_events,
+ "reconstructed_code": reconstructed_code,
  }

  output_path.parent.mkdir(parents=True, exist_ok=True)
@@ -300,6 +310,110 @@
  print(f"Results written to {output_path}", file=sys.stderr)


+ def playback_recording(
+ json_data: tuple[dict[str, Any], ...],
+ document: str,
+ template: str,
+ speed: float = 1.0,
+ ) -> None:
+ """
+ Play back a recording, showing the code evolving in real-time.
+
+ Parameters
+ ----------
+ json_data : tuple[dict[str, Any], ...]
+ The recording events
+ document : str
+ The document to play back
+ template : str
+ The initial template content
+ speed : float
+ Playback speed multiplier (1.0 = real-time, 2.0 = 2x speed, 0.5 = half speed)
+ """
+ # Filter events for the target document
+ doc_events = [e for e in json_data if e.get("document") == document]
+
+ if not doc_events:
+ print(f"No events found for document: {document}", file=sys.stderr)
+ return
+
+ # Start with template
+ current_content = template
+ last_timestamp = None
+
+ def clear_screen():
+ """Clear the terminal screen."""
+ os.system('cls' if os.name == 'nt' else 'clear')
+
+ def parse_timestamp(ts_str: str) -> datetime:
+ """Parse ISO timestamp string."""
+ return datetime.fromisoformat(ts_str.replace("Z", "+00:00"))
+
+ # Show initial template
+ clear_screen()
+ print(f"=" * 80)
+ print(f"PLAYBACK: {document} (Speed: {speed}x)")
+ print(f"Event 0 / {len(doc_events)} - Initial Template")
+ print(f"=" * 80)
+ print(current_content)
+ print(f"\n{'=' * 80}")
+ print("Press Ctrl+C to stop playback")
+ time.sleep(2.0 / speed)
+
+ try:
+ for idx, event in enumerate(doc_events, 1):
+ old_frag = event.get("oldFragment", "")
+ new_frag = event.get("newFragment", "")
+ offset = event.get("offset", 0)
+ timestamp = event.get("timestamp")
+
+ # Calculate delay based on timestamp difference
+ if last_timestamp and timestamp:
+ try:
+ ts1 = parse_timestamp(last_timestamp)
+ ts2 = parse_timestamp(timestamp)
+ delay = (ts2 - ts1).total_seconds() / speed
+ # Cap delay at 5 seconds for very long pauses
+ delay = min(delay, 5.0)
+ if delay > 0:
+ time.sleep(delay)
+ except (ValueError, KeyError):
+ time.sleep(0.1 / speed)
+ else:
+ time.sleep(0.1 / speed)
+
+ last_timestamp = timestamp
+
+ # Apply the edit
+ if new_frag != old_frag:
+ current_content = current_content[:offset] + new_frag + current_content[offset + len(old_frag):]
+
+ # Display current state
+ clear_screen()
+ print(f"=" * 80)
+ print(f"PLAYBACK: {document} (Speed: {speed}x)")
+ print(f"Event {idx} / {len(doc_events)} - {timestamp or 'unknown time'}")
+
+ # Show what changed
+ if new_frag != old_frag:
+ change_type = "INSERT" if not old_frag else ("DELETE" if not new_frag else "REPLACE")
+ print(f"Action: {change_type} at offset {offset} ({len(new_frag)} chars)")
+
+ print(f"=" * 80)
+ print(current_content)
+ print(f"\n{'=' * 80}")
+ print(f"Progress: [{('#' * (idx * 40 // len(doc_events))).ljust(40)}] {idx}/{len(doc_events)}")
+ print("Press Ctrl+C to stop playback")
+
+ except KeyboardInterrupt:
+ print("\n\nPlayback stopped by user.", file=sys.stderr)
+ return
+
+ # Final summary
+ print("\n\nPlayback complete!", file=sys.stderr)
+ print(f"Total events: {len(doc_events)}", file=sys.stderr)
+
+
  def create_parser() -> argparse.ArgumentParser:
  """
  Create and configure the argument parser.
@@ -353,6 +467,24 @@ def create_parser() -> argparse.ArgumentParser:
  help="Show individual auto-complete events in addition to "
  "aggregate statistics",
  )
+ parser.add_argument(
+ "-q",
+ "--quiet",
+ action="store_true",
+ help="Suppress output of reconstructed code to stdout",
+ )
+ parser.add_argument(
+ "-p",
+ "--playback",
+ action="store_true",
+ help="Play back the recording in real-time, showing code evolution",
+ )
+ parser.add_argument(
+ "--playback-speed",
+ type=float,
+ default=1.0,
+ help="Playback speed multiplier (1.0 = real-time, 2.0 = 2x speed, 0.5 = half speed)",
+ )
  return parser


@@ -388,6 +520,21 @@ def main() -> int:
  print(f"Error determining document: {e}", file=sys.stderr)
  return 1

+ # Handle playback mode
+ if args.playback:
+ try:
+ template_content = args.template_file.read_text()
+ except FileNotFoundError:
+ print(f"Error: Template file not found: {args.template_file}", file=sys.stderr)
+ return 1
+
+ if target_document:
+ playback_recording(json_data, target_document, template_content, args.playback_speed)
+ return 0
+ else:
+ print("Error: No documents found in recording", file=sys.stderr)
+ return 1
+
  # Filter events for target document
  doc_events = filter_events_by_document(json_data, target_document)
  if target_document and not doc_events:
@@ -416,29 +563,21 @@ def main() -> int:
  display_time_info(time_info)

  # Verify and process the recording
+ verified = False
+ reconstructed = ""
+ suspicious_events = []
  try:
  template_data, suspicious_events = verify(template_data, doc_events)
  reconstructed = reconstruct_file_from_events(
  doc_events, template_data, document_path=target_document
  )
- print(reconstructed)
+ verified = True
+ if not args.quiet:
+ print(reconstructed)

  # Display suspicious events
  display_suspicious_events(suspicious_events, args.show_autocomplete_details)

- # Write JSON output if requested
- if args.output_json:
- try:
- write_json_output(
- args.output_json,
- target_document or str(args.template_file),
- time_info,
- suspicious_events,
- )
- except Exception as e:
- print(f"Error writing JSON output: {e}", file=sys.stderr)
- return 1
-
  except ValueError as e:
  print("File failed verification from template!", file=sys.stderr)
  print(str(e), file=sys.stderr)
@@ -446,12 +585,27 @@ def main() -> int:
  print(template_diff(template_data, doc_events), file=sys.stderr)
  except Exception:
  pass
- return 1
+ verified = False
  except Exception as e:
  print(f"Error processing file: {type(e).__name__}: {e}", file=sys.stderr)
- return 1
+ verified = False
+
+ # Write JSON output to file if requested
+ if args.output_json:
+ try:
+ write_json_output(
+ args.output_json,
+ target_document or str(args.template_file),
+ time_info,
+ suspicious_events,
+ reconstructed,
+ verified,
+ )
+ except Exception as e:
+ print(f"Error writing JSON output: {e}", file=sys.stderr)
+ return 1

- return 0
+ return 0 if verified else 1


  if __name__ == "__main__":
cr_proc-0.1.7.dist-info/METADATA → cr_proc-0.1.8.dist-info/METADATA

@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: cr_proc
- Version: 0.1.7
+ Version: 0.1.8
  Summary: A tool for processing BYU CS code recording files.
  Author: Ethan Dye
  Author-email: mrtops03@gmail.com
cr_proc-0.1.8.dist-info/RECORD (added)

@@ -0,0 +1,9 @@
+ code_recorder_processor/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ code_recorder_processor/api/build.py,sha256=-EMg0w-llblJ_N_vs_B1kOsAOwiV-TYetAXiOY6PcWs,7643
+ code_recorder_processor/api/load.py,sha256=ZKoheLsEoGJ3fpAtPauoeEyNUhGLhUYSwjRsqt1m-TI,3947
+ code_recorder_processor/api/verify.py,sha256=bElag22J16vUjfg58-6gtqksuhyef4_2VIPxht8jp8o,29038
+ code_recorder_processor/cli.py,sha256=_3HA2wVSciQNIOVni8LmUT0inK_QXkYPBC1WGsWmYLw,19221
+ cr_proc-0.1.8.dist-info/METADATA,sha256=QNZNA2wJPWEhdhfSB0ZhOzZza-wJbseLL_mQpYmS3tM,4070
+ cr_proc-0.1.8.dist-info/WHEEL,sha256=3ny-bZhpXrU6vSQ1UPG34FoxZBp3lVcvK0LkgUz6VLk,88
+ cr_proc-0.1.8.dist-info/entry_points.txt,sha256=xb5dPAAWN1Z9NUHpvZgNakaslR1MVOERf_IfpG_M04M,77
+ cr_proc-0.1.8.dist-info/RECORD,,
cr_proc-0.1.7.dist-info/RECORD (removed)

@@ -1,9 +0,0 @@
- code_recorder_processor/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- code_recorder_processor/api/build.py,sha256=-EMg0w-llblJ_N_vs_B1kOsAOwiV-TYetAXiOY6PcWs,7643
- code_recorder_processor/api/load.py,sha256=ZKoheLsEoGJ3fpAtPauoeEyNUhGLhUYSwjRsqt1m-TI,3947
- code_recorder_processor/api/verify.py,sha256=5ZA56mZxEHak0HMT7BVBFFHeRfzn0s7Kq1fdvn6UjJM,25278
- code_recorder_processor/cli.py,sha256=B4vz_V5ZCxa8eKyj17dxopWu9_z_6-iC_vgQwNpgXoc,14109
- cr_proc-0.1.7.dist-info/METADATA,sha256=SbwNJwCifvRGKFkPRGr8aFwGSApNzoHSWaizUbGZ3gQ,4070
- cr_proc-0.1.7.dist-info/WHEEL,sha256=3ny-bZhpXrU6vSQ1UPG34FoxZBp3lVcvK0LkgUz6VLk,88
- cr_proc-0.1.7.dist-info/entry_points.txt,sha256=xb5dPAAWN1Z9NUHpvZgNakaslR1MVOERf_IfpG_M04M,77
- cr_proc-0.1.7.dist-info/RECORD,,