@fastino-ai/pioneer-cli 0.2.5 → 0.2.6
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.claude/settings.local.json +15 -1
- package/REPRODUCTION_REPORT.md +195 -0
- package/alphago_reproduction.ipynb +902 -0
- package/compare_results.py +141 -0
- package/monitor_and_test.py +111 -0
- package/package.json +2 -2
- package/quick_test.py +39 -0
- package/reproduce_degradation.py +147 -0
- package/src/api.ts +845 -35
- package/src/index.tsx +226 -18
|
@@ -0,0 +1,141 @@
|
|
|
1
|
+
#!/usr/bin/env python3
"""Compare base vs fine-tuned model results.

Hard-coded outputs from two inference runs (base GLiNER-2 vs the
fine-tuned checkpoint) are diffed label-by-label, then summarized with
total entity counts to quantify any degradation.
"""

import json  # NOTE(review): unused in this script — confirm before removing

# Labels compared between the two runs (was repeated inline three times).
LABELS = ['person', 'organization', 'location', 'product']


def entity_texts(entities):
    """Normalize one label's entity list to plain strings.

    Base-model output items are dicts with 'text'/'confidence' keys; the
    fine-tuned output is already a list of strings.  Unlike the original
    first-element check, this handles mixed lists item-by-item.
    """
    return [e['text'] if isinstance(e, dict) else e for e in entities]


# Base model results
base_results = [
    {
        'text': 'Example 1',
        'entities': {
            'person': [{'text': 'A. Müller', 'confidence': 1.0}],
            'organization': [{'text': 'VoltEdge', 'confidence': 0.99951171875}],
            'location': [{'text': 'Berlin Loft', 'confidence': 0.98291015625}],
            'product': [{'text': 'digital wallet solution', 'confidence': 0.9716796875}]
        }
    },
    {
        'text': 'Example 2',
        'entities': {
            'person': [],
            'organization': [{'text': 'NovaGrid', 'confidence': 0.9990234375}],
            'location': [],
            'product': [{'text': 'PulseCharge power bank', 'confidence': 0.94775390625}]
        }
    },
    {
        'text': 'Example 3',
        'entities': {
            'person': [{'text': 'Tim Cook', 'confidence': 1.0}],
            'organization': [{'text': 'Apple', 'confidence': 0.99951171875}],
            'location': [
                {'text': 'Cupertino', 'confidence': 0.89892578125},
                {'text': 'Steve Jobs Theater', 'confidence': 0.88330078125},
                {'text': 'California', 'confidence': 0.59033203125}
            ],
            'product': [{'text': 'iPhone 15 Pro', 'confidence': 0.9990234375}]
        }
    }
]

# Fine-tuned model results
finetuned_results = [
    {
        'text': 'Example 1',
        'entities': {
            'person': ['A. Müller'],
            'organization': ['VoltEdge'],
            'location': ['Berlin Loft'],
            'product': ['digital wallet solution']
        }
    },
    {
        'text': 'Example 2',
        'entities': {
            'person': [],
            'organization': ['NovaGrid'],
            'location': [],
            'product': ['PulseCharge power bank']
        }
    },
    {
        'text': 'Example 3',
        'entities': {
            'person': ['Tim Cook'],
            'organization': ['Apple'],
            'location': ['California', 'Cupertino'],  # Missing: Steve Jobs Theater
            'product': ['iPhone 15 Pro']
        }
    }
]

print("="*70)
print("COMPARISON: Base GLiNER-2 vs Fine-tuned Model")
print("="*70)

# The unused enumerate() index from the original loop has been dropped.
for base, ft in zip(base_results, finetuned_results):
    print(f"\n{base['text']}:")
    print("-"*70)

    for label in LABELS:
        # Extract text only for comparison
        base_texts = entity_texts(base['entities'].get(label, []))
        ft_texts = entity_texts(ft['entities'].get(label, []))

        base_count = len(base_texts)
        ft_count = len(ft_texts)

        # Skip labels where neither model found anything.
        if base_count == ft_count == 0:
            continue

        print(f"\n  {label.upper()}:")
        print(f"    Base: {base_count} entities - {base_texts}")
        print(f"    Fine-tuned: {ft_count} entities - {ft_texts}")

        # Set differences flag entities dropped or invented by fine-tuning.
        missing = set(base_texts) - set(ft_texts)
        extra = set(ft_texts) - set(base_texts)

        if missing:
            print(f"    ⚠️ MISSING in fine-tuned: {list(missing)}")
        if extra:
            print(f"    ℹ️ EXTRA in fine-tuned: {list(extra)}")

print("\n" + "="*70)
print("SUMMARY")
print("="*70)

# The original wrapped each list in a throwaway comprehension before len();
# len() of the raw list is identical and cheaper.
total_base = sum(
    len(result['entities'].get(label, []))
    for result in base_results
    for label in LABELS
)

total_ft = sum(
    len(result['entities'].get(label, []))
    for result in finetuned_results
    for label in LABELS
)

print(f"\nTotal entities extracted:")
print(f"  Base model: {total_base} entities")
print(f"  Fine-tuned: {total_ft} entities")
print(f"  Difference: {total_base - total_ft} entities")

if total_ft < total_base:
    # total_base > total_ft >= 0 here, so the percentage division is safe.
    print(f"\n⚠️ DEGRADATION CONFIRMED: Fine-tuned model found {total_base - total_ft} fewer entities!")
    print(f"  Performance drop: {((total_base - total_ft) / total_base * 100):.1f}%")
elif total_ft == total_base:
    print("\n✓ Same number of entities extracted")
else:
    print(f"\nℹ️ Fine-tuned found {total_ft - total_base} more entities")
|
|
@@ -0,0 +1,111 @@
|
|
|
1
|
+
#!/usr/bin/env python3
"""
Monitor training and automatically run comparison when complete.
This reproduces the reported NER fine-tuning degradation issue.
"""

# NOTE(review): neither time nor json is referenced in this script's visible
# code — confirm they are needed before removing.
import time
import json

# Felix training job being monitored for the degradation reproduction.
TRAINING_JOB_ID = "1255a2c6-cc5a-4d51-9e83-ed796059513e"

# Test examples
# Each record pairs an input sentence with its gold entity annotations,
# used to spot-check model output before and after fine-tuning.
TEST_EXAMPLES = [
    {
        "text": "Samsung Electronics introduced the Galaxy Z Fold3 at a press conference in Seoul, with company president Yoon Jae-sung highlighting the device's improved durability.",
        "expected": {
            "person": ["Yoon Jae-sung"],
            "organization": ["Samsung Electronics"],
            "location": ["Seoul"],
            "product": ["Galaxy Z Fold3"]
        }
    },
    {
        "text": "Following several customer complaints, tech company NovaGrid issued a recall for its PulseCharge power bank after reports of overheating. Jamie, the company's head of product safety, confirmed the recall affects units sold in North America.",
        "expected": {
            "person": ["Jamie"],
            "organization": ["NovaGrid"],
            "location": ["North America"],
            "product": ["PulseCharge power bank"]
        }
    }
]

# Entity labels requested on every inference call.
LABELS = ["person", "organization", "location", "product"]
|
|
35
|
+
|
|
36
|
+
def check_training_status():
    """Return the current training-job state as a string.

    Placeholder: the real implementation would query the Felix API.
    Expected values are "running", "complete", or "errored".
    """
    print("Checking training status...")
    # Stubbed until the Felix API call is wired in.
    current_state = "running"
    return current_state
|
|
42
|
+
|
|
43
|
+
def test_inference(text, labels, job_id=None):
|
|
44
|
+
"""Run inference on text."""
|
|
45
|
+
# This would use the Felix inference API
|
|
46
|
+
# Placeholder for demonstration
|
|
47
|
+
return {
|
|
48
|
+
"entities": {}
|
|
49
|
+
}
|
|
50
|
+
|
|
51
|
+
def main():
    """Print the reproduction plan for the fine-tuning degradation test.

    Console-output only: documents the training job under test, the
    examples that will be checked, and how to trigger the actual
    comparison once training finishes.  No inference happens here.
    """
    print("="*70)
    print("NER FINE-TUNING DEGRADATION REPRODUCTION")
    print("="*70)
    print(f"\nTraining Job ID: {TRAINING_JOB_ID}")
    # NOTE(review): the next two f-strings contain no placeholders — the
    # f-prefix is unnecessary but harmless.
    print(f"Datasets: ner-reproduction-test (100 examples)")
    print(f"Eval Set: ner-reproduction-eval (50 examples)")
    print(f"Labels: {', '.join(LABELS)}")

    print("\n" + "="*70)
    print("WAITING FOR TRAINING TO COMPLETE")
    print("="*70)
    print("\nThis will take approximately 5-10 minutes...")
    print("The script will automatically run the comparison when ready.\n")

    # In a real scenario, we'd poll the training status
    # For now, just show what we'll test

    print("\n" + "="*70)
    print("TEST PLAN")
    print("="*70)

    # Show each example (truncated to 80 chars) with its gold annotations.
    for i, example in enumerate(TEST_EXAMPLES, 1):
        print(f"\nExample {i}:")
        print(f"Text: {example['text'][:80]}...")
        print(f"Expected entities:")
        for label, entities in example['expected'].items():
            print(f"  {label}: {entities}")

    print("\n" + "="*70)
    print("WHAT WE'RE TESTING")
    print("="*70)
    print("""
1. Base GLiNER-2 Model Performance
   - Should extract entities with high accuracy (99%+ confidence)
   - Already tested: performs excellently on these examples

2. Fine-tuned Model Performance
   - After training on 100 synthetic examples
   - Will compare entity extraction quality vs base model

3. Expected Issue
   - Reports indicate fine-tuning WORSENS performance
   - We'll measure: precision, recall, entity counts
   - Look for: missing entities, lower confidence, incorrect labels
""")

    print("\n" + "="*70)
    print("TO RUN THE COMPARISON MANUALLY")
    print("="*70)
    print("""
Once training completes, you can test manually by asking me to:
1. Run inference with base model on test examples
2. Run inference with fine-tuned model (job_id: 1255a2c6-cc5a-4d51-9e83-ed796059513e)
3. Compare the results

Or just ask: "Check if training is done and run the comparison"
""")

if __name__ == "__main__":
    main()
|
package/package.json
CHANGED
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
{
|
|
2
2
|
"name": "@fastino-ai/pioneer-cli",
|
|
3
|
-
"version": "0.2.5",
|
|
3
|
+
"version": "0.2.6",
|
|
4
4
|
"description": "Pioneer CLI - AI training platform with chat agent",
|
|
5
5
|
"type": "module",
|
|
6
6
|
"publishConfig": {
|
|
@@ -36,4 +36,4 @@
|
|
|
36
36
|
"engines": {
|
|
37
37
|
"bun": ">=1.1.0"
|
|
38
38
|
}
|
|
39
|
-
}
|
|
39
|
+
}
|
package/quick_test.py
ADDED
|
@@ -0,0 +1,39 @@
|
|
|
1
|
+
#!/usr/bin/env python3
"""Quick test to compare base vs fine-tuned model performance."""

import sys
import os

# Add parent directory to path if running from subdirectory
# NOTE(review): this inserts the script's own directory, not its parent —
# confirm which one downstream imports actually need.
sys.path.insert(0, os.path.dirname(os.path.abspath(__file__)))

# Note: This script uses the Felix MLE agent's API, not the CLI
# It will call the same functions that the agent uses

print("Checking training status...")
print("=" * 60)

# We'll need to check if training is complete first
# If you want to run this manually, you'll need to wait for training to complete
# The training job ID is: 1255a2c6-cc5a-4d51-9e83-ed796059513e

print("""
To check training status, run:
python -c "from mle_agent import manage_training; print(manage_training('get', job_id='1255a2c6-cc5a-4d51-9e83-ed796059513e'))"

Once training is complete, this script will compare:
- Base GLiNER-2 model performance
- Fine-tuned model performance

On the same test examples to identify any degradation.
""")

# Test text
# Sample sentence reused from the eval set (see monitor_and_test.py).
text = "Samsung Electronics introduced the Galaxy Z Fold3 at a press conference in Seoul, with company president Yoon Jae-sung highlighting the device's improved durability."

labels = ["person", "organization", "location", "product"]

print("\nTest example:")
print(f"Text: {text}")
print(f"Labels: {labels}")
print("\nWaiting for training to complete before running comparison...")
|
|
@@ -0,0 +1,147 @@
|
|
|
1
|
+
#!/usr/bin/env python3
"""
Reproduce NER fine-tuning degradation issue.

This script:
1. Monitors the training job
2. Once complete, tests both base and fine-tuned models
3. Compares performance to identify degradation
"""

import time
import json
from felix import Felix

# Initialize Felix client
# NOTE(review): instantiated at import time; presumably picks up credentials
# from the environment — confirm against the Felix client documentation.
felix = Felix()

# Job and dataset IDs
TRAINING_JOB_ID = "1255a2c6-cc5a-4d51-9e83-ed796059513e"
EVAL_DATASET = "ner-reproduction-eval"

# Test examples from eval set
TEST_EXAMPLES = [
    "Samsung Electronics introduced the Galaxy Z Fold3 at a press conference in Seoul, with company president Yoon Jae-sung highlighting the device's improved durability.",
    "Following several customer complaints, tech company NovaGrid issued a recall for its PulseCharge power bank after reports of overheating. Jamie, the company's head of product safety, confirmed the recall affects units sold in North America.",
    "At the recent Horizon Launchpad in Berlin Loft, marketing chief A. Müller from the rising fintech VoltEdge presented the company's new digital wallet solution.",
]

# Entity labels requested on every inference call.
LABELS = ["person", "organization", "location", "product"]
|
|
30
|
+
|
|
31
|
+
def wait_for_training():
    """Block until the training job reaches a terminal state.

    Polls the Felix API every 30 seconds.  Returns True when the job
    completes successfully, False when it errors out.
    """
    print("Waiting for training to complete...")
    while True:
        job = felix.get_training_job(TRAINING_JOB_ID)
        state = job['status']
        print(f"Status: {state}")

        if state == 'complete':
            print("✓ Training completed!")
            return True
        if state == 'errored':
            print("✗ Training failed!")
            return False

        # Still running/queued — poll again shortly.
        time.sleep(30)
|
|
46
|
+
|
|
47
|
+
def test_model(model_id, model_name):
    """Run entity extraction on every test example with one model.

    ``model_id`` is either the literal string "base" (use the stock
    GLiNER-2 model) or a training-job id (use that fine-tuned model).
    ``model_name`` is only used for display.  Returns a list of
    ``{'text': ..., 'entities': ...}`` records, one per example.
    """
    banner = '=' * 60
    print(f"\n{banner}")
    print(f"Testing {model_name}")
    print(f"{banner}\n")

    collected = []
    for idx, sample in enumerate(TEST_EXAMPLES, 1):
        print(f"Example {idx}:")
        print(f"Text: {sample[:80]}...")

        # Run inference — identical call either way, except that a
        # fine-tuned model is addressed via its job_id.
        call_kwargs = {
            "task": "extract_entities",
            "text": sample,
            "schema": LABELS,
            "include_confidence": True,
        }
        if model_id != "base":
            call_kwargs["job_id"] = model_id
        response = felix.run_inference(**call_kwargs)

        found = response.get('result', {}).get('entities', {})
        print(f"Entities found: {json.dumps(found, indent=2)}")
        print()

        collected.append({'text': sample, 'entities': found})

    return collected
|
|
85
|
+
|
|
86
|
+
def compare_results(base_results, finetuned_results):
    """Print a per-example entity-count comparison of the two models.

    Each input is a list of ``{'text': ..., 'entities': {label: [...]}}``
    records as produced by ``test_model``.  Flags examples where the
    fine-tuned model found fewer entities than the base model.
    """

    def _count(record):
        # Total entities across every label in one result record.
        return sum(len(spans) for spans in record['entities'].values())

    banner = '=' * 60
    print(f"\n{banner}")
    print("COMPARISON")
    print(f"{banner}\n")

    for idx, (base_rec, ft_rec) in enumerate(zip(base_results, finetuned_results), 1):
        print(f"Example {idx}:")

        n_base = _count(base_rec)
        n_ft = _count(ft_rec)

        print(f"  Base model: {n_base} entities")
        print(f"  Fine-tuned: {n_ft} entities")

        if n_ft < n_base:
            print(f"  ⚠️ DEGRADATION: Fine-tuned found {n_base - n_ft} fewer entities")
        elif n_ft > n_base:
            print(f"  ℹ️ Fine-tuned found {n_ft - n_base} more entities")
        else:
            print(f"  ✓ Same number of entities")
        print()
|
|
109
|
+
|
|
110
|
+
def main():
    """Drive the full reproduction: wait for training, test both models,
    compare the outputs, then kick off a formal evaluation.

    Exits early (after printing a message) if the training job errors out.
    """
    # Wait for training
    if not wait_for_training():
        print("Training failed, cannot proceed with comparison")
        return

    # Test base model
    print("\n" + "="*60)
    print("STEP 1: Testing Base GLiNER Model")
    print("="*60)
    base_results = test_model("base", "Base GLiNER-2")

    # Test fine-tuned model
    print("\n" + "="*60)
    print("STEP 2: Testing Fine-tuned Model")
    print("="*60)
    finetuned_results = test_model(TRAINING_JOB_ID, "Fine-tuned Model")

    # Compare
    compare_results(base_results, finetuned_results)

    # Try formal evaluation
    print("\n" + "="*60)
    print("STEP 3: Running Formal Evaluations")
    print("="*60)

    print("\nEvaluating fine-tuned model on eval dataset...")
    # NOTE(review): assumes the fine-tuned model is addressable by its
    # training-job id — confirm with the Felix evaluation API.
    eval_result = felix.create_evaluation(
        model_id=TRAINING_JOB_ID,
        dataset_name=EVAL_DATASET,
        provider="felix"
    )
    print(f"Evaluation created: {eval_result.get('evaluation_id')}")

    print("\nCheck leaderboard with: felix.get_leaderboard('ner-reproduction-eval')")

if __name__ == "__main__":
    main()
|