scopemate 0.1.2__py3-none-any.whl → 0.2.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
scopemate/__init__.py CHANGED
@@ -4,7 +4,7 @@ This package provides tools for breaking down complex tasks using
  the Purpose/Scope/Outcome planning approach.
  """
 
- __version__ = "0.1.2"
+ __version__ = "0.2.0"
 
  # Public API
  from .models import (
scopemate/llm.py CHANGED
@@ -6,8 +6,14 @@ This module provides functions for interacting with LLMs for task estimation,
  breakdown, and optimization.
  """
  import json
+ import os
  from typing import Dict, Any, List, Optional
+ from enum import Enum, auto
+
+ # Import LLM providers
  from openai import OpenAI
+ import google.generativeai as genai
+ from anthropic import Anthropic
 
  from .models import (
      ScopeMateTask, Scope, TIME_COMPLEXITY, SIZE_COMPLEXITY,
@@ -17,25 +23,68 @@ from .models import (
  # -------------------------------
  # Configuration
  # -------------------------------
- DEFAULT_MODEL = "o4-mini"
+ class LLMProvider(Enum):
+     """Supported LLM providers"""
+     OPENAI = auto()
+     GEMINI = auto()
+     CLAUDE = auto()
+
+ # Default configuration
+ DEFAULT_PROVIDER = LLMProvider.OPENAI
+ DEFAULT_OPENAI_MODEL = "o4-mini"
+ DEFAULT_GEMINI_MODEL = "gemini-2.0-flash"
+ DEFAULT_CLAUDE_MODEL = "claude-3-7-sonnet-20250219"
+
+ # Provider-specific model mapping
+ DEFAULT_MODELS = {
+     LLMProvider.OPENAI: DEFAULT_OPENAI_MODEL,
+     LLMProvider.GEMINI: DEFAULT_GEMINI_MODEL,
+     LLMProvider.CLAUDE: DEFAULT_CLAUDE_MODEL
+ }
+
+ # Get provider from environment variable or use default
+ def get_llm_provider() -> LLMProvider:
+     """Get the LLM provider from environment variable or use default"""
+     provider_str = os.environ.get("SCOPEMATE_LLM_PROVIDER", "").upper()
+     if provider_str == "OPENAI":
+         return LLMProvider.OPENAI
+     elif provider_str == "GEMINI":
+         return LLMProvider.GEMINI
+     elif provider_str == "CLAUDE":
+         return LLMProvider.CLAUDE
+     return DEFAULT_PROVIDER
+
+ # Get model for the provider from environment variable or use default
+ def get_llm_model(provider: LLMProvider = None) -> str:
+     """Get the LLM model for the provider from environment variable or use default"""
+     if provider is None:
+         provider = get_llm_provider()
+
+     if provider == LLMProvider.OPENAI:
+         return os.environ.get("SCOPEMATE_OPENAI_MODEL", DEFAULT_OPENAI_MODEL)
+     elif provider == LLMProvider.GEMINI:
+         return os.environ.get("SCOPEMATE_GEMINI_MODEL", DEFAULT_GEMINI_MODEL)
+     elif provider == LLMProvider.CLAUDE:
+         return os.environ.get("SCOPEMATE_CLAUDE_MODEL", DEFAULT_CLAUDE_MODEL)
+
+     return DEFAULT_MODELS[DEFAULT_PROVIDER]
 
  # -------------------------------
  # LLM Interaction
  # -------------------------------
- def call_llm(prompt: str, model: str = DEFAULT_MODEL) -> dict:
+ def call_llm(prompt: str, system_prompt: str = None, model: str = None, provider: LLMProvider = None) -> dict:
      """
      Invoke LLM to get a structured JSON response.
 
      This function is the core LLM integration point for scopemate, handling all
-     communication with the OpenAI API. It's designed to always return structured
+     communication with the supported LLM APIs. It's designed to always return structured
      JSON data that can be easily processed by the application.
 
      The function:
-     1. Creates an OpenAI client using the default API credentials
+     1. Creates a client for the selected provider (OpenAI, Gemini, or Claude)
      2. Configures a system prompt that instructs the model to return valid JSON
      3. Sends the user's prompt with the task-specific instructions
-     4. Sets response_format to force JSON output
-     5. Parses and returns the JSON response
+     4. Parses and returns the JSON response
 
      Error handling is built in to gracefully handle JSON parsing failures by
      printing diagnostic information and returning an empty dictionary rather
@@ -44,50 +93,188 @@ def call_llm(prompt: str, model: str = DEFAULT_MODEL) -> dict:
      Args:
          prompt (str): The prompt to send to the LLM, containing full instructions
              and any task data needed for context
-         model (str): The OpenAI model identifier to use (defaults to DEFAULT_MODEL)
+         system_prompt (str, optional): The system prompt to use (defaults to a JSON-only prompt)
+         model (str, optional): The model identifier to use (defaults to provider's default model)
+         provider (LLMProvider, optional): The LLM provider to use (defaults to configured provider)
 
      Returns:
          dict: A dictionary containing the parsed JSON response from the LLM.
              Returns an empty dict {} if parsing fails.
-
-     Example:
-         ```python
-         # Create a prompt asking for task breakdown
-         prompt = f"Break down this task into subtasks: {task.title}"
-
-         # Call the LLM and get structured data back
-         response = call_llm(prompt)
-
-         # Process the structured response
-         if "subtasks" in response:
-             for subtask_data in response["subtasks"]:
-                 # Create a new subtask from the data
-                 subtask = ScopeMateTask(**subtask_data)
-                 tasks.append(subtask)
-         ```
      """
-     client = OpenAI()
-     response = client.chat.completions.create(
-         model=model,
-         messages=[
-             {
-                 "role": "system",
-                 "content": "You are a JSON assistant specialized in structured data for product management tasks. "
-                 "Respond only with valid JSON. Follow the exact requested format in the user's prompt, "
-                 "using the exact field names and adhering to all constraints on field values."
-             },
-             {"role": "user", "content": prompt}
-         ],
-         response_format={"type": "json_object"}
-     )
+     # Determine which provider to use
+     if provider is None:
+         provider = get_llm_provider()
+
+     # Determine which model to use
+     if model is None:
+         model = get_llm_model(provider)
+
+     # System prompt is common across providers
+     if system_prompt is None:
+         system_prompt = (
+             "You are a JSON assistant specialized in structured data for product management tasks. "
+             "Respond only with valid JSON. Follow the exact requested format in the user's prompt, "
+             "using the exact field names and adhering to all constraints on field values."
+         )
+
+     # Call the appropriate provider with JSON response format
+     response_text = _call_provider(prompt, system_prompt, model, provider, response_format="json")
 
+     # Parse JSON response
      try:
-         return json.loads(response.choices[0].message.content)
+         if response_text:
+             return json.loads(response_text)
+         return {}
      except json.JSONDecodeError as e:
          print(f"[Error] Failed to parse LLM response as JSON: {e}")
-         print(f"Raw response: {response.choices[0].message.content}")
+         print(f"Raw response: {response_text}")
          return {}
 
+ def call_llm_text(prompt: str, system_prompt: str = None, model: str = None, provider: LLMProvider = None) -> str:
+     """
+     Invoke LLM to get a plain text response (not JSON).
+
+     This is similar to call_llm but returns plain text instead of JSON.
+
+     Args:
+         prompt (str): The prompt to send to the LLM
+         system_prompt (str, optional): The system prompt to use
+         model (str, optional): The model identifier to use (defaults to provider's default model)
+         provider (LLMProvider, optional): The LLM provider to use (defaults to configured provider)
+
+     Returns:
+         str: The text response from the LLM, or empty string on error
+     """
+     # Determine which provider to use
+     if provider is None:
+         provider = get_llm_provider()
+
+     # Determine which model to use
+     if model is None:
+         model = get_llm_model(provider)
+
+     # System prompt is common across providers
+     if system_prompt is None:
+         system_prompt = (
+             "You are a helpful assistant that provides clear and concise answers. "
+             "Respond directly to the question without adding additional explanation or context."
+         )
+
+     print(f"Calling LLM (text mode) with provider: {provider}, model: {model}")
+
+     # Call the appropriate provider with text response format
+     return _call_provider(prompt, system_prompt, model, provider, response_format="text")
+
+ def _call_provider(prompt: str, system_prompt: str, model: str, provider: LLMProvider, response_format: str = "json") -> str:
+     """
+     Internal helper function to call the appropriate LLM provider.
+
+     Args:
+         prompt (str): The prompt to send to the LLM
+         system_prompt (str): The system prompt to use
+         model (str): The model to use
+         provider (LLMProvider): The provider to use
+         response_format (str): Either "json" or "text"
+
+     Returns:
+         str: The raw text response from the LLM
+     """
+     try:
+         if provider == LLMProvider.OPENAI:
+             return _call_openai_provider(prompt, system_prompt, model, response_format)
+         elif provider == LLMProvider.GEMINI:
+             return _call_gemini_provider(prompt, system_prompt, model, response_format)
+         elif provider == LLMProvider.CLAUDE:
+             return _call_claude_provider(prompt, system_prompt, model, response_format)
+
+         # Fallback to OpenAI if unknown provider
+         print(f"[Warning] Unknown provider {provider}, falling back to OpenAI")
+         return _call_openai_provider(prompt, system_prompt, DEFAULT_OPENAI_MODEL, response_format)
+     except Exception as e:
+         print(f"[Error] LLM API call failed: {e}")
+         return ""
+
+ def _call_openai_provider(prompt: str, system_prompt: str, model: str, response_format: str) -> str:
+     """Internal helper function to call OpenAI API"""
+     try:
+         client = OpenAI()
+
+         # Configure response format for JSON if requested
+         kwargs = {}
+         if response_format == "json":
+             kwargs["response_format"] = {"type": "json_object"}
+
+         response = client.chat.completions.create(
+             model=model,
+             messages=[
+                 {"role": "system", "content": system_prompt},
+                 {"role": "user", "content": prompt}
+             ],
+             **kwargs
+         )
+
+         # Return raw content text
+         return response.choices[0].message.content.strip()
+     except Exception as e:
+         print(f"[Error] OpenAI API call failed: {e}")
+         return ""
+
+ def _call_gemini_provider(prompt: str, system_prompt: str, model: str, response_format: str) -> str:
+     """Internal helper function to call Gemini API"""
+     try:
+         # Check for API key in environment
+         api_key = os.environ.get("GEMINI_API_KEY", None)
+         if not api_key:
+             print("[Error] No API key found for Gemini. Set GEMINI_API_KEY environment variable.")
+             return ""
+         # Initialize the Gemini client
+         genai.configure(api_key=api_key)
+
+         # Since system role is not supported, combine system prompt and user prompt
+         combined_prompt = f"{system_prompt}\n\n{prompt}"
+
+         # Configure response format for JSON if requested
+         generation_config = {}
+         if response_format == "json":
+             generation_config["response_mime_type"] = "application/json"
+
+         # Generate response using Gemini (fall back to the default model if none was given)
+         model_name = model if model else DEFAULT_GEMINI_MODEL
+         model_obj = genai.GenerativeModel(model_name=model_name, generation_config=generation_config)
+         response = model_obj.generate_content(combined_prompt)
+
+         text = response.text.strip()
+
+         # Remove quotes if present for text responses
+         if response_format == "text" and text.startswith('"') and text.endswith('"'):
+             text = text[1:-1]
+
+         return text
+     except Exception as e:
+         print(f"[Error] Gemini API call failed: {e}")
+         return ""
+
+ def _call_claude_provider(prompt: str, system_prompt: str, model: str, response_format: str) -> str:
+     """Internal helper function to call Claude API"""
+     try:
+         client = Anthropic()
+
+         # Configure temperature - lower for JSON for more deterministic output
+         temperature = 0.1 if response_format == "json" else 0.2
+
+         response = client.messages.create(
+             model=model,
+             system=system_prompt,
+             max_tokens=4096,
+             messages=[
+                 {"role": "user", "content": prompt}
+             ],
+             temperature=temperature
+         )
+
+         return response.content[0].text.strip()
+     except Exception as e:
+         print(f"[Error] Claude API call failed: {e}")
+         return ""
 
  def estimate_scope(task: ScopeMateTask) -> Scope:
      """
@@ -340,35 +527,34 @@ def update_parent_with_child_context(parent_task: ScopeMateTask, child_task: Sco
      return updated_parent
 
 
- def generate_title_from_purpose_outcome(purpose: str, outcome: str) -> str:
+ def generate_title_from_purpose_outcome(purpose: str, outcome: str, model: str = None, provider: LLMProvider = None) -> str:
      """
      Use LLM to generate a concise title from purpose and outcome descriptions.
 
      Args:
          purpose: The purpose description
          outcome: The outcome description
+         model (str, optional): The model identifier to use (defaults to provider's default model)
+         provider (LLMProvider, optional): The LLM provider to use (defaults to configured provider)
 
      Returns:
          A concise title string
      """
-     client = OpenAI()
-     response = client.chat.completions.create(
-         model=DEFAULT_MODEL,
-         messages=[
-             {
-                 "role": "system",
-                 "content": "You are a concise title generator. Generate a brief, clear title (maximum 60 characters) "
-                 "that captures the essence of a task based on its purpose and outcome description."
-             },
-             {
-                 "role": "user",
-                 "content": f"Purpose: {purpose}\n\nOutcome: {outcome}\n\nGenerate a concise title (max 60 chars):"
-             }
-         ]
+     system_prompt = (
+         "You are a concise title generator. Generate a brief, clear title (maximum 60 characters) "
+         "that captures the essence of a task based on its purpose and outcome description. "
+         "Return ONLY the title with no additional text or quotes."
      )
 
-     # Extract title from LLM response
-     title = response.choices[0].message.content.strip()
+     user_prompt = f"Purpose: {purpose}\n\nOutcome: {outcome}\n\nGenerate a concise title (max 60 chars):"
+
+     # Use the common text-based LLM function
+     title = call_llm_text(user_prompt, system_prompt, model, provider)
+
+     # Handle empty response
+     if not title:
+         return "Task Title"
+
      # Limit title length if needed
      if len(title) > 60:
          title = title[:57] + "..."
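Because the title generator now funnels through `call_llm_text`, callers can pin its provider and model per call. A hedged sketch (the argument values are made up for illustration):

```python
from scopemate.llm import LLMProvider, generate_title_from_purpose_outcome

title = generate_title_from_purpose_outcome(
    purpose="Reduce onboarding friction for new users",
    outcome="A guided setup flow that completes in under five minutes",
    provider=LLMProvider.CLAUDE,  # optional; omitted -> SCOPEMATE_LLM_PROVIDER or OpenAI
)
# On an empty LLM response this returns "Task Title"; longer titles are cut to 60 chars.
print(title)
```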
scopemate-0.1.2.dist-info/METADATA → scopemate-0.2.0.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: scopemate
- Version: 0.1.2
+ Version: 0.2.0
  Summary: 🪜 A CLI tool for Purpose/Scope/Outcome planning
  Author: Anoop Thomas Mathew
  Author-email: Anoop Thomas Mathew <atmb4u@gmail.com>
@@ -24,6 +24,10 @@ License-File: LICENSE
  Requires-Dist: openai>=1.0.0
  Requires-Dist: pydantic>=2.0.0
  Requires-Dist: twine>=6.1.0
+ Requires-Dist: google-generativeai>=0.3.0
+ Requires-Dist: anthropic>=0.12.0
+ Requires-Dist: ipython>=8.35.0
+ Requires-Dist: ipdb>=0.13.13
  Dynamic: author
  Dynamic: license-file
  Dynamic: requires-python
@@ -83,7 +87,10 @@ Scopemate is built around a three-part framework for strategic decision making:
  ## Requirements
 
  - Python 3.10 or higher
- - OpenAI API key set as environment variable (`OPENAI_API_KEY`)
+ - An API key for one of the supported LLM providers:
+   - OpenAI API key (default)
+   - Google AI (Gemini) API key
+   - Anthropic (Claude) API key
 
  ## Installation
 
@@ -134,26 +141,123 @@ cd scopemate
  pip install -e .
  ```
 
- ### Setting up the OpenAI API Key
+ ### Setting up API Keys
 
- scopemate requires an OpenAI API key to function. Set it as an environment variable:
+ scopemate now supports multiple LLM providers. Set up the API key for your preferred provider:
 
- #### macOS/Linux
+ #### OpenAI (Default)
+
+ Set the OpenAI API key as an environment variable:
+
+ ##### macOS/Linux
  ```bash
  export OPENAI_API_KEY=your-api-key-here
  ```
 
- #### Windows Command Prompt
+ ##### Windows Command Prompt
  ```cmd
  set OPENAI_API_KEY=your-api-key-here
  ```
 
- #### Windows PowerShell
+ ##### Windows PowerShell
  ```powershell
  $env:OPENAI_API_KEY = "your-api-key-here"
  ```
 
- For permanent setup, add this to your shell profile or environment variables.
+ #### Google AI (Gemini)
+
+ Set the Google AI API key as an environment variable:
+
+ ##### macOS/Linux
+ ```bash
+ export GEMINI_API_KEY=your-api-key-here
+ ```
+
+ ##### Windows Command Prompt
+ ```cmd
+ set GEMINI_API_KEY=your-api-key-here
+ ```
+
+ ##### Windows PowerShell
+ ```powershell
+ $env:GEMINI_API_KEY = "your-api-key-here"
+ ```
+
+ #### Anthropic (Claude)
+
+ Set the Anthropic API key as an environment variable:
+
+ ##### macOS/Linux
+ ```bash
+ export ANTHROPIC_API_KEY=your-api-key-here
+ ```
+
+ ##### Windows Command Prompt
+ ```cmd
+ set ANTHROPIC_API_KEY=your-api-key-here
+ ```
+
+ ##### Windows PowerShell
+ ```powershell
+ $env:ANTHROPIC_API_KEY = "your-api-key-here"
+ ```
+
+ ### Selecting LLM Provider
+
+ You can select which LLM provider to use by setting the `SCOPEMATE_LLM_PROVIDER` environment variable:
+
+ #### macOS/Linux
+ ```bash
+ # Use OpenAI (default)
+ export SCOPEMATE_LLM_PROVIDER=OPENAI
+
+ # Use Gemini
+ export SCOPEMATE_LLM_PROVIDER=GEMINI
+
+ # Use Claude
+ export SCOPEMATE_LLM_PROVIDER=CLAUDE
+ ```
+
+ #### Windows Command Prompt
+ ```cmd
+ :: Use OpenAI (default)
+ set SCOPEMATE_LLM_PROVIDER=OPENAI
+
+ :: Use Gemini
+ set SCOPEMATE_LLM_PROVIDER=GEMINI
+
+ :: Use Claude
+ set SCOPEMATE_LLM_PROVIDER=CLAUDE
+ ```
+
+ #### Windows PowerShell
+ ```powershell
+ # Use OpenAI (default)
+ $env:SCOPEMATE_LLM_PROVIDER = "OPENAI"
+
+ # Use Gemini
+ $env:SCOPEMATE_LLM_PROVIDER = "GEMINI"
+
+ # Use Claude
+ $env:SCOPEMATE_LLM_PROVIDER = "CLAUDE"
+ ```
+
+ ### Selecting Model
+
+ You can also specify which model to use for each provider:
+
+ ```bash
+ # OpenAI model (default is o4-mini)
+ export SCOPEMATE_OPENAI_MODEL=gpt-4-turbo
+
+ # Gemini model (default is gemini-2.0-flash)
+ export SCOPEMATE_GEMINI_MODEL=gemini-2.0-flash
+
+ # Claude model (default is claude-3-7-sonnet-20250219)
+ export SCOPEMATE_CLAUDE_MODEL=claude-3-7-sonnet-20250219
+ ```
+
+ For permanent setup, add these environment variables to your shell profile or system environment variables.
 
  ## Usage
 
scopemate-0.1.2.dist-info/RECORD → scopemate-0.2.0.dist-info/RECORD CHANGED
@@ -1,17 +1,17 @@
- scopemate/__init__.py,sha256=sMbeuIqGXqrO47d1LD4gk6r5cLHuZmRtVXR797c8K2s,472
+ scopemate/__init__.py,sha256=xp32RvKQnWl1JCBMwuDsSDYg8awCryN2iBWSpJVB2es,472
  scopemate/__main__.py,sha256=nPNZe_QEoOHQ_hXf17w72BHz1UFPKuW2g3whTLwuM8E,195
  scopemate/breakdown.py,sha256=mwIDzf7m2GHVkDrRmMyeS8v2pNd99U3vlcPCtOajvs0,21048
  scopemate/cli.py,sha256=qh6iFleQc8Bld0iptyUgRm5Ga3GtZsvE6Y-q6vbm3dk,6891
  scopemate/core.py,sha256=wpXCpb5Kdpqul9edNCx2Da94137XCc1w-3KQc9-Tf3s,700
  scopemate/engine.py,sha256=8yQoxSECJCGuNSIwS-qFoaOGM1iaZ-y4Lo7k5Q6v-mk,16992
  scopemate/interaction.py,sha256=SeqQVME0MATK-m-M7T9nNHkcJ_VCRhlqydL_vQaSMWk,10893
- scopemate/llm.py,sha256=k_1FQdg_TRt_51Yu_g_PjO0pBnmFmWHoK_-KEPGlASM,15526
+ scopemate/llm.py,sha256=DLa2cL4pTYjnUG__gS_Lfifx3Mnpfm2QACpJSO9ooJo,23038
  scopemate/models.py,sha256=Q3SUoHu_4RejDAocEr83I00wGvxhDoJ1COVqjPsr4DQ,7738
  scopemate/storage.py,sha256=uV1-7IdwJwBENeNoO9Y3WwUUXd-jA2NvKdiERGGhmV8,11642
  scopemate/task_analysis.py,sha256=Mic0FOOy_BWI1_5TQmh__39miOcZBZ7mTUcCkv1DvkI,14967
- scopemate-0.1.2.dist-info/licenses/LICENSE,sha256=4fqQFK5AkkXmg6FBG9Wr06gCR7BMQl02TvsPYt-YL6s,1076
- scopemate-0.1.2.dist-info/METADATA,sha256=APn2Igy4cl3jhcP9gh6gwOHnBLMnN3CPsL4MrvDVabc,11774
- scopemate-0.1.2.dist-info/WHEEL,sha256=lTU6B6eIfYoiQJTZNc-fyaR6BpL6ehTzU3xGYxn2n8k,91
- scopemate-0.1.2.dist-info/entry_points.txt,sha256=XXusGEDxI6NlrYmSBcPDtjV3QvsHWVWPSrt4zD4UcLg,49
- scopemate-0.1.2.dist-info/top_level.txt,sha256=riMrI_jMCfZMb7-ecWBwqOBLdUsnPOxSu2Pgvqx7Too,10
- scopemate-0.1.2.dist-info/RECORD,,
+ scopemate-0.2.0.dist-info/licenses/LICENSE,sha256=4fqQFK5AkkXmg6FBG9Wr06gCR7BMQl02TvsPYt-YL6s,1076
+ scopemate-0.2.0.dist-info/METADATA,sha256=35WLUUsU01syRayWxXrx5Lt-bOqHwY31FBplRN9wCJ8,13835
+ scopemate-0.2.0.dist-info/WHEEL,sha256=pxyMxgL8-pra_rKaQ4drOZAegBVuX-G_4nRHjjgWbmo,91
+ scopemate-0.2.0.dist-info/entry_points.txt,sha256=XXusGEDxI6NlrYmSBcPDtjV3QvsHWVWPSrt4zD4UcLg,49
+ scopemate-0.2.0.dist-info/top_level.txt,sha256=riMrI_jMCfZMb7-ecWBwqOBLdUsnPOxSu2Pgvqx7Too,10
+ scopemate-0.2.0.dist-info/RECORD,,
scopemate-0.1.2.dist-info/WHEEL → scopemate-0.2.0.dist-info/WHEEL CHANGED
@@ -1,5 +1,5 @@
  Wheel-Version: 1.0
- Generator: setuptools (78.1.1)
+ Generator: setuptools (79.0.0)
  Root-Is-Purelib: true
  Tag: py3-none-any