redcodegen 0.0.3__tar.gz → 0.0.5__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of redcodegen might be problematic.

@@ -1,6 +1,6 @@
 Metadata-Version: 2.3
 Name: redcodegen
-Version: 0.0.3
+Version: 0.0.5
 Summary: Add your description here
 Requires-Dist: click>=8.0.0
 Requires-Dist: cwe2>=3.0.0
@@ -1,6 +1,6 @@
 [project]
 name = "redcodegen"
-version = "0.0.3"
+version = "0.0.5"
 description = "Add your description here"
 readme = "README.md"
 requires-python = ">=3.11"
@@ -11,7 +11,7 @@ CWE_TOP_25 = [
     798, 190, 400, 306
 ]
 
-def create_lm(model_name="openai/gpt-4o-mini", temperature=0.8, api_key=None):
+def create_lm(model_name="openai/gpt-4o-mini", temperature=0.8, api_key=None, api_base=None):
     """Create a DSPy language model instance.
 
     Args:
@@ -23,12 +23,24 @@ def create_lm(model_name="openai/gpt-4o-mini", temperature=0.8, api_key=None):
     """
     if api_key is None:
         api_key = os.environ.get("OPENAI_API_KEY")
-    return dspy.LM(
-        model_name,
-        api_key=api_key,
-        temperature=temperature,
-        max_tokens=16000
-    )
+    if api_base is None:
+        api_base = os.environ.get("OPENAI_API_BASE")
+
+    if api_base is None:
+        return dspy.LM(
+            model_name,
+            api_key=api_key,
+            temperature=temperature,
+            max_tokens=16000
+        )
+    else:
+        return dspy.LM(
+            model_name,
+            api_key=api_key,
+            api_base=api_base,
+            temperature=temperature,
+            max_tokens=16000
+        )
 
 SCENARIO_EXAMPLES = [
     dspy.Example(
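The new api_base parameter lets create_lm target any OpenAI-compatible endpoint instead of the default, falling back to the OPENAI_API_BASE environment variable when the argument is omitted. A minimal usage sketch, assuming create_lm is importable from the top-level package (the module path is not shown in this diff) and using a purely illustrative local URL:

```python
import dspy

from redcodegen import create_lm  # assumed import path; not shown in this diff

# Point the generator at a hypothetical local OpenAI-compatible server.
lm = create_lm(
    model_name="openai/gpt-4o-mini",
    temperature=0.8,
    api_key="sk-placeholder",             # or None to fall back to OPENAI_API_KEY
    api_base="http://localhost:8000/v1",  # or None to fall back to OPENAI_API_BASE
)
dspy.configure(lm=lm)
```

When neither the argument nor OPENAI_API_BASE is set, the function builds dspy.LM exactly as it did in 0.0.3, so existing callers are unaffected.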
@@ -155,13 +155,18 @@ def append_to_jsonl(record: Dict[str, Any], output_path: Path):
     default=None,
     help='API key (defaults to OPENAI_API_KEY env var)'
 )
+@click.option(
+    '--api-base',
+    default=None,
+    help='API base URL (defaults to OPENAI_API_BASE env var)'
+)
 @click.option(
     '--temperature',
     default=0.8,
     type=float,
     help='Temperature for code generation (default: 0.8)'
 )
-def main(cwes, use_top_25, min_samples, output, model, api_key, temperature):
+def main(cwes, use_top_25, min_samples, output, model, api_key, api_base, temperature):
     """Generate and evaluate vulnerable code samples for specified CWEs.
 
     Examples:
@@ -172,7 +177,7 @@ def main(cwes, use_top_25, min_samples, output, model, api_key, temperature):
         python -m redcodegen --use-top-25 --model openai/gpt-4o # switch model
     """
     # Configure DSPy with specified model
-    lm = create_lm(model_name=model, temperature=temperature, api_key=api_key)
+    lm = create_lm(model_name=model, temperature=temperature, api_key=api_key, api_base=api_base)
     dspy.configure(lm=lm)
     logger.info(f"Configured model: {model}")

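The CLI threads the same setting through a new --api-base option and passes it straight to create_lm, mirroring the existing --api-key handling: an explicit flag wins, otherwise the environment variable is consulted, otherwise dspy.LM is built without an api_base. A small sketch of that resolution order (illustrative only, not package code):

```python
import os

def resolve_api_base(cli_value=None):
    """Mirror the precedence above: explicit --api-base, then OPENAI_API_BASE, then nothing."""
    if cli_value is not None:
        return cli_value
    return os.environ.get("OPENAI_API_BASE")  # may be None; dspy.LM then gets no api_base
```

An invocation would then look something like `python -m redcodegen --use-top-25 --api-base http://localhost:8000/v1`, with the URL again purely illustrative.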
@@ -29,8 +29,8 @@ class SuggestLibraries(dspy.Signature):
     task: str = dspy.InputField()
     suggested_libraries: List[str] = dspy.InputField()
 
-    chosen_library: Optional[str] = dspy.OutputField(desc="choose a library that would best help solve the task, or None")
-    rephrased_task: Optional[str] = dspy.OutputField(desc="rephrase the task in terms of the chosen library, or None")
+    chosen_library: str = dspy.OutputField(desc="choose a library that would best help solve the task, or say None")
+    rephrased_task: str = dspy.OutputField(desc="rephrase the task in terms of the chosen library, or say None")
 suggest_libraries = dspy.Predict(SuggestLibraries)
 
 def generate(cwe_id, min_scenarios=3):
@@ -48,12 +48,12 @@ def generate(cwe_id, min_scenarios=3):
     output_scenarios = []
     while len(output_scenarios) < min_scenarios:
         scenarios = extract_scenarios(name=entry.name, description=entry.extended_description,
-                                      config={"temperature": 0.8, "rollout_id": len(output_scenarios)}).scenarios
+                                      config={"rollout_id": len(output_scenarios)}).scenarios
         output_scenarios.extend(scenarios)
     scenarios = [strip_vulnerability(scenario=i).coding_task for i in output_scenarios]
     suggestions = [suggest_libraries(task=i, suggested_libraries=CODEQL_LIBRARIES) for i in scenarios]
     results = [
-        i.rephrased_task if i.rephrased_task is not None else j
+        i.rephrased_task if ((i.rephrased_task is not None) and (i.rephrased_task.lower().strip() != "none")) else j
         for i,j in zip(suggestions, scenarios)
     ]
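The two changes in this file go together: the output fields are now plain str (with the model told to "say None" when it declines), so a decline comes back as the literal text "None" rather than a Python None, and an `is not None` test alone would not catch it. The updated filter therefore also strips and lowercases the string before deciding whether to fall back to the original scenario. A compact illustration of that check (hypothetical helper, not package code):

```python
def pick_task(rephrased_task, original_task):
    """Keep the rephrased task only when it is a real answer, not the literal string 'None'."""
    if rephrased_task is not None and rephrased_task.lower().strip() != "none":
        return rephrased_task
    return original_task

assert pick_task("None", "parse a user-supplied file path") == "parse a user-supplied file path"
assert pick_task("Use Flask to serve the uploaded file", "serve a file") == "Use Flask to serve the uploaded file"
```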
File without changes