compair-core 0.4.0__tar.gz → 0.4.2__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of compair-core might be problematic; review the changes listed below for details.

Files changed (44)
  1. {compair_core-0.4.0 → compair_core-0.4.2}/PKG-INFO +1 -1
  2. {compair_core-0.4.0 → compair_core-0.4.2}/compair_core/api.py +23 -10
  3. {compair_core-0.4.0 → compair_core-0.4.2}/compair_core/compair/feedback.py +106 -39
  4. {compair_core-0.4.0 → compair_core-0.4.2}/compair_core/compair/main.py +10 -3
  5. {compair_core-0.4.0 → compair_core-0.4.2}/compair_core.egg-info/PKG-INFO +1 -1
  6. {compair_core-0.4.0 → compair_core-0.4.2}/pyproject.toml +1 -1
  7. {compair_core-0.4.0 → compair_core-0.4.2}/LICENSE +0 -0
  8. {compair_core-0.4.0 → compair_core-0.4.2}/README.md +0 -0
  9. {compair_core-0.4.0 → compair_core-0.4.2}/compair_core/__init__.py +0 -0
  10. {compair_core-0.4.0 → compair_core-0.4.2}/compair_core/compair/__init__.py +0 -0
  11. {compair_core-0.4.0 → compair_core-0.4.2}/compair_core/compair/celery_app.py +0 -0
  12. {compair_core-0.4.0 → compair_core-0.4.2}/compair_core/compair/default_groups.py +0 -0
  13. {compair_core-0.4.0 → compair_core-0.4.2}/compair_core/compair/embeddings.py +0 -0
  14. {compair_core-0.4.0 → compair_core-0.4.2}/compair_core/compair/logger.py +0 -0
  15. {compair_core-0.4.0 → compair_core-0.4.2}/compair_core/compair/models.py +0 -0
  16. {compair_core-0.4.0 → compair_core-0.4.2}/compair_core/compair/schema.py +0 -0
  17. {compair_core-0.4.0 → compair_core-0.4.2}/compair_core/compair/tasks.py +0 -0
  18. {compair_core-0.4.0 → compair_core-0.4.2}/compair_core/compair/utils.py +0 -0
  19. {compair_core-0.4.0 → compair_core-0.4.2}/compair_core/compair_email/__init__.py +0 -0
  20. {compair_core-0.4.0 → compair_core-0.4.2}/compair_core/compair_email/email.py +0 -0
  21. {compair_core-0.4.0 → compair_core-0.4.2}/compair_core/compair_email/email_core.py +0 -0
  22. {compair_core-0.4.0 → compair_core-0.4.2}/compair_core/compair_email/templates.py +0 -0
  23. {compair_core-0.4.0 → compair_core-0.4.2}/compair_core/compair_email/templates_core.py +0 -0
  24. {compair_core-0.4.0 → compair_core-0.4.2}/compair_core/server/__init__.py +0 -0
  25. {compair_core-0.4.0 → compair_core-0.4.2}/compair_core/server/app.py +0 -0
  26. {compair_core-0.4.0 → compair_core-0.4.2}/compair_core/server/deps.py +0 -0
  27. {compair_core-0.4.0 → compair_core-0.4.2}/compair_core/server/local_model/__init__.py +0 -0
  28. {compair_core-0.4.0 → compair_core-0.4.2}/compair_core/server/local_model/app.py +0 -0
  29. {compair_core-0.4.0 → compair_core-0.4.2}/compair_core/server/local_model/ocr.py +0 -0
  30. {compair_core-0.4.0 → compair_core-0.4.2}/compair_core/server/providers/__init__.py +0 -0
  31. {compair_core-0.4.0 → compair_core-0.4.2}/compair_core/server/providers/console_mailer.py +0 -0
  32. {compair_core-0.4.0 → compair_core-0.4.2}/compair_core/server/providers/contracts.py +0 -0
  33. {compair_core-0.4.0 → compair_core-0.4.2}/compair_core/server/providers/local_storage.py +0 -0
  34. {compair_core-0.4.0 → compair_core-0.4.2}/compair_core/server/providers/noop_analytics.py +0 -0
  35. {compair_core-0.4.0 → compair_core-0.4.2}/compair_core/server/providers/noop_billing.py +0 -0
  36. {compair_core-0.4.0 → compair_core-0.4.2}/compair_core/server/providers/noop_ocr.py +0 -0
  37. {compair_core-0.4.0 → compair_core-0.4.2}/compair_core/server/routers/__init__.py +0 -0
  38. {compair_core-0.4.0 → compair_core-0.4.2}/compair_core/server/routers/capabilities.py +0 -0
  39. {compair_core-0.4.0 → compair_core-0.4.2}/compair_core/server/settings.py +0 -0
  40. {compair_core-0.4.0 → compair_core-0.4.2}/compair_core.egg-info/SOURCES.txt +0 -0
  41. {compair_core-0.4.0 → compair_core-0.4.2}/compair_core.egg-info/dependency_links.txt +0 -0
  42. {compair_core-0.4.0 → compair_core-0.4.2}/compair_core.egg-info/requires.txt +0 -0
  43. {compair_core-0.4.0 → compair_core-0.4.2}/compair_core.egg-info/top_level.txt +0 -0
  44. {compair_core-0.4.0 → compair_core-0.4.2}/setup.cfg +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: compair-core
3
- Version: 0.4.0
3
+ Version: 0.4.2
4
4
  Summary: Open-source foundation of the Compair collaboration platform.
5
5
  Author: RocketResearch, Inc.
6
6
  License: MIT
@@ -1373,22 +1373,35 @@ def create_doc(
1373
1373
  current_user.status_change_date = datetime.now(timezone.utc)
1374
1374
  session.commit()
1375
1375
 
1376
- # Enforce document limits
1376
+ # Enforce document limits (cloud plans) – core runs are unrestricted unless explicitly configured
1377
1377
  team = _user_team(current_user)
1378
+ document_limit: int | None = None
1378
1379
  if IS_CLOUD and HAS_TEAM and team and current_user.status == "active":
1379
1380
  document_limit = team.total_documents_limit # type: ignore[union-attr]
1381
+ elif IS_CLOUD and _user_plan(current_user) == "individual" and current_user.status == "active":
1382
+ document_limit = 100
1380
1383
  else:
1381
- if IS_CLOUD and _user_plan(current_user) == 'individual' and current_user.status == "active":
1382
- document_limit = 100
1383
- else:
1384
- document_limit = int(os.getenv("COMPAIR_CORE_DOCUMENT_LIMIT", "10"))
1384
+ raw_core_limit = os.getenv("COMPAIR_CORE_DOCUMENT_LIMIT")
1385
+ if raw_core_limit:
1386
+ try:
1387
+ document_limit = int(raw_core_limit)
1388
+ except ValueError:
1389
+ document_limit = None
1390
+
1385
1391
  document_count = session.query(models.Document).filter(models.Document.user_id == current_user.user_id).count()
1386
1392
 
1387
- if document_count >= document_limit:
1388
- raise HTTPException(
1389
- status_code=403,
1390
- detail=f"Document limit reached. Individual plan users can have 100, team plans have 100 times the number of users (pooled); other plans can have 10",
1391
- )
1393
+ if document_limit is not None and document_count >= document_limit:
1394
+ if IS_CLOUD:
1395
+ detail_msg = (
1396
+ "Document limit reached. Individual plan users can have 100, team plans have 100 times "
1397
+ "the number of users (pooled); other plans can have 10"
1398
+ )
1399
+ else:
1400
+ detail_msg = (
1401
+ f"Document limit of {document_limit} reached. Adjust COMPAIR_CORE_DOCUMENT_LIMIT to raise "
1402
+ "or unset it to remove limits in core deployments."
1403
+ )
1404
+ raise HTTPException(status_code=403, detail=detail_msg)
1392
1405
 
1393
1406
  if not authorid:
1394
1407
  authorid = current_user.user_id
@@ -96,6 +96,33 @@ def _fallback_feedback(text: str, references: list[Any]) -> str:
96
96
  return f"Consider aligning with these reference passages: {joined}"
97
97
 
98
98
 
99
+
100
+ def _local_reference_feedback(
101
+ reviewer: Reviewer,
102
+ references: list[Any],
103
+ user: User,
104
+ ) -> str | None:
105
+ if not references:
106
+ return None
107
+ summaries: list[str] = []
108
+ for ref in references[:3]:
109
+ doc = getattr(ref, "document", None)
110
+ title = getattr(doc, "title", None) or "a related document"
111
+ snippet = getattr(ref, "content", "") or getattr(ref, "text", "")
112
+ snippet = snippet.replace("\n", " ").strip()
113
+ if not snippet:
114
+ continue
115
+ summaries.append(f'"{title}" — {snippet[:200]}')
116
+ if not summaries:
117
+ return None
118
+ instruction = reviewer.length_map.get(user.preferred_feedback_length, "1–2 short sentences")
119
+ if len(summaries) == 1:
120
+ body = summaries[0]
121
+ else:
122
+ body = "; ".join(summaries)
123
+ return f"[local-feedback] {instruction}: Consider the guidance from {body}"
124
+
125
+
99
126
  def _openai_feedback(
100
127
  reviewer: Reviewer,
101
128
  doc: Document,
@@ -107,56 +134,92 @@ def _openai_feedback(
107
134
  return None
108
135
  instruction = reviewer.length_map.get(user.preferred_feedback_length, "1–2 short sentences")
109
136
  ref_text = "\n\n".join(_reference_snippets(references, limit=3))
110
- messages = [
111
- {
112
- "role": "system",
113
- "content": (
114
- "You are Compair, an assistant that delivers concise, actionable feedback on a user's document. "
115
- "Focus on clarity, cohesion, and usefulness."
116
- ),
117
- },
118
- {
119
- "role": "user",
120
- "content": (
121
- f"Document:\n{text}\n\nHelpful reference excerpts:\n{ref_text or 'None provided'}\n\n"
122
- f"Respond with {instruction} that highlights the most valuable revision to make next."
123
- ),
124
- },
125
- ]
137
+ system_prompt = (
138
+ "You are Compair, an assistant that delivers concise, actionable feedback on a user's document. "
139
+ "Focus on clarity, cohesion, and usefulness."
140
+ )
141
+ user_prompt = (
142
+ f"Document:\n{text}\n\nHelpful reference excerpts:\n{ref_text or 'None provided'}\n\n"
143
+ f"Respond with {instruction} that highlights the most valuable revision to make next."
144
+ )
145
+
146
+ def _extract_response_text(response: Any) -> str | None:
147
+ if response is None:
148
+ return None
149
+ text_out = getattr(response, "output_text", None)
150
+ if isinstance(text_out, str) and text_out.strip():
151
+ return text_out.strip()
152
+ outputs = getattr(response, "output", None) or getattr(response, "outputs", None)
153
+ pieces: list[str] = []
154
+ if outputs:
155
+ for item in outputs:
156
+ content_field = None
157
+ if isinstance(item, dict):
158
+ content_field = item.get("content")
159
+ else:
160
+ content_field = getattr(item, "content", None)
161
+ if not content_field:
162
+ continue
163
+ for part in content_field:
164
+ if isinstance(part, dict):
165
+ val = part.get("text") or part.get("output_text")
166
+ if val:
167
+ pieces.append(str(val))
168
+ elif part:
169
+ pieces.append(str(part))
170
+ if pieces:
171
+ merged = "\n".join(pieces).strip()
172
+ return merged or None
173
+ return None
126
174
 
127
175
  try:
128
- if reviewer._openai_client is not None and hasattr(reviewer._openai_client, "responses"):
129
- response = reviewer._openai_client.responses.create( # type: ignore[union-attr]
176
+ client = reviewer._openai_client
177
+ if client is None and hasattr(openai, "OpenAI"):
178
+ api_key = os.getenv("COMPAIR_OPENAI_API_KEY") or None
179
+ try: # pragma: no cover - optional dependency differences
180
+ client = openai.OpenAI(api_key=api_key) if api_key else openai.OpenAI()
181
+ except TypeError:
182
+ client = openai.OpenAI()
183
+ reviewer._openai_client = client
184
+
185
+ content: str | None = None
186
+ if client is not None and hasattr(client, "responses"):
187
+ response = client.responses.create(
130
188
  model=reviewer.openai_model,
131
- input=messages,
189
+ instructions=system_prompt,
190
+ input=user_prompt,
132
191
  max_output_tokens=256,
192
+ store=False,
133
193
  )
134
- content = getattr(response, "output_text", None)
135
- if not content and hasattr(response, "outputs"):
136
- # Legacy compatibility: join content parts
137
- parts = []
138
- for item in getattr(response, "outputs", []):
139
- parts.extend(getattr(item, "content", []))
140
- content = " ".join(getattr(part, "text", "") for part in parts)
194
+ content = _extract_response_text(response)
195
+ elif client is not None and hasattr(client, "chat") and hasattr(client.chat, "completions"):
196
+ response = client.chat.completions.create(
197
+ model=reviewer.openai_model,
198
+ messages=[{"role": "system", "content": system_prompt}, {"role": "user", "content": user_prompt}],
199
+ temperature=0.3,
200
+ max_tokens=256,
201
+ )
202
+ choices = getattr(response, "choices", None) or []
203
+ if choices:
204
+ message = getattr(choices[0], "message", None)
205
+ if message is not None:
206
+ content = getattr(message, "content", None)
207
+ if not content:
208
+ content = getattr(choices[0], "text", None)
209
+ if isinstance(content, str):
210
+ content = content.strip()
141
211
  elif hasattr(openai, "ChatCompletion"):
142
212
  chat_response = openai.ChatCompletion.create( # type: ignore[attr-defined]
143
213
  model=reviewer.openai_model,
144
- messages=messages,
214
+ messages=[{"role": "system", "content": system_prompt}, {"role": "user", "content": user_prompt}],
145
215
  temperature=0.3,
146
216
  max_tokens=256,
147
217
  )
148
- content = (
149
- chat_response["choices"][0]["message"]["content"].strip() # type: ignore[index, assignment]
150
- )
151
- else:
152
- content = None
218
+ content = chat_response["choices"][0]["message"]["content"].strip() # type: ignore[index, assignment]
219
+ if content:
220
+ return content.strip()
153
221
  except Exception as exc: # pragma: no cover - network/API failure
154
222
  log_event("openai_feedback_failed", error=str(exc))
155
- content = None
156
- if content:
157
- content = content.strip()
158
- if content:
159
- return content
160
223
  return None
161
224
 
162
225
 
@@ -238,9 +301,13 @@ def get_feedback(
238
301
  if feedback:
239
302
  return feedback
240
303
 
241
- if reviewer.provider == "local" and getattr(reviewer, "endpoint", None):
242
- feedback = _local_feedback(reviewer, text, references, user)
304
+ if reviewer.provider == "local":
305
+ feedback = _local_reference_feedback(reviewer, references, user)
243
306
  if feedback:
244
307
  return feedback
308
+ if getattr(reviewer, "endpoint", None):
309
+ feedback = _local_feedback(reviewer, text, references, user)
310
+ if feedback:
311
+ return feedback
245
312
 
246
313
  return _fallback_feedback(text, references)
@@ -41,7 +41,11 @@ def process_document(
41
41
  if prev_content:
42
42
  prev_chunks = chunk_text(prev_content[-1])
43
43
 
44
- feedback_limit = int(os.getenv("COMPAIR_CORE_FEEDBACK_LIMIT", "5"))
44
+ feedback_limit_env = os.getenv("COMPAIR_CORE_FEEDBACK_LIMIT")
45
+ try:
46
+ feedback_limit = int(feedback_limit_env) if feedback_limit_env else None
47
+ except ValueError:
48
+ feedback_limit = None
45
49
  time_cutoff = datetime.now(timezone.utc) - timedelta(hours=24)
46
50
 
47
51
  recent_feedback_count = session.query(Feedback).filter(
@@ -59,8 +63,11 @@ def process_document(
59
63
  if generate_feedback:
60
64
  prioritized_chunk_indices = detect_significant_edits(prev_chunks=prev_chunks, new_chunks=new_chunks)
61
65
 
62
- num_chunks_can_generate_feedback = max((feedback_limit - recent_feedback_count), 0)
63
- indices_to_generate_feedback = prioritized_chunk_indices[:num_chunks_can_generate_feedback]
66
+ if feedback_limit is None:
67
+ indices_to_generate_feedback = prioritized_chunk_indices
68
+ else:
69
+ num_chunks_can_generate_feedback = max((feedback_limit - recent_feedback_count), 0)
70
+ indices_to_generate_feedback = prioritized_chunk_indices[:num_chunks_can_generate_feedback]
64
71
 
65
72
  for i, chunk in enumerate(new_chunks):
66
73
  should_generate_feedback = i in indices_to_generate_feedback
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: compair-core
3
- Version: 0.4.0
3
+ Version: 0.4.2
4
4
  Summary: Open-source foundation of the Compair collaboration platform.
5
5
  Author: RocketResearch, Inc.
6
6
  License: MIT
@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"
4
4
 
5
5
  [project]
6
6
  name = "compair-core"
7
- version = "0.4.0"
7
+ version = "0.4.2"
8
8
  description = "Open-source foundation of the Compair collaboration platform."
9
9
  readme = "README.md"
10
10
  license = { text = "MIT" }
File without changes
File without changes
File without changes