agentgear-ai 0.1.16__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (44)
  1. agentgear/__init__.py +18 -0
  2. agentgear/cli/__init__.py +1 -0
  3. agentgear/cli/main.py +125 -0
  4. agentgear/sdk/__init__.py +6 -0
  5. agentgear/sdk/client.py +276 -0
  6. agentgear/sdk/decorators.py +65 -0
  7. agentgear/sdk/integrations/openai.py +52 -0
  8. agentgear/sdk/prompt.py +23 -0
  9. agentgear/sdk/trace.py +59 -0
  10. agentgear/server/__init__.py +1 -0
  11. agentgear/server/app/__init__.py +1 -0
  12. agentgear/server/app/api/__init__.py +1 -0
  13. agentgear/server/app/api/auth.py +156 -0
  14. agentgear/server/app/api/datasets.py +185 -0
  15. agentgear/server/app/api/evaluations.py +69 -0
  16. agentgear/server/app/api/evaluators.py +157 -0
  17. agentgear/server/app/api/llm_models.py +39 -0
  18. agentgear/server/app/api/metrics.py +18 -0
  19. agentgear/server/app/api/projects.py +139 -0
  20. agentgear/server/app/api/prompts.py +227 -0
  21. agentgear/server/app/api/runs.py +75 -0
  22. agentgear/server/app/api/seed.py +106 -0
  23. agentgear/server/app/api/settings.py +135 -0
  24. agentgear/server/app/api/spans.py +56 -0
  25. agentgear/server/app/api/tokens.py +67 -0
  26. agentgear/server/app/api/users.py +116 -0
  27. agentgear/server/app/auth.py +80 -0
  28. agentgear/server/app/config.py +26 -0
  29. agentgear/server/app/db.py +41 -0
  30. agentgear/server/app/deps.py +46 -0
  31. agentgear/server/app/main.py +77 -0
  32. agentgear/server/app/migrations.py +88 -0
  33. agentgear/server/app/models.py +339 -0
  34. agentgear/server/app/schemas.py +343 -0
  35. agentgear/server/app/utils/email.py +30 -0
  36. agentgear/server/app/utils/llm.py +27 -0
  37. agentgear/server/static/assets/index-BAAzXAln.js +121 -0
  38. agentgear/server/static/assets/index-CE45MZx1.css +1 -0
  39. agentgear/server/static/index.html +13 -0
  40. agentgear_ai-0.1.16.dist-info/METADATA +387 -0
  41. agentgear_ai-0.1.16.dist-info/RECORD +44 -0
  42. agentgear_ai-0.1.16.dist-info/WHEEL +4 -0
  43. agentgear_ai-0.1.16.dist-info/entry_points.txt +2 -0
  44. agentgear_ai-0.1.16.dist-info/licenses/LICENSE +201 -0
agentgear/__init__.py ADDED
@@ -0,0 +1,18 @@
+ """
+ AgentGear SDK and utilities.
+
+ Primary exports:
+ - AgentGearClient: backend client for logging runs/spans/prompts
+ - observe: decorator for capturing LLM calls
+ - trace: context manager for spans
+ - Prompt: prompt templating and version metadata helper
+ """
+
+ from agentgear.sdk.client import AgentGearClient
+ from agentgear.sdk.decorators import observe
+ from agentgear.sdk.trace import trace
+ from agentgear.sdk.prompt import Prompt
+
+ __version__ = "0.1.9"
+
+ __all__ = ["AgentGearClient", "observe", "trace", "Prompt", "__version__"]
agentgear/cli/__init__.py ADDED
@@ -0,0 +1 @@
+ # CLI package
agentgear/cli/main.py ADDED
@@ -0,0 +1,125 @@
+ import subprocess
+ from typing import List, Optional
+
+ import typer
+
+ from agentgear.server.app.auth import generate_token
+ from agentgear.server.app.config import get_settings
+ from agentgear.server.app.db import Base, SessionLocal, engine
+ from agentgear.server.app.migrations import apply_migrations
+ from agentgear.server.app.models import APIKey, Project, Prompt, PromptVersion, Run
+
+ app = typer.Typer(help="AgentGear CLI")
+
+
+ @app.command()
+ def init_db():
+     """Initialize database tables."""
+     Base.metadata.create_all(bind=engine)
+     apply_migrations(engine)
+     typer.echo("Database initialized.")
+
+
+ @app.command()
+ def create_project(name: str = typer.Option(...), description: Optional[str] = typer.Option(None)):
+     """Create a new project."""
+     db = SessionLocal()
+     try:
+         project = Project(name=name, description=description)
+         db.add(project)
+         db.commit()
+         db.refresh(project)
+         typer.echo(f"Created project {project.id} ({project.name})")
+     finally:
+         db.close()
+
+
+ @app.command()
+ def list_projects():
+     """List projects."""
+     db = SessionLocal()
+     try:
+         projects = db.query(Project).all()
+         for p in projects:
+             typer.echo(f"{p.id}\t{p.name}\t{p.created_at}")
+     finally:
+         db.close()
+
+
+ @app.command()
+ def create_token(
+     project_id: str,
+     scopes: List[str] = typer.Option(["runs.write", "prompts.read", "prompts.write", "tokens.manage"]),
+ ):
+     """Create an API token for a project."""
+     db = SessionLocal()
+     try:
+         project = db.query(Project).filter(Project.id == project_id).first()
+         if not project:
+             typer.echo("Project not found")
+             raise typer.Exit(code=1)
+         raw, hashed = generate_token()
+         record = APIKey(project_id=project.id, key_hash=hashed, scopes=scopes)
+         db.add(record)
+         db.commit()
+         db.refresh(record)
+         typer.echo(f"Token created for project {project.name}")
+         typer.echo("Save this token now; it will not be shown again:")
+         typer.echo(raw)
+     finally:
+         db.close()
+
+
+ @app.command()
+ def demo_data():
+     """Seed demo data."""
+     db = SessionLocal()
+     try:
+         project = Project(name="Demo Project", description="Sample project")
+         db.add(project)
+         db.commit()
+         db.refresh(project)
+
+         prompt = Prompt(project_id=project.id, name="greeting", description="Greeting prompt")
+         db.add(prompt)
+         db.commit()
+         db.refresh(prompt)
+
+         pv = PromptVersion(prompt_id=prompt.id, version=1, content="Hello {{name}}")
+         db.add(pv)
+         db.commit()
+
+         run = Run(
+             project_id=project.id,
+             prompt_version_id=pv.id,
+             name="demo-run",
+             input_text="name=Agent",
+             output_text="Hello Agent",
+             token_input=5,
+             token_output=5,
+             cost=0.0001,
+             latency_ms=120,
+         )
+         db.add(run)
+         db.commit()
+         typer.echo("Demo data created.")
+         typer.echo(f"Project ID: {project.id}")
+     finally:
+         db.close()
+
+
+ @app.command()
+ def ui(host: Optional[str] = None, port: Optional[int] = None, reload: bool = True):
+     """Run FastAPI server with uvicorn."""
+     settings = get_settings()
+     host = host or settings.api_host
+     port = port or settings.api_port
+     args = ["uvicorn", "agentgear.server.app.main:app", "--host", host, "--port", str(port)]
+     if reload:
+         args.append("--reload")
+     typer.echo(f"Starting AgentGear API on {host}:{port}")
+     subprocess.run(args, check=False)
+
+
+ if __name__ == "__main__":
+     app()
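
The CLI above covers database initialization, project and token creation, demo seeding, and launching the API server. As a quick orientation, a minimal sketch of driving these commands through Typer's test runner; the project name is illustrative, and the command names assume Typer's default underscore-to-dash conversion:

    from typer.testing import CliRunner

    from agentgear.cli.main import app

    runner = CliRunner()
    runner.invoke(app, ["init-db"])                                     # create tables, apply migrations
    result = runner.invoke(app, ["create-project", "--name", "Demo"])   # illustrative project name
    print(result.output)                                                # e.g. "Created project <id> (Demo)"
    runner.invoke(app, ["create-token", "<project-id>"])                # prints a one-time API token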
agentgear/sdk/__init__.py ADDED
@@ -0,0 +1,6 @@
+ from agentgear.sdk.client import AgentGearClient
+ from agentgear.sdk.decorators import observe
+ from agentgear.sdk.trace import trace
+ from agentgear.sdk.prompt import Prompt
+
+ __all__ = ["AgentGearClient", "observe", "trace", "Prompt"]
agentgear/sdk/client.py ADDED
@@ -0,0 +1,276 @@
+ import time
+ from typing import Any, Dict, Optional
+
+ import httpx
+
+ from agentgear.server.app.db import SessionLocal
+ from agentgear.server.app.models import Project, Prompt, PromptVersion, Run, Span
+
+
+ class AgentGearClient:
+     """Client for sending observability data to AgentGear backend."""
+
+     def __init__(
+         self,
+         base_url: str = "http://localhost:8000",
+         api_key: Optional[str] = None,
+         project_id: Optional[str] = None,
+         local: bool = False,
+     ):
+         self.base_url = base_url.rstrip("/")
+         self.api_key = api_key
+         self.project_id = project_id
+         self.local = local
+         if not self.local and not self.api_key:
+             raise ValueError("api_key is required in remote mode")
+         self._http = httpx.Client(base_url=self.base_url, timeout=10.0)
+
+     def _headers(self) -> Dict[str, str]:
+         headers = {}
+         if self.api_key:
+             headers["X-AgentGear-Key"] = self.api_key
+         return headers
+
+     def log_run(
+         self,
+         name: Optional[str] = None,
+         input_text: Optional[str] = None,
+         output_text: Optional[str] = None,
+         prompt_version_id: Optional[str] = None,
+         token_usage: Optional[dict[str, Any]] = None,
+         cost: Optional[float] = None,
+         latency_ms: Optional[float] = None,
+         error: Optional[str] = None,
+         metadata: Optional[dict[str, Any]] = None,
+     ) -> dict[str, Any]:
+         project_id = self._require_project()
+         payload = {
+             "project_id": project_id,
+             "prompt_version_id": prompt_version_id,
+             "name": name,
+             "input_text": input_text,
+             "output_text": output_text,
+             "token_usage": token_usage,
+             "cost": cost,
+             "latency_ms": latency_ms,
+             "error": error,
+             "metadata": metadata,
+         }
+         if self.local:
+             return self._log_run_local(payload)
+         resp = self._http.post("/api/runs", json=payload, headers=self._headers())
+         resp.raise_for_status()
+         return resp.json()
+
+     def log_span(
+         self,
+         run_id: str,
+         name: str,
+         parent_id: Optional[str] = None,
+         start_time: Optional[str] = None,
+         end_time: Optional[str] = None,
+         latency_ms: Optional[float] = None,
+         metadata: Optional[dict[str, Any]] = None,
+     ) -> dict[str, Any]:
+         project_id = self._require_project()
+         payload = {
+             "project_id": project_id,
+             "run_id": run_id,
+             "parent_id": parent_id,
+             "name": name,
+             "start_time": start_time,
+             "end_time": end_time,
+             "latency_ms": latency_ms,
+             "metadata": metadata,
+         }
+         if self.local:
+             return self._log_span_local(payload)
+         resp = self._http.post("/api/spans", json=payload, headers=self._headers())
+         resp.raise_for_status()
+         return resp.json()
+
+     def register_prompt(
+         self,
+         name: str,
+         content: str,
+         description: Optional[str] = None,
+         metadata: Optional[dict[str, Any]] = None,
+     ) -> dict[str, Any]:
+         project_id = self._require_project()
+         payload = {
+             "project_id": project_id,
+             "name": name,
+             "description": description,
+             "content": content,
+             "metadata": metadata,
+         }
+         if self.local:
+             return self._register_prompt_local(payload)
+         resp = self._http.post("/api/prompts", json=payload, headers=self._headers())
+         resp.raise_for_status()
+         return resp.json()
+
+     def create_prompt_version(
+         self, prompt_id: str, content: str, metadata: Optional[dict[str, Any]] = None
+     ) -> dict[str, Any]:
+         payload = {"content": content, "metadata": metadata}
+         if self.local:
+             return self._create_prompt_version_local(prompt_id, payload)
+         resp = self._http.post(f"/api/prompts/{prompt_id}/versions", json=payload, headers=self._headers())
+         resp.raise_for_status()
+         return resp.json()
+
+     def _require_project(self) -> str:
+         if not self.project_id:
+             raise ValueError("project_id is required")
+         return self.project_id
+
+     # Local mode helpers (direct DB writes) ---------------------------------
+     def _log_run_local(self, payload: dict[str, Any]) -> dict[str, Any]:
+         db = SessionLocal()
+         try:
+             run = Run(
+                 project_id=payload["project_id"],
+                 prompt_version_id=payload.get("prompt_version_id"),
+                 name=payload.get("name"),
+                 input_text=payload.get("input_text"),
+                 output_text=payload.get("output_text"),
+                 token_input=(payload.get("token_usage") or {}).get("prompt"),
+                 token_output=(payload.get("token_usage") or {}).get("completion"),
+                 cost=payload.get("cost"),
+                 latency_ms=payload.get("latency_ms"),
+                 error=payload.get("error"),
+                 metadata_=payload.get("metadata"),
+             )
+             db.add(run)
+             db.commit()
+             db.refresh(run)
+             return {
+                 "id": run.id,
+                 "project_id": run.project_id,
+                 "prompt_version_id": run.prompt_version_id,
+                 "name": run.name,
+                 "input_text": run.input_text,
+                 "output_text": run.output_text,
+                 "token_input": run.token_input,
+                 "token_output": run.token_output,
+                 "cost": run.cost,
+                 "latency_ms": run.latency_ms,
+                 "error": run.error,
+                 "metadata": run.metadata_,
+                 "created_at": run.created_at.isoformat(),
+             }
+         finally:
+             db.close()
+
+     def _log_span_local(self, payload: dict[str, Any]) -> dict[str, Any]:
+         db = SessionLocal()
+         try:
+             start_time = payload.get("start_time")
+             end_time = payload.get("end_time")
+             if isinstance(start_time, str):
+                 try:
+                     from datetime import datetime
+
+                     start_time = datetime.fromisoformat(start_time)
+                 except Exception:
+                     start_time = None
+             if isinstance(end_time, str):
+                 try:
+                     from datetime import datetime
+
+                     end_time = datetime.fromisoformat(end_time)
+                 except Exception:
+                     end_time = None
+             span = Span(
+                 project_id=payload["project_id"],
+                 run_id=payload["run_id"],
+                 parent_id=payload.get("parent_id"),
+                 name=payload["name"],
+                 start_time=start_time or None,
+                 end_time=end_time or None,
+                 latency_ms=payload.get("latency_ms"),
+                 metadata_=payload.get("metadata"),
+             )
+             db.add(span)
+             db.commit()
+             db.refresh(span)
+             return {
+                 "id": span.id,
+                 "project_id": span.project_id,
+                 "run_id": span.run_id,
+                 "parent_id": span.parent_id,
+                 "name": span.name,
+                 "start_time": span.start_time.isoformat(),
+                 "end_time": span.end_time.isoformat() if span.end_time else None,
+                 "latency_ms": span.latency_ms,
+                 "metadata": span.metadata_,
+             }
+         finally:
+             db.close()
+
+     def _register_prompt_local(self, payload: dict[str, Any]) -> dict[str, Any]:
+         db = SessionLocal()
+         try:
+             prompt = Prompt(
+                 project_id=payload["project_id"],
+                 name=payload["name"],
+                 description=payload.get("description"),
+             )
+             db.add(prompt)
+             db.commit()
+             db.refresh(prompt)
+             version = PromptVersion(
+                 prompt_id=prompt.id,
+                 version=1,
+                 content=payload["content"],
+                 metadata_=payload.get("metadata"),
+             )
+             db.add(version)
+             db.commit()
+             return {
+                 "id": prompt.id,
+                 "project_id": prompt.project_id,
+                 "name": prompt.name,
+                 "description": prompt.description,
+                 "created_at": prompt.created_at.isoformat(),
+             }
+         finally:
+             db.close()
+
+     def _create_prompt_version_local(self, prompt_id: str, payload: dict[str, Any]) -> dict[str, Any]:
+         db = SessionLocal()
+         try:
+             latest = (
+                 db.query(PromptVersion).filter(PromptVersion.prompt_id == prompt_id).order_by(PromptVersion.version.desc()).first()
+             )
+             next_version = (latest.version + 1) if latest else 1
+             version = PromptVersion(
+                 prompt_id=prompt_id,
+                 version=next_version,
+                 content=payload["content"],
+                 metadata_=payload.get("metadata"),
+             )
+             db.add(version)
+             db.commit()
+             db.refresh(version)
+             return {
+                 "id": version.id,
+                 "prompt_id": version.prompt_id,
+                 "version": version.version,
+                 "content": version.content,
+                 "metadata": version.metadata_,
+                 "created_at": version.created_at.isoformat(),
+             }
+         finally:
+             db.close()
+
+
+ def timed(func):
+     def wrapper(*args, **kwargs):
+         start = time.perf_counter()
+         result = func(*args, **kwargs)
+         elapsed = (time.perf_counter() - start) * 1000
+         return result, elapsed
+
+     return wrapper
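
AgentGearClient exposes the same logging surface in two modes: remote (HTTP POSTs authenticated with the X-AgentGear-Key header) and local (direct writes through SessionLocal). A minimal usage sketch; the key, project id, and field values below are placeholders, not values from the package:

    from agentgear import AgentGearClient

    client = AgentGearClient(
        base_url="http://localhost:8000",
        api_key="<api-key>",        # placeholder; omit and pass local=True for direct DB writes
        project_id="<project-id>",  # placeholder
    )
    run = client.log_run(
        name="summarize",
        input_text="long article ...",
        output_text="short summary",
        token_usage={"prompt": 120, "completion": 40},
        latency_ms=850.0,
    )
    client.log_span(run_id=run["id"], name="retrieval", latency_ms=200.0)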
agentgear/sdk/decorators.py ADDED
@@ -0,0 +1,65 @@
+ import functools
+ import json
+ import time
+ from typing import Any, Callable, Optional
+
+ from agentgear.sdk.client import AgentGearClient
+
+
+ def observe(
+     client: AgentGearClient,
+     name: Optional[str] = None,
+     prompt_version_id: Optional[str] = None,
+     metadata: Optional[dict[str, Any]] = None,
+ ):
+     """
+     Decorator to capture function inputs/outputs and log a run.
+     """
+
+     def decorator(func: Callable):
+         @functools.wraps(func)
+         def wrapper(*args, **kwargs):
+             start = time.perf_counter()
+             error_text = None
+             output_text = None
+             try:
+                 result = func(*args, **kwargs)
+                 output_text = _safe_string(result)
+                 return result
+             except Exception as exc: # noqa: BLE001
+                 error_text = str(exc)
+                 raise
+             finally:
+                 elapsed_ms = (time.perf_counter() - start) * 1000
+                 input_repr = _inputs_to_string(args, kwargs)
+                 try:
+                     client.log_run(
+                         name=name or func.__name__,
+                         input_text=input_repr,
+                         output_text=output_text,
+                         prompt_version_id=prompt_version_id,
+                         latency_ms=elapsed_ms,
+                         error=error_text,
+                         metadata=metadata,
+                     )
+                 except Exception:
+                     # Do not break caller; best-effort logging.
+                     pass
+
+         return wrapper
+
+     return decorator
+
+
+ def _inputs_to_string(args, kwargs) -> str:
+     try:
+         return json.dumps({"args": args, "kwargs": kwargs}, default=str)
+     except Exception:
+         return f"args={args}, kwargs={kwargs}"
+
+
+ def _safe_string(value: Any) -> str:
+     try:
+         return json.dumps(value, default=str)
+     except Exception:
+         return str(value)
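
observe wraps a function, serializes its arguments and return value, and logs a run on every call, including failed ones. A usage sketch with placeholder identifiers:

    from agentgear import AgentGearClient, observe

    client = AgentGearClient(project_id="<project-id>", local=True)  # placeholder project id

    @observe(client, name="greet", metadata={"env": "dev"})
    def greet(name: str) -> str:
        return f"Hello {name}"

    greet("Agent")  # logs input/output, latency, and any error text; logging failures never raise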
agentgear/sdk/integrations/openai.py ADDED
@@ -0,0 +1,52 @@
+ from typing import Any, Dict, Optional
+
+ from agentgear.sdk.client import AgentGearClient
+
+
+ def instrument_openai_chat(
+     openai_client: Any,
+     agentgear: AgentGearClient,
+     model: str,
+     prompt_version_id: Optional[str] = None,
+ ):
+     """
+     Wraps OpenAI chat completions to automatically log runs.
+     """
+
+     original = openai_client.chat.completions.create
+
+     def wrapped(**kwargs):
+         messages = kwargs.get("messages")
+         input_text = str(messages)
+         response = original(**kwargs)
+         output_text = _response_text(response)
+
+         usage = getattr(response, "usage", None) or {}
+         token_usage = {
+             "prompt": getattr(usage, "prompt_tokens", None) or usage.get("prompt_tokens"),
+             "completion": getattr(usage, "completion_tokens", None) or usage.get("completion_tokens"),
+             "total": getattr(usage, "total_tokens", None) or usage.get("total_tokens"),
+         }
+         agentgear.log_run(
+             name=f"openai:{model}",
+             input_text=input_text,
+             output_text=output_text,
+             prompt_version_id=prompt_version_id,
+             token_usage=token_usage,
+             latency_ms=None,
+         )
+         return response
+
+     openai_client.chat.completions.create = wrapped
+     return openai_client
+
+
+ def _response_text(response: Any) -> str:
+     try:
+         if hasattr(response, "choices") and response.choices:
+             content = getattr(response.choices[0].message, "content", None)
+             if content:
+                 return content
+         return str(response)
+     except Exception:
+         return str(response)
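
instrument_openai_chat monkey-patches chat.completions.create on a client object so that each call is logged as a run named "openai:<model>" with token usage. A wiring sketch assuming the official openai package; the model name and project id are illustrative:

    from openai import OpenAI

    from agentgear import AgentGearClient
    from agentgear.sdk.integrations.openai import instrument_openai_chat

    agentgear = AgentGearClient(project_id="<project-id>", local=True)  # placeholder project id
    openai_client = instrument_openai_chat(OpenAI(), agentgear, model="gpt-4o-mini")

    # Logged automatically as a run named "openai:gpt-4o-mini".
    openai_client.chat.completions.create(
        model="gpt-4o-mini",
        messages=[{"role": "user", "content": "Say hello"}],
    )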
agentgear/sdk/prompt.py ADDED
@@ -0,0 +1,23 @@
+ from dataclasses import dataclass
+ from typing import Any, Dict, Optional
+
+ from jinja2 import Template
+
+
+ @dataclass
+ class Prompt:
+     name: str
+     template: str
+     version_id: Optional[str] = None
+     metadata: Optional[Dict[str, Any]] = None
+
+     def render(self, **kwargs) -> str:
+         return Template(self.template).render(**kwargs)
+
+     def with_version(self, version_id: str) -> "Prompt":
+         return Prompt(
+             name=self.name,
+             template=self.template,
+             version_id=version_id,
+             metadata=self.metadata,
+         )
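
Prompt is a thin Jinja2 wrapper that also carries an optional server-side version id. A small usage sketch; the template text and ids are illustrative:

    from agentgear import Prompt

    greeting = Prompt(name="greeting", template="Hello {{ name }}!")
    greeting.render(name="Agent")                    # -> "Hello Agent!"

    # Attach a version id (placeholder) without mutating the original instance.
    pinned = greeting.with_version("<prompt-version-id>")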
agentgear/sdk/trace.py ADDED
@@ -0,0 +1,59 @@
+ import time
+ from typing import Any, Optional
+
+ from agentgear.sdk.client import AgentGearClient
+
+
+ class trace:
+     """
+     Context manager for recording spans.
+     Usage:
+         with trace(client, run_id, name="step"):
+             ...
+     """
+
+     def __init__(
+         self,
+         client: AgentGearClient,
+         run_id: str,
+         name: str,
+         parent_id: Optional[str] = None,
+         metadata: Optional[dict[str, Any]] = None,
+     ):
+         self.client = client
+         self.run_id = run_id
+         self.name = name
+         self.parent_id = parent_id
+         self.metadata = metadata
+         self.span_id: Optional[str] = None
+         self._start = None
+
+     def __enter__(self):
+         self._start = time.perf_counter()
+         return self
+
+     def __exit__(self, exc_type, exc, tb):
+         latency_ms = (time.perf_counter() - self._start) * 1000 if self._start else None
+         try:
+             span = self.client.log_span(
+                 run_id=self.run_id,
+                 name=self.name,
+                 parent_id=self.parent_id,
+                 latency_ms=latency_ms,
+                 metadata=self.metadata,
+             )
+             self.span_id = span.get("id")
+         except Exception:
+             # Best-effort
+             pass
+         return False
+
+     def child(self, name: str, metadata: Optional[dict[str, Any]] = None) -> "trace":
+         """Create a child span context."""
+         return trace(
+             client=self.client,
+             run_id=self.run_id,
+             name=name,
+             parent_id=self.span_id or self.parent_id,
+             metadata=metadata,
+         )
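
trace measures wall-clock latency and logs the span when the context exits; logging failures are swallowed. A sketch of nesting spans via child(); the project and run ids are placeholders, and note that span_id is only populated after the parent context exits, so children created afterwards link to it:

    from agentgear import AgentGearClient, trace

    client = AgentGearClient(project_id="<project-id>", local=True)  # placeholder project id
    run = client.log_run(name="pipeline")

    with trace(client, run["id"], name="retrieve") as parent:
        ...  # retrieval work

    # parent.span_id was recorded on exit; child() uses it as parent_id.
    with parent.child("rerank"):
        ...  # reranking work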
agentgear/server/__init__.py ADDED
@@ -0,0 +1 @@
+ # AgentGear server package
agentgear/server/app/__init__.py ADDED
@@ -0,0 +1 @@
+ # AgentGear FastAPI application package
agentgear/server/app/api/__init__.py ADDED
@@ -0,0 +1 @@
+ # API package marker