cycls-0.0.2.74-py3-none-any.whl → cycls-0.0.2.75-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
cycls/grpc/__init__.py ADDED
@@ -0,0 +1,3 @@
+ from .client import RuntimeClient
+
+ __all__ = ["RuntimeClient"]
cycls/grpc/client.py ADDED
@@ -0,0 +1,71 @@
+ import cloudpickle
+ import grpc
+
+ try:
+     from . import runtime_pb2
+     from . import runtime_pb2_grpc
+ except ImportError:
+     import runtime_pb2
+     import runtime_pb2_grpc
+
+
+ class RuntimeClient:
+     def __init__(self, host='localhost', port=50051, timeout=None):
+         self.host = host
+         self.port = port
+         self.timeout = timeout
+         self._channel = None
+         self._stub = None
+
+     def _connect(self):
+         if self._channel is None:
+             self._channel = grpc.insecure_channel(f'{self.host}:{self.port}')
+             self._stub = runtime_pb2_grpc.RuntimeStub(self._channel)
+         return self._stub
+
+     def execute(self, func, *args, **kwargs):
+         """Execute function and yield streamed results."""
+         stub = self._connect()
+         payload = cloudpickle.dumps((func, args, kwargs))
+         request = runtime_pb2.Request(payload=payload)
+
+         for response in stub.Execute(request, timeout=self.timeout):
+             result = cloudpickle.loads(response.data)
+             if response.error:
+                 raise RuntimeError(result)
+             yield result
+
+     def call(self, func, *args, **kwargs):
+         """Execute and return single result (or list if multiple)."""
+         results = list(self.execute(func, *args, **kwargs))
+         return results[0] if len(results) == 1 else results
+
+     def fire(self, func, *args, **kwargs):
+         """Fire off execution without waiting for response."""
+         stub = self._connect()
+         payload = cloudpickle.dumps((func, args, kwargs))
+         request = runtime_pb2.Request(payload=payload)
+         # Start the stream - gRPC sends request immediately
+         self._active_stream = stub.Execute(request)
+
+     def wait_ready(self, timeout=10):
+         """Wait for channel to be ready."""
+         if self._channel is None:
+             self._connect()
+         try:
+             grpc.channel_ready_future(self._channel).result(timeout=timeout)
+             return True
+         except grpc.FutureTimeoutError:
+             return False
+
+     def close(self):
+         if self._channel:
+             self._channel.close()
+         self._channel = None
+         self._stub = None
+
+     def __enter__(self):
+         return self
+
+     def __exit__(self, *args):
+         self.close()
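A minimal usage sketch for the new client (not part of the package): it assumes a runtime server — cycls/grpc/server.py below — is already listening on localhost:50051.

```python
# Hypothetical example: RuntimeClient pickles (func, args, kwargs) with
# cloudpickle and streams back whatever the server yields.
from cycls.grpc import RuntimeClient

def add(a, b):
    return a + b

def count(n):
    for i in range(n):
        yield i

with RuntimeClient(host='localhost', port=50051) as client:
    print(client.call(add, 2, 3))           # single result -> 5
    for item in client.execute(count, 3):   # streamed results -> 0, 1, 2
        print(item)
```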
cycls/grpc/runtime.proto ADDED
@@ -0,0 +1,18 @@
+ syntax = "proto3";
+
+ package runtime;
+
+ service Runtime {
+   rpc Execute(Request) returns (stream Response);
+ }
+
+ message Request {
+   bytes payload = 1;
+ }
+
+ message Response {
+   bytes data = 1;
+   bool error = 2;
+   bytes log = 3;
+   bool is_log = 4;
+ }
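The checked-in runtime_pb2.py and runtime_pb2_grpc.py files below appear to be grpcio-tools output for this proto. A sketch of regenerating them, assuming grpcio-tools is installed (the paths are assumptions):

```python
# Equivalent to: python -m grpc_tools.protoc -I cycls/grpc \
#     --python_out=cycls/grpc --grpc_python_out=cycls/grpc cycls/grpc/runtime.proto
from grpc_tools import protoc

protoc.main([
    'grpc_tools.protoc',
    '-Icycls/grpc',
    '--python_out=cycls/grpc',
    '--grpc_python_out=cycls/grpc',
    'cycls/grpc/runtime.proto',
])
```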
cycls/grpc/runtime_pb2.py ADDED
@@ -0,0 +1,40 @@
+ # -*- coding: utf-8 -*-
+ # Generated by the protocol buffer compiler.  DO NOT EDIT!
+ # NO CHECKED-IN PROTOBUF GENCODE
+ # source: runtime.proto
+ # Protobuf Python Version: 6.31.1
+ """Generated protocol buffer code."""
+ from google.protobuf import descriptor as _descriptor
+ from google.protobuf import descriptor_pool as _descriptor_pool
+ from google.protobuf import runtime_version as _runtime_version
+ from google.protobuf import symbol_database as _symbol_database
+ from google.protobuf.internal import builder as _builder
+ _runtime_version.ValidateProtobufRuntimeVersion(
+     _runtime_version.Domain.PUBLIC,
+     6,
+     31,
+     1,
+     '',
+     'runtime.proto'
+ )
+ # @@protoc_insertion_point(imports)
+
+ _sym_db = _symbol_database.Default()
+
+
+
+
+ DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\rruntime.proto\x12\x07runtime\"\x1a\n\x07Request\x12\x0f\n\x07payload\x18\x01 \x01(\x0c\"D\n\x08Response\x12\x0c\n\x04\x64\x61ta\x18\x01 \x01(\x0c\x12\r\n\x05\x65rror\x18\x02 \x01(\x08\x12\x0b\n\x03log\x18\x03 \x01(\x0c\x12\x0e\n\x06is_log\x18\x04 \x01(\x08\x32;\n\x07Runtime\x12\x30\n\x07\x45xecute\x12\x10.runtime.Request\x1a\x11.runtime.Response0\x01\x62\x06proto3')
+
+ _globals = globals()
+ _builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
+ _builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'runtime_pb2', _globals)
+ if not _descriptor._USE_C_DESCRIPTORS:
+     DESCRIPTOR._loaded_options = None
+     _globals['_REQUEST']._serialized_start=26
+     _globals['_REQUEST']._serialized_end=52
+     _globals['_RESPONSE']._serialized_start=54
+     _globals['_RESPONSE']._serialized_end=122
+     _globals['_RUNTIME']._serialized_start=124
+     _globals['_RUNTIME']._serialized_end=183
+ # @@protoc_insertion_point(module_scope)
cycls/grpc/runtime_pb2_grpc.py ADDED
@@ -0,0 +1,100 @@
+ # Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
+ """Client and server classes corresponding to protobuf-defined services."""
+ import grpc
+ import warnings
+
+ try:
+     from . import runtime_pb2 as runtime__pb2
+ except ImportError:
+     import runtime_pb2 as runtime__pb2
+
+ GRPC_GENERATED_VERSION = '1.76.0'
+ GRPC_VERSION = grpc.__version__
+ _version_not_supported = False
+
+ try:
+     from grpc._utilities import first_version_is_lower
+     _version_not_supported = first_version_is_lower(GRPC_VERSION, GRPC_GENERATED_VERSION)
+ except ImportError:
+     _version_not_supported = True
+
+ if _version_not_supported:
+     raise RuntimeError(
+         f'The grpc package installed is at version {GRPC_VERSION},'
+         + ' but the generated code in runtime_pb2_grpc.py depends on'
+         + f' grpcio>={GRPC_GENERATED_VERSION}.'
+         + f' Please upgrade your grpc module to grpcio>={GRPC_GENERATED_VERSION}'
+         + f' or downgrade your generated code using grpcio-tools<={GRPC_VERSION}.'
+     )
+
+
+ class RuntimeStub(object):
+     """Missing associated documentation comment in .proto file."""
+
+     def __init__(self, channel):
+         """Constructor.
+
+         Args:
+             channel: A grpc.Channel.
+         """
+         self.Execute = channel.unary_stream(
+             '/runtime.Runtime/Execute',
+             request_serializer=runtime__pb2.Request.SerializeToString,
+             response_deserializer=runtime__pb2.Response.FromString,
+             _registered_method=True)
+
+
+ class RuntimeServicer(object):
+     """Missing associated documentation comment in .proto file."""
+
+     def Execute(self, request, context):
+         """Missing associated documentation comment in .proto file."""
+         context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+         context.set_details('Method not implemented!')
+         raise NotImplementedError('Method not implemented!')
+
+
+ def add_RuntimeServicer_to_server(servicer, server):
+     rpc_method_handlers = {
+         'Execute': grpc.unary_stream_rpc_method_handler(
+             servicer.Execute,
+             request_deserializer=runtime__pb2.Request.FromString,
+             response_serializer=runtime__pb2.Response.SerializeToString,
+         ),
+     }
+     generic_handler = grpc.method_handlers_generic_handler(
+         'runtime.Runtime', rpc_method_handlers)
+     server.add_generic_rpc_handlers((generic_handler,))
+     server.add_registered_method_handlers('runtime.Runtime', rpc_method_handlers)
+
+
+ # This class is part of an EXPERIMENTAL API.
+ class Runtime(object):
+     """Missing associated documentation comment in .proto file."""
+
+     @staticmethod
+     def Execute(request,
+                 target,
+                 options=(),
+                 channel_credentials=None,
+                 call_credentials=None,
+                 insecure=False,
+                 compression=None,
+                 wait_for_ready=None,
+                 timeout=None,
+                 metadata=None):
+         return grpc.experimental.unary_stream(
+             request,
+             target,
+             '/runtime.Runtime/Execute',
+             runtime__pb2.Request.SerializeToString,
+             runtime__pb2.Response.FromString,
+             options,
+             channel_credentials,
+             insecure,
+             call_credentials,
+             compression,
+             wait_for_ready,
+             timeout,
+             metadata,
+             _registered_method=True)
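For reference, the generated stub can also be driven directly, without RuntimeClient — a sketch assuming a server on localhost:50051:

```python
# Hypothetical low-level call through the generated classes.
import cloudpickle
import grpc
from cycls.grpc import runtime_pb2, runtime_pb2_grpc

channel = grpc.insecure_channel('localhost:50051')
stub = runtime_pb2_grpc.RuntimeStub(channel)
request = runtime_pb2.Request(payload=cloudpickle.dumps((len, ('abc',), {})))
for response in stub.Execute(request):       # unary-stream RPC
    print(cloudpickle.loads(response.data))  # -> 3
channel.close()
```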
cycls/grpc/server.py ADDED
@@ -0,0 +1,60 @@
+ import asyncio
+ import inspect
+ import traceback
+ import cloudpickle
+ import grpc
+ from concurrent import futures
+
+ try:
+     from . import runtime_pb2
+     from . import runtime_pb2_grpc
+ except ImportError:
+     import runtime_pb2
+     import runtime_pb2_grpc
+
+
+ class RuntimeServicer(runtime_pb2_grpc.RuntimeServicer):
+     def Execute(self, request, context):
+         try:
+             func, args, kwargs = cloudpickle.loads(request.payload)
+             result = func(*args, **kwargs)
+
+             # Handle coroutines
+             if inspect.iscoroutine(result):
+                 result = asyncio.run(result)
+
+             # Handle async generators
+             if inspect.isasyncgen(result):
+                 async def collect():
+                     items = []
+                     async for item in result:
+                         items.append(item)
+                     return items
+                 result = iter(asyncio.run(collect()))
+
+             # Stream results for generators, single response otherwise
+             if inspect.isgenerator(result):
+                 for chunk in result:
+                     yield runtime_pb2.Response(data=cloudpickle.dumps(chunk))
+             else:
+                 yield runtime_pb2.Response(data=cloudpickle.dumps(result))
+
+         except Exception as e:
+             error_msg = f"{type(e).__name__}: {e}\n{traceback.format_exc()}"
+             yield runtime_pb2.Response(data=cloudpickle.dumps(error_msg), error=True)
+
+
+ def serve(port=50051):
+     server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
+     runtime_pb2_grpc.add_RuntimeServicer_to_server(RuntimeServicer(), server)
+     server.add_insecure_port(f'[::]:{port}')
+     server.start()
+     server.wait_for_termination()
+
+
+ if __name__ == '__main__':
+     import argparse
+     parser = argparse.ArgumentParser()
+     parser.add_argument('--port', type=int, default=50051)
+     args = parser.parse_args()
+     serve(args.port)
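A self-contained round trip (a sketch, not package code): the package runs serve() inside a Docker container, but a daemon thread is enough to exercise the protocol locally.

```python
# Hypothetical smoke test for the server/client pair; port 50052 is arbitrary.
import threading
from cycls.grpc import RuntimeClient
from cycls.grpc.server import serve

threading.Thread(target=serve, kwargs={'port': 50052}, daemon=True).start()

def shout(text):
    return text.upper()

client = RuntimeClient(port=50052)
if client.wait_ready(timeout=10):
    print(client.call(shout, 'hello'))  # -> 'HELLO'
client.close()
```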
cycls/runtime.py CHANGED
@@ -1,3 +1,4 @@
+ import contextlib
  import docker
  import cloudpickle
  import tempfile
@@ -6,44 +7,27 @@ import os
  import sys
  import shutil
  from pathlib import Path
- from contextlib import contextmanager
  import tarfile

- # Enable BuildKit for faster builds with better caching
- os.environ["DOCKER_BUILDKIT"] = "1"
-
- # --- Top-Level Helper Functions ---
+ from .grpc import RuntimeClient

- def _bootstrap_script(payload_file: str, result_file: str) -> str:
-     """Generates the Python script that runs inside the Docker container."""
-     return f"""
- import cloudpickle
- import sys
- import os
- import traceback
- from pathlib import Path
-
- if __name__ == "__main__":
-     io_dir = Path(sys.argv[1])
-     payload_path = io_dir / '{payload_file}'
-     result_path = io_dir / '{result_file}'
-
-     try:
-         with open(payload_path, 'rb') as f:
-             func, args, kwargs = cloudpickle.load(f)
+ os.environ["DOCKER_BUILDKIT"] = "1"

-         result = func(*args, **kwargs)
+ GRPC_PORT = 50051
+ BASE_IMAGE = "ghcr.io/cycls/base:python3.12"
+ BASE_PACKAGES = {"cloudpickle", "cryptography", "fastapi", "fastapi[standard]",
+                  "pydantic", "pyjwt", "uvicorn", "uvicorn[standard]", "httpx"}
+ GRPC_PACKAGES = {"grpcio", "protobuf"}

-         with open(result_path, 'wb') as f:
-             cloudpickle.dump(result, f)
+ # Simple entrypoint for deployed services - loads pickled function+args and runs it
+ ENTRYPOINT_PY = '''import cloudpickle
+ with open("/app/function.pkl", "rb") as f:
+     func, args, kwargs = cloudpickle.load(f)
+ func(*args, **kwargs)
+ '''

-     except Exception as e:
-         traceback.print_exc(file=sys.stderr)
-         sys.exit(1)
- """

  def _hash_path(path_str: str) -> str:
-     """Hashes a file or a directory's contents to create a deterministic signature."""
      h = hashlib.sha256()
      p = Path(path_str)
      if p.is_file():
@@ -56,478 +40,416 @@ def _hash_path(path_str: str) -> str:
          files.sort()
          for name in files:
              filepath = Path(root) / name
-             relpath = filepath.relative_to(p)
-             h.update(str(relpath).encode())
+             h.update(str(filepath.relative_to(p)).encode())
              with filepath.open('rb') as f:
                  while chunk := f.read(65536):
                      h.update(chunk)
      return h.hexdigest()

+
  def _copy_path(src_path: Path, dest_path: Path):
-     """Recursively copies a file or directory to a destination path."""
      if src_path.is_dir():
          shutil.copytree(src_path, dest_path, dirs_exist_ok=True)
      else:
          dest_path.parent.mkdir(parents=True, exist_ok=True)
          shutil.copy(src_path, dest_path)

- # Pre-built base image with common dependencies
- BASE_IMAGE = "ghcr.io/cycls/base:python3.12"
- BASE_PACKAGES = {
-     "cloudpickle", "cryptography", "fastapi", "fastapi[standard]",
-     "pydantic", "pyjwt", "uvicorn", "uvicorn[standard]", "httpx"
- }
-
- # --- Main Runtime Class ---

  class Runtime:
-     """
-     Handles building a Docker image and executing a function within a container.
-     """
-     def __init__(self, func, name, python_version=None, pip_packages=None, apt_packages=None, run_commands=None, copy=None, base_url=None, api_key=None, base_image=None):
+     """Executes functions in Docker containers. Uses gRPC for local dev, pickle for deploy."""
+
+     def __init__(self, func, name, python_version=None, pip_packages=None, apt_packages=None,
+                  run_commands=None, copy=None, base_url=None, api_key=None, base_image=None):
          self.func = func
+         self.name = name
          self.python_version = python_version or f"{sys.version_info.major}.{sys.version_info.minor}"
          self.apt_packages = sorted(apt_packages or [])
          self.run_commands = sorted(run_commands or [])
          self.copy = copy or {}
-         self.name = name
-         self.base_url = base_url or "https://service-core-280879789566.me-central1.run.app"
-         self.image_prefix = f"cycls/{name}"
-
-         # Use pre-built base image by default, filter out already-installed packages
          self.base_image = base_image or BASE_IMAGE
-         all_pip = set(pip_packages or [])
-         self.pip_packages = sorted(all_pip - BASE_PACKAGES) if self.base_image == BASE_IMAGE else sorted(all_pip)
-
-         # Standard paths and filenames used inside the container
-         self.io_dir = "/app/io"
-         self.runner_filename = "runner.py"
-         self.runner_path = f"/app/{self.runner_filename}"
-         self.payload_file = "payload.pkl"
-         self.result_file = "result.pkl"
+         self.base_url = base_url or "https://service-core-280879789566.me-central1.run.app"
+         self.api_key = api_key

-         self.runner_script = _bootstrap_script(self.payload_file, self.result_file)
-         self.tag = self._generate_base_tag()
+         # Compute pip packages (gRPC only needed for local dev, added dynamically)
+         user_packages = set(pip_packages or [])
+         if self.base_image == BASE_IMAGE:
+             self.pip_packages = sorted(user_packages - BASE_PACKAGES)
+         else:
+             self.pip_packages = sorted(user_packages | {"cloudpickle"})

-         self.api_key = api_key
+         self.image_prefix = f"cycls/{name}"
+         self.managed_label = "cycls.runtime"
          self._docker_client = None
-         self.managed_label = f"cycls.runtime"
+
+         # Local dev state (gRPC container)
+         self._container = None
+         self._client = None
+         self._host_port = None

      @property
      def docker_client(self):
-         """
-         Lazily initializes and returns a Docker client.
-         This ensures Docker is only required for methods that actually use it.
-         """
+         """Lazily initializes and returns a Docker client."""
          if self._docker_client is None:
              try:
-                 print("🐳 Initializing Docker client...")
+                 print("Initializing Docker client...")
                  client = docker.from_env()
                  client.ping()
                  self._docker_client = client
              except docker.errors.DockerException:
-                 print("\n❌ Error: Docker is not running or is not installed.")
-                 print("   This is required for local 'run' and 'build' operations.")
-                 print("   Please start the Docker daemon and try again.")
+                 print("\nError: Docker is not running or is not installed.")
+                 print("Please start the Docker daemon and try again.")
                  sys.exit(1)
          return self._docker_client
-
-     # docker system prune -af
-     def _perform_auto_cleanup(self):
-         """Performs a simple, automatic cleanup of old Docker resources."""
+
+     def _perform_auto_cleanup(self, keep_tag=None):
+         """Clean up old containers and dev images (preserve deploy-* images)."""
          try:
+             # Remove old containers
+             current_id = self._container.id if self._container else None
              for container in self.docker_client.containers.list(all=True, filters={"label": self.managed_label}):
-                 container.remove(force=True)
-
-             cleaned_images = 0
-             for image in self.docker_client.images.list(all=True, filters={"label": self.managed_label}):
-                 is_current = self.tag in image.tags
-                 is_deployable = any(t.startswith(f"{self.image_prefix}:deploy-") for t in image.tags)
-
-                 if not is_current and not is_deployable:
+                 if container.id != current_id:
+                     container.remove(force=True)
+
+             # Remove old dev images globally (keep deploy-* and current)
+             cleaned = 0
+             for image in self.docker_client.images.list(filters={"label": self.managed_label}):
+                 is_deploy = any(":deploy-" in t for t in image.tags)
+                 is_current = keep_tag and keep_tag in image.tags
+                 if not is_deploy and not is_current:
                      self.docker_client.images.remove(image.id, force=True)
-                     cleaned_images += 1
-
-             if cleaned_images > 0:
-                 print(f"🧹 Cleaned up {cleaned_images} old image version(s).")
-
-             self.docker_client.images.prune(filters={'label': self.managed_label})
-
+                     cleaned += 1
+             if cleaned:
+                 print(f"Cleaned up {cleaned} old dev image(s).")
          except Exception as e:
-             print(f"⚠️ An error occurred during cleanup: {e}")
-
-     def _generate_base_tag(self) -> str:
-         """Creates a unique tag for the base Docker image based on its dependencies."""
-         signature_parts = [
-             self.base_image,
-             "".join(self.python_version),
-             "".join(self.pip_packages),
-             "".join(self.apt_packages),
-             "".join(self.run_commands),
-             self.runner_script
-         ]
+             print(f"Warning: cleanup error: {e}")
+
+     def _image_tag(self, extra_parts=None) -> str:
+         """Creates a unique tag based on image configuration."""
+         parts = [self.base_image, self.python_version, "".join(self.pip_packages),
+                  "".join(self.apt_packages), "".join(self.run_commands)]
          for src, dst in sorted(self.copy.items()):
              if not Path(src).exists():
                  raise FileNotFoundError(f"Path in 'copy' not found: {src}")
-             content_hash = _hash_path(src)
-             signature_parts.append(f"copy:{src}>{dst}:{content_hash}")
+             parts.append(f"{src}>{dst}:{_hash_path(src)}")
+         if extra_parts:
+             parts.extend(extra_parts)
+         return f"{self.image_prefix}:{hashlib.sha256(''.join(parts).encode()).hexdigest()[:16]}"

-         signature = "".join(signature_parts)
-         image_hash = hashlib.sha256(signature.encode()).hexdigest()
-         return f"{self.image_prefix}:{image_hash[:16]}"
+     def _dockerfile_preamble(self, pip_extras=None) -> str:
+         """Common Dockerfile setup: base image, apt, pip, run commands, copy."""
+         lines = [f"FROM {self.base_image}"]

-     def _generate_dockerfile(self, port=None) -> str:
-         """Generates a multi-stage Dockerfile string."""
-         using_base = self.base_image == BASE_IMAGE
+         if self.base_image != BASE_IMAGE:
+             lines.append("ENV PIP_ROOT_USER_ACTION=ignore PYTHONUNBUFFERED=1")
+             lines.append("WORKDIR /app")

-         # Only install extra packages not in base image (use uv if available in base)
-         run_pip_install = (
-             f"RUN uv pip install --system --no-cache {' '.join(self.pip_packages)}"
-             if self.pip_packages else ""
-         )
-         run_apt_install = (
-             f"RUN apt-get update && apt-get install -y --no-install-recommends {' '.join(self.apt_packages)}"
-             if self.apt_packages else ""
-         )
-         run_shell_commands = "\n".join([f"RUN {cmd}" for cmd in self.run_commands]) if self.run_commands else ""
-         copy_lines = "\n".join([f"COPY context_files/{dst} {dst}" for dst in self.copy.values()])
-         expose_line = f"EXPOSE {port}" if port else ""
-
-         # Skip env/mkdir/workdir if using pre-built base (already configured)
-         env_lines = "" if using_base else f"""ENV PIP_ROOT_USER_ACTION=ignore \\
-     PYTHONUNBUFFERED=1
- RUN mkdir -p {self.io_dir}
- WORKDIR /app"""
-
-         return f"""
- # STAGE 1: Base image with all dependencies
- FROM {self.base_image} as base
- {env_lines}
- {run_apt_install}
- {run_pip_install}
- {run_shell_commands}
- {copy_lines}
- COPY {self.runner_filename} {self.runner_path}
- ENTRYPOINT ["python", "{self.runner_path}", "{self.io_dir}"]
-
- # STAGE 2: Final deployable image with the payload "baked in"
- FROM base
- {expose_line}
- COPY {self.payload_file} {self.io_dir}/
- """
+         if self.apt_packages:
+             lines.append(f"RUN apt-get update && apt-get install -y --no-install-recommends {' '.join(self.apt_packages)}")

-     def _prepare_build_context(self, workdir: Path, include_payload=False, args=None, kwargs=None):
-         """Prepares a complete build context in the given directory."""
-         port = kwargs.get('port') if kwargs else None
-
-         # Create a dedicated subdirectory for all user-copied files
-         context_files_dir = workdir / "context_files"
-         context_files_dir.mkdir()
+         all_pip = list(self.pip_packages) + list(pip_extras or [])
+         if all_pip:
+             lines.append(f"RUN uv pip install --system --no-cache {' '.join(all_pip)}")
+
+         for cmd in self.run_commands:
+             lines.append(f"RUN {cmd}")

-         if self.copy:
-             for src, dst in self.copy.items():
-                 src_path = Path(src).resolve()  # Resolve to an absolute path
-                 dest_in_context = context_files_dir / dst
-                 _copy_path(src_path, dest_in_context)
+         for dst in self.copy.values():
+             lines.append(f"COPY context_files/{dst} /app/{dst}")

-         (workdir / "Dockerfile").write_text(self._generate_dockerfile(port=port))
-         (workdir / self.runner_filename).write_text(self.runner_script)
+         return "\n".join(lines)

-         if include_payload:
-             payload_bytes = cloudpickle.dumps((self.func, args or [], kwargs or {}))
-             (workdir / self.payload_file).write_bytes(payload_bytes)
+     def _dockerfile_grpc(self) -> str:
+         """Dockerfile for local dev: gRPC server."""
+         return f"""{self._dockerfile_preamble(pip_extras=GRPC_PACKAGES)}
+ COPY grpc_runtime/ /app/grpc_runtime/
+ EXPOSE {GRPC_PORT}
+ CMD ["python", "-m", "grpc_runtime.server", "--port", "{GRPC_PORT}"]
+ """
+
+     def _dockerfile_deploy(self, port: int) -> str:
+         """Dockerfile for deploy: baked-in function via pickle."""
+         return f"""{self._dockerfile_preamble()}
+ COPY function.pkl /app/function.pkl
+ COPY entrypoint.py /app/entrypoint.py
+ EXPOSE {port}
+ CMD ["python", "entrypoint.py"]
+ """

-     def _build_image_if_needed(self):
-         """Checks if the base Docker image exists locally and builds it if not."""
+     def _copy_user_files(self, workdir: Path):
+         """Copy user-specified files to build context."""
+         context_files_dir = workdir / "context_files"
+         context_files_dir.mkdir()
+         for src, dst in self.copy.items():
+             _copy_path(Path(src).resolve(), context_files_dir / dst)
+
+     def _build_image(self, tag: str, workdir: Path) -> str:
+         """Build a Docker image from a prepared context."""
+         print("--- Docker Build Logs ---")
          try:
-             self.docker_client.images.get(self.tag)
-             print(f"✅ Found cached base image: {self.tag}")
-             return
+             for chunk in self.docker_client.api.build(
+                 path=str(workdir), tag=tag, forcerm=True, decode=True,
+                 labels={self.managed_label: "true"}
+             ):
+                 if 'stream' in chunk:
+                     print(chunk['stream'].strip())
+             print("-------------------------")
+             print(f"Image built: {tag}")
+             return tag
+         except docker.errors.BuildError as e:
+             print(f"\nDocker build failed: {e}")
+             raise
+
+     def _ensure_grpc_image(self) -> str:
+         """Build local dev image with gRPC server if needed."""
+         tag = self._image_tag(extra_parts=["grpc-v2"])
+         try:
+             self.docker_client.images.get(tag)
+             print(f"Found cached image: {tag}")
+             return tag
          except docker.errors.ImageNotFound:
-             print(f"🛠️ Building new base image: {self.tag}")
-
-         with tempfile.TemporaryDirectory() as tmpdir_str:
-             tmpdir = Path(tmpdir_str)
-             # Prepare context without payload for the base image
-             self._prepare_build_context(tmpdir)
-
-             print("--- 🐳 Docker Build Logs (Base Image) ---")
-             response_generator = self.docker_client.api.build(
-                 path=str(tmpdir),
-                 tag=self.tag,
-                 forcerm=True,
-                 decode=True,
-                 target='base',  # Only build the 'base' stage
-                 labels={self.managed_label: "true"},  # image label
-             )
-             try:
-                 for chunk in response_generator:
-                     if 'stream' in chunk:
-                         print(chunk['stream'].strip())
-                 print("----------------------------------------")
-                 print(f"✅ Base image built successfully: {self.tag}")
-             except docker.errors.BuildError as e:
-                 print(f"\n❌ Docker build failed. Reason: {e}")
-                 raise
-
-     @contextmanager
-     def runner(self, *args, **kwargs):
-         """Context manager to set up, run, and tear down the container for local execution."""
-         port = kwargs.get('port', None)
-         self._perform_auto_cleanup()
-         self._build_image_if_needed()
-         container = None
-         ports_mapping = {f'{port}/tcp': port} if port else None
+             print(f"Building new image: {tag}")

-         with tempfile.TemporaryDirectory() as tmpdir_str:
-             tmpdir = Path(tmpdir_str)
-             payload_path = tmpdir / self.payload_file
-             result_path = tmpdir / self.result_file
+         with tempfile.TemporaryDirectory() as tmpdir:
+             workdir = Path(tmpdir)
+             self._copy_user_files(workdir)
+             (workdir / "Dockerfile").write_text(self._dockerfile_grpc())

-             with payload_path.open('wb') as f:
-                 cloudpickle.dump((self.func, args, kwargs), f)
+             # Copy gRPC runtime
+             grpc_src = Path(__file__).parent / "grpc"
+             shutil.copytree(grpc_src, workdir / "grpc_runtime",
+                             ignore=shutil.ignore_patterns('*.proto', '__pycache__'))

+             return self._build_image(tag, workdir)
+
+     def _ensure_container(self, service_port=None):
+         """Start container if not running, return gRPC client."""
+         if self._client and self._container:
+             try:
+                 self._container.reload()
+                 if self._container.status == 'running':
+                     return self._client
+             except:
+                 pass
+             self._cleanup_container()
+
+         tag = self._ensure_grpc_image()
+         self._perform_auto_cleanup(keep_tag=tag)
+
+         # Port mappings
+         ports = {f'{GRPC_PORT}/tcp': None}
+         if service_port:
+             ports[f'{service_port}/tcp'] = service_port
+
+         self._container = self.docker_client.containers.run(
+             tag, detach=True, ports=ports, labels={self.managed_label: "true"}
+         )
+         self._container.reload()
+         self._host_port = int(self._container.ports[f'{GRPC_PORT}/tcp'][0]['HostPort'])
+
+         self._client = RuntimeClient(port=self._host_port)
+         if not self._client.wait_ready(timeout=10):
+             raise RuntimeError("Container failed to start")
+         print(f"Container ready on port {self._host_port}")
+         return self._client
+
+     def _cleanup_container(self):
+         """Stop and remove the warm container."""
+         if self._client:
+             self._client.close()
+             self._client = None
+         if self._container:
              try:
-                 container = self.docker_client.containers.create(
-                     image=self.tag,
-                     volumes={str(tmpdir): {'bind': self.io_dir, 'mode': 'rw'}},
-                     ports=ports_mapping,
-                     labels={self.managed_label: "true"}  # container label
-                 )
-                 container.start()
-                 yield container, result_path
-             finally:
-                 if container:
-                     print("\n🧹 Cleaning up container...")
-                     try:
-                         container.stop(timeout=5)
-                         container.remove()
-                         print("✅ Container stopped and removed.")
-                     except docker.errors.APIError as e:
-                         print(f"⚠️ Could not clean up container: {e}")
+                 self._container.stop(timeout=3)
+                 self._container.remove()
+             except:
+                 pass
+             self._container = None
+             self._host_port = None

      def run(self, *args, **kwargs):
-         """Executes the function in a new Docker container and waits for the result."""
-         print(f"🚀 Running function '{self.name}' in container...")
+         """Execute the function in a container and return the result."""
+         service_port = kwargs.get('port')
+         print(f"Running '{self.name}'...")
          try:
-             with self.runner(*args, **kwargs) as (container, result_path):
-                 print("--- 🪵 Container Logs (streaming) ---")
-                 for chunk in container.logs(stream=True, follow=True):
-                     print(chunk.decode('utf-8').strip())
-                 print("------------------------------------")
-
-                 result_status = container.wait()
-                 if result_status['StatusCode'] != 0:
-                     print(f"\n❌ Error: Container exited with code: {result_status['StatusCode']}")
-                     return None
-
-                 if result_path.exists():
-                     with result_path.open('rb') as f:
-                         result = cloudpickle.load(f)
-                     print("✅ Function executed successfully.")
-                     return result
-                 else:
-                     print("\n❌ Error: Result file not found.")
-                     return None
-         except (KeyboardInterrupt, docker.errors.DockerException) as e:
-             print(f"\n🛑 Operation stopped: {e}")
+             client = self._ensure_container(service_port=service_port)
+
+             # Blocking service: fire gRPC, stream Docker logs
+             if service_port:
+                 client.fire(self.func, *args, **kwargs)
+                 print(f"Service running on port {service_port}")
+                 print("--- 🪵 Container Logs ---")
+                 for chunk in self._container.logs(stream=True, follow=True):
+                     print(chunk.decode(), end='')
+                 return None
+
+             # Regular function: execute, then print logs
+             result = client.call(self.func, *args, **kwargs)
+             logs = self._container.logs().decode()
+             if logs.strip():
+                 print("--- 🪵 Container Logs ---")
+                 print(logs, end='')
+                 print("-------------------------")
+             return result
+
+         except KeyboardInterrupt:
+             print("\n-------------------------")
+             print("Stopping...")
+             self._cleanup_container()
              return None
+         except Exception as e:
+             print(f"Error: {e}")
+             return None
+
+     def stream(self, *args, **kwargs):
+         """Execute the function and yield streamed results."""
+         service_port = kwargs.get('port')
+         client = self._ensure_container(service_port=service_port)
+         yield from client.execute(self.func, *args, **kwargs)
+
+     @contextlib.contextmanager
+     def runner(self, *args, **kwargs):
+         """Context manager for running a service. Yields (container, client)."""
+         service_port = kwargs.get('port')
+         try:
+             client = self._ensure_container(service_port=service_port)
+             client.fire(self.func, *args, **kwargs)
+             yield self._container, client
+         finally:
+             self._cleanup_container()

      def watch(self, *args, **kwargs):
-         """Runs the container with file watching - restarts script on changes."""
+         """Run with file watching - restarts script on changes."""
          try:
              from watchfiles import watch as watchfiles_watch
          except ImportError:
-             print("watchfiles not installed. Run: pip install watchfiles")
+             print("watchfiles not installed. Run: pip install watchfiles")
              return

          import inspect
          import subprocess

-         # Get the main script (the outermost .py file in the stack)
+         # Find the user's script (outside cycls package)
+         cycls_pkg = Path(__file__).parent.resolve()
          main_script = None
          for frame_info in inspect.stack():
-             filename = frame_info.filename
-             if filename.endswith('.py') and not filename.startswith('<'):
-                 main_script = Path(filename).resolve()
-                 # main_script is now the outermost/first script in the call chain
-
-         # Build watch paths: main script + copy sources
-         watch_paths = []
-         if main_script and main_script.exists():
-             watch_paths.append(main_script)
-         watch_paths.extend([Path(src).resolve() for src in self.copy.keys() if Path(src).exists()])
+             filepath = Path(frame_info.filename).resolve()
+             if filepath.suffix == '.py' and not str(filepath).startswith(str(cycls_pkg)):
+                 main_script = filepath
+                 break

-         if not watch_paths:
-             print("⚠️ No files to watch. Running without watch mode.")
+         if not main_script:
+             print("Could not find script to watch.")
              return self.run(*args, **kwargs)

-         print(f"👀 Watching for changes:")
+         # Build watch paths
+         watch_paths = [main_script]
+         watch_paths.extend([Path(src).resolve() for src in self.copy.keys() if Path(src).exists()])
+
+         print(f"👀 Watching:")
          for p in watch_paths:
              print(f" {p}")
          print()

          while True:
-             # Run the script in a subprocess so we survive errors
-             print(f"🚀 Running {main_script.name}...")
+             print(f"🚀 Starting {main_script.name}...")
              proc = subprocess.Popen(
                  [sys.executable, str(main_script)],
-                 env={**os.environ, '_CYCLS_WATCH_CHILD': '1'}
+                 env={**os.environ, '_CYCLS_WATCH': '1'}
              )

              try:
-                 # Watch for changes
                  for changes in watchfiles_watch(*watch_paths):
-                     changed_files = [str(c[1]) for c in changes]
-                     print(f"\n🔄 Changes detected:")
-                     for f in changed_files:
-                         print(f" {f}")
+                     print(f"\n🔄 Changed: {[Path(c[1]).name for c in changes]}")
                      break

-                 print("\n🔄 Restarting...\n")
                  proc.terminate()
-                 try:
-                     proc.wait(timeout=3)
-                 except subprocess.TimeoutExpired:
-                     proc.kill()
-
+                 proc.wait(timeout=3)
+             except subprocess.TimeoutExpired:
+                 proc.kill()
              except KeyboardInterrupt:
-                 print("\n🛑 Stopping...")
+                 print("\nStopping...")
                  proc.terminate()
-                 try:
-                     proc.wait(timeout=3)
-                 except subprocess.TimeoutExpired:
-                     proc.kill()
+                 proc.wait(timeout=3)
                  return

+             print()
+
+     def _prepare_deploy_context(self, workdir: Path, port: int, args=(), kwargs=None):
+         """Prepare build context for deploy: pickle function+args + entrypoint."""
+         kwargs = kwargs or {}
+         kwargs['port'] = port  # Ensure port is in kwargs
+         self._copy_user_files(workdir)
+         (workdir / "Dockerfile").write_text(self._dockerfile_deploy(port))
+         (workdir / "entrypoint.py").write_text(ENTRYPOINT_PY)
+         with open(workdir / "function.pkl", "wb") as f:
+             cloudpickle.dump((self.func, args, kwargs), f)
+
      def build(self, *args, **kwargs):
-         """Builds a self-contained, deployable Docker image locally."""
-         print("📦 Building self-contained image for deployment...")
-         payload_hash = hashlib.sha256(cloudpickle.dumps((self.func, args, kwargs))).hexdigest()[:16]
-         final_tag = f"{self.image_prefix}:deploy-{payload_hash}"
+         """Build a deployable Docker image locally."""
+         port = kwargs.pop('port', 8080)
+         payload = cloudpickle.dumps((self.func, args, {**kwargs, 'port': port}))
+         tag = f"{self.image_prefix}:deploy-{hashlib.sha256(payload).hexdigest()[:16]}"

          try:
-             self.docker_client.images.get(final_tag)
-             print(f"Found cached deployable image: {final_tag}")
-             return final_tag
+             self.docker_client.images.get(tag)
+             print(f"Found cached image: {tag}")
+             return tag
          except docker.errors.ImageNotFound:
-             print(f"🛠️ Building new deployable image: {final_tag}")
+             print(f"Building: {tag}")

-         with tempfile.TemporaryDirectory() as tmpdir_str:
-             tmpdir = Path(tmpdir_str)
-             self._prepare_build_context(tmpdir, include_payload=True, args=args, kwargs=kwargs)
-
-             print("--- 🐳 Docker Build Logs (Final Image) ---")
-             response_generator = self.docker_client.api.build(
-                 path=str(tmpdir), tag=final_tag, forcerm=True, decode=True
-             )
-             try:
-                 for chunk in response_generator:
-                     if 'stream' in chunk:
-                         print(chunk['stream'].strip())
-                 print("-----------------------------------------")
-                 print(f"✅ Image built successfully: {final_tag}")
-                 port = kwargs.get('port') if kwargs else None
-                 print(f"🤖 Run: docker run --rm -d -p {port}:{port} {final_tag}")
-                 return final_tag
-             except docker.errors.BuildError as e:
-                 print(f"\n❌ Docker build failed. Reason: {e}")
-                 return None
+         with tempfile.TemporaryDirectory() as tmpdir:
+             workdir = Path(tmpdir)
+             self._prepare_deploy_context(workdir, port, args, kwargs)
+             self._build_image(tag, workdir)
+             print(f"Run: docker run --rm -p {port}:{port} {tag}")
+             return tag

      def deploy(self, *args, **kwargs):
-         """Deploys the function by sending it to a remote build server."""
+         """Deploy the function to a remote build server."""
          import requests

-         print(f"🚀 Preparing to deploy function '{self.name}'")
+         port = kwargs.pop('port', 8080)
+         print(f"Deploying '{self.name}'...")
+
+         payload = cloudpickle.dumps((self.func, args, {**kwargs, 'port': port}))
+         archive_name = f"{self.name}-{hashlib.sha256(payload).hexdigest()[:16]}.tar.gz"

-         # 1. Prepare the build context and compress it into a tarball
-         payload_hash = hashlib.sha256(cloudpickle.dumps((self.func, args, kwargs))).hexdigest()[:16]
-         archive_name = f"source-{self.tag.split(':')[1]}-{payload_hash}.tar.gz"
+         with tempfile.TemporaryDirectory() as tmpdir:
+             workdir = Path(tmpdir)
+             self._prepare_deploy_context(workdir, port, args, kwargs)

-         with tempfile.TemporaryDirectory() as tmpdir_str:
-             tmpdir = Path(tmpdir_str)
-             self._prepare_build_context(tmpdir, include_payload=True, args=args, kwargs=kwargs)
-
-             archive_path = Path(tmpdir_str) / archive_name
+             archive_path = workdir / archive_name
              with tarfile.open(archive_path, "w:gz") as tar:
-                 # Add all files from the context to the tar archive
-                 for f in tmpdir.glob("**/*"):
-                     if f.is_file():
-                         tar.add(f, arcname=f.relative_to(tmpdir))
-
-             # 2. Prepare the request payload
-             port = kwargs.get('port', 8080)
-             data_payload = {
-                 "function_name": self.name,
-                 "port": port,
-                 # "memory": "1Gi"  # You could make this a parameter
-             }
-             headers = {
-                 "X-API-Key": self.api_key
-             }
-
-             # 3. Upload to the deploy server
-             print("📦 Uploading build context to the deploy server...")
+                 for f in workdir.glob("**/*"):
+                     if f.is_file() and f != archive_path:
+                         tar.add(f, arcname=f.relative_to(workdir))
+
+             print("Uploading build context...")
              try:
                  with open(archive_path, 'rb') as f:
-                     files = {'source_archive': (archive_name, f, 'application/gzip')}
-
                      response = requests.post(
                          f"{self.base_url}/v1/deploy",
-                         data=data_payload,
-                         files=files,
-                         headers=headers,
-                         timeout=5*1800  # Set a long timeout for the entire process
+                         data={"function_name": self.name, "port": port},
+                         files={'source_archive': (archive_name, f, 'application/gzip')},
+                         headers={"X-API-Key": self.api_key},
+                         timeout=9000
                      )
-
-                 # 4. Handle the server's response
-                 response.raise_for_status()  # Raise an exception for 4xx/5xx errors
+                 response.raise_for_status()
                  result = response.json()
-
-                 print(f"✅ Deployment successful!")
-                 print(f"🔗 Service is available at: {result['url']}")
+                 print(f"Deployed: {result['url']}")
                  return result['url']

              except requests.exceptions.HTTPError as e:
-                 print(f" Deployment failed. Server returned error: {e.response.status_code}")
+                 print(f"Deploy failed: {e.response.status_code}")
                  try:
-                     # Try to print the detailed error message from the server
-                     print(f" Reason: {e.response.json()['detail']}")
+                     print(f" {e.response.json()['detail']}")
                  except:
-                     print(f" Reason: {e.response.text}")
+                     print(f" {e.response.text}")
                  return None
              except requests.exceptions.RequestException as e:
-                 print(f" Could not connect to the deploy server: {e}")
+                 print(f"Connection error: {e}")
                  return None

-     def Deploy(self, *args, **kwargs):
-         try:
-             from .shared import upload_file_to_cloud, build_and_deploy_to_cloud
-         except ImportError:
-             print("❌ Shared not found. This is an internal method.")
-             return None
-
-         port = kwargs.get('port', 8080)
-
-         with tempfile.TemporaryDirectory() as tmpdir_str:
-             tmpdir = Path(tmpdir_str)
-             self._prepare_build_context(tmpdir, include_payload=True, args=args, kwargs=kwargs)
-
-             archive_path = Path(tmpdir_str) / "source.tar.gz"
-             with tarfile.open(archive_path, "w:gz") as tar:
-                 for f in tmpdir.glob("**/*"):
-                     if f.is_file():
-                         tar.add(f, arcname=f.relative_to(tmpdir))
-
-             archive_name = upload_file_to_cloud(self.name, archive_path)
-
-             try:
-                 service = build_and_deploy_to_cloud(
-                     function_name=self.name,
-                     gcs_object_name=archive_name,
-                     port=port,
-                     memory="1Gi"
-                 )
-             except Exception as e:
-                 print(f"❌ Cloud Deployment Failed: {e}")
-                 return None
+     def __del__(self):
+         """Cleanup on garbage collection."""
+         self._cleanup_container()
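Taken together, the rewrite replaces the old volume-mounted pickle handshake with a warm gRPC container for local runs, while deploys still bake a pickled function into the image. A usage sketch under that model (the Runtime constructor signature is from the diff above; the function and package list are hypothetical):

```python
# Hypothetical: run a function locally in the warm gRPC container,
# then build a deployable image with the same configuration baked in.
from cycls.runtime import Runtime

def greet(name):
    return f"hello {name}"

rt = Runtime(greet, name='greeter', pip_packages=['httpx'])
print(rt.run('world'))            # executes via RuntimeClient.call in the container
for part in rt.stream('world'):   # generator results arrive as a gRPC stream
    print(part)
rt.build(port=8080)               # deploy-* image with function.pkl + entrypoint.py
```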
cycls/sdk.py CHANGED
@@ -88,7 +88,7 @@ class AgentRuntime:
 
      def local(self, port=8080, watch=True):
          """Run locally in Docker with file watching by default."""
-         if os.environ.get('_CYCLS_WATCH_CHILD'):
+         if os.environ.get('_CYCLS_WATCH'):
              watch = False
          runtime = self._runtime(prod=False)
          runtime.watch(port=port) if watch else runtime.run(port=port)
cycls-0.0.2.74.dist-info/METADATA → cycls-0.0.2.75.dist-info/METADATA RENAMED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: cycls
- Version: 0.0.2.74
+ Version: 0.0.2.75
  Summary: Distribute Intelligence
  Author: Mohammed J. AlRujayi
  Author-email: mj@cycls.com
@@ -16,6 +16,7 @@ Provides-Extra: modal
  Requires-Dist: cloudpickle (>=3.1.1,<4.0.0)
  Requires-Dist: docker (>=7.1.0,<8.0.0)
  Requires-Dist: fastapi (>=0.111.0,<0.112.0)
+ Requires-Dist: grpcio (>=1.76.0,<2.0.0)
  Requires-Dist: httpx (>=0.27.0,<0.28.0)
  Requires-Dist: modal (>=1.1.0,<2.0.0) ; extra == "modal"
  Requires-Dist: pyjwt (>=2.8.0,<3.0.0)
cycls-0.0.2.75.dist-info/RECORD ADDED
@@ -0,0 +1,20 @@
+ cycls/__init__.py,sha256=vyI1d_8VP4XW7MliFuUs_P3O9KQxyCwQu-JkxrCyhPQ,597
+ cycls/auth.py,sha256=xkndHZyCfnlertMMEKerCJjf23N3fVcTRVTTSXTTuzg,247
+ cycls/cli.py,sha256=AKf0z7ZLau3GvBVR_IhB7agmq4nVaHkcuUafNyvv2_A,7978
+ cycls/default-theme/assets/index-B0ZKcm_V.css,sha256=wK9-NhEB8xPcN9Zv69zpOcfGTlFbMwyC9WqTmSKUaKw,6546
+ cycls/default-theme/assets/index-D5EDcI4J.js,sha256=sN4qRcAXa7DBd9JzmVcCoCwH4l8cNCM-U9QGUjBvWSo,1346506
+ cycls/default-theme/index.html,sha256=bM-yW_g0cGrV40Q5yY3ccY0fM4zI1Wuu5I8EtGFJIxs,828
+ cycls/dev-theme/index.html,sha256=QJBHkdNuMMiwQU7o8dN8__8YQeQB45D37D-NCXIWB2Q,11585
+ cycls/grpc/__init__.py,sha256=sr8UQMgJEHyBreBKV8xz8UCd0zDP5lhjXTnfkOB_yOY,63
+ cycls/grpc/client.py,sha256=zDFIBABXzuv_RUVn5LllppZ38C7k01RyAS8ZURBjudQ,2270
+ cycls/grpc/runtime.proto,sha256=B1AqrNIXOtr3Xsyzfc2Z1OCBepa6hsi4DJ4a3Pf33IQ,244
+ cycls/grpc/runtime_pb2.py,sha256=vEJo8FGP5aWPSDqzjZldfctduA2ojiyvoody7vpf-1w,1703
+ cycls/grpc/runtime_pb2_grpc.py,sha256=KFd8KqGbiNsKm8X39Q9_BPwXjeZUiDl8O_4aTlEys3k,3394
+ cycls/grpc/server.py,sha256=pfb4bo06NKDv0OpknqMSMjB9f8HUR41EZau1c6_XU5A,1911
+ cycls/runtime.py,sha256=1jkF1_8LeZewmwn-TztyMSTjcRhsoQK2RxSNq4cjCHk,17637
+ cycls/sdk.py,sha256=_1tJ-lRQ1CmZ2y_9taXJhSDbHq0F674GvdiSRpgbTOk,6614
+ cycls/web.py,sha256=_QNH8K55vTm90Z7tvcRKal5IybjkB1GY7Pf9p3qu3r8,4659
+ cycls-0.0.2.75.dist-info/METADATA,sha256=X_9G2W3jfjl8SNQwzAb030SLmPKf9nru-ygE6sI7MCg,8459
+ cycls-0.0.2.75.dist-info/WHEEL,sha256=zp0Cn7JsFoX2ATtOhtaFYIiE2rmFAD4OcMhtUki8W3U,88
+ cycls-0.0.2.75.dist-info/entry_points.txt,sha256=vEhqUxFhhuzCKWtq02LbMnT3wpUqdfgcM3Yh-jjXom8,40
+ cycls-0.0.2.75.dist-info/RECORD,,
cycls-0.0.2.74.dist-info/RECORD DELETED
@@ -1,14 +0,0 @@
- cycls/__init__.py,sha256=vyI1d_8VP4XW7MliFuUs_P3O9KQxyCwQu-JkxrCyhPQ,597
- cycls/auth.py,sha256=xkndHZyCfnlertMMEKerCJjf23N3fVcTRVTTSXTTuzg,247
- cycls/cli.py,sha256=AKf0z7ZLau3GvBVR_IhB7agmq4nVaHkcuUafNyvv2_A,7978
- cycls/default-theme/assets/index-B0ZKcm_V.css,sha256=wK9-NhEB8xPcN9Zv69zpOcfGTlFbMwyC9WqTmSKUaKw,6546
- cycls/default-theme/assets/index-D5EDcI4J.js,sha256=sN4qRcAXa7DBd9JzmVcCoCwH4l8cNCM-U9QGUjBvWSo,1346506
- cycls/default-theme/index.html,sha256=bM-yW_g0cGrV40Q5yY3ccY0fM4zI1Wuu5I8EtGFJIxs,828
- cycls/dev-theme/index.html,sha256=QJBHkdNuMMiwQU7o8dN8__8YQeQB45D37D-NCXIWB2Q,11585
- cycls/runtime.py,sha256=lg7XKHd9fLV_bYksHv2LHf3Lq7HPAC3K5Tr8pNgQ7sM,21641
- cycls/sdk.py,sha256=X8-VAVqtksO0VGJIxlg02HLmeFpwtwMHWu9PNksS5kw,6620
- cycls/web.py,sha256=_QNH8K55vTm90Z7tvcRKal5IybjkB1GY7Pf9p3qu3r8,4659
- cycls-0.0.2.74.dist-info/METADATA,sha256=IXA6mD4bkkGRnGZjK_rQYDda-fEE8nI2ucgVVVIsJgQ,8419
- cycls-0.0.2.74.dist-info/WHEEL,sha256=zp0Cn7JsFoX2ATtOhtaFYIiE2rmFAD4OcMhtUki8W3U,88
- cycls-0.0.2.74.dist-info/entry_points.txt,sha256=vEhqUxFhhuzCKWtq02LbMnT3wpUqdfgcM3Yh-jjXom8,40
- cycls-0.0.2.74.dist-info/RECORD,,