dora-openai-server 0.3.7 (py3-none-any.whl)

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,11 @@
+ import os
+
+ # Define the path to the README file relative to the package directory
+ readme_path = os.path.join(os.path.dirname(os.path.dirname(__file__)), "README.md")
+
+ # Read the content of the README file
+ try:
+     with open(readme_path, "r", encoding="utf-8") as f:
+         __doc__ = f.read()
+ except FileNotFoundError:
+     __doc__ = "README file not found."
@@ -0,0 +1,135 @@
+ from fastapi import FastAPI
+ from pydantic import BaseModel
+ from typing import List, Optional
+ import uvicorn
+ from dora import Node
+ import asyncio
+ import pyarrow as pa
+ import ast
+
+ DORA_RESPONSE_TIMEOUT = 10
+ app = FastAPI()
+
+
+ class ChatCompletionMessage(BaseModel):
+     role: str
+     content: str
+
+
+ class ChatCompletionRequest(BaseModel):
+     model: str
+     messages: List[ChatCompletionMessage]
+     temperature: Optional[float] = 1.0
+     max_tokens: Optional[int] = 100
+
+
+ class ChatCompletionResponse(BaseModel):
+     id: str
+     object: str
+     created: int
+     model: str
+     choices: List[dict]
+     usage: dict
+
+
+ node = Node()  # provide the name to connect to the dataflow if dynamic node
+
+
+ @app.post("/v1/chat/completions")
+ async def create_chat_completion(request: ChatCompletionRequest):
+     data = next(
+         (msg.content for msg in request.messages if msg.role == "user"),
+         "No user message found.",
+     )
+
+     # Convert user_message to Arrow array
+     # user_message_array = pa.array([user_message])
+     # Publish user message to dora-echo
+     # node.send_output("user_query", user_message_array)
+
+     # Parse the message as a Python literal if possible, otherwise keep it as a string
+     try:
+         data = ast.literal_eval(data)
+     except (ValueError, SyntaxError):
+         print("Passing input as string")
+     if isinstance(data, (str, int, float, dict)):
+         data = pa.array([data])  # wrap a scalar value in a one-element pyarrow array
+     else:
+         data = pa.array(data)  # lists and other sequences map directly to a pyarrow array
+     node.send_output("v1/chat/completions", data)
+
+     # Wait for response from dora-echo
+     while True:
+         event = node.next(timeout=DORA_RESPONSE_TIMEOUT)
+         if event["type"] == "ERROR":
+             response_str = "No response received. Err: " + event["value"][0].as_py()
+             break
+         elif event["type"] == "INPUT" and event["id"] == "v1/chat/completions":
+             response = event["value"]
+             response_str = response[0].as_py() if response else "No response received"
+             break
+         else:
+             pass
+
+     return ChatCompletionResponse(
+         id="chatcmpl-1234",
+         object="chat.completion",
+         created=1234567890,
+         model=request.model,
+         choices=[
+             {
+                 "index": 0,
+                 "message": {"role": "assistant", "content": response_str},
+                 "finish_reason": "stop",
+             }
+         ],
+         usage={
+             "prompt_tokens": len(data),
+             "completion_tokens": len(response_str),
+             "total_tokens": len(data) + len(response_str),
+         },
+     )
+
+
+ @app.get("/v1/models")
+ async def list_models():
+     return {
+         "object": "list",
+         "data": [
+             {
+                 "id": "gpt-3.5-turbo",
+                 "object": "model",
+                 "created": 1677610602,
+                 "owned_by": "openai",
+             }
+         ],
+     }
+
+
+ async def run_fastapi():
+     config = uvicorn.Config(app, host="0.0.0.0", port=8000, log_level="info")
+     server = uvicorn.Server(config)
+
+     # Run the uvicorn server in the background and poll the dora node for a STOP event
+     server_task = asyncio.gather(server.serve())
+     while True:
+         await asyncio.sleep(1)
+         event = node.next(0.001)
+         if event is not None and event["type"] == "STOP":
+             break
+     server.should_exit = True
+     await server_task
+
+
+ def main():
+     asyncio.run(run_fastapi())
+
+
+ if __name__ == "__main__":
+     main()
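The handler in main.py sends the user message on its `v1/chat/completions` output and then blocks until an input with the same id arrives, so the dataflow has to wire this node to some responder (the comments refer to it as dora-echo). Below is a minimal sketch of such a responder; the node name, the input id, and the dataflow wiring are assumptions rather than part of this package.

```python
# Hypothetical responder node: echoes whatever text dora-openai-server forwards.
# The dataflow is assumed to route this node's v1/chat/completions output back
# to the server's v1/chat/completions input.
import pyarrow as pa
from dora import Node


def main():
    node = Node()
    for event in node:
        if event["type"] == "INPUT" and event["id"] == "v1/chat/completions":
            query = event["value"][0].as_py()
            node.send_output("v1/chat/completions", pa.array([f"echo: {query}"]))


if __name__ == "__main__":
    main()
```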
@@ -0,0 +1,34 @@
+ Metadata-Version: 2.1
+ Name: dora-openai-server
+ Version: 0.3.7
+ Summary: Dora OpenAI API Server
+ Home-page: https://github.com/dora-rs/dora.git
+ License: MIT
+ Author: Haixuan Xavier Tao
+ Author-email: tao.xavier@outlook.com
+ Requires-Python: >=3.7,<4.0
+ Classifier: License :: OSI Approved :: MIT License
+ Classifier: Programming Language :: Python :: 3
+ Classifier: Programming Language :: Python :: 3.7
+ Classifier: Programming Language :: Python :: 3.8
+ Classifier: Programming Language :: Python :: 3.9
+ Classifier: Programming Language :: Python :: 3.10
+ Classifier: Programming Language :: Python :: 3.11
+ Classifier: Programming Language :: Python :: 3.12
+ Classifier: Programming Language :: Python :: 3.13
+ Requires-Dist: asyncio (>=3.4,<4.0)
+ Requires-Dist: dora-rs (>=0.3.6,<0.4.0)
+ Requires-Dist: fastapi (>=0.115,<0.116)
+ Requires-Dist: numpy (<2.0.0)
+ Requires-Dist: pyarrow (>=5.0.0)
+ Requires-Dist: pydantic (>=2.9,<3.0)
+ Requires-Dist: uvicorn (>=0.31,<0.32)
+ Project-URL: Documentation, https://github.com/dora-rs/dora/blob/main/node-hub/dora-openai-server/README.md
+ Description-Content-Type: text/markdown
+
+ # Dora OpenAI Server
+
+ This is an experimental node to expose an OpenAI-compatible server endpoint with dora.
+
+ Check the example at [examples/openai-server](../../examples/openai-server/README.md).
+
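The README above describes an OpenAI-style endpoint; main.py serves it on host 0.0.0.0, port 8000. A minimal sketch of a client call, assuming the server node is running inside a dataflow with a responder wired to its `v1/chat/completions` input, and using `requests`, which is not a dependency of this package:

```python
import requests

# Hypothetical client call against the locally running server (host/port from main.py).
resp = requests.post(
    "http://localhost:8000/v1/chat/completions",
    json={
        "model": "gpt-3.5-turbo",
        "messages": [{"role": "user", "content": "Hello, dora!"}],
    },
)
# The server answers with an OpenAI-shaped chat.completion object.
print(resp.json()["choices"][0]["message"]["content"])
```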
@@ -0,0 +1,6 @@
+ dora_openai_server/__init__.py,sha256=Gy4qL4vCeTyA5HR1Yp3ioL4-ClJyW8oi_38CzMuMsBM,358
+ dora_openai_server/main.py,sha256=1TEl9UZYF9DKY9PLMJRdiSAKL7Q5FzaRtE0h09XgaX4,3532
+ dora_openai_server-0.3.7.dist-info/METADATA,sha256=twSps5jhzy5CyOn_qgJcSI7vc1bsiwPCyutMaBe457s,1279
+ dora_openai_server-0.3.7.dist-info/WHEEL,sha256=Nq82e9rUAnEjt98J6MlVmMCZb-t9cYE2Ir1kpBmnWfs,88
+ dora_openai_server-0.3.7.dist-info/entry_points.txt,sha256=StRfCXeKJyyyd5MzPWZKNm7HS0rP1KLsKCUZSEDBRF4,67
+ dora_openai_server-0.3.7.dist-info/RECORD,,
@@ -0,0 +1,4 @@
+ Wheel-Version: 1.0
+ Generator: poetry-core 1.9.1
+ Root-Is-Purelib: true
+ Tag: py3-none-any
@@ -0,0 +1,3 @@
+ [console_scripts]
+ dora-openai-server=dora_openai_server.main:main
+