davidkhala.ai 0.2.0__py3-none-any.whl → 0.2.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (57)
  1. davidkhala/ai/agent/dify/api/__init__.py +2 -2
  2. davidkhala/ai/agent/dify/api/app.py +10 -6
  3. davidkhala/ai/agent/dify/api/knowledge/__init__.py +0 -0
  4. davidkhala/ai/agent/dify/api/knowledge/chunk.py +14 -0
  5. davidkhala/ai/agent/dify/api/knowledge/dataset.py +82 -0
  6. davidkhala/ai/agent/dify/api/knowledge/document.py +42 -0
  7. davidkhala/ai/agent/dify/api/knowledge/model.py +139 -0
  8. davidkhala/ai/agent/dify/{ops/console → console}/__init__.py +7 -1
  9. davidkhala/ai/agent/dify/console/knowledge/__init__.py +0 -0
  10. davidkhala/ai/agent/dify/console/knowledge/dataset.py +61 -0
  11. davidkhala/ai/agent/dify/console/knowledge/pipeline.py +127 -0
  12. davidkhala/ai/agent/dify/{ops/console → console}/plugin.py +21 -7
  13. davidkhala/ai/agent/dify/console/session.py +50 -0
  14. davidkhala/ai/agent/dify/db/orm.py +65 -0
  15. davidkhala/ai/agent/dify/model/__init__.py +7 -0
  16. davidkhala/ai/agent/dify/{model.py → model/knowledge.py} +1 -12
  17. davidkhala/ai/agent/dify/{ops/db/orm.py → model/workflow.py} +24 -62
  18. davidkhala/ai/agent/dify/plugins/popular.py +4 -1
  19. davidkhala/ai/agent/langgraph.py +1 -1
  20. davidkhala/ai/ali/dashscope.py +15 -18
  21. davidkhala/ai/anthropic/__init__.py +6 -0
  22. davidkhala/ai/api/__init__.py +6 -18
  23. davidkhala/ai/api/openrouter.py +14 -10
  24. davidkhala/ai/api/siliconflow.py +2 -4
  25. davidkhala/ai/atlas/__init__.py +24 -0
  26. davidkhala/ai/mistral/__init__.py +15 -0
  27. davidkhala/ai/mistral/agent.py +50 -0
  28. davidkhala/ai/mistral/ai.py +40 -0
  29. davidkhala/ai/mistral/file.py +38 -0
  30. davidkhala/ai/mistral/ocr.py +46 -0
  31. davidkhala/ai/model/__init__.py +28 -0
  32. davidkhala/ai/model/chat.py +75 -0
  33. davidkhala/ai/model/embed.py +8 -0
  34. davidkhala/ai/model/garden.py +9 -0
  35. davidkhala/ai/openai/__init__.py +24 -40
  36. davidkhala/ai/openai/azure.py +55 -3
  37. davidkhala/ai/openai/databricks.py +23 -0
  38. davidkhala/ai/openai/native.py +4 -4
  39. davidkhala/ai/openai/opik.py +10 -0
  40. davidkhala/ai/openrouter/__init__.py +25 -13
  41. davidkhala/ai/you.py +55 -0
  42. {davidkhala_ai-0.2.0.dist-info → davidkhala_ai-0.2.2.dist-info}/METADATA +12 -6
  43. davidkhala_ai-0.2.2.dist-info/RECORD +65 -0
  44. davidkhala/ai/agent/dify/api/knowledge.py +0 -191
  45. davidkhala/ai/agent/dify/ops/__init__.py +0 -1
  46. davidkhala/ai/agent/dify/ops/console/knowledge.py +0 -158
  47. davidkhala/ai/agent/dify/ops/console/session.py +0 -32
  48. davidkhala/ai/huggingface/BAAI.py +0 -10
  49. davidkhala/ai/huggingface/__init__.py +0 -21
  50. davidkhala/ai/huggingface/inference.py +0 -13
  51. davidkhala/ai/model.py +0 -28
  52. davidkhala_ai-0.2.0.dist-info/RECORD +0 -48
  53. /davidkhala/ai/agent/dify/{ops/db → db}/__init__.py +0 -0
  54. /davidkhala/ai/agent/dify/{ops/db → db}/app.py +0 -0
  55. /davidkhala/ai/agent/dify/{ops/db → db}/knowledge.py +0 -0
  56. /davidkhala/ai/agent/dify/{ops/db → db}/sys.py +0 -0
  57. {davidkhala_ai-0.2.0.dist-info → davidkhala_ai-0.2.2.dist-info}/WHEEL +0 -0
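The bulk of this release is a restructuring of the Dify client: the ops/ layer is flattened (ops/console becomes console, ops/db becomes db), the single model.py is split into a model/ package, the monolithic api/knowledge.py becomes an api/knowledge/ package, the huggingface modules are dropped, and new mistral and model packages are added. A hedged sketch of the import migration implied by the rename list above (the exact symbols exported from the new modules are assumptions, not confirmed by this diff):

    # 0.2.0 (removed paths)
    from davidkhala.ai.agent.dify.ops.console.session import ConsoleUser
    from davidkhala.ai.agent.dify.ops.db.orm import Node

    # 0.2.2 (assumed equivalents, read off the renames above)
    from davidkhala.ai.agent.dify.console.session import ConsoleUser
    from davidkhala.ai.agent.dify.db.orm import Node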
davidkhala/ai/agent/dify/api/knowledge.py DELETED
@@ -1,191 +0,0 @@
- from __future__ import annotations
-
- import os
- from pathlib import Path
- from typing import Iterable, TypedDict, Optional
- from urllib.parse import urlparse
-
- import requests
-
- from davidkhala.ai.agent.dify.api import API, Iterator
- from davidkhala.ai.agent.dify.model import Document as DocumentBase
-
-
- class DatasetDict(TypedDict):
-     id: str
-     name: str
-     description: str
-     provider: str
-     permission: str
-     data_source_type: str
-     indexing_technique: str
-     doc_form: str
-     runtime_mode: str
-     is_published: bool
-     enable_api: bool
-     # stats
-     app_count: int
-     document_count: int
-     word_count: int
-     total_documents: int
-     total_available_documents: int
-     # embedding
-     embedding_available: bool
-     embedding_model: str
-     embedding_model_provider: str
-     retrieval_model_dict: dict
-     external_retrieval_model: dict
-     external_knowledge_info: dict
-
-
- class Document(DocumentBase):
-     data_source_info: dict[str, str]
-     data_source_detail_dict: dict[str, dict]
-     dataset_process_rule_id: str
-     created_from: str
-     created_by: str
-     created_at: int
-     tokens: int
-     archived: bool
-     display_status: str
-     word_count: int
-     hit_count: int
-     doc_form: str
-     doc_metadata: dict
-     disabled_at: int
-     disabled_by: str
-
-
- class Dataset(API):
-     def __init__(self, api_key: str, base_url="https://api.dify.ai/v1"):
-         super().__init__(api_key, f"{base_url}/datasets")
-
-     def paginate_datasets(self, page=1, size=20):
-         r = self.request(self.base_url, "GET", params={
-             'page': page,
-             'limit': size,
-         })
-         return r
-
-     def list_datasets(self) -> Iterable[list[DatasetDict]]:
-         return Iterator(self.paginate_datasets, None)
-
-     @property
-     def ids(self):
-         for sub_list in self.list_datasets():
-             for dataset in sub_list:
-                 yield dataset['id']
-
-     class Instance(API):
-         def __init__(self, d: Dataset, dataset_id: str):
-             super().__init__(d.api_key, f"{d.base_url}/{dataset_id}")
-
-         def get(self):
-             return self.request(self.base_url, "GET")
-
-         def upload(self, filename, *, path=None, url=None, document_id=None):
-             """
-             does not work for .html
-             works for .md
-             """
-             files = {}
-             if path:
-                 with open(path, 'rb') as f:
-                     content = f.read()
-                 if not filename:
-                     filename = os.path.basename(path)
-             elif url:
-                 r = requests.get(url)
-                 r.raise_for_status()
-                 if not filename:
-                     parsed_url = urlparse(url)
-                     filename = Path(parsed_url.path).name
-                 content = r.content
-             files['file'] = (filename, content)
-             if document_id:
-                 # does not work for html
-                 r = requests.post(f"{self.base_url}/documents/{document_id}/update-by-file", files=files,
-                                   **self.options)
-             else:
-                 r = requests.post(f"{self.base_url}/document/create-by-file", files=files, **self.options)
-             r = self.on_response(r)
-             return r['document']
-
-         def paginate_documents(self, page=1, size=20):
-             return self.request(f"{self.base_url}/documents", "GET", params={
-                 'page': page,
-                 'limit': size
-             })
-
-         def list_documents(self) -> Iterable[Document]:
-             for document_batch in Iterator(self.paginate_documents, None):
-                 for document in document_batch:
-                     yield Document(**document)
-
-         def has_document(self, name) -> bool:
-             return any(name == item['name'] for row in self.list_documents() for item in row)
-
-
- class ChunkDict(TypedDict):
-     id: str
-     position: int
-     document_id: str
-     content: str
-     sign_content: str  # trimmed version of content
-     answer: Optional[str]  # only used in QA chunk
-     word_count: int
-     tokens: int
-     keywords: Optional[list[str]]
-     index_node_id: str  # node ID of the chunk in the vector index
-     index_node_hash: str  # hash of sign_content
-     hit_count: int
-     enabled: bool
-     status: str  # 'completed'
-     created_at: int  # timestamp
-     updated_at: int  # timestamp
-     completed_at: int  # timestamp
-     created_by: str  # user id
-     child_chunks: list
-     error: Optional
-     stopped_at: Optional[int]  # timestamp
-     disabled_at: Optional[int]  # timestamp
-
-
- class Document(API):
-     def __init__(self, d: Dataset.Instance, document_id: str):
-         super().__init__(d.api_key, f"{d.base_url}/documents/{document_id}")
-
-     def exist(self):
-         try:
-             self.get()
-             return True
-         except requests.exceptions.HTTPError as e:
-             if e.response.status_code == 404:
-                 return False
-             else:
-                 raise e
-
-     def get(self):
-         return self.request(self.base_url, "GET")
-
-     def paginate_chunks(self, page=1, size=20):
-         return self.request(f"{self.base_url}/segments", "GET", params={
-             'page': page,
-             'limit': size
-         })
-
-     def list_chunks(self) -> Iterable[ChunkDict]:
-         for chunk_batch in Iterator(self.paginate_chunks, None):
-             for chunk in chunk_batch:
-                 yield chunk
-
-     def delete(self):
-         if self.exist():
-             self.request(self.base_url, "DELETE")
- class Chunk(API):
-     def __init__(self, d: Document, segment_id: str):
-         super().__init__(d.api_key, f"{d.base_url}/segments/{segment_id}")
-     def get(self):
-         r = self.request(self.base_url, "GET")
-         assert r['doc_form']  # optional value text_model
-         return r['data']
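For reference, a minimal sketch of how this removed flat-file client was driven before it was split into the api/knowledge/ package (the API key and printed fields are placeholders):

    from davidkhala.ai.agent.dify.api.knowledge import Dataset  # 0.2.0 path, removed

    d = Dataset(api_key="dataset-...")          # placeholder knowledge API key
    dataset_id = next(d.ids)                    # first dataset id, paginated via Iterator
    instance = Dataset.Instance(d, dataset_id)
    for document in instance.list_documents():  # one Document model per item, per page
        print(document)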
davidkhala/ai/agent/dify/ops/__init__.py DELETED
@@ -1 +0,0 @@
-
davidkhala/ai/agent/dify/ops/console/knowledge.py DELETED
@@ -1,158 +0,0 @@
- from time import sleep
-
- from davidkhala.utils.http_request.stream import as_sse, Request as StreamRequest
- from pydantic import BaseModel
-
- from davidkhala.ai.agent.dify.interface import IndexingError
- from davidkhala.ai.agent.dify.model import Document, Dataset
- from davidkhala.ai.agent.dify.const import IndexingStatus
- from davidkhala.ai.agent.dify.ops.console import API
- from davidkhala.ai.agent.dify.ops.console.session import ConsoleUser
- from davidkhala.ai.agent.dify.ops.db.orm import Node
-
-
- class ConsoleKnowledge(API):
-     def __init__(self, context: ConsoleUser):
-         super().__init__()
-         self.base_url = context.base_url
-         self.session.cookies = context.session.cookies
-         self.options = context.options
-
-
- class Datasource(ConsoleKnowledge):
-     """step 1: Choose a Data Source"""
-
-     class FirecrawlOutput(BaseModel):
-         source_url: str
-         description: str
-         title: str
-         credential_id: str
-         content: str
-
-     def run_firecrawl(self, pipeline: str, node: Node,
-                       *,
-                       inputs: dict,
-                       credential_id: str
-                       ):
-
-         url = f"{self.base_url}/rag/pipelines/{pipeline}/workflows/published/datasource/nodes/{node.id}/run"
-
-         stream_request = StreamRequest(self)
-         response = stream_request.request(url, 'POST', json={
-             'inputs': inputs,
-             'datasource_type': node.datasource_type,
-             'credential_id': credential_id,
-             "response_mode": "streaming"
-         })
-
-         for data in as_sse(response):
-             event = data['event']
-             if event == 'datasource_completed':
-                 return data['data']
-             else:
-                 assert event == 'datasource_processing'
-                 print(data)
-         return None
-
-     def upload(self):
-         "http://localhost/console/api/files/upload?source=datasets"
-         # TODO
-         "form data"
-         {
-             "file": "body"
-         }
-         r = {
-             "id": "3898db5b-eb72-4f11-b507-628ad5d28887",
-             "name": "Professional Diploma Meister Power Electrical Engineering - Technological and Higher Education Institute of Hong Kong.html",
-             "size": 254362,
-             "extension": "html",
-             "mime_type": "text\/html",
-             "created_by": "dbd0b38b-5ef1-4123-8c3f-0c82eb1feacd",
-             "created_at": 1764943811,
-             "source_url": "\/files\/3898db5b-eb72-4f11-b507-628ad5d28887\/file-preview?timestamp=1764943811&nonce=43b0ff5a13372415be79de4cc7ef398c&sign=7OJ2wiVYc4tygl7yvM1sPn7s0WXDlhHxgX76bsGTD94%3D"
-         }
-
-
- class Operation(ConsoleKnowledge):
-     def website_sync(self, dataset: str, document: str, *, wait_until=True):
-         """
-         cannot be used against a pipeline dataset; otherwise you will see the error "no website import info found"
-         """
-         doc_url = f"{self.base_url}/datasets/{dataset}/documents/{document}"
-
-         r = self.request(f"{doc_url}/website-sync", "GET")
-         assert r == {"result": "success"}
-         if wait_until:
-             return self.wait_until(dataset, document)
-         return None
-
-     def retry(self, dataset: str, *documents: str, wait_until=True):
-         """
-         It cannot trigger a rerun on successfully indexed documents
-         """
-         url = f"{self.base_url}/datasets/{dataset}/retry"
-         self.request(url, "POST", json={
-             'document_ids': documents,
-         })
-         # response status code will be 204
-         if wait_until:
-             return [self.wait_until(dataset, document) for document in documents]
-         return None
-
-     def rerun(self, dataset: str, *documents: str):
-         for document in documents:
-             try:
-                 self.website_sync(dataset, document)
-                 assert False, "expect IndexingError"
-             except IndexingError:
-                 pass
-         return self.retry(dataset, *documents)
-
-     def wait_until(self, dataset: str, document: str, *,
-                    expect_status=None,
-                    from_status=None,
-                    interval=1
-                    ):
-         if not expect_status:
-             expect_status = [IndexingStatus.FAILED, IndexingStatus.COMPLETED]
-         url = f"{self.base_url}/datasets/{dataset}/documents/{document}/indexing-status"
-         if from_status is None:
-             from_status = [IndexingStatus.WAITING, IndexingStatus.PARSING]
-         r = self.request(url, "GET")
-         status = r['indexing_status']
-         assert status in from_status, f"current status: {status}, expect: {from_status}"
-         while status not in expect_status:
-             sleep(interval)
-             r = self.request(url, "GET")
-             status = r['indexing_status']
-             if status == IndexingStatus.FAILED: raise IndexingError(r['error'])
-         return r
-
-
- class DatasetResult(Dataset):
-     chunk_structure: str
-
- class RunResult(BaseModel):
-     batch: str
-     dataset: DatasetResult
-     documents: list[Document]
-
- class Load(ConsoleKnowledge):
-     """
-     Processing Documents
-     """
-
-     def async_run(self, pipeline: str, node: Node, inputs: dict, datasource_info_list: list[dict]) -> RunResult:
-         """Ingest a new document"""
-         url = f"{self.base_url}/rag/pipelines/{pipeline}/workflows/published/run"
-         r = self.request(url, "POST", json={
-             'inputs': inputs,
-             'start_node_id': node.id,
-             'is_preview': False,
-             'response_mode': "blocking",
-             "datasource_info_list": datasource_info_list,
-             'datasource_type': node.datasource_type
-         })
-         return RunResult(**r)
-
-
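A hedged sketch of the removed console-side ingestion flow, chaining the classes above. ConsoleUser construction, the pipeline id, the Node object, and the credential id are all assumptions; the real session wiring lives in the console session module below:

    user = ConsoleUser(...)                    # base_url and session wiring omitted
    user.login("admin@example.com", "secret")  # placeholders
    info = Datasource(user).run_firecrawl(
        "pipeline-id", node,                   # node: an ops.db.orm.Node, assumed
        inputs={}, credential_id="cred-id",
    )                                          # blocks until 'datasource_completed'
    result = Load(user).async_run("pipeline-id", node, {}, [info])
    print(result.batch)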
davidkhala/ai/agent/dify/ops/console/session.py DELETED
@@ -1,32 +0,0 @@
- from base64 import b64encode
-
- from davidkhala.ai.agent.dify.ops.console import API
-
-
- class ConsoleUser(API):
-     def login(self, email, password,
-               *,
-               remember_me=True,
-               language="en-US"
-               ):
-         url = f"{self.base_url}/login"
-
-         r = self.request(url, "POST", json={
-             'email': email,
-             'password': b64encode(password.encode()).decode(),  # base64-encoded since Dify 1.11
-             'remember_me': remember_me,
-             'language': language,
-         })
-         assert r == {"result": "success"}
-         self.options['headers']['x-csrf-token'] = self.session.cookies.get("csrf_token")
-         return self.session.cookies
-
-     @property
-     def me(self) -> dict:
-         url = f"{self.base_url}/account/profile"
-         return self.request(url, "GET")
-
-     @property
-     def workspace(self) -> dict:
-         url = f"{self.base_url}/features"
-         return self.request(url, "GET")
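Note the password handling above: since Dify 1.11 the console login endpoint expects the password base64-encoded rather than in plain text. A one-line illustration of what login() sends:

    from base64 import b64encode
    b64encode("secret".encode()).decode()  # -> 'c2VjcmV0'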
davidkhala/ai/huggingface/BAAI.py DELETED
@@ -1,10 +0,0 @@
- import os
-
- from davidkhala.ai.huggingface import clone
- from pathlib import Path
-
- def bge_m3_path(git_dir: os.PathLike):
-     model_dir = clone(git_dir, repo_id="BAAI/bge-m3", allow_patterns=["onnx/*"])
-     onnx_path = Path(model_dir) / "onnx" / "model.onnx"
-     assert onnx_path.is_file() and onnx_path.exists()
-     return onnx_path
davidkhala/ai/huggingface/__init__.py DELETED
@@ -1,21 +0,0 @@
- import os
- from typing import Optional
-
- from huggingface_hub import snapshot_download
-
-
- def clone(git_dir: os.PathLike,
-           *,
-           owner: Optional[str] = None,
-           repository: Optional[str] = None,
-           repo_id: Optional[str] = None,
-           **kwargs
-           ) -> str:
-     if not repo_id:
-         repo_id = f"{owner}/{repository}"
-     return snapshot_download(
-         repo_id=repo_id,
-         local_dir=git_dir,
-         local_dir_use_symlinks=False,
-         **kwargs
-     )
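The removed clone() was a thin wrapper over huggingface_hub.snapshot_download, accepting either owner/repository or a full repo_id. A usage sketch with a placeholder local directory (the first call mirrors how the removed BAAI.bge_m3_path drove it):

    local_dir = clone("models/bge-m3", repo_id="BAAI/bge-m3", allow_patterns=["onnx/*"])
    # equivalent spelling via the split arguments:
    local_dir = clone("models/bge-m3", owner="BAAI", repository="bge-m3", allow_patterns=["onnx/*"])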
davidkhala/ai/huggingface/inference.py DELETED
@@ -1,13 +0,0 @@
- from huggingface_hub import InferenceApi
-
-
- class API:
-     def __init__(self, token):
-         self.inference = None
-         self.token = token
-
-     def as_model(self, repo_id):
-         self.inference = InferenceApi(repo_id=repo_id, token=self.token)
-
-     def call(self, **kwargs):
-         return self.inference(**kwargs)
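A minimal sketch of the removed wrapper (token and model are placeholders; huggingface_hub's InferenceApi is the legacy inference client, long superseded by InferenceClient, which likely motivated dropping this module):

    api = API(token="hf_...")       # placeholder token
    api.as_model("BAAI/bge-m3")
    api.call(inputs="hello world")  # kwargs forwarded to InferenceApi.__call__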
davidkhala/ai/model.py DELETED
@@ -1,28 +0,0 @@
- from abc import ABC
- from typing import Optional
-
-
- class AbstractClient(ABC):
-     api_key: str
-     base_url: str
-     model: Optional[str]
-     messages = []
-
-     def as_chat(self, model: str, sys_prompt: str = None):
-         self.model = model
-         if sys_prompt is not None:
-             self.messages = [{"role": "system", "content": sys_prompt}]
-
-     def as_embeddings(self, model: str):
-         self.model = model
-
-     def chat(self, *user_prompt, **kwargs):
-         ...
-
-     def encode(self, *_input: str) -> list[list[float]]:
-         ...
-     def connect(self):
-         ...
-
-     def disconnect(self):
-         ...
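The 0.2.2 replacement for this base class lives under davidkhala/ai/model/ (files 31–34 above). For reference, a hedged sketch of the contract it defined; the subclass is hypothetical:

    class EchoClient(AbstractClient):  # hypothetical subclass
        def chat(self, *user_prompt, **kwargs):
            self.messages += [{"role": "user", "content": p} for p in user_prompt]
            return self.messages[-1]["content"]

    c = EchoClient()
    c.as_chat("some-model", sys_prompt="You are terse.")  # sets model + system message
    c.chat("hello")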
davidkhala_ai-0.2.0.dist-info/RECORD DELETED
@@ -1,48 +0,0 @@
- davidkhala/ai/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- davidkhala/ai/model.py,sha256=1wcXC8X8oqerMatlcPbZmuxZ-nJWdJKmaDSDgiGlUGw,647
- davidkhala/ai/opik.py,sha256=YU1XuweMUAzUkhpjxhltt-SBBDBkR3z-PCNo0DqzBRs,39
- davidkhala/ai/agent/README.md,sha256=kIPsx3gOjrpOw7w2qhNEALuCEQkuh4nYp6uBnijdvHE,178
- davidkhala/ai/agent/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- davidkhala/ai/agent/langgraph.py,sha256=jrc_Yvgo7eJjd3y5UJn0t1FzpnObDGYscwgsuVl2O_I,1052
- davidkhala/ai/agent/ragflow.py,sha256=UaK31us6V0NhAPCthGo07rQsm72vlR-McmihC_NDe1g,273
- davidkhala/ai/agent/dify/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- davidkhala/ai/agent/dify/const.py,sha256=gU4lPBe4U2taakN2jhdPMRWXkqlyCg-YRE8JJmtsblo,218
- davidkhala/ai/agent/dify/interface.py,sha256=bTOI38ZjtkgoSw-ysgFwBZ1QkKVAa92gjOnERDoagQA,118
- davidkhala/ai/agent/dify/model.py,sha256=1LEwKWWkFNmhbBWABEu7I45DRZ_BFGDP5uTHDrvldoo,641
- davidkhala/ai/agent/dify/api/__init__.py,sha256=9-8OesuXF_wPmPrh_gEZpEZP51dcZxb0i6ixOBYKcwQ,876
- davidkhala/ai/agent/dify/api/app.py,sha256=y1mILC-fvQpeH50ASbFBluD9tFAwXu_IWwtwucMV5jM,3801
- davidkhala/ai/agent/dify/api/knowledge.py,sha256=5ePqvzjBHNtQ64Dzt39wBWedYVeQJc23syNe9LFnGw8,5960
- davidkhala/ai/agent/dify/ops/__init__.py,sha256=frcCV1k9oG9oKj3dpUqdJg1PxRT2RSN_XKdLCPjaYaY,2
- davidkhala/ai/agent/dify/ops/console/__init__.py,sha256=-a81jgCJ3s2B3i1GQ7ge1aZRfbvlALwGDHVu_GEET-A,237
- davidkhala/ai/agent/dify/ops/console/knowledge.py,sha256=I1v0iE_b4VPc2Zsyt4ci_oX080Qbgn3oXObP4uVEphg,5788
- davidkhala/ai/agent/dify/ops/console/plugin.py,sha256=iJxC0xLBgSIbiumcYc35mKVMuxG6t5gsNUFOtk52ilY,2356
- davidkhala/ai/agent/dify/ops/console/session.py,sha256=9IIdQqtTuYrc7QI9XpZxrCMb-K3m6QWH5FlsckSGCgg,991
- davidkhala/ai/agent/dify/ops/db/__init__.py,sha256=HYfJEnoFAoJJck2xvTDYx8zpw9Qao7sHXOGvW0diPqw,517
- davidkhala/ai/agent/dify/ops/db/app.py,sha256=IRiSiR0v387p4p3J7M9xEkJ7pfQyO5DL6chpx7Z2IzA,1319
- davidkhala/ai/agent/dify/ops/db/knowledge.py,sha256=GVaK5QmU_VxB8fDxV60uiYiIeR3JEn3IXJTlJHLiT5U,2917
- davidkhala/ai/agent/dify/ops/db/orm.py,sha256=CnZj8mV2RZhw_7hF1YICTUjROQ66hR5_8OCMQvtujnY,4575
- davidkhala/ai/agent/dify/ops/db/sys.py,sha256=U_qqopUMlgsilhHaG_ids6gtd-pNiR_Jm0kAr9hIL7M,188
- davidkhala/ai/agent/dify/plugins/__init__.py,sha256=iTWvutlkN9bXgptesi05M447nTeF5hKFAIfn4EviFj0,183
- davidkhala/ai/agent/dify/plugins/file.py,sha256=o-HjHSFwRTNIYs8IxqZUSnBbh-xr8f-xMUM3iU9wCCQ,390
- davidkhala/ai/agent/dify/plugins/firecrawl.py,sha256=lB_f8W_bdg-7PeBKmF0-HdwYyakV_0D3nET5iT-Z1KM,460
- davidkhala/ai/agent/dify/plugins/jina.py,sha256=dQ5iJxDLWtChXb1IjCtsHctgUtgjOiDfWOuR2u0aUIM,190
- davidkhala/ai/agent/dify/plugins/popular.py,sha256=QseXF_R7-6jFmHmuXgScoCjhrOS_xiC0DTqW8oOoMGM,731
- davidkhala/ai/ali/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- davidkhala/ai/ali/agentbay.py,sha256=O5t71GGwtDgBE1zUXJDYe5djMVwSaNOwn5k8zg1xa18,1200
- davidkhala/ai/ali/dashscope.py,sha256=SZIzRhVHlLx3s5I2RNUh2-u8OoSdrbvoN5e1k8Mh8N0,1943
- davidkhala/ai/api/__init__.py,sha256=q2Ro5nhW5kJx2CYR1MRVamjTT5tTexPZwhrS2hwAvFM,1319
- davidkhala/ai/api/openrouter.py,sha256=khccJr5cBnudFy6Jc2O3A1TNCuHH_5W6Q2tXrkwlUYE,2308
- davidkhala/ai/api/siliconflow.py,sha256=JbnOSv8LJLtwYSNNB8_SMBMQzOgHDtQYZKA9A2BC4sY,2139
- davidkhala/ai/google/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- davidkhala/ai/google/adk.py,sha256=QwxYoOzT2Hol03V4NM0PF_HAzUGb4fB18VUAYacYbAY,657
- davidkhala/ai/google/gemini.py,sha256=Xf4HDOOcK4-jEBERzuLnQNFsU61P2fFx4K0z-ijvNHE,214
- davidkhala/ai/huggingface/BAAI.py,sha256=LZ9kp5Gfql4UzuTn4osyekI6VV1H3RIfED2IolXFj5c,341
- davidkhala/ai/huggingface/__init__.py,sha256=FJyU8eOfWQWKAvkIa5qwubF9ghsSQ8C0e6p6DKyomgs,521
- davidkhala/ai/huggingface/inference.py,sha256=bYN0PtLF2CaIHzdTP4LaTALJhcawvuLnLR7rhMVqwDE,333
- davidkhala/ai/openai/__init__.py,sha256=GXzWaw2ER3YFGHG6TPD9SmAHV6Tpsnqxj6tXlaWsrko,1897
- davidkhala/ai/openai/azure.py,sha256=WmWSz9pKlUrQLSH25m1jE1l-mNWw9QQARj8uliOv8VU,1138
- davidkhala/ai/openai/native.py,sha256=MB0nDnzCOj_M42RMhdK3HTMVnxGnwpLT2GeLwSrepwI,704
- davidkhala/ai/openrouter/__init__.py,sha256=P8UvolZihN_CVBQ7BT1Fb6mSMFEQLyLY9G5bBDZhC0o,1037
- davidkhala_ai-0.2.0.dist-info/METADATA,sha256=jWA9caQqfyxxVWCj8i9nAw8Ni4Fkf7-kKjjDZvjr0r4,1607
- davidkhala_ai-0.2.0.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
- davidkhala_ai-0.2.0.dist-info/RECORD,,