oracle-ads 2.11.19__py3-none-any.whl → 2.12.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
ads/llm/requirements.txt CHANGED
@@ -1,3 +1,3 @@
- langchain>=0.0.295
- pydantic>=1.10.13,<3
+ langchain>=0.3
+ pydantic>=2,<3
  typing-extensions>=4.2.0
ads/llm/serialize.py CHANGED
@@ -12,7 +12,6 @@ from typing import Any, Dict, List, Optional
  import fsspec
  import yaml
  from langchain import llms
- from langchain.chains import RetrievalQA
  from langchain.chains.loading import load_chain_from_config
  from langchain.llms import loading
  from langchain.load.load import Reviver
@@ -21,7 +20,7 @@ from langchain.schema.runnable import RunnableParallel

  from ads.common.auth import default_signer
  from ads.common.object_storage_details import ObjectStorageDetails
- from ads.llm import GenerativeAI, ModelDeploymentTGI, ModelDeploymentVLLM
+ from ads.llm import OCIModelDeploymentVLLM, OCIModelDeploymentTGI
  from ads.llm.chain import GuardrailSequence
  from ads.llm.guardrails.base import CustomGuardrailBase
  from ads.llm.serializers.runnable_parallel import RunnableParallelSerializer
@@ -29,9 +28,8 @@ from ads.llm.serializers.retrieval_qa import RetrievalQASerializer

  # This is a temp solution for supporting custom LLM in legacy load_chain
  __lc_llm_dict = llms.get_type_to_cls_dict()
- __lc_llm_dict[GenerativeAI.__name__] = lambda: GenerativeAI
- __lc_llm_dict[ModelDeploymentTGI.__name__] = lambda: ModelDeploymentTGI
- __lc_llm_dict[ModelDeploymentVLLM.__name__] = lambda: ModelDeploymentVLLM
+ __lc_llm_dict[OCIModelDeploymentTGI.__name__] = lambda: OCIModelDeploymentTGI
+ __lc_llm_dict[OCIModelDeploymentVLLM.__name__] = lambda: OCIModelDeploymentVLLM


  def __new_type_to_cls_dict():
@@ -47,7 +45,6 @@ custom_serialization = {
      GuardrailSequence: GuardrailSequence.save,
      CustomGuardrailBase: CustomGuardrailBase.save,
      RunnableParallel: RunnableParallelSerializer.save,
-     RetrievalQA: RetrievalQASerializer.save,
  }

  # Mapping _type to custom deserialization functions
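serialize.py now registers the renamed OCIModelDeploymentTGI and OCIModelDeploymentVLLM classes in langchain's legacy type-to-class dict, so legacy load_chain configs that reference the LLM by class name resolve to the new classes. A minimal usage sketch of the renamed import, assuming the classes keep the endpoint/model fields of the langchain-community OCI Data Science model deployment integration; the endpoint URL and model name below are placeholders, not values from this package:

    from ads.llm import OCIModelDeploymentVLLM  # import path shown in the diff above

    # Point the LLM at an OCI Data Science model deployment (placeholder endpoint and model name).
    llm = OCIModelDeploymentVLLM(
        endpoint="https://modeldeployment.<region>.oci.customer-oci.com/<md-ocid>/predict",
        model="odsc-llm",
    )
    print(llm.invoke("Tell me a joke."))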
ads/llm/templates/tool_chat_template_hermes.jinja ADDED
@@ -0,0 +1,130 @@
+ {%- macro json_to_python_type(json_spec) %}
+ {%- set basic_type_map = {
+ "string": "str",
+ "number": "float",
+ "integer": "int",
+ "boolean": "bool"
+ } %}
+
+ {%- if basic_type_map[json_spec.type] is defined %}
+ {{- basic_type_map[json_spec.type] }}
+ {%- elif json_spec.type == "array" %}
+ {{- "list[" + json_to_python_type(json_spec|items) + "]" }}
+ {%- elif json_spec.type == "object" %}
+ {%- if json_spec.additionalProperties is defined %}
+ {{- "dict[str, " + json_to_python_type(json_spec.additionalProperties) + ']' }}
+ {%- else %}
+ {{- "dict" }}
+ {%- endif %}
+ {%- elif json_spec.type is iterable %}
+ {{- "Union[" }}
+ {%- for t in json_spec.type %}
+ {{- json_to_python_type({"type": t}) }}
+ {%- if not loop.last %}
+ {{- "," }}
+ {%- endif %}
+ {%- endfor %}
+ {{- "]" }}
+ {%- else %}
+ {{- "Any" }}
+ {%- endif %}
+ {%- endmacro %}
+
+
+ {{- bos_token }}
+ {{- "<|im_start|>system\nYou are a function calling AI model. You are provided with function signatures within <tools></tools> XML tags. You may call one or more functions to assist with the user query. Don't make assumptions about what values to plug into functions. Here are the available tools: <tools> " }}
+ {%- if tools is iterable and tools | length > 0 %}
+ {%- for tool in tools %}
+ {%- if tool.function is defined %}
+ {%- set tool = tool.function %}
+ {%- endif %}
+ {{- '{"type": "function", "function": ' }}
+ {{- '{"name": "' + tool.name + '", ' }}
+ {{- '"description": "' + tool.name + '(' }}
+ {%- for param_name, param_fields in tool.parameters.properties|items %}
+ {{- param_name + ": " + json_to_python_type(param_fields) }}
+ {%- if not loop.last %}
+ {{- ", " }}
+ {%- endif %}
+ {%- endfor %}
+ {{- ")" }}
+ {%- if tool.return is defined %}
+ {{- " -> " + json_to_python_type(tool.return) }}
+ {%- endif %}
+ {{- " - " + tool.description + "\n\n" }}
+ {%- for param_name, param_fields in tool.parameters.properties|items %}
+ {%- if loop.first %}
+ {{- " Args:\n" }}
+ {%- endif %}
+ {{- " " + param_name + "(" + json_to_python_type(param_fields) + "): " + param_fields.description|trim }}
+ {%- endfor %}
+ {%- if tool.return is defined and tool.return.description is defined %}
+ {{- "\n Returns:\n " + tool.return.description }}
+ {%- endif %}
+ {{- '"' }}
+ {{- ', "parameters": ' }}
+ {%- if tool.parameters.properties | length == 0 %}
+ {{- "{}" }}
+ {%- else %}
+ {{- tool.parameters|tojson }}
+ {%- endif %}
+ {{- "}" }}
+ {%- if not loop.last %}
+ {{- "\n" }}
+ {%- endif %}
+ {%- endfor %}
+ {%- endif %}
+ {{- " </tools>" }}
+ {{- 'Use the following pydantic model json schema for each tool call you will make: {"properties": {"name": {"title": "Name", "type": "string"}, "arguments": {"title": "Arguments", "type": "object"}}, "required": ["name", "arguments"], "title": "FunctionCall", "type": "object"}}
+ ' }}
+ {{- "For each function call return a json object with function name and arguments within <tool_call></tool_call> XML tags as follows:
+ " }}
+ {{- "<tool_call>
+ " }}
+ {{- '{"name": <function-name>, "arguments": <args-dict>}
+ ' }}
+ {{- '</tool_call><|im_end|>' }}
+ {%- for message in messages %}
+ {%- if message.role == "user" or message.role == "system" or (message.role == "assistant" and message.tool_calls is not defined) %}
+ {{- '<|im_start|>' + message.role + '\n' + message.content + '<|im_end|>' + '\n' }}
+ {%- elif message.role == "assistant" and message.tool_calls is defined %}
+ {{- '<|im_start|>' + message.role }}
+ {%- for tool_call in message.tool_calls %}
+ {{- '\n<tool_call>\n' }}
+ {%- if tool_call.function is defined %}
+ {%- set tool_call = tool_call.function %}
+ {%- endif %}
+ {{- '{' }}
+ {{- '"name": "' }}
+ {{- tool_call.name }}
+ {{- '"' }}
+ {%- if tool_call.arguments is defined %}
+ {{- ', ' }}
+ {{- '"arguments": ' }}
+ {{- tool_call.arguments|tojson }}
+ {%- endif %}
+ {{- '}' }}
+ {{- '\n</tool_call>' }}
+ {%- endfor %}
+ {{- '<|im_end|>\n' }}
+ {%- elif message.role == "tool" %}
+ {%- if loop.previtem and loop.previtem.role != "tool" %}
+ {{- '<|im_start|>tool\n' }}
+ {%- endif %}
+ {{- '<tool_response>\n' }}
+ {{- message.content }}
+ {%- if not loop.last %}
+ {{- '\n</tool_response>\n' }}
+ {%- else %}
+ {{- '\n</tool_response>' }}
+ {%- endif %}
+ {%- if not loop.last and loop.nextitem.role != "tool" %}
+ {{- '<|im_end|>' }}
+ {%- elif loop.last %}
+ {{- '<|im_end|>' }}
+ {%- endif %}
+ {%- endif %}
+ {%- endfor %}
+ {%- if add_generation_prompt %}
+ {{- '<|im_start|>assistant\n' }}
+ {%- endif %}
ads/llm/templates/tool_chat_template_mistral_parallel.jinja ADDED
@@ -0,0 +1,94 @@
+ {%- if messages[0]["role"] == "system" %}
+ {%- set system_message = messages[0]["content"] %}
+ {%- set loop_messages = messages[1:] %}
+ {%- else %}
+ {%- set loop_messages = messages %}
+ {%- endif %}
+ {%- if not tools is defined %}
+ {%- set tools = none %}
+ {%- endif %}
+ {%- if tools is defined %}
+ {%- set parallel_tool_prompt = "You are a helpful assistant that can call tools. If you call one or more tools, format them in a single JSON array or objects, where each object is a tool call, not as separate objects outside of an array or multiple arrays. Use the format [{\"name\": tool call name, \"arguments\": tool call arguments}, additional tool calls] if you call more than one tool. If you call tools, do not attempt to interpret them or otherwise provide a response until you receive a tool call result that you can interpret for the user." %}
+ {%- if system_message is defined %}
+ {%- set system_message = parallel_tool_prompt + "\n\n" + system_message %}
+ {%- else %}
+ {%- set system_message = parallel_tool_prompt %}
+ {%- endif %}
+ {%- endif %}
+ {%- set user_messages = loop_messages | selectattr("role", "equalto", "user") | list %}
+
+ {%- for message in loop_messages | rejectattr("role", "equalto", "tool") | rejectattr("role", "equalto", "tool_results") | selectattr("tool_calls", "undefined") %}
+ {%- if (message["role"] == "user") != (loop.index0 % 2 == 0) %}
+ {{- raise_exception("After the optional system message, conversation roles must alternate user/assistant/user/assistant/...") }}
+ {%- endif %}
+ {%- endfor %}
+
+ {{- bos_token }}
+ {%- for message in loop_messages %}
+ {%- if message["role"] == "user" %}
+ {%- if tools is not none and (message == user_messages[-1]) %}
+ {{- "[AVAILABLE_TOOLS] [" }}
+ {%- for tool in tools %}
+ {%- set tool = tool.function %}
+ {{- '{"type": "function", "function": {' }}
+ {%- for key, val in tool.items() if key != "return" %}
+ {%- if val is string %}
+ {{- '"' + key + '": "' + val + '"' }}
+ {%- else %}
+ {{- '"' + key + '": ' + val|tojson }}
+ {%- endif %}
+ {%- if not loop.last %}
+ {{- ", " }}
+ {%- endif %}
+ {%- endfor %}
+ {{- "}}" }}
+ {%- if not loop.last %}
+ {{- ", " }}
+ {%- else %}
+ {{- "]" }}
+ {%- endif %}
+ {%- endfor %}
+ {{- "[/AVAILABLE_TOOLS]" }}
+ {%- endif %}
+ {%- if loop.last and system_message is defined %}
+ {{- "[INST] " + system_message + "\n\n" + message["content"] + "[/INST]" }}
+ {%- else %}
+ {{- "[INST] " + message["content"] + "[/INST]" }}
+ {%- endif %}
+ {%- elif message["role"] == "tool_calls" or message.tool_calls is defined %}
+ {%- if message.tool_calls is defined %}
+ {%- set tool_calls = message.tool_calls %}
+ {%- else %}
+ {%- set tool_calls = message.content %}
+ {%- endif %}
+ {{- "[TOOL_CALLS] [" }}
+ {%- for tool_call in tool_calls %}
+ {%- set out = tool_call.function|tojson %}
+ {{- out[:-1] }}
+ {%- if not tool_call.id is defined or tool_call.id|length < 9 %}
+ {{- raise_exception("Tool call IDs should be alphanumeric strings with length >= 9! (1)" + tool_call.id) }}
+ {%- endif %}
+ {{- ', "id": "' + tool_call.id[-9:] + '"}' }}
+ {%- if not loop.last %}
+ {{- ", " }}
+ {%- else %}
+ {{- "]" + eos_token }}
+ {%- endif %}
+ {%- endfor %}
+ {%- elif message["role"] == "assistant" %}
+ {{- " " + message["content"] + eos_token }}
+ {%- elif message["role"] == "tool_results" or message["role"] == "tool" %}
+ {%- if message.content is defined and message.content.content is defined %}
+ {%- set content = message.content.content %}
+ {%- else %}
+ {%- set content = message.content %}
+ {%- endif %}
+ {{- '[TOOL_RESULTS] {"content": ' + content|string + ", " }}
+ {%- if not message.tool_call_id is defined or message.tool_call_id|length < 9 %}
+ {{- raise_exception("Tool call IDs should be alphanumeric strings with length >= 9! (2)" + message.tool_call_id) }}
+ {%- endif %}
+ {{- '"call_id": "' + message.tool_call_id[-9:] + '"}[/TOOL_RESULTS]' }}
+ {%- else %}
+ {{- raise_exception("Only user and assistant roles are supported, with the exception of an initial optional system message!") }}
+ {%- endif %}
+ {%- endfor %}
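The two new Jinja files above are tool-calling chat templates: the first follows the Hermes/ChatML style (<|im_start|> markers with <tool_call> tags), the second the Mistral style with parallel tool calls ([AVAILABLE_TOOLS]/[TOOL_CALLS] markers). A minimal sketch of rendering such a template with Hugging Face transformers, whose recent releases accept a tools list and a chat_template override in apply_chat_template; the model name, template path, and get_weather function are illustrative assumptions, not part of this package:

    from transformers import AutoTokenizer

    def get_weather(city: str) -> str:
        """Get the current weather for a city.

        Args:
            city: Name of the city.
        """
        return "sunny"

    # Placeholder model; any tokenizer works here since we only render the prompt text.
    tokenizer = AutoTokenizer.from_pretrained("NousResearch/Hermes-2-Pro-Mistral-7B")
    with open("tool_chat_template_hermes.jinja") as f:  # placeholder path to the template file
        chat_template = f.read()

    prompt = tokenizer.apply_chat_template(
        [{"role": "user", "content": "What is the weather in Tokyo?"}],
        tools=[get_weather],          # transformers converts the function to a JSON-schema tool spec
        chat_template=chat_template,  # use the template above instead of the tokenizer's default
        tokenize=False,
        add_generation_prompt=True,
    )
    print(prompt)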
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: oracle_ads
- Version: 2.11.19
+ Version: 2.12.0
  Summary: Oracle Accelerated Data Science SDK
  Keywords: Oracle Cloud Infrastructure,OCI,Machine Learning,ML,Artificial Intelligence,AI,Data Science,Cloud,Oracle
  Author: Oracle Data Science
@@ -82,8 +82,10 @@ Requires-Dist: fiona<=1.9.6 ; extra == "geo"
  Requires-Dist: oracle_ads[viz] ; extra == "geo"
  Requires-Dist: transformers ; extra == "huggingface"
  Requires-Dist: tf-keras ; extra == "huggingface"
- Requires-Dist: langchain-community<0.0.32 ; extra == "llm"
- Requires-Dist: langchain>=0.1.10,<0.1.14 ; extra == "llm"
+ Requires-Dist: langchain>=0.2 ; extra == "llm"
+ Requires-Dist: langchain-community ; extra == "llm"
+ Requires-Dist: langchain_openai ; extra == "llm"
+ Requires-Dist: pydantic>=2,<3 ; extra == "llm"
  Requires-Dist: evaluate>=0.4.0 ; extra == "llm"
  Requires-Dist: ipython>=7.23.1, <8.0 ; extra == "notebook"
  Requires-Dist: ipywidgets~=7.6.3 ; extra == "notebook"
@@ -145,7 +147,7 @@ Requires-Dist: statsmodels>=0.14.1 ; extra == "testsuite" and ( python_version>=
  Requires-Dist: tables ; extra == "testsuite"
  Requires-Dist: tables>3.9.0 ; extra == "testsuite" and ( python_version>='3.9')
  Requires-Dist: xlrd>=1.2.0 ; extra == "testsuite"
- Requires-Dist: spacy>=3.4.2 ; extra == "text"
+ Requires-Dist: spacy>=3.4.2,<3.8 ; extra == "text"
  Requires-Dist: wordcloud>=1.8.1 ; extra == "text"
  Requires-Dist: oracle_ads[viz] ; extra == "torch"
  Requires-Dist: torch ; extra == "torch"
@@ -446,25 +446,27 @@ ads/jobs/templates/driver_pytorch.py,sha256=lZMokjQqcYVqTfXzhQYjc1Z-kXhmYgxZe3pg
  ads/jobs/templates/driver_utils.py,sha256=tFQOwdspi6eXNftFt1ZLtFChc3HQ37rdf3cQ4kf3ayg,22097
  ads/jobs/templates/hostname_from_env.c,sha256=SeEoIQnuCiP9fiYWJ5MokWBUi1u0mcSo-nTXbXyZp7w,2309
  ads/jobs/templates/oci_metrics.py,sha256=3l4h17W_dheSK34thp95pMvG0iqBufoXck3I8_4zX6I,5859
- ads/llm/__init__.py,sha256=ku_cx5jqTcYQzEWU6lMisuK1PFJU9Jva6Ncq8VpnrBI,714
+ ads/llm/__init__.py,sha256=t5yoDsD5huaEp38qdI5iTtkfXmJQ-5XDSrvcHM_XnHU,830
  ads/llm/chain.py,sha256=KuQcZGQsrlcl3CjtLk8KOHtSu0XJvFRL_Wv0Gz2RdF4,9526
+ ads/llm/chat_template.py,sha256=t2QRfLLR_c_cq3JqABghWqiCSWjjuVc_mfEN-yVYG10,934
  ads/llm/deploy.py,sha256=VYm_8ML8iXL_y-G8LqSm1VsQQTSFq7rA95VCdhLz1A0,1985
- ads/llm/requirements.txt,sha256=ZOtrHBFR6A5LCly-nqbwGD7iWTBVHIWRPrAejN3oQl0,65
- ads/llm/serialize.py,sha256=C5Zo8NjO1iAJ4De9tuOB8bQO-5Bcp6VszSULA46LOOM,7423
+ ads/llm/requirements.txt,sha256=vaVwhWCteqmo0fRsEk6M8S1LQMjULU_Bt_syBAa2G-s,55
+ ads/llm/serialize.py,sha256=WjQNMPACyR8nIh1dB7BLFUmqUrumld6vt91lg1DWzWI,7281
  ads/llm/guardrails/__init__.py,sha256=sAqmLhogrLXb3xI7dPOj9HmSkpTnLh9wkzysuGd8AXk,204
- ads/llm/guardrails/base.py,sha256=3z_gSik-d859fvDGe4UGjSon_aeqE0PqZwHOCS_89J0,16491
- ads/llm/guardrails/huggingface.py,sha256=ts54JD6AUgXu7HoqYYU6NCQhwdhlWl0DSxuQ9ZRCQ3k,1314
+ ads/llm/guardrails/base.py,sha256=UESjl8VgQGnDwpf8dy0PWpOJxpZKKnGHN6s46qnQUNw,16512
+ ads/llm/guardrails/huggingface.py,sha256=4DFanCYb3R1SKYSFdcEyGH2ywQgf2yFDDZGJtOcoph0,1304
  ads/llm/langchain/__init__.py,sha256=sAqmLhogrLXb3xI7dPOj9HmSkpTnLh9wkzysuGd8AXk,204
  ads/llm/langchain/plugins/__init__.py,sha256=sAqmLhogrLXb3xI7dPOj9HmSkpTnLh9wkzysuGd8AXk,204
- ads/llm/langchain/plugins/base.py,sha256=fmcI7KRCzh1Y-k7FbTzS-3WyAGomanFK0ZPZcqPIkFA,4704
- ads/llm/langchain/plugins/contant.py,sha256=p1k9p7LwITygnZttNRauato1PhybUPESnIcZv3kkqLY,1008
- ads/llm/langchain/plugins/embeddings.py,sha256=nOm902vY1P1oV6v1M7o9Fh0lSc7Gg0Nj5-puOByO6xI,2046
- ads/llm/langchain/plugins/llm_gen_ai.py,sha256=HeFHUV1aqdw661VoyYdjG973m-LntU0YWozOZ9YEKi0,9975
- ads/llm/langchain/plugins/llm_md.py,sha256=YhlT1gu4BZShcdQ4sy6S9N4h9BC-FqSbvYTAVoHkgpw,9899
+ ads/llm/langchain/plugins/chat_models/__init__.py,sha256=sAqmLhogrLXb3xI7dPOj9HmSkpTnLh9wkzysuGd8AXk,204
+ ads/llm/langchain/plugins/chat_models/oci_data_science.py,sha256=q398lxXycKlAtPmkVqeEE-Uaqymj13HZIRaCm2B_xDU,33667
+ ads/llm/langchain/plugins/llms/__init__.py,sha256=sAqmLhogrLXb3xI7dPOj9HmSkpTnLh9wkzysuGd8AXk,204
+ ads/llm/langchain/plugins/llms/oci_data_science_model_deployment_endpoint.py,sha256=ng3pEoXXEaCc_qSHkXwyJmZC9dGPO-imQT4JN6jAJnc,32353
  ads/llm/serializers/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  ads/llm/serializers/retrieval_qa.py,sha256=VQ4rFRrDHOpAcMYNvRbT19LcDGwRrE1lczerLQYKxwU,5133
  ads/llm/serializers/runnable_parallel.py,sha256=USCVhMNi67AiCmu-s_mmOvc0sK7v4yKVwBTJm60x7wE,835
  ads/llm/templates/score_chain.jinja2,sha256=H6_riqJlFtnQk7Za4rb7wGffWVLEUxxqgXlsbMPdInY,4869
+ ads/llm/templates/tool_chat_template_hermes.jinja,sha256=nQgWGwZludNFmUO7V8emgPQud828l9T4e5QmsDyLq4k,5226
+ ads/llm/templates/tool_chat_template_mistral_parallel.jinja,sha256=xkZLgw50a3wPiw9I5HmDlZiEAXPg9wtwnrkhaAiI_1o,4773
  ads/model/__init__.py,sha256=r4U2NvroKMUa-tqNnXBtND9cA6b1Yefmdj6lgdoKlDk,1900
  ads/model/artifact.py,sha256=ONKyjZKO5wmAYI-GT63z8yLm_QsmIGXcob9KrnwtF5k,20503
  ads/model/artifact_downloader.py,sha256=-9IYkjZ0LaMWf5foz5HUGTZCEm67f-3LbDsigNlzEPg,9751
@@ -811,8 +813,8 @@ ads/type_discovery/unknown_detector.py,sha256=yZuYQReO7PUyoWZE7onhhtYaOg6088wf1y
  ads/type_discovery/zipcode_detector.py,sha256=3AlETg_ZF4FT0u914WXvTT3F3Z6Vf51WiIt34yQMRbw,1421
  ads/vault/__init__.py,sha256=x9tMdDAOdF5iDHk9u2di_K-ze5Nq068x25EWOBoWwqY,245
  ads/vault/vault.py,sha256=hFBkpYE-Hfmzu1L0sQwUfYcGxpWmgG18JPndRl0NOXI,8624
- oracle_ads-2.11.19.dist-info/entry_points.txt,sha256=9VFnjpQCsMORA4rVkvN8eH6D3uHjtegb9T911t8cqV0,35
- oracle_ads-2.11.19.dist-info/LICENSE.txt,sha256=zoGmbfD1IdRKx834U0IzfFFFo5KoFK71TND3K9xqYqo,1845
- oracle_ads-2.11.19.dist-info/WHEEL,sha256=EZbGkh7Ie4PoZfRQ8I0ZuP9VklN_TvcZ6DSE5Uar4z4,81
- oracle_ads-2.11.19.dist-info/METADATA,sha256=R8jk8MKEE5pas8WxbiX_VJ02VbhhojP_R83HgtIKUmE,16068
- oracle_ads-2.11.19.dist-info/RECORD,,
+ oracle_ads-2.12.0.dist-info/entry_points.txt,sha256=9VFnjpQCsMORA4rVkvN8eH6D3uHjtegb9T911t8cqV0,35
+ oracle_ads-2.12.0.dist-info/LICENSE.txt,sha256=zoGmbfD1IdRKx834U0IzfFFFo5KoFK71TND3K9xqYqo,1845
+ oracle_ads-2.12.0.dist-info/WHEEL,sha256=EZbGkh7Ie4PoZfRQ8I0ZuP9VklN_TvcZ6DSE5Uar4z4,81
+ oracle_ads-2.12.0.dist-info/METADATA,sha256=-Dmo877fdCzqL0LMAxE0UASxtvPRZ6S_vzvDa--vIsY,16150
+ oracle_ads-2.12.0.dist-info/RECORD,,
ads/llm/langchain/plugins/base.py DELETED
@@ -1,118 +0,0 @@
- #!/usr/bin/env python
- # -*- coding: utf-8 -*--
-
- # Copyright (c) 2023, 2024 Oracle and/or its affiliates.
- # Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl/
- from typing import Any, Dict, List, Optional
-
- from langchain.llms.base import LLM
- from langchain.pydantic_v1 import BaseModel, Field, root_validator
-
- from ads import logger
- from ads.common.auth import default_signer
- from ads.config import COMPARTMENT_OCID
-
-
- class BaseLLM(LLM):
-     """Base OCI LLM class. Contains common attributes."""
-
-     auth: dict = Field(default_factory=default_signer, exclude=True)
-     """ADS auth dictionary for OCI authentication.
-     This can be generated by calling `ads.common.auth.api_keys()` or `ads.common.auth.resource_principal()`.
-     If this is not provided then the `ads.common.default_signer()` will be used."""
-
-     max_tokens: int = 256
-     """Denotes the number of tokens to predict per generation."""
-
-     temperature: float = 0.2
-     """A non-negative float that tunes the degree of randomness in generation."""
-
-     k: int = 0
-     """Number of most likely tokens to consider at each step."""
-
-     p: int = 0.75
-     """Total probability mass of tokens to consider at each step."""
-
-     stop: Optional[List[str]] = None
-     """Stop words to use when generating. Model output is cut off at the first occurrence of any of these substrings."""
-
-     def _print_request(self, prompt, params):
-         if self.verbose:
-             print(f"LLM API Request:\n{prompt}")
-
-     def _print_response(self, completion, response):
-         if self.verbose:
-             print(f"LLM API Completion:\n{completion}")
-
-     @classmethod
-     def get_lc_namespace(cls) -> List[str]:
-         """Get the namespace of the LangChain object."""
-         return ["ads", "llm"]
-
-     @classmethod
-     def is_lc_serializable(cls) -> bool:
-         """This class can be serialized with default LangChain serialization."""
-         return True
-
-
- class GenerativeAiClientModel(BaseModel):
-     """Base model for generative AI embedding model and LLM."""
-
-     # This auth is the same as the auth in BaseLLM class.
-     # However, this is needed for the Gen AI embedding model.
-     # Do not remove this attribute
-     auth: dict = Field(default_factory=default_signer, exclude=True)
-     """ADS auth dictionary for OCI authentication.
-     This can be generated by calling `ads.common.auth.api_keys()` or `ads.common.auth.resource_principal()`.
-     If this is not provided then the `ads.common.default_signer()` will be used."""
-
-     client: Any #: :meta private:
-     """OCI GenerativeAiClient."""
-
-     compartment_id: str = None
-     """Compartment ID of the caller."""
-
-     endpoint_kwargs: Dict[str, Any] = {}
-     """Optional attributes passed to the OCI API call."""
-
-     client_kwargs: Dict[str, Any] = {}
-     """Holds any client parameters for creating GenerativeAiClient"""
-
-     @staticmethod
-     def _import_client():
-         try:
-             from oci.generative_ai_inference import GenerativeAiInferenceClient
-         except ImportError as ex:
-             raise ImportError(
-                 "Could not import GenerativeAiInferenceClient from oci. "
-                 "The OCI SDK installed does not support generative AI service."
-             ) from ex
-         return GenerativeAiInferenceClient
-
-     @root_validator()
-     def validate_environment( # pylint: disable=no-self-argument
-         cls, values: Dict
-     ) -> Dict:
-         """Validate that python package exists in environment."""
-         # Initialize client only if user does not pass in client.
-         # Users may choose to initialize the OCI client by themselves and pass it into this model.
-         logger.warning(
-             f"The ads langchain plugin {cls.__name__} will be deprecated soon. "
-             "Please refer to https://python.langchain.com/v0.2/docs/integrations/providers/oci/ "
-             "for the latest support."
-         )
-         if not values.get("client"):
-             auth = values.get("auth", {})
-             client_kwargs = auth.get("client_kwargs") or {}
-             client_kwargs.update(values["client_kwargs"])
-             # Import the GenerativeAIClient here so that there will be no error when user import ads.llm
-             # and the install OCI SDK does not support generative AI service yet.
-             client_class = cls._import_client()
-             values["client"] = client_class(**auth, **client_kwargs)
-         # Set default compartment ID
-         if not values.get("compartment_id"):
-             if COMPARTMENT_OCID:
-                 values["compartment_id"] = COMPARTMENT_OCID
-             else:
-                 raise ValueError("Please specify compartment_id.")
-         return values
ads/llm/langchain/plugins/contant.py DELETED
@@ -1,44 +0,0 @@
- #!/usr/bin/env python
- # -*- coding: utf-8 -*--
-
- # Copyright (c) 2023, 2024 Oracle and/or its affiliates.
- # Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl/
- from enum import Enum
-
-
- class StrEnum(str, Enum):
-     """Enum with string members
-     https://docs.python.org/3.11/library/enum.html#enum.StrEnum
-     """
-
-     # Pydantic uses Python's standard enum classes to define choices.
-     # https://docs.pydantic.dev/latest/api/standard_library_types/#enum
-
-
- DEFAULT_TIME_OUT = 300
- DEFAULT_CONTENT_TYPE_JSON = "application/json"
-
-
- class Task(StrEnum):
-     TEXT_GENERATION = "text_generation"
-     TEXT_SUMMARIZATION = "text_summarization"
-
-
- class LengthParam(StrEnum):
-     SHORT = "SHORT"
-     MEDIUM = "MEDIUM"
-     LONG = "LONG"
-     AUTO = "AUTO"
-
-
- class FormatParam(StrEnum):
-     PARAGRAPH = "PARAGRAPH"
-     BULLETS = "BULLETS"
-     AUTO = "AUTO"
-
-
- class ExtractivenessParam(StrEnum):
-     LOW = "LOW"
-     MEDIUM = "MEDIUM"
-     HIGH = "HIGH"
-     AUTO = "AUTO"
ads/llm/langchain/plugins/embeddings.py DELETED
@@ -1,64 +0,0 @@
- #!/usr/bin/env python
- # -*- coding: utf-8 -*--
-
- # Copyright (c) 2023, 2024 Oracle and/or its affiliates.
- # Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl/
-
- from typing import List, Optional
- from langchain.load.serializable import Serializable
- from langchain.schema.embeddings import Embeddings
- from ads.llm.langchain.plugins.base import GenerativeAiClientModel
-
-
- class GenerativeAIEmbeddings(GenerativeAiClientModel, Embeddings, Serializable):
-     """OCI Generative AI embedding models."""
-
-     model: str = "cohere.embed-english-light-v2.0"
-     """Model name to use."""
-
-     truncate: Optional[str] = None
-     """Truncate embeddings that are too long from start or end ("NONE"|"START"|"END")"""
-
-     @classmethod
-     def get_lc_namespace(cls) -> List[str]:
-         """Get the namespace of the LangChain object."""
-         return ["ads", "llm"]
-
-     @classmethod
-     def is_lc_serializable(cls) -> bool:
-         """This class can be serialized with default LangChain serialization."""
-         return True
-
-     def embed_documents(self, texts: List[str]) -> List[List[float]]:
-         """Embeds a list of strings.
-
-         Args:
-             texts: The list of texts to embed.
-
-         Returns:
-             List of embeddings, one for each text.
-         """
-         from oci.generative_ai_inference.models import (
-             EmbedTextDetails,
-             OnDemandServingMode,
-         )
-
-         details = EmbedTextDetails(
-             compartment_id=self.compartment_id,
-             inputs=texts,
-             serving_mode=OnDemandServingMode(model_id=self.model),
-             truncate=self.truncate,
-         )
-         embeddings = self.client.embed_text(details).data.embeddings
-         return [list(map(float, e)) for e in embeddings]
-
-     def embed_query(self, text: str) -> List[float]:
-         """Embeds a single string.
-
-         Args:
-             text: The text to embed.
-
-         Returns:
-             Embeddings for the text.
-         """
-         return self.embed_documents([text])[0]
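The deleted plugin modules above (the GenerativeAiClientModel base and the GenerativeAIEmbeddings class) carried a deprecation warning pointing users to the LangChain OCI integrations. A minimal migration sketch using the langchain-community classes, assuming their published parameter names (model_id, service_endpoint, compartment_id); the model IDs, endpoint URL, and compartment OCID below are placeholders:

    from langchain_community.embeddings import OCIGenAIEmbeddings
    from langchain_community.llms import OCIGenAI

    ENDPOINT = "https://inference.generativeai.us-chicago-1.oci.oraclecloud.com"  # placeholder region endpoint
    COMPARTMENT = "ocid1.compartment.oc1..example"                                # placeholder OCID

    # Completion model replacing the removed GenerativeAI plugin.
    llm = OCIGenAI(
        model_id="cohere.command",  # placeholder model ID
        service_endpoint=ENDPOINT,
        compartment_id=COMPARTMENT,
    )
    # Embedding model replacing the removed GenerativeAIEmbeddings plugin.
    embeddings = OCIGenAIEmbeddings(
        model_id="cohere.embed-english-light-v3.0",  # placeholder model ID
        service_endpoint=ENDPOINT,
        compartment_id=COMPARTMENT,
    )

    print(llm.invoke("Hello"))
    print(len(embeddings.embed_query("Hello")))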