clarifai 10.5.3__py3-none-any.whl → 10.5.4__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (86)
  1. clarifai/client/input.py +116 -45
  2. clarifai/constants/dataset.py +2 -0
  3. clarifai/datasets/export/inputs_annotations.py +6 -2
  4. clarifai/rag/rag.py +8 -0
  5. clarifai/utils/constants.py +1 -0
  6. clarifai/utils/logging.py +3 -1
  7. clarifai/versions.py +1 -1
  8. {clarifai-10.5.3.dist-info → clarifai-10.5.4.dist-info}/METADATA +1 -1
  9. {clarifai-10.5.3.dist-info → clarifai-10.5.4.dist-info}/RECORD +13 -86
  10. {clarifai-10.5.3.dist-info → clarifai-10.5.4.dist-info}/WHEEL +1 -1
  11. clarifai/__pycache__/__init__.cpython-310.pyc +0 -0
  12. clarifai/__pycache__/errors.cpython-310.pyc +0 -0
  13. clarifai/__pycache__/versions.cpython-310.pyc +0 -0
  14. clarifai/client/__pycache__/__init__.cpython-310.pyc +0 -0
  15. clarifai/client/__pycache__/app.cpython-310.pyc +0 -0
  16. clarifai/client/__pycache__/base.cpython-310.pyc +0 -0
  17. clarifai/client/__pycache__/dataset.cpython-310.pyc +0 -0
  18. clarifai/client/__pycache__/input.cpython-310.pyc +0 -0
  19. clarifai/client/__pycache__/lister.cpython-310.pyc +0 -0
  20. clarifai/client/__pycache__/model.cpython-310.pyc +0 -0
  21. clarifai/client/__pycache__/module.cpython-310.pyc +0 -0
  22. clarifai/client/__pycache__/runner.cpython-310.pyc +0 -0
  23. clarifai/client/__pycache__/search.cpython-310.pyc +0 -0
  24. clarifai/client/__pycache__/user.cpython-310.pyc +0 -0
  25. clarifai/client/__pycache__/workflow.cpython-310.pyc +0 -0
  26. clarifai/client/auth/__pycache__/__init__.cpython-310.pyc +0 -0
  27. clarifai/client/auth/__pycache__/helper.cpython-310.pyc +0 -0
  28. clarifai/client/auth/__pycache__/register.cpython-310.pyc +0 -0
  29. clarifai/client/auth/__pycache__/stub.cpython-310.pyc +0 -0
  30. clarifai/constants/__pycache__/dataset.cpython-310.pyc +0 -0
  31. clarifai/constants/__pycache__/model.cpython-310.pyc +0 -0
  32. clarifai/constants/__pycache__/search.cpython-310.pyc +0 -0
  33. clarifai/datasets/__pycache__/__init__.cpython-310.pyc +0 -0
  34. clarifai/datasets/export/__pycache__/__init__.cpython-310.pyc +0 -0
  35. clarifai/datasets/export/__pycache__/inputs_annotations.cpython-310.pyc +0 -0
  36. clarifai/datasets/upload/__pycache__/__init__.cpython-310.pyc +0 -0
  37. clarifai/datasets/upload/__pycache__/base.cpython-310.pyc +0 -0
  38. clarifai/datasets/upload/__pycache__/features.cpython-310.pyc +0 -0
  39. clarifai/datasets/upload/__pycache__/image.cpython-310.pyc +0 -0
  40. clarifai/datasets/upload/__pycache__/text.cpython-310.pyc +0 -0
  41. clarifai/datasets/upload/__pycache__/utils.cpython-310.pyc +0 -0
  42. clarifai/models/__pycache__/__init__.cpython-310.pyc +0 -0
  43. clarifai/models/model_serving/__pycache__/__init__.cpython-310.pyc +0 -0
  44. clarifai/models/model_serving/__pycache__/constants.cpython-310.pyc +0 -0
  45. clarifai/models/model_serving/cli/__pycache__/__init__.cpython-310.pyc +0 -0
  46. clarifai/models/model_serving/cli/__pycache__/_utils.cpython-310.pyc +0 -0
  47. clarifai/models/model_serving/cli/__pycache__/base.cpython-310.pyc +0 -0
  48. clarifai/models/model_serving/cli/__pycache__/build.cpython-310.pyc +0 -0
  49. clarifai/models/model_serving/cli/__pycache__/create.cpython-310.pyc +0 -0
  50. clarifai/models/model_serving/model_config/__pycache__/__init__.cpython-310.pyc +0 -0
  51. clarifai/models/model_serving/model_config/__pycache__/base.cpython-310.pyc +0 -0
  52. clarifai/models/model_serving/model_config/__pycache__/config.cpython-310.pyc +0 -0
  53. clarifai/models/model_serving/model_config/__pycache__/inference_parameter.cpython-310.pyc +0 -0
  54. clarifai/models/model_serving/model_config/__pycache__/output.cpython-310.pyc +0 -0
  55. clarifai/models/model_serving/model_config/triton/__pycache__/__init__.cpython-310.pyc +0 -0
  56. clarifai/models/model_serving/model_config/triton/__pycache__/serializer.cpython-310.pyc +0 -0
  57. clarifai/models/model_serving/model_config/triton/__pycache__/triton_config.cpython-310.pyc +0 -0
  58. clarifai/models/model_serving/model_config/triton/__pycache__/wrappers.cpython-310.pyc +0 -0
  59. clarifai/models/model_serving/repo_build/__pycache__/__init__.cpython-310.pyc +0 -0
  60. clarifai/models/model_serving/repo_build/__pycache__/build.cpython-310.pyc +0 -0
  61. clarifai/models/model_serving/repo_build/static_files/__pycache__/base_test.cpython-310-pytest-7.2.0.pyc +0 -0
  62. clarifai/rag/__pycache__/__init__.cpython-310.pyc +0 -0
  63. clarifai/rag/__pycache__/rag.cpython-310.pyc +0 -0
  64. clarifai/rag/__pycache__/utils.cpython-310.pyc +0 -0
  65. clarifai/runners/deepgram_live_transcribe.py +0 -98
  66. clarifai/runners/deepgram_live_transcribe.py~ +0 -98
  67. clarifai/runners/deepgram_runner.py +0 -131
  68. clarifai/runners/deepgram_runner.py~ +0 -130
  69. clarifai/runners/example_llama2.py~ +0 -72
  70. clarifai/runners/matt_example.py +0 -89
  71. clarifai/runners/matt_example.py~ +0 -87
  72. clarifai/runners/matt_llm_example.py +0 -129
  73. clarifai/runners/matt_llm_example.py~ +0 -128
  74. clarifai/schema/__pycache__/search.cpython-310.pyc +0 -0
  75. clarifai/urls/__pycache__/helper.cpython-310.pyc +0 -0
  76. clarifai/utils/__pycache__/__init__.cpython-310.pyc +0 -0
  77. clarifai/utils/__pycache__/logging.cpython-310.pyc +0 -0
  78. clarifai/utils/__pycache__/misc.cpython-310.pyc +0 -0
  79. clarifai/utils/__pycache__/model_train.cpython-310.pyc +0 -0
  80. clarifai/workflows/__pycache__/__init__.cpython-310.pyc +0 -0
  81. clarifai/workflows/__pycache__/export.cpython-310.pyc +0 -0
  82. clarifai/workflows/__pycache__/utils.cpython-310.pyc +0 -0
  83. clarifai/workflows/__pycache__/validate.cpython-310.pyc +0 -0
  84. {clarifai-10.5.3.dist-info → clarifai-10.5.4.dist-info}/LICENSE +0 -0
  85. {clarifai-10.5.3.dist-info → clarifai-10.5.4.dist-info}/entry_points.txt +0 -0
  86. {clarifai-10.5.3.dist-info → clarifai-10.5.4.dist-info}/top_level.txt +0 -0
@@ -1,98 +0,0 @@
1
- import os
2
- import httpx
3
- from dotenv import load_dotenv
4
- import threading
5
-
6
- from deepgram import (
7
- DeepgramClient,
8
- LiveTranscriptionEvents,
9
- LiveOptions,
10
- )
11
-
12
- load_dotenv()
13
-
14
- # URL for the realtime streaming audio you would like to transcribe
15
- URL = "http://stream.live.vc.bbcmedia.co.uk/bbc_world_service"
16
-
17
- os.environ["DG_API_KEY"] = "1d5666523f0f2fbaf3e2db6ae7717f6f87280b5e"
18
- API_KEY = os.getenv("DG_API_KEY")
19
-
20
-
21
- def main():
22
- try:
23
- # STEP 1: Create a Deepgram client using the API key
24
- deepgram = DeepgramClient(API_KEY)
25
-
26
- # STEP 2: Create a websocket connection to Deepgram
27
- dg_connection = deepgram.listen.live.v("1")
28
-
29
- # STEP 3: Define the event handlers for the connection
30
- def on_message(self, result, **kwargs):
31
- sentence = result.channel.alternatives[0].transcript
32
- if len(sentence) == 0:
33
- return
34
- print(f"speaker: {sentence}")
35
-
36
- def on_metadata(self, metadata, **kwargs):
37
- print(f"\n\n{metadata}\n\n")
38
-
39
- def on_error(self, error, **kwargs):
40
- print(f"\n\n{error}\n\n")
41
-
42
- # STEP 4: Register the event handlers
43
- dg_connection.on(LiveTranscriptionEvents.Transcript, on_message)
44
- dg_connection.on(LiveTranscriptionEvents.Metadata, on_metadata)
45
- dg_connection.on(LiveTranscriptionEvents.Error, on_error)
46
-
47
- # STEP 5: Configure Deepgram options for live transcription
48
- options = LiveOptions(
49
- model="nova-2",
50
- language="en-US",
51
- smart_format=True,
52
- )
53
-
54
- # STEP 6: Start the connection
55
- dg_connection.start(options)
56
-
57
- # STEP 7: Create a lock and a flag for thread synchronization
58
- lock_exit = threading.Lock()
59
- exit = False
60
-
61
- # STEP 8: Define a thread that streams the audio and sends it to Deepgram
62
- def myThread():
63
- with httpx.stream("GET", URL) as r:
64
- for data in r.iter_bytes():
65
- lock_exit.acquire()
66
- if exit:
67
- break
68
- lock_exit.release()
69
-
70
- dg_connection.send(data)
71
-
72
- # STEP 9: Start the thread
73
- myHttp = threading.Thread(target=myThread)
74
- myHttp.start()
75
-
76
- # STEP 10: Wait for user input to stop recording
77
- input("Press Enter to stop recording...\n\n")
78
-
79
- # STEP 11: Set the exit flag to True to stop the thread
80
- lock_exit.acquire()
81
- exit = True
82
- lock_exit.release()
83
-
84
- # STEP 12: Wait for the thread to finish
85
- myHttp.join()
86
-
87
- # STEP 13: Close the connection to Deepgram
88
- dg_connection.finish()
89
-
90
- print("Finished")
91
-
92
- except Exception as e:
93
- print(f"Could not open socket: {e}")
94
- return
95
-
96
-
97
- if __name__ == "__main__":
98
- main()
@@ -1,98 +0,0 @@
1
- import os
2
- import httpx
3
- from dotenv import load_dotenv
4
- import threading
5
-
6
- from deepgram import (
7
- DeepgramClient,
8
- LiveTranscriptionEvents,
9
- LiveOptions,
10
- )
11
-
12
- load_dotenv()
13
-
14
- # URL for the realtime streaming audio you would like to transcribe
15
- URL = "http://stream.live.vc.bbcmedia.co.uk/bbc_world_service"
16
-
17
- os.environ.set("DG_API_KEY", "1d5666523f0f2fbaf3e2db6ae7717f6f87280b5e")
18
- API_KEY = os.getenv("DG_API_KEY")
19
-
20
-
21
- def main():
22
- try:
23
- # STEP 1: Create a Deepgram client using the API key
24
- deepgram = DeepgramClient(API_KEY)
25
-
26
- # STEP 2: Create a websocket connection to Deepgram
27
- dg_connection = deepgram.listen.live.v("1")
28
-
29
- # STEP 3: Define the event handlers for the connection
30
- def on_message(self, result, **kwargs):
31
- sentence = result.channel.alternatives[0].transcript
32
- if len(sentence) == 0:
33
- return
34
- print(f"speaker: {sentence}")
35
-
36
- def on_metadata(self, metadata, **kwargs):
37
- print(f"\n\n{metadata}\n\n")
38
-
39
- def on_error(self, error, **kwargs):
40
- print(f"\n\n{error}\n\n")
41
-
42
- # STEP 4: Register the event handlers
43
- dg_connection.on(LiveTranscriptionEvents.Transcript, on_message)
44
- dg_connection.on(LiveTranscriptionEvents.Metadata, on_metadata)
45
- dg_connection.on(LiveTranscriptionEvents.Error, on_error)
46
-
47
- # STEP 5: Configure Deepgram options for live transcription
48
- options = LiveOptions(
49
- model="nova-2",
50
- language="en-US",
51
- smart_format=True,
52
- )
53
-
54
- # STEP 6: Start the connection
55
- dg_connection.start(options)
56
-
57
- # STEP 7: Create a lock and a flag for thread synchronization
58
- lock_exit = threading.Lock()
59
- exit = False
60
-
61
- # STEP 8: Define a thread that streams the audio and sends it to Deepgram
62
- def myThread():
63
- with httpx.stream("GET", URL) as r:
64
- for data in r.iter_bytes():
65
- lock_exit.acquire()
66
- if exit:
67
- break
68
- lock_exit.release()
69
-
70
- dg_connection.send(data)
71
-
72
- # STEP 9: Start the thread
73
- myHttp = threading.Thread(target=myThread)
74
- myHttp.start()
75
-
76
- # STEP 10: Wait for user input to stop recording
77
- input("Press Enter to stop recording...\n\n")
78
-
79
- # STEP 11: Set the exit flag to True to stop the thread
80
- lock_exit.acquire()
81
- exit = True
82
- lock_exit.release()
83
-
84
- # STEP 12: Wait for the thread to finish
85
- myHttp.join()
86
-
87
- # STEP 13: Close the connection to Deepgram
88
- dg_connection.finish()
89
-
90
- print("Finished")
91
-
92
- except Exception as e:
93
- print(f"Could not open socket: {e}")
94
- return
95
-
96
-
97
- if __name__ == "__main__":
98
- main()
@@ -1,131 +0,0 @@
1
- import os
2
- import time
3
- import httpx
4
- from dotenv import load_dotenv
5
- import threading
6
- import queue
7
- import hashlib
8
-
9
- from deepgram import (
10
- DeepgramClient,
11
- LiveTranscriptionEvents,
12
- LiveOptions,
13
- )
14
-
15
- load_dotenv()
16
-
17
- os.environ["DG_API_KEY"] = "1d5666523f0f2fbaf3e2db6ae7717f6f87280b5e"
18
- API_KEY = os.getenv("DG_API_KEY")
19
-
20
- from clarifai_grpc.grpc.api import resources_pb2, service_pb2
21
- from collections.abc import Iterator
22
- from google.protobuf import json_format
23
-
24
- from clarifai.client.runner import Runner
25
-
26
-
27
- class MyRunner(Runner):
28
- """A custom runner that adds "Hello World" to the end of the text and replaces the domain of the
29
- image URL as an example.
30
- """
31
-
32
- def setup_connection(self):
33
- print("ZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZz")
34
- # STEP 2: Create a websocket connection to Deepgram
35
- self.dg_connection = self.deepgram.listen.live.v("1")
36
-
37
- output_q = self.output_q
38
-
39
- # STEP 3: Define the event handlers for the connection
40
- def on_message(self, result, **kwargs):
41
- if result.is_final:
42
- sentence = result.channel.alternatives[0].transcript
43
- if len(sentence) == 0:
44
- return
45
- print("SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS")
46
- print(f"speaker: {sentence}")
47
- # put it on a queue as we get responses from deepgram.
48
- output_q.put(sentence)
49
-
50
- def on_metadata(self, metadata, **kwargs):
51
- print(f"\n\n{metadata}\n\n")
52
-
53
- def on_error(self, error, **kwargs):
54
- print(f"\n\n{error}\n\n")
55
-
56
- # STEP 4: Register the event handlers
57
- self.dg_connection.on(LiveTranscriptionEvents.Transcript, on_message)
58
- self.dg_connection.on(LiveTranscriptionEvents.Metadata, on_metadata)
59
- self.dg_connection.on(LiveTranscriptionEvents.Error, on_error)
60
-
61
- # STEP 5: Configure Deepgram options for live transcription
62
- self.options = LiveOptions(
63
- model="nova-2",
64
- language="en-US",
65
- smart_format=True,
66
- )
67
-
68
- # STEP 6: Start the connection
69
- self.dg_connection.start(self.options)
70
-
71
- print("DDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDD")
72
- print(self.dg_connection._socket)
73
-
74
- def __init__(self, *args, **kwargs):
75
- print("MyRunner init")
76
- # STEP 1: Create a Deepgram client using the API key
77
- self.deepgram = DeepgramClient(API_KEY)
78
- self.output_q = queue.Queue()
79
-
80
- self.setup_connection()
81
-
82
- super().__init__(*args, **kwargs)
83
-
84
- def stream(self, request: service_pb2.PostModelOutputsRequest
85
- ) -> Iterator[service_pb2.MultiOutputResponse]:
86
- """Example yielding a whole batch of streamed stuff back.
87
- """
88
-
89
- assert len(request.inputs) == 1, "This runner only supports one input at a time."
90
-
91
- # Get the next chunk of data from the incoming stream.
92
-
93
- print("Got some audio data")
94
-
95
- data = request.inputs[0].data.audio.base64
96
- print(hashlib.md5(data).hexdigest())
97
-
98
- # FIXME(zeiler): this doesnt' work but isn't iportant to our system.
99
- if not self.dg_connection._socket:
100
- #self.dg_connection.finish()
101
- #self.dg_connection.start(self.options)
102
- self.setup_connection()
103
- self.dg_connection.send(data)
104
- print("Sent it to deepgram")
105
-
106
- while True:
107
- try:
108
- item = self.output_q.get(timeout=0.1)
109
- output = resources_pb2.Output()
110
- output.data.text.raw = item
111
- output.status.code = 10000
112
- print("YYYYYYYYYYYYYYYYYYYYYYYYYYYYYY")
113
- print("Yielding: ", output.data.text.raw)
114
- yield service_pb2.MultiOutputResponse(outputs=[
115
- output,
116
- ])
117
- except queue.Empty:
118
- print("Queue is empty, sleeping then breaking")
119
- break
120
-
121
- # # STEP 13: Close the connection to Deepgram
122
- # dg_connection.finish()
123
-
124
-
125
- if __name__ == '__main__':
126
- # Make sure you set these env vars before running the example.
127
- # CLARIFAI_PAT
128
- # CLARIFAI_USER_ID
129
-
130
- # You need to first create a runner in the Clarifai API and then use the ID here.
131
- MyRunner(runner_id="matt-test-runner", base_url="http://q6:32013", num_parallel_polls=1).start()
@@ -1,130 +0,0 @@
1
- import os
2
- import time
3
- import httpx
4
- from dotenv import load_dotenv
5
- import threading
6
- import queue
7
- import hashlib
8
-
9
- from deepgram import (
10
- DeepgramClient,
11
- LiveTranscriptionEvents,
12
- LiveOptions,
13
- )
14
-
15
- load_dotenv()
16
-
17
- os.environ["DG_API_KEY"] = "1d5666523f0f2fbaf3e2db6ae7717f6f87280b5e"
18
- API_KEY = os.getenv("DG_API_KEY")
19
-
20
- from clarifai_grpc.grpc.api import resources_pb2, service_pb2
21
- from collections.abc import Iterator
22
- from google.protobuf import json_format
23
-
24
- from clarifai.client.runner import Runner
25
-
26
-
27
- class MyRunner(Runner):
28
- """A custom runner that adds "Hello World" to the end of the text and replaces the domain of the
29
- image URL as an example.
30
- """
31
-
32
- def setup_connection(self):
33
- print("ZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZz")
34
- # STEP 2: Create a websocket connection to Deepgram
35
- self.dg_connection = self.deepgram.listen.live.v("1")
36
-
37
- output_q = self.output_q
38
-
39
- # STEP 3: Define the event handlers for the connection
40
- def on_message(self, result, **kwargs):
41
- if result.is_final:
42
- sentence = result.channel.alternatives[0].transcript
43
- if len(sentence) == 0:
44
- return
45
- print("SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS")
46
- print(f"speaker: {sentence}")
47
- # put it on a queue as we get responses from deepgram.
48
- output_q.put(sentence)
49
-
50
- def on_metadata(self, metadata, **kwargs):
51
- print(f"\n\n{metadata}\n\n")
52
-
53
- def on_error(self, error, **kwargs):
54
- print(f"\n\n{error}\n\n")
55
-
56
- # STEP 4: Register the event handlers
57
- self.dg_connection.on(LiveTranscriptionEvents.Transcript, on_message)
58
- self.dg_connection.on(LiveTranscriptionEvents.Metadata, on_metadata)
59
- self.dg_connection.on(LiveTranscriptionEvents.Error, on_error)
60
-
61
- # STEP 5: Configure Deepgram options for live transcription
62
- self.options = LiveOptions(
63
- model="nova-2",
64
- language="en-US",
65
- smart_format=True,
66
- )
67
-
68
- # STEP 6: Start the connection
69
- self.dg_connection.start(self.options)
70
-
71
- print("DDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDD")
72
- print(self.dg_connection._socket)
73
-
74
- def __init__(self, *args, **kwargs):
75
- print("MyRunner init")
76
- # STEP 1: Create a Deepgram client using the API key
77
- self.deepgram = DeepgramClient(API_KEY)
78
- self.output_q = queue.Queue()
79
-
80
- self.setup_connection()
81
-
82
- super().__init__(*args, **kwargs)
83
-
84
- def stream(self, request: service_pb2.PostModelOutputsRequest
85
- ) -> Iterator[service_pb2.MultiOutputResponse]:
86
- """Example yielding a whole batch of streamed stuff back.
87
- """
88
-
89
- assert len(request.inputs) == 1, "This runner only supports one input at a time."
90
-
91
- # Get the next chunk of data from the incoming stream.
92
-
93
- print("Got some audio data")
94
-
95
- data = request.inputs[0].data.audio.base64
96
- print(hashlib.md5(data).hexdigest())
97
-
98
- if not self.dg_connection._socket:
99
- self.dg_connection.finish()
100
- self.dg_connection.start(self.options)
101
- #self.setup_connection()
102
- self.dg_connection.send(data)
103
- print("Sent it to deepgram")
104
-
105
- while True:
106
- try:
107
- item = self.output_q.get(timeout=0.1)
108
- output = resources_pb2.Output()
109
- output.data.text.raw = item
110
- output.status.code = 10000
111
- print("YYYYYYYYYYYYYYYYYYYYYYYYYYYYYY")
112
- print("Yielding: ", output.data.text.raw)
113
- yield service_pb2.MultiOutputResponse(outputs=[
114
- output,
115
- ])
116
- except queue.Empty:
117
- print("Queue is empty, sleeping then breaking")
118
- break
119
-
120
- # # STEP 13: Close the connection to Deepgram
121
- # dg_connection.finish()
122
-
123
-
124
- if __name__ == '__main__':
125
- # Make sure you set these env vars before running the example.
126
- # CLARIFAI_PAT
127
- # CLARIFAI_USER_ID
128
-
129
- # You need to first create a runner in the Clarifai API and then use the ID here.
130
- MyRunner(runner_id="matt-test-runner", base_url="http://q6:32013", num_parallel_polls=1).start()
@@ -1,72 +0,0 @@
1
- from clarifai.client.runner import Runner
2
- from clarifai_grpc.grpc.api import resources_pb2
3
-
4
- # This example requires to run the following before running this example:
5
- # pip install transformers
6
-
7
- # https://huggingface.co/TheBloke/Llama-2-70B-chat-GPTQ
8
- model_name_or_path = "TheBloke/Llama-2-7B-chat-GPTQ"
9
- model_basename = "model"
10
-
11
- use_triton = False
12
-
13
-
14
- class Llama2Runner(Runner):
15
- """A custom runner that runs the LLama2 LLM.
16
- """
17
-
18
- def __init__(self, *args, **kwargs):
19
- print("Starting to load the model...")
20
- st = time.time()
21
- self.tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, use_fast=True)
22
- self.model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map='auto')
23
-
24
- self.logger.info("Loading model complete in (%f seconds), ready to loop for requests." %
25
- (time.time() - st))
26
- super(MyRunner, self).__init__(*args, **kwargs)
27
-
28
- def run_input(self, input: resources_pb2.Input,
29
- output_info: resources_pb2.OutputInfo) -> resources_pb2.Output:
30
- """This is the method that will be called when the runner is run. It takes in an input and
31
- returns an output.
32
- """
33
-
34
- output = resources_pb2.Output()
35
- data = input.data
36
- if data.text.raw != "":
37
- input_text = data.text.raw
38
- elif data.text.url != "":
39
- input_text = str(requests.get(data.text.url).text)
40
- else:
41
- raise Exception("Need to include data.text.raw or data.text.url in your inputs.")
42
-
43
- st = time.time()
44
- max_tokens = 4096
45
- # # Method 1
46
- # input_ids = self.tokenizer(input_text, return_tensors='pt').input_ids.cuda()
47
- # out = self.model.generate(inputs=input_ids, temperature=0.7, max_new_tokens=max_tokens)
48
- # out_text = self.tokenizer.decode(out[0], skip_special_tokens=True)
49
- # output.data.text.raw = out_text.replace(input_text, '')
50
-
51
- # # Method 2
52
- pipe = pipeline(
53
- "text-generation",
54
- model=self.model,
55
- tokenizer=self.tokenizer,
56
- max_new_tokens=max_tokens,
57
- temperature=0.7,
58
- top_p=0.95,
59
- repetition_penalty=1.15,
60
- return_full_text=False)
61
- a = pipe(input_text)
62
- output.data.text.raw = a[0]['generated_text']
63
- return output
64
-
65
-
66
- if __name__ == '__main__':
67
- # Make sure you set these env vars before running the example.
68
- # CLARIFAI_PAT
69
- # CLARIFAI_USER_ID
70
-
71
- # You need to first create a runner in the Clarifai API and then use the ID here.
72
- Llama2Runner(runner_id="sdk-llama2-runner").start()
@@ -1,89 +0,0 @@
1
- from clarifai_grpc.grpc.api import resources_pb2, service_pb2
2
- from collections.abc import Iterator
3
- from google.protobuf import json_format
4
-
5
- from clarifai.client.runner import Runner
6
-
7
-
8
- class MyRunner(Runner):
9
- """A custom runner that adds "Hello World" to the end of the text and replaces the domain of the
10
- image URL as an example.
11
- """
12
-
13
- def run_input(self, input: resources_pb2.Input, output_info: resources_pb2.OutputInfo,
14
- **kwargs) -> resources_pb2.Output:
15
- """This is the method that will be called when the runner is run. It takes in an input and
16
- returns an output.
17
- """
18
-
19
- output = resources_pb2.Output()
20
-
21
- data = input.data
22
-
23
- # Optional use of output_info
24
- params_dict = {}
25
- if "params" in output_info:
26
- params_dict = output_info["params"]
27
-
28
- if data.text.raw != "":
29
- output.data.text.raw = data.text.raw + "Hello World" + params_dict.get(
30
- "hello", "") + kwargs.get("extra", "")
31
- if data.image.url != "":
32
- output.data.text.raw = data.image.url.replace("samples.clarifai.com",
33
- "newdomain.com" + params_dict.get("domain",))
34
- return output
35
-
36
- def generate(self, request: service_pb2.PostModelOutputsRequest
37
- ) -> Iterator[service_pb2.MultiOutputResponse]:
38
- """Example yielding a whole batch of streamed stuff back.
39
- """
40
-
41
- model = request.model
42
- output_info = None
43
- if request.model.model_version.id != "":
44
- output_info = json_format.MessageToDict(
45
- model.model_version.output_info, preserving_proto_field_name=True)
46
-
47
- for i in range(10): # fake something iterating generating 10 times.
48
-
49
- outputs = []
50
- for input in request.inputs:
51
- # output = self.run_input(input, output_info, extra=f" {i}")
52
- output = resources_pb2.Output()
53
- output.data.text.raw = f"Generate Hello World {i}"
54
- outputs.append(output)
55
- resp = service_pb2.MultiOutputResponse(outputs=outputs,)
56
- yield resp
57
-
58
- def stream(self, request: service_pb2.PostModelOutputsRequest
59
- ) -> Iterator[service_pb2.MultiOutputResponse]:
60
- """Example yielding a whole batch of streamed stuff back.
61
- """
62
-
63
- model = request.model
64
- output_info = None
65
- if request.model.model_version.id != "":
66
- output_info = json_format.MessageToDict(
67
- model.model_version.output_info, preserving_proto_field_name=True)
68
-
69
- for i in range(10): # fake something iterating generating 10 times.
70
-
71
- outputs = []
72
- for input in request.inputs:
73
- # output = self.run_input(input, output_info, extra=f" {i}")
74
- output = resources_pb2.Output()
75
- out_text = input.data.text.raw + f"Stream Hello World {i}"
76
- print(out_text)
77
- output.data.text.raw = out_text
78
- outputs.append(output)
79
- resp = service_pb2.MultiOutputResponse(outputs=outputs,)
80
- yield resp
81
-
82
-
83
- if __name__ == '__main__':
84
- # Make sure you set these env vars before running the example.
85
- # CLARIFAI_PAT
86
- # CLARIFAI_USER_ID
87
-
88
- # You need to first create a runner in the Clarifai API and then use the ID here.
89
- MyRunner(runner_id="matt-test-runner", base_url="http://q6:32013", num_parallel_polls=1).start()