edsl 0.1.39.dev4__py3-none-any.whl → 0.1.39.dev5__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
edsl/__version__.py CHANGED
@@ -1 +1 @@
- __version__ = "0.1.39.dev4"
+ __version__ = "0.1.39.dev5"
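A quick way to confirm which build is installed after upgrading; a minimal sketch using only the standard-library importlib.metadata and the distribution name from the METADATA below:

from importlib.metadata import version

# The wheel's METADATA declares Name: edsl, so the distribution
# lookup uses that name; prints "0.1.39.dev5" for this release.
print(version("edsl"))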
edsl-0.1.39.dev4.dist-info/METADATA → edsl-0.1.39.dev5.dist-info/METADATA RENAMED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: edsl
- Version: 0.1.39.dev4
+ Version: 0.1.39.dev5
  Summary: Create and analyze LLM-based surveys
  Home-page: https://www.expectedparrot.com/
  License: MIT
edsl-0.1.39.dev4.dist-info/RECORD → edsl-0.1.39.dev5.dist-info/RECORD RENAMED
@@ -2,7 +2,7 @@ edsl/Base.py,sha256=-U6ngLZJDqImrAwF-TSmYzMESre0CG5rq9ZFxFh31sY,12864
  edsl/BaseDiff.py,sha256=92BirXj2u3TEGHJWni9TBsvZjvq8wpb4wDL2vxX9Lb0,8253
  edsl/TemplateLoader.py,sha256=sDBlSMt7EfOduM7w3h6v03gvh_Rzn9hVrlS-iLSQdZA,849
  edsl/__init__.py,sha256=It2k2drMyde7O-xh0yesod2OxZ3W0yW21zZKRVifv6g,1896
- edsl/__version__.py,sha256=H4R9bwO-FoajAtm4SOl-2H3XIcxeoqT4Mzytie3DWvc,28
+ edsl/__version__.py,sha256=GwtkJWW-Z8kLjSF_sIF6wJrqTj4WtKxR8VIesSAr6uo,28
  edsl/agents/Agent.py,sha256=VLIe9gMnLcZkRCQpz8VofEIAbCWj7jNpWxDw5k3KiXI,40734
  edsl/agents/AgentList.py,sha256=iRfQfyUYtaJbJ3sRubPqPKRr77nKQgzhFEeZ0wcAEk0,18955
  edsl/agents/Invigilator.py,sha256=v9KkQ7yQfhwsrZB-7Bu4ZQ-zl_9Hwaxe0uQyxB-Wvss,11862
@@ -44,7 +44,6 @@ edsl/data/CacheHandler.py,sha256=wy2AdKkk_pmwP71htdmLV9UzXM4AuHm5pn1qscJlX9s,515
  edsl/data/RemoteCacheSync.py,sha256=qgvh2w0c-Ak7poGNucWRd4ze8ncMQYh36a9yp3ib0IU,6062
  edsl/data/SQLiteDict.py,sha256=V5Nfnxctgh4Iblqcw1KmbnkjtfmWrrombROSQ3mvg6A,8979
  edsl/data/__init__.py,sha256=i_bbYBc-vrdASBpDMcpIcfhbLKYOkvqA57R3ysBcQ6o,177
- edsl/data/hack.py,sha256=zQmf1xGRUfTU0sDeZCqSxPk5E4JQBGppH1epyx-i0Pg,261
  edsl/data/orm.py,sha256=Jz6rvw5SrlxwysTL0QI9r68EflKxeEBmf6j6himHDS8,238
  edsl/data_transfer_models.py,sha256=r7Nl2ZyR0FZhzqQg8tz2jxonTVBboK9W3qMicts69qc,1960
  edsl/enums.py,sha256=foBiWLk82b_ve9v9NpGCbM6tQDYsVCGvoIJ5PCzwbjo,6043
@@ -329,7 +328,6 @@ edsl/templates/error_reporting/performance_plot.html,sha256=NTXFj51VEwew59gLzbR8
  edsl/templates/error_reporting/report.css,sha256=e0kM4z4fF3xrKIJbbhvrzzh8gMJ8LD7rDu0ut63kg8c,1209
  edsl/templates/error_reporting/report.html,sha256=CWygdoBM-QXNI8HtqaQMfzspMSn4lUVRTXf6NA9ggqo,4523
  edsl/templates/error_reporting/report.js,sha256=PtF1N68RmSYB2OG-6ymO14-LcX7LUZTnUDFX0dN6xW4,957
- edsl/test_h,sha256=_f3wX2KArsvPvCzU9S1fKwZqcYh2dKvu2JV-AH20Kwk,23607
  edsl/tools/__init__.py,sha256=4iRiX1K0Yh8RGwlUBuzipvFfRrofKqCRQ0SzNK_2oiQ,41
  edsl/tools/clusters.py,sha256=uvDN76bfHIHS-ykB2iioXu0gKeP_UyD7Q9ee67w_fV4,6132
  edsl/tools/embeddings.py,sha256=-mHqApiFbGzj_2Cr_VVl_7XiBBDyPB5-6ZO7IsXvaig,677
@@ -346,7 +344,6 @@ edsl/utilities/data/scooter_results.json,sha256=tRtVAI5haLMh_-wjz9it_uk_I1bGe5qd
  edsl/utilities/decorators.py,sha256=rlTSMItwmWUxHQBIEUDxX3lFzgtiT8PnfNavakuf-2M,2319
  edsl/utilities/gcp_bucket/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  edsl/utilities/gcp_bucket/cloud_storage.py,sha256=dStHsG181YP9ALW-bkyO-sgMWuLV5aaLsnsCOVD8jJw,3472
- edsl/utilities/gcp_bucket/example.py,sha256=Z6V5T4G8es3_tCKwMaPcQQgz8u-kPy6TtcV5PfeWKoA,1413
  edsl/utilities/interface.py,sha256=AaKpWiwWBwP2swNXmnFlIf3ZFsjfsR5bjXQAW47tD-8,19656
  edsl/utilities/is_notebook.py,sha256=UAT8LXl7oh2qvUnOWQ5QvO1wDno385a8hHMuPZuYGT8,713
  edsl/utilities/is_valid_variable_name.py,sha256=k_CEkw2rDjNykx1wBTLYkjzkTZI-3fK9xJlTQggX5nk,332
@@ -355,7 +352,7 @@ edsl/utilities/remove_edsl_version.py,sha256=3n2RoXvZ4pH3k-_lc7B-vkeUyHXHX6vKHQS
  edsl/utilities/repair_functions.py,sha256=tftmklAqam6LOQQu_-9U44N-llycffhW8LfO63vBmNw,929
  edsl/utilities/restricted_python.py,sha256=5-_zUhrNbos7pLhDl9nr8d24auRlquR6w-vKkmNjPiA,2060
  edsl/utilities/utilities.py,sha256=FbI9QYGD4eaHrwZ6ePx51jjpatZqwSPHJhimx-h6HyA,12660
- edsl-0.1.39.dev4.dist-info/LICENSE,sha256=_qszBDs8KHShVYcYzdMz3HNMtH-fKN_p5zjoVAVumFc,1111
- edsl-0.1.39.dev4.dist-info/METADATA,sha256=sYmHaamfbRsi-HeNkImWPOOg68rgQdswaXDujodIsME,4766
- edsl-0.1.39.dev4.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
- edsl-0.1.39.dev4.dist-info/RECORD,,
+ edsl-0.1.39.dev5.dist-info/LICENSE,sha256=_qszBDs8KHShVYcYzdMz3HNMtH-fKN_p5zjoVAVumFc,1111
+ edsl-0.1.39.dev5.dist-info/METADATA,sha256=Bv16Ql3tzhK_nWnU-JSFhvZ3FHy3lullMd1QA2qMSFE,4766
+ edsl-0.1.39.dev5.dist-info/WHEEL,sha256=Nq82e9rUAnEjt98J6MlVmMCZb-t9cYE2Ir1kpBmnWfs,88
+ edsl-0.1.39.dev5.dist-info/RECORD,,
edsl-0.1.39.dev4.dist-info/WHEEL → edsl-0.1.39.dev5.dist-info/WHEEL RENAMED
@@ -1,4 +1,4 @@
  Wheel-Version: 1.0
- Generator: poetry-core 1.9.0
+ Generator: poetry-core 1.9.1
  Root-Is-Purelib: true
  Tag: py3-none-any
edsl/data/hack.py DELETED
@@ -1,10 +0,0 @@
- from edsl.data.CacheEntry import CacheEntry
-
- first = 0
- for i in range(0,1000000):
-     if i == 0:
-         first = CacheEntry.example().key
-     if first != "55ce2e13d38aa7fb6ec848053285edb4":
-         print(first)
-         print(CacheEntry.example().__dict__)
-         break
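The deleted hack.py above was a throwaway loop checking that CacheEntry.example() always yields the same key. A minimal sketch of the same check written as a pytest-style test, assuming CacheEntry.example() is meant to be deterministic (the expected hash is the one hard-coded in the deleted script):

from edsl.data.CacheEntry import CacheEntry

def test_example_cache_entry_key_is_stable():
    # Two independent calls should build identical fixtures,
    # and therefore identical cache keys.
    first = CacheEntry.example().key
    second = CacheEntry.example().key
    assert first == second == "55ce2e13d38aa7fb6ec848053285edb4"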
edsl/test_h DELETED
@@ -1 +0,0 @@
- [{'model_name': 'nvidia/Nemotron-4-340B-Instruct', 'type': 'text-generation', 'reported_type': 'text-generation', 'description': 'Nemotron-4-340B-Instruct is a chat model intended for use for the English language, designed for Synthetic Data Generation', 'cover_img_url': 'https://shared.deepinfra.com/models/nvidia/Nemotron-4-340B-Instruct/cover_image.6223332b8dfbf8dba008d030548970e7dbee4cfb571412430cc5ff8c007b7072.webp', 'tags': [...], 'pricing': {...}, 'max_tokens': 4096, 'replaced_by': None, 'deprecated': None, 'quantization': 'bfloat16'}, {'model_name': 'meta-llama/Llama-2-7b-chat-hf', 'type': 'text-generation', 'reported_type': 'text-generation', 'description': 'Llama 2 is a collection of pretrained and fine-tuned generative text models ranging in scale from 7 billion to 70 billion parameters. This is the repository for the 7B fine-tuned model, optimized for dialogue use cases and converted for the Hugging Face Transformers format. ', 'cover_img_url': 'https://shared.deepinfra.com/models/meta-llama/Llama-2-7b-chat-hf/cover_image.10373e7a429dd725e0eb9e57cd20aeb815426c077217b27d9aedce37bd5c2173.jpg', 'tags': [...], 'pricing': {...}, 'max_tokens': 4096, 'replaced_by': 'meta-llama/Meta-Llama-3-8B-Instruct', 'deprecated': 1718309480, 'quantization': 'fp16'}, {'model_name': 'meta-llama/Llama-2-13b-chat-hf', 'type': 'text-generation', 'reported_type': 'text-generation', 'description': 'Llama 2 is a collection of pretrained and fine-tuned generative text models ranging in scale from 7 billion to 70 billion parameters. This is the repository for the 7B fine-tuned model, optimized for dialogue use cases and converted for the Hugging Face Transformers format. ', 'cover_img_url': 'https://shared.deepinfra.com/models/meta-llama/Llama-2-13b-chat-hf/cover_image.6cbca6a3445de3ac6e75983f8828fe6b6323ae8a2d3c025ae1561ac5e56e6326.jpg', 'tags': [...], 'pricing': {...}, 'max_tokens': 4096, 'replaced_by': 'meta-llama/Meta-Llama-3-8B-Instruct', 'deprecated': 1718228019, 'quantization': 'fp16'}, {'model_name': 'deepinfra/airoboros-70b', 'type': 'text-generation', 'reported_type': 'text-generation', 'description': 'Latest version of the Airoboros model fine-tunned version of llama-2-70b using the Airoboros dataset. 
This model is currently running jondurbin/airoboros-l2-70b-2.2.1 ', 'cover_img_url': 'https://shared.deepinfra.com/models/deepinfra/airoboros-70b/cover_image.4df6a78233488497ec8cad9a032e070a1c2e0c510c7d3b2ebd5c159e15b69793.webp', 'tags': [...], 'pricing': {...}, 'max_tokens': 4096, 'replaced_by': 'lizpreciatior/lzlv_70b_fp16_hf', 'deprecated': 1718067221, 'quantization': 'fp16'}, {'model_name': '01-ai/Yi-34B-Chat', 'type': 'text-generation', 'reported_type': 'text-generation', 'description': '', 'cover_img_url': 'https://shared.deepinfra.com/models/01-ai/Yi-34B-Chat/cover_image.ba50c187d0414ba799d008eef1756d102f2a016b33109023a5d05ac7370f21c8.webp', 'tags': [...], 'pricing': {...}, 'max_tokens': 4096, 'replaced_by': 'cognitivecomputations/dolphin-2.6-mixtral-8x7b', 'deprecated': None, 'quantization': 'fp16'}, {'model_name': 'mistralai/Mistral-7B-Instruct-v0.2', 'type': 'text-generation', 'reported_type': 'text-generation', 'description': 'The Mistral-7B-Instruct-v0.2 Large Language Model (LLM) is a instruct fine-tuned version of the Mistral-7B-v0.2 generative text model using a variety of publicly available conversation datasets.', 'cover_img_url': 'https://shared.deepinfra.com/models/mistralai/Mistral-7B-Instruct-v0.2/cover_image.429fef8a2a09e5c4104ede511db12beaea9d3917c4754d709b05a65d5d6f6c1f.webp', 'tags': [...], 'pricing': {...}, 'max_tokens': 32768, 'replaced_by': None, 'deprecated': None, 'quantization': 'fp16'}, {'model_name': 'Qwen/Qwen2-7B-Instruct', 'type': 'text-generation', 'reported_type': 'text-generation', 'description': 'The 7 billion parameter Qwen2 excels in language understanding, multilingual capabilities, coding, mathematics, and reasoning.', 'cover_img_url': 'https://shared.deepinfra.com/models/Qwen/Qwen2-7B-Instruct/cover_image.e8c2257ba46edbd20ef17c81343ad40fdf27f7bc3838837d079557accd2879e2.webp', 'tags': [...], 'pricing': {...}, 'max_tokens': 32768, 'replaced_by': None, 'deprecated': None, 'quantization': 'bfloat16'}, {'model_name': 'Qwen/Qwen2-72B-Instruct', 'type': 'text-generation', 'reported_type': 'text-generation', 'description': 'The 72 billion parameter Qwen2 excels in language understanding, multilingual capabilities, coding, mathematics, and reasoning.', 'cover_img_url': 'https://shared.deepinfra.com/models/Qwen/Qwen2-72B-Instruct/cover_image.6771b7ff122c6ffaa9d5ae6b6ab54db0d46079a8f90441fa7770d60cfd4c6f4f.webp', 'tags': [...], 'pricing': {...}, 'max_tokens': 32768, 'replaced_by': None, 'deprecated': None, 'quantization': 'bfloat16'}, {'model_name': 'google/codegemma-7b-it', 'type': 'text-generation', 'reported_type': 'text-generation', 'description': 'CodeGemma is a collection of lightweight open code models built on top of Gemma. 
CodeGemma models are text-to-text and text-to-code decoder-only models and are available as a 7 billion pretrained variant that specializes in code completion and code generation tasks, a 7 billion parameter instruction-tuned variant for code chat and instruction following and a 2 billion parameter pretrained variant for fast code completion.', 'cover_img_url': 'https://shared.deepinfra.com/models/google/codegemma-7b-it/cover_image.ecf88bf320526e3a113a1e51057501ffe65b8e264912e0c920ae7f95553f74b2.webp', 'tags': [...], 'pricing': {...}, 'max_tokens': 8192, 'replaced_by': 'google/gemma-1.1-7b-it', 'deprecated': 1718830428, 'quantization': 'fp16'}, {'model_name': 'mistralai/Mistral-7B-Instruct-v0.3', 'type': 'text-generation', 'reported_type': 'text-generation', 'description': 'Mistral-7B-Instruct-v0.3 is an instruction-tuned model, next iteration of of Mistral 7B that has larger vocabulary, newer tokenizer and supports function calling.', 'cover_img_url': 'https://shared.deepinfra.com/models/mistralai/Mistral-7B-Instruct-v0.3/cover_image.716d64cdc98717436953bde0b80dede06d7071f66cb46f1df85caaa270e5cdd6.webp', 'tags': [...], 'pricing': {...}, 'max_tokens': 32768, 'replaced_by': None, 'deprecated': None, 'quantization': 'bfloat16'}, {'model_name': 'mistralai/Mixtral-8x7B-Instruct-v0.1', 'type': 'text-generation', 'reported_type': 'text-generation', 'description': 'Mixtral is mixture of expert large language model (LLM) from Mistral AI. This is state of the art machine learning model using a mixture 8 of experts (MoE) 7b models. During inference 2 expers are selected. This architecture allows large models to be fast and cheap at inference. The Mixtral-8x7B outperforms Llama 2 70B on most benchmarks.', 'cover_img_url': 'https://shared.deepinfra.com/models/mistralai/Mixtral-8x7B-Instruct-v0.1/cover_image.a3146cc88bb3c77e6eae14b35d8db03d7952a597633a53378ef8182186c5a9d7.webp', 'tags': [...], 'pricing': {...}, 'max_tokens': 32768, 'replaced_by': None, 'deprecated': None, 'quantization': 'bfloat16'}, {'model_name': 'openchat/openchat_3.5', 'type': 'text-generation', 'reported_type': 'text-generation', 'description': 'OpenChat is a library of open-source language models that have been fine-tuned with C-RLFT, a strategy inspired by offline reinforcement learning. These models can learn from mixed-quality data without preference labels and have achieved exceptional performance comparable to ChatGPT. The developers of OpenChat are dedicated to creating a high-performance, commercially viable, open-source large language model and are continuously making progress towards this goal.', 'cover_img_url': 'https://shared.deepinfra.com/models/openchat/openchat_3.5/cover_image.6112a8e07a704c30bd7c354351fa79c13904d9df7667a0064fb6b30bc80e728b.webp', 'tags': [...], 'pricing': {...}, 'max_tokens': 8192, 'replaced_by': None, 'deprecated': None, 'quantization': 'fp16'}, {'model_name': 'bigcode/starcoder2-15b-instruct-v0.1', 'type': 'text-generation', 'reported_type': 'text-generation', 'description': 'We introduce StarCoder2-15B-Instruct-v0.1, the very first entirely self-aligned code Large Language Model (LLM) trained with a fully permissive and transparent pipeline. 
Our open-source pipeline uses StarCoder2-15B to generate thousands of instruction-response pairs, which are then used to fine-tune StarCoder-15B itself without any human annotations or distilled data from huge and proprietary LLMs.', 'cover_img_url': 'https://shared.deepinfra.com/models/bigcode/starcoder2-15b-instruct-v0.1/cover_image.f20836d5d430a93c8aa95eefc92154d14e93dc69b17967410ecce3187a07c601.webp', 'tags': [...], 'pricing': {...}, 'max_tokens': None, 'replaced_by': 'Phind/Phind-CodeLlama-34B-v2', 'deprecated': 1718830388, 'quantization': 'fp16'}, {'model_name': 'mistralai/Mistral-7B-Instruct-v0.1', 'type': 'text-generation', 'reported_type': 'text-generation', 'description': 'The Mistral-7B-Instruct-v0.1 Large Language Model (LLM) is a instruct fine-tuned version of the Mistral-7B-v0.1 generative text model using a variety of publicly available conversation datasets.', 'cover_img_url': 'https://shared.deepinfra.com/models/mistralai/Mistral-7B-Instruct-v0.1/cover_image.cdb70679749ff93eb56b8480215bb8cd1382cbeffcec00a06bdb0145be9ef511.webp', 'tags': [...], 'pricing': {...}, 'max_tokens': 32768, 'replaced_by': None, 'deprecated': None, 'quantization': 'fp16'}, {'model_name': 'meta-llama/Meta-Llama-3-8B-Instruct', 'type': 'text-generation', 'reported_type': 'text-generation', 'description': 'Meta developed and released the Meta Llama 3 family of large language models (LLMs), a collection of pretrained and instruction tuned generative text models in 8 and 70B sizes.', 'cover_img_url': 'https://shared.deepinfra.com/models/meta-llama/Meta-Llama-3-8B-Instruct/cover_image.9ea753fd36aabfbca4939ee488b859e08e95c4626ffff71ec3a385be66b1d3ba.webp', 'tags': [...], 'pricing': {...}, 'max_tokens': 8192, 'replaced_by': None, 'deprecated': None, 'quantization': 'bfloat16'}, {'model_name': 'codellama/CodeLlama-34b-Instruct-hf', 'type': 'text-generation', 'reported_type': 'text-generation', 'description': 'Code Llama is a state-of-the-art LLM capable of generating code, and natural language about code, from both code and natural language prompts. This particular instance is the 34b instruct variant', 'cover_img_url': 'https://shared.deepinfra.com/models/codellama/CodeLlama-34b-Instruct-hf/cover_image.6b78b2c46fdafdd29a6eb2bcc388412b0afea3f4286d5ac90ab3c65e2da61be7.webp', 'tags': [...], 'pricing': {...}, 'max_tokens': 4096, 'replaced_by': 'Phind/Phind-CodeLlama-34B-v2', 'deprecated': None, 'quantization': 'fp16'}, {'model_name': 'HuggingFaceH4/zephyr-orpo-141b-A35b-v0.1', 'type': 'text-generation', 'reported_type': 'text-generation', 'description': 'Zephyr 141B-A35B is an instruction-tuned (assistant) version of Mixtral-8x22B. It was fine-tuned on a mix of publicly available, synthetic datasets. It achieves strong performance on chat benchmarks.', 'cover_img_url': 'https://shared.deepinfra.com/models/HuggingFaceH4/zephyr-orpo-141b-A35b-v0.1/cover_image.06f8e4df28f71df5ed7d0f5c995f296ca5e259f6c9b3a3e8914ba58805fb83a6.webp', 'tags': [...], 'pricing': {...}, 'max_tokens': 65536, 'replaced_by': 'mistralai/Mixtral-8x22B-Instruct-v0.1', 'deprecated': None, 'quantization': 'fp8'}, {'model_name': 'Gryphe/MythoMax-L2-13b-turbo', 'type': 'text-generation', 'reported_type': 'text-generation', 'description': 'Faster version of Gryphe/MythoMax-L2-13b running on multiple H100 cards in fp8 precision. Up to 160 tps. 
', 'cover_img_url': 'https://shared.deepinfra.com/models/Gryphe/MythoMax-L2-13b-turbo/cover_image.1975a8dba4bcf6809a083fe29aff31cb2895a749171f89e0db650bc039da051a.webp', 'tags': [...], 'pricing': {...}, 'max_tokens': 4096, 'replaced_by': 'Gryphe/MythoMax-L2-13b', 'deprecated': 1718830497, 'quantization': 'fp8'}, {'model_name': 'codellama/CodeLlama-70b-Instruct-hf', 'type': 'text-generation', 'reported_type': 'text-generation', 'description': 'CodeLlama-70b is the largest and latest code generation from the Code Llama collection. ', 'cover_img_url': 'https://shared.deepinfra.com/models/codellama/CodeLlama-70b-Instruct-hf/cover_image.6fc1f8b121f95b8075d0bff6f5d6fe39fdac93454078d88467697abe8ecd416b.webp', 'tags': [...], 'pricing': {...}, 'max_tokens': 4096, 'replaced_by': 'Phind/Phind-CodeLlama-34B-v2', 'deprecated': None, 'quantization': 'fp16'}, {'model_name': 'microsoft/WizardLM-2-8x22B', 'type': 'text-generation', 'reported_type': 'text-generation', 'description': "WizardLM-2 8x22B is Microsoft AI's most advanced Wizard model. It demonstrates highly competitive performance compared to those leading proprietary models.", 'cover_img_url': 'https://shared.deepinfra.com/models/microsoft/WizardLM-2-8x22B/cover_image.395b63e0d661def89bf43c88976a699b066f69208b3b58ae5cc2663693033ee8.webp', 'tags': [...], 'pricing': {...}, 'max_tokens': 65536, 'replaced_by': None, 'deprecated': None, 'quantization': 'bfloat16'}, {'model_name': 'databricks/dbrx-instruct', 'type': 'text-generation', 'reported_type': 'text-generation', 'description': 'DBRX is an open source LLM created by Databricks. It uses mixture-of-experts (MoE) architecture with 132B total parameters of which 36B parameters are active on any input. It outperforms existing open source LLMs like Llama 2 70B and Mixtral-8x7B on standard industry benchmarks for language understanding, programming, math, and logic.', 'cover_img_url': 'https://shared.deepinfra.com/models/databricks/dbrx-instruct/cover_image.061dbf63fed77872a3f1b97053081189ceefa2d6fafdc912d2d74111cfbd75c0.webp', 'tags': [...], 'pricing': {...}, 'max_tokens': 32768, 'replaced_by': 'cognitivecomputations/dolphin-2.6-mixtral-8x7b', 'deprecated': None, 'quantization': 'bfloat16'}, {'model_name': 'microsoft/WizardLM-2-7B', 'type': 'text-generation', 'reported_type': 'text-generation', 'description': "WizardLM-2 7B is the smaller variant of Microsoft AI's latest Wizard model. It is the fastest and achieves comparable performance with existing 10x larger open-source leading models", 'cover_img_url': 'https://shared.deepinfra.com/models/microsoft/WizardLM-2-7B/cover_image.305876b2901f4833a14d17e09a3876b59ca7561b73017518970a75643fe9ff69.webp', 'tags': [...], 'pricing': {...}, 'max_tokens': 32768, 'replaced_by': None, 'deprecated': None, 'quantization': 'fp16'}, {'model_name': 'mistralai/Mixtral-8x22B-v0.1', 'type': 'text-generation', 'reported_type': 'text-generation', 'description': 'Mixtral-8x22B is the latest and largest mixture of expert large language model (LLM) from Mistral AI. This is state of the art machine learning model using a mixture 8 of experts (MoE) 22b models. During inference 2 expers are selected. This architecture allows large models to be fast and cheap at inference. This model is not instruction tuned. 
', 'cover_img_url': 'https://shared.deepinfra.com/models/mistralai/Mixtral-8x22B-v0.1/cover_image.eb92d1199149a5d7fa5e7b2dc17dc991f7398301747b92bd60032c3b7fc77a0f.webp', 'tags': [...], 'pricing': {...}, 'max_tokens': 65536, 'replaced_by': 'mistralai/Mixtral-8x22B-Instruct-v0.1', 'deprecated': None, 'quantization': 'fp16'}, {'model_name': 'mistralai/Mixtral-8x22B-Instruct-v0.1', 'type': 'text-generation', 'reported_type': 'text-generation', 'description': 'This is the instruction fine-tuned version of Mixtral-8x22B - the latest and largest mixture of experts large language model (LLM) from Mistral AI. This state of the art machine learning model uses a mixture 8 of experts (MoE) 22b models. During inference 2 experts are selected. This architecture allows large models to be fast and cheap at inference.', 'cover_img_url': 'https://shared.deepinfra.com/models/mistralai/Mixtral-8x22B-Instruct-v0.1/cover_image.8bb1b015367a1537fd23c69d5b8117675a86b207c9bd3cce326b750ef877bcb6.webp', 'tags': [...], 'pricing': {...}, 'max_tokens': 65536, 'replaced_by': None, 'deprecated': None, 'quantization': 'bfloat16'}, {'model_name': 'meta-llama/Llama-2-70b-chat-hf', 'type': 'text-generation', 'reported_type': 'text-generation', 'description': 'LLaMa 2 is a collections of LLMs trained by Meta. This is the 70B chat optimized version. This endpoint has per token pricing.', 'cover_img_url': 'https://shared.deepinfra.com/models/meta-llama/Llama-2-70b-chat-hf/cover_image.7b3407408b20bd422edfb75da90ee92d0a05649e94b59bf409c827e845fc3c46.webp', 'tags': [...], 'pricing': {...}, 'max_tokens': 4096, 'replaced_by': 'meta-llama/Meta-Llama-3-70B-Instruct', 'deprecated': 1718309527, 'quantization': 'fp16'}, {'model_name': 'google/gemma-1.1-7b-it', 'type': 'text-generation', 'reported_type': 'text-generation', 'description': 'Gemma is an open-source model designed by Google. This is Gemma 1.1 7B (IT), an update over the original instruction-tuned Gemma release. Gemma 1.1 was trained using a novel RLHF method, leading to substantial gains on quality, coding capabilities, factuality, instruction following and multi-turn conversation quality.', 'cover_img_url': 'https://shared.deepinfra.com/models/google/gemma-1.1-7b-it/cover_image.18ce701e05fe6377f27c31c2e0d05649b7962f184f8dc894e31aad3ffb468f70.webp', 'tags': [...], 'pricing': {...}, 'max_tokens': 8192, 'replaced_by': None, 'deprecated': None, 'quantization': 'bfloat16'}, {'model_name': 'Gryphe/MythoMax-L2-13b', 'type': 'text-generation', 'reported_type': 'text-generation', 'description': '', 'cover_img_url': 'https://shared.deepinfra.com/models/Gryphe/MythoMax-L2-13b/cover_image.7567613d62f797fa930227a88202f7aee5ef30da38e6c9c7b775979cc71220bc.webp', 'tags': [...], 'pricing': {...}, 'max_tokens': 4096, 'replaced_by': None, 'deprecated': None, 'quantization': 'fp16'}, {'model_name': 'openchat/openchat-3.6-8b', 'type': 'text-generation', 'reported_type': 'text-generation', 'description': 'Openchat 3.6 is a LLama-3-8b fine tune that outperforms it on multiple benchmarks.', 'cover_img_url': 'https://shared.deepinfra.com/models/openchat/openchat-3.6-8b/cover_image.c5792073e4034a0847ff5112e00356adb411c4a5c900ed22c32ad65c5d97e8d1.webp', 'tags': [...], 'pricing': {...}, 'max_tokens': 8192, 'replaced_by': None, 'deprecated': None, 'quantization': 'bfloat16'}, {'model_name': 'Austism/chronos-hermes-13b-v2', 'type': 'text-generation', 'reported_type': 'text-generation', 'description': 'This offers the imaginative writing style of chronos while still retaining coherency and being capable. 
Outputs are long and utilize exceptional prose. Supports a maxium context length of 4096. The model follows the Alpaca prompt format.', 'cover_img_url': '', 'tags': [...], 'pricing': {...}, 'max_tokens': 4096, 'replaced_by': None, 'deprecated': None, 'quantization': 'fp16'}, {'model_name': 'Phind/Phind-CodeLlama-34B-v2', 'type': 'text-generation', 'reported_type': 'text-generation', 'description': 'Phind-CodeLlama-34B-v2 is an open-source language model that has been fine-tuned on 1.5B tokens of high-quality programming-related data and achieved a pass@1 rate of 73.8% on HumanEval. It is multi-lingual and proficient in Python, C/C++, TypeScript, Java, and more. It has been trained on a proprietary dataset of instruction-answer pairs instead of code completion examples. The model is instruction-tuned on the Alpaca/Vicuna format to be steerable and easy-to-use. It accepts the Alpaca/Vicuna instruction format and can generate one completion for each prompt.', 'cover_img_url': 'https://shared.deepinfra.com/models/Phind/Phind-CodeLlama-34B-v2/cover_image.0d7cb500b84d00e46b7bf490b75cf8eda73a3ad775fa4360c8deba541c3349b3.webp', 'tags': [...], 'pricing': {...}, 'max_tokens': 4096, 'replaced_by': None, 'deprecated': None, 'quantization': 'fp16'}, {'model_name': 'lizpreciatior/lzlv_70b_fp16_hf', 'type': 'text-generation', 'reported_type': 'text-generation', 'description': 'A Mythomax/MLewd_13B-style merge of selected 70B models A multi-model merge of several LLaMA2 70B finetunes for roleplaying and creative work. The goal was to create a model that combines creativity with intelligence for an enhanced experience.', 'cover_img_url': 'https://shared.deepinfra.com/models/lizpreciatior/lzlv_70b_fp16_hf/cover_image.2bb893141f7dce176afce500c4ec8ca22cfe5e2b00253d997fea31a7f60adc1b.webp', 'tags': [...], 'pricing': {...}, 'max_tokens': 4096, 'replaced_by': None, 'deprecated': None, 'quantization': 'fp16'}, {'model_name': 'cognitivecomputations/dolphin-2.6-mixtral-8x7b', 'type': 'text-generation', 'reported_type': 'text-generation', 'description': 'The Dolphin 2.6 Mixtral 8x7b model is a finetuned version of the Mixtral-8x7b model, trained on a variety of data including coding data, for 3 days on 4 A100 GPUs. It is uncensored and requires trust_remote_code. The model is very obedient and good at coding, but not DPO tuned. The dataset has been filtered for alignment and bias. The model is compliant with user requests and can be used for various purposes such as generating code or engaging in general chat.', 'cover_img_url': 'https://shared.deepinfra.com/models/cognitivecomputations/dolphin-2.6-mixtral-8x7b/cover_image.b265207e1a422c62c06f23a86e6ef6e8ee326de40a24bb1c5d9f102c1f2acd6b.webp', 'tags': [...], 'pricing': {...}, 'max_tokens': 32768, 'replaced_by': None, 'deprecated': None, 'quantization': 'bfloat16'}, {'model_name': 'bigcode/starcoder2-15b', 'type': 'text-generation', 'reported_type': 'text-generation', 'description': 'StarCoder2-15B model is a 15B parameter model trained on 600+ programming languages. 
It specializes in code completion.', 'cover_img_url': 'https://shared.deepinfra.com/models/bigcode/starcoder2-15b/cover_image.8981f6e7c85d72bf816c2abcb90d811c905db812ce0560ba5216df8cbc314464.webp', 'tags': [...], 'pricing': {...}, 'max_tokens': 16384, 'replaced_by': 'Phind/Phind-CodeLlama-34B-v2', 'deprecated': None, 'quantization': 'fp16'}, {'model_name': 'llava-hf/llava-1.5-7b-hf', 'type': 'text-generation', 'reported_type': 'text-generation', 'description': 'LLaVa is a multimodal model that supports vision and language models combined.', 'cover_img_url': 'https://shared.deepinfra.com/models/llava-hf/llava-1.5-7b-hf/cover_image.ed4fba7a25b147e7fe6675e9f760585e11274e8ee72596e6412447260493cd4f.webp', 'tags': [...], 'pricing': {...}, 'max_tokens': 4096, 'replaced_by': None, 'deprecated': None, 'quantization': 'fp16'}, {'model_name': 'meta-llama/Meta-Llama-3-70B-Instruct', 'type': 'text-generation', 'reported_type': 'text-generation', 'description': 'Model Details Meta developed and released the Meta Llama 3 family of large language models (LLMs), a collection of pretrained and instruction tuned generative text models in 8 and 70B sizes.', 'cover_img_url': 'https://shared.deepinfra.com/models/meta-llama/Meta-Llama-3-70B-Instruct/cover_image.bcffae761540e7dd36aea32e2a576690d43592a0fc39b9edbe83a5420758aabf.webp', 'tags': [...], 'pricing': {...}, 'max_tokens': 8192, 'replaced_by': None, 'deprecated': None, 'quantization': 'bfloat16'}, {'model_name': 'microsoft/Phi-3-medium-4k-instruct', 'type': 'text-generation', 'reported_type': 'text-generation', 'description': 'The Phi-3-Medium-4K-Instruct is a powerful and lightweight language model with 14 billion parameters, trained on high-quality data to excel in instruction following and safety measures. It demonstrates exceptional performance across benchmarks, including common sense, language understanding, and logical reasoning, outperforming models of similar size.', 'cover_img_url': 'https://shared.deepinfra.com/models/microsoft/Phi-3-medium-4k-instruct/cover_image.6c7f810d3584719025ba43de13448d318bda84043a08af1b9718c61d9498b18c.webp', 'tags': [...], 'pricing': {...}, 'max_tokens': 4096, 'replaced_by': None, 'deprecated': None, 'quantization': 'bfloat16'}]
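The deleted edsl/test_h was a stray dump of DeepInfra model metadata: a list of dicts keyed by model_name, each carrying deprecated (epoch seconds or None) and replaced_by fields. A hedged sketch for filtering records of that shape; the function names here are hypothetical:

from typing import Optional

def active_models(records: list[dict]) -> list[str]:
    # Keep entries whose 'deprecated' timestamp is unset.
    return [r["model_name"] for r in records if r.get("deprecated") is None]

def replacement_for(records: list[dict], name: str) -> Optional[str]:
    # Return the suggested successor model, if the dump names one.
    for r in records:
        if r["model_name"] == name:
            return r.get("replaced_by")
    return None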
edsl/utilities/gcp_bucket/example.py DELETED
@@ -1,50 +0,0 @@
- from edsl.utilities.gcp_bucket.cloud_storage import CloudStorageManager
-
- # API URLs
- secret_token = "your-secret-token"
-
- # File paths and names
- upload_file_path = "./app.py"
- upload_file_name = "new_upload.py"
-
- # Initialize CloudStorageManager
- manager = CloudStorageManager(secret_token=secret_token) # secret_token only for upload operations
-
- # Upload Process
- try:
-     manager.upload_file(upload_file_path, upload_file_name)
-     print("File upload process completed.")
- except Exception as e:
-     print(f"Upload error: {str(e)}")
-
- # Download Process
- file_name = "new_upload.py" # Name for the downloaded file
- save_name = "res_download.py"
- try:
-     manager.download_file(file_name, save_name)
-     print("File download process completed.")
- except Exception as e:
-     print(f"Download error: {str(e)}")
-
- # List files
- try:
-     print("listing files")
-     out = manager.list_files()
-     for x in out["data"]:
-         print(f"file_name: {x['file_name']}", f"url: {x['url']}")
- except Exception as e:
-     print(f"Exception in listing files", str(e))
-
- # Delete file
- try:
-     manager.delete_file("new_upload.py")
- except Exception as e:
-     print(f"Exception in deleting file", str(e))
- # List files
- try:
-     print("listing files")
-     out = manager.list_files()
-     for x in out["data"]:
-         print(f"file_name: {x['file_name']}", f"url: {x['url']}")
- except Exception as e:
-     print(f"Exception in listing files", str(e))