voyageai-cli 1.19.2 → 1.20.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +158 -23
- package/package.json +7 -1
- package/src/cli.js +2 -0
- package/src/commands/app.js +155 -0
- package/src/commands/completions.js +19 -6
- package/src/commands/eval.js +353 -32
- package/src/commands/playground.js +45 -0
- package/src/lib/explanations.js +264 -0
- package/src/playground/icons/dark/128.png +0 -0
- package/src/playground/icons/dark/16.png +0 -0
- package/src/playground/icons/dark/256.png +0 -0
- package/src/playground/icons/dark/32.png +0 -0
- package/src/playground/icons/dark/64.png +0 -0
- package/src/playground/icons/glyphs/Bulb.svg +5 -0
- package/src/playground/icons/glyphs/Config.svg +3 -0
- package/src/playground/icons/glyphs/Gauge.svg +4 -0
- package/src/playground/icons/glyphs/InfoWithCircle.svg +3 -0
- package/src/playground/icons/glyphs/LightningBolt.svg +3 -0
- package/src/playground/icons/glyphs/MagnifyingGlass.svg +3 -0
- package/src/playground/icons/glyphs/MultiDirectionArrow.svg +6 -0
- package/src/playground/icons/light/128.png +0 -0
- package/src/playground/icons/light/16.png +0 -0
- package/src/playground/icons/light/256.png +0 -0
- package/src/playground/icons/light/32.png +0 -0
- package/src/playground/icons/light/64.png +0 -0
- package/src/playground/index.html +2396 -177
- package/NOTICE +0 -23
- package/demo-readme.gif +0 -0
package/src/lib/explanations.js
CHANGED
@@ -676,6 +676,244 @@ const concepts = {
       'vai benchmark space',
     ],
   },
+
+  'multimodal-embeddings': {
+    title: 'Multimodal Embeddings',
+    summary: 'Embed images and text into the same vector space',
+    content: [
+      `${pc.bold('What are multimodal embeddings?')}`,
+      `${pc.cyan('Multimodal embeddings')} encode both text and images into the same vector space.`,
+      `Unlike text-only models, a multimodal model can process a photo, a slide deck,`,
+      `a PDF screenshot, or a mix of text and images — and produce a vector that lives`,
+      `in the ${pc.cyan('same space')} as pure text embeddings.`,
+      ``,
+      `${pc.bold('Why this matters:')}`,
+      `  ${pc.dim('•')} Search images with text queries ("sunset over mountains")`,
+      `  ${pc.dim('•')} Search text with image queries (drop a photo, find matching descriptions)`,
+      `  ${pc.dim('•')} Compare images to text directly with cosine similarity`,
+      `  ${pc.dim('•')} Build RAG pipelines over documents with charts, tables, and figures`,
+      `  ${pc.dim('•')} No OCR or complex document parsing needed — the model sees the visuals`,
+      ``,
+      `${pc.bold('How Voyage AI is different:')}`,
+      `Most multimodal models (CLIP, Cohere, Amazon Titan) use ${pc.cyan('separate encoders')} for`,
+      `text and images — a "text tower" and a "vision tower." This creates a fundamental`,
+      `problem called the ${pc.cyan('modality gap')}: text vectors cluster with other text, and image`,
+      `vectors cluster with other images, regardless of semantic content.`,
+      ``,
+      `Voyage's ${pc.cyan('voyage-multimodal-3.5')} processes both modalities through a ${pc.cyan('single unified')}`,
+      `${pc.cyan('transformer backbone')} — the same architecture used in modern vision-language models,`,
+      `but for vectorization. This eliminates the modality gap and enables true cross-modal`,
+      `search where a text query finds the most relevant image, not just the closest text.`,
+      ``,
+      `${pc.bold('Interleaved inputs:')}`,
+      `Unlike CLIP-style models that accept a single text OR a single image, Voyage`,
+      `multimodal models accept ${pc.cyan('interleaved sequences')} of text and images. You can embed`,
+      `a slide that has a title, a chart, and bullet points as a single input — the`,
+      `model captures the spatial and contextual relationships between all elements.`,
+      ``,
+      `${pc.bold('Supported formats:')} PNG, JPEG, WebP, GIF`,
+      `${pc.bold('Max image size:')} 16 million pixels, 20 MB`,
+      `${pc.bold('Token counting:')} Every 560 pixels ≈ 1 token`,
+    ].join('\n'),
+    links: [
+      'https://blog.voyageai.com/2026/01/15/voyage-multimodal-3-5/',
+      'https://blog.voyageai.com/2024/11/12/voyage-multimodal-3/',
+      'https://docs.voyageai.com/docs/multimodal-embeddings',
+    ],
+    tryIt: [
+      'vai embed --image photo.jpg --model voyage-multimodal-3.5',
+      'vai embed --image chart.png --text "Q4 revenue growth" --model voyage-multimodal-3.5',
+    ],
+  },
+
+  'cross-modal-search': {
+    title: 'Cross-Modal Search',
+    summary: 'Search images with text and text with images',
+    content: [
+      `${pc.bold('What is cross-modal search?')}`,
+      `${pc.cyan('Cross-modal search')} means querying across different data types — for example,`,
+      `typing "a cat sitting on a windowsill" and getting back the most relevant`,
+      `${pc.cyan('photos')} from your collection, or uploading a product photo and finding the`,
+      `most relevant ${pc.cyan('text descriptions')} in your catalog.`,
+      ``,
+      `${pc.bold('How it works:')}`,
+      `  ${pc.dim('1.')} Embed all your content (images, text, or mixed) with a multimodal model`,
+      `  ${pc.dim('2.')} Store the vectors in a database like MongoDB Atlas Vector Search`,
+      `  ${pc.dim('3.')} At query time, embed the query (text or image) with the same model`,
+      `  ${pc.dim('4.')} Find nearest neighbors — results can be any modality`,
+      ``,
+      `${pc.bold('Use cases:')}`,
+      `  ${pc.dim('•')} ${pc.cyan('E-commerce:')} "Show me red running shoes" → product photos`,
+      `  ${pc.dim('•')} ${pc.cyan('Medical:')} Upload an X-ray → find similar cases in the database`,
+      `  ${pc.dim('•')} ${pc.cyan('Legal:')} Search for relevant contract clauses across scanned PDFs`,
+      `  ${pc.dim('•')} ${pc.cyan('Education:')} "Explain photosynthesis" → find diagrams and text explanations`,
+      `  ${pc.dim('•')} ${pc.cyan('Content moderation:')} Check if uploaded images match banned content descriptions`,
+      ``,
+      `${pc.bold('Try it in the Multimodal tab:')}`,
+      `The Cross-Modal Gallery lets you build a mini corpus of images and text, then`,
+      `search across both modalities. Results are ranked by cosine similarity regardless`,
+      `of whether they're images or text — demonstrating true unified search.`,
+      ``,
+      `${pc.bold('The modality gap problem:')}`,
+      `CLIP-style models suffer from a ${pc.cyan('modality gap')} — text and image vectors occupy`,
+      `different regions of the embedding space. A text query like "sunset" will rank`,
+      `${pc.cyan('irrelevant texts')} higher than a ${pc.cyan('perfect sunset photo')}, because the text vectors`,
+      `are geometrically closer to each other regardless of meaning.`,
+      ``,
+      `Voyage's unified backbone eliminates this bias. In benchmarks, voyage-multimodal-3`,
+      `outperforms CLIP by ${pc.cyan('41%')} on figure/table retrieval and ${pc.cyan('27%')} on document screenshots.`,
+    ].join('\n'),
+    links: [
+      'https://docs.voyageai.com/docs/multimodal-embeddings',
+      'https://www.mongodb.com/docs/atlas/atlas-vector-search/vector-search-overview/',
+    ],
+    tryIt: [
+      'vai embed --image product.jpg --model voyage-multimodal-3.5',
+      'vai embed "red running shoes" --model voyage-multimodal-3.5',
+    ],
+  },
+
+  'modality-gap': {
+    title: 'The Modality Gap',
+    summary: 'Why CLIP-style models fail at mixed search — and how Voyage solves it',
+    content: [
+      `${pc.bold('What is the modality gap?')}`,
+      `The ${pc.cyan('modality gap')} is a well-documented phenomenon in CLIP-style multimodal models`,
+      `where text and image embeddings occupy ${pc.cyan('separate regions')} of the vector space, even`,
+      `when they represent the same concept.`,
+      ``,
+      `${pc.bold('Example:')} Given the text "I address you, members of Congress..." and a screenshot`,
+      `of that exact text, a CLIP model will place the screenshot's vector ${pc.cyan('closer to')}`,
+      `${pc.cyan('other random images')} than to the text it literally contains. The vectors cluster`,
+      `by modality (text vs. image), not by meaning.`,
+      ``,
+      `${pc.bold('Why it happens:')}`,
+      `CLIP-style architectures have two ${pc.cyan('independent encoders')} — a text transformer and`,
+      `a vision transformer — that are trained with contrastive learning to align their`,
+      `outputs. But because the encoders are separate, they develop different internal`,
+      `representations that never fully converge. The result is a geometric gap between`,
+      `the two modality clusters.`,
+      ``,
+      `${pc.bold('Consequences for search:')}`,
+      `  ${pc.dim('•')} Text queries retrieve ${pc.cyan('irrelevant text')} over ${pc.cyan('relevant images')}`,
+      `  ${pc.dim('•')} Image queries retrieve ${pc.cyan('irrelevant images')} over ${pc.cyan('relevant text')}`,
+      `  ${pc.dim('•')} You essentially get two separate search systems, not one unified one`,
+      `  ${pc.dim('•')} Cosine similarity across modalities is not meaningful`,
+      ``,
+      `${pc.bold('How Voyage AI solves this:')}`,
+      `${pc.cyan('voyage-multimodal-3.5')} uses a ${pc.cyan('single transformer backbone')} that processes both`,
+      `text and images through the same network — similar to modern vision-language`,
+      `models (GPT-4V, Claude), but optimized for vectorization instead of generation.`,
+      ``,
+      `Because there's one encoder, not two, text and image vectors occupy the ${pc.cyan('same')}`,
+      `region of the embedding space. A sunset photo and the text "sunset over the`,
+      `ocean" are geometrically close — as they should be.`,
+      ``,
+      `This is why Voyage multimodal models outperform CLIP by ${pc.cyan('40%+')} on cross-modal`,
+      `retrieval benchmarks. The gap simply doesn't exist.`,
+    ].join('\n'),
+    links: [
+      'https://arxiv.org/abs/2203.02053',
+      'https://blog.voyageai.com/2024/11/12/voyage-multimodal-3/',
+    ],
+    tryIt: [],
+  },
+
+  'multimodal-rag': {
+    title: 'Multimodal RAG',
+    summary: 'Build RAG pipelines over documents with images, charts, and tables',
+    content: [
+      `${pc.bold('What is multimodal RAG?')}`,
+      `Traditional RAG extracts text from documents, embeds it, and retrieves relevant`,
+      `chunks to augment LLM prompts. ${pc.cyan('Multimodal RAG')} extends this to documents that`,
+      `contain images, charts, tables, and complex layouts — without requiring OCR or`,
+      `text extraction.`,
+      ``,
+      `${pc.bold('The traditional approach (fragile):')}`,
+      `  ${pc.dim('1.')} Extract text from PDF (OCR, pdftotext, etc.)`,
+      `  ${pc.dim('2.')} Hope the table structure is preserved (it usually isn't)`,
+      `  ${pc.dim('3.')} Lose all charts, figures, and spatial layout`,
+      `  ${pc.dim('4.')} Embed the extracted text`,
+      `  ${pc.dim('5.')} Retrieve and pray the LLM understands the mangled tables`,
+      ``,
+      `${pc.bold('The multimodal approach (robust):')}`,
+      `  ${pc.dim('1.')} Screenshot each page of the PDF (or each slide, each figure)`,
+      `  ${pc.dim('2.')} Embed the screenshots directly with ${pc.cyan('voyage-multimodal-3.5')}`,
+      `  ${pc.dim('3.')} The model "reads" text, charts, tables, and layout natively`,
+      `  ${pc.dim('4.')} Search with text queries — "What was Q4 revenue?" finds the right chart`,
+      `  ${pc.dim('5.')} Pass the retrieved screenshot + query to a vision-capable LLM`,
+      ``,
+      `${pc.bold('Why it works:')}`,
+      `Voyage's multimodal model captures ${pc.cyan('visual features')} that text extraction loses:`,
+      `  ${pc.dim('•')} Font size and emphasis (headings vs. body text)`,
+      `  ${pc.dim('•')} Spatial layout (sidebars, columns, captions)`,
+      `  ${pc.dim('•')} Chart structure (bar charts, line graphs, pie charts)`,
+      `  ${pc.dim('•')} Table formatting (rows, columns, headers)`,
+      `  ${pc.dim('•')} Diagrams and flowcharts`,
+      ``,
+      `${pc.bold('Best practices:')}`,
+      `  ${pc.dim('•')} Use ${pc.cyan('input_type: "document"')} when embedding corpus pages/slides`,
+      `  ${pc.dim('•')} Use ${pc.cyan('input_type: "query"')} when embedding the user's search query`,
+      `  ${pc.dim('•')} Keep images under 16M pixels (resize if needed)`,
+      `  ${pc.dim('•')} For long PDFs, embed each page separately for granular retrieval`,
+      `  ${pc.dim('•')} Combine with text chunks for hybrid multimodal+text retrieval`,
+    ].join('\n'),
+    links: [
+      'https://docs.voyageai.com/docs/multimodal-embeddings',
+      'https://www.mongodb.com/docs/atlas/atlas-vector-search/tutorials/',
+    ],
+    tryIt: [
+      'vai embed --image slide1.png --input-type document --model voyage-multimodal-3.5',
+      'vai embed "What was Q4 revenue?" --input-type query --model voyage-multimodal-3.5',
+    ],
+  },
+
+  'rerank-eval': {
+    title: 'Reranking Evaluation — nDCG, Recall, MRR for Rerankers',
+    summary: 'Measure how well a reranker surfaces relevant documents',
+    content: [
+      `${pc.bold('What is reranking evaluation?')}`,
+      `After an initial retrieval step (vector search, BM25, etc.) returns candidate`,
+      `documents, a reranker re-orders them by relevance. Evaluation measures how well`,
+      `the reranker puts the truly relevant documents at the top of the list.`,
+      ``,
+      `${pc.bold('Key metrics:')}`,
+      `  ${pc.dim('•')} ${pc.cyan('nDCG@K')} — Normalized Discounted Cumulative Gain. Measures ranking quality`,
+      `     with position-weighted scoring. A result at rank 1 matters more than rank 10.`,
+      `     Range: 0.0 (worst) to 1.0 (perfect). The standard metric for reranker leaderboards.`,
+      `  ${pc.dim('•')} ${pc.cyan('Recall@K')} — What fraction of relevant documents appear in the top K?`,
+      `     Recall@5 = 0.8 means 80% of relevant docs are in the top 5.`,
+      `  ${pc.dim('•')} ${pc.cyan('MRR')} — Mean Reciprocal Rank. 1/position of the first relevant result.`,
+      `     MRR = 1.0 means every query's first result is relevant.`,
+      `  ${pc.dim('•')} ${pc.cyan('MAP')} — Mean Average Precision. Area under the precision-recall curve.`,
+      `  ${pc.dim('•')} ${pc.cyan('P@K')} — Precision at K. Fraction of top-K results that are relevant.`,
+      ``,
+      `${pc.bold('Test set format (JSONL):')}`,
+      `  ${pc.dim('{"query": "...", "documents": ["doc1", "doc2", ...], "relevant": [0, 2]}')}`,
+      `  • ${pc.cyan('query')} — The search query`,
+      `  • ${pc.cyan('documents')} — Candidate documents to rerank`,
+      `  • ${pc.cyan('relevant')} — Indices into the documents array that are relevant`,
+      ``,
+      `${pc.bold('Comparing rerankers:')}`,
+      `Use ${pc.cyan('--models')} to compare multiple rerank models side by side.`,
+      `The evaluation shows nDCG, Recall, latency, cost, and ranking agreement.`,
+      `High agreement (>80%) on top-5 rankings suggests the cheaper model is sufficient.`,
+      ``,
+      `${pc.bold('How this relates to embedding eval:')}`,
+      `  ${pc.dim('•')} ${pc.cyan('vai eval')} (default) evaluates the full pipeline: embed → search → (rerank)`,
+      `  ${pc.dim('•')} ${pc.cyan('vai eval --mode rerank')} evaluates reranking in isolation`,
+      `  Use both: first optimize your embedding retrieval, then optimize reranking.`,
+    ].join('\n'),
+    links: [
+      'https://agentset.ai/rerankers',
+      'https://en.wikipedia.org/wiki/Discounted_cumulative_gain',
+    ],
+    tryIt: [
+      'vai eval --mode rerank --test-set rerank-test.jsonl',
+      'vai eval --mode rerank --models "rerank-2.5,rerank-2.5-lite" --test-set test.jsonl',
+      'vai eval --mode rerank --test-set test.jsonl --json',
+    ],
+  },
 };
 
 /**
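The 'multimodal-embeddings' and 'multimodal-rag' entries above describe interleaved text+image inputs and the document/query input types. For reference, here is a minimal sketch of such a request against the Voyage multimodal embeddings endpoint. The endpoint and payload field names follow the multimodal-embeddings page on docs.voyageai.com linked in those entries, so verify them there before relying on this; slide1.png and the caption strings are placeholder inputs.

// Sketch only: verify endpoint and field names against docs.voyageai.com.
import { readFile } from 'node:fs/promises';

async function embedSlide(apiKey) {
  const png = await readFile('slide1.png'); // placeholder input file

  const res = await fetch('https://api.voyageai.com/v1/multimodalembeddings', {
    method: 'POST',
    headers: {
      Authorization: `Bearer ${apiKey}`,
      'Content-Type': 'application/json',
    },
    body: JSON.stringify({
      model: 'voyage-multimodal-3.5',
      input_type: 'document', // use 'query' when embedding the search query
      inputs: [
        {
          // One interleaved input: title text + chart image + caption text.
          content: [
            { type: 'text', text: 'Q4 revenue growth' },
            {
              type: 'image_base64',
              image_base64: `data:image/png;base64,${png.toString('base64')}`,
            },
            { type: 'text', text: 'Revenue grew quarter over quarter.' },
          ],
        },
      ],
    }),
  });

  const { data } = await res.json();
  return data[0].embedding; // one vector for the whole interleaved input
}

// Usage: const vec = await embedSlide(process.env.VOYAGE_API_KEY);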
@@ -750,6 +988,32 @@ const aliases = {
   'open-weight': 'voyage-4-nano',
   huggingface: 'voyage-4-nano',
   local: 'voyage-4-nano',
+  multimodal: 'multimodal-embeddings',
+  'multimodal-embeddings': 'multimodal-embeddings',
+  'multimodal-3': 'multimodal-embeddings',
+  'multimodal-3.5': 'multimodal-embeddings',
+  'voyage-multimodal': 'multimodal-embeddings',
+  'voyage-multimodal-3': 'multimodal-embeddings',
+  'voyage-multimodal-3.5': 'multimodal-embeddings',
+  'image-embedding': 'multimodal-embeddings',
+  'image-embeddings': 'multimodal-embeddings',
+  'cross-modal': 'cross-modal-search',
+  'cross-modal-search': 'cross-modal-search',
+  'crossmodal': 'cross-modal-search',
+  'image-search': 'cross-modal-search',
+  'modality-gap': 'modality-gap',
+  'modality gap': 'modality-gap',
+  clip: 'modality-gap',
+  'multimodal-rag': 'multimodal-rag',
+  'visual-rag': 'multimodal-rag',
+  'document-screenshot': 'multimodal-rag',
+  'pdf-rag': 'multimodal-rag',
+  'rerank-eval': 'rerank-eval',
+  'reranking-eval': 'rerank-eval',
+  ndcg: 'rerank-eval',
+  recall: 'rerank-eval',
+  mrr: 'rerank-eval',
+  'eval-rerank': 'rerank-eval',
 };
 
 /**
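The new 'rerank-eval' entry lists nDCG@K, Recall@K, and MRR without their formulas. Below is a minimal, self-contained sketch of how these metrics are typically computed for a single query; it is illustrative only, not the package's actual implementation in src/commands/eval.js. The ranking/relevant inputs mirror the JSONL test-set format described in the entry.

// Illustrative metric sketch; not the implementation in src/commands/eval.js.
// `ranking` is the reranker's output order (indices into `documents`);
// `relevant` is a Set of relevant indices, as in the JSONL test-set format.

function recallAtK(ranking, relevant, k) {
  const hits = ranking.slice(0, k).filter((i) => relevant.has(i)).length;
  return hits / relevant.size;
}

function reciprocalRank(ranking, relevant) {
  const pos = ranking.findIndex((i) => relevant.has(i));
  return pos === -1 ? 0 : 1 / (pos + 1); // MRR averages this over all queries
}

function ndcgAtK(ranking, relevant, k) {
  // Binary relevance: gain 1 for a relevant doc, discounted by log2(rank + 1).
  let dcg = 0;
  ranking.slice(0, k).forEach((docIdx, rank) => {
    if (relevant.has(docIdx)) dcg += 1 / Math.log2(rank + 2);
  });
  // Ideal DCG: all relevant docs stacked at the top of the list.
  let idcg = 0;
  for (let rank = 0; rank < Math.min(relevant.size, k); rank++) {
    idcg += 1 / Math.log2(rank + 2);
  }
  return idcg === 0 ? 0 : dcg / idcg;
}

// Example: reranker returned docs in order [2, 0, 3, 1]; docs 0 and 2 are relevant.
const relevant = new Set([0, 2]);
console.log(recallAtK([2, 0, 3, 1], relevant, 2)); // 1.0: both hits in top 2
console.log(reciprocalRank([2, 0, 3, 1], relevant)); // 1.0: first hit at rank 1
console.log(ndcgAtK([2, 0, 3, 1], relevant, 4)); // 1.0: relevant docs ranked first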
package/src/playground/icons/dark/128.png
Binary file

package/src/playground/icons/dark/16.png
Binary file

package/src/playground/icons/dark/256.png
Binary file

package/src/playground/icons/dark/32.png
Binary file

package/src/playground/icons/dark/64.png
Binary file
package/src/playground/icons/glyphs/Bulb.svg
ADDED
@@ -0,0 +1,5 @@
+<svg width="16" height="16" viewBox="0 0 16 16" fill="none" xmlns="http://www.w3.org/2000/svg">
+<path d="M12.3311 8.5C12.7565 7.76457 13 6.91072 13 6C13 3.23858 10.7614 1 8 1C5.23858 1 3 3.23858 3 6C3 6.94628 3.26287 7.83117 3.71958 8.58561L5.40749 11.501C5.58628 11.8099 5.91607 12 6.27291 12H6.5V6C6.5 5.17157 7.17157 4.5 8 4.5C8.82843 4.5 9.5 5.17157 9.5 6V12H9.72368C10.0793 12 10.4082 11.8111 10.5874 11.5039L12.34 8.5H12.3311Z" fill="#000000"/>
+<path d="M7.5 6V12H8.5V6C8.5 5.72386 8.27614 5.5 8 5.5C7.72386 5.5 7.5 5.72386 7.5 6Z" fill="#000000"/>
+<path d="M10 14V13H6V14C6 14.5523 6.44772 15 7 15H9C9.55228 15 10 14.5523 10 14Z" fill="#000000"/>
+</svg>

package/src/playground/icons/glyphs/Config.svg
ADDED
@@ -0,0 +1,3 @@
+<svg width="16" height="16" viewBox="0 0 16 16" fill="none">
+<path d="M12.7 2.56989C12.41 2.56989 12.17 2.80989 12.17 3.09989V4.34989L11.32 3.49989C9.21 1.39989 5.75 1.51989 3.78 3.75989C2.65 5.03989 2.23 6.82989 2.68 8.48989C3.24 10.5299 4.98 12.0099 7.08 12.2299L8.1 12.3399C8.42 13.2799 9.3 13.9499 10.34 13.9499C11.65 13.9499 12.71 12.8899 12.71 11.5799C12.71 10.2699 11.65 9.20989 10.34 9.20989C9.14 9.20989 8.16 10.0999 8 11.2599L7.19 11.1799C5.53 11.0099 4.14 9.82989 3.7 8.21989C3.34 6.90989 3.67 5.48989 4.58 4.46989C6.15 2.68989 8.9 2.59989 10.57 4.26989L11.42 5.11989H10.17C9.88 5.11989 9.64 5.35989 9.64 5.64989C9.64 5.93989 9.88 6.17989 10.17 6.17989H12.71C13 6.17989 13.24 5.93989 13.24 5.64989V3.09989C13.24 2.80989 13 2.56989 12.71 2.56989H12.7ZM10.34 10.3099C11.04 10.3099 11.6 10.8799 11.6 11.5699C11.6 12.2599 11.03 12.8299 10.34 12.8299C9.65 12.8299 9.08 12.2599 9.08 11.5699C9.08 10.8799 9.65 10.3099 10.34 10.3099Z" fill="#000"/>
+</svg>

package/src/playground/icons/glyphs/Gauge.svg
ADDED
@@ -0,0 +1,4 @@
+<svg width="16" height="16" viewBox="0 0 16 16" fill="none" xmlns="http://www.w3.org/2000/svg">
+<path d="M1.041 10.2514C0.996713 10.6632 1.33666 11 1.75088 11H4.2449C4.65912 11 4.98569 10.6591 5.08798 10.2577C5.22027 9.73864 5.49013 9.25966 5.87533 8.87446C6.43906 8.31073 7.20364 7.99403 8.00088 7.99403C8.27011 7.99403 8.53562 8.03015 8.79093 8.0997L11.7818 5.10887C10.6623 4.39046 9.35172 4 8.00088 4C6.14436 4 4.36388 4.7375 3.05113 6.05025C1.91604 7.18534 1.21104 8.67012 1.041 10.2514Z" fill="#000000"/>
+<path d="M13.2967 6.42237L10.455 9.26409C10.6678 9.56493 10.8231 9.90191 10.9138 10.2577C11.0161 10.6591 11.3426 11 11.7568 11L14.2509 11C14.6651 11 15.005 10.6632 14.9608 10.2514C14.8087 8.83759 14.229 7.50093 13.2967 6.42237Z" fill="#000000"/>
+</svg>

package/src/playground/icons/glyphs/InfoWithCircle.svg
ADDED
@@ -0,0 +1,3 @@
+<svg width="16" height="16" viewBox="0 0 16 16" fill="none" xmlns="http://www.w3.org/2000/svg">
+<path fill-rule="evenodd" clip-rule="evenodd" d="M8 15C11.866 15 15 11.866 15 8C15 4.13401 11.866 1 8 1C4.13401 1 1 4.13401 1 8C1 11.866 4.13401 15 8 15ZM9 4C9 4.55228 8.55228 5 8 5C7.44772 5 7 4.55228 7 4C7 3.44772 7.44772 3 8 3C8.55228 3 9 3.44772 9 4ZM8 6C8.55228 6 9 6.44772 9 7V11H9.5C9.77614 11 10 11.2239 10 11.5C10 11.7761 9.77614 12 9.5 12H6.5C6.22386 12 6 11.7761 6 11.5C6 11.2239 6.22386 11 6.5 11H7V7H6.5C6.22386 7 6 6.77614 6 6.5C6 6.22386 6.22386 6 6.5 6H8Z" fill="#000000"/>
+</svg>

package/src/playground/icons/glyphs/LightningBolt.svg
ADDED
@@ -0,0 +1,3 @@
+<svg width="16" height="16" viewBox="0 0 16 16" fill="none" xmlns="http://www.w3.org/2000/svg">
+<path d="M9.22274 1.99296C9.22274 1.49561 8.56293 1.31233 8.30107 1.73667L4.07384 8.58696C3.87133 8.91513 4.10921 9.33717 4.49748 9.33717H6.77682L6.77682 14.0066C6.77682 14.504 7.43627 14.6879 7.69813 14.2635L11.9262 7.4118C12.1288 7.08363 11.8903 6.66244 11.5021 6.66244H9.22274V1.99296Z" fill="#000000"/>
+</svg>

package/src/playground/icons/glyphs/MagnifyingGlass.svg
ADDED
@@ -0,0 +1,3 @@
+<svg width="16" height="16" viewBox="0 0 16 16" fill="none" xmlns="http://www.w3.org/2000/svg">
+<path fill-rule="evenodd" clip-rule="evenodd" d="M2.3234 9.81874C4.07618 11.5715 6.75062 11.8398 8.78588 10.6244L12.93 14.7685C13.4377 15.2762 14.2608 15.2762 14.7685 14.7685C15.2762 14.2608 15.2762 13.4377 14.7685 12.93L10.6244 8.78588C11.8398 6.75062 11.5715 4.07619 9.81873 2.32341C7.74896 0.253628 4.39318 0.253628 2.3234 2.32341C0.253624 4.39319 0.253624 7.74896 2.3234 9.81874ZM7.98026 4.16188C9.03467 5.2163 9.03467 6.92585 7.98026 7.98026C6.92584 9.03468 5.2163 9.03468 4.16188 7.98026C3.10746 6.92585 3.10746 5.2163 4.16188 4.16188C5.2163 3.10747 6.92584 3.10747 7.98026 4.16188Z" fill="#000000"/>
+</svg>

package/src/playground/icons/glyphs/MultiDirectionArrow.svg
ADDED
@@ -0,0 +1,6 @@
+<svg width="16" height="16" viewBox="0 0 16 16" fill="none" xmlns="http://www.w3.org/2000/svg">
+<path d="M5 8.57279V13.4272C5 13.9565 4.39241 14.2015 4.0721 13.8014L2.12898 11.3742C1.95701 11.1594 1.95701 10.8406 2.12898 10.6258L4.0721 8.19856C4.39241 7.79846 5 8.04351 5 8.57279Z" fill="#000000"/>
+<path d="M5 10H12.5C12.7761 10 13 10.2239 13 10.5V11.5C13 11.7761 12.7761 12 12.5 12H5V10Z" fill="#000000"/>
+<path d="M11 7.42721V2.57279C11 2.04351 11.6076 1.79846 11.9279 2.19856L13.871 4.62577C14.043 4.84058 14.043 5.15942 13.871 5.37423L11.9279 7.80144C11.6076 8.20154 11 7.95649 11 7.42721Z" fill="#000000"/>
+<path d="M3 4.5C3 4.22386 3.22386 4 3.5 4H11V6H3.5C3.22386 6 3 5.77614 3 5.5V4.5Z" fill="#000000"/>
+</svg>
package/src/playground/icons/light/128.png
Binary file

package/src/playground/icons/light/16.png
Binary file

package/src/playground/icons/light/256.png
Binary file

package/src/playground/icons/light/32.png
Binary file

package/src/playground/icons/light/64.png
Binary file
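Finally, the four-step pipeline in the 'cross-modal-search' entry reduces to a single cosine-similarity pass once every item, image or text, lives in one vector space. A toy sketch follows, using hand-made stand-in vectors in place of real model output:

// Toy sketch of unified cross-modal ranking; the 3-dimensional vectors are
// stand-ins for real embeddings from a multimodal model.

function cosine(a, b) {
  let dot = 0, na = 0, nb = 0;
  for (let i = 0; i < a.length; i++) {
    dot += a[i] * b[i];
    na += a[i] * a[i];
    nb += b[i] * b[i];
  }
  return dot / (Math.sqrt(na) * Math.sqrt(nb));
}

// Mixed corpus: each item carries its modality and its embedding vector.
const corpus = [
  { id: 'sunset.jpg', modality: 'image', vec: [0.9, 0.1, 0.2] },
  { id: 'A sunset over the ocean', modality: 'text', vec: [0.88, 0.15, 0.18] },
  { id: 'invoice.png', modality: 'image', vec: [0.1, 0.9, 0.3] },
];

const queryVec = [0.92, 0.12, 0.21]; // embedding of the text query "sunset"

const ranked = corpus
  .map((doc) => ({ ...doc, score: cosine(queryVec, doc.vec) }))
  .sort((a, b) => b.score - a.score);

// Results interleave images and text purely by similarity, with no modality bias.
for (const { id, modality, score } of ranked) {
  console.log(`${score.toFixed(3)}  [${modality}]  ${id}`);
}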