@lobehub/chat 1.19.15 → 1.19.16
This diff shows the content of publicly released package versions as they appear in their respective public registries, and is provided for informational purposes only.
- package/CHANGELOG.md +25 -0
- package/locales/it-IT/models.json +128 -0
- package/locales/nl-NL/models.json +128 -0
- package/locales/nl-NL/providers.json +4 -0
- package/locales/pl-PL/providers.json +4 -0
- package/package.json +1 -1
- package/src/libs/unstructured/__tests__/fixtures/table-parse/auto-partition-basic-output.json +17 -198
- package/src/libs/unstructured/__tests__/fixtures/table-parse/auto-partition-basic-raw.json +0 -92
- package/src/libs/unstructured/__tests__/index.test.ts +3 -3
- package/src/store/global/action.test.ts +53 -0
- package/src/store/global/action.ts +16 -2
package/CHANGELOG.md
CHANGED
@@ -2,6 +2,31 @@
 
 # Changelog
 
+### [Version 1.19.16](https://github.com/lobehub/lobe-chat/compare/v1.19.15...v1.19.16)
+
+<sup>Released on **2024-09-21**</sup>
+
+#### 💄 Styles
+
+- **misc**: Improve i18n for discover and improve version check.
+
+<br/>
+
+<details>
+<summary><kbd>Improvements and Fixes</kbd></summary>
+
+#### Styles
+
+- **misc**: Improve i18n for discover and improve version check, closes [#4052](https://github.com/lobehub/lobe-chat/issues/4052) ([ef93712](https://github.com/lobehub/lobe-chat/commit/ef93712))
+
+</details>
+
+<div align="right">
+
+[](#readme-top)
+
+</div>
+
 ### [Version 1.19.15](https://github.com/lobehub/lobe-chat/compare/v1.19.14...v1.19.15)
 
 <sup>Released on **2024-09-20**</sup>
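The single changelog entry bundles two things: new i18n strings for the Discover page (the locale files below) and an improved version check in `package/src/store/global/action.ts` (see the file list above; the store code itself is not shown in this diff). For orientation only, a semver-aware update check usually looks like the sketch below; the `hasNewVersion` helper and the use of the `semver` package are assumptions for illustration, not code taken from this release.

```ts
// Illustrative sketch only — not the implementation in src/store/global/action.ts.
// Assumes the `semver` package; naive string comparison would mis-order "1.19.9" vs "1.19.16".
import semver from 'semver';

export const hasNewVersion = (latestVersion: string, currentVersion: string): boolean => {
  // Guard against malformed or missing version tags before comparing.
  if (!semver.valid(latestVersion) || !semver.valid(currentVersion)) return false;

  return semver.gt(latestVersion, currentVersion);
};

// Example: hasNewVersion('1.19.16', '1.19.15') === true
```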
package/locales/it-IT/models.json
CHANGED
@@ -44,6 +44,27 @@
|
|
44
44
|
"NousResearch/Nous-Hermes-2-Yi-34B": {
|
45
45
|
"description": "Nous Hermes-2 Yi (34B) offre output linguistici ottimizzati e possibilità di applicazione diversificate."
|
46
46
|
},
|
47
|
+
"Phi-3-5-mini-instruct": {
|
48
|
+
"description": "Aggiornamento del modello Phi-3-mini."
|
49
|
+
},
|
50
|
+
"Phi-3-medium-128k-instruct": {
|
51
|
+
"description": "Stesso modello Phi-3-medium, ma con una dimensione di contesto più grande per RAG o prompting a pochi colpi."
|
52
|
+
},
|
53
|
+
"Phi-3-medium-4k-instruct": {
|
54
|
+
"description": "Un modello con 14 miliardi di parametri, dimostra una qualità migliore rispetto a Phi-3-mini, con un focus su dati densi di ragionamento di alta qualità."
|
55
|
+
},
|
56
|
+
"Phi-3-mini-128k-instruct": {
|
57
|
+
"description": "Stesso modello Phi-3-mini, ma con una dimensione di contesto più grande per RAG o prompting a pochi colpi."
|
58
|
+
},
|
59
|
+
"Phi-3-mini-4k-instruct": {
|
60
|
+
"description": "Il membro più piccolo della famiglia Phi-3. Ottimizzato sia per qualità che per bassa latenza."
|
61
|
+
},
|
62
|
+
"Phi-3-small-128k-instruct": {
|
63
|
+
"description": "Stesso modello Phi-3-small, ma con una dimensione di contesto più grande per RAG o prompting a pochi colpi."
|
64
|
+
},
|
65
|
+
"Phi-3-small-8k-instruct": {
|
66
|
+
"description": "Un modello con 7 miliardi di parametri, dimostra una qualità migliore rispetto a Phi-3-mini, con un focus su dati densi di ragionamento di alta qualità."
|
67
|
+
},
|
47
68
|
"Pro-128k": {
|
48
69
|
"description": "Spark Pro-128K è dotato di capacità di elaborazione del contesto eccezionalmente grandi, in grado di gestire fino a 128K di informazioni contestuali, particolarmente adatto per contenuti lunghi che richiedono analisi complete e gestione di associazioni logiche a lungo termine, fornendo logica fluida e coerenza in comunicazioni testuali complesse e supporto per citazioni varie."
|
49
70
|
},
|
@@ -56,6 +77,24 @@
|
|
56
77
|
"Qwen/Qwen2-72B-Instruct": {
|
57
78
|
"description": "Qwen2 è un modello di linguaggio universale avanzato, supportando vari tipi di istruzioni."
|
58
79
|
},
|
80
|
+
"Qwen/Qwen2.5-14B-Instruct": {
|
81
|
+
"description": "Qwen2.5 è una nuova serie di modelli di linguaggio di grandi dimensioni, progettata per ottimizzare l'elaborazione di compiti istruzionali."
|
82
|
+
},
|
83
|
+
"Qwen/Qwen2.5-32B-Instruct": {
|
84
|
+
"description": "Qwen2.5 è una nuova serie di modelli di linguaggio di grandi dimensioni, progettata per ottimizzare l'elaborazione di compiti istruzionali."
|
85
|
+
},
|
86
|
+
"Qwen/Qwen2.5-72B-Instruct": {
|
87
|
+
"description": "Qwen2.5 è una nuova serie di modelli di linguaggio di grandi dimensioni, con capacità di comprensione e generazione superiori."
|
88
|
+
},
|
89
|
+
"Qwen/Qwen2.5-7B-Instruct": {
|
90
|
+
"description": "Qwen2.5 è una nuova serie di modelli di linguaggio di grandi dimensioni, progettata per ottimizzare l'elaborazione di compiti istruzionali."
|
91
|
+
},
|
92
|
+
"Qwen/Qwen2.5-Coder-7B-Instruct": {
|
93
|
+
"description": "Qwen2.5-Coder si concentra sulla scrittura di codice."
|
94
|
+
},
|
95
|
+
"Qwen/Qwen2.5-Math-72B-Instruct": {
|
96
|
+
"description": "Qwen2.5-Math si concentra sulla risoluzione di problemi nel campo della matematica, fornendo risposte professionali a domande di alta difficoltà."
|
97
|
+
},
|
59
98
|
"THUDM/glm-4-9b-chat": {
|
60
99
|
"description": "GLM-4 9B è una versione open source, progettata per fornire un'esperienza di dialogo ottimizzata per applicazioni conversazionali."
|
61
100
|
},
|
@@ -131,6 +170,15 @@
|
|
131
170
|
"accounts/yi-01-ai/models/yi-large": {
|
132
171
|
"description": "Il modello Yi-Large offre capacità eccezionali di elaborazione multilingue, utilizzabile per vari compiti di generazione e comprensione del linguaggio."
|
133
172
|
},
|
173
|
+
"ai21-jamba-1.5-large": {
|
174
|
+
"description": "Un modello multilingue con 398 miliardi di parametri (94 miliardi attivi), offre una finestra di contesto lunga 256K, chiamata di funzione, output strutturato e generazione ancorata."
|
175
|
+
},
|
176
|
+
"ai21-jamba-1.5-mini": {
|
177
|
+
"description": "Un modello multilingue con 52 miliardi di parametri (12 miliardi attivi), offre una finestra di contesto lunga 256K, chiamata di funzione, output strutturato e generazione ancorata."
|
178
|
+
},
|
179
|
+
"ai21-jamba-instruct": {
|
180
|
+
"description": "Un modello LLM basato su Mamba di grado di produzione per ottenere prestazioni, qualità e efficienza dei costi di prim'ordine."
|
181
|
+
},
|
134
182
|
"anthropic.claude-3-5-sonnet-20240620-v1:0": {
|
135
183
|
"description": "Claude 3.5 Sonnet ha elevato gli standard del settore, superando i modelli concorrenti e Claude 3 Opus, dimostrando prestazioni eccezionali in una vasta gamma di valutazioni, mantenendo la velocità e i costi dei nostri modelli di livello medio."
|
136
184
|
},
|
@@ -227,6 +275,12 @@
|
|
227
275
|
"cognitivecomputations/dolphin-mixtral-8x22b": {
|
228
276
|
"description": "Dolphin Mixtral 8x22B è un modello progettato per seguire istruzioni, dialogo e programmazione."
|
229
277
|
},
|
278
|
+
"cohere-command-r": {
|
279
|
+
"description": "Command R è un modello generativo scalabile mirato a RAG e all'uso di strumenti per abilitare l'IA su scala aziendale."
|
280
|
+
},
|
281
|
+
"cohere-command-r-plus": {
|
282
|
+
"description": "Command R+ è un modello ottimizzato per RAG all'avanguardia progettato per affrontare carichi di lavoro di livello aziendale."
|
283
|
+
},
|
230
284
|
"command-r": {
|
231
285
|
"description": "Command R è un LLM ottimizzato per compiti di dialogo e contesti lunghi, particolarmente adatto per interazioni dinamiche e gestione della conoscenza."
|
232
286
|
},
|
@@ -434,6 +488,8 @@
|
|
434
488
|
"internlm/internlm2_5-7b-chat": {
|
435
489
|
"description": "InternLM2.5 offre soluzioni di dialogo intelligente in vari scenari."
|
436
490
|
},
|
491
|
+
"jamba-1.5-large": {},
|
492
|
+
"jamba-1.5-mini": {},
|
437
493
|
"llama-3.1-70b-instruct": {
|
438
494
|
"description": "Il modello Llama 3.1 70B Instruct, con 70B parametri, offre prestazioni eccezionali in generazione di testi di grandi dimensioni e compiti di istruzione."
|
439
495
|
},
|
@@ -497,6 +553,21 @@
|
|
497
553
|
"mathstral": {
|
498
554
|
"description": "MathΣtral è progettato per la ricerca scientifica e il ragionamento matematico, offre capacità di calcolo efficaci e interpretazione dei risultati."
|
499
555
|
},
|
556
|
+
"meta-llama-3-70b-instruct": {
|
557
|
+
"description": "Un potente modello con 70 miliardi di parametri che eccelle nel ragionamento, nella codifica e nelle ampie applicazioni linguistiche."
|
558
|
+
},
|
559
|
+
"meta-llama-3-8b-instruct": {
|
560
|
+
"description": "Un modello versatile con 8 miliardi di parametri ottimizzato per compiti di dialogo e generazione di testo."
|
561
|
+
},
|
562
|
+
"meta-llama-3.1-405b-instruct": {
|
563
|
+
"description": "I modelli di testo solo ottimizzati per istruzioni Llama 3.1 sono progettati per casi d'uso di dialogo multilingue e superano molti dei modelli di chat open source e chiusi disponibili su benchmark industriali comuni."
|
564
|
+
},
|
565
|
+
"meta-llama-3.1-70b-instruct": {
|
566
|
+
"description": "I modelli di testo solo ottimizzati per istruzioni Llama 3.1 sono progettati per casi d'uso di dialogo multilingue e superano molti dei modelli di chat open source e chiusi disponibili su benchmark industriali comuni."
|
567
|
+
},
|
568
|
+
"meta-llama-3.1-8b-instruct": {
|
569
|
+
"description": "I modelli di testo solo ottimizzati per istruzioni Llama 3.1 sono progettati per casi d'uso di dialogo multilingue e superano molti dei modelli di chat open source e chiusi disponibili su benchmark industriali comuni."
|
570
|
+
},
|
500
571
|
"meta-llama/Llama-2-13b-chat-hf": {
|
501
572
|
"description": "LLaMA-2 Chat (13B) offre eccellenti capacità di elaborazione linguistica e un'interazione di alta qualità."
|
502
573
|
},
|
@@ -584,12 +655,21 @@
|
|
584
655
|
"mistral-large": {
|
585
656
|
"description": "Mixtral Large è il modello di punta di Mistral, combinando capacità di generazione di codice, matematica e ragionamento, supporta una finestra di contesto di 128k."
|
586
657
|
},
|
658
|
+
"mistral-large-2407": {
|
659
|
+
"description": "Mistral Large (2407) è un modello di linguaggio avanzato (LLM) con capacità di ragionamento, conoscenza e codifica all'avanguardia."
|
660
|
+
},
|
587
661
|
"mistral-large-latest": {
|
588
662
|
"description": "Mistral Large è il modello di punta, specializzato in compiti multilingue, ragionamento complesso e generazione di codice, è la scelta ideale per applicazioni di alta gamma."
|
589
663
|
},
|
590
664
|
"mistral-nemo": {
|
591
665
|
"description": "Mistral Nemo è un modello da 12B lanciato in collaborazione tra Mistral AI e NVIDIA, offre prestazioni eccellenti."
|
592
666
|
},
|
667
|
+
"mistral-small": {
|
668
|
+
"description": "Mistral Small può essere utilizzato in qualsiasi compito basato su linguaggio che richiede alta efficienza e bassa latenza."
|
669
|
+
},
|
670
|
+
"mistral-small-latest": {
|
671
|
+
"description": "Mistral Small è un'opzione economica, veloce e affidabile, adatta per casi d'uso come traduzione, sintesi e analisi del sentiment."
|
672
|
+
},
|
593
673
|
"mistralai/Mistral-7B-Instruct-v0.1": {
|
594
674
|
"description": "Mistral (7B) Instruct è noto per le sue alte prestazioni, adatto per vari compiti linguistici."
|
595
675
|
},
|
@@ -677,9 +757,30 @@
|
|
677
757
|
"phi3:14b": {
|
678
758
|
"description": "Phi-3 è un modello open source leggero lanciato da Microsoft, adatto per integrazioni efficienti e ragionamento su larga scala."
|
679
759
|
},
|
760
|
+
"pixtral-12b-2409": {
|
761
|
+
"description": "Il modello Pixtral dimostra potenti capacità in compiti di comprensione di grafici e immagini, domande e risposte su documenti, ragionamento multimodale e rispetto delle istruzioni, in grado di elaborare immagini a risoluzione naturale e proporzioni, e di gestire un numero arbitrario di immagini in una finestra di contesto lunga fino a 128K token."
|
762
|
+
},
|
763
|
+
"qwen-coder-turbo-latest": {
|
764
|
+
"description": "Modello di codice Tongyi Qwen."
|
765
|
+
},
|
680
766
|
"qwen-long": {
|
681
767
|
"description": "Qwen è un modello di linguaggio su larga scala che supporta contesti di testo lunghi e funzionalità di dialogo basate su documenti lunghi e multipli."
|
682
768
|
},
|
769
|
+
"qwen-math-plus-latest": {
|
770
|
+
"description": "Il modello matematico Tongyi Qwen è progettato specificamente per la risoluzione di problemi matematici."
|
771
|
+
},
|
772
|
+
"qwen-math-turbo-latest": {
|
773
|
+
"description": "Il modello matematico Tongyi Qwen è progettato specificamente per la risoluzione di problemi matematici."
|
774
|
+
},
|
775
|
+
"qwen-max-latest": {
|
776
|
+
"description": "Modello linguistico su larga scala Tongyi Qwen con miliardi di parametri, supporta input in diverse lingue tra cui cinese e inglese, attualmente il modello API dietro la versione del prodotto Tongyi Qwen 2.5."
|
777
|
+
},
|
778
|
+
"qwen-plus-latest": {
|
779
|
+
"description": "Versione potenziata del modello linguistico su larga scala Tongyi Qwen, supporta input in diverse lingue tra cui cinese e inglese."
|
780
|
+
},
|
781
|
+
"qwen-turbo-latest": {
|
782
|
+
"description": "Il modello linguistico su larga scala Tongyi Qwen, supporta input in diverse lingue tra cui cinese e inglese."
|
783
|
+
},
|
683
784
|
"qwen-vl-chat-v1": {
|
684
785
|
"description": "Qwen VL supporta modalità di interazione flessibili, inclusi modelli di domande e risposte multipli e creativi."
|
685
786
|
},
|
@@ -698,6 +799,33 @@
|
|
698
799
|
"qwen2": {
|
699
800
|
"description": "Qwen2 è la nuova generazione di modelli di linguaggio su larga scala di Alibaba, supporta prestazioni eccellenti per esigenze applicative diversificate."
|
700
801
|
},
|
802
|
+
"qwen2.5-14b-instruct": {
|
803
|
+
"description": "Modello da 14B di Tongyi Qwen 2.5, open source."
|
804
|
+
},
|
805
|
+
"qwen2.5-32b-instruct": {
|
806
|
+
"description": "Modello da 32B di Tongyi Qwen 2.5, open source."
|
807
|
+
},
|
808
|
+
"qwen2.5-72b-instruct": {
|
809
|
+
"description": "Modello da 72B di Tongyi Qwen 2.5, open source."
|
810
|
+
},
|
811
|
+
"qwen2.5-7b-instruct": {
|
812
|
+
"description": "Modello da 7B di Tongyi Qwen 2.5, open source."
|
813
|
+
},
|
814
|
+
"qwen2.5-coder-1.5b-instruct": {
|
815
|
+
"description": "Versione open source del modello di codice Tongyi Qwen."
|
816
|
+
},
|
817
|
+
"qwen2.5-coder-7b-instruct": {
|
818
|
+
"description": "Versione open source del modello di codice Tongyi Qwen."
|
819
|
+
},
|
820
|
+
"qwen2.5-math-1.5b-instruct": {
|
821
|
+
"description": "Il modello Qwen-Math ha potenti capacità di risoluzione di problemi matematici."
|
822
|
+
},
|
823
|
+
"qwen2.5-math-72b-instruct": {
|
824
|
+
"description": "Il modello Qwen-Math ha potenti capacità di risoluzione di problemi matematici."
|
825
|
+
},
|
826
|
+
"qwen2.5-math-7b-instruct": {
|
827
|
+
"description": "Il modello Qwen-Math ha potenti capacità di risoluzione di problemi matematici."
|
828
|
+
},
|
701
829
|
"qwen2:0.5b": {
|
702
830
|
"description": "Qwen2 è la nuova generazione di modelli di linguaggio su larga scala di Alibaba, supporta prestazioni eccellenti per esigenze applicative diversificate."
|
703
831
|
},
|
package/locales/nl-NL/models.json
CHANGED
@@ -44,6 +44,27 @@
|
|
44
44
|
"NousResearch/Nous-Hermes-2-Yi-34B": {
|
45
45
|
"description": "Nous Hermes-2 Yi (34B) biedt geoptimaliseerde taaloutput en diverse toepassingsmogelijkheden."
|
46
46
|
},
|
47
|
+
"Phi-3-5-mini-instruct": {
|
48
|
+
"description": "Vernieuwing van het Phi-3-mini model."
|
49
|
+
},
|
50
|
+
"Phi-3-medium-128k-instruct": {
|
51
|
+
"description": "Hetzelfde Phi-3-medium model, maar met een grotere contextgrootte voor RAG of few shot prompting."
|
52
|
+
},
|
53
|
+
"Phi-3-medium-4k-instruct": {
|
54
|
+
"description": "Een model met 14 miljard parameters, biedt betere kwaliteit dan Phi-3-mini, met een focus op hoogwaardige, redeneringsdichte gegevens."
|
55
|
+
},
|
56
|
+
"Phi-3-mini-128k-instruct": {
|
57
|
+
"description": "Hetzelfde Phi-3-mini model, maar met een grotere contextgrootte voor RAG of few shot prompting."
|
58
|
+
},
|
59
|
+
"Phi-3-mini-4k-instruct": {
|
60
|
+
"description": "De kleinste lid van de Phi-3 familie. Geoptimaliseerd voor zowel kwaliteit als lage latentie."
|
61
|
+
},
|
62
|
+
"Phi-3-small-128k-instruct": {
|
63
|
+
"description": "Hetzelfde Phi-3-small model, maar met een grotere contextgrootte voor RAG of few shot prompting."
|
64
|
+
},
|
65
|
+
"Phi-3-small-8k-instruct": {
|
66
|
+
"description": "Een model met 7 miljard parameters, biedt betere kwaliteit dan Phi-3-mini, met een focus op hoogwaardige, redeneringsdichte gegevens."
|
67
|
+
},
|
47
68
|
"Pro-128k": {
|
48
69
|
"description": "Spark Pro-128K is uitgerust met een enorme contextverwerkingscapaciteit, in staat om tot 128K contextinformatie te verwerken, bijzonder geschikt voor lange teksten die volledige analyse en langdurige logische verbanden vereisen, en biedt vloeiende en consistente logica met diverse referenties in complexe tekstcommunicatie."
|
49
70
|
},
|
@@ -56,6 +77,24 @@
|
|
56
77
|
"Qwen/Qwen2-72B-Instruct": {
|
57
78
|
"description": "Qwen2 is een geavanceerd algemeen taalmodel dat verschillende soorten instructies ondersteunt."
|
58
79
|
},
|
80
|
+
"Qwen/Qwen2.5-14B-Instruct": {
|
81
|
+
"description": "Qwen2.5 is een geheel nieuwe serie van grote taalmodellen, ontworpen om de verwerking van instructietaken te optimaliseren."
|
82
|
+
},
|
83
|
+
"Qwen/Qwen2.5-32B-Instruct": {
|
84
|
+
"description": "Qwen2.5 is een geheel nieuwe serie van grote taalmodellen, ontworpen om de verwerking van instructietaken te optimaliseren."
|
85
|
+
},
|
86
|
+
"Qwen/Qwen2.5-72B-Instruct": {
|
87
|
+
"description": "Qwen2.5 is een geheel nieuwe serie van grote taalmodellen, met sterkere begrip- en generatiecapaciteiten."
|
88
|
+
},
|
89
|
+
"Qwen/Qwen2.5-7B-Instruct": {
|
90
|
+
"description": "Qwen2.5 is een geheel nieuwe serie van grote taalmodellen, ontworpen om de verwerking van instructietaken te optimaliseren."
|
91
|
+
},
|
92
|
+
"Qwen/Qwen2.5-Coder-7B-Instruct": {
|
93
|
+
"description": "Qwen2.5-Coder richt zich op het schrijven van code."
|
94
|
+
},
|
95
|
+
"Qwen/Qwen2.5-Math-72B-Instruct": {
|
96
|
+
"description": "Qwen2.5-Math richt zich op het oplossen van wiskundige vraagstukken en biedt professionele antwoorden op moeilijke vragen."
|
97
|
+
},
|
59
98
|
"THUDM/glm-4-9b-chat": {
|
60
99
|
"description": "GLM-4 9B is de open-source versie die een geoptimaliseerde gesprekservaring biedt voor gespreksapplicaties."
|
61
100
|
},
|
@@ -131,6 +170,15 @@
|
|
131
170
|
"accounts/yi-01-ai/models/yi-large": {
|
132
171
|
"description": "Yi-Large model, met uitstekende meertalige verwerkingscapaciteiten, geschikt voor verschillende taalgeneratie- en begripstaken."
|
133
172
|
},
|
173
|
+
"ai21-jamba-1.5-large": {
|
174
|
+
"description": "Een meertalig model met 398 miljard parameters (94 miljard actief), biedt een contextvenster van 256K, functieaanroep, gestructureerde output en gegronde generatie."
|
175
|
+
},
|
176
|
+
"ai21-jamba-1.5-mini": {
|
177
|
+
"description": "Een meertalig model met 52 miljard parameters (12 miljard actief), biedt een contextvenster van 256K, functieaanroep, gestructureerde output en gegronde generatie."
|
178
|
+
},
|
179
|
+
"ai21-jamba-instruct": {
|
180
|
+
"description": "Een productieklare Mamba-gebaseerde LLM-model om de beste prestaties, kwaliteit en kostenefficiëntie te bereiken."
|
181
|
+
},
|
134
182
|
"anthropic.claude-3-5-sonnet-20240620-v1:0": {
|
135
183
|
"description": "Claude 3.5 Sonnet heeft de industrienormen verbeterd, met prestaties die de concurrentiemodellen en Claude 3 Opus overtreffen, en presteert uitstekend in brede evaluaties, met de snelheid en kosten van ons gemiddelde model."
|
136
184
|
},
|
@@ -227,6 +275,12 @@
|
|
227
275
|
"cognitivecomputations/dolphin-mixtral-8x22b": {
|
228
276
|
"description": "Dolphin Mixtral 8x22B is een model ontworpen voor instructievolging, gesprekken en programmeren."
|
229
277
|
},
|
278
|
+
"cohere-command-r": {
|
279
|
+
"description": "Command R is een schaalbaar generatief model gericht op RAG en Tool Use om productie-schaal AI voor ondernemingen mogelijk te maken."
|
280
|
+
},
|
281
|
+
"cohere-command-r-plus": {
|
282
|
+
"description": "Command R+ is een state-of-the-art RAG-geoptimaliseerd model ontworpen om enterprise-grade workloads aan te pakken."
|
283
|
+
},
|
230
284
|
"command-r": {
|
231
285
|
"description": "Command R is geoptimaliseerd voor conversatie- en lange contexttaken, bijzonder geschikt voor dynamische interactie en kennisbeheer."
|
232
286
|
},
|
@@ -434,6 +488,8 @@
|
|
434
488
|
"internlm/internlm2_5-7b-chat": {
|
435
489
|
"description": "InternLM2.5 biedt intelligente gespreksoplossingen voor meerdere scenario's."
|
436
490
|
},
|
491
|
+
"jamba-1.5-large": {},
|
492
|
+
"jamba-1.5-mini": {},
|
437
493
|
"llama-3.1-70b-instruct": {
|
438
494
|
"description": "Llama 3.1 70B Instruct model, met 70B parameters, biedt uitstekende prestaties in grote tekstgeneratie- en instructietaken."
|
439
495
|
},
|
@@ -497,6 +553,21 @@
|
|
497
553
|
"mathstral": {
|
498
554
|
"description": "MathΣtral is ontworpen voor wetenschappelijk onderzoek en wiskundige inferentie, biedt effectieve rekencapaciteiten en resultaatinterpretatie."
|
499
555
|
},
|
556
|
+
"meta-llama-3-70b-instruct": {
|
557
|
+
"description": "Een krachtig model met 70 miljard parameters dat uitblinkt in redeneren, coderen en brede taaltoepassingen."
|
558
|
+
},
|
559
|
+
"meta-llama-3-8b-instruct": {
|
560
|
+
"description": "Een veelzijdig model met 8 miljard parameters, geoptimaliseerd voor dialoog- en tekstgeneratietaken."
|
561
|
+
},
|
562
|
+
"meta-llama-3.1-405b-instruct": {
|
563
|
+
"description": "De Llama 3.1 instructie-geoptimaliseerde tekstmodellen zijn geoptimaliseerd voor meertalige dialoogtoepassingen en presteren beter dan veel beschikbare open source en gesloten chatmodellen op gangbare industriële benchmarks."
|
564
|
+
},
|
565
|
+
"meta-llama-3.1-70b-instruct": {
|
566
|
+
"description": "De Llama 3.1 instructie-geoptimaliseerde tekstmodellen zijn geoptimaliseerd voor meertalige dialoogtoepassingen en presteren beter dan veel beschikbare open source en gesloten chatmodellen op gangbare industriële benchmarks."
|
567
|
+
},
|
568
|
+
"meta-llama-3.1-8b-instruct": {
|
569
|
+
"description": "De Llama 3.1 instructie-geoptimaliseerde tekstmodellen zijn geoptimaliseerd voor meertalige dialoogtoepassingen en presteren beter dan veel beschikbare open source en gesloten chatmodellen op gangbare industriële benchmarks."
|
570
|
+
},
|
500
571
|
"meta-llama/Llama-2-13b-chat-hf": {
|
501
572
|
"description": "LLaMA-2 Chat (13B) biedt uitstekende taalverwerkingscapaciteiten en een geweldige interactie-ervaring."
|
502
573
|
},
|
@@ -584,12 +655,21 @@
|
|
584
655
|
"mistral-large": {
|
585
656
|
"description": "Mixtral Large is het vlaggenschipmodel van Mistral, dat de capaciteiten van codegeneratie, wiskunde en inferentie combineert, ondersteunt een contextvenster van 128k."
|
586
657
|
},
|
658
|
+
"mistral-large-2407": {
|
659
|
+
"description": "Mistral Large (2407) is een geavanceerd Large Language Model (LLM) met state-of-the-art redenerings-, kennis- en coderingscapaciteiten."
|
660
|
+
},
|
587
661
|
"mistral-large-latest": {
|
588
662
|
"description": "Mistral Large is het vlaggenschipmodel, dat uitblinkt in meertalige taken, complexe inferentie en codegeneratie, ideaal voor high-end toepassingen."
|
589
663
|
},
|
590
664
|
"mistral-nemo": {
|
591
665
|
"description": "Mistral Nemo is een 12B-model dat is ontwikkeld in samenwerking met Mistral AI en NVIDIA, biedt efficiënte prestaties."
|
592
666
|
},
|
667
|
+
"mistral-small": {
|
668
|
+
"description": "Mistral Small kan worden gebruikt voor elke taalkundige taak die hoge efficiëntie en lage latentie vereist."
|
669
|
+
},
|
670
|
+
"mistral-small-latest": {
|
671
|
+
"description": "Mistral Small is een kosteneffectieve, snelle en betrouwbare optie voor gebruikscases zoals vertaling, samenvatting en sentimentanalyse."
|
672
|
+
},
|
593
673
|
"mistralai/Mistral-7B-Instruct-v0.1": {
|
594
674
|
"description": "Mistral (7B) Instruct staat bekend om zijn hoge prestaties en is geschikt voor verschillende taalgerelateerde taken."
|
595
675
|
},
|
@@ -677,9 +757,30 @@
|
|
677
757
|
"phi3:14b": {
|
678
758
|
"description": "Phi-3 is een lichtgewicht open model van Microsoft, geschikt voor efficiënte integratie en grootschalige kennisinferentie."
|
679
759
|
},
|
760
|
+
"pixtral-12b-2409": {
|
761
|
+
"description": "Het Pixtral model toont sterke capaciteiten in taken zoals grafiek- en beeldbegrip, documentvraag-en-antwoord, multimodale redenering en instructievolging, en kan afbeeldingen met natuurlijke resolutie en beeldverhouding verwerken, evenals een onbeperkt aantal afbeeldingen in een lange contextvenster van maximaal 128K tokens."
|
762
|
+
},
|
763
|
+
"qwen-coder-turbo-latest": {
|
764
|
+
"description": "Het Tongyi Qianwen codeermodel."
|
765
|
+
},
|
680
766
|
"qwen-long": {
|
681
767
|
"description": "Qwen is een grootschalig taalmodel dat lange tekstcontexten ondersteunt, evenals dialoogfunctionaliteit op basis van lange documenten en meerdere documenten."
|
682
768
|
},
|
769
|
+
"qwen-math-plus-latest": {
|
770
|
+
"description": "Het Tongyi Qianwen wiskundemodel is speciaal ontworpen voor het oplossen van wiskundige problemen."
|
771
|
+
},
|
772
|
+
"qwen-math-turbo-latest": {
|
773
|
+
"description": "Het Tongyi Qianwen wiskundemodel is speciaal ontworpen voor het oplossen van wiskundige problemen."
|
774
|
+
},
|
775
|
+
"qwen-max-latest": {
|
776
|
+
"description": "Het Tongyi Qianwen model met een schaal van honderden miljarden, ondersteunt invoer in verschillende talen, waaronder Chinees en Engels, en is de API-model achter de huidige Tongyi Qianwen 2.5 productversie."
|
777
|
+
},
|
778
|
+
"qwen-plus-latest": {
|
779
|
+
"description": "De verbeterde versie van het Tongyi Qianwen supergrote taalmodel ondersteunt invoer in verschillende talen, waaronder Chinees en Engels."
|
780
|
+
},
|
781
|
+
"qwen-turbo-latest": {
|
782
|
+
"description": "De Tongyi Qianwen supergrote taalmodel ondersteunt invoer in verschillende talen, waaronder Chinees en Engels."
|
783
|
+
},
|
683
784
|
"qwen-vl-chat-v1": {
|
684
785
|
"description": "Qwen VL ondersteunt flexibele interactiemethoden, inclusief meerdere afbeeldingen, meerdere rondes van vraag en antwoord, en creatiecapaciteiten."
|
685
786
|
},
|
@@ -698,6 +799,33 @@
|
|
698
799
|
"qwen2": {
|
699
800
|
"description": "Qwen2 is Alibaba's nieuwe generatie grootschalig taalmodel, ondersteunt diverse toepassingsbehoeften met uitstekende prestaties."
|
700
801
|
},
|
802
|
+
"qwen2.5-14b-instruct": {
|
803
|
+
"description": "Het 14B model van Tongyi Qianwen 2.5 is open source beschikbaar."
|
804
|
+
},
|
805
|
+
"qwen2.5-32b-instruct": {
|
806
|
+
"description": "Het 32B model van Tongyi Qianwen 2.5 is open source beschikbaar."
|
807
|
+
},
|
808
|
+
"qwen2.5-72b-instruct": {
|
809
|
+
"description": "Het 72B model van Tongyi Qianwen 2.5 is open source beschikbaar."
|
810
|
+
},
|
811
|
+
"qwen2.5-7b-instruct": {
|
812
|
+
"description": "Het 7B model van Tongyi Qianwen 2.5 is open source beschikbaar."
|
813
|
+
},
|
814
|
+
"qwen2.5-coder-1.5b-instruct": {
|
815
|
+
"description": "De open source versie van het Tongyi Qianwen codeermodel."
|
816
|
+
},
|
817
|
+
"qwen2.5-coder-7b-instruct": {
|
818
|
+
"description": "De open source versie van het Tongyi Qianwen codeermodel."
|
819
|
+
},
|
820
|
+
"qwen2.5-math-1.5b-instruct": {
|
821
|
+
"description": "Het Qwen-Math model heeft krachtige capaciteiten voor het oplossen van wiskundige problemen."
|
822
|
+
},
|
823
|
+
"qwen2.5-math-72b-instruct": {
|
824
|
+
"description": "Het Qwen-Math model heeft krachtige capaciteiten voor het oplossen van wiskundige problemen."
|
825
|
+
},
|
826
|
+
"qwen2.5-math-7b-instruct": {
|
827
|
+
"description": "Het Qwen-Math model heeft krachtige capaciteiten voor het oplossen van wiskundige problemen."
|
828
|
+
},
|
701
829
|
"qwen2:0.5b": {
|
702
830
|
"description": "Qwen2 is Alibaba's nieuwe generatie grootschalig taalmodel, ondersteunt diverse toepassingsbehoeften met uitstekende prestaties."
|
703
831
|
},
|
package/locales/nl-NL/providers.json
CHANGED
@@ -1,4 +1,5 @@
 {
+  "ai21": {},
   "ai360": {
     "description": "360 AI is een AI-model- en serviceplatform gelanceerd door het bedrijf 360, dat verschillende geavanceerde modellen voor natuurlijke taalverwerking biedt, waaronder 360GPT2 Pro, 360GPT Pro, 360GPT Turbo en 360GPT Turbo Responsibility 8K. Deze modellen combineren grootschalige parameters en multimodale capaciteiten, en worden breed toegepast in tekstgeneratie, semantisch begrip, dialoogsystemen en codegeneratie. Met flexibele prijsstrategieën voldoet 360 AI aan diverse gebruikersbehoeften, ondersteunt het ontwikkelaars bij integratie en bevordert het de innovatie en ontwikkeling van intelligente toepassingen."
   },
@@ -20,6 +21,9 @@
   "fireworksai": {
     "description": "Fireworks AI is een toonaangevende aanbieder van geavanceerde taalmodellen, met een focus op functionele aanroepen en multimodale verwerking. Hun nieuwste model Firefunction V2 is gebaseerd op Llama-3 en geoptimaliseerd voor functieaanroepen, dialogen en het volgen van instructies. Het visuele taalmodel FireLLaVA-13B ondersteunt gemengde invoer van afbeeldingen en tekst. Andere opmerkelijke modellen zijn de Llama-serie en de Mixtral-serie, die efficiënte ondersteuning bieden voor meertalig volgen van instructies en genereren."
   },
+  "github": {
+    "description": "Met GitHub-modellen kunnen ontwikkelaars AI-ingenieurs worden en bouwen met de toonaangevende AI-modellen in de industrie."
+  },
   "google": {
     "description": "De Gemini-serie van Google is hun meest geavanceerde, algemene AI-modellen, ontwikkeld door Google DeepMind, speciaal ontworpen voor multimodale toepassingen, en ondersteunt naadloze begrip en verwerking van tekst, code, afbeeldingen, audio en video. Geschikt voor verschillende omgevingen, van datacenters tot mobiele apparaten, verhoogt het de efficiëntie en toepasbaarheid van AI-modellen aanzienlijk."
   },
package/locales/pl-PL/providers.json
CHANGED
@@ -1,4 +1,5 @@
 {
+  "ai21": {},
   "ai360": {
     "description": "360 AI to platforma modeli i usług AI wprowadzona przez firmę 360, oferująca różnorodne zaawansowane modele przetwarzania języka naturalnego, w tym 360GPT2 Pro, 360GPT Pro, 360GPT Turbo i 360GPT Turbo Responsibility 8K. Modele te łączą dużą liczbę parametrów z multimodalnymi zdolnościami, szeroko stosowanymi w generowaniu tekstu, rozumieniu semantycznym, systemach dialogowych i generowaniu kodu. Dzięki elastycznej strategii cenowej, 360 AI zaspokaja zróżnicowane potrzeby użytkowników, wspierając integrację przez deweloperów, co przyczynia się do innowacji i rozwoju aplikacji inteligentnych."
   },
@@ -20,6 +21,9 @@
   "fireworksai": {
     "description": "Fireworks AI to wiodący dostawca zaawansowanych modeli językowych, skoncentrowany na wywołaniach funkcji i przetwarzaniu multimodalnym. Jego najnowszy model Firefunction V2 oparty na Llama-3, zoptymalizowany do wywołań funkcji, dialogów i przestrzegania instrukcji. Model wizualny FireLLaVA-13B wspiera mieszane wejścia obrazów i tekstu. Inne znaczące modele to seria Llama i seria Mixtral, oferujące efektywne wsparcie dla wielojęzycznego przestrzegania instrukcji i generacji."
   },
+  "github": {
+    "description": "Dzięki modelom GitHub, deweloperzy mogą stać się inżynierami AI i budować z wykorzystaniem wiodących modeli AI w branży."
+  },
   "google": {
     "description": "Seria Gemini od Google to najnowocześniejsze, uniwersalne modele AI stworzone przez Google DeepMind, zaprojektowane z myślą o multimodalności, wspierające bezproblemowe rozumienie i przetwarzanie tekstu, kodu, obrazów, dźwięku i wideo. Nadają się do różnych środowisk, od centrów danych po urządzenia mobilne, znacznie zwiększając wydajność i wszechstronność modeli AI."
   },
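Both providers.json additions above share one shape: a provider id mapped to an object with an optional `description` (left empty for `ai21` until a translation lands). A minimal TypeScript sketch of reading such a locale map, falling back to the provider id when no description exists yet, follows; the `ProviderLocale` type and `describeProvider` helper are illustrative names, not identifiers from the LobeChat codebase.

```ts
// Illustrative only — the type and helper below are not part of the LobeChat source.
type ProviderLocale = Record<string, { description?: string }>;

// Entries mirror the nl-NL additions in this diff.
const nlProviders: ProviderLocale = {
  ai21: {},
  github: {
    description:
      'Met GitHub-modellen kunnen ontwikkelaars AI-ingenieurs worden en bouwen met de toonaangevende AI-modellen in de industrie.',
  },
};

// Fall back to the raw provider id while a locale entry is still empty.
const describeProvider = (locale: ProviderLocale, id: string): string =>
  locale[id]?.description ?? id;

console.log(describeProvider(nlProviders, 'ai21')); // "ai21"
console.log(describeProvider(nlProviders, 'github')); // localized description
```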
package/package.json
CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@lobehub/chat",
-  "version": "1.19.15",
+  "version": "1.19.16",
   "description": "Lobe Chat - an open-source, high-performance chatbot framework that supports speech synthesis, multimodal, and extensible Function Call plugin system. Supports one-click free deployment of your private ChatGPT/LLM web application.",
   "keywords": [
     "framework",
package/src/libs/unstructured/__tests__/fixtures/table-parse/auto-partition-basic-output.json
CHANGED
@@ -12,14 +12,27 @@
|
|
12
12
|
}
|
13
13
|
},
|
14
14
|
{
|
15
|
-
"type": "Table",
|
16
15
|
"element_id": "6ac9e7aa-6618-4cae-8e78-a4ec6fd73cf5",
|
17
16
|
"metadata": {
|
17
|
+
"coordinates": {
|
18
|
+
"layout_height": 2205,
|
19
|
+
"layout_width": 1654,
|
20
|
+
"points": [
|
21
|
+
[67.7, 232.3],
|
22
|
+
[67.7, 1326.8],
|
23
|
+
[1541.1, 1326.8],
|
24
|
+
[1541.1, 232.3]
|
25
|
+
],
|
26
|
+
"system": "PixelSpace"
|
27
|
+
},
|
28
|
+
"detection_class_prob": 0.95674,
|
29
|
+
"filetype": "PPM",
|
18
30
|
"languages": ["eng"],
|
19
31
|
"page_number": 1,
|
20
32
|
"text_as_html": "<table><thead><tr><th></th><th colspan=\"3\">Male</th><th colspan=\"3\">Female</th><th colspan=\"2\">Overall</th><th rowspan=\"2\">p-value</th></tr><tr><th></th><th>MS subjects</th><th>Control \nsubjects</th><th></th><th>p-value MS subjects Control \nsubjects</th><th></th><th>p-value</th><th>MS \nsubjects</th><th>Control \nsubjects</th></tr></thead><tbody><tr><td>N</td><td>3685</td><td>21,931</td><td></td><td>8566</td><td>50,640</td><td></td><td>12,251</td><td>72,572</td><td></td></tr><tr><td>Follow-up time (years)</td><td>9.9 (6.1)</td><td>11.4(6.5)</td><td><0.001</td><td>10.4 (6.3)</td><td>11.5(6.5)</td><td><0.001</td><td>10.3(6.3)</td><td>11.5(6.5)</td><td><0.001</td></tr><tr><td>Female (%)</td><td></td><td></td><td></td><td></td><td></td><td></td><td>69.9</td><td>69.8</td><td>0.752</td></tr><tr><td>Age (years)</td><td>46.3 (13.3)</td><td>46.3(13.3)</td><td>0.852</td><td>44.3(13.3)</td><td>44.3(13.3)</td><td>0.907</td><td>44.9(13.3)</td><td>44.9(13.3)</td><td>0.727</td></tr><tr><td>Ethnicity — white (%)</td><td>923</td><td>93.5</td><td>0.013</td><td>915</td><td>94.1</td><td><0.001</td><td>93.9</td><td>91.2</td><td><0.00</td></tr><tr><td colspan=\"10\">Smoking status (%)</td></tr><tr><td>Non-smoker</td><td>415</td><td>53.8</td><td></td><td>49.5</td><td>60.0</td><td></td><td>47.1</td><td>58.1</td><td></td></tr><tr><td>Ex-smoker</td><td>17.5</td><td>14.7</td><td><0.001</td><td>13.9</td><td>11.6</td><td><0.001</td><td>15</td><td>12.5</td><td><0.00</td></tr><tr><td>Current smoker</td><td>41.1</td><td>31.6</td><td></td><td>36.6</td><td>28.5</td><td></td><td>379</td><td>294</td><td></td></tr><tr><td>eFI ratio</td><td>0.02 (0.04)</td><td>0.01 (0.03)</td><td><0.001</td><td>0.03 (0.04)</td><td>0.02 (0.04)</td><td><0.001</td><td>0.03 (0.04)</td><td>0.02 (0.04)</td><td><0.00</td></tr><tr><td>Fit</td><td>97.2</td><td>98.3</td><td></td><td>95.8</td><td>97.1</td><td></td><td>96.2</td><td>97.4</td><td></td></tr><tr><td>Mid frailty</td><td>2.8</td><td>1.6</td><td></td><td>4.0</td><td>2.8</td><td></td><td>3.7</td><td>2.5</td><td></td></tr><tr><td>Moderate frailty</td><td>0.0</td><td>0.0</td><td><0.001</td><td>0.2</td><td>0.1</td><td><0.001</td><td>0.1</td><td>0.1</td><td><0.00</td></tr><tr><td>Severe frailty</td><td>0.0</td><td>0.0</td><td></td><td>0.0</td><td>0.0</td><td></td><td>0.0</td><td>0.0</td><td></td></tr><tr><td>Number of primary care visits in previous year</td><td>6.9 (10.3)</td><td>22(5.0)</td><td><0.001</td><td>82(11.5)</td><td>32(6.1)</td><td><0.001</td><td>78(112)</td><td>29(5.9)</td><td><0.00</td></tr><tr><td colspan=\"10\">Index of multiple deprivation (IMD; %)</td></tr><tr><td>1Q — least deprived</td><td>13.7</td><td>13.7</td><td></td><td>14.6</td><td>14.6</td><td></td><td>144</td><td>144</td><td></td></tr><tr><td>2Q</td><td>18.5</td><td>18.4</td><td></td><td>18.5</td><td>18.5</td><td></td><td>18.5</td><td>18.5</td><td></td></tr><tr><td>3Q</td><td>17.6</td><td>17.6</td><td></td><td>17.9</td><td>17.9</td><td></td><td>17.8</td><td>17.8</td><td></td></tr><tr><td>4Q</td><td>20.2</td><td>203</td><td>1.000</td><td>18.8</td><td>18.8</td><td>1.000</td><td>19.2</td><td>19.2</td><td>1.000</td></tr><tr><td>5Q — most deprived</td><td>20.6</td><td>20.6</td><td></td><td>20.3</td><td>20.3</td><td></td><td>204</td><td>204</td><td></td></tr><tr><td>Missing data</td><td>9.3</td><td>9.4</td><td></td><td>9.8</td><td>9.9</td><td></td><td>9.7</td><td>9.7</td><td></td></tr></tbody></table>"
|
21
33
|
},
|
22
|
-
"text": "Male Female Overall MS subjects Control subjects p-value MS subjects Control subjects p-value MS subjects Control subjects p-value N Follow-up time (years) Female (%) Age (years) Ethnicity – white (%) Smoking status (%) Non-smoker Ex-smoker Current smoker eFI ratio Fit Mid frailty Moderate frailty Severe frailty Number of primary care visits in previous year Index of multiple deprivation (IMD; %) 3685 9.9 (6.1) 46.3 (13.3) 92.3 41.5 17.5 41.1 0.02 (0.04) 97.2 2.8 0.0 0.0 6.9 (10.3) 21,931 11.4 (6.5) <0.001 46.3 (13.3) 93.5 0.852 0.013 53.8 14.7 31.6 0.01 (0.03) <0.001 98.3 1.6 0.0 0.0 2.2 (5.0) <0.001 <0.001 <0.001 8566 10.4 (6.3) 44.3 (13.3) 91.5 49.5 13.9 36.6 0.03 (0.04) 95.8 4.0 0.2 0.0 8.2 (11.5) 50,640 11.5 (6.5) <0.001 44.3 (13.3) 94.1 0.907 <0.001 60.0 11.6 28.5 0.02 (0.04) <0.001 97.1 2.8 0.1 0.0 3.2 (6.1) <0.001 <0.001 <0.001 12,251 10.3 (6.3) 69.9 44.9 (13.3) 93.9 47.1 15 37.9 0.03 (0.04) 96.2 3.7 0.1 0.0 7.8 (11.2) 72,572 11.5 (6.5) <0.001 69.8 44.9 (13.3) 91.2 0.752 0.727 <0.001 58.1 12.5 29.4 0.02 (0.04) <0.001 97.4 2.5 0.1 0.0 2.9 (5.9) <0.001 <0.001 <0.001 1Q – least deprived 13.7 13.7 14.6 14.6 14.4 14.4 2Q 3Q 4Q 5Q – most deprived Missing data 18.5 17.6 20.2 20.6 9.3 18.4 17.6 20.3 20.6 9.4 1.000 18.5 17.9 18.8 20.3 9.8 18.5 17.9 18.8 20.3 9.9 1.000 18.5 17.8 19.2 20.4 9.7 18.5 17.8 19.2 20.4 9.7 1.000"
|
34
|
+
"text": "Male Female Overall MS subjects Control subjects p-value MS subjects Control subjects p-value MS subjects Control subjects p-value N Follow-up time (years) Female (%) Age (years) Ethnicity – white (%) Smoking status (%) Non-smoker Ex-smoker Current smoker eFI ratio Fit Mid frailty Moderate frailty Severe frailty Number of primary care visits in previous year Index of multiple deprivation (IMD; %) 3685 9.9 (6.1) 46.3 (13.3) 92.3 41.5 17.5 41.1 0.02 (0.04) 97.2 2.8 0.0 0.0 6.9 (10.3) 21,931 11.4 (6.5) <0.001 46.3 (13.3) 93.5 0.852 0.013 53.8 14.7 31.6 0.01 (0.03) <0.001 98.3 1.6 0.0 0.0 2.2 (5.0) <0.001 <0.001 <0.001 8566 10.4 (6.3) 44.3 (13.3) 91.5 49.5 13.9 36.6 0.03 (0.04) 95.8 4.0 0.2 0.0 8.2 (11.5) 50,640 11.5 (6.5) <0.001 44.3 (13.3) 94.1 0.907 <0.001 60.0 11.6 28.5 0.02 (0.04) <0.001 97.1 2.8 0.1 0.0 3.2 (6.1) <0.001 <0.001 <0.001 12,251 10.3 (6.3) 69.9 44.9 (13.3) 93.9 47.1 15 37.9 0.03 (0.04) 96.2 3.7 0.1 0.0 7.8 (11.2) 72,572 11.5 (6.5) <0.001 69.8 44.9 (13.3) 91.2 0.752 0.727 <0.001 58.1 12.5 29.4 0.02 (0.04) <0.001 97.4 2.5 0.1 0.0 2.9 (5.9) <0.001 <0.001 <0.001 1Q – least deprived 13.7 13.7 14.6 14.6 14.4 14.4 2Q 3Q 4Q 5Q – most deprived Missing data 18.5 17.6 20.2 20.6 9.3 18.4 17.6 20.3 20.6 9.4 1.000 18.5 17.9 18.8 20.3 9.8 18.5 17.9 18.8 20.3 9.9 1.000 18.5 17.8 19.2 20.4 9.7 18.5 17.8 19.2 20.4 9.7 1.000",
|
35
|
+
"type": "Table"
|
23
36
|
},
|
24
37
|
{
|
25
38
|
"type": "CompositeElement",
|
@@ -31,39 +44,6 @@
|
|
31
44
|
"page_number": 1,
|
32
45
|
"filename": "table-parse.pdf"
|
33
46
|
}
|
34
|
-
},
|
35
|
-
{
|
36
|
-
"type": "CompositeElement",
|
37
|
-
"element_id": "875d46a9-6106-40d2-b891-b803d0bb87ac",
|
38
|
-
"text": "Hyperlipidaemia. Treatment with lipid-lowering medications was lower in PwMS, as compared with matched controls (PR = 0.63, 95% CI = 0.54, 0.74). This was particularly pronounced for men (women: PR = 0.71, 95% CI = 0.59, 0.87; men: PR = 0.41, 95% CI = 0.37, 0.62).\n\nDifferences in risk factor severity\n\nAs compared with matched controls, after adjustment, PwMS had a 0.4-mm Hg lower systolic blood",
|
39
|
-
"metadata": {
|
40
|
-
"filetype": "application/pdf",
|
41
|
-
"languages": ["eng"],
|
42
|
-
"page_number": 1,
|
43
|
-
"filename": "table-parse.pdf"
|
44
|
-
}
|
45
|
-
},
|
46
|
-
{
|
47
|
-
"type": "CompositeElement",
|
48
|
-
"element_id": "2990794c-e50d-4ed2-b3b3-b76a2fc76693",
|
49
|
-
"text": "pressure at the index year. The magnitude was greater for men than women, considering that men had almost a 3-mm Hg lower blood pressure than matched con- trols (overall: coeff. = −0.37, 95% CI = −0.60, −0.13; women: −0.54, 95% CI = −0.96, −0.12; men: −2.81, 95% CI = −3.84, −1.77). The differences were greater when restricting analyses to only those with a diagno- sis of hypertension at baseline, as PwMS had a 3.3- mm Hg lower systolic blood pressure then matched controls (coeff. = −3.27, 95% CI = −5.04, −1.50); dif- ferences were confirmed in women but not in men (women: coeff. = −2.56, 95% CI = −3.84, −1.27; men: −0.27, 95% CI = 0.01, −2.56).",
|
50
|
-
"metadata": {
|
51
|
-
"filetype": "application/pdf",
|
52
|
-
"languages": ["eng"],
|
53
|
-
"page_number": 1,
|
54
|
-
"filename": "table-parse.pdf"
|
55
|
-
}
|
56
|
-
},
|
57
|
-
{
|
58
|
-
"type": "CompositeElement",
|
59
|
-
"element_id": "9e6198b1-08c2-4e33-91af-8a3361e02925",
|
60
|
-
"text": "In contrast, PwMS had higher levels of diastolic blood pressure at baseline, as compared with matched controls (coeff. = 0.29, 95% CI = 0.14, 0.43). However,\n\n674\n\njournals.sagepub.com/home/msj",
|
61
|
-
"metadata": {
|
62
|
-
"filetype": "application/pdf",
|
63
|
-
"languages": ["eng"],
|
64
|
-
"page_number": 1,
|
65
|
-
"filename": "table-parse.pdf"
|
66
|
-
}
|
67
47
|
}
|
68
48
|
],
|
69
49
|
"originElements": [
|
@@ -114,7 +94,6 @@
|
|
114
94
|
"compositeId": "32855cf6-5605-4a8e-97a3-3ac0b509b725"
|
115
95
|
},
|
116
96
|
{
|
117
|
-
"type": "Table",
|
118
97
|
"element_id": "6ac9e7aa-6618-4cae-8e78-a4ec6fd73cf5",
|
119
98
|
"metadata": {
|
120
99
|
"coordinates": {
|
@@ -134,7 +113,8 @@
|
|
134
113
|
"page_number": 1,
|
135
114
|
"text_as_html": "<table><thead><tr><th></th><th colspan=\"3\">Male</th><th colspan=\"3\">Female</th><th colspan=\"2\">Overall</th><th rowspan=\"2\">p-value</th></tr><tr><th></th><th>MS subjects</th><th>Control \nsubjects</th><th></th><th>p-value MS subjects Control \nsubjects</th><th></th><th>p-value</th><th>MS \nsubjects</th><th>Control \nsubjects</th></tr></thead><tbody><tr><td>N</td><td>3685</td><td>21,931</td><td></td><td>8566</td><td>50,640</td><td></td><td>12,251</td><td>72,572</td><td></td></tr><tr><td>Follow-up time (years)</td><td>9.9 (6.1)</td><td>11.4(6.5)</td><td><0.001</td><td>10.4 (6.3)</td><td>11.5(6.5)</td><td><0.001</td><td>10.3(6.3)</td><td>11.5(6.5)</td><td><0.001</td></tr><tr><td>Female (%)</td><td></td><td></td><td></td><td></td><td></td><td></td><td>69.9</td><td>69.8</td><td>0.752</td></tr><tr><td>Age (years)</td><td>46.3 (13.3)</td><td>46.3(13.3)</td><td>0.852</td><td>44.3(13.3)</td><td>44.3(13.3)</td><td>0.907</td><td>44.9(13.3)</td><td>44.9(13.3)</td><td>0.727</td></tr><tr><td>Ethnicity — white (%)</td><td>923</td><td>93.5</td><td>0.013</td><td>915</td><td>94.1</td><td><0.001</td><td>93.9</td><td>91.2</td><td><0.00</td></tr><tr><td colspan=\"10\">Smoking status (%)</td></tr><tr><td>Non-smoker</td><td>415</td><td>53.8</td><td></td><td>49.5</td><td>60.0</td><td></td><td>47.1</td><td>58.1</td><td></td></tr><tr><td>Ex-smoker</td><td>17.5</td><td>14.7</td><td><0.001</td><td>13.9</td><td>11.6</td><td><0.001</td><td>15</td><td>12.5</td><td><0.00</td></tr><tr><td>Current smoker</td><td>41.1</td><td>31.6</td><td></td><td>36.6</td><td>28.5</td><td></td><td>379</td><td>294</td><td></td></tr><tr><td>eFI ratio</td><td>0.02 (0.04)</td><td>0.01 (0.03)</td><td><0.001</td><td>0.03 (0.04)</td><td>0.02 (0.04)</td><td><0.001</td><td>0.03 (0.04)</td><td>0.02 (0.04)</td><td><0.00</td></tr><tr><td>Fit</td><td>97.2</td><td>98.3</td><td></td><td>95.8</td><td>97.1</td><td></td><td>96.2</td><td>97.4</td><td></td></tr><tr><td>Mid frailty</td><td>2.8</td><td>1.6</td><td></td><td>4.0</td><td>2.8</td><td></td><td>3.7</td><td>2.5</td><td></td></tr><tr><td>Moderate frailty</td><td>0.0</td><td>0.0</td><td><0.001</td><td>0.2</td><td>0.1</td><td><0.001</td><td>0.1</td><td>0.1</td><td><0.00</td></tr><tr><td>Severe frailty</td><td>0.0</td><td>0.0</td><td></td><td>0.0</td><td>0.0</td><td></td><td>0.0</td><td>0.0</td><td></td></tr><tr><td>Number of primary care visits in previous year</td><td>6.9 (10.3)</td><td>22(5.0)</td><td><0.001</td><td>82(11.5)</td><td>32(6.1)</td><td><0.001</td><td>78(112)</td><td>29(5.9)</td><td><0.00</td></tr><tr><td colspan=\"10\">Index of multiple deprivation (IMD; %)</td></tr><tr><td>1Q — least deprived</td><td>13.7</td><td>13.7</td><td></td><td>14.6</td><td>14.6</td><td></td><td>144</td><td>144</td><td></td></tr><tr><td>2Q</td><td>18.5</td><td>18.4</td><td></td><td>18.5</td><td>18.5</td><td></td><td>18.5</td><td>18.5</td><td></td></tr><tr><td>3Q</td><td>17.6</td><td>17.6</td><td></td><td>17.9</td><td>17.9</td><td></td><td>17.8</td><td>17.8</td><td></td></tr><tr><td>4Q</td><td>20.2</td><td>203</td><td>1.000</td><td>18.8</td><td>18.8</td><td>1.000</td><td>19.2</td><td>19.2</td><td>1.000</td></tr><tr><td>5Q — most deprived</td><td>20.6</td><td>20.6</td><td></td><td>20.3</td><td>20.3</td><td></td><td>204</td><td>204</td><td></td></tr><tr><td>Missing data</td><td>9.3</td><td>9.4</td><td></td><td>9.8</td><td>9.9</td><td></td><td>9.7</td><td>9.7</td><td></td></tr></tbody></table>"
|
136
115
|
},
|
137
|
-
"text": "Male Female Overall MS subjects Control subjects p-value MS subjects Control subjects p-value MS subjects Control subjects p-value N Follow-up time (years) Female (%) Age (years) Ethnicity – white (%) Smoking status (%) Non-smoker Ex-smoker Current smoker eFI ratio Fit Mid frailty Moderate frailty Severe frailty Number of primary care visits in previous year Index of multiple deprivation (IMD; %) 3685 9.9 (6.1) 46.3 (13.3) 92.3 41.5 17.5 41.1 0.02 (0.04) 97.2 2.8 0.0 0.0 6.9 (10.3) 21,931 11.4 (6.5) <0.001 46.3 (13.3) 93.5 0.852 0.013 53.8 14.7 31.6 0.01 (0.03) <0.001 98.3 1.6 0.0 0.0 2.2 (5.0) <0.001 <0.001 <0.001 8566 10.4 (6.3) 44.3 (13.3) 91.5 49.5 13.9 36.6 0.03 (0.04) 95.8 4.0 0.2 0.0 8.2 (11.5) 50,640 11.5 (6.5) <0.001 44.3 (13.3) 94.1 0.907 <0.001 60.0 11.6 28.5 0.02 (0.04) <0.001 97.1 2.8 0.1 0.0 3.2 (6.1) <0.001 <0.001 <0.001 12,251 10.3 (6.3) 69.9 44.9 (13.3) 93.9 47.1 15 37.9 0.03 (0.04) 96.2 3.7 0.1 0.0 7.8 (11.2) 72,572 11.5 (6.5) <0.001 69.8 44.9 (13.3) 91.2 0.752 0.727 <0.001 58.1 12.5 29.4 0.02 (0.04) <0.001 97.4 2.5 0.1 0.0 2.9 (5.9) <0.001 <0.001 <0.001 1Q – least deprived 13.7 13.7 14.6 14.6 14.4 14.4 2Q 3Q 4Q 5Q – most deprived Missing data 18.5 17.6 20.2 20.6 9.3 18.4 17.6 20.3 20.6 9.4 1.000 18.5 17.9 18.8 20.3 9.8 18.5 17.9 18.8 20.3 9.9 1.000 18.5 17.8 19.2 20.4 9.7 18.5 17.8 19.2 20.4 9.7 1.000"
|
116
|
+
"text": "Male Female Overall MS subjects Control subjects p-value MS subjects Control subjects p-value MS subjects Control subjects p-value N Follow-up time (years) Female (%) Age (years) Ethnicity – white (%) Smoking status (%) Non-smoker Ex-smoker Current smoker eFI ratio Fit Mid frailty Moderate frailty Severe frailty Number of primary care visits in previous year Index of multiple deprivation (IMD; %) 3685 9.9 (6.1) 46.3 (13.3) 92.3 41.5 17.5 41.1 0.02 (0.04) 97.2 2.8 0.0 0.0 6.9 (10.3) 21,931 11.4 (6.5) <0.001 46.3 (13.3) 93.5 0.852 0.013 53.8 14.7 31.6 0.01 (0.03) <0.001 98.3 1.6 0.0 0.0 2.2 (5.0) <0.001 <0.001 <0.001 8566 10.4 (6.3) 44.3 (13.3) 91.5 49.5 13.9 36.6 0.03 (0.04) 95.8 4.0 0.2 0.0 8.2 (11.5) 50,640 11.5 (6.5) <0.001 44.3 (13.3) 94.1 0.907 <0.001 60.0 11.6 28.5 0.02 (0.04) <0.001 97.1 2.8 0.1 0.0 3.2 (6.1) <0.001 <0.001 <0.001 12,251 10.3 (6.3) 69.9 44.9 (13.3) 93.9 47.1 15 37.9 0.03 (0.04) 96.2 3.7 0.1 0.0 7.8 (11.2) 72,572 11.5 (6.5) <0.001 69.8 44.9 (13.3) 91.2 0.752 0.727 <0.001 58.1 12.5 29.4 0.02 (0.04) <0.001 97.4 2.5 0.1 0.0 2.9 (5.9) <0.001 <0.001 <0.001 1Q – least deprived 13.7 13.7 14.6 14.6 14.4 14.4 2Q 3Q 4Q 5Q – most deprived Missing data 18.5 17.6 20.2 20.6 9.3 18.4 17.6 20.3 20.6 9.4 1.000 18.5 17.9 18.8 20.3 9.8 18.5 17.9 18.8 20.3 9.9 1.000 18.5 17.8 19.2 20.4 9.7 18.5 17.8 19.2 20.4 9.7 1.000",
|
117
|
+
"type": "Table"
|
138
118
|
},
|
139
119
|
{
|
140
120
|
"element_id": "d9f1985b-d5b5-462d-b7db-7c3554eb3d03",
|
@@ -181,167 +161,6 @@
|
|
181
161
|
"text": "was even lower in men (PR = 0.27, 95% CI = 0.18, 0.39) than in women (PR = 0.38, 95% CI = 0.30, 0.49; Figure 1), but the difference was not statistically sig- nificant (p = 0.097).",
|
182
162
|
"type": "NarrativeText",
|
183
163
|
"compositeId": "499fc9c7-95f4-412c-87e2-10fa1a2e950e"
|
184
|
-
},
|
185
|
-
{
|
186
|
-
"element_id": "a32d209f-a092-42c3-b6a5-3816df43bfdb",
|
187
|
-
"metadata": {
|
188
|
-
"coordinates": {
|
189
|
-
"layout_height": 2205,
|
190
|
-
"layout_width": 1654,
|
191
|
-
"points": [
|
192
|
-
[310.4, 1659.6],
|
193
|
-
[310.4, 1858.1],
|
194
|
-
[915.5, 1858.1],
|
195
|
-
[915.5, 1659.6]
|
196
|
-
],
|
197
|
-
"system": "PixelSpace"
|
198
|
-
},
|
199
|
-
"detection_class_prob": 0.94907,
|
200
|
-
"filetype": "PPM",
|
201
|
-
"languages": ["eng"],
|
202
|
-
"page_number": 1
|
203
|
-
},
|
204
|
-
"text": "Hyperlipidaemia. Treatment with lipid-lowering medications was lower in PwMS, as compared with matched controls (PR = 0.63, 95% CI = 0.54, 0.74). This was particularly pronounced for men (women: PR = 0.71, 95% CI = 0.59, 0.87; men: PR = 0.41, 95% CI = 0.37, 0.62).",
|
205
|
-
"type": "NarrativeText",
|
206
|
-
"compositeId": "875d46a9-6106-40d2-b891-b803d0bb87ac"
|
207
|
-
},
|
208
|
-
{
|
209
|
-
"element_id": "7a0f58ed-97e3-4822-a8f2-1e39d48321fa",
|
210
|
-
"metadata": {
|
211
|
-
"coordinates": {
|
212
|
-
"layout_height": 2205,
|
213
|
-
"layout_width": 1654,
|
214
|
-
"points": [
|
215
|
-
[311.8, 1930.6],
|
216
|
-
[311.8, 1964.6],
|
217
|
-
[690.9, 1964.6],
|
218
|
-
[690.9, 1930.6]
|
219
|
-
],
|
220
|
-
"system": "PixelSpace"
|
221
|
-
},
|
222
|
-
"detection_class_prob": 0.628,
|
223
|
-
"filetype": "PPM",
|
224
|
-
"languages": ["eng"],
|
225
|
-
"page_number": 1
|
226
|
-
},
|
227
|
-
"text": "Differences in risk factor severity",
|
228
|
-
"type": "NarrativeText",
|
229
|
-
"compositeId": "875d46a9-6106-40d2-b891-b803d0bb87ac"
|
230
|
-
},
|
231
|
-
{
|
232
|
-
"element_id": "3b4b747e-3609-4697-adbe-e66431aad148",
|
233
|
-
"metadata": {
|
234
|
-
"coordinates": {
|
235
|
-
"layout_height": 2205,
|
236
|
-
"layout_width": 1654,
|
237
|
-
"points": [
|
238
|
-
[304.5, 1973.3],
|
239
|
-
[304.5, 2040.1],
|
240
|
-
[915.5, 2040.1],
|
241
|
-
[915.5, 1973.3]
|
242
|
-
],
|
243
|
-
"system": "PixelSpace"
|
244
|
-
},
|
245
|
-
"detection_class_prob": 0.90552,
|
246
|
-
"filetype": "PPM",
|
247
|
-
"languages": ["eng"],
|
248
|
-
"page_number": 1
|
249
|
-
},
|
250
|
-
"text": "As compared with matched controls, after adjustment, PwMS had a 0.4-mm Hg lower systolic blood",
|
251
|
-
"type": "NarrativeText",
|
252
|
-
"compositeId": "875d46a9-6106-40d2-b891-b803d0bb87ac"
|
253
|
-
},
|
254
|
-
{
|
255
|
-
"element_id": "47d380de-794f-4205-a182-43bdb12d2578",
|
256
|
-
"metadata": {
|
257
|
-
"coordinates": {
|
258
|
-
"layout_height": 2205,
|
259
|
-
"layout_width": 1654,
|
260
|
-
"points": [
|
261
|
-
[941.5, 1483.4],
|
262
|
-
[941.5, 1913.8],
|
263
|
-
[1542.4, 1913.8],
|
264
|
-
[1542.4, 1483.4]
|
265
|
-
],
|
266
|
-
"system": "PixelSpace"
|
267
|
-
},
|
268
|
-
"detection_class_prob": 0.9414,
|
269
|
-
"filetype": "PPM",
|
270
|
-
"languages": ["eng"],
|
271
|
-
"page_number": 1
|
272
|
-
},
|
273
|
-
"text": "pressure at the index year. The magnitude was greater for men than women, considering that men had almost a 3-mm Hg lower blood pressure than matched con- trols (overall: coeff. = −0.37, 95% CI = −0.60, −0.13; women: −0.54, 95% CI = −0.96, −0.12; men: −2.81, 95% CI = −3.84, −1.77). The differences were greater when restricting analyses to only those with a diagno- sis of hypertension at baseline, as PwMS had a 3.3- mm Hg lower systolic blood pressure then matched controls (coeff. = −3.27, 95% CI = −5.04, −1.50); dif- ferences were confirmed in women but not in men (women: coeff. = −2.56, 95% CI = −3.84, −1.27; men: −0.27, 95% CI = 0.01, −2.56).",
|
274
|
-
"type": "NarrativeText",
|
275
|
-
"compositeId": "2990794c-e50d-4ed2-b3b3-b76a2fc76693"
|
276
|
-
},
|
277
|
-
{
|
278
|
-
"element_id": "ea18e521-02e5-4217-b89b-3e63a54a6d7e",
|
279
|
-
"metadata": {
|
280
|
-
"coordinates": {
|
281
|
-
"layout_height": 2205,
|
282
|
-
"layout_width": 1654,
|
283
|
-
"points": [
|
284
|
-
[936.9, 1938.4],
|
285
|
-
[936.9, 2038.1],
|
286
|
-
[1542.4, 2038.1],
|
287
|
-
[1542.4, 1938.4]
|
288
|
-
],
|
289
|
-
"system": "PixelSpace"
|
290
|
-
},
|
291
|
-
"detection_class_prob": 0.94049,
|
292
|
-
"filetype": "PPM",
|
293
|
-
"languages": ["eng"],
|
294
|
-
"page_number": 1
|
295
|
-
},
|
296
|
-
"text": "In contrast, PwMS had higher levels of diastolic blood pressure at baseline, as compared with matched controls (coeff. = 0.29, 95% CI = 0.14, 0.43). However,",
|
297
|
-
"type": "NarrativeText",
|
298
|
-
"compositeId": "9e6198b1-08c2-4e33-91af-8a3361e02925"
|
299
|
-
},
|
300
|
-
{
|
301
|
-
"element_id": "267417f1-2475-4c77-8ea4-e61527222bf5",
|
302
|
-
"metadata": {
|
303
|
-
"coordinates": {
|
304
|
-
"layout_height": 2205,
|
305
|
-
"layout_width": 1654,
|
306
|
-
"points": [
|
307
|
-
[69.1, 2094.9],
|
308
|
-
[69.1, 2115.9],
|
309
|
-
[102.5, 2115.9],
|
310
|
-
[102.5, 2094.9]
|
311
|
-
],
|
312
|
-
"system": "PixelSpace"
|
313
|
-
},
|
314
|
-
"detection_class_prob": 0.76658,
|
315
|
-
"filetype": "PPM",
|
316
|
-
"languages": ["eng"],
|
317
|
-
"page_number": 1
|
318
|
-
},
|
319
|
-
"text": "674",
|
320
|
-
"type": "PageNumber",
|
321
|
-
"compositeId": "9e6198b1-08c2-4e33-91af-8a3361e02925"
|
322
|
-
},
|
323
|
-
{
|
324
|
-
"element_id": "2831f8d1-5844-4c65-ab90-d22421928a30",
|
325
|
-
"metadata": {
|
326
|
-
"coordinates": {
|
327
|
-
"layout_height": 2205,
|
328
|
-
"layout_width": 1654,
|
329
|
-
"points": [
|
330
|
-
[1297.9, 2094.9],
|
331
|
-
[1297.9, 2117.9],
|
332
|
-
[1538.4, 2117.9],
|
333
|
-
[1538.4, 2094.9]
|
334
|
-
],
|
335
|
-
"system": "PixelSpace"
|
336
|
-
},
|
337
|
-
"detection_class_prob": 0.8409,
|
338
|
-
"filetype": "PPM",
|
339
|
-
"languages": ["eng"],
|
340
|
-
"page_number": 1
|
341
|
-
},
|
342
|
-
"text": "journals.sagepub.com/home/msj",
|
343
|
-
"type": "Footer",
|
344
|
-
"compositeId": "9e6198b1-08c2-4e33-91af-8a3361e02925"
|
345
164
|
}
|
346
165
|
]
|
347
166
|
}
|
package/src/libs/unstructured/__tests__/fixtures/table-parse/auto-partition-basic-raw.json
CHANGED
@@ -24,62 +24,6 @@
|
|
24
24
|
"filename": "table-parse.pdf"
|
25
25
|
}
|
26
26
|
},
|
27
|
-
{
|
28
|
-
"type": "Table",
|
29
|
-
"element_id": "6863ab6a-557c-408e-8511-a0412fc562c9",
|
30
|
-
"text": "97.1 2.8 0.1 0.0 3.2 (6.1) <0.001 <0.001 <0.001 12,251 10.3 (6.3) 69.9 44.9 (13.3) 93.9 47.1 15 37.9 0.03 (0.04) 96.2 3.7 0.1 0.0 7.8 (11.2) 72,572 11.5 (6.5) <0.001 69.8 44.9 (13.3) 91.2 0.752 0.727 <0.001 58.1 12.5 29.4 0.02 (0.04) <0.001 97.4 2.5 0.1 0.0 2.9 (5.9) <0.001 <0.001 <0.001 1Q – least deprived 13.7 13.7 14.6 14.6 14.4 14.4 2Q 3Q 4Q 5Q – most deprived Missing data 18.5 17.6 20.2 20.6 9.3 18.4 17.6 20.3 20.6 9.4 1.000 18.5 17.9 18.8 20.3 9.8 18.5 17.9 18.8 20.3 9.9 1.000 18.5 17.8 19.2 20.4 9.7 18.5 17.8 19.2 20.4 9.7 1.000",
|
31
|
-
"metadata": {
|
32
|
-
"text_as_html": "tr><tr><td>Age (years)</td><td>46.3 (13.3)</td><td>46.3(13.3)</td><td>0.852</td><td>44.3(13.3)</td><td>44.3(13.3)</td><td>0.907</td><td>44.9(13.3)</td><td>44.9(13.3)</td><td>0.727</td></tr><tr><td>Ethnicity — white (%)</td><td>923</td><td>93.5</td><td>0.013</td><td>915</td><td>94.1</td><td><0.001</td><td>93.9</td><td>91.2</td><td><0.00</td></tr><tr><td colspan=\"10\">Smoking status (%)</td></tr><tr><td>Non-smoker</td><td>415</td><td>53.8</td><td></td><td>49.5</td><td>60.0</td><td></td><td>47.1</td><td>58.1</td><td></td></tr><tr><td>Ex-smoker</td><td>17.5</td><td>14.7</td><td><0.001</td><td>13.9</td><td>11.6</td><td><0.001</td><td>15</td><td>12.5</td><td><0.00</td></tr><tr><td>Current smoker</td><td>41.1</td><td>31.6</td><td></td><td>36.6</td><td>28.5</td><td></td><td>379</td><",
|
33
|
-
"filetype": "application/pdf",
|
34
|
-
"languages": ["eng"],
|
35
|
-
"page_number": 1,
|
36
|
-
"orig_elements": "eJytGGtv4zbsrxABNuSAxrXk964LMNx2wH1od0X3rT0ETqxrvDl2YCttg8P995FKYtOxkrXbPtQVKVJ8iOIj999GqlArVepZno1+gtFcqiD25+kknKts4seuO4nnaTjxF2G6iDIvDoQaXcBopXSapTpFnm+jRVXVWV6mWjUGLtJttdGzpcoflxoxUroB8uzRz3mml4gVYeAjdl3lpSa++/swcqILkJ50vC8XsAeFJ0MnJlgEvnCEDbNjQcyo2TZarciSz/mLKu7W6UKNvuNGprRa6LwqZ4sibZrZuq7mSOY6SRBGpMfXvFB6u1aG9/P1yChcPm7SR2PV/UiVjyMSsUbMrNys5qomKxCj1Yuepc1sqVcFsV/pdF6o6ZVeqjTDfzUtp1eX9NFLWFRFs07Lnx9G3sNoep0W6sTWR7Wyb0rc/P1J1WlRtLt19cx215OntNjseS+NBn0tptd30Gzmf6JTmhb3oSp1XRXwUA622sX+ZGD88AY+Lt9CfuokY8LlwaHzKtvuLcqmN4jPzMoL46AFpLhIPNGC7SIOwrAFAvci9N0hkZAXMuiYI3kRRPKIrPNqNv1YFUX1PNmsQecrBeOtSuvmXcuQOAmMQ0d0GCEcHzFBh/mx0O9dx3U7qcJ1fGLzemzBK9i8N3L1TDFBB+Mf3g3d8qZFiFZzIG4B14kCOZT8y+PQcz4aAmPhcXMId4RynTjo7sf3BwQWFD59N+IEyZDnGIWay2io+W96WeaLXG/hYSNd4cPzMtd9HybS69aeE7AjXcG2RLeT+I44dWO7UxLG58gB7UBRlkKEi1niblX9lZeP0OhUb5pOX27bTVVOGqRTdecYpmTgsYvtKBJmYYi6WEgiZl0QO8dPteffl2MVRMQECN+JzjhKcEfhWwjP0bJTJRNx0qXTD5u6xvoJAx8xizwulKUrhpUxk9aRRJ3mMvHPuEh9/AR1igWOB5aEMX59Hr+uMDjvXAahfQvn4LT/zmlJP7nugjpiQZ3Ejjd0UBKw8EuiQRQRNuSnRM45L17nGXyt07zQ287vTIL1Hn0W3tL2GjwWnnJwzT35VYZVHVPHsRL8CfG17QokW59LIHzXRjlU705h0/F65f4vgl46Mo0XVF9hXeertN7CIkWVnvImx0YkLxGtnvIKsxlVki4HUQWmytjFoJTjwHHPRXMsx1Q6OxJP9qv4kCWKkUUyIQkKSV4R+Mep+VOZqRcyc7UpdL7GipwptPiJnngJ40/Xv74HW7oWt4caVKi00XsulfFsGNkBnk9DO8CwvnU9VEjedoQ8xyFwzGYhseTEfyLhwj0mPOJGRVajIl4oOMCxsR0YCvc74ZK/Sul2iUxgLLjcmvgE0KdL2HF9oKPjqgRtVKwqW1CgfqEd4FjPDjCsb13b8mzTUONBI2SXm9mZiS04eA+Z2G4nYdGcDCJ7P0PshofL3YxmZkOa32huo1EM9v3vfryyjjkt4tws9O+IbsA+SkDXlgPrk+Go8/S6zhOGzR10vRy0LRX0GxhoGwnAUgysIMJxcYJ+OYC3pWZ4XYYDGuqgHaCADQSQ4NQP2GgFQO0grQSwbgOofwCsyIQ0f10VgN14CDSHgZmN4Mpk8r4AbNTBzBZg2nSgbheo3QRq6YC1Uwd2alNgv2f+JOpgCs2Bov+PxlHoJj2gQaUVT6ZRKw3UwgL1i8A6LKDmB3wjxygIMckyJQt2ky3ZFxzZxwX4xmM4Bx12qVcnphCoIe1582AgNll7pxpvg0dCzd1YDdyN0mSitzeRBkNSI2FuRgSdKwLwMO/2rcT+DYVErUDMusZK+Q52g7nFSpo3+zKE8VFkrhKHuAMhTR5ArT5IzDknDPZBGmeI/Y0mYKr6CYNvD2+xX4HpEqP9B2tq+/F3H3kL3i34txC0/L1cDTxnAtU9inq8J7p8Stn4SDzC+y3eO+ARRXWh5UpoFe9IyFF2fHLEhXTJTpaPu9FpPHHR72eHn9P+oFQ7+v7lbwgiwu8=",
|
37
|
-
"is_continuation": true,
|
38
|
-
"filename": "table-parse.pdf"
|
39
|
-
}
|
40
|
-
},
|
41
|
-
{
|
42
|
-
"type": "Table",
|
43
|
-
"element_id": "e8e6fa12-9b5d-43d6-9886-597c00112158",
|
44
|
-
"text": "",
|
45
|
-
"metadata": {
|
46
|
-
"text_as_html": "td>294</td><td></td></tr><tr><td>eFI ratio</td><td>0.02 (0.04)</td><td>0.01 (0.03)</td><td><0.001</td><td>0.03 (0.04)</td><td>0.02 (0.04)</td><td><0.001</td><td>0.03 (0.04)</td><td>0.02 (0.04)</td><td><0.00</td></tr><tr><td>Fit</td><td>97.2</td><td>98.3</td><td></td><td>95.8</td><td>97.1</td><td></td><td>96.2</td><td>97.4</td><td></td></tr><tr><td>Mid frailty</td><td>2.8</td><td>1.6</td><td></td><td>4.0</td><td>2.8</td><td></td><td>3.7</td><td>2.5</td><td></td></tr><tr><td>Moderate frailty</td><td>0.0</td><td>0.0</td><td><0.001</td><td>0.2</td><td>0.1</td><td><0.001</td><td>0.1</td><td>0.1</td><td><0.00</td></tr><tr><td>Severe frailty</td><td>0.0</td><td>0.0</td><td></td><td>0.0</td><td>0.0</td><td></td><td>0.0</td><td>0.0</td><td></td></tr><tr><td>Number of primary care ",
|
47
|
-
"filetype": "application/pdf",
|
48
|
-
"languages": ["eng"],
|
49
|
-
"page_number": 1,
|
50
|
-
"orig_elements": "eJytGGtv4zbsrxABNuSAxrXk964LMNx2wH3o7YruW3sInFhtvDl2YCttg8P995FKYtOxkrXbPtQVKVJ8iOIjd99GqlArVepZno1+gtE8mT8sFg/uRGZxMPGVTCep+yAmiVwsUj/KFjL2RxcwWimdZqlOkefbaFFVdZaXqVaNgYt0W230bKnyx6VGjJRugDx79HOe6SViRRj4iF1XeamJ7+4ujJzoAqQnHe/rBexB4cnQiQkWgS8cYcPsWBAzaraNViuy5Ev+oorbdbpQo++4kSmtFjqvytmiSJtmtq6rOZK5ThKEEenxkBdKb9fK8H65HhmFy8dN+misuhup8nFEItaImZWb1VzVZAVitHrRs7SZLfWqIPYrnc4LNb3SS5Vm+K+m5fTqkj56CYuqaNZp+fP9yLsfTa/TQp3Y+qhW9k2Jm78/qTotina3rp7Z7nrylBabPe+l0aCvxfT6FprN/E90StPiPlSlrqsC7svBVrvYnwyMH97Ax+VbyE+dZEy4PDh0XmXbvUXZ9DPiM7PywjhoASkuEk+0YLuIgzBsgcC9CH13SCTkhQw65kheBJE8Iuu8mk0/VkVRPU82a9D5SsF4q9K6edcyJE4C49ARHUYIx0dM0GF+LPR713HdTqpwHZ/YvB5b8Ao2741cPVNM0MH4h3dDt7xpEaLVHIhbwHWiQA4l//I49JyPhsBYeNwcwh2hXCcOuvvx/QGBBYVP3404QTLkOUah5jIaav6bXpb5ItdbuN9IV/jwvMx134eJ9Lq15wTsSFewLdHtJL4jTt3Y7pSE8TlyQDtQlKUQ4WKWuF1Vf+XlIzQ61Zum05fb9rkqJw3SqbpzDFMy8NjFdhQJszBEXSwkEbMuiJ3jp9rz78uxCiJiAoTvRGccJbij8C2E52jZqZKJOOnS6YdNXWP9hIGPmEUeF8rSFcPKmEnrSKJOc5n4Z1ykPn6COsUCxwNLwhi/Po9fVxicdy6D0L6Fc3Daf+e0pJ9cd0EdsaBOYscbOigJWPgl0SCKCBvyUyLnnBev8wwe6jQv9LbzO5NgvUefhbe0vQaPhaccXHNPfpVhVcfUcawEf0J8bbsCydbnEgjftVEO1btV2HS8Xrn/i6CXjkzjBdUDrOt8ldZbWKSo0lPe5NiI5CWi1VNeYTajStLlIKrAVBm7GJRyHDjuuWiO5ZhKZ0fiyX4VH7JEMbJIJiRBIckrAv84NX8qM/VCZq42hc7XWJEzhRY/0RMvYfzp+tf3YEvX4uZQgwqVNnrPpTKeDSM7wPNpaAcY1reuhwrJm46Q5zgEjtksJJac+E8kXLjHhEfcqMhqVMQLBQc4NrYDQ+F+J1zyVyndLpEJjAWXWxOfAPp0CTuuD3R0XJWgjYpVZQsK1C+0Axzr2QGG9a1rW55tGmo8aITscjM7M7EFB+8hE9vtJCyak0Fk72eI3fBwuZvRzGxI8xvNbTSKwb7/3Y9X1jGnRZybhf4d0WewjxLQteXA+mQ46jy9rvOEYXMHXS8HbUsF/QYG2kYCsBQDK4hwXJygXw7gbakZXpfhgIY6aAcoYAMBJDj1AzZaAVA7SCsBrNsA6h8AKzIhzV9XBWA3HgLNYWBmI7gymbwvABt1MLMFmDYdqNsFajeBWjpg7dSBndoU2O+ZP4k6mEJzoOj/o3EUukkPaFBpxZNp1EoDtbBA/SKwDguo+QHfyDEKQkyyTMmC3WRL9gVH9nEBvvEYzkGHXerViSkEakh73jwYiE3W3qnG2+CRUHM3VgN3ozSZ6O1NpMGQ1EiYmxFB54oAPMy7fSuxf0MhUSsQs66xUr6D3WBusZLmzb4MYXwUmavEIe5ASJMHUKsPEnPOCYN9kMYZYn+jCZiqfsLgm8Nb7FdgusRo/8Ga2n783UfegHcD/g0ELX8vVwPPmUB1j6Ie74kun1I2PhKP8H6L9w54RFFdaLkSWsU7EnKUHZ8ccSFdspPl4250Gk9c9PvZ4ee0PyjVjr5//RtD9sNK",
|
51
|
-
"is_continuation": true,
|
52
|
-
"filename": "table-parse.pdf"
|
53
|
-
}
|
54
|
-
},
|
55
|
-
{
|
56
|
-
"type": "Table",
|
57
|
-
"element_id": "9d93c6f7-aecb-4773-ae20-587989b448f8",
|
58
|
-
"text": "",
|
59
|
-
"metadata": {
|
60
|
-
"text_as_html": "visits in previous year</td><td>6.9 (10.3)</td><td>22(5.0)</td><td><0.001</td><td>82(11.5)</td><td>32(6.1)</td><td><0.001</td><td>78(112)</td><td>29(5.9)</td><td><0.00</td></tr><tr><td colspan=\"10\">Index of multiple deprivation (IMD; %)</td></tr><tr><td>1Q — least deprived</td><td>13.7</td><td>13.7</td><td></td><td>14.6</td><td>14.6</td><td></td><td>144</td><td>144</td><td></td></tr><tr><td>2Q</td><td>18.5</td><td>18.4</td><td></td><td>18.5</td><td>18.5</td><td></td><td>18.5</td><td>18.5</td><td></td></tr><tr><td>3Q</td><td>17.6</td><td>17.6</td><td></td><td>17.9</td><td>17.9</td><td></td><td>17.8</td><td>17.8</td><td></td></tr><tr><td>4Q</td><td>20.2</td><td>203</td><td>1.000</td><td>18.8</td><td>18.8</td><td>1.000</td><td>19.2</td><td>19.2</td><td>1.000</td></tr><tr><td>5Q — mos",
|
61
|
-
"filetype": "application/pdf",
|
62
|
-
"languages": ["eng"],
|
63
|
-
"page_number": 1,
|
64
|
-
"orig_elements": "eJytGGtv4zbsrxABNuSA1rUlv7TrCgy3HXAferui+9YeCjdWG2+OHdhK2+Bw/32kkth0rGTttg91RYoUH6L4yM23iS71QlfmrsgnP8HEf/BV/JBHp0rO5GkYSnV6nwWz0+xepomMhZbZbHICk4U2WZ6ZDHm+TWZ13eRFlRndWrjM1vXK3M118Tg3iBHCj5Bni34ucjNHbBBHIWKXdVEZ4ru5iRMvOQEhhSe/nsAWDKSIvZTgIAoDL3BhNiyImbTr1ugFWfKleNHl9TKb6cl33Mi10TNT1NXdrMza9m7Z1PdI5nsqihPS46EotVkvteX9cjmxClePq+zRWnUz0dXjhEQsEXNXrRb3uiErEGP0i7nL2ru5WZTEfm6y+1JfnJu5znL819Dy4vyMPmYOs7psl1n18+1E3k4uLrNSH9j6qBfuTYGbvz/pJivLbrepn9nu8vQpK1db3jOrwVCLi8traFf3f6JT2g73oa5MU5dwW422usX2ZGD88AY+Lt9Bfugka8LZzqH3db7eWpRffEZ8blcyTqMOEMGJkkEHdos0iuMOiPyTOPTHRIE4EVHPnIiTKBF7ZL1X84uPdVnWz6erJZhioWG61lnTvusYlKdgGntBjwkCL0RM1GN+LM173/P9XmrgeyGxyQFb9Ao2+UaugSk26GD6w7uxW960iNFqDqQd4HtJJMaSf3kcey5EQ2AaSG4O4fZQvpdG/f2E4YjAgcKn7yecQI159lGouUjGmv9m5lUxK8wablfCD0J4nhdm6EMlZL+WXsSO9AO2FfQ7KvSCQze2OUUxPk+MaEeKshQS+Jglrhf1X0X1CK3JzKrt9eW2fa6r0xbpdNM7hikZSXaxPYViFsaoi4MkYdZFqbf/VAf+fdlXIUiYgCD0kiOOCrij8C3Ex2jZqYKJOOjSiw+rpsH6CSMfMYskF8rSFcOKlEnrSZJec6HCIy7SHz9Bk2GB44ElYIrfkMevH1icPJZBaN/BOTrtv3M60k9h+qBOWFCr1JNjB6mIhZ9KRlFE2JifknjHvHhZ5PDQZEVp1r3fmQTnPYYsvIXrNUgWnmJ0zQP5dY5VHVPHvhL8CfG16woEWx9LIHzXRTlW71pj0/F65f4vgkE6so0X1A+wbIpF1qxhlqFKT0VbYCNSVIjWT0WN2YwqSZ+DqAJTZexjUIhp5PnHojkVUyqdPYkUwyo+ZklSZBFMiEIh6hWBv5+aP1W5fiEzF6vSFEusyLlGi5/oiVcw/XT563twpevgaleDSp21Zsulc54NEzfA82nsBhg2dK7HComrnpDnOAT22Rwkjpz4TyRcuGTCE25U4jQq4YWCAxybuoGx8LAXLvirFH6fyAKMBZ9bkx4AhnSKHTcEejquStRFxaJ2BQXqF7sBjpVugGFD59qVZ9uWGg8aIfvczM5UruDgPaRy3Y5i0axGkb2dITbDw9lmRrOzIc1vNLfRKAbb/nc7XjnHnA5xbBb6d0SfwT1KQN+WA+uTYa/zlH3nCePmDvpeDrqWCoYNDHSNBGApBlYQYb84wbAcwNtSM7wuwwENddANUMAGAlA49QM2WhFQO0irAFi3AdQ/AFZkQtq/vgrAZjwEmsPAzkZwbjP5UAA26mBnC7BtOlC3C9RuArV0wNqpHTu1KbDds38CdbCFZkcx/EfjKPSTHtCg0okn06iVBmphgfpFYB0WUPMDoZVjFYSUZNmSBZvJluyL9uzjAkLrMZyDdrvUqxNTDNSQDry5MxCbrK1TrbdBklB7N04DN6M0mSi3JtJgSGoo5mZE0LlBBBLz7tBK7N9QSNIJxKxrrRTvYDOYO6ykeXMoI7A+SuxV4hC3I6TJA6jVB4E554DBIQjrjGB7owpsVT9g8NXuLQ4rMF1isv1gTe0+4eYjrkBeQXgFUcc/yNXAcyZQ3aOox3uiy6eUjY9EEj7s8HKHRxTVhY5L0SrdkJCj3Hi1x4V0aiMrxN3kMJ646Pez3c9pf1CqnXz/+jdlk8KP",
|
65
|
-
"is_continuation": true,
|
66
|
-
"filename": "table-parse.pdf"
|
67
|
-
}
|
68
|
-
},
|
69
|
-
{
|
70
|
-
"type": "Table",
|
71
|
-
"element_id": "21843169-a334-4356-9ee1-53c0d98c3366",
|
72
|
-
"text": "",
|
73
|
-
"metadata": {
|
74
|
-
"text_as_html": "t deprived</td><td>20.6</td><td>20.6</td><td></td><td>20.3</td><td>20.3</td><td></td><td>204</td><td>204</td><td></td></tr><tr><td>Missing data</td><td>9.3</td><td>9.4</td><td></td><td>9.8</td><td>9.9</td><td></td><td>9.7</td><td>9.7</td><td></td></tr></tbody></table>",
|
75
|
-
"filetype": "application/pdf",
|
76
|
-
"languages": ["eng"],
|
77
|
-
"page_number": 1,
|
78
|
-
"orig_elements": "eJytGGtv4zbsrxABNuSA1rUkP3ddgeG2A+5Db1d039pD4cZq482xA1tpGxzuv49UEpuOlazd9qGuSJHiQxQfufk20aVe6MrcFfnkJ5gIPbufyYf7UzFLotMgDfzTTKnwNJspEYd5EooHNTmByUKbLM9MhjzfJrO6bvKiyoxuLVxm63pl7ua6eJwbxEjph8izRT8XuZkjVkRhgNhlXVSG+G5uotiLT0Aq6amvJ7AFhZKRlxAswkB4woXZsCBm0q5boxdkyZfiRZfXy2ymJ99xI9dGz0xRV3ezMmvbu2VT3yOZ76VhFJMeD0WpzXqpLe+Xy4lVuHpcZY/WqpuJrh4nJGKJmLtqtbjXDVmBGKNfzF3W3s3NoiT2c5Pdl/ri3Mx1luO/hpYX52f0MXOY1WW7zKqfbyfqdnJxmZX6wNZHvXBvStz8/Uk3WVl2u039zHaXp09ZudrynlkNhlpcXF5Du7r/E53SdrgPdWWauoTbarTVLbYnA+OHN/Bx+Q7yQydZE852Dr2v8/XWovziM+Jzu1JREnaAFCepEh3YLZIwijog9E+iwB8TCXkiw545lidhLPfIeq/mFx/rsqyfT1dLMMVCw3Sts6Z91zGkXgrTyBM9RggvQEzYY34szXvf8/1eqvC9gNjUgC18BZt6I9fAFBt0MP3h3dgtb1pEaDUHkg7wvTiUY8m/PI49F6AhMBWKm0O4PZTvJWF/P0EwInCg8On7MSdIxzz7KNRcxmPNfzPzqpgVZg23K+mLAJ7nhRn6MJWqXysvZEf6gm2JficNPHHoxjanpIzPkyPakaIshQgfs8T1ov6rqB6hNZlZtb2+3LbPdXXaIp1uescwJUPFLranSJmFEeriIImZdWHi7T/VgX9f9lUQMRMgAi8+4ijBHYVvITpGy06VTMRBl158WDUN1k8Y+YhZpLhQlq4YViZMWk8S95rLNDjiIv3xEzQZFjgeWBKm+A14/PrC4tSxDEL7Ds7Raf+d05F+CtMHdcyCOk08NXZQGrLwS+NRFBE24qfE3jEvXhY5PDRZUZp173cmwXmPAQtv6XoNioWnHF3zQH6dY1XH1LGvBH9CfO26AsnWxxII33VRjtW71th0vF65/4tgkI5s4wX1AyybYpE1a5hlqNJT0RbYiBQVovVTUWM2o0rS5yCqwFQZ+xiUchp6/rFoTuSUSmdPouSwio9Z4gRZJBOSopD0FYG/n5o/Vbl+ITMXq9IUS6zIuUaLn+iJVzD9dPnre3Cla3G1q0Glzlqz5dI5z4axG+D5NHIDDBs412OF5FVPyHMcAvtsDhJHTvwnEi5cMeExNyp2GhXzQsEBjk3cwFh40AuX/FVKv09kAmPB59YkB4AhXcqOGwI9HVcl7KJiUbuCAvWL3ADHKjfAsIFz7cqzbUuNB42QfW5mZ6au4OA9ZOq6nZRFczqK7O0MsRkezjYzmp0NaX6juY1GMdj2v9vxyjnmdIhjs9C/I/oM7lEC+rYcWJ8Me52n6jtPGDd30Pdy0LVUMGxgoGskAEsxsIII+8UJhuUA3paa4XUZDmiog26AAjYQQIpTP2CjFQK1g7QSwLoNoP4BsCIT0v71VQA24yHQHAZ2NoJzm8mHArBRBztbgG3TgbpdoHYTqKUD1k7t2KlNge2e/ZOogy00O4rhPxpHoZ/0gAaVTjyZRq00UAsL1C8C67CAmh8IrByrICQky5Ys2Ey2ZF+4Zx8XEFiP4Ry026VenZgioIZ04M2dgdhkbZ1qvQ2KhNq7cRq4GaXJRLU1kQZDUiNlbkYEnStCUJh3h1Zi/4ZC4k4gZl1rpXwHm8HcYSXNm0MZwvootleJQ9yOkCYPoFYfJOacAwYHIK0zxPZGU7BV/YDBV7u3OKzAdInx9oM1tfsEm4+8AnUFwRWEHf8gVwPPmUB1j6Ie74kun1I2PhJF+KDDqx0eUVQXOq6UVsmGhBzlxqd7XEiXbmQFuBsfxhMX/X62+zntD0q1k+9f/wbjZsKI",
|
79
|
-
"is_continuation": true,
|
80
|
-
"filename": "table-parse.pdf"
|
81
|
-
}
|
82
|
-
},
|
83
27
|
{
|
84
28
|
"type": "CompositeElement",
|
85
29
|
"element_id": "499fc9c7-95f4-412c-87e2-10fa1a2e950e",
|
@@ -91,41 +35,5 @@
|
|
91
35
|
"orig_elements": "eJy1k19r2zAUxb/KxTBoITb+b2thTx2FMDbKOthDVoJsXTtiiuRZctJQ9t135aZdN0oZY4M8REfnSPfq/ry+C1DhDrXbSBG8hkCwLmF10YSiaIowL1MRNpVowqrNiiLHJhNxFiwg2KHjgjtOmbugNWYUUnOHdl4rfjST22xR9ltHSprGBWVO8kEKtyU1KYuc1MFI7Xxuva5YVC4gybI8Km4W8LDOkypifp0USR3Vzyr3GZICe7QOd76XK3mL6nrgLQbfaUOgw9ZJozet4tZuhtE0ZIsjFhepr6+TCt1xwDl79T6YS9b9xPu5r3WAug/8FQMpGz3tGhx9H/5wh7e+02ClhdxLMXFl4YAjwnyX7CQK4BY66RYgO3BbBLxcgW0NeQ6006AyByomSZewk0qoI3Qjl+rR/tTqDoh6NgPXgv6kOaWMwJFm8EfJND8ls3IJFvdU6gs5b5v9vDF7jOCt7DpK6BZ/ntmPZhpOTVPLSD8BuBuUOUrdw8VWhvbbxGmXMKFXuHaTIO6+TGmcMAsu9PJ8ib/73TjZr1zNu9lnrpS0pxyVwwea3TBKajbyU3oY2gc+0gPIPX7y06Cx/E53nRPenDVhwzkP81S0IU+aOmRpmdZtmVdp2fw3urOYRamHt66ixMP7IJTEcOkFlhRR8ZxwH/lLuvOUsX9Et2eBWNFArOIIUgM9LpxdfYQ3nqlqAax4BRereZnQd0ngsHMaKNfefDBP7Vn9iz2LvT1nS7iU/USYJOcLaCY34yAegZt51MaBdTRq62RLcBzByj4ETd9Zy7WDs2E+MmbV+UuA3PwAHmiH2Q==",
|
92
36
|
"filename": "table-parse.pdf"
|
93
37
|
}
|
94
|
-
},
|
95
|
-
{
|
96
|
-
"type": "CompositeElement",
|
97
|
-
"element_id": "875d46a9-6106-40d2-b891-b803d0bb87ac",
|
98
|
-
"text": "Hyperlipidaemia. Treatment with lipid-lowering medications was lower in PwMS, as compared with matched controls (PR = 0.63, 95% CI = 0.54, 0.74). This was particularly pronounced for men (women: PR = 0.71, 95% CI = 0.59, 0.87; men: PR = 0.41, 95% CI = 0.37, 0.62).\n\nDifferences in risk factor severity\n\nAs compared with matched controls, after adjustment, PwMS had a 0.4-mm Hg lower systolic blood",
|
99
|
-
"metadata": {
|
100
|
-
"filetype": "application/pdf",
|
101
|
-
"languages": ["eng"],
|
102
|
-
"page_number": 1,
|
103
|
-
"orig_elements": "eJy1VE1r3DAQ/SvCUEhgbWRL/lBKD6U9pIeUpcltCcvYGq/V2paxtdksof+9I++mdNvQ0pCcjN7MG+vNvNHqIcAWO+zd2ujgggUgEp1wVYfAVRLKpBJhmUEaiiLOdC1FWesyWLCgQwcaHBDnIaisHbXpweE0n1vY261bN2g2jSMkSXhKnCO8M9o1hMZZKgkdrOmd561WIuYRQRRQUXa7YD+BIi2i2AMqTqP0KeBAISSY9pPDzmtZmntsrweoMPhOAY0OK2dsv65amKb1MNqS0nikpOI5JdSmRbcfcOYur4L5yv1mC5tZ1yrAfhP4XwyErPttV+LodfjiDu+90uCS+GNrBqMBOwMRuxkRnG8w2xnXsDkUtnaHo+k3rENtKvCXmtgOJjYHmOnZcnd1vWCEVLYbYER9oHfgqoYOle3daNuJnS2/sHekIRMLptI37MOn+eg7y6NcntMFGnOoTWWcqbYtjO2ekfjebvuKatV2pHv07Gxn6XPBjhXz+LSi8hWL/C37NUmeJoncJ2XJeeSb99jLzzCOpPEOb3yTqFu/my4HXqcF6lDlKEJZJEkIRZ2EMQqlZSGSuIZXNF0cFeQgJfij6Y5AJg9ApsgjTwIz5Xmmy5LihSz30dQ1jkiznLxzRjN9YzVUjsY64R35zO3/axqilGUucwxFxlUoM5WHoEsMMcukiAF0LIvXmwaX8z6rXERinsYBSLjkJxv/B3CkPPMJ4GmavNA83v9raWmva0drDvrrdprfhsW876wBzcBvVdh17HJzfA28GtuaipWttfovo7z9ATguvV4=",
|
104
|
-
"filename": "table-parse.pdf"
|
105
|
-
}
|
106
|
-
},
|
107
|
-
{
|
108
|
-
"type": "CompositeElement",
|
109
|
-
"element_id": "2990794c-e50d-4ed2-b3b3-b76a2fc76693",
|
110
|
-
"text": "pressure at the index year. The magnitude was greater for men than women, considering that men had almost a 3-mm Hg lower blood pressure than matched con- trols (overall: coeff. = −0.37, 95% CI = −0.60, −0.13; women: −0.54, 95% CI = −0.96, −0.12; men: −2.81, 95% CI = −3.84, −1.77). The differences were greater when restricting analyses to only those with a diagno- sis of hypertension at baseline, as PwMS had a 3.3- mm Hg lower systolic blood pressure then matched controls (coeff. = −3.27, 95% CI = −5.04, −1.50); dif- ferences were confirmed in women but not in men (women: coeff. = −2.56, 95% CI = −3.84, −1.27; men: −0.27, 95% CI = 0.01, −2.56).",
|
111
|
-
"metadata": {
|
112
|
-
"filetype": "application/pdf",
|
113
|
-
"languages": ["eng"],
|
114
|
-
"page_number": 1,
|
115
|
-
"orig_elements": "eJyFU11r4zAQ/CuL4aAFW9iynY+We7qXu4cehfYtV4JsrW2BLAVJbhpK//utkjQkzcG9SeOd3Z0ZefWeoMYRTVgrmdxBUs1lucglZvNl1WUVz+tMFAueVWUjm4JLXs8XSQrJiEFIEQRx3pPWWieVEQH9/q7Fzk5hPaDqh0AIpzbEOcJbJcNAaDGrK0I3VpkQeavVsioYFRbVomTVSwonYFmUbBGBoq44q/6JHEgEJX7nA45RzaN6Q/20ES0mH/RBYsA2KGvWrRberzfONlSWM5oTV+mUxrDb4J76+JDsdzb9JPq9sFWCpk/ihA0hazONDbooJPYO+BalJhuH3k8OQQQIA4IyEt9gh8IxeKb7KHqjwiQRtsJD75Bcc9BZB5QCMYSBraVjCq01Xkl0yvQRD/uCQUgQerQ+gIAyG0f42YO2W+rRaGslnObvW40itAPK2CuD4Kz2cGNf0Qmt7wjErmPwHf5MnBc8Z+U8hWX9DX78OgNneXo6F+X9Ybu7ExQzvOIsZ2ccfg9nDM4WxRWDsqw+GQWbz28PXknVdejQtOiBFOLJru1AVpDQ4BTlSf4II/TOU1mwYI3ekXrryWIVBvJJKjLdZuCVB9vBQBG7gOSuNTGmRnjUymAKlMjj9uHp4DKUrMzg3OH4sKxW7bXVeGH10egv/paMX/tbs/xMeZ3f3kfVGVzqpp6dciN1V8fnAc0UwNgQgXi9OcbyZSZn9ew/bvP5RT755ZY5y4v0rNkti3/F50/yWzgngnrF5/j6P17+AkclSxo=",
|
116
|
-
"filename": "table-parse.pdf"
|
117
|
-
}
|
118
|
-
},
|
119
|
-
{
|
120
|
-
"type": "CompositeElement",
|
121
|
-
"element_id": "9e6198b1-08c2-4e33-91af-8a3361e02925",
|
122
|
-
"text": "In contrast, PwMS had higher levels of diastolic blood pressure at baseline, as compared with matched controls (coeff. = 0.29, 95% CI = 0.14, 0.43). However,\n\n674\n\njournals.sagepub.com/home/msj",
|
123
|
-
"metadata": {
|
124
|
-
"filetype": "application/pdf",
|
125
|
-
"languages": ["eng"],
|
126
|
-
"page_number": 1,
|
127
|
-
"orig_elements": "eJy1k01r3DAQhv+KEBRasB1LlvxR6KlQmkPCQnpbwiJb47WCbRlLziaE/veOvLshlD21yXEez2j8vjOzfaHQwwCj3xlNvxIKipUgOYtTDjIWnBVxXVZ1nEGeKSlUrgugEaEDeKWVV1jzQhtrZ21G5cGtca+e7eJ3HZh955FwnkqsOeGD0b5DynIpkE7WjD7UbbdVlidVRFiVlYm4j8gZ8BQBC4BJwRNxiZyKEFH37DwMQc3GPEF/N6kG6G/8oMFD440dd02vnNtNs60xLU0qkQrsQ1vTg3+eYK3d3ND1p8f9ovarsi2FcU9DiwnJblyGGuagJDzu4SlopdcjaezoZ+V8RDaHmzvSKU06tAJm0sMj9I7YlmiDCbY3Dal7azWZZnBumYEoT2rloDcjREQ5fGyY1AyaHIzvyKB802GwtrD41OfGQtsm5BvK4Kihkp/I9+s1ZGhLmojsS0J+2gN2nqOg6CzwVs2z8uYRfoU/Rwl/7wLPC8GKlsVcFLgLTVHEJSgRQ84kLzjndSs/bBfyKmFhzpVIqjDnU8yYPMYs5Ym8BI4V/7YHRZ7L8p32AM176/YGM2+PiZesLjPWlprFshQCrc5lrOoqjTXneIMVL1WWfpjVjFfF8czOZr8SxooTkeG8LpL/MbwU6Xvd3YNd5lH1LnGYMy11godz1dkBrgb38HYSP6z1YQr3fwBhSXjP",
|
128
|
-
"filename": "table-parse.pdf"
|
129
|
-
}
|
130
38
|
}
|
131
39
|
]
|
@@ -117,10 +117,10 @@ describe('Unstructured', () => {
|
|
117
117
|
chunkingStrategy: ChunkingStrategy.Basic,
|
118
118
|
});
|
119
119
|
|
120
|
-
expect(result.compositeElements).toHaveLength(
|
121
|
-
expect(result.originElements).toHaveLength(
|
120
|
+
expect(result.compositeElements).toHaveLength(3);
|
121
|
+
expect(result.originElements).toHaveLength(5);
|
122
122
|
|
123
|
-
expect(result.
|
123
|
+
expect(result.compositeElements).toEqual(AutoWithChunkingOutput.compositeElements);
|
124
124
|
expect(result.originElements).toEqual(AutoWithChunkingOutput.originElements);
|
125
125
|
});
|
126
126
|
|
@@ -1,7 +1,9 @@
|
|
1
1
|
import { act, renderHook, waitFor } from '@testing-library/react';
|
2
|
+
import { major, minor } from 'semver';
|
2
3
|
import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest';
|
3
4
|
import { withSWR } from '~test-utils';
|
4
5
|
|
6
|
+
import { CURRENT_VERSION } from '@/const/version';
|
5
7
|
import { globalService } from '@/services/global';
|
6
8
|
import { useGlobalStore } from '@/store/global/index';
|
7
9
|
import { initialState } from '@/store/global/initialState';
|
@@ -157,6 +159,57 @@ describe('createPreferenceSlice', () => {
|
|
157
159
|
expect(useGlobalStore.getState().hasNewVersion).toBe(true);
|
158
160
|
expect(useGlobalStore.getState().latestVersion).toBe(latestVersion);
|
159
161
|
});
|
162
|
+
|
163
|
+
it('should set hasNewVersion to false if the version has the same minor', async () => {
|
164
|
+
const latestVersion = `${major(CURRENT_VERSION)}.${minor(CURRENT_VERSION)}.9999999`;
|
165
|
+
|
166
|
+
vi.spyOn(globalService, 'getLatestVersion').mockResolvedValueOnce(latestVersion);
|
167
|
+
|
168
|
+
const { result } = renderHook(() => useGlobalStore().useCheckLatestVersion(), {
|
169
|
+
wrapper: withSWR,
|
170
|
+
});
|
171
|
+
|
172
|
+
await waitFor(() => {
|
173
|
+
expect(result.current.data).toBe(latestVersion);
|
174
|
+
});
|
175
|
+
|
176
|
+
expect(useGlobalStore.getState().hasNewVersion).toBeUndefined();
|
177
|
+
expect(useGlobalStore.getState().latestVersion).toBeUndefined();
|
178
|
+
});
|
179
|
+
|
180
|
+
it('should set hasNewVersion to true if there is a new minor version', async () => {
|
181
|
+
const latestVersion = `${major(CURRENT_VERSION)}.${minor(CURRENT_VERSION) + 10}.0`;
|
182
|
+
|
183
|
+
vi.spyOn(globalService, 'getLatestVersion').mockResolvedValueOnce(latestVersion);
|
184
|
+
|
185
|
+
const { result } = renderHook(() => useGlobalStore().useCheckLatestVersion(), {
|
186
|
+
wrapper: withSWR,
|
187
|
+
});
|
188
|
+
|
189
|
+
await waitFor(() => {
|
190
|
+
expect(result.current.data).toBe(latestVersion);
|
191
|
+
});
|
192
|
+
|
193
|
+
expect(useGlobalStore.getState().hasNewVersion).toBe(true);
|
194
|
+
expect(useGlobalStore.getState().latestVersion).toBe(latestVersion);
|
195
|
+
});
|
196
|
+
|
197
|
+
it('should handle invalid latest version', async () => {
|
198
|
+
const latestVersion = 'invalid.version';
|
199
|
+
|
200
|
+
vi.spyOn(globalService, 'getLatestVersion').mockResolvedValueOnce(latestVersion);
|
201
|
+
|
202
|
+
const { result } = renderHook(() => useGlobalStore().useCheckLatestVersion(), {
|
203
|
+
wrapper: withSWR,
|
204
|
+
});
|
205
|
+
|
206
|
+
await waitFor(() => {
|
207
|
+
expect(result.current.data).toBe(latestVersion);
|
208
|
+
});
|
209
|
+
|
210
|
+
expect(useGlobalStore.getState().hasNewVersion).toBeUndefined();
|
211
|
+
expect(useGlobalStore.getState().latestVersion).toBeUndefined();
|
212
|
+
});
|
160
213
|
});
|
161
214
|
|
162
215
|
describe('useInitGlobalPreference', () => {
|
@@ -1,6 +1,6 @@
|
|
1
1
|
import isEqual from 'fast-deep-equal';
|
2
2
|
import { produce } from 'immer';
|
3
|
-
import { gt } from 'semver';
|
3
|
+
import { gt, parse, valid } from 'semver';
|
4
4
|
import useSWR, { SWRResponse } from 'swr';
|
5
5
|
import type { StateCreator } from 'zustand/vanilla';
|
6
6
|
|
@@ -94,8 +94,22 @@ export const globalActionSlice: StateCreator<
|
|
94
94
|
// check latest version every 30 minutes
|
95
95
|
focusThrottleInterval: 1000 * 60 * 30,
|
96
96
|
onSuccess: (data: string) => {
|
97
|
-
if (
|
97
|
+
if (!valid(CURRENT_VERSION) || !valid(data)) return;
|
98
|
+
|
99
|
+
// Parse versions to ensure we're working with valid SemVer objects
|
100
|
+
const currentVersion = parse(CURRENT_VERSION);
|
101
|
+
const latestVersion = parse(data);
|
102
|
+
|
103
|
+
if (!currentVersion || !latestVersion) return;
|
104
|
+
|
105
|
+
// only compare major and minor versions
|
106
|
+
// this avoids prompting users for frequent patch-only releases
|
107
|
+
const currentMajorMinor = `${currentVersion.major}.${currentVersion.minor}.0`;
|
108
|
+
const latestMajorMinor = `${latestVersion.major}.${latestVersion.minor}.0`;
|
109
|
+
|
110
|
+
if (gt(latestMajorMinor, currentMajorMinor)) {
|
98
111
|
set({ hasNewVersion: true, latestVersion: data }, false, n('checkLatestVersion'));
|
112
|
+
}
|
99
113
|
},
|
100
114
|
}),
|
101
115
|
|
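
For readers skimming the `action.ts` hunk above: the version check now compares only the major and minor components, so patch-only releases no longer trigger the "new version" prompt. Below is a minimal standalone sketch of that comparison using the same `semver` helpers the diff imports; the function name `hasNewMajorMinor` and the example version strings are illustrative only and are not part of the package.

```ts
import { gt, parse, valid } from 'semver';

/**
 * Returns true only when `latest` is ahead of `current` in its
 * major or minor component; patch-only bumps are ignored.
 */
const hasNewMajorMinor = (current: string, latest: string): boolean => {
  // bail out on anything that is not valid SemVer
  if (!valid(current) || !valid(latest)) return false;

  const currentVersion = parse(current);
  const latestVersion = parse(latest);
  if (!currentVersion || !latestVersion) return false;

  // normalize both sides to `major.minor.0` before comparing
  const currentMajorMinor = `${currentVersion.major}.${currentVersion.minor}.0`;
  const latestMajorMinor = `${latestVersion.major}.${latestVersion.minor}.0`;

  return gt(latestMajorMinor, currentMajorMinor);
};

// patch-only bump is ignored, minor bump is reported
console.log(hasNewMajorMinor('1.19.16', '1.19.99')); // false
console.log(hasNewMajorMinor('1.19.16', '1.20.0')); // true
```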