ezmedicationinput 0.1.16 → 0.1.17

package/README.md CHANGED
@@ -8,6 +8,7 @@
8
8
  - Emits timing abbreviations (`timing.code`) and repeat structures simultaneously where possible.
9
9
  - Maps meal/time blocks to the correct `Timing.repeat.when` **EventTiming** codes and can auto-expand AC/PC/C into specific meals.
10
10
  - Outputs SNOMED CT route codings (while providing friendly text) and round-trips known SNOMED routes back into the parser.
11
+ - Auto-codes common PRN (as-needed) reasons and additional dosage instructions while keeping the raw text when no coding is available.
11
12
  - Understands ocular and intravitreal shorthand (OD/OS/OU, LE/RE/BE, IVT*, VOD/VOS, etc.) and warns when intravitreal instructions omit an eye side.
12
13
  - Parses fractional/minute-based intervals (`q0.5h`, `q30 min`, `q1/4hr`) plus dose and timing ranges.
13
14
  - Supports extensible dictionaries for routes, units, frequency shorthands, and event timing tokens.
@@ -50,6 +51,59 @@ Example output:
50
51
  }
51
52
  ```
52
53
 
54
+ ### PRN reasons & additional instructions
55
+
56
+ `parseSig` identifies PRN (as-needed) clauses and trailing instructions, then
57
+ codes them with SNOMED CT whenever possible.
58
+
59
+ ```ts
60
+ const result = parseSig("1 tab po q4h prn headache; do not exceed 6 tabs/day");
61
+
62
+ result.fhir.asNeededFor;
63
+ // → [{
64
+ // text: "headache",
65
+ // coding: [{
66
+ // system: "http://snomed.info/sct",
67
+ // code: "25064002",
68
+ // display: "Headache"
69
+ // }]
70
+ // }]
71
+
72
+ result.fhir.additionalInstruction;
73
+ // → [{ text: "Do not exceed 6 tablets daily" }]
74
+ ```
75
+
76
+ Customize the dictionaries and lookups through `ParseOptions`:
77
+
78
+ ```ts
79
+ parseSig(input, {
80
+ prnReasonMap: {
81
+ migraine: {
82
+ text: "Migraine",
83
+ coding: {
84
+ system: "http://snomed.info/sct",
85
+ code: "37796009",
86
+ display: "Migraine"
87
+ }
88
+ }
89
+ },
90
+ prnReasonResolvers: async (request) => terminologyService.lookup(request),
91
+ prnReasonSuggestionResolvers: async (request) => terminologyService.suggest(request),
92
+ });
93
+ ```
94
+
95
+ Use `{reason}` in the sig string (e.g. `prn {migraine}`) to force a lookup even
96
+ when a direct match exists. Additional instructions are sourced from a built-in
97
+ set of SNOMED CT concepts under *419492006 – Additional dosage instructions* and
98
+ fall back to plain text when no coding is available. Parsed instructions are
99
+ also echoed in `ParseResult.meta.normalized.additionalInstructions` for quick UI
100
+ rendering.
101
+
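A minimal sketch of both behaviors, using only the fields named above (`meta.prnReasonLookups`, `meta.normalized.additionalInstructions`); the commented values are illustrative, not guaranteed output:

```ts
import { parseSig } from "ezmedicationinput";

// Braces force a probe lookup even though "migraine" has a direct match.
const probed = parseSig("1 tab po q12h prn {migraine}; take with food");

// The probe request is recorded so a terminology lookup can run later.
console.log(probed.meta.prnReasonLookups);

// Normalized instruction text for quick UI rendering, e.g. ["Take with food"].
console.log(probed.meta.normalized.additionalInstructions);
```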
102
+ When a PRN reason cannot be auto-resolved, any registered suggestion resolvers
103
+ are invoked and their responses are surfaced through
104
+ `ParseResult.meta.prnReasonLookups` so client applications can prompt the user
105
+ to choose a coded concept.
106
+
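A sketch of the consuming side, assuming each lookup entry echoes the request fields visible in the parser code further down in this diff (`text`, `isProbe`, `sourceText`, and so on):

```ts
import { parseSig } from "ezmedicationinput";

const result = parseSig("1 tab po q6h prn {stomach upset}");

// Assuming no built-in coding for this phrase, the clause stays as plain text
// and a lookup entry is surfaced for the UI to resolve.
for (const lookup of result.meta.prnReasonLookups ?? []) {
  // Seed a terminology picker with the captured phrase so the clinician
  // can choose a coded concept.
  console.log(lookup.text, lookup.isProbe);
}
```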
53
107
  ### Sig (directions) suggestions
54
108
 
55
109
  Use `suggestSig` to drive autocomplete experiences while the clinician is
@@ -126,6 +180,8 @@ result.fhir.site?.coding?.[0];
126
180
 
127
181
  When the parser encounters an unfamiliar site, it leaves the text untouched and records nothing in `meta.siteLookups`. Wrapping the phrase in braces (e.g. `apply to {mole on scalp}`) preserves the same parsing behavior but flags the entry as a **probe** so `meta.siteLookups` always contains the request. This allows UIs to display lookup widgets even before a matching code exists. Braces are optional when the site is already recognized—they simply make the clinician's intent explicit.
128
182
 
183
+ Unknown body sites still populate `Dosage.site.text` and `ParseResult.meta.normalized.site.text`, allowing UIs to echo the verbatim phrase while terminology lookups run asynchronously.
184
+
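A short sketch of reading those fields for an unrecognized site (values shown are illustrative):

```ts
import { parseSig } from "ezmedicationinput";

const unknownSite = parseSig("apply to {mole on scalp} bid");

unknownSite.fhir.site?.text;             // "mole on scalp" (verbatim phrase, no coding yet)
unknownSite.meta.normalized.site?.text;  // "mole on scalp"
unknownSite.meta.siteLookups;            // contains the probe request for async terminology lookup
```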
129
185
  You can extend or replace the built-in codings via `ParseOptions`:
130
186
 
131
187
  ```ts
package/dist/format.js CHANGED
@@ -69,6 +69,11 @@ const ROUTE_GRAMMAR = {
69
69
  routePhrase: ({ hasSite }) => (hasSite ? undefined : "into the eye"),
70
70
  sitePreposition: "into"
71
71
  },
72
+ [types_1.RouteCode["Per rectum"]]: {
73
+ verb: "Use",
74
+ routePhrase: ({ hasSite }) => (hasSite ? undefined : "rectally"),
75
+ sitePreposition: "into"
76
+ },
72
77
  [types_1.RouteCode["Topical route"]]: {
73
78
  verb: "Apply",
74
79
  routePhrase: ({ hasSite }) => (hasSite ? undefined : "topically"),
@@ -137,6 +142,9 @@ function grammarFromRouteText(text) {
137
142
  if (normalized.includes("intravenous") || normalized === "iv") {
138
143
  return ROUTE_GRAMMAR[types_1.RouteCode["Intravenous route"]];
139
144
  }
145
+ if (normalized.includes("rectal") || normalized.includes("rectum")) {
146
+ return ROUTE_GRAMMAR[types_1.RouteCode["Per rectum"]];
147
+ }
140
148
  if (normalized.includes("nasal")) {
141
149
  return ROUTE_GRAMMAR[types_1.RouteCode["Nasal route"]];
142
150
  }
@@ -412,6 +420,11 @@ function formatSite(internal, grammar) {
412
420
  return undefined;
413
421
  }
414
422
  const lower = text.toLowerCase();
423
+ if (internal.routeCode === types_1.RouteCode["Per rectum"]) {
424
+ if (lower === "rectum" || lower === "rectal") {
425
+ return undefined;
426
+ }
427
+ }
415
428
  let preposition = grammar.sitePreposition;
416
429
  if (!preposition) {
417
430
  if (lower.includes("eye")) {
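For orientation, a sketch of the kind of input the new rectal grammar applies to; the comments restate the rules added above rather than exact formatter output:

```ts
import { parseSig } from "ezmedicationinput";

// With the grammar entry above, rectal sigs pick up the verb "Use" and, when no
// explicit body site is given, the route phrase "rectally"; an explicit site is
// joined with the preposition "into", and a bare "rectum"/"rectal" site phrase
// is suppressed by formatSite so the route is not stated twice.
const supp = parseSig("1 supp pr qhs");
```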
package/dist/maps.js CHANGED
@@ -1254,6 +1254,7 @@ exports.DEFAULT_UNIT_BY_ROUTE = (() => {
1254
1254
  ensure(types_1.RouteCode["Otic route"], "drop");
1255
1255
  ensure(types_1.RouteCode["Respiratory tract route (qualifier value)"], "puff");
1256
1256
  ensure(types_1.RouteCode["Transdermal route"], "patch");
1257
+ ensure(types_1.RouteCode["Per rectum"], "suppository");
1257
1258
  return resolved;
1258
1259
  })();
1259
1260
  function normalizePrnReasonKey(value) {
@@ -1281,7 +1282,7 @@ const DEFAULT_PRN_REASON_SOURCE = [
1281
1282
  }
1282
1283
  },
1283
1284
  {
1284
- names: ["nausea", "queasiness"],
1285
+ names: ["nausea", "queasiness", "vomiting", "n/v", "nausea and vomiting"],
1285
1286
  definition: {
1286
1287
  coding: { system: SNOMED_SYSTEM, code: "422587007", display: "Nausea" },
1287
1288
  text: "Nausea"
package/dist/parser.js CHANGED
@@ -1897,6 +1897,7 @@ function parseInternal(input, options) {
1897
1897
  if (internal.asNeeded && prnReasonStart !== undefined) {
1898
1898
  const reasonTokens = [];
1899
1899
  const reasonIndices = [];
1900
+ const reasonObjects = [];
1900
1901
  for (let i = prnReasonStart; i < tokens.length; i++) {
1901
1902
  const token = tokens[i];
1902
1903
  if (internal.consumed.has(token.index)) {
@@ -1904,36 +1905,77 @@ function parseInternal(input, options) {
1904
1905
  }
1905
1906
  reasonTokens.push(token.original);
1906
1907
  reasonIndices.push(token.index);
1908
+ reasonObjects.push(token);
1907
1909
  mark(internal.consumed, token);
1908
1910
  }
1909
1911
  if (reasonTokens.length > 0) {
1910
- const joined = reasonTokens.join(" ").trim();
1911
- if (joined) {
1912
- const sortedIndices = reasonIndices.sort((a, b) => a - b);
1913
- const range = computeTokenRange(internal.input, tokens, sortedIndices);
1914
- const sourceText = range ? internal.input.slice(range.start, range.end) : undefined;
1915
- let sanitized = joined.replace(/\s+/g, " ").trim();
1916
- let isProbe = false;
1917
- const probeMatch = sanitized.match(/^\{(.+)}$/);
1918
- if (probeMatch) {
1919
- isProbe = true;
1920
- sanitized = probeMatch[1];
1912
+ let sortedIndices = reasonIndices.slice().sort((a, b) => a - b);
1913
+ let range = computeTokenRange(internal.input, tokens, sortedIndices);
1914
+ let sourceText = range ? internal.input.slice(range.start, range.end) : undefined;
1915
+ if (sourceText) {
1916
+ const cutoff = determinePrnReasonCutoff(reasonObjects, sourceText);
1917
+ if (cutoff !== undefined) {
1918
+ for (let i = cutoff; i < reasonObjects.length; i++) {
1919
+ internal.consumed.delete(reasonObjects[i].index);
1920
+ }
1921
+ reasonObjects.splice(cutoff);
1922
+ reasonTokens.splice(cutoff);
1923
+ reasonIndices.splice(cutoff);
1924
+ while (reasonTokens.length > 0) {
1925
+ const lastToken = reasonTokens[reasonTokens.length - 1];
1926
+ if (!lastToken || /^[;:.,-]+$/.test(lastToken.trim())) {
1927
+ const removedObject = reasonObjects.pop();
1928
+ if (removedObject) {
1929
+ internal.consumed.delete(removedObject.index);
1930
+ }
1931
+ reasonTokens.pop();
1932
+ const removedIndex = reasonIndices.pop();
1933
+ if (removedIndex !== undefined) {
1934
+ internal.consumed.delete(removedIndex);
1935
+ }
1936
+ continue;
1937
+ }
1938
+ break;
1939
+ }
1940
+ if (reasonTokens.length > 0) {
1941
+ sortedIndices = reasonIndices.slice().sort((a, b) => a - b);
1942
+ range = computeTokenRange(internal.input, tokens, sortedIndices);
1943
+ sourceText = range ? internal.input.slice(range.start, range.end) : undefined;
1944
+ }
1945
+ else {
1946
+ range = undefined;
1947
+ sourceText = undefined;
1948
+ }
1949
+ }
1950
+ }
1951
+ if (reasonTokens.length > 0) {
1952
+ const joined = reasonTokens.join(" ").trim();
1953
+ if (joined) {
1954
+ let sanitized = joined.replace(/\s+/g, " ").trim();
1955
+ let isProbe = false;
1956
+ const probeMatch = sanitized.match(/^\{(.+)}$/);
1957
+ if (probeMatch) {
1958
+ isProbe = true;
1959
+ sanitized = probeMatch[1];
1960
+ }
1961
+ sanitized = sanitized.replace(/[{}]/g, " ").replace(/\s+/g, " ").trim();
1962
+ const text = sanitized || joined;
1963
+ internal.asNeededReason = text;
1964
+ const normalized = text.toLowerCase();
1965
+ const canonical = sanitized
1966
+ ? (0, maps_1.normalizePrnReasonKey)(sanitized)
1967
+ : (0, maps_1.normalizePrnReasonKey)(text);
1968
+ internal.prnReasonLookupRequest = {
1969
+ originalText: joined,
1970
+ text,
1971
+ normalized,
1972
+ canonical: canonical !== null && canonical !== void 0 ? canonical : "",
1973
+ isProbe,
1974
+ inputText: internal.input,
1975
+ sourceText,
1976
+ range
1977
+ };
1921
1978
  }
1922
- sanitized = sanitized.replace(/[{}]/g, " ").replace(/\s+/g, " ").trim();
1923
- const text = sanitized || joined;
1924
- internal.asNeededReason = text;
1925
- const normalized = text.toLowerCase();
1926
- const canonical = sanitized ? (0, maps_1.normalizePrnReasonKey)(sanitized) : (0, maps_1.normalizePrnReasonKey)(text);
1927
- internal.prnReasonLookupRequest = {
1928
- originalText: joined,
1929
- text,
1930
- normalized,
1931
- canonical: canonical !== null && canonical !== void 0 ? canonical : "",
1932
- isProbe,
1933
- inputText: internal.input,
1934
- sourceText,
1935
- range
1936
- };
1937
1979
  }
1938
1980
  }
1939
1981
  }
@@ -2028,27 +2070,30 @@ function parseInternal(input, options) {
2028
2070
  sanitized = sanitized.replace(/[{}]/g, " ").replace(/\s+/g, " ").trim();
2029
2071
  const range = refineSiteRange(internal.input, sanitized, tokenRange);
2030
2072
  const sourceText = range ? internal.input.slice(range.start, range.end) : undefined;
2073
+ const displayText = normalizeSiteDisplayText(sanitized, options === null || options === void 0 ? void 0 : options.siteCodeMap);
2074
+ const displayLower = displayText.toLowerCase();
2075
+ const canonical = displayText ? (0, maps_1.normalizeBodySiteKey)(displayText) : "";
2031
2076
  internal.siteLookupRequest = {
2032
2077
  originalText: normalizedSite,
2033
- text: sanitized,
2034
- normalized: sanitized.toLowerCase(),
2035
- canonical: sanitized ? (0, maps_1.normalizeBodySiteKey)(sanitized) : "",
2078
+ text: displayText,
2079
+ normalized: displayLower,
2080
+ canonical,
2036
2081
  isProbe,
2037
2082
  inputText: internal.input,
2038
2083
  sourceText,
2039
2084
  range
2040
2085
  };
2041
- if (sanitized) {
2086
+ if (displayText) {
2042
2087
  const normalizedLower = sanitized.toLowerCase();
2043
2088
  const strippedDescriptor = normalizeRouteDescriptorPhrase(normalizedLower);
2044
- const siteWords = normalizedLower.split(/\s+/).filter((word) => word.length > 0);
2089
+ const siteWords = displayLower.split(/\s+/).filter((word) => word.length > 0);
2045
2090
  const hasNonSiteWords = siteWords.some((word) => !isBodySiteHint(word, internal.customSiteHints));
2046
2091
  const shouldAttemptRouteDescriptor = strippedDescriptor !== normalizedLower || hasNonSiteWords || strippedDescriptor === "mouth";
2047
2092
  const appliedRouteDescriptor = shouldAttemptRouteDescriptor && maybeApplyRouteDescriptor(sanitized);
2048
2093
  if (!appliedRouteDescriptor) {
2049
2094
  // Preserve the clean site text for FHIR output and resolver context
2050
2095
  // whenever we keep the original phrase.
2051
- internal.siteText = sanitized;
2096
+ internal.siteText = displayText;
2052
2097
  if (!internal.siteSource) {
2053
2098
  internal.siteSource = "text";
2054
2099
  }
@@ -2399,29 +2444,194 @@ function findAdditionalInstructionDefinition(text, canonical) {
2399
2444
  }
2400
2445
  return undefined;
2401
2446
  }
2447
+ const BODY_SITE_ADJECTIVE_SUFFIXES = [
2448
+ "al",
2449
+ "ial",
2450
+ "ual",
2451
+ "ic",
2452
+ "ous",
2453
+ "ive",
2454
+ "ary",
2455
+ "ory",
2456
+ "atic",
2457
+ "etic",
2458
+ "ular",
2459
+ "otic",
2460
+ "ile",
2461
+ "eal",
2462
+ "inal",
2463
+ "aneal",
2464
+ "enal"
2465
+ ];
2466
+ const DEFAULT_SITE_SYNONYM_KEYS = (() => {
2467
+ const map = new Map();
2468
+ for (const [key, definition] of (0, object_1.objectEntries)(maps_1.DEFAULT_BODY_SITE_SNOMED)) {
2469
+ if (!definition) {
2470
+ continue;
2471
+ }
2472
+ const normalized = key.trim();
2473
+ if (!normalized) {
2474
+ continue;
2475
+ }
2476
+ const existing = map.get(definition);
2477
+ if (existing) {
2478
+ if (existing.indexOf(normalized) === -1) {
2479
+ existing.push(normalized);
2480
+ }
2481
+ }
2482
+ else {
2483
+ map.set(definition, [normalized]);
2484
+ }
2485
+ }
2486
+ return map;
2487
+ })();
2488
+ function normalizeSiteDisplayText(text, customSiteMap) {
2489
+ var _a;
2490
+ const trimmed = text.trim();
2491
+ if (!trimmed) {
2492
+ return trimmed;
2493
+ }
2494
+ const canonicalInput = (0, maps_1.normalizeBodySiteKey)(trimmed);
2495
+ if (!canonicalInput || !isAdjectivalSitePhrase(canonicalInput)) {
2496
+ return trimmed;
2497
+ }
2498
+ const definition = (_a = lookupBodySiteDefinition(customSiteMap, canonicalInput)) !== null && _a !== void 0 ? _a : maps_1.DEFAULT_BODY_SITE_SNOMED[canonicalInput];
2499
+ if (!definition) {
2500
+ return trimmed;
2501
+ }
2502
+ const preferred = pickPreferredBodySitePhrase(canonicalInput, definition, customSiteMap);
2503
+ if (!preferred) {
2504
+ return trimmed;
2505
+ }
2506
+ return preferred;
2507
+ }
2508
+ function pickPreferredBodySitePhrase(canonical, definition, customSiteMap) {
2509
+ const synonyms = new Set();
2510
+ synonyms.add(canonical);
2511
+ if (definition.aliases) {
2512
+ for (const alias of definition.aliases) {
2513
+ const normalizedAlias = (0, maps_1.normalizeBodySiteKey)(alias);
2514
+ if (normalizedAlias) {
2515
+ synonyms.add(normalizedAlias);
2516
+ }
2517
+ }
2518
+ }
2519
+ const defaultSynonyms = DEFAULT_SITE_SYNONYM_KEYS.get(definition);
2520
+ if (defaultSynonyms) {
2521
+ for (const synonym of defaultSynonyms) {
2522
+ synonyms.add(synonym);
2523
+ }
2524
+ }
2525
+ if (customSiteMap) {
2526
+ for (const [key, candidate] of (0, object_1.objectEntries)(customSiteMap)) {
2527
+ if (!candidate) {
2528
+ continue;
2529
+ }
2530
+ if (candidate === definition) {
2531
+ const normalizedKey = (0, maps_1.normalizeBodySiteKey)(key);
2532
+ if (normalizedKey) {
2533
+ synonyms.add(normalizedKey);
2534
+ }
2535
+ }
2536
+ if (candidate.aliases) {
2537
+ for (const alias of candidate.aliases) {
2538
+ const normalizedAlias = (0, maps_1.normalizeBodySiteKey)(alias);
2539
+ if (normalizedAlias) {
2540
+ synonyms.add(normalizedAlias);
2541
+ }
2542
+ }
2543
+ }
2544
+ }
2545
+ }
2546
+ const candidates = Array.from(synonyms).filter((phrase) => phrase && !isAdjectivalSitePhrase(phrase));
2547
+ if (!candidates.length) {
2548
+ return undefined;
2549
+ }
2550
+ candidates.sort((a, b) => scoreBodySitePhrase(b) - scoreBodySitePhrase(a));
2551
+ const best = candidates[0];
2552
+ if (!best) {
2553
+ return undefined;
2554
+ }
2555
+ if ((0, maps_1.normalizeBodySiteKey)(best) === canonical) {
2556
+ return undefined;
2557
+ }
2558
+ return best;
2559
+ }
2560
+ function scoreBodySitePhrase(phrase) {
2561
+ const lower = phrase.toLowerCase();
2562
+ const words = lower.split(/\s+/).filter((part) => part.length > 0);
2563
+ let score = 0;
2564
+ if (!/(structure|region|entire|proper|body)/.test(lower)) {
2565
+ score += 3;
2566
+ }
2567
+ if (!lower.includes(" of ")) {
2568
+ score += 1;
2569
+ }
2570
+ if (words.length <= 2) {
2571
+ score += 1;
2572
+ }
2573
+ if (words.length === 1) {
2574
+ score += 0.5;
2575
+ }
2576
+ score -= words.length * 0.2;
2577
+ score -= lower.length * 0.01;
2578
+ return score;
2579
+ }
2580
+ function isAdjectivalSitePhrase(phrase) {
2581
+ const normalized = phrase.trim().toLowerCase();
2582
+ if (!normalized) {
2583
+ return false;
2584
+ }
2585
+ const words = normalized.split(/\s+/).filter((word) => word.length > 0);
2586
+ if (words.length !== 1) {
2587
+ return false;
2588
+ }
2589
+ const last = words[words.length - 1];
2590
+ if (last.length <= 3) {
2591
+ return false;
2592
+ }
2593
+ return BODY_SITE_ADJECTIVE_SUFFIXES.some((suffix) => last.endsWith(suffix));
2594
+ }
2402
2595
  function collectAdditionalInstructions(internal, tokens) {
2403
2596
  var _a, _b, _c, _d, _e, _f;
2404
2597
  if (internal.additionalInstructions.length) {
2405
2598
  return;
2406
2599
  }
2407
- const leftover = tokens.filter((token) => !internal.consumed.has(token.index));
2408
- if (!leftover.length) {
2409
- return;
2410
- }
2411
2600
  const punctuationOnly = /^[;:.,-]+$/;
2412
- const contentTokens = leftover.filter((token) => !punctuationOnly.test(token.original));
2413
- if (!contentTokens.length) {
2601
+ const trailing = [];
2602
+ let expectedIndex;
2603
+ for (let cursor = tokens.length - 1; cursor >= 0; cursor--) {
2604
+ const token = tokens[cursor];
2605
+ if (!token) {
2606
+ continue;
2607
+ }
2608
+ if (internal.consumed.has(token.index)) {
2609
+ if (trailing.length > 0) {
2610
+ break;
2611
+ }
2612
+ continue;
2613
+ }
2614
+ if (expectedIndex !== undefined && token.index !== expectedIndex - 1) {
2615
+ break;
2616
+ }
2617
+ trailing.unshift(token);
2618
+ expectedIndex = token.index;
2619
+ }
2620
+ if (!trailing.length) {
2414
2621
  return;
2415
2622
  }
2416
- const leftoverIndices = leftover.map((token) => token.index).sort((a, b) => a - b);
2417
- const contiguous = leftoverIndices.every((index, i) => i === 0 || index === leftoverIndices[i - 1] + 1);
2418
- if (!contiguous) {
2623
+ const contentTokens = trailing.filter((token) => !punctuationOnly.test(token.original));
2624
+ if (!contentTokens.length) {
2419
2625
  return;
2420
2626
  }
2421
- const lastIndex = leftoverIndices[leftoverIndices.length - 1];
2627
+ const trailingIndices = trailing.map((token) => token.index).sort((a, b) => a - b);
2628
+ const lastIndex = trailingIndices[trailingIndices.length - 1];
2422
2629
  for (let i = lastIndex + 1; i < tokens.length; i++) {
2423
- const trailingToken = tokens[i];
2424
- if (!internal.consumed.has(trailingToken.index)) {
2630
+ const nextToken = tokens[i];
2631
+ if (!nextToken) {
2632
+ continue;
2633
+ }
2634
+ if (!internal.consumed.has(nextToken.index)) {
2425
2635
  return;
2426
2636
  }
2427
2637
  }
@@ -2434,7 +2644,33 @@ function collectAdditionalInstructions(internal, tokens) {
2434
2644
  return;
2435
2645
  }
2436
2646
  const contentIndices = contentTokens.map((token) => token.index).sort((a, b) => a - b);
2437
- const range = computeTokenRange(internal.input, tokens, contentIndices);
2647
+ const lowerInput = internal.input.toLowerCase();
2648
+ let trailingRange;
2649
+ let searchEnd = lowerInput.length;
2650
+ let rangeStart;
2651
+ let rangeEnd;
2652
+ for (let i = contentTokens.length - 1; i >= 0; i--) {
2653
+ const fragment = contentTokens[i].original.trim();
2654
+ if (!fragment) {
2655
+ continue;
2656
+ }
2657
+ const lowerFragment = fragment.toLowerCase();
2658
+ const foundIndex = lowerInput.lastIndexOf(lowerFragment, searchEnd - 1);
2659
+ if (foundIndex === -1) {
2660
+ rangeStart = undefined;
2661
+ rangeEnd = undefined;
2662
+ break;
2663
+ }
2664
+ rangeStart = foundIndex;
2665
+ if (rangeEnd === undefined) {
2666
+ rangeEnd = foundIndex + lowerFragment.length;
2667
+ }
2668
+ searchEnd = foundIndex;
2669
+ }
2670
+ if (rangeStart !== undefined && rangeEnd !== undefined) {
2671
+ trailingRange = { start: rangeStart, end: rangeEnd };
2672
+ }
2673
+ const range = trailingRange !== null && trailingRange !== void 0 ? trailingRange : computeTokenRange(internal.input, tokens, contentIndices);
2438
2674
  let separatorDetected = false;
2439
2675
  if (range) {
2440
2676
  for (let cursor = range.start - 1; cursor >= 0; cursor--) {
@@ -2499,11 +2735,81 @@ function collectAdditionalInstructions(internal, tokens) {
2499
2735
  }
2500
2736
  if (instructions.length) {
2501
2737
  internal.additionalInstructions = instructions;
2502
- for (const token of leftover) {
2738
+ for (const token of trailing) {
2503
2739
  mark(internal.consumed, token);
2504
2740
  }
2505
2741
  }
2506
2742
  }
2743
+ function determinePrnReasonCutoff(tokens, sourceText) {
2744
+ const separatorIndex = findPrnReasonSeparator(sourceText);
2745
+ if (separatorIndex === undefined) {
2746
+ return undefined;
2747
+ }
2748
+ const lowerSource = sourceText.toLowerCase();
2749
+ let searchOffset = 0;
2750
+ for (let i = 0; i < tokens.length; i++) {
2751
+ const token = tokens[i];
2752
+ const fragment = token.original.trim();
2753
+ if (!fragment) {
2754
+ continue;
2755
+ }
2756
+ const lowerFragment = fragment.toLowerCase();
2757
+ const position = lowerSource.indexOf(lowerFragment, searchOffset);
2758
+ if (position === -1) {
2759
+ continue;
2760
+ }
2761
+ const end = position + lowerFragment.length;
2762
+ searchOffset = end;
2763
+ if (position >= separatorIndex) {
2764
+ return i;
2765
+ }
2766
+ }
2767
+ return undefined;
2768
+ }
2769
+ function findPrnReasonSeparator(sourceText) {
2770
+ var _a;
2771
+ for (let i = 0; i < sourceText.length; i++) {
2772
+ const ch = sourceText[i];
2773
+ if (ch === "\n" || ch === "\r") {
2774
+ if (sourceText.slice(i + 1).trim().length > 0) {
2775
+ return i;
2776
+ }
2777
+ continue;
2778
+ }
2779
+ if (ch === ";") {
2780
+ if (sourceText.slice(i + 1).trim().length > 0) {
2781
+ return i;
2782
+ }
2783
+ continue;
2784
+ }
2785
+ if (ch === "-") {
2786
+ const prev = sourceText[i - 1];
2787
+ const next = sourceText[i + 1];
2788
+ const hasWhitespaceAround = (!prev || /\s/.test(prev)) && (!next || /\s/.test(next));
2789
+ if (hasWhitespaceAround && sourceText.slice(i + 1).trim().length > 0) {
2790
+ return i;
2791
+ }
2792
+ continue;
2793
+ }
2794
+ if (ch === ":" || ch === ".") {
2795
+ const rest = sourceText.slice(i + 1);
2796
+ if (!rest.trim().length) {
2797
+ continue;
2798
+ }
2799
+ const nextChar = rest.replace(/^\s+/, "")[0];
2800
+ if (!nextChar) {
2801
+ continue;
2802
+ }
2803
+ if (ch === "." &&
2804
+ /[0-9]/.test((_a = sourceText[i - 1]) !== null && _a !== void 0 ? _a : "") &&
2805
+ /[0-9]/.test(nextChar)) {
2806
+ continue;
2807
+ }
2808
+ return i;
2809
+ }
2810
+ }
2811
+ return undefined;
2812
+ }
2507
2813
  function lookupPrnReasonDefinition(map, canonical) {
2508
2814
  if (!map) {
2509
2815
  return undefined;
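Behaviorally, the new cutoff and trailing-token handling mean a separator after a PRN reason ends the reason, and the remainder is collected as an additional instruction instead of being folded into the reason text. A sketch, with illustrative outputs:

```ts
import { parseSig } from "ezmedicationinput";

const r = parseSig("1 tab po q4h prn nausea - max 4 per day");

// The " - " separator ends the PRN reason, so the tail is not swallowed into it.
r.fhir.asNeededFor?.[0]?.text;            // e.g. "nausea"
// The trailing clause is collected as an additional instruction,
// coded when a SNOMED match exists and kept as plain text otherwise.
r.fhir.additionalInstruction?.[0]?.text;
r.meta.normalized.additionalInstructions;
```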
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "ezmedicationinput",
3
- "version": "0.1.16",
3
+ "version": "0.1.17",
4
4
  "description": "Parse concise medication sigs into FHIR R5 Dosage JSON",
5
5
  "type": "module",
6
6
  "main": "dist/index.js",