@eagleoutice/flowr 2.7.4 → 2.7.6

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (38)
  1. package/README.md +14 -14
  2. package/cli/repl/commands/repl-commands.d.ts +2 -0
  3. package/cli/repl/commands/repl-commands.js +2 -0
  4. package/cli/repl/commands/repl-dataflow.d.ts +2 -0
  5. package/cli/repl/commands/repl-dataflow.js +38 -1
  6. package/cli/repl/core.js +22 -0
  7. package/config.d.ts +5 -0
  8. package/config.js +6 -0
  9. package/dataflow/graph/graph.js +2 -0
  10. package/documentation/wiki-analyzer.js +12 -0
  11. package/documentation/wiki-interface.js +3 -0
  12. package/documentation/wiki-query.js +1 -1
  13. package/package.json +3 -1
  14. package/project/context/flowr-analyzer-dependencies-context.d.ts +5 -1
  15. package/project/context/flowr-analyzer-functions-context.d.ts +16 -0
  16. package/project/context/flowr-analyzer-functions-context.js +6 -0
  17. package/project/context/flowr-analyzer-loading-order-context.d.ts +4 -4
  18. package/project/context/flowr-analyzer-loading-order-context.js +4 -0
  19. package/project/plugins/file-plugins/files/flowr-description-file.d.ts +18 -1
  20. package/project/plugins/file-plugins/files/flowr-description-file.js +47 -13
  21. package/project/plugins/loading-order-plugins/flowr-analyzer-loading-order-description-file-plugin.js +8 -3
  22. package/project/plugins/package-version-plugins/flowr-analyzer-package-versions-description-file-plugin.js +5 -2
  23. package/project/plugins/package-version-plugins/flowr-analyzer-package-versions-namespace-file-plugin.js +6 -1
  24. package/project/plugins/package-version-plugins/package.js +1 -1
  25. package/project/plugins/project-discovery/flowr-analyzer-project-discovery-plugin.js +12 -2
  26. package/queries/catalog/project-query/project-query-executor.js +12 -2
  27. package/queries/catalog/project-query/project-query-format.d.ts +13 -0
  28. package/queries/catalog/project-query/project-query-format.js +25 -2
  29. package/queries/query-print.js +8 -3
  30. package/util/mermaid/cfg.d.ts +3 -0
  31. package/util/mermaid/cfg.js +25 -0
  32. package/util/r-author.d.ts +39 -0
  33. package/util/r-author.js +194 -0
  34. package/util/r-license.d.ts +23 -0
  35. package/util/r-license.js +196 -0
  36. package/util/simple-df/dfg-ascii.d.ts +5 -0
  37. package/util/simple-df/dfg-ascii.js +272 -0
  38. package/util/version.js +1 -1

package/project/plugins/package-version-plugins/flowr-analyzer-package-versions-namespace-file-plugin.js
@@ -13,7 +13,12 @@ class FlowrAnalyzerPackageVersionsNamespaceFilePlugin extends flowr_analyzer_pac
  version = new semver_1.SemVer('0.1.0');
  process(ctx) {
  const nmspcFiles = ctx.files.getFilesByRole(flowr_file_1.FileRole.Namespace);
- exports.namespaceFileLog.info(`Found ${nmspcFiles.length} namespace files!`);
+ if (nmspcFiles.length === 0) {
+ exports.namespaceFileLog.warn('No namespace file found, cannot extract package versions.');
+ }
+ else if (nmspcFiles.length > 1) {
+ exports.namespaceFileLog.warn(`Found ${nmspcFiles.length} namespace files, expected exactly one.`);
+ }
  /** this will do the caching etc. for me */
  const deps = nmspcFiles[0].content();
  for (const pkg in deps) {

package/project/plugins/package-version-plugins/package.js
@@ -56,7 +56,7 @@ class Package {
  this.derivedVersion ??= versionConstraints[0];
  for (const constraint of versionConstraints) {
  if (!this.derivedVersion?.intersects(constraint)) {
- throw Error('Version constraint mismatch!');
+ throw new Error('Version constraint mismatch!');
  }
  this.versionConstraints.push(constraint);
  this.derivedVersion = this.deriveVersion();

package/project/plugins/project-discovery/flowr-analyzer-project-discovery-plugin.js
@@ -5,6 +5,7 @@ const flowr_analyzer_plugin_1 = require("../flowr-analyzer-plugin");
  const semver_1 = require("semver");
  const flowr_file_1 = require("../../context/flowr-file");
  const files_1 = require("../../../util/files");
+ const built_in_source_1 = require("../../../dataflow/internal/process/functions/call/built-in/built-in-source");
  /**
  * This is the base class for all plugins that discover files in a project for analysis.
  * These plugins interplay with the {@link FlowrAnalyzerFilesContext} to gather information about the files in the project.
@@ -21,6 +22,7 @@ class FlowrAnalyzerProjectDiscoveryPlugin extends flowr_analyzer_plugin_1.FlowrA
  exports.FlowrAnalyzerProjectDiscoveryPlugin = FlowrAnalyzerProjectDiscoveryPlugin;
  const discoverRSourcesRegex = /\.(r|rmd|ipynb|qmd)$/i;
  const ignorePathsWith = /(\.git|\.svn|\.hg|renv|packrat|node_modules|__pycache__|\.Rproj\.user)/i;
+ const excludeRequestsForPaths = /vignettes?|tests?|revdep|inst|data/i;
  /**
  * This is the default dummy implementation of the {@link FlowrAnalyzerProjectDiscoveryPlugin}.
  * It simply collects all files in the given folder and returns them as either {@link RParseRequest} (for R and Rmd files) or {@link FlowrTextFile} (for all other files).
@@ -31,10 +33,18 @@ class DefaultFlowrAnalyzerProjectDiscoveryPlugin extends FlowrAnalyzerProjectDis
  version = new semver_1.SemVer('0.0.0');
  supportedExtensions;
  ignorePathsRegex;
- constructor(triggerOnExtensions = discoverRSourcesRegex, ignorePathsRegex = ignorePathsWith) {
+ excludePathsRegex = excludeRequestsForPaths;
+ /**
+ * Creates a new instance of the default project discovery plugin.
+ * @param triggerOnExtensions - the regex to trigger R source file discovery on (and hence analyze them as R files)
+ * @param ignorePathsRegex - the regex to ignore certain paths entirely
+ * @param excludePathsRegex - the regex to exclude certain paths from being requested as R files (they are still collected as text files)
+ */
+ constructor(triggerOnExtensions = discoverRSourcesRegex, ignorePathsRegex = ignorePathsWith, excludePathsRegex = excludeRequestsForPaths) {
  super();
  this.supportedExtensions = triggerOnExtensions;
  this.ignorePathsRegex = ignorePathsRegex;
+ this.excludePathsRegex = excludePathsRegex;
  }
  process(_context, args) {
  const requests = [];
@@ -43,7 +53,7 @@ class DefaultFlowrAnalyzerProjectDiscoveryPlugin extends FlowrAnalyzerProjectDis
  if (this.ignorePathsRegex.test(file)) {
  continue;
  }
- if (this.supportedExtensions.test(file)) {
+ if (this.supportedExtensions.test(file) && !this.excludePathsRegex.test((0, built_in_source_1.platformDirname)(file))) {
  requests.push({ content: file, request: 'file' });
  }
  else {
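
For orientation, a minimal sketch (not part of the diff) of how the extended constructor could be called with a custom exclusion pattern; the class name and parameter order follow the hunk above, while the import path and the example regexes are assumptions:

    import { DefaultFlowrAnalyzerProjectDiscoveryPlugin } from '@eagleoutice/flowr/project/plugins/project-discovery/flowr-analyzer-project-discovery-plugin';

    // Hypothetical configuration: trigger on the default R extensions, ignore VCS folders,
    // and only exclude vignettes from being requested as R files (narrower than the new default).
    const discovery = new DefaultFlowrAnalyzerProjectDiscoveryPlugin(
        /\.(r|rmd|ipynb|qmd)$/i,      // triggerOnExtensions (default shown in the diff)
        /(\.git|node_modules)/i,      // ignorePathsRegex: skip these paths entirely
        /vignettes?/i                 // excludePathsRegex: still collected as text files, but not parsed as R
    );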

package/queries/catalog/project-query/project-query-executor.js
@@ -2,6 +2,7 @@
  Object.defineProperty(exports, "__esModule", { value: true });
  exports.executeProjectQuery = executeProjectQuery;
  const log_1 = require("../../../util/log");
+ const flowr_file_1 = require("../../../project/context/flowr-file");
  /**
  * Executes the given project queries.
  */
@@ -9,14 +10,23 @@ async function executeProjectQuery({ analyzer }, queries) {
  if (queries.length !== 1) {
  log_1.log.warn('Project query expects only up to one query, but got', queries.length);
  }
+ const withDf = queries.some(q => q.withDf);
  // we need to know what is considered by the analyzer
- await analyzer.dataflow();
+ if (withDf) {
+ await analyzer.dataflow();
+ }
+ const descFile = analyzer.inspectContext().files.getFilesByRole(flowr_file_1.FileRole.Description);
+ const desc = descFile[0];
  return {
  '.meta': {
  /* there is no sense in measuring a get */
  timing: 0
  },
- files: [...analyzer.inspectContext().files.consideredFilesList()]
+ files: Array.from(analyzer.inspectContext().files.consideredFilesList()),
+ authors: desc?.authors(),
+ encoding: desc?.content().get('Encoding')?.[0],
+ version: desc?.content().get('Version')?.[0],
+ licenses: desc?.license()
  };
  }
  //# sourceMappingURL=project-query-executor.js.map

package/queries/catalog/project-query/project-query-format.d.ts
@@ -1,11 +1,24 @@
  import type { BaseQueryFormat, BaseQueryResult } from '../../base-query-format';
  import { executeProjectQuery } from './project-query-executor';
  import Joi from 'joi';
+ import type { RAuthorInfo } from '../../../util/r-author';
+ import type { RLicenseElementInfo } from '../../../util/r-license';
  export interface ProjectQuery extends BaseQueryFormat {
  readonly type: 'project';
+ /** Whether to include Dataflow information in the result. */
+ readonly withDf?: boolean;
  }
  export interface ProjectQueryResult extends BaseQueryResult {
+ /** The authors of the project. */
+ readonly authors?: RAuthorInfo[];
+ /** The files considered part of the project. */
  readonly files: (string | '<inline>')[];
+ /** The licenses of the project. */
+ readonly licenses?: RLicenseElementInfo[];
+ /** The encoding of the project files. */
+ readonly encoding?: string;
+ /** The version of the project, if available. */
+ readonly version?: string;
  }
  export declare const ProjectQueryDefinition: {
  readonly executor: typeof executeProjectQuery;
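
To show how the new pieces fit together, a brief sketch (not from the package) using the types declared above; the import path is an assumption based on the file layout:

    import type { ProjectQuery, ProjectQueryResult } from '@eagleoutice/flowr/queries/catalog/project-query/project-query-format';

    // withDf is the new opt-in flag: only then does the executor run analyzer.dataflow().
    const query: ProjectQuery = { type: 'project', withDf: true };

    // The result may now carry DESCRIPTION-derived metadata; all of it is optional.
    function summarize(res: ProjectQueryResult): string {
        return [
            `version:  ${res.version ?? 'unknown'}`,
            `encoding: ${res.encoding ?? 'unknown'}`,
            `authors:  ${res.authors?.length ?? 0}`,
            `licenses: ${res.licenses?.length ?? 0}`,
            `files:    ${res.files.length}`
        ].join('\n');
    }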

package/queries/catalog/project-query/project-query-format.js
@@ -8,19 +8,42 @@ const project_query_executor_1 = require("./project-query-executor");
  const ansi_1 = require("../../../util/text/ansi");
  const time_1 = require("../../../util/text/time");
  const joi_1 = __importDefault(require("joi"));
+ const r_author_1 = require("../../../util/r-author");
  exports.ProjectQueryDefinition = {
  executor: project_query_executor_1.executeProjectQuery,
  asciiSummarizer: (formatter, _analyzer, queryResults, result) => {
  const out = queryResults;
  result.push(`Query: ${(0, ansi_1.bold)('project', formatter)} (${(0, time_1.printAsMs)(out['.meta'].timing, 0)})`);
- result.push(` ╰ Contains ${out.files.length} file${out.files.length === 1 ? '' : 's'}`);
- for (const file of out.files) {
+ if (out.version) {
+ result.push(` ╰ Version: ${out.version}`);
+ }
+ if (out.encoding) {
+ result.push(` ╰ Encoding: ${out.encoding}`);
+ }
+ if (out.authors && out.authors.length > 0) {
+ result.push(' ╰ Author(s):');
+ for (const author of out.authors) {
+ result.push(` ╰ ${(0, r_author_1.rAuthorInfoToReadable)(author)}`);
+ }
+ }
+ if (out.licenses && out.licenses.length > 0) {
+ result.push(' ╰ License(s):');
+ for (const license of out.licenses) {
+ result.push(` ╰ ${JSON.stringify(license)}`);
+ }
+ }
+ result.push(` ╰ Dataflow Analysis considered ${out.files.length} file${out.files.length === 1 ? '' : 's'}`);
+ for (const file of out.files.slice(0, 20)) {
  result.push(` ╰ \`${file}\``);
  }
+ if (out.files.length > 20) {
+ result.push(` ╰ ... and ${out.files.length - 20} more files`);
+ }
  return true;
  },
  schema: joi_1.default.object({
  type: joi_1.default.string().valid('project').required().description('The type of the query.'),
+ withDf: joi_1.default.boolean().optional().default(false).description('Whether to include Dataflow information in the result.')
  }).description('The project query provides information on the analyzed project.'),
  flattenInvolvedNodes: () => []
  };

package/queries/query-print.js
@@ -23,7 +23,7 @@ function nodeString(nodeId, formatter, idMap) {
  }
  function asciiCallContextSubHit(formatter, results, idMap) {
  const result = [];
- for (const { id, calls = [], linkedIds = [], aliasRoots = [] } of results) {
+ for (const { id, calls = [], linkedIds = [], aliasRoots = [] } of results.slice(0, 20)) {
  const node = idMap.get(id);
  if (node === undefined) {
  result.push(` ${(0, ansi_1.bold)('UNKNOWN: ' + JSON.stringify({ calls, linkedIds }))}`);
@@ -41,6 +41,9 @@ function asciiCallContextSubHit(formatter, results, idMap) {
  }
  result.push(line);
  }
+ if (results.length > 20) {
+ result.push(` ... and ${results.length - 20} more hits`);
+ }
  return result.join(', ');
  }
  /**
@@ -50,9 +53,11 @@ function asciiCallContext(formatter, results, idMap) {
  /* traverse over 'kinds' and within them 'subkinds' */
  const result = [];
  for (const [kind, { subkinds }] of Object.entries(results['kinds'])) {
- result.push(` ╰ ${(0, ansi_1.bold)(kind, formatter)}`);
+ const amountOfHits = Object.values(subkinds).reduce((acc, cur) => acc + cur.length, 0);
+ result.push(` ╰ ${(0, ansi_1.bold)(kind, formatter)} (${amountOfHits} hit${amountOfHits === 1 ? '' : 's'}):`);
  for (const [subkind, values] of Object.entries(subkinds)) {
- result.push(` ╰ ${(0, ansi_1.bold)(subkind, formatter)}: ${asciiCallContextSubHit(formatter, values, idMap)}`);
+ const amountOfSubHits = values.length;
+ result.push(` ╰ ${(0, ansi_1.bold)(subkind, formatter)} (${amountOfSubHits} hit${amountOfSubHits === 1 ? '' : 's'}): ${asciiCallContextSubHit(formatter, values, idMap)}`);
  }
  }
  return result.join('\n');

package/util/mermaid/cfg.d.ts
@@ -7,6 +7,9 @@ import { type MermaidGraphPrinterInfo } from './info';
  * @param normalizedAst - The normalized AST to use for the vertex content.
  * @param prefix - The prefix to use for the mermaid string.
  * @param simplify - Whether to simplify the control flow graph (especially in the context of basic blocks).
+ * @param markStyle - The style to use for marked vertices and edges.
+ * @param includeOnlyIds - If provided, only include the vertices with the given IDs.
+ * @param mark - If provided, mark the given vertices and edges.
  */
  export declare function cfgToMermaid(cfg: ControlFlowInformation, normalizedAst: NormalizedAst, { prefix, simplify, markStyle, includeOnlyIds, mark }?: MermaidGraphPrinterInfo): string;
  /**

package/util/mermaid/cfg.js
@@ -8,6 +8,7 @@ const control_flow_graph_1 = require("../../control-flow/control-flow-graph");
  const reconstruct_1 = require("../../reconstruct/reconstruct");
  const auto_select_defaults_1 = require("../../reconstruct/auto-select/auto-select-defaults");
  const info_1 = require("./info");
+ const collect_1 = require("../../r-bridge/lang-4.x/ast/model/collect");
  function getLexeme(n) {
  return n ? n.info.fullLexeme ?? n.lexeme ?? '' : undefined;
  }
@@ -30,9 +31,33 @@ const getDirRegex = /flowchart\s+([A-Za-z]+)/;
  * @param normalizedAst - The normalized AST to use for the vertex content.
  * @param prefix - The prefix to use for the mermaid string.
  * @param simplify - Whether to simplify the control flow graph (especially in the context of basic blocks).
+ * @param markStyle - The style to use for marked vertices and edges.
+ * @param includeOnlyIds - If provided, only include the vertices with the given IDs.
+ * @param mark - If provided, mark the given vertices and edges.
  */
  function cfgToMermaid(cfg, normalizedAst, { prefix = 'flowchart BT\n', simplify = false, markStyle = info_1.MermaidDefaultMarkStyle, includeOnlyIds, mark } = {}) {
  let output = prefix;
+ if (includeOnlyIds) {
+ const completed = new Set(includeOnlyIds);
+ // foreach nast id we add all children
+ for (const id of includeOnlyIds.values()) {
+ const nastNode = normalizedAst.idMap.get(id);
+ if (!nastNode) {
+ continue;
+ }
+ const ids = (0, collect_1.collectAllIds)(nastNode);
+ for (const childId of ids) {
+ completed.add(childId);
+ }
+ }
+ // if we have a filter, we automatically add all vertices in the cfg that are *markers* for these ids and
+ for (const [id, v] of cfg.graph.vertices()) {
+ if (v.type === control_flow_graph_1.CfgVertexType.EndMarker && completed.has(v.root)) {
+ completed.add(id);
+ }
+ }
+ includeOnlyIds = completed;
+ }
  const dirIs = getDirRegex.exec(prefix)?.at(1) ?? 'LR';
  for (const [id, vertex] of cfg.graph.vertices(false)) {
  const normalizedVertex = normalizedAst?.idMap.get(id);
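
A small usage sketch (illustrative, not from the package) of the new filtering and marking options for cfgToMermaid; how cfg and normalizedAst are obtained from a flowR analysis is omitted, the node ids are made up, and the import path is an assumption:

    import { cfgToMermaid } from '@eagleoutice/flowr/util/mermaid/cfg';

    // Assumed to come from an earlier analysis run of the script in question.
    declare const cfg: Parameters<typeof cfgToMermaid>[0];
    declare const normalizedAst: Parameters<typeof cfgToMermaid>[1];

    const mermaid = cfgToMermaid(cfg, normalizedAst, {
        prefix:         'flowchart TD\n',
        includeOnlyIds: new Set([3, 7]), // keep only these AST ids; children and end markers are added automatically
        mark:           new Set([7])     // highlight this vertex using the default mark style
    });
    console.log(mermaid);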

package/util/r-author.d.ts
@@ -0,0 +1,39 @@
+ /** https://r-pkgs.org/description.html#sec-description-authors-at-r */
+ export declare enum AuthorRole {
+ /** the creator or maintainer, the person you should bother if you have problems. Despite being short for “creator”, this is the correct role to use for the current maintainer, even if they are not the initial creator of the package. */
+ Creator = "cre",
+ /** authors, those who have made significant contributions to the package. */
+ Author = "aut",
+ /** contributors, those who have made smaller contributions, like patches. */
+ Contributor = "ctb",
+ /** copyright holder. This is used to list additional copyright holders who are not authors, typically companies, like an employer of one or more of the authors. */
+ CopyrightHolder = "cph",
+ /** funder, the people or organizations that have provided financial support for the development of the package. */
+ Funder = "fnd"
+ }
+ /**
+ * Information about an author.
+ * See {@link parseRAuthorString} for parsing R `Authors@R` strings, and {@link rAuthorInfoToReadable} for printing them.
+ */
+ export interface RAuthorInfo {
+ /** The name (components) of the author. */
+ readonly name: string[];
+ /** The email of the author, if available. */
+ readonly email?: string;
+ /** The roles of the author in the project. */
+ readonly roles: AuthorRole[];
+ /** The ORCID of the author, if available. */
+ readonly orcid?: string;
+ /** Any additional comments about the author. */
+ readonly comment?: string[];
+ }
+ /**
+ * Convert structured R author information into an R `Authors@R` string.
+ */
+ export declare function rAuthorInfoToReadable(author: RAuthorInfo): string;
+ /**
+ * Parse an R `Authors@R` string into structured author information.
+ * These are mostly found in `R` DESCRIPTION files and are a vector of `person()` calls.
+ * For now, this works *without* the full dataflow engine, so complex cases may not be parsed correctly.
+ */
+ export declare function parseRAuthorString(authorString: string): RAuthorInfo[];
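
As a rough illustration of the new author utilities (the input string and the import path are invented for this example):

    import { parseRAuthorString, rAuthorInfoToReadable } from '@eagleoutice/flowr/util/r-author';

    // A typical DESCRIPTION `Authors@R` field, written as a vector of person() calls:
    const authorsAtR = 'c(person("Jane", "Doe", email = "jane@example.org", role = c("aut", "cre"), comment = c(ORCID = "0000-0000-0000-0000")), person("ACME Corp", role = "cph"))';

    for (const author of parseRAuthorString(authorsAtR)) {
        // Expected to print something along the lines of
        // "Jane Doe <jane@example.org> [aut, cre] (ORCID: 0000-0000-0000-0000)" and "ACME Corp [cph]".
        console.log(rAuthorInfoToReadable(author));
    }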

package/util/r-author.js
@@ -0,0 +1,194 @@
+ "use strict";
+ Object.defineProperty(exports, "__esModule", { value: true });
+ exports.AuthorRole = void 0;
+ exports.rAuthorInfoToReadable = rAuthorInfoToReadable;
+ exports.parseRAuthorString = parseRAuthorString;
+ const args_1 = require("./text/args");
+ const assert_1 = require("./assert");
+ const objects_1 = require("./objects");
+ const retriever_1 = require("../r-bridge/retriever");
+ /** https://r-pkgs.org/description.html#sec-description-authors-at-r */
+ var AuthorRole;
+ (function (AuthorRole) {
+ /** the creator or maintainer, the person you should bother if you have problems. Despite being short for “creator”, this is the correct role to use for the current maintainer, even if they are not the initial creator of the package. */
+ AuthorRole["Creator"] = "cre";
+ /** authors, those who have made significant contributions to the package. */
+ AuthorRole["Author"] = "aut";
+ /** contributors, those who have made smaller contributions, like patches. */
+ AuthorRole["Contributor"] = "ctb";
+ /** copyright holder. This is used to list additional copyright holders who are not authors, typically companies, like an employer of one or more of the authors. */
+ AuthorRole["CopyrightHolder"] = "cph";
+ /** funder, the people or organizations that have provided financial support for the development of the package. */
+ AuthorRole["Funder"] = "fnd";
+ })(AuthorRole || (exports.AuthorRole = AuthorRole = {}));
+ /**
+ * Convert structured R author information into an R `Authors@R` string.
+ */
+ function rAuthorInfoToReadable(author) {
+ const nameStr = author.name.join(' ');
+ const emailStr = author.email ? ` <${author.email}>` : '';
+ const rolesStr = author.roles.length > 0 ? ` [${author.roles.join(', ')}]` : '';
+ const orcidStr = author.orcid ? ` (ORCID: ${author.orcid})` : '';
+ const commentStr = author.comment && author.comment.length > 0 ? ` {${author.comment.join('; ')}}` : '';
+ return `${nameStr}${emailStr}${rolesStr}${orcidStr}${commentStr}`;
+ }
+ /**
+ * Parse an R `Authors@R` string into structured author information.
+ * These are mostly found in `R` DESCRIPTION files and are a vector of `person()` calls.
+ * For now, this works *without* the full dataflow engine, so complex cases may not be parsed correctly.
+ */
+ function parseRAuthorString(authorString) {
+ const str = authorString.trim();
+ if (str.startsWith('c(') && str.endsWith(')')) {
+ const inner = str.slice(2, -1).trim();
+ const parts = joinPartsWithVectors((0, args_1.splitAtEscapeSensitive)(inner, false, ','));
+ const authors = [];
+ for (const part of parts) {
+ const author = parseRPersonCall(part);
+ if (author) {
+ authors.push(author);
+ }
+ }
+ return authors;
+ }
+ else if (str.startsWith('person(') && str.endsWith(')')) {
+ const author = parseRPersonCall(str);
+ return author ? [author] : [];
+ }
+ return [];
+ }
+ function splitArgNameValue(arg) {
+ const eqIndex = arg.indexOf('=');
+ if (eqIndex === -1) {
+ const trimmedArg = arg.trim();
+ return { value: trimmedArg.length === 0 ? undefined : trimmedArg };
+ }
+ else {
+ const name = arg.slice(0, eqIndex).trim();
+ const value = arg.slice(eqIndex + 1).trim();
+ return { name, value };
+ }
+ }
+ // Joins parts that may be split by c(...) vectors back together, ...
+ function joinPartsWithVectors(parts) {
+ const result = [];
+ let buffer = [];
+ let parenthesisLevel = 0;
+ for (const part of parts) {
+ const trimmed = part.trim();
+ // check whether parenthesis are balanced
+ for (const char of trimmed) {
+ if (char === '(') {
+ parenthesisLevel++;
+ }
+ else if (char === ')') {
+ parenthesisLevel--;
+ }
+ }
+ if (parenthesisLevel === 0) {
+ buffer.push(trimmed);
+ result.push(buffer.join(', '));
+ buffer = [];
+ }
+ else {
+ buffer.push(trimmed);
+ }
+ }
+ if (buffer.length > 0) {
+ result.push(buffer.join(', '));
+ }
+ return result;
+ }
+ const defaultPosArgNames = ['given', 'family', 'middle', 'email', 'role', 'comment', 'first', 'last'];
+ function splitVector(roleStr) {
+ if (roleStr.startsWith('c(') && roleStr.endsWith(')')) {
+ const inner = roleStr.slice(2, -1).trim();
+ return joinPartsWithVectors((0, args_1.splitAtEscapeSensitive)(inner, false, ','));
+ }
+ else {
+ return [roleStr.trim()];
+ }
+ }
+ function parseRoles(roleStr) {
+ if (!roleStr) {
+ return [];
+ }
+ const roles = [];
+ const parts = splitVector(roleStr);
+ for (const part of parts) {
+ const trimmed = part.trim();
+ const roleValue = (0, retriever_1.removeRQuotes)(trimmed);
+ if (Object.values(AuthorRole).includes(roleValue)) {
+ roles.push(roleValue);
+ }
+ }
+ return roles;
+ }
+ function parseComments(commentStr) {
+ if (!commentStr) {
+ return undefined;
+ }
+ const comments = [];
+ const parts = splitVector(commentStr);
+ let orcid = undefined;
+ for (const part of parts) {
+ const trimmed = part.trim();
+ const commentValue = (0, retriever_1.removeRQuotes)(trimmed);
+ if (/ORCID\s*=/ig.test(commentValue)) {
+ const orcidIndex = commentValue.indexOf('=');
+ if (orcidIndex !== -1) {
+ orcid = (0, retriever_1.removeRQuotes)(commentValue.slice(orcidIndex + 1).trim());
+ }
+ continue;
+ }
+ comments.push(commentValue);
+ }
+ return comments.length > 0 || orcid ? { contents: comments, orcid: orcid } : undefined;
+ }
+ function assignArg(argMap, split) {
+ argMap.set(split.name, split.value === undefined || split.value?.length === 0 ? undefined : (0, retriever_1.removeRQuotes)(split.value));
+ }
+ function parseRPersonCall(personCall) {
+ /* function(given = NULL, family = NULL, middle = NULL,
+ email = NULL, role = NULL, comment = NULL,
+ first = NULL, last = NULL), but we neither use nor support full R semantics here for now */
+ personCall = personCall.trim();
+ if (!personCall.startsWith('person(') || !personCall.endsWith(')')) {
+ return undefined;
+ }
+ const inner = personCall.slice(7, -1).trim();
+ // these may also split unescaped commas inside c(...)
+ const parArgs = joinPartsWithVectors((0, args_1.splitAtEscapeSensitive)(inner, false, ','));
+ const argMap = new Map();
+ const unnamed = [];
+ for (const arg of parArgs) {
+ const split = splitArgNameValue(arg.trim());
+ if (!split.name) {
+ unnamed.push(arg.trim());
+ continue;
+ }
+ assignArg(argMap, split);
+ }
+ // assign unnamed args in order
+ for (let i = 0; i < unnamed.length; i++) {
+ if (i >= defaultPosArgNames.length) {
+ break;
+ }
+ const argIdx = defaultPosArgNames.findIndex(x => !argMap.has(x));
+ if (argIdx === -1) {
+ break;
+ }
+ const argName = defaultPosArgNames[argIdx];
+ const value = unnamed[i];
+ assignArg(argMap, { name: argName, value });
+ }
+ const comments = parseComments(argMap.get('comment'));
+ return (0, objects_1.compactRecord)({
+ name: [argMap.get('given') ?? argMap.get('first'), argMap.get('middle'), argMap.get('family') ?? argMap.get('last')].filter(assert_1.isNotUndefined),
+ email: argMap.get('email'),
+ roles: parseRoles(argMap.get('role')),
+ comment: comments?.contents,
+ orcid: comments?.orcid
+ });
+ }
+ //# sourceMappingURL=r-author.js.map

package/util/r-license.d.ts
@@ -0,0 +1,23 @@
+ import { Range } from 'semver';
+ export interface RLicenseInfo {
+ type: 'license';
+ license: string;
+ versionConstraint?: Range;
+ }
+ export interface RLicenseExceptionInfo {
+ type: 'exception';
+ exception: string;
+ }
+ export interface RLicenseCombinationInfo {
+ type: 'combination';
+ combination: 'and' | 'or' | 'with';
+ elements: [left: RLicenseElementInfo, right: RLicenseElementInfo];
+ }
+ export interface NoLicenseInfo {
+ type: 'no-license';
+ }
+ export type RLicenseElementInfo = RLicenseInfo | RLicenseExceptionInfo | RLicenseCombinationInfo | NoLicenseInfo;
+ /**
+ * Parses an R license string into its structured representation.
+ */
+ export declare function parseRLicense(licenseString: string): RLicenseElementInfo;
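
Finally, a sketch of how the declared license parser might be consumed; since r-license.js itself is not shown in this excerpt, the traversal below leans only on the declared types, and the import path plus the example strings are assumptions:

    import { parseRLicense, type RLicenseElementInfo } from '@eagleoutice/flowr/util/r-license';

    // Common CRAN-style License fields (illustrative inputs):
    const single   = parseRLicense('MIT');
    const bounded  = parseRLicense('GPL (>= 2)');
    const combined = parseRLicense('GPL-2 | GPL-3');

    // Combinations nest exactly two elements, per the tuple in RLicenseCombinationInfo.
    function describe(info: RLicenseElementInfo): string {
        switch (info.type) {
            case 'license':     return info.license + (info.versionConstraint ? ` (${info.versionConstraint.range})` : '');
            case 'exception':   return `exception for ${info.exception}`;
            case 'combination': return `(${describe(info.elements[0])} ${info.combination} ${describe(info.elements[1])})`;
            case 'no-license':  return 'no license';
        }
    }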