@modelence/ai 0.1.1 → 0.1.3-dev.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.js CHANGED
@@ -1,2 +1,2 @@
- import {getConfig}from'modelence/server';import {startTransaction,captureError}from'modelence/telemetry';import {generateText}from'ai';import {createOpenAI}from'@ai-sdk/openai';import {createAnthropic}from'@ai-sdk/anthropic';import {createGoogleGenerativeAI}from'@ai-sdk/google';function d(e,r){switch(e){case "openai":return createOpenAI({apiKey:String(getConfig("_system.openai.apiKey"))})(r);case "anthropic":return createAnthropic({apiKey:String(getConfig("_system.anthropic.apiKey"))})(r);case "google":return createGoogleGenerativeAI({apiKey:String(getConfig("_system.google.apiKey"))})(r);default:throw new Error(`Unsupported provider: ${e}`)}}async function G(e){let{provider:r,model:o,...i}=e,n=startTransaction("ai","ai:generateText",{provider:r,model:o,messageCount:Array.isArray(e.messages)?e.messages.length:0,temperature:e.temperature});try{let t=await generateText({model:d(r,o),...i});return n.end(),t}catch(t){throw captureError(t),n.end("error"),t}}export{G as generateText};//# sourceMappingURL=index.js.map
+ import {getConfig}from'modelence/server';import {startTransaction,captureError}from'modelence/telemetry';import {generateText}from'ai';import {createOpenAI}from'@ai-sdk/openai';import {createAnthropic}from'@ai-sdk/anthropic';import {createGoogleGenerativeAI}from'@ai-sdk/google';function u(e,t){switch(e){case "openai":return createOpenAI({apiKey:String(getConfig("_system.openai.apiKey"))})(t);case "anthropic":return createAnthropic({apiKey:String(getConfig("_system.anthropic.apiKey"))})(t);case "google":return createGoogleGenerativeAI({apiKey:String(getConfig("_system.google.apiKey"))})(t);default:throw new Error(`Unsupported provider: ${e}`)}}async function G(e){let{provider:t,model:n,...i}=e,o=startTransaction("ai","ai:generateText",{provider:t,model:n,messageCount:Array.isArray(e.messages)?e.messages.length:0,temperature:e.temperature});try{let r=await generateText({model:u(t,n),...i});return "setContext"in o?o.end("success",{context:{usage:{promptTokens:r.usage.promptTokens,completionTokens:r.usage.completionTokens,totalTokens:r.usage.totalTokens}}}):o.end("success"),r}catch(r){throw captureError(r),o.end("error"),r}}export{G as generateText};//# sourceMappingURL=index.js.map
  //# sourceMappingURL=index.js.map
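The code change in this build is confined to the telemetry call: on success, token usage (`promptTokens`, `completionTokens`, `totalTokens`) is now attached to the `ai:generateText` transaction. The call pattern for consumers is unchanged; below is a minimal consumer-side sketch, adapted from the JSDoc example embedded in the package source, assuming the AI SDK v4 usage shape returned by the bundled `ai@^4.3.16`:

```typescript
import { generateText } from '@modelence/ai';

async function main() {
  // Provider API keys are resolved server-side from Modelence config
  // ('_system.openai.apiKey', etc.), so none are passed here.
  const response = await generateText({
    provider: 'openai',
    model: 'gpt-4o',
    messages: [
      { role: 'user', content: 'Write a haiku about programming' }
    ],
    temperature: 0.7,
  });

  console.log(response.text);
  // The same token counts the 0.1.3-dev.0 build forwards to telemetry.
  console.log(response.usage.totalTokens);
}

main().catch(console.error);
```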
package/dist/index.js.map CHANGED
@@ -1 +1 @@
- {"version":3,"sources":["../src/index.ts"],"names":["getProviderModel","provider","model","createOpenAI","getConfig","createAnthropic","createGoogleGenerativeAI","generateText","options","restOptions","transaction","startTransaction","result","originalGenerateText","error","captureError"],"mappings":"uRA4BA,SAASA,CAAiBC,CAAAA,CAAAA,CAAoBC,CAAe,CAAA,CAC3D,OAAQD,CAAU,EAChB,KAAK,QAAA,CACH,OAAOE,YAAa,CAAA,CAClB,MAAQ,CAAA,MAAA,CAAOC,UAAU,uBAAuB,CAAC,CACnD,CAAC,EAAEF,CAAK,CAAA,CAEV,KAAK,WAAA,CACH,OAAOG,eAAgB,CAAA,CACrB,MAAQ,CAAA,MAAA,CAAOD,UAAU,0BAA0B,CAAC,CACtD,CAAC,EAAEF,CAAK,CAAA,CAEV,KAAK,QAAA,CACH,OAAOI,wBAAyB,CAAA,CAC9B,MAAQ,CAAA,MAAA,CAAOF,UAAU,uBAAuB,CAAC,CACnD,CAAC,CAAA,CAAEF,CAAK,CAEV,CAAA,QACE,MAAM,IAAI,MAAM,CAAyBD,sBAAAA,EAAAA,CAAQ,CAAE,CAAA,CACvD,CACF,CA2BA,eAAsBM,CAAaC,CAAAA,CAAAA,CAA8B,CAC/D,GAAM,CAAE,QAAAP,CAAAA,CAAAA,CAAU,MAAAC,CAAO,CAAA,GAAGO,CAAY,CAAA,CAAID,EAEtCE,CAAcC,CAAAA,gBAAAA,CAAiB,IAAM,CAAA,iBAAA,CAAmB,CAC5D,QAAAV,CAAAA,CAAAA,CACA,KAAAC,CAAAA,CAAAA,CACA,aAAc,KAAM,CAAA,OAAA,CAAQM,EAAQ,QAAQ,CAAA,CAAIA,EAAQ,QAAS,CAAA,MAAA,CAAS,CAC1E,CAAA,WAAA,CAAaA,EAAQ,WACvB,CAAC,CAED,CAAA,GAAI,CACF,IAAMI,CAAAA,CAAS,MAAMC,YAAAA,CAAqB,CACxC,KAAOb,CAAAA,CAAAA,CAAiBC,CAAUC,CAAAA,CAAK,EACvC,GAAGO,CACL,CAAC,CAAA,CAED,OAAAC,CAAY,CAAA,GAAA,EACLE,CAAAA,CACT,OAASE,CAAO,CAAA,CACd,MAAAC,YAAAA,CAAaD,CAAc,CAC3BJ,CAAAA,CAAAA,CAAY,IAAI,OAAO,CAAA,CACjBI,CACR,CACF","file":"index.js","sourcesContent":["import { getConfig } from 'modelence/server';\nimport { startTransaction, captureError } from 'modelence/telemetry';\nimport { generateText as originalGenerateText } from 'ai';\nimport { createOpenAI } from '@ai-sdk/openai';\nimport { createAnthropic } from '@ai-sdk/anthropic';\nimport { createGoogleGenerativeAI } from '@ai-sdk/google';\n\n/**\n * Supported AI providers for text generation.\n */\ntype Provider = 'openai' | 'anthropic' | 'google';\n\n// Extract the original generateText parameters and override the model property\ntype OriginalGenerateTextParams = Parameters<typeof originalGenerateText>[0];\n\n/**\n * Options for the Modelence generateText function.\n * \n * This interface extends all the standard AI SDK generateText options,\n * but replaces the model parameter with separate provider and model parameters.\n */\nexport interface GenerateTextOptions extends Omit<OriginalGenerateTextParams, 'model'> {\n /** The AI provider name */\n provider: Provider;\n /** The specific model name */\n model: string;\n}\n\nfunction getProviderModel(provider: Provider, model: string) {\n switch (provider) {\n case 'openai':\n return createOpenAI({\n apiKey: String(getConfig('_system.openai.apiKey')),\n })(model);\n \n case 'anthropic':\n return createAnthropic({\n apiKey: String(getConfig('_system.anthropic.apiKey')),\n })(model);\n \n case 'google':\n return createGoogleGenerativeAI({\n apiKey: String(getConfig('_system.google.apiKey')),\n })(model);\n \n default:\n throw new Error(`Unsupported provider: ${provider}`);\n }\n}\n\n/**\n * Generates text using AI models with built-in Modelence configuration and telemetry.\n * \n * This is a wrapper around the AI SDK's generateText function that automatically\n * configures providers using Modelence's server-side configuration system.\n * \n * @param options - Configuration options for text generation\n * @returns A promise that resolves to the generated text result\n * \n * @example\n * ```typescript\n * import { generateText } from '@modelence/ai';\n * \n * const response = await generateText({\n * provider: 'openai',\n * model: 'gpt-4o',\n * messages: [\n * { 
role: 'user', content: 'Write a haiku about programming' }\n * ],\n * temperature: 0.7\n * });\n * \n * console.log(response.text);\n * ```\n */\nexport async function generateText(options: GenerateTextOptions) {\n const { provider, model, ...restOptions } = options;\n \n const transaction = startTransaction('ai', 'ai:generateText', {\n provider, \n model,\n messageCount: Array.isArray(options.messages) ? options.messages.length : 0,\n temperature: options.temperature\n });\n\n try {\n const result = await originalGenerateText({\n model: getProviderModel(provider, model),\n ...restOptions,\n });\n \n transaction.end();\n return result;\n } catch (error) {\n captureError(error as Error);\n transaction.end('error');\n throw error;\n }\n}\n"]}
+ {"version":3,"sources":["../src/index.ts"],"names":["getProviderModel","provider","model","createOpenAI","getConfig","createAnthropic","createGoogleGenerativeAI","generateText","options","restOptions","transaction","startTransaction","result","originalGenerateText","error","captureError"],"mappings":"uRA4BA,SAASA,CAAAA,CAAiBC,EAAoBC,CAAe,CAAA,CAC3D,OAAQD,CAAU,EAChB,KAAK,QACH,CAAA,OAAOE,aAAa,CAClB,MAAA,CAAQ,OAAOC,SAAU,CAAA,uBAAuB,CAAC,CACnD,CAAC,EAAEF,CAAK,CAAA,CAEV,KAAK,WACH,CAAA,OAAOG,gBAAgB,CACrB,MAAA,CAAQ,OAAOD,SAAU,CAAA,0BAA0B,CAAC,CACtD,CAAC,EAAEF,CAAK,CAAA,CAEV,KAAK,QACH,CAAA,OAAOI,yBAAyB,CAC9B,MAAA,CAAQ,OAAOF,SAAU,CAAA,uBAAuB,CAAC,CACnD,CAAC,EAAEF,CAAK,CAAA,CAEV,QACE,MAAM,IAAI,MAAM,CAAyBD,sBAAAA,EAAAA,CAAQ,EAAE,CACvD,CACF,CA2BA,eAAsBM,CAAAA,CAAaC,EAA8B,CAC/D,GAAM,CAAE,QAAA,CAAAP,EAAU,KAAAC,CAAAA,CAAAA,CAAO,GAAGO,CAAY,CAAA,CAAID,EAEtCE,CAAcC,CAAAA,gBAAAA,CAAiB,KAAM,iBAAmB,CAAA,CAC5D,SAAAV,CACA,CAAA,KAAA,CAAAC,EACA,YAAc,CAAA,KAAA,CAAM,QAAQM,CAAQ,CAAA,QAAQ,EAAIA,CAAQ,CAAA,QAAA,CAAS,OAAS,CAC1E,CAAA,WAAA,CAAaA,EAAQ,WACvB,CAAC,EAED,GAAI,CACF,IAAMI,CAAS,CAAA,MAAMC,aAAqB,CACxC,KAAA,CAAOb,EAAiBC,CAAUC,CAAAA,CAAK,EACvC,GAAGO,CACL,CAAC,CAED,CAAA,OAAI,eAAgBC,CAClBA,CAAAA,CAAAA,CAAY,IAAI,SAAW,CAAA,CACzB,QAAS,CACP,KAAA,CAAO,CACL,YAAcE,CAAAA,CAAAA,CAAO,MAAM,YAC3B,CAAA,gBAAA,CAAkBA,EAAO,KAAM,CAAA,gBAAA,CAC/B,YAAaA,CAAO,CAAA,KAAA,CAAM,WAC5B,CACF,CACF,CAAC,CAIDF,CAAAA,CAAAA,CAAY,IAAI,SAAS,CAAA,CAEpBE,CACT,CAASE,MAAAA,CAAAA,CAAO,CACd,MAAAC,YAAAA,CAAaD,CAAc,CAC3BJ,CAAAA,CAAAA,CAAY,IAAI,OAAO,CAAA,CACjBI,CACR,CACF","file":"index.js","sourcesContent":["import { getConfig } from 'modelence/server';\nimport { startTransaction, captureError } from 'modelence/telemetry';\nimport { generateText as originalGenerateText } from 'ai';\nimport { createOpenAI } from '@ai-sdk/openai';\nimport { createAnthropic } from '@ai-sdk/anthropic';\nimport { createGoogleGenerativeAI } from '@ai-sdk/google';\n\n/**\n * Supported AI providers for text generation.\n */\ntype Provider = 'openai' | 'anthropic' | 'google';\n\n// Extract the original generateText parameters and override the model property\ntype OriginalGenerateTextParams = Parameters<typeof originalGenerateText>[0];\n\n/**\n * Options for the Modelence generateText function.\n * \n * This interface extends all the standard AI SDK generateText options,\n * but replaces the model parameter with separate provider and model parameters.\n */\nexport interface GenerateTextOptions extends Omit<OriginalGenerateTextParams, 'model'> {\n /** The AI provider name */\n provider: Provider;\n /** The specific model name */\n model: string;\n}\n\nfunction getProviderModel(provider: Provider, model: string) {\n switch (provider) {\n case 'openai':\n return createOpenAI({\n apiKey: String(getConfig('_system.openai.apiKey')),\n })(model);\n \n case 'anthropic':\n return createAnthropic({\n apiKey: String(getConfig('_system.anthropic.apiKey')),\n })(model);\n \n case 'google':\n return createGoogleGenerativeAI({\n apiKey: String(getConfig('_system.google.apiKey')),\n })(model);\n \n default:\n throw new Error(`Unsupported provider: ${provider}`);\n }\n}\n\n/**\n * Generates text using AI models with built-in Modelence configuration and telemetry.\n * \n * This is a wrapper around the AI SDK's generateText function that automatically\n * configures providers using Modelence's server-side configuration system.\n * \n * @param options - Configuration options for text generation\n * @returns A promise that resolves to the generated text result\n * \n * @example\n * ```typescript\n * import 
{ generateText } from '@modelence/ai';\n * \n * const response = await generateText({\n * provider: 'openai',\n * model: 'gpt-4o',\n * messages: [\n * { role: 'user', content: 'Write a haiku about programming' }\n * ],\n * temperature: 0.7\n * });\n * \n * console.log(response.text);\n * ```\n */\nexport async function generateText(options: GenerateTextOptions) {\n const { provider, model, ...restOptions } = options;\n \n const transaction = startTransaction('ai', 'ai:generateText', {\n provider, \n model,\n messageCount: Array.isArray(options.messages) ? options.messages.length : 0,\n temperature: options.temperature\n });\n\n try {\n const result = await originalGenerateText({\n model: getProviderModel(provider, model),\n ...restOptions,\n });\n \n if ('setContext' in transaction) {\n transaction.end('success', {\n context: {\n usage: {\n promptTokens: result.usage.promptTokens,\n completionTokens: result.usage.completionTokens,\n totalTokens: result.usage.totalTokens,\n }\n }\n });\n } else {\n // Backwards compatibility for older versions of Modelence\n // @ts-ignore\n transaction.end('success');\n }\n return result;\n } catch (error) {\n captureError(error as Error);\n transaction.end('error');\n throw error;\n }\n}\n"]}
package/package.json CHANGED
@@ -1,7 +1,7 @@
  {
  "type": "module",
  "name": "@modelence/ai",
- "version": "0.1.1",
+ "version": "0.1.3-dev.0",
  "description": "Modelence AI engine",
  "exports": {
  ".": {
@@ -25,10 +25,10 @@
  "ai": "^4.3.16"
  },
  "peerDependencies": {
- "modelence": ">=0.5.1"
+ "modelence": "*"
  },
  "devDependencies": {
- "modelence": "^0.5.1",
+ "modelence": "^0.5.7-mod-51.0",
  "tsup": "^8.3.6",
  "typescript": "^5.7.2"
  }
package/src/index.ts CHANGED
@@ -89,7 +89,21 @@ export async function generateText(options: GenerateTextOptions) {
  ...restOptions,
  });
 
- transaction.end();
+ if ('setContext' in transaction) {
+ transaction.end('success', {
+ context: {
+ usage: {
+ promptTokens: result.usage.promptTokens,
+ completionTokens: result.usage.completionTokens,
+ totalTokens: result.usage.totalTokens,
+ }
+ }
+ });
+ } else {
+ // Backwards compatibility for older versions of Modelence
+ // @ts-ignore
+ transaction.end('success');
+ }
  return result;
  } catch (error) {
  captureError(error as Error);
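The `'setContext' in transaction` test is a runtime capability check: newer `modelence/telemetry` transactions accept a context payload on `end()`, while older ones only take a status string, which is consistent with the `modelence` peer range being widened to `*` in this release. Below is a minimal sketch of the idiom, using hypothetical `LegacyTransaction` / `ContextTransaction` interfaces as stand-ins for the real Modelence types:

```typescript
// Hypothetical stand-ins for the two transaction shapes the diff distinguishes;
// the real types come from 'modelence/telemetry' and are not spelled out here.
interface LegacyTransaction {
  end(status?: string): void;
}

interface ContextTransaction {
  setContext(context: Record<string, unknown>): void;
  end(status?: string, options?: { context?: Record<string, unknown> }): void;
}

type TelemetryTransaction = LegacyTransaction | ContextTransaction;

function endWithUsage(
  transaction: TelemetryTransaction,
  usage: { promptTokens: number; completionTokens: number; totalTokens: number }
): void {
  if ('setContext' in transaction) {
    // Narrowed to ContextTransaction: attach token usage to the transaction context.
    transaction.end('success', { context: { usage } });
  } else {
    // Narrowed to LegacyTransaction: plain end(), no context payload.
    transaction.end('success');
  }
}
```

In the published source the else branch also carries a `@ts-ignore`, suggesting the package is compiled against the newer telemetry typings while still tolerating the older runtime.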
package/typedoc.json ADDED
@@ -0,0 +1,10 @@
+ {
+ "$schema": "https://typedoc.org/schema.json",
+ "readme": "none",
+ "excludePrivate": true,
+ "excludeInternal": true,
+ "exclude": [
+ "**/node_modules/**",
+ "**/dist/**"
+ ]
+ }