@iqai/adk 0.1.19 → 0.1.20

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/CHANGELOG.md CHANGED
@@ -1,5 +1,11 @@
  # @iqai/adk
 
+ ## 0.1.20
+
+ ### Patch Changes
+
+ - 85473c7: Fix OpenAI and AI SDK LLMs not taking in the schema from MCP tools
+
  ## 0.1.19
 
  ### Patch Changes
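Context for the fix above: the type diffs below show new `transformSchemaForAiSdk` and `transformSchemaForOpenAi` helpers on the affected LLM classes. The actual implementation is not part of this diff; purely as a hedged sketch, a recursive pass that lowercases JSON Schema `type` values (MCP tools can emit Gemini-style uppercase type names such as "STRING") might look like this, with all names below being illustrative:

```ts
// Illustrative only - not the @iqai/adk implementation.
// Recursively lowercase JSON Schema "type" values so schemas produced by
// MCP tools ("STRING", "OBJECT", ...) validate with AI SDK / OpenAI tooling.
type JsonSchema = { [key: string]: unknown };

function lowercaseSchemaTypes(schema: JsonSchema): JsonSchema {
  const out: JsonSchema = {};
  for (const [key, value] of Object.entries(schema)) {
    if (key === "type" && typeof value === "string") {
      out[key] = value.toLowerCase();
    } else if (Array.isArray(value)) {
      // e.g. anyOf / oneOf arrays of nested schemas
      out[key] = value.map((v) =>
        v !== null && typeof v === "object" ? lowercaseSchemaTypes(v as JsonSchema) : v,
      );
    } else if (value !== null && typeof value === "object") {
      // e.g. properties / items nested schemas
      out[key] = lowercaseSchemaTypes(value as JsonSchema);
    } else {
      out[key] = value;
    }
  }
  return out;
}
```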
package/dist/index.d.mts CHANGED
@@ -2577,76 +2577,46 @@ declare class LlmRequest {
  }
 
  /**
- * Google LLM Variant enum
- */
- declare enum GoogleLLMVariant {
- VERTEX_AI = "VERTEX_AI",
- GEMINI_API = "GEMINI_API"
- }
- /**
- * Integration for Gemini models.
+ * AI SDK integration that accepts a pre-configured LanguageModel.
+ * Enables ADK to work with any provider supported by Vercel's AI SDK.
  */
- declare class GoogleLlm extends BaseLlm {
- private _apiClient?;
- private _liveApiClient?;
- private _apiBackend?;
- private _trackingHeaders?;
+ declare class AiSdkLlm extends BaseLlm {
+ private modelInstance;
+ protected logger: Logger;
  /**
- * Constructor for Gemini
+ * Constructor accepts a pre-configured LanguageModel instance
+ * @param model - Pre-configured LanguageModel from provider(modelName)
  */
- constructor(model?: string);
+ constructor(modelInstance: LanguageModel);
  /**
- * Provides the list of supported models.
+ * Returns empty array - following Python ADK pattern
  */
  static supportedModels(): string[];
+ protected generateContentAsyncImpl(request: LlmRequest, stream?: boolean): AsyncGenerator<LlmResponse, void, unknown>;
  /**
- * Main content generation method - handles both streaming and non-streaming
- */
- protected generateContentAsyncImpl(llmRequest: LlmRequest, stream?: boolean): AsyncGenerator<LlmResponse, void, unknown>;
- /**
- * Connects to the Gemini model and returns an llm connection.
- */
- connect(_llmRequest: LlmRequest): BaseLLMConnection;
- /**
- * Check if response has inline data
- */
- private hasInlineData;
- /**
- * Convert LlmRequest contents to GoogleGenAI format
- */
- private convertContents;
- /**
- * Preprocesses the request based on the API backend.
- */
- private preprocessRequest;
- /**
- * Sets display_name to null for the Gemini API (non-Vertex) backend.
- */
- private removeDisplayNameIfPresent;
- /**
- * Builds function declaration log string.
+ * Convert ADK LlmRequest to AI SDK CoreMessage format
  */
- private buildFunctionDeclarationLog;
+ private convertToAiSdkMessages;
  /**
- * Provides the api client.
+ * Transform JSON schema to use lowercase types for AI SDK compatibility
  */
- get apiClient(): GoogleGenAI;
+ private transformSchemaForAiSdk;
  /**
- * Gets the API backend type.
+ * Convert ADK tools to AI SDK tools format
  */
- get apiBackend(): GoogleLLMVariant;
+ private convertToAiSdkTools;
  /**
- * Gets the tracking headers.
+ * Convert ADK Content to AI SDK CoreMessage
  */
- get trackingHeaders(): Record<string, string>;
+ private contentToAiSdkMessage;
  /**
- * Gets the live API version.
+ * Map ADK role to AI SDK role
  */
- get liveApiVersion(): string;
+ private mapRole;
  /**
- * Gets the live API client.
+ * Map AI SDK finish reason to ADK finish reason
  */
- get liveApiClient(): GoogleGenAI;
+ private mapFinishReason;
  }
 
  /**
@@ -2709,6 +2679,79 @@ declare class AnthropicLlm extends BaseLlm {
  private get client();
  }
 
+ /**
+ * Google LLM Variant enum
+ */
+ declare enum GoogleLLMVariant {
+ VERTEX_AI = "VERTEX_AI",
+ GEMINI_API = "GEMINI_API"
+ }
+ /**
+ * Integration for Gemini models.
+ */
+ declare class GoogleLlm extends BaseLlm {
+ private _apiClient?;
+ private _liveApiClient?;
+ private _apiBackend?;
+ private _trackingHeaders?;
+ /**
+ * Constructor for Gemini
+ */
+ constructor(model?: string);
+ /**
+ * Provides the list of supported models.
+ */
+ static supportedModels(): string[];
+ /**
+ * Main content generation method - handles both streaming and non-streaming
+ */
+ protected generateContentAsyncImpl(llmRequest: LlmRequest, stream?: boolean): AsyncGenerator<LlmResponse, void, unknown>;
+ /**
+ * Connects to the Gemini model and returns an llm connection.
+ */
+ connect(_llmRequest: LlmRequest): BaseLLMConnection;
+ /**
+ * Check if response has inline data
+ */
+ private hasInlineData;
+ /**
+ * Convert LlmRequest contents to GoogleGenAI format
+ */
+ private convertContents;
+ /**
+ * Preprocesses the request based on the API backend.
+ */
+ private preprocessRequest;
+ /**
+ * Sets display_name to null for the Gemini API (non-Vertex) backend.
+ */
+ private removeDisplayNameIfPresent;
+ /**
+ * Builds function declaration log string.
+ */
+ private buildFunctionDeclarationLog;
+ /**
+ * Provides the api client.
+ */
+ get apiClient(): GoogleGenAI;
+ /**
+ * Gets the API backend type.
+ */
+ get apiBackend(): GoogleLLMVariant;
+ /**
+ * Gets the tracking headers.
+ */
+ get trackingHeaders(): Record<string, string>;
+ /**
+ * Gets the live API version.
+ */
+ get liveApiVersion(): string;
+ /**
+ * Gets the live API client.
+ */
+ get liveApiClient(): GoogleGenAI;
+ }
+
  /**
  * OpenAI LLM implementation using GPT models
  * Enhanced with comprehensive debug logging similar to Google LLM
@@ -2747,6 +2790,10 @@ declare class OpenAiLlm extends BaseLlm {
  * Convert ADK Part to OpenAI message content
  */
  private partToOpenAiContent;
+ /**
+ * Transform JSON schema to use lowercase types for OpenAI compatibility
+ */
+ private transformSchemaForOpenAi;
  /**
  * Convert ADK function declaration to OpenAI tool
  */
@@ -2782,45 +2829,6 @@ declare class OpenAiLlm extends BaseLlm {
  private get client();
  }
 
- /**
- * AI SDK integration that accepts a pre-configured LanguageModel.
- * Enables ADK to work with any provider supported by Vercel's AI SDK.
- */
- declare class AiSdkLlm extends BaseLlm {
- private modelInstance;
- protected logger: Logger;
- /**
- * Constructor accepts a pre-configured LanguageModel instance
- * @param model - Pre-configured LanguageModel from provider(modelName)
- */
- constructor(modelInstance: LanguageModel);
- /**
- * Returns empty array - following Python ADK pattern
- */
- static supportedModels(): string[];
- protected generateContentAsyncImpl(request: LlmRequest, stream?: boolean): AsyncGenerator<LlmResponse, void, unknown>;
- /**
- * Convert ADK LlmRequest to AI SDK CoreMessage format
- */
- private convertToAiSdkMessages;
- /**
- * Convert ADK tools to AI SDK tools format
- */
- private convertToAiSdkTools;
- /**
- * Convert ADK Content to AI SDK CoreMessage
- */
- private contentToAiSdkMessage;
- /**
- * Map ADK role to AI SDK role
- */
- private mapRole;
- /**
- * Map AI SDK finish reason to ADK finish reason
- */
- private mapFinishReason;
- }
-
  /**
  * Type for LLM constructor with static methods
  */
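Reading the hunks above together: `GoogleLlm` moved below `AnthropicLlm` unchanged, while `AiSdkLlm` moved to the top of the section, gaining the new `transformSchemaForAiSdk` member; `OpenAiLlm` gained the analogous `transformSchemaForOpenAi`. A minimal usage sketch of the declared `AiSdkLlm` constructor, assuming the `@ai-sdk/openai` provider package and that `AiSdkLlm` is exported from the package root:

```ts
import { openai } from "@ai-sdk/openai";
import { AiSdkLlm } from "@iqai/adk";

// AiSdkLlm wraps a pre-configured Vercel AI SDK LanguageModel rather than
// a model-name string; any AI SDK provider factory should work here.
// As of 0.1.20, tool schemas coming from MCP tools are normalized
// (transformSchemaForAiSdk) before being handed to the AI SDK.
const llm = new AiSdkLlm(openai("gpt-4o-mini"));
```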
package/dist/index.d.ts CHANGED
@@ -2577,76 +2577,46 @@ declare class LlmRequest {
  }
 
  /**
- * Google LLM Variant enum
- */
- declare enum GoogleLLMVariant {
- VERTEX_AI = "VERTEX_AI",
- GEMINI_API = "GEMINI_API"
- }
- /**
- * Integration for Gemini models.
+ * AI SDK integration that accepts a pre-configured LanguageModel.
+ * Enables ADK to work with any provider supported by Vercel's AI SDK.
  */
- declare class GoogleLlm extends BaseLlm {
- private _apiClient?;
- private _liveApiClient?;
- private _apiBackend?;
- private _trackingHeaders?;
+ declare class AiSdkLlm extends BaseLlm {
+ private modelInstance;
+ protected logger: Logger;
  /**
- * Constructor for Gemini
+ * Constructor accepts a pre-configured LanguageModel instance
+ * @param model - Pre-configured LanguageModel from provider(modelName)
  */
- constructor(model?: string);
+ constructor(modelInstance: LanguageModel);
  /**
- * Provides the list of supported models.
+ * Returns empty array - following Python ADK pattern
  */
  static supportedModels(): string[];
+ protected generateContentAsyncImpl(request: LlmRequest, stream?: boolean): AsyncGenerator<LlmResponse, void, unknown>;
  /**
- * Main content generation method - handles both streaming and non-streaming
- */
- protected generateContentAsyncImpl(llmRequest: LlmRequest, stream?: boolean): AsyncGenerator<LlmResponse, void, unknown>;
- /**
- * Connects to the Gemini model and returns an llm connection.
- */
- connect(_llmRequest: LlmRequest): BaseLLMConnection;
- /**
- * Check if response has inline data
- */
- private hasInlineData;
- /**
- * Convert LlmRequest contents to GoogleGenAI format
- */
- private convertContents;
- /**
- * Preprocesses the request based on the API backend.
- */
- private preprocessRequest;
- /**
- * Sets display_name to null for the Gemini API (non-Vertex) backend.
- */
- private removeDisplayNameIfPresent;
- /**
- * Builds function declaration log string.
+ * Convert ADK LlmRequest to AI SDK CoreMessage format
  */
- private buildFunctionDeclarationLog;
+ private convertToAiSdkMessages;
  /**
- * Provides the api client.
+ * Transform JSON schema to use lowercase types for AI SDK compatibility
  */
- get apiClient(): GoogleGenAI;
+ private transformSchemaForAiSdk;
  /**
- * Gets the API backend type.
+ * Convert ADK tools to AI SDK tools format
  */
- get apiBackend(): GoogleLLMVariant;
+ private convertToAiSdkTools;
  /**
- * Gets the tracking headers.
+ * Convert ADK Content to AI SDK CoreMessage
  */
- get trackingHeaders(): Record<string, string>;
+ private contentToAiSdkMessage;
  /**
- * Gets the live API version.
+ * Map ADK role to AI SDK role
  */
- get liveApiVersion(): string;
+ private mapRole;
  /**
- * Gets the live API client.
+ * Map AI SDK finish reason to ADK finish reason
  */
- get liveApiClient(): GoogleGenAI;
+ private mapFinishReason;
  }
 
  /**
@@ -2709,6 +2679,79 @@ declare class AnthropicLlm extends BaseLlm {
  private get client();
  }
 
+ /**
+ * Google LLM Variant enum
+ */
+ declare enum GoogleLLMVariant {
+ VERTEX_AI = "VERTEX_AI",
+ GEMINI_API = "GEMINI_API"
+ }
+ /**
+ * Integration for Gemini models.
+ */
+ declare class GoogleLlm extends BaseLlm {
+ private _apiClient?;
+ private _liveApiClient?;
+ private _apiBackend?;
+ private _trackingHeaders?;
+ /**
+ * Constructor for Gemini
+ */
+ constructor(model?: string);
+ /**
+ * Provides the list of supported models.
+ */
+ static supportedModels(): string[];
+ /**
+ * Main content generation method - handles both streaming and non-streaming
+ */
+ protected generateContentAsyncImpl(llmRequest: LlmRequest, stream?: boolean): AsyncGenerator<LlmResponse, void, unknown>;
+ /**
+ * Connects to the Gemini model and returns an llm connection.
+ */
+ connect(_llmRequest: LlmRequest): BaseLLMConnection;
+ /**
+ * Check if response has inline data
+ */
+ private hasInlineData;
+ /**
+ * Convert LlmRequest contents to GoogleGenAI format
+ */
+ private convertContents;
+ /**
+ * Preprocesses the request based on the API backend.
+ */
+ private preprocessRequest;
+ /**
+ * Sets display_name to null for the Gemini API (non-Vertex) backend.
+ */
+ private removeDisplayNameIfPresent;
+ /**
+ * Builds function declaration log string.
+ */
+ private buildFunctionDeclarationLog;
+ /**
+ * Provides the api client.
+ */
+ get apiClient(): GoogleGenAI;
+ /**
+ * Gets the API backend type.
+ */
+ get apiBackend(): GoogleLLMVariant;
+ /**
+ * Gets the tracking headers.
+ */
+ get trackingHeaders(): Record<string, string>;
+ /**
+ * Gets the live API version.
+ */
+ get liveApiVersion(): string;
+ /**
+ * Gets the live API client.
+ */
+ get liveApiClient(): GoogleGenAI;
+ }
+
  /**
  * OpenAI LLM implementation using GPT models
  * Enhanced with comprehensive debug logging similar to Google LLM
@@ -2747,6 +2790,10 @@ declare class OpenAiLlm extends BaseLlm {
  * Convert ADK Part to OpenAI message content
  */
  private partToOpenAiContent;
+ /**
+ * Transform JSON schema to use lowercase types for OpenAI compatibility
+ */
+ private transformSchemaForOpenAi;
  /**
  * Convert ADK function declaration to OpenAI tool
  */
@@ -2782,45 +2829,6 @@ declare class OpenAiLlm extends BaseLlm {
  private get client();
  }
 
- /**
- * AI SDK integration that accepts a pre-configured LanguageModel.
- * Enables ADK to work with any provider supported by Vercel's AI SDK.
- */
- declare class AiSdkLlm extends BaseLlm {
- private modelInstance;
- protected logger: Logger;
- /**
- * Constructor accepts a pre-configured LanguageModel instance
- * @param model - Pre-configured LanguageModel from provider(modelName)
- */
- constructor(modelInstance: LanguageModel);
- /**
- * Returns empty array - following Python ADK pattern
- */
- static supportedModels(): string[];
- protected generateContentAsyncImpl(request: LlmRequest, stream?: boolean): AsyncGenerator<LlmResponse, void, unknown>;
- /**
- * Convert ADK LlmRequest to AI SDK CoreMessage format
- */
- private convertToAiSdkMessages;
- /**
- * Convert ADK tools to AI SDK tools format
- */
- private convertToAiSdkTools;
- /**
- * Convert ADK Content to AI SDK CoreMessage
- */
- private contentToAiSdkMessage;
- /**
- * Map ADK role to AI SDK role
- */
- private mapRole;
- /**
- * Map AI SDK finish reason to ADK finish reason
- */
- private mapFinishReason;
- }
-
  /**
  * Type for LLM constructor with static methods
  */