From db975dfe2a09a6d056d02bc03c1247ac10f6da7d Mon Sep 17 00:00:00 2001 From: Liang Wu Date: Thu, 31 Jul 2025 15:59:07 -0700 Subject: [PATCH 01/41] chore: prevent triggering of _load_from_yaml_config in AgentLoader PiperOrigin-RevId: 789502695 --- src/google/adk/cli/utils/agent_loader.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/google/adk/cli/utils/agent_loader.py b/src/google/adk/cli/utils/agent_loader.py index 5b8924871..0bc44abd8 100644 --- a/src/google/adk/cli/utils/agent_loader.py +++ b/src/google/adk/cli/utils/agent_loader.py @@ -178,7 +178,9 @@ def _perform_load(self, agent_name: str) -> BaseAgent: if root_agent := self._load_from_submodule(agent_name): return root_agent - if root_agent := self._load_from_yaml_config(agent_name): + if os.getenv("ADK_ALLOW_WIP_FEATURES") and ( + root_agent := self._load_from_yaml_config(agent_name) + ): return root_agent # If no root_agent was found by any pattern From 9656ccc4075df17d8f6835927cd7560e12980500 Mon Sep 17 00:00:00 2001 From: Liang Wu Date: Thu, 31 Jul 2025 23:32:56 -0700 Subject: [PATCH 02/41] feat(config): add GenerateContentConfig to LlmAgentConfig PiperOrigin-RevId: 789631181 --- .../agents/config_schemas/AgentConfig.json | 4407 +++++++++++++++-- src/google/adk/agents/llm_agent.py | 2 + src/google/adk/agents/llm_agent_config.py | 4 + 3 files changed, 4135 insertions(+), 278 deletions(-) diff --git a/src/google/adk/agents/config_schemas/AgentConfig.json b/src/google/adk/agents/config_schemas/AgentConfig.json index fdf025485..08d14cd34 100644 --- a/src/google/adk/agents/config_schemas/AgentConfig.json +++ b/src/google/adk/agents/config_schemas/AgentConfig.json @@ -32,270 +32,244 @@ "title": "AgentRefConfig", "type": "object" }, - "ArgumentConfig": { + "ApiAuth": { "additionalProperties": false, - "description": "An argument passed to a function or a class's constructor.", + "description": "The generic reusable api auth config.\n\nDeprecated. Please use AuthConfig (google/cloud/aiplatform/master/auth.proto)\ninstead.", "properties": { - "name": { + "apiKeyConfig": { "anyOf": [ { - "type": "string" + "$ref": "#/$defs/ApiAuthApiKeyConfig" }, { "type": "null" } ], "default": null, - "title": "Name" - }, - "value": { - "title": "Value" + "description": "The API secret." } }, - "required": [ - "value" - ], - "title": "ArgumentConfig", + "title": "ApiAuth", "type": "object" }, - "BaseAgentConfig": { - "additionalProperties": true, - "description": "The config for the YAML schema of a BaseAgent.\n\nDo not use this class directly. It's the base class for all agent configs.", + "ApiAuthApiKeyConfig": { + "additionalProperties": false, + "description": "The API secret.", "properties": { - "agent_class": { + "apiKeySecretVersion": { "anyOf": [ - { - "const": "BaseAgent", - "type": "string" - }, { "type": "string" - } - ], - "default": "BaseAgent", - "title": "Agent Class" - }, - "name": { - "title": "Name", - "type": "string" - }, - "description": { - "default": "", - "title": "Description", - "type": "string" - }, - "sub_agents": { - "anyOf": [ - { - "items": { - "$ref": "#/$defs/AgentRefConfig" - }, - "type": "array" }, { "type": "null" } ], "default": null, - "title": "Sub Agents" + "description": "Required. The SecretManager secret version resource name storing API key. e.g. 
projects/{project}/secrets/{secret}/versions/{version}", + "title": "Apikeysecretversion" }, - "before_agent_callbacks": { + "apiKeyString": { "anyOf": [ { - "items": { - "$ref": "#/$defs/CodeConfig" - }, - "type": "array" + "type": "string" }, { "type": "null" } ], "default": null, - "title": "Before Agent Callbacks" - }, - "after_agent_callbacks": { + "description": "The API key string. Either this or `api_key_secret_version` must be set.", + "title": "Apikeystring" + } + }, + "title": "ApiAuthApiKeyConfig", + "type": "object" + }, + "ApiKeyConfig": { + "additionalProperties": false, + "description": "Config for authentication with API key.", + "properties": { + "apiKeyString": { "anyOf": [ { - "items": { - "$ref": "#/$defs/CodeConfig" - }, - "type": "array" + "type": "string" }, { "type": "null" } ], "default": null, - "title": "After Agent Callbacks" + "description": "The API key to be used in the request directly.", + "title": "Apikeystring" } }, - "required": [ - "name" - ], - "title": "BaseAgentConfig", + "title": "ApiKeyConfig", "type": "object" }, - "CodeConfig": { + "ApiSpec": { + "description": "The API spec that the external API implements.", + "enum": [ + "API_SPEC_UNSPECIFIED", + "SIMPLE_SEARCH", + "ELASTIC_SEARCH" + ], + "title": "ApiSpec", + "type": "string" + }, + "ArgumentConfig": { "additionalProperties": false, - "description": "Code reference config for a variable, a function, or a class.\n\nThis config is used for configuring callbacks and tools.", + "description": "An argument passed to a function or a class's constructor.", "properties": { "name": { - "title": "Name", - "type": "string" - }, - "args": { "anyOf": [ { - "items": { - "$ref": "#/$defs/ArgumentConfig" - }, - "type": "array" + "type": "string" }, { "type": "null" } ], "default": null, - "title": "Args" + "title": "Name" + }, + "value": { + "title": "Value" } }, "required": [ - "name" + "value" ], - "title": "CodeConfig", + "title": "ArgumentConfig", "type": "object" }, - "LlmAgentConfig": { + "AuthConfig": { "additionalProperties": false, - "description": "The config for the YAML schema of a LlmAgent.", + "description": "Auth configuration to run the extension.", "properties": { - "agent_class": { - "default": "LlmAgent", - "enum": [ - "LlmAgent", - "" - ], - "title": "Agent Class", - "type": "string" - }, - "name": { - "title": "Name", - "type": "string" - }, - "description": { - "default": "", - "title": "Description", - "type": "string" - }, - "sub_agents": { + "apiKeyConfig": { "anyOf": [ { - "items": { - "$ref": "#/$defs/AgentRefConfig" - }, - "type": "array" + "$ref": "#/$defs/ApiKeyConfig" }, { "type": "null" } ], "default": null, - "title": "Sub Agents" + "description": "Config for API key auth." }, - "before_agent_callbacks": { + "authType": { "anyOf": [ { - "items": { - "$ref": "#/$defs/CodeConfig" - }, - "type": "array" + "$ref": "#/$defs/AuthType" }, { "type": "null" } ], "default": null, - "title": "Before Agent Callbacks" + "description": "Type of auth scheme." }, - "after_agent_callbacks": { + "googleServiceAccountConfig": { "anyOf": [ { - "items": { - "$ref": "#/$defs/CodeConfig" - }, - "type": "array" + "$ref": "#/$defs/AuthConfigGoogleServiceAccountConfig" }, { "type": "null" } ], "default": null, - "title": "After Agent Callbacks" + "description": "Config for Google Service Account auth." 
}, - "model": { + "httpBasicAuthConfig": { "anyOf": [ { - "type": "string" + "$ref": "#/$defs/AuthConfigHttpBasicAuthConfig" }, { "type": "null" } ], "default": null, - "title": "Model" - }, - "instruction": { - "title": "Instruction", - "type": "string" + "description": "Config for HTTP Basic auth." }, - "disallow_transfer_to_parent": { + "oauthConfig": { "anyOf": [ { - "type": "boolean" + "$ref": "#/$defs/AuthConfigOauthConfig" }, { "type": "null" } ], "default": null, - "title": "Disallow Transfer To Parent" + "description": "Config for user oauth." }, - "disallow_transfer_to_peers": { + "oidcConfig": { "anyOf": [ { - "type": "boolean" + "$ref": "#/$defs/AuthConfigOidcConfig" }, { "type": "null" } ], "default": null, - "title": "Disallow Transfer To Peers" - }, - "input_schema": { + "description": "Config for user OIDC auth." + } + }, + "title": "AuthConfig", + "type": "object" + }, + "AuthConfigGoogleServiceAccountConfig": { + "additionalProperties": false, + "description": "Config for Google Service Account Authentication.", + "properties": { + "serviceAccount": { "anyOf": [ { - "$ref": "#/$defs/CodeConfig" + "type": "string" }, { "type": "null" } ], - "default": null - }, - "output_schema": { + "default": null, + "description": "Optional. The service account that the extension execution service runs as. - If the service account is specified, the `iam.serviceAccounts.getAccessToken` permission should be granted to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) on the specified service account. - If not specified, the Vertex AI Extension Service Agent will be used to execute the Extension.", + "title": "Serviceaccount" + } + }, + "title": "AuthConfigGoogleServiceAccountConfig", + "type": "object" + }, + "AuthConfigHttpBasicAuthConfig": { + "additionalProperties": false, + "description": "Config for HTTP Basic Authentication.", + "properties": { + "credentialSecret": { "anyOf": [ { - "$ref": "#/$defs/CodeConfig" + "type": "string" }, { "type": "null" } ], - "default": null - }, - "output_key": { + "default": null, + "description": "Required. The name of the SecretManager secret version resource storing the base64 encoded credentials. Format: `projects/{project}/secrets/{secrete}/versions/{version}` - If specified, the `secretmanager.versions.access` permission should be granted to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) on the specified resource.", + "title": "Credentialsecret" + } + }, + "title": "AuthConfigHttpBasicAuthConfig", + "type": "object" + }, + "AuthConfigOauthConfig": { + "additionalProperties": false, + "description": "Config for user oauth.", + "properties": { + "accessToken": { "anyOf": [ { "type": "string" @@ -305,109 +279,137 @@ } ], "default": null, - "title": "Output Key" + "description": "Access token for extension endpoint. Only used to propagate token from [[ExecuteExtensionRequest.runtime_auth_config]] at request time.", + "title": "Accesstoken" }, - "include_contents": { - "default": "default", - "enum": [ - "default", - "none" + "serviceAccount": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } ], - "title": "Include Contents", - "type": "string" - }, - "tools": { + "default": null, + "description": "The service account used to generate access tokens for executing the Extension. 
- If the service account is specified, the `iam.serviceAccounts.getAccessToken` permission should be granted to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) on the provided service account.", + "title": "Serviceaccount" + } + }, + "title": "AuthConfigOauthConfig", + "type": "object" + }, + "AuthConfigOidcConfig": { + "additionalProperties": false, + "description": "Config for user OIDC auth.", + "properties": { + "idToken": { "anyOf": [ { - "items": { - "$ref": "#/$defs/ToolConfig" - }, - "type": "array" + "type": "string" }, { "type": "null" } ], "default": null, - "title": "Tools" + "description": "OpenID Connect formatted ID token for extension endpoint. Only used to propagate token from [[ExecuteExtensionRequest.runtime_auth_config]] at request time.", + "title": "Idtoken" }, - "before_model_callbacks": { + "serviceAccount": { "anyOf": [ { - "items": { - "$ref": "#/$defs/CodeConfig" - }, - "type": "array" + "type": "string" }, { "type": "null" } ], "default": null, - "title": "Before Model Callbacks" - }, - "after_model_callbacks": { + "description": "The service account used to generate an OpenID Connect (OIDC)-compatible JWT token signed by the Google OIDC Provider (accounts.google.com) for extension endpoint (https://cloud.google.com/iam/docs/create-short-lived-credentials-direct#sa-credentials-oidc). - The audience for the token will be set to the URL in the server url defined in the OpenApi spec. - If the service account is provided, the service account should grant `iam.serviceAccounts.getOpenIdToken` permission to Vertex AI Extension Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents).", + "title": "Serviceaccount" + } + }, + "title": "AuthConfigOidcConfig", + "type": "object" + }, + "AuthType": { + "description": "Type of auth scheme.", + "enum": [ + "AUTH_TYPE_UNSPECIFIED", + "NO_AUTH", + "API_KEY_AUTH", + "HTTP_BASIC_AUTH", + "GOOGLE_SERVICE_ACCOUNT_AUTH", + "OAUTH", + "OIDC_AUTH" + ], + "title": "AuthType", + "type": "string" + }, + "AutomaticFunctionCallingConfig": { + "additionalProperties": false, + "description": "The configuration for automatic function calling.", + "properties": { + "disable": { "anyOf": [ { - "items": { - "$ref": "#/$defs/CodeConfig" - }, - "type": "array" + "type": "boolean" }, { "type": "null" } ], "default": null, - "title": "After Model Callbacks" + "description": "Whether to disable automatic function calling.\n If not set or set to False, will enable automatic function calling.\n If set to True, will disable automatic function calling.\n ", + "title": "Disable" }, - "before_tool_callbacks": { + "maximumRemoteCalls": { "anyOf": [ { - "items": { - "$ref": "#/$defs/CodeConfig" - }, - "type": "array" + "type": "integer" }, { "type": "null" } ], - "default": null, - "title": "Before Tool Callbacks" + "default": 10, + "description": "If automatic function calling is enabled,\n maximum number of remote calls for automatic function calling.\n This number should be a positive integer.\n If not set, SDK will set maximum number of remote calls to 10.\n ", + "title": "Maximumremotecalls" }, - "after_tool_callbacks": { + "ignoreCallHistory": { "anyOf": [ { - "items": { - "$ref": "#/$defs/CodeConfig" - }, - "type": "array" + "type": "boolean" }, { "type": "null" } ], "default": null, - "title": "After Tool Callbacks" + "description": "If automatic function calling is enabled,\n whether to ignore call history to the response.\n If not set, SDK will 
set ignore_call_history to false,\n and will append the call history to\n GenerateContentResponse.automatic_function_calling_history.\n ", + "title": "Ignorecallhistory" } }, - "required": [ - "name", - "instruction" - ], - "title": "LlmAgentConfig", + "title": "AutomaticFunctionCallingConfig", "type": "object" }, - "LoopAgentConfig": { - "additionalProperties": false, - "description": "The config for the YAML schema of a LoopAgent.", + "BaseAgentConfig": { + "additionalProperties": true, + "description": "The config for the YAML schema of a BaseAgent.\n\nDo not use this class directly. It's the base class for all agent configs.", "properties": { "agent_class": { - "const": "LoopAgent", - "default": "LoopAgent", - "title": "Agent Class", - "type": "string" + "anyOf": [ + { + "const": "BaseAgent", + "type": "string" + }, + { + "type": "string" + } + ], + "default": "BaseAgent", + "title": "Agent Class" }, "name": { "title": "Name", @@ -462,50 +464,85 @@ ], "default": null, "title": "After Agent Callbacks" + } + }, + "required": [ + "name" + ], + "title": "BaseAgentConfig", + "type": "object" + }, + "Behavior": { + "description": "Defines the function behavior. Defaults to `BLOCKING`.", + "enum": [ + "UNSPECIFIED", + "BLOCKING", + "NON_BLOCKING" + ], + "title": "Behavior", + "type": "string" + }, + "Blob": { + "additionalProperties": false, + "description": "Content blob.", + "properties": { + "displayName": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Optional. Display name of the blob. Used to provide a label or filename to distinguish blobs. This field is not currently used in the Gemini GenerateContent calls.", + "title": "Displayname" }, - "max_iterations": { + "data": { "anyOf": [ { - "type": "integer" + "format": "base64url", + "type": "string" }, { "type": "null" } ], "default": null, - "title": "Max Iterations" + "description": "Required. Raw bytes.", + "title": "Data" + }, + "mimeType": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Required. 
The IANA standard MIME type of the source data.", + "title": "Mimetype" } }, - "required": [ - "name" - ], - "title": "LoopAgentConfig", + "title": "Blob", "type": "object" }, - "ParallelAgentConfig": { + "CodeConfig": { "additionalProperties": false, - "description": "The config for the YAML schema of a ParallelAgent.", + "description": "Code reference config for a variable, a function, or a class.\n\nThis config is used for configuring callbacks and tools.", "properties": { - "agent_class": { - "const": "ParallelAgent", - "default": "ParallelAgent", - "title": "Agent Class", - "type": "string" - }, "name": { "title": "Name", "type": "string" }, - "description": { - "default": "", - "title": "Description", - "type": "string" - }, - "sub_agents": { + "args": { "anyOf": [ { "items": { - "$ref": "#/$defs/AgentRefConfig" + "$ref": "#/$defs/ArgumentConfig" }, "type": "array" }, @@ -514,69 +551,57 @@ } ], "default": null, - "title": "Sub Agents" - }, - "before_agent_callbacks": { + "title": "Args" + } + }, + "required": [ + "name" + ], + "title": "CodeConfig", + "type": "object" + }, + "CodeExecutionResult": { + "additionalProperties": false, + "description": "Result of executing the [ExecutableCode].\n\nOnly generated when using the [CodeExecution] tool, and always follows a\n`part` containing the [ExecutableCode].", + "properties": { + "outcome": { "anyOf": [ { - "items": { - "$ref": "#/$defs/CodeConfig" - }, - "type": "array" + "$ref": "#/$defs/Outcome" }, { "type": "null" } ], "default": null, - "title": "Before Agent Callbacks" + "description": "Required. Outcome of the code execution." }, - "after_agent_callbacks": { + "output": { "anyOf": [ { - "items": { - "$ref": "#/$defs/CodeConfig" - }, - "type": "array" + "type": "string" }, { "type": "null" } ], "default": null, - "title": "After Agent Callbacks" + "description": "Optional. Contains stdout when code execution is successful, stderr or other description otherwise.", + "title": "Output" } }, - "required": [ - "name" - ], - "title": "ParallelAgentConfig", + "title": "CodeExecutionResult", "type": "object" }, - "SequentialAgentConfig": { + "Content": { "additionalProperties": false, - "description": "The config for the YAML schema of a SequentialAgent.", + "description": "Contains the multi-part content of a message.", "properties": { - "agent_class": { - "const": "SequentialAgent", - "default": "SequentialAgent", - "title": "Agent Class", - "type": "string" - }, - "name": { - "title": "Name", - "type": "string" - }, - "description": { - "default": "", - "title": "Description", - "type": "string" - }, - "sub_agents": { + "parts": { "anyOf": [ { "items": { - "$ref": "#/$defs/AgentRefConfig" + "$ref": "#/$defs/Part" }, "type": "array" }, @@ -585,76 +610,3902 @@ } ], "default": null, - "title": "Sub Agents" + "description": "List of parts that constitute a single message. Each part may have\n a different IANA MIME type.", + "title": "Parts" }, - "before_agent_callbacks": { + "role": { "anyOf": [ { - "items": { - "$ref": "#/$defs/CodeConfig" - }, - "type": "array" + "type": "string" }, { "type": "null" } ], "default": null, - "title": "Before Agent Callbacks" + "description": "Optional. The producer of the content. Must be either 'user' or\n 'model'. Useful to set for multi-turn conversations, otherwise can be\n empty. 
If role is not specified, SDK will determine the role.", + "title": "Role" + } + }, + "title": "Content", + "type": "object" + }, + "DynamicRetrievalConfig": { + "additionalProperties": false, + "description": "Describes the options to customize dynamic retrieval.", + "properties": { + "mode": { + "anyOf": [ + { + "$ref": "#/$defs/DynamicRetrievalConfigMode" + }, + { + "type": "null" + } + ], + "default": null, + "description": "The mode of the predictor to be used in dynamic retrieval." }, - "after_agent_callbacks": { + "dynamicThreshold": { "anyOf": [ { - "items": { - "$ref": "#/$defs/CodeConfig" - }, - "type": "array" + "type": "number" }, { "type": "null" } ], "default": null, - "title": "After Agent Callbacks" + "description": "Optional. The threshold to be used in dynamic retrieval. If not set, a system default value is used.", + "title": "Dynamicthreshold" } }, - "required": [ - "name" - ], - "title": "SequentialAgentConfig", + "title": "DynamicRetrievalConfig", "type": "object" }, - "ToolArgsConfig": { - "additionalProperties": true, - "description": "The configuration for tool arguments.\n\nThis config allows arbitrary key-value pairs as tool arguments.", + "DynamicRetrievalConfigMode": { + "description": "Config for the dynamic retrieval config mode.", + "enum": [ + "MODE_UNSPECIFIED", + "MODE_DYNAMIC" + ], + "title": "DynamicRetrievalConfigMode", + "type": "string" + }, + "EnterpriseWebSearch": { + "additionalProperties": false, + "description": "Tool to search public web data, powered by Vertex AI Search and Sec4 compliance.", "properties": {}, - "title": "ToolArgsConfig", + "title": "EnterpriseWebSearch", "type": "object" }, - "ToolConfig": { + "Environment": { + "description": "Required. The environment being operated.", + "enum": [ + "ENVIRONMENT_UNSPECIFIED", + "ENVIRONMENT_BROWSER" + ], + "title": "Environment", + "type": "string" + }, + "ExecutableCode": { "additionalProperties": false, - "description": "The configuration for a tool.\n\nThe config supports these types of tools:\n1. ADK built-in tools\n2. User-defined tool instances\n3. User-defined tool classes\n4. User-defined functions that generate tool instances\n5. User-defined function tools\n\nFor examples:\n\n 1. For ADK built-in tool instances or classes in `google.adk.tools` package,\n they can be referenced directly with the `name` and optionally with\n `config`.\n\n ```\n tools:\n - name: google_search\n - name: AgentTool\n config:\n agent: ./another_agent.yaml\n skip_summarization: true\n ```\n\n 2. For user-defined tool instances, the `name` is the fully qualified path\n to the tool instance.\n\n ```\n tools:\n - name: my_package.my_module.my_tool\n ```\n\n 3. For user-defined tool classes (custom tools), the `name` is the fully\n qualified path to the tool class and `config` is the arguments for the tool.\n\n ```\n tools:\n - name: my_package.my_module.my_tool_class\n config:\n my_tool_arg1: value1\n my_tool_arg2: value2\n ```\n\n 4. For user-defined functions that generate tool instances, the `name` is the\n fully qualified path to the function and `config` is passed to the function\n as arguments.\n\n ```\n tools:\n - name: my_package.my_module.my_tool_function\n config:\n my_function_arg1: value1\n my_function_arg2: value2\n ```\n\n The function must have the following signature:\n ```\n def my_function(config: ToolArgsConfig) -> BaseTool:\n ...\n ```\n\n 5. 
For user-defined function tools, the `name` is the fully qualified path\n to the function.\n\n ```\n tools:\n - name: my_package.my_module.my_function_tool\n ```", + "description": "Code generated by the model that is meant to be executed, and the result returned to the model.\n\nGenerated when using the [CodeExecution] tool, in which the code will be\nautomatically executed, and a corresponding [CodeExecutionResult] will also be\ngenerated.", "properties": { - "name": { - "title": "Name", - "type": "string" + "code": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Required. The code to be executed.", + "title": "Code" }, - "args": { + "language": { "anyOf": [ { - "$ref": "#/$defs/ToolArgsConfig" + "$ref": "#/$defs/Language" }, { "type": "null" } ], - "default": null + "default": null, + "description": "Required. Programming language of the `code`." } }, - "required": [ - "name" - ], - "title": "ToolConfig", + "title": "ExecutableCode", + "type": "object" + }, + "ExternalApi": { + "additionalProperties": false, + "description": "Retrieve from data source powered by external API for grounding.\n\nThe external API is not owned by Google, but need to follow the pre-defined\nAPI spec.", + "properties": { + "apiAuth": { + "anyOf": [ + { + "$ref": "#/$defs/ApiAuth" + }, + { + "type": "null" + } + ], + "default": null, + "description": "The authentication config to access the API. Deprecated. Please use auth_config instead." + }, + "apiSpec": { + "anyOf": [ + { + "$ref": "#/$defs/ApiSpec" + }, + { + "type": "null" + } + ], + "default": null, + "description": "The API spec that the external API implements." + }, + "authConfig": { + "anyOf": [ + { + "$ref": "#/$defs/AuthConfig" + }, + { + "type": "null" + } + ], + "default": null, + "description": "The authentication config to access the API." + }, + "elasticSearchParams": { + "anyOf": [ + { + "$ref": "#/$defs/ExternalApiElasticSearchParams" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Parameters for the elastic search API." + }, + "endpoint": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "The endpoint of the external API. The system will call the API at this endpoint to retrieve the data for grounding. Example: https://acme.com:443/search", + "title": "Endpoint" + }, + "simpleSearchParams": { + "anyOf": [ + { + "$ref": "#/$defs/ExternalApiSimpleSearchParams" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Parameters for the simple search API." + } + }, + "title": "ExternalApi", + "type": "object" + }, + "ExternalApiElasticSearchParams": { + "additionalProperties": false, + "description": "The search parameters to use for the ELASTIC_SEARCH spec.", + "properties": { + "index": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "The ElasticSearch index to use.", + "title": "Index" + }, + "numHits": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Optional. Number of hits (chunks) to request. 
When specified, it is passed to Elasticsearch as the `num_hits` param.", + "title": "Numhits" + }, + "searchTemplate": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "The ElasticSearch search template to use.", + "title": "Searchtemplate" + } + }, + "title": "ExternalApiElasticSearchParams", + "type": "object" + }, + "ExternalApiSimpleSearchParams": { + "additionalProperties": false, + "description": "The search parameters to use for SIMPLE_SEARCH spec.", + "properties": {}, + "title": "ExternalApiSimpleSearchParams", + "type": "object" + }, + "FeatureSelectionPreference": { + "description": "Options for feature selection preference.", + "enum": [ + "FEATURE_SELECTION_PREFERENCE_UNSPECIFIED", + "PRIORITIZE_QUALITY", + "BALANCED", + "PRIORITIZE_COST" + ], + "title": "FeatureSelectionPreference", + "type": "string" + }, + "File": { + "additionalProperties": false, + "description": "A file uploaded to the API.", + "properties": { + "name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "The `File` resource name. The ID (name excluding the \"files/\" prefix) can contain up to 40 characters that are lowercase alphanumeric or dashes (-). The ID cannot start or end with a dash. If the name is empty on create, a unique name will be generated. Example: `files/123-456`", + "title": "Name" + }, + "displayName": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Optional. The human-readable display name for the `File`. The display name must be no more than 512 characters in length, including spaces. Example: 'Welcome Image'", + "title": "Displayname" + }, + "mimeType": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Output only. MIME type of the file.", + "title": "Mimetype" + }, + "sizeBytes": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Output only. Size of the file in bytes.", + "title": "Sizebytes" + }, + "createTime": { + "anyOf": [ + { + "format": "date-time", + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Output only. The timestamp of when the `File` was created.", + "title": "Createtime" + }, + "expirationTime": { + "anyOf": [ + { + "format": "date-time", + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Output only. The timestamp of when the `File` will be deleted. Only set if the `File` is scheduled to expire.", + "title": "Expirationtime" + }, + "updateTime": { + "anyOf": [ + { + "format": "date-time", + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Output only. The timestamp of when the `File` was last updated.", + "title": "Updatetime" + }, + "sha256Hash": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Output only. SHA-256 hash of the uploaded bytes. The hash value is encoded in base64 format.", + "title": "Sha256Hash" + }, + "uri": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Output only. The URI of the `File`.", + "title": "Uri" + }, + "downloadUri": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Output only. 
The URI of the `File`, only set for downloadable (generated) files.", + "title": "Downloaduri" + }, + "state": { + "anyOf": [ + { + "$ref": "#/$defs/FileState" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Output only. Processing state of the File." + }, + "source": { + "anyOf": [ + { + "$ref": "#/$defs/FileSource" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Output only. The source of the `File`." + }, + "videoMetadata": { + "anyOf": [ + { + "additionalProperties": true, + "type": "object" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Output only. Metadata for a video.", + "title": "Videometadata" + }, + "error": { + "anyOf": [ + { + "$ref": "#/$defs/FileStatus" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Output only. Error status if File processing failed." + } + }, + "title": "File", + "type": "object" + }, + "FileData": { + "additionalProperties": false, + "description": "URI based data.", + "properties": { + "displayName": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Optional. Display name of the file data. Used to provide a label or filename to distinguish file datas. It is not currently used in the Gemini GenerateContent calls.", + "title": "Displayname" + }, + "fileUri": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Required. URI.", + "title": "Fileuri" + }, + "mimeType": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Required. The IANA standard MIME type of the source data.", + "title": "Mimetype" + } + }, + "title": "FileData", + "type": "object" + }, + "FileSource": { + "description": "Source of the File.", + "enum": [ + "SOURCE_UNSPECIFIED", + "UPLOADED", + "GENERATED" + ], + "title": "FileSource", + "type": "string" + }, + "FileState": { + "description": "State for the lifecycle of a File.", + "enum": [ + "STATE_UNSPECIFIED", + "PROCESSING", + "ACTIVE", + "FAILED" + ], + "title": "FileState", + "type": "string" + }, + "FileStatus": { + "additionalProperties": false, + "description": "Status of a File that uses a common error model.", + "properties": { + "details": { + "anyOf": [ + { + "items": { + "additionalProperties": true, + "type": "object" + }, + "type": "array" + }, + { + "type": "null" + } + ], + "default": null, + "description": "A list of messages that carry the error details. There is a common set of message types for APIs to use.", + "title": "Details" + }, + "message": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "A list of messages that carry the error details. There is a common set of message types for APIs to use.", + "title": "Message" + }, + "code": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "default": null, + "description": "The status code. 0 for OK, 1 for CANCELLED", + "title": "Code" + } + }, + "title": "FileStatus", + "type": "object" + }, + "FunctionCall": { + "additionalProperties": false, + "description": "A function call.", + "properties": { + "id": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "The unique id of the function call. 
If populated, the client to execute the\n `function_call` and return the response with the matching `id`.", + "title": "Id" + }, + "args": { + "anyOf": [ + { + "additionalProperties": true, + "type": "object" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Optional. The function parameters and values in JSON object format. See [FunctionDeclaration.parameters] for parameter details.", + "title": "Args" + }, + "name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Required. The name of the function to call. Matches [FunctionDeclaration.name].", + "title": "Name" + } + }, + "title": "FunctionCall", + "type": "object" + }, + "FunctionCallingConfig": { + "additionalProperties": false, + "description": "Function calling config.", + "properties": { + "mode": { + "anyOf": [ + { + "$ref": "#/$defs/FunctionCallingConfigMode" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Optional. Function calling mode." + }, + "allowedFunctionNames": { + "anyOf": [ + { + "items": { + "type": "string" + }, + "type": "array" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Optional. Function names to call. Only set when the Mode is ANY. Function names should match [FunctionDeclaration.name]. With mode set to ANY, model will predict a function call from the set of function names provided.", + "title": "Allowedfunctionnames" + } + }, + "title": "FunctionCallingConfig", + "type": "object" + }, + "FunctionCallingConfigMode": { + "description": "Config for the function calling config mode.", + "enum": [ + "MODE_UNSPECIFIED", + "AUTO", + "ANY", + "NONE" + ], + "title": "FunctionCallingConfigMode", + "type": "string" + }, + "FunctionDeclaration": { + "additionalProperties": false, + "description": "Defines a function that the model can generate JSON inputs for.\n\nThe inputs are based on `OpenAPI 3.0 specifications\n`_.", + "properties": { + "behavior": { + "anyOf": [ + { + "$ref": "#/$defs/Behavior" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Defines the function behavior." + }, + "description": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Optional. Description and purpose of the function. Model uses it to decide how and whether to call the function.", + "title": "Description" + }, + "name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Required. The name of the function to call. Must start with a letter or an underscore. Must be a-z, A-Z, 0-9, or contain underscores, dots and dashes, with a maximum length of 64.", + "title": "Name" + }, + "parameters": { + "anyOf": [ + { + "$ref": "#/$defs/Schema" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Optional. Describes the parameters to this function in JSON Schema Object format. Reflects the Open API 3.03 Parameter Object. string Key: the name of the parameter. Parameter names are case sensitive. Schema Value: the Schema defining the type used for the parameter. For function with no parameters, this can be left unset. Parameter names must start with a letter or an underscore and must only contain chars a-z, A-Z, 0-9, or underscores with a maximum length of 64. 
Example with 1 required and 1 optional parameter: type: OBJECT properties: param1: type: STRING param2: type: INTEGER required: - param1" + }, + "parametersJsonSchema": { + "anyOf": [ + {}, + { + "type": "null" + } + ], + "default": null, + "description": "Optional. Describes the parameters to the function in JSON Schema format. The schema must describe an object where the properties are the parameters to the function. For example: ``` { \"type\": \"object\", \"properties\": { \"name\": { \"type\": \"string\" }, \"age\": { \"type\": \"integer\" } }, \"additionalProperties\": false, \"required\": [\"name\", \"age\"], \"propertyOrdering\": [\"name\", \"age\"] } ``` This field is mutually exclusive with `parameters`.", + "title": "Parametersjsonschema" + }, + "response": { + "anyOf": [ + { + "$ref": "#/$defs/Schema" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Optional. Describes the output from this function in JSON Schema format. Reflects the Open API 3.03 Response Object. The Schema defines the type used for the response value of the function." + }, + "responseJsonSchema": { + "anyOf": [ + {}, + { + "type": "null" + } + ], + "default": null, + "description": "Optional. Describes the output from this function in JSON Schema format. The value specified by the schema is the response value of the function. This field is mutually exclusive with `response`.", + "title": "Responsejsonschema" + } + }, + "title": "FunctionDeclaration", + "type": "object" + }, + "FunctionResponse": { + "additionalProperties": false, + "description": "A function response.", + "properties": { + "willContinue": { + "anyOf": [ + { + "type": "boolean" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Signals that function call continues, and more responses will be returned, turning the function call into a generator. Is only applicable to NON_BLOCKING function calls (see FunctionDeclaration.behavior for details), ignored otherwise. If false, the default, future responses will not be considered. Is only applicable to NON_BLOCKING function calls, is ignored otherwise. If set to false, future responses will not be considered. It is allowed to return empty `response` with `will_continue=False` to signal that the function call is finished.", + "title": "Willcontinue" + }, + "scheduling": { + "anyOf": [ + { + "$ref": "#/$defs/FunctionResponseScheduling" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Specifies how the response should be scheduled in the conversation. Only applicable to NON_BLOCKING function calls, is ignored otherwise. Defaults to WHEN_IDLE." + }, + "id": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Optional. The id of the function call this response is for. Populated by the client to match the corresponding function call `id`.", + "title": "Id" + }, + "name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Required. The name of the function to call. Matches [FunctionDeclaration.name] and [FunctionCall.name].", + "title": "Name" + }, + "response": { + "anyOf": [ + { + "additionalProperties": true, + "type": "object" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Required. The function response in JSON object format. Use \"output\" key to specify function output and \"error\" key to specify error details (if any). 
If \"output\" and \"error\" keys are not specified, then whole \"response\" is treated as function output.", + "title": "Response" + } + }, + "title": "FunctionResponse", + "type": "object" + }, + "FunctionResponseScheduling": { + "description": "Specifies how the response should be scheduled in the conversation.", + "enum": [ + "SCHEDULING_UNSPECIFIED", + "SILENT", + "WHEN_IDLE", + "INTERRUPT" + ], + "title": "FunctionResponseScheduling", + "type": "string" + }, + "GenerateContentConfig": { + "additionalProperties": false, + "description": "Optional model configuration parameters.\n\nFor more information, see `Content generation parameters\n`_.", + "properties": { + "httpOptions": { + "anyOf": [ + { + "$ref": "#/$defs/HttpOptions" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Used to override HTTP request options." + }, + "systemInstruction": { + "anyOf": [ + { + "$ref": "#/$defs/Content" + }, + { + "items": { + "anyOf": [ + { + "$ref": "#/$defs/File" + }, + { + "$ref": "#/$defs/Part" + }, + { + "type": "string" + } + ] + }, + "type": "array" + }, + { + "$ref": "#/$defs/File" + }, + { + "$ref": "#/$defs/Part" + }, + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Instructions for the model to steer it toward better performance.\n For example, \"Answer as concisely as possible\" or \"Don't use technical\n terms in your response\".\n ", + "title": "Systeminstruction" + }, + "temperature": { + "anyOf": [ + { + "type": "number" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Value that controls the degree of randomness in token selection.\n Lower temperatures are good for prompts that require a less open-ended or\n creative response, while higher temperatures can lead to more diverse or\n creative results.\n ", + "title": "Temperature" + }, + "topP": { + "anyOf": [ + { + "type": "number" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Tokens are selected from the most to least probable until the sum\n of their probabilities equals this value. Use a lower value for less\n random responses and a higher value for more random responses.\n ", + "title": "Topp" + }, + "topK": { + "anyOf": [ + { + "type": "number" + }, + { + "type": "null" + } + ], + "default": null, + "description": "For each token selection step, the ``top_k`` tokens with the\n highest probabilities are sampled. Then tokens are further filtered based\n on ``top_p`` with the final token selected using temperature sampling. 
Use\n a lower number for less random responses and a higher number for more\n random responses.\n ", + "title": "Topk" + }, + "candidateCount": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Number of response variations to return.\n ", + "title": "Candidatecount" + }, + "maxOutputTokens": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Maximum number of tokens that can be generated in the response.\n ", + "title": "Maxoutputtokens" + }, + "stopSequences": { + "anyOf": [ + { + "items": { + "type": "string" + }, + "type": "array" + }, + { + "type": "null" + } + ], + "default": null, + "description": "List of strings that tells the model to stop generating text if one\n of the strings is encountered in the response.\n ", + "title": "Stopsequences" + }, + "responseLogprobs": { + "anyOf": [ + { + "type": "boolean" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Whether to return the log probabilities of the tokens that were\n chosen by the model at each step.\n ", + "title": "Responselogprobs" + }, + "logprobs": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Number of top candidate tokens to return the log probabilities for\n at each generation step.\n ", + "title": "Logprobs" + }, + "presencePenalty": { + "anyOf": [ + { + "type": "number" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Positive values penalize tokens that already appear in the\n generated text, increasing the probability of generating more diverse\n content.\n ", + "title": "Presencepenalty" + }, + "frequencyPenalty": { + "anyOf": [ + { + "type": "number" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Positive values penalize tokens that repeatedly appear in the\n generated text, increasing the probability of generating more diverse\n content.\n ", + "title": "Frequencypenalty" + }, + "seed": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "default": null, + "description": "When ``seed`` is fixed to a specific number, the model makes a best\n effort to provide the same response for repeated requests. 
By default, a\n random number is used.\n ", + "title": "Seed" + }, + "responseMimeType": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Output response mimetype of the generated candidate text.\n Supported mimetype:\n - `text/plain`: (default) Text output.\n - `application/json`: JSON response in the candidates.\n The model needs to be prompted to output the appropriate response type,\n otherwise the behavior is undefined.\n This is a preview feature.\n ", + "title": "Responsemimetype" + }, + "responseSchema": { + "anyOf": [ + { + "additionalProperties": true, + "type": "object" + }, + { + "$ref": "#/$defs/Schema" + }, + { + "type": "null" + } + ], + "default": null, + "description": "The `Schema` object allows the definition of input and output data types.\n These types can be objects, but also primitives and arrays.\n Represents a select subset of an [OpenAPI 3.0 schema\n object](https://spec.openapis.org/oas/v3.0.3#schema).\n If set, a compatible response_mime_type must also be set.\n Compatible mimetypes: `application/json`: Schema for JSON response.\n ", + "title": "Responseschema" + }, + "responseJsonSchema": { + "anyOf": [ + {}, + { + "type": "null" + } + ], + "default": null, + "description": "Optional. Output schema of the generated response.\n This is an alternative to `response_schema` that accepts [JSON\n Schema](https://json-schema.org/). If set, `response_schema` must be\n omitted, but `response_mime_type` is required. While the full JSON Schema\n may be sent, not all features are supported. Specifically, only the\n following properties are supported: - `$id` - `$defs` - `$ref` - `$anchor`\n - `type` - `format` - `title` - `description` - `enum` (for strings and\n numbers) - `items` - `prefixItems` - `minItems` - `maxItems` - `minimum` -\n `maximum` - `anyOf` - `oneOf` (interpreted the same as `anyOf`) -\n `properties` - `additionalProperties` - `required` The non-standard\n `propertyOrdering` property may also be set. Cyclic references are\n unrolled to a limited degree and, as such, may only be used within\n non-required properties. (Nullable properties are not sufficient.) 
If\n `$ref` is set on a sub-schema, no other properties, except for than those\n starting as a `$`, may be set.", + "title": "Responsejsonschema" + }, + "routingConfig": { + "anyOf": [ + { + "$ref": "#/$defs/GenerationConfigRoutingConfig" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Configuration for model router requests.\n " + }, + "modelSelectionConfig": { + "anyOf": [ + { + "$ref": "#/$defs/ModelSelectionConfig" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Configuration for model selection.\n " + }, + "safetySettings": { + "anyOf": [ + { + "items": { + "$ref": "#/$defs/SafetySetting" + }, + "type": "array" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Safety settings in the request to block unsafe content in the\n response.\n ", + "title": "Safetysettings" + }, + "tools": { + "anyOf": [ + { + "items": { + "anyOf": [ + { + "$ref": "#/$defs/google__genai__types__Tool" + }, + { + "$ref": "#/$defs/mcp__types__Tool" + } + ] + }, + "type": "array" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Code that enables the system to interact with external systems to\n perform an action outside of the knowledge and scope of the model.\n ", + "title": "Tools" + }, + "toolConfig": { + "anyOf": [ + { + "$ref": "#/$defs/google__genai__types__ToolConfig" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Associates model output to a specific function call.\n " + }, + "labels": { + "anyOf": [ + { + "additionalProperties": { + "type": "string" + }, + "type": "object" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Labels with user-defined metadata to break down billed charges.", + "title": "Labels" + }, + "cachedContent": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Resource name of a context cache that can be used in subsequent\n requests.\n ", + "title": "Cachedcontent" + }, + "responseModalities": { + "anyOf": [ + { + "items": { + "type": "string" + }, + "type": "array" + }, + { + "type": "null" + } + ], + "default": null, + "description": "The requested modalities of the response. 
Represents the set of\n modalities that the model can return.\n ", + "title": "Responsemodalities" + }, + "mediaResolution": { + "anyOf": [ + { + "$ref": "#/$defs/MediaResolution" + }, + { + "type": "null" + } + ], + "default": null, + "description": "If specified, the media resolution specified will be used.\n " + }, + "speechConfig": { + "anyOf": [ + { + "$ref": "#/$defs/SpeechConfig" + }, + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "The speech generation configuration.\n ", + "title": "Speechconfig" + }, + "audioTimestamp": { + "anyOf": [ + { + "type": "boolean" + }, + { + "type": "null" + } + ], + "default": null, + "description": "If enabled, audio timestamp will be included in the request to the\n model.\n ", + "title": "Audiotimestamp" + }, + "automaticFunctionCalling": { + "anyOf": [ + { + "$ref": "#/$defs/AutomaticFunctionCallingConfig" + }, + { + "type": "null" + } + ], + "default": null, + "description": "The configuration for automatic function calling.\n " + }, + "thinkingConfig": { + "anyOf": [ + { + "$ref": "#/$defs/ThinkingConfig" + }, + { + "type": "null" + } + ], + "default": null, + "description": "The thinking features configuration.\n " + } + }, + "title": "GenerateContentConfig", + "type": "object" + }, + "GenerationConfigRoutingConfig": { + "additionalProperties": false, + "description": "The configuration for routing the request to a specific model.", + "properties": { + "autoMode": { + "anyOf": [ + { + "$ref": "#/$defs/GenerationConfigRoutingConfigAutoRoutingMode" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Automated routing." + }, + "manualMode": { + "anyOf": [ + { + "$ref": "#/$defs/GenerationConfigRoutingConfigManualRoutingMode" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Manual routing." + } + }, + "title": "GenerationConfigRoutingConfig", + "type": "object" + }, + "GenerationConfigRoutingConfigAutoRoutingMode": { + "additionalProperties": false, + "description": "When automated routing is specified, the routing will be determined by the pretrained routing model and customer provided model routing preference.", + "properties": { + "modelRoutingPreference": { + "anyOf": [ + { + "enum": [ + "UNKNOWN", + "PRIORITIZE_QUALITY", + "BALANCED", + "PRIORITIZE_COST" + ], + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "The model routing preference.", + "title": "Modelroutingpreference" + } + }, + "title": "GenerationConfigRoutingConfigAutoRoutingMode", + "type": "object" + }, + "GenerationConfigRoutingConfigManualRoutingMode": { + "additionalProperties": false, + "description": "When manual routing is set, the specified model will be used directly.", + "properties": { + "modelName": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "The model name to use. Only the public LLM models are accepted. See [Supported models](https://cloud.google.com/vertex-ai/generative-ai/docs/model-reference/inference#supported-models).", + "title": "Modelname" + } + }, + "title": "GenerationConfigRoutingConfigManualRoutingMode", + "type": "object" + }, + "GoogleMaps": { + "additionalProperties": false, + "description": "Tool to support Google Maps in Model.", + "properties": { + "authConfig": { + "anyOf": [ + { + "$ref": "#/$defs/AuthConfig" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Optional. Auth config for the Google Maps tool." 
+ } + }, + "title": "GoogleMaps", + "type": "object" + }, + "GoogleSearch": { + "additionalProperties": false, + "description": "Tool to support Google Search in Model. Powered by Google.", + "properties": { + "timeRangeFilter": { + "anyOf": [ + { + "$ref": "#/$defs/Interval" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Optional. Filter search results to a specific time range.\n If customers set a start time, they must set an end time (and vice versa).\n " + } + }, + "title": "GoogleSearch", + "type": "object" + }, + "GoogleSearchRetrieval": { + "additionalProperties": false, + "description": "Tool to retrieve public web data for grounding, powered by Google.", + "properties": { + "dynamicRetrievalConfig": { + "anyOf": [ + { + "$ref": "#/$defs/DynamicRetrievalConfig" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Specifies the dynamic retrieval configuration for the given source." + } + }, + "title": "GoogleSearchRetrieval", + "type": "object" + }, + "HarmBlockMethod": { + "description": "Optional.\n\nSpecify if the threshold is used for probability or severity score. If not\nspecified, the threshold is used for probability score.", + "enum": [ + "HARM_BLOCK_METHOD_UNSPECIFIED", + "SEVERITY", + "PROBABILITY" + ], + "title": "HarmBlockMethod", + "type": "string" + }, + "HarmBlockThreshold": { + "description": "Required. The harm block threshold.", + "enum": [ + "HARM_BLOCK_THRESHOLD_UNSPECIFIED", + "BLOCK_LOW_AND_ABOVE", + "BLOCK_MEDIUM_AND_ABOVE", + "BLOCK_ONLY_HIGH", + "BLOCK_NONE", + "OFF" + ], + "title": "HarmBlockThreshold", + "type": "string" + }, + "HarmCategory": { + "description": "Required. Harm category.", + "enum": [ + "HARM_CATEGORY_UNSPECIFIED", + "HARM_CATEGORY_HATE_SPEECH", + "HARM_CATEGORY_DANGEROUS_CONTENT", + "HARM_CATEGORY_HARASSMENT", + "HARM_CATEGORY_SEXUALLY_EXPLICIT", + "HARM_CATEGORY_CIVIC_INTEGRITY", + "HARM_CATEGORY_IMAGE_HATE", + "HARM_CATEGORY_IMAGE_DANGEROUS_CONTENT", + "HARM_CATEGORY_IMAGE_HARASSMENT", + "HARM_CATEGORY_IMAGE_SEXUALLY_EXPLICIT" + ], + "title": "HarmCategory", + "type": "string" + }, + "HttpOptions": { + "additionalProperties": false, + "description": "HTTP options to be used in each of the requests.", + "properties": { + "baseUrl": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "The base URL for the AI platform service endpoint.", + "title": "Baseurl" + }, + "apiVersion": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Specifies the version of the API to use.", + "title": "Apiversion" + }, + "headers": { + "anyOf": [ + { + "additionalProperties": { + "type": "string" + }, + "type": "object" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Additional HTTP headers to be sent with the request.", + "title": "Headers" + }, + "timeout": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Timeout for the request in milliseconds.", + "title": "Timeout" + }, + "clientArgs": { + "anyOf": [ + { + "additionalProperties": true, + "type": "object" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Args passed to the HTTP client.", + "title": "Clientargs" + }, + "asyncClientArgs": { + "anyOf": [ + { + "additionalProperties": true, + "type": "object" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Args passed to the async HTTP client.", + "title": 
"Asyncclientargs" + }, + "extraBody": { + "anyOf": [ + { + "additionalProperties": true, + "type": "object" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Extra parameters to add to the request body.", + "title": "Extrabody" + }, + "retryOptions": { + "anyOf": [ + { + "$ref": "#/$defs/HttpRetryOptions" + }, + { + "type": "null" + } + ], + "default": null, + "description": "HTTP retry options for the request." + } + }, + "title": "HttpOptions", + "type": "object" + }, + "HttpRetryOptions": { + "additionalProperties": false, + "description": "HTTP retry options to be used in each of the requests.", + "properties": { + "attempts": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Maximum number of attempts, including the original request.\n If 0 or 1, it means no retries.", + "title": "Attempts" + }, + "initialDelay": { + "anyOf": [ + { + "type": "number" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Initial delay before the first retry, in fractions of a second.", + "title": "Initialdelay" + }, + "maxDelay": { + "anyOf": [ + { + "type": "number" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Maximum delay between retries, in fractions of a second.", + "title": "Maxdelay" + }, + "expBase": { + "anyOf": [ + { + "type": "number" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Multiplier by which the delay increases after each attempt.", + "title": "Expbase" + }, + "jitter": { + "anyOf": [ + { + "type": "number" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Randomness factor for the delay.", + "title": "Jitter" + }, + "httpStatusCodes": { + "anyOf": [ + { + "items": { + "type": "integer" + }, + "type": "array" + }, + { + "type": "null" + } + ], + "default": null, + "description": "List of HTTP status codes that should trigger a retry.\n If not specified, a default set of retryable codes may be used.", + "title": "Httpstatuscodes" + } + }, + "title": "HttpRetryOptions", + "type": "object" + }, + "Interval": { + "additionalProperties": false, + "description": "Represents a time interval, encoded as a start time (inclusive) and an end time (exclusive).\n\nThe start time must be less than or equal to the end time.\nWhen the start equals the end time, the interval is an empty interval.\n(matches no time)\nWhen both start and end are unspecified, the interval matches any time.", + "properties": { + "startTime": { + "anyOf": [ + { + "format": "date-time", + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "The start time of the interval.", + "title": "Starttime" + }, + "endTime": { + "anyOf": [ + { + "format": "date-time", + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "The end time of the interval.", + "title": "Endtime" + } + }, + "title": "Interval", + "type": "object" + }, + "Language": { + "description": "Required. Programming language of the `code`.", + "enum": [ + "LANGUAGE_UNSPECIFIED", + "PYTHON" + ], + "title": "Language", + "type": "string" + }, + "LatLng": { + "additionalProperties": false, + "description": "An object that represents a latitude/longitude pair.\n\nThis is expressed as a pair of doubles to represent degrees latitude and\ndegrees longitude. Unless specified otherwise, this object must conform to the\n\nWGS84 standard. 
Values must be within normalized ranges.", + "properties": { + "latitude": { + "anyOf": [ + { + "type": "number" + }, + { + "type": "null" + } + ], + "default": null, + "description": "The latitude in degrees. It must be in the range [-90.0, +90.0].", + "title": "Latitude" + }, + "longitude": { + "anyOf": [ + { + "type": "number" + }, + { + "type": "null" + } + ], + "default": null, + "description": "The longitude in degrees. It must be in the range [-180.0, +180.0]", + "title": "Longitude" + } + }, + "title": "LatLng", + "type": "object" + }, + "LlmAgentConfig": { + "additionalProperties": false, + "description": "The config for the YAML schema of a LlmAgent.", + "properties": { + "agent_class": { + "default": "LlmAgent", + "enum": [ + "LlmAgent", + "" + ], + "title": "Agent Class", + "type": "string" + }, + "name": { + "title": "Name", + "type": "string" + }, + "description": { + "default": "", + "title": "Description", + "type": "string" + }, + "sub_agents": { + "anyOf": [ + { + "items": { + "$ref": "#/$defs/AgentRefConfig" + }, + "type": "array" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Sub Agents" + }, + "before_agent_callbacks": { + "anyOf": [ + { + "items": { + "$ref": "#/$defs/CodeConfig" + }, + "type": "array" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Before Agent Callbacks" + }, + "after_agent_callbacks": { + "anyOf": [ + { + "items": { + "$ref": "#/$defs/CodeConfig" + }, + "type": "array" + }, + { + "type": "null" + } + ], + "default": null, + "title": "After Agent Callbacks" + }, + "model": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Model" + }, + "instruction": { + "title": "Instruction", + "type": "string" + }, + "disallow_transfer_to_parent": { + "anyOf": [ + { + "type": "boolean" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Disallow Transfer To Parent" + }, + "disallow_transfer_to_peers": { + "anyOf": [ + { + "type": "boolean" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Disallow Transfer To Peers" + }, + "input_schema": { + "anyOf": [ + { + "$ref": "#/$defs/CodeConfig" + }, + { + "type": "null" + } + ], + "default": null + }, + "output_schema": { + "anyOf": [ + { + "$ref": "#/$defs/CodeConfig" + }, + { + "type": "null" + } + ], + "default": null + }, + "output_key": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Output Key" + }, + "include_contents": { + "default": "default", + "enum": [ + "default", + "none" + ], + "title": "Include Contents", + "type": "string" + }, + "tools": { + "anyOf": [ + { + "items": { + "$ref": "#/$defs/google__adk__tools__base_tool__ToolConfig" + }, + "type": "array" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Tools" + }, + "before_model_callbacks": { + "anyOf": [ + { + "items": { + "$ref": "#/$defs/CodeConfig" + }, + "type": "array" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Before Model Callbacks" + }, + "after_model_callbacks": { + "anyOf": [ + { + "items": { + "$ref": "#/$defs/CodeConfig" + }, + "type": "array" + }, + { + "type": "null" + } + ], + "default": null, + "title": "After Model Callbacks" + }, + "before_tool_callbacks": { + "anyOf": [ + { + "items": { + "$ref": "#/$defs/CodeConfig" + }, + "type": "array" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Before Tool Callbacks" + }, + "after_tool_callbacks": { + "anyOf": [ + { + "items": { + "$ref": "#/$defs/CodeConfig" + 
}, + "type": "array" + }, + { + "type": "null" + } + ], + "default": null, + "title": "After Tool Callbacks" + }, + "generate_content_config": { + "anyOf": [ + { + "$ref": "#/$defs/GenerateContentConfig" + }, + { + "type": "null" + } + ], + "default": null + } + }, + "required": [ + "name", + "instruction" + ], + "title": "LlmAgentConfig", + "type": "object" + }, + "LoopAgentConfig": { + "additionalProperties": false, + "description": "The config for the YAML schema of a LoopAgent.", + "properties": { + "agent_class": { + "const": "LoopAgent", + "default": "LoopAgent", + "title": "Agent Class", + "type": "string" + }, + "name": { + "title": "Name", + "type": "string" + }, + "description": { + "default": "", + "title": "Description", + "type": "string" + }, + "sub_agents": { + "anyOf": [ + { + "items": { + "$ref": "#/$defs/AgentRefConfig" + }, + "type": "array" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Sub Agents" + }, + "before_agent_callbacks": { + "anyOf": [ + { + "items": { + "$ref": "#/$defs/CodeConfig" + }, + "type": "array" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Before Agent Callbacks" + }, + "after_agent_callbacks": { + "anyOf": [ + { + "items": { + "$ref": "#/$defs/CodeConfig" + }, + "type": "array" + }, + { + "type": "null" + } + ], + "default": null, + "title": "After Agent Callbacks" + }, + "max_iterations": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Max Iterations" + } + }, + "required": [ + "name" + ], + "title": "LoopAgentConfig", + "type": "object" + }, + "MediaResolution": { + "description": "The media resolution to use.", + "enum": [ + "MEDIA_RESOLUTION_UNSPECIFIED", + "MEDIA_RESOLUTION_LOW", + "MEDIA_RESOLUTION_MEDIUM", + "MEDIA_RESOLUTION_HIGH" + ], + "title": "MediaResolution", + "type": "string" + }, + "ModelSelectionConfig": { + "additionalProperties": false, + "description": "Config for model selection.", + "properties": { + "featureSelectionPreference": { + "anyOf": [ + { + "$ref": "#/$defs/FeatureSelectionPreference" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Options for feature selection preference." + } + }, + "title": "ModelSelectionConfig", + "type": "object" + }, + "MultiSpeakerVoiceConfig": { + "additionalProperties": false, + "description": "The configuration for the multi-speaker setup.", + "properties": { + "speakerVoiceConfigs": { + "anyOf": [ + { + "items": { + "$ref": "#/$defs/SpeakerVoiceConfig" + }, + "type": "array" + }, + { + "type": "null" + } + ], + "default": null, + "description": "The configuration for the speaker to use.", + "title": "Speakervoiceconfigs" + } + }, + "title": "MultiSpeakerVoiceConfig", + "type": "object" + }, + "Outcome": { + "description": "Required. 
Outcome of the code execution.", + "enum": [ + "OUTCOME_UNSPECIFIED", + "OUTCOME_OK", + "OUTCOME_FAILED", + "OUTCOME_DEADLINE_EXCEEDED" + ], + "title": "Outcome", + "type": "string" + }, + "ParallelAgentConfig": { + "additionalProperties": false, + "description": "The config for the YAML schema of a ParallelAgent.", + "properties": { + "agent_class": { + "const": "ParallelAgent", + "default": "ParallelAgent", + "title": "Agent Class", + "type": "string" + }, + "name": { + "title": "Name", + "type": "string" + }, + "description": { + "default": "", + "title": "Description", + "type": "string" + }, + "sub_agents": { + "anyOf": [ + { + "items": { + "$ref": "#/$defs/AgentRefConfig" + }, + "type": "array" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Sub Agents" + }, + "before_agent_callbacks": { + "anyOf": [ + { + "items": { + "$ref": "#/$defs/CodeConfig" + }, + "type": "array" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Before Agent Callbacks" + }, + "after_agent_callbacks": { + "anyOf": [ + { + "items": { + "$ref": "#/$defs/CodeConfig" + }, + "type": "array" + }, + { + "type": "null" + } + ], + "default": null, + "title": "After Agent Callbacks" + } + }, + "required": [ + "name" + ], + "title": "ParallelAgentConfig", + "type": "object" + }, + "Part": { + "additionalProperties": false, + "description": "A datatype containing media content.\n\nExactly one field within a Part should be set, representing the specific type\nof content being conveyed. Using multiple fields within the same `Part`\ninstance is considered invalid.", + "properties": { + "videoMetadata": { + "anyOf": [ + { + "$ref": "#/$defs/VideoMetadata" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Metadata for a given video." + }, + "thought": { + "anyOf": [ + { + "type": "boolean" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Indicates if the part is thought from the model.", + "title": "Thought" + }, + "inlineData": { + "anyOf": [ + { + "$ref": "#/$defs/Blob" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Optional. Inlined bytes data." + }, + "fileData": { + "anyOf": [ + { + "$ref": "#/$defs/FileData" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Optional. URI based data." + }, + "thoughtSignature": { + "anyOf": [ + { + "format": "base64url", + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "An opaque signature for the thought so it can be reused in subsequent requests.", + "title": "Thoughtsignature" + }, + "codeExecutionResult": { + "anyOf": [ + { + "$ref": "#/$defs/CodeExecutionResult" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Optional. Result of executing the [ExecutableCode]." + }, + "executableCode": { + "anyOf": [ + { + "$ref": "#/$defs/ExecutableCode" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Optional. Code generated by the model that is meant to be executed." + }, + "functionCall": { + "anyOf": [ + { + "$ref": "#/$defs/FunctionCall" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Optional. A predicted [FunctionCall] returned from the model that contains a string representing the [FunctionDeclaration.name] with the parameters and their values." + }, + "functionResponse": { + "anyOf": [ + { + "$ref": "#/$defs/FunctionResponse" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Optional. 
The result output of a [FunctionCall] that contains a string representing the [FunctionDeclaration.name] and a structured JSON object containing any output from the function call. It is used as context to the model." + }, + "text": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Optional. Text part (can be code).", + "title": "Text" + } + }, + "title": "Part", + "type": "object" + }, + "PrebuiltVoiceConfig": { + "additionalProperties": false, + "description": "The configuration for the prebuilt speaker to use.", + "properties": { + "voiceName": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "The name of the prebuilt voice to use.", + "title": "Voicename" + } + }, + "title": "PrebuiltVoiceConfig", + "type": "object" + }, + "RagRetrievalConfig": { + "additionalProperties": false, + "description": "Specifies the context retrieval config.", + "properties": { + "filter": { + "anyOf": [ + { + "$ref": "#/$defs/RagRetrievalConfigFilter" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Optional. Config for filters." + }, + "hybridSearch": { + "anyOf": [ + { + "$ref": "#/$defs/RagRetrievalConfigHybridSearch" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Optional. Config for Hybrid Search." + }, + "ranking": { + "anyOf": [ + { + "$ref": "#/$defs/RagRetrievalConfigRanking" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Optional. Config for ranking and reranking." + }, + "topK": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Optional. The number of contexts to retrieve.", + "title": "Topk" + } + }, + "title": "RagRetrievalConfig", + "type": "object" + }, + "RagRetrievalConfigFilter": { + "additionalProperties": false, + "description": "Config for filters.", + "properties": { + "metadataFilter": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Optional. String for metadata filtering.", + "title": "Metadatafilter" + }, + "vectorDistanceThreshold": { + "anyOf": [ + { + "type": "number" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Optional. Only returns contexts with vector distance smaller than the threshold.", + "title": "Vectordistancethreshold" + }, + "vectorSimilarityThreshold": { + "anyOf": [ + { + "type": "number" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Optional. Only returns contexts with vector similarity larger than the threshold.", + "title": "Vectorsimilaritythreshold" + } + }, + "title": "RagRetrievalConfigFilter", + "type": "object" + }, + "RagRetrievalConfigHybridSearch": { + "additionalProperties": false, + "description": "Config for Hybrid Search.", + "properties": { + "alpha": { + "anyOf": [ + { + "type": "number" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Optional. Alpha value controls the weight between dense and sparse vector search results. The range is [0, 1], while 0 means sparse vector search only and 1 means dense vector search only. 
The default value is 0.5 which balances sparse and dense vector search equally.", + "title": "Alpha" + } + }, + "title": "RagRetrievalConfigHybridSearch", + "type": "object" + }, + "RagRetrievalConfigRanking": { + "additionalProperties": false, + "description": "Config for ranking and reranking.", + "properties": { + "llmRanker": { + "anyOf": [ + { + "$ref": "#/$defs/RagRetrievalConfigRankingLlmRanker" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Optional. Config for LlmRanker." + }, + "rankService": { + "anyOf": [ + { + "$ref": "#/$defs/RagRetrievalConfigRankingRankService" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Optional. Config for Rank Service." + } + }, + "title": "RagRetrievalConfigRanking", + "type": "object" + }, + "RagRetrievalConfigRankingLlmRanker": { + "additionalProperties": false, + "description": "Config for LlmRanker.", + "properties": { + "modelName": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Optional. The model name used for ranking. See [Supported models](https://cloud.google.com/vertex-ai/generative-ai/docs/model-reference/inference#supported-models).", + "title": "Modelname" + } + }, + "title": "RagRetrievalConfigRankingLlmRanker", + "type": "object" + }, + "RagRetrievalConfigRankingRankService": { + "additionalProperties": false, + "description": "Config for Rank Service.", + "properties": { + "modelName": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Optional. The model name of the rank service. Format: `semantic-ranker-512@latest`", + "title": "Modelname" + } + }, + "title": "RagRetrievalConfigRankingRankService", + "type": "object" + }, + "Retrieval": { + "additionalProperties": false, + "description": "Defines a retrieval tool that model can call to access external knowledge.", + "properties": { + "disableAttribution": { + "anyOf": [ + { + "type": "boolean" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Optional. Deprecated. This option is no longer supported.", + "title": "Disableattribution" + }, + "externalApi": { + "anyOf": [ + { + "$ref": "#/$defs/ExternalApi" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Use data source powered by external API for grounding." + }, + "vertexAiSearch": { + "anyOf": [ + { + "$ref": "#/$defs/VertexAISearch" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Set to use data source powered by Vertex AI Search." + }, + "vertexRagStore": { + "anyOf": [ + { + "$ref": "#/$defs/VertexRagStore" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Set to use data source powered by Vertex RAG store. User data is uploaded via the VertexRagDataService." + } + }, + "title": "Retrieval", + "type": "object" + }, + "RetrievalConfig": { + "additionalProperties": false, + "description": "Retrieval config.", + "properties": { + "latLng": { + "anyOf": [ + { + "$ref": "#/$defs/LatLng" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Optional. The location of the user." 
+ }, + "languageCode": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "The language code of the user.", + "title": "Languagecode" + } + }, + "title": "RetrievalConfig", + "type": "object" + }, + "SafetySetting": { + "additionalProperties": false, + "description": "Safety settings.", + "properties": { + "method": { + "anyOf": [ + { + "$ref": "#/$defs/HarmBlockMethod" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Determines if the harm block method uses probability or probability\n and severity scores." + }, + "category": { + "anyOf": [ + { + "$ref": "#/$defs/HarmCategory" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Required. Harm category." + }, + "threshold": { + "anyOf": [ + { + "$ref": "#/$defs/HarmBlockThreshold" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Required. The harm block threshold." + } + }, + "title": "SafetySetting", + "type": "object" + }, + "Schema": { + "additionalProperties": false, + "description": "Schema is used to define the format of input/output data.\n\nRepresents a select subset of an [OpenAPI 3.0 schema\nobject](https://spec.openapis.org/oas/v3.0.3#schema-object). More fields may\nbe added in the future as needed.", + "properties": { + "additionalProperties": { + "anyOf": [ + {}, + { + "type": "null" + } + ], + "default": null, + "description": "Optional. Can either be a boolean or an object; controls the presence of additional properties.", + "title": "Additionalproperties" + }, + "defs": { + "anyOf": [ + { + "additionalProperties": { + "$ref": "#/$defs/Schema" + }, + "type": "object" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Optional. A map of definitions for use by `ref` Only allowed at the root of the schema.", + "title": "Defs" + }, + "ref": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Optional. Allows indirect references between schema nodes. The value should be a valid reference to a child of the root `defs`. For example, the following schema defines a reference to a schema node named \"Pet\": type: object properties: pet: ref: #/defs/Pet defs: Pet: type: object properties: name: type: string The value of the \"pet\" property is a reference to the schema node named \"Pet\". See details in https://json-schema.org/understanding-json-schema/structuring", + "title": "Ref" + }, + "anyOf": { + "anyOf": [ + { + "items": { + "$ref": "#/$defs/Schema" + }, + "type": "array" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Optional. The value should be validated against any (one or more) of the subschemas in the list.", + "title": "Anyof" + }, + "default": { + "anyOf": [ + {}, + { + "type": "null" + } + ], + "default": null, + "description": "Optional. Default value of the data.", + "title": "Default" + }, + "description": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Optional. The description of the data.", + "title": "Description" + }, + "enum": { + "anyOf": [ + { + "items": { + "type": "string" + }, + "type": "array" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Optional. Possible values of the element of primitive type with enum format. Examples: 1. We can define direction as : {type:STRING, format:enum, enum:[\"EAST\", NORTH\", \"SOUTH\", \"WEST\"]} 2. 
We can define apartment number as : {type:INTEGER, format:enum, enum:[\"101\", \"201\", \"301\"]}", + "title": "Enum" + }, + "example": { + "anyOf": [ + {}, + { + "type": "null" + } + ], + "default": null, + "description": "Optional. Example of the object. Will only populated when the object is the root.", + "title": "Example" + }, + "format": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Optional. The format of the data. Supported formats: for NUMBER type: \"float\", \"double\" for INTEGER type: \"int32\", \"int64\" for STRING type: \"email\", \"byte\", etc", + "title": "Format" + }, + "items": { + "anyOf": [ + { + "$ref": "#/$defs/Schema" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Optional. SCHEMA FIELDS FOR TYPE ARRAY Schema of the elements of Type.ARRAY." + }, + "maxItems": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Optional. Maximum number of the elements for Type.ARRAY.", + "title": "Maxitems" + }, + "maxLength": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Optional. Maximum length of the Type.STRING", + "title": "Maxlength" + }, + "maxProperties": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Optional. Maximum number of the properties for Type.OBJECT.", + "title": "Maxproperties" + }, + "maximum": { + "anyOf": [ + { + "type": "number" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Optional. Maximum value of the Type.INTEGER and Type.NUMBER", + "title": "Maximum" + }, + "minItems": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Optional. Minimum number of the elements for Type.ARRAY.", + "title": "Minitems" + }, + "minLength": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Optional. SCHEMA FIELDS FOR TYPE STRING Minimum length of the Type.STRING", + "title": "Minlength" + }, + "minProperties": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Optional. Minimum number of the properties for Type.OBJECT.", + "title": "Minproperties" + }, + "minimum": { + "anyOf": [ + { + "type": "number" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Optional. SCHEMA FIELDS FOR TYPE INTEGER and NUMBER Minimum value of the Type.INTEGER and Type.NUMBER", + "title": "Minimum" + }, + "nullable": { + "anyOf": [ + { + "type": "boolean" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Optional. Indicates if the value may be null.", + "title": "Nullable" + }, + "pattern": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Optional. Pattern of the Type.STRING to restrict a string to a regular expression.", + "title": "Pattern" + }, + "properties": { + "anyOf": [ + { + "additionalProperties": { + "$ref": "#/$defs/Schema" + }, + "type": "object" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Optional. SCHEMA FIELDS FOR TYPE OBJECT Properties of Type.OBJECT.", + "title": "Properties" + }, + "propertyOrdering": { + "anyOf": [ + { + "items": { + "type": "string" + }, + "type": "array" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Optional. 
The order of the properties. Not a standard field in open api spec. Only used to support the order of the properties.", + "title": "Propertyordering" + }, + "required": { + "anyOf": [ + { + "items": { + "type": "string" + }, + "type": "array" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Optional. Required properties of Type.OBJECT.", + "title": "Required" + }, + "title": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Optional. The title of the Schema.", + "title": "Title" + }, + "type": { + "anyOf": [ + { + "$ref": "#/$defs/Type" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Optional. The type of the data." + } + }, + "title": "Schema", + "type": "object" + }, + "SequentialAgentConfig": { + "additionalProperties": false, + "description": "The config for the YAML schema of a SequentialAgent.", + "properties": { + "agent_class": { + "const": "SequentialAgent", + "default": "SequentialAgent", + "title": "Agent Class", + "type": "string" + }, + "name": { + "title": "Name", + "type": "string" + }, + "description": { + "default": "", + "title": "Description", + "type": "string" + }, + "sub_agents": { + "anyOf": [ + { + "items": { + "$ref": "#/$defs/AgentRefConfig" + }, + "type": "array" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Sub Agents" + }, + "before_agent_callbacks": { + "anyOf": [ + { + "items": { + "$ref": "#/$defs/CodeConfig" + }, + "type": "array" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Before Agent Callbacks" + }, + "after_agent_callbacks": { + "anyOf": [ + { + "items": { + "$ref": "#/$defs/CodeConfig" + }, + "type": "array" + }, + { + "type": "null" + } + ], + "default": null, + "title": "After Agent Callbacks" + } + }, + "required": [ + "name" + ], + "title": "SequentialAgentConfig", + "type": "object" + }, + "SpeakerVoiceConfig": { + "additionalProperties": false, + "description": "The configuration for the speaker to use.", + "properties": { + "speaker": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "The name of the speaker to use. Should be the same as in the\n prompt.", + "title": "Speaker" + }, + "voiceConfig": { + "anyOf": [ + { + "$ref": "#/$defs/VoiceConfig" + }, + { + "type": "null" + } + ], + "default": null, + "description": "The configuration for the voice to use." + } + }, + "title": "SpeakerVoiceConfig", + "type": "object" + }, + "SpeechConfig": { + "additionalProperties": false, + "description": "The speech generation configuration.", + "properties": { + "voiceConfig": { + "anyOf": [ + { + "$ref": "#/$defs/VoiceConfig" + }, + { + "type": "null" + } + ], + "default": null, + "description": "The configuration for the speaker to use.\n " + }, + "multiSpeakerVoiceConfig": { + "anyOf": [ + { + "$ref": "#/$defs/MultiSpeakerVoiceConfig" + }, + { + "type": "null" + } + ], + "default": null, + "description": "The configuration for the multi-speaker setup.\n It is mutually exclusive with the voice_config field.\n " + }, + "languageCode": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Language code (ISO 639. e.g. 
en-US) for the speech synthesization.\n Only available for Live API.\n ", + "title": "Languagecode" + } + }, + "title": "SpeechConfig", + "type": "object" + }, + "ThinkingConfig": { + "additionalProperties": false, + "description": "The thinking features configuration.", + "properties": { + "includeThoughts": { + "anyOf": [ + { + "type": "boolean" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Indicates whether to include thoughts in the response. If true, thoughts are returned only if the model supports thought and thoughts are available.\n ", + "title": "Includethoughts" + }, + "thinkingBudget": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Indicates the thinking budget in tokens. 0 is DISABLED. -1 is AUTOMATIC. The default values and allowed ranges are model dependent.\n ", + "title": "Thinkingbudget" + } + }, + "title": "ThinkingConfig", + "type": "object" + }, + "ToolAnnotations": { + "additionalProperties": true, + "description": "Additional properties describing a Tool to clients.\n\nNOTE: all properties in ToolAnnotations are **hints**.\nThey are not guaranteed to provide a faithful description of\ntool behavior (including descriptive properties like `title`).\n\nClients should never make tool use decisions based on ToolAnnotations\nreceived from untrusted servers.", + "properties": { + "title": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Title" + }, + "readOnlyHint": { + "anyOf": [ + { + "type": "boolean" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Readonlyhint" + }, + "destructiveHint": { + "anyOf": [ + { + "type": "boolean" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Destructivehint" + }, + "idempotentHint": { + "anyOf": [ + { + "type": "boolean" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Idempotenthint" + }, + "openWorldHint": { + "anyOf": [ + { + "type": "boolean" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Openworldhint" + } + }, + "title": "ToolAnnotations", + "type": "object" + }, + "ToolArgsConfig": { + "additionalProperties": true, + "description": "The configuration for tool arguments.\n\nThis config allows arbitrary key-value pairs as tool arguments.", + "properties": {}, + "title": "ToolArgsConfig", + "type": "object" + }, + "ToolCodeExecution": { + "additionalProperties": false, + "description": "Tool that executes code generated by the model, and automatically returns the result to the model.\n\nSee also [ExecutableCode]and [CodeExecutionResult] which are input and output\nto this tool.", + "properties": {}, + "title": "ToolCodeExecution", + "type": "object" + }, + "ToolComputerUse": { + "additionalProperties": false, + "description": "Tool to support computer use.", + "properties": { + "environment": { + "anyOf": [ + { + "$ref": "#/$defs/Environment" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Required. The environment being operated." + } + }, + "title": "ToolComputerUse", + "type": "object" + }, + "Type": { + "description": "Optional. 
The type of the data.", + "enum": [ + "TYPE_UNSPECIFIED", + "STRING", + "NUMBER", + "INTEGER", + "BOOLEAN", + "ARRAY", + "OBJECT", + "NULL" + ], + "title": "Type", + "type": "string" + }, + "UrlContext": { + "additionalProperties": false, + "description": "Tool to support URL context retrieval.", + "properties": {}, + "title": "UrlContext", + "type": "object" + }, + "VertexAISearch": { + "additionalProperties": false, + "description": "Retrieve from Vertex AI Search datastore or engine for grounding.\n\ndatastore and engine are mutually exclusive. See\nhttps://cloud.google.com/products/agent-builder", + "properties": { + "dataStoreSpecs": { + "anyOf": [ + { + "items": { + "$ref": "#/$defs/VertexAISearchDataStoreSpec" + }, + "type": "array" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Specifications that define the specific DataStores to be searched, along with configurations for those data stores. This is only considered for Engines with multiple data stores. It should only be set if engine is used.", + "title": "Datastorespecs" + }, + "datastore": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Optional. Fully-qualified Vertex AI Search data store resource ID. Format: `projects/{project}/locations/{location}/collections/{collection}/dataStores/{dataStore}`", + "title": "Datastore" + }, + "engine": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Optional. Fully-qualified Vertex AI Search engine resource ID. Format: `projects/{project}/locations/{location}/collections/{collection}/engines/{engine}`", + "title": "Engine" + }, + "filter": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Optional. Filter strings to be passed to the search API.", + "title": "Filter" + }, + "maxResults": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Optional. Number of search results to return per query. The default value is 10. The maximumm allowed value is 10.", + "title": "Maxresults" + } + }, + "title": "VertexAISearch", + "type": "object" + }, + "VertexAISearchDataStoreSpec": { + "additionalProperties": false, + "description": "Define data stores within engine to filter on in a search call and configurations for those data stores.\n\nFor more information, see\nhttps://cloud.google.com/generative-ai-app-builder/docs/reference/rpc/google.cloud.discoveryengine.v1#datastorespec", + "properties": { + "dataStore": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Full resource name of DataStore, such as Format: `projects/{project}/locations/{location}/collections/{collection}/dataStores/{dataStore}`", + "title": "Datastore" + }, + "filter": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Optional. Filter specification to filter documents in the data store specified by data_store field. 
For more information on filtering, see [Filtering](https://cloud.google.com/generative-ai-app-builder/docs/filter-search-metadata)", + "title": "Filter" + } + }, + "title": "VertexAISearchDataStoreSpec", + "type": "object" + }, + "VertexRagStore": { + "additionalProperties": false, + "description": "Retrieve from Vertex RAG Store for grounding.", + "properties": { + "ragCorpora": { + "anyOf": [ + { + "items": { + "type": "string" + }, + "type": "array" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Optional. Deprecated. Please use rag_resources instead.", + "title": "Ragcorpora" + }, + "ragResources": { + "anyOf": [ + { + "items": { + "$ref": "#/$defs/VertexRagStoreRagResource" + }, + "type": "array" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Optional. The representation of the rag source. It can be used to specify corpus only or ragfiles. Currently only support one corpus or multiple files from one corpus. In the future we may open up multiple corpora support.", + "title": "Ragresources" + }, + "ragRetrievalConfig": { + "anyOf": [ + { + "$ref": "#/$defs/RagRetrievalConfig" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Optional. The retrieval config for the Rag query." + }, + "similarityTopK": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Optional. Number of top k results to return from the selected corpora.", + "title": "Similaritytopk" + }, + "storeContext": { + "anyOf": [ + { + "type": "boolean" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Optional. Currently only supported for Gemini Multimodal Live API. In Gemini Multimodal Live API, if `store_context` bool is specified, Gemini will leverage it to automatically memorize the interactions between the client and Gemini, and retrieve context when needed to augment the response generation for users' ongoing and future interactions.", + "title": "Storecontext" + }, + "vectorDistanceThreshold": { + "anyOf": [ + { + "type": "number" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Optional. Only return results with vector distance smaller than the threshold.", + "title": "Vectordistancethreshold" + } + }, + "title": "VertexRagStore", + "type": "object" + }, + "VertexRagStoreRagResource": { + "additionalProperties": false, + "description": "The definition of the Rag resource.", + "properties": { + "ragCorpus": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Optional. RagCorpora resource name. Format: `projects/{project}/locations/{location}/ragCorpora/{rag_corpus}`", + "title": "Ragcorpus" + }, + "ragFileIds": { + "anyOf": [ + { + "items": { + "type": "string" + }, + "type": "array" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Optional. rag_file_id. The files should be in the same rag_corpus set in rag_corpus field.", + "title": "Ragfileids" + } + }, + "title": "VertexRagStoreRagResource", + "type": "object" + }, + "VideoMetadata": { + "additionalProperties": false, + "description": "Describes how the video in the Part should be used by the model.", + "properties": { + "fps": { + "anyOf": [ + { + "type": "number" + }, + { + "type": "null" + } + ], + "default": null, + "description": "The frame rate of the video sent to the model. If not specified, the\n default value will be 1.0. 
The fps range is (0.0, 24.0].", + "title": "Fps" + }, + "endOffset": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Optional. The end offset of the video.", + "title": "Endoffset" + }, + "startOffset": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Optional. The start offset of the video.", + "title": "Startoffset" + } + }, + "title": "VideoMetadata", + "type": "object" + }, + "VoiceConfig": { + "additionalProperties": false, + "description": "The configuration for the voice to use.", + "properties": { + "prebuiltVoiceConfig": { + "anyOf": [ + { + "$ref": "#/$defs/PrebuiltVoiceConfig" + }, + { + "type": "null" + } + ], + "default": null, + "description": "The configuration for the speaker to use.\n " + } + }, + "title": "VoiceConfig", + "type": "object" + }, + "google__adk__tools__base_tool__ToolConfig": { + "additionalProperties": false, + "description": "The configuration for a tool.\n\nThe config supports these types of tools:\n1. ADK built-in tools\n2. User-defined tool instances\n3. User-defined tool classes\n4. User-defined functions that generate tool instances\n5. User-defined function tools\n\nFor examples:\n\n 1. For ADK built-in tool instances or classes in `google.adk.tools` package,\n they can be referenced directly with the `name` and optionally with\n `config`.\n\n ```\n tools:\n - name: google_search\n - name: AgentTool\n config:\n agent: ./another_agent.yaml\n skip_summarization: true\n ```\n\n 2. For user-defined tool instances, the `name` is the fully qualified path\n to the tool instance.\n\n ```\n tools:\n - name: my_package.my_module.my_tool\n ```\n\n 3. For user-defined tool classes (custom tools), the `name` is the fully\n qualified path to the tool class and `config` is the arguments for the tool.\n\n ```\n tools:\n - name: my_package.my_module.my_tool_class\n config:\n my_tool_arg1: value1\n my_tool_arg2: value2\n ```\n\n 4. For user-defined functions that generate tool instances, the `name` is the\n fully qualified path to the function and `config` is passed to the function\n as arguments.\n\n ```\n tools:\n - name: my_package.my_module.my_tool_function\n config:\n my_function_arg1: value1\n my_function_arg2: value2\n ```\n\n The function must have the following signature:\n ```\n def my_function(config: ToolArgsConfig) -> BaseTool:\n ...\n ```\n\n 5. For user-defined function tools, the `name` is the fully qualified path\n to the function.\n\n ```\n tools:\n - name: my_package.my_module.my_function_tool\n ```", + "properties": { + "name": { + "title": "Name", + "type": "string" + }, + "args": { + "anyOf": [ + { + "$ref": "#/$defs/ToolArgsConfig" + }, + { + "type": "null" + } + ], + "default": null + } + }, + "required": [ + "name" + ], + "title": "ToolConfig", + "type": "object" + }, + "google__genai__types__Tool": { + "additionalProperties": false, + "description": "Tool details of a tool that the model may use to generate a response.", + "properties": { + "functionDeclarations": { + "anyOf": [ + { + "items": { + "$ref": "#/$defs/FunctionDeclaration" + }, + "type": "array" + }, + { + "type": "null" + } + ], + "default": null, + "description": "List of function declarations that the tool supports.", + "title": "Functiondeclarations" + }, + "retrieval": { + "anyOf": [ + { + "$ref": "#/$defs/Retrieval" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Optional. Retrieval tool type. 
System will always execute the provided retrieval tool(s) to get external knowledge to answer the prompt. Retrieval results are presented to the model for generation." + }, + "googleSearch": { + "anyOf": [ + { + "$ref": "#/$defs/GoogleSearch" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Optional. Google Search tool type. Specialized retrieval tool\n that is powered by Google Search." + }, + "googleSearchRetrieval": { + "anyOf": [ + { + "$ref": "#/$defs/GoogleSearchRetrieval" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Optional. GoogleSearchRetrieval tool type. Specialized retrieval tool that is powered by Google search." + }, + "enterpriseWebSearch": { + "anyOf": [ + { + "$ref": "#/$defs/EnterpriseWebSearch" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Optional. Enterprise web search tool type. Specialized retrieval\n tool that is powered by Vertex AI Search and Sec4 compliance." + }, + "googleMaps": { + "anyOf": [ + { + "$ref": "#/$defs/GoogleMaps" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Optional. Google Maps tool type. Specialized retrieval tool\n that is powered by Google Maps." + }, + "urlContext": { + "anyOf": [ + { + "$ref": "#/$defs/UrlContext" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Optional. Tool to support URL context retrieval." + }, + "codeExecution": { + "anyOf": [ + { + "$ref": "#/$defs/ToolCodeExecution" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Optional. CodeExecution tool type. Enables the model to execute code as part of generation." + }, + "computerUse": { + "anyOf": [ + { + "$ref": "#/$defs/ToolComputerUse" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Optional. Tool to support the model interacting directly with the computer. If enabled, it automatically populates computer-use specific Function Declarations." + } + }, + "title": "Tool", + "type": "object" + }, + "google__genai__types__ToolConfig": { + "additionalProperties": false, + "description": "Tool config.\n\nThis config is shared for all tools provided in the request.", + "properties": { + "functionCallingConfig": { + "anyOf": [ + { + "$ref": "#/$defs/FunctionCallingConfig" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Optional. Function calling config." + }, + "retrievalConfig": { + "anyOf": [ + { + "$ref": "#/$defs/RetrievalConfig" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Optional. Retrieval config." 
+ } + }, + "title": "ToolConfig", + "type": "object" + }, + "mcp__types__Tool": { + "additionalProperties": true, + "description": "Definition for a tool the client can call.", + "properties": { + "name": { + "title": "Name", + "type": "string" + }, + "title": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Title" + }, + "description": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Description" + }, + "inputSchema": { + "additionalProperties": true, + "title": "Inputschema", + "type": "object" + }, + "outputSchema": { + "anyOf": [ + { + "additionalProperties": true, + "type": "object" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Outputschema" + }, + "annotations": { + "anyOf": [ + { + "$ref": "#/$defs/ToolAnnotations" + }, + { + "type": "null" + } + ], + "default": null + }, + "_meta": { + "anyOf": [ + { + "additionalProperties": true, + "type": "object" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Meta" + } + }, + "required": [ + "name", + "inputSchema" + ], + "title": "Tool", "type": "object" } }, diff --git a/src/google/adk/agents/llm_agent.py b/src/google/adk/agents/llm_agent.py index 68219318e..75d859944 100644 --- a/src/google/adk/agents/llm_agent.py +++ b/src/google/adk/agents/llm_agent.py @@ -628,6 +628,8 @@ def from_config( ) if config.after_tool_callbacks: agent.after_tool_callback = resolve_callbacks(config.after_tool_callbacks) + if config.generate_content_config: + agent.generate_content_config = config.generate_content_config return agent diff --git a/src/google/adk/agents/llm_agent_config.py b/src/google/adk/agents/llm_agent_config.py index 0a08e3482..d65ec34ee 100644 --- a/src/google/adk/agents/llm_agent_config.py +++ b/src/google/adk/agents/llm_agent_config.py @@ -19,6 +19,7 @@ from typing import Literal from typing import Optional +from google.genai import types from pydantic import ConfigDict from ..tools.base_tool import ToolConfig @@ -138,3 +139,6 @@ class LlmAgentConfig(BaseAgentConfig): after_tool_callbacks: Optional[List[CodeConfig]] = None """Optional. LlmAgent.after_tool_callbacks.""" + + generate_content_config: Optional[types.GenerateContentConfig] = None + """Optional. LlmAgent.generate_content_config.""" From 16a15c8709b47c9bebe7cffe888e8e7e48ec605a Mon Sep 17 00:00:00 2001 From: Google Team Member Date: Fri, 1 Aug 2025 01:30:46 -0700 Subject: [PATCH 03/41] docs: fix typos PiperOrigin-RevId: 789660536 --- llms-full.txt | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/llms-full.txt b/llms-full.txt index b196a5077..4ce28660f 100644 --- a/llms-full.txt +++ b/llms-full.txt @@ -14984,16 +14984,16 @@ While you have considerable flexibility in defining your function, remember that Designed for tasks that require a significant amount of processing time without blocking the agent's execution. This tool is a subclass of `FunctionTool`. -When using a `LongRunningFunctionTool`, your function can initiate the long-running operation and optionally return an **initial result**** (e.g. the long-running operation id). Once a long running function tool is invoked the agent runner will pause the agent run and let the agent client to decide whether to continue or wait until the long-running operation finishes. The agent client can query the progress of the long-running operation and send back an intermediate or final response. The agent can then continue with other tasks. 
An example is the human-in-the-loop scenario where the agent needs human approval before proceeding with a task. +When using a `LongRunningFunctionTool`, your function can initiate the long-running operation and optionally return an **initial result** (e.g. the long-running operation id). Once a long running function tool is invoked the agent runner will pause the agent run and let the agent client to decide whether to continue or wait until the long-running operation finishes. The agent client can query the progress of the long-running operation and send back an intermediate or final response. The agent can then continue with other tasks. An example is the human-in-the-loop scenario where the agent needs human approval before proceeding with a task. ### How it Works -In Python, you wrap a function with `LongRunningFunctionTool`. In Java, you pass a Method name to `LongRunningFunctionTool.create()`. +In Python, you wrap a function with `LongRunningFunctionTool`. In Java, you pass a Method name to `LongRunningFunctionTool.create()`. 1. **Initiation:** When the LLM calls the tool, your function starts the long-running operation. -2. **Initial Updates:** Your function should optionally return an initial result (e.g. the long-running operaiton id). The ADK framework takes the result and sends it back to the LLM packaged within a `FunctionResponse`. This allows the LLM to inform the user (e.g., status, percentage complete, messages). And then the agent run is ended / paused. +2. **Initial Updates:** Your function should optionally return an initial result (e.g. the long-running operation id). The ADK framework takes the result and sends it back to the LLM packaged within a `FunctionResponse`. This allows the LLM to inform the user (e.g., status, percentage complete, messages). And then the agent run is ended / paused. 3. **Continue or Wait:** After each agent run is completed. Agent client can query the progress of the long-running operation and decide whether to continue the agent run with an intermediate response (to update the progress) or wait until a final response is retrieved. Agent client should send the intermediate or final response back to the agent for the next run. From 041f04e89cee30532facccce4900d10f1b8c69ce Mon Sep 17 00:00:00 2001 From: Xuan Yang Date: Fri, 1 Aug 2025 09:35:42 -0700 Subject: [PATCH 04/41] chore: change `LlmRequest.config`'s default value to be `types.GenerateContentConfig()` instead of None PiperOrigin-RevId: 789792582 --- src/google/adk/models/llm_request.py | 4 +++- tests/unittests/models/test_google_llm.py | 1 - tests/unittests/tools/test_base_tool.py | 2 +- tests/unittests/tools/test_google_search_tool.py | 4 ++-- tests/unittests/tools/test_url_context_tool.py | 4 ++-- 5 files changed, 8 insertions(+), 7 deletions(-) diff --git a/src/google/adk/models/llm_request.py b/src/google/adk/models/llm_request.py index dcb616bd5..79770182a 100644 --- a/src/google/adk/models/llm_request.py +++ b/src/google/adk/models/llm_request.py @@ -45,7 +45,9 @@ class LlmRequest(BaseModel): contents: list[types.Content] = Field(default_factory=list) """The contents to send to the model.""" - config: Optional[types.GenerateContentConfig] = None + config: types.GenerateContentConfig = Field( + default_factory=types.GenerateContentConfig + ) live_connect_config: types.LiveConnectConfig = types.LiveConnectConfig() """Additional config for the generate content request. 
diff --git a/tests/unittests/models/test_google_llm.py b/tests/unittests/models/test_google_llm.py index 03d18ec6d..9004245c8 100644 --- a/tests/unittests/models/test_google_llm.py +++ b/tests/unittests/models/test_google_llm.py @@ -1505,7 +1505,6 @@ async def test_computer_use_with_no_config(): contents=[ types.Content(role="user", parts=[types.Part.from_text(text="Hello")]) ], - config=None, ) # Should not raise an exception diff --git a/tests/unittests/tools/test_base_tool.py b/tests/unittests/tools/test_base_tool.py index d450cc0ea..da1dda64d 100644 --- a/tests/unittests/tools/test_base_tool.py +++ b/tests/unittests/tools/test_base_tool.py @@ -62,7 +62,7 @@ async def test_process_llm_request_no_declaration(): tool_context=tool_context, llm_request=llm_request ) - assert llm_request.config is None + assert llm_request.config == types.GenerateContentConfig() @pytest.mark.asyncio diff --git a/tests/unittests/tools/test_google_search_tool.py b/tests/unittests/tools/test_google_search_tool.py index c297e438d..9623875aa 100644 --- a/tests/unittests/tools/test_google_search_tool.py +++ b/tests/unittests/tools/test_google_search_tool.py @@ -322,12 +322,12 @@ async def test_process_llm_request_with_empty_model_raises_error(self): ) @pytest.mark.asyncio - async def test_process_llm_request_with_none_config(self): + async def test_process_llm_request_with_no_config(self): """Test processing LLM request with None config.""" tool = GoogleSearchTool() tool_context = await _create_tool_context() - llm_request = LlmRequest(model='gemini-2.0-flash', config=None) + llm_request = LlmRequest(model='gemini-2.0-flash') await tool.process_llm_request( tool_context=tool_context, llm_request=llm_request diff --git a/tests/unittests/tools/test_url_context_tool.py b/tests/unittests/tools/test_url_context_tool.py index e4c533020..cbbbb0c9a 100644 --- a/tests/unittests/tools/test_url_context_tool.py +++ b/tests/unittests/tools/test_url_context_tool.py @@ -242,12 +242,12 @@ async def test_process_llm_request_with_empty_model_raises_error(self): ) @pytest.mark.asyncio - async def test_process_llm_request_with_none_config(self): + async def test_process_llm_request_with_no_config(self): """Test processing LLM request with None config.""" tool = UrlContextTool() tool_context = await _create_tool_context() - llm_request = LlmRequest(model='gemini-2.0-flash', config=None) + llm_request = LlmRequest(model='gemini-2.0-flash') await tool.process_llm_request( tool_context=tool_context, llm_request=llm_request From bead607364be7ac8109357c9d3076d9b345e9e8a Mon Sep 17 00:00:00 2001 From: Google Team Member Date: Fri, 1 Aug 2025 10:15:55 -0700 Subject: [PATCH 05/41] chore: Hide the ask_data_insights tool until the API is publicly available PiperOrigin-RevId: 789806535 --- contributing/samples/bigquery/README.md | 10 ---------- src/google/adk/tools/bigquery/bigquery_toolset.py | 2 -- .../unittests/tools/bigquery/test_bigquery_toolset.py | 3 +-- 3 files changed, 1 insertion(+), 14 deletions(-) diff --git a/contributing/samples/bigquery/README.md b/contributing/samples/bigquery/README.md index c1d2b1611..050ce1332 100644 --- a/contributing/samples/bigquery/README.md +++ b/contributing/samples/bigquery/README.md @@ -25,16 +25,6 @@ distributed via the `google.adk.tools.bigquery` module. These tools include: Runs a SQL query in BigQuery. -1. `ask_data_insights` - - Natural language-in, natural language-out tool that answers questions - about structured data in BigQuery. 
Provides a one-stop solution for generating - insights from data. - - **Note**: This tool requires additional setup in your project. Please refer to - the official [Conversational Analytics API documentation](https://cloud.google.com/gemini/docs/conversational-analytics-api/overview) - for instructions. - ## How to use Set up environment variables in your `.env` file for using diff --git a/src/google/adk/tools/bigquery/bigquery_toolset.py b/src/google/adk/tools/bigquery/bigquery_toolset.py index 2c872d757..313cf4990 100644 --- a/src/google/adk/tools/bigquery/bigquery_toolset.py +++ b/src/google/adk/tools/bigquery/bigquery_toolset.py @@ -21,7 +21,6 @@ from google.adk.agents.readonly_context import ReadonlyContext from typing_extensions import override -from . import data_insights_tool from . import metadata_tool from . import query_tool from ...tools.base_tool import BaseTool @@ -79,7 +78,6 @@ async def get_tools( metadata_tool.list_dataset_ids, metadata_tool.list_table_ids, query_tool.get_execute_sql(self._tool_config), - data_insights_tool.ask_data_insights, ] ] diff --git a/tests/unittests/tools/bigquery/test_bigquery_toolset.py b/tests/unittests/tools/bigquery/test_bigquery_toolset.py index 24488db5d..4129dc512 100644 --- a/tests/unittests/tools/bigquery/test_bigquery_toolset.py +++ b/tests/unittests/tools/bigquery/test_bigquery_toolset.py @@ -34,7 +34,7 @@ async def test_bigquery_toolset_tools_default(): tools = await toolset.get_tools() assert tools is not None - assert len(tools) == 6 + assert len(tools) == 5 assert all([isinstance(tool, BigQueryTool) for tool in tools]) expected_tool_names = set([ @@ -43,7 +43,6 @@ async def test_bigquery_toolset_tools_default(): "list_table_ids", "get_table_info", "execute_sql", - "ask_data_insights", ]) actual_tool_names = set([tool.name for tool in tools]) assert actual_tool_names == expected_tool_names From faadef167ee8e4dd1faf4da5685a577c3155556e Mon Sep 17 00:00:00 2001 From: "Xiang (Sean) Zhou" Date: Fri, 1 Aug 2025 10:19:32 -0700 Subject: [PATCH 06/41] fix: incompatible a2a sdk changes a. camel case to snake case b. 
A2ACardResolver moved to different module PiperOrigin-RevId: 789807686 --- pyproject.toml | 2 +- src/google/adk/a2a/logs/log_utils.py | 8 ++-- src/google/adk/agents/remote_a2a_agent.py | 2 +- tests/unittests/a2a/logs/test_log_utils.py | 2 +- .../unittests/agents/test_remote_a2a_agent.py | 43 +++++++++++-------- 5 files changed, 31 insertions(+), 26 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index e64149db9..2d1414afe 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -81,7 +81,7 @@ dev = [ a2a = [ # go/keep-sorted start - "a2a-sdk>=0.2.16,<0.3.0;python_version>='3.10'", + "a2a-sdk>=0.3.0,<0.4.0;python_version>='3.10'", # go/keep-sorted end ] diff --git a/src/google/adk/a2a/logs/log_utils.py b/src/google/adk/a2a/logs/log_utils.py index 901cd631a..78ca43715 100644 --- a/src/google/adk/a2a/logs/log_utils.py +++ b/src/google/adk/a2a/logs/log_utils.py @@ -136,11 +136,11 @@ def build_a2a_request_log(req: SendMessageRequest) -> str: config_log = "None" if req.params.configuration: config_data = { - "acceptedOutputModes": req.params.configuration.acceptedOutputModes, + "accepted_output_modes": req.params.configuration.accepted_output_modes, "blocking": req.params.configuration.blocking, - "historyLength": req.params.configuration.historyLength, - "pushNotificationConfig": bool( - req.params.configuration.pushNotificationConfig + "history_length": req.params.configuration.history_length, + "push_notification_config": bool( + req.params.configuration.push_notification_config ), } config_log = json.dumps(config_data, indent=2) diff --git a/src/google/adk/agents/remote_a2a_agent.py b/src/google/adk/agents/remote_a2a_agent.py index 02d06a1bf..cc9fb75ad 100644 --- a/src/google/adk/agents/remote_a2a_agent.py +++ b/src/google/adk/agents/remote_a2a_agent.py @@ -26,7 +26,7 @@ try: from a2a.client import A2AClient - from a2a.client.client import A2ACardResolver + from a2a.client.card_resolver import A2ACardResolver from a2a.types import AgentCard from a2a.types import Message as A2AMessage from a2a.types import MessageSendParams as A2AMessageSendParams diff --git a/tests/unittests/a2a/logs/test_log_utils.py b/tests/unittests/a2a/logs/test_log_utils.py index 2ca432cc1..9673dab4c 100644 --- a/tests/unittests/a2a/logs/test_log_utils.py +++ b/tests/unittests/a2a/logs/test_log_utils.py @@ -184,7 +184,7 @@ def test_request_with_parts_and_config(self): assert "Part 0:" in result assert "Part 1:" in result assert '"blocking": true' in result - assert '"historyLength": 10' in result + assert '"history_length": 10' in result assert '"key1": "value1"' in result def test_request_without_parts(self): diff --git a/tests/unittests/agents/test_remote_a2a_agent.py b/tests/unittests/agents/test_remote_a2a_agent.py index fa1a20fef..7ef32de66 100644 --- a/tests/unittests/agents/test_remote_a2a_agent.py +++ b/tests/unittests/agents/test_remote_a2a_agent.py @@ -20,7 +20,10 @@ from unittest.mock import Mock from unittest.mock import patch -# Try to import a2a library - will fail on Python < 3.10 +import pytest + +# Check if A2A dependencies are available +A2A_AVAILABLE = True try: from a2a.types import AgentCapabilities from a2a.types import AgentCard @@ -32,32 +35,34 @@ from google.adk.agents.remote_a2a_agent import A2A_METADATA_PREFIX from google.adk.agents.remote_a2a_agent import AgentCardResolutionError from google.adk.agents.remote_a2a_agent import RemoteA2aAgent - - A2A_AVAILABLE = True except ImportError: A2A_AVAILABLE = False + # Create dummy classes to prevent NameError during test collection - 
AgentCapabilities = type("AgentCapabilities", (), {}) - AgentCard = type("AgentCard", (), {}) - AgentSkill = type("AgentSkill", (), {}) - A2AMessage = type("A2AMessage", (), {}) - SendMessageSuccessResponse = type("SendMessageSuccessResponse", (), {}) - A2ATask = type("A2ATask", (), {}) + class DummyTypes: + pass + + AgentCapabilities = DummyTypes() + AgentCard = DummyTypes() + AgentSkill = DummyTypes() + A2AMessage = DummyTypes() + SendMessageSuccessResponse = DummyTypes() + A2ATask = DummyTypes() + InvocationContext = DummyTypes() + RemoteA2aAgent = DummyTypes() + AgentCardResolutionError = Exception + A2A_METADATA_PREFIX = "" + +# Skip all tests in this module if Python < 3.10 or A2A dependencies are not available +pytestmark = pytest.mark.skipif( + sys.version_info < (3, 10) or not A2A_AVAILABLE, + reason="A2A requires Python 3.10+ and A2A dependencies must be available", +) from google.adk.events.event import Event from google.adk.sessions.session import Session import httpx -import pytest - -# Skip all tests in this module if Python < 3.10 or a2a library is not available -pytestmark = pytest.mark.skipif( - sys.version_info < (3, 10) or not A2A_AVAILABLE, - reason=( - "a2a library requires Python 3.10+ and is not available, skipping" - " RemoteA2aAgent tests" - ), -) # Helper function to create a proper AgentCard for testing From 86a44873e9b2dfc7e62fa31a9ac3be57c0bbff7b Mon Sep 17 00:00:00 2001 From: "Xiang (Sean) Zhou" Date: Fri, 1 Aug 2025 10:20:43 -0700 Subject: [PATCH 07/41] fix: Annotate response type as None for transfer_to_agent tool and set empty Schema as response schema when tool has no response annotation 1. if a function has no return type annotation, we should treat it as returning any type 2. we use empty schema (with `type` as None) to indicate no type constraints and this is already supported by model server PiperOrigin-RevId: 789808104 --- .../tools/_automatic_function_calling_util.py | 23 ++- .../tools/_function_parameter_parse_util.py | 4 + .../adk/tools/transfer_to_agent_tool.py | 4 +- .../flows/llm_flows/test_agent_transfer.py | 8 +- .../tools/test_build_function_declaration.py | 14 +- .../tools/test_from_function_with_options.py | 26 ++- ...t_function_tool_with_import_annotations.py | 179 ++++++++++++++++++ 7 files changed, 242 insertions(+), 16 deletions(-) create mode 100644 tests/unittests/tools/test_function_tool_with_import_annotations.py diff --git a/src/google/adk/tools/_automatic_function_calling_util.py b/src/google/adk/tools/_automatic_function_calling_util.py index 3a26862ea..5e32f68e0 100644 --- a/src/google/adk/tools/_automatic_function_calling_util.py +++ b/src/google/adk/tools/_automatic_function_calling_util.py @@ -329,11 +329,28 @@ def from_function_with_options( return_annotation = inspect.signature(func).return_annotation - # Handle functions with no return annotation or that return None + # Handle functions with no return annotation + if return_annotation is inspect._empty: + # Functions with no return annotation can return any type + return_value = inspect.Parameter( + 'return_value', + inspect.Parameter.POSITIONAL_OR_KEYWORD, + annotation=typing.Any, + ) + declaration.response = ( + _function_parameter_parse_util._parse_schema_from_parameter( + variant, + return_value, + func.__name__, + ) + ) + return declaration + + # Handle functions that explicitly return None if ( - return_annotation is inspect._empty - or return_annotation is None + return_annotation is None or return_annotation is type(None) + or (isinstance(return_annotation, str) 
and return_annotation == 'None') ): # Create a response schema for None/null return return_value = inspect.Parameter( diff --git a/src/google/adk/tools/_function_parameter_parse_util.py b/src/google/adk/tools/_function_parameter_parse_util.py index ba1e3c9ad..a0168fbe2 100644 --- a/src/google/adk/tools/_function_parameter_parse_util.py +++ b/src/google/adk/tools/_function_parameter_parse_util.py @@ -38,6 +38,10 @@ list: types.Type.ARRAY, dict: types.Type.OBJECT, None: types.Type.NULL, + # TODO requested google GenAI SDK to add a Type.ANY and do the mapping on + # their side, once new enum is added, replace the below one with + # Any: types.Type.ANY + Any: None, } logger = logging.getLogger('google_adk.' + __name__) diff --git a/src/google/adk/tools/transfer_to_agent_tool.py b/src/google/adk/tools/transfer_to_agent_tool.py index a16afca04..99ee234b3 100644 --- a/src/google/adk/tools/transfer_to_agent_tool.py +++ b/src/google/adk/tools/transfer_to_agent_tool.py @@ -12,10 +12,12 @@ # See the License for the specific language governing permissions and # limitations under the License. +from __future__ import annotations + from .tool_context import ToolContext -def transfer_to_agent(agent_name: str, tool_context: ToolContext): +def transfer_to_agent(agent_name: str, tool_context: ToolContext) -> None: """Transfer the question to another agent. This tool hands off control to another agent when it's more suitable to diff --git a/tests/unittests/flows/llm_flows/test_agent_transfer.py b/tests/unittests/flows/llm_flows/test_agent_transfer.py index 4cb48c845..5268d0ca0 100644 --- a/tests/unittests/flows/llm_flows/test_agent_transfer.py +++ b/tests/unittests/flows/llm_flows/test_agent_transfer.py @@ -89,7 +89,7 @@ def test_auto_to_single(): ('sub_agent_1', 'response1'), ] - # root_agent should still be the current agent, becaues sub_agent_1 is single. + # root_agent should still be the current agent, because sub_agent_1 is single. assert testing_utils.simplify_events(runner.run('test2')) == [ ('root_agent', 'response2'), ] @@ -140,7 +140,7 @@ def test_auto_to_auto_to_single(): def test_auto_to_sequential(): response = [ transfer_call_part('sub_agent_1'), - # sub_agent_1 responds directly instead of transfering. + # sub_agent_1 responds directly instead of transferring. 'response1', 'response2', 'response3', @@ -189,7 +189,7 @@ def test_auto_to_sequential(): def test_auto_to_sequential_to_auto(): response = [ transfer_call_part('sub_agent_1'), - # sub_agent_1 responds directly instead of transfering. + # sub_agent_1 responds directly instead of transferring. 'response1', transfer_call_part('sub_agent_1_2_1'), 'response2', @@ -250,7 +250,7 @@ def test_auto_to_sequential_to_auto(): def test_auto_to_loop(): response = [ transfer_call_part('sub_agent_1'), - # sub_agent_1 responds directly instead of transfering. + # sub_agent_1 responds directly instead of transferring. 
       'response1',
       'response2',
       'response3',
diff --git a/tests/unittests/tools/test_build_function_declaration.py b/tests/unittests/tools/test_build_function_declaration.py
index 444fbd99b..edf3c7128 100644
--- a/tests/unittests/tools/test_build_function_declaration.py
+++ b/tests/unittests/tools/test_build_function_declaration.py
@@ -298,9 +298,10 @@ def function_no_return(param: str):
   assert function_decl.name == 'function_no_return'
   assert function_decl.parameters.type == 'OBJECT'
   assert function_decl.parameters.properties['param'].type == 'STRING'
-  # VERTEX_AI should have response schema for None return
+  # VERTEX_AI should have response schema for functions with no return annotation
+  # Changed: Now uses Any type instead of NULL for no return annotation
   assert function_decl.response is not None
-  assert function_decl.response.type == types.Type.NULL
+  assert function_decl.response.type is None  # Any type maps to None in schema
 
 
 def test_function_explicit_none_return_vertex_ai():
@@ -359,8 +360,8 @@ def function_string_return(param: str) -> str:
   assert function_decl.response.type == types.Type.STRING
 
 
-def test_transfer_to_agent_like_function():
-  """Test a function similar to transfer_to_agent that caused the original issue."""
+def test_function_with_no_response_annotations():
+  """Test a function that has no response annotations."""
 
   def transfer_to_agent(agent_name: str, tool_context: ToolContext):
     """Transfer the question to another agent."""
@@ -376,6 +377,7 @@ def transfer_to_agent(agent_name: str, tool_context: ToolContext):
   assert function_decl.parameters.type == 'OBJECT'
   assert function_decl.parameters.properties['agent_name'].type == 'STRING'
   assert 'tool_context' not in function_decl.parameters.properties
-  # This should now have a response schema for VERTEX_AI variant
+  # This function has no return annotation, so it gets Any type instead of NULL
+  # Changed: Now uses Any type instead of NULL for no return annotation
   assert function_decl.response is not None
-  assert function_decl.response.type == types.Type.NULL
+  assert function_decl.response.type is None  # Any type maps to None in schema
diff --git a/tests/unittests/tools/test_from_function_with_options.py b/tests/unittests/tools/test_from_function_with_options.py
index 328eefab3..3ae5e1f52 100644
--- a/tests/unittests/tools/test_from_function_with_options.py
+++ b/tests/unittests/tools/test_from_function_with_options.py
@@ -12,6 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
+from typing import Any from typing import Dict from google.adk.tools import _automatic_function_calling_util @@ -51,9 +52,10 @@ def test_function(param: str): assert declaration.name == 'test_function' assert declaration.parameters.type == 'OBJECT' assert declaration.parameters.properties['param'].type == 'STRING' - # VERTEX_AI should have response schema for None return + # VERTEX_AI should have response schema for functions with no return annotation + # Changed: Now uses Any type instead of NULL for no return annotation assert declaration.response is not None - assert declaration.response.type == types.Type.NULL + assert declaration.response.type is None # Any type maps to None in schema def test_from_function_with_options_explicit_none_return_vertex(): @@ -150,6 +152,26 @@ def test_function(param: str) -> int: assert declaration.response.type == types.Type.INTEGER +def test_from_function_with_options_any_annotation_vertex(): + """Test from_function_with_options with Any type annotation for VERTEX_AI.""" + + def test_function(param: Any) -> Any: + """A test function that uses Any type annotations.""" + return param + + declaration = _automatic_function_calling_util.from_function_with_options( + test_function, GoogleLLMVariant.VERTEX_AI + ) + + assert declaration.name == 'test_function' + assert declaration.parameters.type == 'OBJECT' + # Any type should map to None in schema (TYPE_UNSPECIFIED behavior) + assert declaration.parameters.properties['param'].type is None + # VERTEX_AI should have response schema for Any return + assert declaration.response is not None + assert declaration.response.type is None # Any type maps to None in schema + + def test_from_function_with_options_no_params(): """Test from_function_with_options with no parameters.""" diff --git a/tests/unittests/tools/test_function_tool_with_import_annotations.py b/tests/unittests/tools/test_function_tool_with_import_annotations.py new file mode 100644 index 000000000..99309a060 --- /dev/null +++ b/tests/unittests/tools/test_function_tool_with_import_annotations.py @@ -0,0 +1,179 @@ +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from __future__ import annotations + +from typing import Any +from typing import Dict + +from google.adk.tools import _automatic_function_calling_util +from google.adk.utils.variant_utils import GoogleLLMVariant +from google.genai import types + + +def test_string_annotation_none_return_vertex(): + """Test function with string annotation 'None' return for VERTEX_AI.""" + + def test_function(_param: str) -> None: + """A test function that returns None with string annotation.""" + pass + + declaration = _automatic_function_calling_util.from_function_with_options( + test_function, GoogleLLMVariant.VERTEX_AI + ) + + assert declaration.name == 'test_function' + assert declaration.parameters.type == 'OBJECT' + assert declaration.parameters.properties['_param'].type == 'STRING' + # VERTEX_AI should have response schema for None return (stored as string) + assert declaration.response is not None + assert declaration.response.type == types.Type.NULL + + +def test_string_annotation_none_return_gemini(): + """Test function with string annotation 'None' return for GEMINI_API.""" + + def test_function(_param: str) -> None: + """A test function that returns None with string annotation.""" + pass + + declaration = _automatic_function_calling_util.from_function_with_options( + test_function, GoogleLLMVariant.GEMINI_API + ) + + assert declaration.name == 'test_function' + assert declaration.parameters.type == 'OBJECT' + assert declaration.parameters.properties['_param'].type == 'STRING' + # GEMINI_API should not have response schema + assert declaration.response is None + + +def test_string_annotation_str_return_vertex(): + """Test function with string annotation 'str' return for VERTEX_AI.""" + + def test_function(_param: str) -> str: + """A test function that returns a string with string annotation.""" + return _param + + declaration = _automatic_function_calling_util.from_function_with_options( + test_function, GoogleLLMVariant.VERTEX_AI + ) + + assert declaration.name == 'test_function' + assert declaration.parameters.type == 'OBJECT' + assert declaration.parameters.properties['_param'].type == 'STRING' + # VERTEX_AI should have response schema for string return (stored as string) + assert declaration.response is not None + assert declaration.response.type == types.Type.STRING + + +def test_string_annotation_int_return_vertex(): + """Test function with string annotation 'int' return for VERTEX_AI.""" + + def test_function(_param: str) -> int: + """A test function that returns an int with string annotation.""" + return 42 + + declaration = _automatic_function_calling_util.from_function_with_options( + test_function, GoogleLLMVariant.VERTEX_AI + ) + + assert declaration.name == 'test_function' + assert declaration.parameters.type == 'OBJECT' + assert declaration.parameters.properties['_param'].type == 'STRING' + # VERTEX_AI should have response schema for int return (stored as string) + assert declaration.response is not None + assert declaration.response.type == types.Type.INTEGER + + +def test_string_annotation_dict_return_vertex(): + """Test function with string annotation Dict return for VERTEX_AI.""" + + def test_function(_param: str) -> Dict[str, str]: + """A test function that returns a dict with string annotation.""" + return {'result': _param} + + declaration = _automatic_function_calling_util.from_function_with_options( + test_function, GoogleLLMVariant.VERTEX_AI + ) + + assert declaration.name == 'test_function' + assert declaration.parameters.type == 'OBJECT' + assert 
declaration.parameters.properties['_param'].type == 'STRING' + # VERTEX_AI should have response schema for dict return (stored as string) + assert declaration.response is not None + assert declaration.response.type == types.Type.OBJECT + + +def test_string_annotation_any_return_vertex(): + """Test function with string annotation 'Any' return for VERTEX_AI.""" + + def test_function(_param: Any) -> Any: + """A test function that uses Any type with string annotations.""" + return _param + + declaration = _automatic_function_calling_util.from_function_with_options( + test_function, GoogleLLMVariant.VERTEX_AI + ) + + assert declaration.name == 'test_function' + assert declaration.parameters.type == 'OBJECT' + # Any type should map to None in schema (TYPE_UNSPECIFIED behavior) + assert declaration.parameters.properties['_param'].type is None + # VERTEX_AI should have response schema for Any return (stored as string) + assert declaration.response is not None + assert declaration.response.type is None # Any type maps to None in schema + + +def test_string_annotation_mixed_parameters_vertex(): + """Test function with mixed string annotations for parameters.""" + + def test_function(str_param: str, int_param: int, any_param: Any) -> str: + """A test function with mixed parameter types as string annotations.""" + return f'{str_param}-{int_param}-{any_param}' + + declaration = _automatic_function_calling_util.from_function_with_options( + test_function, GoogleLLMVariant.VERTEX_AI + ) + + assert declaration.name == 'test_function' + assert declaration.parameters.type == 'OBJECT' + assert declaration.parameters.properties['str_param'].type == 'STRING' + assert declaration.parameters.properties['int_param'].type == 'INTEGER' + assert declaration.parameters.properties['any_param'].type is None # Any type + # VERTEX_AI should have response schema for string return (stored as string) + assert declaration.response is not None + assert declaration.response.type == types.Type.STRING + + +def test_string_annotation_no_params_vertex(): + """Test function with no parameters but string annotation return.""" + + def test_function() -> str: + """A test function with no parameters that returns string (string annotation).""" + return 'hello' + + declaration = _automatic_function_calling_util.from_function_with_options( + test_function, GoogleLLMVariant.VERTEX_AI + ) + + assert declaration.name == 'test_function' + # No parameters should result in no parameters field or empty parameters + assert ( + declaration.parameters is None + or len(declaration.parameters.properties) == 0 + ) + # VERTEX_AI should have response schema for string return (stored as string) + assert declaration.response is not None + assert declaration.response.type == types.Type.STRING From 2bb20411f4909685d42784ec17e6c77813571262 Mon Sep 17 00:00:00 2001 From: "Wei Sun (Jack)" Date: Fri, 1 Aug 2025 12:01:18 -0700 Subject: [PATCH 08/41] feat(config): Adds `BaseAgent.config_type` field to indicate the config for the current agent and removes if-else branches against LlmAgent/LoopAgent/... in `config_agent_utils::from_config` This makes the logic work with any user-defined agent with user-defined XxxAgentConfig. 
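For illustration, a minimal sketch of the pattern this enables; the `my_package.custom_agent` module and its `greeting` field are hypothetical and not part of this change:

```python
# Hypothetical module my_package/custom_agent.py: a user-defined agent whose
# fully qualified name can be used as `agent_class` in a YAML config. The new
# `config_type` class variable tells config_agent_utils.from_config which
# config class to validate the YAML against.
from __future__ import annotations

from typing import ClassVar

from google.adk.agents.base_agent import BaseAgent
from google.adk.agents.base_agent_config import BaseAgentConfig


class CustomAgentConfig(BaseAgentConfig):
  greeting: str = ''


class CustomAgent(BaseAgent):
  config_type: ClassVar[type[BaseAgentConfig]] = CustomAgentConfig
```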
PiperOrigin-RevId: 789845354 --- src/google/adk/agents/base_agent.py | 17 ++++++ src/google/adk/agents/config_agent_utils.py | 59 ++++++++++++++------- src/google/adk/agents/llm_agent.py | 5 ++ src/google/adk/agents/loop_agent.py | 5 ++ src/google/adk/agents/parallel_agent.py | 5 ++ src/google/adk/agents/sequential_agent.py | 4 ++ 6 files changed, 77 insertions(+), 18 deletions(-) diff --git a/src/google/adk/agents/base_agent.py b/src/google/adk/agents/base_agent.py index 9ee7477aa..1d2d8c027 100644 --- a/src/google/adk/agents/base_agent.py +++ b/src/google/adk/agents/base_agent.py @@ -19,6 +19,7 @@ from typing import AsyncGenerator from typing import Awaitable from typing import Callable +from typing import ClassVar from typing import Dict from typing import final from typing import Mapping @@ -75,6 +76,22 @@ class BaseAgent(BaseModel): ) """The pydantic model config.""" + config_type: ClassVar[type[BaseAgentConfig]] = BaseAgentConfig + """The config type for this agent. + + Sub-classes should override this to specify their own config type. + + Example: + + ``` + class MyAgentConfig(BaseAgentConfig): + my_field: str = '' + + class MyAgent(BaseAgent): + config_type: ClassVar[type[BaseAgentConfig]] = MyAgentConfig + ``` + """ + name: str """The agent's name. diff --git a/src/google/adk/agents/config_agent_utils.py b/src/google/adk/agents/config_agent_utils.py index 8bbcdc954..4a22366fc 100644 --- a/src/google/adk/agents/config_agent_utils.py +++ b/src/google/adk/agents/config_agent_utils.py @@ -15,6 +15,7 @@ from __future__ import annotations import importlib +import inspect import os from typing import Any from typing import List @@ -24,16 +25,9 @@ from ..utils.feature_decorator import working_in_progress from .agent_config import AgentConfig from .base_agent import BaseAgent +from .base_agent_config import BaseAgentConfig from .common_configs import AgentRefConfig from .common_configs import CodeConfig -from .llm_agent import LlmAgent -from .llm_agent_config import LlmAgentConfig -from .loop_agent import LoopAgent -from .loop_agent_config import LoopAgentConfig -from .parallel_agent import ParallelAgent -from .parallel_agent import ParallelAgentConfig -from .sequential_agent import SequentialAgent -from .sequential_agent import SequentialAgentConfig @working_in_progress("from_config is not ready for use.") @@ -53,17 +47,36 @@ def from_config(config_path: str) -> BaseAgent: """ abs_path = os.path.abspath(config_path) config = _load_config_from_path(abs_path) - - if isinstance(config.root, LlmAgentConfig): - return LlmAgent.from_config(config.root, abs_path) - elif isinstance(config.root, LoopAgentConfig): - return LoopAgent.from_config(config.root, abs_path) - elif isinstance(config.root, ParallelAgentConfig): - return ParallelAgent.from_config(config.root, abs_path) - elif isinstance(config.root, SequentialAgentConfig): - return SequentialAgent.from_config(config.root, abs_path) + agent_config = config.root + + # pylint: disable=unidiomatic-typecheck Needs exact class matching. + if type(agent_config) is BaseAgentConfig: + # Resolve the concrete agent config for user-defined agent classes. + agent_class = _resolve_agent_class(agent_config.agent_class) + agent_config = agent_class.config_type.model_validate( + agent_config.model_dump() + ) + return agent_class.from_config(agent_config, abs_path) else: - raise ValueError("Unsupported config type") + # For built-in agent classes, no need to re-validate. 
+ agent_class = _resolve_agent_class(agent_config.agent_class) + return agent_class.from_config(agent_config, abs_path) + + +def _resolve_agent_class(agent_class: str) -> type[BaseAgent]: + """Resolve the agent class from its fully qualified name.""" + agent_class_name = agent_class or "LlmAgent" + if "." not in agent_class_name: + agent_class_name = f"google.adk.agents.{agent_class_name}" + + agent_class = _resolve_fully_qualified_name(agent_class_name) + if inspect.isclass(agent_class) and issubclass(agent_class, BaseAgent): + return agent_class + + raise ValueError( + f"Invalid agent class `{agent_class_name}`. It must be a subclass of" + " BaseAgent." + ) @working_in_progress("_load_config_from_path is not ready for use.") @@ -90,6 +103,16 @@ def _load_config_from_path(config_path: str) -> AgentConfig: return AgentConfig.model_validate(config_data) +@working_in_progress("_resolve_fully_qualified_name is not ready for use.") +def _resolve_fully_qualified_name(name: str) -> Any: + try: + module_path, obj_name = name.rsplit(".", 1) + module = importlib.import_module(module_path) + return getattr(module, obj_name) + except Exception as e: + raise ValueError(f"Invalid fully qualified name: {name}") from e + + @working_in_progress("resolve_agent_reference is not ready for use.") def resolve_agent_reference( ref_config: AgentRefConfig, referencing_agent_config_abs_path: str diff --git a/src/google/adk/agents/llm_agent.py b/src/google/adk/agents/llm_agent.py index 75d859944..92faddb8a 100644 --- a/src/google/adk/agents/llm_agent.py +++ b/src/google/adk/agents/llm_agent.py @@ -22,6 +22,7 @@ from typing import AsyncGenerator from typing import Awaitable from typing import Callable +from typing import ClassVar from typing import Literal from typing import Optional from typing import Type @@ -55,6 +56,7 @@ from ..tools.tool_context import ToolContext from ..utils.feature_decorator import working_in_progress from .base_agent import BaseAgent +from .base_agent_config import BaseAgentConfig from .callback_context import CallbackContext from .common_configs import CodeConfig from .invocation_context import InvocationContext @@ -131,6 +133,9 @@ class LlmAgent(BaseAgent): When not set, the agent will inherit the model from its ancestor. """ + config_type: ClassVar[type[BaseAgentConfig]] = LlmAgentConfig + """The config type for this agent.""" + instruction: Union[str, InstructionProvider] = '' """Instructions for the LLM model, guiding the agent's behavior.""" diff --git a/src/google/adk/agents/loop_agent.py b/src/google/adk/agents/loop_agent.py index c093c4ace..812361a32 100644 --- a/src/google/adk/agents/loop_agent.py +++ b/src/google/adk/agents/loop_agent.py @@ -17,6 +17,7 @@ from __future__ import annotations from typing import AsyncGenerator +from typing import ClassVar from typing import Optional from typing import Type @@ -26,6 +27,7 @@ from ..events.event import Event from ..utils.feature_decorator import working_in_progress from .base_agent import BaseAgent +from .base_agent_config import BaseAgentConfig from .loop_agent_config import LoopAgentConfig @@ -36,6 +38,9 @@ class LoopAgent(BaseAgent): reached, the loop agent will stop. """ + config_type: ClassVar[type[BaseAgentConfig]] = LoopAgentConfig + """The config type for this agent.""" + max_iterations: Optional[int] = None """The maximum number of iterations to run the loop agent. 
diff --git a/src/google/adk/agents/parallel_agent.py b/src/google/adk/agents/parallel_agent.py index cb747bcb7..f8c4c28e4 100644 --- a/src/google/adk/agents/parallel_agent.py +++ b/src/google/adk/agents/parallel_agent.py @@ -18,6 +18,7 @@ import asyncio from typing import AsyncGenerator +from typing import ClassVar from typing import Type from typing_extensions import override @@ -25,6 +26,7 @@ from ..events.event import Event from ..utils.feature_decorator import working_in_progress from .base_agent import BaseAgent +from .base_agent_config import BaseAgentConfig from .invocation_context import InvocationContext from .parallel_agent_config import ParallelAgentConfig @@ -95,6 +97,9 @@ class ParallelAgent(BaseAgent): - Generating multiple responses for review by a subsequent evaluation agent. """ + config_type: ClassVar[type[BaseAgentConfig]] = ParallelAgentConfig + """The config type for this agent.""" + @override async def _run_async_impl( self, ctx: InvocationContext diff --git a/src/google/adk/agents/sequential_agent.py b/src/google/adk/agents/sequential_agent.py index e5b7bdd2d..c0c832ff1 100644 --- a/src/google/adk/agents/sequential_agent.py +++ b/src/google/adk/agents/sequential_agent.py @@ -24,6 +24,7 @@ from ..events.event import Event from ..utils.feature_decorator import working_in_progress from .base_agent import BaseAgent +from .base_agent import BaseAgentConfig from .invocation_context import InvocationContext from .llm_agent import LlmAgent from .sequential_agent_config import SequentialAgentConfig @@ -32,6 +33,9 @@ class SequentialAgent(BaseAgent): """A shell agent that runs its sub-agents in sequence.""" + config_type: Type[BaseAgentConfig] = SequentialAgentConfig + """The config type for this agent.""" + @override async def _run_async_impl( self, ctx: InvocationContext From 7556ebc76abd3c776922c2803aed831661cf7f82 Mon Sep 17 00:00:00 2001 From: Divyansh Shukla Date: Fri, 1 Aug 2025 14:43:23 -0700 Subject: [PATCH 09/41] feat: Allow max tokens to be customizable in Claude PiperOrigin-RevId: 789901925 --- src/google/adk/models/anthropic_llm.py | 6 ++-- tests/unittests/models/test_anthropic_llm.py | 29 ++++++++++++++++++++ 2 files changed, 32 insertions(+), 3 deletions(-) diff --git a/src/google/adk/models/anthropic_llm.py b/src/google/adk/models/anthropic_llm.py index 7556a5e61..bd64e2c47 100644 --- a/src/google/adk/models/anthropic_llm.py +++ b/src/google/adk/models/anthropic_llm.py @@ -46,8 +46,6 @@ logger = logging.getLogger("google_adk." + __name__) -MAX_TOKEN = 8192 - class ClaudeRequest(BaseModel): system_instruction: str @@ -245,9 +243,11 @@ class Claude(BaseLlm): Attributes: model: The name of the Claude model. + max_tokens: The maximum number of tokens to generate. """ model: str = "claude-3-5-sonnet-v2@20241022" + max_tokens: int = 8192 @staticmethod @override @@ -284,7 +284,7 @@ async def generate_content_async( messages=messages, tools=tools, tool_choice=tool_choice, - max_tokens=MAX_TOKEN, + max_tokens=self.max_tokens, ) yield message_to_generate_content_response(message) diff --git a/tests/unittests/models/test_anthropic_llm.py b/tests/unittests/models/test_anthropic_llm.py index 33f840f6d..ad03ac608 100644 --- a/tests/unittests/models/test_anthropic_llm.py +++ b/tests/unittests/models/test_anthropic_llm.py @@ -122,3 +122,32 @@ async def mock_coro(): assert len(responses) == 1 assert isinstance(responses[0], LlmResponse) assert responses[0].content.parts[0].text == "Hello, how can I help you?" 
+ + +@pytest.mark.asyncio +async def test_generate_content_async_with_max_tokens( + llm_request, generate_content_response, generate_llm_response +): + claude_llm = Claude(model="claude-3-5-sonnet-v2@20241022", max_tokens=4096) + with mock.patch.object(claude_llm, "_anthropic_client") as mock_client: + with mock.patch.object( + anthropic_llm, + "message_to_generate_content_response", + return_value=generate_llm_response, + ): + # Create a mock coroutine that returns the generate_content_response. + async def mock_coro(): + return generate_content_response + + # Assign the coroutine to the mocked method + mock_client.messages.create.return_value = mock_coro() + + _ = [ + resp + async for resp in claude_llm.generate_content_async( + llm_request, stream=False + ) + ] + mock_client.messages.create.assert_called_once() + _, kwargs = mock_client.messages.create.call_args + assert kwargs["max_tokens"] == 4096 From 57cd41f424b469fb834bb8f2777b5f7be9aa6cdf Mon Sep 17 00:00:00 2001 From: "Xiang (Sean) Zhou" Date: Sat, 2 Aug 2025 12:27:38 -0700 Subject: [PATCH 10/41] feat: Support parallel execution of parallel function calls PiperOrigin-RevId: 790182046 --- src/google/adk/flows/llm_flows/functions.py | 438 +++++++++++------- .../flows/llm_flows/test_functions_simple.py | 220 +++++++++ tests/unittests/streaming/test_streaming.py | 91 +++- 3 files changed, 575 insertions(+), 174 deletions(-) diff --git a/src/google/adk/flows/llm_flows/functions.py b/src/google/adk/flows/llm_flows/functions.py index 4fa44caf6..05ab2e653 100644 --- a/src/google/adk/flows/llm_flows/functions.py +++ b/src/google/adk/flows/llm_flows/functions.py @@ -20,10 +20,12 @@ import copy import inspect import logging +import threading from typing import Any from typing import AsyncGenerator from typing import cast from typing import Optional +from typing import TYPE_CHECKING import uuid from google.genai import types @@ -39,6 +41,9 @@ from ...tools.base_tool import BaseTool from ...tools.tool_context import ToolContext +if TYPE_CHECKING: + from ...agents.llm_agent import LlmAgent + AF_FUNCTION_CALL_ID_PREFIX = 'adk-' REQUEST_EUC_FUNCTION_CALL_NAME = 'adk_request_credential' @@ -135,117 +140,42 @@ async def handle_function_calls_async( agent = invocation_context.agent if not isinstance(agent, LlmAgent): - return + return None function_calls = function_call_event.get_function_calls() - function_response_events: list[Event] = [] - for function_call in function_calls: - if filters and function_call.id not in filters: - continue - tool, tool_context = _get_tool_and_context( - invocation_context, - function_call_event, - function_call, - tools_dict, - ) + # Filter function calls + filtered_calls = [ + fc for fc in function_calls if not filters or fc.id in filters + ] - with tracer.start_as_current_span(f'execute_tool {tool.name}'): - # Do not use "args" as the variable name, because it is a reserved keyword - # in python debugger. - # Make a deep copy to avoid being modified. - function_args = ( - copy.deepcopy(function_call.args) if function_call.args else {} - ) + if not filtered_calls: + return None - # Step 1: Check if plugin before_tool_callback overrides the function - # response. 
- function_response = ( - await invocation_context.plugin_manager.run_before_tool_callback( - tool=tool, tool_args=function_args, tool_context=tool_context + # Create tasks for parallel execution + tasks = [ + asyncio.create_task( + _execute_single_function_call_async( + invocation_context, + function_call, + tools_dict, + agent, ) ) + for function_call in filtered_calls + ] - # Step 2: If no overrides are provided from the plugins, further run the - # canonical callback. - if function_response is None: - for callback in agent.canonical_before_tool_callbacks: - function_response = callback( - tool=tool, args=function_args, tool_context=tool_context - ) - if inspect.isawaitable(function_response): - function_response = await function_response - if function_response: - break - - # Step 3: Otherwise, proceed calling the tool normally. - if function_response is None: - try: - function_response = await __call_tool_async( - tool, args=function_args, tool_context=tool_context - ) - except Exception as tool_error: - error_response = await invocation_context.plugin_manager.run_on_tool_error_callback( - tool=tool, - tool_args=function_args, - tool_context=tool_context, - error=tool_error, - ) - if error_response is not None: - function_response = error_response - else: - raise tool_error + # Wait for all tasks to complete + function_response_events = await asyncio.gather(*tasks) - # Step 4: Check if plugin after_tool_callback overrides the function - # response. - altered_function_response = ( - await invocation_context.plugin_manager.run_after_tool_callback( - tool=tool, - tool_args=function_args, - tool_context=tool_context, - result=function_response, - ) - ) - - # Step 5: If no overrides are provided from the plugins, further run the - # canonical after_tool_callbacks. - if altered_function_response is None: - for callback in agent.canonical_after_tool_callbacks: - altered_function_response = callback( - tool=tool, - args=function_args, - tool_context=tool_context, - tool_response=function_response, - ) - if inspect.isawaitable(altered_function_response): - altered_function_response = await altered_function_response - if altered_function_response: - break - - # Step 6: If alternative response exists from after_tool_callback, use it - # instead of the original function response. - if altered_function_response is not None: - function_response = altered_function_response - - if tool.is_long_running: - # Allow long running function to return None to not provide function - # response. - if not function_response: - continue - - # Builds the function response event. 
- function_response_event = __build_response_event( - tool, function_response, tool_context, invocation_context - ) - trace_tool_call( - tool=tool, - args=function_args, - function_response_event=function_response_event, - ) - function_response_events.append(function_response_event) + # Filter out None results + function_response_events = [ + event for event in function_response_events if event is not None + ] if not function_response_events: return None + merged_event = merge_parallel_function_response_events( function_response_events ) @@ -262,33 +192,38 @@ async def handle_function_calls_async( return merged_event -async def handle_function_calls_live( +async def _execute_single_function_call_async( invocation_context: InvocationContext, - function_call_event: Event, + function_call: types.FunctionCall, tools_dict: dict[str, BaseTool], -) -> Event: - """Calls the functions and returns the function response event.""" - from ...agents.llm_agent import LlmAgent + agent: LlmAgent, +) -> Optional[Event]: + """Execute a single function call with thread safety for state modifications.""" + tool, tool_context = _get_tool_and_context( + invocation_context, + function_call, + tools_dict, + ) - agent = cast(LlmAgent, invocation_context.agent) - function_calls = function_call_event.get_function_calls() + with tracer.start_as_current_span(f'execute_tool {tool.name}'): + # Do not use "args" as the variable name, because it is a reserved keyword + # in python debugger. + # Make a deep copy to avoid being modified. + function_args = ( + copy.deepcopy(function_call.args) if function_call.args else {} + ) - function_response_events: list[Event] = [] - for function_call in function_calls: - tool, tool_context = _get_tool_and_context( - invocation_context, function_call_event, function_call, tools_dict + # Step 1: Check if plugin before_tool_callback overrides the function + # response. + function_response = ( + await invocation_context.plugin_manager.run_before_tool_callback( + tool=tool, tool_args=function_args, tool_context=tool_context + ) ) - with tracer.start_as_current_span(f'execute_tool {tool.name}'): - # Do not use "args" as the variable name, because it is a reserved keyword - # in python debugger. - # Make a deep copy to avoid being modified. - function_args = ( - copy.deepcopy(function_call.args) if function_call.args else {} - ) - function_response = None - # Handle before_tool_callbacks - iterate through the canonical callback - # list + # Step 2: If no overrides are provided from the plugins, further run the + # canonical callback. + if function_response is None: for callback in agent.canonical_before_tool_callbacks: function_response = callback( tool=tool, args=function_args, tool_context=tool_context @@ -298,13 +233,40 @@ async def handle_function_calls_live( if function_response: break - if function_response is None: - function_response = await _process_function_live_helper( - tool, tool_context, function_call, function_args, invocation_context + # Step 3: Otherwise, proceed calling the tool normally. 
+ if function_response is None: + try: + function_response = await __call_tool_async( + tool, args=function_args, tool_context=tool_context + ) + except Exception as tool_error: + error_response = ( + await invocation_context.plugin_manager.run_on_tool_error_callback( + tool=tool, + tool_args=function_args, + tool_context=tool_context, + error=tool_error, + ) + ) + if error_response is not None: + function_response = error_response + else: + raise tool_error + + # Step 4: Check if plugin after_tool_callback overrides the function + # response. + altered_function_response = ( + await invocation_context.plugin_manager.run_after_tool_callback( + tool=tool, + tool_args=function_args, + tool_context=tool_context, + result=function_response, ) + ) - # Calls after_tool_callback if it exists. - altered_function_response = None + # Step 5: If no overrides are provided from the plugins, further run the + # canonical after_tool_callbacks. + if altered_function_response is None: for callback in agent.canonical_after_tool_callbacks: altered_function_response = callback( tool=tool, @@ -317,27 +279,75 @@ async def handle_function_calls_live( if altered_function_response: break - if altered_function_response is not None: - function_response = altered_function_response + # Step 6: If alternative response exists from after_tool_callback, use it + # instead of the original function response. + if altered_function_response is not None: + function_response = altered_function_response - if tool.is_long_running: - # Allow async function to return None to not provide function response. - if not function_response: - continue + if tool.is_long_running: + # Allow long running function to return None to not provide function + # response. + if not function_response: + return None - # Builds the function response event. - function_response_event = __build_response_event( - tool, function_response, tool_context, invocation_context - ) - trace_tool_call( - tool=tool, - args=function_args, - function_response_event=function_response_event, + # Note: State deltas are not applied here - they are collected in + # tool_context.actions.state_delta and applied later when the session + # service processes the events + + # Builds the function response event. 
+ function_response_event = __build_response_event( + tool, function_response, tool_context, invocation_context + ) + trace_tool_call( + tool=tool, + args=function_args, + function_response_event=function_response_event, + ) + return function_response_event + + +async def handle_function_calls_live( + invocation_context: InvocationContext, + function_call_event: Event, + tools_dict: dict[str, BaseTool], +) -> Event: + """Calls the functions and returns the function response event.""" + from ...agents.llm_agent import LlmAgent + + agent = cast(LlmAgent, invocation_context.agent) + function_calls = function_call_event.get_function_calls() + + if not function_calls: + return None + + # Create thread-safe lock for active_streaming_tools modifications + streaming_lock = threading.Lock() + + # Create tasks for parallel execution + tasks = [ + asyncio.create_task( + _execute_single_function_call_live( + invocation_context, + function_call, + tools_dict, + agent, + streaming_lock, + ) ) - function_response_events.append(function_response_event) + for function_call in function_calls + ] + + # Wait for all tasks to complete + function_response_events = await asyncio.gather(*tasks) + + # Filter out None results + function_response_events = [ + event for event in function_response_events if event is not None + ] if not function_response_events: return None + merged_event = merge_parallel_function_response_events( function_response_events ) @@ -353,8 +363,92 @@ async def handle_function_calls_live( return merged_event +async def _execute_single_function_call_live( + invocation_context: InvocationContext, + function_call: types.FunctionCall, + tools_dict: dict[str, BaseTool], + agent: LlmAgent, + streaming_lock: threading.Lock, +) -> Optional[Event]: + """Execute a single function call for live mode with thread safety.""" + tool, tool_context = _get_tool_and_context( + invocation_context, function_call, tools_dict + ) + with tracer.start_as_current_span(f'execute_tool {tool.name}'): + # Do not use "args" as the variable name, because it is a reserved keyword + # in python debugger. + # Make a deep copy to avoid being modified. + function_args = ( + copy.deepcopy(function_call.args) if function_call.args else {} + ) + function_response = None + + # Handle before_tool_callbacks - iterate through the canonical callback + # list + for callback in agent.canonical_before_tool_callbacks: + function_response = callback( + tool=tool, args=function_args, tool_context=tool_context + ) + if inspect.isawaitable(function_response): + function_response = await function_response + if function_response: + break + + if function_response is None: + function_response = await _process_function_live_helper( + tool, + tool_context, + function_call, + function_args, + invocation_context, + streaming_lock, + ) + + # Calls after_tool_callback if it exists. + altered_function_response = None + for callback in agent.canonical_after_tool_callbacks: + altered_function_response = callback( + tool=tool, + args=function_args, + tool_context=tool_context, + tool_response=function_response, + ) + if inspect.isawaitable(altered_function_response): + altered_function_response = await altered_function_response + if altered_function_response: + break + + if altered_function_response is not None: + function_response = altered_function_response + + if tool.is_long_running: + # Allow async function to return None to not provide function response. 
+ if not function_response: + return None + + # Note: State deltas are not applied here - they are collected in + # tool_context.actions.state_delta and applied later when the session + # service processes the events + + # Builds the function response event. + function_response_event = __build_response_event( + tool, function_response, tool_context, invocation_context + ) + trace_tool_call( + tool=tool, + args=function_args, + function_response_event=function_response_event, + ) + return function_response_event + + async def _process_function_live_helper( - tool, tool_context, function_call, function_args, invocation_context + tool, + tool_context, + function_call, + function_args, + invocation_context, + streaming_lock: threading.Lock, ): function_response = None # Check if this is a stop_streaming function call @@ -363,13 +457,20 @@ async def _process_function_live_helper( and 'function_name' in function_args ): function_name = function_args['function_name'] - active_tasks = invocation_context.active_streaming_tools - if ( - function_name in active_tasks - and active_tasks[function_name].task - and not active_tasks[function_name].task.done() - ): - task = active_tasks[function_name].task + # Thread-safe access to active_streaming_tools + with streaming_lock: + active_tasks = invocation_context.active_streaming_tools + if ( + active_tasks + and function_name in active_tasks + and active_tasks[function_name].task + and not active_tasks[function_name].task.done() + ): + task = active_tasks[function_name].task + else: + task = None + + if task: task.cancel() try: # Wait for the task to be cancelled @@ -377,20 +478,25 @@ async def _process_function_live_helper( except (asyncio.CancelledError, asyncio.TimeoutError): # Log the specific condition if task.cancelled(): - logging.info(f'Task {function_name} was cancelled successfully') + logging.info('Task %s was cancelled successfully', function_name) elif task.done(): - logging.info(f'Task {function_name} completed during cancellation') + logging.info('Task %s completed during cancellation', function_name) else: logging.warning( - f'Task {function_name} might still be running after' - ' cancellation timeout' + 'Task %s might still be running after cancellation timeout', + function_name, ) function_response = { 'status': f'The task is not cancelled yet for {function_name}.' 
} if not function_response: - # Clean up the reference - active_tasks[function_name].task = None + # Clean up the reference under lock + with streaming_lock: + if ( + invocation_context.active_streaming_tools + and function_name in invocation_context.active_streaming_tools + ): + invocation_context.active_streaming_tools[function_name].task = None function_response = { 'status': f'Successfully stopped streaming function {function_name}' @@ -425,14 +531,19 @@ async def run_tool_and_update_queue(tool, function_args, tool_context): task = asyncio.create_task( run_tool_and_update_queue(tool, function_args, tool_context) ) - if invocation_context.active_streaming_tools is None: - invocation_context.active_streaming_tools = {} - if tool.name in invocation_context.active_streaming_tools: - invocation_context.active_streaming_tools[tool.name].task = task - else: - invocation_context.active_streaming_tools[tool.name] = ( - ActiveStreamingTool(task=task) - ) + + # Register streaming tool using original logic + with streaming_lock: + if invocation_context.active_streaming_tools is None: + invocation_context.active_streaming_tools = {} + + if tool.name in invocation_context.active_streaming_tools: + invocation_context.active_streaming_tools[tool.name].task = task + else: + invocation_context.active_streaming_tools[tool.name] = ( + ActiveStreamingTool(task=task) + ) + # Immediately return a pending response. # This is required by current live model. function_response = { @@ -450,7 +561,6 @@ async def run_tool_and_update_queue(tool, function_args, tool_context): def _get_tool_and_context( invocation_context: InvocationContext, - function_call_event: Event, function_call: types.FunctionCall, tools_dict: dict[str, BaseTool], ): @@ -552,7 +662,7 @@ def merge_parallel_function_response_events( base_event = function_response_events[0] # Merge actions from all events - merged_actions_data = {} + merged_actions_data: dict[str, Any] = {} for event in function_response_events: if event.actions: # Use `by_alias=True` because it converts the model to a dictionary while respecting field aliases, ensuring that the enum fields are correctly handled without creating a duplicate. diff --git a/tests/unittests/flows/llm_flows/test_functions_simple.py b/tests/unittests/flows/llm_flows/test_functions_simple.py index df6fcb3c0..dbaf3c8c9 100644 --- a/tests/unittests/flows/llm_flows/test_functions_simple.py +++ b/tests/unittests/flows/llm_flows/test_functions_simple.py @@ -12,6 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+import asyncio from typing import Any from typing import Callable @@ -676,3 +677,222 @@ def test_shallow_vs_deep_copy_demonstration(): deep_copy['nested_dict']['inner']['value'] == 'modified' ) # Copy is modified assert 'new_item' in deep_copy['list_param'] # Copy is modified + + +@pytest.mark.asyncio +async def test_parallel_function_execution_timing(): + """Test that multiple function calls are executed in parallel, not sequentially.""" + import time + + execution_order = [] + execution_times = {} + + async def slow_function_1(delay: float = 0.1) -> dict: + start_time = time.time() + execution_order.append('start_1') + await asyncio.sleep(delay) + end_time = time.time() + execution_times['func_1'] = (start_time, end_time) + execution_order.append('end_1') + return {'result': 'function_1_result'} + + async def slow_function_2(delay: float = 0.1) -> dict: + start_time = time.time() + execution_order.append('start_2') + await asyncio.sleep(delay) + end_time = time.time() + execution_times['func_2'] = (start_time, end_time) + execution_order.append('end_2') + return {'result': 'function_2_result'} + + # Create function calls + function_calls = [ + types.Part.from_function_call( + name='slow_function_1', args={'delay': 0.1} + ), + types.Part.from_function_call( + name='slow_function_2', args={'delay': 0.1} + ), + ] + + function_responses = [ + types.Part.from_function_response( + name='slow_function_1', response={'result': 'function_1_result'} + ), + types.Part.from_function_response( + name='slow_function_2', response={'result': 'function_2_result'} + ), + ] + + responses: list[types.Content] = [ + function_calls, + 'response1', + ] + mock_model = testing_utils.MockModel.create(responses=responses) + + agent = Agent( + name='test_agent', + model=mock_model, + tools=[slow_function_1, slow_function_2], + ) + runner = testing_utils.TestInMemoryRunner(agent) + + # Measure total execution time + start_time = time.time() + events = await runner.run_async_with_new_session('test') + total_time = time.time() - start_time + + # Verify parallel execution by checking execution order + # In parallel execution, both functions should start before either finishes + assert 'start_1' in execution_order + assert 'start_2' in execution_order + assert 'end_1' in execution_order + assert 'end_2' in execution_order + + # Verify both functions started within a reasonable time window + func_1_start, func_1_end = execution_times['func_1'] + func_2_start, func_2_end = execution_times['func_2'] + + # Functions should start at approximately the same time (within 10ms) + start_time_diff = abs(func_1_start - func_2_start) + assert ( + start_time_diff < 0.01 + ), f'Functions started too far apart: {start_time_diff}s' + + # Total execution time should be closer to 0.1s (parallel) than 0.2s (sequential) + # Allow some overhead for task creation and synchronization + assert ( + total_time < 0.15 + ), f'Execution took too long: {total_time}s, expected < 0.15s' + + # Verify the results are correct + assert testing_utils.simplify_events(events) == [ + ('test_agent', function_calls), + ('test_agent', function_responses), + ('test_agent', 'response1'), + ] + + +@pytest.mark.asyncio +async def test_parallel_state_modifications_thread_safety(): + """Test that parallel function calls modifying state are thread-safe.""" + state_modifications = [] + + def modify_state_1(tool_context: ToolContext) -> dict: + # Track when this function modifies state + current_state = dict(tool_context.state.to_dict()) + 
state_modifications.append(('func_1_start', current_state)) + + tool_context.state['counter'] = tool_context.state.get('counter', 0) + 1 + tool_context.state['func_1_executed'] = True + + final_state = dict(tool_context.state.to_dict()) + state_modifications.append(('func_1_end', final_state)) + return {'result': 'modified_state_1'} + + def modify_state_2(tool_context: ToolContext) -> dict: + # Track when this function modifies state + current_state = dict(tool_context.state.to_dict()) + state_modifications.append(('func_2_start', current_state)) + + tool_context.state['counter'] = tool_context.state.get('counter', 0) + 1 + tool_context.state['func_2_executed'] = True + + final_state = dict(tool_context.state.to_dict()) + state_modifications.append(('func_2_end', final_state)) + return {'result': 'modified_state_2'} + + # Create function calls + function_calls = [ + types.Part.from_function_call(name='modify_state_1', args={}), + types.Part.from_function_call(name='modify_state_2', args={}), + ] + + responses: list[types.Content] = [ + function_calls, + 'response1', + ] + mock_model = testing_utils.MockModel.create(responses=responses) + + agent = Agent( + name='test_agent', + model=mock_model, + tools=[modify_state_1, modify_state_2], + ) + runner = testing_utils.TestInMemoryRunner(agent) + events = await runner.run_async_with_new_session('test') + + # Verify the parallel execution worked correctly by checking the events + # The function response event should have the merged state_delta + function_response_event = events[ + 1 + ] # Second event should be the function response + assert function_response_event.actions.state_delta['counter'] == 2 + assert function_response_event.actions.state_delta['func_1_executed'] is True + assert function_response_event.actions.state_delta['func_2_executed'] is True + + # Verify both functions were called + assert len(state_modifications) == 4 # 2 functions × 2 events each + + # Extract function names from modifications + func_names = [mod[0] for mod in state_modifications] + assert 'func_1_start' in func_names + assert 'func_1_end' in func_names + assert 'func_2_start' in func_names + assert 'func_2_end' in func_names + + +@pytest.mark.asyncio +async def test_parallel_mixed_sync_async_functions(): + """Test parallel execution with mix of sync and async functions.""" + execution_log = [] + + def sync_function(value: int) -> dict: + execution_log.append(f'sync_start_{value}') + # Simulate some work + import time + + time.sleep(0.05) # 50ms + execution_log.append(f'sync_end_{value}') + return {'result': f'sync_{value}'} + + async def async_function(value: int) -> dict: + execution_log.append(f'async_start_{value}') + await asyncio.sleep(0.05) # 50ms + execution_log.append(f'async_end_{value}') + return {'result': f'async_{value}'} + + # Create function calls + function_calls = [ + types.Part.from_function_call(name='sync_function', args={'value': 1}), + types.Part.from_function_call(name='async_function', args={'value': 2}), + types.Part.from_function_call(name='sync_function', args={'value': 3}), + ] + + responses: list[types.Content] = [function_calls, 'response1'] + mock_model = testing_utils.MockModel.create(responses=responses) + + agent = Agent( + name='test_agent', + model=mock_model, + tools=[sync_function, async_function], + ) + runner = testing_utils.TestInMemoryRunner(agent) + + import time + + start_time = time.time() + events = await runner.run_async_with_new_session('test') + total_time = time.time() - start_time + + # Should complete in less 
than 120ms (parallel) rather than 150ms (sequential) + # Allow for overhead from task creation and synchronization + assert total_time < 0.12, f'Execution took {total_time}s, expected < 0.12s' + + # Verify all functions were called + assert 'sync_start_1' in execution_log + assert 'sync_end_1' in execution_log + assert 'async_start_2' in execution_log + assert 'async_end_2' in execution_log + assert 'sync_start_3' in execution_log + assert 'sync_end_3' in execution_log diff --git a/tests/unittests/streaming/test_streaming.py b/tests/unittests/streaming/test_streaming.py index dd0e6d5c8..ac827a453 100644 --- a/tests/unittests/streaming/test_streaming.py +++ b/tests/unittests/streaming/test_streaming.py @@ -110,8 +110,15 @@ async def consume_responses(session: testing_utils.Session): try: session = self.session - # Add timeout to prevent hanging - asyncio.run(asyncio.wait_for(consume_responses(session), timeout=5.0)) + # Create a new event loop to avoid nested event loop issues + loop = asyncio.new_event_loop() + asyncio.set_event_loop(loop) + try: + loop.run_until_complete( + asyncio.wait_for(consume_responses(session), timeout=5.0) + ) + finally: + loop.close() except (asyncio.TimeoutError, asyncio.CancelledError): # Return whatever we collected so far pass @@ -217,7 +224,15 @@ async def consume_responses(session: testing_utils.Session): try: session = self.session - asyncio.run(asyncio.wait_for(consume_responses(session), timeout=5.0)) + # Create a new event loop to avoid nested event loop issues + loop = asyncio.new_event_loop() + asyncio.set_event_loop(loop) + try: + loop.run_until_complete( + asyncio.wait_for(consume_responses(session), timeout=5.0) + ) + finally: + loop.close() except (asyncio.TimeoutError, asyncio.CancelledError): pass @@ -315,7 +330,15 @@ async def consume_responses(session: testing_utils.Session): try: session = self.session - asyncio.run(asyncio.wait_for(consume_responses(session), timeout=5.0)) + # Create a new event loop to avoid nested event loop issues + loop = asyncio.new_event_loop() + asyncio.set_event_loop(loop) + try: + loop.run_until_complete( + asyncio.wait_for(consume_responses(session), timeout=5.0) + ) + finally: + loop.close() except (asyncio.TimeoutError, asyncio.CancelledError): pass @@ -407,7 +430,15 @@ async def consume_responses(session: testing_utils.Session): try: session = self.session - asyncio.run(asyncio.wait_for(consume_responses(session), timeout=5.0)) + # Create a new event loop to avoid nested event loop issues + loop = asyncio.new_event_loop() + asyncio.set_event_loop(loop) + try: + loop.run_until_complete( + asyncio.wait_for(consume_responses(session), timeout=5.0) + ) + finally: + loop.close() except (asyncio.TimeoutError, asyncio.CancelledError): pass @@ -490,7 +521,15 @@ async def consume_responses(session: testing_utils.Session): try: session = self.session - asyncio.run(asyncio.wait_for(consume_responses(session), timeout=5.0)) + # Create a new event loop to avoid nested event loop issues + loop = asyncio.new_event_loop() + asyncio.set_event_loop(loop) + try: + loop.run_until_complete( + asyncio.wait_for(consume_responses(session), timeout=5.0) + ) + finally: + loop.close() except (asyncio.TimeoutError, asyncio.CancelledError): pass @@ -582,7 +621,15 @@ async def consume_responses(session: testing_utils.Session): try: session = self.session - asyncio.run(asyncio.wait_for(consume_responses(session), timeout=5.0)) + # Create a new event loop to avoid nested event loop issues + loop = asyncio.new_event_loop() + 
asyncio.set_event_loop(loop) + try: + loop.run_until_complete( + asyncio.wait_for(consume_responses(session), timeout=5.0) + ) + finally: + loop.close() except (asyncio.TimeoutError, asyncio.CancelledError): pass @@ -686,7 +733,15 @@ async def consume_responses(session: testing_utils.Session): try: session = self.session - asyncio.run(asyncio.wait_for(consume_responses(session), timeout=5.0)) + # Create a new event loop to avoid nested event loop issues + loop = asyncio.new_event_loop() + asyncio.set_event_loop(loop) + try: + loop.run_until_complete( + asyncio.wait_for(consume_responses(session), timeout=5.0) + ) + finally: + loop.close() except (asyncio.TimeoutError, asyncio.CancelledError): pass @@ -794,7 +849,15 @@ async def consume_responses(session: testing_utils.Session): try: session = self.session - asyncio.run(asyncio.wait_for(consume_responses(session), timeout=5.0)) + # Create a new event loop to avoid nested event loop issues + loop = asyncio.new_event_loop() + asyncio.set_event_loop(loop) + try: + loop.run_until_complete( + asyncio.wait_for(consume_responses(session), timeout=5.0) + ) + finally: + loop.close() except (asyncio.TimeoutError, asyncio.CancelledError): pass @@ -903,7 +966,15 @@ async def consume_responses(session: testing_utils.Session): try: session = self.session - asyncio.run(asyncio.wait_for(consume_responses(session), timeout=5.0)) + # Create a new event loop to avoid nested event loop issues + loop = asyncio.new_event_loop() + asyncio.set_event_loop(loop) + try: + loop.run_until_complete( + asyncio.wait_for(consume_responses(session), timeout=5.0) + ) + finally: + loop.close() except (asyncio.TimeoutError, asyncio.CancelledError): pass From 90b9193a20499b8dd7f57d119cda4c534fcfda10 Mon Sep 17 00:00:00 2001 From: "Xiang (Sean) Zhou" Date: Sat, 2 Aug 2025 14:52:33 -0700 Subject: [PATCH 11/41] chore: Add sample agent for testing parallel functions execution PiperOrigin-RevId: 790208057 --- .../samples/parallel_functions/README.md | 103 ++++++++ .../samples/parallel_functions/__init__.py | 15 ++ .../samples/parallel_functions/agent.py | 246 ++++++++++++++++++ 3 files changed, 364 insertions(+) create mode 100644 contributing/samples/parallel_functions/README.md create mode 100644 contributing/samples/parallel_functions/__init__.py create mode 100644 contributing/samples/parallel_functions/agent.py diff --git a/contributing/samples/parallel_functions/README.md b/contributing/samples/parallel_functions/README.md new file mode 100644 index 000000000..8fde66f98 --- /dev/null +++ b/contributing/samples/parallel_functions/README.md @@ -0,0 +1,103 @@ +# Parallel Function Test Agent + +This agent demonstrates parallel function calling functionality in ADK. It includes multiple tools with different processing times to showcase how parallel execution improves performance compared to sequential execution. + +## Features + +- **Multiple async tool types**: All functions use proper async patterns for true parallelism +- **Thread safety testing**: Tools modify shared state to verify thread-safe operations +- **Performance demonstration**: Clear time differences between parallel and sequential execution +- **GIL-aware design**: Uses `await asyncio.sleep()` instead of `time.sleep()` to avoid blocking + +## Tools + +1. **get_weather(city)** - Async function, 2-second delay +2. **get_currency_rate(from_currency, to_currency)** - Async function, 1.5-second delay +3. **calculate_distance(city1, city2)** - Async function, 1-second delay +4. 
**get_population(cities)** - Async function, 0.5 seconds per city + +**Important**: All functions use `await asyncio.sleep()` instead of `time.sleep()` to ensure true parallel execution. Using `time.sleep()` would block Python's GIL and force sequential execution despite asyncio parallelism. + +## Testing Parallel Function Calling + +### Basic Parallel Test +``` +Get the weather for New York, London, and Tokyo +``` +Expected: 3 parallel get_weather calls (~2 seconds total instead of ~6 seconds sequential) + +### Mixed Function Types Test +``` +Get the weather in Paris, the USD to EUR exchange rate, and the distance between New York and London +``` +Expected: 3 parallel async calls with different functions (~2 seconds total) + +### Complex Parallel Test +``` +Compare New York and London by getting weather, population, and distance between them +``` +Expected: Multiple parallel calls combining different data types + +### Performance Comparison Test +You can test the timing difference by asking for the same information in different ways: + +**Sequential-style request:** +``` +First get the weather in New York, then get the weather in London, then get the weather in Tokyo +``` +*Expected time: ~6 seconds (2s + 2s + 2s)* + +**Parallel-style request:** +``` +Get the weather in New York, London, and Tokyo +``` +*Expected time: ~2 seconds (max of parallel 2s delays)* + +The parallel version should be **3x faster** due to concurrent execution. + +## Thread Safety Testing + +All tools modify the agent's state (`tool_context.state`) with request logs including timestamps. This helps verify that: +- Multiple tools can safely modify state concurrently +- No race conditions occur during parallel execution +- State modifications are preserved correctly + +## Running the Agent + +```bash +# Start the agent in interactive mode +adk run contributing/samples/parallel_functions + +# Or use the web interface +adk web +``` + +## Example Queries + +- "Get weather for New York, London, Tokyo, and Paris" *(4 parallel calls, ~2s total)* +- "What's the USD to EUR rate and GBP to USD rate?" *(2 parallel calls, ~1.5s total)* +- "Compare New York and San Francisco: weather, population, and distance" *(3 parallel calls, ~2s total)* +- "Get population data for Tokyo, London, Paris, and Sydney" *(1 call with 4 cities, ~2s total)* +- "What's the weather in Paris and the distance from Paris to London?" *(2 parallel calls, ~2s total)* + +## Common Issues and Solutions + +### ❌ Problem: Functions still execute sequentially (6+ seconds for 3 weather calls) + +**Root Cause**: Using blocking operations like `time.sleep()` in function implementations. 
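+
+If the blocking work comes from a synchronous library that cannot be rewritten, one workaround (not used in this sample) is to offload it to a worker thread with `asyncio.to_thread`, which keeps the event loop free for the other parallel calls. A minimal sketch, assuming a hypothetical `fetch_weather_blocking` helper:
+
+```python
+import asyncio
+import time
+
+
+def fetch_weather_blocking(city: str) -> dict:
+  # Stand-in for a synchronous SDK call that cannot be made async.
+  time.sleep(2)
+  return {'city': city, 'temp': 70}
+
+
+async def get_weather(city: str) -> dict:
+  # Run the blocking call in a worker thread so the event loop keeps
+  # scheduling the other parallel tool calls.
+  return await asyncio.to_thread(fetch_weather_blocking, city)
+```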
+ +**Solution**: Always use async patterns: +```python +# ❌ Wrong - blocks the GIL, forces sequential execution +def my_tool(): + time.sleep(2) # Blocks entire event loop + +# ✅ Correct - allows true parallelism +async def my_tool(): + await asyncio.sleep(2) # Non-blocking, parallel-friendly +``` + +### ✅ Verification: Check execution timing +- Parallel execution: ~2 seconds for 3 weather calls +- Sequential execution: ~6 seconds for 3 weather calls +- If you see 6+ seconds, your functions are blocking the GIL diff --git a/contributing/samples/parallel_functions/__init__.py b/contributing/samples/parallel_functions/__init__.py new file mode 100644 index 000000000..c48963cdc --- /dev/null +++ b/contributing/samples/parallel_functions/__init__.py @@ -0,0 +1,15 @@ +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from . import agent diff --git a/contributing/samples/parallel_functions/agent.py b/contributing/samples/parallel_functions/agent.py new file mode 100644 index 000000000..af4cad8b4 --- /dev/null +++ b/contributing/samples/parallel_functions/agent.py @@ -0,0 +1,246 @@ +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Sample agent for testing parallel function calling.""" + +import asyncio +import time +from typing import List + +from google.adk import Agent +from google.adk.tools.tool_context import ToolContext + + +async def get_weather(city: str, tool_context: ToolContext) -> dict: + """Get the current weather for a city. + + Args: + city: The name of the city to get weather for. + + Returns: + A dictionary with weather information. 
+ """ + # Simulate some async processing time (non-blocking) + await asyncio.sleep(2) + + # Mock weather data + weather_data = { + 'New York': {'temp': 72, 'condition': 'sunny', 'humidity': 45}, + 'London': {'temp': 60, 'condition': 'cloudy', 'humidity': 80}, + 'Tokyo': {'temp': 68, 'condition': 'rainy', 'humidity': 90}, + 'San Francisco': {'temp': 65, 'condition': 'foggy', 'humidity': 85}, + 'Paris': {'temp': 58, 'condition': 'overcast', 'humidity': 70}, + 'Sydney': {'temp': 75, 'condition': 'sunny', 'humidity': 60}, + } + + result = weather_data.get( + city, + { + 'temp': 70, + 'condition': 'unknown', + 'humidity': 50, + 'note': ( + f'Weather data not available for {city}, showing default values' + ), + }, + ) + + # Store in context for testing thread safety + if 'weather_requests' not in tool_context.state: + tool_context.state['weather_requests'] = [] + tool_context.state['weather_requests'].append( + {'city': city, 'timestamp': time.time(), 'result': result} + ) + + return { + 'city': city, + 'temperature': result['temp'], + 'condition': result['condition'], + 'humidity': result['humidity'], + **({'note': result['note']} if 'note' in result else {}), + } + + +async def get_currency_rate( + from_currency: str, to_currency: str, tool_context: ToolContext +) -> dict: + """Get the exchange rate between two currencies. + + Args: + from_currency: The source currency code (e.g., 'USD'). + to_currency: The target currency code (e.g., 'EUR'). + + Returns: + A dictionary with exchange rate information. + """ + # Simulate async processing time + await asyncio.sleep(1.5) + + # Mock exchange rates + rates = { + ('USD', 'EUR'): 0.85, + ('USD', 'GBP'): 0.75, + ('USD', 'JPY'): 110.0, + ('EUR', 'USD'): 1.18, + ('EUR', 'GBP'): 0.88, + ('GBP', 'USD'): 1.33, + ('GBP', 'EUR'): 1.14, + ('JPY', 'USD'): 0.009, + } + + rate = rates.get((from_currency, to_currency), 1.0) + + # Store in context for testing thread safety + if 'currency_requests' not in tool_context.state: + tool_context.state['currency_requests'] = [] + tool_context.state['currency_requests'].append({ + 'from': from_currency, + 'to': to_currency, + 'rate': rate, + 'timestamp': time.time(), + }) + + return { + 'from_currency': from_currency, + 'to_currency': to_currency, + 'exchange_rate': rate, + 'timestamp': time.time(), + } + + +async def calculate_distance( + city1: str, city2: str, tool_context: ToolContext +) -> dict: + """Calculate the distance between two cities. + + Args: + city1: The first city. + city2: The second city. + + Returns: + A dictionary with distance information. 
+ """ + # Simulate async processing time (non-blocking) + await asyncio.sleep(1) + + # Mock distances (in kilometers) + city_coords = { + 'New York': (40.7128, -74.0060), + 'London': (51.5074, -0.1278), + 'Tokyo': (35.6762, 139.6503), + 'San Francisco': (37.7749, -122.4194), + 'Paris': (48.8566, 2.3522), + 'Sydney': (-33.8688, 151.2093), + } + + # Simple distance calculation (mock) + if city1 in city_coords and city2 in city_coords: + coord1 = city_coords[city1] + coord2 = city_coords[city2] + # Simplified distance calculation + distance = int( + ((coord1[0] - coord2[0]) ** 2 + (coord1[1] - coord2[1]) ** 2) ** 0.5 + * 111 + ) # rough km conversion + else: + distance = 5000 # default distance + + # Store in context for testing thread safety + if 'distance_requests' not in tool_context.state: + tool_context.state['distance_requests'] = [] + tool_context.state['distance_requests'].append({ + 'city1': city1, + 'city2': city2, + 'distance': distance, + 'timestamp': time.time(), + }) + + return { + 'city1': city1, + 'city2': city2, + 'distance_km': distance, + 'distance_miles': int(distance * 0.621371), + } + + +async def get_population(cities: List[str], tool_context: ToolContext) -> dict: + """Get population information for multiple cities. + + Args: + cities: A list of city names. + + Returns: + A dictionary with population data for each city. + """ + # Simulate async processing time proportional to number of cities (non-blocking) + await asyncio.sleep(len(cities) * 0.5) + + # Mock population data + populations = { + 'New York': 8336817, + 'London': 9648110, + 'Tokyo': 13960000, + 'San Francisco': 873965, + 'Paris': 2161000, + 'Sydney': 5312163, + } + + results = {} + for city in cities: + results[city] = populations.get(city, 1000000) # default 1M if not found + + # Store in context for testing thread safety + if 'population_requests' not in tool_context.state: + tool_context.state['population_requests'] = [] + tool_context.state['population_requests'].append( + {'cities': cities, 'results': results, 'timestamp': time.time()} + ) + + return { + 'populations': results, + 'total_population': sum(results.values()), + 'cities_count': len(cities), + } + + +root_agent = Agent( + model='gemini-2.0-flash', + name='parallel_function_test_agent', + description=( + 'Agent for testing parallel function calling performance and thread' + ' safety.' + ), + instruction=""" + You are a helpful assistant that can provide information about weather, currency rates, + distances between cities, and population data. You have access to multiple tools and + should use them efficiently. + + When users ask for information about multiple cities or multiple types of data, + you should call multiple functions in parallel to provide faster responses. + + For example: + - If asked about weather in multiple cities, call get_weather for each city in parallel + - If asked about weather and currency rates, call both functions in parallel + - If asked to compare cities, you might need weather, population, and distance data in parallel + + Always aim to be efficient and call multiple functions simultaneously when possible. + Be informative and provide clear, well-structured responses. 
+ """, + tools=[ + get_weather, + get_currency_rate, + calculate_distance, + get_population, + ], +) From d620bcb384d3068228ea2059fb70274e68e69682 Mon Sep 17 00:00:00 2001 From: Carol Zheng Date: Sun, 3 Aug 2025 22:47:48 -0700 Subject: [PATCH 12/41] fix: Remove thoughts from contents in llm requests Merge https://github.com/google/adk-python/pull/2320 Fix #843 COPYBARA_INTEGRATE_REVIEW=https://github.com/google/adk-python/pull/2320 from CAROLZXYZXY:cazheng/fix-843 5b4a4b256928cb766a44a3e18d4300b7ee5f779f PiperOrigin-RevId: 790592793 --- src/google/adk/flows/llm_flows/contents.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/google/adk/flows/llm_flows/contents.py b/src/google/adk/flows/llm_flows/contents.py index e5f584905..ae1bd44ad 100644 --- a/src/google/adk/flows/llm_flows/contents.py +++ b/src/google/adk/flows/llm_flows/contents.py @@ -322,7 +322,8 @@ def _convert_foreign_event(event: Event) -> Event: content.role = 'user' content.parts = [types.Part(text='For context:')] for part in event.content.parts: - if part.text: + # Exclude thoughts from the context. + if part.text and not part.thought: content.parts.append( types.Part(text=f'[{event.author}] said: {part.text}') ) From e41dbccf7f610e249108f9321f60f71fe2cc10f4 Mon Sep 17 00:00:00 2001 From: "Wei Sun (Jack)" Date: Mon, 4 Aug 2025 09:15:41 -0700 Subject: [PATCH 13/41] fix(cli): Fixes adk deploy cloud_run cli Fixes #2328 PiperOrigin-RevId: 790775592 --- src/google/adk/cli/cli_tools_click.py | 17 +++++++++++++++-- 1 file changed, 15 insertions(+), 2 deletions(-) diff --git a/src/google/adk/cli/cli_tools_click.py b/src/google/adk/cli/cli_tools_click.py index d02f914f3..fc1662ada 100644 --- a/src/google/adk/cli/cli_tools_click.py +++ b/src/google/adk/cli/cli_tools_click.py @@ -920,6 +920,12 @@ def cli_api_server( " (default: a timestamped folder in the system temp directory)." ), ) +@click.option( + "--log_level", + type=LOG_LEVELS, + default="INFO", + help="Optional. Set the logging level", +) @click.option( "--verbosity", type=LOG_LEVELS, @@ -954,7 +960,7 @@ def cli_deploy_cloud_run( trace_to_cloud: bool, with_ui: bool, adk_version: str, - verbosity: str = "WARNING", + verbosity: Optional[str], reload: bool = True, allow_origins: Optional[list[str]] = None, log_level: Optional[str] = None, @@ -975,7 +981,14 @@ def cli_deploy_cloud_run( adk deploy cloud_run --project=[project] --region=[region] path/to/my_agent """ - log_level = log_level or verbosity + if verbosity: + click.secho( + "WARNING: The --verbosity option is deprecated. 
Use --log_level" + " instead.", + fg="yellow", + err=True, + ) + session_service_uri = session_service_uri or session_db_url artifact_service_uri = artifact_service_uri or artifact_storage_uri try: From 74589a1db7df65e319d1ad2f0676ee0cf5d6ec1d Mon Sep 17 00:00:00 2001 From: Xuan Yang Date: Mon, 4 Aug 2025 12:42:34 -0700 Subject: [PATCH 14/41] chore: make `LlmRequest.LiveConnectConfig` field default to a factory to avoid sharing a mutable instance PiperOrigin-RevId: 790854215 --- src/google/adk/models/llm_request.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/google/adk/models/llm_request.py b/src/google/adk/models/llm_request.py index 79770182a..39fddef41 100644 --- a/src/google/adk/models/llm_request.py +++ b/src/google/adk/models/llm_request.py @@ -48,7 +48,9 @@ class LlmRequest(BaseModel): config: types.GenerateContentConfig = Field( default_factory=types.GenerateContentConfig ) - live_connect_config: types.LiveConnectConfig = types.LiveConnectConfig() + live_connect_config: types.LiveConnectConfig = Field( + default_factory=types.LiveConnectConfig + ) """Additional config for the generate content request. tools in generate_content_config should not be set. From e369c283b3ac47282207fd6940ac2d249fe04463 Mon Sep 17 00:00:00 2001 From: nikkie Date: Mon, 4 Aug 2025 13:31:29 -0700 Subject: [PATCH 15/41] fix: typo againt (in `adk run --replay` help) Merge https://github.com/google/adk-python/pull/2327 `adk run --help` (adk 1.9.0) ``` --replay FILE The json file that contains the initial state of the session and user queries. A new session will be created using this state. And user queries are run againt the newly created session. Users cannot continue to interact with the agent. ``` ``` $ git grep againt src/google/adk/cli/cli_tools_click.py: " queries are run againt the newly created session. Users cannot" ``` COPYBARA_INTEGRATE_REVIEW=https://github.com/google/adk-python/pull/2327 from ftnext:fix-typo-run-replay-help 77cae65a235d9119810fe3d209910562672713c8 PiperOrigin-RevId: 790872246 --- src/google/adk/cli/cli_tools_click.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/google/adk/cli/cli_tools_click.py b/src/google/adk/cli/cli_tools_click.py index fc1662ada..ccf43783f 100644 --- a/src/google/adk/cli/cli_tools_click.py +++ b/src/google/adk/cli/cli_tools_click.py @@ -225,7 +225,7 @@ def validate_exclusive(ctx, param, value): help=( "The json file that contains the initial state of the session and user" " queries. A new session will be created using this state. And user" - " queries are run againt the newly created session. Users cannot" + " queries are run against the newly created session. Users cannot" " continue to interact with the agent." ), callback=validate_exclusive, From 283303032a174d51b8d72f14df83c794d66cb605 Mon Sep 17 00:00:00 2001 From: Xuan Yang Date: Mon, 4 Aug 2025 13:59:21 -0700 Subject: [PATCH 16/41] chore: update the prompt to make the ADK Answering Agent more objective PiperOrigin-RevId: 790882938 --- contributing/samples/adk_answering_agent/agent.py | 1 + 1 file changed, 1 insertion(+) diff --git a/contributing/samples/adk_answering_agent/agent.py b/contributing/samples/adk_answering_agent/agent.py index 8b250f297..41f9aa807 100644 --- a/contributing/samples/adk_answering_agent/agent.py +++ b/contributing/samples/adk_answering_agent/agent.py @@ -247,6 +247,7 @@ def add_label_to_discussion( * {APPROVAL_INSTRUCTION} * Your response should be based on the information you found in the document store. 
Do not invent information that is not in the document store. Do not invent citations which are not in the document store. + * **Be Objective**: your answer should be based on the facts you found in the document store, do not be misled by user's assumptions or user's understanding of ADK. * If you can't find the answer or information in the document store, **do not** respond. * Include a bolded note (e.g. "Response from ADK Answering Agent") in your comment to indicate this comment was added by an ADK Answering Agent. From 97318bcd199acdacadfe8664da3fbfc3c806cdd2 Mon Sep 17 00:00:00 2001 From: Google Team Member Date: Mon, 4 Aug 2025 19:25:13 -0700 Subject: [PATCH 17/41] fix: correct type annotation Overridden `supported_models` should be a `classmethod` rather than a `staticmethod`. PiperOrigin-RevId: 790989895 --- src/google/adk/models/anthropic_llm.py | 4 ++-- src/google/adk/models/google_llm.py | 4 ++-- src/google/adk/models/lite_llm.py | 4 ++-- tests/unittests/testing_utils.py | 5 +++-- 4 files changed, 9 insertions(+), 8 deletions(-) diff --git a/src/google/adk/models/anthropic_llm.py b/src/google/adk/models/anthropic_llm.py index bd64e2c47..ae69a6529 100644 --- a/src/google/adk/models/anthropic_llm.py +++ b/src/google/adk/models/anthropic_llm.py @@ -249,9 +249,9 @@ class Claude(BaseLlm): model: str = "claude-3-5-sonnet-v2@20241022" max_tokens: int = 8192 - @staticmethod + @classmethod @override - def supported_models() -> list[str]: + def supported_models(cls) -> list[str]: return [r"claude-3-.*", r"claude-.*-4.*"] @override diff --git a/src/google/adk/models/google_llm.py b/src/google/adk/models/google_llm.py index 50c820c14..a68af6297 100644 --- a/src/google/adk/models/google_llm.py +++ b/src/google/adk/models/google_llm.py @@ -75,9 +75,9 @@ class Gemini(BaseLlm): ``` """ - @staticmethod + @classmethod @override - def supported_models() -> list[str]: + def supported_models(cls) -> list[str]: """Provides the list of supported models. Returns: diff --git a/src/google/adk/models/lite_llm.py b/src/google/adk/models/lite_llm.py index ddce6a73f..7c78d6f15 100644 --- a/src/google/adk/models/lite_llm.py +++ b/src/google/adk/models/lite_llm.py @@ -848,9 +848,9 @@ async def generate_content_async( response = await self.llm_client.acompletion(**completion_args) yield _model_response_to_generate_content_response(response) - @staticmethod + @classmethod @override - def supported_models() -> list[str]: + def supported_models(cls) -> list[str]: """Provides the list of supported models. LiteLlm supports all models supported by litellm. We do not keep track of diff --git a/tests/unittests/testing_utils.py b/tests/unittests/testing_utils.py index 59cb72503..44e68b84d 100644 --- a/tests/unittests/testing_utils.py +++ b/tests/unittests/testing_utils.py @@ -282,8 +282,9 @@ def create( return cls(responses=responses) - @staticmethod - def supported_models() -> list[str]: + @classmethod + @override + def supported_models(cls) -> list[str]: return ['mock'] def generate_content( From 8ef2177658fbfc74b1a74b0c3ea8150bae866796 Mon Sep 17 00:00:00 2001 From: "Wei Sun (Jack)" Date: Mon, 4 Aug 2025 21:08:16 -0700 Subject: [PATCH 18/41] test: Fixes adk cli options and method parameters mismatching and adds a unit test for future proof checking The test will fail if `@option` list and method parameter don't match. 
Future proof test for #2328 PiperOrigin-RevId: 791022512 --- src/google/adk/cli/cli_tools_click.py | 27 ++- .../test_cli_tools_click_option_mismatch.py | 165 ++++++++++++++++++ 2 files changed, 188 insertions(+), 4 deletions(-) create mode 100644 tests/unittests/cli/test_cli_tools_click_option_mismatch.py diff --git a/src/google/adk/cli/cli_tools_click.py b/src/google/adk/cli/cli_tools_click.py index ccf43783f..aae633d82 100644 --- a/src/google/adk/cli/cli_tools_click.py +++ b/src/google/adk/cli/cli_tools_click.py @@ -676,6 +676,15 @@ def decorator(func): show_default=True, help="Optional. Whether to enable live reload for agents changes.", ) + @click.option( + "--eval_storage_uri", + type=str, + help=( + "Optional. The evals storage URI to store agent evals," + " supported URIs: gs://." + ), + default=None, + ) @functools.wraps(func) @click.pass_context def wrapper(ctx, *args, **kwargs): @@ -947,6 +956,19 @@ def cli_api_server( " version in the dev environment)" ), ) +@click.option( + "--a2a", + is_flag=True, + show_default=True, + default=False, + help="Optional. Whether to enable A2A endpoint.", +) +@click.option( + "--allow_origins", + help="Optional. Any additional origins to allow for CORS.", + multiple=True, +) +# TODO: Add eval_storage_uri option back when evals are supported in Cloud Run. @adk_services_options() @deprecated_adk_services_options() def cli_deploy_cloud_run( @@ -960,18 +982,15 @@ def cli_deploy_cloud_run( trace_to_cloud: bool, with_ui: bool, adk_version: str, + log_level: str, verbosity: Optional[str], - reload: bool = True, allow_origins: Optional[list[str]] = None, - log_level: Optional[str] = None, session_service_uri: Optional[str] = None, artifact_service_uri: Optional[str] = None, memory_service_uri: Optional[str] = None, - eval_storage_uri: Optional[str] = None, session_db_url: Optional[str] = None, # Deprecated artifact_storage_uri: Optional[str] = None, # Deprecated a2a: bool = False, - reload_agents: bool = False, ): """Deploys an agent to Cloud Run. diff --git a/tests/unittests/cli/test_cli_tools_click_option_mismatch.py b/tests/unittests/cli/test_cli_tools_click_option_mismatch.py new file mode 100644 index 000000000..3a01c4694 --- /dev/null +++ b/tests/unittests/cli/test_cli_tools_click_option_mismatch.py @@ -0,0 +1,165 @@ +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Unit tests to check if any Click options and method parameters mismatch.""" + +import inspect +from typing import MutableMapping +from typing import Optional + +import click +from google.adk.cli.cli_tools_click import cli_api_server +from google.adk.cli.cli_tools_click import cli_create_cmd +from google.adk.cli.cli_tools_click import cli_deploy_agent_engine +from google.adk.cli.cli_tools_click import cli_deploy_cloud_run +from google.adk.cli.cli_tools_click import cli_deploy_gke +from google.adk.cli.cli_tools_click import cli_eval +from google.adk.cli.cli_tools_click import cli_run +from google.adk.cli.cli_tools_click import cli_web +from google.adk.cli.cli_tools_click import deploy +from google.adk.cli.cli_tools_click import main + + +def _get_command_by_name( + commands: MutableMapping[str, click.Command], name +) -> Optional[click.Command]: + """Return the command object with the given name from a commands dict.""" + return next((cmd for cmd in commands.values() if cmd.name == name), None) + + +def _get_click_options(command) -> set[str]: + """Extract Click option names from a command.""" + options = [] + for param in command.params: + if isinstance(param, (click.Option, click.Argument)): + options.append(param.name) + return set(options) + + +def _get_method_parameters(func) -> set[str]: + """Extract parameter names from a method signature.""" + sig = inspect.signature(func) + return set(sig.parameters.keys()) + + +def _check_options_in_parameters( + command, + func, + command_name, + ignore_params: Optional[set[str]] = None, +): + """Check if all Click options are present in method parameters.""" + click_options = _get_click_options(command) + method_params = _get_method_parameters(func) + + if ignore_params: + click_options -= ignore_params + method_params -= ignore_params + + option_only = click_options - method_params + parameter_only = method_params - click_options + + assert click_options == method_params, f"""\ +Click options and method parameters do not match for command: `{command_name}`. 
+Click options: {click_options} +Method parameters: {method_params} +Options only: {option_only} +Parameters only: {parameter_only} +""" + + +def test_adk_create(): + """Test that cli_create_cmd has all required parameters.""" + create_command = _get_command_by_name(main.commands, "create") + + assert create_command is not None, "Create command not found" + _check_options_in_parameters( + create_command, cli_create_cmd.callback, "create" + ) + + +def test_adk_run(): + """Test that cli_run has all required parameters.""" + run_command = _get_command_by_name(main.commands, "run") + + assert run_command is not None, "Run command not found" + _check_options_in_parameters(run_command, cli_run.callback, "run") + + +def test_adk_eval(): + """Test that cli_eval has all required parameters.""" + eval_command = _get_command_by_name(main.commands, "eval") + + assert eval_command is not None, "Eval command not found" + _check_options_in_parameters(eval_command, cli_eval.callback, "eval") + + +def test_adk_web(): + """Test that cli_web has all required parameters.""" + web_command = _get_command_by_name(main.commands, "web") + + assert web_command is not None, "Web command not found" + _check_options_in_parameters( + web_command, cli_web.callback, "web", ignore_params={"verbose"} + ) + + +def test_adk_api_server(): + """Test that cli_api_server has all required parameters.""" + api_server_command = _get_command_by_name(main.commands, "api_server") + + assert api_server_command is not None, "API server command not found" + _check_options_in_parameters( + api_server_command, + cli_api_server.callback, + "api_server", + ignore_params={"verbose"}, + ) + + +def test_adk_deploy_cloud_run(): + """Test that cli_deploy_cloud_run has all required parameters.""" + cloud_run_command = _get_command_by_name(deploy.commands, "cloud_run") + + assert cloud_run_command is not None, "Cloud Run deploy command not found" + _check_options_in_parameters( + cloud_run_command, + cli_deploy_cloud_run.callback, + "deploy cloud_run", + ignore_params={"verbose"}, + ) + + +def test_adk_deploy_agent_engine(): + """Test that cli_deploy_agent_engine has all required parameters.""" + agent_engine_command = _get_command_by_name(deploy.commands, "agent_engine") + + assert ( + agent_engine_command is not None + ), "Agent Engine deploy command not found" + _check_options_in_parameters( + agent_engine_command, + cli_deploy_agent_engine.callback, + "deploy agent_engine", + ) + + +def test_adk_deploy_gke(): + """Test that cli_deploy_gke has all required parameters.""" + gke_command = _get_command_by_name(deploy.commands, "gke") + + assert gke_command is not None, "GKE deploy command not found" + _check_options_in_parameters( + gke_command, cli_deploy_gke.callback, "deploy gke" + ) From 6da6c2a44cf1f8b9225487a711c3e6094ca5355f Mon Sep 17 00:00:00 2001 From: "Xiang (Sean) Zhou" Date: Tue, 5 Aug 2025 09:43:37 -0700 Subject: [PATCH 19/41] fix: using async lock for accessing shared object in parallel executions and update tests for testing various type of functions 1. given we are running parallel functions in one event loop (one thread) , we should use async lock instead of thread lock 2. test three kind of functions: a. sync function b. async function that doesn't yield c. 
async function that yield PiperOrigin-RevId: 791255012 --- src/google/adk/flows/llm_flows/functions.py | 14 +- .../flows/llm_flows/test_functions_simple.py | 193 +++++++++++++++--- 2 files changed, 167 insertions(+), 40 deletions(-) diff --git a/src/google/adk/flows/llm_flows/functions.py b/src/google/adk/flows/llm_flows/functions.py index 05ab2e653..86f7e30a4 100644 --- a/src/google/adk/flows/llm_flows/functions.py +++ b/src/google/adk/flows/llm_flows/functions.py @@ -320,8 +320,8 @@ async def handle_function_calls_live( if not function_calls: return None - # Create thread-safe lock for active_streaming_tools modifications - streaming_lock = threading.Lock() + # Create async lock for active_streaming_tools modifications + streaming_lock = asyncio.Lock() # Create tasks for parallel execution tasks = [ @@ -368,7 +368,7 @@ async def _execute_single_function_call_live( function_call: types.FunctionCall, tools_dict: dict[str, BaseTool], agent: LlmAgent, - streaming_lock: threading.Lock, + streaming_lock: asyncio.Lock, ) -> Optional[Event]: """Execute a single function call for live mode with thread safety.""" tool, tool_context = _get_tool_and_context( @@ -448,7 +448,7 @@ async def _process_function_live_helper( function_call, function_args, invocation_context, - streaming_lock: threading.Lock, + streaming_lock: asyncio.Lock, ): function_response = None # Check if this is a stop_streaming function call @@ -458,7 +458,7 @@ async def _process_function_live_helper( ): function_name = function_args['function_name'] # Thread-safe access to active_streaming_tools - with streaming_lock: + async with streaming_lock: active_tasks = invocation_context.active_streaming_tools if ( active_tasks @@ -491,7 +491,7 @@ async def _process_function_live_helper( } if not function_response: # Clean up the reference under lock - with streaming_lock: + async with streaming_lock: if ( invocation_context.active_streaming_tools and function_name in invocation_context.active_streaming_tools @@ -533,7 +533,7 @@ async def run_tool_and_update_queue(tool, function_args, tool_context): ) # Register streaming tool using original logic - with streaming_lock: + async with streaming_lock: if invocation_context.active_streaming_tools is None: invocation_context.active_streaming_tools = {} diff --git a/tests/unittests/flows/llm_flows/test_functions_simple.py b/tests/unittests/flows/llm_flows/test_functions_simple.py index dbaf3c8c9..166800cc1 100644 --- a/tests/unittests/flows/llm_flows/test_functions_simple.py +++ b/tests/unittests/flows/llm_flows/test_functions_simple.py @@ -843,30 +843,77 @@ def modify_state_2(tool_context: ToolContext) -> dict: @pytest.mark.asyncio -async def test_parallel_mixed_sync_async_functions(): - """Test parallel execution with mix of sync and async functions.""" - execution_log = [] +async def test_sync_function_blocks_async_functions(): + """Test that sync functions block async functions from running concurrently.""" + execution_order = [] + + def blocking_sync_function() -> dict: + execution_order.append('sync_A') + # Simulate CPU-intensive work that blocks the event loop + result = 0 + for i in range(1000000): # This blocks the event loop + result += i + execution_order.append('sync_B') + return {'result': 'sync_done'} + + async def yielding_async_function() -> dict: + execution_order.append('async_C') + await asyncio.sleep( + 0.001 + ) # This should yield, but can't if event loop is blocked + execution_order.append('async_D') + return {'result': 'async_done'} + + # Create function calls - these 
should run "in parallel" + function_calls = [ + types.Part.from_function_call(name='blocking_sync_function', args={}), + types.Part.from_function_call(name='yielding_async_function', args={}), + ] + + responses: list[types.Content] = [function_calls, 'response1'] + mock_model = testing_utils.MockModel.create(responses=responses) + + agent = Agent( + name='test_agent', + model=mock_model, + tools=[blocking_sync_function, yielding_async_function], + ) + runner = testing_utils.TestInMemoryRunner(agent) + events = await runner.run_async_with_new_session('test') - def sync_function(value: int) -> dict: - execution_log.append(f'sync_start_{value}') - # Simulate some work - import time + # With blocking sync function, execution should be sequential: A, B, C, D + # The sync function blocks, preventing the async function from yielding properly + assert execution_order == ['sync_A', 'sync_B', 'async_C', 'async_D'] - time.sleep(0.05) # 50ms - execution_log.append(f'sync_end_{value}') - return {'result': f'sync_{value}'} - async def async_function(value: int) -> dict: - execution_log.append(f'async_start_{value}') - await asyncio.sleep(0.05) # 50ms - execution_log.append(f'async_end_{value}') - return {'result': f'async_{value}'} +@pytest.mark.asyncio +async def test_async_function_without_yield_blocks_others(): + """Test that async functions without yield statements block other functions.""" + execution_order = [] + + async def non_yielding_async_function() -> dict: + execution_order.append('non_yield_A') + # CPU-intensive work without any await statements - blocks like sync function + result = 0 + for i in range(1000000): # No await here, so this blocks the event loop + result += i + execution_order.append('non_yield_B') + return {'result': 'non_yielding_done'} + + async def yielding_async_function() -> dict: + execution_order.append('yield_C') + await asyncio.sleep( + 0.001 + ) # This should yield, but can't if event loop is blocked + execution_order.append('yield_D') + return {'result': 'yielding_done'} # Create function calls function_calls = [ - types.Part.from_function_call(name='sync_function', args={'value': 1}), - types.Part.from_function_call(name='async_function', args={'value': 2}), - types.Part.from_function_call(name='sync_function', args={'value': 3}), + types.Part.from_function_call( + name='non_yielding_async_function', args={} + ), + types.Part.from_function_call(name='yielding_async_function', args={}), ] responses: list[types.Content] = [function_calls, 'response1'] @@ -875,24 +922,104 @@ async def async_function(value: int) -> dict: agent = Agent( name='test_agent', model=mock_model, - tools=[sync_function, async_function], + tools=[non_yielding_async_function, yielding_async_function], ) runner = testing_utils.TestInMemoryRunner(agent) + events = await runner.run_async_with_new_session('test') - import time + # Non-yielding async function blocks, so execution is sequential: A, B, C, D + assert execution_order == ['non_yield_A', 'non_yield_B', 'yield_C', 'yield_D'] - start_time = time.time() + +@pytest.mark.asyncio +async def test_yielding_async_functions_run_concurrently(): + """Test that async functions with proper yields run concurrently.""" + execution_order = [] + + async def yielding_async_function_1() -> dict: + execution_order.append('func1_A') + await asyncio.sleep(0.001) # Yield control + execution_order.append('func1_B') + return {'result': 'func1_done'} + + async def yielding_async_function_2() -> dict: + execution_order.append('func2_C') + await asyncio.sleep(0.001) 
# Yield control + execution_order.append('func2_D') + return {'result': 'func2_done'} + + # Create function calls + function_calls = [ + types.Part.from_function_call(name='yielding_async_function_1', args={}), + types.Part.from_function_call(name='yielding_async_function_2', args={}), + ] + + responses: list[types.Content] = [function_calls, 'response1'] + mock_model = testing_utils.MockModel.create(responses=responses) + + agent = Agent( + name='test_agent', + model=mock_model, + tools=[yielding_async_function_1, yielding_async_function_2], + ) + runner = testing_utils.TestInMemoryRunner(agent) events = await runner.run_async_with_new_session('test') - total_time = time.time() - start_time - # Should complete in less than 120ms (parallel) rather than 150ms (sequential) - # Allow for overhead from task creation and synchronization - assert total_time < 0.12, f'Execution took {total_time}s, expected < 0.12s' - - # Verify all functions were called - assert 'sync_start_1' in execution_log - assert 'sync_end_1' in execution_log - assert 'async_start_2' in execution_log - assert 'async_end_2' in execution_log - assert 'sync_start_3' in execution_log - assert 'sync_end_3' in execution_log + # With proper yielding, execution should interleave: A, C, B, D + # Both functions start, yield, then complete + assert execution_order == ['func1_A', 'func2_C', 'func1_B', 'func2_D'] + + +@pytest.mark.asyncio +async def test_mixed_function_types_execution_order(): + """Test execution order with all three types of functions.""" + execution_order = [] + + def sync_function() -> dict: + execution_order.append('sync_A') + # Small amount of blocking work + result = sum(range(100000)) + execution_order.append('sync_B') + return {'result': 'sync_done'} + + async def non_yielding_async() -> dict: + execution_order.append('non_yield_C') + # CPU work without yield + result = sum(range(100000)) + execution_order.append('non_yield_D') + return {'result': 'non_yield_done'} + + async def yielding_async() -> dict: + execution_order.append('yield_E') + await asyncio.sleep(0.001) # Proper yield + execution_order.append('yield_F') + return {'result': 'yield_done'} + + # Create function calls + function_calls = [ + types.Part.from_function_call(name='sync_function', args={}), + types.Part.from_function_call(name='non_yielding_async', args={}), + types.Part.from_function_call(name='yielding_async', args={}), + ] + + responses: list[types.Content] = [function_calls, 'response1'] + mock_model = testing_utils.MockModel.create(responses=responses) + + agent = Agent( + name='test_agent', + model=mock_model, + tools=[sync_function, non_yielding_async, yielding_async], + ) + runner = testing_utils.TestInMemoryRunner(agent) + events = await runner.run_async_with_new_session('test') + + # All blocking functions run sequentially, then the yielding one + # Expected order: sync_A, sync_B, non_yield_C, non_yield_D, yield_E, yield_F + assert execution_order == [ + 'sync_A', + 'sync_B', + 'non_yield_C', + 'non_yield_D', + 'yield_E', + 'yield_F', + ] From 0e28d64712e481cfd3b964be0166f529657024f6 Mon Sep 17 00:00:00 2001 From: Liang Wu Date: Tue, 5 Aug 2025 10:11:02 -0700 Subject: [PATCH 20/41] feat(tools): create enterprise_web_search_tool as a tool instance There is no argument for the tool, so just like google_search, we should make it an easy-to-use tool instance. 
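A minimal usage sketch, mirroring the google_search pattern (agent name and
model are illustrative):

```python
from google.adk import Agent
from google.adk.tools import enterprise_web_search

# The tool ships as a ready-made instance, so it is passed directly,
# just like google_search.
root_agent = Agent(
    model='gemini-2.0-flash',
    name='enterprise_search_agent',
    tools=[enterprise_web_search],
)
```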
PiperOrigin-RevId: 791266806 --- src/google/adk/tools/__init__.py | 2 ++ src/google/adk/tools/enterprise_search_tool.py | 3 +++ 2 files changed, 5 insertions(+) diff --git a/src/google/adk/tools/__init__.py b/src/google/adk/tools/__init__.py index d93c15d1c..1b749bb1f 100644 --- a/src/google/adk/tools/__init__.py +++ b/src/google/adk/tools/__init__.py @@ -17,6 +17,7 @@ from .agent_tool import AgentTool from .apihub_tool.apihub_toolset import APIHubToolset from .base_tool import BaseTool +from .enterprise_search_tool import enterprise_web_search_tool as enterprise_web_search from .example_tool import ExampleTool from .exit_loop_tool import exit_loop from .function_tool import FunctionTool @@ -36,6 +37,7 @@ 'APIHubToolset', 'AuthToolArguments', 'BaseTool', + 'enterprise_web_search', 'google_search', 'url_context', 'VertexAiSearchTool', diff --git a/src/google/adk/tools/enterprise_search_tool.py b/src/google/adk/tools/enterprise_search_tool.py index fefdec8b1..f27b7de67 100644 --- a/src/google/adk/tools/enterprise_search_tool.py +++ b/src/google/adk/tools/enterprise_search_tool.py @@ -65,3 +65,6 @@ async def process_llm_request( 'Enterprise web search tool is not supported for model' f' {llm_request.model}' ) + + +enterprise_web_search_tool = EnterpriseWebSearchTool() From 37dae9b631db5060770b66fce0e25cf0ffb56948 Mon Sep 17 00:00:00 2001 From: "Xiang (Sean) Zhou" Date: Tue, 5 Aug 2025 10:27:56 -0700 Subject: [PATCH 21/41] chore: Import AGENT_CARD_WELL_KNOWN_PATH from adk instead of from a2a directly thus let adk handle import problem for a2a, e.g. python version need to be > 3.10 etc. PiperOrigin-RevId: 791273137 --- contributing/samples/a2a_root/agent.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/contributing/samples/a2a_root/agent.py b/contributing/samples/a2a_root/agent.py index e435743e8..c913a6fad 100755 --- a/contributing/samples/a2a_root/agent.py +++ b/contributing/samples/a2a_root/agent.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from a2a.utils.constants import AGENT_CARD_WELL_KNOWN_PATH +from google.adk.agents.remote_a2a_agent import AGENT_CARD_WELL_KNOWN_PATH from google.adk.agents.remote_a2a_agent import RemoteA2aAgent root_agent = RemoteA2aAgent( From 423542a43fb8316195e9f79d97f87593751bebd3 Mon Sep 17 00:00:00 2001 From: "Xiang (Sean) Zhou" Date: Tue, 5 Aug 2025 11:37:30 -0700 Subject: [PATCH 22/41] fix: shared default plugin manager and cost manager instances among multiple invocations PiperOrigin-RevId: 791303349 --- src/google/adk/agents/invocation_context.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/src/google/adk/agents/invocation_context.py b/src/google/adk/agents/invocation_context.py index 831aa818f..033d51a6a 100644 --- a/src/google/adk/agents/invocation_context.py +++ b/src/google/adk/agents/invocation_context.py @@ -20,6 +20,8 @@ from google.genai import types from pydantic import BaseModel from pydantic import ConfigDict +from pydantic import Field +from pydantic import PrivateAttr from ..artifacts.base_artifact_service import BaseArtifactService from ..auth.credential_service.base_credential_service import BaseCredentialService @@ -154,10 +156,12 @@ class InvocationContext(BaseModel): run_config: Optional[RunConfig] = None """Configurations for live agents under this invocation.""" - plugin_manager: PluginManager = PluginManager() + plugin_manager: PluginManager = Field(default_factory=PluginManager) """The manager for keeping track of plugins in this invocation.""" - _invocation_cost_manager: _InvocationCostManager = _InvocationCostManager() + _invocation_cost_manager: _InvocationCostManager = PrivateAttr( + default_factory=_InvocationCostManager + ) """A container to keep track of different kinds of costs incurred as a part of this invocation. """ From 71fbc9275b3d74700ec410cb4155ba0cb18580b7 Mon Sep 17 00:00:00 2001 From: Hangfei Lin Date: Tue, 5 Aug 2025 11:50:14 -0700 Subject: [PATCH 23/41] feat: Implement Live Session Resumption Previous implementation doesn't pass the actual handle to server. Now we cache the handle and pass it over when reconnection happens. 
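In outline, the flow caches the newest handle from each
LiveServerSessionResumptionUpdate and seeds the next connection with it. A
self-contained illustration of that hand-off (function names here are
illustrative; the real logic lives in base_llm_flow.py):

```python
from typing import Optional

from google.genai import types

_cached_handle: Optional[str] = None


def on_resumption_update(
    update: types.LiveServerSessionResumptionUpdate,
) -> None:
  # Remember the newest server-issued handle.
  global _cached_handle
  _cached_handle = update.new_handle


def next_connect_config() -> types.LiveConnectConfig:
  # Seed the reconnect attempt with the cached handle.
  return types.LiveConnectConfig(
      session_resumption=types.SessionResumptionConfig(
          handle=_cached_handle, transparent=True
      )
  )
```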
To enable: run_config = RunConfig( session_resumption=types.SessionResumptionConfig(transparent=True) ) PiperOrigin-RevId: 791308462 --- .../live_bidi_streaming_multi_agent/agent.py | 4 +- .../live_bidi_streaming_tools_agent/agent.py | 4 +- .../live_tool_callbacks_agent/agent.py | 3 +- src/google/adk/agents/invocation_context.py | 3 + .../adk/flows/llm_flows/base_llm_flow.py | 188 ++++++++++------- .../adk/models/gemini_llm_connection.py | 7 + src/google/adk/models/google_llm.py | 1 + src/google/adk/models/llm_response.py | 5 + .../streaming/test_multi_agent_streaming.py | 194 ++++++++++++++++++ 9 files changed, 333 insertions(+), 76 deletions(-) create mode 100644 tests/unittests/streaming/test_multi_agent_streaming.py diff --git a/contributing/samples/live_bidi_streaming_multi_agent/agent.py b/contributing/samples/live_bidi_streaming_multi_agent/agent.py index ac50eb7ae..413e33a72 100644 --- a/contributing/samples/live_bidi_streaming_multi_agent/agent.py +++ b/contributing/samples/live_bidi_streaming_multi_agent/agent.py @@ -100,8 +100,8 @@ def get_current_weather(location: str): root_agent = Agent( # find supported models here: https://google.github.io/adk-docs/get-started/streaming/quickstart-streaming/ - # model='gemini-live-2.5-flash-preview-native-audio', # for Vertex project - model="gemini-live-2.5-flash-preview", # for AI studio key + model="gemini-2.0-flash-live-preview-04-09", # for Vertex project + # model="gemini-live-2.5-flash-preview", # for AI studio key name="root_agent", instruction=""" You are a helpful assistant that can check time, roll dice and check if numbers are prime. diff --git a/contributing/samples/live_bidi_streaming_tools_agent/agent.py b/contributing/samples/live_bidi_streaming_tools_agent/agent.py index cdb092171..c55651865 100644 --- a/contributing/samples/live_bidi_streaming_tools_agent/agent.py +++ b/contributing/samples/live_bidi_streaming_tools_agent/agent.py @@ -121,7 +121,9 @@ def stop_streaming(function_name: str): root_agent = Agent( - model="gemini-live-2.5-flash-preview", + # find supported models here: https://google.github.io/adk-docs/get-started/streaming/quickstart-streaming/ + model="gemini-2.0-flash-live-preview-04-09", # for Vertex project + # model="gemini-live-2.5-flash-preview", # for AI studio key name="video_streaming_agent", instruction=""" You are a monitoring agent. You can do video monitoring and stock price monitoring diff --git a/contributing/samples/live_tool_callbacks_agent/agent.py b/contributing/samples/live_tool_callbacks_agent/agent.py index 3f540b974..95af9d8f2 100644 --- a/contributing/samples/live_tool_callbacks_agent/agent.py +++ b/contributing/samples/live_tool_callbacks_agent/agent.py @@ -217,8 +217,9 @@ async def after_tool_async_callback( # Create the agent with tool callbacks root_agent = Agent( + # find supported models here: https://google.github.io/adk-docs/get-started/streaming/quickstart-streaming/ model="gemini-2.0-flash-live-preview-04-09", # for Vertex project - # model="gemini-2.0-flash-live-001", # for AI studio key + # model="gemini-live-2.5-flash-preview", # for AI studio key name="tool_callbacks_agent", description=( "Live streaming agent that demonstrates tool callbacks functionality. 
" diff --git a/src/google/adk/agents/invocation_context.py b/src/google/adk/agents/invocation_context.py index 033d51a6a..66c61ed68 100644 --- a/src/google/adk/agents/invocation_context.py +++ b/src/google/adk/agents/invocation_context.py @@ -153,6 +153,9 @@ class InvocationContext(BaseModel): transcription_cache: Optional[list[TranscriptionEntry]] = None """Caches necessary data, audio or contents, that are needed by transcription.""" + live_session_resumption_handle: Optional[str] = None + """The handle for live session resumption.""" + run_config: Optional[RunConfig] = None """Configurations for live agents under this invocation.""" diff --git a/src/google/adk/flows/llm_flows/base_llm_flow.py b/src/google/adk/flows/llm_flows/base_llm_flow.py index b38866710..0a1cdb916 100644 --- a/src/google/adk/flows/llm_flows/base_llm_flow.py +++ b/src/google/adk/flows/llm_flows/base_llm_flow.py @@ -25,6 +25,7 @@ from typing import TYPE_CHECKING from google.genai import types +from websockets.exceptions import ConnectionClosed from websockets.exceptions import ConnectionClosedOK from . import functions @@ -86,80 +87,115 @@ async def run_live( invocation_context.agent.name, llm_request, ) - async with llm.connect(llm_request) as llm_connection: - if llm_request.contents: - # Sends the conversation history to the model. - with tracer.start_as_current_span('send_data'): - - if invocation_context.transcription_cache: - from . import audio_transcriber - - audio_transcriber = audio_transcriber.AudioTranscriber( - init_client=True - if invocation_context.run_config.input_audio_transcription - is None - else False - ) - contents = audio_transcriber.transcribe_file(invocation_context) - logger.debug('Sending history to model: %s', contents) - await llm_connection.send_history(contents) - invocation_context.transcription_cache = None - trace_send_data(invocation_context, event_id, contents) - else: - await llm_connection.send_history(llm_request.contents) - trace_send_data(invocation_context, event_id, llm_request.contents) - - send_task = asyncio.create_task( - self._send_to_model(llm_connection, invocation_context) - ) + attempt = 1 + while True: try: - async for event in self._receive_from_model( - llm_connection, - event_id, - invocation_context, - llm_request, - ): - # Empty event means the queue is closed. - if not event: - break - logger.debug('Receive new event: %s', event) - yield event - # send back the function response - if event.get_function_responses(): - logger.debug('Sending back last function response event: %s', event) - invocation_context.live_request_queue.send_content(event.content) - if ( - event.content - and event.content.parts - and event.content.parts[0].function_response - and event.content.parts[0].function_response.name - == 'transfer_to_agent' - ): - await asyncio.sleep(1) - # cancel the tasks that belongs to the closed connection. - send_task.cancel() - await llm_connection.close() - if ( - event.content - and event.content.parts - and event.content.parts[0].function_response - and event.content.parts[0].function_response.name - == 'task_completed' - ): - # this is used for sequential agent to signal the end of the agent. - await asyncio.sleep(1) - # cancel the tasks that belongs to the closed connection. 
- send_task.cancel() - return - finally: - # Clean up - if not send_task.done(): - send_task.cancel() - try: - await send_task - except asyncio.CancelledError: - pass + # On subsequent attempts, use the saved token to reconnect + if invocation_context.live_session_resumption_handle: + logger.info('Attempting to reconnect (Attempt %s)...', attempt) + attempt += 1 + if not llm_request.live_connect_config: + llm_request.live_connect_config = types.LiveConnectConfig() + llm_request.live_connect_config.session_resumption.handle = ( + invocation_context.live_session_resumption_handle + ) + llm_request.live_connect_config.session_resumption.transparent = True + + logger.info( + 'Establishing live connection for agent: %s', + invocation_context.agent.name, + ) + async with llm.connect(llm_request) as llm_connection: + if llm_request.contents: + # Sends the conversation history to the model. + with tracer.start_as_current_span('send_data'): + + if invocation_context.transcription_cache: + from . import audio_transcriber + + audio_transcriber = audio_transcriber.AudioTranscriber( + init_client=True + if invocation_context.run_config.input_audio_transcription + is None + else False + ) + contents = audio_transcriber.transcribe_file(invocation_context) + logger.debug('Sending history to model: %s', contents) + await llm_connection.send_history(contents) + invocation_context.transcription_cache = None + trace_send_data(invocation_context, event_id, contents) + else: + await llm_connection.send_history(llm_request.contents) + trace_send_data( + invocation_context, event_id, llm_request.contents + ) + + send_task = asyncio.create_task( + self._send_to_model(llm_connection, invocation_context) + ) + + try: + async for event in self._receive_from_model( + llm_connection, + event_id, + invocation_context, + llm_request, + ): + # Empty event means the queue is closed. + if not event: + break + logger.debug('Receive new event: %s', event) + yield event + # send back the function response + if event.get_function_responses(): + logger.debug( + 'Sending back last function response event: %s', event + ) + invocation_context.live_request_queue.send_content( + event.content + ) + if ( + event.content + and event.content.parts + and event.content.parts[0].function_response + and event.content.parts[0].function_response.name + == 'transfer_to_agent' + ): + await asyncio.sleep(1) + # cancel the tasks that belongs to the closed connection. + send_task.cancel() + await llm_connection.close() + if ( + event.content + and event.content.parts + and event.content.parts[0].function_response + and event.content.parts[0].function_response.name + == 'task_completed' + ): + # this is used for sequential agent to signal the end of the agent. + await asyncio.sleep(1) + # cancel the tasks that belongs to the closed connection. + send_task.cancel() + return + finally: + # Clean up + if not send_task.done(): + send_task.cancel() + try: + await send_task + except asyncio.CancelledError: + pass + except (ConnectionClosed, ConnectionClosedOK) as e: + # when the session timeout, it will just close and not throw exception. 
+ # so this is for bad cases + logger.error(f'Connection closed: {e}.') + raise + except Exception as e: + logger.error( + f'An unexpected error occurred in live flow: {e}', exc_info=True + ) + raise async def _send_to_model( self, @@ -246,6 +282,14 @@ def get_author_for_event(llm_response): try: while True: async for llm_response in llm_connection.receive(): + if llm_response.live_session_resumption_update: + logger.info( + 'Update session resumption hanlde:' + f' {llm_response.live_session_resumption_update}.' + ) + invocation_context.live_session_resumption_handle = ( + llm_response.live_session_resumption_update.new_handle + ) model_response_event = Event( id=Event.new_id(), invocation_id=invocation_context.invocation_id, diff --git a/src/google/adk/models/gemini_llm_connection.py b/src/google/adk/models/gemini_llm_connection.py index 3a902c562..3b46c91ad 100644 --- a/src/google/adk/models/gemini_llm_connection.py +++ b/src/google/adk/models/gemini_llm_connection.py @@ -219,6 +219,13 @@ async def receive(self) -> AsyncGenerator[LlmResponse, None]: for function_call in message.tool_call.function_calls ] yield LlmResponse(content=types.Content(role='model', parts=parts)) + if message.session_resumption_update: + logger.info('Redeived session reassumption message: %s', message) + yield ( + LlmResponse( + live_session_resumption_update=message.session_resumption_update + ) + ) async def close(self): """Closes the llm server connection.""" diff --git a/src/google/adk/models/google_llm.py b/src/google/adk/models/google_llm.py index a68af6297..b1cad1c54 100644 --- a/src/google/adk/models/google_llm.py +++ b/src/google/adk/models/google_llm.py @@ -289,6 +289,7 @@ async def connect(self, llm_request: LlmRequest) -> BaseLlmConnection: ], ) llm_request.live_connect_config.tools = llm_request.config.tools + logger.info('Connecting to live with llm_request:%s', llm_request) async with self._live_api_client.aio.live.connect( model=llm_request.model, config=llm_request.live_connect_config ) as live_session: diff --git a/src/google/adk/models/llm_response.py b/src/google/adk/models/llm_response.py index 6539ff1ad..2f39ad428 100644 --- a/src/google/adk/models/llm_response.py +++ b/src/google/adk/models/llm_response.py @@ -89,6 +89,11 @@ class LlmResponse(BaseModel): usage_metadata: Optional[types.GenerateContentResponseUsageMetadata] = None """The usage metadata of the LlmResponse""" + live_session_resumption_update: Optional[ + types.LiveServerSessionResumptionUpdate + ] = None + """The session resumption update of the LlmResponse""" + @staticmethod def create( generate_content_response: types.GenerateContentResponse, diff --git a/tests/unittests/streaming/test_multi_agent_streaming.py b/tests/unittests/streaming/test_multi_agent_streaming.py new file mode 100644 index 000000000..f7f9cb0d9 --- /dev/null +++ b/tests/unittests/streaming/test_multi_agent_streaming.py @@ -0,0 +1,194 @@ +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import asyncio +import contextlib +from typing import AsyncGenerator + +from google.adk.agents.live_request_queue import LiveRequestQueue +from google.adk.agents.llm_agent import Agent +from google.adk.models.llm_response import LlmResponse +from google.genai import types +import pytest +from typing_extensions import override # <-- FIX: Add this import +from websockets import frames # <-- FIX 1: Import the frames module +from websockets.exceptions import ConnectionClosed + +from .. import testing_utils + + +def test_live_streaming_multi_agent_single_tool(): + """Test live streaming with multi-agent delegation for a single tool call.""" + # --- 1. Mock LLM Responses --- + + # Mock response for the root_agent to delegate the task to the roll_agent. + # FIX: Use from_function_call to represent delegation to a sub-agent. + delegation_to_roll_agent = types.Part.from_function_call( + name='transfer_to_agent', args={'agent_name': 'roll_agent'} + ) + + root_response1 = LlmResponse( + content=types.Content(role='model', parts=[delegation_to_roll_agent]), + turn_complete=False, + ) + root_response2 = LlmResponse(turn_complete=True) + mock_root_model = testing_utils.MockModel.create( + [root_response1, root_response2] + ) + + # Mock response for the roll_agent to call its `roll_die` tool. + function_call = types.Part.from_function_call( + name='roll_die', args={'sides': 20} + ) + roll_agent_response1 = LlmResponse( + content=types.Content(role='model', parts=[function_call]), + turn_complete=False, + ) + roll_agent_response2 = LlmResponse(turn_complete=True) + mock_roll_model = testing_utils.MockModel.create( + [roll_agent_response1, roll_agent_response2] + ) + + # --- 2. Mock Tools and Agents --- + + def roll_die(sides: int) -> int: + """Rolls a die and returns a fixed result for testing.""" + return 15 + + mock_roll_sub_agent = Agent( + name='roll_agent', + model=mock_roll_model, + tools=[roll_die], + ) + + main_agent = Agent( + name='root_agent', + model=mock_root_model, + sub_agents=[mock_roll_sub_agent], + ) + + # --- 3. Test Runner Setup --- + class CustomTestRunner(testing_utils.InMemoryRunner): + + def run_live( + self, + live_request_queue: LiveRequestQueue, + run_config: testing_utils.RunConfig = None, + ) -> list[testing_utils.Event]: + collected_responses = [] + + async def consume_responses(session: testing_utils.Session): + run_res = self.runner.run_live( + session=session, + live_request_queue=live_request_queue, + run_config=run_config or testing_utils.RunConfig(), + ) + async for response in run_res: + collected_responses.append(response) + if len(collected_responses) >= 5: + return + + try: + session = self.session + asyncio.run(asyncio.wait_for(consume_responses(session), timeout=5.0)) + except (asyncio.TimeoutError, asyncio.CancelledError): + pass + return collected_responses + + runner = CustomTestRunner(root_agent=main_agent) + live_request_queue = LiveRequestQueue() + live_request_queue.send_realtime( + blob=types.Blob(data=b'Roll a 20-sided die', mime_type='audio/pcm') + ) + + # --- 4. Run and Assert --- + res_events = runner.run_live(live_request_queue) + + assert res_events is not None, 'Expected a list of events, but got None.' + assert len(res_events) >= 1, 'Expected at least one event.' + + delegation_found = False + tool_call_found = False + tool_response_found = False + + for event in res_events: + if event.content and event.content.parts: + for part in event.content.parts: + if part.function_call: + # FIX: Check for the function call that represents delegation. 
+ if part.function_call.name == 'transfer_to_agent': + delegation_found = True + assert part.function_call.args == {'agent_name': 'roll_agent'} + + # Check for the function call made by the roll_agent. + if part.function_call.name == 'roll_die': + tool_call_found = True + assert part.function_call.args['sides'] == 20 + + # Check for the result from the executed function. + if part.function_response and part.function_response.name == 'roll_die': + tool_response_found = True + assert part.function_response.response['result'] == 15 + + assert delegation_found, 'A function_call event for delegation was not found.' + assert tool_call_found, 'A function_call event for roll_die was not found.' + assert tool_response_found, 'A function_response for roll_die was not found.' + + +def test_live_streaming_connection_error_on_connect(): + """ + Tests that the runner correctly handles a ConnectionClosed exception + raised from the model's `connect` method during a live run. + """ + + # 1. Create a mock model that fails during the connection phase. + class MockModelThatFailsToConnect(testing_utils.MockModel): + + @contextlib.asynccontextmanager + @override + async def connect(self, llm_request: testing_utils.LlmRequest): + """Override connect to simulate an immediate connection failure.""" + + # FIX 2: Create a proper `Close` frame object first. + close_frame = frames.Close( + 1007, + 'gemini-live-2.5-flash-preview is not supported in the live api.', + ) + + # FIX 3: Pass the frame object to the `rcvd` parameter of the exception. + raise ConnectionClosed(rcvd=close_frame, sent=None) + + yield # pragma: no cover + + # 2. Instantiate the custom mock model. + mock_model = MockModelThatFailsToConnect(responses=[]) + + # 3. Set up the agent and runner. + agent = Agent(name='test_agent_for_connection_failure', model=mock_model) + runner = testing_utils.InMemoryRunner(root_agent=agent) + live_request_queue = LiveRequestQueue() + live_request_queue.send_realtime( + blob=types.Blob(data=b'Initial audio chunk', mime_type='audio/pcm') + ) + + # 4. Assert that `run_live` raises `ConnectionClosed`. + with pytest.raises(ConnectionClosed) as excinfo: + runner.run_live(live_request_queue) + + # 5. Verify the details of the exception. The `code` and `reason` are + # attributes of the received frame (`rcvd`), not the exception itself. + assert excinfo.value.rcvd.code == 1007 + assert ( + 'is not supported in the live api' in excinfo.value.rcvd.reason + ), 'The exception reason should match the simulated server error.' 
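
For readers skimming this patch, the reconnection handshake it wires into the live flow boils down to two steps: cache the `new_handle` from every `session_resumption_update` the server sends, and pass it back through `LiveConnectConfig.session_resumption` when the connection is reopened. Below is a minimal, self-contained sketch of that idea; the module-level `_saved_handle` cache and the helper names are illustrative only (in the actual change the handle lives on the `InvocationContext` as `live_session_resumption_handle`), and the type names follow the `google.genai` types used in the diff.

```
from typing import Optional

from google.genai import types

# Illustrative cache; the ADK change stores this on the InvocationContext.
_saved_handle: Optional[str] = None


def remember_resumption_handle(message: types.LiveServerMessage) -> None:
  """Keeps the newest resumption token sent by the live server."""
  global _saved_handle
  if message.session_resumption_update:
    _saved_handle = message.session_resumption_update.new_handle


def build_reconnect_config() -> types.LiveConnectConfig:
  """Builds a connect config that resumes the previously saved session."""
  return types.LiveConnectConfig(
      session_resumption=types.SessionResumptionConfig(
          handle=_saved_handle, transparent=True
      )
  )
```
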
From 8dc0c949afb9024738ff7ac1b2c19282175c3200 Mon Sep 17 00:00:00 2001 From: Xuan Yang Date: Tue, 5 Aug 2025 16:06:38 -0700 Subject: [PATCH 24/41] chore: add Github workflow config for the ADK Answering agent PiperOrigin-RevId: 791407331 --- .github/workflows/discussion_answering.yml | 43 ++++++++++++++++++++++ 1 file changed, 43 insertions(+) create mode 100644 .github/workflows/discussion_answering.yml diff --git a/.github/workflows/discussion_answering.yml b/.github/workflows/discussion_answering.yml new file mode 100644 index 000000000..03429327f --- /dev/null +++ b/.github/workflows/discussion_answering.yml @@ -0,0 +1,43 @@ +on: + discussion: + types: [created] + discussion_comment: + types: [created] + +jobs: + agent-answer-questions: + runs-on: ubuntu-latest + + steps: + - name: Checkout repository + uses: actions/checkout@v4 + + - name: Set up Python + uses: actions/setup-python@v5 + with: + python-version: '3.11' + + - name: Authenticate to Google Cloud + id: auth + uses: 'google-github-actions/auth@v2' + with: + credentials_json: '${{ secrets.ADK_GCP_SA_KEY }}' + + - name: Install dependencies + run: | + python -m pip install --upgrade pip + pip install google-adk + + - name: Run Answering Script + env: + GITHUB_TOKEN: ${{ secrets.ADK_TRIAGE_AGENT }} + GOOGLE_CLOUD_PROJECT: ${{ secrets.GOOGLE_CLOUD_PROJECT }} + GOOGLE_CLOUD_LOCATION: ${{ secrets.GOOGLE_CLOUD_LOCATION }} + VERTEXAI_DATASTORE_ID: ${{ secrets.VERTEXAI_DATASTORE_ID }} + GOOGLE_GENAI_USE_VERTEXAI: 1 + OWNER: 'google' + REPO: 'adk-python' + INTERACTIVE: 0 + DISCUSSION_NUMBER: ${{ github.event.discussion.number }} + PYTHONPATH: contributing/samples + run: python -m adk_answering_agent.main From a3b31ca9504ea12803b55c8e78e3b4533ed8ab55 Mon Sep 17 00:00:00 2001 From: Xuan Yang Date: Tue, 5 Aug 2025 16:23:56 -0700 Subject: [PATCH 25/41] chore: add the missing name for the ADK Answering Agent workflow PiperOrigin-RevId: 791413949 --- .github/workflows/discussion_answering.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.github/workflows/discussion_answering.yml b/.github/workflows/discussion_answering.yml index 03429327f..aaaa614dc 100644 --- a/.github/workflows/discussion_answering.yml +++ b/.github/workflows/discussion_answering.yml @@ -1,3 +1,5 @@ +name: ADK Answering Agent for Discussions + on: discussion: types: [created] From 2fff882fb0c66d21bd39e0a116205c4f879f61fb Mon Sep 17 00:00:00 2001 From: Liang Wu Date: Tue, 5 Aug 2025 22:25:37 -0700 Subject: [PATCH 26/41] feat(config): implement from_config() for BaseTool PiperOrigin-RevId: 791520708 --- src/google/adk/agents/config_agent_utils.py | 6 +- src/google/adk/tools/base_tool.py | 81 +++++++++++++++++++-- tests/unittests/tools/test_tool_config.py | 56 ++++++++++++++ 3 files changed, 134 insertions(+), 9 deletions(-) create mode 100644 tests/unittests/tools/test_tool_config.py diff --git a/src/google/adk/agents/config_agent_utils.py b/src/google/adk/agents/config_agent_utils.py index 4a22366fc..80ea93b0f 100644 --- a/src/google/adk/agents/config_agent_utils.py +++ b/src/google/adk/agents/config_agent_utils.py @@ -69,7 +69,7 @@ def _resolve_agent_class(agent_class: str) -> type[BaseAgent]: if "." 
not in agent_class_name: agent_class_name = f"google.adk.agents.{agent_class_name}" - agent_class = _resolve_fully_qualified_name(agent_class_name) + agent_class = resolve_fully_qualified_name(agent_class_name) if inspect.isclass(agent_class) and issubclass(agent_class, BaseAgent): return agent_class @@ -103,8 +103,8 @@ def _load_config_from_path(config_path: str) -> AgentConfig: return AgentConfig.model_validate(config_data) -@working_in_progress("_resolve_fully_qualified_name is not ready for use.") -def _resolve_fully_qualified_name(name: str) -> Any: +@working_in_progress("resolve_fully_qualified_name is not ready for use.") +def resolve_fully_qualified_name(name: str) -> Any: try: module_path, obj_name = name.rsplit(".", 1) module = importlib.import_module(module_path) diff --git a/src/google/adk/tools/base_tool.py b/src/google/adk/tools/base_tool.py index b13f3abaf..20ad7d3e8 100644 --- a/src/google/adk/tools/base_tool.py +++ b/src/google/adk/tools/base_tool.py @@ -15,11 +15,18 @@ from __future__ import annotations from abc import ABC +import inspect +import logging from typing import Any +from typing import Callable +from typing import get_args +from typing import get_origin +from typing import get_type_hints from typing import Optional from typing import Type from typing import TYPE_CHECKING from typing import TypeVar +from typing import Union from google.genai import types from pydantic import BaseModel @@ -29,6 +36,8 @@ from ..utils.variant_utils import GoogleLLMVariant from .tool_context import ToolContext +logger = logging.getLogger("google_adk." + __name__) + if TYPE_CHECKING: from ..models.llm_request import LlmRequest @@ -134,8 +143,9 @@ def from_config( ) -> SelfTool: """Creates a tool instance from a config. - Subclasses should override and implement this method to do custom - initialization from a config. + This default implementation uses inspect to automatically map config values + to constructor arguments based on their type hints. Subclasses should + override this method for custom initialization logic. Args: config: The config for the tool. @@ -145,7 +155,66 @@ def from_config( Returns: The tool instance. 
""" - raise NotImplementedError(f"from_config for {cls} not implemented.") + from ..agents import config_agent_utils + + # Get the constructor signature and resolve type hints + sig = inspect.signature(cls.__init__) + type_hints = get_type_hints(cls.__init__) + config_dict = config.model_dump() + kwargs = {} + + # Iterate through constructor parameters (skip "self") + for param_name, _ in sig.parameters.items(): + if param_name == "self": + continue + param_type = type_hints.get(param_name) + + if param_name in config_dict: + value = config_dict[param_name] + + # Get the actual type T of the parameter if it's Optional[T] + if get_origin(param_type) is Union: + # This is Optional[T] which is Union[T, None] + args = get_args(param_type) + if len(args) == 2 and type(None) in args: + # Get the non-None type + actual_type = args[0] if args[1] is type(None) else args[1] + param_type = actual_type + + if param_type in (int, str, bool, float): + kwargs[param_name] = value + elif ( + inspect.isclass(param_type) + and issubclass(param_type, BaseModel) + and value is not None + ): + kwargs[param_name] = param_type.model_validate(value) + elif param_type is Callable or get_origin(param_type) is Callable: + kwargs[param_name] = config_agent_utils.resolve_fully_qualified_name( + value + ) + elif param_type in (list, set, dict): + kwargs[param_name] = param_type(value) + elif get_origin(param_type) is list: + list_args = get_args(param_type) + if issubclass(list_args[0], BaseModel): + kwargs[param_name] = [ + list_args[0].model_validate(item) for item in value + ] + elif list_args[0] in (int, str, bool, float): + kwargs[param_name] = value + elif list_args[0] is Callable or get_origin(list_args[0]) is Callable: + kwargs[param_name] = [ + config_agent_utils.resolve_fully_qualified_name(item) + for item in value + ] + else: + logger.warning( + "Unsupported parsing for list argument: %s.", param_name + ) + else: + logger.warning("Unsupported parsing for argument: %s.", param_name) + return cls(**kwargs) def _find_tool_with_function_declarations( @@ -218,9 +287,9 @@ class ToolConfig(BaseModel): my_tool_arg2: value2 ``` - 4. For user-defined functions that generate tool instances, the `name` is the - fully qualified path to the function and `config` is passed to the function - as arguments. + 4. For user-defined functions that generate tool instances, the `name` is + the fully qualified path to the function and `config` is passed to the + function as arguments. ``` tools: diff --git a/tests/unittests/tools/test_tool_config.py b/tests/unittests/tools/test_tool_config.py new file mode 100644 index 000000000..b12bbe5fc --- /dev/null +++ b/tests/unittests/tools/test_tool_config.py @@ -0,0 +1,56 @@ +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from google.adk.tools import VertexAiSearchTool +from google.adk.tools.base_tool import ToolConfig +from google.genai import types +import yaml + + +def test_vertex_ai_search_tool_config(): + yaml_content = """\ +name: VertexAiSearchTool +args: + data_store_specs: + - data_store: projects/my-project/locations/us-central1/collections/my-collection/dataStores/my-datastore1 + filter: filter1 + - data_store: projects/my-project/locations/us-central1/collections/my-collection/dataStores/my-dataStore2 + filter: filter2 + filter: filter + max_results: 10 + search_engine_id: projects/my-project/locations/us-central1/collections/my-collection/engines/my-engine + """ + config_data = yaml.safe_load(yaml_content) + config = ToolConfig.model_validate(config_data) + + tool = VertexAiSearchTool.from_config(config.args, "") + assert isinstance(tool, VertexAiSearchTool) + assert isinstance(tool.data_store_specs[0], types.VertexAISearchDataStoreSpec) + assert ( + tool.data_store_specs[0].data_store + == "projects/my-project/locations/us-central1/collections/my-collection/dataStores/my-datastore1" + ) + assert tool.data_store_specs[0].filter == "filter1" + assert isinstance(tool.data_store_specs[0], types.VertexAISearchDataStoreSpec) + assert ( + tool.data_store_specs[1].data_store + == "projects/my-project/locations/us-central1/collections/my-collection/dataStores/my-dataStore2" + ) + assert tool.data_store_specs[1].filter == "filter2" + assert tool.filter == "filter" + assert tool.max_results == 10 + assert ( + tool.search_engine_id + == "projects/my-project/locations/us-central1/collections/my-collection/engines/my-engine" + ) From e3c2bf30620a53e9f61c60eac891e6f366aa9ba2 Mon Sep 17 00:00:00 2001 From: Liang Wu Date: Tue, 5 Aug 2025 23:25:22 -0700 Subject: [PATCH 27/41] chore: remove unused Example-related classes PiperOrigin-RevId: 791538058 --- src/google/adk/agents/llm_agent.py | 3 --- 1 file changed, 3 deletions(-) diff --git a/src/google/adk/agents/llm_agent.py b/src/google/adk/agents/llm_agent.py index 92faddb8a..86ace586a 100644 --- a/src/google/adk/agents/llm_agent.py +++ b/src/google/adk/agents/llm_agent.py @@ -38,8 +38,6 @@ from ..code_executors.base_code_executor import BaseCodeExecutor from ..events.event import Event -from ..examples.base_example_provider import BaseExampleProvider -from ..examples.example import Example from ..flows.llm_flows.auto_flow import AutoFlow from ..flows.llm_flows.base_llm_flow import BaseLlmFlow from ..flows.llm_flows.single_flow import SingleFlow @@ -110,7 +108,6 @@ ] ToolUnion: TypeAlias = Union[Callable, BaseTool, BaseToolset] -ExamplesUnion = Union[list[Example], BaseExampleProvider] async def _convert_tool_union_to_tools( From 53803522b6acbdfd929ca93565a27477ddeae855 Mon Sep 17 00:00:00 2001 From: "Wei Sun (Jack)" Date: Tue, 5 Aug 2025 23:52:50 -0700 Subject: [PATCH 28/41] refactor(config): Makes `BaseAgent.from_config` a final method and let sub-class to optionally override `_parse_config` to update kwargs if needed This ensures that the pydantic hooks (e.g. model_validators) are triggered correctly. 
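
As a rough sketch of the new extension point (the `MyAgent`/`MyAgentConfig` names and the `retry_limit` field are hypothetical, not part of this change), a sub-class now only returns extra constructor kwargs instead of mutating an already constructed agent:

```
from typing import Any, Dict, Type

from google.adk.agents.base_agent import BaseAgent
from google.adk.agents.base_agent_config import BaseAgentConfig


class MyAgentConfig(BaseAgentConfig):
  retry_limit: int = 3


class MyAgent(BaseAgent):
  retry_limit: int = 3

  @classmethod
  def _parse_config(
      cls: Type["MyAgent"],
      config: MyAgentConfig,
      config_abs_path: str,
      kwargs: Dict[str, Any],
  ) -> Dict[str, Any]:
    # Only add the fields this sub-class cares about. The final
    # BaseAgent.from_config still assembles the common kwargs and calls
    # cls(**kwargs), so pydantic model_validators run on the full agent.
    if config.retry_limit:
      kwargs["retry_limit"] = config.retry_limit
    return kwargs
```
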
PiperOrigin-RevId: 791545704 --- src/google/adk/agents/base_agent.py | 45 ++++++++++++++++++---- src/google/adk/agents/llm_agent.py | 46 +++++++++++------------ src/google/adk/agents/loop_agent.py | 17 +++++---- src/google/adk/agents/parallel_agent.py | 12 +----- src/google/adk/agents/sequential_agent.py | 10 ----- 5 files changed, 72 insertions(+), 58 deletions(-) diff --git a/src/google/adk/agents/base_agent.py b/src/google/adk/agents/base_agent.py index 1d2d8c027..98f7b1254 100644 --- a/src/google/adk/agents/base_agent.py +++ b/src/google/adk/agents/base_agent.py @@ -504,8 +504,8 @@ def __set_parent_agent_for_sub_agents(self) -> BaseAgent: sub_agent.parent_agent = self return self + @final @classmethod - @working_in_progress('BaseAgent.from_config is not ready for use.') def from_config( cls: Type[SelfAgent], config: BaseAgentConfig, @@ -513,11 +513,8 @@ def from_config( ) -> SelfAgent: """Creates an agent from a config. - This method converts fields in a config to the corresponding - fields in an agent. - - Child classes should re-implement this method to support loading from their - custom config types. + If sub-classes uses a custom agent config, override `_from_config_kwargs` + method to return an updated kwargs for agent construstor. Args: config: The config to create the agent from. @@ -527,6 +524,40 @@ def from_config( Returns: The created agent. """ + kwargs = cls.__create_kwargs(config, config_abs_path) + kwargs = cls._parse_config(config, config_abs_path, kwargs) + return cls(**kwargs) + + @classmethod + def _parse_config( + cls: Type[SelfAgent], + config: BaseAgentConfig, + config_abs_path: str, + kwargs: Dict[str, Any], + ) -> Dict[str, Any]: + """Parses the config and returns updated kwargs to construct the agent. + + Sub-classes should override this method to use a custome agent config class. + + Args: + config: The config to parse. + config_abs_path: The absolute path to the config file that contains the + agent config. + kwargs: The keyword arguments used for agent constructor. + + Returns: + The updated keyword arguments used for agent constructor. 
+ """ + return kwargs + + @classmethod + def __create_kwargs( + cls, + config: BaseAgentConfig, + config_abs_path: str, + ) -> Dict[str, Any]: + """Creates kwargs for the fields of BaseAgent.""" + from .config_agent_utils import resolve_agent_reference from .config_agent_utils import resolve_callbacks @@ -549,4 +580,4 @@ def from_config( kwargs['after_agent_callback'] = resolve_callbacks( config.after_agent_callbacks ) - return cls(**kwargs) + return kwargs diff --git a/src/google/adk/agents/llm_agent.py b/src/google/adk/agents/llm_agent.py index 86ace586a..db957f93e 100644 --- a/src/google/adk/agents/llm_agent.py +++ b/src/google/adk/agents/llm_agent.py @@ -17,12 +17,12 @@ import importlib import inspect import logging -import os from typing import Any from typing import AsyncGenerator from typing import Awaitable from typing import Callable from typing import ClassVar +from typing import Dict from typing import Literal from typing import Optional from typing import Type @@ -46,7 +46,6 @@ from ..models.llm_response import LlmResponse from ..models.registry import LLMRegistry from ..planners.base_planner import BasePlanner -from ..tools.agent_tool import AgentTool from ..tools.base_tool import BaseTool from ..tools.base_tool import ToolConfig from ..tools.base_toolset import BaseToolset @@ -56,7 +55,6 @@ from .base_agent import BaseAgent from .base_agent_config import BaseAgentConfig from .callback_context import CallbackContext -from .common_configs import CodeConfig from .invocation_context import InvocationContext from .llm_agent_config import LlmAgentConfig from .readonly_context import ReadonlyContext @@ -586,53 +584,55 @@ def _resolve_tools( return resolved_tools - @classmethod @override - @working_in_progress('LlmAgent.from_config is not ready for use.') - def from_config( + @classmethod + def _parse_config( cls: Type[LlmAgent], config: LlmAgentConfig, config_abs_path: str, - ) -> LlmAgent: + kwargs: Dict[str, Any], + ) -> Dict[str, Any]: from .config_agent_utils import resolve_callbacks from .config_agent_utils import resolve_code_reference - agent = super().from_config(config, config_abs_path) if config.model: - agent.model = config.model + kwargs['model'] = config.model if config.instruction: - agent.instruction = config.instruction + kwargs['instruction'] = config.instruction if config.disallow_transfer_to_parent: - agent.disallow_transfer_to_parent = config.disallow_transfer_to_parent + kwargs['disallow_transfer_to_parent'] = config.disallow_transfer_to_parent if config.disallow_transfer_to_peers: - agent.disallow_transfer_to_peers = config.disallow_transfer_to_peers + kwargs['disallow_transfer_to_peers'] = config.disallow_transfer_to_peers if config.include_contents != 'default': - agent.include_contents = config.include_contents + kwargs['include_contents'] = config.include_contents if config.input_schema: - agent.input_schema = resolve_code_reference(config.input_schema) + kwargs['input_schema'] = resolve_code_reference(config.input_schema) if config.output_schema: - agent.output_schema = resolve_code_reference(config.output_schema) + kwargs['output_schema'] = resolve_code_reference(config.output_schema) if config.output_key: - agent.output_key = config.output_key + kwargs['output_key'] = config.output_key if config.tools: - agent.tools = cls._resolve_tools(config.tools, config_abs_path) + kwargs['tools'] = cls._resolve_tools(config.tools, config_abs_path) if config.before_model_callbacks: - agent.before_model_callback = resolve_callbacks( + kwargs['before_model_callback'] = 
resolve_callbacks( config.before_model_callbacks ) if config.after_model_callbacks: - agent.after_model_callback = resolve_callbacks( + kwargs['after_model_callback'] = resolve_callbacks( config.after_model_callbacks ) if config.before_tool_callbacks: - agent.before_tool_callback = resolve_callbacks( + kwargs['before_tool_callback'] = resolve_callbacks( config.before_tool_callbacks ) if config.after_tool_callbacks: - agent.after_tool_callback = resolve_callbacks(config.after_tool_callbacks) + kwargs['after_tool_callback'] = resolve_callbacks( + config.after_tool_callbacks + ) if config.generate_content_config: - agent.generate_content_config = config.generate_content_config - return agent + kwargs['generate_content_config'] = config.generate_content_config + + return kwargs Agent: TypeAlias = LlmAgent diff --git a/src/google/adk/agents/loop_agent.py b/src/google/adk/agents/loop_agent.py index 812361a32..de4f34381 100644 --- a/src/google/adk/agents/loop_agent.py +++ b/src/google/adk/agents/loop_agent.py @@ -16,8 +16,10 @@ from __future__ import annotations +from typing import Any from typing import AsyncGenerator from typing import ClassVar +from typing import Dict from typing import Optional from typing import Type @@ -74,15 +76,14 @@ async def _run_live_impl( raise NotImplementedError('This is not supported yet for LoopAgent.') yield # AsyncGenerator requires having at least one yield statement - @classmethod @override - @working_in_progress('LoopAgent.from_config is not ready for use.') - def from_config( - cls: Type[LoopAgent], + @classmethod + def _parse_config( + cls: type[LoopAgent], config: LoopAgentConfig, config_abs_path: str, - ) -> LoopAgent: - agent = super().from_config(config, config_abs_path) + kwargs: Dict[str, Any], + ) -> Dict[str, Any]: if config.max_iterations: - agent.max_iterations = config.max_iterations - return agent + kwargs['max_iterations'] = config.max_iterations + return kwargs diff --git a/src/google/adk/agents/parallel_agent.py b/src/google/adk/agents/parallel_agent.py index f8c4c28e4..bb8e0a462 100644 --- a/src/google/adk/agents/parallel_agent.py +++ b/src/google/adk/agents/parallel_agent.py @@ -17,8 +17,10 @@ from __future__ import annotations import asyncio +from typing import Any from typing import AsyncGenerator from typing import ClassVar +from typing import Dict from typing import Type from typing_extensions import override @@ -119,13 +121,3 @@ async def _run_live_impl( ) -> AsyncGenerator[Event, None]: raise NotImplementedError('This is not supported yet for ParallelAgent.') yield # AsyncGenerator requires having at least one yield statement - - @classmethod - @override - @working_in_progress('ParallelAgent.from_config is not ready for use.') - def from_config( - cls: Type[ParallelAgent], - config: ParallelAgentConfig, - config_abs_path: str, - ) -> ParallelAgent: - return super().from_config(config, config_abs_path) diff --git a/src/google/adk/agents/sequential_agent.py b/src/google/adk/agents/sequential_agent.py index c0c832ff1..10d1e7c2a 100644 --- a/src/google/adk/agents/sequential_agent.py +++ b/src/google/adk/agents/sequential_agent.py @@ -81,13 +81,3 @@ def task_completed(): for sub_agent in self.sub_agents: async for event in sub_agent.run_live(ctx): yield event - - @classmethod - @override - @working_in_progress('SequentialAgent.from_config is not ready for use.') - def from_config( - cls: Type[SequentialAgent], - config: SequentialAgentConfig, - config_abs_path: str, - ) -> SequentialAgent: - return super().from_config(config, 
config_abs_path) From 1686cc57c28897c2cca5f97f6e3d3382aaa9d26c Mon Sep 17 00:00:00 2001 From: Liang Wu Date: Wed, 6 Aug 2025 10:18:19 -0700 Subject: [PATCH 29/41] feat(config): implement configs and from_config() for CrewaiTool and LangchainTool PiperOrigin-RevId: 791742964 --- src/google/adk/tools/crewai_tool.py | 28 +++++++++++++++++++ src/google/adk/tools/langchain_tool.py | 38 +++++++++++++++++++++++--- 2 files changed, 62 insertions(+), 4 deletions(-) diff --git a/src/google/adk/tools/crewai_tool.py b/src/google/adk/tools/crewai_tool.py index db4c533d2..50be61c5e 100644 --- a/src/google/adk/tools/crewai_tool.py +++ b/src/google/adk/tools/crewai_tool.py @@ -18,6 +18,8 @@ from typing_extensions import override from . import _automatic_function_calling_util +from .base_tool import BaseToolConfig +from .base_tool import ToolArgsConfig from .function_tool import FunctionTool try: @@ -70,3 +72,29 @@ def _get_declaration(self) -> types.FunctionDeclaration: self.tool.args_schema.model_json_schema(), ) return function_declaration + + @override + @classmethod + def from_config( + cls: type[CrewaiTool], config: ToolArgsConfig, config_abs_path: str + ) -> CrewaiTool: + from ..agents import config_agent_utils + + crewai_tool_config = CrewaiToolConfig.model_validate(config.model_dump()) + tool = config_agent_utils.resolve_fully_qualified_name( + crewai_tool_config.tool + ) + name = crewai_tool_config.name + description = crewai_tool_config.description + return cls(tool, name=name, description=description) + + +class CrewaiToolConfig(BaseToolConfig): + tool: str + """The fully qualified path of the CrewAI tool instance.""" + + name: str = "" + """The name of the tool.""" + + description: str = "" + """The description of the tool.""" diff --git a/src/google/adk/tools/langchain_tool.py b/src/google/adk/tools/langchain_tool.py index 1d91beb53..f4d2206ea 100644 --- a/src/google/adk/tools/langchain_tool.py +++ b/src/google/adk/tools/langchain_tool.py @@ -19,11 +19,13 @@ from google.genai import types from langchain.agents import Tool -from langchain_core.tools import BaseTool +from langchain_core.tools import BaseTool as LangchainBaseTool from langchain_core.tools.structured import StructuredTool from typing_extensions import override from . import _automatic_function_calling_util +from .base_tool import BaseToolConfig +from .base_tool import ToolArgsConfig from .function_tool import FunctionTool @@ -50,12 +52,12 @@ class LangchainTool(FunctionTool): wrapped_tool = LangchainTool(search_tool) """ - _langchain_tool: Union[BaseTool, object] + _langchain_tool: Union[LangchainBaseTool, object] """The wrapped langchain tool.""" def __init__( self, - tool: Union[BaseTool, object], + tool: Union[LangchainBaseTool, object], name: Optional[str] = None, description: Optional[str] = None, ): @@ -114,7 +116,7 @@ def _get_declaration(self) -> types.FunctionDeclaration: # 2. Other tools: the tool doesn't inherit any class but follow some # conventions, like having a "run" method. 
# Handle BaseTool type (preferred Langchain approach) - if isinstance(self._langchain_tool, BaseTool): + if isinstance(self._langchain_tool, LangchainBaseTool): tool_wrapper = Tool( name=self.name, func=self.func, @@ -148,3 +150,31 @@ def _get_declaration(self) -> types.FunctionDeclaration: raise ValueError( f'Failed to build function declaration for Langchain tool: {e}' ) from e + + @override + @classmethod + def from_config( + cls: type[LangchainTool], config: ToolArgsConfig, config_abs_path: str + ) -> LangchainTool: + from ..agents import config_agent_utils + + langchain_tool_config = LangchainToolConfig.model_validate( + config.model_dump() + ) + tool = config_agent_utils.resolve_fully_qualified_name( + langchain_tool_config.tool + ) + name = langchain_tool_config.name + description = langchain_tool_config.description + return cls(tool, name=name, description=description) + + +class LangchainToolConfig(BaseToolConfig): + tool: str + """The fully qualified path of the Langchain tool instance.""" + + name: str = '' + """The name of the tool.""" + + description: str = '' + """The description of the tool.""" From e528749a1c9653e2b457e21e4ae6b0b2da834b2c Mon Sep 17 00:00:00 2001 From: "Xiang (Sean) Zhou" Date: Wed, 6 Aug 2025 10:54:17 -0700 Subject: [PATCH 30/41] fix: lazy import VertexAiRagRetrieval original codes try to eagerly import VertexAiRagRetrieval while it doesn't want to raise error if client try to import other names in this package and dependencies of VertexAiRagRetrieval is missing. so it swallow the import error which doesn't make sense, given vertex sdk is a must have for VertexAiRagRetrieval, we should fail fast. this fix achieve the same purpose but fail fast if client try to import VertexAiRagRetrieval from this package and miss certain dependencies (e.g. vertex sdk) PiperOrigin-RevId: 791759776 --- src/google/adk/tools/retrieval/__init__.py | 16 ++++++---------- 1 file changed, 6 insertions(+), 10 deletions(-) diff --git a/src/google/adk/tools/retrieval/__init__.py b/src/google/adk/tools/retrieval/__init__.py index 5eb5d77e2..537780611 100644 --- a/src/google/adk/tools/retrieval/__init__.py +++ b/src/google/adk/tools/retrieval/__init__.py @@ -20,17 +20,13 @@ 'BaseRetrievalTool', 'FilesRetrieval', 'LlamaIndexRetrieval', + 'VertexAiRagRetrieval', ] -try: - from .vertex_ai_rag_retrieval import VertexAiRagRetrieval - __all__.append('VertexAiRagRetrieval') -except ImportError: - import logging +def __getattr__(name: str): + if name == 'VertexAiRagRetrieval': + from .vertex_ai_rag_retrieval import VertexAiRagRetrieval - logger = logging.getLogger('google_adk.' + __name__) - logger.debug( - 'The Vertex sdk is not installed. If you want to use the Vertex RAG with' - ' agents, please install it. If not, you can ignore this warning.' - ) + return VertexAiRagRetrieval + raise AttributeError(f"module '{__name__}' has no attribute '{name}'") From e73d71d32415c03274671601a10b131d6927c116 Mon Sep 17 00:00:00 2001 From: Liang Wu Date: Wed, 6 Aug 2025 11:04:46 -0700 Subject: [PATCH 31/41] feat(config): implement config and from_config for ExampleTool Only list[Example] is supported in config. BaseExampleProvider will need to be used in code. 
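
A rough sketch of what the inline list form can look like is shown below; the dict shapes for `Example`/`Content` follow the `google.genai` types and are assumptions rather than taken from this change, and `ToolArgsConfig` is imported from `base_tool.py` here because it only moves to `tool_configs.py` in a later commit in this series.

```
from google.adk.tools.base_tool import ToolArgsConfig
from google.adk.tools.example_tool import ExampleTool

args = ToolArgsConfig.model_validate({
    "examples": [{
        "input": {
            "role": "user",
            "parts": [{"text": "Roll a six sided die."}],
        },
        "output": [{
            "role": "model",
            "parts": [{"text": "I rolled a 4 for you."}],
        }],
    }]
})

# from_config validates the args against ExampleToolConfig and builds the tool.
tool = ExampleTool.from_config(args, config_abs_path="")
```
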
PiperOrigin-RevId: 791763913 --- src/google/adk/tools/example_tool.py | 33 ++++++++++++++++++++++++++++ 1 file changed, 33 insertions(+) diff --git a/src/google/adk/tools/example_tool.py b/src/google/adk/tools/example_tool.py index a59c0a276..4eab01c6b 100644 --- a/src/google/adk/tools/example_tool.py +++ b/src/google/adk/tools/example_tool.py @@ -24,6 +24,8 @@ from ..examples.base_example_provider import BaseExampleProvider from ..examples.example import Example from .base_tool import BaseTool +from .base_tool import BaseToolConfig +from .base_tool import ToolArgsConfig from .tool_context import ToolContext if TYPE_CHECKING: @@ -60,3 +62,34 @@ async def process_llm_request( self.examples, parts[0].text, llm_request.model ) ]) + + @override + @classmethod + def from_config( + cls: type[ExampleTool], config: ToolArgsConfig, config_abs_path: str + ) -> ExampleTool: + from ..agents import config_agent_utils + + example_tool_config = ExampleToolConfig.model_validate(config.model_dump()) + if isinstance(example_tool_config.examples, str): + example_provider = config_agent_utils.resolve_fully_qualified_name( + example_tool_config.examples + ) + if not isinstance(example_provider, BaseExampleProvider): + raise ValueError( + 'Example provider must be an instance of BaseExampleProvider.' + ) + return cls(example_provider) + elif isinstance(example_tool_config.examples, list): + return cls(example_tool_config.examples) + else: + raise ValueError( + 'Example tool config must be a list of examples or a fully-qualified' + ' name to a BaseExampleProvider object in code.' + ) + + +class ExampleToolConfig(BaseToolConfig): + examples: Union[list[Example], str] + """The examples to add to the LLM request. User can either provide a list of + examples or a fully-qualified name to a BaseExampleProvider object in code.""" From 54cc849de7e4c01e7954b2d896cc47d0ec390886 Mon Sep 17 00:00:00 2001 From: Google Team Member Date: Wed, 6 Aug 2025 12:10:53 -0700 Subject: [PATCH 32/41] feat: Add metadata field to ADK BaseTool PiperOrigin-RevId: 791790030 --- src/google/adk/runners.py | 3 +++ src/google/adk/tools/base_tool.py | 19 ++++++++++++++++++- 2 files changed, 21 insertions(+), 1 deletion(-) diff --git a/src/google/adk/runners.py b/src/google/adk/runners.py index c6cd0eef2..51fdb9658 100644 --- a/src/google/adk/runners.py +++ b/src/google/adk/runners.py @@ -71,6 +71,7 @@ class Runner: plugin_manager: The plugin manager for the runner. session_service: The session service for the runner. memory_service: The memory service for the runner. + credential_service: The credential service for the runner. """ app_name: str @@ -104,9 +105,11 @@ def __init__( Args: app_name: The application name of the runner. agent: The root agent to run. + plugins: A list of plugins for the runner. artifact_service: The artifact service for the runner. session_service: The session service for the runner. memory_service: The memory service for the runner. + credential_service: The credential service for the runner. 
""" self.app_name = app_name self.agent = agent diff --git a/src/google/adk/tools/base_tool.py b/src/google/adk/tools/base_tool.py index 20ad7d3e8..d38acd080 100644 --- a/src/google/adk/tools/base_tool.py +++ b/src/google/adk/tools/base_tool.py @@ -56,10 +56,27 @@ class BaseTool(ABC): """Whether the tool is a long running operation, which typically returns a resource id first and finishes the operation later.""" - def __init__(self, *, name, description, is_long_running: bool = False): + custom_metadata: Optional[dict[str, Any]] = None + """The custom metadata of the BaseTool. + + An optional key-value pair for storing and retrieving tool-specific metadata, + such as tool manifests, etc. + + NOTE: the entire dict must be JSON serializable. + """ + + def __init__( + self, + *, + name, + description, + is_long_running: bool = False, + custom_metadata: Optional[dict[str, Any]] = None, + ): self.name = name self.description = description self.is_long_running = is_long_running + self.custom_metadata = custom_metadata def _get_declaration(self) -> Optional[types.FunctionDeclaration]: """Gets the OpenAPI specification of this tool in the form of a FunctionDeclaration. From ef837015f3eda8f5ab41ecdc21a5565365ceebdf Mon Sep 17 00:00:00 2001 From: Liang Wu Date: Wed, 6 Aug 2025 14:17:34 -0700 Subject: [PATCH 33/41] refactor(config): move BaseToolConfig to a separate file PiperOrigin-RevId: 791841562 --- src/google/adk/agents/llm_agent.py | 2 +- src/google/adk/agents/llm_agent_config.py | 2 +- src/google/adk/tools/agent_tool.py | 4 +- src/google/adk/tools/base_tool.py | 103 ----------------- src/google/adk/tools/crewai_tool.py | 4 +- src/google/adk/tools/example_tool.py | 4 +- src/google/adk/tools/langchain_tool.py | 4 +- src/google/adk/tools/tool_configs.py | 128 ++++++++++++++++++++++ tests/unittests/tools/test_tool_config.py | 2 +- 9 files changed, 139 insertions(+), 114 deletions(-) create mode 100644 src/google/adk/tools/tool_configs.py diff --git a/src/google/adk/agents/llm_agent.py b/src/google/adk/agents/llm_agent.py index db957f93e..23450df17 100644 --- a/src/google/adk/agents/llm_agent.py +++ b/src/google/adk/agents/llm_agent.py @@ -47,9 +47,9 @@ from ..models.registry import LLMRegistry from ..planners.base_planner import BasePlanner from ..tools.base_tool import BaseTool -from ..tools.base_tool import ToolConfig from ..tools.base_toolset import BaseToolset from ..tools.function_tool import FunctionTool +from ..tools.tool_configs import ToolConfig from ..tools.tool_context import ToolContext from ..utils.feature_decorator import working_in_progress from .base_agent import BaseAgent diff --git a/src/google/adk/agents/llm_agent_config.py b/src/google/adk/agents/llm_agent_config.py index d65ec34ee..c7500d096 100644 --- a/src/google/adk/agents/llm_agent_config.py +++ b/src/google/adk/agents/llm_agent_config.py @@ -22,7 +22,7 @@ from google.genai import types from pydantic import ConfigDict -from ..tools.base_tool import ToolConfig +from ..tools.tool_configs import ToolConfig from .base_agent_config import BaseAgentConfig from .common_configs import CodeConfig diff --git a/src/google/adk/tools/agent_tool.py b/src/google/adk/tools/agent_tool.py index de46b9a7b..c0d07238d 100644 --- a/src/google/adk/tools/agent_tool.py +++ b/src/google/adk/tools/agent_tool.py @@ -28,8 +28,8 @@ from ..memory.in_memory_memory_service import InMemoryMemoryService from ._forwarding_artifact_service import ForwardingArtifactService from .base_tool import BaseTool -from .base_tool import BaseToolConfig -from .base_tool 
import ToolArgsConfig +from .tool_configs import BaseToolConfig +from .tool_configs import ToolArgsConfig from .tool_context import ToolContext if TYPE_CHECKING: diff --git a/src/google/adk/tools/base_tool.py b/src/google/adk/tools/base_tool.py index d38acd080..21f721fba 100644 --- a/src/google/adk/tools/base_tool.py +++ b/src/google/adk/tools/base_tool.py @@ -249,106 +249,3 @@ def _find_tool_with_function_declarations( ), None, ) - - -class ToolArgsConfig(BaseModel): - """The configuration for tool arguments. - - This config allows arbitrary key-value pairs as tool arguments. - """ - - model_config = ConfigDict(extra="allow") - - -class ToolConfig(BaseModel): - """The configuration for a tool. - - The config supports these types of tools: - 1. ADK built-in tools - 2. User-defined tool instances - 3. User-defined tool classes - 4. User-defined functions that generate tool instances - 5. User-defined function tools - - For examples: - - 1. For ADK built-in tool instances or classes in `google.adk.tools` package, - they can be referenced directly with the `name` and optionally with - `config`. - - ``` - tools: - - name: google_search - - name: AgentTool - config: - agent: ./another_agent.yaml - skip_summarization: true - ``` - - 2. For user-defined tool instances, the `name` is the fully qualified path - to the tool instance. - - ``` - tools: - - name: my_package.my_module.my_tool - ``` - - 3. For user-defined tool classes (custom tools), the `name` is the fully - qualified path to the tool class and `config` is the arguments for the tool. - - ``` - tools: - - name: my_package.my_module.my_tool_class - config: - my_tool_arg1: value1 - my_tool_arg2: value2 - ``` - - 4. For user-defined functions that generate tool instances, the `name` is - the fully qualified path to the function and `config` is passed to the - function as arguments. - - ``` - tools: - - name: my_package.my_module.my_tool_function - config: - my_function_arg1: value1 - my_function_arg2: value2 - ``` - - The function must have the following signature: - ``` - def my_function(config: ToolArgsConfig) -> BaseTool: - ... - ``` - - 5. For user-defined function tools, the `name` is the fully qualified path - to the function. - - ``` - tools: - - name: my_package.my_module.my_function_tool - ``` - """ - - model_config = ConfigDict(extra="forbid") - - name: str - """The name of the tool. - - For ADK built-in tools, the name is the name of the tool, e.g. `google_search` - or `AgentTool`. - - For user-defined tools, the name is the fully qualified path to the tool, e.g. - `my_package.my_module.my_tool`. - """ - - args: Optional[ToolArgsConfig] = None - """The args for the tool.""" - - -class BaseToolConfig(BaseModel): - """The base configurations for all the tools.""" - - model_config = ConfigDict(extra="forbid") - """Forbid extra fields.""" diff --git a/src/google/adk/tools/crewai_tool.py b/src/google/adk/tools/crewai_tool.py index 50be61c5e..9dfe7cc75 100644 --- a/src/google/adk/tools/crewai_tool.py +++ b/src/google/adk/tools/crewai_tool.py @@ -18,9 +18,9 @@ from typing_extensions import override from . 
import _automatic_function_calling_util -from .base_tool import BaseToolConfig -from .base_tool import ToolArgsConfig from .function_tool import FunctionTool +from .tool_configs import BaseToolConfig +from .tool_configs import ToolArgsConfig try: from crewai.tools import BaseTool as CrewaiBaseTool diff --git a/src/google/adk/tools/example_tool.py b/src/google/adk/tools/example_tool.py index 4eab01c6b..67197dc38 100644 --- a/src/google/adk/tools/example_tool.py +++ b/src/google/adk/tools/example_tool.py @@ -24,8 +24,8 @@ from ..examples.base_example_provider import BaseExampleProvider from ..examples.example import Example from .base_tool import BaseTool -from .base_tool import BaseToolConfig -from .base_tool import ToolArgsConfig +from .tool_configs import BaseToolConfig +from .tool_configs import ToolArgsConfig from .tool_context import ToolContext if TYPE_CHECKING: diff --git a/src/google/adk/tools/langchain_tool.py b/src/google/adk/tools/langchain_tool.py index f4d2206ea..44f884ff6 100644 --- a/src/google/adk/tools/langchain_tool.py +++ b/src/google/adk/tools/langchain_tool.py @@ -24,9 +24,9 @@ from typing_extensions import override from . import _automatic_function_calling_util -from .base_tool import BaseToolConfig -from .base_tool import ToolArgsConfig from .function_tool import FunctionTool +from .tool_configs import BaseToolConfig +from .tool_configs import ToolArgsConfig class LangchainTool(FunctionTool): diff --git a/src/google/adk/tools/tool_configs.py b/src/google/adk/tools/tool_configs.py new file mode 100644 index 000000000..a1b82077a --- /dev/null +++ b/src/google/adk/tools/tool_configs.py @@ -0,0 +1,128 @@ +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import annotations + +from typing import Optional + +from pydantic import BaseModel +from pydantic import ConfigDict + +from ..utils.feature_decorator import working_in_progress + + +@working_in_progress("BaseToolConfig is not ready for use.") +class BaseToolConfig(BaseModel): + """The base class for all tool configs.""" + + model_config = ConfigDict(extra="forbid") + """Forbid extra fields.""" + + +@working_in_progress("ToolArgsConfig is not ready for use.") +class ToolArgsConfig(BaseModel): + """Config to host free key-value pairs for the args in ToolConfig.""" + + model_config = ConfigDict(extra="allow") + + +@working_in_progress("ToolConfig is not ready for use.") +class ToolConfig(BaseModel): + """The configuration for a tool. + + The config supports these types of tools: + 1. ADK built-in tools + 2. User-defined tool instances + 3. User-defined tool classes + 4. User-defined functions that generate tool instances + 5. User-defined function tools + + For examples: + + 1. For ADK built-in tool instances or classes in `google.adk.tools` package, + they can be referenced directly with the `name` and optionally with + `args`. + + ``` + tools: + - name: google_search + - name: AgentTool + args: + agent: ./another_agent.yaml + skip_summarization: true + ``` + + 2. 
For user-defined tool instances, the `name` is the fully qualified path + to the tool instance. + + ``` + tools: + - name: my_package.my_module.my_tool + ``` + + 3. For user-defined tool classes (custom tools), the `name` is the fully + qualified path to the tool class and `args` is the arguments for the tool. + + ``` + tools: + - name: my_package.my_module.my_tool_class + args: + my_tool_arg1: value1 + my_tool_arg2: value2 + ``` + + 4. For user-defined functions that generate tool instances, the `name` is + the fully qualified path to the function and `args` is passed to the + function as arguments. + + ``` + tools: + - name: my_package.my_module.my_tool_function + args: + my_function_arg1: value1 + my_function_arg2: value2 + ``` + + The function must have the following signature: + ``` + def my_function(args: ToolArgsConfig) -> BaseTool: + ... + ``` + + 5. For user-defined function tools, the `name` is the fully qualified path + to the function. + + ``` + tools: + - name: my_package.my_module.my_function_tool + ``` + + If the above use cases don't suffice, users can define a custom tool config + by extending BaseToolConfig and override from_config() in the custom tool. + """ + + model_config = ConfigDict(extra="forbid") + + name: str + """The name of the tool. + + For ADK built-in tools, `name` is the name of the tool, e.g. `google_search` + or `AgentTool`. + + For user-defined tools, the name is the fully qualified path to the tool, e.g. + `my_package.my_module.my_tool`. + """ + + args: Optional[ToolArgsConfig] = None + """The args for the tool.""" diff --git a/tests/unittests/tools/test_tool_config.py b/tests/unittests/tools/test_tool_config.py index b12bbe5fc..fefa50603 100644 --- a/tests/unittests/tools/test_tool_config.py +++ b/tests/unittests/tools/test_tool_config.py @@ -13,7 +13,7 @@ # limitations under the License. from google.adk.tools import VertexAiSearchTool -from google.adk.tools.base_tool import ToolConfig +from google.adk.tools.tool_configs import ToolConfig from google.genai import types import yaml From 6277dae7494f7e03e735af836b099333c44e949d Mon Sep 17 00:00:00 2001 From: Xuan Yang Date: Wed, 6 Aug 2025 17:48:33 -0700 Subject: [PATCH 34/41] chore: add a disclaimer for the response from the answering agent PiperOrigin-RevId: 791915465 --- contributing/samples/adk_answering_agent/agent.py | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/contributing/samples/adk_answering_agent/agent.py b/contributing/samples/adk_answering_agent/agent.py index 41f9aa807..d2e21668d 100644 --- a/contributing/samples/adk_answering_agent/agent.py +++ b/contributing/samples/adk_answering_agent/agent.py @@ -137,6 +137,12 @@ def add_comment_to_discussion( } } """ + comment_body = ( + "**Response from ADK Answering Agent (experimental, answer may be" + " inaccurate)**\n\n" + + comment_body + ) + variables = {"discussionId": discussion_id, "body": comment_body} try: response = run_graphql_query(query, variables) @@ -249,9 +255,6 @@ def add_label_to_discussion( information that is not in the document store. Do not invent citations which are not in the document store. * **Be Objective**: your answer should be based on the facts you found in the document store, do not be misled by user's assumptions or user's understanding of ADK. * If you can't find the answer or information in the document store, **do not** respond. - * Include a bolded note (e.g. "Response from ADK Answering Agent") in your comment - to indicate this comment was added by an ADK Answering Agent. 
- * Have an empty line between the note and the rest of your response. * Inlclude a short summary of your response in the comment as a TLDR, e.g. "**TLDR**: ". * Have a divider line between the TLDR and your detail response. * Do not respond to any other discussion except the one specified by the user. From dc193f7969227f0968842eaf8664a7b1d5687554 Mon Sep 17 00:00:00 2001 From: Liang Wu Date: Wed, 6 Aug 2025 18:09:14 -0700 Subject: [PATCH 35/41] fix(config): fix `adk create --type=config` Previously click didn't convert the input into the enum type. PiperOrigin-RevId: 791922529 --- src/google/adk/cli/cli_create.py | 23 +++++++------------- src/google/adk/cli/cli_tools_click.py | 6 ++--- tests/unittests/cli/utils/test_cli_create.py | 15 +++++++++---- 3 files changed, 22 insertions(+), 22 deletions(-) diff --git a/src/google/adk/cli/cli_create.py b/src/google/adk/cli/cli_create.py index dcaff53ea..fb6f19b6e 100644 --- a/src/google/adk/cli/cli_create.py +++ b/src/google/adk/cli/cli_create.py @@ -14,7 +14,6 @@ from __future__ import annotations -import enum import os import subprocess from typing import Optional @@ -22,12 +21,6 @@ import click - -class Type(enum.Enum): - CONFIG = "config" - CODE = "code" - - _INIT_PY_TEMPLATE = """\ from . import agent """ @@ -179,7 +172,7 @@ def _generate_files( google_cloud_project: Optional[str] = None, google_cloud_region: Optional[str] = None, model: Optional[str] = None, - type: Optional[Type] = None, + type: str, ): """Generates a folder name for the agent.""" os.makedirs(agent_folder, exist_ok=True) @@ -203,7 +196,7 @@ def _generate_files( lines.append(f"GOOGLE_CLOUD_LOCATION={google_cloud_region}") f.write("\n".join(lines)) - if type == Type.CONFIG: + if type == "config": with open(agent_config_file_path, "w", encoding="utf-8") as f: f.write(_AGENT_CONFIG_TEMPLATE.format(model_name=model)) with open(init_file_path, "w", encoding="utf-8") as f: @@ -263,7 +256,7 @@ def _prompt_to_choose_backend( return google_api_key, google_cloud_project, google_cloud_region -def _prompt_to_choose_type() -> Type: +def _prompt_to_choose_type() -> str: """Prompts user to choose type of agent to create.""" type_choice = click.prompt( """\ @@ -274,9 +267,9 @@ def _prompt_to_choose_type() -> Type: type=click.Choice(["1", "2"]), ) if type_choice == "1": - return Type.CONFIG + return "CONFIG" else: - return Type.CODE + return "CODE" def run_cmd( @@ -286,7 +279,7 @@ def run_cmd( google_api_key: Optional[str], google_cloud_project: Optional[str], google_cloud_region: Optional[str], - type: Optional[Type], + type: Optional[str], ): """Runs `adk create` command to create agent template. @@ -298,7 +291,7 @@ def run_cmd( VertexAI as backend. google_cloud_region: Optional[str], The Google Cloud region for using VertexAI as backend. - type: Optional[Type], Whether to define agent with config file or code. + type: Optional[str], Whether to define agent with config file or code. """ agent_folder = os.path.join(os.getcwd(), agent_name) # check folder doesn't exist or it's empty. 
Otherwise, throw @@ -331,5 +324,5 @@ def run_cmd( google_cloud_project=google_cloud_project, google_cloud_region=google_cloud_region, model=model, - type=type, + type=type.lower(), ) diff --git a/src/google/adk/cli/cli_tools_click.py b/src/google/adk/cli/cli_tools_click.py index aae633d82..13a0a620a 100644 --- a/src/google/adk/cli/cli_tools_click.py +++ b/src/google/adk/cli/cli_tools_click.py @@ -145,13 +145,13 @@ def deploy(): ) @click.option( "--type", - type=click.Choice([t.value for t in cli_create.Type]), + type=click.Choice(["CODE", "CONFIG"], case_sensitive=False), help=( "EXPERIMENTAL Optional. Type of agent to create: 'config' or 'code'." " 'config' is not ready for use so it defaults to 'code'. It may change" " later once 'config' is ready for use." ), - default=cli_create.Type.CODE.value, + default="CODE", show_default=True, hidden=True, # Won't show in --help output. Not ready for use. ) @@ -162,7 +162,7 @@ def cli_create_cmd( api_key: Optional[str], project: Optional[str], region: Optional[str], - type: Optional[cli_create.Type], + type: Optional[str], ): """Creates a new app in the current folder with prepopulated agent template. diff --git a/tests/unittests/cli/utils/test_cli_create.py b/tests/unittests/cli/utils/test_cli_create.py index 72ecdf957..14351a812 100644 --- a/tests/unittests/cli/utils/test_cli_create.py +++ b/tests/unittests/cli/utils/test_cli_create.py @@ -62,6 +62,7 @@ def test_generate_files_with_api_key(agent_folder: Path) -> None: str(agent_folder), google_api_key="dummy-key", model="gemini-2.0-flash-001", + type="code", ) env_content = (agent_folder / ".env").read_text() @@ -78,6 +79,7 @@ def test_generate_files_with_gcp(agent_folder: Path) -> None: google_cloud_project="proj", google_cloud_region="us-central1", model="gemini-2.0-flash-001", + type="code", ) env_content = (agent_folder / ".env").read_text() @@ -95,6 +97,7 @@ def test_generate_files_overwrite(agent_folder: Path) -> None: str(agent_folder), google_api_key="new-key", model="gemini-2.0-flash-001", + type="code", ) assert "GOOGLE_API_KEY=new-key" in (agent_folder / ".env").read_text() @@ -108,12 +111,16 @@ def test_generate_files_permission_error( os, "makedirs", lambda *a, **k: (_ for _ in ()).throw(PermissionError()) ) with pytest.raises(PermissionError): - cli_create._generate_files(str(agent_folder), model="gemini-2.0-flash-001") + cli_create._generate_files( + str(agent_folder), model="gemini-2.0-flash-001", type="code" + ) def test_generate_files_no_params(agent_folder: Path) -> None: """No backend parameters → minimal .env file is generated.""" - cli_create._generate_files(str(agent_folder), model="gemini-2.0-flash-001") + cli_create._generate_files( + str(agent_folder), model="gemini-2.0-flash-001", type="code" + ) env_content = (agent_folder / ".env").read_text() for key in ( @@ -147,7 +154,7 @@ def test_run_cmd_overwrite_reject( google_api_key=None, google_cloud_project=None, google_cloud_region=None, - type=cli_create.Type.CODE, + type="code", ) @@ -166,7 +173,7 @@ def test_run_cmd_with_type_config( google_api_key="test-key", google_cloud_project=None, google_cloud_region=None, - type=cli_create.Type.CONFIG, + type="config", ) agent_dir = tmp_path / agent_name From d9ce2e691c2d77d2bb8977c3bc1359a3fdeca3af Mon Sep 17 00:00:00 2001 From: Liang Wu Date: Wed, 6 Aug 2025 20:45:22 -0700 Subject: [PATCH 36/41] feat(config): implement config and from_config for MCPToolset The connection_params argument in the constructor is split into four arguments in the config class because some of them 
have identical fields. In order to identify which is which, a separate name is more convenient. PiperOrigin-RevId: 791965995 --- src/google/adk/tools/__init__.py | 17 ++++- src/google/adk/tools/mcp_tool/mcp_toolset.py | 69 ++++++++++++++++++++ 2 files changed, 85 insertions(+), 1 deletion(-) diff --git a/src/google/adk/tools/__init__.py b/src/google/adk/tools/__init__.py index 1b749bb1f..bb26d4941 100644 --- a/src/google/adk/tools/__init__.py +++ b/src/google/adk/tools/__init__.py @@ -11,7 +11,8 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - +import logging +import sys from ..auth.auth_tool import AuthToolArguments from .agent_tool import AgentTool @@ -52,3 +53,17 @@ 'ToolContext', 'transfer_to_agent', ] + + +if sys.version_info < (3, 10): + logger = logging.getLogger('google_adk.' + __name__) + logger.warning( + 'MCP requires Python 3.10 or above. Please upgrade your Python' + ' version in order to use it.' + ) +else: + from .mcp_tool.mcp_toolset import MCPToolset + + __all__.extend([ + 'MCPToolset', + ]) diff --git a/src/google/adk/tools/mcp_tool/mcp_toolset.py b/src/google/adk/tools/mcp_tool/mcp_toolset.py index 2fc9d640a..92cc3e970 100644 --- a/src/google/adk/tools/mcp_tool/mcp_toolset.py +++ b/src/google/adk/tools/mcp_tool/mcp_toolset.py @@ -21,12 +21,17 @@ from typing import TextIO from typing import Union +from pydantic import model_validator +from typing_extensions import override + from ...agents.readonly_context import ReadonlyContext from ...auth.auth_credential import AuthCredential from ...auth.auth_schemes import AuthScheme from ..base_tool import BaseTool from ..base_toolset import BaseToolset from ..base_toolset import ToolPredicate +from ..tool_configs import BaseToolConfig +from ..tool_configs import ToolArgsConfig from .mcp_session_manager import MCPSessionManager from .mcp_session_manager import retry_on_closed_resource from .mcp_session_manager import SseConnectionParams @@ -178,3 +183,67 @@ async def close(self) -> None: except Exception as e: # Log the error but don't re-raise to avoid blocking shutdown print(f"Warning: Error during MCPToolset cleanup: {e}", file=self._errlog) + + @override + @classmethod + def from_config( + cls: type[MCPToolset], config: ToolArgsConfig, config_abs_path: str + ) -> MCPToolset: + """Creates an MCPToolset from a configuration object.""" + mcp_toolset_config = MCPToolsetConfig.model_validate(config.model_dump()) + + if mcp_toolset_config.stdio_server_params: + connection_params = mcp_toolset_config.stdio_server_params + elif mcp_toolset_config.stdio_connection_params: + connection_params = mcp_toolset_config.stdio_connection_params + elif mcp_toolset_config.sse_connection_params: + connection_params = mcp_toolset_config.sse_connection_params + elif mcp_toolset_config.streamable_http_connection_params: + connection_params = mcp_toolset_config.streamable_http_connection_params + else: + raise ValueError("No connection params found in MCPToolsetConfig.") + + return cls( + connection_params=connection_params, + tool_filter=mcp_toolset_config.tool_filter, + auth_scheme=mcp_toolset_config.auth_scheme, + auth_credential=mcp_toolset_config.auth_credential, + ) + + +class MCPToolsetConfig(BaseToolConfig): + """The config for MCPToolset.""" + + stdio_server_params: Optional[StdioServerParameters] = None + + stdio_connection_params: Optional[StdioConnectionParams] = None + + sse_connection_params: 
Optional[SseConnectionParams] = None + + streamable_http_connection_params: Optional[ + StreamableHTTPConnectionParams + ] = None + + tool_filter: Optional[List[str]] = None + + auth_scheme: Optional[AuthScheme] = None + + auth_credential: Optional[AuthCredential] = None + + @model_validator(mode="after") + def _check_only_one_params_field(self): + param_fields = [ + self.stdio_server_params, + self.stdio_connection_params, + self.sse_connection_params, + self.streamable_http_connection_params, + ] + populated_fields = [f for f in param_fields if f is not None] + + if len(populated_fields) != 1: + raise ValueError( + "Exactly one of stdio_server_params, stdio_connection_params," + " sse_connection_params, streamable_http_connection_params must be" + " set." + ) + return self From e0a8355219f7e2f98d177a0035b0b5708ede22bf Mon Sep 17 00:00:00 2001 From: Liang Wu Date: Wed, 6 Aug 2025 21:49:40 -0700 Subject: [PATCH 37/41] chore(config): add the public URL of JSON schema file to template PiperOrigin-RevId: 791983175 --- src/google/adk/cli/cli_create.py | 1 + 1 file changed, 1 insertion(+) diff --git a/src/google/adk/cli/cli_create.py b/src/google/adk/cli/cli_create.py index fb6f19b6e..9085586e1 100644 --- a/src/google/adk/cli/cli_create.py +++ b/src/google/adk/cli/cli_create.py @@ -37,6 +37,7 @@ """ _AGENT_CONFIG_TEMPLATE = """\ +# yaml-language-server: $schema=https://raw.githubusercontent.com/google/adk-python/refs/heads/main/src/google/adk/agents/config_schemas/AgentConfig.json name: root_agent description: A helpful assistant for user questions. instruction: Answer user questions to the best of your knowledge From f6a022cda3d774cdb5cf763184c52f0b3a20f01f Mon Sep 17 00:00:00 2001 From: Xuan Yang Date: Thu, 7 Aug 2025 00:18:53 -0700 Subject: [PATCH 38/41] chore: only run ADK Answering Agent automatically when Q&A discussion is created PiperOrigin-RevId: 792025646 --- .github/workflows/discussion_answering.yml | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/.github/workflows/discussion_answering.yml b/.github/workflows/discussion_answering.yml index aaaa614dc..f942cd09a 100644 --- a/.github/workflows/discussion_answering.yml +++ b/.github/workflows/discussion_answering.yml @@ -3,11 +3,10 @@ name: ADK Answering Agent for Discussions on: discussion: types: [created] - discussion_comment: - types: [created] jobs: agent-answer-questions: + if: github.event.discussion.category.name == 'Q&A' runs-on: ubuntu-latest steps: From 25b2806301bb03c6af5c56cde3116783403e8cc6 Mon Sep 17 00:00:00 2001 From: Google Team Member Date: Thu, 7 Aug 2025 00:30:11 -0700 Subject: [PATCH 39/41] fix: accommodate for open api schema that do not have any 'properties' PiperOrigin-RevId: 792028582 --- src/google/adk/tools/_gemini_schema_util.py | 2 +- tests/unittests/tools/test_gemini_schema_util.py | 8 ++++++++ 2 files changed, 9 insertions(+), 1 deletion(-) diff --git a/src/google/adk/tools/_gemini_schema_util.py b/src/google/adk/tools/_gemini_schema_util.py index 020e38fce..b7418da00 100644 --- a/src/google/adk/tools/_gemini_schema_util.py +++ b/src/google/adk/tools/_gemini_schema_util.py @@ -120,7 +120,7 @@ def _sanitize_schema_formats_for_gemini( snake_case_schema[field_name] = [ _sanitize_schema_formats_for_gemini(value) for value in field_value ] - elif field_name in dict_schema_field_names: + elif field_name in dict_schema_field_names and field_value is not None: snake_case_schema[field_name] = { key: _sanitize_schema_formats_for_gemini(value) for key, value in field_value.items() diff --git 
a/tests/unittests/tools/test_gemini_schema_util.py b/tests/unittests/tools/test_gemini_schema_util.py index 71143debc..31057a41a 100644 --- a/tests/unittests/tools/test_gemini_schema_util.py +++ b/tests/unittests/tools/test_gemini_schema_util.py @@ -511,6 +511,14 @@ def test_sanitize_schema_formats_for_gemini_nullable(self): "type": "object", } + def test_to_gemini_schema_properties_is_none(self): + """Tests schema conversion when 'properties' field is None.""" + openapi_schema = {"type": "object", "properties": None} + gemini_schema = _to_gemini_schema(openapi_schema) + assert isinstance(gemini_schema, Schema) + assert gemini_schema.type == Type.OBJECT + assert gemini_schema.properties is None + class TestToSnakeCase: From 83e5df78620789a0db745ee8279a522fa6cb18b9 Mon Sep 17 00:00:00 2001 From: Yeesian Ng Date: Thu, 7 Aug 2025 08:26:40 -0700 Subject: [PATCH 40/41] fix: Set the `agent_framework` when initializing module-based agent engine PiperOrigin-RevId: 792170095 --- src/google/adk/cli/cli_deploy.py | 1 + 1 file changed, 1 insertion(+) diff --git a/src/google/adk/cli/cli_deploy.py b/src/google/adk/cli/cli_deploy.py index 5dc730e71..e3b4661e9 100644 --- a/src/google/adk/cli/cli_deploy.py +++ b/src/google/adk/cli/cli_deploy.py @@ -433,6 +433,7 @@ def to_agent_engine( 'stream': ['stream_query', 'streaming_agent_run_with_events'], }, sys_paths=[temp_folder[1:]], + agent_framework='google-adk', ) agent_config = dict( agent_engine=agent_engine, From 20c30d5819acca4a026f54cc709acc3348dcb5f3 Mon Sep 17 00:00:00 2001 From: Shangjie Chen Date: Thu, 7 Aug 2025 10:04:25 -0700 Subject: [PATCH 41/41] chore: Bump version number to 1.10.0 PiperOrigin-RevId: 792206610 --- CHANGELOG.md | 35 +++++++++++++++++++++++++++++++++++ src/google/adk/version.py | 2 +- 2 files changed, 36 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 4b5afb99f..133fc7611 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,40 @@ # Changelog +## [1.10.0](https://github.com/google/adk-python/compare/v1.9.0...v1.10.0) (2025-08-07) + +### Features + +* [Live] Implement Live Session Resumption ([71fbc92](https://github.com/google/adk-python/commit/71fbc9275b3d74700ec410cb4155ba0cb18580b7)) +* [Tool] Support parallel execution of parallel function calls ([57cd41f](https://github.com/google/adk-python/commit/57cd41f424b469fb834bb8f2777b5f7be9aa6cdf)) +* [Models] Allow max tokens to be customizable in Claude ([7556ebc](https://github.com/google/adk-python/commit/7556ebc76abd3c776922c2803aed831661cf7f82)) +* [Tool] Create enterprise_web_search_tool as a tool instance ([0e28d64](https://github.com/google/adk-python/commit/0e28d64712e481cfd3b964be0166f529657024f6)) + +### Bug Fixes + +* Fix shared default plugin manager and cost manager instances among multiple invocations ([423542a](https://github.com/google/adk-python/commit/423542a43fb8316195e9f79d97f87593751bebd3)) +* Correct the type annotation in anthropic_llm implementation ([97318bc](https://github.com/google/adk-python/commit/97318bcd199acdacadfe8664da3fbfc3c806cdd2)) +* Fix adk deploy cloud_run cli, which was broken in v1.9.0 ([e41dbcc](https://github.com/google/adk-python/commit/e41dbccf7f610e249108f9321f60f71fe2cc10f4)) +* Remove thoughts from contents in llm requests from history contents ([d620bcb](https://github.com/google/adk-python/commit/d620bcb384d3068228ea2059fb70274e68e69682)) +* Annotate response type as None for transfer_to_agent tool ([86a4487](https://github.com/google/adk-python/commit/86a44873e9b2dfc7e62fa31a9ac3be57c0bbff7b)) +* Fix 
incompatible a2a sdk changes ([faadef1](https://github.com/google/adk-python/commit/faadef167ee8e4dd1faf4da5685a577c3155556e)) +* Fix adk cli options and method parameters mismatching ([8ef2177](https://github.com/google/adk-python/commit/8ef2177658fbfc74b1a74b0c3ea8150bae866796)) + +### Improvements + +* Add Github workflow config for the ADK Answering agent ([8dc0c94](https://github.com/google/adk-python/commit/8dc0c949afb9024738ff7ac1b2c19282175c3200)) +* Import AGENT_CARD_WELL_KNOWN_PATH from adk instead of from a2a directly ([37dae9b](https://github.com/google/adk-python/commit/37dae9b631db5060770b66fce0e25cf0ffb56948)) +* Make `LlmRequest.LiveConnectConfig` field default to a factory ([74589a1](https://github.com/google/adk-python/commit/74589a1db7df65e319d1ad2f0676ee0cf5d6ec1d)) +* Update the prompt to make the ADK Answering Agent more objective ([2833030](https://github.com/google/adk-python/commit/283303032a174d51b8d72f14df83c794d66cb605)) +* Add sample agent for testing parallel functions execution ([90b9193](https://github.com/google/adk-python/commit/90b9193a20499b8dd7f57d119cda4c534fcfda10)) +* Hide the ask_data_insights tool until the API is publicly available ([bead607](https://github.com/google/adk-python/commit/bead607364be7ac8109357c9d3076d9b345e9e8a)) +* Change `LlmRequest.config`'s default value to be `types.GenerateContentConfig()` ([041f04e](https://github.com/google/adk-python/commit/041f04e89cee30532facccce4900d10f1b8c69ce)) +* Prevent triggering of _load_from_yaml_config in AgentLoader ([db975df](https://github.com/google/adk-python/commit/db975dfe2a09a6d056d02bc03c1247ac10f6da7d)) + +### Documentation + +* Fix typos ([16a15c8](https://github.com/google/adk-python/commit/16a15c8709b47c9bebe7cffe888e8e7e48ec605a)) + + ## [1.9.0](https://github.com/google/adk-python/compare/v1.8.0...v1.9.0) (2025-07-31) diff --git a/src/google/adk/version.py b/src/google/adk/version.py index 3354d73d1..6e3a62e95 100644 --- a/src/google/adk/version.py +++ b/src/google/adk/version.py @@ -13,4 +13,4 @@ # limitations under the License. # version: major.minor.patch -__version__ = "1.9.0" +__version__ = "1.10.0"
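
Patch 35 works because a case-insensitive click.Choice hands the callback the choice in its declared spelling ("CODE"/"CONFIG"), so the added .lower() is what the downstream code keyed on "code"/"config" actually receives. Below is a minimal standalone sketch of that pattern, assuming Click 8.x normalization behavior; the `create` command and the `agent_type` parameter name are illustrative, not the real ADK CLI.

import click


@click.command()
@click.option(
    "--type",
    "agent_type",  # bind to a name that does not shadow the builtin `type`
    type=click.Choice(["CODE", "CONFIG"], case_sensitive=False),
    default="CODE",
    show_default=True,
)
def create(agent_type: str) -> None:
    # click returns the matched choice in its declared upper-case form,
    # so normalize before handing it to code that expects "code"/"config".
    click.echo(f"creating agent of type {agent_type.lower()}")


if __name__ == "__main__":
    create()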
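
Patch 36's MCPToolsetConfig splits connection_params into four optional fields and relies on an "exactly one populated" model validator. The same pattern can be reproduced with plain pydantic v2 models; the sketch below is a self-contained illustration with made-up class and field names, not ADK code.

from typing import Optional

from pydantic import BaseModel, ValidationError, model_validator


class StdioParams(BaseModel):
    command: str


class SseParams(BaseModel):
    url: str


class ExampleToolsetConfig(BaseModel):
    stdio_params: Optional[StdioParams] = None
    sse_params: Optional[SseParams] = None

    @model_validator(mode="after")
    def _check_exactly_one(self) -> "ExampleToolsetConfig":
        # Require exactly one of the connection fields to be set.
        populated = [
            f for f in (self.stdio_params, self.sse_params) if f is not None
        ]
        if len(populated) != 1:
            raise ValueError("Exactly one connection field must be set.")
        return self


# Valid: exactly one field populated.
ExampleToolsetConfig(stdio_params=StdioParams(command="npx"))

# Invalid: zero fields populated; pydantic wraps the ValueError in a
# ValidationError at construction time.
try:
    ExampleToolsetConfig()
except ValidationError as exc:
    print(type(exc).__name__)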