chore: reformat the codes using autoformat.sh

PiperOrigin-RevId: 762004002
Authored by Xiang (Sean) Zhou on 2025-05-22 09:43:03 -07:00, committed by Copybara-Service
parent a2263b1808
commit ff8a3c9b43
23 changed files with 496 additions and 447 deletions

View File

@@ -21,6 +21,7 @@ from typing import List
 from typing import Optional
 from typing import Union
 from unittest import mock
 from google.adk.agents.base_agent import BaseAgent
 from google.adk.agents.callback_context import CallbackContext
 from google.adk.agents.invocation_context import InvocationContext
@@ -30,6 +31,7 @@ from google.genai import types
 import pytest
 import pytest_mock
 from typing_extensions import override
 from .. import testing_utils

View File

@@ -1,7 +1,11 @@
-import pytest
-from unittest.mock import MagicMock, AsyncMock, patch
-from google.adk.agents.live_request_queue import LiveRequest, LiveRequestQueue
+from unittest.mock import AsyncMock
+from unittest.mock import MagicMock
+from unittest.mock import patch
+from google.adk.agents.live_request_queue import LiveRequest
+from google.adk.agents.live_request_queue import LiveRequestQueue
 from google.genai import types
+import pytest
 @pytest.mark.asyncio

View File

@@ -15,7 +15,8 @@
 """Unit tests for canonical_xxx fields in LlmAgent."""
 from typing import Any
-from typing import Optional, cast
+from typing import cast
+from typing import Optional
 from google.adk.agents.callback_context import CallbackContext
 from google.adk.agents.invocation_context import InvocationContext
@@ -146,6 +147,7 @@ async def test_canonical_global_instruction():
   assert canonical_global_instruction == 'global instruction: state_value'
   assert bypass_state_injection
 async def test_async_canonical_global_instruction():
   async def _global_instruction_provider(ctx: ReadonlyContext) -> str:
     return f'global instruction: {ctx.state["state_var"]}'

View File

@@ -1,7 +1,8 @@
-import pytest
-from unittest.mock import MagicMock
 from types import MappingProxyType
+from unittest.mock import MagicMock
 from google.adk.agents.readonly_context import ReadonlyContext
+import pytest
 @pytest.fixture

View File

@@ -1,8 +1,10 @@
-import pytest
-import sys
 import logging
-from unittest.mock import patch, ANY
+import sys
+from unittest.mock import ANY
+from unittest.mock import patch
 from google.adk.agents.run_config import RunConfig
+import pytest
 def test_validate_max_llm_calls_valid():

View File

@@ -17,12 +17,11 @@
 import enum
 from typing import Optional
 from typing import Union
+from unittest import mock
 from google.adk.artifacts import GcsArtifactService
 from google.adk.artifacts import InMemoryArtifactService
 from google.genai import types
-from unittest import mock
 import pytest
 Enum = enum.Enum

View File

@@ -15,19 +15,18 @@
 import copy
 from unittest.mock import patch
-import pytest
 from fastapi.openapi.models import APIKey
 from fastapi.openapi.models import APIKeyIn
 from fastapi.openapi.models import OAuth2
 from fastapi.openapi.models import OAuthFlowAuthorizationCode
 from fastapi.openapi.models import OAuthFlows
 from google.adk.auth.auth_credential import AuthCredential
 from google.adk.auth.auth_credential import AuthCredentialTypes
 from google.adk.auth.auth_credential import OAuth2Auth
 from google.adk.auth.auth_handler import AuthHandler
 from google.adk.auth.auth_schemes import OpenIdConnectWithConfig
 from google.adk.auth.auth_tool import AuthConfig
+import pytest
 # Mock classes for testing

View File

@@ -16,16 +16,19 @@
 from __future__ import annotations
-import click
 import json
-import pytest
+from pathlib import Path
 import sys
 import types
+from typing import Any
+from typing import Dict
+from typing import List
+from typing import Tuple
+import click
 import google.adk.cli.cli as cli
-from pathlib import Path
-from typing import Any, Dict, List, Tuple
+import pytest
 # Helpers
 class _Recorder:
@@ -52,10 +55,12 @@ def _patch_types_and_runner(monkeypatch: pytest.MonkeyPatch) -> None:
   # Dummy Part / Content
   class _Part:
     def __init__(self, text: str | None = "") -> None:
       self.text = text
   class _Content:
     def __init__(self, role: str, parts: List[_Part]) -> None:
       self.role = role
       self.parts = parts
@@ -65,7 +70,9 @@ def _patch_types_and_runner(monkeypatch: pytest.MonkeyPatch) -> None:
   # Fake Runner yielding a single assistant echo
   class _FakeRunner:
-    def __init__(self, *a: Any, **k: Any) -> None: ...
+    def __init__(self, *a: Any, **k: Any) -> None:
+      ...
     async def run_async(self, *a: Any, **k: Any):
       message = a[2] if len(a) >= 3 else k["new_message"]
@@ -110,7 +117,9 @@ def fake_agent(tmp_path: Path, monkeypatch: pytest.MonkeyPatch):
 # _run_input_file
 @pytest.mark.asyncio
-async def test_run_input_file_outputs(tmp_path: Path, monkeypatch: pytest.MonkeyPatch) -> None:
+async def test_run_input_file_outputs(
+    tmp_path: Path, monkeypatch: pytest.MonkeyPatch
+) -> None:
   """run_input_file should echo user & assistant messages and return a populated session."""
   recorder: List[str] = []
@@ -164,7 +173,9 @@ async def test_run_cli_with_input_file(fake_agent, tmp_path: Path) -> None:
 # _run_cli (interactive + save session branch)
 @pytest.mark.asyncio
-async def test_run_cli_save_session(fake_agent, tmp_path: Path, monkeypatch: pytest.MonkeyPatch) -> None:
+async def test_run_cli_save_session(
+    fake_agent, tmp_path: Path, monkeypatch: pytest.MonkeyPatch
+) -> None:
   """run_cli should save a session file when save_session=True."""
   parent_dir, folder_name = fake_agent
@@ -191,7 +202,9 @@ async def test_run_cli_save_session(fake_agent, tmp_path: Path, monkeypatch: pyt
 @pytest.mark.asyncio
-async def test_run_interactively_whitespace_and_exit(tmp_path: Path, monkeypatch: pytest.MonkeyPatch) -> None:
+async def test_run_interactively_whitespace_and_exit(
+    tmp_path: Path, monkeypatch: pytest.MonkeyPatch
+) -> None:
   """run_interactively should skip blank input, echo once, then exit."""
   # make a session that belongs to dummy agent
   svc = cli.InMemorySessionService()

View File

@@ -17,15 +17,18 @@
 from __future__ import annotations
-import click
 import os
-import pytest
-import subprocess
-import google.adk.cli.cli_create as cli_create
 from pathlib import Path
-from typing import Any, Dict, List, Tuple
+import subprocess
+from typing import Any
+from typing import Dict
+from typing import List
+from typing import Tuple
+import click
+import google.adk.cli.cli_create as cli_create
+import pytest
 # Helpers
 class _Recorder:
@@ -97,9 +100,13 @@ def test_generate_files_overwrite(agent_folder: Path) -> None:
   assert "GOOGLE_API_KEY=new-key" in (agent_folder / ".env").read_text()
-def test_generate_files_permission_error(monkeypatch: pytest.MonkeyPatch, agent_folder: Path) -> None:
+def test_generate_files_permission_error(
+    monkeypatch: pytest.MonkeyPatch, agent_folder: Path
+) -> None:
   """PermissionError raised by os.makedirs should propagate."""
-  monkeypatch.setattr(os, "makedirs", lambda *a, **k: (_ for _ in ()).throw(PermissionError()))
+  monkeypatch.setattr(
+      os, "makedirs", lambda *a, **k: (_ for _ in ()).throw(PermissionError())
+  )
   with pytest.raises(PermissionError):
     cli_create._generate_files(str(agent_folder), model="gemini-2.0-flash-001")
@@ -109,12 +116,19 @@ def test_generate_files_no_params(agent_folder: Path) -> None:
   cli_create._generate_files(str(agent_folder), model="gemini-2.0-flash-001")
   env_content = (agent_folder / ".env").read_text()
-  for key in ("GOOGLE_API_KEY", "GOOGLE_CLOUD_PROJECT", "GOOGLE_CLOUD_LOCATION", "GOOGLE_GENAI_USE_VERTEXAI"):
+  for key in (
+      "GOOGLE_API_KEY",
+      "GOOGLE_CLOUD_PROJECT",
+      "GOOGLE_CLOUD_LOCATION",
+      "GOOGLE_GENAI_USE_VERTEXAI",
+  ):
     assert key not in env_content
 # run_cmd
-def test_run_cmd_overwrite_reject(monkeypatch: pytest.MonkeyPatch, tmp_path: Path) -> None:
+def test_run_cmd_overwrite_reject(
+    monkeypatch: pytest.MonkeyPatch, tmp_path: Path
+) -> None:
   """User rejecting overwrite should trigger click.Abort."""
   agent_name = "agent"
   agent_dir = tmp_path / agent_name
@@ -143,7 +157,9 @@ def test_prompt_for_google_cloud(monkeypatch: pytest.MonkeyPatch) -> None:
   assert cli_create._prompt_for_google_cloud(None) == "test-proj"
-def test_prompt_for_google_cloud_region(monkeypatch: pytest.MonkeyPatch) -> None:
+def test_prompt_for_google_cloud_region(
+    monkeypatch: pytest.MonkeyPatch,
+) -> None:
   """Prompt should return the region input."""
   monkeypatch.setattr(click, "prompt", lambda *a, **k: "asia-northeast1")
   assert cli_create._prompt_for_google_cloud_region(None) == "asia-northeast1"
@@ -175,23 +191,28 @@ def test_prompt_for_model_other(monkeypatch: pytest.MonkeyPatch) -> None:
   assert called.get("secho") is True
 # Backend selection helper
 def test_prompt_to_choose_backend_api(monkeypatch: pytest.MonkeyPatch) -> None:
   """Choosing API-key backend returns (api_key, None, None)."""
   monkeypatch.setattr(click, "prompt", lambda *a, **k: "1")
-  monkeypatch.setattr(cli_create, "_prompt_for_google_api_key", lambda _v: "api-key")
+  monkeypatch.setattr(
+      cli_create, "_prompt_for_google_api_key", lambda _v: "api-key"
+  )
   api_key, proj, region = cli_create._prompt_to_choose_backend(None, None, None)
   assert api_key == "api-key"
   assert proj is None and region is None
-def test_prompt_to_choose_backend_vertex(monkeypatch: pytest.MonkeyPatch) -> None:
+def test_prompt_to_choose_backend_vertex(
+    monkeypatch: pytest.MonkeyPatch,
+) -> None:
   """Choosing Vertex backend returns (None, project, region)."""
   monkeypatch.setattr(click, "prompt", lambda *a, **k: "2")
   monkeypatch.setattr(cli_create, "_prompt_for_google_cloud", lambda _v: "proj")
-  monkeypatch.setattr(cli_create, "_prompt_for_google_cloud_region", lambda _v: "region")
+  monkeypatch.setattr(
+      cli_create, "_prompt_for_google_cloud_region", lambda _v: "region"
+  )
   api_key, proj, region = cli_create._prompt_to_choose_backend(None, None, None)
   assert api_key is None
@@ -199,7 +220,6 @@ def test_prompt_to_choose_backend_vertex(monkeypatch: pytest.MonkeyPatch) -> Non
   assert region == "region"
 # prompt_str
 def test_prompt_str_non_empty(monkeypatch: pytest.MonkeyPatch) -> None:
   """_prompt_str should retry until a non-blank string is provided."""
@@ -208,9 +228,10 @@ def test_prompt_str_non_empty(monkeypatch: pytest.MonkeyPatch) -> None:
   assert cli_create._prompt_str("dummy") == "valid"
 # gcloud fallback helpers
-def test_get_gcp_project_from_gcloud_fail(monkeypatch: pytest.MonkeyPatch) -> None:
+def test_get_gcp_project_from_gcloud_fail(
+    monkeypatch: pytest.MonkeyPatch,
+) -> None:
   """Failure of gcloud project lookup should return empty string."""
   monkeypatch.setattr(
       subprocess,
@@ -220,11 +241,15 @@ def test_get_gcp_project_from_gcloud_fail(monkeypatch: pytest.MonkeyPatch) -> No
   assert cli_create._get_gcp_project_from_gcloud() == ""
-def test_get_gcp_region_from_gcloud_fail(monkeypatch: pytest.MonkeyPatch) -> None:
+def test_get_gcp_region_from_gcloud_fail(
+    monkeypatch: pytest.MonkeyPatch,
+) -> None:
   """CalledProcessError should result in empty region string."""
   monkeypatch.setattr(
       subprocess,
       "run",
-      lambda *_a, **_k: (_ for _ in ()).throw(subprocess.CalledProcessError(1, "gcloud")),
+      lambda *_a, **_k: (_ for _ in ()).throw(
+          subprocess.CalledProcessError(1, "gcloud")
+      ),
   )
   assert cli_create._get_gcp_region_from_gcloud() == ""

View File

@@ -17,19 +17,23 @@
 from __future__ import annotations
-import click
+from pathlib import Path
 import shutil
-import pytest
 import subprocess
 import tempfile
 import types
-import google.adk.cli.cli_deploy as cli_deploy
-from pathlib import Path
-from typing import Any, Callable, Dict, List, Tuple
+from typing import Any
+from typing import Callable
+from typing import Dict
+from typing import List
+from typing import Tuple
 from unittest import mock
+import click
+import google.adk.cli.cli_deploy as cli_deploy
+import pytest
 # Helpers
 class _Recorder:
   """A callable object that records every invocation."""
@@ -128,7 +132,9 @@ def test_to_cloud_run_happy_path(
   )
   # Assertions
-  assert len(copy_recorder.calls) == 1, "Agent sources must be copied exactly once."
+  assert (
+      len(copy_recorder.calls) == 1
+  ), "Agent sources must be copied exactly once."
   assert run_recorder.calls, "gcloud command should be executed at least once."
   assert (tmp_dir / "Dockerfile").exists(), "Dockerfile must be generated."

View File

@@ -137,11 +137,15 @@ def test_get_input_files_not_exists(empty_state: State):
 def test_add_input_files_new(empty_state: State):
   """Test adding input files to an empty session state."""
   ctx = CodeExecutorContext(empty_state)
-  new_files = [File(name="new.dat", content="Yg==", mime_type="application/octet-stream")]
-  ctx.add_input_files(new_files)
-  assert empty_state["_code_executor_input_files"] == [
-      {"name": "new.dat", "content": "Yg==", "mime_type": "application/octet-stream"}
-  ]
+  new_files = [
+      File(name="new.dat", content="Yg==", mime_type="application/octet-stream")
+  ]
+  ctx.add_input_files(new_files)
+  assert empty_state["_code_executor_input_files"] == [{
+      "name": "new.dat",
+      "content": "Yg==",
+      "mime_type": "application/octet-stream",
+  }]
 def test_add_input_files_append(context_with_data: CodeExecutorContext):
@@ -239,9 +243,7 @@ def test_reset_error_count_no_error_key(empty_state: State):
 def test_update_code_execution_result_new_invocation(empty_state: State):
   """Test updating code execution result for a new invocation."""
   ctx = CodeExecutorContext(empty_state)
-  ctx.update_code_execution_result(
-      "inv1", "print('hi')", "hi", ""
-  )
+  ctx.update_code_execution_result("inv1", "print('hi')", "hi", "")
   results = empty_state["_code_execution_results"]["inv1"]
   assert len(results) == 1
   assert results[0]["code"] == "print('hi')"

View File

@@ -15,6 +15,7 @@
 """Testings for the Trajectory Evaluator."""
 import math
 from google.adk.evaluation.trajectory_evaluator import TrajectoryEvaluator
 import pytest

View File

@@ -18,7 +18,8 @@ import os
 import sys
 import time
 import types as ptypes
-from unittest.mock import MagicMock, patch
+from unittest.mock import MagicMock
+from unittest.mock import patch
 from fastapi.testclient import TestClient
 from google.adk.agents.base_agent import BaseAgent
@@ -31,7 +32,6 @@ from google.adk.sessions.base_session_service import ListSessionsResponse
 from google.genai import types
 import pytest
 # Configure logging to help diagnose server startup issues
 logging.basicConfig(
     level=logging.INFO,

View File

@@ -100,6 +100,7 @@ async def test_function_system_instruction():
       " test_id."
   )
 @pytest.mark.asyncio
 async def test_async_function_system_instruction():
   async def build_function_instruction(

View File

@@ -15,6 +15,7 @@
 from unittest.mock import AsyncMock
 from unittest.mock import Mock
 from google.adk.models.lite_llm import _content_to_message_param
 from google.adk.models.lite_llm import _function_declaration_to_tool_param
 from google.adk.models.lite_llm import _get_content
@@ -169,6 +170,7 @@ STREAMING_MODEL_RESPONSE = [
     ),
 ]
 @pytest.fixture
 def mock_response():
   return ModelResponse(
@@ -269,52 +271,54 @@ litellm_append_user_content_test_cases = [
             contents=[
                 types.Content(
                     role="developer",
-                    parts=[types.Part.from_text(text="Test prompt")]
+                    parts=[types.Part.from_text(text="Test prompt")],
                 )
             ]
         ),
         2,
-        id="litellm request without user content"
+        id="litellm request without user content",
     ),
     pytest.param(
         LlmRequest(
            contents=[
                 types.Content(
                     role="user",
-                    parts=[types.Part.from_text(text="user prompt")]
+                    parts=[types.Part.from_text(text="user prompt")],
                 )
             ]
         ),
         1,
-        id="litellm request with user content"
+        id="litellm request with user content",
     ),
     pytest.param(
         LlmRequest(
            contents=[
                 types.Content(
                     role="model",
-                    parts=[types.Part.from_text(text="model prompt")]
+                    parts=[types.Part.from_text(text="model prompt")],
                 ),
                 types.Content(
                     role="user",
-                    parts=[types.Part.from_text(text="user prompt")]
+                    parts=[types.Part.from_text(text="user prompt")],
                 ),
                 types.Content(
                     role="model",
-                    parts=[types.Part.from_text(text="model prompt")]
-                )
+                    parts=[types.Part.from_text(text="model prompt")],
+                ),
             ]
         ),
         4,
-        id="user content is not the last message scenario"
-    )
+        id="user content is not the last message scenario",
+    ),
 ]
 @pytest.mark.parametrize(
-    "llm_request, expected_output",
-    litellm_append_user_content_test_cases
+    "llm_request, expected_output", litellm_append_user_content_test_cases
 )
-def test_maybe_append_user_content(lite_llm_instance, llm_request, expected_output):
+def test_maybe_append_user_content(
+    lite_llm_instance, llm_request, expected_output
+):
   lite_llm_instance._maybe_append_user_content(llm_request)

View File

@@ -13,7 +13,6 @@
 # limitations under the License.
 import enum
-import pytest
 from google.adk.events import Event
 from google.adk.events import EventActions
@@ -21,6 +20,7 @@ from google.adk.sessions import DatabaseSessionService
 from google.adk.sessions import InMemorySessionService
 from google.adk.sessions.base_session_service import GetSessionConfig
 from google.genai import types
+import pytest
 class SessionServiceType(enum.Enum):

View File

@@ -24,7 +24,6 @@ from google.adk.sessions import VertexAiSessionService
 from google.genai import types
 import pytest
 MOCK_SESSION_JSON_1 = {
     'name': (
         'projects/test-project/locations/test-location/'

View File

@@ -14,7 +14,9 @@
 import base64
 import json
-from unittest.mock import MagicMock, patch
+from unittest.mock import MagicMock
+from unittest.mock import patch
 from google.adk.tools.apihub_tool.clients.apihub_client import APIHubClient
 import pytest
 from requests.exceptions import HTTPError
@@ -464,9 +466,7 @@ class TestAPIHubClient:
         MagicMock(
             status_code=200,
             json=lambda: {
-                "name": (
-                    "projects/test-project/locations/us-central1/apis/api1/versions/v1"
-                ),
+                "name": "projects/test-project/locations/us-central1/apis/api1/versions/v1",
                 "specs": [],
             },
         ), # No specs

View File

@@ -16,7 +16,8 @@ from typing import Any
 from typing import Dict
 from typing import List
-from fastapi.openapi.models import Response, Schema
+from fastapi.openapi.models import Response
+from fastapi.openapi.models import Schema
 from google.adk.tools.openapi_tool.common.common import ApiParameter
 from google.adk.tools.openapi_tool.common.common import PydocHelper
 from google.adk.tools.openapi_tool.common.common import rename_python_keywords

View File

@@ -371,9 +371,7 @@ def test_parse_external_ref_raises_error(openapi_spec_generator):
     "content": {
         "application/json": {
             "schema": {
-                "$ref": (
-                    "external_file.json#/components/schemas/ExternalSchema"
-                )
+                "$ref": "external_file.json#/components/schemas/ExternalSchema"
             }
         }
     },

View File

@@ -14,9 +14,11 @@
 import json
-from unittest.mock import MagicMock, patch
+from unittest.mock import MagicMock
+from unittest.mock import patch
-from fastapi.openapi.models import MediaType, Operation
+from fastapi.openapi.models import MediaType
+from fastapi.openapi.models import Operation
 from fastapi.openapi.models import Parameter as OpenAPIParameter
 from fastapi.openapi.models import RequestBody
 from fastapi.openapi.models import Schema as OpenAPISchema
@@ -25,13 +27,13 @@ from google.adk.tools.openapi_tool.auth.auth_helpers import token_to_scheme_cred
 from google.adk.tools.openapi_tool.common.common import ApiParameter
 from google.adk.tools.openapi_tool.openapi_spec_parser.openapi_spec_parser import OperationEndpoint
 from google.adk.tools.openapi_tool.openapi_spec_parser.operation_parser import OperationParser
-from google.adk.tools.openapi_tool.openapi_spec_parser.rest_api_tool import (
-    RestApiTool,
-    snake_to_lower_camel,
-    to_gemini_schema,
-)
+from google.adk.tools.openapi_tool.openapi_spec_parser.rest_api_tool import RestApiTool
+from google.adk.tools.openapi_tool.openapi_spec_parser.rest_api_tool import snake_to_lower_camel
+from google.adk.tools.openapi_tool.openapi_spec_parser.rest_api_tool import to_gemini_schema
 from google.adk.tools.tool_context import ToolContext
-from google.genai.types import FunctionDeclaration, Schema, Type
+from google.genai.types import FunctionDeclaration
+from google.genai.types import Schema
+from google.genai.types import Type
 import pytest

View File

@@ -161,11 +161,9 @@ async def test_run_async_1_missing_arg_sync_func():
   args = {"arg1": "test_value_1"}
   result = await tool.run_async(args=args, tool_context=MagicMock())
   assert result == {
-      "error": (
-          """Invoking `function_for_testing_with_2_arg_and_no_tool_context()` failed as the following mandatory input parameters are not present:
+      "error": """Invoking `function_for_testing_with_2_arg_and_no_tool_context()` failed as the following mandatory input parameters are not present:
 arg2
 You could retry calling this tool, but it is IMPORTANT for you to provide all the mandatory parameters."""
-      )
   }
@@ -176,11 +174,9 @@ async def test_run_async_1_missing_arg_async_func():
   args = {"arg2": "test_value_1"}
   result = await tool.run_async(args=args, tool_context=MagicMock())
   assert result == {
-      "error": (
-          """Invoking `async_function_for_testing_with_2_arg_and_no_tool_context()` failed as the following mandatory input parameters are not present:
+      "error": """Invoking `async_function_for_testing_with_2_arg_and_no_tool_context()` failed as the following mandatory input parameters are not present:
 arg1
 You could retry calling this tool, but it is IMPORTANT for you to provide all the mandatory parameters."""
-      )
   }
@@ -191,13 +187,11 @@ async def test_run_async_3_missing_arg_sync_func():
   args = {"arg2": "test_value_1"}
   result = await tool.run_async(args=args, tool_context=MagicMock())
   assert result == {
-      "error": (
-          """Invoking `function_for_testing_with_4_arg_and_no_tool_context()` failed as the following mandatory input parameters are not present:
+      "error": """Invoking `function_for_testing_with_4_arg_and_no_tool_context()` failed as the following mandatory input parameters are not present:
 arg1
 arg3
 arg4
 You could retry calling this tool, but it is IMPORTANT for you to provide all the mandatory parameters."""
-      )
   }
@@ -208,13 +202,11 @@ async def test_run_async_3_missing_arg_async_func():
   args = {"arg3": "test_value_1"}
   result = await tool.run_async(args=args, tool_context=MagicMock())
   assert result == {
-      "error": (
-          """Invoking `async_function_for_testing_with_4_arg_and_no_tool_context()` failed as the following mandatory input parameters are not present:
+      "error": """Invoking `async_function_for_testing_with_4_arg_and_no_tool_context()` failed as the following mandatory input parameters are not present:
 arg1
 arg2
 arg4
 You could retry calling this tool, but it is IMPORTANT for you to provide all the mandatory parameters."""
-      )
   }
@@ -225,14 +217,12 @@ async def test_run_async_missing_all_arg_sync_func():
   args = {}
   result = await tool.run_async(args=args, tool_context=MagicMock())
   assert result == {
-      "error": (
-          """Invoking `function_for_testing_with_4_arg_and_no_tool_context()` failed as the following mandatory input parameters are not present:
+      "error": """Invoking `function_for_testing_with_4_arg_and_no_tool_context()` failed as the following mandatory input parameters are not present:
 arg1
 arg2
 arg3
 arg4
 You could retry calling this tool, but it is IMPORTANT for you to provide all the mandatory parameters."""
-      )
   }
@@ -243,14 +233,12 @@ async def test_run_async_missing_all_arg_async_func():
   args = {}
   result = await tool.run_async(args=args, tool_context=MagicMock())
   assert result == {
-      "error": (
-          """Invoking `async_function_for_testing_with_4_arg_and_no_tool_context()` failed as the following mandatory input parameters are not present:
+      "error": """Invoking `async_function_for_testing_with_4_arg_and_no_tool_context()` failed as the following mandatory input parameters are not present:
 arg1
 arg2
 arg3
 arg4
 You could retry calling this tool, but it is IMPORTANT for you to provide all the mandatory parameters."""
-      )
   }