diff --git a/src/google/adk/sessions/vertex_ai_session_service.py b/src/google/adk/sessions/vertex_ai_session_service.py
index 475377f..7174967 100644
--- a/src/google/adk/sessions/vertex_ai_session_service.py
+++ b/src/google/adk/sessions/vertex_ai_session_service.py
@@ -188,10 +188,10 @@ class VertexAiSessionService(BaseSessionService):
   ) -> ListSessionsResponse:
     reasoning_engine_id = _parse_reasoning_engine_id(app_name)
 
-    path = f"reasoningEngines/{reasoning_engine_id}/sessions"
+    path = f'reasoningEngines/{reasoning_engine_id}/sessions'
     if user_id:
-      parsed_user_id = urllib.parse.quote(f'''"{user_id}"''', safe="")
-      path = path + f"?filter=user_id={parsed_user_id}"
+      parsed_user_id = urllib.parse.quote(f'''"{user_id}"''', safe='')
+      path = path + f'?filter=user_id={parsed_user_id}'
 
     api_client = _get_api_client(self.project, self.location)
     api_response = await api_client.async_request(
diff --git a/tests/unittests/models/test_google_llm.py b/tests/unittests/models/test_google_llm.py
index 3b3e570..e1deadd 100644
--- a/tests/unittests/models/test_google_llm.py
+++ b/tests/unittests/models/test_google_llm.py
@@ -210,74 +210,76 @@ async def test_generate_content_async_stream(gemini_llm, llm_request):
 async def test_generate_content_async_stream_preserves_thinking_and_text_parts(
     gemini_llm, llm_request
 ):
-    with mock.patch.object(gemini_llm, "api_client") as mock_client:
-        class MockAsyncIterator:
-            def __init__(self, seq):
-                self._iter = iter(seq)
+  with mock.patch.object(gemini_llm, "api_client") as mock_client:
 
-            def __aiter__(self):
-                return self
+    class MockAsyncIterator:
 
-            async def __anext__(self):
-                try:
-                    return next(self._iter)
-                except StopIteration:
-                    raise StopAsyncIteration
+      def __init__(self, seq):
+        self._iter = iter(seq)
 
-        response1 = types.GenerateContentResponse(
-            candidates=[
-                types.Candidate(
-                    content=Content(
-                        role="model",
-                        parts=[Part(text="Think1", thought=True)],
-                    ),
-                    finish_reason=None,
-                )
-            ]
-        )
-        response2 = types.GenerateContentResponse(
-            candidates=[
-                types.Candidate(
-                    content=Content(
-                        role="model",
-                        parts=[Part(text="Think2", thought=True)],
-                    ),
-                    finish_reason=None,
-                )
-            ]
-        )
-        response3 = types.GenerateContentResponse(
-            candidates=[
-                types.Candidate(
-                    content=Content(
-                        role="model",
-                        parts=[Part.from_text(text="Answer.")],
-                    ),
-                    finish_reason=types.FinishReason.STOP,
-                )
-            ]
-        )
+      def __aiter__(self):
+        return self
 
-        async def mock_coro():
-            return MockAsyncIterator([response1, response2, response3])
+      async def __anext__(self):
+        try:
+          return next(self._iter)
+        except StopIteration:
+          raise StopAsyncIteration
 
-        mock_client.aio.models.generate_content_stream.return_value = mock_coro()
-
-        responses = [
-            resp
-            async for resp in gemini_llm.generate_content_async(
-                llm_request, stream=True
+    response1 = types.GenerateContentResponse(
+        candidates=[
+            types.Candidate(
+                content=Content(
+                    role="model",
+                    parts=[Part(text="Think1", thought=True)],
+                ),
+                finish_reason=None,
             )
         ]
+    )
+    response2 = types.GenerateContentResponse(
+        candidates=[
+            types.Candidate(
+                content=Content(
+                    role="model",
+                    parts=[Part(text="Think2", thought=True)],
+                ),
+                finish_reason=None,
+            )
+        ]
+    )
+    response3 = types.GenerateContentResponse(
+        candidates=[
+            types.Candidate(
+                content=Content(
+                    role="model",
+                    parts=[Part.from_text(text="Answer.")],
+                ),
+                finish_reason=types.FinishReason.STOP,
+            )
+        ]
+    )
 
-        assert len(responses) == 4
-        assert responses[0].partial is True
-        assert responses[1].partial is True
-        assert responses[2].partial is True
-        assert responses[3].content.parts[0].text == "Think1Think2"
-        assert responses[3].content.parts[0].thought is True
-        assert responses[3].content.parts[1].text == "Answer."
-        mock_client.aio.models.generate_content_stream.assert_called_once()
+    async def mock_coro():
+      return MockAsyncIterator([response1, response2, response3])
+
+    mock_client.aio.models.generate_content_stream.return_value = mock_coro()
+
+    responses = [
+        resp
+        async for resp in gemini_llm.generate_content_async(
+            llm_request, stream=True
+        )
+    ]
+
+    assert len(responses) == 4
+    assert responses[0].partial is True
+    assert responses[1].partial is True
+    assert responses[2].partial is True
+    assert responses[3].content.parts[0].text == "Think1Think2"
+    assert responses[3].content.parts[0].thought is True
+    assert responses[3].content.parts[1].text == "Answer."
+    mock_client.aio.models.generate_content_stream.assert_called_once()
 
 
 @pytest.mark.asyncio
diff --git a/tests/unittests/sessions/test_vertex_ai_session_service.py b/tests/unittests/sessions/test_vertex_ai_session_service.py
index 1794d7a..dc34079 100644
--- a/tests/unittests/sessions/test_vertex_ai_session_service.py
+++ b/tests/unittests/sessions/test_vertex_ai_session_service.py
@@ -111,7 +111,9 @@ MOCK_SESSION = Session(
 
 
 SESSION_REGEX = r'^reasoningEngines/([^/]+)/sessions/([^/]+)$'
-SESSIONS_REGEX = r'^reasoningEngines/([^/]+)/sessions\?filter=user_id=%22([^%]+)%22.*$'  # %22 represents double-quotes in a URL-encoded string
+SESSIONS_REGEX = (  # %22 represents double-quotes in a URL-encoded string
+    r'^reasoningEngines/([^/]+)/sessions\?filter=user_id=%22([^%]+)%22.*$'
+)
 EVENTS_REGEX = r'^reasoningEngines/([^/]+)/sessions/([^/]+)/events$'
 LRO_REGEX = r'^operations/([^/]+)$'
 
@@ -156,7 +158,7 @@ class MockApiClient:
       return {
          'name': path,
          'done': True,
-         'response': self.session_dict['4']  # Return the created session
+         'response': self.session_dict['4'],  # Return the created session
      }
     else:
       raise ValueError(f'Unsupported path: {path}')
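
Note on the quoting change (editor's sketch, not part of the patch): swapping double quotes for single quotes is purely stylistic, but the `safe=''` argument in `urllib.parse.quote` is what forces full URL-encoding of the filter value, which is why SESSIONS_REGEX matches `%22` rather than literal double quotes. A minimal standalone check of the standard-library behavior the patch relies on, with a hypothetical user id:

    import urllib.parse

    # quote() normally leaves '/' unescaped; safe='' encodes every reserved
    # character, so the wrapping double quotes become %22.
    encoded = urllib.parse.quote('"user-123"', safe='')
    assert encoded == '%22user-123%22'
    print(f'?filter=user_id={encoded}')  # ?filter=user_id=%22user-123%22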