Copybara import of the project:

--
ad923c2c8c503ba73c62db695e88f1a3ea1aeeea by YU MING HSU <abego452@gmail.com>:

docs: enhance Contribution process within CONTRIBUTING.md

--
8022924fb7e975ac278d38fce3b5fd593d874536 by YU MING HSU <abego452@gmail.com>:

fix: move _maybe_append_user_content from google_llm.py to base_llm.py
so that subclasses can benefit from it, and call _maybe_append_user_content
from generate_content_async within lite_llm.py

--
cf891fb1a3bbccaaf9d0055b23f614ce52449977 by YU MING HSU <abego452@gmail.com>:

fix: modify the install-dependencies command, and use pyink to format the codebase
COPYBARA_INTEGRATE_REVIEW=https://github.com/google/adk-python/pull/428 from hsuyuming:fix_litellm_error_issue_427 dbec4949798e6399a0410d1b8ba7cc6a7cad7bdd
PiperOrigin-RevId: 754124679
hsuyuming 2025-05-02 13:59:14 -07:00 committed by Copybara-Service
parent 8f94a0c7b3
commit 879064343c
14 changed files with 170 additions and 85 deletions
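In outline, the refactor from the second commit works as sketched below — a minimal, hypothetical simplification with stub bodies (the real classes are pydantic models; see the base_llm.py and lite_llm.py diffs further down):

```python
# Sketch only: the helper moves onto the base class so that every model
# subclass inherits it, and LiteLlm calls it before building its request.
class BaseLlm:

  def _maybe_append_user_content(self, llm_request):
    """Ensures the request ends with user content (see base_llm.py diff)."""
    ...


class LiteLlm(BaseLlm):

  async def generate_content_async(self, llm_request, stream=False):
    self._maybe_append_user_content(llm_request)  # inherited from BaseLlm
    ...
```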

View File

@@ -24,6 +24,35 @@ This project follows
[Google's Open Source Community Guidelines](https://opensource.google/conduct/).
## Contribution process

1. **Clone the repository:**

    ```shell
    git clone git@github.com:google/adk-python.git
    cd adk-python
    ```

2. **Create and activate a virtual environment:**

    ```shell
    python -m venv .venv
    source .venv/bin/activate
    pip install uv
    ```

3. **Install dependencies:**

    ```shell
    uv pip install -e .[dev,test,extensions,eval]
    ```

4. **Run unit tests:**

    ```shell
    uv run pytest ./tests/unittests
    ```

5. **Run pyink to format the codebase:**

    ```shell
    uv run pyink --config pyproject.toml ./src
    ```

### Requirements for PRs

View File

@@ -12,6 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
class EvalConstants:
  """Holds constants used in evaluation files."""

View File

@@ -28,7 +28,7 @@ class ResponseEvaluator:
      raw_eval_dataset: list[list[dict[str, Any]]],
      evaluation_criteria: list[str],
      *,
      print_detailed_results: bool = False
      print_detailed_results: bool = False,
  ):
    r"""Returns the value of requested evaluation metrics.

View File

@@ -23,5 +23,6 @@ class Example(BaseModel):
    input: The input content for the example.
    output: The expected output content for the example.
  """
  input: types.Content
  output: list[types.Content]

View File

@@ -11,4 +11,3 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

View File

@@ -332,7 +332,7 @@ async def _process_function_live_helper(
    function_response = {
        'status': f'No active streaming function named {function_name} found'
    }
  elif hasattr(tool, "func") and inspect.isasyncgenfunction(tool.func):
  elif hasattr(tool, 'func') and inspect.isasyncgenfunction(tool.func):
    # for streaming tool use case
    # we require the function to be an async generator function
    async def run_tool_and_update_queue(tool, function_args, tool_context):

View File

@@ -28,6 +28,7 @@ class MemoryResult(BaseModel):
    session_id: The session id associated with the memory.
    events: A list of events in the session.
  """
  session_id: str
  events: list[Event]
@@ -38,6 +39,7 @@ class SearchMemoryResponse(BaseModel):
  Attributes:
    memories: A list of memory results matching the search query.
  """
  memories: list[MemoryResult] = Field(default_factory=list)

View File

@@ -17,6 +17,8 @@ from abc import abstractmethod
from typing import AsyncGenerator
from typing import TYPE_CHECKING

from google.genai import types
from pydantic import BaseModel
from pydantic import ConfigDict
@@ -73,6 +75,48 @@ class BaseLlm(BaseModel):
    )
    yield  # AsyncGenerator requires a yield statement in function body.

  def _maybe_append_user_content(self, llm_request: LlmRequest):
    """Appends a user content, so that the model can continue to output.

    Args:
      llm_request: LlmRequest, the request to send to the model.
    """
    # If no content is provided, append a user content to hint model response
    # using system instruction.
    if not llm_request.contents:
      llm_request.contents.append(
          types.Content(
              role='user',
              parts=[
                  types.Part(
                      text=(
                          'Handle the requests as specified in the System'
                          ' Instruction.'
                      )
                  )
              ],
          )
      )
      return

    # Insert a user content to preserve user intent and to avoid empty
    # model response.
    if llm_request.contents[-1].role != 'user':
      llm_request.contents.append(
          types.Content(
              role='user',
              parts=[
                  types.Part(
                      text=(
                          'Continue processing previous requests as instructed.'
                          ' Exit or provide a summary if no more outputs are'
                          ' needed.'
                      )
                  )
              ],
          )
      )

  def connect(self, llm_request: LlmRequest) -> BaseLlmConnection:
    """Creates a live connection to the LLM.

View File

@@ -210,48 +210,6 @@ class Gemini(BaseLlm):
    ) as live_session:
      yield GeminiLlmConnection(live_session)

  def _maybe_append_user_content(self, llm_request: LlmRequest):
    """Appends a user content, so that model can continue to output.

    Args:
      llm_request: LlmRequest, the request to send to the Gemini model.
    """
    # If no content is provided, append a user content to hint model response
    # using system instruction.
    if not llm_request.contents:
      llm_request.contents.append(
          types.Content(
              role='user',
              parts=[
                  types.Part(
                      text=(
                          'Handle the requests as specified in the System'
                          ' Instruction.'
                      )
                  )
              ],
          )
      )
      return

    # Insert a user content to preserve user intent and to avoid empty
    # model response.
    if llm_request.contents[-1].role != 'user':
      llm_request.contents.append(
          types.Content(
              role='user',
              parts=[
                  types.Part(
                      text=(
                          'Continue processing previous requests as instructed.'
                          ' Exit or provide a summary if no more outputs are'
                          ' needed.'
                      )
                  )
              ],
          )
      )

def _build_function_declaration_log(
    func_decl: types.FunctionDeclaration,

View File

@@ -172,19 +172,19 @@ def _content_to_message_param(
  tool_calls = []
  content_present = False
  for part in content.parts:
    if part.function_call:
      tool_calls.append(
          ChatCompletionMessageToolCall(
              type="function",
              id=part.function_call.id,
              function=Function(
                  name=part.function_call.name,
                  arguments=part.function_call.args,
              ),
          )
      )
    elif part.text or part.inline_data:
      content_present = True

  final_content = message_content if content_present else None
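For orientation, a hedged walk-through of what the loop above produces for a function-call part (field names taken from the google.genai types used elsewhere in this commit; the id value is hypothetical):

```python
from google.genai import types

# One model turn that calls a tool. After the loop above runs on it:
# tool_calls holds a single ChatCompletionMessageToolCall, no text or
# inline_data part is seen, so content_present stays False and
# final_content becomes None.
content = types.Content(
    role='model',
    parts=[
        types.Part(
            function_call=types.FunctionCall(
                id='call_1',  # hypothetical id
                name='get_weather',
                args={'city': 'Kirkland'},
            )
        )
    ],
)
```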
@@ -453,9 +453,9 @@ def _get_completion_inputs(
  for content in llm_request.contents or []:
    message_param_or_list = _content_to_message_param(content)
    if isinstance(message_param_or_list, list):
      messages.extend(message_param_or_list)
    elif message_param_or_list:  # Ensure it's not None before appending
      messages.append(message_param_or_list)

  if llm_request.config.system_instruction:
    messages.insert(
@@ -611,6 +611,7 @@ class LiteLlm(BaseLlm):
      LlmResponse: The model response.
    """
    self._maybe_append_user_content(llm_request)
    logger.info(_build_request_log(llm_request))
    messages, tools = _get_completion_inputs(llm_request)
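The call added above is the behavioral core of this commit. A quick sketch of what the inherited helper does to a request — module paths and the model name are assumptions, and the assertions mirror the unit tests at the bottom of this commit:

```python
from google.genai import types

from google.adk.models.lite_llm import LiteLlm  # module path assumed
from google.adk.models.llm_request import LlmRequest  # module path assumed

llm = LiteLlm(model='openai/gpt-4o')  # hypothetical model name

# Empty request: a user message pointing at the system instruction is added.
req = LlmRequest(contents=[])
llm._maybe_append_user_content(req)
assert len(req.contents) == 1 and req.contents[-1].role == 'user'

# Request ending with a model turn: a 'continue' user message is appended
# so the model does not produce an empty response.
req = LlmRequest(
    contents=[types.Content(role='model', parts=[types.Part(text='hi')])]
)
llm._maybe_append_user_content(req)
assert len(req.contents) == 2 and req.contents[-1].role == 'user'
```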

View File

@@ -26,6 +26,7 @@ from .state import State
class GetSessionConfig(BaseModel):
  """The configuration of getting a session."""
  num_recent_events: Optional[int] = None
  after_timestamp: Optional[float] = None
@@ -35,11 +36,13 @@ class ListSessionsResponse(BaseModel):
  The events and states are not set within each Session object.
  """
  sessions: list[Session] = Field(default_factory=list)


class ListEventsResponse(BaseModel):
  """The response of listing events in a session."""
  events: list[Event] = Field(default_factory=list)
  next_page_token: Optional[str] = None

View File

@@ -219,6 +219,7 @@ class StorageAppState(Base):
      DateTime(), default=func.now(), onupdate=func.now()
  )


class StorageUserState(Base):
  """Represents a user state stored in the database."""
@@ -562,6 +563,7 @@ class DatabaseSessionService(BaseSessionService):
  ) -> ListEventsResponse:
    raise NotImplementedError()


def convert_event(event: StorageEvent) -> Event:
  """Converts a storage event to an event."""
  return Event(

View File

@@ -312,9 +312,7 @@ class ConnectionsClient:
            "content": {
                "application/json": {
                    "schema": {
                        "$ref": (
                            f"#/components/schemas/{action_display_name}_Request"
                        )
                        "$ref": f"#/components/schemas/{action_display_name}_Request"
                    }
                }
            }
@@ -325,9 +323,7 @@ class ConnectionsClient:
            "content": {
                "application/json": {
                    "schema": {
                        "$ref": (
                            f"#/components/schemas/{action_display_name}_Response"
                        ),
                        "$ref": f"#/components/schemas/{action_display_name}_Response",
                    }
                }
            },
@@ -346,11 +342,9 @@ class ConnectionsClient:
    return {
        "post": {
            "summary": f"List {entity}",
            "description": (
                f"""Returns the list of {entity} data. If the page token was available in the response, let users know there are more records available. Ask if the user wants to fetch the next page of results. When passing filter use the
            "description": f"""Returns the list of {entity} data. If the page token was available in the response, let users know there are more records available. Ask if the user wants to fetch the next page of results. When passing filter use the
following format: `field_name1='value1' AND field_name2='value2'
`. {tool_instructions}"""
            ),
`. {tool_instructions}""",
            "x-operation": "LIST_ENTITIES",
            "x-entity": f"{entity}",
            "operationId": f"{tool_name}_list_{entity}",
@@ -375,9 +369,7 @@ class ConnectionsClient:
                        f"Returns a list of {entity} of json"
                        f" schema: {schema_as_string}"
                    ),
                    "$ref": (
                        "#/components/schemas/execute-connector_Response"
                    ),
                    "$ref": "#/components/schemas/execute-connector_Response",
                }
            }
        },
@@ -421,9 +413,7 @@ class ConnectionsClient:
                        f"Returns {entity} of json schema:"
                        f" {schema_as_string}"
                    ),
                    "$ref": (
                        "#/components/schemas/execute-connector_Response"
                    ),
                    "$ref": "#/components/schemas/execute-connector_Response",
                }
            }
        },
@@ -460,9 +450,7 @@ class ConnectionsClient:
            "content": {
                "application/json": {
                    "schema": {
                        "$ref": (
                            "#/components/schemas/execute-connector_Response"
                        )
                        "$ref": "#/components/schemas/execute-connector_Response"
                    }
                }
            },
@@ -499,9 +487,7 @@ class ConnectionsClient:
            "content": {
                "application/json": {
                    "schema": {
                        "$ref": (
                            "#/components/schemas/execute-connector_Response"
                        )
                        "$ref": "#/components/schemas/execute-connector_Response"
                    }
                }
            },
@@ -538,9 +524,7 @@ class ConnectionsClient:
            "content": {
                "application/json": {
                    "schema": {
                        "$ref": (
                            "#/components/schemas/execute-connector_Response"
                        )
                        "$ref": "#/components/schemas/execute-connector_Response"
                    }
                }
            },

View File

@@ -262,6 +262,67 @@ async def test_generate_content_async(mock_acompletion, lite_llm_instance):
  )


litellm_append_user_content_test_cases = [
    pytest.param(
        LlmRequest(
            contents=[
                types.Content(
                    role="developer",
                    parts=[types.Part.from_text(text="Test prompt")]
                )
            ]
        ),
        2,
        id="litellm request without user content"
    ),
    pytest.param(
        LlmRequest(
            contents=[
                types.Content(
                    role="user",
                    parts=[types.Part.from_text(text="user prompt")]
                )
            ]
        ),
        1,
        id="litellm request with user content"
    ),
    pytest.param(
        LlmRequest(
            contents=[
                types.Content(
                    role="model",
                    parts=[types.Part.from_text(text="model prompt")]
                ),
                types.Content(
                    role="user",
                    parts=[types.Part.from_text(text="user prompt")]
                ),
                types.Content(
                    role="model",
                    parts=[types.Part.from_text(text="model prompt")]
                )
            ]
        ),
        4,
        id="user content is not the last message scenario"
    )
]


@pytest.mark.parametrize(
    "llm_request, expected_output",
    litellm_append_user_content_test_cases
)
def test_maybe_append_user_content(
    lite_llm_instance, llm_request, expected_output
):
  lite_llm_instance._maybe_append_user_content(llm_request)
  assert len(llm_request.contents) == expected_output


function_declaration_test_cases = [
    (
        "simple_function",