Copybara import of the project:

--
ad923c2c8c503ba73c62db695e88f1a3ea1aeeea by YU MING HSU <abego452@gmail.com>:

docs: enhance the contribution process in CONTRIBUTING.md

--
8022924fb7e975ac278d38fce3b5fd593d874536 by YU MING HSU <abego452@gmail.com>:

fix: move _maybe_append_user_content from google_llm.py to base_llm.py
so that subclasses can benefit from it, and call _maybe_append_user_content
from generate_content_async within lite_llm.py

--
cf891fb1a3bbccaaf9d0055b23f614ce52449977 by YU MING HSU <abego452@gmail.com>:

fix: update the dependency installation command, and use pyink to format the codebase
COPYBARA_INTEGRATE_REVIEW=https://github.com/google/adk-python/pull/428 from hsuyuming:fix_litellm_error_issue_427 dbec4949798e6399a0410d1b8ba7cc6a7cad7bdd
PiperOrigin-RevId: 754124679
Authored by hsuyuming on 2025-05-02 13:59:14 -07:00
Committed by Copybara-Service
parent 8f94a0c7b3
commit 879064343c
14 changed files with 170 additions and 85 deletions

base_llm.py

@@ -17,6 +17,8 @@ from abc import abstractmethod
from typing import AsyncGenerator
from typing import TYPE_CHECKING
from google.genai import types
from pydantic import BaseModel
from pydantic import ConfigDict
@@ -73,6 +75,48 @@ class BaseLlm(BaseModel):
    )
    yield  # AsyncGenerator requires a yield statement in function body.

  def _maybe_append_user_content(self, llm_request: LlmRequest):
    """Appends a user content, so that model can continue to output.

    Args:
      llm_request: LlmRequest, the request to send to the Gemini model.
    """
    # If no content is provided, append a user content to hint model response
    # using system instruction.
    if not llm_request.contents:
      llm_request.contents.append(
          types.Content(
              role='user',
              parts=[
                  types.Part(
                      text=(
                          'Handle the requests as specified in the System'
                          ' Instruction.'
                      )
                  )
              ],
          )
      )
      return

    # Insert a user content to preserve user intent and to avoid empty
    # model response.
    if llm_request.contents[-1].role != 'user':
      llm_request.contents.append(
          types.Content(
              role='user',
              parts=[
                  types.Part(
                      text=(
                          'Continue processing previous requests as instructed.'
                          ' Exit or provide a summary if no more outputs are'
                          ' needed.'
                      )
                  )
              ],
          )
      )

  def connect(self, llm_request: LlmRequest) -> BaseLlmConnection:
    """Creates a live connection to the LLM.

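With the method now on BaseLlm, every model wrapper inherits the guard instead of re-implementing it, which is the point of this move. Below is a minimal sketch of such a subclass; EchoLlm, its canned response, and the module paths are illustrative assumptions, not part of this change.

from typing import AsyncGenerator

from google.adk.models.base_llm import BaseLlm
from google.adk.models.llm_request import LlmRequest
from google.adk.models.llm_response import LlmResponse


class EchoLlm(BaseLlm):
  """Hypothetical BaseLlm subclass, for illustration only."""

  async def generate_content_async(
      self, llm_request: LlmRequest, stream: bool = False
  ) -> AsyncGenerator[LlmResponse, None]:
    # Inherited from BaseLlm after this change; previously only Gemini
    # (google_llm.py) carried this guard.
    self._maybe_append_user_content(llm_request)
    yield LlmResponse()

Calling EchoLlm(model='echo').generate_content_async(LlmRequest()) would then seed a user turn before the subclass produces any output.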
google_llm.py

@@ -210,48 +210,6 @@ class Gemini(BaseLlm):
    ) as live_session:
      yield GeminiLlmConnection(live_session)

  def _maybe_append_user_content(self, llm_request: LlmRequest):
    """Appends a user content, so that model can continue to output.

    Args:
      llm_request: LlmRequest, the request to send to the Gemini model.
    """
    # If no content is provided, append a user content to hint model response
    # using system instruction.
    if not llm_request.contents:
      llm_request.contents.append(
          types.Content(
              role='user',
              parts=[
                  types.Part(
                      text=(
                          'Handle the requests as specified in the System'
                          ' Instruction.'
                      )
                  )
              ],
          )
      )
      return

    # Insert a user content to preserve user intent and to avoid empty
    # model response.
    if llm_request.contents[-1].role != 'user':
      llm_request.contents.append(
          types.Content(
              role='user',
              parts=[
                  types.Part(
                      text=(
                          'Continue processing previous requests as instructed.'
                          ' Exit or provide a summary if no more outputs are'
                          ' needed.'
                      )
                  )
              ],
          )
      )


def _build_function_declaration_log(
    func_decl: types.FunctionDeclaration,

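Gemini keeps the behavior through inheritance, since self._maybe_append_user_content now resolves to the BaseLlm copy. A quick sketch of the guard's two branches; it assumes LlmRequest defaults its contents to an empty list and that Gemini can be constructed without credentials, neither of which is shown in this diff.

from google.genai import types

from google.adk.models.google_llm import Gemini
from google.adk.models.llm_request import LlmRequest

llm = Gemini(model='gemini-2.0-flash')  # model name is illustrative

# Branch 1: no contents at all, so a user turn pointing the model at the
# system instruction is appended.
request = LlmRequest()
llm._maybe_append_user_content(request)
assert request.contents[-1].role == 'user'

# Branch 2: the last turn is a model turn, so a 'continue' user turn is
# appended to avoid an empty model response.
request = LlmRequest(
    contents=[types.Content(role='model', parts=[types.Part(text='hi')])]
)
llm._maybe_append_user_content(request)
assert request.contents[-1].role == 'user'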
lite_llm.py

@@ -172,19 +172,19 @@ def _content_to_message_param(
    tool_calls = []
    content_present = False
    for part in content.parts:
      if part.function_call:
        tool_calls.append(
            ChatCompletionMessageToolCall(
                type="function",
                id=part.function_call.id,
                function=Function(
                    name=part.function_call.name,
                    arguments=part.function_call.args,
                ),
            )
        )
      elif part.text or part.inline_data:
        content_present = True

    final_content = message_content if content_present else None
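The re-indented block above maps a genai function_call part onto litellm's tool-call message shape. A hedged sketch of that mapping follows; the weather call is invented, and importing the private helper is an assumption about module layout.

from google.genai import types

from google.adk.models.lite_llm import _content_to_message_param

content = types.Content(
    role='model',
    parts=[
        types.Part(
            function_call=types.FunctionCall(
                id='call_1', name='get_weather', args={'city': 'Taipei'}
            )
        )
    ],
)

message = _content_to_message_param(content)
# Expected shape (an assumption, per the code above): an assistant
# message whose content is None, since no text or inline_data part set
# content_present, carrying a single 'get_weather' tool call.
print(message)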
@@ -453,9 +453,9 @@ def _get_completion_inputs(
  for content in llm_request.contents or []:
    message_param_or_list = _content_to_message_param(content)
    if isinstance(message_param_or_list, list):
      messages.extend(message_param_or_list)
    elif message_param_or_list:  # Ensure it's not None before appending
      messages.append(message_param_or_list)

  if llm_request.config.system_instruction:
    messages.insert(
@@ -611,6 +611,7 @@ class LiteLlm(BaseLlm):
      LlmResponse: The model response.
    """
    self._maybe_append_user_content(llm_request)
    logger.info(_build_request_log(llm_request))
    messages, tools = _get_completion_inputs(llm_request)
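Taken together, this is the fix for the litellm failure tracked in issue 427: generate_content_async now runs the inherited guard before _get_completion_inputs builds the litellm messages, so an empty or model-terminated history no longer reaches litellm as-is. A rough usage sketch, assuming an asyncio entry point and any litellm-routable model string:

import asyncio

from google.adk.models.lite_llm import LiteLlm
from google.adk.models.llm_request import LlmRequest


async def main():
  llm = LiteLlm(model='openai/gpt-4o')  # illustrative model string
  request = LlmRequest()  # empty contents; the guard seeds a user turn
  async for response in llm.generate_content_async(request):
    print(response.content)


asyncio.run(main())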