mirror of
https://github.com/EvolutionAPI/adk-python.git
synced 2025-07-13 15:14:50 -06:00
Add usage_metadata for sse response with finish_reason
This is needed to cover generations that end with a finish reason during streaming. PiperOrigin-RevId: 761144245
This commit is contained in:
parent
7d809eb9d9
commit
00798af881
@ -98,6 +98,7 @@ class Gemini(BaseLlm):
|
||||
)
|
||||
response = None
|
||||
text = ''
|
||||
usage_metadata = None
|
||||
# for sse, similar as bidi (see receive method in gemini_llm_connecton.py),
|
||||
# we need to mark those text content as partial and after all partial
|
||||
# contents are sent, we send an accumulated event which contains all the
|
||||
@ -106,6 +107,7 @@ class Gemini(BaseLlm):
|
||||
async for response in responses:
|
||||
logger.info(_build_response_log(response))
|
||||
llm_response = LlmResponse.create(response)
|
||||
usage_metadata = llm_response.usage_metadata
|
||||
if (
|
||||
llm_response.content
|
||||
and llm_response.content.parts
|
||||
@ -123,7 +125,7 @@ class Gemini(BaseLlm):
|
||||
content=types.ModelContent(
|
||||
parts=[types.Part.from_text(text=text)],
|
||||
),
|
||||
usage_metadata=llm_response.usage_metadata,
|
||||
usage_metadata=usage_metadata,
|
||||
)
|
||||
text = ''
|
||||
yield llm_response
|
||||
@ -137,6 +139,7 @@ class Gemini(BaseLlm):
|
||||
content=types.ModelContent(
|
||||
parts=[types.Part.from_text(text=text)],
|
||||
),
|
||||
usage_metadata=usage_metadata,
|
||||
)
|
||||
|
||||
else:
|
||||
|
Loading…
Reference in New Issue
Block a user