add input transcription support for live/streaming.

Copybara import of the project:

--
d481e0604a79470e2c1308827b3ecb78bfb5327e by Alan B <alan@nerds.ai>:

feat: 🚧 catch user transcription

--
bba436bb76d1d2f9d5ba969fce38ff8b8a443254 by Alan B <alan@nerds.ai>:

feat: send user transcription event as llm_response

--
ad2abf540c60895b79c50f9051a6289ce394b98d by Alan B <death1027@outlook.com>:

style: 💄 update lint problems

--
744703c06716300c0f9f41633d3bafdf4cb180a1 by Hangfei Lin <hangfeilin@gmail.com>:

fix: set right order for input transcription

--
31a5d42d6155b0e5caad0c73c8df43255322016f by Hangfei Lin <hangfeilin@gmail.com>:

remove print

--
59e5d9c72060f97d124883150989315401a4c1b5 by Hangfei Lin <hangfeilin@gmail.com>:

remove api version

COPYBARA_INTEGRATE_REVIEW=https://github.com/google/adk-python/pull/495 from BloodBoy21:main ea29015af041f9785abaa8583e2c767f9d8c8bc8
PiperOrigin-RevId: 755401615
Author: Alan
Date: 2025-05-06 09:26:00 -07:00
Committed by: Copybara-Service
Parent: 905c20dad6
Commit: fcca0afdac
5 changed files with 32 additions and 3 deletions
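
The hunk shown below teaches the connection's streaming receive loop in GeminiLlmConnection to also surface the live API's input transcription: the transcribed user speech is wrapped in an LlmResponse whose content carries role='user' and yielded alongside the model's own responses. As a rough illustration of what that means for callers, here is a minimal consumer sketch; the consume() helper and the connection variable are hypothetical, assuming an already-established GeminiLlmConnection, and are not part of this PR.

# Hypothetical consumer sketch (not part of this change): read responses from an
# already-established GeminiLlmConnection and separate the user-input
# transcriptions introduced here from ordinary model output.
async def consume(connection):
  async for llm_response in connection.receive():
    content = llm_response.content
    if content and content.role == 'user' and content.parts:
      # Transcription of the user's audio, yielded by the new branch below.
      print('user said:', content.parts[0].text)
    elif content and content.parts and content.parts[0].text:
      # Regular (possibly partial) model text response.
      print('model:', content.parts[0].text)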

@@ -145,7 +145,20 @@ class GeminiLlmConnection(BaseLlmConnection):
            yield self.__build_full_text_response(text)
            text = ''
          yield llm_response
        if (
            message.server_content.input_transcription
            and message.server_content.input_transcription.text
        ):
          user_text = message.server_content.input_transcription.text
          parts = [
              types.Part.from_text(
                  text=user_text,
              )
          ]
          llm_response = LlmResponse(
              content=types.Content(role='user', parts=parts)
          )
          yield llm_response
        if (
            message.server_content.output_transcription
            and message.server_content.output_transcription.text