Copybara import of the project:

--
16994cb2d5d646341f5285ca71d72697d81d18fe by Nilanjan De <nilanjan.de@gmail.com>:

chore: fix typos
COPYBARA_INTEGRATE_REVIEW=https://github.com/google/adk-python/pull/272 from n1lanjan:fix-typos a1ab655b08ec08c5dd2da71aab9a2386e3610e84
PiperOrigin-RevId: 749690489
Authored by Nilanjan De on 2025-04-20 22:52:42 -07:00; committed by Copybara-Service
parent 23f0383284
commit 1664b45562
15 changed files with 23 additions and 24 deletions

View File

@@ -45,12 +45,12 @@
 * Initial release of the Agent Development Kit (ADK).
 * Multi-agent, agent-as-workflow, and custom agent support
 * Tool authentication support
-* Rich tool support, e.g. bult-in tools, google-cloud tools, third-party tools, and MCP tools
+* Rich tool support, e.g. built-in tools, google-cloud tools, third-party tools, and MCP tools
 * Rich callback support
 * Built-in code execution capability
 * Asynchronous runtime and execution
 * Session, and memory support
 * Built-in evaluation support
-* Development UI that makes local devlopment easy
+* Development UI that makes local development easy
 * Deploy to Google Cloud Run, Agent Engine
 * (Experimental) Live(Bidi) auido/video agent support and Compositional Function Calling(CFC) support

View File

@@ -45,7 +45,7 @@ confidence=
 # can either give multiple identifiers separated by comma (,) or put this
 # option multiple times (only on the command line, not in the configuration
 # file where it should appear only once).You can also use "--disable=all" to
-# disable everything first and then reenable specific checks. For example, if
+# disable everything first and then re-enable specific checks. For example, if
 # you want to run only the similarities checker, you can use "--disable=all
 # --enable=similarities". If you want to run only the classes checker, but have
 # no Warning level messages displayed, use"--disable=all --enable=classes

View File

@@ -256,7 +256,7 @@ def run_evals(
 )
 if final_eval_status == EvalStatus.PASSED:
-result = "✅ Passsed"
+result = "✅ Passed"
 else:
 result = "❌ Failed"

View File

@@ -55,7 +55,7 @@ def load_json(file_path: str) -> Union[Dict, List]:
 class AgentEvaluator:
-"""An evaluator for Agents, mainly intented for helping with test cases."""
+"""An evaluator for Agents, mainly intended for helping with test cases."""
 @staticmethod
 def find_config_for_test_file(test_file: str):
@@ -91,7 +91,7 @@ class AgentEvaluator:
 look for 'root_agent' in the loaded module.
 eval_dataset: The eval data set. This can be either a string representing
 full path to the file containing eval dataset, or a directory that is
-recusively explored for all files that have a `.test.json` suffix.
+recursively explored for all files that have a `.test.json` suffix.
 num_runs: Number of times all entries in the eval dataset should be
 assessed.
 agent_name: The name of the agent.
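
For orientation, a rough usage sketch of the evaluator documented in this hunk; the import path and argument names are inferred from the Args section above and are assumptions, so check them against the actual AgentEvaluator API:

# Hypothetical usage sketch; import path and argument names are inferred
# from the docstring fragment above, not taken from the actual API.
from google.adk.evaluation.agent_evaluator import AgentEvaluator

AgentEvaluator.evaluate(
    agent_module="my_app.agent",   # module expected to expose `root_agent`
    eval_dataset="my_app/evals",   # a single file, or a directory explored
                                   # recursively for `*.test.json` files
    num_runs=2,                    # assess every dataset entry twice
)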

View File

@@ -35,7 +35,7 @@ class ResponseEvaluator:
 Args:
 raw_eval_dataset: The dataset that will be evaluated.
 evaluation_criteria: The evaluation criteria to be used. This method
-support two criterias, `response_evaluation_score` and
+support two criteria, `response_evaluation_score` and
 `response_match_score`.
 print_detailed_results: Prints detailed results on the console. This is
 usually helpful during debugging.
@@ -56,7 +56,7 @@ class ResponseEvaluator:
 Value range: [0, 5], where 0 means that the agent's response is not
 coherent, while 5 means it is . High values are good.
 A note on raw_eval_dataset:
-The dataset should be a list session, where each sesssion is represented
+The dataset should be a list session, where each session is represented
 as a list of interaction that need evaluation. Each evaluation is
 represented as a dictionary that is expected to have values for the
 following keys:
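
For context, the two criteria named in this docstring are string identifiers passed alongside the dataset; a hedged call sketch follows, in which the `evaluate` entry point, the import path, and the placeholder dataset are assumptions based on this fragment:

# Hypothetical call sketch; names are inferred from the docstring above.
from google.adk.evaluation.response_evaluator import ResponseEvaluator

eval_sessions = []  # placeholder: a list of sessions, each a list of interaction dicts

ResponseEvaluator.evaluate(
    raw_eval_dataset=eval_sessions,
    evaluation_criteria=[
        "response_evaluation_score",  # coherence score, value range [0, 5]
        "response_match_score",
    ],
    print_detailed_results=True,      # print per-interaction details while debugging
)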

View File

@@ -31,10 +31,9 @@ class TrajectoryEvaluator:
 ):
 r"""Returns the mean tool use accuracy of the eval dataset.
-Tool use accuracy is calculated by comparing the expected and actuall tool
-use trajectories. An exact match scores a 1, 0 otherwise. The final number
-is an
-average of these individual scores.
+Tool use accuracy is calculated by comparing the expected and the actual
+tool use trajectories. An exact match scores a 1, 0 otherwise. The final
+number is an average of these individual scores.
 Value range: [0, 1], where 0 is means none of the too use entries aligned,
 and 1 would mean all of them aligned. Higher value is good.
@@ -45,7 +44,7 @@ class TrajectoryEvaluator:
 usually helpful during debugging.
 A note on eval_dataset:
-The dataset should be a list session, where each sesssion is represented
+The dataset should be a list session, where each session is represented
 as a list of interaction that need evaluation. Each evaluation is
 represented as a dictionary that is expected to have values for the
 following keys:
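
The corrected description amounts to a simple exact-match average; the sketch below restates that rule only for illustration and is not the TrajectoryEvaluator implementation (the "expected"/"actual" keys are hypothetical names for one interaction's two tool-use trajectories):

# Illustrative restatement of the scoring rule described above; not the real code.
def mean_tool_use_accuracy(eval_dataset):
  scores = []
  for session in eval_dataset:        # the dataset is a list of sessions
    for interaction in session:       # each session is a list of interactions
      expected = interaction["expected"]  # hypothetical key: expected trajectory
      actual = interaction["actual"]      # hypothetical key: actual trajectory
      scores.append(1.0 if expected == actual else 0.0)  # exact match -> 1, else 0
  return sum(scores) / len(scores) if scores else 0.0    # value range [0, 1]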

View File

@@ -94,7 +94,7 @@ can answer it.
 If another agent is better for answering the question according to its
 description, call `{_TRANSFER_TO_AGENT_FUNCTION_NAME}` function to transfer the
-question to that agent. When transfering, do not generate any text other than
+question to that agent. When transferring, do not generate any text other than
 the function call.
 """

View File

@@ -115,7 +115,7 @@ class BaseLlmFlow(ABC):
 yield event
 # send back the function response
 if event.get_function_responses():
-logger.debug('Sending back last function resonse event: %s', event)
+logger.debug('Sending back last function response event: %s', event)
 invocation_context.live_request_queue.send_content(event.content)
 if (
 event.content

View File

@@ -111,7 +111,7 @@ def _rearrange_events_for_latest_function_response(
 """Rearrange the events for the latest function_response.
 If the latest function_response is for an async function_call, all events
-bewteen the initial function_call and the latest function_response will be
+between the initial function_call and the latest function_response will be
 removed.
 Args:

View File

@@ -52,7 +52,7 @@ class _InstructionsLlmRequestProcessor(BaseLlmRequestProcessor):
 # Appends global instructions if set.
 if (
 isinstance(root_agent, LlmAgent) and root_agent.global_instruction
-): # not emtpy str
+): # not empty str
 raw_si = root_agent.canonical_global_instruction(
 ReadonlyContext(invocation_context)
 )
@@ -60,7 +60,7 @@ class _InstructionsLlmRequestProcessor(BaseLlmRequestProcessor):
 llm_request.append_instructions([si])
 # Appends agent instructions if set.
-if agent.instruction: # not emtpy str
+if agent.instruction: # not empty str
 raw_si = agent.canonical_instruction(ReadonlyContext(invocation_context))
 si = _populate_values(raw_si, invocation_context)
 llm_request.append_instructions([si])

View File

@@ -152,7 +152,7 @@ class GeminiLlmConnection(BaseLlmConnection):
 ):
 # TODO: Right now, we just support output_transcription without
 # changing interface and data protocol. Later, we can consider to
-# support output_transcription as a separete field in LlmResponse.
+# support output_transcription as a separate field in LlmResponse.
 # Transcription is always considered as partial event
 # We rely on other control signals to determine when to yield the
@@ -179,7 +179,7 @@ class GeminiLlmConnection(BaseLlmConnection):
 # in case of empty content or parts, we sill surface it
 # in case it's an interrupted message, we merge the previous partial
 # text. Other we don't merge. because content can be none when model
-# safty threshold is triggered
+# safety threshold is triggered
 if message.server_content.interrupted and text:
 yield self.__build_full_text_response(text)
 text = ''

View File

@@ -217,7 +217,7 @@ class DatabaseSessionService(BaseSessionService):
 """
 # 1. Create DB engine for db connection
 # 2. Create all tables based on schema
-# 3. Initialize all properies
+# 3. Initialize all properties
 try:
 db_engine = create_engine(db_url)
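
The three numbered steps in this comment map onto standard SQLAlchemy setup; a minimal sketch under that assumption follows (this is not the DatabaseSessionService code, and the schema here is left empty):

# Minimal SQLAlchemy sketch of the three steps noted above; not the actual service code.
from sqlalchemy import create_engine
from sqlalchemy.orm import declarative_base, sessionmaker

Base = declarative_base()              # table classes would be declared against this

db_engine = create_engine("sqlite:///sessions.db")      # 1. create DB engine for db connection
Base.metadata.create_all(db_engine)                     # 2. create all tables based on schema
DatabaseSessionFactory = sessionmaker(bind=db_engine)   # 3. initialize the service's properties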

View File

@@ -26,7 +26,7 @@ class State:
 """
 Args:
 value: The current value of the state dict.
-delta: The delta change to the current value that hasn't been commited.
+delta: The delta change to the current value that hasn't been committed.
 """
 self._value = value
 self._delta = delta
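
For context, the value/delta pair documented here is the usual pending-change pattern: delta holds uncommitted writes layered over the committed value. A minimal illustrative sketch, not the actual State class (the `commit` helper is a hypothetical name):

# Minimal sketch of the value/delta pattern; not the real State implementation.
class StateSketch:

  def __init__(self, value: dict, delta: dict):
    self._value = value   # committed state
    self._delta = delta   # uncommitted changes layered on top

  def __getitem__(self, key):
    # Uncommitted changes win over the committed value.
    return self._delta[key] if key in self._delta else self._value[key]

  def commit(self):
    self._value.update(self._delta)  # fold pending changes into the committed value
    self._delta.clear()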

View File

@@ -89,7 +89,7 @@ class LoadArtifactsTool(BaseTool):
 than the function call.
 """])
-# Attache the content of the artifacts if the model requests them.
+# Attach the content of the artifacts if the model requests them.
 # This only adds the content to the model request, instead of the session.
 if llm_request.contents and llm_request.contents[-1].parts:
 function_response = llm_request.contents[-1].parts[0].function_response

View File

@@ -66,7 +66,7 @@ class OAuth2CredentialExchanger(BaseAuthCredentialExchanger):
 Returns:
 An AuthCredential object containing the HTTP bearer access token. If the
-HTTO bearer token cannot be generated, return the origianl credential
+HTTP bearer token cannot be generated, return the original credential.
 """
 if "access_token" not in auth_credential.oauth2.token: