From b4fb57e84dc9e4a3791c620ec73966f0f0425d45 Mon Sep 17 00:00:00 2001
From: Didier Durand
Date: Fri, 19 Dec 2025 07:25:46 +0100
Subject: [PATCH 1/2] doc: fixing various typos

---
 .../samples/application_integration_agent/README.md     | 2 +-
 contributing/samples/multi_agent_seq_config/README.md   | 2 +-
 src/google/adk/cli/built_in_agents/tools/write_files.py | 2 +-
 src/google/adk/evaluation/eval_config.py                | 2 +-
 .../simulation/per_turn_user_simulator_quality_v1.py    | 2 +-
 src/google/adk/flows/llm_flows/contents.py              | 2 +-
 src/google/adk/memory/vertex_ai_rag_memory_service.py   | 2 +-
 src/google/adk/runners.py                               | 6 +++---
 src/google/adk/tools/spanner/settings.py                | 2 +-
 9 files changed, 11 insertions(+), 11 deletions(-)

diff --git a/contributing/samples/application_integration_agent/README.md b/contributing/samples/application_integration_agent/README.md
index 0e0a70c17c..961a65eb53 100644
--- a/contributing/samples/application_integration_agent/README.md
+++ b/contributing/samples/application_integration_agent/README.md
@@ -7,7 +7,7 @@ This sample demonstrates how to use the `ApplicationIntegrationToolset` within a
 ## Prerequisites
 
 1. **Set up Integration Connection:**
-   * You need an existing [Integration connection](https://cloud.google.com/integration-connectors/docs/overview) configured to interact with your Jira instance. Follow the [documentation](https://google.github.io/adk-docs/tools/google-cloud-tools/#use-integration-connectors) to provision the Integration Connector in Google Cloud and then use this [documentation](https://cloud.google.com/integration-connectors/docs/connectors/jiracloud/configure) to create an Jira connection. Note the `Connection Name`, `Project ID`, and `Location` of your connection.
+   * You need an existing [Integration connection](https://cloud.google.com/integration-connectors/docs/overview) configured to interact with your Jira instance. Follow the [documentation](https://google.github.io/adk-docs/tools/google-cloud-tools/#use-integration-connectors) to provision the Integration Connector in Google Cloud and then use this [documentation](https://cloud.google.com/integration-connectors/docs/connectors/jiracloud/configure) to create a Jira connection. Note the `Connection Name`, `Project ID`, and `Location` of your connection.
 *
 
 2. **Configure Environment Variables:**
diff --git a/contributing/samples/multi_agent_seq_config/README.md b/contributing/samples/multi_agent_seq_config/README.md
index a2cd462465..af0dcee2fc 100644
--- a/contributing/samples/multi_agent_seq_config/README.md
+++ b/contributing/samples/multi_agent_seq_config/README.md
@@ -6,7 +6,7 @@ The whole process is:
 
 1. An agent backed by a cheap and fast model to write initial version.
 2. An agent backed by a smarter and a little more expensive to review the code.
-3. An final agent backed by the smartest and slowest model to write the final revision.
+3. A final agent backed by the smartest and slowest model to write the final revision.
 
 Sample queries:
diff --git a/src/google/adk/cli/built_in_agents/tools/write_files.py b/src/google/adk/cli/built_in_agents/tools/write_files.py
index 8ade17c536..3b2fa74ba2 100644
--- a/src/google/adk/cli/built_in_agents/tools/write_files.py
+++ b/src/google/adk/cli/built_in_agents/tools/write_files.py
@@ -51,7 +51,7 @@ async def write_files(
       - file_size: size of written file in bytes
      - existed_before: bool indicating if file existed before write
      - backup_created: bool indicating if backup was created
-      - backup_path: path to backup file if created
+      - backup_path: path to back up file if created
      - error: error message if write failed for this file
      - successful_writes: number of files written successfully
      - total_files: total number of files requested
diff --git a/src/google/adk/evaluation/eval_config.py b/src/google/adk/evaluation/eval_config.py
index 13b2e92274..2c620ffcb6 100644
--- a/src/google/adk/evaluation/eval_config.py
+++ b/src/google/adk/evaluation/eval_config.py
@@ -53,7 +53,7 @@ class EvalConfig(BaseModel):
 In the sample below, `tool_trajectory_avg_score`, `response_match_score` and
 `final_response_match_v2` are the standard eval metric names, represented as
 keys in the dictionary. The values in the dictionary are the corresponding
-criterions. For the first two metrics, we use simple threshold as the criterion,
+criteria. For the first two metrics, we use simple threshold as the criterion,
 the third one uses `LlmAsAJudgeCriterion`.
 {
   "criteria": {
diff --git a/src/google/adk/evaluation/simulation/per_turn_user_simulator_quality_v1.py b/src/google/adk/evaluation/simulation/per_turn_user_simulator_quality_v1.py
index 5624bc0ec9..fb20d44758 100644
--- a/src/google/adk/evaluation/simulation/per_turn_user_simulator_quality_v1.py
+++ b/src/google/adk/evaluation/simulation/per_turn_user_simulator_quality_v1.py
@@ -62,7 +62,7 @@
 
 # Definition of Conversation History
 The Conversation History is the actual dialogue between the User Simulator and the Agent.
-The Conversation History may not be complete, but the exsisting dialogue should adhere to the Conversation Plan.
+The Conversation History may not be complete, but the existing dialogue should adhere to the Conversation Plan.
 The Conversation History may contain instances where the User Simulator troubleshoots an incorrect/inappropriate response from the Agent in order to enforce the Conversation Plan.
 The Conversation History is finished only when the User Simulator outputs `{stop_signal}` in its response. If this token is missing, the conversation between the User Simulator and the Agent has not finished, and more turns can be generated.
diff --git a/src/google/adk/flows/llm_flows/contents.py b/src/google/adk/flows/llm_flows/contents.py
index ce0df37e39..51e92ca6ba 100644
--- a/src/google/adk/flows/llm_flows/contents.py
+++ b/src/google/adk/flows/llm_flows/contents.py
@@ -220,7 +220,7 @@ def _rearrange_events_for_latest_function_response(
 
 
 def _is_part_invisible(p: types.Part) -> bool:
-  """A part is considered invisble if it's a thought, or has no visible content."""
+  """A part is considered invisible if it's a thought, or has no visible content."""
   return getattr(p, 'thought', False) or not (
       p.text
       or p.inline_data
diff --git a/src/google/adk/memory/vertex_ai_rag_memory_service.py b/src/google/adk/memory/vertex_ai_rag_memory_service.py
index 236bf4b5ed..7e9689d0fd 100644
--- a/src/google/adk/memory/vertex_ai_rag_memory_service.py
+++ b/src/google/adk/memory/vertex_ai_rag_memory_service.py
@@ -52,7 +52,7 @@ def __init__(
         or ``{rag_corpus_id}``
       similarity_top_k: The number of contexts to retrieve.
      vector_distance_threshold: Only returns contexts with vector distance
-        smaller than the threshold..
+        smaller than the threshold.
     """
     self._vertex_rag_store = types.VertexRagStore(
         rag_resources=[
diff --git a/src/google/adk/runners.py b/src/google/adk/runners.py
index 1773729719..c8a728de99 100644
--- a/src/google/adk/runners.py
+++ b/src/google/adk/runners.py
@@ -714,7 +714,7 @@ async def _exec_with_plugin(
     # identified by checking if the transcription event is partial. When
     # the next transcription event is not partial, it means the previous
     # transcription is finished. Then if there is any buffered function
-    # call event, we should append them after this finished(non-parital)
+    # call event, we should append them after this finished(non-partial)
     # transcription event.
     buffered_events: list[Event] = []
     is_transcribing: bool = False
@@ -730,7 +730,7 @@
           buffered_events.append(event)
           continue
         # Note for live/bidi: for audio response, it's considered as
-        # non-paritla event(event.partial=None)
+        # non-partial event(event.partial=None)
         # event.partial=False and event.partial=None are considered as
         # non-partial event; event.partial=True is considered as partial
         # event.
@@ -870,7 +870,7 @@ async def run_live(
     **Events Yielded to Callers:**
     * **Live Model Audio Events with Inline Data:** Events containing raw
       audio `Blob` data(`inline_data`).
-    * **Live Model Audio Events with File Data:** Both input and ouput audio
+    * **Live Model Audio Events with File Data:** Both input and output audio
       data are aggregated into a audio file saved into artifacts. The
       reference to the file is saved in the event as `file_data`.
     * **Usage Metadata:** Events containing token usage.
diff --git a/src/google/adk/tools/spanner/settings.py b/src/google/adk/tools/spanner/settings.py
index ae7f6371aa..1d3c5b7bab 100644
--- a/src/google/adk/tools/spanner/settings.py
+++ b/src/google/adk/tools/spanner/settings.py
@@ -96,7 +96,7 @@ class SpannerVectorStoreSettings(BaseModel):
   """Required. The vector store table columns to return in the vector
   similarity search result.
   By default, only the `content_column` value and the distance value are returned.
-  If sepecified, the list of selected columns and the distance value are returned.
+  If specified, the list of selected columns and the distance value are returned.
   For example, if `selected_columns` is ['col1', 'col2'], then the result
   will contain the values of 'col1' and 'col2' columns and the distance value.
""" From 087b42df546f88fd588755c126f9705e32c5f623 Mon Sep 17 00:00:00 2001 From: Didier Durand <2927957+didier-durand@users.noreply.github.com> Date: Fri, 19 Dec 2025 07:31:36 +0100 Subject: [PATCH 2/2] Update src/google/adk/cli/built_in_agents/tools/write_files.py Co-authored-by: gemini-code-assist[bot] <176961590+gemini-code-assist[bot]@users.noreply.github.com> --- src/google/adk/cli/built_in_agents/tools/write_files.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/google/adk/cli/built_in_agents/tools/write_files.py b/src/google/adk/cli/built_in_agents/tools/write_files.py index 3b2fa74ba2..8ade17c536 100644 --- a/src/google/adk/cli/built_in_agents/tools/write_files.py +++ b/src/google/adk/cli/built_in_agents/tools/write_files.py @@ -51,7 +51,7 @@ async def write_files( - file_size: size of written file in bytes - existed_before: bool indicating if file existed before write - backup_created: bool indicating if backup was created - - backup_path: path to back up file if created + - backup_path: path to backup file if created - error: error message if write failed for this file - successful_writes: number of files written successfully - total_files: total number of files requested