46 changes: 45 additions & 1 deletion src/google/adk/flows/llm_flows/contents.py
@@ -227,6 +227,8 @@ def _is_part_invisible(p: types.Part) -> bool:
      or p.file_data
      or p.function_call
      or p.function_response
      or p.executable_code
      or p.code_execution_result
  )


@@ -452,7 +454,49 @@ def _get_contents(
    if content:
      remove_client_function_call_id(content)
      contents.append(content)
  return contents

  # Merge consecutive model contents when they contain code execution parts.
  # This is necessary for code execution where executable_code and
  # code_execution_result are separate events but need to be in the same
  # Content for the LLM to understand the execution flow.
  merged_contents = []
  for content in contents:
    if (
        merged_contents
        and merged_contents[-1].role == content.role
        and content.role == 'model'
        and _should_merge_code_execution_content(merged_contents[-1], content)
    ):
      # Merge parts into the previous content
      merged_contents[-1].parts.extend(content.parts)
    else:
      merged_contents.append(content)

  return merged_contents


def _content_has_code_execution_part(content: types.Content) -> bool:
  """Check if a content has executable_code or code_execution_result parts."""
  if not content.parts:
    return False
  for part in content.parts:
    if part.executable_code or part.code_execution_result:
      return True
  return False


def _should_merge_code_execution_content(
    prev_content: types.Content, curr_content: types.Content
) -> bool:
  """Determine if two consecutive model contents should be merged.

  Merge when either content carries an executable_code or
  code_execution_result part, so that the generated code and its execution
  result end up in the same Content. Consecutive model contents without any
  code execution parts are left unmerged.
  """
  return _content_has_code_execution_part(
      prev_content
  ) or _content_has_code_execution_part(curr_content)


def _get_current_turn_contents(
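As a rough standalone sketch of the new merge behavior: merge_model_contents and _has_code_part below are hypothetical stand-ins for the merge loop and helpers added above, and only google.genai.types is assumed to be available.

from google.genai import types


def _has_code_part(content: types.Content) -> bool:
  # A content counts as code-execution-related if any part carries either field.
  return bool(content.parts) and any(
      p.executable_code or p.code_execution_result for p in content.parts
  )


def merge_model_contents(contents: list[types.Content]) -> list[types.Content]:
  # Collapse consecutive 'model' contents when either side has code parts.
  merged: list[types.Content] = []
  for content in contents:
    if (
        merged
        and merged[-1].role == content.role == 'model'
        and (_has_code_part(merged[-1]) or _has_code_part(content))
    ):
      merged[-1].parts.extend(content.parts)
    else:
      merged.append(content)
  return merged


history = [
    types.Content(role='user', parts=[types.Part(text='Run some code')]),
    types.Content(
        role='model',
        parts=[
            types.Part(
                executable_code=types.ExecutableCode(
                    code="print('hello')", language='PYTHON'
                )
            )
        ],
    ),
    types.Content(
        role='model',
        parts=[
            types.Part(
                code_execution_result=types.CodeExecutionResult(
                    outcome='OUTCOME_OK', output='hello'
                )
            )
        ],
    ),
]
merged = merge_model_contents(history)
assert len(merged) == 2  # the two model contents collapse into one
assert len(merged[1].parts) == 2  # code and its result share one model Content

The point of the merge is that the executable_code part and its code_execution_result end up in a single model Content, which is what the comment in _get_contents describes.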
73 changes: 73 additions & 0 deletions tests/unittests/flows/llm_flows/test_contents.py
@@ -609,3 +609,76 @@ async def test_events_with_empty_content_are_skipped():
role="user",
),
]


@pytest.mark.asyncio
async def test_events_with_code_execution_are_included():
  """Test that events with executable_code and code_execution_result are included."""
  agent = Agent(model="gemini-2.5-flash", name="test_agent")
  llm_request = LlmRequest(model="gemini-2.5-flash")
  invocation_context = await testing_utils.create_invocation_context(
      agent=agent
  )

  events = [
      Event(
          invocation_id="inv1",
          author="user",
          content=types.UserContent("Run some code"),
      ),
      # Event with executable_code part
      Event(
          invocation_id="inv2",
          author="test_agent",
          content=types.Content(
              parts=[
                  types.Part(
                      executable_code=types.ExecutableCode(
                          code="print('hello')",
                          language="PYTHON",
                      )
                  )
              ],
              role="model",
          ),
      ),
      # Event with code_execution_result part
      Event(
          invocation_id="inv3",
          author="test_agent",
          content=types.Content(
              parts=[
                  types.Part(
                      code_execution_result=types.CodeExecutionResult(
                          outcome="OUTCOME_OK",
                          output="hello",
                      )
                  )
              ],
              role="model",
          ),
      ),
      Event(
          invocation_id="inv4",
          author="user",
          content=types.UserContent("What was the result?"),
      ),
  ]
  invocation_context.session.events = events

  # Process the request
  async for _ in contents.request_processor.run_async(
      invocation_context, llm_request
  ):
    pass

  # Verify events with executable_code and code_execution_result are included
  # Consecutive model contents are merged into one content with multiple parts
  assert len(llm_request.contents) == 3
  assert llm_request.contents[0] == types.UserContent("Run some code")
  # The merged model content should have both executable_code and
  # code_execution_result
  assert llm_request.contents[1].role == "model"
  assert len(llm_request.contents[1].parts) == 2
  assert llm_request.contents[1].parts[0].executable_code is not None
  assert llm_request.contents[1].parts[1].code_execution_result is not None
  assert llm_request.contents[2] == types.UserContent("What was the result?")
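For local verification, the new case can be selected on its own via pytest's node-id syntax (assuming the repository's test dependencies are installed):

pytest tests/unittests/flows/llm_flows/test_contents.py::test_events_with_code_execution_are_included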