diff --git a/src/google/adk/flows/llm_flows/contents.py b/src/google/adk/flows/llm_flows/contents.py
index ce0df37e39..79698bd8bd 100644
--- a/src/google/adk/flows/llm_flows/contents.py
+++ b/src/google/adk/flows/llm_flows/contents.py
@@ -227,6 +227,8 @@ def _is_part_invisible(p: types.Part) -> bool:
       or p.file_data
       or p.function_call
       or p.function_response
+      or p.executable_code
+      or p.code_execution_result
   )
 
 
@@ -452,7 +454,49 @@ def _get_contents(
     if content:
       remove_client_function_call_id(content)
       contents.append(content)
-  return contents
+
+  # Merge consecutive model contents when they contain code execution parts.
+  # This is necessary for code execution where executable_code and
+  # code_execution_result are separate events but need to be in the same
+  # Content for the LLM to understand the execution flow.
+  merged_contents = []
+  for content in contents:
+    if (
+        merged_contents
+        and merged_contents[-1].role == content.role
+        and content.role == 'model'
+        and _should_merge_code_execution_content(merged_contents[-1], content)
+    ):
+      # Merge parts into the previous content
+      merged_contents[-1].parts.extend(content.parts)
+    else:
+      merged_contents.append(content)
+
+  return merged_contents
+
+
+def _content_has_code_execution_part(content: types.Content) -> bool:
+  """Check if a content has executable_code or code_execution_result parts."""
+  if not content.parts:
+    return False
+  for part in content.parts:
+    if part.executable_code or part.code_execution_result:
+      return True
+  return False
+
+
+def _should_merge_code_execution_content(
+    prev_content: types.Content, curr_content: types.Content
+) -> bool:
+  """Determine if two consecutive model contents should be merged.
+
+  Only merge when both contents contain code execution parts
+  (executable_code or code_execution_result). This keeps executable_code
+  and its code_execution_result in one Content without merging unrelated
+  consecutive model messages.
+  """
+  return _content_has_code_execution_part(
+      prev_content
+  ) and _content_has_code_execution_part(curr_content)
 
 
 def _get_current_turn_contents(
diff --git a/tests/unittests/flows/llm_flows/test_contents.py b/tests/unittests/flows/llm_flows/test_contents.py
index bafaebed39..9543f4d90f 100644
--- a/tests/unittests/flows/llm_flows/test_contents.py
+++ b/tests/unittests/flows/llm_flows/test_contents.py
@@ -609,3 +609,76 @@ async def test_events_with_empty_content_are_skipped():
           role="user",
       ),
   ]
+
+
+@pytest.mark.asyncio
+async def test_events_with_code_execution_are_included():
+  """Test that events with executable_code and code_execution_result are included."""
+  agent = Agent(model="gemini-2.5-flash", name="test_agent")
+  llm_request = LlmRequest(model="gemini-2.5-flash")
+  invocation_context = await testing_utils.create_invocation_context(
+      agent=agent
+  )
+
+  events = [
+      Event(
+          invocation_id="inv1",
+          author="user",
+          content=types.UserContent("Run some code"),
+      ),
+      # Event with executable_code part
+      Event(
+          invocation_id="inv2",
+          author="test_agent",
+          content=types.Content(
+              parts=[
+                  types.Part(
+                      executable_code=types.ExecutableCode(
+                          code="print('hello')",
+                          language="PYTHON",
+                      )
+                  )
+              ],
+              role="model",
+          ),
+      ),
+      # Event with code_execution_result part
+      Event(
+          invocation_id="inv3",
+          author="test_agent",
+          content=types.Content(
+              parts=[
+                  types.Part(
+                      code_execution_result=types.CodeExecutionResult(
+                          outcome="OUTCOME_OK",
+                          output="hello",
+                      )
+                  )
+              ],
+              role="model",
+          ),
+      ),
+      Event(
+          invocation_id="inv4",
+          author="user",
+          content=types.UserContent("What was the result?"),
+      ),
+  ]
+  invocation_context.session.events = events
+
+  # Process the request
+  async for _ in contents.request_processor.run_async(
+      invocation_context, llm_request
+  ):
+    pass
+
+  # Verify events with executable_code and code_execution_result are included
+  # Consecutive model contents are merged into one content with multiple parts
+  assert len(llm_request.contents) == 3
+  assert llm_request.contents[0] == types.UserContent("Run some code")
+  # The merged model content should have both executable_code and code_execution_result
+  assert llm_request.contents[1].role == "model"
+  assert len(llm_request.contents[1].parts) == 2
+  assert llm_request.contents[1].parts[0].executable_code is not None
+  assert llm_request.contents[1].parts[1].code_execution_result is not None
+  assert llm_request.contents[2] == types.UserContent("What was the result?")