1 """Direct tests for dedicated assistant response route handlers."""
2
3 from __future__ import annotations
4
5 from pathlib import Path
6
7 import pytest
8
9 from loader.agent.loop import Agent, AgentConfig
10 from loader.llm.base import ToolCall
11 from loader.runtime.conversation import ConversationRuntime
12 from loader.runtime.phases import TurnPhase
13 from loader.runtime.repair import ToolCallAnalysis
14 from loader.runtime.response_route_types import ResponseRouteAction, ResponseRouteContext
15 from loader.runtime.turn_completion import TurnCompletionAction, TurnCompletionDecision
16 from tests.helpers.runtime_harness import ScriptedBackend
17
18
def non_streaming_config() -> AgentConfig:
    """Build the non-streaming AgentConfig shared by these route-handler tests."""
    settings = {"auto_context": False, "stream": False, "max_iterations": 8}
    return AgentConfig(**settings)
24
async def _prepare_context(
    runtime: ConversationRuntime,
    *,
    task: str,
    continuation_count: int = 0,
    consecutive_errors: int = 0,
) -> tuple[ResponseRouteContext, list]:
    """Run turn preparation and enter the ASSISTANT phase for *task*.

    Returns the ResponseRouteContext describing the prepared turn together
    with the mutable list of events captured during preparation, so tests
    can keep appending to it while exercising a handler.
    """
    captured: list = []

    async def record(event) -> None:
        captured.append(event)

    prepared = await runtime.turn_preparation.prepare(
        task=task,
        emit=record,
        requested_mode="execute",
        original_task=None,
        on_user_question=None,
    )
    await runtime.phase_tracker.enter(
        TurnPhase.ASSISTANT,
        record,
        detail="Requesting assistant response",
        reason_code="request_assistant_response",
    )
    route_context = ResponseRouteContext(
        task=prepared.task,
        effective_task=prepared.effective_task,
        iterations=1,
        max_iterations=runtime.context.config.max_iterations,
        actions_taken=[],
        continuation_count=continuation_count,
        consecutive_errors=consecutive_errors,
        dod=prepared.definition_of_done,
        summary=prepared.summary,
        executor=prepared.executor,
        rollback_plan=prepared.rollback_plan,
    )
    return route_context, captured
64
65
@pytest.mark.asyncio
async def test_final_answer_route_handler_completes_response(
    temp_dir: Path,
) -> None:
    """A final-answer analysis should COMPLETE the turn and record the response."""
    runtime = ConversationRuntime(
        Agent(
            backend=ScriptedBackend(completions=[]),
            config=non_streaming_config(),
            project_root=temp_dir,
        )
    )
    context, events = await _prepare_context(
        runtime,
        task="Explain whether final answers route correctly.",
        continuation_count=1,
        consecutive_errors=2,
    )

    async def capture(event) -> None:
        events.append(event)

    analysis = ToolCallAnalysis(
        content="All set.",
        response_content="Final Answer: All set.",
        is_final_answer=True,
        final_response="All set.",
    )
    decision = await runtime.response_router.final_answer_handler.handle(
        analysis=analysis,
        context=context,
        emit=capture,
    )

    assert decision.action == ResponseRouteAction.COMPLETE
    assert context.summary.final_response == "All set."
    assert context.summary.assistant_messages[-1].content == "Final Answer: All set."
    assert any(
        event.type == "response" and event.content == "All set." for event in events
    )
101
102
@pytest.mark.asyncio
async def test_tool_batch_route_handler_finalizes_halted_batch(
    temp_dir: Path,
) -> None:
    """A halted tool batch should FINALIZE with the halt reason code."""
    config = non_streaming_config()
    config.auto_recover = False
    runtime = ConversationRuntime(
        Agent(
            backend=ScriptedBackend(completions=[]),
            config=config,
            project_root=temp_dir,
        )
    )
    context, events = await _prepare_context(
        runtime,
        task="Inspect the missing file and recover honestly.",
        consecutive_errors=2,
    )

    async def capture(event) -> None:
        events.append(event)

    read_call = ToolCall(
        id="read-missing",
        name="read",
        arguments={"file_path": "missing.md"},
    )
    analysis = ToolCallAnalysis(
        content="I'll inspect the file first.",
        response_content="I'll inspect the file first.",
        tool_calls=[read_call],
        tool_source="native",
    )
    decision = await runtime.response_router.tool_batch_handler.handle(
        analysis=analysis,
        pending_tool_calls_seen=set(),
        context=context,
        emit=capture,
        on_confirmation=None,
        on_user_question=None,
        emit_confirmation=runtime._emit_confirmation(capture),
    )

    assert decision.action == ResponseRouteAction.FINALIZE
    assert decision.finalize_reason_code == "tool_batch_halted"
    assert decision.new_actions_taken == ["read: {'file_path': 'missing.md'}"]
    assert any(
        event.type == "tool_call" and event.tool_name == "read" for event in events
    )
149
150
@pytest.mark.asyncio
async def test_text_completion_route_handler_maps_continue_decision(
    temp_dir: Path,
) -> None:
    """A CONTINUE turn-completion decision should map onto the route decision."""
    runtime = ConversationRuntime(
        Agent(
            backend=ScriptedBackend(completions=[]),
            config=non_streaming_config(),
            project_root=temp_dir,
        )
    )
    context, events = await _prepare_context(
        runtime,
        task="Continue the investigation.",
        continuation_count=2,
        consecutive_errors=1,
    )

    async def capture(event) -> None:
        events.append(event)

    async def fake_handle_text_response(**kwargs) -> TurnCompletionDecision:
        # Stub turn completion so the handler maps a known decision.
        return TurnCompletionDecision(
            action=TurnCompletionAction.CONTINUE,
            continuation_count=3,
        )

    handler = runtime.response_router.text_completion_handler
    handler.turn_completion.handle_text_response = (  # type: ignore[method-assign]
        fake_handle_text_response
    )

    decision = await handler.handle(
        analysis=ToolCallAnalysis(
            content="I looked into it.",
            response_content="I looked into it.",
        ),
        context=context,
        emit=capture,
    )

    assert decision.action == ResponseRouteAction.CONTINUE
    assert decision.continuation_count == 3
    assert decision.consecutive_errors == 1