
Commit ba56ac6

feat(langchain): add respond decision to HITL middleware (#37095)
Extends `HumanInTheLoopMiddleware` with a new `respond` decision type for "ask user" style tools: tools whose real implementation is the human's response. The interrupt is raised with the tool call as usual; the resume payload becomes the body of a synthetic `ToolMessage` with `status="success"`, and the tool itself is not executed. This complements `reject` (which produces a synthetic `ToolMessage` with `status="error"`) by enabling the symmetric success path: a reviewer can answer on the tool's behalf without invoking it.

## Changes

- New `RespondDecision` `TypedDict` with a required `message: str` field; added to the `Decision` union.
- `"respond"` added to the `DecisionType` literal.
- `_process_decision` handles `"respond"` by emitting a `ToolMessage` with `status="success"` and preserving the original tool call on the `AIMessage` so provider-required tool-call/tool-message pairing is maintained.
- The `True` shortcut in `interrupt_on` now expands to `["approve", "edit", "reject", "respond"]`, so existing callers that opted into "all decisions" pick up the new capability without code changes.

The `reject` decision already permits a reviewer to inject arbitrary `ToolMessage` content, so `respond` extends the same trust model rather than introducing a new capability class.

## Example

```python
from langchain.agents.middleware import HumanInTheLoopMiddleware

middleware = HumanInTheLoopMiddleware(
    interrupt_on={"ask_user": {"allowed_decisions": ["respond"]}}
)

# Resume payload: {"decisions": [{"type": "respond", "message": "blue"}]}
# → synthetic ToolMessage(content="blue", status="success") for `ask_user`.
```

---

*Implementation drafted with AI-agent assistance.*

Co-authored-by: Claude Opus 4.7 (1M context) <noreply@anthropic.com>
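To make the pairing bullet above concrete, here is a minimal sketch of the message shape a `respond` decision produces; the values mirror the single-tool test further down and are illustrative, not code from this commit.

```python
# Illustrative only (values assumed, mirroring the single-tool test below):
# the original tool call stays on the AIMessage and the synthetic ToolMessage
# references the same tool_call_id, so provider pairing requirements hold.
from langchain_core.messages import AIMessage, ToolMessage

messages = [
    AIMessage(
        content="Let me ask the user.",
        tool_calls=[{"name": "ask_user", "args": {"question": "favorite color?"}, "id": "1"}],
    ),
    ToolMessage(content="blue", name="ask_user", tool_call_id="1", status="success"),
]
```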
1 parent b6b836a commit ba56ac6

2 files changed

Lines changed: 159 additions & 4 deletions


libs/langchain_v1/langchain/agents/middleware/human_in_the_loop.py

Lines changed: 29 additions & 4 deletions
@@ -39,7 +39,7 @@ class ActionRequest(TypedDict):
     """The description of the action to be reviewed."""
 
 
-DecisionType = Literal["approve", "edit", "reject"]
+DecisionType = Literal["approve", "edit", "reject", "respond"]
 
 
 class ReviewConfig(TypedDict):
@@ -95,7 +95,22 @@ class RejectDecision(TypedDict):
     """The message sent to the model explaining why the action was rejected."""
 
 
-Decision = ApproveDecision | EditDecision | RejectDecision
+class RespondDecision(TypedDict):
+    """Response when a human answers on behalf of the tool, skipping execution.
+
+    Used for "ask user" style tools whose real implementation is the human's
+    response. The tool is not executed; instead, a synthetic `ToolMessage` with
+    `status="success"` and the provided `message` is returned to the model.
+    """
+
+    type: Literal["respond"]
+    """The type of response when a human responds on behalf of the tool."""
+
+    message: str
+    """Content of the synthetic `ToolMessage` returned to the model."""
+
+
+Decision = ApproveDecision | EditDecision | RejectDecision | RespondDecision
 
 
 class HITLResponse(TypedDict):
@@ -180,7 +195,8 @@ def __init__(
 
             If a tool doesn't have an entry, it's auto-approved by default.
 
-            * `True` indicates all decisions are allowed: approve, edit, and reject.
+            * `True` indicates all decisions are allowed: approve, edit, reject,
+              and respond.
             * `False` indicates that the tool is auto-approved.
             * `InterruptOnConfig` indicates the specific decisions allowed for this
               tool.
@@ -200,7 +216,7 @@ def __init__(
             if isinstance(tool_config, bool):
                 if tool_config is True:
                     resolved_configs[tool_name] = InterruptOnConfig(
-                        allowed_decisions=["approve", "edit", "reject"]
+                        allowed_decisions=["approve", "edit", "reject", "respond"]
                     )
             elif tool_config.get("allowed_decisions"):
                 resolved_configs[tool_name] = tool_config
@@ -277,6 +293,15 @@ def _process_decision(
                 status="error",
             )
             return tool_call, tool_message
+        if decision["type"] == "respond" and "respond" in allowed_decisions:
+            # Skip tool execution; the human answers on behalf of the tool.
+            tool_message = ToolMessage(
+                content=decision["message"],
+                name=tool_call["name"],
+                tool_call_id=tool_call["id"],
+                status="success",
+            )
+            return tool_call, tool_message
         msg = (
             f"Unexpected human decision: {decision}. "
             f"Decision type '{decision.get('type')}' "

libs/langchain_v1/tests/unit_tests/agents/middleware/implementations/test_human_in_the_loop.py

Lines changed: 130 additions & 0 deletions
@@ -150,6 +150,136 @@ def mock_response(_: Any) -> dict[str, Any]:
         assert result["messages"][1].tool_call_id == "1"
 
 
+def test_human_in_the_loop_middleware_single_tool_respond() -> None:
+    """Test HumanInTheLoopMiddleware with `respond` decision producing a success ToolMessage."""
+    middleware = HumanInTheLoopMiddleware(
+        interrupt_on={"ask_user": {"allowed_decisions": ["respond"]}}
+    )
+
+    ai_message = AIMessage(
+        content="Let me ask the user.",
+        tool_calls=[{"name": "ask_user", "args": {"question": "favorite color?"}, "id": "1"}],
+    )
+    state = AgentState[Any](messages=[HumanMessage(content="Hello"), ai_message])
+
+    def mock_respond(_: Any) -> dict[str, Any]:
+        return {"decisions": [{"type": "respond", "message": "blue"}]}
+
+    with patch("langchain.agents.middleware.human_in_the_loop.interrupt", side_effect=mock_respond):
+        result = middleware.after_model(state, Runtime())
+        assert result is not None
+        assert "messages" in result
+        assert len(result["messages"]) == 2
+        assert isinstance(result["messages"][0], AIMessage)
+        # Tool call is preserved on the AI message (provider APIs require pairing).
+        assert len(result["messages"][0].tool_calls) == 1
+        assert result["messages"][0].tool_calls[0]["id"] == "1"
+
+        tool_message = result["messages"][1]
+        assert isinstance(tool_message, ToolMessage)
+        assert tool_message.content == "blue"
+        assert tool_message.name == "ask_user"
+        assert tool_message.tool_call_id == "1"
+        assert tool_message.status == "success"
+
+
+def test_human_in_the_loop_middleware_respond_disallowed() -> None:
+    """Test that `respond` raises when not in `allowed_decisions`."""
+    middleware = HumanInTheLoopMiddleware(
+        interrupt_on={"test_tool": {"allowed_decisions": ["approve", "edit", "reject"]}}
+    )
+
+    ai_message = AIMessage(
+        content="I'll help you",
+        tool_calls=[{"name": "test_tool", "args": {"input": "test"}, "id": "1"}],
+    )
+    state = AgentState[Any](messages=[HumanMessage(content="Hello"), ai_message])
+
+    def mock_respond(_: Any) -> dict[str, Any]:
+        return {"decisions": [{"type": "respond", "message": "synthetic"}]}
+
+    with (
+        patch("langchain.agents.middleware.human_in_the_loop.interrupt", side_effect=mock_respond),
+        pytest.raises(
+            ValueError,
+            match=re.escape(
+                "Decision type 'respond' is not allowed for tool 'test_tool'. "
+                "Expected one of ['approve', 'edit', 'reject'] based on the tool's "
+                "configuration."
+            ),
+        ),
+    ):
+        middleware.after_model(state, Runtime())
+
+
+def test_human_in_the_loop_middleware_mixed_with_respond() -> None:
+    """Test mixed decisions: one tool approved, one tool answered via `respond`."""
+    middleware = HumanInTheLoopMiddleware(
+        interrupt_on={
+            "get_forecast": {"allowed_decisions": ["approve"]},
+            "ask_user": {"allowed_decisions": ["respond"]},
+        }
+    )
+
+    ai_message = AIMessage(
+        content="Two things",
+        tool_calls=[
+            {"name": "get_forecast", "args": {"location": "SF"}, "id": "1"},
+            {"name": "ask_user", "args": {"question": "favorite color?"}, "id": "2"},
+        ],
+    )
+    state = AgentState[Any](messages=[HumanMessage(content="Hi"), ai_message])
+
+    def mock_mixed(_: Any) -> dict[str, Any]:
+        return {
+            "decisions": [
+                {"type": "approve"},
+                {"type": "respond", "message": "blue"},
+            ]
+        }
+
+    with patch("langchain.agents.middleware.human_in_the_loop.interrupt", side_effect=mock_mixed):
+        result = middleware.after_model(state, Runtime())
+        assert result is not None
+        # AI message + 1 synthetic ToolMessage for the respond decision.
+        assert len(result["messages"]) == 2
+
+        updated_ai_message = result["messages"][0]
+        assert len(updated_ai_message.tool_calls) == 2
+        assert updated_ai_message.tool_calls[0]["name"] == "get_forecast"
+        assert updated_ai_message.tool_calls[1]["name"] == "ask_user"
+
+        tool_message = result["messages"][1]
+        assert isinstance(tool_message, ToolMessage)
+        assert tool_message.content == "blue"
+        assert tool_message.name == "ask_user"
+        assert tool_message.tool_call_id == "2"
+        assert tool_message.status == "success"
+
+
+def test_human_in_the_loop_middleware_true_allows_respond() -> None:
+    """Test that the `True` shortcut permits `respond` decisions."""
+    middleware = HumanInTheLoopMiddleware(interrupt_on={"ask_user": True})
+
+    ai_message = AIMessage(
+        content="Asking",
+        tool_calls=[{"name": "ask_user", "args": {"q": "?"}, "id": "1"}],
+    )
+    state = AgentState[Any](messages=[HumanMessage(content="Hi"), ai_message])
+
+    with patch(
+        "langchain.agents.middleware.human_in_the_loop.interrupt",
+        return_value={"decisions": [{"type": "respond", "message": "answer"}]},
+    ):
+        result = middleware.after_model(state, Runtime())
+        assert result is not None
+        assert len(result["messages"]) == 2
+        tool_message = result["messages"][1]
+        assert isinstance(tool_message, ToolMessage)
+        assert tool_message.content == "answer"
+        assert tool_message.status == "success"
+
+
 def test_human_in_the_loop_middleware_multiple_tools_mixed_responses() -> None:
     """Test HumanInTheLoopMiddleware with multiple tools and mixed response types."""
     middleware = HumanInTheLoopMiddleware(
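The tests above exercise the middleware in isolation by mocking `interrupt`. For broader context, here is a hedged sketch of how a caller might drive the interrupt/resume cycle at the agent level; the `ask_user` tool, the model identifier, and the `create_agent`/`InMemorySaver`/`Command` wiring are assumptions about the surrounding LangGraph setup, not part of this commit.

```python
# Hedged, illustrative sketch only: the ask_user tool, model string, and the
# create_agent / InMemorySaver / Command wiring are assumed, not part of this commit.
from langchain.agents import create_agent
from langchain.agents.middleware import HumanInTheLoopMiddleware
from langchain_core.tools import tool
from langgraph.checkpoint.memory import InMemorySaver
from langgraph.types import Command


@tool
def ask_user(question: str) -> str:
    """Ask the human a question."""
    # Never executed: the middleware interrupts first, and a `respond` decision
    # supplies the ToolMessage content on the tool's behalf.
    raise NotImplementedError


agent = create_agent(
    model="anthropic:claude-sonnet-4-5",  # assumed model identifier
    tools=[ask_user],
    middleware=[
        HumanInTheLoopMiddleware(
            interrupt_on={"ask_user": {"allowed_decisions": ["respond"]}}
        )
    ],
    checkpointer=InMemorySaver(),
)
config = {"configurable": {"thread_id": "demo"}}

# First invocation pauses at the HITL interrupt raised for the `ask_user` tool call.
agent.invoke(
    {"messages": [{"role": "user", "content": "What's my favorite color?"}]},
    config,
)

# Resuming with a `respond` decision injects ToolMessage(content="blue",
# status="success") for that call and skips tool execution entirely.
agent.invoke(
    Command(resume={"decisions": [{"type": "respond", "message": "blue"}]}),
    config,
)
```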
