From 8bdf3566a8422810d1953ddbe6b8e112086b2511 Mon Sep 17 00:00:00 2001 From: Omar Khattab Date: Sun, 31 Aug 2025 14:22:59 -0400 Subject: [PATCH] Reduce max_tokens requirement for OpenAI reasoning models to 15k --- dspy/clients/lm.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/dspy/clients/lm.py b/dspy/clients/lm.py index 02a532fda8..e0ff954da4 100644 --- a/dspy/clients/lm.py +++ b/dspy/clients/lm.py @@ -86,10 +86,10 @@ def __init__( model_pattern = re.match(r"^(?:o[1345]|gpt-5)(?:-(?:mini|nano))?", model_family) if model_pattern: - if max_tokens < 20000 or temperature != 1.0: + if max_tokens < 15000 or temperature != 1.0: raise ValueError( - "OpenAI's reasoning models require passing temperature=1.0 and max_tokens >= 20000 to " - "`dspy.LM(...)`, e.g., dspy.LM('openai/gpt-5', temperature=1.0, max_tokens=20000)" + "OpenAI's reasoning models require passing temperature=1.0 and max_tokens >= 15000 to " + "`dspy.LM(...)`, e.g., dspy.LM('openai/gpt-5', temperature=1.0, max_tokens=15000)" ) self.kwargs = dict(temperature=temperature, max_completion_tokens=max_tokens, **kwargs) if self.kwargs.get("rollout_id") is None: