@@ -83,6 +83,7 @@ class ApiContext:
     prompt: str
     files: List[InputFile]
     tools: List[Dict]
+    strict: bool
     temperature: float
     max_tokens: int
     detail: Optional[str] = None
@@ -99,6 +100,7 @@ def __init__(self, session, index, name, func, args, prompt, files, tools):
         self.prompt = prompt
         self.files = files
         self.tools = tools
+        self.strict = args.strict
         self.detail = args.detail
         self.temperature = args.temperature
         self.max_tokens = args.max_tokens
@@ -276,14 +278,19 @@ async def openai_chat(ctx: ApiContext, path: str = "/chat/completions") -> ApiResponse:
     url, headers = make_openai_url_and_headers(ctx, path)
     kwargs = {"messages": make_openai_messages(ctx)}
     if ctx.tools:
-        kwargs["tools"] = ctx.tools
+        tools = ctx.tools[:]
+        if ctx.strict:
+            for t in tools:
+                t["function"]["strict"] = True
+                t["function"]["parameters"]["additionalProperties"] = False
+        kwargs["tools"] = tools
         kwargs["tool_choice"] = "required"
     if ctx.peft:
         kwargs["peft"] = ctx.peft
     # Some providers require opt-in for stream stats, but some providers don't like this opt-in.
-    # Azure, ovh.net, and vLLM don't support stream stats at the moment.
+    # Regardless of opt-in, Azure and ovh.net don't return stream stats at the moment.
     # See https://github.com/Azure/azure-rest-api-specs/issues/25062
-    if not any(p in ctx.name for p in ["azure", "databricks", "fireworks", "ultravox"]):
+    if not any(p in ctx.name for p in ["azure", "databricks", "fireworks"]):
         kwargs["stream_options"] = {"include_usage": True}
     data = make_openai_chat_body(ctx, **kwargs)
     return await post(ctx, url, headers, data, openai_chunk_gen)