Commit edcd8d0

Merge pull request #1318 from trheyi/main
Refactor connector settings to model capabilities
2 parents 8c06ddf + 033d94d

11 files changed: +481 −64 lines changed

agent/assistant/agent.go

Lines changed: 11 additions & 11 deletions
@@ -154,46 +154,46 @@ func (ast *Assistant) getConnectorCapabilities(connectorID string) *context.Mode
         Streaming: &falseVal,
     }
 
-    // Get connector setting from global settings
-    setting, exists := connectorSettings[connectorID]
+    // Get model capabilities from global configuration
+    modelCaps, exists := modelCapabilities[connectorID]
     if !exists {
-        // Return default capabilities if connector not found in settings
+        // Return default capabilities if model not found in configuration
         return capabilities
     }
 
-    // Update capabilities based on connector settings
-    if setting.Vision {
+    // Update capabilities based on model configuration
+    if modelCaps.Vision {
         v := true
         capabilities.Vision = &v
     }
 
     // Handle both Tools (deprecated) and ToolCalls
-    if setting.ToolCalls || setting.Tools {
+    if modelCaps.ToolCalls || modelCaps.Tools {
         v := true
         capabilities.ToolCalls = &v
     }
 
-    if setting.Audio {
+    if modelCaps.Audio {
         v := true
         capabilities.Audio = &v
     }
 
-    if setting.Reasoning {
+    if modelCaps.Reasoning {
         v := true
         capabilities.Reasoning = &v
     }
 
-    if setting.Streaming {
+    if modelCaps.Streaming {
         v := true
         capabilities.Streaming = &v
     }
 
-    if setting.JSON {
+    if modelCaps.JSON {
         v := true
         capabilities.JSON = &v
     }
 
-    if setting.Multimodal {
+    if modelCaps.Multimodal {
         v := true
         capabilities.Multimodal = &v
     }

agent/assistant/api.go

Lines changed: 3 additions & 3 deletions
@@ -770,7 +770,7 @@ func (ast *Assistant) withOptions(options map[string]interface{}) map[string]int
 
     // Add tool_calls
     if ast.Tools != nil && ast.Tools.Tools != nil && len(ast.Tools.Tools) > 0 {
-        if settings, has := connectorSettings[ast.Connector]; has && settings.Tools {
+        if capabilities, has := modelCapabilities[ast.Connector]; has && capabilities.Tools {
             options["tools"] = ast.Tools.Tools
             if options["tool_choice"] == nil {
                 options["tool_choice"] = "auto"
@@ -794,8 +794,8 @@ func (ast *Assistant) withPrompts(messages []chatMessage.Message) []chatMessage.
 
     // Add tool_calls
     if ast.Tools != nil && ast.Tools.Tools != nil && len(ast.Tools.Tools) > 0 {
-        settings, has := connectorSettings[ast.Connector]
-        if !has || !settings.Tools {
+        capabilities, has := modelCapabilities[ast.Connector]
+        if !has || !capabilities.Tools {
             // Convert store tools to runtime tools if not already done
             if ast.runtimeTools == nil {
                 runtimeTools, err := ToRuntimeTools(ast.Tools.Tools)

agent/assistant/load.go

Lines changed: 4 additions & 4 deletions
@@ -27,7 +27,7 @@ import (
 var loaded = NewCache(200) // 200 is the default capacity
 var storage store.Store = nil
 var search interface{} = nil
-var connectorSettings map[string]ConnectorSetting = map[string]ConnectorSetting{}
+var modelCapabilities map[string]ModelCapabilities = map[string]ModelCapabilities{}
 var vision *agentvision.Vision = nil
 var defaultConnector string = "" // default connector
 var globalUses *context.Uses = nil // global uses configuration from agent.yml
@@ -137,9 +137,9 @@ func SetVision(v *agentvision.Vision) {
     vision = v
 }
 
-// SetConnectorSettings set the connector settings
-func SetConnectorSettings(settings map[string]ConnectorSetting) {
-    connectorSettings = settings
+// SetModelCapabilities set the model capabilities configuration
+func SetModelCapabilities(capabilities map[string]ModelCapabilities) {
+    modelCapabilities = capabilities
 }
 
 // SetConnector set the connector
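
The sketch below shows how the renamed hook might be wired up at boot. It is illustrative only: the import path, package alias, model keys, and capability values are assumptions, not part of this commit.

package main

import "github.com/yaoapp/yao/agent/assistant" // import path assumed for this sketch

func main() {
    // Register per-model capabilities once at startup; getConnectorCapabilities
    // and withOptions read this map on every request afterwards.
    assistant.SetModelCapabilities(map[string]assistant.ModelCapabilities{
        "gpt-4o":      {Vision: true, ToolCalls: true, Streaming: true, JSON: true, Multimodal: true},
        "deepseek-r1": {Reasoning: true, Streaming: true},
    })
}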

agent/assistant/types.go

Lines changed: 3 additions & 3 deletions
@@ -108,9 +108,9 @@ type Assistant struct {
     runtimeTools []Tool // Converted tools for business logic (OpenAI format)
 }
 
-// ConnectorSetting the connector setting
-// Defines the capabilities of a connector/model
-type ConnectorSetting struct {
+// ModelCapabilities defines the capabilities of a language model
+// This configuration is loaded from agent/models.yml
+type ModelCapabilities struct {
     Vision    bool `json:"vision,omitempty" yaml:"vision,omitempty"`         // Supports vision/image input
     Tools     bool `json:"tools,omitempty" yaml:"tools,omitempty"`           // Supports tool/function calling (deprecated, use ToolCalls)
     ToolCalls bool `json:"tool_calls,omitempty" yaml:"tool_calls,omitempty"` // Supports tool/function calling
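
Since the struct now carries yaml tags and is described as loaded from agent/models.yml, a minimal standalone sketch of that loading step follows. The YAML snippet and the use of gopkg.in/yaml.v3 are assumptions; only the struct fields and tags come from the diff above.

package main

import (
    "fmt"

    "gopkg.in/yaml.v3"
)

// Trimmed copy of the new ModelCapabilities struct, keeping only the fields shown above.
type ModelCapabilities struct {
    Vision    bool `yaml:"vision,omitempty"`
    Tools     bool `yaml:"tools,omitempty"`
    ToolCalls bool `yaml:"tool_calls,omitempty"`
}

func main() {
    // Hypothetical entry for one model in agent/models.yml.
    entry := []byte("vision: true\ntool_calls: true\n")

    var caps ModelCapabilities
    if err := yaml.Unmarshal(entry, &caps); err != nil {
        panic(err)
    }
    fmt.Printf("%+v\n", caps) // {Vision:true Tools:false ToolCalls:true}
}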

agent/context/openapi.go

Lines changed: 6 additions & 7 deletions
@@ -122,13 +122,12 @@ func GetAssistantID(c *gin.Context, req *CompletionRequest) (string, error) {
122122
}
123123

124124
if model != "" {
125-
// Split by "-" and get the last field
126-
parts := strings.Split(model, "-")
127-
lastField := strings.TrimSpace(parts[len(parts)-1])
128-
129-
// Check if it has yao_ prefix
130-
if strings.HasPrefix(lastField, "yao_") {
131-
assistantID := strings.TrimPrefix(lastField, "yao_")
125+
// Parse model ID using the same logic as ParseModelID
126+
// Expected format: [prefix-]assistantName-model-yao_assistantID
127+
// Find the last occurrence of "-yao_"
128+
parts := strings.Split(model, "-yao_")
129+
if len(parts) >= 2 {
130+
assistantID := parts[len(parts)-1]
132131
if assistantID != "" {
133132
return assistantID, nil
134133
}
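
The new lookup anchors on the literal "-yao_" marker instead of the last "-", so an assistant ID that itself contains dashes survives the split. A standalone sketch of that behaviour (the helper name and sample IDs are ours, not the project's):

package main

import (
    "fmt"
    "strings"
)

// extractAssistantID mirrors the new logic in GetAssistantID: split on the
// last "-yao_" separator and return whatever follows it.
func extractAssistantID(model string) (string, bool) {
    parts := strings.Split(model, "-yao_")
    if len(parts) >= 2 {
        if id := parts[len(parts)-1]; id != "" {
            return id, true
        }
    }
    return "", false
}

func main() {
    fmt.Println(extractAssistantID("team-helper-gpt-4o-yao_abc123")) // abc123 true
    fmt.Println(extractAssistantID("gpt-4o"))                        //  false
}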

agent/context/types_llm.go

Lines changed: 8 additions & 7 deletions
@@ -12,13 +12,14 @@ type Uses struct {
 // ModelCapabilities defines the capabilities of a language model
 // Used by LLM to select appropriate provider and validate requests
 type ModelCapabilities struct {
-    Vision     *bool `json:"vision,omitempty"`     // Supports vision/image input
-    ToolCalls  *bool `json:"tool_calls,omitempty"` // Supports tool/function calling
-    Audio      *bool `json:"audio,omitempty"`      // Supports audio input/output
-    Reasoning  *bool `json:"reasoning,omitempty"`  // Supports reasoning/thinking mode (o1, DeepSeek R1)
-    Streaming  *bool `json:"streaming,omitempty"`  // Supports streaming responses
-    JSON       *bool `json:"json,omitempty"`       // Supports JSON mode
-    Multimodal *bool `json:"multimodal,omitempty"` // Supports multimodal input (text + images + audio)
+    Vision                *bool `json:"vision,omitempty"`                 // Supports vision/image input
+    ToolCalls             *bool `json:"tool_calls,omitempty"`             // Supports tool/function calling
+    Audio                 *bool `json:"audio,omitempty"`                  // Supports audio input/output
+    Reasoning             *bool `json:"reasoning,omitempty"`              // Supports reasoning/thinking mode (o1, DeepSeek R1)
+    Streaming             *bool `json:"streaming,omitempty"`              // Supports streaming responses
+    JSON                  *bool `json:"json,omitempty"`                   // Supports JSON mode
+    Multimodal            *bool `json:"multimodal,omitempty"`             // Supports multimodal input (text + images + audio)
+    TemperatureAdjustable *bool `json:"temperature_adjustable,omitempty"` // Supports temperature adjustment (reasoning models typically don't)
 }
 
 // CompletionOptions the completion request options
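
All capability fields stay *bool so that "not specified" can be told apart from "explicitly false": nil falls through to the format-based defaults in the reasoning adapter, while an explicit value wins. A self-contained sketch of that tri-state reading for the new TemperatureAdjustable flag (the stand-in struct below is ours, not the project's):

package main

import "fmt"

// Stand-in for context.ModelCapabilities, kept minimal for the sketch.
type ModelCapabilities struct {
    TemperatureAdjustable *bool
}

func main() {
    notAdjustable := false

    explicit := ModelCapabilities{TemperatureAdjustable: &notAdjustable} // explicitly locked to the default
    unknown := ModelCapabilities{}                                       // nil: fall back to format-based defaults

    fmt.Println(explicit.TemperatureAdjustable != nil) // true  -> the override applies
    fmt.Println(unknown.TemperatureAdjustable != nil)  // false -> defaults win
}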

agent/llm/adapters/reasoning.go

Lines changed: 51 additions & 13 deletions
@@ -16,45 +16,83 @@ const (
 
 // ReasoningAdapter handles reasoning content capability
 // - Manages reasoning_effort parameter (o1, GPT-5)
+// - Manages temperature parameter constraints (reasoning models typically require temperature=1)
 // - Extracts reasoning_tokens from usage
 // - Parses visible reasoning content (DeepSeek R1)
 type ReasoningAdapter struct {
     *BaseAdapter
-    format         ReasoningFormat
-    supportsEffort bool // Whether the model supports reasoning_effort parameter
+    format              ReasoningFormat
+    supportsEffort      bool // Whether the model supports reasoning_effort parameter
+    supportsTemperature bool // Whether the model supports temperature adjustment
 }
 
 // NewReasoningAdapter creates a new reasoning adapter
-func NewReasoningAdapter(format ReasoningFormat) *ReasoningAdapter {
+// If cap.TemperatureAdjustable is provided, it overrides the default behavior
+func NewReasoningAdapter(format ReasoningFormat, cap *context.ModelCapabilities) *ReasoningAdapter {
     supportsEffort := false
+    supportsTemperature := true
 
-    // Only OpenAI o1 and GPT-5 support reasoning_effort parameter
-    if format == ReasoningFormatOpenAI || format == ReasoningFormatGPT5 {
+    // Set defaults based on reasoning format
+    switch format {
+    case ReasoningFormatOpenAI, ReasoningFormatGPT5:
+        // OpenAI o1 and GPT-5: support reasoning_effort, but NOT temperature adjustment
         supportsEffort = true
+        supportsTemperature = false
+    case ReasoningFormatDeepSeek:
+        // DeepSeek R1: no reasoning_effort, no temperature adjustment
+        supportsEffort = false
+        supportsTemperature = false
+    case ReasoningFormatNone:
+        // Non-reasoning models: no reasoning_effort, but support temperature
+        supportsEffort = false
+        supportsTemperature = true
+    }
+
+    // Override with explicit capability if provided
+    if cap != nil && cap.TemperatureAdjustable != nil {
+        supportsTemperature = *cap.TemperatureAdjustable
     }
 
     return &ReasoningAdapter{
-        BaseAdapter:    NewBaseAdapter("ReasoningAdapter"),
-        format:         format,
-        supportsEffort: supportsEffort,
+        BaseAdapter:         NewBaseAdapter("ReasoningAdapter"),
+        format:              format,
+        supportsEffort:      supportsEffort,
+        supportsTemperature: supportsTemperature,
     }
 }
 
-// PreprocessOptions handles reasoning_effort parameter
+// PreprocessOptions handles reasoning_effort and temperature parameters
 func (a *ReasoningAdapter) PreprocessOptions(options *context.CompletionOptions) (*context.CompletionOptions, error) {
     if options == nil {
         return options, nil
     }
 
-    // If model doesn't support reasoning_effort, remove it
-    if !a.supportsEffort && options.ReasoningEffort != nil {
+    newOptions := *options
+    modified := false
+
+    // 1. Handle reasoning_effort parameter
+    if !a.supportsEffort && newOptions.ReasoningEffort != nil {
         // Model doesn't support reasoning_effort, remove the parameter
-        newOptions := *options
         newOptions.ReasoningEffort = nil
+        modified = true
+    }
+
+    // 2. Handle temperature parameter
+    if !a.supportsTemperature && newOptions.Temperature != nil {
+        currentTemp := *newOptions.Temperature
+        if currentTemp != 1.0 {
+            // Model doesn't support temperature adjustment, reset to default (1.0)
+            defaultTemp := 1.0
+            newOptions.Temperature = &defaultTemp
+            modified = true
+        }
+    }
+
+    if modified {
         return &newOptions, nil
     }
 
-    // If model supports reasoning_effort, keep it as-is (user can set "low", "medium", or "high")
+    // No modifications needed
     return options, nil
 }
 
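
Taken together with the provider change below, here is a call-site fragment (not a full program) that illustrates the new temperature handling; it assumes Temperature is a *float64 on context.CompletionOptions and that the adapters and context packages are imported as in openai.go.

// An OpenAI-style reasoning model whose temperature is not adjustable.
adjustable := false
caps := &context.ModelCapabilities{TemperatureAdjustable: &adjustable}
adapter := adapters.NewReasoningAdapter(adapters.ReasoningFormatOpenAI, caps)

// A caller-supplied non-default temperature is reset to 1.0 by PreprocessOptions.
temp := 0.2
opts, err := adapter.PreprocessOptions(&context.CompletionOptions{Temperature: &temp})
if err == nil && opts.Temperature != nil {
    fmt.Println(*opts.Temperature) // 1
}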

agent/llm/providers/openai/openai.go

Lines changed: 3 additions & 3 deletions
@@ -141,16 +141,16 @@ func buildAdapters(cap *context.ModelCapabilities) []adapters.CapabilityAdapter
         result = append(result, adapters.NewAudioAdapter(*cap.Audio))
     }
 
-    // Reasoning adapter (always add to handle reasoning_effort parameter)
+    // Reasoning adapter (always add to handle reasoning_effort and temperature parameters)
     // Even if the model doesn't support reasoning, we need the adapter to strip reasoning_effort
     if cap.Reasoning != nil {
         if *cap.Reasoning {
             // Detect reasoning format based on capabilities
             format := detectReasoningFormat(cap)
-            result = append(result, adapters.NewReasoningAdapter(format))
+            result = append(result, adapters.NewReasoningAdapter(format, cap))
         } else {
             // Model doesn't support reasoning, use None format to strip reasoning parameters
-            result = append(result, adapters.NewReasoningAdapter(adapters.ReasoningFormatNone))
+            result = append(result, adapters.NewReasoningAdapter(adapters.ReasoningFormatNone, cap))
         }
     }
 
