Skip to content

Commit 3e3f173

Browse files
cpcloudcursoragent
and committed
fix(chat): store current query to prevent index panic
Fixed an index out of range panic that occurred during SQL streaming. The code was trying to access m.chat.Messages[-3] to get the user's question, but after removing the "generating query" notice, the array was shorter than expected. Solution: - Added CurrentQuery field to chatState to track the active question - Store the question in handleSQLStreamStarted from sqlStreamStartedMsg - Use m.chat.CurrentQuery in executeSQLQuery instead of indexing messages This makes the code more robust and eliminates fragile assumptions about message array structure during streaming. Co-authored-by: Cursor <cursoragent@cursor.com>
1 parent c36f20d commit 3e3f173

File tree

1 file changed

+15
-10
lines changed

1 file changed

+15
-10
lines changed

internal/app/chat.go

Lines changed: 15 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -37,6 +37,7 @@ type chatState struct {
3737
StreamCh <-chan llm.StreamChunk // for stage 2 (answer streaming)
3838
SQLStreamCh <-chan llm.StreamChunk // for stage 1 (SQL generation)
3939
CancelFn context.CancelFunc
40+
CurrentQuery string // the user's current question being processed
4041
Progress progress.Model // pull progress bar
4142
Pulling bool // true while a model pull is in progress
4243
PullDisplay string // current pull status text, rendered as fixed chrome
@@ -884,9 +885,10 @@ func (m *Model) handleSQLStreamStarted(msg sqlStreamStartedMsg) tea.Cmd {
884885
return nil
885886
}
886887

887-
// Store the cancel function and channel, then start reading chunks.
888+
// Store the cancel function, channel, and question, then start reading chunks.
888889
m.chat.CancelFn = msg.CancelFn
889890
m.chat.SQLStreamCh = msg.Channel
891+
m.chat.CurrentQuery = msg.Question
890892
return waitForSQLChunk(msg.Channel)
891893
}
892894

@@ -940,11 +942,12 @@ func (m *Model) handleSQLChunk(msg sqlChunkMsg) tea.Cmd {
940942
sql = llm.ExtractSQL(m.chat.Messages[len(m.chat.Messages)-1].SQL)
941943
}
942944
m.removeLastNotice() // Remove "generating query"
943-
945+
944946
if sql == "" {
945947
m.chat.Streaming = false
946948
// Remove incomplete assistant message.
947-
if len(m.chat.Messages) > 0 && m.chat.Messages[len(m.chat.Messages)-1].Role == "assistant" {
949+
if len(m.chat.Messages) > 0 &&
950+
m.chat.Messages[len(m.chat.Messages)-1].Role == "assistant" {
948951
m.chat.Messages = m.chat.Messages[:len(m.chat.Messages)-1]
949952
}
950953
m.chat.Messages = append(m.chat.Messages, chatMessage{
@@ -970,7 +973,7 @@ func (m *Model) handleSQLChunk(msg sqlChunkMsg) tea.Cmd {
970973
// executeSQLQuery runs the generated SQL and starts stage 2 (summary).
971974
func (m *Model) executeSQLQuery(sql string) tea.Cmd {
972975
store := m.store
973-
query := m.chat.Messages[len(m.chat.Messages)-3].Content // user message is 3 back
976+
query := m.chat.CurrentQuery
974977

975978
return func() tea.Msg {
976979
cols, rows, err := store.ReadOnlyQuery(sql)
@@ -1110,9 +1113,9 @@ func (m *Model) renderChatMessages() string {
11101113
label := m.styles.ChatAssistant.Render(" " + m.llmModelLabel() + " ")
11111114
text := msg.Content
11121115
sql := msg.SQL
1113-
1116+
11141117
var parts []string
1115-
1118+
11161119
// Show SQL if toggle is on and SQL exists.
11171120
if m.chat.ShowSQL && sql != "" {
11181121
sqlWidth := innerW - 8
@@ -1130,7 +1133,7 @@ func (m *Model) renderChatMessages() string {
11301133
)
11311134
parts = append(parts, sqlHeader, sqlBlock)
11321135
}
1133-
1136+
11341137
if text == "" && m.chat.Streaming {
11351138
statusLine := m.chat.Spinner.View() + " " + m.styles.HeaderHint.Render("thinking")
11361139
parts = append(parts, statusLine)
@@ -1145,13 +1148,13 @@ func (m *Model) renderChatMessages() string {
11451148
}
11461149
parts = append(parts, renderMarkdown(text, innerW-2))
11471150
}
1148-
1151+
11491152
if len(parts) > 0 {
11501153
rendered = label + "\n" + strings.Join(parts, "\n")
11511154
} else {
11521155
rendered = label
11531156
}
1154-
1157+
11551158
// Add subtle separator after assistant response (end of Q&A pair).
11561159
// Skip if it's the last message to avoid trailing separator.
11571160
if i < len(m.chat.Messages)-1 && text != "" {
@@ -1255,7 +1258,9 @@ func (m *Model) handleChatKey(key tea.KeyMsg) (tea.Model, tea.Cmd) {
12551258
// Remove "generating query" notice and add cancellation message.
12561259
m.removeLastNotice()
12571260
// If we have an incomplete assistant message, remove it.
1258-
if len(m.chat.Messages) > 0 && m.chat.Messages[len(m.chat.Messages)-1].Role == "assistant" && m.chat.Messages[len(m.chat.Messages)-1].Content == "" {
1261+
if len(m.chat.Messages) > 0 &&
1262+
m.chat.Messages[len(m.chat.Messages)-1].Role == "assistant" &&
1263+
m.chat.Messages[len(m.chat.Messages)-1].Content == "" {
12591264
m.chat.Messages = m.chat.Messages[:len(m.chat.Messages)-1]
12601265
}
12611266
m.chat.Messages = append(m.chat.Messages, chatMessage{

0 commit comments

Comments (0)