4 files changed, +4 -4 lines changed
@@ -105,7 +105,7 @@ llama_context::llama_context(

    {
        const char * LLAMA_SET_ROWS = getenv("LLAMA_SET_ROWS");
-       supports_set_rows = LLAMA_SET_ROWS ? (atoi(LLAMA_SET_ROWS) != 0) : supports_set_rows;
+       supports_set_rows = LLAMA_SET_ROWS ? (atoi(LLAMA_SET_ROWS) != 0) : false;

        if (!supports_set_rows && !cparams.kv_unified) {
            LLAMA_LOG_WARN("%s: non-unified KV cache requires ggml_set_rows() - forcing unified KV cache\n", __func__);
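
For context, a minimal standalone sketch (not part of the PR) of the behavior this hunk establishes: when LLAMA_SET_ROWS is absent from the environment the flag now stays false, so the ggml_set_rows() path becomes opt-in via LLAMA_SET_ROWS=1 rather than enabled by default.

#include <cstdio>
#include <cstdlib>

int main() {
    // New default: the ggml_set_rows() path is opt-in.
    bool supports_set_rows = false;

    // Same pattern as the hunk above: if the env var is present it wins;
    // otherwise the flag now falls back to false instead of the old default of true.
    const char * LLAMA_SET_ROWS = getenv("LLAMA_SET_ROWS");
    supports_set_rows = LLAMA_SET_ROWS ? (atoi(LLAMA_SET_ROWS) != 0) : false;

    printf("supports_set_rows = %s\n", supports_set_rows ? "true" : "false");
    return 0;
}
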
@@ -289,7 +289,7 @@ struct llama_context {

    // env: LLAMA_SET_ROWS (temporary)
    // ref: https://github.com/ggml-org/llama.cpp/pull/14285
-   bool supports_set_rows = true;
+   bool supports_set_rows = false;

    // env: LLAMA_GRAPH_REUSE_DISABLE
    bool graph_reuse_disable = false;
@@ -197,7 +197,7 @@ llama_kv_cache_unified::llama_kv_cache_unified(
    debug = LLAMA_KV_CACHE_DEBUG ? atoi(LLAMA_KV_CACHE_DEBUG) : 0;

    const char * LLAMA_SET_ROWS = getenv("LLAMA_SET_ROWS");
-   supports_set_rows = LLAMA_SET_ROWS ? atoi(LLAMA_SET_ROWS) != 0 : supports_set_rows;
+   supports_set_rows = LLAMA_SET_ROWS ? atoi(LLAMA_SET_ROWS) != 0 : 0;

    if (!supports_set_rows) {
        // ref: https://github.com/ggml-org/llama.cpp/pull/14363
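
Note that the KV-cache constructor uses ": 0" where llama_context uses ": false". A small illustrative check (an assumption-free equivalence demo, not part of the PR) that both fallbacks disable the feature identically when the variable is unset, since the int 0 converts to false for a bool target:

#include <cassert>
#include <cstdlib>

int main() {
    // unsetenv() is POSIX; on other platforms simply run without the variable set.
    unsetenv("LLAMA_SET_ROWS");

    const char * LLAMA_SET_ROWS = getenv("LLAMA_SET_ROWS");

    // llama_context falls back to `false`, the KV cache falls back to `0`;
    // both leave the flag disabled when the variable is absent.
    bool a = LLAMA_SET_ROWS ? (atoi(LLAMA_SET_ROWS) != 0) : false;
    bool b = LLAMA_SET_ROWS ? (atoi(LLAMA_SET_ROWS) != 0) : 0;

    assert(a == b && a == false);
    return 0;
}
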
@@ -230,7 +230,7 @@ class llama_kv_cache_unified : public llama_memory_i {

    // env: LLAMA_SET_ROWS (temporary)
    // ref: https://github.com/ggml-org/llama.cpp/pull/14285
-   bool supports_set_rows = true;
+   bool supports_set_rows = false;

    const llama_swa_type swa_type = LLAMA_SWA_TYPE_NONE;
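
Taken together, the four hunks flip supports_set_rows from opt-out to opt-in: both in-class defaults and both env-var fallbacks now resolve to false. A hypothetical way for a host program to opt back in (sketch only, not from the PR) is to export LLAMA_SET_ROWS=1 before the context and KV cache are constructed:

#include <cstdlib>

int main() {
    // POSIX setenv(); the Windows counterpart would be _putenv_s().
    setenv("LLAMA_SET_ROWS", "1", /*overwrite=*/1);

    // ... construct the llama.cpp model / context / unified KV cache after this
    // point, so the getenv() lookups in their constructors observe the opt-in ...
    return 0;
}
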