
Commit d51da9c

Remove some dora leftovers from pytorch flow
Signed-off-by: Amit Zuker <[email protected]>
1 parent 08c03aa · commit d51da9c

File tree

3 files changed: +0 additions, −19 deletions

tensorrt_llm/_torch/peft/lora/layer.py

Lines changed: 0 additions & 2 deletions
@@ -107,8 +107,6 @@ def forward(
             module_idx = int(module_idx)
             if module_idx in lora_params[layer_idx]:
                 active_lora_module_ids.append(module_idx)
-                # TODO (dafrimi): needs to pass this is_dora arg
-                lora_params[layer_idx][module_idx]['is_dora']
                 lora_ranks.append(
                     lora_params[layer_idx][module_idx]['adapter_size'])
                 lora_weight_pointers.append(
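
For orientation, here is a minimal, self-contained sketch of the lookup pattern that remains in forward after this removal. The lora_params contents, layer/module indices, and pointer values below are toy placeholders, not real engine state:

# Toy example of the per-layer gather kept in the LoRA forward path;
# 'is_dora' is no longer read from lora_params.
layer_idx = 0
lora_params = {
    0: {  # layer_idx -> module_idx -> per-module LoRA metadata
        7: {'adapter_size': 8, 'weight_pointers': [1111, 2222, 0]},
    },
}

active_lora_module_ids, lora_ranks, lora_weight_pointers = [], [], []
for module_idx in (7, 9):  # candidate module ids; 9 has no adapter in this layer
    module_idx = int(module_idx)
    if module_idx in lora_params[layer_idx]:
        active_lora_module_ids.append(module_idx)
        lora_ranks.append(lora_params[layer_idx][module_idx]['adapter_size'])
        lora_weight_pointers.append(
            lora_params[layer_idx][module_idx]['weight_pointers'])

assert active_lora_module_ids == [7] and lora_ranks == [8]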

tensorrt_llm/_torch/pyexecutor/model_engine.py

Lines changed: 0 additions & 9 deletions
@@ -2086,7 +2086,6 @@ def _get_lora_params_from_requests(self,
                 module_id: dict
                 {
                     adapter_size: torch tensor: int
-                    is_dora: torch tensor: bool
                     weight_pointers: torch tensor: int64
                 }
             }
@@ -2111,7 +2110,6 @@ def _get_lora_params_from_requests(self,
                     if module_id not in lora_params[layer_id]:
                         lora_params[layer_id][module_id] = {
                             'adapter_size': [],
-                            'is_dora': [],
                             'weight_pointers': [],
                         }
 
@@ -2121,7 +2119,6 @@ def _get_lora_params_from_requests(self,
                     tmp_lora_params[(request.py_request_id, layer_id,
                                      module_id)] = {
                                          'adapter_size': [module.adapter_size],
-                                         'is_dora': [scaling_vec_pointer == 0],
                                          'weight_pointers': [
                                              module.weights_in_pointer,
                                              module.weights_out_pointer,
@@ -2136,7 +2133,6 @@ def _get_lora_params_from_requests(self,
                 for module_id in lora_params[layer_id]:
                     current_lora_params = lora_params[layer_id][module_id]
                     current_lora_params['adapter_size'].append(0)
-                    current_lora_params['is_dora'].append(False)
                     current_lora_params['weight_pointers'] += [0, 0, 0]
 
             else:
@@ -2147,23 +2143,18 @@ def _get_lora_params_from_requests(self,
                     current_lora_params = lora_params[layer_id][module_id]
                     if current_tmp_lora_params is None:
                         current_lora_params['adapter_size'].append(0)
-                        current_lora_params['is_dora'].append(False)
                         current_lora_params['weight_pointers'] += [0, 0, 0]
                     else:
                         current_lora_params[
                             'adapter_size'] += current_tmp_lora_params[
                                 'adapter_size']
-                        current_lora_params[
-                            'is_dora'] += current_tmp_lora_params['is_dora']
                         current_lora_params[
                             'weight_pointers'] += current_tmp_lora_params[
                                 'weight_pointers']
 
         for layer_id in lora_params:
             for module_id in lora_params[layer_id]:
                 current_lora_params = lora_params[layer_id][module_id]
-                # TODO: When lora_grouped_gemm supports DoRA: convert 'is_dora' to a bool tensor.
-                # Until it's supported, that would just slow down this function, so better not to do it.
                 current_lora_params['adapter_size'] = torch.IntTensor(
                     current_lora_params['adapter_size'])
                 current_lora_params['weight_pointers'] = torch.LongTensor(
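
To make the resulting structure concrete, here is a toy-sized sketch of what _get_lora_params_from_requests now builds per (layer_id, module_id) and how the final tensor conversion behaves after the removal. The ids, adapter sizes, and pointer values are placeholder assumptions, not real request data:

import torch

# Toy lora_params as assembled request by request: plain Python lists first,
# converted to tensors at the end. No 'is_dora' entry is tracked anymore.
lora_params = {
    0: {                                   # layer_id
        7: {                               # module_id
            'adapter_size': [8, 0],        # second request has no adapter (zero-padded)
            'weight_pointers': [1111, 2222, 0, 0, 0, 0],  # 3 placeholder pointers per request
        },
    },
}

for layer_id in lora_params:
    for module_id in lora_params[layer_id]:
        current_lora_params = lora_params[layer_id][module_id]
        current_lora_params['adapter_size'] = torch.IntTensor(
            current_lora_params['adapter_size'])
        current_lora_params['weight_pointers'] = torch.LongTensor(
            current_lora_params['weight_pointers'])

print(lora_params[0][7]['adapter_size'].dtype)      # torch.int32
print(lora_params[0][7]['weight_pointers'].dtype)   # torch.int64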

tests/unittest/_torch/modules/tests_lora_modules/test_lora_attention_pytorch_flow_vs_trt.py

Lines changed: 0 additions & 8 deletions
@@ -419,31 +419,23 @@ def test_lora_attention(self):
                         lora_params['lora_ranks'],
                     'weight_pointers':
                         lora_params['lora_weights_pointers'],
-                    'is_dora':
-                        False,
                 },
                 LoraModuleType.ATTENTION_K: {
                     'adapter_size':
                         lora_params['lora_ranks'],
                     'weight_pointers': lora_params['lora_weights_pointers'],
-                    'is_dora':
-                        False,
                 },
                 LoraModuleType.ATTENTION_V: {
                     'adapter_size':
                         lora_params['lora_ranks'],
                     'weight_pointers':
                         lora_params['lora_weights_pointers'],
-                    'is_dora':
-                        False,
                 },
                 LoraModuleType.ATTENTION_DENSE: {
                     'adapter_size':
                         lora_params['lora_ranks'],
                     'weight_pointers':
                         lora_params['lora_weights_pointers'],
-                    'is_dora':
-                        False,
                 }
             }
         }
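
For readers updating similar tests, a hedged sketch of the per-module dict shape the test now passes: only 'adapter_size' and 'weight_pointers', no 'is_dora'. The stand-in enum and the rank/pointer values are illustrative assumptions; the real LoraModuleType comes from TensorRT-LLM, and the hunk above begins inside the preceding module's entry:

from enum import IntEnum

class LoraModuleType(IntEnum):
    # Stand-in for TensorRT-LLM's LoraModuleType, for illustration only;
    # the member values here are arbitrary.
    ATTENTION_K = 1
    ATTENTION_V = 2
    ATTENTION_DENSE = 3

lora_ranks = [8]                          # placeholder adapter rank per request
lora_weights_pointers = [1111, 2222, 0]   # placeholder pointer triple per module

attention_lora_params = {
    module_type: {
        'adapter_size': lora_ranks,
        'weight_pointers': lora_weights_pointers,
    }
    for module_type in (LoraModuleType.ATTENTION_K, LoraModuleType.ATTENTION_V,
                        LoraModuleType.ATTENTION_DENSE)
}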
