Commit a26f59c

[Misc] Raise error for V1 not supporting Long LoRA. (vllm-project#16415)
Signed-off-by: Jee Jee Li <[email protected]>
1 parent aa3b3d7 commit a26f59c

2 files changed: +7 −1 lines changed

vllm/config.py

Lines changed: 6 additions & 0 deletions
@@ -2573,6 +2573,11 @@ def verify_with_scheduler_config(self, scheduler_config: SchedulerConfig):
             logger.warning("LoRA with chunked prefill is still experimental "
                            "and may be unstable.")
 
+    def verify_lora_support(self):
+        if self.long_lora_scaling_factors is not None and envs.VLLM_USE_V1:
+            raise ValueError(
+                "V1 LoRA does not support long LoRA, please use V0.")
+
 
 @dataclass
 class PromptAdapterConfig:
@@ -3672,6 +3677,7 @@ def __post_init__(self):
             self.lora_config.verify_with_model_config(self.model_config)
             self.lora_config.verify_with_scheduler_config(
                 self.scheduler_config)
+            self.lora_config.verify_lora_support()
         if self.prompt_adapter_config:
             self.prompt_adapter_config.verify_with_model_config(
                 self.model_config)
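
For context, here is a minimal sketch of what the new check does, using a hypothetical simplified stand-in for LoRAConfig (the real class lives in vllm/config.py and reads VLLM_USE_V1 from vLLM's envs module):

# Minimal sketch, assuming a simplified stand-in for LoRAConfig;
# the real check reads envs.VLLM_USE_V1 rather than a module constant.
from dataclasses import dataclass
from typing import Optional, Tuple

VLLM_USE_V1 = True  # stand-in for envs.VLLM_USE_V1


@dataclass
class LoRAConfigSketch:
    long_lora_scaling_factors: Optional[Tuple[float, ...]] = None

    def verify_lora_support(self):
        # Long-context LoRA (non-None scaling factors) is rejected on V1.
        if self.long_lora_scaling_factors is not None and VLLM_USE_V1:
            raise ValueError(
                "V1 LoRA does not support long LoRA, please use V0.")


# With V1 enabled, configuring long LoRA scaling factors now fails fast:
try:
    LoRAConfigSketch(long_lora_scaling_factors=(4.0,)).verify_lora_support()
except ValueError as e:
    print(e)  # -> V1 LoRA does not support long LoRA, please use V0.

The second hunk wires this check into the config's __post_init__, so the error surfaces as soon as the configuration is finalized rather than later during adapter loading.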

vllm/lora/models.py

Lines changed: 1 addition & 1 deletion
@@ -364,7 +364,7 @@ def __init__(
         self._last_mapping: Optional[LoRAMapping] = None
         self._create_lora_modules()
         self.model.lora_manager = self
-        self.adapter_type = 'LoRa'
+        self.adapter_type = 'LoRA'
 
     @property
     def capacity(self) -> int:
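
This hunk only fixes the capitalization of the adapter-type string. A small illustration of why it can matter, assuming (hypothetically) that downstream code compares the string case-sensitively:

# Hypothetical case-sensitive consumer of adapter_type; with the old
# value 'LoRa' the comparison silently fails.
adapter_type = 'LoRa'
print(adapter_type == 'LoRA')  # False before the fix
adapter_type = 'LoRA'
print(adapter_type == 'LoRA')  # True after the fix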
