Commit 2ad2e1a

Mary Hipp authored and committed
Merge remote-tracking branch 'origin/main' into maryhipp/flux-kontext
2 parents 9b99cf7 + 6a78739 commit 2ad2e1a

File tree

27 files changed: +702 -72 lines

invokeai/app/api/routers/images.py

Lines changed: 3 additions & 1 deletion

@@ -99,7 +99,9 @@ async def upload_image(
             raise HTTPException(status_code=400, detail="Invalid resize_to format or size")

         try:
-            np_image = pil_to_np(pil_image)
+            # heuristic_resize_fast expects an RGB or RGBA image
+            pil_rgba = pil_image.convert("RGBA")
+            np_image = pil_to_np(pil_rgba)
             np_image = heuristic_resize_fast(np_image, (resize_dims.width, resize_dims.height))
             pil_image = np_to_pil(np_image)
         except Exception:
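
Note: the added convert("RGBA") call guards against uploads that are not already 3- or 4-channel (grayscale, palette, CMYK). A minimal sketch of the same normalization using only Pillow and NumPy; the to_rgba_array helper is illustrative and is not the project's pil_to_np:

import numpy as np
from PIL import Image

def to_rgba_array(pil_image: Image.Image) -> np.ndarray:
    rgba = pil_image.convert("RGBA")  # no-op if the image is already RGBA
    return np.asarray(rgba)           # always an (H, W, 4) uint8 array

arr = to_rgba_array(Image.new("L", (64, 64)))  # e.g. a grayscale upload
assert arr.shape == (64, 64, 4)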

invokeai/app/api_app.py

Lines changed: 1 addition & 1 deletion

@@ -158,7 +158,7 @@ def overridden_redoc() -> HTMLResponse:
     try:
         app.mount("/", NoCacheStaticFiles(directory=Path(web_root_path, "dist"), html=True), name="ui")
     except RuntimeError:
-        logger.warn(f"No UI found at {web_root_path}/dist, skipping UI mount")
+        logger.warning(f"No UI found at {web_root_path}/dist, skipping UI mount")
     app.mount(
         "/static", NoCacheStaticFiles(directory=Path(web_root_path, "static/")), name="static"
     )  # docs favicon is in here
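
This commit replaces logger.warn with logger.warning in several files (see also baseinvocation.py, fields.py, and images_default.py below). In the standard library's logging module, Logger.warn is a deprecated alias for Logger.warning; the behavior is otherwise identical:

import logging

logger = logging.getLogger(__name__)
logger.warning("No UI found, skipping UI mount")  # supported spelling
# logger.warn(...) is a deprecated alias for warning() and may emit a DeprecationWarning on newer Pythons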

invokeai/app/invocations/baseinvocation.py

Lines changed: 6 additions & 3 deletions

@@ -499,7 +499,7 @@ def validate_fields(model_fields: dict[str, FieldInfo], model_type: str) -> None

        ui_type = field.json_schema_extra.get("ui_type", None)
        if isinstance(ui_type, str) and ui_type.startswith("DEPRECATED_"):
-            logger.warn(f'"UIType.{ui_type.split("_")[-1]}" is deprecated, ignoring')
+            logger.warning(f'"UIType.{ui_type.split("_")[-1]}" is deprecated, ignoring')
            field.json_schema_extra.pop("ui_type")
    return None

@@ -582,14 +582,16 @@ def wrapper(cls: Type[TBaseInvocation]) -> Type[TBaseInvocation]:

        fields: dict[str, tuple[Any, FieldInfo]] = {}

+        original_model_fields: dict[str, OriginalModelField] = {}
+
        for field_name, field_info in cls.model_fields.items():
            annotation = field_info.annotation
            assert annotation is not None, f"{field_name} on invocation {invocation_type} has no type annotation."
            assert isinstance(field_info.json_schema_extra, dict), (
                f"{field_name} on invocation {invocation_type} has a non-dict json_schema_extra, did you forget to use InputField?"
            )

-            cls._original_model_fields[field_name] = OriginalModelField(annotation=annotation, field_info=field_info)
+            original_model_fields[field_name] = OriginalModelField(annotation=annotation, field_info=field_info)

            validate_field_default(cls.__name__, field_name, invocation_type, annotation, field_info)

@@ -613,7 +615,7 @@ def wrapper(cls: Type[TBaseInvocation]) -> Type[TBaseInvocation]:
                raise InvalidVersionError(f'Invalid version string for node "{invocation_type}": "{version}"') from e
            uiconfig["version"] = version
        else:
-            logger.warn(f'No version specified for node "{invocation_type}", using "1.0.0"')
+            logger.warning(f'No version specified for node "{invocation_type}", using "1.0.0"')
            uiconfig["version"] = "1.0.0"

        cls.UIConfig = UIConfigBase(**uiconfig)

@@ -676,6 +678,7 @@ def wrapper(cls: Type[TBaseInvocation]) -> Type[TBaseInvocation]:
        docstring = cls.__doc__
        new_class = create_model(cls.__qualname__, __base__=cls, __module__=cls.__module__, **fields)  # type: ignore
        new_class.__doc__ = docstring
+        new_class._original_model_fields = original_model_fields

        InvocationRegistry.register_invocation(new_class)
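
The functional change here is where the per-field metadata ends up: instead of mutating cls._original_model_fields while iterating, the wrapper builds a local dict and attaches it to the class returned by create_model, so the cache lives on the generated subclass rather than on the class being wrapped. A toy sketch of that pattern; MyNode and collected are illustrative, only create_model and model_fields are real pydantic API:

from pydantic import BaseModel, create_model
from pydantic.fields import FieldInfo

class MyNode(BaseModel):
    """Example invocation-like model."""
    steps: int = 10

# Collect field metadata into a local dict instead of writing onto MyNode directly.
collected: dict[str, FieldInfo] = {name: info for name, info in MyNode.model_fields.items()}

# Build the derived class, then attach the collected metadata to it.
DerivedNode = create_model("MyNode", __base__=MyNode)
DerivedNode._original_model_fields = collected  # plain class attribute set after creation

assert "steps" in DerivedNode._original_model_fields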

invokeai/app/invocations/compel.py

Lines changed: 10 additions & 0 deletions

@@ -114,6 +114,13 @@ def _lora_loader() -> Iterator[Tuple[ModelPatchRaw, float]]:

            c, _options = compel.build_conditioning_tensor_for_conjunction(conjunction)

+        del compel
+        del patched_tokenizer
+        del tokenizer
+        del ti_manager
+        del text_encoder
+        del text_encoder_info
+
        c = c.detach().to("cpu")

        conditioning_data = ConditioningFieldData(conditionings=[BasicConditioningInfo(embeds=c)])

@@ -222,7 +229,10 @@ def _lora_loader() -> Iterator[Tuple[ModelPatchRaw, float]]:
        else:
            c_pooled = None

+        del compel
+        del patched_tokenizer
        del tokenizer
+        del ti_manager
        del text_encoder
        del text_encoder_info
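
Deleting the locals that still point at the tokenizer, text encoder, and Compel objects before detaching the result mirrors the pattern already present in the second hunk: once no Python name references a GPU-resident object, its memory can actually be reclaimed. A minimal, self-contained illustration of the idea, using a toy encoder rather than InvokeAI's model-loading machinery:

import torch

def encode_prompt() -> torch.Tensor:
    # Stand-ins for the text encoder and intermediate objects held on the GPU.
    encoder = torch.nn.Linear(8, 8).to("cuda")
    tokens = torch.randn(1, 8, device="cuda")
    out = encoder(tokens)
    # Drop every local that keeps GPU memory alive before moving the result off-device.
    del encoder, tokens
    return out.detach().to("cpu")

if torch.cuda.is_available():
    conditioning = encode_prompt()  # only the small CPU tensor survives the call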

invokeai/app/invocations/fields.py

Lines changed: 2 additions & 2 deletions

@@ -438,7 +438,7 @@ class WithWorkflow:
    workflow = None

    def __init_subclass__(cls) -> None:
-        logger.warn(
+        logger.warning(
            f"{cls.__module__.split('.')[0]}.{cls.__name__}: WithWorkflow is deprecated. Use `context.workflow` to access the workflow."
        )
        super().__init_subclass__()

@@ -579,7 +579,7 @@ def InputField(

    if default_factory is not _Unset and default_factory is not None:
        default = default_factory()
-        logger.warn('"default_factory" is not supported, calling it now to set "default"')
+        logger.warning('"default_factory" is not supported, calling it now to set "default"')

    # These are the args we may wish pass to the pydantic `Field()` function
    field_args = {
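
The second hunk sits in InvokeAI's InputField wrapper, which does not pass default_factory through to pydantic; it calls the factory once and uses the result as a static default, hence the warning. A short illustration of that fallback in plain Python, not the InvokeAI helper itself:

def default_factory() -> list[str]:
    return []

# InputField-style fallback: call the factory once and treat the result as a static default,
# so every consumer sees the same object rather than a fresh one per instance.
default = default_factory()
assert default == []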

invokeai/app/services/config/config_default.py

Lines changed: 2 additions & 3 deletions

@@ -24,7 +24,6 @@
 INIT_FILE = Path("invokeai.yaml")
 DB_FILE = Path("invokeai.db")
 LEGACY_INIT_FILE = Path("invokeai.init")
-DEVICE = Literal["auto", "cpu", "cuda", "cuda:1", "mps"]
 PRECISION = Literal["auto", "float16", "bfloat16", "float32"]
 ATTENTION_TYPE = Literal["auto", "normal", "xformers", "sliced", "torch-sdp"]
 ATTENTION_SLICE_SIZE = Literal["auto", "balanced", "max", 1, 2, 3, 4, 5, 6, 7, 8]

@@ -93,7 +92,7 @@ class InvokeAIAppConfig(BaseSettings):
        vram: DEPRECATED: This setting is no longer used. It has been replaced by `max_cache_vram_gb`, but most users will not need to use this config since automatic cache size limits should work well in most cases. This config setting will be removed once the new model cache behavior is stable.
        lazy_offload: DEPRECATED: This setting is no longer used. Lazy-offloading is enabled by default. This config setting will be removed once the new model cache behavior is stable.
        pytorch_cuda_alloc_conf: Configure the Torch CUDA memory allocator. This will impact peak reserved VRAM usage and performance. Setting to "backend:cudaMallocAsync" works well on many systems. The optimal configuration is highly dependent on the system configuration (device type, VRAM, CUDA driver version, etc.), so must be tuned experimentally.
-        device: Preferred execution device. `auto` will choose the device depending on the hardware platform and the installed torch capabilities.<br>Valid values: `auto`, `cpu`, `cuda`, `cuda:1`, `mps`
+        device: Preferred execution device. `auto` will choose the device depending on the hardware platform and the installed torch capabilities.<br>Valid values: `auto`, `cpu`, `cuda`, `mps`, `cuda:N` (where N is a device number)
        precision: Floating point precision. `float16` will consume half the memory of `float32` but produce slightly lower-quality images. The `auto` setting will guess the proper precision based on your video card and operating system.<br>Valid values: `auto`, `float16`, `bfloat16`, `float32`
        sequential_guidance: Whether to calculate guidance in serial instead of in parallel, lowering memory requirements.
        attention_type: Attention type.<br>Valid values: `auto`, `normal`, `xformers`, `sliced`, `torch-sdp`

@@ -176,7 +175,7 @@ class InvokeAIAppConfig(BaseSettings):
    pytorch_cuda_alloc_conf: Optional[str] = Field(default=None, description="Configure the Torch CUDA memory allocator. This will impact peak reserved VRAM usage and performance. Setting to \"backend:cudaMallocAsync\" works well on many systems. The optimal configuration is highly dependent on the system configuration (device type, VRAM, CUDA driver version, etc.), so must be tuned experimentally.")

    # DEVICE
-    device: DEVICE = Field(default="auto", description="Preferred execution device. `auto` will choose the device depending on the hardware platform and the installed torch capabilities.")
+    device: str = Field(default="auto", description="Preferred execution device. `auto` will choose the device depending on the hardware platform and the installed torch capabilities.<br>Valid values: `auto`, `cpu`, `mps`, `cuda:N` (where N is a device number)", pattern=r"^(auto|cpu|mps|cuda(:\d+)?)$")
    precision: PRECISION = Field(default="auto", description="Floating point precision. `float16` will consume half the memory of `float32` but produce slightly lower-quality images. The `auto` setting will guess the proper precision based on your video card and operating system.")

    # GENERATION
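
Replacing the DEVICE Literal with a regex-constrained str is what allows any CUDA device index (cuda:0, cuda:7, ...) instead of only the hard-coded cuda:1. A minimal sketch of the same pydantic v2 validation in isolation; the DeviceConfig model is illustrative:

from pydantic import BaseModel, Field, ValidationError

class DeviceConfig(BaseModel):
    # Same pattern as the new config field: auto, cpu, mps, or cuda with an optional index.
    device: str = Field(default="auto", pattern=r"^(auto|cpu|mps|cuda(:\d+)?)$")

print(DeviceConfig(device="cuda:3").device)  # any device index now validates

try:
    DeviceConfig(device="rocm")              # anything else is rejected
except ValidationError as err:
    print(err.errors()[0]["type"])           # string_pattern_mismatch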

invokeai/app/services/images/images_default.py

Lines changed: 1 addition & 1 deletion

@@ -78,7 +78,7 @@ def create(
                board_id=board_id, image_name=image_name
            )
        except Exception as e:
-            self.__invoker.services.logger.warn(f"Failed to add image to board {board_id}: {str(e)}")
+            self.__invoker.services.logger.warning(f"Failed to add image to board {board_id}: {str(e)}")
        self.__invoker.services.image_files.save(
            image_name=image_name, image=image, metadata=metadata, workflow=workflow, graph=graph
        )

invokeai/app/services/model_install/model_install_default.py

Lines changed: 1 addition & 1 deletion

@@ -148,7 +148,7 @@ def stop(self, invoker: Optional[Invoker] = None) -> None:
    def _clear_pending_jobs(self) -> None:
        for job in self.list_jobs():
            if not job.in_terminal_state:
-                self._logger.warning("Cancelling job {job.id}")
+                self._logger.warning(f"Cancelling job {job.id}")
                self.cancel_job(job)
        while True:
            try:
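
This one-character fix adds the missing f prefix; without it the braces are logged literally instead of interpolating the job id:

job_id = 42
print("Cancelling job {job_id}")   # -> Cancelling job {job_id}  (no interpolation)
print(f"Cancelling job {job_id}")  # -> Cancelling job 42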

invokeai/app/services/session_processor/session_processor_default.py

Lines changed: 7 additions & 0 deletions

@@ -1,3 +1,4 @@
+import gc
 import traceback
 from contextlib import suppress
 from threading import BoundedSemaphore, Thread

@@ -439,6 +440,12 @@ def _process(
                    poll_now_event.wait(self._polling_interval)
                    continue

+                # GC-ing here can reduce peak memory usage of the invoke process by freeing allocated memory blocks.
+                # Most queue items take seconds to execute, so the relative cost of a GC is very small.
+                # Python will never cede allocated memory back to the OS, so anything we can do to reduce the peak
+                # allocation is well worth it.
+                gc.collect()
+
                self._invoker.services.logger.info(
                    f"Executing queue item {self._queue_item.item_id}, session {self._queue_item.session_id}"
                )
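
The new gc.collect() runs once per dequeued item, at a point where nothing latency-sensitive is happening, so cyclic garbage from the previous session is released before the next one allocates. A toy sketch of the same placement; the work items are stand-ins:

import gc
import time

def run_queue(items) -> None:
    for work in items:
        # Collect before starting the next item: the pause is milliseconds,
        # while each item runs for seconds, so the relative cost is tiny.
        gc.collect()
        work()

run_queue([lambda: time.sleep(0.01), lambda: time.sleep(0.01)])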

invokeai/app/services/session_queue/session_queue_sqlite.py

Lines changed: 15 additions & 18 deletions

@@ -104,11 +104,7 @@ def _get_highest_priority(self, queue_id: str) -> int:
        return cast(Union[int, None], cursor.fetchone()[0]) or 0

    async def enqueue_batch(self, queue_id: str, batch: Batch, prepend: bool) -> EnqueueBatchResult:
-        return await asyncio.to_thread(self._enqueue_batch, queue_id, batch, prepend)
-
-    def _enqueue_batch(self, queue_id: str, batch: Batch, prepend: bool) -> EnqueueBatchResult:
        try:
-            cursor = self._conn.cursor()
            # TODO: how does this work in a multi-user scenario?
            current_queue_size = self._get_current_queue_size(queue_id)
            max_queue_size = self.__invoker.services.configuration.max_queue_size

@@ -118,28 +114,29 @@ def _enqueue_batch(self, queue_id: str, batch: Batch, prepend: bool) -> EnqueueB
            if prepend:
                priority = self._get_highest_priority(queue_id) + 1

-            requested_count = calc_session_count(batch)
-            values_to_insert = prepare_values_to_insert(
+            requested_count = await asyncio.to_thread(
+                calc_session_count,
+                batch=batch,
+            )
+            values_to_insert = await asyncio.to_thread(
+                prepare_values_to_insert,
                queue_id=queue_id,
                batch=batch,
                priority=priority,
                max_new_queue_items=max_new_queue_items,
            )
            enqueued_count = len(values_to_insert)

-            if requested_count > enqueued_count:
-                values_to_insert = values_to_insert[:max_new_queue_items]
-
-            cursor.executemany(
-                """--sql
-                INSERT INTO session_queue (queue_id, session, session_id, batch_id, field_values, priority, workflow, origin, destination, retried_from_item_id)
-                VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
-                """,
-                values_to_insert,
-            )
-            self._conn.commit()
+            with self._conn:
+                cursor = self._conn.cursor()
+                cursor.executemany(
+                    """--sql
+                    INSERT INTO session_queue (queue_id, session, session_id, batch_id, field_values, priority, workflow, origin, destination, retried_from_item_id)
+                    VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
+                    """,
+                    values_to_insert,
+                )
        except Exception:
-            self._conn.rollback()
            raise
        enqueue_result = EnqueueBatchResult(
            queue_id=queue_id,
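
Two ideas are combined in this hunk: the CPU-heavy preparation (calc_session_count, prepare_values_to_insert) is pushed off the event loop with asyncio.to_thread, and the sqlite3 connection is used as a context manager so commit and rollback happen automatically instead of by hand. A self-contained sketch of both, with a toy table and a prepare_rows stand-in rather than the real session_queue schema:

import asyncio
import sqlite3

conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE queue (payload TEXT)")

def prepare_rows(n: int) -> list[tuple[str]]:
    # Stand-in for prepare_values_to_insert: CPU-bound work kept off the event loop.
    return [(f"item-{i}",) for i in range(n)]

async def enqueue(n: int) -> int:
    rows = await asyncio.to_thread(prepare_rows, n)  # runs in a worker thread
    with conn:  # commits on success, rolls back automatically if an exception escapes
        conn.executemany("INSERT INTO queue (payload) VALUES (?)", rows)
    return len(rows)

print(asyncio.run(enqueue(100)))  # 100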

0 commit comments
