@@ -415,7 +415,7 @@ class DOFALarge16_Weights(WeightsEnum): # type: ignore[misc]
    )
 
 
-def dofa_small_patch16_224(**kwargs: Any) -> DOFA:
+def dofa_small_patch16_224(*args: Any, **kwargs: Any) -> DOFA:
     """Dynamic One-For-All (DOFA) small patch size 16 model.
 
     If you use this model in your research, please cite the following paper:
@@ -425,17 +425,19 @@ def dofa_small_patch16_224(**kwargs: Any) -> DOFA:
    .. versionadded:: 0.6
 
     Args:
+        *args: Additional arguments to pass to :class:`DOFA`.
         **kwargs: Additional keyword arguments to pass to :class:`DOFA`.
 
     Returns:
         A DOFA small 16 model.
     """
-    model = DOFA(patch_size=16, embed_dim=384, depth=12, num_heads=6, **kwargs)
+    kwargs |= {'patch_size': 16, 'embed_dim': 384, 'depth': 12, 'num_heads': 6}
+    model = DOFA(*args, **kwargs)
     return model
 
 
 def dofa_base_patch16_224(
-    weights: DOFABase16_Weights | None = None, **kwargs: Any
+    weights: DOFABase16_Weights | None = None, *args: Any, **kwargs: Any
 ) -> DOFA:
     """Dynamic One-For-All (DOFA) base patch size 16 model.
 
@@ -447,12 +449,14 @@ def dofa_base_patch16_224(
 
     Args:
         weights: Pre-trained model weights to use.
+        *args: Additional arguments to pass to :class:`DOFA`.
         **kwargs: Additional keyword arguments to pass to :class:`DOFA`.
 
     Returns:
         A DOFA base 16 model.
     """
-    model = DOFA(patch_size=16, embed_dim=768, depth=12, num_heads=12, **kwargs)
+    kwargs |= {'patch_size': 16, 'embed_dim': 768, 'depth': 12, 'num_heads': 12}
+    model = DOFA(*args, **kwargs)
 
     if weights:
         missing_keys, unexpected_keys = model.load_state_dict(
@@ -471,7 +475,7 @@ def dofa_base_patch16_224(
 
 
 def dofa_large_patch16_224(
-    weights: DOFALarge16_Weights | None = None, **kwargs: Any
+    weights: DOFALarge16_Weights | None = None, *args: Any, **kwargs: Any
 ) -> DOFA:
     """Dynamic One-For-All (DOFA) large patch size 16 model.
 
@@ -483,12 +487,14 @@ def dofa_large_patch16_224(
 
     Args:
         weights: Pre-trained model weights to use.
+        *args: Additional arguments to pass to :class:`DOFA`.
         **kwargs: Additional keyword arguments to pass to :class:`DOFA`.
 
     Returns:
         A DOFA large 16 model.
     """
-    model = DOFA(patch_size=16, embed_dim=1024, depth=24, num_heads=16, **kwargs)
+    kwargs |= {'patch_size': 16, 'embed_dim': 1024, 'depth': 24, 'num_heads': 16}
+    model = DOFA(*args, **kwargs)
 
     if weights:
         missing_keys, unexpected_keys = model.load_state_dict(
@@ -506,7 +512,7 @@ def dofa_large_patch16_224(
    return model
 
 
-def dofa_huge_patch16_224(**kwargs: Any) -> DOFA:
+def dofa_huge_patch16_224(*args: Any, **kwargs: Any) -> DOFA:
     """Dynamic One-For-All (DOFA) huge patch size 16 model.
 
     If you use this model in your research, please cite the following paper:
@@ -516,10 +522,12 @@ def dofa_huge_patch16_224(**kwargs: Any) -> DOFA:
    .. versionadded:: 0.6
 
     Args:
+        *args: Additional arguments to pass to :class:`DOFA`.
         **kwargs: Additional keyword arguments to pass to :class:`DOFA`.
 
     Returns:
         A DOFA huge 16 model.
     """
-    model = DOFA(patch_size=14, embed_dim=1280, depth=32, num_heads=16, **kwargs)
+    kwargs |= {'patch_size': 14, 'embed_dim': 1280, 'depth': 32, 'num_heads': 16}
+    model = DOFA(*args, **kwargs)
     return model
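
A minimal sketch of the behavior this change introduces, runnable with plain Python 3.9+; the commented-out call at the end is hypothetical and assumes a torchgeo build that includes this patch:

```python
from typing import Any

# The new `kwargs |= {...}` pattern merges the fixed architecture
# hyperparameters into the caller's keyword arguments. On a key collision
# the right-hand operand wins, so the fixed values silently override any
# user-supplied duplicates instead of raising
# "got multiple values for keyword argument" as the old
# `DOFA(patch_size=16, ..., **kwargs)` form did.
kwargs: dict[str, Any] = {'embed_dim': 1024}  # user-supplied value
kwargs |= {'patch_size': 16, 'embed_dim': 768, 'depth': 12, 'num_heads': 12}
print(kwargs['embed_dim'])  # 768 -- the fixed value wins

# With `*args` now forwarded, positional arguments can reach DOFA as well,
# e.g. (hypothetical; positional order follows DOFA's __init__):
# model = dofa_base_patch16_224(DOFABase16_Weights.DOFA_MAE, 224)
```

One behavioral consequence worth noting: previously, passing a fixed hyperparameter such as `patch_size` raised a `TypeError`; after this change the user's value is overwritten without warning.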