Skip to content

Commit dceaf05

Browse files
Adding support for custom eval configs (#1055)
1 parent 37d0be0 commit dceaf05

File tree

1 file changed

+4
-2
lines changed

1 file changed

+4
-2
lines changed

recipes/eleuther_eval.py

Lines changed: 4 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -27,7 +27,7 @@
2727
import lm_eval
2828
from lm_eval.evaluator import evaluate
2929
from lm_eval.models.huggingface import HFLM
30-
from lm_eval.tasks import get_task_dict
30+
from lm_eval.tasks import get_task_dict, TaskManager
3131
from lm_eval.utils import make_table
3232
except ImportError:
3333
logger.error(
@@ -241,7 +241,9 @@ def evaluate(self) -> None:
241241
except Exception:
242242
pass
243243

244-
task_dict = get_task_dict(self._tasks)
244+
task_manager = TaskManager(include_path=self._cfg.get("include_path", None))
245+
task_dict = get_task_dict(self._tasks, task_manager)
246+
245247
logger.info(f"Running evaluation on {self._tasks} tasks.")
246248
output = evaluate(
247249
model_eval_wrapper,

0 commit comments

Comments (0)