1 parent 37d0be0 commit dceaf05
recipes/eleuther_eval.py
@@ -27,7 +27,7 @@
     import lm_eval
     from lm_eval.evaluator import evaluate
     from lm_eval.models.huggingface import HFLM
-    from lm_eval.tasks import get_task_dict
+    from lm_eval.tasks import get_task_dict, TaskManager
     from lm_eval.utils import make_table
 except ImportError:
     logger.error(
@@ -241,7 +241,9 @@ def evaluate(self) -> None:
         except Exception:
             pass

-        task_dict = get_task_dict(self._tasks)
+        task_manager = TaskManager(include_path=self._cfg.get("include_path", None))
+        task_dict = get_task_dict(self._tasks, task_manager)
+
        logger.info(f"Running evaluation on {self._tasks} tasks.")
        output = evaluate(
            model_eval_wrapper,
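The net effect of this change is that task lookup now goes through an explicitly constructed TaskManager, which can be pointed at a directory of user-defined task YAMLs via an optional include_path entry in the recipe config. A minimal standalone sketch of the same lm_eval calls, assuming a hypothetical my_tasks/ directory containing a task named my_custom_task:

    from lm_eval.tasks import TaskManager, get_task_dict

    # Register lm_eval's built-in tasks plus any task YAMLs found under
    # "my_tasks/" (hypothetical path). Passing include_path=None would
    # keep only the built-in tasks, matching the recipe's default.
    task_manager = TaskManager(include_path="my_tasks")

    # Resolve task names (built-in or custom) into task objects that
    # lm_eval.evaluator.evaluate can consume.
    task_dict = get_task_dict(["hellaswag", "my_custom_task"], task_manager)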