11 files changed: +33 −13
@@ -3,6 +3,8 @@
 # To launch, run the following command from root torchtune directory:
 # tune run eleuther_eval --config eleuther_evaluation tasks=["truthfulqa_mc2","hellaswag"]

+output_dir: ./ # Not needed
+
 # Model Arguments
 model:
   _component_: torchtune.models.llama2.llama2_7b
@@ -14,7 +16,7 @@ checkpointer:
     pytorch_model-00001-of-00002.bin,
     pytorch_model-00002-of-00002.bin,
   ]
-  output_dir: /tmp/Llama-2-7b-hf
+  output_dir: ${output_dir}
   model_type: LLAMA2

 # Tokenizer
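Note: torchtune configs are OmegaConf-based, so the ${output_dir} reference above is resolved by interpolation from the new top-level key, and a single key=value override at launch now redirects the checkpointer path as well. A minimal sketch of the resolved behaviour (the /tmp/my_eval path is only an illustration, not part of this change):

# tune run eleuther_eval --config eleuther_evaluation output_dir=/tmp/my_eval
output_dir: /tmp/my_eval      # overrides the default ./
checkpointer:
  output_dir: ${output_dir}   # resolves to /tmp/my_eval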
@@ -3,6 +3,8 @@
 # To launch, run the following command:
 # tune run eleuther_eval --config gemma/evaluation

+output_dir: ./ # Not needed
+
 # Model Arguments
 model:
   _component_: torchtune.models.gemma.gemma_2b
@@ -15,7 +17,7 @@ checkpointer:
     model-00001-of-00002.safetensors,
     model-00002-of-00002.safetensors,
   ]
-  output_dir: ./ # Not needed
+  output_dir: ${output_dir}
   model_type: GEMMA

 # Tokenizer
@@ -3,6 +3,8 @@
 # To launch, run the following command from root torchtune directory:
 # tune run generate --config generation

+output_dir: ./ # Not needed
+
 # Model arguments
 model:
   _component_: torchtune.models.llama2.llama2_7b
@@ -14,7 +16,7 @@ checkpointer:
     pytorch_model-00001-of-00002.bin,
     pytorch_model-00002-of-00002.bin,
   ]
-  output_dir: /tmp/Llama-2-7b-hf/
+  output_dir: ${output_dir}
   model_type: LLAMA2

 device: cuda
Original file line number Diff line number Diff line change 66# To launch, run the following command:
77# tune run dev/generate_v2 --config llama2/generation_v2
88
9+ output_dir : ./ # Not needed
10+
911# Model arguments
1012model :
1113 _component_ : torchtune.models.llama2.llama2_7b
@@ -24,7 +26,7 @@ checkpointer:
2426 pytorch_model-00001-of-00002.bin,
2527 pytorch_model-00002-of-00002.bin
2628 ]
27- output_dir : ./
29+ output_dir : ${output_dir}
2830 model_type : LLAMA2
2931
3032# Device
Original file line number Diff line number Diff line change 99# To launch, run the following command from root torchtune directory:
1010# tune run eleuther_eval --config llama3_2_vision/11B_evaluation
1111
12+ output_dir : ./ # Not needed
13+
1214# Model arguments
1315model :
1416 _component_ : torchtune.models.llama3_2_vision.llama3_2_vision_11b
@@ -26,7 +28,7 @@ checkpointer:
2628 checkpoint_files :
2729 filename_format : model-{}-of-{}.safetensors
2830 max_filename : " 00005"
29- output_dir : ./
31+ output_dir : ${output_dir}
3032 model_type : LLAMA3_VISION
3133
3234# Environment
Original file line number Diff line number Diff line change 77# To launch, run the following command from root torchtune directory:
88# tune run dev/generate_v2 --config llama3_2_vision/generation_v2
99
10+ output_dir : ./ # Not needed
11+
1012# Model arguments
1113model :
1214 _component_ : torchtune.models.llama3_2_vision.llama3_2_vision_11b
@@ -25,7 +27,7 @@ checkpointer:
2527 checkpoint_files :
2628 filename_format : model-{}-of-{}.safetensors
2729 max_filename : " 00005"
28- output_dir : ./
30+ output_dir : ${output_dir}
2931 model_type : LLAMA3_VISION
3032
3133# Device
@@ -3,6 +3,8 @@
 # To launch, run the following command:
 # tune run eleuther_eval --config mistral/evaluation

+output_dir: ./ # Not needed
+
 # Model Arguments
 model:
   _component_: torchtune.models.mistral.mistral_7b
@@ -15,7 +17,7 @@ checkpointer:
     pytorch_model-00001-of-00002.bin,
     pytorch_model-00002-of-00002.bin
   ]
-  output_dir: /tmp/Mistral-7B-v0.1/
+  output_dir: ${output_dir}
   model_type: MISTRAL
 resume_from_checkpoint: False

@@ -3,6 +3,8 @@
 # To launch, run the following command:
 # tune run eleuther_eval --config phi3/evaluation

+output_dir: ./ # Not needed
+
 # Model Arguments
 model:
   _component_: torchtune.models.phi3.phi3_mini
@@ -16,7 +18,7 @@ checkpointer:
     model-00002-of-00002.safetensors
   ]
   recipe_checkpoint: null
-  output_dir: /tmp/Phi-3-mini-4k-instruct
+  output_dir: ${output_dir}
   model_type: PHI3_MINI
 resume_from_checkpoint: False

@@ -3,6 +3,8 @@
 # To launch, run the following command from root torchtune directory:
 # tune run quantize --config quantization

+output_dir: /tmp/torchtune/llama2_7B/quantized # /tmp may be deleted by your system. Change it to your preference.
+
 #
 # Model arguments
 model:
@@ -16,7 +18,7 @@ checkpointer:
     pytorch_model-00002-of-00002.bin,
   ]
   recipe_checkpoint: null
-  output_dir: /tmp/Llama-2-7b-hf
+  output_dir: ${output_dir}
   model_type: LLAMA2

 device: cuda
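Note: unlike the evaluation and generation configs above, which mark output_dir as "Not needed", the quantize recipe does write quantized weights under the checkpointer's output_dir, hence the real /tmp/torchtune default and the warning comment. A minimal sketch of pointing it at a persistent location via the usual key=value override (the /data path is only an illustration):

# tune run quantize --config quantization output_dir=/data/torchtune/llama2_7B/quantized
output_dir: /data/torchtune/llama2_7B/quantized
checkpointer:
  output_dir: ${output_dir}   # quantized checkpoint files land here instead of /tmp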
@@ -3,6 +3,8 @@
 # To launch, run the following command:
 # tune run eleuther_eval --config qwen2/evaluation

+output_dir: ./ # Not needed
+
 # Model Arguments
 model:
   _component_: torchtune.models.qwen2.qwen2_7b
@@ -17,7 +19,7 @@ checkpointer:
     model-00003-of-00004.safetensors,
     model-00004-of-00004.safetensors
   ]
-  output_dir: ./ # Not needed
+  output_dir: ${output_dir}
   model_type: QWEN2

 # Tokenizer