Upload folder using huggingface_hub
- README.md +3 -3
- config.json +2 -2
- special_tokens_map.json +1 -1
- tokenizer_config.json +1 -9
README.md CHANGED
@@ -20,7 +20,7 @@ tags:
 - meta
 - pytorch
 - llama
--
+- llama4
 extra_gated_prompt: >-
   **LLAMA 4 COMMUNITY LICENSE AGREEMENT**

@@ -176,7 +176,7 @@ Please, make sure you have transformers `v4.51.0` installed, or upgrade using `p
 from transformers import AutoProcessor, Llama4ForConditionalGeneration
 import torch

-model_id = "meta-llama/Llama-4-
+model_id = "meta-llama/Llama-4-Scout-17B-16E-Instruct"

 processor = AutoProcessor.from_pretrained(model_id)
 model = Llama4ForConditionalGeneration.from_pretrained(
@@ -269,7 +269,7 @@ In this section, we report the results for Llama 4 relative to our previous mode
 | Image Understanding | ChartQA | 0 | relaxed\_accuracy | | | 88.8 | 90.0 |
 | | DocVQA (test) | 0 | anls | | | 94.4 | 94.4 |
 | Coding | LiveCodeBench (10/01/2024-02/01/2025) | 0 | pass@1 | 33.3 | 27.7 | 32.8 | 43.4 |
-| Reasoning & Knowledge | MMLU Pro | 0 | macro\_avg/
+| Reasoning & Knowledge | MMLU Pro | 0 | macro\_avg/acc | 68.9 | 73.4 | 74.3 | 80.5 |
 | | GPQA Diamond | 0 | accuracy | 50.5 | 49.0 | 57.2 | 69.8 |
 | Multilingual | MGSM | 0 | average/em | 91.1 | 91.6 | 90.6 | 92.3 |
 | Long context | MTOB (half book) eng-\>kgv/kgv-\>eng | \- | chrF | Context window is 128K | | 42.2/36.6 | 54.0/46.4 |
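For reference, the corrected README snippet can be exercised end to end roughly as below. This is a sketch rather than part of the commit: the `torch_dtype`, `device_map`, generation arguments, and the text-only message format are assumptions about a typical transformers setup, and the gated repo requires accepted access plus sufficient GPU memory.

```python
# Sketch: load the model with the corrected repo id and run a short text-only
# generation. Assumes transformers >= 4.51.0 and access to the gated repo.
import torch
from transformers import AutoProcessor, Llama4ForConditionalGeneration

model_id = "meta-llama/Llama-4-Scout-17B-16E-Instruct"

processor = AutoProcessor.from_pretrained(model_id)
model = Llama4ForConditionalGeneration.from_pretrained(
    model_id,
    torch_dtype=torch.bfloat16,  # assumption: bf16 weights
    device_map="auto",           # assumption: shard across available GPUs
)

# Assumption: text-only chat message in the multimodal content-list format.
messages = [
    {"role": "user", "content": [{"type": "text", "text": "Summarize what a mixture-of-experts layer does."}]},
]
inputs = processor.apply_chat_template(
    messages,
    add_generation_prompt=True,
    tokenize=True,
    return_dict=True,
    return_tensors="pt",
).to(model.device)

out = model.generate(**inputs, max_new_tokens=128)
print(processor.batch_decode(out[:, inputs["input_ids"].shape[-1]:], skip_special_tokens=True)[0])
```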
config.json CHANGED
@@ -37,8 +37,8 @@
     "pad_token_id": 200018,
     "rms_norm_eps": 1e-05,
     "rope_scaling": {
-      "factor":
-      "high_freq_factor":
+      "factor": 16.0,
+      "high_freq_factor": 1.0,
       "low_freq_factor": 1.0,
       "original_max_position_embeddings": 8192,
       "rope_type": "llama3"
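The `rope_scaling` block above is the llama3-style long-context scaling used by the text backbone. A quick way to confirm what actually gets loaded is sketched below; it assumes the block lives under the model's text sub-config (with a fallback to the top-level config if it does not), and the repo id is taken from the README above.

```python
# Sketch: inspect the rope_scaling values that transformers loads from config.json.
from transformers import AutoConfig

model_id = "meta-llama/Llama-4-Scout-17B-16E-Instruct"
config = AutoConfig.from_pretrained(model_id)

# Assumption: for the composite Llama 4 config, rope_scaling sits on text_config.
text_config = getattr(config, "text_config", config)
print(text_config.rope_scaling)
# Expected after this commit (values from the diff above):
# factor 16.0, high_freq_factor 1.0, low_freq_factor 1.0,
# original_max_position_embeddings 8192, rope_type "llama3"
```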
special_tokens_map.json CHANGED
@@ -1,5 +1,5 @@
 {
   "bos_token": "<|begin_of_text|>",
   "eos_token": "<|eot|>",
-  "pad_token": "<|finetune_right_pad_id|>"
+  "pad_token": "<|finetune_right_pad|>"
 }
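A sketch for checking that the tokenizer now reports the updated padding token; the repo id is taken from the README above, and whether `pad_token_id` resolves to the `200018` used in config.json is something to confirm rather than assume.

```python
# Sketch: verify the pad token AutoTokenizer picks up from
# special_tokens_map.json / tokenizer_config.json after this change.
from transformers import AutoTokenizer

model_id = "meta-llama/Llama-4-Scout-17B-16E-Instruct"
tok = AutoTokenizer.from_pretrained(model_id)

print(tok.pad_token)     # expected: <|finetune_right_pad|>
print(tok.pad_token_id)  # should line up with "pad_token_id": 200018 in config.json
```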
tokenizer_config.json CHANGED
@@ -9071,14 +9071,6 @@
       "rstrip": false,
       "single_word": false,
       "special": true
-    },
-    "201134": {
-      "content": "<|finetune_right_pad_id|>",
-      "lstrip": false,
-      "normalized": false,
-      "rstrip": false,
-      "single_word": false,
-      "special": true
     }
   },
   "bos_token": "<|begin_of_text|>",
@@ -9091,7 +9083,7 @@
     "attention_mask"
   ],
   "model_max_length": 10485760,
-  "pad_token": "<|finetune_right_pad_id|>",
+  "pad_token": "<|finetune_right_pad|>",
   "processor_class": "Llama4Processor",
   "tokenizer_class": "PreTrainedTokenizer"
 }
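The other half of the tokenizer fix is dropping the `"201134"` added-token entry for `<|finetune_right_pad_id|>`. The sketch below is one way to confirm the entry no longer appears in the loaded tokenizer's added-token map; the assertion is purely illustrative and the repo id is again taken from the README above.

```python
# Sketch: confirm the stray added-token entry was removed with this commit.
from transformers import AutoTokenizer

model_id = "meta-llama/Llama-4-Scout-17B-16E-Instruct"
tok = AutoTokenizer.from_pretrained(model_id)

# added_tokens_decoder maps added-token ids to their AddedToken entries.
added = tok.added_tokens_decoder
assert 201134 not in added, "old <|finetune_right_pad_id|> entry is still present"
print(f"{len(added)} added tokens, pad token: {tok.pad_token!r}")
```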