Upload folder using huggingface_hub
- hyperparameters.yaml +30 -0
- lit_model.pth +3 -0
- model_config.yaml +28 -0
- tokenizer.json +0 -0
- tokenizer_config.json +9 -0
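The commit title indicates the files were pushed with the huggingface_hub client. A minimal sketch of such an upload, assuming a local litgpt output directory and a placeholder repo ID (neither is stated in this commit):

from huggingface_hub import HfApi

# Push every file in a local checkpoint folder to a model repo on the Hub.
# Both the folder path and the repo ID below are placeholders, not values
# taken from this commit.
api = HfApi()
api.upload_folder(
    folder_path="out/custom-model/final",
    repo_id="your-username/pythia-160m-custom",
    repo_type="model",
    commit_message="Upload folder using huggingface_hub",
)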
hyperparameters.yaml
ADDED
@@ -0,0 +1,30 @@
+model_name: pythia-160m
+out_dir: out/custom-model
+resume: false
+data:
+  class_path: litgpt.data.TextFiles
+  init_args:
+    train_data_path: train
+    seed: 42
+    num_workers: 4
+train:
+  save_interval: 1000
+  log_interval: 1
+  global_batch_size: 512
+  micro_batch_size: 4
+  lr_warmup_steps: 2000
+  max_tokens: 150000000
+  tie_embeddings: false
+  learning_rate: 0.0004
+  weight_decay: 0.1
+  beta1: 0.9
+  beta2: 0.95
+  max_norm: 1.0
+  min_lr: 4.0e-05
+eval:
+  interval: 1000
+  max_iters: 100
+devices: auto
+tokenizer_dir: checkpoints/EleutherAI/pythia-160m
+logger_name: tensorboard
+seed: 42
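hyperparameters.yaml records the litgpt pretraining settings for this run: a pythia-160m architecture trained on litgpt.data.TextFiles for up to 150 M tokens. A short sketch of how a few of these values relate to each other, assuming the file is read back with PyYAML and that training ran on a single device (both assumptions, not part of the config):

import yaml

# Read the recorded training settings back in.
with open("hyperparameters.yaml") as f:
    cfg = yaml.safe_load(f)

train = cfg["train"]
# On a single device, gradients are accumulated across micro-batches until the
# global batch size is reached before the optimizer steps.
grad_accum_steps = train["global_batch_size"] // train["micro_batch_size"]
print(grad_accum_steps)  # 512 // 4 = 128 micro-batches per optimizer step

A config like this can usually also be passed back to the litgpt pretrain command through its --config flag to reproduce or resume the run.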
lit_model.pth
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:06e7f79aa6aff933ec2804c2bc9a67c6d5e85efca5e88cf01b2705953d09a3ad
+size 1948054098
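lit_model.pth is stored through Git LFS, so the diff above shows only the three-line pointer; the object it references is roughly 1.9 GB. A sketch of inspecting the checkpoint once the real file has been fetched (the path and the exact checkpoint layout are assumptions):

import torch

# After the LFS object has been downloaded, lit_model.pth is a regular PyTorch
# checkpoint. Depending on how it was saved it may be a flat weight state dict
# or a dict that also carries optimizer and training state.
ckpt = torch.load("lit_model.pth", map_location="cpu")
state_dict = ckpt.get("model", ckpt) if isinstance(ckpt, dict) else ckpt
print(list(state_dict.keys())[:5])  # first few tensor names in the checkpoint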
model_config.yaml
ADDED
@@ -0,0 +1,28 @@
+bias: true
+block_size: 2048
+gelu_approximate: none
+head_size: 64
+hf_config:
+  name: pythia-160m
+  org: EleutherAI
+intermediate_size: 3072
+lm_head_bias: false
+mlp_class_name: GptNeoxMLP
+n_embd: 768
+n_expert: 0
+n_expert_per_token: 0
+n_head: 12
+n_layer: 12
+n_query_groups: 12
+name: pythia-160m
+norm_class_name: LayerNorm
+norm_eps: 1.0e-05
+padded_vocab_size: 50304
+padding_multiple: 128
+parallel_residual: true
+rope_base: 10000
+rope_condense_ratio: 1
+rotary_percentage: 0.25
+scale_embeddings: false
+shared_attention_norm: false
+vocab_size: 50254
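model_config.yaml describes the same architecture as EleutherAI's pythia-160m: 12 layers, 12 heads of size 64 (12 x 64 = 768 = n_embd), GPT-NeoX MLPs, parallel residual connections, and rotary embeddings applied to 25% of each head dimension. A sketch of rebuilding the model from this file with litgpt, assuming a recent litgpt version that exposes Config and GPT at the top level (file path is also an assumption):

from litgpt import GPT, Config
import yaml

# Recreate the architecture described by model_config.yaml and count parameters.
with open("model_config.yaml") as f:
    config = Config(**yaml.safe_load(f))

model = GPT(config)
n_params = sum(p.numel() for p in model.parameters())
print(f"{n_params / 1e6:.1f}M parameters")  # roughly 160M for this configuration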
tokenizer.json
ADDED
The diff for this file is too large to render.
tokenizer_config.json
ADDED
@@ -0,0 +1,9 @@
+
{
|
| 2 |
+
"add_prefix_space": false,
|
| 3 |
+
"bos_token": "<|endoftext|>",
|
| 4 |
+
"eos_token": "<|endoftext|>",
|
| 5 |
+
"name_or_path": "EleutherAI/gpt-neox-20b",
|
| 6 |
+
"special_tokens_map_file": "/admin/home-hailey/.cache/huggingface/hub/models--EleutherAI--gpt-neox-20b/snapshots/4e49eadb5d14bd22f314ec3f45b69a87b88c7691/special_tokens_map.json",
|
| 7 |
+
"tokenizer_class": "GPTNeoXTokenizer",
|
| 8 |
+
"unk_token": "<|endoftext|>"
|
| 9 |
+
}
|
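tokenizer.json and tokenizer_config.json mirror EleutherAI's GPT-NeoX-20B byte-level BPE tokenizer, with <|endoftext|> serving as the BOS, EOS, and UNK token. A sketch of loading it with the transformers AutoTokenizer; the repo ID is a placeholder for wherever this folder was uploaded:

from transformers import AutoTokenizer

# Works from the Hub repo or from a local directory containing these files.
tok = AutoTokenizer.from_pretrained("your-username/pythia-160m-custom")
ids = tok("Hello world")["input_ids"]
print(ids)
print(tok.decode(ids))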