# Llama-3.1-8B-precise-if / training_config.yaml
# Provenance: Hugging Face repo page (uploader: pmahdavi), commit 8cf39a7 (verified),
# commit message: "Upload precise-if model (from checkpoint, without optimizer states)".
# File size as listed on the page: 731 Bytes.
# LLaMA-Factory supervised fine-tuning (SFT) configuration for
# meta-llama/Llama-3.1-8B on the tulu3_mixture_precise_if dataset.

# --- Precision / runtime ---
bf16: true  # train in bfloat16 mixed precision
cutoff_len: 4096  # max sequence length in tokens (inputs truncated beyond this)

# --- Data ---
# NOTE(review): dataset name must be registered in LLaMA-Factory's
# dataset_info.json — not visible from this file; confirm against the repo.
dataset: tulu3_mixture_precise_if
ddp_timeout: 180000000  # DDP collective timeout (ms) — very large to survive long preprocessing
deepspeed: examples/deepspeed/ds_z3_config.json  # DeepSpeed ZeRO stage-3 config (path relative to LLaMA-Factory root)
do_train: true
eval_strategy: 'no'  # quoted deliberately — bare `no` is YAML-1.1 boolean false
finetuning_type: full  # full-parameter fine-tuning (no LoRA)

# --- Optimization ---
# Effective batch size = per_device_train_batch_size (2) x
# gradient_accumulation_steps (32) x world size. The output_dir suffix
# "ebs128" suggests 2 GPUs (2*32*2 = 128) — TODO confirm launch world size.
gradient_accumulation_steps: 32
learning_rate: 1.0e-05
logging_steps: 10
lr_scheduler_type: cosine  # cosine decay after warmup
model_name_or_path: meta-llama/Llama-3.1-8B
num_train_epochs: 1.0

# --- Output / checkpointing ---
output_dir: /scratch/pxm5426/runs/lora-exploration/llama-factory/Llama-3.1-8B_tulu3_mixture_precise_if_full_ebs128_lr1e-05
overwrite_cache: true  # re-run dataset preprocessing instead of reusing cache
overwrite_output_dir: false  # refuse to clobber an existing run directory
per_device_train_batch_size: 2
plot_loss: true
preprocessing_num_workers: 12
report_to: wandb  # log metrics to Weights & Biases
save_steps: 300
save_strategy: steps  # checkpoint every `save_steps` optimizer steps
stage: sft
template: tulu_v3  # chat/prompt template applied when formatting the dataset
trust_remote_code: true
warmup_ratio: 0.03  # fraction of total steps used for LR warmup