```json
{
  "model_type": "llama",
  "quantization": "q4f32_1",
  "model_config": {
    "hidden_size": 4096,
    "intermediate_size": 11008,
    "num_attention_heads": 32,
    "num_hidden_layers": 32,
    "rms_norm_eps": 1e-05,
    "vocab_size": 32000,
    "position_embedding_base": 10000.0,
    "context_window_size": 4096,
    "prefill_chunk_size": 4096,
    "num_key_value_heads": 32,
    "head_dim": 128,
    "tensor_parallel_shards": 2,
    "max_batch_size": 80
  },
  "vocab_size": 32000,
  "context_window_size": 4096,
  "sliding_window_size": -1,
  "prefill_chunk_size": 4096,
  "attention_sink_size": -1,
  "tensor_parallel_shards": 2,
  "mean_gen_len": 128,
  "max_gen_len": 512,
  "shift_fill_factor": 0.3,
  "temperature": 0.7,
  "presence_penalty": 0.0,
  "frequency_penalty": 0.0,
  "repetition_penalty": 1.0,
  "top_p": 0.95,
  "conv_template": {
    "name": "llama-2",
    "system_template": "[INST] <<SYS>>\n{system_message}\n<</SYS>>\n\n ",
    "system_message": "You are a helpful, respectful and honest assistant.",
    "roles": {
      "user": "[INST]",
      "assistant": "[/INST]",
      "tool": "[INST]"
    },
    "role_templates": {
      "user": "{user_message}",
      "assistant": "{assistant_message}",
      "tool": "{tool_message}"
    },
    "messages": [],
    "seps": [
      " "
    ],
    "role_content_sep": " ",
    "role_empty_sep": " ",
    "stop_str": [
      "[INST]"
    ],
    "stop_token_ids": [
      2
    ],
    "function_string": "",
    "use_function_calling": false
  },
  "pad_token_id": 0,
  "bos_token_id": 1,
  "eos_token_id": 2,
  "tokenizer_files": [
    "tokenizer.model",
    "tokenizer_config.json",
    "tokenizer.json"
  ],
  "version": "0.1.0"
}
```
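The field layout above matches an MLC-LLM chat configuration. Below is a minimal sketch of loading this file and sanity-checking a few internal relationships with the Python standard library; the filename `mlc-chat-config.json` and the derived-field checks (e.g. `head_dim = hidden_size / num_attention_heads`) are assumptions drawn from the field names above, not something the file itself guarantees.

```python
import json

# Assumed filename; MLC-LLM conventionally calls this file
# "mlc-chat-config.json", but adjust the path to your layout.
with open("mlc-chat-config.json") as f:
    config = json.load(f)

model = config["model_config"]

# head_dim should equal hidden_size / num_attention_heads
# (4096 / 32 = 128 for this config).
assert model["head_dim"] == model["hidden_size"] // model["num_attention_heads"]

# num_key_value_heads == num_attention_heads means every query head has
# its own KV head, i.e. plain multi-head attention rather than
# grouped-query attention.
gqa_groups = model["num_attention_heads"] // model["num_key_value_heads"]
print(f"query heads per KV head: {gqa_groups}")  # 1 -> standard MHA

# Several top-level fields mirror model_config; check that they agree.
for key in ("vocab_size", "context_window_size", "prefill_chunk_size",
            "tensor_parallel_shards"):
    assert config[key] == model[key], f"mismatch on {key}"
```

One plausible reading of the duplicated fields: the nested `model_config` block describes the compiled model itself, while the top-level copies are the values the chat runtime consumes, so keeping both in agreement (as the loop above checks) avoids surprises at serving time.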