ashishscapsitech123 committed
Commit 8bf3c49 · verified · 1 Parent(s): 8c8b148

Upload Qwen3VLForConditionalGeneration

config.json ADDED
@@ -0,0 +1,92 @@
+{
+  "architectures": [
+    "Qwen3VLForConditionalGeneration"
+  ],
+  "dtype": "float16",
+  "eos_token_id": 151645,
+  "image_token_id": 151655,
+  "model_type": "qwen3_vl",
+  "pad_token_id": 151654,
+  "quantization_config": {
+    "_load_in_4bit": true,
+    "_load_in_8bit": false,
+    "bnb_4bit_compute_dtype": "bfloat16",
+    "bnb_4bit_quant_storage": "uint8",
+    "bnb_4bit_quant_type": "nf4",
+    "bnb_4bit_use_double_quant": true,
+    "llm_int8_enable_fp32_cpu_offload": false,
+    "llm_int8_has_fp16_weight": false,
+    "llm_int8_skip_modules": [
+      "embed_tokens",
+      "embedding",
+      "lm_head",
+      "multi_modal_projector",
+      "merger",
+      "modality_projection",
+      "router",
+      "visual",
+      "vision_tower"
+    ],
+    "llm_int8_threshold": 6.0,
+    "load_in_4bit": true,
+    "load_in_8bit": false,
+    "quant_method": "bitsandbytes"
+  },
+  "text_config": {
+    "attention_bias": false,
+    "attention_dropout": 0.0,
+    "bos_token_id": 151643,
+    "dtype": "float16",
+    "eos_token_id": 151645,
+    "head_dim": 128,
+    "hidden_act": "silu",
+    "hidden_size": 4096,
+    "initializer_range": 0.02,
+    "intermediate_size": 12288,
+    "max_position_embeddings": 262144,
+    "model_type": "qwen3_vl_text",
+    "num_attention_heads": 32,
+    "num_hidden_layers": 36,
+    "num_key_value_heads": 8,
+    "rms_norm_eps": 1e-06,
+    "rope_scaling": {
+      "mrope_interleaved": true,
+      "mrope_section": [
+        24,
+        20,
+        20
+      ],
+      "rope_type": "default"
+    },
+    "rope_theta": 5000000,
+    "use_cache": true,
+    "vocab_size": 151936
+  },
+  "tie_word_embeddings": false,
+  "transformers_version": "4.57.1",
+  "unsloth_fixed": true,
+  "video_token_id": 151656,
+  "vision_config": {
+    "deepstack_visual_indexes": [
+      8,
+      16,
+      24
+    ],
+    "depth": 27,
+    "dtype": "float16",
+    "hidden_act": "gelu_pytorch_tanh",
+    "hidden_size": 1152,
+    "in_channels": 3,
+    "initializer_range": 0.02,
+    "intermediate_size": 4304,
+    "model_type": "qwen3_vl",
+    "num_heads": 16,
+    "num_position_embeddings": 2304,
+    "out_hidden_size": 4096,
+    "patch_size": 16,
+    "spatial_merge_size": 2,
+    "temporal_patch_size": 2
+  },
+  "vision_end_token_id": 151653,
+  "vision_start_token_id": 151652
+}
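The quantization_config block above describes a bitsandbytes NF4 setup: weights stored in 4-bit with double quantization, bfloat16 compute, and the embedding, head, projector, and vision modules left unquantized. A minimal loading sketch follows, assuming the checkpoint is fetched by its Hub repo id (the id below is a placeholder, not taken from this commit) and that Qwen3-VL is registered under AutoModelForImageTextToText in transformers 4.57.1:

# Sketch: load this checkpoint with the 4-bit settings mirrored from config.json.
# "ashishscapsitech123/qwen3-vl-4bit" is a placeholder repo id (assumption).
import torch
from transformers import AutoModelForImageTextToText, AutoProcessor, BitsAndBytesConfig

bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,                      # "load_in_4bit": true
    bnb_4bit_quant_type="nf4",              # "bnb_4bit_quant_type": "nf4"
    bnb_4bit_use_double_quant=True,         # "bnb_4bit_use_double_quant": true
    bnb_4bit_compute_dtype=torch.bfloat16,  # "bnb_4bit_compute_dtype": "bfloat16"
    llm_int8_skip_modules=["embed_tokens", "lm_head", "visual", "merger"],
)

model_id = "ashishscapsitech123/qwen3-vl-4bit"  # placeholder
model = AutoModelForImageTextToText.from_pretrained(
    model_id,
    quantization_config=bnb_config,  # optional: config.json already embeds these values
    device_map="auto",
)
processor = AutoProcessor.from_pretrained(model_id)

Passing quantization_config explicitly is redundant here, since the serialized config already carries it; it is shown only to make the mapping between the JSON keys and the BitsAndBytesConfig arguments visible.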
generation_config.json ADDED
@@ -0,0 +1,13 @@
+{
+  "bos_token_id": 151643,
+  "do_sample": true,
+  "eos_token_id": [
+    151645,
+    151643
+  ],
+  "pad_token_id": 151654,
+  "temperature": 0.7,
+  "top_k": 20,
+  "top_p": 0.8,
+  "transformers_version": "4.57.1"
+}
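generation_config.json supplies the sampling defaults (do_sample with temperature 0.7, top_p 0.8, top_k 20), which model.generate picks up automatically. A short usage sketch continuing from the loading code above; the image URL and prompt are illustrative placeholders:

# Sketch: one chat turn with an image, reusing `model` and `processor` from above.
messages = [
    {
        "role": "user",
        "content": [
            {"type": "image", "url": "https://example.com/sample.jpg"},  # placeholder image
            {"type": "text", "text": "Describe this image."},
        ],
    }
]
inputs = processor.apply_chat_template(
    messages,
    add_generation_prompt=True,
    tokenize=True,
    return_dict=True,
    return_tensors="pt",
).to(model.device)

# temperature / top_p / top_k come from generation_config.json unless overridden here.
output_ids = model.generate(**inputs, max_new_tokens=256)
print(processor.batch_decode(
    output_ids[:, inputs["input_ids"].shape[1]:], skip_special_tokens=True
)[0])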
model-00001-of-00002.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f0c4362c896e945ec505feae2dbe4e9296399cde6099e4db796e8d656ff0cecb
+size 4999092688
model-00002-of-00002.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0cb3317ef8b08c28576afe3ec8666edda452592cc3de9a6a302585b98af41ff4
+size 2227305794
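Both .safetensors entries are Git LFS pointer files: the shards themselves (about 5.0 GB and 2.2 GB) live in LFS storage, and each pointer records the shard's sha256 and byte size. A small sketch for checking a downloaded shard against its pointer; the local file path is a placeholder:

# Sketch: verify a downloaded shard against the oid/size recorded in its LFS pointer.
import hashlib
import os

def sha256_of(path: str, chunk: int = 1 << 20) -> str:
    h = hashlib.sha256()
    with open(path, "rb") as f:
        while block := f.read(chunk):
            h.update(block)
    return h.hexdigest()

expected_oid = "f0c4362c896e945ec505feae2dbe4e9296399cde6099e4db796e8d656ff0cecb"
expected_size = 4999092688

path = "model-00001-of-00002.safetensors"  # local download path (placeholder)
assert os.path.getsize(path) == expected_size, "size mismatch"
assert sha256_of(path) == expected_oid, "sha256 mismatch"
print("shard 1 matches its LFS pointer")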
model.safetensors.index.json ADDED
The diff for this file is too large to render.
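model.safetensors.index.json is the standard sharded-checkpoint index: metadata with the total parameter size (the two shard files above add up to roughly 7.2 GB) plus a weight_map from each tensor name to the shard that stores it. A sketch of inspecting it locally; the path is a placeholder and the printed counts depend on the actual file:

# Sketch: inspect the sharded-checkpoint index.
import json

with open("model.safetensors.index.json") as f:
    index = json.load(f)

print(index["metadata"]["total_size"])     # total tensor bytes across shards
weight_map = index["weight_map"]           # {tensor name -> shard filename}
shards = sorted(set(weight_map.values()))  # the model-0000x-of-00002 files
print(len(weight_map), "tensors across", len(shards), "shards")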