lc700x committed
Commit 7707866 · verified · 1 Parent(s): bb36363

Upload 4 files

Files changed (4)
  1. README.md +80 -0
  2. config.json +159 -0
  3. model.safetensors +3 -0
  4. preprocessor_config.json +26 -0
README.md ADDED
@@ -0,0 +1,80 @@
---
license: apache-2.0
tags:
- vision
- depth-estimation
- dinov2
inference: false
---

# Model Card: DPT model with DINOv2 backbone

## Model Details

DPT (Dense Prediction Transformer) model with a DINOv2 backbone, as proposed in [DINOv2: Learning Robust Visual Features without Supervision](https://arxiv.org/abs/2304.07193) by Oquab et al.

<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/dpt_architecture.jpg" alt="drawing" width="600"/>

<small> DPT architecture. Taken from the <a href="https://arxiv.org/abs/2103.13413" target="_blank">original paper</a>. </small>

### Resources

- [DINOv2 Paper](https://arxiv.org/abs/2304.07193)
- [DPT Paper](https://arxiv.org/abs/2103.13413)

### Use with Transformers

```python
from transformers import AutoImageProcessor, DPTForDepthEstimation
import torch
import numpy as np
from PIL import Image
import requests

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)

image_processor = AutoImageProcessor.from_pretrained("facebook/dpt-dinov2-base-nyu")
model = DPTForDepthEstimation.from_pretrained("facebook/dpt-dinov2-base-nyu")

# prepare image for the model
inputs = image_processor(images=image, return_tensors="pt")

with torch.no_grad():
    outputs = model(**inputs)
    predicted_depth = outputs.predicted_depth

# interpolate to original size
prediction = torch.nn.functional.interpolate(
    predicted_depth.unsqueeze(1),
    size=image.size[::-1],
    mode="bicubic",
    align_corners=False,
)

# visualize the prediction
output = prediction.squeeze().cpu().numpy()
formatted = (output * 255 / np.max(output)).astype("uint8")
depth = Image.fromarray(formatted)
```
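
As a quick follow-up (a minimal sketch reusing the variables from the snippet above; the output filename is hypothetical), you can save the visualization and inspect the raw prediction:

```python
# save the depth visualization and report the raw prediction range
depth.save("depth_visualization.png")
print(predicted_depth.shape)       # depth map at the model's output resolution
print(output.min(), output.max())  # value range of the interpolated prediction
```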

## Model Use

### Intended Use

The model is intended to showcase that using the DPT framework with DINOv2 as the backbone yields a powerful depth estimator.

### BibTeX entry and citation info

```bibtex
@misc{oquab2023dinov2,
      title={DINOv2: Learning Robust Visual Features without Supervision},
      author={Maxime Oquab and Timothée Darcet and Théo Moutakanni and Huy Vo and Marc Szafraniec and Vasil Khalidov and Pierre Fernandez and Daniel Haziza and Francisco Massa and Alaaeldin El-Nouby and Mahmoud Assran and Nicolas Ballas and Wojciech Galuba and Russell Howes and Po-Yao Huang and Shang-Wen Li and Ishan Misra and Michael Rabbat and Vasu Sharma and Gabriel Synnaeve and Hu Xu and Hervé Jegou and Julien Mairal and Patrick Labatut and Armand Joulin and Piotr Bojanowski},
      year={2023},
      eprint={2304.07193},
      archivePrefix={arXiv},
      primaryClass={cs.CV}
}
```
config.json ADDED
@@ -0,0 +1,159 @@
{
  "_commit_hash": null,
  "add_projection": true,
  "architectures": [
    "DPTForDepthEstimation"
  ],
  "attention_probs_dropout_prob": null,
  "auxiliary_loss_weight": 0.4,
  "backbone_config": {
    "_name_or_path": "",
    "add_cross_attention": false,
    "apply_layernorm": false,
    "architectures": [
      "Dinov2Model"
    ],
    "attention_probs_dropout_prob": 0.0,
    "bad_words_ids": null,
    "begin_suppress_tokens": null,
    "bos_token_id": null,
    "chunk_size_feed_forward": 0,
    "cross_attention_hidden_size": null,
    "decoder_start_token_id": null,
    "diversity_penalty": 0.0,
    "do_sample": false,
    "drop_path_rate": 0.0,
    "early_stopping": false,
    "encoder_no_repeat_ngram_size": 0,
    "eos_token_id": null,
    "exponential_decay_length_penalty": null,
    "finetuning_task": null,
    "forced_bos_token_id": null,
    "forced_eos_token_id": null,
    "hidden_act": "gelu",
    "hidden_dropout_prob": 0.0,
    "hidden_size": 768,
    "id2label": {
      "0": "LABEL_0",
      "1": "LABEL_1"
    },
    "image_size": 518,
    "initializer_range": 0.02,
    "is_decoder": false,
    "is_encoder_decoder": false,
    "label2id": {
      "LABEL_0": 0,
      "LABEL_1": 1
    },
    "layer_norm_eps": 1e-06,
    "layerscale_value": 1.0,
    "length_penalty": 1.0,
    "max_length": 20,
    "min_length": 0,
    "mlp_ratio": 4,
    "model_type": "dinov2",
    "no_repeat_ngram_size": 0,
    "num_attention_heads": 12,
    "num_beam_groups": 1,
    "num_beams": 1,
    "num_channels": 3,
    "num_hidden_layers": 12,
    "num_return_sequences": 1,
    "out_features": [
      "stage3",
      "stage6",
      "stage9",
      "stage12"
    ],
    "out_indices": [
      3,
      6,
      9,
      12
    ],
    "output_attentions": false,
    "output_hidden_states": false,
    "output_scores": false,
    "pad_token_id": null,
    "patch_size": 14,
    "prefix": null,
    "problem_type": null,
    "pruned_heads": {},
    "qkv_bias": true,
    "remove_invalid_values": false,
    "repetition_penalty": 1.0,
    "reshape_hidden_states": false,
    "return_dict": true,
    "return_dict_in_generate": false,
    "sep_token_id": null,
    "stage_names": [
      "stem",
      "stage1",
      "stage2",
      "stage3",
      "stage4",
      "stage5",
      "stage6",
      "stage7",
      "stage8",
      "stage9",
      "stage10",
      "stage11",
      "stage12"
    ],
    "suppress_tokens": null,
    "task_specific_params": null,
    "temperature": 1.0,
    "tf_legacy_loss": false,
    "tie_encoder_decoder": false,
    "tie_word_embeddings": true,
    "tokenizer_class": null,
    "top_k": 50,
    "top_p": 1.0,
    "torch_dtype": "float32",
    "torchscript": false,
    "transformers_version": "4.35.0.dev0",
    "typical_p": 1.0,
    "use_bfloat16": false,
    "use_swiglu_ffn": false
  },
  "backbone_featmap_shape": null,
  "backbone_out_indices": null,
  "fusion_hidden_size": 256,
  "head_in_index": -1,
  "hidden_act": "gelu",
  "hidden_dropout_prob": null,
  "hidden_size": 768,
  "image_size": null,
  "initializer_range": 0.02,
  "intermediate_size": null,
  "is_hybrid": false,
  "layer_norm_eps": null,
  "model_type": "dpt",
  "neck_hidden_sizes": [
    96,
    192,
    384,
    768
  ],
  "neck_ignore_stages": [],
  "num_attention_heads": null,
  "num_channels": null,
  "num_hidden_layers": null,
  "patch_size": null,
  "qkv_bias": null,
  "readout_type": "project",
  "reassemble_factors": [
    4,
    2,
    1,
    0.5
  ],
  "semantic_classifier_dropout": 0.1,
  "semantic_loss_ignore_index": 255,
  "torch_dtype": "float32",
  "transformers_version": null,
  "use_auxiliary_head": true,
  "use_batch_norm_in_fusion_residual": false,
  "use_bias_in_fusion_residual": false
}
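The nesting above (a DPT config wrapping a DINOv2 `backbone_config`) can be inspected programmatically. A minimal sketch, assuming the `facebook/dpt-dinov2-base-nyu` checkpoint id from the README and the attribute names used by the `transformers` config classes:

```python
# load the config and read the backbone settings shown above
from transformers import DPTConfig

config = DPTConfig.from_pretrained("facebook/dpt-dinov2-base-nyu")
print(config.model_type)                   # "dpt"
print(config.backbone_config.model_type)   # "dinov2"
print(config.backbone_config.patch_size)   # 14
print(config.backbone_config.out_indices)  # [3, 6, 9, 12]
print(config.neck_hidden_sizes)            # [96, 192, 384, 768]
```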
model.safetensors ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:60460ae14c3abc07875faf44f6769811e0a410c7dfb3d9d5b82df909d3f5ad4a
size 447849116
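This is a Git LFS pointer file, not the weights themselves: the actual ~448 MB `model.safetensors` (roughly 112M float32 parameters, ignoring header overhead) is stored by LFS and addressed by the sha256 oid above. A minimal sketch for verifying a downloaded copy against that oid (the local path is hypothetical):

```python
# compute the sha256 of a local file and compare it to the LFS oid
import hashlib

def sha256_of(path, chunk_size=1 << 20):
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            h.update(chunk)
    return h.hexdigest()

expected = "60460ae14c3abc07875faf44f6769811e0a410c7dfb3d9d5b82df909d3f5ad4a"
print(sha256_of("model.safetensors") == expected)  # True if the download is intact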
preprocessor_config.json ADDED
@@ -0,0 +1,26 @@
{
  "do_normalize": true,
  "do_pad": true,
  "do_rescale": false,
  "do_resize": false,
  "ensure_multiple_of": 1,
  "image_mean": [
    123.675,
    116.28,
    103.53
  ],
  "image_processor_type": "DPTImageProcessor",
  "image_std": [
    58.395,
    57.12,
    57.375
  ],
  "keep_aspect_ratio": false,
  "resample": 2,
  "rescale_factor": 0.00392156862745098,
  "size": {
    "height": 384,
    "width": 384
  },
  "size_divisor": 14
}
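Note that `do_rescale` is false and `image_mean`/`image_std` are on the 0-255 scale, so normalization is applied to raw pixel values; and with `do_resize` false and `do_pad` true, inputs are padded so both spatial dimensions become multiples of `size_divisor` (14, the DINOv2 patch size). A minimal sketch to check this behavior, assuming the checkpoint id from the README:

```python
# run the processor on a dummy image and check the padding
from transformers import AutoImageProcessor
from PIL import Image
import numpy as np

processor = AutoImageProcessor.from_pretrained("facebook/dpt-dinov2-base-nyu")
image = Image.fromarray(np.zeros((480, 640, 3), dtype=np.uint8))
inputs = processor(images=image, return_tensors="pt")

h, w = inputs["pixel_values"].shape[-2:]
print(h % 14 == 0, w % 14 == 0)  # both dims padded to multiples of 14
```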